aboutsummaryrefslogtreecommitdiff
path: root/platform/linux-dpdk
diff options
context:
space:
mode:
Diffstat (limited to 'platform/linux-dpdk')
-rw-r--r--platform/linux-dpdk/.gitignore3
-rw-r--r--platform/linux-dpdk/Makefile.am571
-rw-r--r--platform/linux-dpdk/Makefile.inc10
-rw-r--r--platform/linux-dpdk/README209
l---------platform/linux-dpdk/arch1
l---------platform/linux-dpdk/arch/aarch64/cpu_flags.c1
l---------platform/linux-dpdk/arch/aarch64/cpu_flags.h1
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic.h1
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic_inlines.h1
-rw-r--r--platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu.h26
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu_inlines.h1
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/hash_crc32.h1
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/sync_inlines.h1
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/time_cpu.h1
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h1
l---------platform/linux-dpdk/arch/aarch64/odp_atomic.c1
l---------platform/linux-dpdk/arch/aarch64/odp_atomic.h1
l---------platform/linux-dpdk/arch/aarch64/odp_cpu.h1
l---------platform/linux-dpdk/arch/aarch64/odp_cpu_cycles.c1
l---------platform/linux-dpdk/arch/aarch64/odp_random.h1
l---------platform/linux-dpdk/arch/aarch64/odp_sysinfo_parse.c1
l---------platform/linux-dpdk/arch/aarch64/odp_wait_until.h1
-rw-r--r--platform/linux-dpdk/arch/arm/odp/api/abi/cpu.h26
l---------platform/linux-dpdk/arch/arm/odp/api/abi/cpu_inlines.h1
l---------platform/linux-dpdk/arch/arm/odp_cpu.h1
l---------platform/linux-dpdk/arch/arm/odp_sysinfo_parse.c1
l---------platform/linux-dpdk/arch/default/odp/api/abi/atomic_generic.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/atomic_inlines.h1
-rw-r--r--platform/linux-dpdk/arch/default/odp/api/abi/cpu.h26
l---------platform/linux-dpdk/arch/default/odp/api/abi/cpu_generic.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/cpu_inlines.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/hash_crc32.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/sync_inlines.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h1
l---------platform/linux-dpdk/arch/default/odp_atomic.c1
l---------platform/linux-dpdk/arch/default/odp_atomic.h1
l---------platform/linux-dpdk/arch/default/odp_cpu.h1
l---------platform/linux-dpdk/arch/default/odp_cpu_cycles.c1
l---------platform/linux-dpdk/arch/default/odp_hash_crc32.c1
l---------platform/linux-dpdk/arch/default/odp_random.c1
l---------platform/linux-dpdk/arch/default/odp_random.h1
l---------platform/linux-dpdk/arch/default/odp_sysinfo_parse.c1
l---------platform/linux-dpdk/arch/default/odp_wait_until.h1
-rw-r--r--platform/linux-dpdk/arch/powerpc/odp/api/abi/cpu.h26
l---------platform/linux-dpdk/arch/powerpc/odp_sysinfo_parse.c1
l---------platform/linux-dpdk/arch/x86/cpu_flags.c1
l---------platform/linux-dpdk/arch/x86/cpu_flags.h1
-rw-r--r--platform/linux-dpdk/arch/x86/odp/api/abi/cpu.h26
l---------platform/linux-dpdk/arch/x86/odp/api/abi/cpu_inlines.h1
l---------platform/linux-dpdk/arch/x86/odp/api/abi/cpu_rdtsc.h1
l---------platform/linux-dpdk/arch/x86/odp/api/abi/hash_crc32.h1
l---------platform/linux-dpdk/arch/x86/odp/api/abi/sync_inlines.h1
-rw-r--r--platform/linux-dpdk/arch/x86/odp/api/abi/time_cpu.h18
l---------platform/linux-dpdk/arch/x86/odp_cpu.h1
l---------platform/linux-dpdk/arch/x86/odp_cpu_cycles.c1
l---------platform/linux-dpdk/arch/x86/odp_random.h1
l---------platform/linux-dpdk/arch/x86/odp_sysinfo_parse.c1
l---------platform/linux-dpdk/check-globals.sh1
-rw-r--r--platform/linux-dpdk/doc/platform_specific.dox4
-rw-r--r--platform/linux-dpdk/dumpconfig/.gitignore1
-rw-r--r--platform/linux-dpdk/dumpconfig/Makefile.am10
-rw-r--r--platform/linux-dpdk/example/Makefile.am5
-rw-r--r--platform/linux-dpdk/example/ml/.gitignore5
-rw-r--r--platform/linux-dpdk/example/ml/Makefile.am54
l---------platform/linux-dpdk/example/ml/README.md1
l---------platform/linux-dpdk/example/ml/example_digit.csv1
l---------platform/linux-dpdk/example/ml/mnist-12.onnx1
l---------platform/linux-dpdk/example/ml/odp_ml_run_mnist.sh1
l---------platform/linux-dpdk/example/ml/odp_ml_run_model_explorer.sh1
l---------platform/linux-dpdk/example/ml/odp_ml_run_simple_linear.sh1
l---------platform/linux-dpdk/example/ml/simple_linear.onnx1
l---------platform/linux-dpdk/include-abi/odp/api/abi/align.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/atomic.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/barrier.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/buffer.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/buffer_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/byteorder.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/classification.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/comp.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/cpumask.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/crypto.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/crypto_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/debug.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/dma.h1
-rw-r--r--platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h40
l---------platform/linux-dpdk/include-abi/odp/api/abi/errno.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/event.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/event_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/hash.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/init.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/ipsec.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/ml_types.h1
-rw-r--r--platform/linux-dpdk/include-abi/odp/api/abi/packet.h27
l---------platform/linux-dpdk/include-abi/odp/api/abi/packet_flags.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/packet_io.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/packet_io_types.h1
-rw-r--r--platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h106
l---------platform/linux-dpdk/include-abi/odp/api/abi/pool.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/pool_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/queue.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/random.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/rwlock.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/rwlock_recursive.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/schedule.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/schedule_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/shared_memory.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/spinlock.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/spinlock_recursive.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/stash.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/stash_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/std.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/std_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/sync.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/thread.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/thrmask.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/ticketlock.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/time.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/time_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/timer.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/traffic_mngr.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/version.h1
l---------platform/linux-dpdk/include/odp/api/align.h1
l---------platform/linux-dpdk/include/odp/api/atomic.h1
l---------platform/linux-dpdk/include/odp/api/barrier.h1
l---------platform/linux-dpdk/include/odp/api/buffer.h1
l---------platform/linux-dpdk/include/odp/api/byteorder.h1
l---------platform/linux-dpdk/include/odp/api/classification.h1
l---------platform/linux-dpdk/include/odp/api/compiler.h1
l---------platform/linux-dpdk/include/odp/api/cpu.h1
l---------platform/linux-dpdk/include/odp/api/cpumask.h1
l---------platform/linux-dpdk/include/odp/api/crypto.h1
l---------platform/linux-dpdk/include/odp/api/debug.h1
l---------platform/linux-dpdk/include/odp/api/errno.h1
l---------platform/linux-dpdk/include/odp/api/event.h1
l---------platform/linux-dpdk/include/odp/api/hash.h1
l---------platform/linux-dpdk/include/odp/api/hints.h1
l---------platform/linux-dpdk/include/odp/api/init.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/packet.h46
-rw-r--r--platform/linux-dpdk/include/odp/api/packet_flags.h31
l---------platform/linux-dpdk/include/odp/api/packet_io.h1
l---------platform/linux-dpdk/include/odp/api/packet_io_stats.h1
l---------platform/linux-dpdk/include/odp/api/plat/atomic_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/barrier_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/buffer_inline_types.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h156
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/buffer_types.h50
-rw-r--r--[l---------]platform/linux-dpdk/include/odp/api/plat/byteorder_inlines.h117
l---------platform/linux-dpdk/include/odp/api/plat/byteorder_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/classification_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/cpu_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/cpumask_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/crypto_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/crypto_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/debug_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/dma_inlines.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/event_inline_types.h46
l---------platform/linux-dpdk/include/odp/api/plat/event_inlines.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/event_types.h57
l---------platform/linux-dpdk/include/odp/api/plat/event_validation_external.h1
l---------platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/hash_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/init_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h269
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines_api.h46
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h174
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_inlines.h786
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_inlines_api.h118
l---------platform/linux-dpdk/include/odp/api/plat/packet_io_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/packet_io_types.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/packet_types.h149
l---------platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/pool_inline_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/pool_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/pool_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/queue_inline_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/queue_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/queue_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/rwlock_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/rwlock_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/schedule_inline_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/schedule_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/schedule_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/shared_memory_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/spinlock_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/spinlock_types.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/static_inline.h.in43
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h38
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/std_inlines.h56
l---------platform/linux-dpdk/include/odp/api/plat/thread_inline_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/thread_inlines.h1
l---------platform/linux-dpdk/include/odp/api/plat/thread_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/thrmask_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/ticketlock_inlines_api.h1
l---------platform/linux-dpdk/include/odp/api/plat/ticketlock_types.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/time_inlines.h260
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/time_types.h43
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h48
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/timer_inlines.h123
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/timer_types.h51
l---------platform/linux-dpdk/include/odp/api/plat/traffic_mngr_types.h1
l---------platform/linux-dpdk/include/odp/api/plat/version_types.h1
l---------platform/linux-dpdk/include/odp/api/pool.h1
l---------platform/linux-dpdk/include/odp/api/queue.h1
l---------platform/linux-dpdk/include/odp/api/random.h1
l---------platform/linux-dpdk/include/odp/api/rwlock.h1
l---------platform/linux-dpdk/include/odp/api/rwlock_recursive.h1
l---------platform/linux-dpdk/include/odp/api/schedule.h1
l---------platform/linux-dpdk/include/odp/api/schedule_types.h1
l---------platform/linux-dpdk/include/odp/api/shared_memory.h1
l---------platform/linux-dpdk/include/odp/api/spinlock.h1
l---------platform/linux-dpdk/include/odp/api/spinlock_recursive.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/std_clib.h28
l---------platform/linux-dpdk/include/odp/api/std_types.h1
l---------platform/linux-dpdk/include/odp/api/sync.h1
l---------platform/linux-dpdk/include/odp/api/system_info.h1
l---------platform/linux-dpdk/include/odp/api/thread.h1
l---------platform/linux-dpdk/include/odp/api/thrmask.h1
l---------platform/linux-dpdk/include/odp/api/ticketlock.h1
l---------platform/linux-dpdk/include/odp/api/time.h1
l---------platform/linux-dpdk/include/odp/api/timer.h1
l---------platform/linux-dpdk/include/odp/api/traffic_mngr.h1
l---------platform/linux-dpdk/include/odp/api/version.h1
l---------platform/linux-dpdk/include/odp/drv1
l---------platform/linux-dpdk/include/odp/visibility_begin.h1
l---------platform/linux-dpdk/include/odp/visibility_end.h1
-rw-r--r--platform/linux-dpdk/include/odp_buffer_inlines.h46
-rw-r--r--platform/linux-dpdk/include/odp_buffer_internal.h115
-rw-r--r--platform/linux-dpdk/include/odp_config_internal.h144
-rw-r--r--platform/linux-dpdk/include/odp_errno_define.h26
-rw-r--r--platform/linux-dpdk/include/odp_event_internal.h104
-rw-r--r--platform/linux-dpdk/include/odp_event_vector_internal.h84
-rw-r--r--platform/linux-dpdk/include/odp_eventdev_internal.h194
-rw-r--r--platform/linux-dpdk/include/odp_packet_dpdk.h49
-rw-r--r--platform/linux-dpdk/include/odp_packet_internal.h400
-rw-r--r--platform/linux-dpdk/include/odp_packet_io_internal.h292
-rw-r--r--platform/linux-dpdk/include/odp_pool_internal.h158
-rw-r--r--platform/linux-dpdk/include/odp_ptr_ring_mpmc_internal.h100
-rw-r--r--platform/linux-dpdk/include/odp_ptr_ring_spsc_internal.h107
-rw-r--r--platform/linux-dpdk/include/odp_ptr_ring_st_internal.h99
-rw-r--r--platform/linux-dpdk/include/odp_queue_basic_internal.h126
-rw-r--r--platform/linux-dpdk/include/odp_shm_internal.h34
-rw-r--r--platform/linux-dpdk/include/odp_thread_internal.h36
-rw-r--r--platform/linux-dpdk/include/odp_time_internal.h44
-rw-r--r--platform/linux-dpdk/include/odp_timer_internal.h76
-rw-r--r--platform/linux-dpdk/libodp-dpdk.pc.in12
-rw-r--r--platform/linux-dpdk/m4/configure.m4186
l---------platform/linux-dpdk/m4/odp_cpu.m41
l---------platform/linux-dpdk/m4/odp_event_validation.m41
-rw-r--r--platform/linux-dpdk/m4/odp_libconfig.m436
l---------platform/linux-dpdk/m4/odp_ml.m41
l---------platform/linux-dpdk/m4/odp_pcapng.m41
l---------platform/linux-dpdk/m4/odp_pthread.m41
l---------platform/linux-dpdk/m4/odp_scheduler.m41
l---------platform/linux-dpdk/m4/odp_wfe.m41
-rw-r--r--platform/linux-dpdk/odp_buffer.c110
-rw-r--r--platform/linux-dpdk/odp_crypto.c2215
-rw-r--r--platform/linux-dpdk/odp_dma.c1174
-rw-r--r--platform/linux-dpdk/odp_errno.c9
-rw-r--r--platform/linux-dpdk/odp_event.c134
-rw-r--r--platform/linux-dpdk/odp_init.c995
-rw-r--r--platform/linux-dpdk/odp_packet.c2031
-rw-r--r--platform/linux-dpdk/odp_packet_dpdk.c1814
-rw-r--r--platform/linux-dpdk/odp_packet_flags.c232
-rw-r--r--platform/linux-dpdk/odp_pool.c1704
-rw-r--r--platform/linux-dpdk/odp_queue_basic.c1257
-rw-r--r--platform/linux-dpdk/odp_queue_eventdev.c1359
-rw-r--r--platform/linux-dpdk/odp_queue_if.c146
-rw-r--r--platform/linux-dpdk/odp_queue_spsc.c95
-rw-r--r--platform/linux-dpdk/odp_schedule_eventdev.c1131
-rw-r--r--platform/linux-dpdk/odp_schedule_if.c175
-rw-r--r--platform/linux-dpdk/odp_shared_memory.c590
-rw-r--r--platform/linux-dpdk/odp_std_api.c11
-rw-r--r--platform/linux-dpdk/odp_std_clib.c38
-rw-r--r--platform/linux-dpdk/odp_system_info.c559
-rw-r--r--platform/linux-dpdk/odp_thread.c194
-rw-r--r--platform/linux-dpdk/odp_time.c356
-rw-r--r--platform/linux-dpdk/odp_timer.c1433
-rw-r--r--platform/linux-dpdk/test/.gitignore3
-rw-r--r--platform/linux-dpdk/test/Makefile.am48
-rw-r--r--platform/linux-dpdk/test/crypto.conf8
-rw-r--r--platform/linux-dpdk/test/default-timer.conf8
-rw-r--r--platform/linux-dpdk/test/example/Makefile.am11
-rw-r--r--platform/linux-dpdk/test/example/classifier/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/classifier/pktio_env47
-rw-r--r--platform/linux-dpdk/test/example/generator/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/generator/pktio_env34
-rw-r--r--platform/linux-dpdk/test/example/ipsec_api/Makefile.am21
-rw-r--r--platform/linux-dpdk/test/example/ipsec_api/pktio_env72
-rw-r--r--platform/linux-dpdk/test/example/ipsec_crypto/Makefile.am21
-rw-r--r--platform/linux-dpdk/test/example/ipsec_crypto/pktio_env72
-rw-r--r--platform/linux-dpdk/test/example/l2fwd_simple/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/l2fwd_simple/pktio_env54
-rw-r--r--platform/linux-dpdk/test/example/l3fwd/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/l3fwd/pktio_env57
-rw-r--r--platform/linux-dpdk/test/example/packet/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/packet/pktio_env55
-rw-r--r--platform/linux-dpdk/test/example/ping/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/ping/pktio_env54
-rw-r--r--platform/linux-dpdk/test/example/simple_pipeline/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/simple_pipeline/pktio_env52
-rw-r--r--platform/linux-dpdk/test/example/switch/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/example/switch/pktio_env62
-rw-r--r--platform/linux-dpdk/test/performance/Makefile.am1
-rw-r--r--platform/linux-dpdk/test/performance/dmafwd/Makefile.am18
-rw-r--r--platform/linux-dpdk/test/performance/dmafwd/pktio_env59
-rw-r--r--platform/linux-dpdk/test/process-mode.conf7
-rw-r--r--platform/linux-dpdk/test/sched-basic.conf14
-rw-r--r--platform/linux-dpdk/test/stash-custom.conf8
-rw-r--r--platform/linux-dpdk/test/validation/api/Makefile.inc1
-rw-r--r--platform/linux-dpdk/test/validation/api/ml/.gitignore1
-rw-r--r--platform/linux-dpdk/test/validation/api/ml/Makefile.am29
l---------platform/linux-dpdk/test/validation/api/ml/README.md1
l---------platform/linux-dpdk/test/validation/api/ml/batch_add.onnx1
l---------platform/linux-dpdk/test/validation/api/ml/batch_add_gen.py1
l---------platform/linux-dpdk/test/validation/api/ml/gen_models.sh1
l---------platform/linux-dpdk/test/validation/api/ml/requirements.txt1
l---------platform/linux-dpdk/test/validation/api/ml/simple_linear.onnx1
l---------platform/linux-dpdk/test/validation/api/ml/simple_linear_gen.py1
l---------platform/linux-dpdk/test/validation/api/pktio/.gitignore1
-rw-r--r--platform/linux-dpdk/test/validation/api/pktio/Makefile.am24
l---------platform/linux-dpdk/test/validation/api/pktio/pktio_env1
-rwxr-xr-xplatform/linux-dpdk/test/validation/api/pktio/pktio_run.sh105
-rwxr-xr-xplatform/linux-dpdk/test/wrapper-script.sh70
335 files changed, 20989 insertions, 4405 deletions
diff --git a/platform/linux-dpdk/.gitignore b/platform/linux-dpdk/.gitignore
index 909756a1f..ef6cd5af8 100644
--- a/platform/linux-dpdk/.gitignore
+++ b/platform/linux-dpdk/.gitignore
@@ -1 +1,2 @@
-include/odp/api/plat/static_inline.h
+libodp-dpdk.pc
+odp_libconfig_config.h
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 9ffd6fd00..0bfe35092 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -1,228 +1,439 @@
include $(top_srcdir)/platform/Makefile.inc
+if PLATFORM_IS_LINUX_DPDK
include $(top_srcdir)/platform/@with_platform@/Makefile.inc
-
-PLAT_CFLAGS =
-if ARCH_X86
-PLAT_CFLAGS += -msse4.2
endif
+lib_LTLIBRARIES += $(LIB)/libodp-dpdk.la
-if DPDK_DEFAULT_DIR
-PLAT_CFLAGS += -include /usr/include/dpdk/rte_config.h
-else
-PLAT_CFLAGS += -include $(SDK_INSTALL_PATH)/include/rte_config.h
-endif
+AM_CPPFLAGS = $(ODP_INCLUDES)
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_builddir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_srcdir)/platform/linux-generic/include
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/@ARCH_DIR@
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/default
+
+AM_CPPFLAGS += $(OPENSSL_CPPFLAGS)
+AM_CPPFLAGS += $(ORT_CPPFLAGS)
-AM_CFLAGS += $(PLAT_CFLAGS)
-AM_CFLAGS += -I$(srcdir)/include
-AM_CFLAGS += -I$(top_srcdir)/platform/linux-generic/include
-AM_CFLAGS += -I$(top_srcdir)/include/odp/arch/@ARCH_ABI@
-AM_CFLAGS += -I$(top_srcdir)/include
-AM_CFLAGS += -I$(top_builddir)/include
-AM_CFLAGS += -Iinclude
-
-include_HEADERS = \
- $(top_srcdir)/include/odp.h \
- $(top_srcdir)/include/odp_api.h
-
-odpincludedir= $(includedir)/odp
-odpinclude_HEADERS = \
- $(srcdir)/include/odp/visibility_begin.h \
- $(srcdir)/include/odp/visibility_end.h
-
-odpapiincludedir= $(includedir)/odp/api
-odpapiinclude_HEADERS = \
- $(srcdir)/include/odp/api/align.h \
- $(srcdir)/include/odp/api/atomic.h \
- $(srcdir)/include/odp/api/barrier.h \
- $(srcdir)/include/odp/api/buffer.h \
- $(srcdir)/include/odp/api/byteorder.h \
- $(srcdir)/include/odp/api/classification.h \
- $(srcdir)/include/odp/api/compiler.h \
- $(srcdir)/include/odp/api/cpu.h \
- $(srcdir)/include/odp/api/cpumask.h \
- $(srcdir)/include/odp/api/crypto.h \
- $(srcdir)/include/odp/api/debug.h \
- $(srcdir)/include/odp/api/errno.h \
- $(srcdir)/include/odp/api/event.h \
- $(srcdir)/include/odp/api/hash.h \
- $(srcdir)/include/odp/api/hints.h \
- $(srcdir)/include/odp/api/init.h \
- $(srcdir)/include/odp/api/packet_flags.h \
- $(srcdir)/include/odp/api/packet.h \
- $(srcdir)/include/odp/api/packet_io.h \
- $(srcdir)/include/odp/api/packet_io_stats.h \
- $(srcdir)/include/odp/api/pool.h \
- $(srcdir)/include/odp/api/queue.h \
- $(srcdir)/include/odp/api/random.h \
- $(srcdir)/include/odp/api/rwlock.h \
- $(srcdir)/include/odp/api/rwlock_recursive.h \
- $(srcdir)/include/odp/api/schedule.h \
- $(srcdir)/include/odp/api/schedule_types.h \
- $(srcdir)/include/odp/api/shared_memory.h \
- $(srcdir)/include/odp/api/spinlock.h \
- $(srcdir)/include/odp/api/spinlock_recursive.h \
- $(srcdir)/include/odp/api/std_clib.h \
- $(srcdir)/include/odp/api/std_types.h \
- $(srcdir)/include/odp/api/sync.h \
- $(srcdir)/include/odp/api/system_info.h \
- $(srcdir)/include/odp/api/thread.h \
- $(srcdir)/include/odp/api/thrmask.h \
- $(srcdir)/include/odp/api/ticketlock.h \
- $(srcdir)/include/odp/api/time.h \
- $(srcdir)/include/odp/api/timer.h \
- $(srcdir)/include/odp/api/traffic_mngr.h \
- $(srcdir)/include/odp/api/version.h \
- $(srcdir)/arch/@ARCH_DIR@/odp/api/cpu_arch.h
+AM_CFLAGS += $(DPDK_CFLAGS)
+AM_CFLAGS += $(LIBCONFIG_CFLAGS)
+DISTCLEANFILES = include/odp_libconfig_config.h
+include/odp_libconfig_config.h: $(top_builddir)/$(rel_default_config_path) $(top_builddir)/config.status
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+
+odpapiabiarchincludedir = $(archincludedir)/odp/api/abi
+odpapiabiarchinclude_HEADERS =
+
+if !ODP_ABI_COMPAT
odpapiplatincludedir= $(includedir)/odp/api/plat
odpapiplatinclude_HEADERS = \
- $(builddir)/include/odp/api/plat/static_inline.h \
- $(srcdir)/include/odp/api/plat/atomic_inlines.h \
- $(srcdir)/include/odp/api/plat/atomic_types.h \
- $(srcdir)/include/odp/api/plat/barrier_types.h \
- $(srcdir)/include/odp/api/plat/buffer_types.h \
- $(srcdir)/include/odp/api/plat/byteorder_inlines.h \
- $(srcdir)/include/odp/api/plat/byteorder_types.h \
- $(srcdir)/include/odp/api/plat/classification_types.h \
- $(srcdir)/include/odp/api/plat/cpumask_types.h \
- $(srcdir)/include/odp/api/plat/crypto_types.h \
- $(srcdir)/include/odp/api/plat/event_types.h \
- $(srcdir)/include/odp/api/plat/init_types.h \
- $(srcdir)/include/odp/api/plat/packet_flag_inlines.h \
- $(srcdir)/include/odp/api/plat/packet_flag_inlines_api.h \
- $(srcdir)/include/odp/api/plat/packet_inlines.h \
- $(srcdir)/include/odp/api/plat/packet_inlines_api.h \
- $(srcdir)/include/odp/api/plat/packet_io_types.h \
- $(srcdir)/include/odp/api/plat/packet_types.h \
- $(srcdir)/include/odp/api/plat/pool_types.h \
- $(srcdir)/include/odp/api/plat/queue_types.h \
- $(srcdir)/include/odp/api/plat/rwlock_types.h \
- $(srcdir)/include/odp/api/plat/rwlock_recursive_types.h \
- $(srcdir)/include/odp/api/plat/schedule_types.h \
- $(srcdir)/include/odp/api/plat/shared_memory_types.h \
- $(srcdir)/include/odp/api/plat/spinlock_types.h \
- $(srcdir)/include/odp/api/plat/spinlock_recursive_types.h \
- $(srcdir)/include/odp/api/plat/std_clib_inlines.h \
- $(srcdir)/include/odp/api/plat/strong_types.h \
- $(srcdir)/include/odp/api/plat/sync_inlines.h \
- $(srcdir)/include/odp/api/plat/thread_types.h \
- $(srcdir)/include/odp/api/plat/thrmask_types.h \
- $(srcdir)/include/odp/api/plat/ticketlock_inlines.h \
- $(srcdir)/include/odp/api/plat/ticketlock_inlines_api.h \
- $(srcdir)/include/odp/api/plat/ticketlock_types.h \
- $(srcdir)/include/odp/api/plat/time_types.h \
- $(srcdir)/include/odp/api/plat/timer_types.h \
- $(srcdir)/include/odp/api/plat/traffic_mngr_types.h \
- $(srcdir)/include/odp/api/plat/version_types.h
-
-odpdrvincludedir = $(includedir)/odp/drv
-odpdrvinclude_HEADERS = \
- $(srcdir)/include/odp/drv/compiler.h
+ include/odp/api/plat/atomic_inlines.h \
+ include/odp/api/plat/buffer_inlines.h \
+ include/odp/api/plat/buffer_inline_types.h \
+ include/odp/api/plat/byteorder_inlines.h \
+ include/odp/api/plat/cpu_inlines.h \
+ include/odp/api/plat/crypto_inlines.h \
+ include/odp/api/plat/dma_inlines.h \
+ include/odp/api/plat/debug_inlines.h \
+ include/odp/api/plat/event_inlines.h \
+ include/odp/api/plat/event_inline_types.h \
+ include/odp/api/plat/event_validation_external.h \
+ include/odp/api/plat/event_vector_inline_types.h \
+ include/odp/api/plat/hash_inlines.h \
+ include/odp/api/plat/ipsec_inlines.h \
+ include/odp/api/plat/packet_flag_inlines.h \
+ include/odp/api/plat/packet_inline_types.h \
+ include/odp/api/plat/packet_inlines.h \
+ include/odp/api/plat/packet_vector_inlines.h \
+ include/odp/api/plat/packet_io_inlines.h \
+ include/odp/api/plat/pool_inlines.h \
+ include/odp/api/plat/pool_inline_types.h \
+ include/odp/api/plat/queue_inlines.h \
+ include/odp/api/plat/queue_inline_types.h \
+ include/odp/api/plat/schedule_inlines.h \
+ include/odp/api/plat/schedule_inline_types.h \
+ include/odp/api/plat/rwlock_inlines.h \
+ include/odp/api/plat/rwlock_recursive_inlines.h \
+ include/odp/api/plat/spinlock_inlines.h \
+ include/odp/api/plat/spinlock_recursive_inlines.h \
+ include/odp/api/plat/std_inlines.h \
+ include/odp/api/plat/strong_types.h \
+ include/odp/api/plat/sync_inlines.h \
+ include/odp/api/plat/thread_inlines.h \
+ include/odp/api/plat/thread_inline_types.h \
+ include/odp/api/plat/ticketlock_inlines.h \
+ include/odp/api/plat/time_inlines.h \
+ include/odp/api/plat/timer_inlines.h \
+ include/odp/api/plat/timer_inline_types.h
+
+odpapiabiarchinclude_HEADERS += \
+ include-abi/odp/api/abi/align.h \
+ include-abi/odp/api/abi/atomic.h \
+ include-abi/odp/api/abi/barrier.h \
+ include-abi/odp/api/abi/buffer.h \
+ include-abi/odp/api/abi/buffer_types.h \
+ include-abi/odp/api/abi/byteorder.h \
+ include-abi/odp/api/abi/classification.h \
+ include-abi/odp/api/abi/comp.h \
+ include-abi/odp/api/abi/cpumask.h \
+ include-abi/odp/api/abi/crypto.h \
+ include-abi/odp/api/abi/crypto_types.h \
+ include-abi/odp/api/abi/debug.h \
+ include-abi/odp/api/abi/dma.h \
+ include-abi/odp/api/abi/dma_types.h \
+ include-abi/odp/api/abi/errno.h \
+ include-abi/odp/api/abi/event.h \
+ include-abi/odp/api/abi/event_types.h \
+ include-abi/odp/api/abi/hash.h \
+ include-abi/odp/api/abi/init.h \
+ include-abi/odp/api/abi/ipsec.h \
+ include-abi/odp/api/abi/ipsec_types.h \
+ include-abi/odp/api/abi/ml_types.h \
+ include-abi/odp/api/abi/packet.h \
+ include-abi/odp/api/abi/packet_types.h \
+ include-abi/odp/api/abi/packet_flags.h \
+ include-abi/odp/api/abi/packet_io.h \
+ include-abi/odp/api/abi/packet_io_types.h \
+ include-abi/odp/api/abi/proto_stats.h \
+ include-abi/odp/api/abi/proto_stats_types.h \
+ include-abi/odp/api/abi/pool.h \
+ include-abi/odp/api/abi/pool_types.h \
+ include-abi/odp/api/abi/queue.h \
+ include-abi/odp/api/abi/queue_types.h \
+ include-abi/odp/api/abi/random.h \
+ include-abi/odp/api/abi/rwlock.h \
+ include-abi/odp/api/abi/rwlock_recursive.h \
+ include-abi/odp/api/abi/schedule.h \
+ include-abi/odp/api/abi/schedule_types.h \
+ include-abi/odp/api/abi/shared_memory.h \
+ include-abi/odp/api/abi/spinlock.h \
+ include-abi/odp/api/abi/spinlock_recursive.h \
+ include-abi/odp/api/abi/stash.h \
+ include-abi/odp/api/abi/stash_types.h \
+ include-abi/odp/api/abi/std.h \
+ include-abi/odp/api/abi/std_types.h \
+ include-abi/odp/api/abi/sync.h \
+ include-abi/odp/api/abi/thread.h \
+ include-abi/odp/api/abi/thread_types.h \
+ include-abi/odp/api/abi/thrmask.h \
+ include-abi/odp/api/abi/ticketlock.h \
+ include-abi/odp/api/abi/time.h \
+ include-abi/odp/api/abi/time_types.h \
+ include-abi/odp/api/abi/timer.h \
+ include-abi/odp/api/abi/timer_types.h \
+ include-abi/odp/api/abi/traffic_mngr.h \
+ include-abi/odp/api/abi/version.h
+endif
noinst_HEADERS = \
- ${top_srcdir}/platform/linux-generic/include/_fdserver_internal.h \
- ${top_srcdir}/platform/linux-generic/include/_ishm_internal.h \
- ${top_srcdir}/platform/linux-generic/include/_ishmphy_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_align_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_atomic_internal.h \
- ${srcdir}/include/odp_buffer_inlines.h \
- ${srcdir}/include/odp_buffer_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_bitmap_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_bitset.h \
+ include/odp_buffer_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_chksum_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_internal.h \
- ${srcdir}/include/odp_config_internal.h \
+ include/odp_config_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_debug_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_datamodel.h \
- ${top_srcdir}/platform/linux-generic/include/odp_classification_inlines.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_crypto_internal.h \
+ include/odp_eventdev_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_forward_typedefs_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_internal.h \
- ${srcdir}/include/odp_packet_dpdk.h \
- ${srcdir}/include/odp_packet_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ml_fp16.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_global_data.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_init_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ipsec_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_libconfig_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_llqueue.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_macros_internal.h \
+ include/odp_packet_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_name_table_internal.h \
- ${srcdir}/include/odp_packet_io_internal.h \
- ${srcdir}/include/odp_errno_define.h \
- ${top_srcdir}/platform/linux-generic/include/odp_packet_io_ipc_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_packet_io_queue.h \
- ${top_srcdir}/platform/linux-generic/include/odp_packet_io_ring_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_packet_socket.h \
+ include/odp_packet_io_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_parse_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_print_internal.h \
+ include/odp_errno_define.h \
+ include/odp_event_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_event_validation_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_packet_dpdk.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_pcapng.h \
${top_srcdir}/platform/linux-generic/include/odp_pkt_queue_internal.h \
- ${srcdir}/include/odp_pool_internal.h \
- ${srcdir}/include/odp_posix_extensions.h \
- ${top_srcdir}/platform/linux-generic/include/odp_queue_internal.h \
+ include/odp_pool_internal.h \
+ include/odp_posix_extensions.h \
+ include/odp_queue_basic_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_queue_if.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_queue_lf.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_random_std_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_random_openssl_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_common.h \
${top_srcdir}/platform/linux-generic/include/odp_ring_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_mpmc_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_ptr_internal.h \
+ include/odp_ptr_ring_mpmc_internal.h \
+ include/odp_ptr_ring_spsc_internal.h \
+ include/odp_ptr_ring_st_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_u32_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_ring_u64_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_schedule_if.h \
${top_srcdir}/platform/linux-generic/include/odp_sorted_list_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_shm_internal.h \
- ${srcdir}/include/odp_time_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_timer_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_sysinfo_internal.h \
+ include/odp_shm_internal.h \
+ include/odp_thread_internal.h \
+ include/odp_timer_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_timer_wheel_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_traffic_mngr_internal.h \
- ${srcdir}/include/protocols/eth.h \
- ${srcdir}/include/protocols/ip.h \
- ${srcdir}/include/protocols/ipsec.h \
- ${srcdir}/include/protocols/tcp.h \
- ${srcdir}/include/protocols/udp.h \
- ${srcdir}/Makefile.inc
+ ${top_srcdir}/platform/linux-generic/include/odp_types_internal.h \
+ include/odp_event_vector_internal.h \
+ include/protocols/eth.h \
+ include/protocols/ip.h \
+ include/protocols/ipsec.h \
+ include/protocols/sctp.h \
+ include/protocols/tcp.h \
+ include/protocols/thash.h \
+ include/protocols/udp.h \
+ Makefile.inc
+
+BUILT_SOURCES = \
+ include/odp_libconfig_config.h
__LIB__libodp_dpdk_la_SOURCES = \
- ../linux-generic/_fdserver.c \
- ../linux-generic/_ishm.c \
- ../linux-generic/_ishmphy.c \
- ../linux-generic/odp_atomic.c \
../linux-generic/odp_barrier.c \
- ../linux-generic/odp_bitmap.c \
odp_buffer.c \
- ../linux-generic/odp_byteorder.c \
+ ../linux-generic/odp_chksum.c \
../linux-generic/odp_classification.c \
- ../linux-generic/odp_cpu.c \
+ ../linux-generic/odp_comp.c \
+ ../linux-generic/miniz/miniz.c ../linux-generic/miniz/miniz.h ../linux-generic/miniz/miniz_common.h \
+ ../linux-generic/miniz/miniz_tdef.c ../linux-generic/miniz/miniz_tdef.h \
+ ../linux-generic/miniz/miniz_tinfl.c ../linux-generic/miniz/miniz_tinfl.h \
../linux-generic/odp_cpumask.c \
../linux-generic/odp_cpumask_task.c \
- ../linux-generic/odp_crypto.c \
+ odp_dma.c \
+ odp_crypto.c \
odp_errno.c \
- ../linux-generic/odp_event.c \
- ../linux-generic/odp_hash.c \
+ odp_event.c \
+ ../linux-generic/odp_event_validation.c \
+ ../linux-generic/odp_hash_crc_gen.c \
odp_init.c \
../linux-generic/odp_impl.c \
+ ../linux-generic/odp_ipsec.c \
+ ../linux-generic/odp_ipsec_events.c \
+ ../linux-generic/odp_ipsec_sad.c \
../linux-generic/odp_name_table.c \
+ ../linux-generic/odp_libconfig.c \
+ ../linux-generic/odp_ml_fp16.c \
+ ../linux-generic/odp_ml_quantize.c \
odp_packet.c \
odp_packet_dpdk.c \
+ ../linux-generic/odp_packet_vector.c \
odp_packet_flags.c \
../linux-generic/odp_packet_io.c \
+ ../linux-generic/odp_parse.c \
../linux-generic/pktio/loop.c \
+ ../linux-generic/pktio/null.c \
../linux-generic/odp_pkt_queue.c \
+ ../linux-generic/odp_print.c \
odp_pool.c \
- ../linux-generic/odp_queue.c \
- ../linux-generic/odp_rwlock.c \
- ../linux-generic/odp_rwlock_recursive.c \
- ../linux-generic/odp_schedule.c \
- ../linux-generic/odp_schedule_if.c \
- ../linux-generic/odp_schedule_iquery.c \
- ../linux-generic/odp_shared_memory.c \
+ odp_queue_basic.c \
+ odp_queue_eventdev.c \
+ odp_queue_if.c \
+ ../linux-generic/odp_queue_lf.c \
+ odp_queue_spsc.c \
+ ../linux-generic/odp_random.c \
+ ../linux-generic/odp_random_std.c \
+ ../linux-generic/odp_random_openssl.c \
+ ../linux-generic/odp_schedule_basic.c \
+ odp_schedule_eventdev.c \
+ odp_schedule_if.c \
+ ../linux-generic/odp_schedule_sp.c \
+ odp_shared_memory.c \
../linux-generic/odp_sorted_list.c \
- ../linux-generic/odp_spinlock.c \
- ../linux-generic/odp_spinlock_recursive.c \
- odp_std_clib.c \
- ../linux-generic/odp_sync.c \
- ../linux-generic/odp_system_info.c \
+ ../linux-generic/odp_stash.c \
+ ../linux-generic/odp_std.c \
+ odp_system_info.c \
+ ../linux-generic/odp_pcapng.c \
odp_thread.c \
../linux-generic/odp_thrmask.c \
- ../linux-generic/odp_ticketlock.c \
odp_time.c \
- ../linux-generic/odp_timer.c \
+ odp_timer.c \
../linux-generic/odp_timer_wheel.c \
../linux-generic/odp_traffic_mngr.c \
../linux-generic/odp_version.c \
- ../linux-generic/odp_weak.c \
- arch/@ARCH_DIR@/odp_cpu_arch.c \
- arch/@ARCH_DIR@/odp_sysinfo_parse.c
-
-# Create symlink for ABI header files. Application does not need to use the arch
-# specific include path for installed files.
-install-data-hook:
- if [ -h $(prefix)/include/odp/api/abi ]; then \
- : \
- else \
- $(LN_S) -rf $(prefix)/include/odp/arch/@ARCH_ABI@/odp/api/abi \
- $(prefix)/include/odp/api/abi; \
- fi
+ ../linux-generic/odp_weak.c
+
+if WITH_ML
+__LIB__libodp_dpdk_la_SOURCES += \
+ ../linux-generic/odp_ml.c
+else
+__LIB__libodp_dpdk_la_SOURCES += \
+ ../linux-generic/odp_ml_null.c
+endif
+
+if ODP_ABI_COMPAT
+__LIB__libodp_dpdk_la_SOURCES += \
+ ../linux-generic/odp_atomic_api.c \
+ ../linux-generic/odp_buffer_api.c \
+ ../linux-generic/odp_byteorder_api.c \
+ ../linux-generic/odp_cpu_api.c \
+ ../linux-generic/odp_crypto_api.c \
+ ../linux-generic/odp_dma_api.c \
+ ../linux-generic/odp_event_api.c \
+ ../linux-generic/odp_hash_api.c \
+ ../linux-generic/odp_ipsec_api.c \
+ ../linux-generic/odp_packet_api.c \
+ ../linux-generic/odp_packet_flags_api.c \
+ ../linux-generic/odp_packet_io_api.c \
+ ../linux-generic/odp_pool_api.c \
+ ../linux-generic/odp_queue_api.c \
+ ../linux-generic/odp_schedule_api.c \
+ ../linux-generic/odp_rwlock_api.c \
+ ../linux-generic/odp_rwlock_recursive_api.c \
+ ../linux-generic/odp_spinlock_api.c \
+ ../linux-generic/odp_spinlock_recursive_api.c \
+ odp_std_api.c \
+ ../linux-generic/odp_sync_api.c \
+ ../linux-generic/odp_thread_api.c \
+ ../linux-generic/odp_ticketlock_api.c \
+ ../linux-generic/odp_time_api.c \
+ ../linux-generic/odp_timer_api.c
+endif
+
+if ARCH_IS_ARM
+__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
+ arch/default/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/arm/odp_sysinfo_parse.c
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/hash_crc32.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/arm/odp/api/abi/cpu_inlines.h \
+ arch/arm/odp/api/abi/cpu.h \
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/arm/odp_cpu.h \
+ arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_random.h \
+ arch/default/odp_wait_until.h
+endif
+if ARCH_IS_AARCH64
+__LIB__libodp_dpdk_la_SOURCES += arch/aarch64/odp_atomic.c \
+ arch/aarch64/odp_cpu_cycles.c \
+ arch/aarch64/cpu_flags.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/aarch64/odp_sysinfo_parse.c
+odpapiabiarchinclude_HEADERS += arch/aarch64/odp/api/abi/hash_crc32.h \
+ arch/aarch64/odp/api/abi/time_cpu.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/aarch64/odp/api/abi/atomic_inlines.h \
+ arch/aarch64/odp/api/abi/atomic.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/aarch64/odp/api/abi/cpu_inlines.h \
+ arch/aarch64/odp/api/abi/cpu.h \
+ arch/aarch64/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/aarch64/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/aarch64/odp_atomic.h \
+ arch/aarch64/odp_cpu.h \
+ arch/aarch64/cpu_flags.h \
+ arch/aarch64/odp_random.h \
+ arch/aarch64/odp_wait_until.h
+endif
+if ARCH_IS_DEFAULT
+__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
+ arch/default/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/default/odp_sysinfo_parse.c
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/hash_crc32.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/default/odp/api/abi/cpu_inlines.h \
+ arch/default/odp/api/abi/cpu.h \
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_random.h \
+ arch/default/odp_wait_until.h
+endif
+if ARCH_IS_POWERPC
+__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
+ arch/default/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/powerpc/odp_sysinfo_parse.c
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/hash_crc32.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/default/odp/api/abi/cpu_inlines.h \
+ arch/powerpc/odp/api/abi/cpu.h \
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_random.h \
+ arch/default/odp_wait_until.h
+endif
+if ARCH_IS_X86
+__LIB__libodp_dpdk_la_SOURCES += arch/default/odp_atomic.c \
+ arch/x86/cpu_flags.c \
+ arch/x86/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/x86/odp_sysinfo_parse.c
+odpapiabiarchinclude_HEADERS += arch/x86/odp/api/abi/cpu_rdtsc.h \
+ arch/x86/odp/api/abi/hash_crc32.h \
+ arch/x86/odp/api/abi/time_cpu.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/x86/odp/api/abi/cpu_inlines.h \
+ arch/x86/odp/api/abi/cpu.h \
+ arch/x86/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/x86/cpu_flags.h \
+ arch/x86/odp_cpu.h \
+ arch/x86/odp_random.h \
+ arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_wait_until.h
+endif
+
+__LIB__libodp_dpdk_la_LIBADD = $(ATOMIC_LIBS)
+__LIB__libodp_dpdk_la_LIBADD += $(OPENSSL_LIBS)
+__LIB__libodp_dpdk_la_LIBADD += $(LIBCONFIG_LIBS)
+__LIB__libodp_dpdk_la_LIBADD += $(DPDK_LIBS_LIBODP)
+__LIB__libodp_dpdk_la_LIBADD += $(PTHREAD_LIBS)
+__LIB__libodp_dpdk_la_LIBADD += $(TIMER_LIBS)
+__LIB__libodp_dpdk_la_LIBADD += $(ORT_LIBS)
+
+CHECK_GLOBALS_REGEX = " (odp_|_odp_|_deprecated_odp_|miniz_|mz_|tdefl_|tinfl_|mp_hdlr_init_odp_pool_ops)"
+
+TESTS_ENVIRONMENT = \
+ LIBTOOL="$(LIBTOOL)" \
+ NM="$(NM)" \
+ LIB="$(LIB)" \
+ lib_LTLIBRARIES="$(lib_LTLIBRARIES)" \
+ CHECK_GLOBALS_REGEX=$(CHECK_GLOBALS_REGEX)
+
+dist_check_SCRIPTS = check-globals.sh
+
+TESTS = $(dist_check_SCRIPTS)
diff --git a/platform/linux-dpdk/Makefile.inc b/platform/linux-dpdk/Makefile.inc
index d26b2cc82..6e153015b 100644
--- a/platform/linux-dpdk/Makefile.inc
+++ b/platform/linux-dpdk/Makefile.inc
@@ -1,4 +1,6 @@
-AM_CFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH_DIR)
-AM_CXXFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH_DIR)
-AM_LDFLAGS += -R$(SDK_INSTALL_PATH)/lib
-LOG_COMPILER = $(top_srcdir)/test/linux-dpdk/wrapper-script.sh
+AM_CFLAGS += $(DPDK_CFLAGS)
+AM_CXXFLAGS += $(DPDK_CFLAGS)
+
+LOG_COMPILER = $(top_builddir)/platform/linux-dpdk/test/wrapper-script.sh
+SH_LOG_COMPILER = $(LOG_COMPILER)
+EXTRA_DIST += $(top_builddir)/platform/linux-dpdk/test/wrapper-script.sh
diff --git a/platform/linux-dpdk/README b/platform/linux-dpdk/README
index 64419334b..c0298ab34 100644
--- a/platform/linux-dpdk/README
+++ b/platform/linux-dpdk/README
@@ -1,16 +1,10 @@
-Copyright (c) 2014, Linaro Limited
+Copyright (c) 2018-2019, Linaro Limited
+Copyright (c) 2019-2024, Nokia
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
-ERRATA:
-- DPDK 16.07 and earlier supports pool names with RTE_MEMZONE_NAMESIZE
- characters (including terminating NULL), which is 6 characters less than
- ODP_POOL_NAME_LEN. Names reaching into this interval might collide if the
- first 25 characters are not unique.
-
-
1. Rationale
=================================================
@@ -25,71 +19,64 @@ Prerequisites and considerations:
- it's also possible to use odp-dpdk for evaluation purposes without a DPDK
compatible NIC, using the pcap poll mode driver
- DPDK code must be downloaded, configured and compiled, details below
-- ODP-DPDK has been compiled and tested on an x86 host with Ubuntu 16.04 LTS
- (4.4.0 kernel).
+- ODP-DPDK has been compiled and tested on an x86 host with Ubuntu 22.04 LTS
+ (5.15.0 kernel).
- DPDK only works on a selected range of network cards. The list of known and
supported devices can be found in the DPDK documentation:
- http://dpdk.org/doc/guides-17.02/nics/index.html
+ https://doc.dpdk.org/guides/nics/index.html
2. Preparing DPDK
=================================================
-Fetching the DPDK code:
-----------------------
- git clone http://dpdk.org/git/dpdk ./<dpdk-dir>
-
-Right now odp-dpdk only supports DPDK v17.02:
- cd <dpdk-dir>
- git tag -l -- will list all the tags available
- git checkout -b 17.02 tags/v17.02
-
-Compile DPDK:
-------------
-Please refer to http://dpdk.org/doc/guides/linux_gsg/build_dpdk.html for more
+Please refer to https://dpdk.org/doc/guides/linux_gsg/build_dpdk.html for more
details on how to build DPDK. Best effort is done to provide some help on DPDK
cmds below for Ubuntu, where it has been compiled and tested.
On Ubuntu install pcap development library:
sudo apt-get install libpcap-dev
-This has to be done only once:
- cd <dpdk-dir>
- make config T=x86_64-native-linuxapp-gcc O=x86_64-native-linuxapp-gcc
-
-Enable pcap pmd to use ODP-DPDK without DPDK supported NIC's:
- cd <dpdk-dir>/x86_64-native-linuxapp-gcc
- sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config
+Right now ODP-DPDK supports DPDK v21.11, v22.11 (recommended version), and
+v23.11.
-Now return to parent directory and build DPDK:
- cd ..
+Compile DPDK
+------------
+Fetch the DPDK code:
+ git clone https://dpdk.org/git/dpdk-stable --branch 22.11 --depth 1 ./<dpdk-dir>
-The last step depends on if shared libraries are required.
-SHARED libraries:
- make install T=x86_64-native-linuxapp-gcc DESTDIR=./install EXTRA_CFLAGS="-fPIC"
+Prepare the build directory:
+ cd <dpdk-dir>
+ meson build
+ cd build
-STATIC libraries:
- make install T=x86_64-native-linuxapp-gcc DESTDIR=./install
+Optionally, configure the location where DPDK will be installed. By default,
+DPDK will be installed in /usr/local:
+ meson configure -Dprefix=$(pwd)/../install
-This only ensures building DPDK, but traffic is not tested with this build yet.
+Build and install DPDK:
+ ninja install
3. Compile ODP-DPDK
=================================================
Build dependencies are listed in DEPENDENCIES. Use absolute DPDK directory
-path with the --with-sdk-install-path option.
+path with the --with-dpdk-path option.
cd <odp-dir>
./bootstrap
-The following step depends on if shared libraries are required.
+The following step depends on whether ODP shared libraries are to be built.
SHARED libraries:
- ./configure --with-sdk-install-path=<dpdk-dir>/x86_64-native-linuxapp-gcc
+ ./configure --enable-dpdk-shared
STATIC libraries (better performance):
- ./configure --with-sdk-install-path=<dpdk-dir>/x86_64-native-linuxapp-gcc --disable-shared
+ ./configure --disable-shared
+
+Or, if DPDK was not installed to the default location, set PKG_CONFIG_PATH:
+ PKG_CONFIG_PATH=<dpdk-dir>/install/lib/x86_64-linux-gnu/pkgconfig ./configure
+Once configure has completed successfully:
make
@@ -116,12 +103,15 @@ Insert DPDK kernel module:
-------------------------
DPDK uses userspace poll mode drivers, so it's necessary to insert a couple of
modules to allow DPDK to map the NIC's registers to userspace:
- sudo /sbin/modprobe uio
- ulimit -Sn 2048
+If UIO is used:
+ sudo modprobe uio
cd <dpdk-dir>
sudo insmod x86_64-native-linuxapp-gcc/kmod/igb_uio.ko
+or, if VFIO is used (requires kernel and BIOS support, e.g. iommu=pt intel_iommu=on):
+ sudo modprobe vfio-pci
+
Bind NIC's to DPDK:
------------------
The DPDK code contains a tool used to bind drivers to the network cards.
@@ -154,9 +144,14 @@ The easiest way is to let the tool automatically switch the regular drivers. For
that the interface must not be active i.e. no IP addresses assigned:
sudo ifconfig eth0 0
sudo ifconfig eth1 0
+
+If UIO is used:
sudo ./usertools/dpdk-devbind.py --bind=igb_uio eth0
sudo ./usertools/dpdk-devbind.py --bind=igb_uio eth1
+or if VFIO is used:
+ sudo ./usertools/dpdk-devbind.py --bind=vfio-pci eth0
+ sudo ./usertools/dpdk-devbind.py --bind=vfio-pci eth1
Bind using PCI ids:
------------------
@@ -176,16 +171,15 @@ To restore the NIC's back to kernel use something like this:
5. Running ODP apps
=================================================
-ODP-DPDK applications need to be run as root. You also need to supply the DPDK
-command line parameters either as a null-terminated array of char's to
-odp_global_init()'s platform_params parameter:
+ODP-DPDK applications need to be run as root. You may also need to
+supply DPDK command line parameters either as a null-terminated array of
+char's to odp_global_init()'s platform_params parameter:
- odp_global_init([params], "-n 4");
+ odp_global_init([params], "--no-huge");
Or, if it's NULL the platform tries to read the ODP_PLATFORM_PARAMS environment
variable.
-You need to pass at least "-n [1..4]" to specify the number of memory channels.
The coremask (-c) is calculated by ODP-DPDK based on the process affinity at
startup. You can influence that with 'taskset'. DPDK init changes the affinity
of the calling thread, so after it returns the original affinity is restored.
@@ -195,9 +189,9 @@ rte_eal_[mp_]remote_launch(), but not through ODP API's. Nevertheless,
odp_local_init() makes sure for the rest of the DPDK libraries ODP threads look
like proper DPDK threads.
-Exaple how to run an ODP-DPDK L2 forwarding application:
+Example of how to run an ODP-DPDK L2 forwarding application:
- sudo ODP_PLATFORM_PARAMS="-n 4" ./odp_l2fwd -i 0,1 -c 2
+ sudo ./odp_l2fwd -i 0,1 -c 2
-i 0,1 - interface numbers
-c 2 - number of worker cpus
@@ -230,40 +224,9 @@ CONFIG_RTE_LIBRTE_PMD_PCAP=y
mount -t hugetlbfs none /mnt/huge
Finally give l2fwd fake devices:
- ./l2fwd -c '0xf' -n 4 --vdev "eth_pcap0,iface=veth2-1" --vdev="eth_pcap1,iface=veth2-3" -- -p 3
+ ./l2fwd -c '0xf' --vdev "eth_pcap0,iface=veth2-1" --vdev="eth_pcap1,iface=veth2-3" -- -p 3
-7. Build with devbuild.sh
-=================================================
-
-scripts/devbuild.sh contains an example script aimed for developers. It uses
-the CI scripts from https://git.linaro.org/lng/check-odp.git to build DPDK and
-ODP-DPDK. It can also run "make check" or individual unit tests, but you need to
-install CUnit as a prerequisite.
-If you have build problems, try to run it and see if it works. An example:
- export REPOS=${PWD}
- git clone https://git.linaro.org/lng/check-odp.git
- git clone https://git.linaro.org/lng/odp-dpdk.git
- odp-dpdk/scripts/devbuild.sh dpdk
- odp-dpdk/scripts/devbuild.sh odp
- odp-dpdk/scripts/devbuild.sh odp-check
-
-It can also run unit tests individually, optionally with gdb. If the first
-parameter is not dpdk, odp or odp-check, it tries to run it in
-"$CHECK_ODP_DIR/new-build/bin/", with the help of the wrapper script. That's
-where example programs are, but you can also use it to run the unit tests, by
-traversing the directories:
-
- odp-dpdk/scripts/devbuild.sh \
- ../../../odp-dpdk/test/common_plat/validation/api/atomic/atomic_main
- odp-dpdk/scripts/devbuild.sh \
- ../../../odp-dpdk/test/linux-dpdk/validation/api/pktio/pktio_run.sh
-
-The wrapper will prepend the executable with ODP_GDB env. variable, or pass it
-down if its name ends ".sh". With prepending your command line with ODP_GDB=gdb
-you can run the tests in GDB. If the unit test is wrapped into yet another
-shell script (like pktio), it has to do the prepending itself!
-
-8. Upgrading ODP-DPDK to newer ODP API level
+7. Upgrading ODP-DPDK to newer ODP API level
=================================================
This repository is based on odp.git, it also retains the history of that. There
@@ -272,7 +235,7 @@ changes are in platform/linux-dpdk. That directory's Makefile.am builds our
code and the required parts from platform/linux-generic.
This allows us to easily pull the necessary changes from odp.git with git:
-git remote add odp_base https://git.linaro.org/lng/odp.git
+git remote add odp_base https://github.com/OpenDataPlane/odp.git
git pull odp_base master
This will result in a merge commit, and possibly some conflict resolving if
@@ -287,3 +250,79 @@ scripts/git-transplant.py platform/linux-generic/ platform/linux-dpdk/ \
It prints the list of prospective patches to be ported. See its comments about
what it does.
+
+8. Building odp-dpdk with DPDK crypto PMDs
+======================================================
+
+Refer to the DPDK crypto documentation for detailed crypto PMD build instructions:
+https://dpdk.org/doc/guides/cryptodevs/index.html
+
+To build odp-dpdk with DPDK virtual crypto devices, the supporting Intel
+Multi-Buffer Crypto for IPsec library must be built prior to the DPDK build.
+
+Get the Intel Multi-Buffer Crypto library from
+https://github.com/intel/intel-ipsec-mb and follow the README from the repo on
+how to build the library.
+
+Building DPDK
+-------------
+Follow the instructions from "Compile DPDK" section. If libIPSec_MB has been
+installed outside the normal search paths, configure the compiler and linker
+options with:
+
+meson configure -Dc_args=-I/path-to/Intel-multi-buffer-crypto/include \
+ -Dc_link_args=-L/path-to/Intel-multi-buffer-crypto/lib
+
+Runtime parameters
+------------------
+When running an ODP application, include the required crypto devices in
+ODP_PLATFORM_PARAMS environment variable.
+E.g. ODP_PLATFORM_PARAMS="--vdev crypto_aesni_mb --vdev crypto_null"
+
+9. Using eventdev scheduling and queues (experimental)
+======================================================
+
+ODP-DPDK includes experimental implementations of ODP scheduler, queues, and
+scheduled pktio using DPDK event device library. The initial implementation
+has been validated using only the standard software eventdev poll mode driver
+with DPDK v18.11. Due to some pending eventdev bugs the implementation is not
+yet tested in the ODP CI.
+
+To use eventdev one must set ODP_SCHEDULER environment variable to "eventdev"
+and provide the necessary platform parameters to DPDK.
+
+Refer to DPDK event device driver documentation for platform details:
+https://doc.dpdk.org/guides/eventdevs/index.html
+
+In case of the standard software eventdev implementation one must enable a DPDK
+service core, which will perform scheduling and receive packets for the
+scheduled pktio input queues. The DPDK service cores and the ODP application
+cores should not overlap.
+
+Example of how to run the odp_scheduling test application using eventdev:
+ sudo ODP_SCHEDULER="eventdev" ODP_PLATFORM_PARAMS="--vdev event_sw0 -s 0x4" \
+ ./odp_scheduling -c 1
+
+10. Using dmadev for DMA transfers
+==================================
+
+ODP-DPDK implements ODP DMA API utilizing DPDK dmadev APIs. DPDK 21.11 or newer
+is required, otherwise only a dummy implementation is provided. More
+information about dmadev can be found under official documentation:
+https://doc.dpdk.org/guides/prog_guide/dmadev.html
+
+Before running applications, required DMA devices need to be set up. Official
+documentation lists steps for setting up drivers for each supported device,
+VFIO drivers typically being the least cumbersome to set up:
+https://doc.dpdk.org/guides/dmadevs/index.html
+
+As the ODP DMA API provides a single capability for the underlying DMA
+machinery, ODP-DPDK always tries to discover a set of devices with similar
+capabilities. If an application requires a certain device, this can be
+allowed/blocked device lists. E.g.:
+
+ sudo ODP_PLATFORM_PARAMS="-a 0000:f2:01.0" ./odp_dma_perf -t 1 -i 1 -o 1 -s 61440 -S 0 -m 0 -f 16
+
+Additionally, a few configuration file parameters are available under the "dma"
+section in DPDK configuration file. These should be configured according to used
+device capabilities.
diff --git a/platform/linux-dpdk/arch b/platform/linux-dpdk/arch
deleted file mode 120000
index b552ee6b9..000000000
--- a/platform/linux-dpdk/arch
+++ /dev/null
@@ -1 +0,0 @@
-../linux-generic/arch/ \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/cpu_flags.c b/platform/linux-dpdk/arch/aarch64/cpu_flags.c
new file mode 120000
index 000000000..a5f786955
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/cpu_flags.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/cpu_flags.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/cpu_flags.h b/platform/linux-dpdk/arch/aarch64/cpu_flags.h
new file mode 120000
index 000000000..27ca5d5ea
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/cpu_flags.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/cpu_flags.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic.h
new file mode 120000
index 000000000..b3a71d10a
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/atomic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic_inlines.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic_inlines.h
new file mode 120000
index 000000000..6a5aa1319
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/atomic_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu.h
new file mode 100644
index 000000000..35d59f108
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2021, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#define ODP_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu_inlines.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu_inlines.h
new file mode 120000
index 000000000..c37661ccd
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/cpu_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/cpu_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/hash_crc32.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/hash_crc32.h
new file mode 120000
index 000000000..e18244b85
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/hash_crc32.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/hash_crc32.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/sync_inlines.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/sync_inlines.h
new file mode 120000
index 000000000..1281f2376
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/sync_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/sync_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/time_cpu.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/time_cpu.h
new file mode 120000
index 000000000..f00ac2b3f
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/time_cpu.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/time_cpu.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h
new file mode 120000
index 000000000..65a5f4381
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/wait_until.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_atomic.c b/platform/linux-dpdk/arch/aarch64/odp_atomic.c
new file mode 120000
index 000000000..acccfc3ce
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_atomic.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_atomic.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_atomic.h b/platform/linux-dpdk/arch/aarch64/odp_atomic.h
new file mode 120000
index 000000000..7c30adee0
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_atomic.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_atomic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_cpu.h b/platform/linux-dpdk/arch/aarch64/odp_cpu.h
new file mode 120000
index 000000000..284bbb20b
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_cpu.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_cpu.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_cpu_cycles.c b/platform/linux-dpdk/arch/aarch64/odp_cpu_cycles.c
new file mode 120000
index 000000000..7cf2fef20
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_cpu_cycles.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_cpu_cycles.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_random.h b/platform/linux-dpdk/arch/aarch64/odp_random.h
new file mode 120000
index 000000000..892ad529c
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_random.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_sysinfo_parse.c b/platform/linux-dpdk/arch/aarch64/odp_sysinfo_parse.c
new file mode 120000
index 000000000..4d2a2b6af
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_sysinfo_parse.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_sysinfo_parse.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/aarch64/odp_wait_until.h b/platform/linux-dpdk/arch/aarch64/odp_wait_until.h
new file mode 120000
index 000000000..f7d35f0ca
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp_wait_until.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/aarch64/odp_wait_until.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/arm/odp/api/abi/cpu.h b/platform/linux-dpdk/arch/arm/odp/api/abi/cpu.h
new file mode 100644
index 000000000..6644a1ed3
--- /dev/null
+++ b/platform/linux-dpdk/arch/arm/odp/api/abi/cpu.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#define ODP_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/arch/arm/odp/api/abi/cpu_inlines.h b/platform/linux-dpdk/arch/arm/odp/api/abi/cpu_inlines.h
new file mode 120000
index 000000000..d3b507a76
--- /dev/null
+++ b/platform/linux-dpdk/arch/arm/odp/api/abi/cpu_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/arm/odp/api/abi/cpu_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/arm/odp_cpu.h b/platform/linux-dpdk/arch/arm/odp_cpu.h
new file mode 120000
index 000000000..fd183aea2
--- /dev/null
+++ b/platform/linux-dpdk/arch/arm/odp_cpu.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/arm/odp_cpu.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/arm/odp_sysinfo_parse.c b/platform/linux-dpdk/arch/arm/odp_sysinfo_parse.c
new file mode 120000
index 000000000..9cd6f01b0
--- /dev/null
+++ b/platform/linux-dpdk/arch/arm/odp_sysinfo_parse.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/arm/odp_sysinfo_parse.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/atomic_generic.h b/platform/linux-dpdk/arch/default/odp/api/abi/atomic_generic.h
new file mode 120000
index 000000000..5ea39e9d5
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/atomic_generic.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/atomic_generic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/atomic_inlines.h b/platform/linux-dpdk/arch/default/odp/api/abi/atomic_inlines.h
new file mode 120000
index 000000000..8509a6be6
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/atomic_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/atomic_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/cpu.h b/platform/linux-dpdk/arch/default/odp/api/abi/cpu.h
new file mode 100644
index 000000000..e4fe6f631
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/cpu.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#define ODP_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/cpu_generic.h b/platform/linux-dpdk/arch/default/odp/api/abi/cpu_generic.h
new file mode 120000
index 000000000..f1b7066e1
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/cpu_generic.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/cpu_generic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/cpu_inlines.h b/platform/linux-dpdk/arch/default/odp/api/abi/cpu_inlines.h
new file mode 120000
index 000000000..27f7b9d11
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/cpu_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/cpu_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/hash_crc32.h b/platform/linux-dpdk/arch/default/odp/api/abi/hash_crc32.h
new file mode 120000
index 000000000..691f0e77f
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/hash_crc32.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/hash_crc32.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/sync_inlines.h b/platform/linux-dpdk/arch/default/odp/api/abi/sync_inlines.h
new file mode 120000
index 000000000..328ff07d6
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/sync_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/sync_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h
new file mode 120000
index 000000000..ae06629ce
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/wait_until.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h
new file mode 120000
index 000000000..c43ede9ad
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/wait_until_generic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_atomic.c b/platform/linux-dpdk/arch/default/odp_atomic.c
new file mode 120000
index 000000000..d1d5ad459
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_atomic.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_atomic.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_atomic.h b/platform/linux-dpdk/arch/default/odp_atomic.h
new file mode 120000
index 000000000..8cf21dd63
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_atomic.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_atomic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_cpu.h b/platform/linux-dpdk/arch/default/odp_cpu.h
new file mode 120000
index 000000000..6648bf7fc
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_cpu.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_cpu.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_cpu_cycles.c b/platform/linux-dpdk/arch/default/odp_cpu_cycles.c
new file mode 120000
index 000000000..02964ecd9
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_cpu_cycles.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_cpu_cycles.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_hash_crc32.c b/platform/linux-dpdk/arch/default/odp_hash_crc32.c
new file mode 120000
index 000000000..236476886
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_hash_crc32.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_hash_crc32.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_random.c b/platform/linux-dpdk/arch/default/odp_random.c
new file mode 120000
index 000000000..a1889b546
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_random.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_random.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_random.h b/platform/linux-dpdk/arch/default/odp_random.h
new file mode 120000
index 000000000..232858671
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_random.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_sysinfo_parse.c b/platform/linux-dpdk/arch/default/odp_sysinfo_parse.c
new file mode 120000
index 000000000..67c2a32f5
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_sysinfo_parse.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_sysinfo_parse.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp_wait_until.h b/platform/linux-dpdk/arch/default/odp_wait_until.h
new file mode 120000
index 000000000..d2e7b5316
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp_wait_until.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/default/odp_wait_until.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/powerpc/odp/api/abi/cpu.h b/platform/linux-dpdk/arch/powerpc/odp/api/abi/cpu.h
new file mode 100644
index 000000000..7b7720a42
--- /dev/null
+++ b/platform/linux-dpdk/arch/powerpc/odp/api/abi/cpu.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#define ODP_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/arch/powerpc/odp_sysinfo_parse.c b/platform/linux-dpdk/arch/powerpc/odp_sysinfo_parse.c
new file mode 120000
index 000000000..23de9345a
--- /dev/null
+++ b/platform/linux-dpdk/arch/powerpc/odp_sysinfo_parse.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/powerpc/odp_sysinfo_parse.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/cpu_flags.c b/platform/linux-dpdk/arch/x86/cpu_flags.c
new file mode 120000
index 000000000..09f23fe11
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/cpu_flags.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/cpu_flags.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/cpu_flags.h b/platform/linux-dpdk/arch/x86/cpu_flags.h
new file mode 120000
index 000000000..c487a9186
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/cpu_flags.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/cpu_flags.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp/api/abi/cpu.h b/platform/linux-dpdk/arch/x86/odp/api/abi/cpu.h
new file mode 100644
index 000000000..6644a1ed3
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp/api/abi/cpu.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <rte_common.h>
+
+#define ODP_CACHE_LINE_SIZE RTE_CACHE_LINE_SIZE
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/arch/x86/odp/api/abi/cpu_inlines.h b/platform/linux-dpdk/arch/x86/odp/api/abi/cpu_inlines.h
new file mode 120000
index 000000000..8fa978601
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp/api/abi/cpu_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/x86/odp/api/abi/cpu_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp/api/abi/cpu_rdtsc.h b/platform/linux-dpdk/arch/x86/odp/api/abi/cpu_rdtsc.h
new file mode 120000
index 000000000..ab25afae8
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp/api/abi/cpu_rdtsc.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/x86/odp/api/abi/cpu_rdtsc.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp/api/abi/hash_crc32.h b/platform/linux-dpdk/arch/x86/odp/api/abi/hash_crc32.h
new file mode 120000
index 000000000..07948b258
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp/api/abi/hash_crc32.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/x86/odp/api/abi/hash_crc32.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp/api/abi/sync_inlines.h b/platform/linux-dpdk/arch/x86/odp/api/abi/sync_inlines.h
new file mode 120000
index 000000000..d5dba2679
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp/api/abi/sync_inlines.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/x86/odp/api/abi/sync_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp/api/abi/time_cpu.h b/platform/linux-dpdk/arch/x86/odp/api/abi/time_cpu.h
new file mode 100644
index 000000000..cc313cff1
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp/api/abi/time_cpu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_ARCH_TIME_CPU_H_
+#define ODP_ARCH_TIME_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int _odp_time_cpu_global_freq_is_const(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/arch/x86/odp_cpu.h b/platform/linux-dpdk/arch/x86/odp_cpu.h
new file mode 120000
index 000000000..511f06a2e
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp_cpu.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/odp_cpu.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp_cpu_cycles.c b/platform/linux-dpdk/arch/x86/odp_cpu_cycles.c
new file mode 120000
index 000000000..bcd3676ab
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp_cpu_cycles.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/odp_cpu_cycles.c \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp_random.h b/platform/linux-dpdk/arch/x86/odp_random.h
new file mode 120000
index 000000000..50a20427d
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp_random.h
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/odp_random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/x86/odp_sysinfo_parse.c b/platform/linux-dpdk/arch/x86/odp_sysinfo_parse.c
new file mode 120000
index 000000000..613ef5de7
--- /dev/null
+++ b/platform/linux-dpdk/arch/x86/odp_sysinfo_parse.c
@@ -0,0 +1 @@
+../../../linux-generic/arch/x86/odp_sysinfo_parse.c \ No newline at end of file
diff --git a/platform/linux-dpdk/check-globals.sh b/platform/linux-dpdk/check-globals.sh
new file mode 120000
index 000000000..c999a29ef
--- /dev/null
+++ b/platform/linux-dpdk/check-globals.sh
@@ -0,0 +1 @@
+../../scripts/check-globals.sh \ No newline at end of file
diff --git a/platform/linux-dpdk/doc/platform_specific.dox b/platform/linux-dpdk/doc/platform_specific.dox
index bdab65640..ad3126e20 100644
--- a/platform/linux-dpdk/doc/platform_specific.dox
+++ b/platform/linux-dpdk/doc/platform_specific.dox
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -28,5 +28,5 @@
* to odp_init_local() is actually fully defined by these
* requirements: It has to be the value returned by the
* unique call to odp_init_global() made by one single
- * ascendant of the current process.
+ * ancestor of the current process.
*/
diff --git a/platform/linux-dpdk/dumpconfig/.gitignore b/platform/linux-dpdk/dumpconfig/.gitignore
new file mode 100644
index 000000000..091a9d4b7
--- /dev/null
+++ b/platform/linux-dpdk/dumpconfig/.gitignore
@@ -0,0 +1 @@
+odp_linuxdpdk_dumpconfig
diff --git a/platform/linux-dpdk/dumpconfig/Makefile.am b/platform/linux-dpdk/dumpconfig/Makefile.am
new file mode 100644
index 000000000..9a5b8b091
--- /dev/null
+++ b/platform/linux-dpdk/dumpconfig/Makefile.am
@@ -0,0 +1,10 @@
+include $(top_srcdir)/Makefile.inc
+
+AM_CPPFLAGS = -I$(top_builddir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/include
+
+bin_PROGRAMS = odp_linuxdpdk_dumpconfig
+
+odp_linuxdpdk_dumpconfig_SOURCES = ../../linux-generic/dumpconfig/dumpconfig.c
+
+TESTS = odp_linuxdpdk_dumpconfig
diff --git a/platform/linux-dpdk/example/Makefile.am b/platform/linux-dpdk/example/Makefile.am
new file mode 100644
index 000000000..84f337387
--- /dev/null
+++ b/platform/linux-dpdk/example/Makefile.am
@@ -0,0 +1,5 @@
+SUBDIRS =
+
+if WITH_ML
+SUBDIRS += ml
+endif
diff --git a/platform/linux-dpdk/example/ml/.gitignore b/platform/linux-dpdk/example/ml/.gitignore
new file mode 100644
index 000000000..d845f6bb5
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/.gitignore
@@ -0,0 +1,5 @@
+model_explorer
+simple_linear
+mnist
+*.log
+*.trs
diff --git a/platform/linux-dpdk/example/ml/Makefile.am b/platform/linux-dpdk/example/ml/Makefile.am
new file mode 100644
index 000000000..7abbc3828
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/Makefile.am
@@ -0,0 +1,54 @@
+include $(top_srcdir)/example/Makefile.inc
+
+LDADD += -lm
+
+bin_PROGRAMS = model_explorer simple_linear mnist
+
+simple_linear_SOURCES = \
+ ../../../linux-generic/example/ml/simple_linear.c \
+ ../../../linux-generic/example/ml/model_read.c \
+ ../../../linux-generic/example/ml/model_read.h
+model_explorer_SOURCES = \
+ ../../../linux-generic/example/ml/model_explorer.c \
+ ../../../linux-generic/example/ml/model_read.c \
+ ../../../linux-generic/example/ml/model_read.h
+mnist_SOURCES = \
+ ../../../linux-generic/example/ml/mnist.c \
+ ../../../linux-generic/example/ml/model_read.c \
+ ../../../linux-generic/example/ml/model_read.h
+
+EXTRA_DIST = \
+ odp_ml_run_mnist.sh \
+ example_digit.csv \
+ mnist-12.onnx \
+ odp_ml_run_model_explorer.sh \
+ odp_ml_run_simple_linear.sh \
+ simple_linear.onnx
+
+if test_example
+TESTS = \
+ odp_ml_run_mnist.sh \
+ odp_ml_run_model_explorer.sh \
+ odp_ml_run_simple_linear.sh
+endif
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-dpdk/example/ml/README.md b/platform/linux-dpdk/example/ml/README.md
new file mode 120000
index 000000000..ddeec649f
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/README.md
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/README.md \ No newline at end of file
diff --git a/platform/linux-dpdk/example/ml/example_digit.csv b/platform/linux-dpdk/example/ml/example_digit.csv
new file mode 120000
index 000000000..5e5514aaf
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/example_digit.csv
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/example_digit.csv \ No newline at end of file
diff --git a/platform/linux-dpdk/example/ml/mnist-12.onnx b/platform/linux-dpdk/example/ml/mnist-12.onnx
new file mode 120000
index 000000000..94d4515b8
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/mnist-12.onnx
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/mnist-12.onnx \ No newline at end of file
diff --git a/platform/linux-dpdk/example/ml/odp_ml_run_mnist.sh b/platform/linux-dpdk/example/ml/odp_ml_run_mnist.sh
new file mode 120000
index 000000000..7d9c6f84c
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/odp_ml_run_mnist.sh
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/odp_ml_run_mnist.sh \ No newline at end of file
diff --git a/platform/linux-dpdk/example/ml/odp_ml_run_model_explorer.sh b/platform/linux-dpdk/example/ml/odp_ml_run_model_explorer.sh
new file mode 120000
index 000000000..f28535b64
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/odp_ml_run_model_explorer.sh
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/odp_ml_run_model_explorer.sh \ No newline at end of file
diff --git a/platform/linux-dpdk/example/ml/odp_ml_run_simple_linear.sh b/platform/linux-dpdk/example/ml/odp_ml_run_simple_linear.sh
new file mode 120000
index 000000000..2691d9282
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/odp_ml_run_simple_linear.sh
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/odp_ml_run_simple_linear.sh \ No newline at end of file
diff --git a/platform/linux-dpdk/example/ml/simple_linear.onnx b/platform/linux-dpdk/example/ml/simple_linear.onnx
new file mode 120000
index 000000000..5893a9176
--- /dev/null
+++ b/platform/linux-dpdk/example/ml/simple_linear.onnx
@@ -0,0 +1 @@
+../../../linux-generic/example/ml/simple_linear.onnx \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/align.h b/platform/linux-dpdk/include-abi/odp/api/abi/align.h
new file mode 120000
index 000000000..d23c3a333
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/align.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/align.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/atomic.h b/platform/linux-dpdk/include-abi/odp/api/abi/atomic.h
new file mode 120000
index 000000000..694a8b066
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/atomic.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/atomic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/barrier.h b/platform/linux-dpdk/include-abi/odp/api/abi/barrier.h
new file mode 120000
index 000000000..7fd29520a
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/barrier.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/barrier.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/buffer.h b/platform/linux-dpdk/include-abi/odp/api/abi/buffer.h
new file mode 120000
index 000000000..06a66d30d
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/buffer.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/buffer.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/buffer_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/buffer_types.h
new file mode 120000
index 000000000..49e88bd2e
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/buffer_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/buffer_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/byteorder.h b/platform/linux-dpdk/include-abi/odp/api/abi/byteorder.h
new file mode 120000
index 000000000..f5ff5808b
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/byteorder.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/byteorder.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/classification.h b/platform/linux-dpdk/include-abi/odp/api/abi/classification.h
new file mode 120000
index 000000000..8610c56af
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/classification.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/classification.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/comp.h b/platform/linux-dpdk/include-abi/odp/api/abi/comp.h
new file mode 120000
index 000000000..e4588bc93
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/comp.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/comp.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/cpumask.h b/platform/linux-dpdk/include-abi/odp/api/abi/cpumask.h
new file mode 120000
index 000000000..23b4f40e7
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/cpumask.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/cpumask.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/crypto.h b/platform/linux-dpdk/include-abi/odp/api/abi/crypto.h
new file mode 120000
index 000000000..3fb47a4c7
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/crypto.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/crypto.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/crypto_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/crypto_types.h
new file mode 120000
index 000000000..ce973bb2d
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/crypto_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/crypto_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/debug.h b/platform/linux-dpdk/include-abi/odp/api/abi/debug.h
new file mode 120000
index 000000000..7f59ed930
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/debug.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/debug.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/dma.h b/platform/linux-dpdk/include-abi/odp/api/abi/dma.h
new file mode 120000
index 000000000..112a7b5a7
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/dma.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/dma.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..318c2c385
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/dma_types.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2023 Nokia
+ */
+
+#ifndef ODP_API_ABI_DMA_TYPES_H_
+#define ODP_API_ABI_DMA_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_dma
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_dma_t);
+
+#define ODP_DMA_INVALID _odp_cast_scalar(odp_dma_t, 0)
+
+typedef ODP_HANDLE_T(odp_dma_transfer_id_t);
+
+#define ODP_DMA_TRANSFER_ID_INVALID _odp_cast_scalar(odp_dma_transfer_id_t, 0)
+
+typedef ODP_HANDLE_T(odp_dma_compl_t);
+
+#define ODP_DMA_COMPL_INVALID _odp_cast_scalar(odp_dma_compl_t, 0)
+
+#define ODP_DMA_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/errno.h b/platform/linux-dpdk/include-abi/odp/api/abi/errno.h
new file mode 120000
index 000000000..0bc8b623d
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/errno.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/errno.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/event.h b/platform/linux-dpdk/include-abi/odp/api/abi/event.h
new file mode 120000
index 000000000..91dd1ebce
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/event.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/event.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/event_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/event_types.h
new file mode 120000
index 000000000..0e5c90929
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/event_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/event_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/hash.h b/platform/linux-dpdk/include-abi/odp/api/abi/hash.h
new file mode 120000
index 000000000..cdcd8260a
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/hash.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/hash.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/init.h b/platform/linux-dpdk/include-abi/odp/api/abi/init.h
new file mode 120000
index 000000000..b147ed64c
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/init.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/init.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/ipsec.h b/platform/linux-dpdk/include-abi/odp/api/abi/ipsec.h
new file mode 120000
index 000000000..d4775452f
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/ipsec.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/ipsec.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h
new file mode 120000
index 000000000..b6ca88309
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/ipsec_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/ipsec_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/ml_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/ml_types.h
new file mode 120000
index 000000000..18b483da1
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/ml_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/ml_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
new file mode 100644
index 000000000..913181b7a
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_H_
+#define ODP_API_ABI_PACKET_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet_flags.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet_flags.h
new file mode 120000
index 000000000..82d4e0000
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet_flags.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/packet_flags.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet_io.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet_io.h
new file mode 120000
index 000000000..79a8fc3f2
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet_io.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/packet_io.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet_io_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet_io_types.h
new file mode 120000
index 000000000..068a6b64e
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet_io_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/packet_io_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..9ca66db54
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/packet_types.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_TYPES_H_
+#define ODP_API_ABI_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_packet
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_packet_t);
+
+#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef ODP_HANDLE_T(odp_packet_seg_t);
+
+#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_buf_t);
+
+#define ODP_PACKET_BUF_INVALID _odp_cast_scalar(odp_packet_buf_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
+
+#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
+
+typedef enum {
+ ODP_PACKET_GREEN = 0,
+ ODP_PACKET_YELLOW = 1,
+ ODP_PACKET_RED = 2,
+ ODP_PACKET_ALL_COLORS = 3,
+} odp_packet_color_t;
+
+typedef enum {
+ ODP_PACKET_CHKSUM_UNKNOWN = 0,
+ ODP_PACKET_CHKSUM_BAD,
+ ODP_PACKET_CHKSUM_OK
+} odp_packet_chksum_status_t;
+
+typedef struct odp_packet_parse_result_flag_t {
+ union {
+ uint64_t all;
+
+ struct {
+ uint64_t has_error : 1;
+ uint64_t has_l2_error : 1;
+ uint64_t has_l3_error : 1;
+ uint64_t has_l4_error : 1;
+ uint64_t has_l2 : 1;
+ uint64_t has_l3 : 1;
+ uint64_t has_l4 : 1;
+ uint64_t has_eth : 1;
+ uint64_t has_eth_bcast : 1;
+ uint64_t has_eth_mcast : 1;
+ uint64_t has_jumbo : 1;
+ uint64_t has_vlan : 1;
+ uint64_t has_vlan_qinq : 1;
+ uint64_t has_arp : 1;
+ uint64_t has_ipv4 : 1;
+ uint64_t has_ipv6 : 1;
+ uint64_t has_ip_bcast : 1;
+ uint64_t has_ip_mcast : 1;
+ uint64_t has_ipfrag : 1;
+ uint64_t has_ipopt : 1;
+ uint64_t has_ipsec : 1;
+ uint64_t has_udp : 1;
+ uint64_t has_tcp : 1;
+ uint64_t has_sctp : 1;
+ uint64_t has_icmp : 1;
+ };
+ };
+
+} odp_packet_parse_result_flag_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/pool.h b/platform/linux-dpdk/include-abi/odp/api/abi/pool.h
new file mode 120000
index 000000000..87cbcd21a
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/pool.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/pool.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/pool_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/pool_types.h
new file mode 120000
index 000000000..7f60f8231
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/pool_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/pool_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h
new file mode 120000
index 000000000..812d2a7a8
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/proto_stats.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h
new file mode 120000
index 000000000..5988e9a2e
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/proto_stats_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/proto_stats_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/queue.h b/platform/linux-dpdk/include-abi/odp/api/abi/queue.h
new file mode 120000
index 000000000..5d40402fa
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/queue.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/queue.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h
new file mode 120000
index 000000000..caf4bceb7
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/queue_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/queue_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/random.h b/platform/linux-dpdk/include-abi/odp/api/abi/random.h
new file mode 120000
index 000000000..3c7940e55
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/random.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/rwlock.h b/platform/linux-dpdk/include-abi/odp/api/abi/rwlock.h
new file mode 120000
index 000000000..4d0fbd045
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/rwlock.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/rwlock.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/rwlock_recursive.h b/platform/linux-dpdk/include-abi/odp/api/abi/rwlock_recursive.h
new file mode 120000
index 000000000..e9a7f2705
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/rwlock_recursive.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/rwlock_recursive.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/schedule.h b/platform/linux-dpdk/include-abi/odp/api/abi/schedule.h
new file mode 120000
index 000000000..8dc7ccedd
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/schedule.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/schedule.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/schedule_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/schedule_types.h
new file mode 120000
index 000000000..88811bebf
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/schedule_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/schedule_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/shared_memory.h b/platform/linux-dpdk/include-abi/odp/api/abi/shared_memory.h
new file mode 120000
index 000000000..17d60f745
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/shared_memory.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/shared_memory.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/spinlock.h b/platform/linux-dpdk/include-abi/odp/api/abi/spinlock.h
new file mode 120000
index 000000000..cafab6630
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/spinlock.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/spinlock.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/spinlock_recursive.h b/platform/linux-dpdk/include-abi/odp/api/abi/spinlock_recursive.h
new file mode 120000
index 000000000..7857beedd
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/spinlock_recursive.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/spinlock_recursive.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/stash.h b/platform/linux-dpdk/include-abi/odp/api/abi/stash.h
new file mode 120000
index 000000000..44618016e
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/stash.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/stash.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/stash_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/stash_types.h
new file mode 120000
index 000000000..f9fa4bfec
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/stash_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/stash_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/std.h b/platform/linux-dpdk/include-abi/odp/api/abi/std.h
new file mode 120000
index 000000000..8cc2509ea
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/std.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/std.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/std_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/std_types.h
new file mode 120000
index 000000000..dbc3f316f
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/std_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/std_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/sync.h b/platform/linux-dpdk/include-abi/odp/api/abi/sync.h
new file mode 120000
index 000000000..def6e53f3
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/sync.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/sync.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/thread.h b/platform/linux-dpdk/include-abi/odp/api/abi/thread.h
new file mode 120000
index 000000000..db34e297a
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/thread.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/thread.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h
new file mode 120000
index 000000000..b665090d0
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/thread_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/thrmask.h b/platform/linux-dpdk/include-abi/odp/api/abi/thrmask.h
new file mode 120000
index 000000000..4c99d9d97
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/thrmask.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/thrmask.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/ticketlock.h b/platform/linux-dpdk/include-abi/odp/api/abi/ticketlock.h
new file mode 120000
index 000000000..c4eb36297
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/ticketlock.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/ticketlock.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/time.h b/platform/linux-dpdk/include-abi/odp/api/abi/time.h
new file mode 120000
index 000000000..31f85b0ef
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/time.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/time.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/time_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/time_types.h
new file mode 120000
index 000000000..2f280c709
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/time_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/time_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/timer.h b/platform/linux-dpdk/include-abi/odp/api/abi/timer.h
new file mode 120000
index 000000000..e65d4faf5
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/timer.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/timer.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h
new file mode 120000
index 000000000..4b815a27b
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/timer_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/timer_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/traffic_mngr.h b/platform/linux-dpdk/include-abi/odp/api/abi/traffic_mngr.h
new file mode 120000
index 000000000..94b0862a8
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/traffic_mngr.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/traffic_mngr.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/version.h b/platform/linux-dpdk/include-abi/odp/api/abi/version.h
new file mode 120000
index 000000000..4a999ae5b
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/version.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/version.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/align.h b/platform/linux-dpdk/include/odp/api/align.h
deleted file mode 120000
index 10c13402c..000000000
--- a/platform/linux-dpdk/include/odp/api/align.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/align.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/atomic.h b/platform/linux-dpdk/include/odp/api/atomic.h
deleted file mode 120000
index e6d28fdf5..000000000
--- a/platform/linux-dpdk/include/odp/api/atomic.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/atomic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/barrier.h b/platform/linux-dpdk/include/odp/api/barrier.h
deleted file mode 120000
index 72a22f10f..000000000
--- a/platform/linux-dpdk/include/odp/api/barrier.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/barrier.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/buffer.h b/platform/linux-dpdk/include/odp/api/buffer.h
deleted file mode 120000
index 6587bdcb2..000000000
--- a/platform/linux-dpdk/include/odp/api/buffer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/buffer.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/byteorder.h b/platform/linux-dpdk/include/odp/api/byteorder.h
deleted file mode 120000
index cef6d89b6..000000000
--- a/platform/linux-dpdk/include/odp/api/byteorder.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/byteorder.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/classification.h b/platform/linux-dpdk/include/odp/api/classification.h
deleted file mode 120000
index 43cf93626..000000000
--- a/platform/linux-dpdk/include/odp/api/classification.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/classification.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/compiler.h b/platform/linux-dpdk/include/odp/api/compiler.h
deleted file mode 120000
index 6b99f796c..000000000
--- a/platform/linux-dpdk/include/odp/api/compiler.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/compiler.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/cpu.h b/platform/linux-dpdk/include/odp/api/cpu.h
deleted file mode 120000
index eb6a69f83..000000000
--- a/platform/linux-dpdk/include/odp/api/cpu.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/cpu.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/cpumask.h b/platform/linux-dpdk/include/odp/api/cpumask.h
deleted file mode 120000
index 49ca0f2c3..000000000
--- a/platform/linux-dpdk/include/odp/api/cpumask.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/cpumask.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/crypto.h b/platform/linux-dpdk/include/odp/api/crypto.h
deleted file mode 120000
index d69e8a0e2..000000000
--- a/platform/linux-dpdk/include/odp/api/crypto.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/crypto.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/debug.h b/platform/linux-dpdk/include/odp/api/debug.h
deleted file mode 120000
index 2814f8f54..000000000
--- a/platform/linux-dpdk/include/odp/api/debug.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/debug.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/errno.h b/platform/linux-dpdk/include/odp/api/errno.h
deleted file mode 120000
index f274d6a73..000000000
--- a/platform/linux-dpdk/include/odp/api/errno.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/errno.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/event.h b/platform/linux-dpdk/include/odp/api/event.h
deleted file mode 120000
index 674d3539a..000000000
--- a/platform/linux-dpdk/include/odp/api/event.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/event.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/hash.h b/platform/linux-dpdk/include/odp/api/hash.h
deleted file mode 120000
index 80cbce721..000000000
--- a/platform/linux-dpdk/include/odp/api/hash.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/hash.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/hints.h b/platform/linux-dpdk/include/odp/api/hints.h
deleted file mode 120000
index 60a8912b5..000000000
--- a/platform/linux-dpdk/include/odp/api/hints.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/hints.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/init.h b/platform/linux-dpdk/include/odp/api/init.h
deleted file mode 120000
index 0f4bd6c09..000000000
--- a/platform/linux-dpdk/include/odp/api/init.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/init.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/packet.h b/platform/linux-dpdk/include/odp/api/packet.h
deleted file mode 100644
index 9df56021f..000000000
--- a/platform/linux-dpdk/include/odp/api/packet.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP packet descriptor
- */
-
-#ifndef ODP_PLAT_PACKET_H_
-#define ODP_PLAT_PACKET_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/event_types.h>
-#include <odp/api/plat/packet_io_types.h>
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/plat/buffer_types.h>
-#include <odp/api/plat/pool_types.h>
-
-/** @ingroup odp_packet
- * @{
- */
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/packet_inlines.h>
-#endif
-
-/**
- * @}
- */
-
-#include <odp/api/spec/packet.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ODP_PLAT_PACKET_H_ */
diff --git a/platform/linux-dpdk/include/odp/api/packet_flags.h b/platform/linux-dpdk/include/odp/api/packet_flags.h
deleted file mode 100644
index e3725a60f..000000000
--- a/platform/linux-dpdk/include/odp/api/packet_flags.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP packet flags
- */
-
-#ifndef ODP_PLAT_PACKET_FLAGS_H_
-#define ODP_PLAT_PACKET_FLAGS_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/packet_flag_inlines.h>
-#endif
-
-#include <odp/api/spec/packet_flags.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ODP_PLAT_PACKET_FLAGS_H_ */
diff --git a/platform/linux-dpdk/include/odp/api/packet_io.h b/platform/linux-dpdk/include/odp/api/packet_io.h
deleted file mode 120000
index 1e0d9e415..000000000
--- a/platform/linux-dpdk/include/odp/api/packet_io.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/packet_io.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/packet_io_stats.h b/platform/linux-dpdk/include/odp/api/packet_io_stats.h
deleted file mode 120000
index 58fdde2de..000000000
--- a/platform/linux-dpdk/include/odp/api/packet_io_stats.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/packet_io_stats.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/atomic_types.h b/platform/linux-dpdk/include/odp/api/plat/atomic_types.h
deleted file mode 120000
index 2febdca91..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/atomic_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/atomic_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/barrier_types.h b/platform/linux-dpdk/include/odp/api/plat/barrier_types.h
deleted file mode 120000
index 8869ae8cc..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/barrier_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/barrier_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/buffer_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/buffer_inline_types.h
new file mode 120000
index 000000000..4ef8ae6f2
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/buffer_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/buffer_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h b/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h
new file mode 100644
index 000000000..1740c36b3
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/buffer_inlines.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2019-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_BUFFER_INLINES_H_
+#define ODP_PLAT_BUFFER_INLINES_H_
+
+#include <odp/api/buffer_types.h>
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/pool_types.h>
+
+#include <odp/api/plat/buffer_inline_types.h>
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/event_inline_types.h>
+#include <odp/api/plat/event_validation_external.h>
+
+#include <rte_mbuf.h>
+#include <rte_mempool.h>
+#if defined(__PPC64__) && defined(bool)
+ #undef bool
+ #define bool _Bool
+#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_buffer_from_event __odp_buffer_from_event
+ #define odp_buffer_from_event_multi __odp_buffer_from_event_multi
+ #define odp_buffer_to_event __odp_buffer_to_event
+ #define odp_buffer_to_event_multi __odp_buffer_to_event_multi
+ #define odp_buffer_addr __odp_buffer_addr
+ #define odp_buffer_size __odp_buffer_size
+ #define odp_buffer_pool __odp_buffer_pool
+ #define odp_buffer_user_area __odp_buffer_user_area
+ #define odp_buffer_free __odp_buffer_free
+ #define odp_buffer_free_multi __odp_buffer_free_multi
+ #define odp_buffer_is_valid __odp_buffer_is_valid
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_buffer_t odp_buffer_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER);
+
+ return (odp_buffer_t)ev;
+}
+
+_ODP_INLINE void odp_buffer_from_event_multi(odp_buffer_t buf[], const odp_event_t ev[], int num)
+{
+ for (int i = 0; i < num; i++)
+ buf[i] = odp_buffer_from_event(ev[i]);
+}
+
+_ODP_INLINE odp_event_t odp_buffer_to_event(odp_buffer_t buf)
+{
+ return (odp_event_t)buf;
+}
+
+_ODP_INLINE void odp_buffer_to_event_multi(const odp_buffer_t buf[], odp_event_t ev[], int num)
+{
+ for (int i = 0; i < num; i++)
+ ev[i] = odp_buffer_to_event(buf[i]);
+}
+
+_ODP_INLINE void *odp_buffer_addr(odp_buffer_t buf)
+{
+ return _odp_event_hdr_field(buf, void *, base_data);
+}
+
+_ODP_INLINE uint32_t odp_buffer_size(odp_buffer_t buf)
+{
+ return _odp_event_hdr_field(buf, uint16_t, buf_len);
+}
+
+_ODP_INLINE odp_pool_t odp_buffer_pool(odp_buffer_t buf)
+{
+ return (odp_pool_t)(uintptr_t)_odp_event_hdr_field(buf, void *, pool);
+}
+
+_ODP_INLINE void *odp_buffer_user_area(odp_buffer_t buf)
+{
+ return _odp_buffer_get(buf, void *, uarea_addr);
+}
+
+_ODP_INLINE void odp_buffer_free(odp_buffer_t buf)
+{
+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf;
+
+ _odp_buffer_validate(buf, _ODP_EV_BUFFER_FREE);
+
+ rte_mempool_put(mbuf->pool, mbuf);
+}
+
+_ODP_INLINE void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
+{
+ struct rte_mbuf *mbuf_tbl[num];
+ struct rte_mempool *mp_pending;
+ unsigned int num_pending;
+
+ if (odp_unlikely(num <= 0))
+ return;
+
+ _odp_buffer_validate_multi(buf, num, _ODP_EV_BUFFER_FREE_MULTI);
+
+ mbuf_tbl[0] = (struct rte_mbuf *)buf[0];
+ mp_pending = mbuf_tbl[0]->pool;
+ num_pending = 1;
+
+/*
+ * num_pending is less than or equal to num, but GCC 13 is not able figure that out, so we have to
+ * ignore array-bounds warnings in the rte_mempool_put_bulk() calls.
+ */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+ for (int i = 1; i < num; i++) {
+ struct rte_mbuf *mbuf = (struct rte_mbuf *)buf[i];
+
+ if (mbuf->pool != mp_pending) {
+ rte_mempool_put_bulk(mp_pending, (void **)mbuf_tbl, num_pending);
+ mbuf_tbl[0] = mbuf;
+ num_pending = 1;
+ mp_pending = mbuf->pool;
+ } else {
+ mbuf_tbl[num_pending++] = mbuf;
+ }
+ }
+ rte_mempool_put_bulk(mp_pending, (void **)mbuf_tbl, num_pending);
+#pragma GCC diagnostic pop
+}
+
+_ODP_INLINE int odp_buffer_is_valid(odp_buffer_t buf)
+{
+ if (odp_event_is_valid(odp_buffer_to_event(buf)) == 0)
+ return 0;
+
+ if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
+ return 0;
+
+ if (odp_unlikely(_odp_buffer_validate(buf, _ODP_EV_BUFFER_IS_VALID)))
+ return 0;
+
+ return 1;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/buffer_types.h b/platform/linux-dpdk/include/odp/api/plat/buffer_types.h
deleted file mode 100644
index 809768f3d..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/buffer_types.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP buffer descriptor
- */
-
-#ifndef ODP_BUFFER_TYPES_H_
-#define ODP_BUFFER_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/buffer.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_buffer
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_buffer_t);
-
-#define ODP_BUFFER_INVALID _odp_cast_scalar(odp_buffer_t, 0xffffffff)
-
-typedef ODP_HANDLE_T(odp_buffer_seg_t);
-
-#define ODP_SEGMENT_INVALID ((odp_buffer_seg_t)ODP_BUFFER_INVALID)
-
-/**
- * @}
- */
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/byteorder_inlines.h b/platform/linux-dpdk/include/odp/api/plat/byteorder_inlines.h
index 5f3751602..3fdb5179c 120000..100644
--- a/platform/linux-dpdk/include/odp/api/plat/byteorder_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/byteorder_inlines.h
@@ -1 +1,116 @@
-../../../../../linux-generic/include/odp/api/plat/byteorder_inlines.h \ No newline at end of file
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP byteorder
+ */
+
+#ifndef ODP_PLAT_BYTEORDER_INLINES_H_
+#define ODP_PLAT_BYTEORDER_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/byteorder.h>
+
+#include <rte_config.h>
+#include <rte_byteorder.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef __odp_force
+#define __odp_force
+#endif
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_be_to_cpu_16 __odp_be_to_cpu_16
+ #define odp_be_to_cpu_32 __odp_be_to_cpu_32
+ #define odp_be_to_cpu_64 __odp_be_to_cpu_64
+ #define odp_cpu_to_be_16 __odp_cpu_to_be_16
+ #define odp_cpu_to_be_32 __odp_cpu_to_be_32
+ #define odp_cpu_to_be_64 __odp_cpu_to_be_64
+ #define odp_le_to_cpu_16 __odp_le_to_cpu_16
+ #define odp_le_to_cpu_32 __odp_le_to_cpu_32
+ #define odp_le_to_cpu_64 __odp_le_to_cpu_64
+ #define odp_cpu_to_le_16 __odp_cpu_to_le_16
+ #define odp_cpu_to_le_32 __odp_cpu_to_le_32
+ #define odp_cpu_to_le_64 __odp_cpu_to_le_64
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE uint16_t odp_be_to_cpu_16(odp_u16be_t be16)
+{
+ return rte_be_to_cpu_16((__odp_force uint16_t)be16);
+}
+
+_ODP_INLINE uint32_t odp_be_to_cpu_32(odp_u32be_t be32)
+{
+ return rte_be_to_cpu_32((__odp_force uint32_t)be32);
+}
+
+_ODP_INLINE uint64_t odp_be_to_cpu_64(odp_u64be_t be64)
+{
+ return rte_be_to_cpu_64((__odp_force uint64_t)be64);
+}
+
+_ODP_INLINE odp_u16be_t odp_cpu_to_be_16(uint16_t cpu16)
+{
+ return (__odp_force odp_u16be_t)rte_cpu_to_be_16(cpu16);
+}
+
+_ODP_INLINE odp_u32be_t odp_cpu_to_be_32(uint32_t cpu32)
+{
+ return (__odp_force odp_u32be_t)rte_cpu_to_be_32(cpu32);
+}
+
+_ODP_INLINE odp_u64be_t odp_cpu_to_be_64(uint64_t cpu64)
+{
+ return (__odp_force odp_u64be_t)rte_cpu_to_be_64(cpu64);
+}
+
+_ODP_INLINE uint16_t odp_le_to_cpu_16(odp_u16le_t le16)
+{
+ return rte_le_to_cpu_16((__odp_force uint16_t)le16);
+}
+
+_ODP_INLINE uint32_t odp_le_to_cpu_32(odp_u32le_t le32)
+{
+ return rte_le_to_cpu_32((__odp_force uint32_t)le32);
+}
+
+_ODP_INLINE uint64_t odp_le_to_cpu_64(odp_u64le_t le64)
+{
+ return rte_le_to_cpu_64((__odp_force uint64_t)le64);
+}
+
+_ODP_INLINE odp_u16le_t odp_cpu_to_le_16(uint16_t cpu16)
+{
+ return (__odp_force odp_u16le_t)rte_cpu_to_le_16(cpu16);
+}
+
+_ODP_INLINE odp_u32le_t odp_cpu_to_le_32(uint32_t cpu32)
+{
+ return (__odp_force odp_u32le_t)rte_cpu_to_le_32(cpu32);
+}
+
+_ODP_INLINE odp_u64le_t odp_cpu_to_le_64(uint64_t cpu64)
+{
+ return (__odp_force odp_u64le_t)rte_cpu_to_le_64(cpu64);
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/byteorder_types.h b/platform/linux-dpdk/include/odp/api/plat/byteorder_types.h
deleted file mode 120000
index 13d0058a7..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/byteorder_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/byteorder_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/classification_types.h b/platform/linux-dpdk/include/odp/api/plat/classification_types.h
deleted file mode 120000
index 1d1183de3..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/classification_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/classification_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/cpu_inlines.h b/platform/linux-dpdk/include/odp/api/plat/cpu_inlines.h
new file mode 120000
index 000000000..c6f02807a
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/cpu_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/cpu_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/cpumask_types.h b/platform/linux-dpdk/include/odp/api/plat/cpumask_types.h
deleted file mode 120000
index cdf1f996f..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/cpumask_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/cpumask_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/crypto_inlines.h b/platform/linux-dpdk/include/odp/api/plat/crypto_inlines.h
new file mode 120000
index 000000000..0c1935696
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/crypto_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/crypto_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/crypto_types.h b/platform/linux-dpdk/include/odp/api/plat/crypto_types.h
deleted file mode 120000
index 36d39fdc5..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/crypto_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/crypto_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/debug_inlines.h b/platform/linux-dpdk/include/odp/api/plat/debug_inlines.h
new file mode 120000
index 000000000..05e4e91d3
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/debug_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/debug_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/dma_inlines.h b/platform/linux-dpdk/include/odp/api/plat/dma_inlines.h
new file mode 120000
index 000000000..5b60f374e
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/dma_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/dma_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h
new file mode 100644
index 000000000..94a95a889
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/event_inline_types.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_EVENT_INLINE_TYPES_H_
+#define ODP_PLAT_EVENT_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Event header field accessors */
+#define _odp_event_hdr_field(event_hdr, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)event_hdr + \
+ _odp_event_inline_offset.field))
+#define _odp_event_hdr_ptr(event_hdr, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)event_hdr + \
+ _odp_event_inline_offset.field))
+
+/* Event header field offsets for inline functions */
+typedef struct _odp_event_inline_offset_t {
+ uint16_t event_type;
+ uint16_t base_data;
+ uint16_t subtype;
+ uint16_t flow_id;
+ uint16_t pool;
+ uint16_t buf_len;
+
+} _odp_event_inline_offset_t;
+
+extern const _odp_event_inline_offset_t _odp_event_inline_offset;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/event_inlines.h b/platform/linux-dpdk/include/odp/api/plat/event_inlines.h
new file mode 120000
index 000000000..f4f1fd63f
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/event_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/event_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/event_types.h b/platform/linux-dpdk/include/odp/api/plat/event_types.h
deleted file mode 100644
index 086e83d51..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/event_types.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP event
- */
-
-#ifndef ODP_EVENT_TYPES_H_
-#define ODP_EVENT_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/event.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_event
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_event_t);
-
-#define ODP_EVENT_INVALID _odp_cast_scalar(odp_event_t, 0xffffffff)
-
-/**
- * Event types
- */
-typedef enum odp_event_type_t {
- ODP_EVENT_BUFFER = 1,
- ODP_EVENT_PACKET = 2,
- ODP_EVENT_TIMEOUT = 3,
- ODP_EVENT_CRYPTO_COMPL = 4,
-} odp_event_type_t;
-
-/**
- * @}
- */
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/event_validation_external.h b/platform/linux-dpdk/include/odp/api/plat/event_validation_external.h
new file mode 120000
index 000000000..001662d8b
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/event_validation_external.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/event_validation_external.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h
new file mode 120000
index 000000000..30b894e27
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/event_vector_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/event_vector_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/hash_inlines.h b/platform/linux-dpdk/include/odp/api/plat/hash_inlines.h
new file mode 120000
index 000000000..fcb963c41
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/hash_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/hash_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/init_types.h b/platform/linux-dpdk/include/odp/api/plat/init_types.h
deleted file mode 120000
index 6b19ee415..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/init_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/init_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h b/platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h
new file mode 120000
index 000000000..72c865d7d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/ipsec_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/ipsec_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h
index fef6ff97b..b6876e6d7 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, Linaro Limited
+/* Copyright (c) 2017-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,20 +13,57 @@
#ifndef _ODP_PLAT_PACKET_FLAG_INLINES_H_
#define _ODP_PLAT_PACKET_FLAG_INLINES_H_
-#include <odp/api/plat/packet_types.h>
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/packet_types.h>
+#include <odp/api/plat/packet_inline_types.h>
-/** @internal Inline function offsets */
-extern const _odp_packet_inline_offset_t _odp_packet_inline;
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
-/** @internal Inline function @param pkt @return */
static inline uint64_t _odp_packet_input_flags(odp_packet_t pkt)
{
- return *(uint64_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.input_flags);
+ return _odp_pkt_get(pkt, uint64_t, input_flags);
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_l2(odp_packet_t pkt)
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_packet_has_l2 __odp_packet_has_l2
+ #define odp_packet_has_l3 __odp_packet_has_l3
+ #define odp_packet_has_l4 __odp_packet_has_l4
+ #define odp_packet_has_eth __odp_packet_has_eth
+ #define odp_packet_has_jumbo __odp_packet_has_jumbo
+ #define odp_packet_has_flow_hash __odp_packet_has_flow_hash
+ #define odp_packet_has_flow_hash_clr __odp_packet_has_flow_hash_clr
+ #define odp_packet_has_ts __odp_packet_has_ts
+ #define odp_packet_has_ipsec __odp_packet_has_ipsec
+ #define odp_packet_has_eth_bcast __odp_packet_has_eth_bcast
+ #define odp_packet_has_eth_mcast __odp_packet_has_eth_mcast
+ #define odp_packet_has_vlan __odp_packet_has_vlan
+ #define odp_packet_has_vlan_qinq __odp_packet_has_vlan_qinq
+ #define odp_packet_has_arp __odp_packet_has_arp
+ #define odp_packet_has_ipv4 __odp_packet_has_ipv4
+ #define odp_packet_has_ipv6 __odp_packet_has_ipv6
+ #define odp_packet_has_ip_bcast __odp_packet_has_ip_bcast
+ #define odp_packet_has_ip_mcast __odp_packet_has_ip_mcast
+ #define odp_packet_has_ipfrag __odp_packet_has_ipfrag
+ #define odp_packet_has_ipopt __odp_packet_has_ipopt
+ #define odp_packet_has_udp __odp_packet_has_udp
+ #define odp_packet_has_tcp __odp_packet_has_tcp
+ #define odp_packet_has_sctp __odp_packet_has_sctp
+ #define odp_packet_has_icmp __odp_packet_has_icmp
+ #define odp_packet_has_error __odp_packet_has_error
+ #define odp_packet_has_l2_error __odp_packet_has_l2_error
+ #define odp_packet_has_l3_error __odp_packet_has_l3_error
+ #define odp_packet_has_l4_error __odp_packet_has_l4_error
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE int odp_packet_has_l2(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -34,8 +71,23 @@ static inline int _odp_packet_has_l2(odp_packet_t pkt)
return flags.l2;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_eth(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_l3(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.l3;
+}
+
+_ODP_INLINE int odp_packet_has_l4(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.l4;
+}
+
+_ODP_INLINE int odp_packet_has_eth(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -43,8 +95,7 @@ static inline int _odp_packet_has_eth(odp_packet_t pkt)
return flags.eth;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_jumbo(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_jumbo(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -52,22 +103,19 @@ static inline int _odp_packet_has_jumbo(odp_packet_t pkt)
return flags.jumbo;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_flow_hash(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_flow_hash(odp_packet_t pkt)
{
- return *(uint64_t *)((char *)pkt + _odp_packet_inline.ol_flags) &
- _odp_packet_inline.rss_flag;
+ return !!(_odp_pkt_get(pkt, uint64_t, ol_flags) & _odp_packet_inline.rss_flag);
}
-/** @internal Inline function @param pkt */
-static inline void _odp_packet_has_flow_hash_clr(odp_packet_t pkt)
+_ODP_INLINE void odp_packet_has_flow_hash_clr(odp_packet_t pkt)
{
- *(uint64_t *)((char *)pkt + _odp_packet_inline.ol_flags) &=
- ~_odp_packet_inline.rss_flag;
+ uint64_t *ol_flags = &_odp_pkt_get(pkt, uint64_t, ol_flags);
+
+ *ol_flags &= ~_odp_packet_inline.rss_flag;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_ts(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_ts(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -75,20 +123,175 @@ static inline int _odp_packet_has_ts(odp_packet_t pkt)
return flags.timestamp;
}
-/* Include inlined versions of API functions */
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
+_ODP_INLINE int odp_packet_has_ipsec(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
-/** @ingroup odp_packet
- * @{
- */
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipsec;
+}
-#include <odp/api/plat/packet_flag_inlines_api.h>
+_ODP_INLINE int odp_packet_has_eth_bcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
-/**
- * @}
- */
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.eth_bcast;
+}
+
+_ODP_INLINE int odp_packet_has_eth_mcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.eth_mcast;
+}
+
+_ODP_INLINE int odp_packet_has_vlan(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.vlan;
+}
+
+_ODP_INLINE int odp_packet_has_vlan_qinq(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.vlan_qinq;
+}
+
+_ODP_INLINE int odp_packet_has_arp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.arp;
+}
+
+_ODP_INLINE int odp_packet_has_ipv4(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipv4;
+}
+
+_ODP_INLINE int odp_packet_has_ipv6(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipv6;
+}
+
+_ODP_INLINE int odp_packet_has_ip_bcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ip_bcast;
+}
+
+_ODP_INLINE int odp_packet_has_ip_mcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ip_mcast;
+}
+
+_ODP_INLINE int odp_packet_has_ipfrag(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipfrag;
+}
+
+_ODP_INLINE int odp_packet_has_ipopt(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipopt;
+}
+
+_ODP_INLINE int odp_packet_has_udp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.udp;
+}
+
+_ODP_INLINE int odp_packet_has_tcp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.tcp;
+}
+
+_ODP_INLINE int odp_packet_has_sctp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.sctp;
+}
+
+_ODP_INLINE int odp_packet_has_icmp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.icmp;
+}
+
+_ODP_INLINE int odp_packet_has_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ return flags.all.error != 0;
+}
+
+_ODP_INLINE int odp_packet_has_l2_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ /* L2 parsing is always done by default and hence
+ no additional check is required. */
+ return flags.snap_len_err;
+}
+
+_ODP_INLINE int odp_packet_has_l3_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.ip_err;
+}
+
+_ODP_INLINE int odp_packet_has_l4_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.tcp_err | flags.udp_err;
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
#endif
#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines_api.h b/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines_api.h
deleted file mode 100644
index 24062bf26..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/packet_flag_inlines_api.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Packet flag inline functions
- */
-
-#ifndef _ODP_PLAT_PACKET_FLAG_INLINES_API_H_
-#define _ODP_PLAT_PACKET_FLAG_INLINES_API_H_
-
-_ODP_INLINE int odp_packet_has_l2(odp_packet_t pkt)
-{
- return _odp_packet_has_l2(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_eth(odp_packet_t pkt)
-{
- return _odp_packet_has_eth(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_jumbo(odp_packet_t pkt)
-{
- return _odp_packet_has_jumbo(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_flow_hash(odp_packet_t pkt)
-{
- return _odp_packet_has_flow_hash(pkt);
-}
-
-_ODP_INLINE void odp_packet_has_flow_hash_clr(odp_packet_t pkt)
-{
- _odp_packet_has_flow_hash_clr(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_ts(odp_packet_t pkt)
-{
- return _odp_packet_has_ts(pkt);
-}
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
new file mode 100644
index 000000000..255db9d78
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
@@ -0,0 +1,174 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_PACKET_INLINE_TYPES_H_
+#define ODP_PACKET_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Packet field accessor */
+#define _odp_pkt_get(pkt, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+
+#define _odp_pkt_get_ptr(pkt, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+
+/* Packet header field offsets for inline functions */
+typedef struct _odp_packet_inline_offset_t {
+ uint16_t mb;
+ uint16_t pool;
+ uint16_t input;
+ uint16_t user_ptr;
+ uint16_t l2_offset;
+ uint16_t l3_offset;
+ uint16_t l4_offset;
+ uint16_t timestamp;
+ uint16_t input_flags;
+ uint16_t flags;
+ uint16_t cls_mark;
+ uint16_t ipsec_ctx;
+ uint16_t crypto_op;
+ uint16_t buf_addr;
+ uint16_t data;
+ uint16_t pkt_len;
+ uint16_t seg_len;
+ uint16_t nb_segs;
+ uint16_t user_area;
+ uint16_t rss;
+ uint16_t ol_flags;
+ uint64_t rss_flag;
+
+} _odp_packet_inline_offset_t;
+
+extern const _odp_packet_inline_offset_t _odp_packet_inline;
+
+/* Packet input & protocol flags */
+typedef union {
+ /* All input flags */
+ uint64_t all;
+
+ /* Individual input flags */
+ struct {
+ uint64_t dst_queue:1; /* Dst queue present */
+ uint64_t cls_mark: 1; /* Classifier mark value present */
+
+ uint64_t timestamp:1; /* Timestamp present */
+
+ uint64_t l2:1; /* known L2 protocol present */
+ uint64_t l3:1; /* known L3 protocol present */
+ uint64_t l4:1; /* known L4 protocol present */
+
+ uint64_t eth:1; /* Ethernet */
+ uint64_t eth_bcast:1; /* Ethernet broadcast */
+ uint64_t eth_mcast:1; /* Ethernet multicast */
+ uint64_t jumbo:1; /* Jumbo frame */
+ uint64_t vlan:1; /* VLAN hdr found */
+ uint64_t vlan_qinq:1; /* Stacked VLAN found, QinQ */
+
+ uint64_t snap:1; /* SNAP */
+ uint64_t arp:1; /* ARP */
+
+ uint64_t ipv4:1; /* IPv4 */
+ uint64_t ipv6:1; /* IPv6 */
+ uint64_t ip_bcast:1; /* IP broadcast */
+ uint64_t ip_mcast:1; /* IP multicast */
+ uint64_t ipfrag:1; /* IP fragment */
+ uint64_t ipopt:1; /* IP optional headers */
+
+ uint64_t ipsec:1; /* IPSec packet. Required by the
+ odp_packet_has_ipsec_set() func. */
+ uint64_t ipsec_ah:1; /* IPSec authentication header */
+ uint64_t ipsec_esp:1; /* IPSec encapsulating security
+ payload */
+ uint64_t udp:1; /* UDP */
+ uint64_t tcp:1; /* TCP */
+ uint64_t sctp:1; /* SCTP */
+ uint64_t icmp:1; /* ICMP */
+ uint64_t no_next_hdr:1; /* No Next Header */
+
+ uint64_t color:2; /* Packet color for traffic mgmt */
+ uint64_t nodrop:1; /* Drop eligibility status */
+
+ uint64_t l3_chksum_done:1; /* L3 checksum validation done */
+ uint64_t l4_chksum_done:1; /* L4 checksum validation done */
+ uint64_t ipsec_udp:1; /* UDP-encapsulated IPsec packet */
+ uint64_t udp_chksum_zero:1; /* UDP header had 0 as chksum */
+ };
+} _odp_packet_input_flags_t;
+
+/*
+ * Additional packet flags
+ */
+typedef union {
+ /* All flags */
+ uint32_t all_flags;
+
+ struct {
+ uint32_t reserved1: 4;
+
+ /*
+ * Init flags
+ */
+ uint32_t user_ptr_set: 1; /* User has set a non-NULL value */
+ uint32_t user_flag: 1;
+
+ /*
+ * Packet output flags
+ */
+ uint32_t lso: 1; /* LSO requested */
+ uint32_t payload_off: 1; /* Payload offset is valid */
+ uint32_t l3_chksum_set: 1; /* L3 chksum bit is valid */
+ uint32_t l3_chksum: 1; /* L3 chksum override */
+ uint32_t l4_chksum_set: 1; /* L4 chksum bit is valid */
+ uint32_t l4_chksum: 1; /* L4 chksum override */
+ uint32_t ts_set: 1; /* Set Tx timestamp */
+ uint32_t tx_compl_ev: 1; /* Tx completion event requested */
+ uint32_t tx_compl_poll: 1; /* Tx completion poll requested */
+ uint32_t free_ctrl: 1; /* Don't free option */
+ uint32_t tx_aging: 1; /* Packet aging at Tx requested */
+ uint32_t shaper_len_adj: 8; /* Adjustment for traffic mgr */
+
+ /*
+ * Error flags
+ */
+ uint32_t snap_len_err: 1; /* Snap length error */
+ uint32_t ip_err: 1; /* IP error */
+ uint32_t l3_chksum_err: 1; /* L3 checksum error */
+ uint32_t tcp_err: 1; /* TCP error */
+ uint32_t udp_err: 1; /* UDP error */
+ uint32_t sctp_err: 1; /* SCTP error */
+ uint32_t l4_chksum_err: 1; /* L4 checksum error */
+ };
+
+ /* Flag groups */
+ struct {
+ uint32_t reserved2: 4;
+ uint32_t other: 21; /* All other flags */
+ uint32_t error: 7; /* All error flags */
+ } all;
+
+} _odp_packet_flags_t;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
index 93102974b..b41a272ef 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,151 +18,508 @@
extern "C" {
#endif
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/pool.h>
-#include <odp/api/packet_io.h>
+#include <odp/api/event.h>
#include <odp/api/hints.h>
-
+#include <odp/api/packet_types.h>
+#include <odp/api/pool_types.h>
+#include <odp/api/time_types.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/event_inline_types.h>
+#include <odp/api/plat/event_validation_external.h>
+#include <odp/api/plat/packet_io_inlines.h>
+#include <odp/api/plat/packet_inline_types.h>
+#include <odp/api/plat/pool_inline_types.h>
+
+#include <stdint.h>
+#include <string.h>
+
+/* Required by rte_mbuf.h */
+#include <sys/types.h>
+#include <rte_config.h>
#include <rte_mbuf.h>
-/** @internal Inline function offsets */
-extern const _odp_packet_inline_offset_t _odp_packet_inline;
+/* ppc64 rte_memcpy.h may overwrite bool with an incompatible type and define
+ * vector */
+#include <rte_memcpy.h>
+#if defined(__PPC64__) && defined(bool)
+ #undef bool
+ #define bool _Bool
+#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_packet_offset __odp_packet_offset
+ #define odp_packet_data __odp_packet_data
+ #define odp_packet_seg_len __odp_packet_seg_len
+ #define odp_packet_data_seg_len __odp_packet_data_seg_len
+ #define odp_packet_len __odp_packet_len
+ #define odp_packet_buf_len __odp_packet_buf_len
+ #define odp_packet_headroom __odp_packet_headroom
+ #define odp_packet_tailroom __odp_packet_tailroom
+ #define odp_packet_pool __odp_packet_pool
+ #define odp_packet_input __odp_packet_input
+ #define odp_packet_input_set __odp_packet_input_set
+ #define odp_packet_input_index __odp_packet_input_index
+ #define odp_packet_num_segs __odp_packet_num_segs
+ #define odp_packet_user_ptr __odp_packet_user_ptr
+ #define odp_packet_user_ptr_set __odp_packet_user_ptr_set
+ #define odp_packet_user_area __odp_packet_user_area
+ #define odp_packet_user_area_size __odp_packet_user_area_size
+ #define odp_packet_user_flag __odp_packet_user_flag
+ #define odp_packet_user_flag_set __odp_packet_user_flag_set
+ #define odp_packet_l2_offset __odp_packet_l2_offset
+ #define odp_packet_l3_offset __odp_packet_l3_offset
+ #define odp_packet_l4_offset __odp_packet_l4_offset
+ #define odp_packet_l2_offset_set __odp_packet_l2_offset_set
+ #define odp_packet_l3_offset_set __odp_packet_l3_offset_set
+ #define odp_packet_l4_offset_set __odp_packet_l4_offset_set
+ #define odp_packet_l2_ptr __odp_packet_l2_ptr
+ #define odp_packet_l3_ptr __odp_packet_l3_ptr
+ #define odp_packet_l4_ptr __odp_packet_l4_ptr
+ #define odp_packet_l2_type __odp_packet_l2_type
+ #define odp_packet_l3_type __odp_packet_l3_type
+ #define odp_packet_l4_type __odp_packet_l4_type
+ #define odp_packet_l3_chksum_status __odp_packet_l3_chksum_status
+ #define odp_packet_l4_chksum_status __odp_packet_l4_chksum_status
+ #define odp_packet_l3_chksum_insert __odp_packet_l3_chksum_insert
+ #define odp_packet_l4_chksum_insert __odp_packet_l4_chksum_insert
+ #define odp_packet_flow_hash __odp_packet_flow_hash
+ #define odp_packet_flow_hash_set __odp_packet_flow_hash_set
+ #define odp_packet_ts __odp_packet_ts
+ #define odp_packet_ts_set __odp_packet_ts_set
+ #define odp_packet_ts_request __odp_packet_ts_request
+ #define odp_packet_head __odp_packet_head
+ #define odp_packet_is_segmented __odp_packet_is_segmented
+ #define odp_packet_first_seg __odp_packet_first_seg
+ #define odp_packet_last_seg __odp_packet_last_seg
+ #define odp_packet_next_seg __odp_packet_next_seg
+ #define odp_packet_prefetch __odp_packet_prefetch
+ #define odp_packet_copy_from_mem __odp_packet_copy_from_mem
+ #define odp_packet_copy_to_mem __odp_packet_copy_to_mem
+ #define odp_packet_from_event __odp_packet_from_event
+ #define odp_packet_to_event __odp_packet_to_event
+ #define odp_packet_from_event_multi __odp_packet_from_event_multi
+ #define odp_packet_to_event_multi __odp_packet_to_event_multi
+ #define odp_packet_subtype __odp_packet_subtype
+ #define odp_packet_tx_compl_from_event __odp_packet_tx_compl_from_event
+ #define odp_packet_tx_compl_to_event __odp_packet_tx_compl_to_event
+ #define odp_packet_free __odp_packet_free
+ #define odp_packet_free_multi __odp_packet_free_multi
+ #define odp_packet_free_sp __odp_packet_free_sp
+ #define odp_packet_seg_data __odp_packet_seg_data
+ #define odp_packet_seg_data_len __odp_packet_seg_data_len
+ #define odp_packet_ref_static __odp_packet_ref_static
+ #define odp_packet_has_ref __odp_packet_has_ref
+ #define odp_packet_color __odp_packet_color
+ #define odp_packet_drop_eligible __odp_packet_drop_eligible
+ #define odp_packet_shaper_len_adjust __odp_packet_shaper_len_adjust
+ #define odp_packet_cls_mark __odp_packet_cls_mark
+ #define odp_packet_buf_data_len __odp_packet_buf_data_len
+ #define odp_packet_buf_size __odp_packet_buf_size
+ #define odp_packet_buf_head __odp_packet_buf_head
+ #define odp_packet_buf_data_offset __odp_packet_buf_data_offset
+ #define odp_packet_buf_data_set __odp_packet_buf_data_set
+ #define odp_packet_buf_from_head __odp_packet_buf_from_head
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void *odp_packet_offset(odp_packet_t pkt, uint32_t offset,
+ uint32_t *len, odp_packet_seg_t *seg)
+{
+ struct rte_mbuf *mb = (struct rte_mbuf *)pkt;
+
+ if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+ goto err;
+
+ do {
+ if (mb->data_len > offset)
+ break;
+ offset -= mb->data_len;
+ mb = mb->next;
+ } while (mb);
+
+ if (mb) {
+ if (len)
+ *len = mb->data_len - offset;
+ if (seg)
+ *seg = (odp_packet_seg_t)(uintptr_t)mb;
+ return (void *)(rte_pktmbuf_mtod(mb, char *) + offset);
+ }
+err:
+ if (len)
+ *len = 0;
+ if (seg)
+ *seg = NULL;
+ return NULL;
+}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_data(odp_packet_t pkt)
+_ODP_INLINE void *odp_packet_data(odp_packet_t pkt)
{
- char **buf_addr = (char **)(void *)((char *)pkt +
- _odp_packet_inline.buf_addr);
- uint16_t data_off = *(uint16_t *)(void *)((char *)pkt +
- _odp_packet_inline.data);
+ uint8_t *buf_addr = (uint8_t *)_odp_pkt_get(pkt, void *, buf_addr);
+ uint16_t data_off = _odp_pkt_get(pkt, uint16_t, data);
- return (void *)(*buf_addr + data_off);
+ return (void *)(buf_addr + data_off);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_seg_len(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_seg_len(odp_packet_t pkt)
{
- return *(uint16_t *)(void *)((char *)pkt + _odp_packet_inline.seg_len);
+ return _odp_pkt_get(pkt, uint16_t, seg_len);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_len(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_len(odp_packet_t pkt)
{
- return *(uint32_t *)(void *)((char *)pkt + _odp_packet_inline.pkt_len);
+ return _odp_pkt_get(pkt, uint32_t, pkt_len);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_headroom(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_buf_len(odp_packet_t pkt)
{
- struct rte_mbuf *mb = (struct rte_mbuf *)((uint8_t *)pkt +
- _odp_packet_inline.mb);
+ struct rte_mbuf *mb = (struct rte_mbuf *)pkt;
- return rte_pktmbuf_headroom(mb);
+ return (uint32_t)(mb->nb_segs * mb->buf_len);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_tailroom(odp_packet_t pkt)
+_ODP_INLINE void *odp_packet_data_seg_len(odp_packet_t pkt,
+ uint32_t *seg_len)
{
- struct rte_mbuf *mb = (struct rte_mbuf *)((uint8_t *)pkt +
- _odp_packet_inline.mb);
+ *seg_len = odp_packet_seg_len(pkt);
+ return odp_packet_data(pkt);
+}
+
+_ODP_INLINE uint32_t odp_packet_headroom(odp_packet_t pkt)
+{
+ return rte_pktmbuf_headroom((struct rte_mbuf *)pkt);
+}
+
+_ODP_INLINE uint32_t odp_packet_tailroom(odp_packet_t pkt)
+{
+ struct rte_mbuf *mb = (struct rte_mbuf *)pkt;
return rte_pktmbuf_tailroom(rte_pktmbuf_lastseg(mb));
}
-/** @internal Inline function @param pkt @return */
-static inline odp_pool_t _odp_packet_pool(odp_packet_t pkt)
+_ODP_INLINE odp_pool_t odp_packet_pool(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, odp_pool_t, pool);
+}
+
+_ODP_INLINE odp_pktio_t odp_packet_input(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, odp_pktio_t, input);
+}
+
+_ODP_INLINE void odp_packet_input_set(odp_packet_t pkt, odp_pktio_t pktio)
+{
+ odp_pktio_t *pktio_ptr = _odp_pkt_get_ptr(pkt, odp_pktio_t, input);
+
+ *pktio_ptr = pktio;
+}
+
+_ODP_INLINE int odp_packet_input_index(odp_packet_t pkt)
+{
+ odp_pktio_t pktio = odp_packet_input(pkt);
+
+ return odp_pktio_index(pktio);
+}
+
+_ODP_INLINE int odp_packet_num_segs(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint16_t, nb_segs);
+}
+
+_ODP_INLINE void *odp_packet_user_ptr(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ if (flags.user_ptr_set == 0)
+ return NULL;
+
+ return _odp_pkt_get(pkt, void *, user_ptr);
+}
+
+_ODP_INLINE void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ptr)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+ const void **user_ptr = _odp_pkt_get_ptr(pkt, const void *, user_ptr);
+
+ if (odp_unlikely(ptr == NULL)) {
+ flags->user_ptr_set = 0;
+ return;
+ }
+
+ *user_ptr = ptr;
+ flags->user_ptr_set = 1;
+}
+
+_ODP_INLINE void *odp_packet_user_area(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, void *, user_area);
+}
+
+_ODP_INLINE uint32_t odp_packet_user_area_size(odp_packet_t pkt)
+{
+ void *pool = _odp_pkt_get(pkt, void *, pool);
+
+ return _odp_pool_get(pool, uint32_t, uarea_size);
+}
+
+_ODP_INLINE int odp_packet_user_flag(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.user_flag;
+}
+
+_ODP_INLINE void odp_packet_user_flag_set(odp_packet_t pkt, int val)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->user_flag = !!val;
+}
+
+_ODP_INLINE uint32_t odp_packet_l2_offset(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint16_t, l2_offset);
+}
+
+_ODP_INLINE uint32_t odp_packet_l3_offset(odp_packet_t pkt)
{
- return *(odp_pool_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.pool);
+ return _odp_pkt_get(pkt, uint16_t, l3_offset);
}
-/** @internal Inline function @param pkt @return */
-static inline odp_pktio_t _odp_packet_input(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_l4_offset(odp_packet_t pkt)
{
- return *(odp_pktio_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.input);
+ return _odp_pkt_get(pkt, uint16_t, l4_offset);
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_num_segs(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_l2_offset_set(odp_packet_t pkt, uint32_t offset)
{
- return *(uint8_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.nb_segs);
+ uint16_t *l2_offset = _odp_pkt_get_ptr(pkt, uint16_t, l2_offset);
+ _odp_packet_input_flags_t *input_flags = _odp_pkt_get_ptr(pkt, _odp_packet_input_flags_t,
+ input_flags);
+
+ if (odp_unlikely(offset >= odp_packet_len(pkt)))
+ return -1;
+
+ input_flags->l2 = 1;
+ *l2_offset = (uint16_t)offset;
+ return 0;
+}
+
+_ODP_INLINE int odp_packet_l3_offset_set(odp_packet_t pkt, uint32_t offset)
+{
+ uint16_t *l3_offset = _odp_pkt_get_ptr(pkt, uint16_t, l3_offset);
+
+ if (odp_unlikely(offset >= odp_packet_len(pkt)))
+ return -1;
+
+ *l3_offset = (uint16_t)offset;
+ return 0;
+}
+
+_ODP_INLINE int odp_packet_l4_offset_set(odp_packet_t pkt, uint32_t offset)
+{
+ uint16_t *l4_offset = _odp_pkt_get_ptr(pkt, uint16_t, l4_offset);
+
+ if (odp_unlikely(offset >= odp_packet_len(pkt)))
+ return -1;
+
+ *l4_offset = (uint16_t)offset;
+ return 0;
+}
+
+_ODP_INLINE void *odp_packet_l2_ptr(odp_packet_t pkt, uint32_t *len)
+{
+ return odp_packet_offset(pkt, _odp_pkt_get(pkt, uint16_t, l2_offset),
+ len, NULL);
+}
+
+_ODP_INLINE void *odp_packet_l3_ptr(odp_packet_t pkt, uint32_t *len)
+{
+ return odp_packet_offset(pkt, _odp_pkt_get(pkt, uint16_t, l3_offset),
+ len, NULL);
+}
+
+_ODP_INLINE void *odp_packet_l4_ptr(odp_packet_t pkt, uint32_t *len)
+{
+ return odp_packet_offset(pkt, _odp_pkt_get(pkt, uint16_t, l4_offset),
+ len, NULL);
+}
+
+_ODP_INLINE odp_proto_l2_type_t odp_packet_l2_type(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return input_flags.eth ? ODP_PROTO_L2_TYPE_ETH : ODP_PROTO_L2_TYPE_NONE;
+}
+
+_ODP_INLINE odp_proto_l3_type_t odp_packet_l3_type(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (input_flags.ipv4)
+ return ODP_PROTO_L3_TYPE_IPV4;
+ else if (input_flags.ipv6)
+ return ODP_PROTO_L3_TYPE_IPV6;
+ else if (input_flags.arp)
+ return ODP_PROTO_L3_TYPE_ARP;
+
+ return ODP_PROTO_L3_TYPE_NONE;
+}
+
+_ODP_INLINE odp_proto_l4_type_t odp_packet_l4_type(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (input_flags.tcp)
+ return ODP_PROTO_L4_TYPE_TCP;
+ else if (input_flags.udp)
+ return ODP_PROTO_L4_TYPE_UDP;
+ else if (input_flags.sctp)
+ return ODP_PROTO_L4_TYPE_SCTP;
+ else if (input_flags.ipsec_ah)
+ return ODP_PROTO_L4_TYPE_AH;
+ else if (input_flags.ipsec_esp)
+ return ODP_PROTO_L4_TYPE_ESP;
+ else if (input_flags.icmp && input_flags.ipv4)
+ return ODP_PROTO_L4_TYPE_ICMPV4;
+ else if (input_flags.icmp && input_flags.ipv6)
+ return ODP_PROTO_L4_TYPE_ICMPV6;
+ else if (input_flags.no_next_hdr)
+ return ODP_PROTO_L4_TYPE_NO_NEXT;
+
+ return ODP_PROTO_L4_TYPE_NONE;
+}
+
+_ODP_INLINE odp_packet_chksum_status_t odp_packet_l3_chksum_status(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+ _odp_packet_input_flags_t input_flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (!input_flags.l3_chksum_done)
+ return ODP_PACKET_CHKSUM_UNKNOWN;
+
+ if (flags.l3_chksum_err)
+ return ODP_PACKET_CHKSUM_BAD;
+
+ return ODP_PACKET_CHKSUM_OK;
+}
+
+_ODP_INLINE odp_packet_chksum_status_t odp_packet_l4_chksum_status(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+ _odp_packet_input_flags_t input_flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (!input_flags.l4_chksum_done)
+ return ODP_PACKET_CHKSUM_UNKNOWN;
+
+ if (flags.l4_chksum_err)
+ return ODP_PACKET_CHKSUM_BAD;
+
+ return ODP_PACKET_CHKSUM_OK;
+}
+
+_ODP_INLINE void odp_packet_l3_chksum_insert(odp_packet_t pkt, int insert)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->l3_chksum_set = 1;
+ flags->l3_chksum = !!insert;
}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_user_ptr(odp_packet_t pkt)
+_ODP_INLINE void odp_packet_l4_chksum_insert(odp_packet_t pkt, int insert)
{
- return *(void **)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.user_ptr);
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->l4_chksum_set = 1;
+ flags->l4_chksum = !!insert;
}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_user_area(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_flow_hash(odp_packet_t pkt)
{
- return (void *)((char *)pkt + _odp_packet_inline.udata);
+ return _odp_pkt_get(pkt, uint32_t, rss);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_user_area_size(odp_packet_t pkt)
+_ODP_INLINE void odp_packet_flow_hash_set(odp_packet_t pkt,
+ uint32_t flow_hash)
{
- return *(uint32_t *)(void *)((char *)pkt +
- _odp_packet_inline.udata_len);
+ uint32_t *rss = &_odp_pkt_get(pkt, uint32_t, rss);
+ uint64_t *ol_flags = &_odp_pkt_get(pkt, uint64_t, ol_flags);
+
+ *rss = flow_hash;
+ *ol_flags |= _odp_packet_inline.rss_flag;
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_flow_hash(odp_packet_t pkt)
+_ODP_INLINE odp_time_t odp_packet_ts(odp_packet_t pkt)
{
- return *(uint32_t *)(void *)((char *)pkt + _odp_packet_inline.rss);
+ return _odp_pkt_get(pkt, odp_time_t, timestamp);
}
-/** @internal Inline function @param pkt @param flow_hash */
-static inline void _odp_packet_flow_hash_set(odp_packet_t pkt, uint32_t flow_hash)
+_ODP_INLINE void odp_packet_ts_set(odp_packet_t pkt, odp_time_t timestamp)
{
- *(uint32_t *)(void *)((char *)pkt + _odp_packet_inline.rss)
- = flow_hash;
- *(uint64_t *)(void *)((char *)pkt + _odp_packet_inline.ol_flags)
- |= _odp_packet_inline.rss_flag;
+ odp_time_t *ts = _odp_pkt_get_ptr(pkt, odp_time_t, timestamp);
+ _odp_packet_input_flags_t *input_flags = _odp_pkt_get_ptr(pkt, _odp_packet_input_flags_t,
+ input_flags);
+
+ *ts = timestamp;
+ input_flags->timestamp = 1;
}
-/** @internal Inline function @param pkt @return */
-static inline odp_time_t _odp_packet_ts(odp_packet_t pkt)
+_ODP_INLINE void odp_packet_ts_request(odp_packet_t pkt, int enable)
{
- return *(odp_time_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.timestamp);
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->ts_set = !!enable;
}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_head(odp_packet_t pkt)
+_ODP_INLINE void *odp_packet_head(odp_packet_t pkt)
{
- return (uint8_t *)_odp_packet_data(pkt) - _odp_packet_headroom(pkt);
+ return (uint8_t *)odp_packet_data(pkt) - odp_packet_headroom(pkt);
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_is_segmented(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_is_segmented(odp_packet_t pkt)
{
- return !rte_pktmbuf_is_contiguous((struct rte_mbuf *)((uint8_t *)pkt +
- _odp_packet_inline.mb));
+ return !rte_pktmbuf_is_contiguous((struct rte_mbuf *)pkt);
}
-/** @internal Inline function @param pkt @return */
-static inline odp_packet_seg_t _odp_packet_first_seg(odp_packet_t pkt)
+_ODP_INLINE odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt)
{
return (odp_packet_seg_t)(uintptr_t)pkt;
}
-/** @internal Inline function @param pkt @return */
-static inline odp_packet_seg_t _odp_packet_last_seg(odp_packet_t pkt)
+_ODP_INLINE odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt)
{
- struct rte_mbuf *mb = (struct rte_mbuf *)((uint8_t *)pkt +
- _odp_packet_inline.mb);
+ struct rte_mbuf *mb = (struct rte_mbuf *)pkt;
return (odp_packet_seg_t)(uintptr_t)rte_pktmbuf_lastseg(mb);
}
-/** @internal Inline function @param pkt @param seg @return */
-static inline odp_packet_seg_t _odp_packet_next_seg(odp_packet_t pkt,
- odp_packet_seg_t seg)
+_ODP_INLINE odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt,
+ odp_packet_seg_t seg)
{
struct rte_mbuf *mb = (struct rte_mbuf *)(uintptr_t)seg;
(void)pkt;
@@ -172,27 +530,277 @@ static inline odp_packet_seg_t _odp_packet_next_seg(odp_packet_t pkt,
return (odp_packet_seg_t)(uintptr_t)mb->next;
}
-/** @internal Inline function @param pkt @param offset @param len */
-static inline void _odp_packet_prefetch(odp_packet_t pkt, uint32_t offset, uint32_t len)
+_ODP_INLINE void odp_packet_prefetch(odp_packet_t pkt, uint32_t offset,
+ uint32_t len)
{
- const char *addr = (char *)_odp_packet_data(pkt) + offset;
+ const char *addr = (char *)odp_packet_data(pkt) + offset;
size_t ofs;
for (ofs = 0; ofs < len; ofs += RTE_CACHE_LINE_SIZE)
rte_prefetch0(addr + ofs);
}
-/* Include inlined versions of API functions */
-#include <odp/api/plat/static_inline.h>
+static inline int _odp_packet_copy_to_mem_seg(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, void *dst)
+{
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t cpylen;
+ uint8_t *dstaddr = (uint8_t *)dst;
+
+ if (offset + len > odp_packet_len(pkt))
+ return -1;
+
+ while (len > 0) {
+ mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ cpylen = len > seglen ? seglen : len;
+ rte_memcpy(dstaddr, mapaddr, cpylen);
+ offset += cpylen;
+ dstaddr += cpylen;
+ len -= cpylen;
+ }
+
+ return 0;
+}
+
+_ODP_INLINE int odp_packet_copy_to_mem(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, void *dst)
+{
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+
+ if (odp_unlikely(offset + len > seg_len))
+ return _odp_packet_copy_to_mem_seg(pkt, offset, len, dst);
+
+ rte_memcpy(dst, data + offset, len);
+
+ return 0;
+}
+
+static inline int _odp_packet_copy_from_mem_seg(odp_packet_t pkt,
+ uint32_t offset, uint32_t len,
+ const void *src)
+{
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t cpylen;
+ const uint8_t *srcaddr = (const uint8_t *)src;
+
+ if (offset + len > odp_packet_len(pkt))
+ return -1;
+
+ while (len > 0) {
+ mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ cpylen = len > seglen ? seglen : len;
+ rte_memcpy(mapaddr, srcaddr, cpylen);
+ offset += cpylen;
+ srcaddr += cpylen;
+ len -= cpylen;
+ }
+
+ return 0;
+}
+
+_ODP_INLINE int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, const void *src)
+{
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+
+ if (odp_unlikely(offset + len > seg_len))
+ return _odp_packet_copy_from_mem_seg(pkt, offset, len, src);
+
+ rte_memcpy(data + offset, src, len);
+
+ return 0;
+}
+
+_ODP_INLINE odp_packet_t odp_packet_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+
+ return (odp_packet_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_packet_to_event(odp_packet_t pkt)
+{
+ return (odp_event_t)pkt;
+}
+
+_ODP_INLINE void odp_packet_from_event_multi(odp_packet_t pkt[],
+ const odp_event_t ev[],
+ int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ pkt[i] = odp_packet_from_event(ev[i]);
+}
+
+_ODP_INLINE void odp_packet_to_event_multi(const odp_packet_t pkt[],
+ odp_event_t ev[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ ev[i] = odp_packet_to_event(pkt[i]);
+}
+
+_ODP_INLINE odp_event_subtype_t odp_packet_subtype(odp_packet_t pkt)
+{
+ return (odp_event_subtype_t)_odp_event_hdr_field((odp_event_t)(uintptr_t)pkt,
+ int8_t, subtype);
+}
+
+_ODP_INLINE odp_packet_tx_compl_t odp_packet_tx_compl_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET_TX_COMPL);
+
+ return (odp_packet_tx_compl_t)(uintptr_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_packet_tx_compl_to_event(odp_packet_tx_compl_t tx_compl)
+{
+ return (odp_event_t)(uintptr_t)tx_compl;
+}
+
+_ODP_INLINE void odp_packet_free(odp_packet_t pkt)
+{
+ _odp_packet_validate(pkt, _ODP_EV_PACKET_FREE);
+
+ rte_pktmbuf_free((struct rte_mbuf *)pkt);
+}
+
+_ODP_INLINE void odp_packet_free_multi(const odp_packet_t pkt[], int num)
+{
+ _odp_packet_validate_multi(pkt, num, _ODP_EV_PACKET_FREE_MULTI);
+
+ rte_pktmbuf_free_bulk((struct rte_mbuf **)(uintptr_t)pkt, (unsigned int)num);
+}
+
+_ODP_INLINE void odp_packet_free_sp(const odp_packet_t pkt[], int num)
+{
+ _odp_packet_validate_multi(pkt, num, _ODP_EV_PACKET_FREE_SP);
+
+ rte_pktmbuf_free_bulk((struct rte_mbuf **)(uintptr_t)pkt, (unsigned int)num);
+}
+
+_ODP_INLINE void *odp_packet_seg_data(odp_packet_t pkt ODP_UNUSED,
+ odp_packet_seg_t seg)
+{
+ return odp_packet_data((odp_packet_t)seg);
+}
+
+_ODP_INLINE uint32_t odp_packet_seg_data_len(odp_packet_t pkt ODP_UNUSED,
+ odp_packet_seg_t seg)
+{
+ return odp_packet_seg_len((odp_packet_t)seg);
+}
-#if ODP_ABI_COMPAT == 0
+_ODP_INLINE odp_packet_t odp_packet_ref_static(odp_packet_t pkt)
+{
+ rte_pktmbuf_refcnt_update((struct rte_mbuf *)(pkt), 1);
+
+ return pkt;
+}
+
+_ODP_INLINE int odp_packet_has_ref(odp_packet_t pkt)
+{
+ return (rte_mbuf_refcnt_read((struct rte_mbuf *)(pkt)) > 1);
+}
+
+_ODP_INLINE odp_packet_color_t odp_packet_color(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
-#include <odp/api/plat/packet_inlines_api.h>
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return (odp_packet_color_t)input_flags.color;
+}
+
+_ODP_INLINE odp_bool_t odp_packet_drop_eligible(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return !input_flags.nodrop;
+}
+
+_ODP_INLINE int8_t odp_packet_shaper_len_adjust(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return (int8_t)flags.shaper_len_adj;
+}
+
+_ODP_INLINE uint64_t odp_packet_cls_mark(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
-#endif /* ODP_ABI_COMPAT */
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return input_flags.cls_mark ? _odp_pkt_get(pkt, uint16_t, cls_mark) : 0;
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
+{
+ return odp_packet_seg_data_len(ODP_PACKET_INVALID, (odp_packet_seg_t)pkt_buf);
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get((odp_packet_buf_t)pkt_buf, odp_pool_t, pool);
+
+ return _odp_pool_get(pool, uint32_t, seg_len);
+}
+
+_ODP_INLINE void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return NULL;
+
+ return (uint8_t *)(uintptr_t)pkt_buf + head_offset;
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
+{
+ void *data = odp_packet_seg_data(ODP_PACKET_INVALID, (odp_packet_seg_t)pkt_buf);
+ void *head = odp_packet_buf_head(pkt_buf);
+
+ return (uint32_t)((uintptr_t)data - (uintptr_t)head);
+}
+
+_ODP_INLINE void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset,
+ uint32_t data_len)
+{
+ struct rte_mbuf *mb = (struct rte_mbuf *)pkt_buf;
+
+ mb->data_off = (uint16_t)data_offset;
+ mb->data_len = (uint16_t)data_len;
+}
+
+_ODP_INLINE odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool, void *head)
+{
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return ODP_PACKET_BUF_INVALID;
+
+ return (odp_packet_buf_t)((uintptr_t)head - head_offset);
+}
#ifdef __cplusplus
}
#endif
+/** @endcond */
+
#endif /* ODP_PLAT_PACKET_INLINES_H_ */
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inlines_api.h b/platform/linux-dpdk/include/odp/api/plat/packet_inlines_api.h
deleted file mode 100644
index 16a445a13..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inlines_api.h
+++ /dev/null
@@ -1,118 +0,0 @@
-/* Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Packet inline functions
- */
-
-#ifndef _ODP_PLAT_PACKET_INLINES_API_H_
-#define _ODP_PLAT_PACKET_INLINES_API_H_
-
-_ODP_INLINE void *odp_packet_data(odp_packet_t pkt)
-{
- return _odp_packet_data(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_seg_len(odp_packet_t pkt)
-{
- return _odp_packet_seg_len(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_len(odp_packet_t pkt)
-{
- return _odp_packet_len(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_headroom(odp_packet_t pkt)
-{
- return _odp_packet_headroom(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_tailroom(odp_packet_t pkt)
-{
- return _odp_packet_tailroom(pkt);
-}
-
-_ODP_INLINE odp_pool_t odp_packet_pool(odp_packet_t pkt)
-{
- return _odp_packet_pool(pkt);
-}
-
-_ODP_INLINE odp_pktio_t odp_packet_input(odp_packet_t pkt)
-{
- return _odp_packet_input(pkt);
-}
-
-_ODP_INLINE int odp_packet_num_segs(odp_packet_t pkt)
-{
- return _odp_packet_num_segs(pkt);
-}
-
-_ODP_INLINE void *odp_packet_user_ptr(odp_packet_t pkt)
-{
- return _odp_packet_user_ptr(pkt);
-}
-
-_ODP_INLINE void *odp_packet_user_area(odp_packet_t pkt)
-{
- return _odp_packet_user_area(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_user_area_size(odp_packet_t pkt)
-{
- return _odp_packet_user_area_size(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_flow_hash(odp_packet_t pkt)
-{
- return _odp_packet_flow_hash(pkt);
-}
-
-_ODP_INLINE void odp_packet_flow_hash_set(odp_packet_t pkt, uint32_t flow_hash)
-{
- return _odp_packet_flow_hash_set(pkt, flow_hash);
-}
-
-_ODP_INLINE odp_time_t odp_packet_ts(odp_packet_t pkt)
-{
- return _odp_packet_ts(pkt);
-}
-
-_ODP_INLINE void *odp_packet_head(odp_packet_t pkt)
-{
- return _odp_packet_head(pkt);
-}
-
-_ODP_INLINE int odp_packet_is_segmented(odp_packet_t pkt)
-{
- return _odp_packet_is_segmented(pkt);
-}
-
-_ODP_INLINE odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt)
-{
- return _odp_packet_first_seg(pkt);
-}
-
-_ODP_INLINE odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt)
-{
- return _odp_packet_last_seg(pkt);
-}
-
-_ODP_INLINE odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt,
- odp_packet_seg_t seg)
-{
- return _odp_packet_next_seg(pkt, seg);
-}
-
-_ODP_INLINE void odp_packet_prefetch(odp_packet_t pkt, uint32_t offset,
- uint32_t len)
-{
- return _odp_packet_prefetch(pkt, offset, len);
-}
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_io_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_io_inlines.h
new file mode 120000
index 000000000..9d1d2e8f3
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_io_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/packet_io_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_io_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_io_types.h
deleted file mode 120000
index 9ff4d49da..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/packet_io_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/packet_io_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_types.h
deleted file mode 100644
index c6ee3966f..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/packet_types.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP packet descriptor
- */
-
-#ifndef ODP_PACKET_TYPES_H_
-#define ODP_PACKET_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/packet.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_packet
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_packet_t);
-
-#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, NULL)
-
-#define ODP_PACKET_OFFSET_INVALID (0x0fffffff)
-
-typedef ODP_HANDLE_T(odp_packet_seg_t);
-
-#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, NULL)
-
-typedef enum {
- ODP_PACKET_GREEN = 0,
- ODP_PACKET_YELLOW = 1,
- ODP_PACKET_RED = 2,
- ODP_PACKET_ALL_COLORS = 3,
-} odp_packet_color_t;
-
-#define ODP_NUM_PACKET_COLORS 3
-
-/**
- * @}
- */
-
-#endif
-
-/** @internal Packet header field offsets for inline functions */
-typedef struct _odp_packet_inline_offset_t {
- /** @internal field offset */
- size_t mb;
- /** @internal field offset */
- size_t pool;
- /** @internal field offset */
- size_t input;
- /** @internal field offset */
- size_t user_ptr;
- /** @internal field offset */
- size_t timestamp;
- /** @internal field offset */
- size_t input_flags;
- /** @internal field offset */
- unsigned int buf_addr;
- /** @internal field offset */
- unsigned int data;
- /** @internal field offset */
- unsigned int pkt_len;
- /** @internal field offset */
- unsigned int seg_len;
- /** @internal field offset */
- unsigned int nb_segs;
- /** @internal field offset */
- unsigned int udata_len;
- /** @internal field offset */
- unsigned int udata;
- /** @internal field offset */
- unsigned int rss;
- /** @internal field offset */
- unsigned int ol_flags;
- /** @internal field offset */
- uint64_t rss_flag;
-
-} _odp_packet_inline_offset_t;
-
-/** @internal Packet input & protocol flags */
-typedef union {
- /** All input flags */
- uint64_t all;
-
- struct {
- uint64_t parsed_l2:1; /**< L2 parsed */
- uint64_t dst_queue:1; /**< Dst queue present */
-
- uint64_t timestamp:1; /**< Timestamp present */
-
- uint64_t l2:1; /**< known L2 protocol present */
- uint64_t l3:1; /**< known L3 protocol present */
- uint64_t l4:1; /**< known L4 protocol present */
-
- uint64_t eth:1; /**< Ethernet */
- uint64_t eth_bcast:1; /**< Ethernet broadcast */
- uint64_t eth_mcast:1; /**< Ethernet multicast */
- uint64_t jumbo:1; /**< Jumbo frame */
- uint64_t vlan:1; /**< VLAN hdr found */
- uint64_t vlan_qinq:1; /**< Stacked VLAN found, QinQ */
-
- uint64_t snap:1; /**< SNAP */
- uint64_t arp:1; /**< ARP */
-
- uint64_t ipv4:1; /**< IPv4 */
- uint64_t ipv6:1; /**< IPv6 */
- uint64_t ip_bcast:1; /**< IP broadcast */
- uint64_t ip_mcast:1; /**< IP multicast */
- uint64_t ipfrag:1; /**< IP fragment */
- uint64_t ipopt:1; /**< IP optional headers */
-
- uint64_t ipsec:1; /**< IPSec packet. Required by the
- odp_packet_has_ipsec_set() func. */
- uint64_t ipsec_ah:1; /**< IPSec authentication header */
- uint64_t ipsec_esp:1; /**< IPSec encapsulating security
- payload */
- uint64_t udp:1; /**< UDP */
- uint64_t tcp:1; /**< TCP */
- uint64_t tcpopt:1; /**< TCP options present */
- uint64_t sctp:1; /**< SCTP */
- uint64_t icmp:1; /**< ICMP */
-
- uint64_t color:2; /**< Packet color for traffic mgmt */
- uint64_t nodrop:1; /**< Drop eligibility status */
- };
-} _odp_packet_input_flags_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h
new file mode 120000
index 000000000..30dd89e0d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_vector_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/packet_vector_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/pool_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/pool_inline_types.h
new file mode 120000
index 000000000..ef34e573c
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/pool_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/pool_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/pool_inlines.h b/platform/linux-dpdk/include/odp/api/plat/pool_inlines.h
new file mode 120000
index 000000000..e6c601a0d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/pool_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/pool_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/pool_types.h b/platform/linux-dpdk/include/odp/api/plat/pool_types.h
deleted file mode 120000
index 1ea2b43b4..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/pool_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/pool_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/queue_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/queue_inline_types.h
new file mode 120000
index 000000000..545f81352
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/queue_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/queue_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/queue_inlines.h b/platform/linux-dpdk/include/odp/api/plat/queue_inlines.h
new file mode 120000
index 000000000..13317a03d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/queue_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/queue_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/queue_types.h b/platform/linux-dpdk/include/odp/api/plat/queue_types.h
deleted file mode 120000
index a47d16b0d..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/queue_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/queue_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/rwlock_inlines.h b/platform/linux-dpdk/include/odp/api/plat/rwlock_inlines.h
new file mode 120000
index 000000000..9b8d33f6a
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/rwlock_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/rwlock_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_inlines.h b/platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_inlines.h
new file mode 120000
index 000000000..1b37e53d2
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/rwlock_recursive_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_types.h b/platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_types.h
deleted file mode 120000
index 516efcdd8..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/rwlock_recursive_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/rwlock_recursive_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/rwlock_types.h b/platform/linux-dpdk/include/odp/api/plat/rwlock_types.h
deleted file mode 120000
index 286e6ea88..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/rwlock_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/rwlock_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/schedule_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/schedule_inline_types.h
new file mode 120000
index 000000000..a9b051e5b
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/schedule_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/schedule_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/schedule_inlines.h b/platform/linux-dpdk/include/odp/api/plat/schedule_inlines.h
new file mode 120000
index 000000000..a15dbee6a
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/schedule_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/schedule_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/schedule_types.h b/platform/linux-dpdk/include/odp/api/plat/schedule_types.h
deleted file mode 120000
index 8d6af9b3f..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/schedule_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/schedule_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/shared_memory_types.h b/platform/linux-dpdk/include/odp/api/plat/shared_memory_types.h
deleted file mode 120000
index 936573e48..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/shared_memory_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/shared_memory_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/spinlock_inlines.h b/platform/linux-dpdk/include/odp/api/plat/spinlock_inlines.h
new file mode 120000
index 000000000..bbec045a5
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/spinlock_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/spinlock_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_inlines.h b/platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_inlines.h
new file mode 120000
index 000000000..580627ade
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/spinlock_recursive_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_types.h b/platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_types.h
deleted file mode 120000
index 6c7dc4683..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/spinlock_recursive_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/spinlock_recursive_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/spinlock_types.h b/platform/linux-dpdk/include/odp/api/plat/spinlock_types.h
deleted file mode 120000
index 30ef7c85a..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/spinlock_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/spinlock_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/static_inline.h.in b/platform/linux-dpdk/include/odp/api/plat/static_inline.h.in
deleted file mode 100644
index 3cf004347..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/static_inline.h.in
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Macro for static inline functions
- */
-
-#ifndef ODP_PLAT_STATIC_INLINE_H_
-#define ODP_PLAT_STATIC_INLINE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @internal
- * @def ODP_ABI_COMPAT
- * Control ABI compatibility
- */
-
-/**
- * @internal
- * @def _ODP_INLINE
- * Define a function as inlined or not inlined (for ABI compatibility)
- */
-#if @ODP_ABI_COMPAT@
-#define ODP_ABI_COMPAT 1
-#define _ODP_INLINE
-#else
-#define ODP_ABI_COMPAT 0
-#define _ODP_INLINE static inline
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h b/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h
deleted file mode 100644
index b305fe8a2..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/std_clib_inlines.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_STD_CLIB_INLINE_H_
-#define ODP_PLAT_STD_CLIB_INLINE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/spec/std_types.h>
-#include <string.h>
-
-extern void* (*const dpdk_memcpy)(void*, const void*, size_t);
-
-_ODP_INLINE void *odp_memcpy(void *dst, const void *src, size_t num)
-{
- return (*dpdk_memcpy)(dst, src, num);
-}
-
-_ODP_INLINE void *odp_memset(void *ptr, int value, size_t num)
-{
- return memset(ptr, value, num);
-}
-
-_ODP_INLINE int odp_memcmp(const void *ptr1, const void *ptr2, size_t num)
-{
- return memcmp(ptr1, ptr2, num);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/std_inlines.h b/platform/linux-dpdk/include/odp/api/plat/std_inlines.h
new file mode 100644
index 000000000..b52b0512a
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/std_inlines.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_STD_INLINE_H_
+#define ODP_PLAT_STD_INLINE_H_
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#include <string.h>
+
+#include <rte_config.h>
+#if defined(__clang__)
+#undef RTE_TOOLCHAIN_GCC
+#endif
+/* ppc64 rte_memcpy.h may overwrite bool with an incompatible type and define
+ * vector */
+#include <rte_memcpy.h>
+#if defined(__PPC64__) && defined(bool)
+ #undef bool
+ #define bool _Bool
+#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_memcpy __odp_memcpy
+ #define odp_memset __odp_memset
+ #define odp_memcmp __odp_memcmp
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void *odp_memcpy(void *dst, const void *src, size_t num)
+{
+ return rte_memcpy(dst, src, num);
+}
+
+_ODP_INLINE void *odp_memset(void *ptr, int value, size_t num)
+{
+ return memset(ptr, value, num);
+}
+
+_ODP_INLINE int odp_memcmp(const void *ptr1, const void *ptr2, size_t num)
+{
+ return memcmp(ptr1, ptr2, num);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/thread_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/thread_inline_types.h
new file mode 120000
index 000000000..8f59ca214
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/thread_inline_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/thread_inline_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/thread_inlines.h b/platform/linux-dpdk/include/odp/api/plat/thread_inlines.h
new file mode 120000
index 000000000..059b069c2
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/thread_inlines.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include/odp/api/plat/thread_inlines.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/thread_types.h b/platform/linux-dpdk/include/odp/api/plat/thread_types.h
deleted file mode 120000
index a05d87900..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/thread_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/thread_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/thrmask_types.h b/platform/linux-dpdk/include/odp/api/plat/thrmask_types.h
deleted file mode 120000
index bf40ebb5b..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/thrmask_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/thrmask_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/ticketlock_inlines_api.h b/platform/linux-dpdk/include/odp/api/plat/ticketlock_inlines_api.h
deleted file mode 120000
index 63c076049..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/ticketlock_inlines_api.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/ticketlock_inlines_api.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/ticketlock_types.h b/platform/linux-dpdk/include/odp/api/plat/ticketlock_types.h
deleted file mode 120000
index 885299b93..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/ticketlock_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/ticketlock_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/time_inlines.h b/platform/linux-dpdk/include/odp/api/plat/time_inlines.h
new file mode 100644
index 000000000..f3d2a6947
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/time_inlines.h
@@ -0,0 +1,260 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2020-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_TIME_INLINES_H_
+#define ODP_PLAT_TIME_INLINES_H_
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/time_types.h>
+
+#include <rte_config.h>
+#include <rte_atomic.h>
+#include <rte_cycles.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+typedef struct _odp_time_global_t {
+ uint64_t freq_hz;
+ uint64_t start_cycles;
+ uint64_t start_ns;
+
+} _odp_time_global_t;
+
+extern _odp_time_global_t _odp_time_glob;
+
+static inline odp_time_t _odp_time_cur(void)
+{
+ odp_time_t time;
+
+ time.u64 = rte_get_timer_cycles();
+
+ return time;
+}
+
+static inline odp_time_t _odp_time_cur_strict(void)
+{
+ odp_time_t time;
+
+ rte_mb();
+ time.u64 = rte_get_timer_cycles();
+
+ return time;
+}
+
+static inline uint64_t _odp_time_to_ns(odp_time_t time)
+{
+ uint64_t nsec;
+ uint64_t count = time.count;
+ uint64_t sec = 0;
+ const uint64_t freq_hz = _odp_time_glob.freq_hz;
+ const uint64_t giga_hz = 1000000000;
+
+ if (count >= freq_hz) {
+ sec = count / freq_hz;
+ count = count - sec * freq_hz;
+ }
+
+ nsec = (giga_hz * count) / freq_hz;
+
+ return (sec * giga_hz) + nsec;
+}
+
+static inline odp_time_t _odp_time_from_ns(uint64_t ns)
+{
+ odp_time_t time;
+ uint64_t count;
+ uint64_t sec = 0;
+ const uint64_t freq_hz = _odp_time_glob.freq_hz;
+
+ if (ns >= ODP_TIME_SEC_IN_NS) {
+ sec = ns / ODP_TIME_SEC_IN_NS;
+ ns = ns - sec * ODP_TIME_SEC_IN_NS;
+ }
+
+ count = sec * freq_hz;
+ count += (ns * freq_hz) / ODP_TIME_SEC_IN_NS;
+
+ time.count = count;
+
+ return time;
+}
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_time_local_res __odp_time_local_res
+ #define odp_time_global_res __odp_time_global_res
+ #define odp_time_local __odp_time_local
+ #define odp_time_global __odp_time_global
+ #define odp_time_local_strict __odp_time_local_strict
+ #define odp_time_global_strict __odp_time_global_strict
+ #define odp_time_local_ns __odp_time_local_ns
+ #define odp_time_global_ns __odp_time_global_ns
+ #define odp_time_local_from_ns __odp_time_local_from_ns
+ #define odp_time_global_from_ns __odp_time_global_from_ns
+ #define odp_time_local_strict_ns __odp_time_local_strict_ns
+ #define odp_time_global_strict_ns __odp_time_global_strict_ns
+ #define odp_time_to_ns __odp_time_to_ns
+ #define odp_time_cmp __odp_time_cmp
+ #define odp_time_diff __odp_time_diff
+ #define odp_time_diff_ns __odp_time_diff_ns
+ #define odp_time_add_ns __odp_time_add_ns
+ #define odp_time_sum __odp_time_sum
+ #define odp_time_wait_ns __odp_time_wait_ns
+ #define odp_time_wait_until __odp_time_wait_until
+ #define odp_time_startup __odp_time_startup
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE uint64_t odp_time_local_res(void)
+{
+ return _odp_time_glob.freq_hz;
+}
+
+_ODP_INLINE uint64_t odp_time_global_res(void)
+{
+ return _odp_time_glob.freq_hz;
+}
+
+_ODP_INLINE odp_time_t odp_time_local(void)
+{
+ return _odp_time_cur();
+}
+
+_ODP_INLINE odp_time_t odp_time_global(void)
+{
+ return _odp_time_cur();
+}
+
+_ODP_INLINE odp_time_t odp_time_local_strict(void)
+{
+ return _odp_time_cur_strict();
+}
+
+_ODP_INLINE odp_time_t odp_time_global_strict(void)
+{
+ return _odp_time_cur_strict();
+}
+
+_ODP_INLINE uint64_t odp_time_local_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur());
+}
+
+_ODP_INLINE uint64_t odp_time_global_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur());
+}
+
+_ODP_INLINE odp_time_t odp_time_local_from_ns(uint64_t ns)
+{
+ return _odp_time_from_ns(ns);
+}
+
+_ODP_INLINE odp_time_t odp_time_global_from_ns(uint64_t ns)
+{
+ return _odp_time_from_ns(ns);
+}
+
+_ODP_INLINE uint64_t odp_time_local_strict_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur_strict());
+}
+
+_ODP_INLINE uint64_t odp_time_global_strict_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur_strict());
+}
+
+_ODP_INLINE uint64_t odp_time_to_ns(odp_time_t time)
+{
+ return _odp_time_to_ns(time);
+}
+
+_ODP_INLINE int odp_time_cmp(odp_time_t t2, odp_time_t t1)
+{
+ if (odp_likely(t2.u64 > t1.u64))
+ return 1;
+
+ if (t2.u64 < t1.u64)
+ return -1;
+
+ return 0;
+}
+
+_ODP_INLINE odp_time_t odp_time_diff(odp_time_t t2, odp_time_t t1)
+{
+ odp_time_t time;
+
+ time.u64 = t2.u64 - t1.u64;
+
+ return time;
+}
+
+_ODP_INLINE uint64_t odp_time_diff_ns(odp_time_t t2, odp_time_t t1)
+{
+ odp_time_t time;
+
+ time.u64 = t2.u64 - t1.u64;
+
+ return odp_time_to_ns(time);
+}
+
+_ODP_INLINE odp_time_t odp_time_add_ns(odp_time_t time, uint64_t ns)
+{
+ odp_time_t t = _odp_time_from_ns(ns);
+
+ t.u64 += time.u64;
+
+ return t;
+}
+
+_ODP_INLINE odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
+{
+ odp_time_t time;
+
+ time.u64 = t1.u64 + t2.u64;
+
+ return time;
+}
+
+static inline void _odp_time_wait_until(odp_time_t time)
+{
+ odp_time_t cur;
+
+ do {
+ cur = _odp_time_cur();
+ } while (odp_time_cmp(time, cur) > 0);
+}
+
+_ODP_INLINE void odp_time_wait_ns(uint64_t ns)
+{
+ const odp_time_t cur = _odp_time_cur();
+ const odp_time_t wait = _odp_time_from_ns(ns);
+ const odp_time_t end_time = odp_time_sum(cur, wait);
+
+ _odp_time_wait_until(end_time);
+}
+
+_ODP_INLINE void odp_time_wait_until(odp_time_t time)
+{
+ _odp_time_wait_until(time);
+}
+
+_ODP_INLINE void odp_time_startup(odp_time_startup_t *startup)
+{
+ startup->global.u64 = _odp_time_glob.start_cycles;
+ startup->global_ns = _odp_time_glob.start_ns;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/time_types.h b/platform/linux-dpdk/include/odp/api/plat/time_types.h
deleted file mode 100644
index e53ad2f97..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/time_types.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP time service
- */
-
-#ifndef ODP_TIME_TYPES_H_
-#define ODP_TIME_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odp_time
- * @{
- **/
-
-/**
- * @internal Time structure used to isolate odp-linux implementation from
- * the linux timespec structure, which is dependent on POSIX extension level.
- */
-typedef struct odp_time_t {
- int64_t tv_sec; /**< @internal Seconds or DPDK ticks */
- int64_t tv_nsec; /**< @internal Nanoseconds */
-} odp_time_t;
-
-#define ODP_TIME_NULL ((odp_time_t){0, 0})
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
new file mode 100644
index 000000000..6c997e80d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_TIMER_INLINE_TYPES_H_
+#define ODP_PLAT_TIMER_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Timeout header field accessor */
+#define _odp_timeout_hdr_field(hdr, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)hdr + \
+ _odp_timeout_inline_offset.field))
+
+/* Timeout header field offsets for inline functions */
+typedef struct _odp_timeout_inline_offset_t {
+ uint16_t expiration;
+ uint16_t timer;
+ uint16_t user_ptr;
+ uint16_t uarea_addr;
+
+} _odp_timeout_inline_offset_t;
+
+extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset;
+
+/* Timer global data */
+typedef struct _odp_timer_global_t {
+ uint64_t freq_hz;
+
+} _odp_timer_global_t;
+
+extern _odp_timer_global_t _odp_timer_glob;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
new file mode 100644
index 000000000..a85c7582d
--- /dev/null
+++ b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2022-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_TIMER_INLINES_H_
+#define ODP_PLAT_TIMER_INLINES_H_
+
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/time_types.h>
+#include <odp/api/timer_types.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/timer_inline_types.h>
+
+#include <rte_config.h>
+#include <rte_cycles.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_timeout_timer __odp_timeout_timer
+ #define odp_timeout_tick __odp_timeout_tick
+ #define odp_timeout_user_ptr __odp_timeout_user_ptr
+ #define odp_timeout_user_area __odp_timeout_user_area
+ #define odp_timer_current_tick __odp_timer_current_tick
+ #define odp_timeout_from_event __odp_timeout_from_event
+ #define odp_timeout_from_event_multi __odp_timeout_from_event_multi
+ #define odp_timeout_to_event __odp_timeout_to_event
+ #define odp_timer_tick_to_ns __odp_timer_tick_to_ns
+ #define odp_timer_ns_to_tick __odp_timer_ns_to_tick
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_timer_t odp_timeout_timer(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, odp_timer_t, timer);
+}
+
+_ODP_INLINE uint64_t odp_timeout_tick(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, uint64_t, expiration);
+}
+
+_ODP_INLINE void *odp_timeout_user_ptr(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, void *, user_ptr);
+}
+
+_ODP_INLINE void *odp_timeout_user_area(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, void *, uarea_addr);
+}
+
+_ODP_INLINE uint64_t odp_timer_current_tick(odp_timer_pool_t tp ODP_UNUSED)
+{
+ return rte_get_timer_cycles();
+}
+
+_ODP_INLINE odp_timeout_t odp_timeout_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
+
+ return (odp_timeout_t)ev;
+}
+
+_ODP_INLINE void odp_timeout_from_event_multi(odp_timeout_t tmo[], const odp_event_t ev[], int num)
+{
+ for (int i = 0; i < num; i++) {
+ _ODP_ASSERT(odp_event_type(ev[i]) == ODP_EVENT_TIMEOUT);
+
+ tmo[i] = (odp_timeout_t)ev[i];
+ }
+}
+
+_ODP_INLINE odp_event_t odp_timeout_to_event(odp_timeout_t tmo)
+{
+ return (odp_event_t)tmo;
+}
+
+_ODP_INLINE uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tp ODP_UNUSED, uint64_t ticks)
+{
+ uint64_t nsec;
+ uint64_t sec = 0;
+ const uint64_t freq_hz = _odp_timer_glob.freq_hz;
+
+ if (ticks >= freq_hz) {
+ sec = ticks / freq_hz;
+ ticks = ticks - sec * freq_hz;
+ }
+
+ nsec = (ODP_TIME_SEC_IN_NS * ticks) / freq_hz;
+
+ return (sec * ODP_TIME_SEC_IN_NS) + nsec;
+}
+
+_ODP_INLINE uint64_t odp_timer_ns_to_tick(odp_timer_pool_t tp ODP_UNUSED, uint64_t ns)
+{
+ uint64_t ticks;
+ uint64_t sec = 0;
+ const uint64_t freq_hz = _odp_timer_glob.freq_hz;
+
+ if (ns >= ODP_TIME_SEC_IN_NS) {
+ sec = ns / ODP_TIME_SEC_IN_NS;
+ ns = ns - sec * ODP_TIME_SEC_IN_NS;
+ }
+
+ ticks = sec * freq_hz;
+ ticks += (ns * freq_hz) / ODP_TIME_SEC_IN_NS;
+
+ return ticks;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_types.h b/platform/linux-dpdk/include/odp/api/plat/timer_types.h
deleted file mode 100644
index 8821bed60..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/timer_types.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP timer service
- */
-
-#ifndef ODP_TIMER_TYPES_H_
-#define ODP_TIMER_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/strong_types.h>
-
-/** @addtogroup odp_timer
- * @{
- **/
-
-struct odp_timer_pool_s; /**< Forward declaration */
-
-typedef struct odp_timer_pool_s *odp_timer_pool_t;
-
-#define ODP_TIMER_POOL_INVALID NULL
-
-#define ODP_TIMER_POOL_NAME_LEN 32
-
-typedef ODP_HANDLE_T(odp_timer_t);
-
-#define ODP_TIMER_INVALID _odp_cast_scalar(odp_timer_t, 0xffffffff)
-
-typedef ODP_HANDLE_T(odp_timeout_t);
-
-#define ODP_TIMEOUT_INVALID _odp_cast_scalar(odp_timeout_t, 0xffffffff)
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/traffic_mngr_types.h b/platform/linux-dpdk/include/odp/api/plat/traffic_mngr_types.h
deleted file mode 120000
index 708f8ae56..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/traffic_mngr_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/traffic_mngr_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/version_types.h b/platform/linux-dpdk/include/odp/api/plat/version_types.h
deleted file mode 120000
index 5c41e4611..000000000
--- a/platform/linux-dpdk/include/odp/api/plat/version_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../linux-generic/include/odp/api/plat/version_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/pool.h b/platform/linux-dpdk/include/odp/api/pool.h
deleted file mode 120000
index dc61addfc..000000000
--- a/platform/linux-dpdk/include/odp/api/pool.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/pool.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/queue.h b/platform/linux-dpdk/include/odp/api/queue.h
deleted file mode 120000
index de33df69d..000000000
--- a/platform/linux-dpdk/include/odp/api/queue.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/queue.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/random.h b/platform/linux-dpdk/include/odp/api/random.h
deleted file mode 120000
index f686acb80..000000000
--- a/platform/linux-dpdk/include/odp/api/random.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/random.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/rwlock.h b/platform/linux-dpdk/include/odp/api/rwlock.h
deleted file mode 120000
index 8730e75e4..000000000
--- a/platform/linux-dpdk/include/odp/api/rwlock.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/rwlock.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/rwlock_recursive.h b/platform/linux-dpdk/include/odp/api/rwlock_recursive.h
deleted file mode 120000
index 7e8ef995d..000000000
--- a/platform/linux-dpdk/include/odp/api/rwlock_recursive.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/rwlock_recursive.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/schedule.h b/platform/linux-dpdk/include/odp/api/schedule.h
deleted file mode 120000
index 144acd72b..000000000
--- a/platform/linux-dpdk/include/odp/api/schedule.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/schedule.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/schedule_types.h b/platform/linux-dpdk/include/odp/api/schedule_types.h
deleted file mode 120000
index 4fcc59073..000000000
--- a/platform/linux-dpdk/include/odp/api/schedule_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/schedule_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/shared_memory.h b/platform/linux-dpdk/include/odp/api/shared_memory.h
deleted file mode 120000
index 2f79b2769..000000000
--- a/platform/linux-dpdk/include/odp/api/shared_memory.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/shared_memory.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/spinlock.h b/platform/linux-dpdk/include/odp/api/spinlock.h
deleted file mode 120000
index 276271a1d..000000000
--- a/platform/linux-dpdk/include/odp/api/spinlock.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/spinlock.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/spinlock_recursive.h b/platform/linux-dpdk/include/odp/api/spinlock_recursive.h
deleted file mode 120000
index b738f8b13..000000000
--- a/platform/linux-dpdk/include/odp/api/spinlock_recursive.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/spinlock_recursive.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/std_clib.h b/platform/linux-dpdk/include/odp/api/std_clib.h
deleted file mode 100644
index fea472543..000000000
--- a/platform/linux-dpdk/include/odp/api/std_clib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_STD_CLIB_H_
-#define ODP_PLAT_STD_CLIB_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/spec/std_types.h>
-#include <string.h>
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/std_clib_inlines.h>
-#endif
-
-#include <odp/api/spec/std_clib.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp/api/std_types.h b/platform/linux-dpdk/include/odp/api/std_types.h
deleted file mode 120000
index 2cc6bb77b..000000000
--- a/platform/linux-dpdk/include/odp/api/std_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/std_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/sync.h b/platform/linux-dpdk/include/odp/api/sync.h
deleted file mode 120000
index 06b6852ff..000000000
--- a/platform/linux-dpdk/include/odp/api/sync.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/sync.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/system_info.h b/platform/linux-dpdk/include/odp/api/system_info.h
deleted file mode 120000
index ce04866eb..000000000
--- a/platform/linux-dpdk/include/odp/api/system_info.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/system_info.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/thread.h b/platform/linux-dpdk/include/odp/api/thread.h
deleted file mode 120000
index f3f731ed1..000000000
--- a/platform/linux-dpdk/include/odp/api/thread.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/thread.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/thrmask.h b/platform/linux-dpdk/include/odp/api/thrmask.h
deleted file mode 120000
index 709adaaf7..000000000
--- a/platform/linux-dpdk/include/odp/api/thrmask.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/thrmask.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/ticketlock.h b/platform/linux-dpdk/include/odp/api/ticketlock.h
deleted file mode 120000
index a8a5d832f..000000000
--- a/platform/linux-dpdk/include/odp/api/ticketlock.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/ticketlock.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/time.h b/platform/linux-dpdk/include/odp/api/time.h
deleted file mode 120000
index c0743d883..000000000
--- a/platform/linux-dpdk/include/odp/api/time.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/time.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/timer.h b/platform/linux-dpdk/include/odp/api/timer.h
deleted file mode 120000
index c81bfbed9..000000000
--- a/platform/linux-dpdk/include/odp/api/timer.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/timer.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/traffic_mngr.h b/platform/linux-dpdk/include/odp/api/traffic_mngr.h
deleted file mode 120000
index e9faa26d8..000000000
--- a/platform/linux-dpdk/include/odp/api/traffic_mngr.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/traffic_mngr.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/version.h b/platform/linux-dpdk/include/odp/api/version.h
deleted file mode 120000
index da44655a4..000000000
--- a/platform/linux-dpdk/include/odp/api/version.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../linux-generic/include/odp/api/version.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/drv b/platform/linux-dpdk/include/odp/drv
deleted file mode 120000
index cdf2348ba..000000000
--- a/platform/linux-dpdk/include/odp/drv
+++ /dev/null
@@ -1 +0,0 @@
-../../../linux-generic/include/odp/drv \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/visibility_begin.h b/platform/linux-dpdk/include/odp/visibility_begin.h
deleted file mode 120000
index 1ee971179..000000000
--- a/platform/linux-dpdk/include/odp/visibility_begin.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../linux-generic/include/odp/visibility_begin.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/visibility_end.h b/platform/linux-dpdk/include/odp/visibility_end.h
deleted file mode 120000
index 9628087c9..000000000
--- a/platform/linux-dpdk/include/odp/visibility_end.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../linux-generic/include/odp/visibility_end.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp_buffer_inlines.h b/platform/linux-dpdk/include/odp_buffer_inlines.h
deleted file mode 100644
index c481390be..000000000
--- a/platform/linux-dpdk/include/odp_buffer_inlines.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Inline functions for ODP buffer mgmt routines - implementation internal
- */
-
-#ifndef ODP_BUFFER_INLINES_H_
-#define ODP_BUFFER_INLINES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp_buffer_internal.h>
-
-static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr)
-{
- return (odp_buffer_t)hdr;
-}
-
-static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
-{
- return (odp_buffer_hdr_t *)(void *)buf;
-}
-
-static inline odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
-{
- return buf_hdl_to_hdr(buf)->event_type;
-}
-
-static inline void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
-{
- buf_hdl_to_hdr(buf)->event_type = ev;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp_buffer_internal.h b/platform/linux-dpdk/include/odp_buffer_internal.h
index b33a30da1..cb7f50073 100644
--- a/platform/linux-dpdk/include/odp_buffer_internal.h
+++ b/platform/linux-dpdk/include/odp_buffer_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -18,96 +18,67 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/atomic.h>
-#include <odp/api/pool.h>
-#include <odp/api/buffer.h>
-#include <odp/api/debug.h>
#include <odp/api/align.h>
-#include <odp_align_internal.h>
-#include <odp_config_internal.h>
+#include <odp/api/buffer.h>
#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
+#include <odp/api/event.h>
+#include <odp/api/pool.h>
+#include <odp/api/std_types.h>
#include <odp/api/thread.h>
+
+#include <odp_config_internal.h>
+#include <odp_event_internal.h>
+
#include <sys/types.h>
-#include <odp/api/event.h>
-#include <odp_forward_typedefs_internal.h>
-#include <odp_schedule_if.h>
#include <stddef.h>
/* DPDK */
+#include <rte_config.h>
+#if defined(__clang__)
+#undef RTE_TOOLCHAIN_GCC
+#endif
#include <rte_mbuf.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
-ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256,
- "ODP Segment size must be a minimum of 256 bytes");
-
-ODP_STATIC_ASSERT(CONFIG_PACKET_MAX_SEGS < 256,
- "Maximum of 255 segments supported");
-
-typedef union odp_buffer_bits_t {
- odp_buffer_t handle;
-} odp_buffer_bits_t;
-
-#define BUFFER_BURST_SIZE CONFIG_BURST_SIZE
+/* Type size limits number of flow IDs supported */
+#define BUF_HDR_MAX_FLOW_ID 255
-struct odp_buffer_hdr_t {
+/* Internal buffer header */
+typedef struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
/* Underlying DPDK rte_mbuf */
struct rte_mbuf mb;
- /* Handle union */
- odp_buffer_bits_t handle;
-
- /* ODP buffer type, not DPDK buf type */
- int type;
- /* Burst counts */
- int burst_num;
- int burst_first;
+ /* Common internal header */
+ _odp_event_hdr_int_t event_hdr;
- /* Next buf in a list */
- struct odp_buffer_hdr_t *next;
+ /* User area pointer */
+ void *uarea_addr;
- /* User context pointer or u64 */
- union {
- uint64_t buf_u64;
- void *buf_ctx;
- const void *buf_cctx; /* const alias for ctx */
- };
+} odp_buffer_hdr_t;
- /* Event type. Maybe different than pool type (crypto compl event) */
- odp_event_type_t event_type;
+ODP_STATIC_ASSERT(sizeof(odp_buffer_hdr_t) <= 3 * RTE_CACHE_LINE_SIZE,
+ "Additional cache line required for odp_buffer_hdr_t");
- /* Burst table */
- struct odp_buffer_hdr_t *burst[BUFFER_BURST_SIZE];
-
- /* Pool handle */
- odp_pool_t pool_hdl;
-
- /* Total size of all allocated segs */
- uint32_t totsize;
- /* Index in the rte_mempool */
- uint32_t index;
-};
-
-ODP_STATIC_ASSERT(BUFFER_BURST_SIZE < 256, "BUFFER_BURST_SIZE_TOO_LARGE");
+static inline struct rte_mbuf *_odp_buf_to_mbuf(odp_buffer_t buf)
+{
+ return (struct rte_mbuf *)(uintptr_t)buf;
+}
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
+static inline odp_buffer_hdr_t *_odp_buf_hdr(odp_buffer_t buf)
+{
+ return (odp_buffer_hdr_t *)(uintptr_t)buf;
+}
-/*
- * Buffer type
- *
- * @param buf Buffer handle
- *
- * @return Buffer type
- */
-int _odp_buffer_type(odp_buffer_t buf);
+static inline void _odp_buffer_subtype_set(odp_buffer_t buf, int subtype)
+{
+ odp_buffer_hdr_t *buf_hdr = _odp_buf_hdr(buf);
-/*
- * Buffer type set
- *
- * @param buf Buffer handle
- * @param type New type value
- *
- */
-void _odp_buffer_type_set(odp_buffer_t buf, int type);
+ buf_hdr->event_hdr.subtype = subtype;
+}
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/include/odp_config_internal.h b/platform/linux-dpdk/include/odp_config_internal.h
index 21db09b84..c423ec14b 100644
--- a/platform/linux-dpdk/include/odp_config_internal.h
+++ b/platform/linux-dpdk/include/odp_config_internal.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -11,25 +12,76 @@
extern "C" {
#endif
+#include <rte_config.h>
+
+#include <stdint.h>
+
+/*
+ * Maximum number of supported CPU identifiers. The maximum supported CPU ID is
+ * CONFIG_NUM_CPU_IDS - 1. Note that the maximum number of ODP threads is
+ * defined by ODP_THREAD_COUNT_MAX.
+ */
+#define CONFIG_NUM_CPU_IDS 256
+
+/*
+ * Maximum number of packet IO resources
+ */
+#define CONFIG_PKTIO_ENTRIES 64
+
+/*
+ * Maximum number of DMA sessions
+ */
+#define CONFIG_MAX_DMA_SESSIONS 32
+
+/*
+ * Pools reserved for internal usage, 1 for IPsec status events and one per packet
+ * I/O for TX completion
+ */
+#define CONFIG_INTERNAL_POOLS (1 + CONFIG_PKTIO_ENTRIES)
+
/*
* Maximum number of pools
*/
-#define ODP_CONFIG_POOLS 256
+#define CONFIG_POOLS 256
+
+/*
+ * Queues reserved for ODP internal use
+ */
+#define CONFIG_INTERNAL_QUEUES 64
+
+/*
+ * Maximum number of plain ODP queues
+ */
+#define CONFIG_MAX_PLAIN_QUEUES 1024
+
+/*
+ * Maximum number of scheduled ODP queues
+ *
+ * Must be a power of two.
+ */
+#define CONFIG_MAX_SCHED_QUEUES 1024
/*
* Maximum number of queues
*/
-#define ODP_CONFIG_QUEUES 1024
+#define CONFIG_MAX_QUEUES (CONFIG_INTERNAL_QUEUES + \
+ CONFIG_MAX_PLAIN_QUEUES + \
+ CONFIG_MAX_SCHED_QUEUES)
/*
* Maximum number of ordered locks per queue
*/
-#define CONFIG_QUEUE_MAX_ORD_LOCKS 4
+#define CONFIG_QUEUE_MAX_ORD_LOCKS 2
/*
- * Maximum number of packet IO resources
+ * Stashes reserved for internal usage
+ */
+#define CONFIG_INTERNAL_STASHES CONFIG_MAX_DMA_SESSIONS
+
+/*
+ * Maximum number of stashes
*/
-#define ODP_CONFIG_PKTIO_ENTRIES 64
+#define CONFIG_MAX_STASHES 2048
/*
* Minimum buffer alignment
@@ -37,7 +89,7 @@ extern "C" {
* This defines the minimum supported buffer alignment. Requests for values
* below this will be rounded up to this value.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MIN 16
+#define CONFIG_BUFFER_ALIGN_MIN 16
/*
* Maximum buffer alignment
@@ -45,21 +97,7 @@ extern "C" {
* This defines the maximum supported buffer alignment. Requests for values
* above this will fail.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
-
-/*
- * Default packet headroom
- *
- * This defines the minimum number of headroom bytes that newly created packets
- * have by default. The default apply to both ODP packet input and user
- * allocated packets. Implementations may reserve a larger than minimum headroom
- * size e.g. due to HW or a protocol specific alignment requirement.
- *
- * @internal In odp-linux implementation:
- * The default value (66) allows a 1500-byte packet to be received into a single
- * segment with Ethernet offset alignment and room for some header expansion.
- */
-#define CONFIG_PACKET_HEADROOM 128
+#define CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
/*
* Default packet tailroom
@@ -73,9 +111,9 @@ extern "C" {
#define CONFIG_PACKET_TAILROOM 0
/*
- * Maximum number of segments per packet
+ * Maximum packet segment size including head- and tailrooms
*/
-#define CONFIG_PACKET_MAX_SEGS 60
+#define CONFIG_PACKET_SEG_SIZE (UINT16_MAX)
/*
* Minimum packet segment length
@@ -93,16 +131,27 @@ extern "C" {
* defined segment length (seg_len in odp_pool_param_t) must not be larger than
* this.
*/
-#define CONFIG_PACKET_SEG_LEN_MAX (CONFIG_PACKET_MAX_SEGS * \
- (CONFIG_PACKET_SEG_LEN_MIN - \
- CONFIG_PACKET_HEADROOM - \
- CONFIG_PACKET_TAILROOM))
+#define CONFIG_PACKET_MAX_SEG_LEN (CONFIG_PACKET_SEG_SIZE - \
+ RTE_PKTMBUF_HEADROOM - \
+ CONFIG_PACKET_TAILROOM - \
+ CONFIG_BUFFER_ALIGN_MIN)
-/* Maximum number of shared memory blocks.
+/*
+ * Number of shared memory blocks reserved for implementation internal use.
+ *
+ * Each packet pool requires one SHM block, 20 blocks are reserved for ODP
+ * module global data, and one block per packet I/O is reserved for TX
+ * completion usage.
+ */
+#define CONFIG_INTERNAL_SHM_BLOCKS (CONFIG_POOLS + 20 + CONFIG_PKTIO_ENTRIES)
+
+/*
+ * Maximum number of shared memory blocks.
*
- * This the the number of separate SHM areas that can be reserved concurrently
+ * This is the number of separate SHM blocks that an application can reserve
+ * concurrently.
*/
-#define ODP_CONFIG_SHM_BLOCKS (ODP_CONFIG_POOLS + 48)
+#define CONFIG_SHM_BLOCKS 64
/*
* Maximum event burst size
@@ -110,27 +159,32 @@ extern "C" {
* This controls the burst size on various enqueue, dequeue, etc calls. Large
* burst size improves throughput, but may degrade QoS (increase latency).
*/
-#define CONFIG_BURST_SIZE 16
+#define CONFIG_BURST_SIZE 32
/*
- * Maximum number of events in a pool
+ * Maximum number of events in a pool. Power of two minus one results optimal
+ * memory usage.
*/
-#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024)
+#define CONFIG_POOL_MAX_NUM ((1024 * 1024) - 1)
-/*
- * Maximum number of events in a thread local pool cache
- */
-#define CONFIG_POOL_CACHE_SIZE 256
+/* Maximum packet vector size */
+#define CONFIG_PACKET_VECTOR_MAX_SIZE 256
/*
- * Size of the virtual address space pre-reserver for ISHM
- *
- * This is just virtual space preallocation size, not memory allocation.
- * This address space is used by ISHM to map things at a common address in
- * all ODP threads (when the _ODP_ISHM_SINGLE_VA flag is used).
- * In bytes.
+ * Maximum number of IPsec SAs. The actual maximum number can be further
+ * limited by the number of sessions supported by the crypto subsystem and
+ * is reported by odp_ipsec_capability().
*/
-#define ODP_CONFIG_ISHM_VA_PREALLOC_SZ (536870912L)
+#define CONFIG_IPSEC_MAX_NUM_SA 4000
+
+/* Maximum number of ML models that can be created or loaded. */
+#define CONFIG_ML_MAX_MODELS 4
+
+/* Maximum number of inputs for a ML model. */
+#define CONFIG_ML_MAX_INPUTS 4
+
+/* Maximum number of outputs for a ML model. */
+#define CONFIG_ML_MAX_OUTPUTS 4
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/include/odp_errno_define.h b/platform/linux-dpdk/include/odp_errno_define.h
index 914d41b40..9f9486267 100644
--- a/platform/linux-dpdk/include/odp_errno_define.h
+++ b/platform/linux-dpdk/include/odp_errno_define.h
@@ -1,4 +1,28 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP error number define
+ */
+
+#ifndef ODP_ERRNO_DEFINE_H_
+#define ODP_ERRNO_DEFINE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
#include <rte_errno.h>
-#define __odp_errno (rte_errno)
+#define _odp_errno (rte_errno)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_event_internal.h b/platform/linux-dpdk/include/odp_event_internal.h
new file mode 100644
index 000000000..cbe6d960b
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_event_internal.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_INTERNAL_H_
+#define ODP_EVENT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/event.h>
+#include <odp/api/pool_types.h>
+
+#include <stdint.h>
+
+/* DPDK */
+#include <rte_config.h>
+#if defined(__clang__)
+#undef RTE_TOOLCHAIN_GCC
+#endif
+#include <rte_mbuf.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
+
+typedef struct _odp_event_hdr_int_t {
+ /* Pool handle */
+ odp_pool_t pool;
+
+ /* Buffer index in the pool */
+ uint32_t index;
+
+ /* Pool type */
+ int8_t type;
+
+ /* Event type. Maybe different than pool type (crypto compl event) */
+ int8_t event_type;
+
+ /* Event subtype */
+ int8_t subtype;
+
+ /* Event flow id */
+ uint8_t flow_id;
+
+} _odp_event_hdr_int_t;
+
+/* Common header for all event types. Helper for casting, actual pool element types should begin
+ * with explicit struct rte_mbuf and _odp_event_hdr_int_t fields. */
+typedef struct ODP_ALIGNED_CACHE _odp_event_hdr_t {
+ /* Underlying DPDK rte_mbuf */
+ struct rte_mbuf mb;
+
+ /* Common internal header */
+ _odp_event_hdr_int_t hdr;
+
+} _odp_event_hdr_t;
+
+static inline odp_event_t _odp_event_from_hdr(_odp_event_hdr_t *hdr)
+{
+ return (odp_event_t)hdr;
+}
+
+static inline _odp_event_hdr_t *_odp_event_hdr(odp_event_t event)
+{
+ return (_odp_event_hdr_t *)(uintptr_t)event;
+}
+
+static inline odp_event_t _odp_event_from_mbuf(struct rte_mbuf *mbuf)
+{
+ return (odp_event_t)(uintptr_t)mbuf;
+}
+
+static inline struct rte_mbuf *_odp_event_to_mbuf(odp_event_t event)
+{
+ return (struct rte_mbuf *)(uintptr_t)event;
+}
+
+static inline void _odp_event_type_set(odp_event_t event, int ev)
+{
+ _odp_event_hdr(event)->hdr.event_type = ev;
+}
+
+static inline uint64_t *_odp_event_endmark_get_ptr(odp_event_t event)
+{
+ struct rte_mbuf *mbuf = _odp_event_to_mbuf(event);
+
+ return (uint64_t *)((uint8_t *)mbuf->buf_addr + mbuf->buf_len);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_event_vector_internal.h b/platform/linux-dpdk/include/odp_event_vector_internal.h
new file mode 100644
index 000000000..0d4cd9048
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_event_vector_internal.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event vector descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_VECTOR_INTERNAL_H_
+#define ODP_EVENT_VECTOR_INTERNAL_H_
+
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+#include <odp/api/packet.h>
+
+#include <odp/api/plat/event_vector_inline_types.h>
+
+#include <odp_event_internal.h>
+
+#include <rte_config.h>
+
+#include <stdint.h>
+
+/**
+ * Internal event vector header
+ */
+typedef struct ODP_ALIGNED_CACHE odp_event_vector_hdr_t {
+ /* Underlying DPDK rte_mbuf */
+ struct rte_mbuf mb;
+
+ /* Common internal header */
+ _odp_event_hdr_int_t event_hdr;
+
+ /* User area pointer */
+ void *uarea_addr;
+
+ /* Event vector size */
+ uint32_t size;
+
+ /* Flags */
+ _odp_event_vector_flags_t flags;
+
+ /* Vector of packet handles */
+ odp_packet_t packet[];
+
+} odp_event_vector_hdr_t;
+
+ODP_STATIC_ASSERT(sizeof(odp_event_vector_hdr_t) <= 3 * RTE_CACHE_LINE_SIZE,
+ "Additional cache line required for odp_event_vector_hdr_t");
+
+/**
+ * Return the vector header
+ */
+static inline odp_event_vector_hdr_t *_odp_packet_vector_hdr(odp_packet_vector_t pktv)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)pktv;
+}
+
+/**
+ * Return the event header
+ */
+static inline _odp_event_hdr_t *_odp_packet_vector_to_event_hdr(odp_packet_vector_t pktv)
+{
+ return (_odp_event_hdr_t *)(uintptr_t)_odp_packet_vector_hdr(pktv);
+}
+
+/**
+ * Free packet vector and contained packets
+ */
+static inline void _odp_packet_vector_free_full(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ if (pktv_hdr->size)
+ odp_packet_free_multi(pktv_hdr->packet, pktv_hdr->size);
+
+ odp_packet_vector_free(pktv);
+}
+
+#endif /* ODP_EVENT_VECTOR_INTERNAL_H_ */
diff --git a/platform/linux-dpdk/include/odp_eventdev_internal.h b/platform/linux-dpdk/include/odp_eventdev_internal.h
new file mode 100644
index 000000000..a36b85ff5
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_eventdev_internal.h
@@ -0,0 +1,194 @@
+/* Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP eventdev - implementation internal
+ */
+
+#ifndef ODP_EVENTDEV_H_
+#define ODP_EVENTDEV_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/plat/strong_types.h>
+#include <odp/api/queue.h>
+#include <odp/api/schedule_types.h>
+#include <odp/api/thread.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_config_internal.h>
+#include <odp_forward_typedefs_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_ptr_ring_mpmc_internal.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
+
+#include <rte_config.h>
+#include <rte_eventdev.h>
+
+#include <stdint.h>
+
+#define _ODP_SCHED_ID_EVENTDEV (_ODP_SCHED_ID_SCALABLE + 1)
+
+#define RX_ADAPTER_INIT 0
+#define RX_ADAPTER_STOPPED 1
+#define RX_ADAPTER_RUNNING 2
+
+/* Maximum schedule burst size */
+#define MAX_SCHED_BURST 128
+ODP_STATIC_ASSERT(MAX_SCHED_BURST <= UINT16_MAX,
+ "too large schedule burst");
+
+/* Number of scheduling groups */
+#define NUM_SCHED_GRPS 32
+
+ODP_STATIC_ASSERT(sizeof(((struct rte_event *)0)->queue_id) == sizeof(uint8_t),
+ "eventdev queue ID size changed");
+
+ODP_STATIC_ASSERT(CONFIG_MAX_QUEUES >= RTE_EVENT_MAX_QUEUES_PER_DEV,
+ "unable to map all eventdev queues");
+
+struct ODP_ALIGNED_CACHE queue_entry_s {
+ /* The first cache line is read only */
+ queue_enq_fn_t enqueue ODP_ALIGNED_CACHE;
+ queue_deq_fn_t dequeue;
+ queue_enq_multi_fn_t enqueue_multi;
+ queue_deq_multi_fn_t dequeue_multi;
+ uint32_t index;
+ odp_queue_type_t type;
+
+ struct {
+ uint8_t prio;
+ } eventdev;
+
+ ring_mpmc_t ring_mpmc;
+
+ odp_ticketlock_t lock;
+
+ odp_atomic_u64_t num_timers;
+ int status;
+ odp_schedule_sync_t sync;
+
+ queue_deq_multi_fn_t orig_dequeue_multi;
+ odp_queue_param_t param;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ char name[ODP_QUEUE_NAME_LEN];
+};
+
+/* Eventdev global data */
+typedef struct {
+ queue_entry_t queue[CONFIG_MAX_QUEUES];
+ odp_shm_t shm;
+ struct rte_event_dev_config config;
+ struct {
+ odp_ticketlock_t lock;
+ int status;
+ uint8_t id;
+ uint8_t single_queue;
+ } rx_adapter;
+ odp_atomic_u32_t num_started;
+ uint8_t dev_id;
+ uint8_t num_event_ports;
+ uint8_t num_prio;
+
+ struct {
+ uint8_t num_atomic;
+ uint8_t num_ordered;
+ uint8_t num_parallel;
+ } event_queue;
+ pktio_entry_t *pktio[RTE_MAX_ETHPORTS];
+
+ odp_ticketlock_t port_lock;
+ struct {
+ uint8_t linked;
+ } port[ODP_THREAD_COUNT_MAX];
+
+ struct {
+ uint32_t max_queue_size;
+ uint32_t default_queue_size;
+ } plain_config;
+
+ struct {
+ uint32_t max_queue_size;
+ } sched_config;
+
+ /* Schedule groups */
+ odp_thrmask_t mask_all;
+ struct {
+ char name[ODP_SCHED_GROUP_NAME_LEN];
+ odp_thrmask_t mask;
+ uint8_t allocated;
+ queue_entry_t *queue[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ } grp[NUM_SCHED_GRPS];
+ odp_ticketlock_t grp_lock;
+
+ /* Scheduler interface config options (not used in fast path) */
+ schedule_config_t config_if;
+
+} eventdev_global_t;
+
+/* Eventdev local data */
+typedef struct {
+ struct {
+ struct rte_event event[MAX_SCHED_BURST];
+ uint16_t idx;
+ uint16_t count;
+ } cache;
+ uint8_t port_id;
+ uint8_t paused;
+ uint8_t started;
+} eventdev_local_t;
+
+extern eventdev_global_t *_odp_eventdev_gbl;
+extern __thread eventdev_local_t _odp_eventdev_local;
+
+int _odp_service_setup(uint32_t service_id);
+
+int _odp_dummy_link_queues(uint8_t dev_id, uint8_t dummy_linked_queues[], int num);
+
+int _odp_dummy_unlink_queues(uint8_t dev_id, uint8_t dummy_linked_queues[], int num);
+
+void _odp_rx_adapter_port_stop(uint16_t port_id);
+
+int _odp_rx_adapter_close(void);
+
+static inline uint8_t event_schedule_type(odp_schedule_sync_t sync)
+{
+ /* Ordered queues implemented using atomic queues */
+ if (sync == ODP_SCHED_SYNC_PARALLEL)
+ return RTE_SCHED_TYPE_PARALLEL;
+ else
+ return RTE_SCHED_TYPE_ATOMIC;
+}
+
+static inline odp_queue_t queue_from_qentry(queue_entry_t *queue)
+{
+ return (odp_queue_t)queue;
+}
+
+static inline queue_entry_t *qentry_from_index(uint32_t queue_id)
+{
+ return &_odp_eventdev_gbl->queue[queue_id];
+}
+
+static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
+{
+ return (queue_entry_t *)(uintptr_t)handle;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_packet_dpdk.h b/platform/linux-dpdk/include/odp_packet_dpdk.h
deleted file mode 100644
index 495d5e6f1..000000000
--- a/platform/linux-dpdk/include/odp_packet_dpdk.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PACKET_DPDK_H
-#define ODP_PACKET_DPDK_H
-
-#include <stdint.h>
-#include <net/if.h>
-
-#include <protocols/eth.h>
-#include <odp/api/align.h>
-#include <odp/api/debug.h>
-#include <odp/api/packet.h>
-#include <odp_packet_internal.h>
-#include <odp/api/pool.h>
-#include <odp_pool_internal.h>
-#include <odp_buffer_internal.h>
-#include <odp/api/std_types.h>
-
-#include <rte_config.h>
-#include <rte_memory.h>
-#include <rte_memzone.h>
-#include <rte_launch.h>
-#include <rte_tailq.h>
-#include <rte_eal.h>
-#include <rte_per_lcore.h>
-#include <rte_lcore.h>
-#include <rte_branch_prediction.h>
-#include <rte_prefetch.h>
-#include <rte_cycles.h>
-#include <rte_errno.h>
-#include <rte_debug.h>
-#include <rte_log.h>
-#include <rte_byteorder.h>
-#include <rte_pci.h>
-#include <rte_random.h>
-#include <rte_ether.h>
-#include <rte_ethdev.h>
-#include <rte_hash.h>
-#include <rte_jhash.h>
-#include <rte_hash_crc.h>
-
-#define RTE_TEST_RX_DESC_DEFAULT 128
-#define RTE_TEST_TX_DESC_DEFAULT 512
-
-#endif
diff --git a/platform/linux-dpdk/include/odp_packet_internal.h b/platform/linux-dpdk/include/odp_packet_internal.h
index d3f00847e..cae77245a 100644
--- a/platform/linux-dpdk/include/odp_packet_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -19,47 +19,54 @@ extern "C" {
#endif
#include <odp/api/align.h>
-#include <odp_debug_internal.h>
#include <odp/api/debug.h>
-#include <odp_buffer_internal.h>
-#include <odp_buffer_inlines.h>
-#include <odp_pool_internal.h>
+#include <odp/api/hints.h>
+#include <odp/api/ipsec.h>
#include <odp/api/packet.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
-#include <odp_crypto_internal.h>
-#include <protocols/eth.h>
-#include <odp/api/plat/packet_types.h>
+#include <odp/api/comp.h>
+#include <odp/api/std.h>
+
+#include <odp/api/plat/packet_inline_types.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_pool_internal.h>
-#include <rte_acl_osdep.h>
+#include <protocols/eth.h>
+#include <stdint.h>
+#include <string.h>
-/** Minimum segment length expected by packet_parse_common() */
-#define PACKET_PARSE_SEG_LEN 96
+#include <rte_config.h>
+#if defined(__clang__)
+#undef RTE_TOOLCHAIN_GCC
+#endif
+#include <rte_mbuf.h>
+#include <rte_memory.h>
+/* ppc64 rte_memcpy.h (included through rte_mbuf.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
ODP_STATIC_ASSERT(sizeof(_odp_packet_input_flags_t) == sizeof(uint64_t),
"INPUT_FLAGS_SIZE_ERROR");
-/**
- * Packet error flags
- */
-typedef union {
- /* All error flags */
- uint32_t all;
+ODP_STATIC_ASSERT(sizeof(_odp_packet_flags_t) == sizeof(uint32_t),
+ "PACKET_FLAGS_SIZE_ERROR");
- struct {
- /* Bitfield flags for each detected error */
- uint32_t app_error:1; /**< Error bit for application use */
- uint32_t frame_len:1; /**< Frame length error */
- uint32_t snap_len:1; /**< Snap length error */
- uint32_t l2_chksum:1; /**< L2 checksum error, checks TBD */
- uint32_t ip_err:1; /**< IP error, checks TBD */
- uint32_t tcp_err:1; /**< TCP error, checks TBD */
- uint32_t udp_err:1; /**< UDP error, checks TBD */
- };
-} error_flags_t;
+/* Maximum number of segments per packet */
+#define PKT_MAX_SEGS 60
+
+ODP_STATIC_ASSERT(PKT_MAX_SEGS < 256, "Maximum of 255 segments supported");
-ODP_STATIC_ASSERT(sizeof(error_flags_t) == sizeof(uint32_t),
- "ERROR_FLAGS_SIZE_ERROR");
+ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256, "Segment size must be a minimum of 256 bytes");
+
+ODP_STATIC_ASSERT(CONFIG_PACKET_MAX_SEG_LEN <= UINT16_MAX, "Segment size must fit in uint16_t");
+
+/* We can't enforce tailroom reservation for received packets */
+ODP_STATIC_ASSERT(CONFIG_PACKET_TAILROOM == 0, "Tailroom has to be 0, DPDK doesn't support this");
/**
* Packet output flags
@@ -84,36 +91,23 @@ ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t),
"OUTPUT_FLAGS_SIZE_ERROR");
/**
- * Protocol stack layers
- */
-typedef enum {
- LAYER_NONE = 0,
- LAYER_L1,
- LAYER_L2,
- LAYER_L3,
- LAYER_L4,
- LAYER_ALL
-} layer_t;
-
-/**
* Packet parser metadata
*/
typedef struct {
+ /* Packet input flags */
_odp_packet_input_flags_t input_flags;
- error_flags_t error_flags;
- output_flags_t output_flags;
- uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */
- uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */
- uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+ /* Other flags */
+ _odp_packet_flags_t flags;
- uint32_t l3_len; /**< Layer 3 length */
- uint32_t l4_len; /**< Layer 4 length */
+ /* offset to L2 hdr, e.g. Eth */
+ uint16_t l2_offset;
- uint16_t ethtype; /**< EtherType */
- uint8_t ip_proto; /**< IP protocol */
- uint8_t parsed_layers; /**< Highest parsed protocol stack layer */
+ /* offset to L3 hdr, e.g. IPv4, IPv6 */
+ uint16_t l3_offset;
+ /* offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+ uint16_t l4_offset;
} packet_parser_t;
/**
@@ -123,139 +117,272 @@ typedef struct {
* packet_init(). Because of this any new fields added must be reviewed for
* initialization requirements.
*/
-typedef struct {
- /* common buffer header */
- odp_buffer_hdr_t buf_hdr;
+typedef struct ODP_ALIGNED_CACHE odp_packet_hdr_t {
+ /* Underlying DPDK rte_mbuf */
+ struct rte_mbuf mb;
- /*
- * Following members are initialized by packet_init()
- */
+ /* Common internal header */
+ _odp_event_hdr_int_t event_hdr;
+ /* Parser metadata */
packet_parser_t p;
+ /* Input interface */
odp_pktio_t input;
- /*
- * Members below are not initialized by packet_init()
- */
-
- /* User metadata size, it's right after odp_packet_hdr_t */
- uint32_t uarea_size;
-
/* Timestamp value */
odp_time_t timestamp;
- /* Classifier destination queue */
+ /* Used as classifier destination queue, in IPsec inline input
+ * processing and as Tx completion event queue. */
odp_queue_t dst_queue;
- /* Result for crypto */
- odp_crypto_generic_op_result_t op_result;
-} odp_packet_hdr_t __rte_cache_aligned;
+ /* --- 64-byte cache line boundary --- */
+
+ /* User area pointer */
+ void *uarea_addr;
+
+ /* User context pointer */
+ const void *user_ptr;
+
+ /* Classifier mark */
+ uint16_t cls_mark;
+
+ /* Classifier handle index */
+ uint16_t cos;
+
+ /* Offset to payload start */
+ uint16_t payload_offset;
+
+ /* Max payload size in a LSO segment */
+ uint16_t lso_max_payload;
+
+ /* Packet aging drop timeout before enqueue. Once enqueued holds the maximum age (time of
+ * request + requested drop timeout). */
+ uint64_t tx_aging_ns;
+
+ /* Tx completion poll completion identifier */
+ uint32_t tx_compl_id;
+
+ /* LSO profile index */
+ uint8_t lso_profile_idx;
+
+ union {
+ /* Result for crypto packet op */
+ odp_crypto_packet_result_t crypto_op_result;
+
+ /* Context for IPsec */
+ odp_ipsec_packet_result_t ipsec_ctx;
+
+ /* Result for comp packet op */
+ odp_comp_packet_result_t comp_op_result;
+ };
+
+ /* Temp storage for digest */
+#define PACKET_DIGEST_MAX 64
+ uint8_t crypto_digest_buf[PACKET_DIGEST_MAX];
+
+ /* Temp storage for AAD */
+#define PACKET_AAD_MAX 32
+ uint8_t crypto_aad_buf[PACKET_AAD_MAX];
+} odp_packet_hdr_t;
+
+ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 6 * RTE_CACHE_LINE_SIZE,
+ "Additional cache line required for odp_packet_hdr_t");
/**
* Return the packet header
*/
-static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt)
+static inline odp_packet_hdr_t *packet_hdr(odp_packet_t pkt)
{
return (odp_packet_hdr_t *)(uintptr_t)pkt;
}
-static inline struct rte_mbuf *pkt_to_mbuf(odp_packet_hdr_t *pkt_hdr)
+static inline odp_packet_t packet_handle(odp_packet_hdr_t *pkt_hdr)
{
- return &pkt_hdr->buf_hdr.mb;
+ return (odp_packet_t)pkt_hdr;
}
-static inline void copy_packet_parser_metadata(odp_packet_hdr_t *src_hdr,
- odp_packet_hdr_t *dst_hdr)
+static inline _odp_event_hdr_t *packet_to_event_hdr(odp_packet_t pkt)
{
- dst_hdr->p = src_hdr->p;
+ return (_odp_event_hdr_t *)(uintptr_t)packet_hdr(pkt);
}
-static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr,
- odp_packet_hdr_t *dst_hdr)
+static inline odp_packet_t packet_from_event_hdr(_odp_event_hdr_t *event_hdr)
{
- dst_hdr->p = src_hdr->p;
- dst_hdr->dst_queue = src_hdr->dst_queue;
- dst_hdr->timestamp = src_hdr->timestamp;
- dst_hdr->op_result = src_hdr->op_result;
+ return (odp_packet_t)(uintptr_t)event_hdr;
}
-static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr)
+static inline struct rte_mbuf *pkt_to_mbuf(odp_packet_t pkt)
{
- return rte_pktmbuf_pkt_len(&pkt_hdr->buf_hdr.mb);
+ return (struct rte_mbuf *)(uintptr_t)pkt;
}
-static inline void packet_set_len(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+static inline void packet_subtype_set(odp_packet_t pkt, int subtype)
{
- rte_pktmbuf_pkt_len(&pkt_hdr->buf_hdr.mb) = len;
+ packet_hdr(pkt)->event_hdr.subtype = subtype;
}
-static inline int packet_parse_l2_not_done(packet_parser_t *prs)
+/**
+ * Initialize ODP headers
+ */
+static inline void packet_init(odp_packet_hdr_t *pkt_hdr, odp_pktio_t input)
{
- return !prs->input_flags.parsed_l2;
-}
+ /* Clear all flags. Resets also return value of cls_mark, user_ptr, etc. */
+ pkt_hdr->p.input_flags.all = 0;
+ pkt_hdr->p.flags.all_flags = 0;
-static inline int packet_parse_not_complete(odp_packet_hdr_t *pkt_hdr)
-{
- return pkt_hdr->p.parsed_layers != LAYER_ALL;
-}
+ pkt_hdr->p.l2_offset = 0;
+ pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
-/* Forward declarations */
-int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt);
+ if (odp_unlikely(pkt_hdr->event_hdr.subtype != ODP_EVENT_PACKET_BASIC))
+ pkt_hdr->event_hdr.subtype = ODP_EVENT_PACKET_BASIC;
-/* Fill in parser metadata for L2 */
-static inline void packet_parse_l2(packet_parser_t *prs, uint32_t frame_len)
+ pkt_hdr->input = input;
+}
+
+/**
+ * Check if copying packet metadata between pools is possible
+ *
+ * @retval 0 when possible without user area copy
+ * @retval >0 when possible with user area copy
+ * @retval <0 when not possible
+ */
+static inline int _odp_packet_copy_md_possible(odp_pool_t dst_pool,
+ odp_pool_t src_pool)
{
- /* Packet alloc or reset have already init other offsets and flags */
+ const pool_t *src_hdr;
+ const pool_t *dst_hdr;
- /* We only support Ethernet for now */
- prs->input_flags.eth = 1;
+ if (src_pool == dst_pool)
+ return 0;
- /* Detect jumbo frames */
- if (frame_len > _ODP_ETH_LEN_MAX)
- prs->input_flags.jumbo = 1;
+ src_hdr = _odp_pool_entry(src_pool);
+ dst_hdr = _odp_pool_entry(dst_pool);
- /* Assume valid L2 header, no CRC/FCS check in SW */
- prs->input_flags.l2 = 1;
+ if (dst_hdr->params.pkt.uarea_size < src_hdr->params.pkt.uarea_size)
+ return -1;
- prs->input_flags.parsed_l2 = 1;
+ return 1;
}
-static inline void _odp_packet_reset_parse(odp_packet_t pkt)
+/**
+ * Copy packet metadata
+ *
+ * This function is assumed to never fail. Use _odp_packet_copy_md_possible() to
+ * check beforehand that copying packet metadata between source and destination
+ * packet pools is possible.
+ *
+ * @param uarea_copy Copy user area data. If false, user area pointers
+ * are swapped between the packet headers (allowed
+ * only when packets are from the same pool).
+ */
+static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
+ odp_packet_hdr_t *src_hdr,
+ odp_bool_t uarea_copy)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ const int8_t subtype = src_hdr->event_hdr.subtype;
- uint32_t frame_len = rte_pktmbuf_pkt_len(&pkt_hdr->buf_hdr.mb);
+ dst_hdr->input = src_hdr->input;
+ dst_hdr->event_hdr.subtype = subtype;
+ dst_hdr->dst_queue = src_hdr->dst_queue;
+ dst_hdr->cos = src_hdr->cos;
+ dst_hdr->cls_mark = src_hdr->cls_mark;
+ dst_hdr->user_ptr = src_hdr->user_ptr;
+
+ dst_hdr->mb.port = src_hdr->mb.port;
+ dst_hdr->mb.ol_flags = src_hdr->mb.ol_flags;
+ dst_hdr->mb.packet_type = src_hdr->mb.packet_type;
+ dst_hdr->mb.vlan_tci = src_hdr->mb.vlan_tci;
+ dst_hdr->mb.hash.rss = src_hdr->mb.hash.rss;
+ dst_hdr->mb.hash = src_hdr->mb.hash;
+ dst_hdr->mb.vlan_tci_outer = src_hdr->mb.vlan_tci_outer;
+ dst_hdr->mb.tx_offload = src_hdr->mb.tx_offload;
+
+ if (src_hdr->p.input_flags.timestamp)
+ dst_hdr->timestamp = src_hdr->timestamp;
+
+ if (src_hdr->p.flags.lso) {
+ dst_hdr->lso_max_payload = src_hdr->lso_max_payload;
+ dst_hdr->lso_profile_idx = src_hdr->lso_profile_idx;
+ }
- pkt_hdr->p.parsed_layers = LAYER_NONE;
- pkt_hdr->p.input_flags.all = 0;
- pkt_hdr->p.output_flags.all = 0;
- pkt_hdr->p.error_flags.all = 0;
- pkt_hdr->p.l2_offset = 0;
+ if (src_hdr->p.flags.payload_off)
+ dst_hdr->payload_offset = src_hdr->payload_offset;
- packet_parse_l2(&pkt_hdr->p, frame_len);
-}
+ dst_hdr->p = src_hdr->p;
-/* Perform packet parse up to a given protocol layer */
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr, layer_t layer);
+ if (src_hdr->uarea_addr) {
+ if (uarea_copy) {
+ const pool_t *src_pool = _odp_pool_entry(src_hdr->event_hdr.pool);
+ const pool_t *dst_pool = _odp_pool_entry(dst_hdr->event_hdr.pool);
+ const uint32_t src_uarea_size = src_pool->params.pkt.uarea_size;
+ const uint32_t dst_uarea_size = dst_pool->params.pkt.uarea_size;
-/* Reset parser metadata for a new parse */
-void packet_parse_reset(odp_packet_hdr_t *pkt_hdr);
+ _ODP_ASSERT(dst_hdr->uarea_addr != NULL);
+ _ODP_ASSERT(dst_uarea_size >= src_uarea_size);
-/* Convert a packet handle to a buffer handle */
-odp_buffer_t _odp_packet_to_buffer(odp_packet_t pkt);
+ memcpy(dst_hdr->uarea_addr, src_hdr->uarea_addr, src_uarea_size);
+ } else {
+ void *src_uarea = src_hdr->uarea_addr;
-/* Convert a buffer handle to a packet handle */
-odp_packet_t _odp_packet_from_buffer(odp_buffer_t buf);
+ /* If user area exists, packets should always be from the same pool, so
+ * user area pointers can simply be swapped. */
+ _ODP_ASSERT(dst_hdr->event_hdr.pool == src_hdr->event_hdr.pool);
-static inline int packet_hdr_has_l2(odp_packet_hdr_t *pkt_hdr)
+ src_hdr->uarea_addr = dst_hdr->uarea_addr;
+ dst_hdr->uarea_addr = src_uarea;
+ }
+ }
+
+ if (odp_unlikely(subtype != ODP_EVENT_PACKET_BASIC)) {
+ if (subtype == ODP_EVENT_PACKET_IPSEC)
+ dst_hdr->ipsec_ctx = src_hdr->ipsec_ctx;
+ else if (subtype == ODP_EVENT_PACKET_CRYPTO)
+ dst_hdr->crypto_op_result = src_hdr->crypto_op_result;
+ else if (subtype == ODP_EVENT_PACKET_COMP)
+ dst_hdr->comp_op_result = src_hdr->comp_op_result;
+ }
+}
+
+static inline void _odp_packet_copy_cls_md(odp_packet_hdr_t *dst_hdr,
+ odp_packet_hdr_t *src_hdr)
{
- return pkt_hdr->p.input_flags.l2;
+ dst_hdr->p = src_hdr->p;
+ dst_hdr->dst_queue = src_hdr->dst_queue;
+ dst_hdr->cos = src_hdr->cos;
+ dst_hdr->cls_mark = src_hdr->cls_mark;
}
-static inline void packet_hdr_has_l2_set(odp_packet_hdr_t *pkt_hdr, int val)
+static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr)
+{
+ return rte_pktmbuf_pkt_len(&pkt_hdr->mb);
+}
+
+static inline void packet_set_len(odp_packet_hdr_t *pkt_hdr, uint32_t len)
{
- pkt_hdr->p.input_flags.l2 = val;
+ rte_pktmbuf_pkt_len(&pkt_hdr->mb) = len;
+}
+
+/* Reset parser metadata for a new parse */
+static inline void packet_parse_reset(odp_packet_hdr_t *pkt_hdr, int all)
+{
+ /* Reset parser metadata before new parse */
+ pkt_hdr->p.input_flags.all = 0;
+ pkt_hdr->p.l2_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+
+ if (all)
+ pkt_hdr->p.flags.all_flags = 0;
+ else /* Keep user ptr and pktout flags */
+ pkt_hdr->p.flags.all.error = 0;
+}
+
+static inline int packet_hdr_has_l2(odp_packet_hdr_t *pkt_hdr)
+{
+ return pkt_hdr->p.input_flags.l2;
}
static inline int packet_hdr_has_eth(odp_packet_hdr_t *pkt_hdr)
@@ -276,12 +403,19 @@ static inline void packet_set_ts(odp_packet_hdr_t *pkt_hdr, odp_time_t *ts)
}
}
-int packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr,
- uint32_t pkt_len, uint32_t seg_len, layer_t layer);
+int _odp_packet_set_data(odp_packet_t pkt, uint32_t offset,
+ uint8_t c, uint32_t len);
-/* We can't enforce tailroom reservation for received packets */
-ODP_STATIC_ASSERT(CONFIG_PACKET_TAILROOM == 0,
- "ERROR: Tailroom has to be 0, DPDK doesn't support this");
+int _odp_packet_cmp_data(odp_packet_t pkt, uint32_t offset,
+ const void *s, uint32_t len);
+
+int _odp_packet_ipv4_chksum_insert(odp_packet_t pkt);
+int _odp_packet_tcp_chksum_insert(odp_packet_t pkt);
+int _odp_packet_udp_chksum_insert(odp_packet_t pkt);
+int _odp_packet_sctp_chksum_insert(odp_packet_t pkt);
+
+int _odp_packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
+ odp_pktin_config_opt_t opt, uint64_t l4_part_sum);
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h
index 9dc420c4b..bab750a22 100644
--- a/platform/linux-dpdk/include/odp_packet_io_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_io_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -18,18 +18,33 @@
extern "C" {
#endif
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
#include <odp/api/spinlock.h>
#include <odp/api/ticketlock.h>
-#include <odp_classification_datamodel.h>
-#include <odp_align_internal.h>
-#include <odp_debug_internal.h>
+#include <odp/api/time.h>
+#include <odp/api/plat/packet_io_inlines.h>
+
+#include <odp_classification_datamodel.h>
#include <odp_config_internal.h>
-#include <odp/api/hints.h>
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_queue_if.h>
-#define PKTIO_MAX_QUEUES 64
+#include <inttypes.h>
#include <linux/if_ether.h>
-#include <odp_packet_dpdk.h>
+#include <sys/select.h>
+
+#define PKTIO_LSO_PROFILES 16
+/* Assume at least an Ethernet header per segment */
+#define PKTIO_LSO_MIN_PAYLOAD_OFFSET 14
+#define PKTIO_LSO_MAX_PAYLOAD_OFFSET 128
+/* Allow 64 kB packet to be split into about 1kB segments */
+#define PKTIO_LSO_MAX_SEGMENTS 64
+
+ODP_STATIC_ASSERT(PKTIO_LSO_PROFILES < UINT8_MAX, "PKTIO_LSO_PROFILES_ERROR");
#define PKTIO_NAME_LEN 256
@@ -38,41 +53,38 @@ extern "C" {
/* Forward declaration */
struct pktio_if_ops;
-struct pkt_dpdk_t;
-typedef struct {
- odp_queue_t loopq; /**< loopback queue for "loop" device */
- odp_bool_t promisc; /**< promiscuous mode state */
-} pkt_loop_t;
+#define PKTIO_PRIVATE_SIZE 9216
-/** Packet socket using dpdk mmaped rings for both Rx and Tx */
-typedef struct {
- odp_pktio_capability_t capa; /**< interface capabilities */
-
- /********************************/
- char ifname[32];
- uint8_t min_rx_burst;
- uint8_t portid;
- odp_bool_t vdev_sysc_promisc; /**< promiscuous mode defined with
- system call */
- odp_pktin_hash_proto_t hash; /**< Packet input hash protocol */
- odp_bool_t lockless_rx; /**< no locking for rx */
- odp_bool_t lockless_tx; /**< no locking for tx */
- odp_ticketlock_t rx_lock[PKTIO_MAX_QUEUES]; /**< RX queue locks */
- odp_ticketlock_t tx_lock[PKTIO_MAX_QUEUES]; /**< TX queue locks */
-} pkt_dpdk_t;
-
-struct pktio_entry {
+typedef struct ODP_ALIGNED_CACHE {
const struct pktio_if_ops *ops; /**< Implementation specific methods */
/* These two locks together lock the whole pktio device */
odp_ticketlock_t rxl; /**< RX ticketlock */
odp_ticketlock_t txl; /**< TX ticketlock */
- int cls_enabled; /**< is classifier enabled */
+ odp_proto_layer_t parse_layer;
+ uint16_t pktin_frame_offset;
+
+ struct {
+ union {
+ uint8_t all_flags;
+
+ struct {
+ /* Pktout checksum offload */
+ uint8_t chksum_insert : 1;
+ /* Classifier */
+ uint8_t cls : 1;
+ /* Tx timestamp */
+ uint8_t tx_ts : 1;
+ /* Tx completion events */
+ uint8_t tx_compl : 1;
+ /* Packet aging */
+ uint8_t tx_aging : 1;
+ };
+ };
+ } enabled;
+
odp_pktio_t handle; /**< pktio handle */
- union {
- pkt_loop_t pkt_loop; /**< Using loopback for IO */
- pkt_dpdk_t pkt_dpdk; /**< using DPDK API for IO */
- };
+ unsigned char pkt_priv[PKTIO_PRIVATE_SIZE] ODP_ALIGNED_CACHE;
enum {
/* Not allocated */
PKTIO_STATE_FREE = 0,
@@ -93,37 +105,75 @@ struct pktio_entry {
} state;
odp_pktio_config_t config; /**< Device configuration */
classifier_t cls; /**< classifier linked with this pktio*/
- odp_pktio_stats_t stats; /**< statistic counters for pktio */
+ /* Driver level statistics counters */
+ odp_pktio_stats_t stats;
+ /* Statistics counters used also outside drivers */
+ struct {
+ odp_atomic_u64_t in_discards;
+ odp_atomic_u64_t in_errors;
+ odp_atomic_u64_t out_discards;
+ } stats_extra;
+ /* Latest Tx timestamp */
+ odp_atomic_u64_t tx_ts;
char name[PKTIO_NAME_LEN]; /**< name of pktio provided to
- pktio_open() */
+ internal pktio_open() calls */
+ char full_name[PKTIO_NAME_LEN]; /**< original pktio name passed to
+ odp_pktio_open() and returned by
+ odp_pktio_info() */
odp_pool_t pool;
odp_pktio_param_t param;
+ odp_pktio_capability_t capa; /**< Packet IO capabilities */
+
+ /* Pool for Tx completion events */
+ odp_pool_t tx_compl_pool;
+ /* Status map SHM handle */
+ odp_shm_t tx_compl_status_shm;
+ /* Status map for Tx completion identifiers */
+ odp_atomic_u32_t *tx_compl_status;
/* Storage for queue handles
* Multi-queue support is pktio driver specific */
- unsigned num_in_queue;
- unsigned num_out_queue;
+ uint32_t num_in_queue;
+ uint32_t num_out_queue;
struct {
odp_queue_t queue;
odp_pktin_queue_t pktin;
- } in_queue[PKTIO_MAX_QUEUES];
+ odp_pktin_vector_config_t vector;
+ } in_queue[ODP_PKTIN_MAX_QUEUES];
struct {
odp_queue_t queue;
odp_pktout_queue_t pktout;
- } out_queue[PKTIO_MAX_QUEUES];
-};
+ } out_queue[ODP_PKTOUT_MAX_QUEUES];
-typedef union {
- struct pktio_entry s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pktio_entry))];
} pktio_entry_t;
typedef struct {
+ odp_lso_profile_param_t param;
+ int used;
+ uint8_t index;
+
+} lso_profile_t;
+
+/* Global variables */
+typedef struct {
odp_spinlock_t lock;
- pktio_entry_t entries[ODP_CONFIG_PKTIO_ENTRIES];
-} pktio_table_t;
+ odp_shm_t shm;
+
+ struct {
+ /* Frame start offset from base pointer at packet input */
+ uint16_t pktin_frame_offset;
+ /* Pool size for potential completion events */
+ uint32_t tx_compl_pool_size;
+ } config;
+
+ pktio_entry_t entries[CONFIG_PKTIO_ENTRIES];
+
+ lso_profile_t lso_profile[PKTIO_LSO_PROFILES];
+ int num_lso_profiles;
+
+} pktio_global_t;
typedef struct pktio_if_ops {
const char *name;
@@ -138,17 +188,36 @@ typedef struct pktio_if_ops {
int (*stop)(pktio_entry_t *pktio_entry);
int (*stats)(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats);
int (*stats_reset)(pktio_entry_t *pktio_entry);
- uint64_t (*pktin_ts_res)(pktio_entry_t *pktio_entry);
- odp_time_t (*pktin_ts_from_ns)(pktio_entry_t *pktio_entry, uint64_t ns);
+ int (*pktin_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats);
+ int (*pktout_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats);
+ int (*extra_stat_info)(pktio_entry_t *pktio_entry, odp_pktio_extra_stat_info_t info[],
+ int num);
+ int (*extra_stats)(pktio_entry_t *pktio_entry, uint64_t stats[], int num);
+ int (*extra_stat_counter)(pktio_entry_t *pktio_entry, uint32_t id, uint64_t *stat);
+ uint64_t (*pktio_ts_res)(pktio_entry_t *pktio_entry);
+ odp_time_t (*pktio_ts_from_ns)(pktio_entry_t *pktio_entry, uint64_t ns);
+ odp_time_t (*pktio_time)(pktio_entry_t *pktio_entry, odp_time_t *global_ts);
int (*recv)(pktio_entry_t *entry, int index, odp_packet_t packets[],
int num);
+ int (*recv_tmo)(pktio_entry_t *entry, int index, odp_packet_t packets[],
+ int num, uint64_t wait_usecs);
+ int (*recv_mq_tmo)(pktio_entry_t *entry[], int index[], uint32_t num_q,
+ odp_packet_t packets[], int num, uint32_t *from,
+ uint64_t wait_usecs);
+ int (*fd_set)(pktio_entry_t *entry, int index, fd_set *readfds);
int (*send)(pktio_entry_t *entry, int index,
const odp_packet_t packets[], int num);
- uint32_t (*mtu_get)(pktio_entry_t *pktio_entry);
+ uint32_t (*maxlen_get)(pktio_entry_t *pktio_entry);
+ int (*maxlen_set)(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output);
int (*promisc_mode_set)(pktio_entry_t *pktio_entry, int enable);
int (*promisc_mode_get)(pktio_entry_t *pktio_entry);
int (*mac_get)(pktio_entry_t *pktio_entry, void *mac_addr);
+ int (*mac_set)(pktio_entry_t *pktio_entry, const void *mac_addr);
int (*link_status)(pktio_entry_t *pktio_entry);
+ int (*link_info)(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info);
int (*capability)(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa);
int (*config)(pktio_entry_t *pktio_entry,
@@ -159,53 +228,118 @@ typedef struct pktio_if_ops {
const odp_pktout_queue_param_t *p);
} pktio_if_ops_t;
-extern void *pktio_entry_ptr[];
+typedef struct {
+ const void *user_ptr;
+} _odp_pktio_tx_compl_t;
-static inline int pktio_to_id(odp_pktio_t pktio)
-{
- return _odp_typeval(pktio) - 1;
-}
+extern void *_odp_pktio_entry_ptr[];
static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
{
+ int idx;
+
if (odp_unlikely(pktio == ODP_PKTIO_INVALID))
return NULL;
- if (odp_unlikely(_odp_typeval(pktio) > ODP_CONFIG_PKTIO_ENTRIES)) {
- ODP_DBG("pktio limit %d/%d exceed\n",
- _odp_typeval(pktio), ODP_CONFIG_PKTIO_ENTRIES);
+ if (odp_unlikely(_odp_typeval(pktio) > CONFIG_PKTIO_ENTRIES)) {
+ _ODP_DBG("pktio limit %" PRIuPTR "/%d exceed\n",
+ _odp_typeval(pktio), CONFIG_PKTIO_ENTRIES);
return NULL;
}
- return pktio_entry_ptr[pktio_to_id(pktio)];
+ idx = odp_pktio_index(pktio);
+
+ return _odp_pktio_entry_ptr[idx];
}
static inline int pktio_cls_enabled(pktio_entry_t *entry)
{
- return entry->s.cls_enabled;
+ return entry->enabled.cls;
}
-static inline void pktio_cls_enabled_set(pktio_entry_t *entry, int ena)
+uint16_t _odp_dpdk_pktio_port_id(pktio_entry_t *entry);
+
+int _odp_input_pkts(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], int num);
+
+static inline int _odp_pktio_tx_ts_enabled(pktio_entry_t *entry)
{
- entry->s.cls_enabled = ena;
+ return entry->enabled.tx_ts;
}
-/*
- * Dummy single queue implementations of multi-queue API
- */
-int single_capability(odp_pktio_capability_t *capa);
-int single_input_queues_config(pktio_entry_t *entry,
- const odp_pktin_queue_param_t *param);
-int single_output_queues_config(pktio_entry_t *entry,
- const odp_pktout_queue_param_t *param);
-int single_recv_queue(pktio_entry_t *entry, int index, odp_packet_t packets[],
- int num);
-int single_send_queue(pktio_entry_t *entry, int index,
- const odp_packet_t packets[], int num);
-
-extern const pktio_if_ops_t loopback_pktio_ops;
-extern const pktio_if_ops_t dpdk_pktio_ops;
-extern const pktio_if_ops_t * const pktio_if_ops[];
+static inline int _odp_pktio_tx_compl_enabled(const pktio_entry_t *entry)
+{
+ return entry->enabled.tx_compl;
+}
+
+static inline int _odp_pktio_tx_aging_enabled(pktio_entry_t *entry)
+{
+ return entry->enabled.tx_aging;
+}
+
+static inline void _odp_pktio_tx_ts_set(pktio_entry_t *entry)
+{
+ odp_time_t ts_val = odp_time_global();
+
+ odp_atomic_store_u64(&entry->tx_ts, ts_val.u64);
+}
+
+extern const pktio_if_ops_t _odp_loopback_pktio_ops;
+extern const pktio_if_ops_t _odp_null_pktio_ops;
+extern const pktio_if_ops_t _odp_dpdk_pktio_ops;
+extern const pktio_if_ops_t * const _odp_pktio_if_ops[];
+
+/* Dummy function required by odp_pktin_recv_mq_tmo() */
+static inline int
+_odp_sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
+ uint32_t num_q ODP_UNUSED,
+ uint32_t *from ODP_UNUSED,
+ odp_packet_t packets[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ uint64_t usecs ODP_UNUSED,
+ int *trial_successful) {
+ (void)queues;
+
+ *trial_successful = 0;
+ return 0;
+}
+
+/* Setup PKTOUT with single queue for TM */
+int _odp_pktio_pktout_tm_config(odp_pktio_t pktio_hdl,
+ odp_pktout_queue_t *queue, bool reconf);
+
+/* LSO functions shared with TM */
+odp_lso_profile_t _odp_lso_prof_from_idx(uint8_t idx);
+
+int _odp_lso_num_packets(odp_packet_t packet, const odp_packet_lso_opt_t *lso_opt,
+ uint32_t *len_out, uint32_t *left_over_out);
+
+int _odp_lso_create_packets(odp_packet_t packet, const odp_packet_lso_opt_t *lso_opt,
+ uint32_t payload_len, uint32_t left_over_len,
+ odp_packet_t pkt_out[], int num_pkt);
+
+void _odp_pktio_process_tx_compl(const pktio_entry_t *entry, const odp_packet_t packets[],
+ int num);
+
+static inline int _odp_pktio_packet_to_pool(odp_packet_t *pkt,
+ odp_packet_hdr_t **pkt_hdr,
+ odp_pool_t new_pool)
+{
+ odp_packet_t new_pkt;
+
+ if (odp_likely(new_pool == odp_packet_pool(*pkt)))
+ return 0;
+
+ new_pkt = odp_packet_copy(*pkt, new_pool);
+
+ if (odp_unlikely(new_pkt == ODP_PACKET_INVALID))
+ return 1;
+
+ odp_packet_free(*pkt);
+ *pkt = new_pkt;
+ *pkt_hdr = packet_hdr(new_pkt);
+
+ return 0;
+}
#ifdef __cplusplus
}
diff --git a/platform/linux-dpdk/include/odp_pool_internal.h b/platform/linux-dpdk/include/odp_pool_internal.h
index 0c157592c..b8fd17314 100644
--- a/platform/linux-dpdk/include/odp_pool_internal.h
+++ b/platform/linux-dpdk/include/odp_pool_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -18,20 +18,36 @@
extern "C" {
#endif
-#include <odp/api/std_types.h>
-#include <odp/api/pool.h>
-#include <odp_buffer_internal.h>
-#include <odp/api/packet_io.h>
#include <odp/api/align.h>
+#include <odp/api/event.h>
#include <odp/api/hints.h>
+#include <odp/api/pool.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/std_types.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/strong_types.h>
+
#include <odp_config_internal.h>
-#include <odp/api/debug.h>
#include <odp_debug_internal.h>
-#include <odp/api/plat/strong_types.h>
+#include <odp_event_internal.h>
+
#include <string.h>
/* for DPDK */
+#include <rte_config.h>
+#include <rte_mbuf.h>
#include <rte_mempool.h>
+/* ppc64 rte_memcpy.h may overwrite bool with an incompatible type and define
+ * vector */
+#if defined(__PPC64__) && defined(bool)
+ #undef bool
+ #define bool _Bool
+#endif
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
/* Use ticketlock instead of spinlock */
#define POOL_USE_TICKETLOCK
@@ -39,55 +55,137 @@ extern "C" {
/* Extra error checks */
/* #define POOL_ERROR_CHECK */
-
#ifdef POOL_USE_TICKETLOCK
#include <odp/api/ticketlock.h>
#else
#include <odp/api/spinlock.h>
#endif
-
-struct pool_entry_s {
+typedef struct ODP_ALIGNED_CACHE {
#ifdef POOL_USE_TICKETLOCK
- odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+ odp_ticketlock_t lock ODP_ALIGNED_CACHE;
#else
- odp_spinlock_t lock ODP_ALIGNED_CACHE;
+ odp_spinlock_t lock ODP_ALIGNED_CACHE;
#endif
- char name[ODP_POOL_NAME_LEN];
- odp_pool_param_t params;
- odp_pool_t pool_hdl;
+ uint32_t pool_idx;
+
+ /* Everything under this mark is memset() to zero on pool create */
+ uint8_t memset_mark;
struct rte_mempool *rte_mempool;
-};
+ uint32_t seg_len; /* Initial packet segment length (excludes endmark) */
+ uint32_t ext_head_offset;
+ uint32_t num;
+ uint32_t num_populated;
+ odp_pool_type_t type_2;
+ uint8_t type;
+ uint8_t pool_ext;
+ odp_pool_param_t params;
+ odp_pool_ext_param_t ext_param;
+ odp_shm_t uarea_shm;
+ uint64_t uarea_shm_size;
+ uint32_t param_uarea_size;
+ uint32_t uarea_size;
+ uint32_t trailer_size; /* Endmark size */
+ uint8_t *uarea_base_addr;
+ char name[ODP_POOL_NAME_LEN];
-typedef union pool_entry_u {
- struct pool_entry_s s;
+} pool_t;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pool_entry_s))];
+typedef struct pool_global_t {
+ pool_t pool[CONFIG_POOLS];
+ odp_shm_t shm;
-} pool_entry_t;
+ struct {
+ uint32_t pkt_max_num;
+ } config;
-extern void *pool_entry_ptr[];
+} pool_global_t;
-static inline uint32_t pool_handle_to_index(odp_pool_t pool_hdl)
+extern pool_global_t *_odp_pool_glb;
+
+static inline pool_t *_odp_pool_entry_from_idx(uint32_t pool_idx)
+{
+ return &_odp_pool_glb->pool[pool_idx];
+}
+
+static inline pool_t *_odp_pool_entry(odp_pool_t pool_hdl)
+{
+ return (pool_t *)(uintptr_t)pool_hdl;
+}
+
+static inline odp_pool_t _odp_pool_handle(pool_t *pool)
+{
+ return (odp_pool_t)(uintptr_t)pool;
+}
+
+static inline int _odp_event_alloc_multi(pool_t *pool, _odp_event_hdr_t *event_hdr[], int num)
{
- return _odp_typeval(pool_hdl);
+ int i;
+ struct rte_mempool *mp = pool->rte_mempool;
+
+ if (odp_likely(rte_mempool_get_bulk(mp, (void **)event_hdr, num) == 0))
+ return num;
+
+ for (i = 0; i < num; i++) {
+ struct rte_mbuf *mbuf;
+
+ if (odp_unlikely(rte_mempool_get(mp, (void **)&mbuf) != 0))
+ return i;
+
+ event_hdr[i] = _odp_event_hdr(_odp_event_from_mbuf(mbuf));
+ }
+
+ return i;
}
-static inline void *get_pool_entry(uint32_t pool_id)
+static inline odp_event_t _odp_event_alloc(pool_t *pool)
{
- return pool_entry_ptr[pool_id];
+ struct rte_mbuf *mbuf;
+ struct rte_mempool *mp = pool->rte_mempool;
+
+ if (odp_unlikely(rte_mempool_get(mp, (void **)&mbuf) != 0))
+ return ODP_EVENT_INVALID;
+
+ return _odp_event_from_mbuf(mbuf);
}
-static inline pool_entry_t *odp_pool_to_entry(odp_pool_t pool)
+static inline void _odp_event_free_multi(_odp_event_hdr_t *event_hdr[], int num_free)
{
- return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool));
+ struct rte_mbuf *mbuf_tbl[num_free];
+ struct rte_mempool *mp_pending;
+ unsigned int num_pending;
+
+ mbuf_tbl[0] = &event_hdr[0]->mb;
+ mp_pending = mbuf_tbl[0]->pool;
+ num_pending = 1;
+
+ for (int i = 1; i < num_free; i++) {
+ struct rte_mbuf *mbuf = &event_hdr[i]->mb;
+
+ if (mbuf->pool != mp_pending) {
+ rte_mempool_put_bulk(mp_pending, (void **)mbuf_tbl, num_pending);
+ mbuf_tbl[0] = mbuf;
+ num_pending = 1;
+ mp_pending = mbuf->pool;
+ } else {
+ mbuf_tbl[num_pending++] = mbuf;
+ }
+ }
+ rte_mempool_put_bulk(mp_pending, (void **)mbuf_tbl, num_pending);
}
-static inline odp_pool_t pool_index_to_handle(uint32_t pool_id)
+static inline void _odp_event_free(odp_event_t event)
{
- return _odp_cast_scalar(odp_pool_t, pool_id);
+ struct rte_mbuf *mbuf = _odp_event_to_mbuf(event);
+
+ rte_mempool_put(mbuf->pool, mbuf);
}
+int _odp_event_is_valid(odp_event_t event);
+
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-dpdk/include/odp_ptr_ring_mpmc_internal.h b/platform/linux-dpdk/include/odp_ptr_ring_mpmc_internal.h
new file mode 100644
index 000000000..3e56c7f0e
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_ptr_ring_mpmc_internal.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_INTERNAL_H_
+#define ODP_RING_MPMC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/queue.h>
+
+#include <odp_debug_internal.h>
+
+#include <rte_config.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+
+/* Lock-free ring for multi-producer / multi-consumer usage.
+ *
+ * Enqueue and dequeue operations can be done concurrently.
+ */
+typedef struct rte_ring *ring_mpmc_t;
+
+static void ring_mpmc_name_to_mz_name(const char *name, char *ring_name)
+{
+ int i = 0;
+ int max_len = ODP_QUEUE_NAME_LEN < RTE_RING_NAMESIZE ?
+ ODP_QUEUE_NAME_LEN : RTE_RING_NAMESIZE;
+
+ do {
+ snprintf(ring_name, max_len, "%d-mpmc-%s", i++, name);
+ ring_name[max_len - 1] = 0;
+ } while (rte_ring_lookup(ring_name) != NULL);
+}
+
+/* Initialize ring. Ring size must be a power of two. */
+static inline ring_mpmc_t ring_mpmc_create(const char *name, uint32_t size)
+{
+ struct rte_ring *rte_ring;
+ char ring_name[RTE_RING_NAMESIZE];
+
+ /* Ring name must be unique */
+ ring_mpmc_name_to_mz_name(name, ring_name);
+
+ rte_ring = rte_ring_create(ring_name, size, rte_socket_id(), 0);
+ if (rte_ring == NULL) {
+ _ODP_ERR("Creating DPDK ring failed: %s\n", rte_strerror(rte_errno));
+ return NULL;
+ }
+
+ return rte_ring;
+}
+
+/* Free all memory used by the ring. */
+static inline void ring_mpmc_free(ring_mpmc_t ring)
+{
+ rte_ring_free(ring);
+}
+
+/* Dequeue data from the ring head. Max_num is smaller than ring size. */
+static inline uint32_t ring_mpmc_deq_multi(ring_mpmc_t ring, void **data,
+ uint32_t max_num)
+{
+ return rte_ring_mc_dequeue_burst(ring, data, max_num, NULL);
+}
+
+/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
+static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t ring, void **data,
+ uint32_t num_data)
+{
+ return rte_ring_mp_enqueue_burst(ring, data, num_data, NULL);
+}
+
+/* Check if ring is empty */
+static inline int ring_mpmc_is_empty(ring_mpmc_t ring)
+{
+ return rte_ring_empty(ring);
+}
+
+/* Return current ring length */
+static inline int ring_mpmc_length(ring_mpmc_t ring)
+{
+ return rte_ring_count(ring);
+}
+
+/* Return maximum ring length */
+static inline int ring_mpmc_max_length(ring_mpmc_t ring)
+{
+ return rte_ring_get_capacity(ring);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_ptr_ring_spsc_internal.h b/platform/linux-dpdk/include/odp_ptr_ring_spsc_internal.h
new file mode 100644
index 000000000..3680f087a
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_ptr_ring_spsc_internal.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_SPSC_INTERNAL_H_
+#define ODP_RING_SPSC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/queue.h>
+
+#include <odp_debug_internal.h>
+
+#include <rte_config.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+
+/* Lock-free ring for single-producer / single-consumer usage.
+ *
+ * Thread doing an operation may be different each time, but the same operation
+ * (enq- or dequeue) must not be called concurrently. The next thread may call
+ * the same operation only when it's sure that the previous thread have returned
+ * from the call, or will never return back to finish the call when interrupted
+ * during the call.
+ *
+ * Enqueue and dequeue operations can be done concurrently.
+ */
+typedef struct rte_ring *ring_spsc_t;
+
+static void ring_spsc_name_to_mz_name(const char *name, char *ring_name)
+{
+ int i = 0;
+ int max_len = ODP_QUEUE_NAME_LEN < RTE_RING_NAMESIZE ?
+ ODP_QUEUE_NAME_LEN : RTE_RING_NAMESIZE;
+
+ do {
+ snprintf(ring_name, max_len, "%d-%s", i++, name);
+ ring_name[max_len - 1] = 0;
+ } while (rte_ring_lookup(ring_name) != NULL);
+}
+
+/* Initialize ring. Ring size must be a power of two. */
+static inline ring_spsc_t ring_spsc_create(const char *name, uint32_t size)
+{
+ struct rte_ring *rte_ring;
+ char ring_name[RTE_RING_NAMESIZE];
+
+ /* Ring name must be unique */
+ ring_spsc_name_to_mz_name(name, ring_name);
+
+ rte_ring = rte_ring_create(ring_name, size, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (rte_ring == NULL) {
+ _ODP_ERR("Creating DPDK ring failed: %s\n", rte_strerror(rte_errno));
+ return NULL;
+ }
+
+ return rte_ring;
+}
+
+/* Free all memory used by the ring. */
+static inline void ring_spsc_free(ring_spsc_t ring)
+{
+ rte_ring_free(ring);
+}
+
+/* Dequeue data from the ring head. Max_num is smaller than ring size. */
+static inline uint32_t ring_spsc_deq_multi(ring_spsc_t ring, void **data,
+ uint32_t max_num)
+{
+ return rte_ring_sc_dequeue_burst(ring, data, max_num, NULL);
+}
+
+/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
+static inline uint32_t ring_spsc_enq_multi(ring_spsc_t ring, void **data,
+ uint32_t num_data)
+{
+ return rte_ring_sp_enqueue_burst(ring, data, num_data, NULL);
+}
+
+/* Check if ring is empty */
+static inline int ring_spsc_is_empty(ring_spsc_t ring)
+{
+ return rte_ring_empty(ring);
+}
+
+/* Return current ring length */
+static inline int ring_spsc_length(ring_spsc_t ring)
+{
+ return rte_ring_count(ring);
+}
+
+/* Return maximum ring length */
+static inline int ring_spsc_max_length(ring_spsc_t ring)
+{
+ return rte_ring_get_capacity(ring);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_ptr_ring_st_internal.h b/platform/linux-dpdk/include/odp_ptr_ring_st_internal.h
new file mode 100644
index 000000000..cc258aeb4
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_ptr_ring_st_internal.h
@@ -0,0 +1,99 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_ST_INTERNAL_H_
+#define ODP_RING_ST_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/queue.h>
+
+#include <odp_debug_internal.h>
+
+#include <rte_config.h>
+#include <rte_ring.h>
+#include <rte_errno.h>
+
+typedef struct rte_ring *ring_st_t;
+
+/* Basic ring for single thread usage. Operations must be synchronized by using
+ * locks (or other means), when multiple threads use the same ring. */
+
+static void name_to_mz_name(const char *name, char *ring_name)
+{
+ int i = 0;
+ int max_len = ODP_QUEUE_NAME_LEN < RTE_RING_NAMESIZE ?
+ ODP_QUEUE_NAME_LEN : RTE_RING_NAMESIZE;
+
+ do {
+ snprintf(ring_name, max_len, "%d-%s", i++, name);
+ ring_name[max_len - 1] = 0;
+ } while (rte_ring_lookup(ring_name) != NULL);
+}
+
+/* Initialize ring. Ring size must be a power of two. */
+static inline ring_st_t ring_st_create(const char *name, uint32_t size)
+{
+ struct rte_ring *rte_ring;
+ char ring_name[RTE_RING_NAMESIZE];
+
+ /* Ring name must be unique */
+ name_to_mz_name(name, ring_name);
+
+ rte_ring = rte_ring_create(ring_name, size, rte_socket_id(),
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ if (rte_ring == NULL) {
+ _ODP_ERR("Creating DPDK ring failed: %s\n", rte_strerror(rte_errno));
+ return NULL;
+ }
+
+ return rte_ring;
+}
+
+static inline void ring_st_free(ring_st_t ring)
+{
+ rte_ring_free(ring);
+}
+
+/* Dequeue data from the ring head. Max_num is smaller than ring size. */
+static inline uint32_t ring_st_deq_multi(ring_st_t ring, void **data,
+ uint32_t max_num)
+{
+ return rte_ring_dequeue_burst(ring, data, max_num, NULL);
+}
+
+/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
+static inline uint32_t ring_st_enq_multi(ring_st_t ring, void **data,
+ uint32_t num_data)
+{
+ return rte_ring_enqueue_burst(ring, data, num_data, NULL);
+}
+
+/* Check if ring is empty */
+static inline int ring_st_is_empty(ring_st_t ring)
+{
+ return rte_ring_empty(ring);
+}
+
+/* Return current ring length */
+static inline int ring_st_length(ring_st_t ring)
+{
+ return rte_ring_count(ring);
+}
+
+/* Return maximum ring length */
+static inline int ring_st_max_length(ring_st_t ring)
+{
+ return rte_ring_get_capacity(ring);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_queue_basic_internal.h b/platform/linux-dpdk/include/odp_queue_basic_internal.h
new file mode 100644
index 000000000..4e693d9b0
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_queue_basic_internal.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_QUEUE_BASIC_INTERNAL_H_
+#define ODP_QUEUE_BASIC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+#include <odp/api/atomic.h>
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/queue.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_buffer_internal.h>
+#include <odp_config_internal.h>
+#include <odp_forward_typedefs_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ptr_ring_mpmc_internal.h>
+#include <odp_ptr_ring_st_internal.h>
+#include <odp_ptr_ring_spsc_internal.h>
+#include <odp_queue_if.h>
+#include <odp_queue_lf.h>
+
+#include <stdint.h>
+
+#define QUEUE_STATUS_FREE 0
+#define QUEUE_STATUS_DESTROYED 1
+#define QUEUE_STATUS_READY 2
+#define QUEUE_STATUS_NOTSCHED 3
+#define QUEUE_STATUS_SCHED 4
+
+typedef struct ODP_ALIGNED_CACHE queue_entry_s {
+ /* The first cache line is read only */
+ queue_enq_fn_t enqueue ODP_ALIGNED_CACHE;
+ queue_deq_fn_t dequeue;
+ queue_enq_multi_fn_t enqueue_multi;
+ queue_deq_multi_fn_t dequeue_multi;
+ uint32_t index;
+ odp_queue_t handle;
+ odp_queue_type_t type;
+
+ ring_mpmc_t ring_mpmc;
+
+ odp_ticketlock_t lock;
+ union {
+ ring_st_t ring_st;
+ ring_spsc_t ring_spsc;
+ };
+
+ odp_atomic_u64_t num_timers;
+ int status;
+
+ queue_deq_multi_fn_t orig_dequeue_multi;
+ odp_queue_param_t param;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ void *queue_lf;
+ int spsc;
+ char name[ODP_QUEUE_NAME_LEN];
+} queue_entry_t;
+
+typedef struct queue_global_t {
+ queue_entry_t queue[CONFIG_MAX_QUEUES];
+ uint32_t *ring_data;
+ uint32_t queue_lf_num;
+ uint32_t queue_lf_size;
+ queue_lf_func_t queue_lf_func;
+ odp_shm_t queue_gbl_shm;
+ odp_shm_t queue_ring_shm;
+
+ struct {
+ uint32_t max_queue_size;
+ uint32_t default_queue_size;
+ } config;
+
+} queue_global_t;
+
+extern queue_global_t *_odp_queue_glb;
+
+static inline uint32_t queue_to_index(odp_queue_t handle)
+{
+ queue_entry_t *qentry = (queue_entry_t *)(uintptr_t)handle;
+
+ return qentry->index;
+}
+
+static inline queue_entry_t *qentry_from_index(uint32_t queue_id)
+{
+ return &_odp_queue_glb->queue[queue_id];
+}
+
+static inline odp_queue_t queue_from_index(uint32_t queue_id)
+{
+ return (odp_queue_t)qentry_from_index(queue_id);
+}
+
+static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
+{
+ return (queue_entry_t *)(uintptr_t)handle;
+}
+
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
+
+/* Functions for schedulers */
+void _odp_sched_queue_set_status(uint32_t queue_index, int status);
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
+ int update_status);
+int _odp_sched_queue_empty(uint32_t queue_index);
+
+/* Functions by schedulers */
+int _odp_sched_basic_get_spread(uint32_t queue_index);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_shm_internal.h b/platform/linux-dpdk/include/odp_shm_internal.h
new file mode 100644
index 000000000..b83b63a91
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_shm_internal.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP shared memory - implementation internal
+ */
+
+#ifndef ODP_SHM_INTERNAL_H_
+#define ODP_SHM_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/init.h>
+
+int _odp_shm_init_global(const odp_init_t *init);
+
+int _odp_shm_init_local(void);
+
+int _odp_shm_term_global(void);
+
+int _odp_shm_term_local(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-dpdk/include/odp_thread_internal.h b/platform/linux-dpdk/include/odp_thread_internal.h
new file mode 100644
index 000000000..6342cc420
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_thread_internal.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_THREAD_INTERNAL_H_
+#define ODP_THREAD_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/**
+ * Read CPU IDs of active ODP threads
+ *
+ * @param[out] cpu_ids CPU ID array
+ * @param max_num Maximum number of CPU IDs to write
+ *
+ * @return Number of CPU IDs written to the output array
+ */
+int _odp_thread_cpu_ids(unsigned int cpu_ids[], int max_num);
+
+/**
+ * Read current epoch value of thread mask all
+ *
+ * @return Thread mask all epoch value
+ */
+uint64_t _odp_thread_thrmask_epoch(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/platform/linux-dpdk/include/odp_time_internal.h b/platform/linux-dpdk/include/odp_time_internal.h
deleted file mode 100644
index 8cbf81419..000000000
--- a/platform/linux-dpdk/include/odp_time_internal.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP time service
- */
-
-#ifndef ODP_TIME_INTERNAL_H_
-#define ODP_TIME_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef uint64_t (*time_to_ns_fn) (odp_time_t time);
-typedef odp_time_t (*time_diff_fn) (odp_time_t t2, odp_time_t t1);
-typedef odp_time_t (*time_curr_fn)(void);
-typedef int (*time_cmp_fn) (odp_time_t t2, odp_time_t t1);
-typedef odp_time_t (*time_sum_fn) (odp_time_t t1, odp_time_t t2);
-typedef odp_time_t (*time_local_from_ns_fn) (uint64_t ns);
-typedef uint64_t (*time_local_res_fn)(void);
-typedef uint64_t (*time_to_u64_fn) (odp_time_t time);
-
-typedef struct time_handler_ {
- time_to_ns_fn time_to_ns;
- time_diff_fn time_diff;
- time_curr_fn time_curr;
- time_cmp_fn time_cmp;
- time_sum_fn time_sum;
- time_local_from_ns_fn time_local_from_ns;
- time_local_res_fn time_local_res;
- time_to_u64_fn time_to_u64;
-} time_handler_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-dpdk/include/odp_timer_internal.h b/platform/linux-dpdk/include/odp_timer_internal.h
new file mode 100644
index 000000000..35a4911af
--- /dev/null
+++ b/platform/linux-dpdk/include/odp_timer_internal.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP timeout descriptor - implementation internal
+ */
+
+#ifndef ODP_TIMER_INTERNAL_H_
+#define ODP_TIMER_INTERNAL_H_
+
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+#include <odp/api/timer.h>
+
+#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_pool_internal.h>
+
+#include <rte_config.h>
+
+#include <stdint.h>
+
+/*
+ * Use as the argument to timer_run() to force a scan and to ignore rate
+ * limit.
+ */
+#define TIMER_SCAN_FORCE INT32_MAX
+
+/**
+ * Internal Timeout header
+ */
+typedef struct ODP_ALIGNED_CACHE odp_timeout_hdr_t {
+ /* Underlying DPDK rte_mbuf */
+ struct rte_mbuf mb;
+
+ /* Common internal header */
+ _odp_event_hdr_int_t event_hdr;
+
+ /* Requested expiration time */
+ uint64_t expiration;
+
+ /* User ptr inherited from parent timer */
+ const void *user_ptr;
+
+ /* User area pointer */
+ void *uarea_addr;
+
+ /* Parent timer */
+ odp_timer_t timer;
+
+} odp_timeout_hdr_t;
+
+ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) <= 3 * RTE_CACHE_LINE_SIZE,
+ "Additional cache line required for odp_timeout_hdr_t");
+
+/* A larger decrement value should be used after receiving events compared to
+ * an 'empty' call. */
+void _odp_timer_run_inline(int dec);
+
+/* Static inline wrapper to minimize modification of schedulers. */
+static inline uint64_t timer_run(int dec)
+{
+ if (odp_global_rw->inline_timers)
+ _odp_timer_run_inline(dec);
+
+ /* Time to the next timeout not available with DPDK timers */
+ return UINT64_MAX;
+}
+
+#endif
diff --git a/platform/linux-dpdk/libodp-dpdk.pc.in b/platform/linux-dpdk/libodp-dpdk.pc.in
new file mode 100644
index 000000000..8fcc4ac0a
--- /dev/null
+++ b/platform/linux-dpdk/libodp-dpdk.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: lib@ODP_LIB_NAME@
+Description: The ODP packet processing engine
+Version: @PKGCONFIG_VERSION@
+Requires.private: libconfig
+Libs: -L${libdir} -l@ODP_LIB_NAME@ @DPDK_LIBS_NON_ABI_COMPAT@ @ATOMIC_LIBS_NON_ABI_COMPAT@
+Libs.private: @DPDK_LIBS_ABI_COMPAT@ @OPENSSL_STATIC_LIBS@ @PTHREAD_LIBS@ @TIMER_LIBS@ -lpthread @ATOMIC_LIBS_ABI_COMPAT@ @ORT_LIBS@
+Cflags: -I${includedir} @DPDK_CFLAGS@
diff --git a/platform/linux-dpdk/m4/configure.m4 b/platform/linux-dpdk/m4/configure.m4
index fb9a91399..535cfc5fa 100644
--- a/platform/linux-dpdk/m4/configure.m4
+++ b/platform/linux-dpdk/m4/configure.m4
@@ -1,107 +1,117 @@
-# Enable -fvisibility=hidden if using a gcc that supports it
-OLD_CFLAGS="$CFLAGS"
-AC_MSG_CHECKING([whether $CC supports -fvisibility=hidden])
-VISIBILITY_CFLAGS="-fvisibility=hidden"
-CFLAGS="$CFLAGS $VISIBILITY_CFLAGS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM()], AC_MSG_RESULT([yes]),
- [VISIBILITY_CFLAGS=""; AC_MSG_RESULT([no])]);
+ODP_IMPLEMENTATION_NAME="odp-dpdk"
+ODP_LIB_NAME="odp-dpdk"
-AC_SUBST(VISIBILITY_CFLAGS)
-# Restore CFLAGS; VISIBILITY_CFLAGS are added to it where needed.
-CFLAGS=$OLD_CFLAGS
+ODP_VISIBILITY
+ODP_ATOMIC
-AC_MSG_CHECKING(for GCC atomic builtins)
-AC_LINK_IFELSE(
- [AC_LANG_SOURCE(
- [[int main() {
- int v = 1;
- __atomic_fetch_add(&v, 1, __ATOMIC_RELAXED);
- __atomic_fetch_sub(&v, 1, __ATOMIC_RELAXED);
- __atomic_store_n(&v, 1, __ATOMIC_RELAXED);
- __atomic_load_n(&v, __ATOMIC_RELAXED);
- return 0;
- }
- ]])],
- AC_MSG_RESULT(yes),
- AC_MSG_RESULT(no)
- echo "GCC-style __atomic builtins not supported by the compiler."
- echo "Use newer version. For gcc > 4.7.0"
- exit -1)
-
-# linux-generic PCAP support is not relevant as the code doesn't use
-# linux-generic pktio at all. And DPDK has its own PCAP support anyway
-AM_CONDITIONAL([HAVE_PCAP], [false])
-m4_include([platform/linux-dpdk/m4/odp_pthread.m4])
+m4_include([platform/linux-dpdk/m4/odp_cpu.m4])
+m4_include([platform/linux-dpdk/m4/odp_event_validation.m4])
+m4_include([platform/linux-dpdk/m4/odp_libconfig.m4])
m4_include([platform/linux-dpdk/m4/odp_openssl.m4])
+m4_include([platform/linux-dpdk/m4/odp_pcapng.m4])
+m4_include([platform/linux-dpdk/m4/odp_scheduler.m4])
+m4_include([platform/linux-dpdk/m4/odp_wfe.m4])
+m4_include([platform/linux-dpdk/m4/odp_ml.m4])
+
+ODP_EVENT_VALIDATION
+ODP_PTHREAD
+ODP_SCHEDULER
+ODP_TIMER
##########################################################################
-# DPDK build variables
+# Set DPDK install path
##########################################################################
-DPDK_DRIVER_DIR=/usr/lib/$(uname -m)-linux-gnu
-AS_CASE($host_cpu, [x86_64], [AM_CPPFLAGS="$AM_CPPFLAGS -msse4.2"])
-if test ${DPDK_DEFAULT_DIR} = 1; then
- AM_CPPFLAGS="$AM_CPPFLAGS -I/usr/include/dpdk"
-else
- DPDK_DRIVER_DIR=$SDK_INSTALL_PATH/lib
- AM_CPPFLAGS="$AM_CPPFLAGS -I$SDK_INSTALL_PATH/include"
- AM_LDFLAGS="$AM_LDFLAGS -L$SDK_INSTALL_PATH/lib"
-fi
-
-# Check if we should link against the static or dynamic DPDK library
-AC_ARG_ENABLE([shared-dpdk],
- [ --enable-shared-dpdk link against the shared DPDK library],
- [if test "x$enableval" = "xyes"; then
- shared_dpdk=true
- fi])
-AM_CONDITIONAL([SHARED_DPDK], [test x$shared_dpdk = xtrue])
+AC_ARG_WITH([dpdk-path],
+[AS_HELP_STRING([--with-dpdk-path=DIR],
+ [path to DPDK build directory [default=system] (linux-dpdk)])],
+ [DPDK_PATH="$withval"],[DPDK_PATH=system])
##########################################################################
-# Save and set temporary compilation flags
+# Use shared DPDK library
##########################################################################
-OLD_LDFLAGS=$LDFLAGS
-OLD_CPPFLAGS=$CPPFLAGS
-LDFLAGS="$AM_LDFLAGS $LDFLAGS"
-CPPFLAGS="$AM_CPPFLAGS $CPPFLAGS"
+dpdk_shared=no
+AC_ARG_ENABLE([dpdk-shared],
+ [AS_HELP_STRING([--enable-dpdk-shared],
+ [use shared DPDK library [default=disabled] (linux-dpdk)])],
+ [if test x$enableval = xyes; then
+ dpdk_shared=yes
+ fi])
##########################################################################
# Check for DPDK availability
+#
+# DPDK pmd drivers are not linked unless the --whole-archive option is
+# used. No spaces are allowed between the --whole-archive flags.
##########################################################################
-AC_CHECK_HEADERS([rte_config.h], [],
- [AC_MSG_FAILURE(["can't find DPDK headers"])])
-
-AC_SEARCH_LIBS([rte_eal_init], [dpdk], [],
- [AC_MSG_ERROR([DPDK libraries required])], [-ldl])
+ODP_DPDK([$DPDK_PATH], [$dpdk_shared], [],
+ [AC_MSG_FAILURE([can't find DPDK])])
+AM_CONDITIONAL([ODP_PKTIO_PCAP], [test x$have_pmd_pcap = xyes])
-##########################################################################
-# In case of static linking DPDK pmd drivers are not linked unless the
-# --whole-archive option is used. No spaces are allowed between the
-# --whole-arhive flags.
-##########################################################################
-if test "x$shared_dpdk" = "xtrue"; then
- LIBS="$LIBS -Wl,--no-as-needed,-ldpdk,-as-needed -ldl -lm -lpcap"
+# In non-abi-compat mode DPDK is exposed to the application
+if test $ODP_ABI_COMPAT -eq 1; then
+ DPDK_LIBS_ABI_COMPAT=$DPDK_LIBS
+ AC_SUBST([DPDK_LIBS_ABI_COMPAT])
else
- DPDK_PMD=--whole-archive,
- for filename in $DPDK_DRIVER_DIR/*.a; do
- cur_driver=`echo $(basename "$filename" .a) | \
- sed -n 's/^\(librte_pmd_\)/-lrte_pmd_/p' | sed -n 's/$/,/p'`
- # rte_pmd_nfp has external dependencies which break linking
- if test "$cur_driver" = "-lrte_pmd_nfp,"; then
- echo "skip linking rte_pmd_nfp"
- else
- DPDK_PMD+=$cur_driver
- fi
- done
- DPDK_PMD+=--no-whole-archive
-
- LIBS="$LIBS -ldpdk -ldl -lm -lpcap"
- AM_LDFLAGS="$AM_LDFLAGS -Wl,$DPDK_PMD"
+ DPDK_LIBS_NON_ABI_COMPAT=$DPDK_LIBS
+ AC_SUBST([DPDK_LIBS_NON_ABI_COMPAT])
+ # DPDK uses strnlen() internally
+ DPDK_CFLAGS="${DPDK_CFLAGS} -D_GNU_SOURCE"
fi
-##########################################################################
-# Restore old saved variables
-##########################################################################
-LDFLAGS=$OLD_LDFLAGS
-CPPFLAGS=$OLD_CPPFLAGS
+case "${host}" in
+ i?86* | x86*)
+ DPDK_CFLAGS="${DPDK_CFLAGS} -msse4.2"
+ ;;
+esac
+
+# Required for experimental rte_event_port_unlinks_in_progress() API
+DPDK_CFLAGS="${DPDK_CFLAGS} -DALLOW_EXPERIMENTAL_API"
+
+AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS} ${DPDK_LIBS_LT} ${LIBCLI_LIBS} ${ORT_LIBS}"])
+
+# Add text to the end of configure with platform specific settings.
+# Make sure it's aligned same as other lines in configure.ac.
+AS_VAR_APPEND([PLAT_CFG_TEXT], ["
+ event_validation: ${enable_event_validation}
+ openssl: ${with_openssl}
+ openssl_rand: ${openssl_rand}
+ pcap: ${have_pmd_pcap}
+ pcapng: ${have_pcapng}
+ wfe_locks: ${use_wfe_locks}
+ ml_support: ${ml_support}
+ default_config_path: ${default_config_path}"])
+
+ODP_CHECK_CFLAG([-Wno-error=cast-align])
+
+# Ignore Clang specific errors about fields with variable sized type not at the
+# end of a struct or usage of these structs in arrays. This style is used by
+# e.g. timer_pool_t.
+ODP_CHECK_CFLAG([-Wno-error=gnu-variable-sized-type-not-at-end])
+ODP_CHECK_CFLAG([-Wno-error=flexible-array-extensions])
+AC_DEFINE([_ODP_PKTIO_DPDK], [1])
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([PLATFORM_IS_LINUX_DPDK],
+ [test "${with_platform}" = "linux-dpdk"])
AC_CONFIG_FILES([platform/linux-dpdk/Makefile
- platform/linux-dpdk/include/odp/api/plat/static_inline.h])
+ platform/linux-dpdk/libodp-dpdk.pc
+ platform/linux-dpdk/dumpconfig/Makefile
+ platform/linux-dpdk/example/Makefile
+ platform/linux-dpdk/example/ml/Makefile
+ platform/linux-dpdk/test/Makefile
+ platform/linux-dpdk/test/example/Makefile
+ platform/linux-dpdk/test/example/classifier/Makefile
+ platform/linux-dpdk/test/example/generator/Makefile
+ platform/linux-dpdk/test/example/ipsec_api/Makefile
+ platform/linux-dpdk/test/example/ipsec_crypto/Makefile
+ platform/linux-dpdk/test/example/l2fwd_simple/Makefile
+ platform/linux-dpdk/test/example/l3fwd/Makefile
+ platform/linux-dpdk/test/example/packet/Makefile
+ platform/linux-dpdk/test/example/ping/Makefile
+ platform/linux-dpdk/test/example/simple_pipeline/Makefile
+ platform/linux-dpdk/test/example/switch/Makefile
+ platform/linux-dpdk/test/performance/Makefile
+ platform/linux-dpdk/test/performance/dmafwd/Makefile
+ platform/linux-dpdk/test/validation/api/ml/Makefile
+ platform/linux-dpdk/test/validation/api/pktio/Makefile])
+])
diff --git a/platform/linux-dpdk/m4/odp_cpu.m4 b/platform/linux-dpdk/m4/odp_cpu.m4
new file mode 120000
index 000000000..effd70d56
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_cpu.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_cpu.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_event_validation.m4 b/platform/linux-dpdk/m4/odp_event_validation.m4
new file mode 120000
index 000000000..0d457c6ff
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_event_validation.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_event_validation.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_libconfig.m4 b/platform/linux-dpdk/m4/odp_libconfig.m4
new file mode 100644
index 000000000..2bf89ac2e
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_libconfig.m4
@@ -0,0 +1,36 @@
+##########################################################################
+# Configuration file version
+##########################################################################
+m4_define([_odp_config_version_generation], [0])
+m4_define([_odp_config_version_major], [1])
+m4_define([_odp_config_version_minor], [26])
+
+m4_define([_odp_config_version],
+ [_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
+
+_ODP_CONFIG_VERSION_GENERATION=_odp_config_version_generation
+AC_SUBST(_ODP_CONFIG_VERSION_GENERATION)
+_ODP_CONFIG_VERSION_MAJOR=_odp_config_version_major
+AC_SUBST(_ODP_CONFIG_VERSION_MAJOR)
+_ODP_CONFIG_VERSION_MINOR=_odp_config_version_minor
+AC_SUBST(_ODP_CONFIG_VERSION_MINOR)
+
+##########################################################################
+# Set optional path for the default configuration file
+##########################################################################
+default_config_path="${srcdir}/config/odp-linux-dpdk.conf"
+
+AC_CHECK_PROGS([REALPATH], [realpath])
+AS_IF([test -z "$REALPATH"], [AC_MSG_ERROR([Could not find 'realpath'])])
+
+AC_ARG_WITH([config-file],
+AS_HELP_STRING([--with-config-file=FILE], [path to the default configuration file]
+ [(this file must include all configuration options)]
+ [[default=SRCDIR/config/odp-<platform>.conf]]),
+ [default_config_path=$withval], [])
+
+rel_default_config_path=`realpath --relative-to=$(pwd) ${default_config_path}`
+AC_SUBST(default_config_path)
+AC_SUBST(rel_default_config_path)
+
+ODP_LIBCONFIG([linux-dpdk], [$rel_default_config_path])
diff --git a/platform/linux-dpdk/m4/odp_ml.m4 b/platform/linux-dpdk/m4/odp_ml.m4
new file mode 120000
index 000000000..6e76047e5
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_ml.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_ml.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_pcapng.m4 b/platform/linux-dpdk/m4/odp_pcapng.m4
new file mode 120000
index 000000000..fbdc23722
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_pcapng.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_pcapng.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_pthread.m4 b/platform/linux-dpdk/m4/odp_pthread.m4
deleted file mode 120000
index e24304ae3..000000000
--- a/platform/linux-dpdk/m4/odp_pthread.m4
+++ /dev/null
@@ -1 +0,0 @@
-../../linux-generic/m4/odp_pthread.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_scheduler.m4 b/platform/linux-dpdk/m4/odp_scheduler.m4
new file mode 120000
index 000000000..984404ed5
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_scheduler.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_scheduler.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/m4/odp_wfe.m4 b/platform/linux-dpdk/m4/odp_wfe.m4
new file mode 120000
index 000000000..2526a9c83
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_wfe.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_wfe.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/odp_buffer.c b/platform/linux-dpdk/odp_buffer.c
index 5207f2530..6cabe41e2 100644
--- a/platform/linux-dpdk/odp_buffer.c
+++ b/platform/linux-dpdk/odp_buffer.c
@@ -1,105 +1,61 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/align.h>
#include <odp/api/buffer.h>
+
+#include <odp/api/plat/buffer_inline_types.h>
+
#include <odp_buffer_internal.h>
-#include <odp_buffer_inlines.h>
#include <odp_debug_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_print_internal.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
-odp_buffer_t odp_buffer_from_event(odp_event_t ev)
-{
- return (odp_buffer_t)ev;
-}
-
-odp_event_t odp_buffer_to_event(odp_buffer_t buf)
-{
- return (odp_event_t)buf;
-}
-
-void *odp_buffer_addr(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
-
- return hdr->mb.buf_addr;
-}
-
-uint32_t odp_buffer_size(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
- struct rte_mbuf *mbuf = (struct rte_mbuf *)hdr;
-
- return mbuf->buf_len;
-}
-
-int _odp_buffer_type(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
-
- return hdr->type;
-}
+#include <odp/visibility_begin.h>
-void _odp_buffer_type_set(odp_buffer_t buf, int type)
-{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
+/* Buffer header field offsets for inline functions */
+const _odp_buffer_inline_offset_t _odp_buffer_inline_offset ODP_ALIGNED_CACHE = {
+ .uarea_addr = offsetof(odp_buffer_hdr_t, uarea_addr)
+};
- hdr->type = type;
-}
+#include <odp/visibility_end.h>
-int odp_buffer_is_valid(odp_buffer_t buf)
-{
- /* We could call rte_mbuf_sanity_check, but that panics
- * and aborts the program */
- return buf != ODP_BUFFER_INVALID;
-}
-
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
+void odp_buffer_print(odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
+ pool_t *pool;
int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
if (!odp_buffer_is_valid(buf)) {
- ODP_PRINT("Buffer is not valid.\n");
- return len;
+ _ODP_ERR("Buffer is not valid.\n");
+ return;
}
- hdr = buf_hdl_to_hdr(buf);
-
- len += snprintf(&str[len], n-len,
- "Buffer\n");
- len += snprintf(&str[len], n-len,
- " pool %p\n", hdr->mb.pool);
- len += snprintf(&str[len], n-len,
- " phy_addr %"PRIu64"\n", hdr->mb.buf_physaddr);
- len += snprintf(&str[len], n-len,
- " addr %p\n", hdr->mb.buf_addr);
- len += snprintf(&str[len], n-len,
- " size %u\n", hdr->mb.buf_len);
- len += snprintf(&str[len], n-len,
- " ref_count %i\n",
- rte_mbuf_refcnt_read(&hdr->mb));
- len += snprintf(&str[len], n-len,
- " odp type %i\n", hdr->type);
-
- return len;
-}
-
-void odp_buffer_print(odp_buffer_t buf)
-{
- int max_len = 512;
- char str[max_len];
- int len;
-
- len = odp_buffer_snprint(str, max_len-1, buf);
+ hdr = _odp_buf_hdr(buf);
+ pool = _odp_pool_entry(hdr->event_hdr.pool);
+
+ len += _odp_snprint(&str[len], n - len, "Buffer\n------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_buffer_to_u64(buf));
+ len += _odp_snprint(&str[len], n - len, " pool index %u\n", pool->pool_idx);
+ len += _odp_snprint(&str[len], n - len, " buffer index %u\n", hdr->event_hdr.index);
+ len += _odp_snprint(&str[len], n - len, " addr %p\n", odp_buffer_addr(buf));
+ len += _odp_snprint(&str[len], n - len, " size %u\n", odp_buffer_size(buf));
+ len += _odp_snprint(&str[len], n - len, " user area %p\n", hdr->uarea_addr);
str[len] = 0;
- ODP_PRINT("\n%s\n", str);
+ _ODP_PRINT("%s\n", str);
}
uint64_t odp_buffer_to_u64(odp_buffer_t hdl)
diff --git a/platform/linux-dpdk/odp_crypto.c b/platform/linux-dpdk/odp_crypto.c
new file mode 100644
index 000000000..6170dd8df
--- /dev/null
+++ b/platform/linux-dpdk/odp_crypto.c
@@ -0,0 +1,2215 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2018-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/align.h>
+#include <odp/api/buffer.h>
+#include <odp/api/crypto.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/packet.h>
+#include <odp/api/random.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/time_inlines.h>
+
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_macros_internal.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+
+#include <rte_config.h>
+#include <rte_crypto.h>
+#include <rte_cryptodev.h>
+#include <rte_malloc.h>
+#include <rte_version.h>
+
+#include <string.h>
+#include <math.h>
+
+#define MAX_BURST 32
+#define MAX_SESSIONS 4000
+/*
+ * Max size of per-thread session object cache. May be useful if sessions
+ * are created and destroyed very frequently.
+ */
+#define SESSION_CACHE_SIZE 16
+/*
+ * Max size of per-thread crypto operation cache. We can have at most
+ * MAX_BURST operations per thread in flight at a time. Make op cache
+ * larger than MAX_BURST to avoid frequent access of the shared pool.
+ */
+#define OP_CACHE_SIZE (2 * MAX_BURST)
+/*
+ * Have enough descriptors for a full burst plus some extra as required
+ * by the crypto drivers.
+ */
+#define NB_DESC_PER_QUEUE_PAIR (2 * MAX_BURST)
+/* Required by crypto_aesni_mb driver */
+ODP_STATIC_ASSERT(NB_DESC_PER_QUEUE_PAIR > MAX_BURST,
+ "NB_DESC_PER_QUEUE_PAIR must be greater than MAX_BURST");
+/* Required by crypto_aesni_mb driver */
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(NB_DESC_PER_QUEUE_PAIR),
+ "NB_DESC_PER_QUEUE_PAIR must be a power of 2");
+
+#define MAX_IV_LENGTH 16
+#define AES_CCM_AAD_OFFSET 18
+
+/* Max number of rte_cryptodev_dequeue_burst() retries before error printout */
+#define MAX_DEQ_RETRIES (10 * 1000 * 1000)
+/* Min delay between rte_cryptodev_dequeue_burst() retries in nanoseconds */
+#define DEQ_RETRY_DELAY_NS 10
+
+typedef struct crypto_session_entry_s {
+ struct crypto_session_entry_s *next;
+
+ /* Session creation parameters */
+ odp_crypto_session_param_t p;
+ struct rte_cryptodev_sym_session *rte_session;
+ struct {
+ unsigned int cdev_qpairs_shared:1;
+ unsigned int chained_bufs_ok:1;
+ unsigned int aead:1;
+ } flags;
+ uint8_t cdev_id;
+
+} crypto_session_entry_t;
+
+typedef struct crypto_global_s {
+ odp_spinlock_t lock;
+ uint8_t enabled_crypto_devs;
+ uint8_t enabled_crypto_dev_ids[RTE_CRYPTO_MAX_DEVS];
+ uint16_t enabled_crypto_dev_qpairs[RTE_CRYPTO_MAX_DEVS];
+ odp_bool_t enabled_crypto_dev_qpairs_shared[RTE_CRYPTO_MAX_DEVS];
+ int is_crypto_dev_initialized;
+ struct rte_mempool *crypto_op_pool;
+ struct rte_mempool *session_mempool[RTE_MAX_NUMA_NODES];
+ odp_shm_t shm;
+ crypto_session_entry_t *free;
+ crypto_session_entry_t sessions[];
+} crypto_global_t;
+
+typedef enum op_status_t {
+ S_OK, /* everything ok this far */
+ S_NOP, /* no-op: null crypto & null auth */
+ S_DEV, /* processed by cryptodev */
+ S_ERROR, /* error occurred */
+ S_ERROR_LIN, /* packet linearization error occurred */
+ S_ERROR_HASH_OFFSET, /* hash offset in cipher range */
+} op_status_t;
+
+typedef struct crypto_op_state_t {
+ uint8_t cipher_iv[MAX_IV_LENGTH] ODP_ALIGNED(8);
+ uint8_t auth_iv[MAX_IV_LENGTH] ODP_ALIGNED(8);
+ odp_packet_t pkt;
+ op_status_t status;
+ crypto_session_entry_t *session;
+ uint32_t hash_result_offset;
+} crypto_op_state_t;
+
+typedef struct crypto_op_t {
+ /* these must be first */
+ struct rte_crypto_op op;
+ struct rte_crypto_sym_op sym_op;
+
+ crypto_op_state_t state;
+} crypto_op_t;
+
+#define IV_OFFSET offsetof(crypto_op_t, state.cipher_iv)
+#define AUTH_IV_OFFSET offsetof(crypto_op_t, state.auth_iv)
+
+static crypto_global_t *global;
+
+static inline int is_valid_size(uint16_t length,
+ const struct rte_crypto_param_range *range)
+{
+ uint16_t supp_size;
+
+ if (length < range->min)
+ return 0;
+
+ if (range->min != length && range->increment == 0)
+ return 0;
+
+ for (supp_size = range->min;
+ supp_size <= range->max;
+ supp_size += range->increment) {
+ if (length == supp_size)
+ return 1;
+ }
+
+ return 0;
+}
+
+static int cipher_is_aead(odp_cipher_alg_t cipher_alg)
+{
+ switch (cipher_alg) {
+ case ODP_CIPHER_ALG_AES_GCM:
+ case ODP_CIPHER_ALG_AES_CCM:
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int auth_is_aead(odp_auth_alg_t auth_alg)
+{
+ switch (auth_alg) {
+ case ODP_AUTH_ALG_AES_GCM:
+ case ODP_AUTH_ALG_AES_CCM:
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static int cipher_aead_alg_odp_to_rte(odp_cipher_alg_t cipher_alg,
+ struct rte_crypto_sym_xform *aead_xform)
+{
+ int rc = 0;
+
+ switch (cipher_alg) {
+ case ODP_CIPHER_ALG_AES_GCM:
+ aead_xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case ODP_CIPHER_ALG_AES_CCM:
+ aead_xform->aead.algo = RTE_CRYPTO_AEAD_AES_CCM;
+ break;
+#if RTE_VERSION >= RTE_VERSION_NUM(20, 11, 0, 0)
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ aead_xform->aead.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
+ break;
+#endif
+ default:
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int auth_aead_alg_odp_to_rte(odp_auth_alg_t auth_alg,
+ struct rte_crypto_sym_xform *aead_xform)
+{
+ int rc = 0;
+
+ switch (auth_alg) {
+ case ODP_AUTH_ALG_AES_GCM:
+ aead_xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
+ break;
+ case ODP_AUTH_ALG_AES_CCM:
+ aead_xform->aead.algo = RTE_CRYPTO_AEAD_AES_CCM;
+ break;
+#if RTE_VERSION >= RTE_VERSION_NUM(20, 11, 0, 0)
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ aead_xform->aead.algo = RTE_CRYPTO_AEAD_CHACHA20_POLY1305;
+ break;
+#endif
+ default:
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int cipher_alg_odp_to_rte(odp_cipher_alg_t cipher_alg,
+ struct rte_crypto_sym_xform *cipher_xform)
+{
+ int rc = 0;
+
+ switch (cipher_alg) {
+ case ODP_CIPHER_ALG_NULL:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_NULL;
+ break;
+ case ODP_CIPHER_ALG_DES:
+ case ODP_CIPHER_ALG_3DES_CBC:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_3DES_CBC;
+ break;
+ case ODP_CIPHER_ALG_3DES_ECB:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_3DES_ECB;
+ break;
+ case ODP_CIPHER_ALG_AES_CBC:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
+ break;
+ case ODP_CIPHER_ALG_AES_CTR:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CTR;
+ break;
+ case ODP_CIPHER_ALG_AES_ECB:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_ECB;
+ break;
+ case ODP_CIPHER_ALG_AES_XTS:
+ cipher_xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_XTS;
+ break;
+ default:
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static int auth_alg_odp_to_rte(odp_auth_alg_t auth_alg,
+ struct rte_crypto_sym_xform *auth_xform)
+{
+ int rc = 0;
+
+ /* Process based on auth */
+ switch (auth_alg) {
+ case ODP_AUTH_ALG_NULL:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_NULL;
+ break;
+ case ODP_AUTH_ALG_MD5_HMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_MD5_HMAC;
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA256_HMAC;
+ break;
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
+ break;
+ case ODP_AUTH_ALG_SHA224_HMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA224_HMAC;
+ break;
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA384_HMAC;
+ break;
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_SHA512_HMAC;
+ break;
+ case ODP_AUTH_ALG_AES_GMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_GMAC;
+ break;
+ case ODP_AUTH_ALG_AES_CMAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_CMAC;
+ break;
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ auth_xform->auth.algo = RTE_CRYPTO_AUTH_AES_XCBC_MAC;
+ break;
+ default:
+ rc = -1;
+ }
+
+ return rc;
+}
+
+static crypto_session_entry_t *alloc_session(void)
+{
+ crypto_session_entry_t *session = NULL;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ return session;
+}
+
+static void free_session(crypto_session_entry_t *session)
+{
+ odp_spinlock_lock(&global->lock);
+ session->next = global->free;
+ global->free = session;
+ odp_spinlock_unlock(&global->lock);
+}
+
+int _odp_crypto_init_global(void)
+{
+ size_t mem_size;
+ int idx;
+ int16_t cdev_id, cdev_count;
+ int rc = -1;
+ unsigned int pool_size;
+ unsigned int nb_queue_pairs = 0, queue_pair;
+ uint32_t max_sess_sz = 0, sess_sz;
+ odp_shm_t shm;
+
+ if (odp_global_ro.disable.crypto) {
+ _ODP_PRINT("\nODP crypto is DISABLED\n");
+ return 0;
+ }
+
+ /* Calculate the memory size we need */
+ mem_size = sizeof(*global);
+ mem_size += (MAX_SESSIONS * sizeof(crypto_session_entry_t));
+
+ /* Allocate our globally shared memory */
+ shm = odp_shm_reserve("_odp_crypto_global", mem_size,
+ ODP_CACHE_LINE_SIZE, 0);
+ if (shm != ODP_SHM_INVALID) {
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ _ODP_ERR("Failed to find the reserved shm block");
+ return -1;
+ }
+ } else {
+ _ODP_ERR("Shared memory reserve failed.\n");
+ return -1;
+ }
+
+ /* Clear it out */
+ memset(global, 0, mem_size);
+ global->shm = shm;
+
+ /* Initialize free list and lock */
+ for (idx = 0; idx < MAX_SESSIONS; idx++) {
+ global->sessions[idx].next = global->free;
+ global->free = &global->sessions[idx];
+ }
+
+ global->enabled_crypto_devs = 0;
+ odp_spinlock_init(&global->lock);
+
+ if (global->is_crypto_dev_initialized)
+ return 0;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ _ODP_PRINT("No crypto devices available\n");
+ return 0;
+ }
+
+ for (cdev_id = 0; cdev_id < rte_cryptodev_count(); cdev_id++) {
+ sess_sz = rte_cryptodev_sym_get_private_session_size(cdev_id);
+
+ if (sess_sz > max_sess_sz)
+ max_sess_sz = sess_sz;
+ }
+
+ for (cdev_id = cdev_count - 1; cdev_id >= 0; cdev_id--) {
+ struct rte_cryptodev_info dev_info;
+ struct rte_mempool *mp;
+ odp_bool_t queue_pairs_shared = false;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ nb_queue_pairs = odp_thread_count_max();
+ if (nb_queue_pairs > dev_info.max_nb_queue_pairs) {
+ nb_queue_pairs = dev_info.max_nb_queue_pairs;
+ queue_pairs_shared = true;
+ _ODP_PRINT("Using shared queue pairs for crypto device %"
+ PRIu16 " (driver: %s)\n",
+ cdev_id, dev_info.driver_name);
+ }
+
+ struct rte_cryptodev_qp_conf qp_conf;
+ uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
+
+ struct rte_cryptodev_config conf = {
+ .nb_queue_pairs = nb_queue_pairs,
+ .socket_id = socket_id,
+ };
+
+ if (global->session_mempool[socket_id] == NULL) {
+ char mp_name[RTE_MEMPOOL_NAMESIZE];
+
+ snprintf(mp_name, RTE_MEMPOOL_NAMESIZE,
+ "sess_mp_%u", socket_id);
+
+ /*
+ * Create enough objects for session headers and
+ * device private data. Since we use shared pool,
+ * the pool has to have twice as many elements
+ * as the maximum number of sessions.
+ */
+ pool_size = 2 * MAX_SESSIONS;
+ /*
+ * Add the number of elements that may get lost
+ * in thread local caches. The mempool implementation
+ * can actually cache a bit more than the specified
+ * cache size, so we multiply by 2.
+ */
+ pool_size += 2 * odp_thread_count_max() * SESSION_CACHE_SIZE;
+ mp = rte_cryptodev_sym_session_pool_create(mp_name,
+ pool_size,
+ max_sess_sz,
+ SESSION_CACHE_SIZE,
+ 0,
+ socket_id);
+ if (mp == NULL) {
+ _ODP_ERR("Cannot create session pool on socket %d\n", socket_id);
+ return -1;
+ }
+
+ _ODP_PRINT("Allocated session pool on socket %d\n", socket_id);
+ global->session_mempool[socket_id] = mp;
+ }
+ mp = global->session_mempool[socket_id];
+
+ rc = rte_cryptodev_configure(cdev_id, &conf);
+ if (rc < 0) {
+ _ODP_ERR("Failed to configure cryptodev %u", cdev_id);
+ return -1;
+ }
+
+ qp_conf.nb_descriptors = NB_DESC_PER_QUEUE_PAIR;
+
+ for (queue_pair = 0; queue_pair < nb_queue_pairs;
+ queue_pair++) {
+ qp_conf.mp_session = mp;
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ qp_conf.mp_session_private = mp;
+#endif
+ rc = rte_cryptodev_queue_pair_setup(cdev_id, queue_pair,
+ &qp_conf,
+ socket_id);
+ if (rc < 0) {
+ _ODP_ERR("Fail to setup queue pair %u on dev %u",
+ queue_pair, cdev_id);
+ return -1;
+ }
+ }
+
+ rc = rte_cryptodev_start(cdev_id);
+ if (rc < 0) {
+ _ODP_ERR("Failed to start device %u: error %d\n", cdev_id, rc);
+ return -1;
+ }
+
+ global->enabled_crypto_dev_ids[global->enabled_crypto_devs] =
+ cdev_id;
+ global->enabled_crypto_dev_qpairs[cdev_id] = nb_queue_pairs;
+ global->enabled_crypto_dev_qpairs_shared[cdev_id] =
+ queue_pairs_shared;
+ global->enabled_crypto_devs++;
+ }
+
+ /*
+ * Make pool size big enough to fill all per-thread caches.
+ * Multiply by 2 since mempool can cache 1.5 times more elements
+ * than the specified cache size.
+ */
+ pool_size = 2 * odp_thread_count_max() * OP_CACHE_SIZE;
+
+ /* create crypto op pool */
+ global->crypto_op_pool =
+ rte_crypto_op_pool_create("crypto_op_pool",
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ pool_size, OP_CACHE_SIZE,
+ sizeof(crypto_op_t)
+ - sizeof(struct rte_crypto_op)
+ - sizeof(struct rte_crypto_sym_op),
+ rte_socket_id());
+
+ if (global->crypto_op_pool == NULL) {
+ _ODP_ERR("Cannot create crypto op pool\n");
+ return -1;
+ }
+
+ global->is_crypto_dev_initialized = 1;
+
+ return 0;
+}
+
+/* Per-thread crypto init: nothing to do, all crypto state is global. */
+int _odp_crypto_init_local(void)
+{
+ return 0;
+}
+
+/* Per-thread crypto termination: nothing to release, see init_local. */
+int _odp_crypto_term_local(void)
+{
+ return 0;
+}
+
+/* Return nonzero if the device is DPDK's aesni_mb software crypto driver. */
+static int is_dev_aesni_mb(const struct rte_cryptodev_info *dev_info)
+{
+ return dev_info->driver_name &&
+ !strcmp(dev_info->driver_name, "crypto_aesni_mb");
+}
+
+/*
+ * Translate the DPDK capability array of one crypto device into ODP
+ * cipher and auth algorithm bit fields. Bits are only ever set, never
+ * cleared, so the result accumulates over successive calls with
+ * different devices.
+ */
+static void capability_process(struct rte_cryptodev_info *dev_info,
+ odp_crypto_cipher_algos_t *ciphers,
+ odp_crypto_auth_algos_t *auths)
+{
+ const struct rte_cryptodev_capabilities *cap;
+
+ /* NULL is always supported, it is done in software */
+ ciphers->bit.null = 1;
+ auths->bit.null = 1;
+
+ for (cap = &dev_info->capabilities[0];
+ cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ cap++) {
+ if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
+ continue;
+
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ /* Load the algorithm once; the old code redundantly
+ * reloaded it before the 3DES_ECB check. */
+ enum rte_crypto_cipher_algorithm cap_cipher_algo =
+ cap->sym.cipher.algo;
+
+ if (cap_cipher_algo == RTE_CRYPTO_CIPHER_3DES_CBC) {
+ ciphers->bit.trides_cbc = 1;
+ ciphers->bit.des = 1;
+ }
+ if (cap_cipher_algo == RTE_CRYPTO_CIPHER_3DES_ECB) {
+ ciphers->bit.trides_ecb = 1;
+ ciphers->bit.des = 1;
+ }
+ if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_CBC)
+ ciphers->bit.aes_cbc = 1;
+ if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_CTR)
+ ciphers->bit.aes_ctr = 1;
+ if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_ECB)
+ ciphers->bit.aes_ecb = 1;
+ if (cap_cipher_algo == RTE_CRYPTO_CIPHER_AES_XTS)
+ ciphers->bit.aes_xts = 1;
+ }
+
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ enum rte_crypto_auth_algorithm cap_auth_algo;
+
+ cap_auth_algo = cap->sym.auth.algo;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_MD5_HMAC)
+ auths->bit.md5_hmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA256_HMAC)
+ auths->bit.sha256_hmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA1_HMAC)
+ auths->bit.sha1_hmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA224_HMAC)
+ auths->bit.sha224_hmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA384_HMAC)
+ auths->bit.sha384_hmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_SHA512_HMAC)
+ auths->bit.sha512_hmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_GMAC)
+ auths->bit.aes_gmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_CMAC)
+ auths->bit.aes_cmac = 1;
+ if (cap_auth_algo == RTE_CRYPTO_AUTH_AES_XCBC_MAC)
+ auths->bit.aes_xcbc_mac = 1;
+ }
+
+ if (cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ enum rte_crypto_aead_algorithm cap_aead_algo;
+
+ /* AEAD algorithms set both cipher and auth bits */
+ cap_aead_algo = cap->sym.aead.algo;
+ if (cap_aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ ciphers->bit.aes_gcm = 1;
+ auths->bit.aes_gcm = 1;
+ }
+ if (cap_aead_algo == RTE_CRYPTO_AEAD_AES_CCM) {
+ ciphers->bit.aes_ccm = 1;
+ auths->bit.aes_ccm = 1;
+ }
+#if RTE_VERSION >= RTE_VERSION_NUM(20, 11, 0, 0)
+ if (cap_aead_algo == RTE_CRYPTO_AEAD_CHACHA20_POLY1305) {
+ ciphers->bit.chacha20_poly1305 = 1;
+ auths->bit.chacha20_poly1305 = 1;
+ }
+#endif
+ }
+ }
+}
+
+/*
+ * Report crypto capabilities as the union of all DPDK crypto devices'
+ * algorithms. max_sessions is the smallest nonzero per-device limit.
+ * Returns -1 on bad argument or when crypto is disabled; returns 0 with
+ * an all-zero capability (only NULL algs) when no devices are present.
+ */
+int odp_crypto_capability(odp_crypto_capability_t *capability)
+{
+ uint8_t cdev_id, cdev_count;
+
+ if (odp_global_ro.disable.crypto) {
+ _ODP_ERR("Crypto is disabled\n");
+ return -1;
+ }
+
+ if (NULL == capability)
+ return -1;
+
+ /* Initialize crypto capability structure */
+ memset(capability, 0, sizeof(odp_crypto_capability_t));
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ _ODP_ERR("No crypto devices available\n");
+ /* Not a hard error here: the empty capability stands. */
+ return 0;
+ }
+
+ capability->sync_mode = ODP_SUPPORT_YES;
+ capability->async_mode = ODP_SUPPORT_PREFERRED;
+ capability->max_sessions = MAX_SESSIONS;
+ capability->queue_type_plain = 1;
+ capability->queue_type_sched = 1;
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ capability_process(&dev_info, &capability->ciphers,
+ &capability->auths);
+ /* HW masks mirror the accumulated masks once any
+ * HW-accelerated device has been seen. */
+ if ((dev_info.feature_flags &
+ RTE_CRYPTODEV_FF_HW_ACCELERATED)) {
+ capability->hw_ciphers = capability->ciphers;
+ capability->hw_auths = capability->auths;
+ }
+
+ /* Report the lowest max_nb_sessions of all devices */
+ if (dev_info.sym.max_nb_sessions != 0 &&
+ dev_info.sym.max_nb_sessions < capability->max_sessions)
+ capability->max_sessions = dev_info.sym.max_nb_sessions;
+ }
+
+ return 0;
+}
+
+/*
+ * Insert one cipher capability into 'src', keeping the array sorted by
+ * (key_len, iv_len) ascending. 'idx' is the number of capabilities seen
+ * so far, which may exceed the array capacity 'size'; entries that do
+ * not fit are dropped but still counted in the returned total.
+ */
+static int cipher_capa_insert(odp_crypto_cipher_capability_t *src,
+ odp_crypto_cipher_capability_t *capa,
+ int idx,
+ int size)
+{
+ int i = 0;
+
+ while (1) {
+ if (i >= size) {
+ return idx + 1;
+ } else if (i == idx) {
+ src[i] = *capa;
+ return idx + 1;
+ } else if (src[i].key_len < capa->key_len ||
+ (src[i].key_len == capa->key_len &&
+ src[i].iv_len < capa->iv_len)) {
+ i++;
+ } else {
+ /* Shift the tail up to make room, but never past the
+ * end of the array: the old code moved idx - i
+ * elements, writing out of bounds once idx >= size. */
+ int last = idx < size - 1 ? idx : size - 1;
+
+ memmove(&src[i + 1], &src[i],
+ sizeof(*capa) * (last - i));
+ src[i] = *capa;
+ return idx + 1;
+ }
+ }
+}
+
+/*
+ * Expand a DPDK (min, max, increment) key/iv range pair into individual
+ * ODP cipher capabilities, inserting each into the sorted array 'src'.
+ * An increment of 0 means the range holds a single value. Returns the
+ * running total of generated capabilities (may exceed num_copy).
+ */
+static int cipher_gen_capability(const struct rte_crypto_param_range *key_size,
+ const struct rte_crypto_param_range *iv_size,
+ odp_crypto_cipher_capability_t *src,
+ int offset,
+ int num_copy)
+{
+ int idx = offset;
+
+ uint32_t key_size_min = key_size->min;
+ uint32_t key_size_max = key_size->max;
+ uint32_t key_inc = key_size->increment;
+ uint32_t iv_size_max = iv_size->max;
+ uint32_t iv_size_min = iv_size->min;
+ uint32_t iv_inc = iv_size->increment;
+
+ for (uint32_t key_len = key_size_min; key_len <= key_size_max;
+ key_len += key_inc) {
+ for (uint32_t iv_len = iv_size_min; iv_len <= iv_size_max; iv_len += iv_inc) {
+ odp_crypto_cipher_capability_t capa;
+
+ capa.key_len = key_len;
+ capa.iv_len = iv_len;
+ capa.bit_mode = false;
+
+ idx = cipher_capa_insert(src, &capa, idx, num_copy);
+
+ /* zero increment: single iv length, avoid infinite loop */
+ if (iv_inc == 0)
+ break;
+ }
+
+ /* zero increment: single key length, avoid infinite loop */
+ if (key_inc == 0)
+ break;
+ }
+
+ return idx;
+}
+
+/*
+ * Find the device capability entry matching the algorithm of the given
+ * symmetric xform (cipher, auth or AEAD). Returns NULL if the device
+ * does not support the algorithm.
+ */
+static const struct rte_cryptodev_capabilities *
+find_capa_for_alg(const struct rte_cryptodev_info *dev_info,
+ const struct rte_crypto_sym_xform *xform)
+{
+ const struct rte_cryptodev_capabilities *cap;
+
+ for (cap = &dev_info->capabilities[0];
+ cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED;
+ cap++) {
+ if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC ||
+ cap->sym.xform_type != xform->type)
+ continue;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
+ cap->sym.cipher.algo == xform->cipher.algo)
+ return cap;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
+ cap->sym.auth.algo == xform->auth.algo)
+ return cap;
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
+ cap->sym.aead.algo == xform->aead.algo)
+ return cap;
+ }
+ return NULL;
+}
+
+/*
+ * List cipher capabilities of an AEAD algorithm across all crypto
+ * devices. At most num_copy sorted entries are copied to dst; the
+ * return value is the total number found (may exceed num_copy),
+ * or -1 on unsupported algorithm / no devices.
+ */
+static int cipher_aead_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ odp_crypto_cipher_capability_t src[num_copy];
+ int idx = 0, rc = 0;
+ int size = sizeof(odp_crypto_cipher_capability_t);
+
+ uint8_t cdev_id, cdev_count;
+ const struct rte_cryptodev_capabilities *cap;
+ struct rte_crypto_sym_xform aead_xform;
+
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ rc = cipher_aead_alg_odp_to_rte(cipher, &aead_xform);
+
+ /* Check result */
+ if (rc)
+ return -1;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ _ODP_ERR("No crypto devices available\n");
+ return -1;
+ }
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ cap = find_capa_for_alg(&dev_info, &aead_xform);
+ if (cap == NULL)
+ continue;
+
+ idx = cipher_gen_capability(&cap->sym.aead.key_size,
+ &cap->sym.aead.iv_size,
+ src, idx,
+ num_copy);
+ }
+
+ /* Copy out at most num_copy entries, report the full count. */
+ if (idx < num_copy)
+ num_copy = idx;
+
+ memcpy(dst, src, num_copy * size);
+
+ return idx;
+}
+
+/*
+ * List capabilities of a plain (non-AEAD) cipher algorithm across all
+ * crypto devices. At most num_copy sorted entries are copied to dst;
+ * the return value is the total number found (may exceed num_copy),
+ * or -1 on unsupported algorithm / no devices.
+ */
+static int cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ odp_crypto_cipher_capability_t src[num_copy];
+ int idx = 0, rc = 0;
+ int size = sizeof(odp_crypto_cipher_capability_t);
+ uint8_t cdev_id, cdev_count;
+ const struct rte_cryptodev_capabilities *cap;
+ struct rte_crypto_sym_xform cipher_xform;
+
+ cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ rc = cipher_alg_odp_to_rte(cipher, &cipher_xform);
+
+ /* Check result */
+ if (rc)
+ return -1;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ _ODP_ERR("No crypto devices available\n");
+ return -1;
+ }
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ cap = find_capa_for_alg(&dev_info, &cipher_xform);
+ if (cap == NULL)
+ continue;
+
+ idx = cipher_gen_capability(&cap->sym.cipher.key_size,
+ &cap->sym.cipher.iv_size,
+ src, idx,
+ num_copy);
+ }
+
+ /* Copy out at most num_copy entries, report the full count. */
+ if (idx < num_copy)
+ num_copy = idx;
+
+ memcpy(dst, src, num_copy * size);
+
+ return idx;
+}
+
+/*
+ * Public cipher capability query. NULL cipher is handled in software
+ * and reported as a single all-zero capability; other algorithms are
+ * dispatched to the AEAD or plain-cipher device scan.
+ */
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ /* We implement NULL in software, so always return capability */
+ if (cipher == ODP_CIPHER_ALG_NULL) {
+ if (num_copy >= 1)
+ memset(dst, 0, sizeof(odp_crypto_cipher_capability_t));
+ return 1;
+ }
+
+ if (cipher_is_aead(cipher))
+ return cipher_aead_capability(cipher, dst, num_copy);
+ else
+ return cipher_capability(cipher, dst, num_copy);
+}
+
+/*
+ * Insert one auth capability into 'src', keeping the array sorted by
+ * (digest_len, key_len, iv_len) ascending. 'idx' is the number of
+ * capabilities seen so far, which may exceed the array capacity 'size';
+ * entries that do not fit are dropped but still counted in the
+ * returned total.
+ */
+static int auth_capa_insert(odp_crypto_auth_capability_t *src,
+ odp_crypto_auth_capability_t *capa,
+ int idx,
+ int size)
+{
+ int i = 0;
+
+ while (1) {
+ if (i >= size) {
+ return idx + 1;
+ } else if (i == idx) {
+ src[i] = *capa;
+ return idx + 1;
+ } else if (src[i].digest_len < capa->digest_len ||
+ (src[i].digest_len == capa->digest_len &&
+ src[i].key_len < capa->key_len) ||
+ (src[i].digest_len == capa->digest_len &&
+ src[i].key_len == capa->key_len &&
+ src[i].iv_len < capa->iv_len)) {
+ i++;
+ } else {
+ /* Shift the tail up to make room, but never past the
+ * end of the array: the old code moved idx - i
+ * elements, writing out of bounds once idx >= size. */
+ int last = idx < size - 1 ? idx : size - 1;
+
+ memmove(&src[i + 1], &src[i],
+ sizeof(*capa) * (last - i));
+ src[i] = *capa;
+ return idx + 1;
+ }
+ }
+}
+
+/*
+ * Expand DPDK (min, max, increment) ranges for key, iv, digest and AAD
+ * sizes into individual ODP auth capabilities, inserting each into the
+ * sorted array 'src'. An increment of 0 means the range is a single
+ * value. Returns the running total of generated capabilities (may
+ * exceed num_copy).
+ */
+static int auth_gen_capability(const struct rte_crypto_param_range *key_size,
+ const struct rte_crypto_param_range *iv_size,
+ const struct rte_crypto_param_range *digest_size,
+ const struct rte_crypto_param_range *aad_size,
+ odp_crypto_auth_capability_t *src,
+ int offset,
+ int num_copy)
+{
+ int idx = offset;
+
+ uint16_t key_size_min = key_size->min;
+ uint16_t key_size_max = key_size->max;
+ uint16_t key_inc = key_size->increment;
+ uint16_t iv_size_max = iv_size->max;
+ uint16_t iv_size_min = iv_size->min;
+ uint16_t iv_inc = iv_size->increment;
+ uint16_t digest_size_min = digest_size->min;
+ uint16_t digest_size_max = digest_size->max;
+ uint16_t digest_inc = digest_size->increment;
+
+ for (uint16_t digest_len = digest_size_min;
+ digest_len <= digest_size_max;
+ digest_len += digest_inc) {
+ for (uint16_t key_len = key_size_min;
+ key_len <= key_size_max;
+ key_len += key_inc) {
+ for (uint16_t iv_len = iv_size_min;
+ iv_len <= iv_size_max;
+ iv_len += iv_inc) {
+ odp_crypto_auth_capability_t capa;
+
+ capa.digest_len = digest_len;
+ capa.key_len = key_len;
+ capa.iv_len = iv_len;
+ capa.bit_mode = false;
+ /* AAD range is passed through, not expanded */
+ capa.aad_len.min = aad_size->min;
+ capa.aad_len.max = aad_size->max;
+ capa.aad_len.inc = aad_size->increment;
+
+ idx = auth_capa_insert(src, &capa, idx,
+ num_copy);
+
+ if (iv_inc == 0)
+ break;
+ }
+
+ if (key_inc == 0)
+ break;
+ }
+
+ if (digest_inc == 0)
+ break;
+ }
+
+ return idx;
+}
+
+/* All-zero range for parameters that do not apply (e.g. AEAD key/iv
+ * when generating auth capabilities). */
+static const struct rte_crypto_param_range zero_range = {
+ .min = 0, .max = 0, .increment = 0
+};
+
+/*
+ * List auth capabilities of an AEAD algorithm across all crypto
+ * devices. Key and iv are reported as zero (they belong to the cipher
+ * side of the AEAD). At most num_copy sorted entries are copied to
+ * dst; the return value is the total number found (may exceed
+ * num_copy), or -1 on unsupported algorithm / no devices.
+ */
+static int auth_aead_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[],
+ int num_copy)
+{
+ odp_crypto_auth_capability_t src[num_copy];
+ int idx = 0, rc = 0;
+ int size = sizeof(odp_crypto_auth_capability_t);
+
+ uint8_t cdev_id, cdev_count;
+ const struct rte_cryptodev_capabilities *cap;
+ struct rte_crypto_sym_xform aead_xform;
+
+ aead_xform.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ rc = auth_aead_alg_odp_to_rte(auth, &aead_xform);
+
+ /* Check result */
+ if (rc)
+ return -1;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ _ODP_ERR("No crypto devices available\n");
+ return -1;
+ }
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ cap = find_capa_for_alg(&dev_info, &aead_xform);
+ if (cap == NULL)
+ continue;
+
+ idx = auth_gen_capability(&zero_range,
+ &zero_range,
+ &cap->sym.aead.digest_size,
+ &cap->sym.aead.aad_size,
+ src, idx,
+ num_copy);
+ }
+
+ /* Copy out at most num_copy entries, report the full count. */
+ if (idx < num_copy)
+ num_copy = idx;
+
+ memcpy(dst, src, num_copy * size);
+
+ return idx;
+}
+
+/*
+ * List capabilities of a plain (non-AEAD) auth algorithm across all
+ * crypto devices. For HMAC algorithms the key size is pinned to the
+ * digest-block convention below to keep the capability list small.
+ * At most num_copy sorted entries are copied to dst; the return value
+ * is the total number found (may exceed num_copy), or -1 on
+ * unsupported algorithm / no devices.
+ */
+static int auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[],
+ int num_copy)
+{
+ odp_crypto_auth_capability_t src[num_copy];
+ int idx = 0, rc = 0;
+ int size = sizeof(odp_crypto_auth_capability_t);
+ uint8_t cdev_id, cdev_count;
+ const struct rte_cryptodev_capabilities *cap;
+ struct rte_crypto_sym_xform auth_xform;
+ uint16_t key_size_override;
+ struct rte_crypto_param_range key_range_override;
+
+ auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ rc = auth_alg_odp_to_rte(auth, &auth_xform);
+
+ /* Check result */
+ if (rc)
+ return -1;
+
+ /* Don't generate thousands of useless capabilities for HMAC
+ * algorithms. In ODP we need support for small amount of key
+ * lengths. So we limit key size to what is practical for ODP. */
+ switch (auth) {
+ case ODP_AUTH_ALG_MD5_HMAC:
+ key_size_override = 16;
+ break;
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ key_size_override = 20;
+ break;
+ case ODP_AUTH_ALG_SHA224_HMAC:
+ key_size_override = 28;
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ key_size_override = 32;
+ break;
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ key_size_override = 48;
+ break;
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ key_size_override = 64;
+ break;
+ default:
+ key_size_override = 0;
+ break;
+ }
+
+ key_range_override.min = key_size_override;
+ key_range_override.max = key_size_override;
+ key_range_override.increment = 0;
+
+ cdev_count = rte_cryptodev_count();
+ if (cdev_count == 0) {
+ _ODP_ERR("No crypto devices available\n");
+ return -1;
+ }
+
+ for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
+ struct rte_cryptodev_info dev_info;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ cap = find_capa_for_alg(&dev_info, &auth_xform);
+ if (cap == NULL)
+ continue;
+
+ /* Skip devices that cannot use the pinned HMAC key size */
+ if (key_size_override != 0 &&
+ !is_valid_size(key_size_override,
+ &cap->sym.auth.key_size))
+ continue;
+
+ idx = auth_gen_capability(key_size_override ?
+ &key_range_override :
+ &cap->sym.auth.key_size,
+ &cap->sym.auth.iv_size,
+ &cap->sym.auth.digest_size,
+ &cap->sym.auth.aad_size,
+ src, idx,
+ num_copy);
+ }
+
+ /* Copy out at most num_copy entries, report the full count. */
+ if (idx < num_copy)
+ num_copy = idx;
+
+ memcpy(dst, src, num_copy * size);
+
+ return idx;
+}
+
+/*
+ * Public auth capability query. NULL auth is handled in software and
+ * reported as a single all-zero capability; other algorithms are
+ * dispatched to the AEAD or plain-auth device scan.
+ */
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[],
+ int num_copy)
+{
+ /* We implement NULL in software, so always return capability */
+ if (auth == ODP_AUTH_ALG_NULL) {
+ if (num_copy >= 1)
+ memset(dst, 0, sizeof(odp_crypto_auth_capability_t));
+ return 1;
+ }
+
+ if (auth_is_aead(auth))
+ return auth_aead_capability(auth, dst, num_copy);
+ else
+ return auth_capability(auth, dst, num_copy);
+}
+
+/*
+ * Pick the first enabled crypto device that supports the AEAD xform's
+ * algorithm with the requested key, iv and digest sizes. On success
+ * writes the device id to *dev_id and returns ODP_CRYPTO_SES_ERR_NONE;
+ * otherwise returns ODP_CRYPTO_SES_ERR_CIPHER.
+ */
+static odp_crypto_ses_create_err_t
+get_crypto_aead_dev(struct rte_crypto_sym_xform *aead_xform,
+ uint8_t *dev_id)
+{
+ uint8_t cdev_id, id;
+ const struct rte_cryptodev_capabilities *cap;
+
+ for (id = 0; id < global->enabled_crypto_devs; id++) {
+ struct rte_cryptodev_info dev_info;
+
+ cdev_id = global->enabled_crypto_dev_ids[id];
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ cap = find_capa_for_alg(&dev_info, aead_xform);
+ if (cap == NULL)
+ continue;
+
+ /* Check if key size is supported by the algorithm. */
+ if (!is_valid_size(aead_xform->aead.key.length,
+ &cap->sym.aead.key_size)) {
+ _ODP_DBG("Unsupported aead key length\n");
+ continue;
+ }
+
+ /* Check if iv length is supported by the algorithm. */
+ if (aead_xform->aead.iv.length > MAX_IV_LENGTH ||
+ !is_valid_size(aead_xform->aead.iv.length,
+ &cap->sym.aead.iv_size)) {
+ _ODP_DBG("Unsupported iv length\n");
+ continue;
+ }
+
+ /* Check if digest size is supported by the algorithm. */
+ if (!is_valid_size(aead_xform->aead.digest_length,
+ &cap->sym.aead.digest_size)) {
+ _ODP_DBG("Unsupported digest length\n");
+ continue;
+ }
+
+ *dev_id = cdev_id;
+ return ODP_CRYPTO_SES_ERR_NONE;
+ }
+
+ return ODP_CRYPTO_SES_ERR_CIPHER;
+}
+
+/*
+ * Return nonzero if the device supports the cipher xform's algorithm
+ * with the requested key and iv sizes. NULL cipher is always supported
+ * (implemented in software).
+ */
+static int is_cipher_supported(const struct rte_cryptodev_info *dev_info,
+ const struct rte_crypto_sym_xform *cipher_xform)
+{
+ const struct rte_cryptodev_capabilities *cap;
+
+ _ODP_ASSERT(cipher_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER);
+
+ if (cipher_xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
+ return 1;
+
+ cap = find_capa_for_alg(dev_info, cipher_xform);
+ if (cap == NULL)
+ return 0;
+
+ /* Check if key size is supported by the algorithm. */
+ if (!is_valid_size(cipher_xform->cipher.key.length,
+ &cap->sym.cipher.key_size)) {
+ _ODP_DBG("Unsupported cipher key length\n");
+ return 0;
+ }
+
+ /* Check if iv length is supported by the algorithm. */
+ if (cipher_xform->cipher.iv.length > MAX_IV_LENGTH ||
+ !is_valid_size(cipher_xform->cipher.iv.length,
+ &cap->sym.cipher.iv_size)) {
+ _ODP_DBG("Unsupported iv length\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Return nonzero if the device supports the auth xform's algorithm
+ * with the requested key, digest and iv sizes. NULL auth is always
+ * supported (implemented in software).
+ */
+static int is_auth_supported(const struct rte_cryptodev_info *dev_info,
+ const struct rte_crypto_sym_xform *auth_xform)
+{
+ const struct rte_cryptodev_capabilities *cap;
+
+ _ODP_ASSERT(auth_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH);
+
+ if (auth_xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
+ return 1;
+
+ cap = find_capa_for_alg(dev_info, auth_xform);
+ if (cap == NULL)
+ return 0;
+
+ /* Check if key size is supported by the algorithm. */
+ if (!is_valid_size(auth_xform->auth.key.length,
+ &cap->sym.auth.key_size)) {
+ _ODP_DBG("Unsupported auth key length\n");
+ return 0;
+ }
+
+ /* Check if digest size is supported by the algorithm. */
+ if (!is_valid_size(auth_xform->auth.digest_length,
+ &cap->sym.auth.digest_size)) {
+ _ODP_DBG("Unsupported digest length\n");
+ return 0;
+ }
+
+ /* Check if iv length is supported by the algorithm. */
+ if (auth_xform->auth.iv.length > MAX_IV_LENGTH ||
+ !is_valid_size(auth_xform->auth.iv.length,
+ &cap->sym.auth.iv_size)) {
+ _ODP_DBG("Unsupported iv length\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * Return nonzero for cipher/auth combinations that must not be used on
+ * this device even though it advertises support for both algorithms.
+ */
+static int is_combo_buggy(struct rte_cryptodev_info *dev_info,
+ enum rte_crypto_cipher_algorithm cipher,
+ enum rte_crypto_auth_algorithm auth)
+{
+ /*
+ * Certain algorithm combinations do not work in the aesni_mb
+ * crypto driver because of bugs in the driver.
+ */
+ if (is_dev_aesni_mb(dev_info)) {
+ if (cipher == RTE_CRYPTO_CIPHER_3DES_CBC &&
+ (auth == RTE_CRYPTO_AUTH_AES_XCBC_MAC ||
+ auth == RTE_CRYPTO_AUTH_AES_CMAC))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Pick the first enabled crypto device that supports both the cipher
+ * and the auth xform (and is not a known-buggy combination). On
+ * success writes the device id to *dev_id and returns
+ * ODP_CRYPTO_SES_ERR_NONE; otherwise returns an error distinguishing
+ * whether the cipher, the auth, or only the combination is unsupported.
+ */
+static odp_crypto_ses_create_err_t
+get_crypto_dev(struct rte_crypto_sym_xform *cipher_xform,
+ struct rte_crypto_sym_xform *auth_xform,
+ uint8_t *dev_id)
+{
+ uint8_t cdev_id, id;
+ int cipher_supported = 0;
+ int auth_supported = 0;
+
+ for (id = 0; id < global->enabled_crypto_devs; id++) {
+ struct rte_cryptodev_info dev_info;
+ int cipher_ok, auth_ok;
+
+ cdev_id = global->enabled_crypto_dev_ids[id];
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+ cipher_ok = is_cipher_supported(&dev_info, cipher_xform);
+ auth_ok = is_auth_supported(&dev_info, auth_xform);
+
+ /* Track support separately across devices so the failure
+ * reason can be reported precisely below. */
+ if (cipher_ok)
+ cipher_supported = 1;
+ if (auth_ok)
+ auth_supported = 1;
+
+ if (is_combo_buggy(&dev_info,
+ cipher_xform->cipher.algo,
+ auth_xform->auth.algo))
+ continue;
+
+ if (cipher_ok && auth_ok) {
+ *dev_id = cdev_id;
+ return ODP_CRYPTO_SES_ERR_NONE;
+ }
+ }
+ if (cipher_supported && auth_supported)
+ return ODP_CRYPTO_SES_ERR_ALG_COMBO;
+
+ return !cipher_supported ? ODP_CRYPTO_SES_ERR_CIPHER
+ : ODP_CRYPTO_SES_ERR_AUTH;
+}
+
+/*
+ * Check whether chained (multi-segment) packet buffers can be used with
+ * this session on the given crypto device. Starts from the device's
+ * in-place SGL feature flag, then blacklists driver/algorithm
+ * combinations known not to work despite the flag.
+ */
+static int chained_bufs_ok(const odp_crypto_session_param_t *param,
+ uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ int ok; /* was 'chained_bufs_ok', which shadowed the function name */
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+ ok = !!(dev_info.feature_flags & RTE_CRYPTODEV_FF_IN_PLACE_SGL);
+
+ /*
+ * Some crypto devices do not support chained buffers with all
+ * algorithms despite advertizing SG support in feature flags.
+ */
+
+ if (dev_info.driver_name &&
+ !strcmp(dev_info.driver_name, "crypto_aesni_mb"))
+ ok = 0;
+
+ if (dev_info.driver_name &&
+ !strcmp(dev_info.driver_name, "crypto_aesni_gcm") &&
+ param->auth_alg == ODP_AUTH_ALG_AES_GMAC)
+ ok = 0;
+
+ if (dev_info.driver_name &&
+ !strcmp(dev_info.driver_name, "crypto_openssl") &&
+ (param->cipher_alg == ODP_CIPHER_ALG_AES_GCM ||
+ param->cipher_alg == ODP_CIPHER_ALG_AES_CCM ||
+ param->auth_alg == ODP_AUTH_ALG_AES_GMAC))
+ ok = 0;
+
+ return ok;
+}
+
+/*
+ * Fill a DPDK cipher xform from ODP session parameters. Returns -1 if
+ * the ODP cipher algorithm has no DPDK mapping, 0 on success.
+ */
+static int crypto_fill_cipher_xform(struct rte_crypto_sym_xform *cipher_xform,
+ odp_crypto_session_param_t *param)
+{
+ cipher_xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ cipher_xform->next = NULL;
+
+ if (cipher_alg_odp_to_rte(param->cipher_alg, cipher_xform))
+ return -1;
+
+ /* iv.offset locates the IV within the crypto op (copied there
+ * per operation); only key data is referenced from the session. */
+ cipher_xform->cipher.key.data = param->cipher_key.data;
+ cipher_xform->cipher.key.length = param->cipher_key.length;
+ cipher_xform->cipher.iv.offset = IV_OFFSET;
+ cipher_xform->cipher.iv.length = param->cipher_iv_len;
+
+ /* Derive order */
+ if (ODP_CRYPTO_OP_ENCODE == param->op)
+ cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
+ else
+ cipher_xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ return 0;
+}
+
+/*
+ * Fill a DPDK auth xform from ODP session parameters. Returns -1 if
+ * the ODP auth algorithm has no DPDK mapping or the digest is longer
+ * than the per-packet digest buffer, 0 on success.
+ */
+static int crypto_fill_auth_xform(struct rte_crypto_sym_xform *auth_xform,
+ odp_crypto_session_param_t *param)
+{
+ auth_xform->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ auth_xform->next = NULL;
+
+ if (auth_alg_odp_to_rte(param->auth_alg, auth_xform))
+ return -1;
+
+ auth_xform->auth.digest_length = param->auth_digest_len;
+ if (auth_xform->auth.digest_length > PACKET_DIGEST_MAX) {
+ _ODP_ERR("Requested too long digest\n");
+ return -1;
+ }
+
+ auth_xform->auth.key.data = param->auth_key.data;
+ auth_xform->auth.key.length = param->auth_key.length;
+ auth_xform->auth.iv.offset = AUTH_IV_OFFSET;
+ auth_xform->auth.iv.length = param->auth_iv_len;
+
+ if (ODP_CRYPTO_OP_ENCODE == param->op)
+ auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
+ else
+ auth_xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ return 0;
+}
+
+/*
+ * Fill a DPDK AEAD xform from ODP session parameters. Returns -1 if
+ * the algorithm has no DPDK mapping or AAD/digest exceed the per-packet
+ * buffers, 0 on success.
+ */
+static int crypto_fill_aead_xform(struct rte_crypto_sym_xform *aead_xform,
+ odp_crypto_session_param_t *param)
+{
+ aead_xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ aead_xform->next = NULL;
+
+ if (cipher_aead_alg_odp_to_rte(param->cipher_alg, aead_xform))
+ return -1;
+
+ aead_xform->aead.key.data = param->cipher_key.data;
+ aead_xform->aead.key.length = param->cipher_key.length;
+ aead_xform->aead.iv.offset = IV_OFFSET;
+ aead_xform->aead.iv.length = param->cipher_iv_len;
+
+ aead_xform->aead.aad_length = param->auth_aad_len;
+ if (aead_xform->aead.aad_length > PACKET_AAD_MAX) {
+ _ODP_ERR("Requested too long AAD\n");
+ return -1;
+ }
+
+ /* CCM AAD is stored at an offset in the AAD buffer, leaving less
+ * room than for other AEAD algorithms. */
+ if (aead_xform->aead.algo == RTE_CRYPTO_AEAD_AES_CCM &&
+ aead_xform->aead.aad_length + AES_CCM_AAD_OFFSET >
+ PACKET_AAD_MAX) {
+ _ODP_ERR("Requested too long AAD for CCM\n");
+ return -1;
+ }
+
+ aead_xform->aead.digest_length = param->auth_digest_len;
+ if (aead_xform->aead.digest_length > PACKET_DIGEST_MAX) {
+ _ODP_ERR("Requested too long digest\n");
+ return -1;
+ }
+
+ /* Derive order */
+ if (ODP_CRYPTO_OP_ENCODE == param->op)
+ aead_xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ else
+ aead_xform->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT;
+
+ return 0;
+}
+
+/*
+ * Create a crypto session: validate parameters, convert them to DPDK
+ * symmetric xforms, pick a crypto device that supports the requested
+ * algorithm (combination) and allocate a DPDK session on that device's
+ * NUMA-local mempool. On failure *session_out is set to
+ * ODP_CRYPTO_SESSION_INVALID and *status to the failure reason.
+ */
+int odp_crypto_session_create(const odp_crypto_session_param_t *param,
+ odp_crypto_session_t *session_out,
+ odp_crypto_ses_create_err_t *status)
+{
+ odp_crypto_ses_create_err_t rc = ODP_CRYPTO_SES_ERR_NONE;
+ uint8_t cdev_id = 0;
+ uint8_t socket_id;
+ struct rte_crypto_sym_xform cipher_xform;
+ struct rte_crypto_sym_xform auth_xform;
+ struct rte_crypto_sym_xform *first_xform;
+ struct rte_cryptodev_sym_session *rte_session;
+ struct rte_mempool *sess_mp;
+ crypto_session_entry_t *session = NULL;
+
+ if (odp_global_ro.disable.crypto) {
+ _ODP_ERR("Crypto is disabled\n");
+ /* Dummy output to avoid compiler warning about uninitialized
+ * variables */
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
+ /* Bit mode ranges are not supported by this implementation */
+ if (param->cipher_range_in_bits) {
+ *status = ODP_CRYPTO_SES_ERR_CIPHER;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+ if (param->auth_range_in_bits) {
+ *status = ODP_CRYPTO_SES_ERR_AUTH;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+ if (param->auth_alg == ODP_AUTH_ALG_AES_GMAC &&
+ param->cipher_alg != ODP_CIPHER_ALG_NULL) {
+ *status = ODP_CRYPTO_SES_ERR_ALG_COMBO;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
+ if (param->op_type != ODP_CRYPTO_OP_TYPE_BASIC &&
+ param->op_type != ODP_CRYPTO_OP_TYPE_LEGACY) {
+ *status = ODP_CRYPTO_SES_ERR_PARAMS;
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+ }
+
+ if (rte_cryptodev_count() == 0) {
+ _ODP_ERR("No crypto devices available\n");
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ goto err;
+ }
+
+ /* Allocate memory for this session */
+ session = alloc_session();
+ if (session == NULL) {
+ /* fixed duplicated word in the original message */
+ _ODP_ERR("Failed to allocate a session");
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ goto err;
+ }
+
+ /* Copy parameters */
+ session->p = *param;
+
+ if (cipher_is_aead(param->cipher_alg)) {
+ session->flags.aead = 1;
+
+ if (crypto_fill_aead_xform(&cipher_xform, &session->p) < 0) {
+ *status = ODP_CRYPTO_SES_ERR_CIPHER;
+ goto err;
+ }
+
+ first_xform = &cipher_xform;
+
+ rc = get_crypto_aead_dev(&cipher_xform,
+ &cdev_id);
+ } else {
+ odp_bool_t do_cipher_first;
+
+ session->flags.aead = 0;
+
+ if (crypto_fill_cipher_xform(&cipher_xform, &session->p) < 0) {
+ *status = ODP_CRYPTO_SES_ERR_CIPHER;
+ goto err;
+ }
+
+ if (crypto_fill_auth_xform(&auth_xform, &session->p) < 0) {
+ *status = ODP_CRYPTO_SES_ERR_AUTH;
+ goto err;
+ }
+
+ /* Derive order */
+ if (ODP_CRYPTO_OP_ENCODE == param->op)
+ do_cipher_first = param->auth_cipher_text;
+ else
+ do_cipher_first = !param->auth_cipher_text;
+
+ /* Derive order */
+ if (param->cipher_alg == ODP_CIPHER_ALG_NULL &&
+ param->auth_alg == ODP_AUTH_ALG_NULL) {
+ /* NULL/NULL is pure software: no device session */
+ rte_session = NULL;
+ cdev_id = ~0;
+ session->flags.chained_bufs_ok = 1;
+ goto out_null;
+ } else if (param->cipher_alg == ODP_CIPHER_ALG_NULL) {
+ first_xform = &auth_xform;
+ } else if (param->auth_alg == ODP_AUTH_ALG_NULL) {
+ first_xform = &cipher_xform;
+ } else if (do_cipher_first) {
+ first_xform = &cipher_xform;
+ first_xform->next = &auth_xform;
+ } else {
+ first_xform = &auth_xform;
+ first_xform->next = &cipher_xform;
+ }
+
+ rc = get_crypto_dev(&cipher_xform,
+ &auth_xform,
+ &cdev_id);
+ }
+ if (rc != ODP_CRYPTO_SES_ERR_NONE) {
+ _ODP_DBG("Couldn't find a crypto device (error %d)", rc);
+ *status = rc;
+ goto err;
+ }
+
+ socket_id = rte_cryptodev_socket_id(cdev_id);
+ sess_mp = global->session_mempool[socket_id];
+
+ /* Setup session */
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ rte_session = rte_cryptodev_sym_session_create(sess_mp);
+ if (rte_session == NULL) {
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ goto err;
+ }
+
+ if (rte_cryptodev_sym_session_init(cdev_id, rte_session,
+ first_xform, sess_mp) < 0) {
+ /* remove the crypto_session_entry_t */
+ rte_cryptodev_sym_session_free(rte_session);
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ goto err;
+ }
+#else
+ /* DPDK >= 22.11: create and init merged into a single call */
+ rte_session = rte_cryptodev_sym_session_create(cdev_id, first_xform, sess_mp);
+ if (rte_session == NULL) {
+ *status = ODP_CRYPTO_SES_ERR_ENOMEM;
+ goto err;
+ }
+#endif
+
+ session->flags.chained_bufs_ok = chained_bufs_ok(param, cdev_id);
+ if (global->enabled_crypto_dev_qpairs_shared[cdev_id])
+ session->flags.cdev_qpairs_shared = 1;
+ else
+ session->flags.cdev_qpairs_shared = 0;
+out_null:
+ session->rte_session = rte_session;
+ session->cdev_id = cdev_id;
+
+ /* We're happy */
+ *session_out = (intptr_t)session;
+ *status = ODP_CRYPTO_SES_ERR_NONE;
+
+ return 0;
+
+err:
+ /* error status should be set at this moment */
+ if (session != NULL) {
+ memset(session, 0, sizeof(*session));
+ free_session(session);
+ }
+ *session_out = ODP_CRYPTO_SESSION_INVALID;
+ return -1;
+}
+
+/*
+ * Destroy a crypto session: free the DPDK device session (API differs
+ * before/after DPDK 22.11) and return the session entry to the free
+ * list. Returns -1 if the DPDK session cannot be freed.
+ */
+int odp_crypto_session_destroy(odp_crypto_session_t _session)
+{
+ struct rte_cryptodev_sym_session *rte_session = NULL;
+ crypto_session_entry_t *session;
+
+ session = (crypto_session_entry_t *)(intptr_t)_session;
+
+ /* NULL for pure-software (NULL/NULL) sessions */
+ rte_session = session->rte_session;
+
+ if (rte_session != NULL) {
+#if RTE_VERSION < RTE_VERSION_NUM(22, 11, 0, 0)
+ if (rte_cryptodev_sym_session_clear(session->cdev_id,
+ rte_session) < 0)
+ return -1;
+
+ if (rte_cryptodev_sym_session_free(rte_session) < 0)
+ return -1;
+#else
+ if (rte_cryptodev_sym_session_free(session->cdev_id, rte_session) < 0)
+ return -1;
+#endif
+ }
+
+ /* remove the crypto_session_entry_t */
+ memset(session, 0, sizeof(*session));
+ free_session(session);
+
+ return 0;
+}
+
+/*
+ * Global crypto termination: verify all sessions were destroyed, free
+ * the crypto op mempool and the shared memory block. Returns -1 if
+ * sessions are still active or shm free fails.
+ */
+int _odp_crypto_term_global(void)
+{
+ int rc = 0;
+ int ret;
+ int count = 0;
+ crypto_session_entry_t *session;
+
+ if (odp_global_ro.disable.crypto || global == NULL)
+ return 0;
+
+ /* All sessions must be back on the free list at this point */
+ for (session = global->free; session != NULL; session = session->next)
+ count++;
+ if (count != MAX_SESSIONS) {
+ _ODP_ERR("crypto sessions still active\n");
+ rc = -1;
+ }
+
+ if (global->crypto_op_pool != NULL)
+ rte_mempool_free(global->crypto_op_pool);
+
+ ret = odp_shm_free(global->shm);
+ if (ret < 0) {
+ _ODP_ERR("shm free failed for crypto_pool\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+/* Initialize session parameters to all-zero defaults. */
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_crypto_session_param_t));
+}
+
+/* Session handles are pointers cast to integers; expose them as u64. */
+uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
+{
+ return (uint64_t)hdl;
+}
+
+/*
+ * Prepare the per-packet digest buffer for a crypto operation and
+ * return its virtual address (IOVA via *phys_addr). For decode, the
+ * received digest is copied out of the packet first; if the hash
+ * result lies inside the authenticated range it is zeroed in the
+ * packet so verification covers the original data.
+ */
+static uint8_t *crypto_prepare_digest(const crypto_session_entry_t *session,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ rte_iova_t *phys_addr)
+{
+ struct rte_mbuf *mb;
+ uint8_t *data;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (session->p.op == ODP_CRYPTO_OP_DECODE) {
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ session->p.auth_digest_len,
+ pkt_hdr->crypto_digest_buf);
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0,
+ session->p.auth_digest_len);
+ }
+ data = pkt_hdr->crypto_digest_buf;
+ mb = &pkt_hdr->mb;
+ *phys_addr = rte_pktmbuf_iova_offset(mb, data - rte_pktmbuf_mtod(mb, uint8_t *));
+
+ return data;
+}
+
+/*
+ * Fill the per-operation AEAD fields of a DPDK crypto op: digest
+ * buffer, AAD copy (at AES_CCM_AAD_OFFSET for CCM), IV and data range.
+ * For CCM the first IV byte carries the IV length, as the DPDK CCM
+ * convention requires, and the IV proper follows it.
+ */
+static void crypto_fill_aead_param(const crypto_session_entry_t *session,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ struct rte_crypto_op *op)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t iv_len = session->p.cipher_iv_len;
+ uint8_t *iv_ptr;
+
+ op->sym->aead.digest.data =
+ crypto_prepare_digest(session, pkt, param,
+ &op->sym->aead.digest.phys_addr);
+
+ if (session->p.cipher_alg == ODP_CIPHER_ALG_AES_CCM)
+ memcpy(pkt_hdr->crypto_aad_buf + AES_CCM_AAD_OFFSET,
+ param->aad_ptr,
+ session->p.auth_aad_len);
+ else
+ memcpy(pkt_hdr->crypto_aad_buf,
+ param->aad_ptr,
+ session->p.auth_aad_len);
+ op->sym->aead.aad.data = pkt_hdr->crypto_aad_buf;
+ op->sym->aead.aad.phys_addr =
+ rte_pktmbuf_iova_offset(&pkt_hdr->mb,
+ op->sym->aead.aad.data -
+ rte_pktmbuf_mtod(&pkt_hdr->mb, uint8_t *));
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+ if (session->p.cipher_alg == ODP_CIPHER_ALG_AES_CCM) {
+ *iv_ptr = iv_len;
+ iv_ptr++;
+ }
+
+ _ODP_ASSERT(iv_len == 0 || param->cipher_iv_ptr != NULL);
+ memcpy(iv_ptr, param->cipher_iv_ptr, iv_len);
+
+ op->sym->aead.data.offset = param->cipher_range.offset;
+ op->sym->aead.data.length = param->cipher_range.length;
+}
+
+/*
+ * Fill the non-AEAD (cipher and/or auth) fields of a crypto op:
+ * digest pointer, cipher and auth IVs, and the data ranges.
+ */
+static void crypto_fill_sym_param(const crypto_session_entry_t *session,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ struct rte_crypto_op *op)
+{
+ uint32_t cipher_iv_len = session->p.cipher_iv_len;
+ uint32_t auth_iv_len = session->p.auth_iv_len;
+ uint8_t *iv_ptr;
+
+ if (session->p.auth_digest_len == 0) {
+ /* No digest requested. */
+ op->sym->auth.digest.data = NULL;
+ op->sym->auth.digest.phys_addr = 0;
+ } else {
+ op->sym->auth.digest.data =
+ crypto_prepare_digest(session, pkt, param,
+ &op->sym->auth.digest.phys_addr);
+ }
+
+ _ODP_ASSERT(cipher_iv_len == 0 || param->cipher_iv_ptr != NULL);
+ _ODP_ASSERT(auth_iv_len == 0 || param->auth_iv_ptr != NULL);
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET);
+ memcpy(iv_ptr, param->cipher_iv_ptr, cipher_iv_len);
+
+ /* Auth IV (when present) is stored right after the cipher IV slot. */
+ if (odp_unlikely(auth_iv_len > 0)) {
+ iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *, IV_OFFSET + MAX_IV_LENGTH);
+ memcpy(iv_ptr, param->auth_iv_ptr, auth_iv_len);
+ }
+
+ op->sym->cipher.data.offset = param->cipher_range.offset;
+ op->sym->cipher.data.length = param->cipher_range.length;
+
+ op->sym->auth.data.offset = param->auth_range.offset;
+ op->sym->auth.data.length = param->auth_range.length;
+}
+
+/*
+ * Attempt to change a multi segment packet to a single segment packet by
+ * reducing the headroom. Shift packet data toward the start of the first
+ * segment and trim the tail, hopefully getting rid of the tail segment.
+ *
+ * This fails if the packet data does not fit in the first segment with
+ * the new headroom. A temporary copy to a bigger buffer would be needed
+ * in that case.
+ *
+ * Do nothing for single segment packets.
+ *
+ * We assume that odp_crypto_operation() makes no promise to not shift
+ * packet data within the packet. If that is not the case, the shifting
+ * done here needs to be undone after the crypto operation.
+ *
+ */
+static int linearize_pkt(const crypto_session_entry_t *session, odp_packet_t pkt)
+{
+ const uint32_t new_headroom = RTE_PKTMBUF_HEADROOM;
+ uint32_t headroom;
+ uint32_t len;
+ uint32_t shift;
+ int rc;
+
+ /* Nothing to do when already linear or the device handles chains. */
+ if (odp_likely(odp_packet_num_segs(pkt) == 1))
+ return 0;
+ if (session->flags.chained_bufs_ok)
+ return 0;
+
+ headroom = odp_packet_headroom(pkt);
+ /* Cannot shift if there is no extra headroom to consume. */
+ if (odp_unlikely(new_headroom >= headroom))
+ return -1;
+
+ len = odp_packet_len(pkt);
+ shift = headroom - new_headroom;
+ odp_packet_push_head(pkt, shift);
+ odp_packet_move_data(pkt, 0, shift, len);
+ /* We rely on our trunc implementation to not change the handle */
+ rc = odp_packet_trunc_tail(&pkt, shift, NULL, NULL);
+ _ODP_ASSERT(rc == 0);
+
+ /* Nonzero (failure) if the packet is still segmented. */
+ return odp_packet_num_segs(pkt) != 1;
+}
+
+/*
+ * Copy full packet data and ODP metadata from src to dst.
+ * Returns 0 on success, -1 on failure.
+ */
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
+{
+ int md_copy;
+ int rc;
+
+ /* Check first that metadata can be copied between these pools. */
+ md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+ odp_packet_pool(src));
+ if (odp_unlikely(md_copy < 0)) {
+ _ODP_ERR("Unable to copy packet metadata\n");
+ return -1;
+ }
+
+ rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
+ if (odp_unlikely(rc < 0)) {
+ _ODP_ERR("Unable to copy packet data\n");
+ return -1;
+ }
+
+ _odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
+ return 0;
+}
+
+/*
+ * Resolve the packet to operate on for non-BASIC session types.
+ * - In-place (pkt_in == pkt_out): use it directly.
+ * - pkt_out invalid: allocate/copy from the session output pool
+ *   (reuse pkt_in when the pools match).
+ * - Caller-provided pkt_out: copy data and metadata into it.
+ * pkt_in is consumed on success; ODP_PACKET_INVALID is returned on failure.
+ */
+static odp_packet_t get_output_packet(const crypto_session_entry_t *session,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out)
+{
+ int rc;
+
+ if (odp_likely(pkt_in == pkt_out))
+ return pkt_out;
+
+ if (pkt_out == ODP_PACKET_INVALID) {
+ odp_pool_t pool = session->p.output_pool;
+
+ _ODP_ASSERT(pool != ODP_POOL_INVALID);
+ if (pool == odp_packet_pool(pkt_in)) {
+ /* Same pool: avoid the copy and reuse the input. */
+ pkt_out = pkt_in;
+ } else {
+ pkt_out = odp_packet_copy(pkt_in, pool);
+ if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+ odp_packet_free(pkt_in);
+ }
+ return pkt_out;
+ }
+ rc = copy_data_and_metadata(pkt_out, pkt_in);
+ if (odp_unlikely(rc < 0))
+ return ODP_PACKET_INVALID;
+
+ odp_packet_free(pkt_in);
+ return pkt_out;
+}
+
+/*
+ * Return number of ops allocated and packets consumed.
+ */
+static int op_alloc(crypto_op_t *op[],
+ const odp_packet_t pkt_in[],
+ odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkts)
+{
+ crypto_session_entry_t *session;
+ int n;
+
+ if (odp_unlikely(rte_crypto_op_bulk_alloc(global->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC,
+ (struct rte_crypto_op **)op,
+ num_pkts) == 0)) {
+ /* This should not happen since we made op pool big enough */
+ _ODP_DBG("falling back to single crypto op alloc\n");
+ op[0] = (crypto_op_t *)rte_crypto_op_alloc(global->crypto_op_pool,
+ RTE_CRYPTO_OP_TYPE_SYMMETRIC);
+ if (odp_unlikely(op[0] == NULL)) {
+ _ODP_ERR("Failed to allocate crypto operation\n");
+ return 0;
+ }
+ num_pkts = 1;
+ }
+
+ /* Attach the packet each op will work on. For non-BASIC session
+ * types this resolves/allocates the output packet. */
+ for (n = 0; n < num_pkts; n++) {
+ odp_packet_t pkt;
+
+ session = (crypto_session_entry_t *)(intptr_t)param[n].session;
+ _ODP_ASSERT(session != NULL);
+
+ if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
+ pkt = pkt_in[n];
+ } else {
+ pkt = get_output_packet(session, pkt_in[n], pkt_out[n]);
+ if (odp_unlikely(pkt == ODP_PACKET_INVALID)) {
+ /* Output packet failure: release the ops that
+ * will not be used and stop here. */
+ for (int i = n; i < num_pkts; i++)
+ rte_crypto_op_free((struct rte_crypto_op *)op[i]);
+ break;
+ }
+ }
+ op[n]->state.pkt = pkt;
+ }
+ return n;
+}
+
+/*
+ * Return 1 when the operation can be offloaded to the cryptodev.
+ * Overlap between the cipher range and the hash result area is not
+ * supported unless either the cipher or the auth algorithm is NULL.
+ */
+static int is_op_supported(const crypto_session_entry_t *session,
+ const odp_crypto_packet_op_param_t *param)
+{
+ const uint32_t c_start = param->cipher_range.offset;
+ const uint32_t c_end = param->cipher_range.offset + param->cipher_range.length;
+
+ /* Digest entirely after or before the cipher range: no overlap. */
+ if (odp_likely(c_end <= param->hash_result_offset))
+ return 1;
+ if (odp_likely(c_start >= param->hash_result_offset + session->p.auth_digest_len))
+ return 1;
+ if (session->p.cipher_alg == ODP_CIPHER_ALG_NULL)
+ return 1;
+ if (odp_unlikely(session->p.auth_alg == ODP_AUTH_ALG_NULL))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Fill the rte_crypto_op of each packet: session attach, IVs, digest,
+ * AAD and data ranges. Ops that need no device processing (NULL-NULL
+ * sessions, per-packet null_crypto requests) or that fail preparation
+ * are flagged via op->state.status and skipped by the enqueue stage.
+ */
+static void op_prepare(crypto_op_t *ops[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_op)
+{
+ for (int n = 0; n < num_op; n++) {
+ struct crypto_op_t *op = ops[n];
+ struct rte_crypto_op *rte_op = (struct rte_crypto_op *)op;
+ crypto_session_entry_t *session;
+ struct rte_cryptodev_sym_session *rte_session;
+
+ session = (crypto_session_entry_t *)(intptr_t)param[n].session;
+ rte_session = session->rte_session;
+
+ op->state.status = S_OK;
+ op->state.session = session;
+ op->state.hash_result_offset = param[n].hash_result_offset;
+
+ /* NULL rte_session means that it is a NULL-NULL operation. */
+ if (odp_unlikely(rte_session == NULL)) {
+ op->state.status = S_NOP;
+ continue;
+ }
+ /* Fix: test the per-packet parameter (param[n]); the original
+ * code read param->null_crypto, i.e. always packet 0's flag. */
+ if (odp_unlikely(session->p.null_crypto_enable && param[n].null_crypto)) {
+ op->state.status = S_NOP;
+ continue;
+ }
+
+ if (odp_unlikely(linearize_pkt(session, op->state.pkt))) {
+ op->state.status = S_ERROR_LIN;
+ continue;
+ }
+
+ if (session->flags.aead) {
+ crypto_fill_aead_param(session, op->state.pkt, &param[n], rte_op);
+ } else {
+ if (odp_unlikely(!is_op_supported(session, &param[n]))) {
+ op->state.status = S_ERROR_HASH_OFFSET;
+ continue;
+ }
+ crypto_fill_sym_param(session, op->state.pkt, &param[n], rte_op);
+ }
+
+ rte_crypto_op_attach_sym_session(rte_op, rte_session);
+ rte_op->sym->m_src = pkt_to_mbuf(op->state.pkt);
+ }
+}
+
+/*
+ * Enqueue a burst of ops to one cryptodev queue pair and synchronously
+ * dequeue the completions. Ops that could not be enqueued are marked
+ * S_ERROR. Shared queue pairs are serialized with the global lock.
+ */
+static void dev_enq_deq(uint8_t cdev_id, int thread_id, crypto_op_t *op[], int num_op)
+{
+ int retry_count = 0;
+ int rc;
+ int queue_pairs_shared;
+ int queue_pair;
+ struct rte_crypto_op *deq_op[MAX_BURST];
+
+ queue_pairs_shared = op[0]->state.session->flags.cdev_qpairs_shared;
+ if (odp_unlikely(queue_pairs_shared))
+ queue_pair = thread_id % global->enabled_crypto_dev_qpairs[cdev_id];
+ else
+ queue_pair = thread_id;
+
+ /*
+ * If queue pairs are shared between multiple threads,
+ * we protect enqueue and dequeue using a lock. In addition,
+ * we keep the lock over the whole enqueue-dequeue sequence
+ * to guarantee that we get the same op back as what we
+ * enqueued. Otherwise synchronous ODP crypto operations
+ * could report the completion and status of an unrelated
+ * operation that was sent to the same queue pair from
+ * another thread.
+ */
+ if (odp_unlikely(queue_pairs_shared))
+ odp_spinlock_lock(&global->lock);
+
+ rc = rte_cryptodev_enqueue_burst(cdev_id, queue_pair,
+ (struct rte_crypto_op **)op, num_op);
+ if (odp_unlikely(rc < num_op)) {
+ if (odp_unlikely(queue_pairs_shared))
+ odp_spinlock_unlock(&global->lock);
+ /*
+ * This should not happen since we allocated enough
+ * descriptors for our max burst and there are no other ops
+ * in flight using this queue pair.
+ */
+ for (int n = rc; n < num_op; n++)
+ op[n]->state.status = S_ERROR;
+ _ODP_ERR("Failed to enqueue crypto operations\n");
+ /* Only wait for the ops that were actually enqueued. */
+ num_op = rc;
+ if (num_op == 0)
+ return;
+ }
+
+ /* There may be a delay until the crypto operation is completed. */
+ int num_dequeued = 0;
+
+ while (1) {
+ int num_left = num_op - num_dequeued;
+
+ rc = rte_cryptodev_dequeue_burst(cdev_id, queue_pair,
+ &deq_op[num_dequeued],
+ num_left);
+ num_dequeued += rc;
+ if (odp_likely(rc == num_left))
+ break;
+ if (odp_unlikely(rc == 0)) {
+ odp_time_wait_ns(DEQ_RETRY_DELAY_NS);
+ /* Only log once; we must keep polling regardless. */
+ if (++retry_count == MAX_DEQ_RETRIES) {
+ _ODP_ERR("Failed to dequeue crypto operations\n");
+ /*
+ * We cannot give up and return to the caller
+ * since some packets and crypto operations
+ * are still owned by the cryptodev.
+ */
+ }
+ }
+ };
+
+ if (odp_unlikely(queue_pairs_shared))
+ odp_spinlock_unlock(&global->lock);
+
+ /* Sanity: the device must return exactly the ops we enqueued. */
+ for (int n = 0; n < num_dequeued; n++) {
+ _ODP_ASSERT((crypto_op_t *)deq_op[n] == op[n]);
+ _ODP_ASSERT((odp_packet_t)deq_op[n]->sym->m_src == op[n]->state.pkt);
+ }
+}
+
+/*
+ * Submit prepared ops to their crypto devices in per-device bursts and
+ * wait for completion. Ops flagged with an error or no-op status are
+ * skipped. The loop advances a single scan index; ops swept into an
+ * earlier burst (already S_DEV) are skipped when the index reaches them.
+ *
+ * Fix: the previous 'done' counter counted swept ops twice (once via
+ * burst_size, again when idx passed their S_DEV status), which could
+ * terminate the loop before trailing ops for another device were ever
+ * enqueued. Iterating purely on idx handles every op exactly once.
+ */
+static void op_enq_deq(crypto_op_t *op[], int num_op)
+{
+ crypto_op_t *burst[MAX_BURST];
+ int burst_size;
+ uint8_t cdev_id;
+ int tid = odp_thread_id();
+ int idx = 0;
+
+ while (idx < num_op) {
+ /* Skip ops already handled or not destined to a device. */
+ if (op[idx]->state.status != S_OK) {
+ idx++;
+ continue;
+ }
+ burst[0] = op[idx];
+ burst_size = 1;
+ cdev_id = op[idx]->state.session->cdev_id;
+ op[idx]->state.status = S_DEV;
+ idx++;
+
+ /*
+ * Build a burst of ops that are for the same device
+ * and have not failed already and are not no-ops.
+ */
+ for (int n = idx; n < num_op; n++) {
+ if (odp_likely(op[n]->state.session->cdev_id == cdev_id) &&
+ odp_likely(op[n]->state.status == S_OK)) {
+ burst[burst_size++] = op[n];
+ op[n]->state.status = S_DEV;
+ }
+ }
+ /*
+ * Process burst.
+ */
+ dev_enq_deq(cdev_id, tid, burst, burst_size);
+ }
+}
+
+/*
+ * Translate the op state and cryptodev status into ODP per-packet
+ * crypto results, copy out the digest for encode operations, and
+ * mark the packet as a crypto event.
+ */
+static void op_finish(crypto_op_t *op)
+{
+ crypto_session_entry_t *session = op->state.session;
+ odp_packet_t pkt = op->state.pkt;
+ struct rte_crypto_op *rte_op = (struct rte_crypto_op *)op;
+ odp_crypto_alg_err_t rc_cipher;
+ odp_crypto_alg_err_t rc_auth;
+ odp_crypto_packet_result_t *op_result;
+
+ if (odp_likely(op->state.status == S_DEV)) {
+ /* cryptodev processed packet */
+ if (odp_likely(rte_op->status == RTE_CRYPTO_OP_STATUS_SUCCESS)) {
+ rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
+ rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
+ /* Encode: move the computed digest from the side
+ * buffer into the packet at the requested offset. */
+ if (session->p.op == ODP_CRYPTO_OP_ENCODE &&
+ session->p.auth_digest_len != 0) {
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ odp_packet_copy_from_mem(pkt,
+ op->state.hash_result_offset,
+ session->p.auth_digest_len,
+ pkt_hdr->crypto_digest_buf);
+ }
+ } else if (rte_op->status == RTE_CRYPTO_OP_STATUS_AUTH_FAILED) {
+ rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
+ rc_auth = ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+ } else {
+ rc_cipher = ODP_CRYPTO_ALG_ERR_OTHER;
+ rc_auth = ODP_CRYPTO_ALG_ERR_OTHER;
+ }
+ } else if (odp_unlikely(op->state.status == S_NOP)) {
+ /* null cipher & null auth, cryptodev skipped */
+ rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
+ rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
+ } else if (op->state.status == S_ERROR_LIN) {
+ /* packet linearization error before cryptodev enqueue */
+ rc_cipher = ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+ rc_auth = ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+ } else if (op->state.status == S_ERROR_HASH_OFFSET) {
+ /* hash offset not supported */
+ rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
+ rc_auth = ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+ } else {
+ /*
+ * other error before cryptodev enqueue
+ */
+ rc_cipher = ODP_CRYPTO_ALG_ERR_OTHER;
+ rc_auth = ODP_CRYPTO_ALG_ERR_OTHER;
+ }
+
+ /* Fill in result */
+ packet_subtype_set(pkt, ODP_EVENT_PACKET_CRYPTO);
+ op_result = &packet_hdr(pkt)->crypto_op_result;
+ op_result->cipher_status.alg_err = rc_cipher;
+ op_result->auth_status.alg_err = rc_auth;
+}
+
+/*
+ * Common synchronous crypto path: allocate ops, prepare, run through
+ * the cryptodevs, finish results. Returns the number of packets
+ * processed; output handles are written to pkt_out[].
+ */
+static
+int odp_crypto_int(const odp_packet_t pkt_in[],
+ odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ crypto_op_t *op[MAX_BURST];
+
+ /* op_alloc() may process fewer packets than requested. */
+ num_pkt = op_alloc(op, pkt_in, pkt_out, param, num_pkt);
+ if (odp_unlikely(num_pkt == 0))
+ return 0;
+
+ op_prepare(op, param, num_pkt);
+
+ op_enq_deq(op, num_pkt);
+
+ for (int n = 0; n < num_pkt; n++) {
+ op_finish(op[n]);
+ pkt_out[n] = op[n]->state.pkt;
+ rte_crypto_op_free((struct rte_crypto_op *)op[n]);
+ }
+ return num_pkt;
+}
+
+/*
+ * Synchronous packet crypto operation. The burst is capped to MAX_BURST;
+ * the return value tells the caller how many packets were consumed.
+ */
+int odp_crypto_op(const odp_packet_t pkt_in[],
+ odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ crypto_session_entry_t *session;
+ int i;
+
+ if (num_pkt > MAX_BURST)
+ num_pkt = MAX_BURST;
+
+ /* All sessions must have been created in synchronous mode. */
+ for (i = 0; i < num_pkt; i++) {
+ session = (crypto_session_entry_t *)(intptr_t)param[i].session;
+ _ODP_ASSERT(ODP_CRYPTO_SYNC == session->p.op_mode);
+ }
+ return odp_crypto_int(pkt_in, pkt_out, param, num_pkt);
+}
+
+/*
+ * Asynchronous packet crypto operation. Processing itself is done
+ * synchronously; completions are then delivered to the session
+ * completion queue as packet events. Returns the number of packets
+ * whose events were successfully enqueued.
+ */
+int odp_crypto_op_enq(const odp_packet_t pkt_in[],
+ const odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ odp_event_t event;
+ crypto_session_entry_t *session;
+ int i;
+ odp_packet_t out_pkts[MAX_BURST];
+
+ if (num_pkt > MAX_BURST)
+ num_pkt = MAX_BURST;
+
+ for (i = 0; i < num_pkt; i++) {
+ session = (crypto_session_entry_t *)(intptr_t)param[i].session;
+ _ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
+ _ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
+
+ /* For BASIC sessions out_pkts[i] is left unset here; it is
+ * not read by op_alloc() and gets overwritten below. */
+ if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ out_pkts[i] = pkt_out[i];
+ }
+
+ num_pkt = odp_crypto_int(pkt_in, out_pkts, param, num_pkt);
+
+ for (i = 0; i < num_pkt; i++) {
+ session = (crypto_session_entry_t *)(intptr_t)param[i].session;
+ event = odp_packet_to_event(out_pkts[i]);
+ /* On enqueue failure free the event and report only the
+ * packets delivered so far. */
+ if (odp_queue_enq(session->p.compl_queue, event)) {
+ odp_event_free(event);
+ break;
+ }
+ }
+
+ return i;
+}
diff --git a/platform/linux-dpdk/odp_dma.c b/platform/linux-dpdk/odp_dma.c
new file mode 100644
index 000000000..0a470ea23
--- /dev/null
+++ b/platform/linux-dpdk/odp_dma.c
@@ -0,0 +1,1174 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/dma.h>
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+
+#include <rte_version.h>
+
+#if RTE_VERSION >= RTE_VERSION_NUM(21, 11, 0, 0)
+
+#include <odp/api/align.h>
+#include <odp/api/buffer.h>
+#include <odp/api/debug.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/stash.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_global_data.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
+
+#include <rte_dmadev.h>
+#include <rte_mbuf_core.h>
+#include <rte_memory.h>
+
+#include <sys/queue.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#define MAX_SESSIONS CONFIG_MAX_DMA_SESSIONS
+#define CONF_BASE_STR "dma"
+#define CONF_SEG_LEN "max_seg_len"
+#define CONF_INFLIGHT "max_inflight"
+#define MAX_SEG_LEN UINT16_MAX
+#define MAX_TRANSFERS 256U
+#define DEF_VCHAN 0U
+#define MAX_DEQ 32U
+/* Conditional locking: sessions created in MT-unsafe mode skip locks. */
+#define LOCK_IF(cond, lock) \
+ do { \
+ if ((cond)) \
+ odp_ticketlock_lock((lock)); \
+ } while (0)
+#define UNLOCK_IF(cond, lock) \
+ do { \
+ if ((cond)) \
+ odp_ticketlock_unlock((lock)); \
+ } while (0)
+
+/* trs_map[] below is indexed by a uint16_t ring index. */
+ODP_STATIC_ASSERT(MAX_TRANSFERS <= UINT16_MAX + 1U, "Too many inflight transfers");
+ODP_STATIC_ASSERT(MAX_DEQ <= UINT8_MAX, "Too large dequeue burst");
+
+/* Capabilities and configured limits common to all matched DMA devices. */
+typedef struct {
+ struct rte_dma_info dev;
+ uint32_t num_devices;
+ uint32_t max_seg_len;
+ uint32_t max_transfers;
+} dev_info_t;
+
+/* Enqueue function: single-segment copy or scatter-gather copy. */
+typedef int32_t (*trs_fn_t)(int16_t dev_id, const odp_dma_transfer_param_t *trs_param);
+
+/* One in-flight transfer record; also doubles as the transfer ID. */
+typedef struct transfer_s {
+ TAILQ_ENTRY(transfer_s) q;
+
+ void *user_ptr;
+ odp_event_t ev;
+ odp_queue_t queue;
+ uint16_t idx;
+ int8_t status;
+ uint8_t is_m_none;
+} transfer_t;
+
+/* Per-session state; the odp_dma_t handle is a pointer to this struct. */
+typedef struct ODP_ALIGNED_CACHE {
+ TAILQ_HEAD(transfers_s, transfer_s) infl_trs;
+
+ odp_ticketlock_t lock;
+ odp_stash_t trs_stash;
+ trs_fn_t trs_fn;
+ odp_dma_param_t dma_param;
+ int32_t latest_idx;
+ int16_t dev_id;
+ uint8_t max_deq;
+ uint8_t is_mt;
+ uint8_t is_active;
+ transfer_t trs[MAX_TRANSFERS];
+ transfer_t *trs_map[UINT16_MAX + 1U];
+ char name[ODP_DMA_NAME_LEN];
+} dma_session_t;
+
+typedef struct {
+ odp_shm_t shm;
+ /* Buffer pool capability and default parameters */
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t pool_param;
+ dev_info_t dev_info;
+ dma_session_t sessions[MAX_SESSIONS];
+} dma_global_t;
+
+static dma_global_t *_odp_dma_glb;
+
+/* Two DMA devices are considered equivalent when all capability fields
+ * relevant to session setup compare equal. */
+static odp_bool_t is_matching_capa(const struct rte_dma_info *first,
+ const struct rte_dma_info *second)
+{
+ if (first->dev_capa != second->dev_capa)
+ return false;
+ if (first->max_vchans != second->max_vchans)
+ return false;
+ if (first->max_desc != second->max_desc)
+ return false;
+ if (first->min_desc != second->min_desc)
+ return false;
+
+ return first->max_sges == second->max_sges;
+}
+
+static odp_bool_t parse_options(dev_info_t *dev_info)
+{
+ /* No way to reliably get supported maximum segment length or maximum number of inflight
+ * transfers via RTE capabilities, so use config file values. */
+ int val;
+
+ if (!_odp_libconfig_lookup_ext_int(CONF_BASE_STR, NULL, CONF_SEG_LEN, &val)) {
+ _ODP_ERR("Unable to parse " CONF_SEG_LEN " configuration\n");
+ return false;
+ }
+
+ dev_info->max_seg_len = val;
+
+ if (!_odp_libconfig_lookup_ext_int(CONF_BASE_STR, NULL, CONF_INFLIGHT, &val)) {
+ _ODP_ERR("Unable to parse " CONF_INFLIGHT " configuration\n");
+ return false;
+ }
+
+ dev_info->max_transfers = val;
+ _ODP_DBG("DMA device: (%s):\n", dev_info->dev.dev_name);
+ _ODP_DBG(" max_seg_len: %u\n", dev_info->max_seg_len);
+ _ODP_DBG(" max_transfers: %u\n", dev_info->max_transfers);
+
+ return true;
+}
+
+/*
+ * Enumerate DPDK dmadev devices and record those whose capabilities
+ * match the first device found; each match is assigned to one session
+ * slot. Also resolves configured segment and inflight-transfer limits.
+ */
+static odp_bool_t get_dma_dev_info(dma_global_t *config)
+{
+ uint32_t num_devices = 0U, max_transfers;
+ int16_t id = 0;
+ struct rte_dma_info dev_info;
+ odp_bool_t is_dev_found = false;
+ dma_session_t *session;
+
+ memset(&dev_info, 0, sizeof(dev_info));
+
+ while (true) {
+ if (num_devices == MAX_SESSIONS)
+ break;
+
+ id = rte_dma_next_dev(id);
+
+ if (id == -1)
+ break;
+
+ if (rte_dma_info_get(id, &dev_info) < 0)
+ continue;
+
+ /* Find matching devices based on the ones matching the first found one. */
+ if (num_devices == 0U)
+ config->dev_info.dev = dev_info;
+
+ if (is_matching_capa(&config->dev_info.dev, &dev_info))
+ is_dev_found = true;
+
+ if (is_dev_found) {
+ session = &config->sessions[num_devices];
+ session->dev_id = id;
+ ++num_devices;
+ is_dev_found = false;
+ }
+
+ /* Advance past the current device for the next lookup. */
+ id++;
+ }
+
+ /* Based on scatter-gather support, set to actual maximum for ease of use later. */
+ config->dev_info.dev.max_sges = config->dev_info.dev.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG ?
+ config->dev_info.dev.max_sges : 1U;
+ config->dev_info.num_devices = num_devices;
+
+ if (!parse_options(&config->dev_info))
+ return false;
+
+ /* Clamp to device descriptor count, compile-time and configured maxima. */
+ max_transfers = _ODP_MIN(config->dev_info.dev.max_desc, MAX_TRANSFERS);
+ max_transfers = _ODP_MIN(config->dev_info.max_transfers, max_transfers);
+ config->dev_info.max_transfers = max_transfers;
+
+ return true;
+}
+
+/*
+ * Reserve and initialize global DMA state. Fix: the SHM reservation is
+ * now released on every post-reserve failure path (term_global is not
+ * run for a stage whose own init failed, so it would otherwise leak).
+ */
+int _odp_dma_init_global(void)
+{
+ odp_shm_t shm;
+
+ if (odp_global_ro.disable.dma) {
+ _ODP_PRINT("DMA is DISABLED\n");
+ return 0;
+ }
+
+ shm = odp_shm_reserve("_odp_dma_global", sizeof(dma_global_t), ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ _ODP_ERR("SHM reserve failed\n");
+ return -1;
+ }
+
+ _odp_dma_glb = odp_shm_addr(shm);
+
+ if (_odp_dma_glb == NULL) {
+ _ODP_ERR("SHM address resolution failed\n");
+ goto error;
+ }
+
+ memset(_odp_dma_glb, 0, sizeof(dma_global_t));
+ _odp_dma_glb->shm = shm;
+ odp_pool_param_init(&_odp_dma_glb->pool_param);
+
+ if (odp_pool_capability(&_odp_dma_glb->pool_capa)) {
+ _ODP_ERR("Pool capability failed\n");
+ goto error;
+ }
+
+ for (int i = 0; i < MAX_SESSIONS; i++)
+ odp_ticketlock_init(&_odp_dma_glb->sessions[i].lock);
+
+ if (!get_dma_dev_info(_odp_dma_glb)) {
+ _ODP_ERR("Device info parsing failed\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ /* Do not leak the reservation on failed init. */
+ (void)odp_shm_free(shm);
+ _odp_dma_glb = NULL;
+ return -1;
+}
+
+/* Close all matched DMA devices and release the global SHM area. */
+int _odp_dma_term_global(void)
+{
+ if (odp_global_ro.disable.dma || _odp_dma_glb == NULL)
+ return 0;
+
+ for (uint32_t i = 0U; i < _odp_dma_glb->dev_info.num_devices; ++i)
+ (void)rte_dma_close(_odp_dma_glb->sessions[i].dev_id);
+
+ if (odp_shm_free(_odp_dma_glb->shm)) {
+ _ODP_ERR("SHM free failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Report DMA capabilities. If the matched devices cannot do
+ * memory-to-memory copies (plain or scatter-gather), an all-zero
+ * capability structure is returned (success with no sessions).
+ */
+int odp_dma_capability(odp_dma_capability_t *capa)
+{
+ _ODP_ASSERT(capa != NULL);
+
+ memset(capa, 0, sizeof(*capa));
+
+ if (odp_global_ro.disable.dma) {
+ _ODP_ERR("DMA is disabled\n");
+ return -1;
+ }
+
+ if ((_odp_dma_glb->dev_info.dev.dev_capa & RTE_DMA_CAPA_MEM_TO_MEM) == 0U ||
+ ((_odp_dma_glb->dev_info.dev.dev_capa & RTE_DMA_CAPA_OPS_COPY) == 0U &&
+ (_odp_dma_glb->dev_info.dev.dev_capa & RTE_DMA_CAPA_OPS_COPY_SG) == 0U))
+ return 0;
+
+ capa->max_sessions = _odp_dma_glb->dev_info.num_devices;
+ capa->max_transfers = _odp_dma_glb->dev_info.max_transfers;
+ capa->max_src_segs = _odp_dma_glb->dev_info.dev.max_sges;
+ capa->max_dst_segs = _odp_dma_glb->dev_info.dev.max_sges;
+ capa->max_segs = 2U * _odp_dma_glb->dev_info.dev.max_sges;
+ capa->max_seg_len = _odp_dma_glb->dev_info.max_seg_len;
+ capa->compl_mode_mask = ODP_DMA_COMPL_SYNC | ODP_DMA_COMPL_NONE | ODP_DMA_COMPL_POLL |
+ ODP_DMA_COMPL_EVENT;
+ capa->queue_type_sched = 1;
+ capa->queue_type_plain = 1;
+ /* DMA completion pools are backed by buffer pools. */
+ capa->pool.max_pools = _odp_dma_glb->pool_capa.buf.max_pools;
+ capa->pool.max_num = _odp_dma_glb->pool_capa.buf.max_num;
+ capa->pool.max_uarea_size = _odp_dma_glb->pool_capa.buf.max_uarea_size;
+ capa->pool.uarea_persistence = _odp_dma_glb->pool_capa.buf.uarea_persistence;
+ capa->pool.min_cache_size = _odp_dma_glb->pool_capa.buf.min_cache_size;
+ capa->pool.max_cache_size = _odp_dma_glb->pool_capa.buf.max_cache_size;
+
+ return 0;
+}
+
+/* Initialize DMA session parameters to their documented defaults. */
+void odp_dma_param_init(odp_dma_param_t *param)
+{
+ _ODP_ASSERT(param != NULL);
+
+ memset(param, 0, sizeof(*param));
+ /* Defaults: plain memory-to-memory copy, usable from multiple
+ * threads, no ordering guarantees between transfers. */
+ param->type = ODP_DMA_TYPE_COPY;
+ param->direction = ODP_DMA_MAIN_TO_MAIN;
+ param->order = ODP_DMA_ORDER_NONE;
+ param->mt_mode = ODP_DMA_MT_SAFE;
+}
+
+/*
+ * Create a stash holding pointers to the 'num' transfer records; the
+ * stash acts as the transfer ID allocator. On a put failure, all
+ * already-stashed entries are drained and the stash destroyed.
+ */
+static odp_stash_t create_trs_stash(transfer_t trs[], odp_stash_op_mode_t mode, uint32_t num)
+{
+ odp_stash_param_t stash_param;
+ odp_stash_t stash;
+ uint32_t i;
+ uintptr_t tmp;
+ int32_t ret = 0;
+
+ odp_stash_param_init(&stash_param);
+ stash_param.put_mode = mode;
+ stash_param.get_mode = mode;
+ stash_param.num_obj = num;
+ stash_param.obj_size = sizeof(uintptr_t);
+ stash_param.cache_size = 0;
+ stash = odp_stash_create("_odp_dma_transfer_id", &stash_param);
+
+ if (stash == ODP_STASH_INVALID) {
+ _ODP_ERR("Stash create failed\n");
+ return ODP_STASH_INVALID;
+ }
+
+ for (i = 0U; i < num; ++i) {
+ tmp = (uintptr_t)&trs[i];
+ ret = odp_stash_put_ptr(stash, &tmp, 1);
+
+ if (ret != 1) {
+ _ODP_ERR("Stash put failed: %d\n", ret);
+ break;
+ }
+ }
+
+ if (ret != 1) {
+ /* Drain the 'i' entries that were stashed before failure. */
+ for (uint32_t j = 0; j < i; ++j) {
+ if (odp_stash_get_ptr(stash, &tmp, 1) != 1) {
+ _ODP_ERR("Stash get failed: %d\n", j);
+ break;
+ }
+ }
+
+ if (odp_stash_destroy(stash))
+ _ODP_ERR("Stash destroy failed\n");
+
+ return ODP_STASH_INVALID;
+ }
+
+ return stash;
+}
+
+/*
+ * Configure one dmadev with a single mem-to-mem virtual channel and
+ * start it. NOTE(review): dev_id is declared uint32_t while the RTE
+ * dmadev API and callers use int16_t — presumably harmless for valid
+ * (non-negative) IDs, but worth confirming.
+ */
+static odp_bool_t configure_dma_dev(uint32_t dev_id, uint16_t num_desc)
+{
+ const struct rte_dma_conf dev_config = {
+ .nb_vchans = 1 };
+ int ret;
+ const struct rte_dma_vchan_conf qconf = {
+ .direction = RTE_DMA_DIR_MEM_TO_MEM,
+ .nb_desc = num_desc };
+
+ ret = rte_dma_configure(dev_id, &dev_config);
+
+ if (ret < 0) {
+ _ODP_ERR("DMA device configuration failed for ID %u: %d\n", dev_id, ret);
+ return false;
+ }
+
+ ret = rte_dma_vchan_setup(dev_id, DEF_VCHAN, &qconf);
+
+ if (ret < 0) {
+ _ODP_ERR("DMA device vchannel setup failed for ID %u: %d\n", dev_id, ret);
+ return false;
+ }
+
+ ret = rte_dma_start(dev_id);
+
+ if (ret < 0) {
+ _ODP_ERR("DMA device start failed for ID %u: %d\n", dev_id, ret);
+ return false;
+ }
+
+ return true;
+}
+
+/* Drain all transfer IDs from the stash and destroy it. */
+static void destroy_trs_stash(odp_stash_t stash)
+{
+ uintptr_t tmp;
+ int32_t num;
+
+ while (true) {
+ num = odp_stash_get_ptr(stash, &tmp, 1);
+
+ if (num == 1)
+ continue;
+
+ /* Empty stash: done draining. */
+ if (num == 0)
+ break;
+
+ _ODP_ERR("Stash get failed: %d\n", num);
+ break;
+ }
+
+ if (odp_stash_destroy(stash))
+ _ODP_ERR("Stash destroy failed\n");
+}
+
+/* Resolve a segment descriptor to an IOVA: plain virtual addresses are
+ * translated directly, packet segments via their mbuf at the offset. */
+static inline rte_iova_t get_iova(odp_dma_data_format_t format, const odp_dma_seg_t *seg)
+{
+ return format == ODP_DMA_FORMAT_ADDR ?
+ rte_mem_virt2iova(seg->addr) :
+ rte_pktmbuf_iova_offset(pkt_to_mbuf(seg->packet), seg->offset);
+}
+
+/*
+ * Enqueue a single-segment copy. Returns the ring index of the
+ * submitted transfer, -1 when the descriptor ring is full (-ENOSPC),
+ * or -2 on any other error.
+ */
+static int32_t enqueue_single_trs(int16_t dev_id, const odp_dma_transfer_param_t *trs_param)
+{
+ rte_iova_t src = get_iova(trs_param->src_format, trs_param->src_seg),
+ dst = get_iova(trs_param->dst_format, trs_param->dst_seg),
+ int32_t ret;
+
+ ret = rte_dma_copy(dev_id, DEF_VCHAN, src, dst, trs_param->src_seg->len,
+ RTE_DMA_OP_FLAG_SUBMIT);
+
+ if (odp_unlikely(ret < 0))
+ return ret == -ENOSPC ? -1 : -2;
+
+ return ret;
+}
+
+/* Translate an array of ODP DMA segments into DPDK scatter-gather entries. */
+static inline void prepare_trs_sg_arr(odp_dma_data_format_t format, const odp_dma_seg_t segs[],
+ struct rte_dma_sge out_segs[], uint32_t num)
+{
+ for (uint32_t idx = 0U; idx < num; ++idx) {
+ out_segs[idx].addr = get_iova(format, &segs[idx]);
+ out_segs[idx].length = segs[idx].len;
+ }
+}
+
+/*
+ * Enqueue a scatter-gather copy. Returns the ring index of the
+ * submitted transfer, -1 when the descriptor ring is full (-ENOSPC),
+ * or -2 on any other error.
+ */
+static int32_t enqueue_sg_trs(int16_t dev_id, const odp_dma_transfer_param_t *trs_param)
+{
+ const uint32_t num_src = trs_param->num_src, num_dst = trs_param->num_dst;
+ struct rte_dma_sge src_segs[num_src], dst_segs[num_dst];
+ int32_t ret;
+
+ prepare_trs_sg_arr(trs_param->src_format, trs_param->src_seg, src_segs, num_src);
+ prepare_trs_sg_arr(trs_param->dst_format, trs_param->dst_seg, dst_segs, num_dst);
+ ret = rte_dma_copy_sg(dev_id, DEF_VCHAN, src_segs, dst_segs, num_src, num_dst,
+ RTE_DMA_OP_FLAG_SUBMIT);
+
+ if (odp_unlikely(ret < 0))
+ return ret == -ENOSPC ? -1 : -2;
+
+ return ret;
+}
+
+/*
+ * Create a DMA session: claim a free session slot (each slot is bound
+ * to one dmadev), create its transfer-ID stash, and configure and
+ * start the device. Returns ODP_DMA_INVALID on any failure.
+ */
+odp_dma_t odp_dma_create(const char *name, const odp_dma_param_t *param)
+{
+ odp_dma_capability_t dma_capa;
+ dma_session_t *temp, *session = NULL;
+
+ _ODP_ASSERT(param != NULL);
+
+ if (odp_global_ro.disable.dma) {
+ _ODP_ERR("DMA is disabled\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if ((param->direction != ODP_DMA_MAIN_TO_MAIN) || (param->type != ODP_DMA_TYPE_COPY)) {
+ _ODP_ERR("Bad DMA parameter\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if (param->compl_mode_mask == 0) {
+ _ODP_ERR("Empty compl mode mask\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if (odp_dma_capability(&dma_capa) < 0) {
+ _ODP_ERR("DMA capa failed\n");
+ return ODP_DMA_INVALID;
+ }
+
+ if (param->compl_mode_mask & ~dma_capa.compl_mode_mask) {
+ _ODP_ERR("Compl mode not supported\n");
+ return ODP_DMA_INVALID;
+ }
+
+ /* Claim a free slot: unlocked pre-check, then re-check under lock. */
+ for (int i = 0; i < MAX_SESSIONS; i++) {
+ temp = &_odp_dma_glb->sessions[i];
+
+ if (temp->is_active)
+ continue;
+
+ odp_ticketlock_lock(&temp->lock);
+
+ if (temp->is_active) {
+ odp_ticketlock_unlock(&temp->lock);
+ continue;
+ }
+
+ session = temp;
+ session->is_active = 1;
+ odp_ticketlock_unlock(&temp->lock);
+ break;
+ }
+
+ if (session == NULL) {
+ _ODP_ERR("Out of DMA sessions\n");
+ return ODP_DMA_INVALID;
+ }
+
+ /* Stash op mode follows the requested thread-safety mode. */
+ session->trs_stash = create_trs_stash(session->trs, param->mt_mode == ODP_DMA_MT_SAFE ?
+ ODP_STASH_OP_MT : ODP_STASH_OP_ST,
+ _odp_dma_glb->dev_info.max_transfers);
+
+ if (session->trs_stash == ODP_STASH_INVALID) {
+ session->is_active = 0;
+ return ODP_DMA_INVALID;
+ }
+
+ if (!configure_dma_dev(session->dev_id, _odp_dma_glb->dev_info.dev.max_desc)) {
+ destroy_trs_stash(session->trs_stash);
+ session->is_active = 0;
+ return ODP_DMA_INVALID;
+ }
+
+ /* Without scatter-gather support only single-segment copies work. */
+ session->trs_fn = _odp_dma_glb->dev_info.dev.max_sges == 1U ?
+ enqueue_single_trs : enqueue_sg_trs;
+ session->dma_param = *param;
+ TAILQ_INIT(&session->infl_trs);
+ session->latest_idx = -1;
+ session->max_deq = _ODP_MIN(MAX_DEQ, _odp_dma_glb->dev_info.max_transfers);
+ session->is_mt = param->mt_mode == ODP_DMA_MT_SAFE;
+ session->name[0] = 0;
+
+ if (name) {
+ strncpy(session->name, name, ODP_DMA_NAME_LEN - 1);
+ session->name[ODP_DMA_NAME_LEN - 1] = 0;
+ }
+
+ return (odp_dma_t)session;
+}
+
+/* An odp_dma_t handle is simply a pointer to the session structure. */
+static inline dma_session_t *dma_session_from_handle(odp_dma_t dma)
+{
+ uintptr_t addr = (uintptr_t)dma;
+
+ return (dma_session_t *)addr;
+}
+
+/* Stop the underlying dmadev, release the transfer stash and free the slot. */
+int odp_dma_destroy(odp_dma_t dma)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+
+ _ODP_ASSERT(dma != ODP_DMA_INVALID);
+
+ odp_ticketlock_lock(&session->lock);
+
+ if (session->is_active == 0) {
+ _ODP_ERR("Session not created\n");
+ odp_ticketlock_unlock(&session->lock);
+ return -1;
+ }
+
+ (void)rte_dma_stop(session->dev_id);
+ destroy_trs_stash(session->trs_stash);
+ session->is_active = 0;
+ odp_ticketlock_unlock(&session->lock);
+
+ return 0;
+}
+
+/*
+ * Find an active session by name. NOTE(review): 'name' is dereferenced
+ * without a NULL check — callers are presumably required to pass a
+ * valid string.
+ */
+odp_dma_t odp_dma_lookup(const char *name)
+{
+ dma_session_t *session;
+
+ for (int i = 0; i < MAX_SESSIONS; i++) {
+ session = &_odp_dma_glb->sessions[i];
+ odp_ticketlock_lock(&session->lock);
+
+ if (session->is_active == 0) {
+ odp_ticketlock_unlock(&session->lock);
+ continue;
+ }
+
+ if (strcmp(session->name, name) == 0) {
+ odp_ticketlock_unlock(&session->lock);
+ return (odp_dma_t)session;
+ }
+
+ odp_ticketlock_unlock(&session->lock);
+ }
+
+ return ODP_DMA_INVALID;
+}
+
+/* Total transfer length, or 0 when source and destination segment
+ * lists do not describe equally long transfers. */
+static uint32_t get_transfer_len(const odp_dma_transfer_param_t *trs_param)
+{
+ uint32_t total_src = 0U, total_dst = 0U, i;
+
+ for (i = 0U; i < trs_param->num_src; ++i)
+ total_src += trs_param->src_seg[i].len;
+
+ for (i = 0U; i < trs_param->num_dst; ++i)
+ total_dst += trs_param->dst_seg[i].len;
+
+ return total_src == total_dst ? total_src : 0U;
+}
+
+/*
+ * Poll completed transfers from the device and mark each matching
+ * transfer record done: status 1 on success, -1 on device error.
+ * Completion order follows the ring, so records are resolved through
+ * trs_map[] starting from the last completed ring index.
+ */
+static inline void dequeue_trs(dma_session_t *session)
+{
+ const uint16_t dev_id = session->dev_id;
+ uint16_t num_deq = 0U, done_idx, real_idx;
+ bool has_error = false, is_op_error;
+ const uint8_t max_deq = session->max_deq;
+ enum rte_dma_status_code status[max_deq];
+ int32_t *latest_idx = &session->latest_idx;
+
+ num_deq = rte_dma_completed(dev_id, DEF_VCHAN, max_deq, &done_idx, &has_error);
+
+ /* On error, re-fetch with per-transfer status codes. */
+ if (odp_unlikely(has_error))
+ num_deq = rte_dma_completed_status(dev_id, DEF_VCHAN, num_deq, &done_idx, status);
+
+ for (uint16_t i = 0U; i < num_deq; ++i) {
+ is_op_error = false;
+ /* uint16_t arithmetic wraps naturally with the ring index. */
+ real_idx = *latest_idx + 1U + i;
+
+ if (odp_unlikely(has_error && status[i] != RTE_DMA_STATUS_SUCCESSFUL)) {
+ is_op_error = true;
+ _ODP_DBG("Transfer failed, index: %u, status: %d\n", real_idx, status[i]);
+ }
+
+ session->trs_map[real_idx]->status = is_op_error ? -1 : 1;
+ }
+
+ if (num_deq)
+ *latest_idx = done_idx;
+}
+
+/* A transfer ID is simply a pointer to the transfer record. */
+static inline transfer_t *trs_from_id(odp_dma_transfer_id_t id)
+{
+ uintptr_t addr = (uintptr_t)id;
+
+ return (transfer_t *)addr;
+}
+
+/*
+ * Synchronous DMA transfer: enqueue and busy-poll until this transfer
+ * completes. Returns 1 on success, 0 when the device ring is full
+ * (retryable) and -1 on error.
+ *
+ * Fixes: (1) the segment-count asserts used '||', which is always true
+ * for unsigned values; '&&' enforces 1..max_sges as intended.
+ * (2) trs_map[idx] is now published before the enqueue lock is
+ * released, so a concurrent thread polling completions cannot read a
+ * stale map entry for this ring index.
+ */
+int odp_dma_transfer(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param,
+ odp_dma_result_t *result)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+ odp_dma_transfer_id_t id;
+ int32_t idx;
+ transfer_t *trs;
+
+ _ODP_ASSERT(dma != ODP_DMA_INVALID);
+ _ODP_ASSERT(trs_param != NULL);
+ _ODP_ASSERT(session->is_active > 0U);
+ _ODP_ASSERT(trs_param->num_src > 0U &&
+ trs_param->num_src <= _odp_dma_glb->dev_info.dev.max_sges);
+ _ODP_ASSERT(trs_param->num_dst > 0U &&
+ trs_param->num_dst <= _odp_dma_glb->dev_info.dev.max_sges);
+ _ODP_ASSERT(get_transfer_len(trs_param) != 0U);
+
+ id = odp_dma_transfer_id_alloc(dma);
+
+ if (odp_unlikely(id == ODP_DMA_TRANSFER_ID_INVALID))
+ return 0;
+
+ trs = trs_from_id(id);
+ trs->status = 0;
+
+ LOCK_IF(session->is_mt, &session->lock);
+ idx = session->trs_fn(session->dev_id, trs_param);
+
+ if (odp_unlikely(idx < 0)) {
+ UNLOCK_IF(session->is_mt, &session->lock);
+ odp_dma_transfer_id_free(dma, id);
+ /* -1 from the enqueue function means a full ring (retryable). */
+ return idx == -1 ? 0 : -1;
+ }
+
+ /* Publish the record under the lock before anyone can complete it. */
+ session->trs_map[idx] = trs;
+
+ while (trs->status == 0)
+ dequeue_trs(session);
+
+ UNLOCK_IF(session->is_mt, &session->lock);
+
+ if (result)
+ result->success = trs->status == 1;
+
+ odp_dma_transfer_id_free(dma, id);
+
+ return trs->status == 1 ? 1 : -1;
+}
+
+/* Perform multiple synchronous DMA transfers back-to-back, stopping at the
+ * first transfer that does not succeed. Returns the number of completed
+ * transfers, or the failing status when the very first transfer fails. */
+int odp_dma_transfer_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+			   odp_dma_result_t *result[], int num)
+{
+	int done;
+	int status = -1;
+
+	_ODP_ASSERT(num > 0);
+
+	for (done = 0; done < num; done++) {
+		odp_dma_result_t *res = result != NULL ? result[done] : NULL;
+
+		status = odp_dma_transfer(dma, trs_param[done], res);
+
+		if (odp_unlikely(status != 1))
+			break;
+	}
+
+	return done == 0 ? status : done;
+}
+
+/* Unlink a transfer from the in-flight list and return its ID to the stash.
+ * Note: the DMA handle is the session pointer itself, hence the cast. */
+static inline void free_ord_entry(struct transfers_s *head, transfer_t *entry,
+ dma_session_t *session)
+{
+ TAILQ_REMOVE(head, entry, q);
+ odp_dma_transfer_id_free((odp_dma_t)session, (odp_dma_transfer_id_t)(uintptr_t)entry);
+}
+
+/* Collect up to 'num' completed event-mode transfers targeting 'queue' and
+ * convert them to completion events. Completed COMPL_NONE transfers met on
+ * the walk are reaped as a side effect.
+ *
+ * NOTE(review): entries are unlinked inside TAILQ_FOREACH; this works since
+ * TAILQ_REMOVE leaves the removed node's links intact, but
+ * TAILQ_FOREACH_SAFE would be more robust. */
+static int get_ordered_evs(dma_session_t *session, odp_queue_t queue, _odp_event_hdr_t **ev_hdr,
+ int num)
+{
+ transfer_t *e;
+ int num_evs = 0;
+ odp_dma_result_t *res;
+
+ TAILQ_FOREACH(e, &session->infl_trs, q) {
+ /* Ordered mode: stop at an incomplete transfer for another
+ * queue. NOTE(review): an incomplete transfer for this same
+ * queue does not stop the walk, so later completions for the
+ * queue could be delivered first -- confirm intended ordering
+ * semantics. */
+ if (session->dma_param.order != ODP_DMA_ORDER_NONE && e->queue != queue &&
+ e->status == 0)
+ break;
+
+ if (e->is_m_none && e->status != 0) {
+ free_ord_entry(&session->infl_trs, e, session);
+ continue;
+ }
+
+ /* Skip transfers for other queues and unfinished ones. */
+ if (e->queue != queue || e->status == 0)
+ continue;
+
+ if (num - num_evs) {
+ /* The completion event is a buffer whose payload holds
+ * the transfer result. */
+ res = odp_buffer_addr((odp_buffer_t)(uintptr_t)e->ev);
+ res->success = e->status == 1;
+ res->user_ptr = e->user_ptr;
+ ev_hdr[num_evs++] = _odp_event_hdr(e->ev);
+ free_ord_entry(&session->infl_trs, e, session);
+ } else {
+ break;
+ }
+ }
+
+ return num_evs;
+}
+
+/* Poll the device and gather completed events for 'queue'. Non-blocking:
+ * returns 0 immediately when the session lock is contended. */
+static int dequeue_evs(dma_session_t *session, odp_queue_t queue, _odp_event_hdr_t **event_hdr,
+		       int num)
+{
+	int ret;
+
+	if (odp_ticketlock_trylock(&session->lock) == 0)
+		return 0;
+
+	dequeue_trs(session);
+	ret = get_ordered_evs(session, queue, event_hdr, num);
+	odp_ticketlock_unlock(&session->lock);
+
+	return ret;
+}
+
+/* Start an asynchronous DMA transfer with the requested completion mode.
+ *
+ * Returns 1 on success, 0 on temporary failure (retry), -1 on error. */
+int odp_dma_transfer_start(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param,
+			   const odp_dma_compl_param_t *compl_param)
+{
+	dma_session_t *session = dma_session_from_handle(dma);
+	odp_dma_transfer_id_t id = ODP_DMA_TRANSFER_ID_INVALID;
+	int32_t idx;
+	transfer_t *trs;
+
+	_ODP_ASSERT(dma != ODP_DMA_INVALID);
+	_ODP_ASSERT(trs_param != NULL);
+	_ODP_ASSERT(compl_param != NULL);
+	_ODP_ASSERT(session->is_active > 0U);
+	/* Segment counts must be in (0, max_sges]. These must be '&&', not
+	 * '||': with '||' the assertions were vacuous (always true). */
+	_ODP_ASSERT(trs_param->num_src > 0U &&
+		    trs_param->num_src <= _odp_dma_glb->dev_info.dev.max_sges);
+	_ODP_ASSERT(trs_param->num_dst > 0U &&
+		    trs_param->num_dst <= _odp_dma_glb->dev_info.dev.max_sges);
+	_ODP_ASSERT(get_transfer_len(trs_param) != 0U);
+
+	/* In poll mode the caller provides the transfer ID; otherwise one is
+	 * allocated here and freed when the completion is consumed. */
+	if (compl_param->compl_mode != ODP_DMA_COMPL_POLL) {
+		id = odp_dma_transfer_id_alloc(dma);
+
+		if (odp_unlikely(id == ODP_DMA_TRANSFER_ID_INVALID))
+			return 0;
+	}
+
+	LOCK_IF(session->is_mt, &session->lock);
+	idx = session->trs_fn(session->dev_id, trs_param);
+
+	if (odp_unlikely(idx < 0)) {
+		if (compl_param->compl_mode != ODP_DMA_COMPL_POLL)
+			odp_dma_transfer_id_free(dma, id);
+
+		UNLOCK_IF(session->is_mt, &session->lock);
+		return idx == -1 ? 0 : -1;
+	}
+
+	if (compl_param->compl_mode == ODP_DMA_COMPL_POLL) {
+		_ODP_ASSERT(compl_param->transfer_id != ODP_DMA_TRANSFER_ID_INVALID);
+
+		trs = trs_from_id(compl_param->transfer_id);
+		trs->ev = ODP_EVENT_INVALID;
+		trs->queue = ODP_QUEUE_INVALID;
+	} else {
+		trs = trs_from_id(id);
+		trs->ev = ODP_EVENT_INVALID;
+		trs->queue = ODP_QUEUE_INVALID;
+
+		if (compl_param->compl_mode == ODP_DMA_COMPL_EVENT) {
+			_ODP_ASSERT(compl_param->event != ODP_EVENT_INVALID);
+			_ODP_ASSERT(compl_param->queue != ODP_QUEUE_INVALID);
+
+			trs->ev = compl_param->event;
+			trs->queue = compl_param->queue;
+		}
+	}
+
+	trs->user_ptr = compl_param->user_ptr;
+	trs->idx = idx;
+	trs->status = 0;
+	trs->is_m_none = compl_param->compl_mode == ODP_DMA_COMPL_NONE;
+	TAILQ_INSERT_TAIL(&session->infl_trs, trs, q);
+	session->trs_map[idx] = trs;
+	UNLOCK_IF(session->is_mt, &session->lock);
+
+	/* TODO: Remove the following section once proper DMA-dequeue support in scheduling. */
+	if (compl_param->compl_mode == ODP_DMA_COMPL_EVENT) {
+		_odp_event_hdr_t *event_hdr = NULL;
+		int ret;
+
+		/* Busy-wait until a completion event for the queue is
+		 * available, then enqueue it directly. */
+		do {
+			ret = dequeue_evs(session, compl_param->queue, &event_hdr, 1);
+		} while (ret < 1);
+
+		if (odp_unlikely(odp_queue_enq(compl_param->queue, (odp_event_t)event_hdr) < 0))
+			_ODP_ABORT("Completion event enqueue failed\n");
+	}
+
+	return 1;
+}
+
+/* Start multiple DMA transfers back-to-back, stopping at the first one that
+ * fails to start. Returns the number of started transfers, or the failing
+ * status when the very first transfer fails. */
+int odp_dma_transfer_start_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+				 const odp_dma_compl_param_t *compl_param[], int num)
+{
+	int started;
+	int status = -1;
+
+	_ODP_ASSERT(num > 0);
+
+	for (started = 0; started < num; started++) {
+		status = odp_dma_transfer_start(dma, trs_param[started], compl_param[started]);
+
+		if (odp_unlikely(status != 1))
+			break;
+	}
+
+	return started == 0 ? status : started;
+}
+
+/* Resolve the status of polled transfer 'trs' with respect to ordering.
+ *
+ * Returns 0 if the transfer (or, in ordered mode, any earlier in-flight
+ * transfer) is still incomplete, otherwise the transfer status (1 success,
+ * -1 failure). -1 is also returned when the entry is not on the in-flight
+ * list. A finished entry is unlinked; its ID stays owned by the caller. */
+static int8_t get_ordered_polled(dma_session_t *session, const transfer_t *trs)
+{
+ transfer_t *e;
+ int8_t status = -1;
+
+ TAILQ_FOREACH(e, &session->infl_trs, q) {
+ /* Ordered mode: an earlier incomplete transfer blocks this one. */
+ if (session->dma_param.order != ODP_DMA_ORDER_NONE && e != trs &&
+ e->status == 0) {
+ status = 0;
+ break;
+ }
+
+ /* Reap completed fire-and-forget (COMPL_NONE) transfers on the
+ * way. NOTE(review): removal inside TAILQ_FOREACH works because
+ * TAILQ_REMOVE leaves the removed node's links intact, but
+ * TAILQ_FOREACH_SAFE would be more robust. */
+ if (e->is_m_none && e->status != 0) {
+ free_ord_entry(&session->infl_trs, e, session);
+ continue;
+ }
+
+ if (e != trs)
+ continue;
+
+ status = e->status;
+
+ /* Finished transfers leave the in-flight list. */
+ if (status != 0)
+ TAILQ_REMOVE(&session->infl_trs, e, q);
+
+ break;
+ }
+
+ return status;
+}
+
+/* Poll the completion status of a started transfer (POLL completion mode).
+ *
+ * Returns 1 when complete, 0 when still in flight (or, in ordered mode,
+ * blocked behind an earlier incomplete transfer), negative on error. */
+int odp_dma_transfer_done(odp_dma_t dma, odp_dma_transfer_id_t transfer_id,
+ odp_dma_result_t *result)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+ transfer_t *trs = trs_from_id(transfer_id);
+ int8_t status;
+
+ _ODP_ASSERT(dma != ODP_DMA_INVALID);
+ _ODP_ASSERT(transfer_id != ODP_DMA_TRANSFER_ID_INVALID);
+
+ LOCK_IF(session->is_mt, &session->lock);
+
+ /* Poll the device only when this transfer has not yet completed. */
+ if (!trs->status)
+ dequeue_trs(session);
+
+ status = get_ordered_polled(session, trs);
+ UNLOCK_IF(session->is_mt, &session->lock);
+
+ if (result) {
+ result->success = status == 1;
+ result->user_ptr = trs->user_ptr;
+ }
+
+ return status;
+}
+
+/* Allocate a transfer ID from the session's stash of preallocated records. */
+odp_dma_transfer_id_t odp_dma_transfer_id_alloc(odp_dma_t dma)
+{
+	dma_session_t *session = dma_session_from_handle(dma);
+	uintptr_t entry;
+
+	_ODP_ASSERT(dma != ODP_DMA_INVALID);
+
+	if (odp_unlikely(odp_stash_get_ptr(session->trs_stash, &entry, 1) != 1))
+		return ODP_DMA_TRANSFER_ID_INVALID;
+
+	return (odp_dma_transfer_id_t)entry;
+}
+
+/* Return a transfer ID to the session's stash. Aborts on stash failure,
+ * since a lost ID would permanently shrink the transfer pool. */
+void odp_dma_transfer_id_free(odp_dma_t dma, odp_dma_transfer_id_t transfer_id)
+{
+	dma_session_t *session = dma_session_from_handle(dma);
+	uintptr_t entry = (uintptr_t)transfer_id;
+
+	_ODP_ASSERT(dma != ODP_DMA_INVALID);
+	_ODP_ASSERT(transfer_id != ODP_DMA_TRANSFER_ID_INVALID);
+
+	if (odp_unlikely(odp_stash_put_ptr(session->trs_stash, &entry, 1) != 1))
+		_ODP_ABORT("Stash put failed\n");
+}
+
+/* Convert a DMA session handle to a printable integer. */
+uint64_t odp_dma_to_u64(odp_dma_t dma)
+{
+ return _odp_pri(dma);
+}
+
+/* Print DMA session and device debug info.
+ * Note: rte_dma_dump() writes to stdout, not through the ODP log function. */
+void odp_dma_print(odp_dma_t dma)
+{
+ const dma_session_t *session = dma_session_from_handle(dma);
+
+ _ODP_ASSERT(dma != ODP_DMA_INVALID);
+
+ _ODP_PRINT("\nDMA info\n");
+ _ODP_PRINT("--------\n");
+ _ODP_PRINT(" DMA handle 0x%" PRIx64 "\n", odp_dma_to_u64(dma));
+ _ODP_PRINT(" name %s\n", session->name);
+ _ODP_PRINT(" device info:\n\n");
+ (void)rte_dma_dump(session->dev_id, stdout);
+ _ODP_PRINT("\n");
+}
+
+/* Convert a DMA completion event handle to a printable integer. */
+uint64_t odp_dma_compl_to_u64(odp_dma_compl_t dma_compl)
+{
+ return _odp_pri(dma_compl);
+}
+
+/* Print DMA completion event debug info, including the transfer result when
+ * result metadata is available. */
+void odp_dma_compl_print(odp_dma_compl_t dma_compl)
+{
+	odp_dma_result_t res;
+	int rc;
+
+	_ODP_ASSERT(dma_compl != ODP_DMA_COMPL_INVALID);
+
+	rc = odp_dma_compl_result(dma_compl, &res);
+	_ODP_PRINT("\nDMA completion\n");
+	_ODP_PRINT("--------------\n");
+	_ODP_PRINT(" Compl event handle: 0x%" PRIx64 "\n", _odp_pri(dma_compl));
+
+	if (rc != 0) {
+		_ODP_PRINT(" No result metadata\n");
+	} else {
+		_ODP_PRINT(" Result: %s\n", res.success ? "success" : "fail");
+		_ODP_PRINT(" User pointer: 0x%" PRIx64 "\n", _odp_pri(res.user_ptr));
+	}
+
+	_ODP_PRINT("\n");
+}
+
+/* Initialize DMA completion pool parameters: zero everything, then set the
+ * default cache size from the global buffer pool defaults. */
+void odp_dma_pool_param_init(odp_dma_pool_param_t *pool_param)
+{
+ memset(pool_param, 0, sizeof(*pool_param));
+ pool_param->cache_size = _odp_dma_glb->pool_param.buf.cache_size;
+}
+
+/* Create a buffer pool for DMA completion events. Each buffer holds one
+ * odp_dma_result_t. Returns ODP_POOL_INVALID on parameter errors. */
+odp_pool_t odp_dma_pool_create(const char *name, const odp_dma_pool_param_t *pool_param)
+{
+	odp_pool_param_t param;
+	uint32_t num, uarea_size, cache_size;
+
+	/* Validate input like the other DMA API entry points do. */
+	_ODP_ASSERT(pool_param != NULL);
+
+	num = pool_param->num;
+	uarea_size = pool_param->uarea_size;
+	cache_size = pool_param->cache_size;
+
+	if (num > _odp_dma_glb->pool_capa.buf.max_num) {
+		_ODP_ERR("Too many DMA completion events: %u\n", num);
+		return ODP_POOL_INVALID;
+	}
+
+	if (uarea_size > _odp_dma_glb->pool_capa.buf.max_uarea_size) {
+		_ODP_ERR("Bad uarea size: %u\n", uarea_size);
+		return ODP_POOL_INVALID;
+	}
+
+	if (cache_size < _odp_dma_glb->pool_capa.buf.min_cache_size ||
+	    cache_size > _odp_dma_glb->pool_capa.buf.max_cache_size) {
+		_ODP_ERR("Bad cache size: %u\n", cache_size);
+		return ODP_POOL_INVALID;
+	}
+
+	odp_pool_param_init(&param);
+	param.type = ODP_POOL_BUFFER;
+	param.uarea_init.init_fn = pool_param->uarea_init.init_fn;
+	param.uarea_init.args = pool_param->uarea_init.args;
+	param.buf.num = num;
+	param.buf.uarea_size = uarea_size;
+	param.buf.cache_size = cache_size;
+	param.buf.size = sizeof(odp_dma_result_t);
+
+	return _odp_pool_create(name, &param, ODP_POOL_DMA_COMPL);
+}
+
+#else
+
+/* DMA disabled/unavailable build: the functions below are stubs that report
+ * zero capability and fail all operations. */
+int _odp_dma_init_global(void)
+{
+ return 0;
+}
+
+int _odp_dma_term_global(void)
+{
+ return 0;
+}
+
+/* Report an all-zero capability (no DMA sessions available). */
+int odp_dma_capability(odp_dma_capability_t *capa)
+{
+ _ODP_ASSERT(capa != NULL);
+
+ memset(capa, 0, sizeof(*capa));
+
+ return 0;
+}
+
+/* Session creation and lookup always fail when DMA is disabled. */
+void odp_dma_param_init(odp_dma_param_t *param ODP_UNUSED)
+{
+}
+
+odp_dma_t odp_dma_create(const char *name ODP_UNUSED, const odp_dma_param_t *param ODP_UNUSED)
+{
+ return ODP_DMA_INVALID;
+}
+
+int odp_dma_destroy(odp_dma_t dma ODP_UNUSED)
+{
+ return 0;
+}
+
+odp_dma_t odp_dma_lookup(const char *name ODP_UNUSED)
+{
+ return ODP_DMA_INVALID;
+}
+
+/* All transfer operations fail (-1) when DMA is disabled. */
+int odp_dma_transfer(odp_dma_t dma ODP_UNUSED,
+ const odp_dma_transfer_param_t *trs_param ODP_UNUSED,
+ odp_dma_result_t *result ODP_UNUSED)
+{
+ return -1;
+}
+
+int odp_dma_transfer_multi(odp_dma_t dma ODP_UNUSED,
+ const odp_dma_transfer_param_t *trs_param[] ODP_UNUSED,
+ odp_dma_result_t *result[] ODP_UNUSED, int num ODP_UNUSED)
+{
+ return -1;
+}
+
+int odp_dma_transfer_start(odp_dma_t dma ODP_UNUSED,
+ const odp_dma_transfer_param_t *trs_param ODP_UNUSED,
+ const odp_dma_compl_param_t *compl_param ODP_UNUSED)
+{
+ return -1;
+}
+
+int odp_dma_transfer_start_multi(odp_dma_t dma ODP_UNUSED,
+ const odp_dma_transfer_param_t *trs_param[] ODP_UNUSED,
+ const odp_dma_compl_param_t *compl_param[] ODP_UNUSED,
+ int num ODP_UNUSED)
+{
+ return -1;
+}
+
+int odp_dma_transfer_done(odp_dma_t dma ODP_UNUSED, odp_dma_transfer_id_t transfer_id ODP_UNUSED,
+ odp_dma_result_t *result ODP_UNUSED)
+{
+ return -1;
+}
+
+/* ID management, debug printing and pool creation stubs for disabled DMA. */
+odp_dma_transfer_id_t odp_dma_transfer_id_alloc(odp_dma_t dma ODP_UNUSED)
+{
+ return ODP_DMA_TRANSFER_ID_INVALID;
+}
+
+void odp_dma_transfer_id_free(odp_dma_t dma ODP_UNUSED,
+ odp_dma_transfer_id_t transfer_id ODP_UNUSED)
+{
+}
+
+uint64_t odp_dma_to_u64(odp_dma_t dma ODP_UNUSED)
+{
+ return _odp_pri(ODP_DMA_INVALID);
+}
+
+void odp_dma_print(odp_dma_t dma ODP_UNUSED)
+{
+}
+
+uint64_t odp_dma_compl_to_u64(odp_dma_compl_t dma_compl ODP_UNUSED)
+{
+ return _odp_pri(ODP_DMA_COMPL_INVALID);
+}
+
+void odp_dma_compl_print(odp_dma_compl_t dma_compl ODP_UNUSED)
+{
+}
+
+void odp_dma_pool_param_init(odp_dma_pool_param_t *pool_param ODP_UNUSED)
+{
+}
+
+odp_pool_t odp_dma_pool_create(const char *name ODP_UNUSED,
+ const odp_dma_pool_param_t *pool_param ODP_UNUSED)
+{
+ return ODP_POOL_INVALID;
+}
+
+#endif
diff --git a/platform/linux-dpdk/odp_errno.c b/platform/linux-dpdk/odp_errno.c
index 5fb698f26..2f03ee898 100644
--- a/platform/linux-dpdk/odp_errno.c
+++ b/platform/linux-dpdk/odp_errno.c
@@ -1,11 +1,10 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/errno.h>
-#include <odp_internal.h>
#include <string.h>
#include <stdio.h>
#include <odp_debug_internal.h>
@@ -24,9 +23,9 @@ void odp_errno_zero(void)
void odp_errno_print(const char *str)
{
if (str != NULL)
- printf("%s ", str);
-
- ODP_PRINT("%s\n", strerror(rte_errno));
+ _ODP_PRINT("%s %s\n", str, strerror(rte_errno));
+ else
+ _ODP_PRINT("%s\n", strerror(rte_errno));
}
const char *odp_errno_str(int errnum)
diff --git a/platform/linux-dpdk/odp_event.c b/platform/linux-dpdk/odp_event.c
new file mode 100644
index 000000000..0c2f3d22e
--- /dev/null
+++ b/platform/linux-dpdk/odp_event.c
@@ -0,0 +1,134 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/event.h>
+#include <odp/api/buffer.h>
+#include <odp/api/crypto.h>
+#include <odp/api/dma.h>
+#include <odp/api/packet.h>
+#include <odp/api/timer.h>
+#include <odp/api/pool.h>
+#include <odp/api/ml.h>
+
+#include <odp_buffer_internal.h>
+#include <odp_ipsec_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_event_vector_internal.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
+#include <odp/api/plat/timer_inlines.h>
+
+#include <odp/api/plat/event_inline_types.h>
+
+#include <odp/visibility_begin.h>
+
+/* Fill in event header field offsets for inline functions */
+/* Offsets resolve into the combined ODP event header ('hdr') and underlying
+ * DPDK mbuf ('mb') layout; consumed by the inlined event accessors. */
+const _odp_event_inline_offset_t
+_odp_event_inline_offset ODP_ALIGNED_CACHE = {
+ .event_type = offsetof(_odp_event_hdr_t, hdr.event_type),
+ .base_data = offsetof(_odp_event_hdr_t, mb.buf_addr),
+ .subtype = offsetof(_odp_event_hdr_t, hdr.subtype),
+ .flow_id = offsetof(_odp_event_hdr_t, hdr.flow_id),
+ .pool = offsetof(_odp_event_hdr_t, hdr.pool),
+ .buf_len = offsetof(_odp_event_hdr_t, mb.buf_len)
+};
+
+#include <odp/visibility_end.h>
+
+/* Dispatch an event to its type-specific free function. 'id' identifies the
+ * calling API entry point for event validation. Aborts on unknown type. */
+static inline void event_free(odp_event_t event, _odp_ev_id_t id)
+{
+ switch (odp_event_type(event)) {
+ case ODP_EVENT_BUFFER:
+ /* Validation return value is intentionally ignored on the free
+ * path. */
+ _odp_buffer_validate(odp_buffer_from_event(event), id);
+ odp_buffer_free(odp_buffer_from_event(event));
+ break;
+ case ODP_EVENT_PACKET:
+ _odp_packet_validate(odp_packet_from_event(event), id);
+ odp_packet_free(odp_packet_from_event(event));
+ break;
+ case ODP_EVENT_PACKET_VECTOR:
+ _odp_packet_vector_free_full(odp_packet_vector_from_event(event));
+ break;
+ case ODP_EVENT_TIMEOUT:
+ odp_timeout_free(odp_timeout_from_event(event));
+ break;
+ case ODP_EVENT_IPSEC_STATUS:
+ _odp_ipsec_status_free(_odp_ipsec_status_from_event(event));
+ break;
+ case ODP_EVENT_PACKET_TX_COMPL:
+ odp_packet_tx_compl_free(odp_packet_tx_compl_from_event(event));
+ break;
+ case ODP_EVENT_DMA_COMPL:
+ odp_dma_compl_free(odp_dma_compl_from_event(event));
+ break;
+ case ODP_EVENT_ML_COMPL:
+ odp_ml_compl_free(odp_ml_compl_from_event(event));
+ break;
+ default:
+ _ODP_ABORT("Invalid event type: %d\n", odp_event_type(event));
+ }
+}
+
+/* Free a single event of any type. */
+void odp_event_free(odp_event_t event)
+{
+ event_free(event, _ODP_EV_EVENT_FREE);
+}
+
+/* Free 'num' events; the events may be of different types. */
+void odp_event_free_multi(const odp_event_t event[], int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++)
+		event_free(event[i], _ODP_EV_EVENT_FREE_MULTI);
+}
+
+/* Free 'num' events originating from the same pool. This implementation
+ * handles them exactly like odp_event_free_multi(). */
+void odp_event_free_sp(const odp_event_t event[], int num)
+{
+	int i;
+
+	for (i = 0; i < num; i++)
+		event_free(event[i], _ODP_EV_EVENT_FREE_SP);
+}
+
+/* Convert an event handle to a printable integer. */
+uint64_t odp_event_to_u64(odp_event_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+/* Check that an event handle is valid and of a known type. Buffer and packet
+ * events additionally pass type-specific validation. Returns 1 if valid,
+ * 0 otherwise. */
+int odp_event_is_valid(odp_event_t event)
+{
+ if (event == ODP_EVENT_INVALID)
+ return 0;
+
+ /* Generic event header validation first. */
+ if (_odp_event_is_valid(event) == 0)
+ return 0;
+
+ switch (odp_event_type(event)) {
+ case ODP_EVENT_BUFFER:
+ /* Validators return non-zero on failure, hence the negation. */
+ return !_odp_buffer_validate(odp_buffer_from_event(event), _ODP_EV_EVENT_IS_VALID);
+ case ODP_EVENT_PACKET:
+ return !_odp_packet_validate(odp_packet_from_event(event), _ODP_EV_EVENT_IS_VALID);
+ case ODP_EVENT_TIMEOUT:
+ /* Fall through */
+ case ODP_EVENT_IPSEC_STATUS:
+ /* Fall through */
+ case ODP_EVENT_PACKET_VECTOR:
+ /* Fall through */
+ case ODP_EVENT_DMA_COMPL:
+ /* Fall through */
+ case ODP_EVENT_ML_COMPL:
+ /* Fall through */
+ case ODP_EVENT_PACKET_TX_COMPL:
+ break;
+ default:
+ /* Unknown type */
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/platform/linux-dpdk/odp_init.c b/platform/linux-dpdk/odp_init.c
index 6cea393bc..79c449f1d 100644
--- a/platform/linux-dpdk/odp_init.c
+++ b/platform/linux-dpdk/odp_init.c
@@ -1,446 +1,798 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp_posix_extensions.h>
-#include <odp_packet_dpdk.h>
+
#include <odp/api/init.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp/api/plat/thread_inlines.h>
+
#include <odp_debug_internal.h>
-#include <odp/api/debug.h>
-#include <unistd.h>
-#include <odp_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
#include <odp_schedule_if.h>
+#include <odp_shm_internal.h>
+
+#include <ctype.h>
#include <string.h>
#include <stdio.h>
-#include <linux/limits.h>
-#include <dirent.h>
#include <unistd.h>
-#include <stdlib.h>
-#include <errno.h>
-
-#define _ODP_FILES_FMT "odp-%d-"
-#define _ODP_TMPDIR "/tmp"
-
-#define MEMPOOL_OPS(hdl) extern void mp_hdlr_init_##hdl(void);
-MEMPOOL_OPS(ops_mp_mc)
-MEMPOOL_OPS(ops_sp_sc)
-MEMPOOL_OPS(ops_mp_sc)
-MEMPOOL_OPS(ops_sp_mc)
-MEMPOOL_OPS(ops_stack)
-
-#ifndef RTE_BUILD_SHARED_LIB
-/*
- * This function is not called from anywhere, it's only purpose is to make sure
- * that if ODP and DPDK are statically linked to an application, the GCC
- * constructors of mempool handlers are linked as well. Otherwise the linker
- * would omit them. It's not an issue with dynamic linking. */
-void refer_constructors(void);
-void refer_constructors(void) {
- mp_hdlr_init_ops_mp_mc();
- mp_hdlr_init_ops_sp_sc();
- mp_hdlr_init_ops_mp_sc();
- mp_hdlr_init_ops_sp_mc();
- mp_hdlr_init_ops_stack();
+#include <inttypes.h>
+
+#include <rte_config.h>
+#include <rte_debug.h>
+#include <rte_eal.h>
+#include <rte_string_fns.h>
+
+enum init_stage {
+ NO_INIT = 0, /* No init stages completed */
+ LIBCONFIG_INIT,
+ CPUMASK_INIT,
+ SYSINFO_INIT,
+ CPU_CYCLES_INIT,
+ TIME_INIT,
+ ISHM_INIT,
+ FDSERVER_INIT,
+ GLOBAL_RW_DATA_INIT,
+ HASH_INIT,
+ THREAD_INIT,
+ POOL_INIT,
+ EVENT_VALIDATION_INIT,
+ STASH_INIT,
+ QUEUE_INIT,
+ SCHED_INIT,
+ PKTIO_INIT,
+ TIMER_INIT,
+ RANDOM_INIT,
+ CRYPTO_INIT,
+ COMP_INIT,
+ CLASSIFICATION_INIT,
+ TRAFFIC_MNGR_INIT,
+ NAME_TABLE_INIT,
+ IPSEC_EVENTS_INIT,
+ IPSEC_SAD_INIT,
+ IPSEC_INIT,
+ DMA_INIT,
+ ML_INIT,
+ ALL_INIT /* All init stages completed */
+};
+
+odp_global_data_ro_t odp_global_ro;
+odp_global_data_rw_t *odp_global_rw;
+
+/* Global function pointers for inline header usage. The values are written
+ * during odp_init_global() (enables process mode support). */
+#include <odp/visibility_begin.h>
+
+odp_log_func_t ODP_PRINTF_FORMAT(2, 3) _odp_log_fn;
+odp_abort_func_t _odp_abort_fn;
+
+#include <odp/visibility_end.h>
+
+/* odp_init_local() call status */
+static __thread uint8_t init_local_called;
+
+static void disable_features(odp_global_data_ro_t *global_ro,
+ const odp_init_t *init_param)
+{
+ int disable_ipsec, disable_crypto;
+ int disable_dma;
+
+ if (init_param == NULL)
+ return;
+
+ disable_ipsec = init_param->not_used.feat.ipsec;
+ global_ro->disable.ipsec = disable_ipsec;
+
+ disable_crypto = init_param->not_used.feat.crypto;
+ /* Crypto can be disabled only if IPSec is disabled */
+ if (disable_ipsec && disable_crypto)
+ global_ro->disable.crypto = 1;
+
+ disable_dma = init_param->not_used.feat.dma;
+ global_ro->disable.dma = disable_dma;
+
+ /* DMA uses stash. Disable stash only when both are disabled. */
+ if (disable_dma && init_param->not_used.feat.stash)
+ global_ro->disable.stash = 1;
+
+ global_ro->disable.traffic_mngr = init_param->not_used.feat.tm;
+ global_ro->disable.compress = init_param->not_used.feat.compress;
+ global_ro->disable.ml = init_param->not_used.feat.ml;
}
-#endif
-static void print_dpdk_env_help(void)
+static int read_pci_config(char **pci_cmd)
{
- char prgname[] = "odpdpdk";
- char help_str[] = "--help";
- char *dpdk_argv[] = {prgname, help_str};
- int dpdk_argc = 2;
-
- ODP_ERR("Neither (char *)platform_params were provided to "
- "odp_init_global(),\n");
- ODP_ERR("nor ODP_PLATFORM_PARAMS environment variable were "
- "specified.\n");
- ODP_ERR("A string of DPDK command line arguments should be provided");
- ODP_ERR("Example: export ODP_PLATFORM_PARAMS=\"-n 4 --no-huge\"\n");
- ODP_ERR("Note: -c argument substitutes automatically from odp coremask\n");
- rte_eal_init(dpdk_argc, dpdk_argv);
+ const char *pci_str[2] = {"dpdk.pci_whitelist", "dpdk.pci_blacklist"};
+ char pci_type[2] = {'w', 'b'};
+ const int str_size = 100;
+ char *buf = NULL;
+ int pci_count;
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ /* get the size of the array */
+ pci_count = _odp_libconfig_lookup_array_str(pci_str[i], NULL, 0, 0);
+
+ if (pci_count < 0)
+ return -1;
+
+ /* skip if list is empty */
+ if (pci_count == 0)
+ continue;
+
+ char pci_list[pci_count][str_size];
+ char *pci_list_addr[pci_count];
+
+ for (j = 0; j < pci_count; j++)
+ pci_list_addr[j] = pci_list[j];
+
+ if (pci_count != _odp_libconfig_lookup_array_str(pci_str[i],
+ pci_list_addr,
+ pci_count, str_size))
+ return -1;
+
+ /* Buffer to concatenate list of '-w/-b <pci addr>' strings */
+ buf = malloc(pci_count * (str_size + 3));
+ if (buf == NULL) {
+ _ODP_ERR("PCI config buffer alloc fail\n");
+ return -1;
+ }
+
+ memset(buf, '\0', pci_count * str_size);
+ for (j = 0; j < pci_count; j++) {
+ char addr_str[str_size];
+
+ snprintf(addr_str, str_size, "-%c %s ", pci_type[i], pci_list[j]);
+ strcat(buf, addr_str);
+ }
+
+ _ODP_PRINT(" %s: %s\n\n", pci_str[i], buf);
+
+ /* No need to read blacklist if whitelist is defined */
+ *pci_cmd = buf;
+ return strlen(buf);
+ }
+
+ return 0;
}
+static int read_eal_cmdstr(char **eal_cmd)
+{
+ const char *dpdk_str = "dpdk.eal_params";
+ int length;
+ char *buf;
+
+ length = _odp_libconfig_lookup_str(dpdk_str, NULL, 0);
+ if (length <= 0)
+ return length;
+
+ buf = malloc(length);
+ if (buf == NULL) {
+ _ODP_ERR("DPDK EAL command string buffer alloc fail\n");
+ return -1;
+ }
+
+ if (_odp_libconfig_lookup_str(dpdk_str, buf, length) < 0) {
+ free(buf);
+ return -1;
+ }
+
+ _ODP_PRINT(" %s: %s\n\n", dpdk_str, buf);
-static int odp_init_dpdk(const char *cmdline)
+ *eal_cmd = buf;
+ return length;
+}
+
+static int _odp_init_dpdk(const char *cmdline)
{
- char **dpdk_argv;
int dpdk_argc;
- char *full_cmdline;
- int i, cmdlen;
- odp_cpumask_t mask;
- char mask_str[ODP_CPUMASK_STR_SIZE];
- int32_t masklen;
- cpu_set_t original_cpuset;
+ int i, cmdlen, pcicmdlen, ealcmdlen;
+ const char *str, *pci_str = "", *eal_str = "";
+ uint32_t mem_prealloc;
+ int val = 0;
+ char *pci_cmd = NULL, *eal_cmd = NULL;
+
+ _ODP_PRINT("DPDK config:\n");
+
+ str = "dpdk.process_mode_memory_mb";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ mem_prealloc = val;
+
+ _ODP_PRINT(" %s: %" PRIu32 "\n", str, mem_prealloc);
if (cmdline == NULL) {
cmdline = getenv("ODP_PLATFORM_PARAMS");
- if (cmdline == NULL) {
- print_dpdk_env_help();
- return -1;
- }
+ if (cmdline == NULL)
+ cmdline = "";
}
- CPU_ZERO(&original_cpuset);
- i = pthread_getaffinity_np(pthread_self(),
- sizeof(original_cpuset), &original_cpuset);
- if (i != 0) {
- ODP_ERR("Failed to read thread affinity: %d\n", i);
+ pcicmdlen = read_pci_config(&pci_cmd);
+ if (pcicmdlen < 0) {
+ _ODP_ERR("Error reading PCI config\n");
return -1;
}
- odp_cpumask_zero(&mask);
- for (i = 0; i < CPU_SETSIZE; i++) {
- if (CPU_ISSET(i, &original_cpuset)) {
- odp_cpumask_set(&mask, i);
- break;
- }
- }
- masklen = odp_cpumask_to_str(&mask, mask_str, ODP_CPUMASK_STR_SIZE);
-
- if (masklen < 0) {
- ODP_ERR("CPU mask error: d\n", masklen);
+ /* Read any additional EAL command string from config */
+ ealcmdlen = read_eal_cmdstr(&eal_cmd);
+ if (ealcmdlen < 0) {
+ _ODP_ERR("Error reading additional DPDK EAL command string\n");
+ if (pci_cmd != NULL)
+ free(pci_cmd);
return -1;
}
+ cmdlen = snprintf(NULL, 0, "odpdpdk --legacy-mem -m %" PRIu32 " %s ", mem_prealloc,
+ cmdline) + pcicmdlen + ealcmdlen;
- /* masklen includes the terminating null as well */
- full_cmdline = calloc(1, strlen("odpdpdk -c ") + masklen +
- strlen(" ") + strlen(cmdline));
+ if (pci_cmd != NULL)
+ pci_str = pci_cmd;
- /* first argument is facility log, simply bind it to odpdpdk for now.*/
- cmdlen = sprintf(full_cmdline, "odpdpdk -c %s %s", mask_str, cmdline);
+ if (eal_cmd != NULL)
+ eal_str = eal_cmd;
+
+ char full_cmdline[cmdlen];
+
+ /* First argument is facility log, simply bind it to odpdpdk for now. In
+ * process mode DPDK memory has to be preallocated. */
+ if (odp_global_ro.init_param.mem_model == ODP_MEM_MODEL_PROCESS)
+ cmdlen = snprintf(full_cmdline, cmdlen, "odpdpdk --legacy-mem -m %" PRIu32 " %s %s %s",
+ mem_prealloc, cmdline, pci_str, eal_str);
+ else
+ cmdlen = snprintf(full_cmdline, cmdlen, "odpdpdk %s %s %s",
+ cmdline, pci_str, eal_str);
+
+ if (pci_cmd != NULL)
+ free(pci_cmd);
+
+ if (eal_cmd != NULL)
+ free(eal_cmd);
for (i = 0, dpdk_argc = 1; i < cmdlen; ++i) {
- if (isspace(full_cmdline[i])) {
+ if (isspace(full_cmdline[i]))
++dpdk_argc;
- }
}
- dpdk_argv = malloc(dpdk_argc * sizeof(char *));
+
+ char *dpdk_argv[dpdk_argc];
dpdk_argc = rte_strsplit(full_cmdline, strlen(full_cmdline), dpdk_argv,
dpdk_argc, ' ');
for (i = 0; i < dpdk_argc; ++i)
- ODP_DBG("arg[%d]: %s\n", i, dpdk_argv[i]);
+ _ODP_DBG("arg[%d]: %s\n", i, dpdk_argv[i]);
fflush(stdout);
i = rte_eal_init(dpdk_argc, dpdk_argv);
- free(dpdk_argv);
- free(full_cmdline);
if (i < 0) {
- ODP_ERR("Cannot init the Intel DPDK EAL!\n");
+ _ODP_ERR("Cannot init the Intel DPDK EAL!\n");
return -1;
} else if (i + 1 != dpdk_argc) {
- ODP_DBG("Some DPDK args were not processed!\n");
- ODP_DBG("Passed: %d Consumed %d\n", dpdk_argc, i + 1);
+ _ODP_DBG("Some DPDK args were not processed!\n");
+ _ODP_DBG("Passed: %d Consumed %d\n", dpdk_argc, i + 1);
}
- ODP_DBG("rte_eal_init OK\n");
+ _ODP_DBG("rte_eal_init OK\n");
- i = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
- &original_cpuset);
- if (i)
- ODP_ERR("Failed to reset thread affinity: %d\n", i);
+ /* Reset to 0 to force getopt() internal initialization routine */
+ optind = 0;
return 0;
}
-struct odp_global_data_s odp_global_data;
+void odp_init_param_init(odp_init_t *param)
+{
+ memset(param, 0, sizeof(odp_init_t));
+}
+
+static int global_rw_data_init(void)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("_odp_global_rw_data",
+ sizeof(odp_global_data_rw_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ odp_global_rw = odp_shm_addr(shm);
+ if (odp_global_rw == NULL) {
+ _ODP_ERR("Global RW data shm reserve failed.\n");
+ return -1;
+ }
+
+ memset(odp_global_rw, 0, sizeof(odp_global_data_rw_t));
+
+ return 0;
+}
-/* remove all files staring with "odp-<pid>" from a directory "dir" */
-static int cleanup_files(const char *dirpath, int odp_pid)
+static int global_rw_data_term(void)
{
- struct dirent *e;
- DIR *dir;
- char prefix[PATH_MAX];
- char *fullpath;
- int d_len = strlen(dirpath);
- int p_len;
- int f_len;
-
- dir = opendir(dirpath);
- if (!dir) {
- /* ok if the dir does not exist. no much to delete then! */
- ODP_DBG("opendir failed for %s: %s\n",
- dirpath, strerror(errno));
- return 0;
- }
- snprintf(prefix, PATH_MAX, _ODP_FILES_FMT, odp_pid);
- p_len = strlen(prefix);
- while ((e = readdir(dir)) != NULL) {
- if (strncmp(e->d_name, prefix, p_len) == 0) {
- f_len = strlen(e->d_name);
- fullpath = malloc(d_len + f_len + 2);
- if (fullpath == NULL) {
- closedir(dir);
- return -1;
- }
- snprintf(fullpath, PATH_MAX, "%s/%s",
- dirpath, e->d_name);
- ODP_DBG("deleting obsolete file: %s\n", fullpath);
- if (unlink(fullpath))
- ODP_ERR("unlink failed for %s: %s\n",
- fullpath, strerror(errno));
- free(fullpath);
- }
- }
- closedir(dir);
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup("_odp_global_rw_data");
+ if (shm == ODP_SHM_INVALID) {
+ _ODP_ERR("Unable to find global RW data shm.\n");
+ return -1;
+ }
+
+ if (odp_shm_free(shm)) {
+ _ODP_ERR("Global RW data shm free failed.\n");
+ return -1;
+ }
return 0;
}
+static int term_global(enum init_stage stage)
+{
+ int rc = 0;
+
+ switch (stage) {
+ case ALL_INIT:
+ case ML_INIT:
+ if (_odp_ml_term_global()) {
+ _ODP_ERR("ODP ML term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case DMA_INIT:
+ if (_odp_dma_term_global()) {
+ _ODP_ERR("ODP DMA term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case IPSEC_INIT:
+ if (_odp_ipsec_term_global()) {
+ _ODP_ERR("ODP IPsec term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case IPSEC_SAD_INIT:
+ if (_odp_ipsec_sad_term_global()) {
+ _ODP_ERR("ODP IPsec SAD term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case IPSEC_EVENTS_INIT:
+ if (_odp_ipsec_events_term_global()) {
+ _ODP_ERR("ODP IPsec events term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case NAME_TABLE_INIT:
+ if (_odp_int_name_tbl_term_global()) {
+ _ODP_ERR("Name table term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case TRAFFIC_MNGR_INIT:
+ if (_odp_tm_term_global()) {
+ _ODP_ERR("TM term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CLASSIFICATION_INIT:
+ if (_odp_classification_term_global()) {
+ _ODP_ERR("ODP classification term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case COMP_INIT:
+ if (_odp_comp_term_global()) {
+ _ODP_ERR("ODP comp term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CRYPTO_INIT:
+ if (_odp_crypto_term_global()) {
+ _ODP_ERR("ODP crypto term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case TIMER_INIT:
+ if (_odp_timer_term_global()) {
+ _ODP_ERR("ODP timer term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case PKTIO_INIT:
+ if (_odp_pktio_term_global()) {
+ _ODP_ERR("ODP pktio term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case SCHED_INIT:
+ if (_odp_schedule_term_global()) {
+ _ODP_ERR("ODP schedule term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case QUEUE_INIT:
+ if (_odp_queue_term_global()) {
+ _ODP_ERR("ODP queue term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case STASH_INIT:
+ if (_odp_stash_term_global()) {
+ _ODP_ERR("ODP stash term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case EVENT_VALIDATION_INIT:
+ if (_odp_event_validation_term_global()) {
+ _ODP_ERR("ODP event validation term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case POOL_INIT:
+ if (_odp_pool_term_global()) {
+ _ODP_ERR("ODP buffer pool term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case THREAD_INIT:
+ if (_odp_thread_term_global()) {
+ _ODP_ERR("ODP thread term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case HASH_INIT:
+ if (_odp_hash_term_global()) {
+ _ODP_ERR("ODP hash term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case GLOBAL_RW_DATA_INIT:
+ if (global_rw_data_term()) {
+ _ODP_ERR("ODP global RW data term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ /* Needed to prevent compiler warning */
+ case FDSERVER_INIT:
+ case ISHM_INIT:
+ if (_odp_shm_term_global()) {
+ _ODP_ERR("ODP shm term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case TIME_INIT:
+ if (_odp_time_term_global()) {
+ _ODP_ERR("ODP time term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CPU_CYCLES_INIT:
+ case SYSINFO_INIT:
+ if (_odp_system_info_term()) {
+ _ODP_ERR("ODP system info term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CPUMASK_INIT:
+ if (_odp_cpumask_term_global()) {
+ _ODP_ERR("ODP cpumask term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case LIBCONFIG_INIT:
+ if (_odp_libconfig_term_global()) {
+ _ODP_ERR("ODP runtime config term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ default:
+ break;
+ }
+
+ return rc;
+}
+
int odp_init_global(odp_instance_t *instance,
const odp_init_t *params,
const odp_platform_init_t *platform_params)
{
- char *hpdir;
-
- memset(&odp_global_data, 0, sizeof(struct odp_global_data_s));
- odp_global_data.main_pid = getpid();
-
enum init_stage stage = NO_INIT;
- odp_global_data.log_fn = odp_override_log;
- odp_global_data.abort_fn = odp_override_abort;
+ memset(&odp_global_ro, 0, sizeof(odp_global_data_ro_t));
+ odp_global_ro.main_pid = getpid();
+ _odp_log_fn = odp_override_log;
+ _odp_abort_fn = odp_override_abort;
+
+ odp_init_param_init(&odp_global_ro.init_param);
if (params != NULL) {
+ odp_global_ro.init_param = *params;
+
if (params->log_fn != NULL)
- odp_global_data.log_fn = params->log_fn;
+ _odp_log_fn = params->log_fn;
if (params->abort_fn != NULL)
- odp_global_data.abort_fn = params->abort_fn;
+ _odp_abort_fn = params->abort_fn;
+ if (params->mem_model == ODP_MEM_MODEL_PROCESS)
+ odp_global_ro.shm_single_va = 1;
+ }
+
+ if (_odp_libconfig_init_global()) {
+ _ODP_ERR("ODP runtime config init failed.\n");
+ goto init_failed;
}
+ stage = LIBCONFIG_INIT;
- cleanup_files(_ODP_TMPDIR, odp_global_data.main_pid);
+ disable_features(&odp_global_ro, params);
- if (odp_cpumask_init_global(params)) {
- ODP_ERR("ODP cpumask init failed.\n");
+ if (_odp_cpumask_init_global(params)) {
+ _ODP_ERR("ODP cpumask init failed.\n");
goto init_failed;
}
stage = CPUMASK_INIT;
- if (odp_init_dpdk((const char *)platform_params)) {
- ODP_ERR("ODP dpdk init failed.\n");
+ if (_odp_init_dpdk((const char *)platform_params)) {
+ _ODP_ERR("ODP dpdk init failed.\n");
return -1;
}
- if (odp_time_init_global()) {
- ODP_ERR("ODP time init failed.\n");
+ if (_odp_system_info_init()) {
+ _ODP_ERR("ODP system_info init failed.\n");
goto init_failed;
}
- stage = TIME_INIT;
+ stage = SYSINFO_INIT;
- if (odp_system_info_init()) {
- ODP_ERR("ODP system_info init failed.\n");
+ if (_odp_cpu_cycles_init_global()) {
+ _ODP_ERR("ODP cpu cycle init failed.\n");
goto init_failed;
}
- hpdir = odp_global_data.hugepage_info.default_huge_page_dir;
- /* cleanup obsolete huge page files, if any */
- if (hpdir)
- cleanup_files(hpdir, odp_global_data.main_pid);
- stage = SYSINFO_INIT;
+ stage = CPU_CYCLES_INIT;
- if (_odp_fdserver_init_global()) {
- ODP_ERR("ODP fdserver init failed.\n");
+ if (_odp_time_init_global()) {
+ _ODP_ERR("ODP time init failed.\n");
goto init_failed;
}
- stage = FDSERVER_INIT;
+ stage = TIME_INIT;
- if (_odp_ishm_init_global()) {
- ODP_ERR("ODP ishm init failed.\n");
+ if (_odp_shm_init_global(params)) {
+ _ODP_ERR("ODP shm init failed.\n");
goto init_failed;
}
stage = ISHM_INIT;
- if (odp_thread_init_global()) {
- ODP_ERR("ODP thread init failed.\n");
+ if (global_rw_data_init()) {
+ _ODP_ERR("ODP global RW data init failed.\n");
+ goto init_failed;
+ }
+ stage = GLOBAL_RW_DATA_INIT;
+
+ if (_odp_hash_init_global()) {
+ _ODP_ERR("ODP hash init failed.\n");
+ goto init_failed;
+ }
+ stage = HASH_INIT;
+
+ if (_odp_thread_init_global()) {
+ _ODP_ERR("ODP thread init failed.\n");
goto init_failed;
}
stage = THREAD_INIT;
- if (odp_pool_init_global()) {
- ODP_ERR("ODP pool init failed.\n");
+ if (_odp_pool_init_global()) {
+ _ODP_ERR("ODP pool init failed.\n");
goto init_failed;
}
stage = POOL_INIT;
- if (odp_queue_init_global()) {
- ODP_ERR("ODP queue init failed.\n");
+ if (_odp_event_validation_init_global()) {
+ _ODP_ERR("ODP event validation init failed.\n");
+ goto init_failed;
+ }
+ stage = EVENT_VALIDATION_INIT;
+
+ if (_odp_stash_init_global()) {
+ _ODP_ERR("ODP stash init failed.\n");
+ goto init_failed;
+ }
+ stage = STASH_INIT;
+
+ if (_odp_queue_init_global()) {
+ _ODP_ERR("ODP queue init failed.\n");
goto init_failed;
}
stage = QUEUE_INIT;
- if (sched_fn->init_global()) {
- ODP_ERR("ODP schedule init failed.\n");
+ if (_odp_schedule_init_global()) {
+ _ODP_ERR("ODP schedule init failed.\n");
goto init_failed;
}
stage = SCHED_INIT;
- if (odp_pktio_init_global()) {
- ODP_ERR("ODP packet io init failed.\n");
+ if (_odp_pktio_init_global()) {
+ _ODP_ERR("ODP packet io init failed.\n");
goto init_failed;
}
stage = PKTIO_INIT;
- if (odp_timer_init_global()) {
- ODP_ERR("ODP timer init failed.\n");
+ if (_odp_timer_init_global(params)) {
+ _ODP_ERR("ODP timer init failed.\n");
goto init_failed;
}
stage = TIMER_INIT;
- if (odp_crypto_init_global()) {
- ODP_ERR("ODP crypto init failed.\n");
+ if (_odp_crypto_init_global()) {
+ _ODP_ERR("ODP crypto init failed.\n");
goto init_failed;
}
stage = CRYPTO_INIT;
- if (odp_classification_init_global()) {
- ODP_ERR("ODP classification init failed.\n");
+ if (_odp_comp_init_global()) {
+ _ODP_ERR("ODP comp init failed.\n");
+ goto init_failed;
+ }
+ stage = COMP_INIT;
+
+ if (_odp_classification_init_global()) {
+ _ODP_ERR("ODP classification init failed.\n");
goto init_failed;
}
stage = CLASSIFICATION_INIT;
- if (odp_tm_init_global()) {
- ODP_ERR("ODP traffic manager init failed\n");
+ if (_odp_tm_init_global()) {
+ _ODP_ERR("ODP traffic manager init failed\n");
goto init_failed;
}
stage = TRAFFIC_MNGR_INIT;
if (_odp_int_name_tbl_init_global()) {
- ODP_ERR("ODP name table init failed\n");
+ _ODP_ERR("ODP name table init failed\n");
+ goto init_failed;
+ }
+ stage = NAME_TABLE_INIT;
+
+ if (_odp_ipsec_events_init_global()) {
+ _ODP_ERR("ODP IPsec events init failed.\n");
+ goto init_failed;
+ }
+ stage = IPSEC_EVENTS_INIT;
+
+ if (_odp_ipsec_sad_init_global()) {
+ _ODP_ERR("ODP IPsec SAD init failed.\n");
+ goto init_failed;
+ }
+ stage = IPSEC_SAD_INIT;
+
+ if (_odp_ipsec_init_global()) {
+ _ODP_ERR("ODP IPsec init failed.\n");
+ goto init_failed;
+ }
+ stage = IPSEC_INIT;
+
+ if (_odp_dma_init_global()) {
+ _ODP_ERR("ODP DMA init failed.\n");
+ goto init_failed;
+ }
+ stage = DMA_INIT;
+
+ if (_odp_ml_init_global()) {
+ _ODP_ERR("ODP ML init failed.\n");
goto init_failed;
}
+ stage = ML_INIT;
/* Dummy support for single instance */
- *instance = (odp_instance_t)odp_global_data.main_pid;
+ *instance = (odp_instance_t)odp_global_ro.main_pid;
return 0;
init_failed:
- _odp_term_global(stage);
+ term_global(stage);
return -1;
}
int odp_term_global(odp_instance_t instance)
{
- if (instance != (odp_instance_t)odp_global_data.main_pid) {
- ODP_ERR("Bad instance.\n");
+ if (instance != (odp_instance_t)odp_global_ro.main_pid) {
+ _ODP_ERR("Bad instance.\n");
return -1;
}
- return _odp_term_global(ALL_INIT);
+ return term_global(ALL_INIT);
}
-int _odp_term_global(enum init_stage stage)
+static int term_local(enum init_stage stage)
{
int rc = 0;
+ int rc_thd = 0;
switch (stage) {
case ALL_INIT:
- case NAME_TABLE_INIT:
- if (_odp_int_name_tbl_term_global()) {
- ODP_ERR("Name table term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case TRAFFIC_MNGR_INIT:
- if (odp_tm_term_global()) {
- ODP_ERR("TM term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case CLASSIFICATION_INIT:
- if (odp_classification_term_global()) {
- ODP_ERR("ODP classification term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case CRYPTO_INIT:
- if (odp_crypto_term_global()) {
- ODP_ERR("ODP crypto term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case TIMER_INIT:
- if (odp_timer_term_global()) {
- ODP_ERR("ODP timer term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case PKTIO_INIT:
- if (odp_pktio_term_global()) {
- ODP_ERR("ODP pktio term failed.\n");
- rc = -1;
- }
- /* Fall through */
case SCHED_INIT:
- if (sched_fn->term_global()) {
- ODP_ERR("ODP schedule term failed.\n");
+ if (_odp_sched_fn->term_local()) {
+ _ODP_ERR("ODP schedule local term failed.\n");
rc = -1;
}
/* Fall through */
case QUEUE_INIT:
- if (odp_queue_term_global()) {
- ODP_ERR("ODP queue term failed.\n");
+ if (_odp_queue_fn->term_local()) {
+ _ODP_ERR("ODP queue local term failed.\n");
rc = -1;
}
/* Fall through */
case POOL_INIT:
- if (odp_pool_term_global()) {
- ODP_ERR("ODP buffer pool term failed.\n");
+ if (_odp_pool_term_local()) {
+ _ODP_ERR("ODP buffer pool local term failed.\n");
rc = -1;
}
/* Fall through */
- case THREAD_INIT:
- if (odp_thread_term_global()) {
- ODP_ERR("ODP thread term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case ISHM_INIT:
- if (_odp_ishm_term_global()) {
- ODP_ERR("ODP ishm term failed.\n");
+ case CRYPTO_INIT:
+ if (_odp_crypto_term_local()) {
+ _ODP_ERR("ODP crypto local term failed.\n");
rc = -1;
}
/* Fall through */
- case FDSERVER_INIT:
- if (_odp_fdserver_term_global()) {
- ODP_ERR("ODP fdserver term failed.\n");
+ case RANDOM_INIT:
+ if (_odp_random_term_local()) {
+ _ODP_ERR("ODP random local term failed.\n");
rc = -1;
}
/* Fall through */
- case SYSINFO_INIT:
- if (odp_system_info_term()) {
- ODP_ERR("ODP system info term failed.\n");
+ case TIMER_INIT:
+ if (_odp_timer_term_local()) {
+ _ODP_ERR("ODP timer local term failed.\n");
rc = -1;
}
/* Fall through */
- case TIME_INIT:
- if (odp_time_term_global()) {
- ODP_ERR("ODP time term failed.\n");
+ case THREAD_INIT:
+ rc_thd = _odp_thread_term_local();
+ if (rc_thd < 0) {
+ _ODP_ERR("ODP thread local term failed.\n");
rc = -1;
+ } else {
+ if (!rc)
+ rc = (rc_thd == 0) ? 0 : 1;
}
/* Fall through */
- case CPUMASK_INIT:
- if (odp_cpumask_term_global()) {
- ODP_ERR("ODP cpumask term failed.\n");
+ case ISHM_INIT:
+ if (_odp_shm_term_local()) {
+ _ODP_ERR("ODP shm local term failed.\n");
rc = -1;
}
/* Fall through */
- case NO_INIT:
- ;
+ default:
+ break;
}
return rc;
@@ -450,37 +802,68 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
{
enum init_stage stage = NO_INIT;
- if (instance != (odp_instance_t)odp_global_data.main_pid) {
- ODP_ERR("Bad instance.\n");
+ if (instance != (odp_instance_t)odp_global_ro.main_pid) {
+ _ODP_ERR("Bad instance.\n");
+ goto init_fail;
+ }
+
+ /* Detect if odp_init_local() has been already called from this thread */
+ if (getpid() == odp_global_ro.main_pid && init_local_called) {
+ _ODP_ERR("%s() called multiple times by the same thread\n", __func__);
goto init_fail;
}
+ init_local_called = 1;
- if (_odp_ishm_init_local()) {
- ODP_ERR("ODP ishm local init failed.\n");
+ if (_odp_shm_init_local()) {
+ _ODP_ERR("ODP shm local init failed.\n");
goto init_fail;
}
stage = ISHM_INIT;
- if (odp_thread_init_local(thr_type)) {
- ODP_ERR("ODP thread local init failed.\n");
+ if (_odp_thread_init_local(thr_type)) {
+ _ODP_ERR("ODP thread local init failed.\n");
goto init_fail;
}
stage = THREAD_INIT;
- if (odp_pktio_init_local()) {
- ODP_ERR("ODP packet io local init failed.\n");
+ if (_odp_pktio_init_local()) {
+ _ODP_ERR("ODP packet io local init failed.\n");
goto init_fail;
}
stage = PKTIO_INIT;
- if (odp_pool_init_local()) {
- ODP_ERR("ODP pool local init failed.\n");
+ if (_odp_timer_init_local()) {
+ _ODP_ERR("ODP timer local init failed.\n");
+ goto init_fail;
+ }
+ stage = TIMER_INIT;
+
+ if (_odp_random_init_local()) {
+ _ODP_ERR("ODP random local init failed.\n");
+ goto init_fail;
+ }
+ stage = RANDOM_INIT;
+
+ if (_odp_crypto_init_local()) {
+ _ODP_ERR("ODP crypto local init failed.\n");
+ goto init_fail;
+ }
+ stage = CRYPTO_INIT;
+
+ if (_odp_pool_init_local()) {
+ _ODP_ERR("ODP pool local init failed.\n");
goto init_fail;
}
stage = POOL_INIT;
- if (sched_fn->init_local()) {
- ODP_ERR("ODP schedule local init failed.\n");
+ if (_odp_queue_fn->init_local()) {
+ _ODP_ERR("ODP queue local init failed.\n");
+ goto init_fail;
+ }
+ stage = QUEUE_INIT;
+
+ if (_odp_sched_fn->init_local()) {
+ _ODP_ERR("ODP schedule local init failed.\n");
goto init_fail;
}
/* stage = SCHED_INIT; */
@@ -488,58 +871,52 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
return 0;
init_fail:
- _odp_term_local(stage);
+ term_local(stage);
return -1;
}
int odp_term_local(void)
{
- return _odp_term_local(ALL_INIT);
+ /* Check that odp_init_local() has been called by this thread */
+ if (!init_local_called) {
+ _ODP_ERR("%s() called by a non-initialized thread\n", __func__);
+ return -1;
+ }
+ init_local_called = 0;
+
+ return term_local(ALL_INIT);
}
-int _odp_term_local(enum init_stage stage)
+int odp_term_abnormal(odp_instance_t instance, uint64_t flags, void *data ODP_UNUSED)
{
- int rc = 0;
- int rc_thd = 0;
+ rte_dump_stack();
- switch (stage) {
- case ALL_INIT:
+ if (flags & ODP_TERM_FROM_SIGH)
+ /* Called from signal handler, not safe to terminate with local/global,
+ * return with failure as not able to perform all actions */
+ return -1;
- case SCHED_INIT:
- if (sched_fn->term_local()) {
- ODP_ERR("ODP schedule local term failed.\n");
- rc = -1;
- }
- /* Fall through */
+ if (odp_term_local() < 0) {
+ _ODP_ERR("ODP local terminate failed.\n");
+ return -2;
+ }
- case POOL_INIT:
- if (odp_pool_term_local()) {
- ODP_ERR("ODP buffer pool local term failed.\n");
- rc = -1;
- }
- /* Fall through */
+ if (odp_term_global(instance) < 0) {
+ _ODP_ERR("ODP global terminate failed.\n");
+ return -3;
+ }
- case THREAD_INIT:
- rc_thd = odp_thread_term_local();
- if (rc_thd < 0) {
- ODP_ERR("ODP thread local term failed.\n");
- rc = -1;
- } else {
- if (!rc)
- rc = rc_thd;
- }
- /* Fall through */
+ return 0;
+}
- case ISHM_INIT:
- if (_odp_ishm_term_local()) {
- ODP_ERR("ODP ishm local term failed.\n");
- rc = -1;
- }
- /* Fall through */
+void odp_log_thread_fn_set(odp_log_func_t func)
+{
+ _odp_this_thread->log_fn = func;
+}
- default:
- break;
- }
+int odp_instance(odp_instance_t *instance)
+{
+ *instance = (odp_instance_t)odp_global_ro.main_pid;
- return rc;
+ return 0;
}
diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c
index 68ad26b29..bcd2c2fb3 100644
--- a/platform/linux-dpdk/odp_packet.c
+++ b/platform/linux-dpdk/odp_packet.c
@@ -1,18 +1,42 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/align.h>
+#include <odp/api/buffer.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/hash.h>
+#include <odp/api/hints.h>
#include <odp/api/packet.h>
-#include <odp_packet_internal.h>
+#include <odp/api/packet_flags.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/proto_stats.h>
+#include <odp/api/timer.h>
+
+#include <odp/api/plat/byteorder_inlines.h>
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_io_inlines.h>
+
+#include <odp_chksum_internal.h>
#include <odp_debug_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/byteorder.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_parse_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_print_internal.h>
+
+#include <rte_version.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
+#include <protocols/sctp.h>
#include <protocols/tcp.h>
#include <protocols/udp.h>
@@ -21,302 +45,343 @@
#include <stddef.h>
#include <inttypes.h>
+#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
+ #define RTE_MBUF_F_RX_RSS_HASH PKT_RX_RSS_HASH
+#endif
+
+#include <odp/visibility_begin.h>
+
/* Fill in packet header field offsets for inline functions */
const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = {
- .mb = offsetof(odp_packet_hdr_t, buf_hdr.mb),
- .pool = offsetof(odp_packet_hdr_t, buf_hdr.pool_hdl),
+ .mb = offsetof(odp_packet_hdr_t, mb),
+ .pool = offsetof(odp_packet_hdr_t, event_hdr.pool),
.input = offsetof(odp_packet_hdr_t, input),
- .user_ptr = offsetof(odp_packet_hdr_t, buf_hdr.buf_ctx),
+ .user_ptr = offsetof(odp_packet_hdr_t, user_ptr),
+ .l2_offset = offsetof(odp_packet_hdr_t, p.l2_offset),
+ .l3_offset = offsetof(odp_packet_hdr_t, p.l3_offset),
+ .l4_offset = offsetof(odp_packet_hdr_t, p.l4_offset),
.timestamp = offsetof(odp_packet_hdr_t, timestamp),
.input_flags = offsetof(odp_packet_hdr_t, p.input_flags),
- .buf_addr = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- offsetof(const struct rte_mbuf, buf_addr),
- .data = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- offsetof(struct rte_mbuf, data_off),
- .pkt_len = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- (size_t)&rte_pktmbuf_pkt_len((struct rte_mbuf *)0),
- .seg_len = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- (size_t)&rte_pktmbuf_data_len((struct rte_mbuf *)0),
- .nb_segs = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- offsetof(struct rte_mbuf, nb_segs),
- .udata_len = offsetof(odp_packet_hdr_t, uarea_size),
- .udata = sizeof(odp_packet_hdr_t),
- .rss = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- offsetof(struct rte_mbuf, hash.rss),
- .ol_flags = offsetof(odp_packet_hdr_t, buf_hdr.mb) +
- offsetof(struct rte_mbuf, ol_flags),
- .rss_flag = PKT_RX_RSS_HASH
+ .flags = offsetof(odp_packet_hdr_t, p.flags),
+ .cls_mark = offsetof(odp_packet_hdr_t, cls_mark),
+ .ipsec_ctx = offsetof(odp_packet_hdr_t, ipsec_ctx),
+ .crypto_op = offsetof(odp_packet_hdr_t, crypto_op_result),
+ .buf_addr = offsetof(odp_packet_hdr_t, mb.buf_addr),
+ .data = offsetof(odp_packet_hdr_t, mb.data_off),
+ .pkt_len = offsetof(odp_packet_hdr_t, mb.pkt_len),
+ .seg_len = offsetof(odp_packet_hdr_t, mb.data_len),
+ .nb_segs = offsetof(odp_packet_hdr_t, mb.nb_segs),
+ .user_area = offsetof(odp_packet_hdr_t, uarea_addr),
+ .rss = offsetof(odp_packet_hdr_t, mb.hash.rss),
+ .ol_flags = offsetof(odp_packet_hdr_t, mb.ol_flags),
+ .rss_flag = RTE_MBUF_F_RX_RSS_HASH
};
-struct rte_mbuf dummy;
-ODP_STATIC_ASSERT(sizeof(dummy.data_off) == sizeof(uint16_t),
+#include <odp/visibility_end.h>
+
+/* Catch if DPDK mbuf members sizes have changed */
+struct rte_mbuf _odp_dummy_mbuf;
+ODP_STATIC_ASSERT(sizeof(_odp_dummy_mbuf.data_off) == sizeof(uint16_t),
"data_off should be uint16_t");
-ODP_STATIC_ASSERT(sizeof(dummy.pkt_len) == sizeof(uint32_t),
+ODP_STATIC_ASSERT(sizeof(_odp_dummy_mbuf.pkt_len) == sizeof(uint32_t),
"pkt_len should be uint32_t");
-ODP_STATIC_ASSERT(sizeof(dummy.data_len) == sizeof(uint16_t),
+ODP_STATIC_ASSERT(sizeof(_odp_dummy_mbuf.data_len) == sizeof(uint16_t),
"data_len should be uint16_t");
-ODP_STATIC_ASSERT(sizeof(dummy.hash.rss) == sizeof(uint32_t),
+ODP_STATIC_ASSERT(sizeof(_odp_dummy_mbuf.nb_segs) == sizeof(uint16_t),
+ "nb_segs should be uint16_t");
+ODP_STATIC_ASSERT(sizeof(_odp_dummy_mbuf.hash.rss) == sizeof(uint32_t),
"hash.rss should be uint32_t");
-ODP_STATIC_ASSERT(sizeof(dummy.ol_flags) == sizeof(uint64_t),
+ODP_STATIC_ASSERT(sizeof(_odp_dummy_mbuf.ol_flags) == sizeof(uint64_t),
"ol_flags should be uint64_t");
-/*
- *
- * Alloc and free
- * ********************************************************
+
+/* Check that invalid values are the same. Some versions of Clang and pedantic
+ * build have trouble with the strong type casting, and complain that these
+ * invalid values are not integral constants.
*
- */
+ * Invalid values are required to be equal for _odp_buffer_is_valid() to work
+ * properly. */
+#ifndef __clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+ODP_STATIC_ASSERT(ODP_PACKET_INVALID == 0, "Packet invalid not 0");
+ODP_STATIC_ASSERT(ODP_BUFFER_INVALID == 0, "Buffer invalid not 0");
+ODP_STATIC_ASSERT(ODP_EVENT_INVALID == 0, "Event invalid not 0");
+ODP_STATIC_ASSERT(ODP_PACKET_VECTOR_INVALID == 0, "Packet vector invalid not 0");
+ODP_STATIC_ASSERT(ODP_PACKET_TX_COMPL_INVALID == 0, "Packet TX completion invalid not 0");
+ODP_STATIC_ASSERT(ODP_TIMEOUT_INVALID == 0, "Timeout invalid not 0");
+#pragma GCC diagnostic pop
+#endif
-static inline odp_buffer_t buffer_handle(odp_packet_hdr_t *pkt_hdr)
+/* Calculate the number of segments */
+static inline int num_segments(uint32_t len, uint32_t seg_len)
{
- return pkt_hdr->buf_hdr.handle.handle;
+ int num = 1;
+
+ if (odp_unlikely(len > seg_len)) {
+ num = len / seg_len;
+
+ if (odp_likely((num * seg_len) != len))
+ num += 1;
+ }
+
+ return num;
}
-static inline odp_packet_hdr_t *buf_to_packet_hdr(odp_buffer_t buf)
+static inline int packet_reset(odp_packet_t pkt, uint32_t len)
{
- return (odp_packet_hdr_t *)buf_hdl_to_hdr(buf);
+ odp_packet_hdr_t *const pkt_hdr = packet_hdr(pkt);
+ struct rte_mbuf *ms, *mb = &pkt_hdr->mb;
+ uint8_t nb_segs = 0;
+ int32_t lenleft = len;
+
+ packet_init(pkt_hdr, ODP_PKTIO_INVALID);
+
+ mb->port = 0xff;
+ mb->pkt_len = len;
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->vlan_tci = 0;
+ nb_segs = 1;
+
+ if (RTE_PKTMBUF_HEADROOM + lenleft <= mb->buf_len) {
+ mb->data_len = lenleft;
+ } else {
+ mb->data_len = mb->buf_len - RTE_PKTMBUF_HEADROOM;
+ lenleft -= mb->data_len;
+ ms = mb->next;
+ while (lenleft > 0) {
+ nb_segs++;
+ ms->data_len = lenleft <= ms->buf_len ?
+ lenleft : ms->buf_len;
+ lenleft -= ms->buf_len;
+ ms = ms->next;
+ }
+ }
+
+ mb->nb_segs = nb_segs;
+ return 0;
}
-static inline void packet_parse_disable(odp_packet_hdr_t *pkt_hdr)
+/* Reset unmodified single segment packet after rte_pktmbuf_alloc(), which has already called
+ * rte_pktmbuf_reset() internally. */
+static inline void packet_reset_fresh(odp_packet_t pkt, uint32_t len)
{
- pkt_hdr->p.input_flags.parsed_l2 = 1;
- pkt_hdr->p.parsed_layers = LAYER_ALL;
+ struct rte_mbuf *mb = pkt_to_mbuf(pkt);
+
+ packet_init(packet_hdr(pkt), ODP_PKTIO_INVALID);
+
+ mb->pkt_len = len;
+ mb->data_len = len;
}
-void packet_parse_reset(odp_packet_hdr_t *pkt_hdr)
+static inline int pktmbuf_alloc_multi(struct rte_mempool *mp, struct rte_mbuf **mbufs, int num)
{
- /* Reset parser metadata before new parse */
- pkt_hdr->p.parsed_layers = LAYER_NONE;
- pkt_hdr->p.error_flags.all = 0;
- pkt_hdr->p.input_flags.all = 0;
- pkt_hdr->p.output_flags.all = 0;
- pkt_hdr->p.l2_offset = 0;
- pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
- pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+ if (odp_likely(rte_pktmbuf_alloc_bulk(mp, mbufs, num) == 0))
+ return num;
+
+ for (int i = 0; i < num; i++) {
+ mbufs[i] = rte_pktmbuf_alloc(mp);
+
+ if (odp_unlikely(mbufs[i] == NULL))
+ return i;
+ }
+
+ return num;
}
-static odp_packet_t packet_alloc(pool_entry_t* pool, uint32_t len)
+static odp_packet_t packet_alloc(pool_t *pool, uint32_t len)
{
odp_packet_t pkt;
uintmax_t totsize = RTE_PKTMBUF_HEADROOM + len;
odp_packet_hdr_t *pkt_hdr;
- struct rte_mbuf *mbuf;
+ uint16_t seg_len = pool->seg_len;
+ int num_seg;
- if (pool->s.params.type != ODP_POOL_PACKET)
- return ODP_PACKET_INVALID;
+ num_seg = num_segments(totsize, seg_len);
- mbuf = rte_pktmbuf_alloc(pool->s.rte_mempool);
- if (mbuf == NULL) {
- rte_errno = ENOMEM;
- return ODP_PACKET_INVALID;
- }
- pkt_hdr = (odp_packet_hdr_t *)mbuf;
- pkt_hdr->buf_hdr.totsize = mbuf->buf_len;
+ if (odp_likely(num_seg == 1)) {
+ struct rte_mbuf *mbuf = rte_pktmbuf_alloc(pool->rte_mempool);
- if (mbuf->buf_len < totsize) {
- intmax_t needed = totsize - mbuf->buf_len;
- struct rte_mbuf *curseg = mbuf;
+ if (odp_unlikely(mbuf == NULL))
+ return ODP_PACKET_INVALID;
- do {
- struct rte_mbuf *nextseg =
- rte_pktmbuf_alloc(pool->s.rte_mempool);
+ pkt_hdr = (odp_packet_hdr_t *)mbuf;
+ odp_prefetch((uint8_t *)mbuf + sizeof(struct rte_mbuf));
+ odp_prefetch((uint8_t *)mbuf + sizeof(struct rte_mbuf) +
+ ODP_CACHE_LINE_SIZE);
- if (nextseg == NULL) {
- rte_pktmbuf_free(mbuf);
- return ODP_PACKET_INVALID;
- }
+ pkt = packet_handle(pkt_hdr);
+ packet_reset_fresh(pkt, len);
- curseg->next = nextseg;
- curseg = nextseg;
- curseg->data_off = 0;
- pkt_hdr->buf_hdr.totsize += curseg->buf_len;
- needed -= curseg->buf_len;
- } while (needed > 0);
+ return pkt;
}
- pkt = (odp_packet_t)mbuf;
-
- if (odp_packet_reset(pkt, len) != 0)
- return ODP_PACKET_INVALID;
+ /* Create segmented packet */
- return pkt;
-}
+ struct rte_mbuf *mbufs[num_seg];
+ struct rte_mbuf *head;
+ int ret;
-odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
-{
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
+ /* Check num_seg here so rte_pktmbuf_chain() always succeeds */
+ if (odp_unlikely(num_seg > RTE_MBUF_MAX_NB_SEGS))
+ return ODP_PACKET_INVALID;
- return packet_alloc(pool, len);
-}
+ /* Avoid invalid 'maybe-uninitialized' warning with GCC 12 */
+ mbufs[0] = NULL;
-int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
- odp_packet_t pkt[], int num)
-{
- int i;
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
+ ret = pktmbuf_alloc_multi(pool->rte_mempool, mbufs, num_seg);
+ if (odp_unlikely(ret != num_seg)) {
+ for (int i = 0; i < ret; i++)
+ rte_pktmbuf_free(mbufs[i]);
- for (i = 0; i < num; i++) {
- pkt[i] = packet_alloc(pool, len);
- if (pkt[i] == ODP_PACKET_INVALID)
- return rte_errno == ENOMEM ? i : -EINVAL;
+ return ODP_PACKET_INVALID;
}
- return i;
-}
-void odp_packet_free(odp_packet_t pkt)
-{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)pkt;
- rte_pktmbuf_free(mbuf);
-}
+ head = mbufs[0];
+ pkt_hdr = (odp_packet_hdr_t *)head;
+ odp_prefetch((uint8_t *)head + sizeof(struct rte_mbuf));
+ odp_prefetch((uint8_t *)head + sizeof(struct rte_mbuf) +
+ ODP_CACHE_LINE_SIZE);
-void odp_packet_free_multi(const odp_packet_t pkt[], int num)
-{
- int i;
+ for (int i = 1; i < num_seg; i++) {
+ struct rte_mbuf *nextseg = mbufs[i];
- for (i = 0; i < num; i++) {
- struct rte_mbuf *mbuf = (struct rte_mbuf *)pkt[i];
+ nextseg->data_off = 0;
- rte_pktmbuf_free(mbuf);
+ rte_pktmbuf_chain(head, nextseg);
}
-}
-int odp_packet_reset(odp_packet_t pkt, uint32_t len)
-{
- odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt);
- struct rte_mbuf *ms, *mb = &pkt_hdr->buf_hdr.mb;
- uint8_t nb_segs = 0;
- int32_t lenleft = len;
+ pkt = packet_handle(pkt_hdr);
+ packet_reset(pkt, len);
- if (RTE_PKTMBUF_HEADROOM + len > odp_packet_buf_len(pkt)) {
- ODP_DBG("Not enought head room for that packet %d/%d\n",
- RTE_PKTMBUF_HEADROOM + len,
- odp_packet_buf_len(pkt));
- return -1;
- }
+ return pkt;
+}
- pkt_hdr->p.parsed_layers = LAYER_NONE;
- pkt_hdr->p.input_flags.all = 0;
- pkt_hdr->p.output_flags.all = 0;
- pkt_hdr->p.error_flags.all = 0;
+static int packet_alloc_multi(pool_t *pool, uint32_t len, odp_packet_t pkt[], int num)
+{
+ uintmax_t totsize = RTE_PKTMBUF_HEADROOM + len;
+ int num_seg, i;
- pkt_hdr->p.l2_offset = 0;
- pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
- pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+ num_seg = num_segments(totsize, pool->seg_len);
- pkt_hdr->buf_hdr.next = NULL;
+ if (odp_likely(num_seg == 1)) {
+ int ret;
- pkt_hdr->input = ODP_PKTIO_INVALID;
+ ret = pktmbuf_alloc_multi(pool->rte_mempool, (struct rte_mbuf **)pkt, num);
- /* Disable lazy parsing on user allocated packets */
- packet_parse_disable(pkt_hdr);
+ for (i = 0; i < ret; i++) {
+ struct rte_mbuf *mbuf = pkt_to_mbuf(pkt[i]);
- mb->port = 0xff;
- mb->pkt_len = len;
- mb->data_off = RTE_PKTMBUF_HEADROOM;
- mb->vlan_tci = 0;
- nb_segs = 1;
+ odp_prefetch((uint8_t *)mbuf + sizeof(struct rte_mbuf));
+ odp_prefetch((uint8_t *)mbuf + sizeof(struct rte_mbuf) +
+ ODP_CACHE_LINE_SIZE);
- if (RTE_PKTMBUF_HEADROOM + lenleft <= mb->buf_len) {
- mb->data_len = lenleft;
- } else {
- mb->data_len = mb->buf_len - RTE_PKTMBUF_HEADROOM;
- lenleft -= mb->data_len;
- ms = mb->next;
- while (lenleft > 0) {
- nb_segs++;
- ms->data_len = lenleft <= ms->buf_len ?
- lenleft : ms->buf_len;
- lenleft -= ms->buf_len;
- ms = ms->next;
+ packet_reset_fresh(pkt[i], len);
}
+ return ret;
}
- mb->nb_segs = nb_segs;
- return 0;
+ /* Fall back to using packet_alloc() for segmented packets */
+
+ for (i = 0; i < num; i++) {
+ pkt[i] = packet_alloc(pool, len);
+ if (odp_unlikely(pkt[i] == ODP_PACKET_INVALID))
+ return i;
+ }
+ return i;
}
-odp_packet_t _odp_packet_from_buffer(odp_buffer_t buf)
+odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
{
- if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_PACKET);
+
+ if (odp_unlikely(len == 0))
return ODP_PACKET_INVALID;
- return (odp_packet_t)buf_to_packet_hdr(buf);
+ return packet_alloc(pool, len);
}
-odp_buffer_t _odp_packet_to_buffer(odp_packet_t pkt)
+int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
+ odp_packet_t pkt[], int num)
{
- if (odp_unlikely(pkt == ODP_PACKET_INVALID))
- return ODP_BUFFER_INVALID;
+ pool_t *pool = _odp_pool_entry(pool_hdl);
- return buffer_handle(odp_packet_hdr(pkt));
-}
+ _ODP_ASSERT(pool->type == ODP_POOL_PACKET);
-odp_packet_t odp_packet_from_event(odp_event_t ev)
-{
- if (odp_unlikely(ev == ODP_EVENT_INVALID))
- return ODP_PACKET_INVALID;
+ if (odp_unlikely(len == 0))
+ return -1;
- return (odp_packet_t)buf_to_packet_hdr((odp_buffer_t)ev);
+ return packet_alloc_multi(pool, len, pkt, num);
}
-odp_event_t odp_packet_to_event(odp_packet_t pkt)
+int odp_packet_reset(odp_packet_t pkt, uint32_t len)
{
- if (odp_unlikely(pkt == ODP_PACKET_INVALID))
- return ODP_EVENT_INVALID;
+ if (odp_unlikely(len == 0))
+ return -1;
- return (odp_event_t)buffer_handle(odp_packet_hdr(pkt));
+ if (RTE_PKTMBUF_HEADROOM + len > odp_packet_buf_len(pkt)) {
+ _ODP_DBG("Not enough head room for that packet %d/%d\n",
+ RTE_PKTMBUF_HEADROOM + len, odp_packet_buf_len(pkt));
+ return -1;
+ }
+
+ return packet_reset(pkt, len);
}
-uint32_t odp_packet_buf_len(odp_packet_t pkt)
+int odp_event_filter_packet(const odp_event_t event[],
+ odp_packet_t packet[],
+ odp_event_t remain[], int num)
{
- return odp_packet_hdr(pkt)->buf_hdr.totsize;
+ int i;
+ int num_pkt = 0;
+ int num_rem = 0;
+
+ for (i = 0; i < num; i++) {
+ if (odp_event_type(event[i]) == ODP_EVENT_PACKET) {
+ packet[num_pkt] = odp_packet_from_event(event[i]);
+ num_pkt++;
+ } else {
+ remain[num_rem] = event[i];
+ num_rem++;
+ }
+ }
+
+ return num_pkt;
}
void *odp_packet_tail(odp_packet_t pkt)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(pkt)->mb);
+
mb = rte_pktmbuf_lastseg(mb);
return (void *)(rte_pktmbuf_mtod(mb, char *) + mb->data_len);
}
void *odp_packet_push_head(odp_packet_t pkt, uint32_t len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(pkt)->mb);
+
return (void *)rte_pktmbuf_prepend(mb, len);
}
static void _copy_head_metadata(struct rte_mbuf *newhead,
struct rte_mbuf *oldhead)
{
- odp_packet_t pkt = (odp_packet_t)newhead;
- uint32_t saved_index = odp_packet_hdr(pkt)->buf_hdr.index;
-
rte_mbuf_refcnt_set(newhead, rte_mbuf_refcnt_read(oldhead));
- newhead->port = oldhead->port;
- newhead->ol_flags = oldhead->ol_flags;
- newhead->packet_type = oldhead->packet_type;
- newhead->vlan_tci = oldhead->vlan_tci;
- newhead->hash.rss = 0;
- newhead->seqn = oldhead->seqn;
- newhead->vlan_tci_outer = oldhead->vlan_tci_outer;
- newhead->udata64 = oldhead->udata64;
- memcpy(&newhead->tx_offload, &oldhead->tx_offload,
- sizeof(odp_packet_hdr_t) -
- offsetof(struct rte_mbuf, tx_offload));
- odp_packet_hdr(pkt)->buf_hdr.handle.handle =
- (odp_buffer_t)newhead;
- odp_packet_hdr(pkt)->buf_hdr.index = saved_index;
+
+ _odp_packet_copy_md((odp_packet_hdr_t *)newhead, (odp_packet_hdr_t *)oldhead, 0);
}
int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
uint32_t *seg_len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(*pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(*pkt)->mb);
int addheadsize = len - rte_pktmbuf_headroom(mb);
if (addheadsize > 0) {
struct rte_mbuf *newhead, *t;
- uint32_t totsize_change;
int i;
newhead = rte_pktmbuf_alloc(mb->pool);
@@ -340,19 +405,17 @@ int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
t->data_len = t->buf_len;
t->data_off = 0;
}
- totsize_change = newhead->nb_segs * newhead->buf_len;
if (rte_pktmbuf_chain(newhead, mb)) {
rte_pktmbuf_free(newhead);
return -1;
}
/* Expand the original head segment*/
newhead->pkt_len += rte_pktmbuf_headroom(mb);
+ mb->data_len += rte_pktmbuf_headroom(mb);
mb->data_off = 0;
- mb->data_len = mb->buf_len;
_copy_head_metadata(newhead, mb);
mb = newhead;
*pkt = (odp_packet_t)newhead;
- odp_packet_hdr(*pkt)->buf_hdr.totsize += totsize_change;
} else {
rte_pktmbuf_prepend(mb, len);
}
@@ -367,28 +430,30 @@ int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
void *odp_packet_pull_head(odp_packet_t pkt, uint32_t len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = pkt_to_mbuf(pkt);
+
+ if (odp_unlikely(len >= mb->data_len))
+ return NULL;
+
return (void *)rte_pktmbuf_adj(mb, len);
}
int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
uint32_t *seg_len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(*pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = pkt_to_mbuf(*pkt);
- if (odp_packet_len(*pkt) < len)
+ if (odp_unlikely(len >= odp_packet_len(*pkt)))
return -1;
if (len > mb->data_len) {
struct rte_mbuf *newhead = mb, *prev = NULL;
uint32_t left = len;
- uint32_t totsize_change = 0;
while (newhead->next != NULL) {
if (newhead->data_len > left)
break;
left -= newhead->data_len;
- totsize_change += newhead->buf_len;
prev = newhead;
newhead = newhead->next;
--mb->nb_segs;
@@ -400,8 +465,8 @@ int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
_copy_head_metadata(newhead, mb);
prev->next = NULL;
rte_pktmbuf_free(mb);
+ mb = newhead;
*pkt = (odp_packet_t)newhead;
- odp_packet_hdr(*pkt)->buf_hdr.totsize -= totsize_change;
} else {
rte_pktmbuf_adj(mb, len);
}
@@ -416,7 +481,7 @@ int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len, void **data_ptr,
void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(pkt)->mb);
return (void *)rte_pktmbuf_append(mb, len);
}
@@ -424,7 +489,7 @@ void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len)
int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len, void **data_ptr,
uint32_t *seg_len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(*pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = &(packet_hdr(*pkt)->mb);
int newtailsize = len - odp_packet_tailroom(*pkt);
uint32_t old_pkt_len = odp_packet_len(*pkt);
@@ -469,8 +534,6 @@ int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len, void **data_ptr,
/* Expand the original tail */
m_last->data_len = m_last->buf_len - m_last->data_off;
mb->pkt_len += len - newtailsize;
- odp_packet_hdr(*pkt)->buf_hdr.totsize +=
- newtail->nb_segs * newtail->buf_len;
} else {
rte_pktmbuf_append(mb, len);
}
@@ -483,7 +546,11 @@ int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len, void **data_ptr,
void *odp_packet_pull_tail(odp_packet_t pkt, uint32_t len)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = pkt_to_mbuf(pkt);
+ struct rte_mbuf *mb_last = rte_pktmbuf_lastseg(mb);
+
+ if (odp_unlikely(len >= mb_last->data_len))
+ return NULL;
if (rte_pktmbuf_trim(mb, len))
return NULL;
@@ -494,12 +561,19 @@ void *odp_packet_pull_tail(odp_packet_t pkt, uint32_t len)
int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len, void **tail_ptr,
uint32_t *tailroom)
{
- struct rte_mbuf *mb = &(odp_packet_hdr(*pkt)->buf_hdr.mb);
+ struct rte_mbuf *mb = pkt_to_mbuf(*pkt);
+ struct rte_mbuf *last_mb = rte_pktmbuf_lastseg(mb);
- if (odp_packet_len(*pkt) < len)
+ if (odp_unlikely(len >= odp_packet_len(*pkt)))
return -1;
- if (rte_pktmbuf_trim(mb, len)) {
+ /*
+ * Trim only if the last segment does not become zero length.
+ */
+ if (odp_likely(len < last_mb->data_len)) {
+ if (odp_unlikely(rte_pktmbuf_trim(mb, len)))
+ return -1;
+ } else {
struct rte_mbuf *reverse[mb->nb_segs];
struct rte_mbuf *t = mb;
int i;
@@ -535,31 +609,6 @@ int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len, void **tail_ptr,
return 0;
}
-void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len,
- odp_packet_seg_t *seg)
-{
- struct rte_mbuf *mb = &(odp_packet_hdr(pkt)->buf_hdr.mb);
-
- do {
- if (mb->data_len > offset) {
- break;
- } else {
- offset -= mb->data_len;
- mb = mb->next;
- }
- } while (mb);
-
- if (mb) {
- if (len)
- *len = mb->data_len - offset;
- if (seg)
- *seg = (odp_packet_seg_t)(uintptr_t)mb;
- return (void *)(rte_pktmbuf_mtod(mb, char *) + offset);
- } else {
- return NULL;
- }
-}
-
/*
*
* Meta-data
@@ -567,146 +616,14 @@ void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len,
*
*/
-int odp_packet_input_index(odp_packet_t pkt)
-{
- return odp_pktio_index(odp_packet_hdr(pkt)->input);
-}
-
-void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ctx)
-{
- odp_packet_hdr(pkt)->buf_hdr.buf_cctx = ctx;
-}
-
-static inline void *packet_offset_to_ptr(odp_packet_t pkt, uint32_t *len,
- const size_t offset)
-{
- if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
- return NULL;
-
- if (len)
- return odp_packet_offset(pkt, offset, len, NULL);
- else
- return odp_packet_offset(pkt, offset, NULL, NULL);
-}
-
-void *odp_packet_l2_ptr(odp_packet_t pkt, uint32_t *len)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (!packet_hdr_has_l2(pkt_hdr))
- return NULL;
- return packet_offset_to_ptr(pkt, len, pkt_hdr->p.l2_offset);
-}
-
-uint32_t odp_packet_l2_offset(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (!packet_hdr_has_l2(pkt_hdr))
- return ODP_PACKET_OFFSET_INVALID;
- return pkt_hdr->p.l2_offset;
-}
-
-int odp_packet_l2_offset_set(odp_packet_t pkt, uint32_t offset)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (odp_unlikely(offset >= (odp_packet_len(pkt) - 1)))
- return -1;
-
- packet_hdr_has_l2_set(pkt_hdr, 1);
- pkt_hdr->p.l2_offset = offset;
- return 0;
-}
-
-void *odp_packet_l3_ptr(odp_packet_t pkt, uint32_t *len)
+uint16_t odp_packet_ones_comp(odp_packet_t pkt, odp_packet_data_range_t *range)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
- return packet_offset_to_ptr(pkt, len, pkt_hdr->p.l3_offset);
-}
-
-uint32_t odp_packet_l3_offset(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
- return pkt_hdr->p.l3_offset;
-}
-
-int odp_packet_l3_offset_set(odp_packet_t pkt, uint32_t offset)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (odp_unlikely(offset >= (odp_packet_len(pkt) - 1)))
- return -1;
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
- pkt_hdr->p.l3_offset = offset;
- return 0;
-}
-
-void *odp_packet_l4_ptr(odp_packet_t pkt, uint32_t *len)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
- return packet_offset_to_ptr(pkt, len, pkt_hdr->p.l4_offset);
-}
-
-uint32_t odp_packet_l4_offset(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
- return pkt_hdr->p.l4_offset;
-}
-
-int odp_packet_l4_offset_set(odp_packet_t pkt, uint32_t offset)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (odp_unlikely(offset >= (odp_packet_len(pkt) - 1)))
- return -1;
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
- pkt_hdr->p.l4_offset = offset;
+ (void)pkt;
+ range->length = 0;
+ range->offset = 0;
return 0;
}
-void odp_packet_ts_set(odp_packet_t pkt, odp_time_t timestamp)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- pkt_hdr->timestamp = timestamp;
- pkt_hdr->p.input_flags.timestamp = 1;
-}
-
-/*
- *
- * Segment level
- * ********************************************************
- *
- */
-
-void *odp_packet_seg_data(odp_packet_t pkt ODP_UNUSED, odp_packet_seg_t seg)
-{
- return odp_packet_data((odp_packet_t)(uintptr_t)seg);
-}
-
-uint32_t odp_packet_seg_data_len(odp_packet_t pkt ODP_UNUSED,
- odp_packet_seg_t seg)
-{
- return odp_packet_seg_len((odp_packet_t)(uintptr_t)seg);
-}
-
/*
*
* Manipulation
@@ -717,14 +634,15 @@ uint32_t odp_packet_seg_data_len(odp_packet_t pkt ODP_UNUSED,
int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
{
odp_packet_t pkt = *pkt_ptr;
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = odp_packet_len(pkt);
+ odp_pool_t pool = pkt_hdr->event_hdr.pool;
odp_packet_t newpkt;
if (offset > pktlen)
return -1;
- newpkt = odp_packet_alloc(pkt_hdr->buf_hdr.pool_hdl, pktlen + len);
+ newpkt = odp_packet_alloc(pool, pktlen + len);
if (newpkt == ODP_PACKET_INVALID)
return -1;
@@ -736,7 +654,7 @@ int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
return -1;
}
- _odp_packet_copy_md_to_packet(pkt, newpkt);
+ _odp_packet_copy_md(packet_hdr(newpkt), pkt_hdr, 0);
odp_packet_free(pkt);
*pkt_ptr = newpkt;
@@ -746,14 +664,15 @@ int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
int odp_packet_rem_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
{
odp_packet_t pkt = *pkt_ptr;
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = odp_packet_len(pkt);
+ odp_pool_t pool = pkt_hdr->event_hdr.pool;
odp_packet_t newpkt;
- if (offset > pktlen || offset + len > pktlen)
+ if (odp_unlikely(offset + len >= pktlen))
return -1;
- newpkt = odp_packet_alloc(pkt_hdr->buf_hdr.pool_hdl, pktlen - len);
+ newpkt = odp_packet_alloc(pool, pktlen - len);
if (newpkt == ODP_PACKET_INVALID)
return -1;
@@ -765,7 +684,7 @@ int odp_packet_rem_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
return -1;
}
- _odp_packet_copy_md_to_packet(pkt, newpkt);
+ _odp_packet_copy_md(packet_hdr(newpkt), pkt_hdr, 0);
odp_packet_free(pkt);
*pkt_ptr = newpkt;
@@ -787,7 +706,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
if (seglen >= len) {
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign == 0)
return 0;
shift = align - misalign;
@@ -797,7 +716,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
shift = len - seglen;
uaddr -= shift;
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign)
shift += align - misalign;
}
@@ -815,18 +734,29 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
{
- odp_packet_hdr_t *dst_hdr = odp_packet_hdr(*dst);
- odp_packet_hdr_t *src_hdr = odp_packet_hdr(src);
- struct rte_mbuf *mb_dst = pkt_to_mbuf(dst_hdr);
- struct rte_mbuf *mb_src = pkt_to_mbuf(src_hdr);
+ struct rte_mbuf *mb_dst = pkt_to_mbuf(*dst);
+ struct rte_mbuf *mb_src = pkt_to_mbuf(src);
odp_packet_t new_dst;
odp_pool_t pool;
uint32_t dst_len;
uint32_t src_len;
- if (odp_likely(!rte_pktmbuf_chain(mb_dst, mb_src))) {
- dst_hdr->buf_hdr.totsize += src_hdr->buf_hdr.totsize;
- return 0;
+ /* Copy if packets are from different pools */
+ if (odp_likely(mb_dst->pool == mb_src->pool)) {
+ if (odp_likely(!rte_pktmbuf_chain(mb_dst, mb_src)))
+ return 0;
+ } else {
+ odp_packet_t new_src = odp_packet_copy_part(src, 0, odp_packet_len(src),
+ odp_packet_pool(*dst));
+
+ if (odp_unlikely(new_src == ODP_PACKET_INVALID))
+ return -1;
+
+ if (odp_likely(!rte_pktmbuf_chain(mb_dst, pkt_to_mbuf(new_src)))) {
+ odp_packet_free(src);
+ return 1;
+ }
+ odp_packet_free(new_src);
}
/* Fall back to using standard copy operations after maximum number of
@@ -856,7 +786,7 @@ int odp_packet_split(odp_packet_t *pkt, uint32_t len, odp_packet_t *tail)
{
uint32_t pktlen = odp_packet_len(*pkt);
- if (len >= pktlen || tail == NULL)
+ if (odp_unlikely(len == 0 || len >= pktlen || tail == NULL))
return -1;
*tail = odp_packet_copy_part(*pkt, len, pktlen - len,
@@ -878,16 +808,24 @@ int odp_packet_split(odp_packet_t *pkt, uint32_t len, odp_packet_t *tail)
odp_packet_t odp_packet_copy(odp_packet_t pkt, odp_pool_t pool)
{
uint32_t pktlen = odp_packet_len(pkt);
- odp_packet_t newpkt = odp_packet_alloc(pool, pktlen);
+ odp_packet_t newpkt;
- if (newpkt != ODP_PACKET_INVALID) {
- if (_odp_packet_copy_md_to_packet(pkt, newpkt) ||
- odp_packet_copy_from_pkt(newpkt, 0, pkt, 0, pktlen)) {
- odp_packet_free(newpkt);
- newpkt = ODP_PACKET_INVALID;
- }
+ if (odp_unlikely(_odp_packet_copy_md_possible(pool, odp_packet_pool(pkt)) < 0)) {
+ _ODP_ERR("Unable to copy packet metadata\n");
+ return ODP_PACKET_INVALID;
}
+ newpkt = odp_packet_alloc(pool, pktlen);
+ if (odp_unlikely(newpkt == ODP_PACKET_INVALID))
+ return ODP_PACKET_INVALID;
+
+ if (odp_unlikely(odp_packet_copy_from_pkt(newpkt, 0, pkt, 0, pktlen))) {
+ odp_packet_free(newpkt);
+ newpkt = ODP_PACKET_INVALID;
+ }
+
+ _odp_packet_copy_md(packet_hdr(newpkt), packet_hdr(pkt), 1);
+
return newpkt;
}
@@ -907,58 +845,12 @@ odp_packet_t odp_packet_copy_part(odp_packet_t pkt, uint32_t offset,
return newpkt;
}
-int odp_packet_copy_to_mem(odp_packet_t pkt, uint32_t offset,
- uint32_t len, void *dst)
-{
- void *mapaddr;
- uint32_t seglen = 0; /* GCC */
- uint32_t cpylen;
- uint8_t *dstaddr = (uint8_t *)dst;
-
- if (offset + len > odp_packet_len(pkt))
- return -1;
-
- while (len > 0) {
- mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
- cpylen = len > seglen ? seglen : len;
- memcpy(dstaddr, mapaddr, cpylen);
- offset += cpylen;
- dstaddr += cpylen;
- len -= cpylen;
- }
-
- return 0;
-}
-
-int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset,
- uint32_t len, const void *src)
-{
- void *mapaddr;
- uint32_t seglen = 0; /* GCC */
- uint32_t cpylen;
- const uint8_t *srcaddr = (const uint8_t *)src;
-
- if (offset + len > odp_packet_len(pkt))
- return -1;
-
- while (len > 0) {
- mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
- cpylen = len > seglen ? seglen : len;
- memcpy(mapaddr, srcaddr, cpylen);
- offset += cpylen;
- srcaddr += cpylen;
- len -= cpylen;
- }
-
- return 0;
-}
-
int odp_packet_copy_from_pkt(odp_packet_t dst, uint32_t dst_offset,
odp_packet_t src, uint32_t src_offset,
uint32_t len)
{
- odp_packet_hdr_t *dst_hdr = odp_packet_hdr(dst);
- odp_packet_hdr_t *src_hdr = odp_packet_hdr(src);
+ odp_packet_hdr_t *dst_hdr = packet_hdr(dst);
+ odp_packet_hdr_t *src_hdr = packet_hdr(src);
void *dst_map;
void *src_map;
uint32_t cpylen, minseg;
@@ -1021,80 +913,256 @@ int odp_packet_move_data(odp_packet_t pkt, uint32_t dst_offset,
pkt, src_offset, len);
}
+int _odp_packet_set_data(odp_packet_t pkt, uint32_t offset,
+ uint8_t c, uint32_t len)
+{
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t setlen;
+
+ if (offset + len > odp_packet_len(pkt))
+ return -1;
+
+ while (len > 0) {
+ mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ setlen = len > seglen ? seglen : len;
+ if (odp_unlikely(setlen == 0))
+ return -1;
+ memset(mapaddr, c, setlen);
+ offset += setlen;
+ len -= setlen;
+ }
+
+ return 0;
+}
+
+int _odp_packet_cmp_data(odp_packet_t pkt, uint32_t offset,
+ const void *s, uint32_t len)
+{
+ const uint8_t *ptr = s;
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t cmplen;
+ int ret;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+
+ while (len > 0) {
+ mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ cmplen = len > seglen ? seglen : len;
+ ret = memcmp(mapaddr, ptr, cmplen);
+ if (ret != 0)
+ return ret;
+ offset += cmplen;
+ len -= cmplen;
+ ptr += cmplen;
+ }
+
+ return 0;
+}
+
/*
*
* Debugging
* ********************************************************
*
*/
+static int packet_print_input_flags(odp_packet_hdr_t *hdr, char *str, int max)
+{
+ int len = 0;
+
+ if (hdr->p.input_flags.l2)
+ len += _odp_snprint(&str[len], max - len, "l2 ");
+ if (hdr->p.input_flags.l3)
+ len += _odp_snprint(&str[len], max - len, "l3 ");
+ if (hdr->p.input_flags.l4)
+ len += _odp_snprint(&str[len], max - len, "l4 ");
+ if (hdr->p.input_flags.eth)
+ len += _odp_snprint(&str[len], max - len, "eth ");
+ if (hdr->p.input_flags.vlan)
+ len += _odp_snprint(&str[len], max - len, "vlan ");
+ if (hdr->p.input_flags.arp)
+ len += _odp_snprint(&str[len], max - len, "arp ");
+ if (hdr->p.input_flags.ipv4)
+ len += _odp_snprint(&str[len], max - len, "ipv4 ");
+ if (hdr->p.input_flags.ipv6)
+ len += _odp_snprint(&str[len], max - len, "ipv6 ");
+ if (hdr->p.input_flags.ipsec)
+ len += _odp_snprint(&str[len], max - len, "ipsec ");
+ if (hdr->p.input_flags.udp)
+ len += _odp_snprint(&str[len], max - len, "udp ");
+ if (hdr->p.input_flags.tcp)
+ len += _odp_snprint(&str[len], max - len, "tcp ");
+ if (hdr->p.input_flags.sctp)
+ len += _odp_snprint(&str[len], max - len, "sctp ");
+ if (hdr->p.input_flags.icmp)
+ len += _odp_snprint(&str[len], max - len, "icmp ");
+
+ return len;
+}
void odp_packet_print(odp_packet_t pkt)
{
odp_packet_seg_t seg;
- int max_len = 512;
+ int max_len = 1024;
char str[max_len];
- uint8_t *p;
int len = 0;
int n = max_len - 1;
- odp_packet_hdr_t *hdr = odp_packet_hdr(pkt);
- odp_buffer_t buf = _odp_packet_to_buffer(pkt);
-
- len += snprintf(&str[len], n - len, "Packet ");
- len += odp_buffer_snprint(&str[len], n - len, buf);
- len += snprintf(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
- hdr->p.input_flags.all);
- len += snprintf(&str[len], n - len, " error_flags 0x%" PRIx32 "\n",
- hdr->p.error_flags.all);
- len += snprintf(&str[len], n - len, " output_flags 0x%" PRIx32 "\n",
- hdr->p.output_flags.all);
- len += snprintf(&str[len], n - len,
- " l2_offset %" PRIu32 "\n", hdr->p.l2_offset);
- len += snprintf(&str[len], n - len,
- " l3_offset %" PRIu32 "\n", hdr->p.l3_offset);
- len += snprintf(&str[len], n - len,
- " l4_offset %" PRIu32 "\n", hdr->p.l4_offset);
- len += snprintf(&str[len], n - len,
- " frame_len %" PRIu32 "\n",
- hdr->buf_hdr.mb.pkt_len);
- len += snprintf(&str[len], n - len,
- " input %" PRIu64 "\n",
- odp_pktio_to_u64(hdr->input));
- len += snprintf(&str[len], n - len,
- " headroom %" PRIu32 "\n",
- odp_packet_headroom(pkt));
- len += snprintf(&str[len], n - len,
- " tailroom %" PRIu32 "\n",
- odp_packet_tailroom(pkt));
- len += snprintf(&str[len], n - len,
- " num_segs %i\n", odp_packet_num_segs(pkt));
+ odp_packet_hdr_t *hdr = packet_hdr(pkt);
+ pool_t *pool = _odp_pool_entry(hdr->event_hdr.pool);
+
+ len += _odp_snprint(&str[len], n - len, "Packet info\n");
+ len += _odp_snprint(&str[len], n - len, "-----------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_packet_to_u64(pkt));
+ len += _odp_snprint(&str[len], n - len, " pool index %u\n", pool->pool_idx);
+ len += _odp_snprint(&str[len], n - len, " buf index %u\n", hdr->event_hdr.index);
+ len += _odp_snprint(&str[len], n - len, " ev subtype %i\n", hdr->event_hdr.subtype);
+ len += _odp_snprint(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
+ hdr->p.input_flags.all);
+ if (hdr->p.input_flags.all) {
+ len += _odp_snprint(&str[len], n - len, " ");
+ len += packet_print_input_flags(hdr, &str[len], n - len);
+ len += _odp_snprint(&str[len], n - len, "\n");
+ }
+ len += _odp_snprint(&str[len], n - len,
+ " flags 0x%" PRIx32 "\n", hdr->p.flags.all_flags);
+ len += _odp_snprint(&str[len], n - len,
+ " cls_mark %" PRIu64 "\n", odp_packet_cls_mark(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " user ptr %p\n", hdr->user_ptr);
+ len += _odp_snprint(&str[len], n - len,
+ " user area %p\n", hdr->uarea_addr);
+ len += _odp_snprint(&str[len], n - len,
+ " l2_offset %" PRIu32 "\n", hdr->p.l2_offset);
+ len += _odp_snprint(&str[len], n - len,
+ " l3_offset %" PRIu32 "\n", hdr->p.l3_offset);
+ len += _odp_snprint(&str[len], n - len,
+ " l4_offset %" PRIu32 "\n", hdr->p.l4_offset);
+ len += _odp_snprint(&str[len], n - len,
+ " frame_len %" PRIu32 "\n", hdr->mb.pkt_len);
+ len += _odp_snprint(&str[len], n - len,
+ " input %" PRIu64 "\n", odp_pktio_to_u64(hdr->input));
+ len += _odp_snprint(&str[len], n - len,
+ " headroom %" PRIu32 "\n", odp_packet_headroom(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " tailroom %" PRIu32 "\n", odp_packet_tailroom(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " num_segs %i\n", odp_packet_num_segs(pkt));
seg = odp_packet_first_seg(pkt);
- while (seg != ODP_PACKET_SEG_INVALID) {
- len += snprintf(&str[len], n - len,
- " seg_len %" PRIu32 "\n",
- odp_packet_seg_data_len(pkt, seg));
+ for (int seg_idx = 0; seg != ODP_PACKET_SEG_INVALID; seg_idx++) {
+ len += _odp_snprint(&str[len], n - len,
+ " [%d] seg_len %-4" PRIu32 " seg_data %p\n",
+ seg_idx, odp_packet_seg_data_len(pkt, seg),
+ odp_packet_seg_data(pkt, seg));
seg = odp_packet_next_seg(pkt, seg);
}
str[len] = '\0';
- ODP_PRINT("\n%s\n", str);
- rte_pktmbuf_dump(stdout, &hdr->buf_hdr.mb, 32);
+ _ODP_PRINT("%s\n", str);
+}
+
+void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
+ uint32_t byte_len)
+{
+ odp_packet_hdr_t *hdr = packet_hdr(pkt);
+ uint32_t bytes_per_row = 16;
+ int num_rows = (byte_len + bytes_per_row - 1) / bytes_per_row;
+ int max_len = 256 + (3 * byte_len) + (3 * num_rows);
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+ uint32_t data_len = odp_packet_len(pkt);
+ pool_t *pool = _odp_pool_entry(hdr->event_hdr.pool);
+
+ len += _odp_snprint(&str[len], n - len, "Packet data\n");
+ len += _odp_snprint(&str[len], n - len, "-----------\n");
+ len += _odp_snprint(&str[len], n - len,
+ " handle 0x%" PRIx64 "\n", odp_packet_to_u64(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " pool name %s\n", pool->name);
+ len += _odp_snprint(&str[len], n - len,
+ " buf index %" PRIu32 "\n", hdr->event_hdr.index);
+ len += _odp_snprint(&str[len], n - len,
+ " segcount %" PRIu8 "\n", hdr->mb.nb_segs);
+ len += _odp_snprint(&str[len], n - len,
+ " data len %" PRIu32 "\n", data_len);
+ len += _odp_snprint(&str[len], n - len,
+ " data ptr %p\n", odp_packet_data(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " print offset %" PRIu32 "\n", offset);
+ len += _odp_snprint(&str[len], n - len,
+ " print length %" PRIu32 "\n", byte_len);
+
+ if (offset + byte_len > data_len) {
+ len += _odp_snprint(&str[len], n - len, " BAD OFFSET OR LEN\n");
+ _ODP_PRINT("%s\n", str);
+ return;
+ }
+
+ while (byte_len) {
+ uint32_t copy_len;
+ uint8_t data[bytes_per_row];
+ uint32_t i;
+
+ if (byte_len > bytes_per_row)
+ copy_len = bytes_per_row;
+ else
+ copy_len = byte_len;
+
+ odp_packet_copy_to_mem(pkt, offset, copy_len, data);
+
+ len += _odp_snprint(&str[len], n - len, " ");
+
+ for (i = 0; i < copy_len; i++)
+ len += _odp_snprint(&str[len], n - len, " %02x", data[i]);
+
+ len += _odp_snprint(&str[len], n - len, "\n");
- p = odp_packet_data(pkt);
- ODP_ERR("00000000: %02X %02X %02X %02X %02X %02X %02X %02X\n",
- p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]);
- ODP_ERR("00000008: %02X %02X %02X %02X %02X %02X %02X %02X\n",
- p[8], p[9], p[10], p[11], p[12], p[13], p[14], p[15]);
+ byte_len -= copy_len;
+ offset += copy_len;
+ }
+
+ _ODP_PRINT("%s\n", str);
}
int odp_packet_is_valid(odp_packet_t pkt)
{
- odp_buffer_t buf = _odp_packet_to_buffer(pkt);
+ odp_event_t ev;
+
+ if (pkt == ODP_PACKET_INVALID)
+ return 0;
+
+ ev = odp_packet_to_event(pkt);
+
+ if (_odp_event_is_valid(ev) == 0)
+ return 0;
+
+ if (odp_event_type(ev) != ODP_EVENT_PACKET)
+ return 0;
- return odp_buffer_is_valid(buf);
+ if (odp_unlikely(_odp_packet_validate(pkt, _ODP_EV_PACKET_IS_VALID)))
+ return 0;
+
+ switch (odp_event_subtype(ev)) {
+ case ODP_EVENT_PACKET_BASIC:
+ /* Fall through */
+ case ODP_EVENT_PACKET_COMP:
+ /* Fall through */
+ case ODP_EVENT_PACKET_CRYPTO:
+ /* Fall through */
+ case ODP_EVENT_PACKET_IPSEC:
+ /* Fall through */
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
}
/*
@@ -1104,393 +1172,473 @@ int odp_packet_is_valid(odp_packet_t pkt)
*
*/
-int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
+static uint64_t packet_sum_partial(odp_packet_hdr_t *pkt_hdr,
+ uint32_t l3_offset,
+ uint32_t offset,
+ uint32_t len)
{
- odp_packet_hdr_t *srchdr = odp_packet_hdr(srcpkt);
- odp_packet_hdr_t *dsthdr = odp_packet_hdr(dstpkt);
- uint32_t src_size = odp_packet_user_area_size(srcpkt);
- uint32_t dst_size = odp_packet_user_area_size(dstpkt);
+ uint64_t sum = 0;
+ uint32_t frame_len = odp_packet_len(packet_handle(pkt_hdr));
+
+ if (offset + len > frame_len)
+ return 0;
- dsthdr->input = srchdr->input;
- dsthdr->dst_queue = srchdr->dst_queue;
- dsthdr->buf_hdr.buf_u64 = srchdr->buf_hdr.buf_u64;
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = odp_packet_offset(packet_handle(pkt_hdr), offset, &seglen, NULL);
- dsthdr->buf_hdr.mb.port = srchdr->buf_hdr.mb.port;
- dsthdr->buf_hdr.mb.ol_flags = srchdr->buf_hdr.mb.ol_flags;
- dsthdr->buf_hdr.mb.packet_type = srchdr->buf_hdr.mb.packet_type;
- dsthdr->buf_hdr.mb.vlan_tci = srchdr->buf_hdr.mb.vlan_tci;
- dsthdr->buf_hdr.mb.hash = srchdr->buf_hdr.mb.hash;
- dsthdr->buf_hdr.mb.vlan_tci_outer = srchdr->buf_hdr.mb.vlan_tci_outer;
- dsthdr->buf_hdr.mb.tx_offload = srchdr->buf_hdr.mb.tx_offload;
+ if (seglen > len)
+ seglen = len;
- if (dst_size != 0)
- memcpy(odp_packet_user_area(dstpkt),
- odp_packet_user_area(srcpkt),
- dst_size <= src_size ? dst_size : src_size);
+ sum += chksum_partial(mapaddr, seglen, offset - l3_offset);
+ len -= seglen;
+ offset += seglen;
+ }
- copy_packet_parser_metadata(srchdr, dsthdr);
+ return sum;
+}
- /* Metadata copied, but return indication of whether the packet
- * user area was truncated in the process. Note this can only
- * happen when copying between different pools.
- */
- return dst_size < src_size;
+static inline uint16_t packet_sum(odp_packet_hdr_t *pkt_hdr,
+ uint32_t l3_offset,
+ uint32_t offset,
+ uint32_t len,
+ uint64_t sum)
+{
+ sum += packet_sum_partial(pkt_hdr, l3_offset, offset, len);
+ return chksum_finalize(sum);
}
-/**
- * Parser helper function for IPv4
- */
-static inline uint8_t parse_ipv4(packet_parser_t *prs, const uint8_t **parseptr,
- uint32_t *offset, uint32_t frame_len)
+static uint32_t packet_sum_crc32c(odp_packet_hdr_t *pkt_hdr,
+ uint32_t offset,
+ uint32_t len,
+ uint32_t init_val)
{
- const _odp_ipv4hdr_t *ipv4 = (const _odp_ipv4hdr_t *)*parseptr;
- uint8_t ver = _ODP_IPV4HDR_VER(ipv4->ver_ihl);
- uint8_t ihl = _ODP_IPV4HDR_IHL(ipv4->ver_ihl);
- uint16_t frag_offset;
- uint32_t dstaddr = odp_be_to_cpu_32(ipv4->dst_addr);
+ uint32_t sum = init_val;
- prs->l3_len = odp_be_to_cpu_16(ipv4->tot_len);
+ if (offset + len > odp_packet_len(packet_handle(pkt_hdr)))
+ return sum;
- if (odp_unlikely(ihl < _ODP_IPV4HDR_IHL_MIN) ||
- odp_unlikely(ver != 4) ||
- (prs->l3_len > frame_len - *offset)) {
- prs->error_flags.ip_err = 1;
- return 0;
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+
+ void *mapaddr = odp_packet_offset(packet_handle(pkt_hdr),
+ offset, &seglen, NULL);
+
+ if (seglen > len)
+ seglen = len;
+
+ sum = odp_hash_crc32c(mapaddr, seglen, sum);
+ len -= seglen;
+ offset += seglen;
}
- *offset += ihl * 4;
- *parseptr += ihl * 4;
+ return sum;
+}
- if (odp_unlikely(ihl > _ODP_IPV4HDR_IHL_MIN))
- prs->input_flags.ipopt = 1;
+static inline int packet_ipv4_chksum(odp_packet_t pkt,
+ uint32_t offset,
+ _odp_ipv4hdr_t *ip,
+ odp_u16sum_t *chksum)
+{
+ unsigned int nleft = _ODP_IPV4HDR_IHL(ip->ver_ihl) * 4;
+ uint16_t buf[nleft / 2];
+ int res;
- /* A packet is a fragment if:
- * "more fragments" flag is set (all fragments except the last)
- * OR
- * "fragment offset" field is nonzero (all fragments except the first)
- */
- frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
- if (odp_unlikely(_ODP_IPV4HDR_IS_FRAGMENT(frag_offset)))
- prs->input_flags.ipfrag = 1;
+ if (odp_unlikely(nleft < sizeof(*ip)))
+ return -1;
+ ip->chksum = 0;
+ memcpy(buf, ip, sizeof(*ip));
+ res = odp_packet_copy_to_mem(pkt, offset + sizeof(*ip),
+ nleft - sizeof(*ip),
+ buf + sizeof(*ip) / 2);
+ if (odp_unlikely(res < 0))
+ return res;
- /* Handle IPv4 broadcast / multicast */
- prs->input_flags.ip_bcast = (dstaddr == 0xffffffff);
- prs->input_flags.ip_mcast = (dstaddr >> 28) == 0xd;
+ *chksum = ~chksum_finalize(chksum_partial(buf, nleft, 0));
- return ipv4->proto;
+ return 0;
}
+#define _ODP_IPV4HDR_CSUM_OFFSET ODP_OFFSETOF(_odp_ipv4hdr_t, chksum)
+#define _ODP_IPV4ADDR_OFFSSET ODP_OFFSETOF(_odp_ipv4hdr_t, src_addr)
+#define _ODP_IPV6ADDR_OFFSSET ODP_OFFSETOF(_odp_ipv6hdr_t, src_addr)
+#define _ODP_IPV4HDR_CSUM_OFFSET ODP_OFFSETOF(_odp_ipv4hdr_t, chksum)
+#define _ODP_UDP_LEN_OFFSET ODP_OFFSETOF(_odp_udphdr_t, length)
+#define _ODP_UDP_CSUM_OFFSET ODP_OFFSETOF(_odp_udphdr_t, chksum)
+
/**
- * Parser helper function for IPv6
+ * Calculate and fill in IPv4 checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-static inline uint8_t parse_ipv6(packet_parser_t *prs, const uint8_t **parseptr,
- uint32_t *offset, uint32_t frame_len,
- uint32_t seg_len)
+int _odp_packet_ipv4_chksum_insert(odp_packet_t pkt)
{
- const _odp_ipv6hdr_t *ipv6 = (const _odp_ipv6hdr_t *)*parseptr;
- const _odp_ipv6hdr_ext_t *ipv6ext;
- uint32_t dstaddr0 = odp_be_to_cpu_32(ipv6->dst_addr.u8[0]);
-
- prs->l3_len = odp_be_to_cpu_16(ipv6->payload_len) +
- _ODP_IPV6HDR_LEN;
+ uint32_t offset;
+ _odp_ipv4hdr_t ip;
+ odp_u16sum_t chksum;
+ int res;
- /* Basic sanity checks on IPv6 header */
- if ((odp_be_to_cpu_32(ipv6->ver_tc_flow) >> 28) != 6 ||
- prs->l3_len > frame_len - *offset) {
- prs->error_flags.ip_err = 1;
- return 0;
- }
+ offset = odp_packet_l3_offset(pkt);
+ if (offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
- /* IPv6 broadcast / multicast flags */
- prs->input_flags.ip_mcast = (dstaddr0 & 0xff000000) == 0xff000000;
- prs->input_flags.ip_bcast = 0;
+ res = odp_packet_copy_to_mem(pkt, offset, sizeof(ip), &ip);
+ if (odp_unlikely(res < 0))
+ return res;
- /* Skip past IPv6 header */
- *offset += sizeof(_odp_ipv6hdr_t);
- *parseptr += sizeof(_odp_ipv6hdr_t);
+ res = packet_ipv4_chksum(pkt, offset, &ip, &chksum);
+ if (odp_unlikely(res < 0))
+ return res;
- /* Skip past any IPv6 extension headers */
- if (ipv6->next_hdr == _ODP_IPPROTO_HOPOPTS ||
- ipv6->next_hdr == _ODP_IPPROTO_ROUTE) {
- prs->input_flags.ipopt = 1;
+ return odp_packet_copy_from_mem(pkt,
+ offset + _ODP_IPV4HDR_CSUM_OFFSET,
+ 2, &chksum);
+}
- do {
- ipv6ext = (const _odp_ipv6hdr_ext_t *)*parseptr;
- uint16_t extlen = 8 + ipv6ext->ext_len * 8;
+static int _odp_packet_tcp_udp_chksum_insert(odp_packet_t pkt, uint16_t proto)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t zero = 0;
+ uint64_t sum;
+ uint16_t l3_ver = 0; /* GCC */
+ uint16_t chksum;
+ uint32_t chksum_offset;
+ uint32_t frame_len = odp_packet_len(pkt);
- *offset += extlen;
- *parseptr += extlen;
- } while ((ipv6ext->next_hdr == _ODP_IPPROTO_HOPOPTS ||
- ipv6ext->next_hdr == _ODP_IPPROTO_ROUTE) &&
- *offset < seg_len);
+ if (pkt_hdr->p.l3_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
+ if (pkt_hdr->p.l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
- if (*offset >= prs->l3_offset +
- odp_be_to_cpu_16(ipv6->payload_len)) {
- prs->error_flags.ip_err = 1;
- return 0;
- }
+ odp_packet_copy_to_mem(pkt, pkt_hdr->p.l3_offset, 2, &l3_ver);
- if (ipv6ext->next_hdr == _ODP_IPPROTO_FRAG)
- prs->input_flags.ipfrag = 1;
+ if (_ODP_IPV4HDR_VER(l3_ver) == _ODP_IPV4)
+ sum = packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l3_offset +
+ _ODP_IPV4ADDR_OFFSSET,
+ 2 * _ODP_IPV4ADDR_LEN);
+ else
+ sum = packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l3_offset +
+ _ODP_IPV6ADDR_OFFSSET,
+ 2 * _ODP_IPV6ADDR_LEN);
+#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
+ sum += proto;
+#else
+ sum += proto << 8;
+#endif
- return ipv6ext->next_hdr;
+ if (proto == _ODP_IPPROTO_TCP) {
+ sum += odp_cpu_to_be_16(frame_len -
+ pkt_hdr->p.l4_offset);
+ chksum_offset = pkt_hdr->p.l4_offset + _ODP_UDP_CSUM_OFFSET;
+ } else {
+ sum += packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset +
+ _ODP_UDP_LEN_OFFSET,
+ 2);
+ chksum_offset = pkt_hdr->p.l4_offset + _ODP_UDP_CSUM_OFFSET;
}
+ odp_packet_copy_from_mem(pkt, chksum_offset, 2, &zero);
- if (odp_unlikely(ipv6->next_hdr == _ODP_IPPROTO_FRAG)) {
- prs->input_flags.ipopt = 1;
- prs->input_flags.ipfrag = 1;
- }
+ sum += packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset,
+ frame_len -
+ pkt_hdr->p.l4_offset);
+
+ chksum = ~chksum_finalize(sum);
- return ipv6->next_hdr;
+ if (proto == _ODP_IPPROTO_UDP && chksum == 0)
+ chksum = 0xffff;
+
+ return odp_packet_copy_from_mem(pkt,
+ chksum_offset,
+ 2, &chksum);
}
/**
- * Parser helper function for TCP
+ * Calculate and fill in TCP checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-static inline void parse_tcp(packet_parser_t *prs,
- const uint8_t **parseptr, uint32_t *offset)
+int _odp_packet_tcp_chksum_insert(odp_packet_t pkt)
{
- const _odp_tcphdr_t *tcp = (const _odp_tcphdr_t *)*parseptr;
-
- if (tcp->hl < sizeof(_odp_tcphdr_t) / sizeof(uint32_t))
- prs->error_flags.tcp_err = 1;
- else if ((uint32_t)tcp->hl * 4 > sizeof(_odp_tcphdr_t))
- prs->input_flags.tcpopt = 1;
-
- prs->l4_len = prs->l3_len +
- prs->l3_offset - prs->l4_offset;
-
- if (offset)
- *offset += (uint32_t)tcp->hl * 4;
- *parseptr += (uint32_t)tcp->hl * 4;
+ return _odp_packet_tcp_udp_chksum_insert(pkt, _ODP_IPPROTO_TCP);
}
/**
- * Parser helper function for UDP
+ * Calculate and fill in UDP checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-static inline void parse_udp(packet_parser_t *prs,
- const uint8_t **parseptr, uint32_t *offset)
+int _odp_packet_udp_chksum_insert(odp_packet_t pkt)
{
- const _odp_udphdr_t *udp = (const _odp_udphdr_t *)*parseptr;
- uint32_t udplen = odp_be_to_cpu_16(udp->length);
-
- if (udplen < sizeof(_odp_udphdr_t) ||
- udplen > (prs->l3_len +
- prs->l4_offset - prs->l3_offset)) {
- prs->error_flags.udp_err = 1;
- }
-
- prs->l4_len = udplen;
-
- if (offset)
- *offset += sizeof(_odp_udphdr_t);
- *parseptr += sizeof(_odp_udphdr_t);
+ return _odp_packet_tcp_udp_chksum_insert(pkt, _ODP_IPPROTO_UDP);
}
/**
- * Parse common packet headers up to given layer
+ * Calculate and fill in SCTP checksum
*
- * The function expects at least PACKET_PARSE_SEG_LEN bytes of data to be
- * available from the ptr.
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-int packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
- uint32_t frame_len, uint32_t seg_len, layer_t layer)
+int _odp_packet_sctp_chksum_insert(odp_packet_t pkt)
{
- uint32_t offset;
- const uint8_t *parseptr;
-
- switch (prs->parsed_layers) {
- case LAYER_NONE:
- /* Fall through */
-
- case LAYER_L2:
- {
- const _odp_ethhdr_t *eth;
- uint16_t macaddr0, macaddr2, macaddr4;
- const _odp_vlanhdr_t *vlan;
-
- offset = sizeof(_odp_ethhdr_t);
- if (packet_parse_l2_not_done(prs))
- packet_parse_l2(prs, frame_len);
-
- eth = (const _odp_ethhdr_t *)ptr;
-
- /* Handle Ethernet broadcast/multicast addresses */
- macaddr0 = odp_be_to_cpu_16(*((const uint16_t *)
- (const void *)eth));
- prs->input_flags.eth_mcast = (macaddr0 & 0x0100) == 0x0100;
-
- if (macaddr0 == 0xffff) {
- macaddr2 =
- odp_be_to_cpu_16(*((const uint16_t *)
- (const void *)eth + 1));
- macaddr4 =
- odp_be_to_cpu_16(*((const uint16_t *)
- (const void *)eth + 2));
- prs->input_flags.eth_bcast =
- (macaddr2 == 0xffff) && (macaddr4 == 0xffff);
- } else {
- prs->input_flags.eth_bcast = 0;
- }
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t sum;
+ uint32_t frame_len = odp_packet_len(pkt);
- /* Get Ethertype */
- prs->ethtype = odp_be_to_cpu_16(eth->type);
- parseptr = (const uint8_t *)(eth + 1);
+ if (pkt_hdr->p.l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
- /* Check for SNAP vs. DIX */
- if (prs->ethtype < _ODP_ETH_LEN_MAX) {
- prs->input_flags.snap = 1;
- if (prs->ethtype > frame_len - offset) {
- prs->error_flags.snap_len = 1;
- goto parse_exit;
- }
- prs->ethtype = odp_be_to_cpu_16(*((const uint16_t *)
- (uintptr_t)
- (parseptr + 6)));
- offset += 8;
- parseptr += 8;
+ sum = 0;
+ odp_packet_copy_from_mem(pkt, pkt_hdr->p.l4_offset + 8, 4, &sum);
+ sum = ~packet_sum_crc32c(pkt_hdr, pkt_hdr->p.l4_offset,
+ frame_len - pkt_hdr->p.l4_offset,
+ ~0);
+ return odp_packet_copy_from_mem(pkt, pkt_hdr->p.l4_offset + 8, 4, &sum);
+}
+
+int _odp_packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
+ odp_pktin_config_opt_t opt, uint64_t l4_part_sum)
+
+{
+ uint32_t frame_len = odp_packet_len(packet_handle(pkt_hdr));
+
+ /* UDP chksum == 0 case is covered in parse_udp() */
+ if (opt.bit.udp_chksum &&
+ pkt_hdr->p.input_flags.udp &&
+ !pkt_hdr->p.input_flags.ipfrag &&
+ !pkt_hdr->p.input_flags.udp_chksum_zero) {
+ uint16_t sum = ~packet_sum(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset,
+ frame_len -
+ pkt_hdr->p.l4_offset,
+ l4_part_sum);
+
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != 0) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.udp_err = 1;
+ _ODP_DBG("UDP chksum fail (%x)!\n", sum);
+ if (opt.bit.drop_udp_err)
+ return -1;
}
+ }
- /* Parse the VLAN header(s), if present */
- if (prs->ethtype == _ODP_ETHTYPE_VLAN_OUTER) {
- prs->input_flags.vlan_qinq = 1;
- prs->input_flags.vlan = 1;
-
- vlan = (const _odp_vlanhdr_t *)parseptr;
- prs->ethtype = odp_be_to_cpu_16(vlan->type);
- offset += sizeof(_odp_vlanhdr_t);
- parseptr += sizeof(_odp_vlanhdr_t);
+ if (opt.bit.tcp_chksum &&
+ pkt_hdr->p.input_flags.tcp &&
+ !pkt_hdr->p.input_flags.ipfrag) {
+ uint16_t sum = ~packet_sum(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset,
+ frame_len -
+ pkt_hdr->p.l4_offset,
+ l4_part_sum);
+
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != 0) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.tcp_err = 1;
+ _ODP_DBG("TCP chksum fail (%x)!\n", sum);
+ if (opt.bit.drop_tcp_err)
+ return -1;
}
+ }
- if (prs->ethtype == _ODP_ETHTYPE_VLAN) {
- prs->input_flags.vlan = 1;
- vlan = (const _odp_vlanhdr_t *)parseptr;
- prs->ethtype = odp_be_to_cpu_16(vlan->type);
- offset += sizeof(_odp_vlanhdr_t);
- parseptr += sizeof(_odp_vlanhdr_t);
+ if (opt.bit.sctp_chksum &&
+ pkt_hdr->p.input_flags.sctp &&
+ !pkt_hdr->p.input_flags.ipfrag) {
+ uint32_t seg_len = 0;
+ _odp_sctphdr_t hdr_copy;
+ uint32_t sum = ~packet_sum_crc32c(pkt_hdr,
+ pkt_hdr->p.l4_offset +
+ _ODP_SCTPHDR_LEN,
+ frame_len -
+ pkt_hdr->p.l4_offset -
+ _ODP_SCTPHDR_LEN,
+ l4_part_sum);
+ _odp_sctphdr_t *sctp = odp_packet_offset(packet_handle(pkt_hdr),
+ pkt_hdr->p.l4_offset,
+ &seg_len, NULL);
+ if (odp_unlikely(seg_len < sizeof(*sctp))) {
+ odp_packet_t pkt = packet_handle(pkt_hdr);
+
+ sctp = &hdr_copy;
+ odp_packet_copy_to_mem(pkt, pkt_hdr->p.l4_offset,
+ sizeof(*sctp), sctp);
}
-
- prs->l3_offset = offset;
- prs->parsed_layers = LAYER_L2;
- if (layer == LAYER_L2)
- return prs->error_flags.all != 0;
- }
- /* Fall through */
-
- case LAYER_L3:
- {
- offset = prs->l3_offset;
- parseptr = (const uint8_t *)(ptr + offset);
- /* Set l3_offset+flag only for known ethtypes */
- prs->input_flags.l3 = 1;
-
- /* Parse Layer 3 headers */
- switch (prs->ethtype) {
- case _ODP_ETHTYPE_IPV4:
- prs->input_flags.ipv4 = 1;
- prs->ip_proto = parse_ipv4(prs, &parseptr, &offset,
- frame_len);
- break;
-
- case _ODP_ETHTYPE_IPV6:
- prs->input_flags.ipv6 = 1;
- prs->ip_proto = parse_ipv6(prs, &parseptr, &offset,
- frame_len, seg_len);
- break;
-
- case _ODP_ETHTYPE_ARP:
- prs->input_flags.arp = 1;
- prs->ip_proto = 255; /* Reserved invalid by IANA */
- break;
-
- default:
- prs->input_flags.l3 = 0;
- prs->l3_offset = ODP_PACKET_OFFSET_INVALID;
- prs->ip_proto = 255; /* Reserved invalid by IANA */
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != sctp->chksum) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.sctp_err = 1;
+ _ODP_DBG("SCTP chksum fail (%x/%x)!\n", sum, sctp->chksum);
+ if (opt.bit.drop_sctp_err)
+ return -1;
}
+ }
- /* Set l4_offset+flag only for known ip_proto */
- prs->l4_offset = offset;
- prs->parsed_layers = LAYER_L3;
- if (layer == LAYER_L3)
- return prs->error_flags.all != 0;
+ return pkt_hdr->p.flags.all.error != 0;
+}
+
+int odp_packet_parse(odp_packet_t pkt, uint32_t offset,
+ const odp_packet_parse_param_t *param)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ const uint8_t *data;
+ uint32_t seg_len;
+ uint32_t seg_end;
+ uint32_t packet_len = odp_packet_len(pkt);
+ odp_proto_t proto = param->proto;
+ odp_proto_layer_t layer = param->last_layer;
+ int ret;
+ uint16_t ethtype;
+ uint64_t l4_part_sum = 0;
+ const uint32_t min_seglen = PARSE_ETH_BYTES + PARSE_L3_L4_BYTES;
+ uint8_t buf[min_seglen];
+ odp_pktin_config_opt_t opt;
+
+ if (proto == ODP_PROTO_NONE || layer == ODP_PROTO_LAYER_NONE)
+ return -1;
+
+ data = odp_packet_offset(pkt, offset, &seg_len, NULL);
+
+ if (data == NULL)
+ return -1;
+
+ /*
+ * We must not have a packet segment boundary within the parsed
+ * packet data range. Copy enough data to a temporary buffer for
+ * parsing if necessary.
+ */
+ if (odp_unlikely(pkt_hdr->mb.nb_segs > 1) &&
+ odp_unlikely(seg_len < min_seglen)) {
+ seg_len = min_seglen;
+ if (seg_len > packet_len - offset)
+ seg_len = packet_len - offset;
+ odp_packet_copy_to_mem(pkt, offset, seg_len, buf);
+ data = buf;
}
- /* Fall through */
-
- case LAYER_L4:
- {
- offset = prs->l4_offset;
- parseptr = (const uint8_t *)(ptr + offset);
- prs->input_flags.l4 = 1;
-
- /* Parse Layer 4 headers */
- switch (prs->ip_proto) {
- case _ODP_IPPROTO_ICMP:
- prs->input_flags.icmp = 1;
- break;
-
- case _ODP_IPPROTO_TCP:
- if (odp_unlikely(offset + _ODP_TCPHDR_LEN > seg_len))
- return -1;
- prs->input_flags.tcp = 1;
- parse_tcp(prs, &parseptr, NULL);
- break;
- case _ODP_IPPROTO_UDP:
- if (odp_unlikely(offset + _ODP_UDPHDR_LEN > seg_len))
- return -1;
- prs->input_flags.udp = 1;
- parse_udp(prs, &parseptr, NULL);
- break;
-
- case _ODP_IPPROTO_AH:
- prs->input_flags.ipsec = 1;
- prs->input_flags.ipsec_ah = 1;
- break;
-
- case _ODP_IPPROTO_ESP:
- prs->input_flags.ipsec = 1;
- prs->input_flags.ipsec_esp = 1;
- break;
-
- case _ODP_IPPROTO_SCTP:
- prs->input_flags.sctp = 1;
- break;
-
- default:
- prs->input_flags.l4 = 0;
- prs->l4_offset = ODP_PACKET_OFFSET_INVALID;
- break;
- }
+ seg_end = offset + seg_len; /* one past the maximum offset */
- prs->parsed_layers = LAYER_L4;
- break;
+ /* Reset parser flags, keep other flags */
+ packet_parse_reset(pkt_hdr, 0);
+
+ if (proto == ODP_PROTO_ETH) {
+ /* Assume valid L2 header, no CRC/FCS check in SW */
+ pkt_hdr->p.l2_offset = offset;
+
+ ethtype = _odp_parse_eth(&pkt_hdr->p, &data, &offset, packet_len);
+ } else if (proto == ODP_PROTO_IPV4) {
+ ethtype = _ODP_ETHTYPE_IPV4;
+ } else if (proto == ODP_PROTO_IPV6) {
+ ethtype = _ODP_ETHTYPE_IPV6;
+ } else {
+ ethtype = 0; /* Invalid */
}
- case LAYER_ALL:
- break;
+ opt.all_bits = 0;
+ opt.bit.ipv4_chksum = param->chksums.chksum.ipv4;
+ opt.bit.udp_chksum = param->chksums.chksum.udp;
+ opt.bit.tcp_chksum = param->chksums.chksum.tcp;
+ opt.bit.sctp_chksum = param->chksums.chksum.sctp;
- default:
- ODP_ERR("Invalid parse layer: %d\n", (int)layer);
+ ret = _odp_packet_parse_common_l3_l4(&pkt_hdr->p, data, offset,
+ packet_len, seg_end, layer,
+ ethtype, &l4_part_sum, opt);
+
+ if (ret)
return -1;
- }
- prs->parsed_layers = LAYER_ALL;
+ if (layer >= ODP_PROTO_LAYER_L4) {
+ ret = _odp_packet_l4_chksum(pkt_hdr, opt, l4_part_sum);
+ if (ret)
+ return -1;
+ }
-parse_exit:
- return prs->error_flags.all != 0;
+ return 0;
}
-/**
- * Simple packet parser
- */
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr, layer_t layer)
+int odp_packet_parse_multi(const odp_packet_t pkt[], const uint32_t offset[],
+ int num, const odp_packet_parse_param_t *param)
{
- uint32_t seg_len = odp_packet_seg_len((odp_packet_t)pkt_hdr);
- uint32_t len = packet_len(pkt_hdr);
- void *base = odp_packet_data((odp_packet_t)pkt_hdr);
+ int i;
- return packet_parse_common(&pkt_hdr->p, base, len, seg_len, layer);
+ for (i = 0; i < num; i++)
+ if (odp_packet_parse(pkt[i], offset[i], param))
+ return i;
+
+ return num;
+}
+
+void odp_packet_parse_result(odp_packet_t pkt,
+ odp_packet_parse_result_t *result)
+{
+ /* TODO: optimize to single word copy when packet header stores bits
+ * directly into odp_packet_parse_result_flag_t */
+ result->flag.all = 0;
+ result->flag.has_error = odp_packet_has_error(pkt);
+ result->flag.has_l2_error = odp_packet_has_l2_error(pkt);
+ result->flag.has_l3_error = odp_packet_has_l3_error(pkt);
+ result->flag.has_l4_error = odp_packet_has_l4_error(pkt);
+ result->flag.has_l2 = odp_packet_has_l2(pkt);
+ result->flag.has_l3 = odp_packet_has_l3(pkt);
+ result->flag.has_l4 = odp_packet_has_l4(pkt);
+ result->flag.has_eth = odp_packet_has_eth(pkt);
+ result->flag.has_eth_bcast = odp_packet_has_eth_bcast(pkt);
+ result->flag.has_eth_mcast = odp_packet_has_eth_mcast(pkt);
+ result->flag.has_jumbo = odp_packet_has_jumbo(pkt);
+ result->flag.has_vlan = odp_packet_has_vlan(pkt);
+ result->flag.has_vlan_qinq = odp_packet_has_vlan_qinq(pkt);
+ result->flag.has_arp = odp_packet_has_arp(pkt);
+ result->flag.has_ipv4 = odp_packet_has_ipv4(pkt);
+ result->flag.has_ipv6 = odp_packet_has_ipv6(pkt);
+ result->flag.has_ip_bcast = odp_packet_has_ip_bcast(pkt);
+ result->flag.has_ip_mcast = odp_packet_has_ip_mcast(pkt);
+ result->flag.has_ipfrag = odp_packet_has_ipfrag(pkt);
+ result->flag.has_ipopt = odp_packet_has_ipopt(pkt);
+ result->flag.has_ipsec = odp_packet_has_ipsec(pkt);
+ result->flag.has_udp = odp_packet_has_udp(pkt);
+ result->flag.has_tcp = odp_packet_has_tcp(pkt);
+ result->flag.has_sctp = odp_packet_has_sctp(pkt);
+ result->flag.has_icmp = odp_packet_has_icmp(pkt);
+
+ result->packet_len = odp_packet_len(pkt);
+ result->l2_offset = odp_packet_l2_offset(pkt);
+ result->l3_offset = odp_packet_l3_offset(pkt);
+ result->l4_offset = odp_packet_l4_offset(pkt);
+ result->l3_chksum_status = odp_packet_l3_chksum_status(pkt);
+ result->l4_chksum_status = odp_packet_l4_chksum_status(pkt);
+ result->l2_type = odp_packet_l2_type(pkt);
+ result->l3_type = odp_packet_l3_type(pkt);
+ result->l4_type = odp_packet_l4_type(pkt);
+}
+
+void odp_packet_parse_result_multi(const odp_packet_t pkt[],
+ odp_packet_parse_result_t *result[],
+ int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ odp_packet_parse_result(pkt[i], result[i]);
}
uint64_t odp_packet_to_u64(odp_packet_t hdl)
@@ -1503,27 +1651,24 @@ uint64_t odp_packet_seg_to_u64(odp_packet_seg_t hdl)
return _odp_pri(hdl);
}
-odp_packet_t odp_packet_ref_static(odp_packet_t pkt)
-{
- return odp_packet_copy(pkt, odp_packet_pool(pkt));
-}
-
odp_packet_t odp_packet_ref(odp_packet_t pkt, uint32_t offset)
{
odp_packet_t new;
int ret;
+ _ODP_ASSERT(!odp_packet_has_ref(pkt));
+
new = odp_packet_copy(pkt, odp_packet_pool(pkt));
if (new == ODP_PACKET_INVALID) {
- ODP_ERR("copy failed\n");
+ _ODP_ERR("copy failed\n");
return ODP_PACKET_INVALID;
}
ret = odp_packet_trunc_head(&new, offset, NULL, NULL);
if (ret < 0) {
- ODP_ERR("trunk_head failed\n");
+ _ODP_ERR("trunk_head failed\n");
odp_packet_free(new);
return ODP_PACKET_INVALID;
}
@@ -1537,10 +1682,12 @@ odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
odp_packet_t new;
int ret;
+ _ODP_ASSERT(!odp_packet_has_ref(pkt));
+
new = odp_packet_copy(pkt, odp_packet_pool(pkt));
if (new == ODP_PACKET_INVALID) {
- ODP_ERR("copy failed\n");
+ _ODP_ERR("copy failed\n");
return ODP_PACKET_INVALID;
}
@@ -1548,7 +1695,7 @@ odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
ret = odp_packet_trunc_head(&new, offset, NULL, NULL);
if (ret < 0) {
- ODP_ERR("trunk_head failed\n");
+ _ODP_ERR("trunk_head failed\n");
odp_packet_free(new);
return ODP_PACKET_INVALID;
}
@@ -1557,7 +1704,7 @@ odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
ret = odp_packet_concat(&hdr, new);
if (ret < 0) {
- ODP_ERR("concat failed\n");
+ _ODP_ERR("concat failed\n");
odp_packet_free(new);
return ODP_PACKET_INVALID;
}
@@ -1565,19 +1712,255 @@ odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
return hdr;
}
-int odp_packet_has_ref(odp_packet_t pkt)
+void odp_packet_lso_request_clr(odp_packet_t pkt)
{
- (void)pkt;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->p.flags.lso = 0;
+}
+
+int odp_packet_has_lso_request(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ return pkt_hdr->p.flags.lso;
+}
+
+uint32_t odp_packet_payload_offset(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (pkt_hdr->p.flags.payload_off)
+ return pkt_hdr->payload_offset;
+
+ return ODP_PACKET_OFFSET_INVALID;
+}
+
+int odp_packet_payload_offset_set(odp_packet_t pkt, uint32_t offset)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->p.flags.payload_off = 1;
+ pkt_hdr->payload_offset = offset;
return 0;
}
-uint32_t odp_packet_unshared_len(odp_packet_t pkt)
+void odp_packet_aging_tmo_set(odp_packet_t pkt, uint64_t tmo_ns)
{
- return odp_packet_len(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->p.flags.tx_aging = tmo_ns ? 1 : 0;
+ pkt_hdr->tx_aging_ns = tmo_ns;
}
-/* Include non-inlined versions of API functions */
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/packet_inlines_api.h>
-#endif
+uint64_t odp_packet_aging_tmo(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ return pkt_hdr->p.flags.tx_aging ? pkt_hdr->tx_aging_ns : 0;
+}
+
+int odp_packet_tx_compl_request(odp_packet_t pkt, const odp_packet_tx_compl_opt_t *opt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ switch (opt->mode) {
+ case ODP_PACKET_TX_COMPL_DISABLED:
+ pkt_hdr->p.flags.tx_compl_ev = 0;
+ pkt_hdr->p.flags.tx_compl_poll = 0;
+ break;
+ case ODP_PACKET_TX_COMPL_EVENT:
+ _ODP_ASSERT(opt->queue != ODP_QUEUE_INVALID);
+ pkt_hdr->p.flags.tx_compl_ev = 1;
+ pkt_hdr->p.flags.tx_compl_poll = 0;
+ pkt_hdr->dst_queue = opt->queue;
+ break;
+ case ODP_PACKET_TX_COMPL_POLL:
+ pkt_hdr->p.flags.tx_compl_ev = 0;
+ pkt_hdr->p.flags.tx_compl_poll = 1;
+ pkt_hdr->tx_compl_id = opt->compl_id;
+ break;
+ default:
+ _ODP_ERR("Bad TX completion mode: %i\n", opt->mode);
+ return -1;
+ }
+
+ return 0;
+}
+
+int odp_packet_has_tx_compl_request(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ return pkt_hdr->p.flags.tx_compl_ev || pkt_hdr->p.flags.tx_compl_poll;
+}
+
+void odp_packet_tx_compl_free(odp_packet_tx_compl_t tx_compl)
+{
+ if (odp_unlikely(tx_compl == ODP_PACKET_TX_COMPL_INVALID)) {
+ _ODP_ERR("Bad TX completion event handle\n");
+ return;
+ }
+
+ odp_buffer_free((odp_buffer_t)tx_compl);
+}
+
+void *odp_packet_tx_compl_user_ptr(odp_packet_tx_compl_t tx_compl)
+{
+ if (odp_unlikely(tx_compl == ODP_PACKET_TX_COMPL_INVALID)) {
+ _ODP_ERR("Bad TX completion event handle\n");
+ return NULL;
+ }
+
+ _odp_pktio_tx_compl_t *data = odp_buffer_addr((odp_buffer_t)tx_compl);
+
+ return (void *)(uintptr_t)data->user_ptr;
+}
+
+int odp_packet_tx_compl_done(odp_pktio_t pktio, uint32_t compl_id)
+{
+ return odp_atomic_load_acq_u32(&get_pktio_entry(pktio)->tx_compl_status[compl_id]);
+}
+
+void odp_packet_free_ctrl_set(odp_packet_t pkt, odp_packet_free_ctrl_t ctrl)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (ctrl == ODP_PACKET_FREE_CTRL_DONT_FREE)
+ pkt_hdr->p.flags.free_ctrl = 1;
+ else
+ pkt_hdr->p.flags.free_ctrl = 0;
+}
+
+odp_packet_free_ctrl_t odp_packet_free_ctrl(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (pkt_hdr->p.flags.free_ctrl)
+ return ODP_PACKET_FREE_CTRL_DONT_FREE;
+
+ return ODP_PACKET_FREE_CTRL_DISABLED;
+}
+
+odp_packet_reass_status_t odp_packet_reass_status(odp_packet_t pkt)
+{
+ (void)pkt;
+ return ODP_PACKET_REASS_NONE;
+}
+
+int odp_packet_reass_info(odp_packet_t pkt, odp_packet_reass_info_t *info)
+{
+ (void)pkt;
+ (void)info;
+ return -1;
+}
+
+int odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
+ odp_packet_reass_partial_state_t *res)
+{
+ (void)pkt;
+ (void)frags;
+ (void)res;
+ return -ENOTSUP;
+}
+
+uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[],
+ uint32_t num)
+{
+ uint32_t i;
+ odp_packet_seg_t seg;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
+ uint32_t num_segs = odp_packet_num_segs(pkt);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ _ODP_ERR("Not a packet pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ _ODP_ERR("Not an external memory pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(num < num_segs)) {
+ _ODP_ERR("Not enough buffer handles %u. Packet has %u segments.\n", num, num_segs);
+ return 0;
+ }
+
+ seg = odp_packet_first_seg(pkt);
+
+ for (i = 0; i < num_segs; i++) {
+ pkt_buf[i] = (odp_packet_buf_t)(uintptr_t)seg;
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+
+ return num_segs;
+}
+
+odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl,
+ odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i, data_len;
+ odp_packet_hdr_t *cur_seg, *next_seg;
+ odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[0];
+ uint32_t headroom = odp_packet_buf_data_offset(pkt_buf[0]);
+
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ _ODP_ERR("Not a packet pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ _ODP_ERR("Not an external memory pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(num == 0)) {
+ _ODP_ERR("Bad number of buffers: %u\n", num);
+ return ODP_PACKET_INVALID;
+ }
+
+ cur_seg = pkt_hdr;
+ data_len = 0;
+
+ for (i = 0; i < num; i++) {
+ struct rte_mbuf *mb;
+
+ next_seg = NULL;
+ if (i < num - 1)
+ next_seg = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[i + 1];
+
+ data_len += cur_seg->mb.data_len;
+ mb = (struct rte_mbuf *)(uintptr_t)cur_seg;
+ mb->next = (struct rte_mbuf *)next_seg;
+ cur_seg = next_seg;
+ }
+
+ pkt_hdr->mb.nb_segs = num;
+ pkt_hdr->mb.pkt_len = data_len;
+ pkt_hdr->mb.data_off = headroom;
+
+ /* Reset metadata */
+ pkt_hdr->event_hdr.subtype = ODP_EVENT_PACKET_BASIC;
+ pkt_hdr->input = ODP_PKTIO_INVALID;
+ packet_parse_reset(pkt_hdr, 1);
+
+ return packet_handle(pkt_hdr);
+}
+
+void odp_packet_proto_stats_request(odp_packet_t pkt, odp_packet_proto_stats_opt_t *opt)
+{
+ (void)pkt;
+ (void)opt;
+}
+
+odp_proto_stats_t odp_packet_proto_stats(odp_packet_t pkt)
+{
+ (void)pkt;
+
+ return ODP_PROTO_STATS_INVALID;
+}
diff --git a/platform/linux-dpdk/odp_packet_dpdk.c b/platform/linux-dpdk/odp_packet_dpdk.c
index 26d32370b..da76db06d 100644
--- a/platform/linux-dpdk/odp_packet_dpdk.c
+++ b/platform/linux-dpdk/odp_packet_dpdk.c
@@ -1,51 +1,258 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp_posix_extensions.h>
+
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/pool.h>
+#include <odp/api/std_types.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/time_inlines.h>
+
+#include <odp_classification_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_eventdev_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_packet_dpdk.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
+#include <protocols/eth.h>
+
+#include <rte_config.h>
+#if defined(__clang__)
+#undef RTE_TOOLCHAIN_GCC
+#endif
+#include <rte_common.h>
+#include <rte_ethdev.h>
+#include <rte_ip_frag.h>
+#include <rte_udp.h>
+#include <rte_tcp.h>
+#include <rte_version.h>
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
+#include <ctype.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <net/if.h>
+#include <poll.h>
+#include <stdlib.h>
#include <stdio.h>
-#include <errno.h>
+#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
-#include <poll.h>
#include <unistd.h>
-#include <fcntl.h>
-#include <string.h>
-#include <stdlib.h>
-#include <linux/ethtool.h>
-#include <linux/sockios.h>
-
-#include <odp/api/cpu.h>
-#include <odp/api/hints.h>
-#include <odp/api/thread.h>
-
-#include <odp/api/system_info.h>
-#include <odp_debug_internal.h>
-#include <odp_classification_internal.h>
-#include <odp_packet_io_internal.h>
-#include <odp_packet_dpdk.h>
-#include <net/if.h>
-#include <math.h>
+#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
+ #define RTE_MBUF_F_TX_IPV4 PKT_TX_IPV4
+ #define RTE_MBUF_F_TX_IPV6 PKT_TX_IPV6
+ #define RTE_MBUF_F_TX_IP_CKSUM PKT_TX_IP_CKSUM
+ #define RTE_MBUF_F_TX_UDP_CKSUM PKT_TX_UDP_CKSUM
+ #define RTE_MBUF_F_TX_TCP_CKSUM PKT_TX_TCP_CKSUM
+
+ #define RTE_ETH_RSS_IPV4 ETH_RSS_IPV4
+ #define RTE_ETH_RSS_FRAG_IPV4 ETH_RSS_FRAG_IPV4
+ #define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP
+ #define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP
+ #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER ETH_RSS_NONFRAG_IPV4_OTHER
+
+ #define RTE_ETH_RSS_IPV6 ETH_RSS_IPV6
+ #define RTE_ETH_RSS_IPV6_EX ETH_RSS_IPV6_EX
+ #define RTE_ETH_RSS_IPV6_UDP_EX ETH_RSS_IPV6_UDP_EX
+ #define RTE_ETH_RSS_IPV6_TCP_EX ETH_RSS_IPV6_TCP_EX
+ #define RTE_ETH_RSS_FRAG_IPV6 ETH_RSS_FRAG_IPV6
+ #define RTE_ETH_RSS_NONFRAG_IPV6_TCP ETH_RSS_NONFRAG_IPV6_TCP
+ #define RTE_ETH_RSS_NONFRAG_IPV6_UDP ETH_RSS_NONFRAG_IPV6_UDP
+ #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER ETH_RSS_NONFRAG_IPV6_OTHER
+
+ #define RTE_ETH_MQ_RX_RSS ETH_MQ_RX_RSS
+ #define RTE_ETH_MQ_TX_NONE ETH_MQ_TX_NONE
+
+ #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM DEV_RX_OFFLOAD_IPV4_CKSUM
+ #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM DEV_RX_OFFLOAD_TCP_CKSUM
+ #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM DEV_RX_OFFLOAD_UDP_CKSUM
+
+ #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM DEV_TX_OFFLOAD_SCTP_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM DEV_TX_OFFLOAD_TCP_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM DEV_TX_OFFLOAD_UDP_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS DEV_TX_OFFLOAD_MULTI_SEGS
+
+ #define RTE_ETH_FC_FULL RTE_FC_FULL
+ #define RTE_ETH_FC_RX_PAUSE RTE_FC_RX_PAUSE
+ #define RTE_ETH_FC_TX_PAUSE RTE_FC_TX_PAUSE
+ #define RTE_ETH_LINK_AUTONEG ETH_LINK_AUTONEG
+ #define RTE_ETH_LINK_FULL_DUPLEX ETH_LINK_FULL_DUPLEX
+ #define RTE_ETH_LINK_UP ETH_LINK_UP
+ #define RTE_ETH_SPEED_NUM_NONE ETH_SPEED_NUM_NONE
+#endif
+
+/* DPDK poll mode drivers requiring minimum RX burst size DPDK_MIN_RX_BURST */
+#define IXGBE_DRV_NAME "net_ixgbe"
+#define I40E_DRV_NAME "net_i40e"
+
+/* Minimum RX burst size */
+#define DPDK_MIN_RX_BURST 4
+
+/* Limits for setting link MTU */
+#define DPDK_MTU_MIN (RTE_ETHER_MIN_MTU + _ODP_ETHHDR_LEN)
+#define DPDK_MTU_MAX (9000 + _ODP_ETHHDR_LEN)
+
+/* Number of packet buffers to prefetch in RX */
+#define NUM_RX_PREFETCH 4
+
+/** DPDK runtime configuration options */
+typedef struct {
+ int multicast_enable;
+ int num_rx_desc_default;
+ int num_tx_desc_default;
+ int rx_drop_en;
+ int tx_offload_multi_segs;
+} dpdk_opt_t;
+
+/* DPDK pktio specific data */
+typedef struct ODP_ALIGNED_CACHE {
+ /* --- Fast path data --- */
+
+ /* Function for mbuf to ODP packet conversion */
+ int (*mbuf_to_pkt_fn)(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], uint16_t num);
+
+ /* DPDK port identifier */
+ uint16_t port_id;
+ struct {
+ /* No locking for rx */
+ uint8_t lockless_rx : 1;
+ /* No locking for tx */
+ uint8_t lockless_tx : 1;
+ } flags;
+ /* Minimum RX burst size */
+ uint8_t min_rx_burst;
+
+ /* --- Control path data --- */
+
+ /* Configuration options */
+ dpdk_opt_t opt;
+ /* RSS configuration */
+ struct rte_eth_rss_conf rss_conf;
+ /* Maximum transmission unit */
+ uint16_t mtu;
+ /* Maximum supported MTU value */
+ uint32_t mtu_max;
+ /* DPDK MTU has been modified */
+ uint8_t mtu_set;
+ /* Number of RX descriptors per queue */
+ uint16_t num_rx_desc[ODP_PKTIN_MAX_QUEUES];
+ /* Number of TX descriptors per queue */
+ uint16_t num_tx_desc[ODP_PKTOUT_MAX_QUEUES];
+
+ /* --- Locks for MT safe operations --- */
+
+ /* RX queue locks */
+ odp_ticketlock_t rx_lock[ODP_PKTIN_MAX_QUEUES] ODP_ALIGNED_CACHE;
+ /* TX queue locks */
+ odp_ticketlock_t tx_lock[ODP_PKTOUT_MAX_QUEUES] ODP_ALIGNED_CACHE;
+
+} pkt_dpdk_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_dpdk_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static inline pkt_dpdk_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_dpdk_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
/* Ops for all implementation of pktio.
* Order matters. The first implementation to setup successfully
* will be picked.
* Array must be NULL terminated */
-const pktio_if_ops_t * const pktio_if_ops[] = {
- &loopback_pktio_ops,
- &dpdk_pktio_ops,
+const pktio_if_ops_t * const _odp_pktio_if_ops[] = {
+ &_odp_loopback_pktio_ops,
+ &_odp_null_pktio_ops,
+ &_odp_dpdk_pktio_ops,
NULL
};
-extern pktio_table_t *pktio_tbl;
+extern void *pktio_entry_ptr[CONFIG_PKTIO_ENTRIES];
static uint32_t mtu_get_pkt_dpdk(pktio_entry_t *pktio_entry);
+static inline int input_pkts(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], uint16_t num);
+
+static inline int input_pkts_minimal(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ uint16_t num);
+
+uint16_t _odp_dpdk_pktio_port_id(pktio_entry_t *pktio_entry)
+{
+ const pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+
+ return pkt_dpdk->port_id;
+}
+
+static int lookup_opt(const char *opt_name, const char *drv_name, int *val)
+{
+ const char *base = "pktio_dpdk";
+ int ret;
+
+ ret = _odp_libconfig_lookup_ext_int(base, drv_name, opt_name, val);
+ if (ret == 0)
+ _ODP_ERR("Unable to find DPDK configuration option: %s\n", opt_name);
+
+ return ret;
+}
+
+static int init_options(pktio_entry_t *pktio_entry,
+ const struct rte_eth_dev_info *dev_info)
+{
+ dpdk_opt_t *opt = &pkt_priv(pktio_entry)->opt;
+
+ if (!lookup_opt("num_rx_desc", dev_info->driver_name,
+ &opt->num_rx_desc_default))
+ return -1;
+
+ if (!lookup_opt("num_tx_desc", dev_info->driver_name,
+ &opt->num_tx_desc_default))
+ return -1;
+
+ if (!lookup_opt("rx_drop_en", dev_info->driver_name,
+ &opt->rx_drop_en))
+ return -1;
+ opt->rx_drop_en = !!opt->rx_drop_en;
+
+ if (!lookup_opt("multicast_en", dev_info->driver_name,
+ &opt->multicast_enable))
+ return -1;
+ opt->multicast_enable = !!opt->multicast_enable;
+
+ if (!lookup_opt("tx_offload_multi_segs", dev_info->driver_name,
+ &opt->tx_offload_multi_segs))
+ return -1;
+ opt->tx_offload_multi_segs = !!opt->tx_offload_multi_segs;
+
+ _ODP_DBG("DPDK interface (%s): %" PRIu16 "\n", dev_info->driver_name,
+ pkt_priv(pktio_entry)->port_id);
+ _ODP_DBG(" multicast: %d\n", opt->multicast_enable);
+ _ODP_DBG(" num_rx_desc: %d\n", opt->num_rx_desc_default);
+ _ODP_DBG(" num_tx_desc: %d\n", opt->num_tx_desc_default);
+ _ODP_DBG(" rx_drop_en: %d\n", opt->rx_drop_en);
+ _ODP_DBG(" tx_offload_multi_segs: %d\n", opt->tx_offload_multi_segs);
+
+ return 0;
+}
+
/* Test if s has only digits or not. Dpdk pktio uses only digits.*/
static int _dpdk_netdev_is_valid(const char *s)
{
@@ -58,240 +265,674 @@ static int _dpdk_netdev_is_valid(const char *s)
return 1;
}
/* Translate ODP pktin hash protocol selection into a DPDK RSS hash-function
 * mask. NOTE: the rss_conf struct is NOT zeroed here -- the caller must clear
 * it first (prepare_rss_conf() does the memset before calling this). */
static void hash_proto_to_rss_conf(struct rte_eth_rss_conf *rss_conf,
				   const odp_pktin_hash_proto_t *hash_proto)
{
	if (hash_proto->proto.ipv4_udp)
		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
	if (hash_proto->proto.ipv4_tcp)
		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
	if (hash_proto->proto.ipv4)
		rss_conf->rss_hf |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
				    RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
	if (hash_proto->proto.ipv6_udp)
		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
				    RTE_ETH_RSS_IPV6_UDP_EX;
	if (hash_proto->proto.ipv6_tcp)
		rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
				    RTE_ETH_RSS_IPV6_TCP_EX;
	if (hash_proto->proto.ipv6)
		rss_conf->rss_hf |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
				    RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
				    RTE_ETH_RSS_IPV6_EX;
	/* No explicit RSS key: let the PMD use its default */
	rss_conf->rss_key = NULL;
}
-static void _dpdk_print_port_mac(uint8_t portid)
/* maxlen_set() handler: apply a new maximum input frame length.
 * maxlen_output is unused (DPDK has a single MTU per port). Returns the
 * rte_eth_dev_set_mtu() status (0 on success). */
static int dpdk_maxlen_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
			   uint32_t maxlen_output ODP_UNUSED)
{
	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
	uint16_t mtu;
	int ret;

	/* DPDK MTU value does not include Ethernet header */
	mtu = maxlen_input - _ODP_ETHHDR_LEN;

	ret = rte_eth_dev_set_mtu(pkt_dpdk->port_id, mtu);
	if (odp_unlikely(ret))
		_ODP_ERR("rte_eth_dev_set_mtu() failed: %d\n", ret);

	/* Cache the requested value (frame length, header included) so that
	 * dpdk_start() can restore it after device reconfiguration.
	 * NOTE(review): cached even when the driver call failed -- verify
	 * this is intentional. */
	pkt_dpdk->mtu = maxlen_input;
	pkt_dpdk->mtu_set = 1;

	return ret;
}
+
+static int get_eth_overhead(const struct rte_eth_dev_info *dev_info)
+{
+ uint32_t eth_overhead;
+
+ if (dev_info->max_mtu != UINT16_MAX &&
+ dev_info->max_rx_pktlen > dev_info->max_mtu)
+ eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
+ else
+ eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return eth_overhead;
+}
+
/* Configure the ethdev: RSS mode, RX/TX checksum offloads, optional
 * multi-segment TX, and the maximum RX frame size derived from the pool
 * segment size. Must be called before queue setup. Returns 0 on success,
 * -1 on failure. */
static int dpdk_setup_eth_dev(pktio_entry_t *pktio_entry, const struct rte_eth_dev_info *dev_info)
{
	int ret;
	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
	struct rte_eth_conf eth_conf;
	pool_t *pool = _odp_pool_entry(pktio_entry->pool);
	uint64_t rx_offloads = 0;
	uint64_t tx_offloads = 0;

	memset(&eth_conf, 0, sizeof(eth_conf));

	/* RSS spreading on RX; rss_conf was prepared in prepare_rss_conf() */
	eth_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
	eth_conf.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
	eth_conf.rx_adv_conf.rss_conf = pkt_dpdk->rss_conf;

	/* Setup RX checksum offloads */
	if (pktio_entry->config.pktin.bit.ipv4_chksum)
		rx_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;

	if (pktio_entry->config.pktin.bit.udp_chksum)
		rx_offloads |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;

	if (pktio_entry->config.pktin.bit.tcp_chksum)
		rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;

	eth_conf.rxmode.offloads = rx_offloads;

	/* Setup TX checksum offloads */
	if (pktio_entry->config.pktout.bit.ipv4_chksum_ena)
		tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;

	if (pktio_entry->config.pktout.bit.udp_chksum_ena)
		tx_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;

	if (pktio_entry->config.pktout.bit.tcp_chksum_ena)
		tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;

	if (pktio_entry->config.pktout.bit.sctp_chksum_ena)
		tx_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;

	/* Any TX checksum offload implies the insert path is active */
	if (tx_offloads)
		pktio_entry->enabled.chksum_insert = 1;

	/* Enable multi segment transmit offload */
	if (pkt_dpdk->opt.tx_offload_multi_segs) {
		if ((dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) == 0) {
			_ODP_ERR("TX multi segment offload not supported by PMD\n");
			return -1;
		}
		tx_offloads |= RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
	}

	eth_conf.txmode.offloads = tx_offloads;

	/* RX packet len same size as pool segment minus headroom and double
	 * VLAN tag
	 */
#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
	/* Pre-21.11 DPDK expresses this as frame length ... */
	eth_conf.rxmode.max_rx_pkt_len =
#else
	/* ... 21.11+ expresses it as MTU */
	eth_conf.rxmode.mtu =
#endif
		rte_pktmbuf_data_room_size(pool->rte_mempool) -
		get_eth_overhead(dev_info) - RTE_PKTMBUF_HEADROOM;

	ret = rte_eth_dev_configure(pkt_dpdk->port_id,
				    pktio_entry->num_in_queue,
				    pktio_entry->num_out_queue, &eth_conf);
	if (ret < 0) {
		_ODP_ERR("Failed to setup device: err=%d, port=%" PRIu8 "\n",
			 ret, pkt_dpdk->port_id);
		return -1;
	}
	return 0;
}
+
/* Debug-log the MAC address of the given DPDK port. */
static void _dpdk_print_port_mac(uint16_t port_id)
{
	struct rte_ether_addr eth_addr;

	/* Zeroed first so a failed rte_eth_macaddr_get() prints all zeros */
	memset(&eth_addr, 0, sizeof(eth_addr));
	rte_eth_macaddr_get(port_id, &eth_addr);
	_ODP_DBG("Port %" PRIu16 ", MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n",
		 port_id,
		 eth_addr.addr_bytes[0],
		 eth_addr.addr_bytes[1],
		 eth_addr.addr_bytes[2],
		 eth_addr.addr_bytes[3],
		 eth_addr.addr_bytes[4],
		 eth_addr.addr_bytes[5]);
}
-static int input_queues_config_pkt_dpdk(pktio_entry_t *pktio_entry,
- const odp_pktin_queue_param_t *p)
/* Build the RSS configuration stored in pkt_dpdk->rss_conf from the pktin
 * queue parameters. Leaves rss_conf zeroed (RSS effectively off) when
 * hashing is not enabled. Hash protocols the PMD cannot provide are
 * reported and filtered out of the final mask. */
static void prepare_rss_conf(pktio_entry_t *pktio_entry,
			     const odp_pktin_queue_param_t *p)
{
	struct rte_eth_dev_info dev_info;
	uint64_t rss_hf_capa;
	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
	uint16_t port_id = pkt_dpdk->port_id;

	memset(&pkt_dpdk->rss_conf, 0, sizeof(struct rte_eth_rss_conf));

	if (!p->hash_enable)
		return;

	rte_eth_dev_info_get(port_id, &dev_info);
	rss_hf_capa = dev_info.flow_type_rss_offloads;

	/* Print debug info about unsupported hash protocols */
	if (p->hash_proto.proto.ipv4 &&
	    ((rss_hf_capa & RTE_ETH_RSS_IPV4) == 0))
		_ODP_PRINT("DPDK: hash_proto.ipv4 not supported (rss_hf_capa 0x%" PRIx64 ")\n",
			   rss_hf_capa);

	if (p->hash_proto.proto.ipv4_udp &&
	    ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV4_UDP) == 0))
		_ODP_PRINT("DPDK: hash_proto.ipv4_udp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
			   rss_hf_capa);

	if (p->hash_proto.proto.ipv4_tcp &&
	    ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV4_TCP) == 0))
		_ODP_PRINT("DPDK: hash_proto.ipv4_tcp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
			   rss_hf_capa);

	if (p->hash_proto.proto.ipv6 &&
	    ((rss_hf_capa & RTE_ETH_RSS_IPV6) == 0))
		_ODP_PRINT("DPDK: hash_proto.ipv6 not supported (rss_hf_capa 0x%" PRIx64 ")\n",
			   rss_hf_capa);

	if (p->hash_proto.proto.ipv6_udp &&
	    ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV6_UDP) == 0))
		_ODP_PRINT("DPDK: hash_proto.ipv6_udp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
			   rss_hf_capa);

	if (p->hash_proto.proto.ipv6_tcp &&
	    ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV6_TCP) == 0))
		_ODP_PRINT("DPDK: hash_proto.ipv6_tcp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
			   rss_hf_capa);

	hash_proto_to_rss_conf(&pkt_dpdk->rss_conf, &p->hash_proto);

	/* Filter out unsupported hash functions */
	pkt_dpdk->rss_conf.rss_hf &= rss_hf_capa;
}
+
/* input_queues_config() handler: record RX lock mode, prepare RSS, and pick
 * the RX descriptor count for each queue (application-requested size in
 * DIRECT mode, configuration default otherwise), clamped to driver limits.
 * Returns 0 on success, -1 on failure. */
static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
				    const odp_pktin_queue_param_t *p)
{
	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
	odp_pktin_mode_t mode = pktio_entry->param.in_mode;
	uint8_t lockless;

	prepare_rss_conf(pktio_entry, p);

	/**
	 * Scheduler synchronizes input queue polls. Only single thread
	 * at a time polls a queue */
	if (mode == ODP_PKTIN_MODE_SCHED || p->op_mode == ODP_PKTIO_OP_MT_UNSAFE)
		lockless = 1;
	else
		lockless = 0;

	pkt_dpdk->flags.lockless_rx = lockless;

	/* Configure RX descriptors */
	for (uint32_t i = 0; i < p->num_queues; i++) {
		uint16_t num_rx_desc = pkt_dpdk->opt.num_rx_desc_default;
		int ret;

		if (mode == ODP_PKTIN_MODE_DIRECT && p->queue_size[i] != 0)
			num_rx_desc = p->queue_size[i];

		/* Adjust descriptor count to driver limits; -ENOTSUP is fine
		 * (the PMD simply does not implement the adjustment hook) */
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(pkt_dpdk->port_id, &num_rx_desc, NULL);
		if (ret && ret != -ENOTSUP) {
			_ODP_ERR("DPDK: rte_eth_dev_adjust_nb_rx_tx_desc() failed: %d\n", ret);
			return -1;
		}
		pkt_dpdk->num_rx_desc[i] = num_rx_desc;

		_ODP_DBG("Port %" PRIu16 " RX queue %" PRIu32 " using %" PRIu16 " descriptors\n",
			 pkt_dpdk->port_id, i, num_rx_desc);
	}

	return 0;
}
-static int output_queues_config_pkt_dpdk(pktio_entry_t *pktio_entry,
- const odp_pktout_queue_param_t *p)
/* output_queues_config() handler: record TX lock mode and pick the TX
 * descriptor count for each queue (application-requested size when given,
 * configuration default otherwise), clamped to driver limits.
 * Returns 0 on success, -1 on failure. */
static int dpdk_output_queues_config(pktio_entry_t *pktio_entry,
				     const odp_pktout_queue_param_t *p)
{
	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
	uint8_t lockless;

	if (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE)
		lockless = 1;
	else
		lockless = 0;

	pkt_dpdk->flags.lockless_tx = lockless;

	/* Configure TX descriptors */
	for (uint32_t i = 0; i < p->num_queues; i++) {
		uint16_t num_tx_desc = pkt_dpdk->opt.num_tx_desc_default;
		int ret;

		if (p->queue_size[i] != 0)
			num_tx_desc = p->queue_size[i];

		/* Adjust descriptor count to driver limits; -ENOTSUP is fine
		 * (the PMD simply does not implement the adjustment hook) */
		ret = rte_eth_dev_adjust_nb_rx_tx_desc(pkt_dpdk->port_id, NULL, &num_tx_desc);
		if (ret && ret != -ENOTSUP) {
			_ODP_ERR("DPDK: rte_eth_dev_adjust_nb_rx_tx_desc() failed: %d\n", ret);
			return -1;
		}
		pkt_dpdk->num_tx_desc[i] = num_tx_desc;

		_ODP_DBG("Port %" PRIu16 " TX queue %" PRIu32 " using %" PRIu16 " descriptors\n",
			 pkt_dpdk->port_id, i, num_tx_desc);
	}

	return 0;
}
+
/* Global init hook for the DPDK pktio: nothing to do here (EAL/driver init
 * presumably happens in the common ODP global init path -- confirm). */
static int dpdk_init_global(void)
{
	return 0;
}
/* Global termination hook: close every attached ethdev port, unless the
 * eventdev RX adapter is active, in which case it owns device teardown. */
static int dpdk_term_global(void)
{
	/* Eventdev takes care of closing pktio devices */
	if (!_odp_eventdev_gbl ||
	    _odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT) {
		uint16_t port_id;

		RTE_ETH_FOREACH_DEV(port_id) {
			rte_eth_dev_close(port_id);
		}
	}

	return 0;
}
-static int setup_pkt_dpdk(odp_pktio_t pktio ODP_UNUSED, pktio_entry_t *pktio_entry,
- const char *netdev, odp_pool_t pool ODP_UNUSED)
/* Probe whether promiscuous mode can be toggled on the port by enabling and
 * then disabling it. Returns 1 when both operations succeed, 0 otherwise
 * (feeds the promisc_mode capability bit). Note the probe leaves the port
 * with promiscuous mode disabled when it succeeds. */
static int promisc_mode_check(pkt_dpdk_t *pkt_dpdk)
{
	int ret;

	ret = rte_eth_promiscuous_enable(pkt_dpdk->port_id);
	if (ret) {
		_ODP_DBG("Promisc mode enable not supported: %d\n", ret);
		return 0;
	}

	ret = rte_eth_promiscuous_disable(pkt_dpdk->port_id);
	if (ret) {
		_ODP_DBG("Promisc mode disable not supported: %d\n", ret);
		return 0;
	}

	return 1;
}
+
/* Fill in the pktio capability struct from device info: queue counts and
 * size limits, settable MAC/MTU/promisc support, checksum offload support
 * (gated on the PMD reporting the relevant packet types), and supported
 * statistics counters. Returns 0 on success, -1 on failure. */
static int dpdk_init_capability(pktio_entry_t *pktio_entry,
				const struct rte_eth_dev_info *dev_info)
{
	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
	odp_pktio_capability_t *capa = &pktio_entry->capa;
	struct rte_ether_addr mac_addr;
	int ret;
	int ptype_cnt;
	int ptype_l3_ipv4 = 0;
	int ptype_l4_tcp = 0;
	int ptype_l4_udp = 0;
	uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;

	memset(capa, 0, sizeof(odp_pktio_capability_t));

	capa->max_input_queues = RTE_MIN(dev_info->max_rx_queues, ODP_PKTIN_MAX_QUEUES);
	capa->min_input_queue_size = dev_info->rx_desc_lim.nb_min;
	capa->max_input_queue_size = dev_info->rx_desc_lim.nb_max;

	/* ixgbe devices support only 16 RX queues in RSS mode */
	if (!strncmp(dev_info->driver_name, IXGBE_DRV_NAME,
		     strlen(IXGBE_DRV_NAME)))
		capa->max_input_queues = RTE_MIN(16,
						 (int)capa->max_input_queues);

	capa->max_output_queues = RTE_MIN(dev_info->max_tx_queues, ODP_PKTOUT_MAX_QUEUES);
	capa->min_output_queue_size = dev_info->tx_desc_lim.nb_min;
	capa->max_output_queue_size = dev_info->tx_desc_lim.nb_max;

	capa->set_op.op.promisc_mode = promisc_mode_check(pkt_dpdk);

	/* Check if setting default MAC address is supported by writing the
	 * current address back */
	rte_eth_macaddr_get(pkt_dpdk->port_id, &mac_addr);
	ret = rte_eth_dev_default_mac_addr_set(pkt_dpdk->port_id, &mac_addr);
	if (ret == 0) {
		capa->set_op.op.mac_addr = 1;
	} else if (ret != -ENOTSUP && ret != -EPERM) {
		_ODP_ERR("Failed to set interface default MAC: %d\n", ret);
		return -1;
	}

	/* Check if setting MTU is supported */
	ret = rte_eth_dev_set_mtu(pkt_dpdk->port_id, pkt_dpdk->mtu - _ODP_ETHHDR_LEN);
	/* From DPDK 21.11 onwards, calling rte_eth_dev_set_mtu() before device is configured with
	 * rte_eth_dev_configure() will result in failure. The least hacky (unfortunately still
	 * very hacky) way to continue checking the support is to take into account that the
	 * function will fail earlier with -ENOTSUP if MTU setting is not supported by device than
	 * if the device was not yet configured. */
	if (ret != -ENOTSUP) {
		capa->set_op.op.maxlen = 1;
		capa->maxlen.equal = true;
		capa->maxlen.min_input = DPDK_MTU_MIN;
		capa->maxlen.max_input = pkt_dpdk->mtu_max;
		capa->maxlen.min_output = DPDK_MTU_MIN;
		capa->maxlen.max_output = pkt_dpdk->mtu_max;
	}

	/* First call sizes the ptype list, second call fills it */
	ptype_cnt = rte_eth_dev_get_supported_ptypes(pkt_dpdk->port_id,
						     ptype_mask, NULL, 0);
	if (ptype_cnt > 0) {
		uint32_t ptypes[ptype_cnt];
		int i;

		ptype_cnt = rte_eth_dev_get_supported_ptypes(pkt_dpdk->port_id,
							     ptype_mask, ptypes,
							     ptype_cnt);
		for (i = 0; i < ptype_cnt; i++)
			switch (ptypes[i]) {
			case RTE_PTYPE_L3_IPV4:
				/* Fall through */
			case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
				/* Fall through */
			case RTE_PTYPE_L3_IPV4_EXT:
				ptype_l3_ipv4 = 1;
				break;
			case RTE_PTYPE_L4_TCP:
				ptype_l4_tcp = 1;
				break;
			case RTE_PTYPE_L4_UDP:
				ptype_l4_udp = 1;
				break;
			}
	}

	odp_pktio_config_init(&capa->config);
	capa->config.pktin.bit.ts_all = 1;
	capa->config.pktin.bit.ts_ptp = 1;

	/* RX checksum capability requires both PMD parsing of the protocol
	 * (ptype) and the hardware offload flag */
	capa->config.pktin.bit.ipv4_chksum = ptype_l3_ipv4 &&
		(dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) ? 1 : 0;
	if (capa->config.pktin.bit.ipv4_chksum)
		capa->config.pktin.bit.drop_ipv4_err = 1;

	capa->config.pktin.bit.udp_chksum = ptype_l4_udp &&
		(dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ? 1 : 0;
	if (capa->config.pktin.bit.udp_chksum)
		capa->config.pktin.bit.drop_udp_err = 1;

	capa->config.pktin.bit.tcp_chksum = ptype_l4_tcp &&
		(dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ? 1 : 0;
	if (capa->config.pktin.bit.tcp_chksum)
		capa->config.pktin.bit.drop_tcp_err = 1;

	capa->config.pktout.bit.ipv4_chksum =
		(dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? 1 : 0;
	capa->config.pktout.bit.udp_chksum =
		(dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? 1 : 0;
	capa->config.pktout.bit.tcp_chksum =
		(dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? 1 : 0;

	capa->config.pktout.bit.ipv4_chksum_ena =
		capa->config.pktout.bit.ipv4_chksum;
	capa->config.pktout.bit.udp_chksum_ena =
		capa->config.pktout.bit.udp_chksum;
	capa->config.pktout.bit.tcp_chksum_ena =
		capa->config.pktout.bit.tcp_chksum;

	capa->config.pktout.bit.ts_ena = 1;

	capa->stats.pktio.counter.in_octets = 1;
	capa->stats.pktio.counter.in_packets = 1;
	capa->stats.pktio.counter.in_discards = 1;
	capa->stats.pktio.counter.in_errors = 1;
	capa->stats.pktio.counter.out_octets = 1;
	capa->stats.pktio.counter.out_packets = 1;
	capa->stats.pktio.counter.out_errors = 1;
	capa->stats.pktio.counter.out_discards = 1;

	capa->stats.pktin_queue.counter.octets = 1;
	capa->stats.pktin_queue.counter.packets = 1;
	capa->stats.pktin_queue.counter.errors = 1;

	capa->stats.pktout_queue.counter.octets = 1;
	capa->stats.pktout_queue.counter.packets = 1;

	return 0;
}
+
/* open() handler: resolve the DPDK port id from the interface name (DPDK
 * device name or a plain port number), read device info, load runtime
 * options, initialize MTU bookkeeping and capabilities, and set up the
 * multicast mode and per-queue locks. Returns 0 on success, -1 on failure. */
static int setup_pkt_dpdk(odp_pktio_t pktio ODP_UNUSED,
			  pktio_entry_t *pktio_entry,
			  const char *netdev, odp_pool_t pool ODP_UNUSED)
{
	uint32_t mtu;
	struct rte_eth_dev_info dev_info;
	pkt_dpdk_t * const pkt_dpdk = pkt_priv(pktio_entry);
	int i, ret;
	uint16_t port_id;

	/* Accept either a DPDK device name or a numeric port id */
	if (!rte_eth_dev_get_port_by_name(netdev, &port_id))
		pkt_dpdk->port_id = port_id;
	else if (_dpdk_netdev_is_valid(netdev))
		pkt_dpdk->port_id = atoi(netdev);
	else {
		_ODP_ERR("Invalid interface name!: %s\n", netdev);
		return -1;
	}

	if (!rte_eth_dev_is_valid_port(pkt_dpdk->port_id)) {
		_ODP_ERR("Port id=%" PRIu16 " not attached\n", pkt_dpdk->port_id);
		return -1;
	}

	memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
	ret = rte_eth_dev_info_get(pkt_dpdk->port_id, &dev_info);
	if (ret) {
		_ODP_ERR("Failed to read device info: %d\n", ret);
		return -1;
	}

	/* Initialize runtime options */
	if (init_options(pktio_entry, &dev_info)) {
		_ODP_ERR("Initializing runtime options failed\n");
		return -1;
	}

	/* Drivers requiring minimum burst size. Supports also *_vf versions
	 * of the drivers. */
	if (!strncmp(dev_info.driver_name, IXGBE_DRV_NAME,
		     strlen(IXGBE_DRV_NAME)) ||
	    !strncmp(dev_info.driver_name, I40E_DRV_NAME,
		     strlen(I40E_DRV_NAME)))
		pkt_dpdk->min_rx_burst = DPDK_MIN_RX_BURST;
	else
		pkt_dpdk->min_rx_burst = 0;

	_dpdk_print_port_mac(pkt_dpdk->port_id);

	/* Cache MTU as a frame length (Ethernet header included) */
	mtu = mtu_get_pkt_dpdk(pktio_entry);
	if (mtu == 0) {
		_ODP_ERR("Failed to read interface MTU\n");
		return -1;
	}
	pkt_dpdk->mtu = mtu + _ODP_ETHHDR_LEN;
	pkt_dpdk->mtu_max = RTE_MAX(pkt_dpdk->mtu, DPDK_MTU_MAX);
	pkt_dpdk->mtu_set = 0;

	if (dpdk_init_capability(pktio_entry, &dev_info)) {
		_ODP_ERR("Failed to initialize capability\n");
		return -1;
	}

	/* Setup multicast */
	if (pkt_dpdk->opt.multicast_enable)
		rte_eth_allmulticast_enable(pkt_dpdk->port_id);
	else
		rte_eth_allmulticast_disable(pkt_dpdk->port_id);

	for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++)
		odp_ticketlock_init(&pkt_dpdk->rx_lock[i]);
	for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++)
		odp_ticketlock_init(&pkt_dpdk->tx_lock[i]);

	return 0;
}
/* close() handler: stop the port. When the eventdev RX adapter has been
 * started it owns the port and must perform the stop itself. */
static int close_pkt_dpdk(pktio_entry_t *pktio_entry)
{
	pkt_dpdk_t * const pkt_dpdk = pkt_priv(pktio_entry);

	if (_odp_eventdev_gbl &&
	    _odp_eventdev_gbl->rx_adapter.status != RX_ADAPTER_INIT)
		_odp_rx_adapter_port_stop(pkt_dpdk->port_id);
	else
		rte_eth_dev_stop(pkt_dpdk->port_id);

	return 0;
}
-static int start_pkt_dpdk(pktio_entry_t *pktio_entry)
/* Set up all TX queues with the descriptor counts chosen in
 * dpdk_output_queues_config(), using the PMD's default TX configuration,
 * and map per-queue statistics counters where the PMD supports it.
 * Returns 0 on success, -1 on failure. */
static int dpdk_setup_eth_tx(pktio_entry_t *pktio_entry,
			     const pkt_dpdk_t *pkt_dpdk,
			     const struct rte_eth_dev_info *dev_info)
{
	uint32_t i;
	int ret;
	uint16_t port_id = pkt_dpdk->port_id;

	for (i = 0; i < pktio_entry->num_out_queue; i++) {
		ret = rte_eth_tx_queue_setup(port_id, i,
					     pkt_dpdk->num_tx_desc[i],
					     rte_eth_dev_socket_id(port_id),
					     &dev_info->default_txconf);
		if (ret < 0) {
			_ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n", ret, port_id);
			return -1;
		}
	}

	/* Set per queue statistics mappings. Not supported by all PMDs, so
	 * ignore the return value. */
	for (i = 0; i < pktio_entry->num_out_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
		if (ret) {
			_ODP_DBG("Mapping per TX queue statistics not supported: %d\n", ret);
			break;
		}
	}
	_ODP_DBG("Mapped %" PRIu32 "/%d TX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	return 0;
}
+
/* Set up all RX queues with the descriptor counts chosen in
 * dpdk_input_queues_config(), using the PMD's default RX configuration with
 * the configured rx_drop_en override, and map per-queue statistics counters
 * where the PMD supports it. Returns 0 on success, -1 on failure. */
static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry,
			     const pkt_dpdk_t *pkt_dpdk,
			     const struct rte_eth_dev_info *dev_info)
{
	struct rte_eth_rxconf rxconf;
	uint32_t i;
	int ret;
	uint16_t port_id = pkt_dpdk->port_id;
	pool_t *pool = _odp_pool_entry(pktio_entry->pool);

	rxconf = dev_info->default_rxconf;
	rxconf.rx_drop_en = pkt_dpdk->opt.rx_drop_en;

	for (i = 0; i < pktio_entry->num_in_queue; i++) {
		ret = rte_eth_rx_queue_setup(port_id, i, pkt_dpdk->num_rx_desc[i],
					     rte_eth_dev_socket_id(port_id),
					     &rxconf, pool->rte_mempool);
		if (ret < 0) {
			_ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n", ret, port_id);
			return -1;
		}
	}

	/* Set per queue statistics mappings. Not supported by all PMDs, so
	 * ignore the return value. */
	for (i = 0; i < pktio_entry->num_in_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
		ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
		if (ret) {
			_ODP_DBG("Mapping per RX queue statistics not supported: %d\n", ret);
			break;
		}
	}
	_ODP_DBG("Mapped %" PRIu32 "/%d RX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);

	return 0;
}
+
+static int dpdk_start(pktio_entry_t *pktio_entry)
+{
+ struct rte_eth_dev_info dev_info;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t port_id = pkt_dpdk->port_id;
+ int ret;
+
+ if (pktio_entry->state == PKTIO_STATE_STOPPED ||
+ pktio_entry->state == PKTIO_STATE_STOP_PENDING)
+ rte_eth_dev_stop(pkt_dpdk->port_id);
+
+ /* DPDK doesn't support nb_rx_q/nb_tx_q being 0 */
+ if (!pktio_entry->num_in_queue)
+ pktio_entry->num_in_queue = 1;
+ if (!pktio_entry->num_out_queue)
+ pktio_entry->num_out_queue = 1;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ /* Setup device */
+ if (dpdk_setup_eth_dev(pktio_entry, &dev_info)) {
+ _ODP_ERR("Failed to configure device\n");
+ return -1;
+ }
+
+ /* Setup TX queues */
+ if (dpdk_setup_eth_tx(pktio_entry, pkt_dpdk, &dev_info))
+ return -1;
+
+ /* Setup RX queues */
+ if (dpdk_setup_eth_rx(pktio_entry, pkt_dpdk, &dev_info))
+ return -1;
+
+ /* Restore MTU value resetted by dpdk_setup_eth_rx() */
+ if (pkt_dpdk->mtu_set && pktio_entry->capa.set_op.op.maxlen) {
+ ret = dpdk_maxlen_set(pktio_entry, pkt_dpdk->mtu, 0);
+ if (ret) {
+ _ODP_ERR("Restoring device MTU failed: err=%d, port=%" PRIu8 "\n",
+ ret, port_id);
return -1;
}
}
- rte_eth_promiscuous_enable(portid);
- /* Some DPDK PMD vdev like pcap do not support promisc mode change. Use
- * system call for them. */
- if (!rte_eth_promiscuous_get(portid))
- pkt_dpdk->vdev_sysc_promisc = 1;
+ /* Use simpler function when packet parsing and classifying are not required */
+ if (pktio_entry->parse_layer == ODP_PROTO_LAYER_NONE)
+ pkt_dpdk->mbuf_to_pkt_fn = input_pkts_minimal;
else
- pkt_dpdk->vdev_sysc_promisc = 0;
+ pkt_dpdk->mbuf_to_pkt_fn = input_pkts;
- rte_eth_allmulticast_enable(portid);
-
- ret = rte_eth_dev_start(portid);
+ /* Start device */
+ ret = rte_eth_dev_start(port_id);
if (ret < 0) {
- ODP_ERR("rte_eth_dev_start:err=%d, port=%u\n",
- ret, portid);
- return ret;
+ _ODP_ERR("Device start failed: err=%d, port=%" PRIu8 "\n", ret, port_id);
+ return -1;
}
return 0;
@@ -299,196 +940,362 @@ static int start_pkt_dpdk(pktio_entry_t *pktio_entry)
static int stop_pkt_dpdk(pktio_entry_t *pktio_entry)
{
- rte_eth_dev_stop(pktio_entry->s.pkt_dpdk.portid);
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ unsigned int i;
+ uint16_t port_id = pkt_dpdk->port_id;
+
+ for (i = 0; i < pktio_entry->num_in_queue; i++)
+ rte_eth_dev_rx_queue_stop(port_id, i);
+ for (i = 0; i < pktio_entry->num_out_queue; i++)
+ rte_eth_dev_tx_queue_stop(port_id, i);
+
return 0;
}
-/* Forward declaration */
-static int send_pkt_dpdk(pktio_entry_t *pktio_entry, int index,
- const odp_packet_t pkt_table[], int len);
+static inline void prefetch_pkt(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ odp_prefetch_store(&pkt_hdr->p);
+}
-/* This function can't be called if pkt_dpdk->lockless_tx is true */
-static void _odp_pktio_send_completion(pktio_entry_t *pktio_entry)
+/**
+ * Input packets when packet parsing and classifier are disabled
+ */
+static inline int input_pkts_minimal(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ uint16_t num)
{
- int i;
- unsigned j;
- odp_packet_t dummy;
- pool_entry_t *pool_entry =
- get_pool_entry(_odp_typeval(pktio_entry->s.pool));
- struct rte_mempool *rte_mempool = pool_entry->s.rte_mempool;
+ uint16_t i;
+ odp_time_t ts_val;
+ odp_time_t *ts = NULL;
+ const uint8_t ts_ena = (pktio_entry->config.pktin.bit.ts_all ||
+ pktio_entry->config.pktin.bit.ts_ptp);
+ const odp_pktio_t input = pktio_entry->handle;
+ const uint16_t num_prefetch = RTE_MIN(num, NUM_RX_PREFETCH);
- for (j = 0; j < pktio_entry->s.num_out_queue; j++)
- send_pkt_dpdk(pktio_entry, j, &dummy, 0);
+ for (i = 0; i < num_prefetch; i++)
+ prefetch_pkt(pkt_table[i]);
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
- pktio_entry_t *entry = &pktio_tbl->entries[i];
+ if (ts_ena) {
+ ts_val = odp_time_global();
+ ts = &ts_val;
+ }
- if (rte_mempool_avail_count(rte_mempool) != 0)
- return;
+ for (i = 0; i < num; ++i) {
+ odp_packet_t pkt = pkt_table[i];
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- if (entry == pktio_entry)
- continue;
+ if (odp_likely(i + num_prefetch < num))
+ prefetch_pkt(pkt_table[i + num_prefetch]);
- if (odp_ticketlock_trylock(&entry->s.txl)) {
- if (entry->s.state != PKTIO_STATE_FREE &&
- entry->s.ops == &dpdk_pktio_ops) {
- for (j = 0; j < pktio_entry->s.num_out_queue;
- j++)
- send_pkt_dpdk(pktio_entry, j,
- &dummy, 0);
- }
- odp_ticketlock_unlock(&entry->s.txl);
- }
+ packet_init(pkt_hdr, input);
+
+ packet_set_ts(pkt_hdr, ts);
}
- return;
+ return num;
}
-static int recv_pkt_dpdk(pktio_entry_t *pktio_entry, int index,
- odp_packet_t pkt_table[], int len)
+/**
+ * input packets when packet parsing is required
+ */
+static inline int input_pkts(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], uint16_t num)
{
- uint16_t nb_rx, i;
- odp_packet_t *saved_pkt_table;
- pkt_dpdk_t * const pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- uint8_t min = pkt_dpdk->min_rx_burst;
+ uint16_t i;
+ uint16_t num_pkts = 0, num_cls = 0;
odp_time_t ts_val;
odp_time_t *ts = NULL;
+ const odp_pktin_config_opt_t pktin_cfg = pktio_entry->config.pktin;
+ const odp_pktio_t input = pktio_entry->handle;
+ const odp_proto_layer_t layer = pktio_entry->parse_layer;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
+ const uint16_t num_prefetch = RTE_MIN(num, NUM_RX_PREFETCH);
- if (odp_unlikely(min > len)) {
- ODP_DBG("PMD requires >%d buffers burst. "
- "Current %d, dropped %d\n", min, len, min - len);
- saved_pkt_table = pkt_table;
- pkt_table = malloc(min * sizeof(odp_packet_t));
- }
+ for (i = 0; i < num_prefetch; i++)
+ prefetch_pkt(pkt_table[i]);
- if (!pkt_dpdk->lockless_rx)
- odp_ticketlock_lock(&pkt_dpdk->rx_lock[index]);
+ if (pktin_cfg.bit.ts_all || pktin_cfg.bit.ts_ptp) {
+ ts_val = odp_time_global();
+ ts = &ts_val;
+ }
- nb_rx = rte_eth_rx_burst((uint8_t)pkt_dpdk->portid,
- (uint16_t)index,
- (struct rte_mbuf **)pkt_table,
- (uint16_t)RTE_MAX(len, min));
+ for (i = 0; i < num; ++i) {
+ odp_packet_t pkt = pkt_table[i];
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ struct rte_mbuf *mbuf = pkt_to_mbuf(pkt);
+ int ret;
- if (nb_rx == 0 && !pkt_dpdk->lockless_tx) {
- pool_entry_t *pool_entry =
- get_pool_entry(_odp_typeval(pktio_entry->s.pool));
- struct rte_mempool *rte_mempool =
- pool_entry->s.rte_mempool;
- if (rte_mempool_avail_count(rte_mempool) == 0)
- _odp_pktio_send_completion(pktio_entry);
- }
+ if (odp_likely(i + num_prefetch < num))
+ prefetch_pkt(pkt_table[i + num_prefetch]);
- if (!pkt_dpdk->lockless_rx)
- odp_ticketlock_unlock(&pkt_dpdk->rx_lock[index]);
+ packet_init(pkt_hdr, input);
- for (i = 0; i < nb_rx; ++i) {
- _odp_packet_reset_parse(pkt_table[i]);
- odp_packet_hdr(pkt_table[i])->input = pktio_entry->s.handle;
- }
+ ret = _odp_dpdk_packet_parse_common(pkt_hdr,
+ rte_pktmbuf_mtod(mbuf, uint8_t *),
+ rte_pktmbuf_pkt_len(mbuf),
+ rte_pktmbuf_data_len(mbuf),
+ mbuf, layer, pktin_cfg);
+ if (odp_unlikely(ret)) {
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp) {
- ts_val = odp_time_global();
- ts = &ts_val;
- }
+ if (ret < 0) {
+ odp_packet_free(pkt);
+ continue;
+ }
+ }
- if (odp_unlikely(min > len)) {
- memcpy(saved_pkt_table, pkt_table,
- len * sizeof(odp_packet_t));
- for (i = len; i < nb_rx; i++)
- odp_packet_free(pkt_table[i]);
- nb_rx = RTE_MIN(len, nb_rx);
- free(pkt_table);
- pktio_entry->s.stats.in_discards += min - len;
- pkt_table = saved_pkt_table;
- }
+ packet_set_ts(pkt_hdr, ts);
- if (pktio_cls_enabled(pktio_entry)) {
- int failed = 0, success = 0;
+ odp_prefetch(rte_pktmbuf_mtod(mbuf, char *));
- for (i = 0; i < nb_rx; i++) {
- odp_packet_t new_pkt;
+ if (cls_enabled) {
odp_pool_t new_pool;
- uint8_t *pkt_addr;
- odp_packet_hdr_t parsed_hdr;
- int ret;
- odp_packet_hdr_t *pkt_hdr =
- odp_packet_hdr(pkt_table[i]);
-
- pkt_addr = odp_packet_data(pkt_table[i]);
- ret = cls_classify_packet(pktio_entry, pkt_addr,
- odp_packet_len(pkt_table[i]),
- odp_packet_len(pkt_table[i]),
- &new_pool, &parsed_hdr);
- if (ret) {
- failed++;
- odp_packet_free(pkt_table[i]);
+ uint8_t *data = odp_packet_data(pkt);
+
+ ret = _odp_cls_classify_packet(pktio_entry, data, &new_pool, pkt_hdr);
+ if (odp_unlikely(ret)) {
+ if (ret < 0)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+
+ odp_packet_free(pkt);
continue;
}
- if (new_pool != odp_packet_pool(pkt_table[i])) {
- new_pkt = odp_packet_copy(pkt_table[i],
- new_pool);
- odp_packet_free(pkt_table[i]);
- if (new_pkt == ODP_PACKET_INVALID) {
- failed++;
+ if (new_pool != odp_packet_pool(pkt)) {
+ odp_packet_t new_pkt = odp_packet_copy(pkt, new_pool);
+
+ odp_packet_free(pkt);
+ if (odp_unlikely(new_pkt == ODP_PACKET_INVALID)) {
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
continue;
}
- pkt_table[i] = new_pkt;
+ pkt = new_pkt;
}
- packet_set_ts(pkt_hdr, ts);
- pktio_entry->s.stats.in_octets +=
- odp_packet_len(pkt_table[i]);
- copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
- if (success != i)
- pkt_table[success] = pkt_table[i];
- ++success;
+
+ /* Enqueue packets directly to classifier destination queue */
+ pkt_table[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(pkt_table, num_cls, (i + 1 == num));
+ } else {
+ pkt_table[num_pkts++] = pkt;
}
- pktio_entry->s.stats.in_errors += failed;
- pktio_entry->s.stats.in_ucast_pkts += nb_rx - failed;
- nb_rx = success;
}
- return nb_rx;
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(pkt_table, num_cls, true);
+
+ return num_pkts;
+}
+
+int _odp_input_pkts(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], int num)
+{
+ pkt_dpdk_t * const pkt_dpdk = pkt_priv(pktio_entry);
+
+ return pkt_dpdk->mbuf_to_pkt_fn(pktio_entry, pkt_table, num);
+}
+
+static int recv_pkt_dpdk(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num)
+{
+ pkt_dpdk_t * const pkt_dpdk = pkt_priv(pktio_entry);
+ const uint16_t port_id = pkt_dpdk->port_id;
+ const uint8_t min = pkt_dpdk->min_rx_burst;
+ const uint8_t lockless = pkt_dpdk->flags.lockless_rx;
+ uint16_t nb_rx;
+
+ if (!lockless)
+ odp_ticketlock_lock(&pkt_dpdk->rx_lock[index]);
+
+ if (odp_likely(num >= min)) {
+ nb_rx = rte_eth_rx_burst(port_id, (uint16_t)index,
+ (struct rte_mbuf **)pkt_table,
+ (uint16_t)num);
+ } else {
+ odp_packet_t min_burst[min];
+
+ _ODP_DBG("PMD requires >%d buffers burst. Current %d, dropped %d\n",
+ min, num, min - num);
+ nb_rx = rte_eth_rx_burst(port_id, (uint16_t)index,
+ (struct rte_mbuf **)min_burst, min);
+
+ for (uint16_t i = 0; i < nb_rx; i++) {
+ if (i < num)
+ pkt_table[i] = min_burst[i];
+ else
+ odp_packet_free(min_burst[i]);
+ }
+
+ nb_rx = RTE_MIN(num, nb_rx);
+ }
+
+ if (!lockless)
+ odp_ticketlock_unlock(&pkt_dpdk->rx_lock[index]);
+
+ /* Packets may also me received through eventdev, so don't add any
+ * processing here. Instead, perform all processing in mbuf_to_pkt_fn()
+ * which is also called by eventdev. */
+ if (nb_rx)
+ return pkt_dpdk->mbuf_to_pkt_fn(pktio_entry, pkt_table, nb_rx);
+ return 0;
+}
+
+static inline int check_proto(void *l3_hdr, odp_bool_t *l3_proto_v4,
+ uint8_t *l4_proto)
+{
+ uint8_t l3_proto_ver = _ODP_IPV4HDR_VER(*(uint8_t *)l3_hdr);
+
+ if (l3_proto_ver == _ODP_IPV4) {
+ struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)l3_hdr;
+
+ *l3_proto_v4 = 1;
+ if (!rte_ipv4_frag_pkt_is_fragmented(ip))
+ *l4_proto = ip->next_proto_id;
+ else
+ *l4_proto = 0;
+
+ return 0;
+ } else if (l3_proto_ver == _ODP_IPV6) {
+ struct rte_ipv6_hdr *ipv6 = (struct rte_ipv6_hdr *)l3_hdr;
+
+ *l3_proto_v4 = 0;
+ *l4_proto = ipv6->proto;
+ return 0;
+ }
+
+ return -1;
+}
+
+static inline uint16_t phdr_csum(odp_bool_t ipv4, void *l3_hdr,
+ uint64_t ol_flags)
+{
+ if (ipv4)
+ return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
+ else /*ipv6*/
+ return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
+}
+
+#define OL_TX_CHKSUM_PKT(_cfg, _capa, _proto, _ovr_set, _ovr) \
+ (_capa && _proto && (_ovr_set ? _ovr : _cfg))
+
+static inline void pkt_set_ol_tx(odp_pktout_config_opt_t *pktout_cfg,
+ odp_pktout_config_opt_t *pktout_capa,
+ odp_packet_hdr_t *pkt_hdr,
+ struct rte_mbuf *mbuf,
+ char *mbuf_data)
+{
+ void *l3_hdr, *l4_hdr;
+ uint8_t l4_proto;
+ odp_bool_t l3_proto_v4;
+ odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt;
+ packet_parser_t *pkt_p = &pkt_hdr->p;
+
+ if (pkt_p->l3_offset == ODP_PACKET_OFFSET_INVALID)
+ return;
+
+ l3_hdr = (void *)(mbuf_data + pkt_p->l3_offset);
+
+ if (check_proto(l3_hdr, &l3_proto_v4, &l4_proto))
+ return;
+
+ ipv4_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.ipv4_chksum,
+ pktout_capa->bit.ipv4_chksum,
+ l3_proto_v4,
+ pkt_p->flags.l3_chksum_set,
+ pkt_p->flags.l3_chksum);
+ udp_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.udp_chksum,
+ pktout_capa->bit.udp_chksum,
+ (l4_proto == _ODP_IPPROTO_UDP),
+ pkt_p->flags.l4_chksum_set,
+ pkt_p->flags.l4_chksum);
+ tcp_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.tcp_chksum,
+ pktout_capa->bit.tcp_chksum,
+ (l4_proto == _ODP_IPPROTO_TCP),
+ pkt_p->flags.l4_chksum_set,
+ pkt_p->flags.l4_chksum);
+
+ if (!ipv4_chksum_pkt && !udp_chksum_pkt && !tcp_chksum_pkt)
+ return;
+
+ mbuf->l2_len = pkt_p->l3_offset - pkt_p->l2_offset;
+
+ if (l3_proto_v4)
+ mbuf->ol_flags = RTE_MBUF_F_TX_IPV4;
+ else
+ mbuf->ol_flags = RTE_MBUF_F_TX_IPV6;
+
+ if (ipv4_chksum_pkt) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
+
+ ((struct rte_ipv4_hdr *)l3_hdr)->hdr_checksum = 0;
+ mbuf->l3_len = _ODP_IPV4HDR_IHL(*(uint8_t *)l3_hdr) * 4;
+ }
+
+ if (pkt_p->l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return;
+
+ mbuf->l3_len = pkt_p->l4_offset - pkt_p->l3_offset;
+
+ l4_hdr = (void *)(mbuf_data + pkt_p->l4_offset);
+
+ if (udp_chksum_pkt) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
+
+ ((struct rte_udp_hdr *)l4_hdr)->dgram_cksum =
+ phdr_csum(l3_proto_v4, l3_hdr, mbuf->ol_flags);
+ } else if (tcp_chksum_pkt) {
+ mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
+
+ ((struct rte_tcp_hdr *)l4_hdr)->cksum =
+ phdr_csum(l3_proto_v4, l3_hdr, mbuf->ol_flags);
+ }
}
static int send_pkt_dpdk(pktio_entry_t *pktio_entry, int index,
- const odp_packet_t pkt_table[], int len)
+ const odp_packet_t pkt_table[], int num)
{
- int pkts;
- pkt_dpdk_t * const pkt_dpdk = &pktio_entry->s.pkt_dpdk;
+ pkt_dpdk_t * const pkt_dpdk = pkt_priv(pktio_entry);
+ const uint8_t chksum_insert_ena = pktio_entry->enabled.chksum_insert;
+ const uint8_t tx_ts_ena = pktio_entry->enabled.tx_ts;
+ odp_pktout_config_opt_t *pktout_cfg = &pktio_entry->config.pktout;
+ odp_pktout_config_opt_t *pktout_capa = &pktio_entry->capa.config.pktout;
+ uint16_t tx_ts_idx = 0;
+ uint16_t pkts;
+
+ if (chksum_insert_ena || tx_ts_ena) {
+ for (uint16_t i = 0; i < num; i++) {
+ struct rte_mbuf *mbuf = pkt_to_mbuf(pkt_table[i]);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt_table[i]);
+
+ if (chksum_insert_ena)
+ pkt_set_ol_tx(pktout_cfg, pktout_capa, pkt_hdr, mbuf,
+ rte_pktmbuf_mtod(mbuf, char *));
+
+ if (odp_unlikely(tx_ts_ena && tx_ts_idx == 0 && pkt_hdr->p.flags.ts_set))
+ tx_ts_idx = i + 1;
+ }
+ }
- if (!pkt_dpdk->lockless_tx)
+ if (!pkt_dpdk->flags.lockless_tx)
odp_ticketlock_lock(&pkt_dpdk->tx_lock[index]);
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
- pkts = rte_eth_tx_burst(pkt_dpdk->portid, index,
- (struct rte_mbuf **)pkt_table, len);
+ pkts = rte_eth_tx_burst(pkt_dpdk->port_id, index,
+ (struct rte_mbuf **)pkt_table, num);
#pragma GCC diagnostic pop
- if (!pkt_dpdk->lockless_tx)
+ if (!pkt_dpdk->flags.lockless_tx)
odp_ticketlock_unlock(&pkt_dpdk->tx_lock[index]);
- if (pkts == 0) {
- uint32_t mtu;
+ if (odp_unlikely(tx_ts_idx && pkts >= tx_ts_idx))
+ _odp_pktio_tx_ts_set(pktio_entry);
- if (odp_unlikely(rte_errno != 0))
- return -1;
-
- mtu = mtu_get_pkt_dpdk(pktio_entry);
- if (odp_unlikely(odp_packet_len(pkt_table[0]) > mtu)) {
- __odp_errno = EMSGSIZE;
- return -1;
- }
- }
- rte_errno = 0;
return pkts;
}
-static uint32_t _dpdk_vdev_mtu(uint8_t port_id)
+static uint32_t _dpdk_vdev_mtu(uint16_t port_id)
{
- struct rte_eth_dev_info dev_info = {0};
+ struct rte_eth_dev_info dev_info;
struct ifreq ifr;
int ret;
int sockfd;
@@ -499,7 +1306,7 @@ static uint32_t _dpdk_vdev_mtu(uint8_t port_id)
ret = ioctl(sockfd, SIOCGIFMTU, &ifr);
close(sockfd);
if (ret < 0) {
- ODP_DBG("ioctl SIOCGIFMTU error\n");
+ _ODP_DBG("ioctl SIOCGIFMTU error\n");
return 0;
}
@@ -511,7 +1318,7 @@ static uint32_t mtu_get_pkt_dpdk(pktio_entry_t *pktio_entry)
uint16_t mtu = 0;
int ret;
- ret = rte_eth_dev_get_mtu(pktio_entry->s.pkt_dpdk.portid, &mtu);
+ ret = rte_eth_dev_get_mtu(pkt_priv(pktio_entry)->port_id, &mtu);
if (ret < 0)
return 0;
@@ -519,135 +1326,176 @@ static uint32_t mtu_get_pkt_dpdk(pktio_entry_t *pktio_entry)
* try to use system call if dpdk cannot get mtu value.
*/
if (mtu == 0)
- mtu = _dpdk_vdev_mtu(pktio_entry->s.pkt_dpdk.portid);
+ mtu = _dpdk_vdev_mtu(pkt_priv(pktio_entry)->port_id);
return mtu;
}
-static int _dpdk_vdev_promisc_mode_set(uint8_t port_id, int enable)
+static uint32_t dpdk_maxlen_get(pktio_entry_t *pktio_entry)
{
- struct rte_eth_dev_info dev_info = {0};
- struct ifreq ifr;
- int ret;
- int sockfd;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
- rte_eth_dev_info_get(port_id, &dev_info);
- if_indextoname(dev_info.if_index, ifr.ifr_name);
- sockfd = socket(AF_INET, SOCK_DGRAM, 0);
-
- ret = ioctl(sockfd, SIOCGIFFLAGS, &ifr);
- if (ret < 0) {
- close(sockfd);
- ODP_DBG("ioctl SIOCGIFFLAGS error\n");
- return -1;
- }
-
- if (enable)
- ifr.ifr_flags |= IFF_PROMISC;
- else
- ifr.ifr_flags &= ~(IFF_PROMISC);
-
- ret = ioctl(sockfd, SIOCSIFFLAGS, &ifr);
- if (ret < 0) {
- close(sockfd);
- ODP_DBG("ioctl SIOCSIFFLAGS error\n");
- return -1;
- }
-
- ret = ioctl(sockfd, SIOCGIFMTU, &ifr);
- if (ret < 0) {
- close(sockfd);
- ODP_DBG("ioctl SIOCGIFMTU error\n");
- return -1;
- }
-
- ODP_DBG("vdev promisc set to %d\n", enable);
- close(sockfd);
- return 0;
+ return pkt_dpdk->mtu;
}
static int promisc_mode_set_pkt_dpdk(pktio_entry_t *pktio_entry, int enable)
{
- uint8_t portid = pktio_entry->s.pkt_dpdk.portid;
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int ret;
+
if (enable)
- rte_eth_promiscuous_enable(portid);
+ ret = rte_eth_promiscuous_enable(port_id);
else
- rte_eth_promiscuous_disable(portid);
+ ret = rte_eth_promiscuous_disable(port_id);
- if (pktio_entry->s.pkt_dpdk.vdev_sysc_promisc) {
- int ret = _dpdk_vdev_promisc_mode_set(portid, enable);
- if (ret < 0)
- ODP_DBG("vdev promisc mode fail\n");
+ if (ret) {
+ _ODP_ERR("Setting promisc mode failed: %d\n", ret);
+ return -1;
}
-
return 0;
}
-static int _dpdk_vdev_promisc_mode(uint8_t port_id)
+static int promisc_mode_get_pkt_dpdk(pktio_entry_t *pktio_entry)
{
- struct rte_eth_dev_info dev_info = {0};
- struct ifreq ifr;
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
int ret;
- int sockfd;
- rte_eth_dev_info_get(port_id, &dev_info);
- if_indextoname(dev_info.if_index, ifr.ifr_name);
- sockfd = socket(AF_INET, SOCK_DGRAM, 0);
- ret = ioctl(sockfd, SIOCGIFFLAGS, &ifr);
- close(sockfd);
+ ret = rte_eth_promiscuous_get(port_id);
if (ret < 0) {
- ODP_DBG("ioctl SIOCGIFFLAGS error\n");
+ _ODP_ERR("Getting promisc mode failed: %d\n", ret);
return -1;
}
-
- if (ifr.ifr_flags & IFF_PROMISC) {
- ODP_DBG("promisc is 1\n");
- return 1;
- } else
- return 0;
-}
-
-static int promisc_mode_get_pkt_dpdk(pktio_entry_t *pktio_entry)
-{
- uint8_t portid = pktio_entry->s.pkt_dpdk.portid;
- if (pktio_entry->s.pkt_dpdk.vdev_sysc_promisc)
- return _dpdk_vdev_promisc_mode(portid);
- else
- return rte_eth_promiscuous_get(portid);
-
+ return ret;
}
static int mac_get_pkt_dpdk(pktio_entry_t *pktio_entry, void *mac_addr)
{
- rte_eth_macaddr_get(pktio_entry->s.pkt_dpdk.portid,
- (struct ether_addr *)mac_addr);
+ rte_eth_macaddr_get(pkt_priv(pktio_entry)->port_id,
+ (struct rte_ether_addr *)mac_addr);
return ETH_ALEN;
}
+static int mac_set_pkt_dpdk(pktio_entry_t *pktio_entry, const void *mac_addr)
+{
+ struct rte_ether_addr addr = *(const struct rte_ether_addr *)mac_addr;
+
+ return rte_eth_dev_default_mac_addr_set(pkt_priv(pktio_entry)->port_id,
+ &addr);
+}
static int capability_pkt_dpdk(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa)
{
- *capa = pktio_entry->s.pkt_dpdk.capa;
+ *capa = pktio_entry->capa;
return 0;
}
+
static int link_status_pkt_dpdk(pktio_entry_t *pktio_entry)
{
struct rte_eth_link link;
+ int ret;
- rte_eth_link_get(pktio_entry->s.pkt_dpdk.portid, &link);
- return link.link_status;
+ ret = rte_eth_link_get_nowait(pkt_priv(pktio_entry)->port_id, &link);
+ if (ret) {
+ if (ret == -ENOTSUP)
+ _ODP_DBG("rte_eth_link_get_nowait() not supported\n");
+ else
+ _ODP_ERR("rte_eth_link_get_nowait() failed\n");
+ return ODP_PKTIO_LINK_STATUS_UNKNOWN;
+ }
+
+ if (link.link_status)
+ return ODP_PKTIO_LINK_STATUS_UP;
+ return ODP_PKTIO_LINK_STATUS_DOWN;
+}
+
+static int dpdk_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ struct rte_eth_link link;
+ struct rte_eth_fc_conf fc_conf;
+ odp_pktio_link_info_t link_info;
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int ret;
+
+ memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ memset(&link_info, 0, sizeof(odp_pktio_link_info_t));
+
+ ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
+ if (ret) {
+ if (ret != -ENOTSUP) {
+ _ODP_ERR("rte_eth_dev_flow_ctrl_get() failed\n");
+ return -1;
+ }
+ _ODP_DBG("rte_eth_dev_flow_ctrl_get() not supported\n");
+ link_info.pause_rx = ODP_PKTIO_LINK_PAUSE_UNKNOWN;
+ link_info.pause_tx = ODP_PKTIO_LINK_PAUSE_UNKNOWN;
+ } else {
+ link_info.pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ link_info.pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
+ if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE) {
+ link_info.pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
+ } else if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE) {
+ link_info.pause_tx = ODP_PKTIO_LINK_PAUSE_ON;
+ } else if (fc_conf.mode == RTE_ETH_FC_FULL) {
+ link_info.pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
+ link_info.pause_tx = ODP_PKTIO_LINK_PAUSE_ON;
+ }
+ }
+
+ ret = rte_eth_link_get_nowait(port_id, &link);
+ if (ret) {
+ if (ret != -ENOTSUP) {
+ _ODP_ERR("rte_eth_link_get_nowait() failed\n");
+ return -1;
+ }
+ _ODP_DBG("rte_eth_link_get_nowait() not supported\n");
+ link_info.autoneg = ODP_PKTIO_LINK_AUTONEG_UNKNOWN;
+ link_info.duplex = ODP_PKTIO_LINK_DUPLEX_UNKNOWN;
+ link_info.speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ link_info.status = ODP_PKTIO_LINK_STATUS_UNKNOWN;
+ } else {
+ if (link.link_autoneg == RTE_ETH_LINK_AUTONEG)
+ link_info.autoneg = ODP_PKTIO_LINK_AUTONEG_ON;
+ else
+ link_info.autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+
+ if (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
+ link_info.duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ else
+ link_info.duplex = ODP_PKTIO_LINK_DUPLEX_HALF;
+
+ if (link.link_speed == RTE_ETH_SPEED_NUM_NONE)
+ link_info.speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ else
+ link_info.speed = link.link_speed;
+
+ if (link.link_status == RTE_ETH_LINK_UP)
+ link_info.status = ODP_PKTIO_LINK_STATUS_UP;
+ else
+ link_info.status = ODP_PKTIO_LINK_STATUS_DOWN;
+ }
+
+ link_info.media = "unknown";
+
+ *info = link_info;
+ return 0;
}
static void stats_convert(struct rte_eth_stats *rte_stats,
odp_pktio_stats_t *stats)
{
stats->in_octets = rte_stats->ibytes;
+ stats->in_packets = rte_stats->ipackets;
stats->in_ucast_pkts = 0;
- stats->in_discards = rte_stats->imissed;
+ stats->in_mcast_pkts = 0;
+ stats->in_bcast_pkts = 0;
+ stats->in_discards = rte_stats->imissed + rte_stats->rx_nombuf;
stats->in_errors = rte_stats->ierrors;
- stats->in_unknown_protos = 0;
stats->out_octets = rte_stats->obytes;
+ stats->out_packets = rte_stats->opackets;
stats->out_ucast_pkts = 0;
+ stats->out_mcast_pkts = 0;
+ stats->out_bcast_pkts = 0;
stats->out_discards = 0;
stats->out_errors = rte_stats->oerrors;
}
@@ -657,48 +1505,188 @@ static int stats_pkt_dpdk(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
int ret;
struct rte_eth_stats rte_stats;
- ret = rte_eth_stats_get(pktio_entry->s.pkt_dpdk.portid, &rte_stats);
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
if (ret == 0) {
stats_convert(&rte_stats, stats);
return 0;
- } else {
- if (ret > 0)
- return -ret;
- else
- return ret;
}
+
+ if (ret > 0)
+ return -ret;
+ else
+ return ret;
}
static int stats_reset_pkt_dpdk(pktio_entry_t *pktio_entry)
{
- rte_eth_stats_reset(pktio_entry->s.pkt_dpdk.portid);
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+
+ (void)rte_eth_stats_reset(port_id);
+ (void)rte_eth_xstats_reset(port_id);
+ return 0;
+}
+
+static int dpdk_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get_names(port_id, NULL, 0);
+ if (num_stats < 0) {
+ _ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (info == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat_name xstats_names[num_stats];
+
+ ret = rte_eth_xstats_get_names(port_id, xstats_names, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ _ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ strncpy(info[i].name, xstats_names[i].name,
+ ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1);
+
+ return num_stats;
+}
+
+static int dpdk_extra_stats(pktio_entry_t *pktio_entry,
+ uint64_t stats[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get(port_id, NULL, 0);
+ if (num_stats < 0) {
+ _ODP_ERR("rte_eth_xstats_get() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (stats == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat xstats[num_stats];
+
+ ret = rte_eth_xstats_get(port_id, xstats, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ _ODP_ERR("rte_eth_xstats_get() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ stats[i] = xstats[i].value;
+
+ return num_stats;
+}
+
+static int dpdk_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ uint64_t xstat_id = id;
+ int ret;
+
+ ret = rte_eth_xstats_get_by_id(port_id, &xstat_id, stat, 1);
+ if (ret != 1) {
+ _ODP_ERR("rte_eth_xstats_get_by_id() failed: %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int dpdk_pktin_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ _ODP_ERR("DPDK supports max %d per queue counters\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ _ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
+
+ pktin_stats->packets = rte_stats.q_ipackets[index];
+ pktin_stats->octets = rte_stats.q_ibytes[index];
+ pktin_stats->errors = rte_stats.q_errors[index];
+
+ return 0;
+}
+
+static int dpdk_pktout_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ _ODP_ERR("DPDK supports max %d per queue counters\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ _ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
+
+ pktout_stats->packets = rte_stats.q_opackets[index];
+ pktout_stats->octets = rte_stats.q_obytes[index];
+
return 0;
}
-const pktio_if_ops_t dpdk_pktio_ops = {
+const pktio_if_ops_t _odp_dpdk_pktio_ops = {
.name = "odp-dpdk",
.print = NULL,
- .init_global = NULL,
+ .init_global = dpdk_init_global,
.init_local = NULL,
- .term = NULL,
+ .term = dpdk_term_global,
.open = setup_pkt_dpdk,
.close = close_pkt_dpdk,
- .start = start_pkt_dpdk,
+ .start = dpdk_start,
.stop = stop_pkt_dpdk,
.stats = stats_pkt_dpdk,
.stats_reset = stats_reset_pkt_dpdk,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
- .mtu_get = mtu_get_pkt_dpdk,
+ .pktin_queue_stats = dpdk_pktin_stats,
+ .pktout_queue_stats = dpdk_pktout_stats,
+ .extra_stat_info = dpdk_extra_stat_info,
+ .extra_stats = dpdk_extra_stats,
+ .extra_stat_counter = dpdk_extra_stat_counter,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
+ .maxlen_get = dpdk_maxlen_get,
+ .maxlen_set = dpdk_maxlen_set,
.promisc_mode_set = promisc_mode_set_pkt_dpdk,
.promisc_mode_get = promisc_mode_get_pkt_dpdk,
.mac_get = mac_get_pkt_dpdk,
+ .mac_set = mac_set_pkt_dpdk,
.link_status = link_status_pkt_dpdk,
+ .link_info = dpdk_link_info,
.capability = capability_pkt_dpdk,
.config = NULL,
- .input_queues_config = input_queues_config_pkt_dpdk,
- .output_queues_config = output_queues_config_pkt_dpdk,
+ .input_queues_config = dpdk_input_queues_config,
+ .output_queues_config = dpdk_output_queues_config,
.recv = recv_pkt_dpdk,
.send = send_pkt_dpdk
};
diff --git a/platform/linux-dpdk/odp_packet_flags.c b/platform/linux-dpdk/odp_packet_flags.c
index 20517e164..345ac8488 100644
--- a/platform/linux-dpdk/odp_packet_flags.c
+++ b/platform/linux-dpdk/odp_packet_flags.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,310 +8,142 @@
#include <odp/api/packet_flags.h>
#include <odp_packet_internal.h>
-#define retflag(pkt, x, layer) do { \
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); \
- if (pkt_hdr->p.parsed_layers < layer) \
- packet_parse_layer(pkt_hdr, layer); \
- return pkt_hdr->p.x; \
+#define setflag(pkt, x, v) do { \
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt); \
+ pkt_hdr->p.x = (v) & 1; \
} while (0)
-#define setflag(pkt, x, v, layer) do { \
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); \
- if (pkt_hdr->p.parsed_layers < layer) \
- packet_parse_layer(pkt_hdr, layer); \
- pkt_hdr->p.x = v & 1; \
- } while (0)
-
-int odp_packet_has_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
- return odp_packet_hdr(pkt)->p.error_flags.all != 0;
-}
-
-/* Get Input Flags */
-
-int odp_packet_has_l2_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- /* L2 parsing is always done by default and hence
- no additional check is required */
- return pkt_hdr->p.error_flags.frame_len
- | pkt_hdr->p.error_flags.snap_len
- | pkt_hdr->p.error_flags.l2_chksum;
-}
-
-int odp_packet_has_l3(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.l3, LAYER_L3);
-}
-
-int odp_packet_has_l3_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
-
- return pkt_hdr->p.error_flags.ip_err;
-}
-
-int odp_packet_has_l4(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.l4, LAYER_L4);
-}
-
-int odp_packet_has_l4_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
-
- return pkt_hdr->p.error_flags.tcp_err | pkt_hdr->p.error_flags.udp_err;
-}
-
-int odp_packet_has_eth_bcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.eth_bcast, LAYER_L2);
-}
-
-int odp_packet_has_eth_mcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.eth_mcast, LAYER_L2);
-}
-
-int odp_packet_has_vlan(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.vlan, LAYER_L2);
-}
-
-int odp_packet_has_vlan_qinq(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.vlan_qinq, LAYER_L2);
-}
-
-int odp_packet_has_arp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.arp, LAYER_L3);
-}
-
-int odp_packet_has_ipv4(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipv4, LAYER_L3);
-}
-
-int odp_packet_has_ipv6(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipv6, LAYER_L3);
-}
-
-int odp_packet_has_ip_bcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ip_bcast, LAYER_L3);
-}
-
-int odp_packet_has_ip_mcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ip_mcast, LAYER_L3);
-}
-
-int odp_packet_has_ipfrag(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipfrag, LAYER_L3);
-}
-
-int odp_packet_has_ipopt(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipopt, LAYER_L3);
-}
-
-int odp_packet_has_ipsec(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipsec, LAYER_L4);
-}
-
-int odp_packet_has_udp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.udp, LAYER_L4);
-}
-
-int odp_packet_has_tcp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.tcp, LAYER_L4);
-}
-
-int odp_packet_has_sctp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.sctp, LAYER_L4);
-}
-
-int odp_packet_has_icmp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.icmp, LAYER_L4);
-}
-
-odp_packet_color_t odp_packet_color(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.color, LAYER_ALL);
-}
-
void odp_packet_color_set(odp_packet_t pkt, odp_packet_color_t color)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
pkt_hdr->p.input_flags.color = color;
}
-odp_bool_t odp_packet_drop_eligible(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
-
- return !pkt_hdr->p.input_flags.nodrop;
-}
-
void odp_packet_drop_eligible_set(odp_packet_t pkt, odp_bool_t drop)
{
- setflag(pkt, input_flags.nodrop, !drop, LAYER_ALL);
-}
-
-int8_t odp_packet_shaper_len_adjust(odp_packet_t pkt)
-{
- retflag(pkt, output_flags.shaper_len_adj, LAYER_ALL);
+ setflag(pkt, input_flags.nodrop, !drop);
}
void odp_packet_shaper_len_adjust_set(odp_packet_t pkt, int8_t adj)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
-
- pkt_hdr->p.output_flags.shaper_len_adj = adj;
+ pkt_hdr->p.flags.shaper_len_adj = adj;
}
/* Set Input Flags */
void odp_packet_has_l2_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.l2, val, LAYER_L2);
+ setflag(pkt, input_flags.l2, val);
}
void odp_packet_has_l3_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.l3, val, LAYER_L3);
+ setflag(pkt, input_flags.l3, val);
}
void odp_packet_has_l4_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.l4, val, LAYER_L4);
+ setflag(pkt, input_flags.l4, val);
}
void odp_packet_has_eth_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.eth, val, LAYER_L2);
+ setflag(pkt, input_flags.eth, val);
}
void odp_packet_has_eth_bcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.eth_bcast, val, LAYER_L2);
+ setflag(pkt, input_flags.eth_bcast, val);
}
void odp_packet_has_eth_mcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.eth_mcast, val, LAYER_L2);
+ setflag(pkt, input_flags.eth_mcast, val);
}
void odp_packet_has_jumbo_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.jumbo, val, LAYER_L2);
+ setflag(pkt, input_flags.jumbo, val);
}
void odp_packet_has_vlan_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.vlan, val, LAYER_L2);
+ setflag(pkt, input_flags.vlan, val);
+ setflag(pkt, input_flags.vlan_qinq, 0);
}
void odp_packet_has_vlan_qinq_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.vlan_qinq, val, LAYER_L2);
+ setflag(pkt, input_flags.vlan, val);
+ setflag(pkt, input_flags.vlan_qinq, val);
}
void odp_packet_has_arp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.arp, val, LAYER_L3);
+ setflag(pkt, input_flags.arp, val);
}
void odp_packet_has_ipv4_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipv4, val, LAYER_L3);
+ setflag(pkt, input_flags.ipv4, val);
}
void odp_packet_has_ipv6_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipv6, val, LAYER_L3);
+ setflag(pkt, input_flags.ipv6, val);
}
void odp_packet_has_ip_bcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ip_bcast, val, LAYER_L3);
+ setflag(pkt, input_flags.ip_bcast, val);
}
void odp_packet_has_ip_mcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ip_mcast, val, LAYER_L3);
+ setflag(pkt, input_flags.ip_mcast, val);
}
void odp_packet_has_ipfrag_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipfrag, val, LAYER_L3);
+ setflag(pkt, input_flags.ipfrag, val);
}
void odp_packet_has_ipopt_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipopt, val, LAYER_L3);
+ setflag(pkt, input_flags.ipopt, val);
}
void odp_packet_has_ipsec_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipsec, val, LAYER_L4);
+ setflag(pkt, input_flags.ipsec, val);
}
void odp_packet_has_udp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.udp, val, LAYER_L4);
+ setflag(pkt, input_flags.udp, val);
}
void odp_packet_has_tcp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.tcp, val, LAYER_L4);
+ setflag(pkt, input_flags.tcp, val);
}
void odp_packet_has_sctp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.sctp, val, LAYER_L4);
+ setflag(pkt, input_flags.sctp, val);
}
void odp_packet_has_icmp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.icmp, val, LAYER_L4);
+ setflag(pkt, input_flags.icmp, val);
}
void odp_packet_has_ts_clr(odp_packet_t pkt)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
pkt_hdr->p.input_flags.timestamp = 0;
}
-
-/* Include non-inlined versions of API functions */
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/packet_flag_inlines_api.h>
-#endif
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index e6d415954..dfd14a978 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -1,32 +1,46 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_types.h>
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
#include <odp/api/pool.h>
-#include <odp_pool_internal.h>
-#include <odp_buffer_internal.h>
-#include <odp_packet_internal.h>
-#include <odp_timer_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/shared_memory.h>
-#include <odp/api/align.h>
-#include <odp_internal.h>
+#include <odp/api/std_types.h>
+
+#include <odp/api/plat/pool_inline_types.h>
+
+#include <odp_buffer_internal.h>
#include <odp_config_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/debug.h>
#include <odp_debug_internal.h>
-#include <odp/api/cpumask.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_event_vector_internal.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_timer_internal.h>
-#include <string.h>
-#include <stdlib.h>
-#include <math.h>
-#include <inttypes.h>
+#include <rte_config.h>
+#include <rte_errno.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_mbuf_pool_ops.h>
+/* ppc64 rte_memcpy.h (included through rte_mempool.h) may define vector */
+#if defined(__PPC64__) && defined(vector)
+ #undef vector
+#endif
-/* for DPDK */
-#include <odp_packet_dpdk.h>
+#include <inttypes.h>
+#include <math.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
#ifdef POOL_USE_TICKETLOCK
#include <odp/api/ticketlock.h>
@@ -40,604 +54,953 @@
#define LOCK_INIT(a) odp_spinlock_init(a)
#endif
+/* Pool name format */
+#define POOL_NAME_FORMAT "%" PRIu64 "-%d-%s"
+
/* Define a practical limit for contiguous memory allocations */
-#define MAX_SIZE (10 * 1024 * 1024)
+#define MAX_SIZE (CONFIG_PACKET_SEG_SIZE - CONFIG_BUFFER_ALIGN_MIN)
-typedef struct pool_table_t {
- pool_entry_t pool[ODP_CONFIG_POOLS];
- odp_shm_t shm;
-} pool_table_t;
+/* Maximum packet user area size */
+#define MAX_UAREA_SIZE 2048
+#define ROUNDUP_DIV(a, b) (((a) + ((b) - 1)) / (b))
+
+ODP_STATIC_ASSERT(CONFIG_INTERNAL_POOLS < CONFIG_POOLS,
+ "Internal pool count needs to be less than total configured pool count");
/* The pool table ptr - resides in shared memory */
-static pool_table_t *pool_tbl;
+pool_global_t *_odp_pool_glb;
+
+#include <odp/visibility_begin.h>
+
+/* Fill in pool header field offsets for inline functions */
+const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
+ .index = offsetof(pool_t, pool_idx),
+ .seg_len = offsetof(pool_t, seg_len),
+ .uarea_size = offsetof(pool_t, params.pkt.uarea_size),
+ .ext_head_offset = offsetof(pool_t, ext_head_offset)
+};
+
+#include <odp/visibility_end.h>
+
+struct mem_cb_arg_t {
+ uint8_t *addr;
+ uintptr_t min_data_addr;
+ uintptr_t max_data_addr;
+ odp_bool_t match;
+};
+
+struct priv_data_t {
+ pool_t *pool;
+ odp_pool_type_t type;
+ int event_type;
+};
+
+static void ptr_from_mempool(struct rte_mempool *mp ODP_UNUSED, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx ODP_UNUSED)
+{
+ struct mem_cb_arg_t *args = (struct mem_cb_arg_t *)opaque;
+ uint8_t *min_addr = (uint8_t *)memhdr->addr;
+ uint8_t *max_addr = min_addr + memhdr->len;
+
+ /* Match found already */
+ if (args->match)
+ return;
+
+ if (args->addr >= min_addr && args->addr < max_addr)
+ args->match = true;
+}
+
+static pool_t *find_pool(_odp_event_hdr_t *event_hdr)
+{
+ int i;
+
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
+ struct mem_cb_arg_t args;
-/* Pool entry pointers (for inlining) */
-void *pool_entry_ptr[ODP_CONFIG_POOLS];
+ if (pool->rte_mempool == NULL)
+ continue;
+ args.addr = (uint8_t *)event_hdr;
+ args.match = false;
+ rte_mempool_mem_iter(pool->rte_mempool, ptr_from_mempool, &args);
+
+ if (args.match)
+ return pool;
+ }
-int odp_pool_init_global(void)
+ return NULL;
+}
+
+static int read_config_file(pool_global_t *pool_gbl)
+{
+ const char *str;
+ int val = 0;
+
+ _ODP_PRINT("Pool config:\n");
+
+ str = "pool.pkt.max_num";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val < 0 || val > CONFIG_POOL_MAX_NUM) {
+ _ODP_ERR("Bad value %s = %u\n", str, val);
+ return -1;
+ }
+
+ pool_gbl->config.pkt_max_num = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ _ODP_PRINT("\n");
+
+ return 0;
+}
+
+int _odp_pool_init_global(void)
{
uint32_t i;
odp_shm_t shm;
- shm = odp_shm_reserve("odp_pools",
- sizeof(pool_table_t),
- sizeof(pool_entry_t), 0);
+ shm = odp_shm_reserve("_odp_pool_global", sizeof(pool_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
- pool_tbl = odp_shm_addr(shm);
+ _odp_pool_glb = odp_shm_addr(shm);
- if (pool_tbl == NULL)
+ if (_odp_pool_glb == NULL)
return -1;
- memset(pool_tbl, 0, sizeof(pool_table_t));
- pool_tbl->shm = shm;
+ memset(_odp_pool_glb, 0, sizeof(pool_global_t));
+ _odp_pool_glb->shm = shm;
+
+ if (read_config_file(_odp_pool_glb)) {
+ odp_shm_free(shm);
+ _odp_pool_glb = NULL;
+ return -1;
+ }
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- /* init locks */
- pool_entry_t *pool = &pool_tbl->pool[i];
- LOCK_INIT(&pool->s.lock);
- pool->s.pool_hdl = pool_index_to_handle(i);
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
- pool_entry_ptr[i] = pool;
+ LOCK_INIT(&pool->lock);
+ pool->pool_idx = i;
}
- ODP_DBG("\nPool init global\n");
- ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s));
- ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t));
- ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
- ODP_DBG("\n");
+ _ODP_DBG("\nPool init global\n");
+ _ODP_DBG(" event_hdr_t size: %zu\n", sizeof(_odp_event_hdr_t));
+ _ODP_DBG(" odp_buffer_hdr_t size: %zu\n", sizeof(odp_buffer_hdr_t));
+ _ODP_DBG(" odp_packet_hdr_t size: %zu\n", sizeof(odp_packet_hdr_t));
+ _ODP_DBG(" odp_timeout_hdr_t size: %zu\n", sizeof(odp_timeout_hdr_t));
+ _ODP_DBG(" odp_event_vector_hdr_t size: %zu\n", sizeof(odp_event_vector_hdr_t));
+
+ _ODP_DBG("\n");
return 0;
}
-int odp_pool_init_local(void)
+int _odp_pool_init_local(void)
{
return 0;
}
-int odp_pool_term_global(void)
+int _odp_pool_term_global(void)
{
int ret;
- ret = odp_shm_free(pool_tbl->shm);
+ if (_odp_pool_glb == NULL)
+ return 0;
+
+ ret = odp_shm_free(_odp_pool_glb->shm);
if (ret < 0)
- ODP_ERR("shm free failed");
+ _ODP_ERR("SHM free failed\n");
return ret;
}
-int odp_pool_term_local(void)
+int _odp_pool_term_local(void)
{
return 0;
}
+int _odp_event_is_valid(odp_event_t event)
+{
+ pool_t *pool;
+ _odp_event_hdr_t *event_hdr = _odp_event_hdr(event);
+
+ if (event == ODP_EVENT_INVALID)
+ return 0;
+
+ /* Check that buffer header is from a known pool */
+ pool = find_pool(event_hdr);
+ if (pool == NULL)
+ return 0;
+
+ if (pool != _odp_pool_entry(event_hdr->hdr.pool))
+ return 0;
+
+ if (event_hdr->hdr.index >= pool->rte_mempool->size)
+ return 0;
+
+ return 1;
+}
+
int odp_pool_capability(odp_pool_capability_t *capa)
{
+ odp_pool_stats_opt_t supported_stats;
+ /* Reserve pools for internal usage */
+ unsigned int max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
+
memset(capa, 0, sizeof(odp_pool_capability_t));
- capa->max_pools = ODP_CONFIG_POOLS;
+ capa->max_pools = max_pools;
+
+ supported_stats.all = 0;
+ supported_stats.bit.available = 1;
/* Buffer pools */
- capa->buf.max_pools = ODP_CONFIG_POOLS;
- capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
+ capa->buf.max_pools = max_pools;
+ capa->buf.max_align = CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
+ capa->buf.max_uarea_size = MAX_UAREA_SIZE;
+ capa->buf.uarea_persistence = true;
+ capa->buf.min_cache_size = 0;
+ capa->buf.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->buf.stats.all = supported_stats.all;
/* Packet pools */
- capa->pkt.max_pools = ODP_CONFIG_POOLS;
- capa->pkt.max_len = 0;
- capa->pkt.max_num = CONFIG_POOL_MAX_NUM;
- capa->pkt.min_headroom = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_align = CONFIG_BUFFER_ALIGN_MIN;
+ capa->pkt.max_pools = max_pools;
+ capa->pkt.max_len = CONFIG_PACKET_MAX_SEG_LEN;
+ capa->pkt.max_num = _odp_pool_glb->config.pkt_max_num;
+ capa->pkt.min_headroom = RTE_PKTMBUF_HEADROOM;
+ capa->pkt.max_headroom = RTE_PKTMBUF_HEADROOM;
capa->pkt.min_tailroom = CONFIG_PACKET_TAILROOM;
- capa->pkt.max_segs_per_pkt = CONFIG_PACKET_MAX_SEGS;
+ capa->pkt.max_segs_per_pkt = PKT_MAX_SEGS;
capa->pkt.min_seg_len = CONFIG_PACKET_SEG_LEN_MIN;
- capa->pkt.max_seg_len = CONFIG_PACKET_SEG_LEN_MAX;
- capa->pkt.max_uarea_size = MAX_SIZE;
+ capa->pkt.max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+ capa->pkt.max_uarea_size = MAX_UAREA_SIZE;
+ capa->pkt.uarea_persistence = true;
+ capa->pkt.min_cache_size = 0;
+ capa->pkt.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->pkt.stats.all = supported_stats.all;
/* Timeout pools */
- capa->tmo.max_pools = ODP_CONFIG_POOLS;
+ capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
+ capa->tmo.max_uarea_size = MAX_UAREA_SIZE;
+ capa->tmo.uarea_persistence = true;
+ capa->tmo.min_cache_size = 0;
+ capa->tmo.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->tmo.stats.all = supported_stats.all;
+
+ /* Vector pools */
+ capa->vector.max_pools = max_pools;
+ capa->vector.max_num = CONFIG_POOL_MAX_NUM;
+ capa->vector.max_uarea_size = MAX_UAREA_SIZE;
+ capa->vector.uarea_persistence = true;
+ capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.min_cache_size = 0;
+ capa->vector.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->vector.stats.all = supported_stats.all;
return 0;
}
struct mbuf_ctor_arg {
+ pool_t *pool;
uint16_t seg_buf_offset; /* To skip the ODP buf/pkt/tmo header */
uint16_t seg_buf_size; /* size of user data */
- int type;
- int pkt_uarea_size; /* size of user area in bytes */
-};
-
-struct mbuf_pool_ctor_arg {
- /* This has to be the first member */
- struct rte_pktmbuf_pool_private pkt;
- odp_pool_t pool_hdl;
+ odp_pool_type_t type; /* ODP pool type */
+ int event_type; /* ODP event type */
};
-static void
-odp_dpdk_mbuf_pool_ctor(struct rte_mempool *mp,
- void *opaque_arg)
-{
- struct mbuf_pool_ctor_arg *mbp_priv;
-
- if (mp->private_data_size < sizeof(struct mbuf_pool_ctor_arg)) {
- ODP_ERR("(%s) private_data_size %d < %d",
- mp->name, (int) mp->private_data_size,
- (int) sizeof(struct mbuf_pool_ctor_arg));
- return;
- }
- mbp_priv = rte_mempool_get_priv(mp);
- *mbp_priv = *((struct mbuf_pool_ctor_arg *)opaque_arg);
-}
-
-/* ODP DPDK mbuf constructor.
- * This is a combination of rte_pktmbuf_init in rte_mbuf.c
- * and testpmd_mbuf_ctor in testpmd.c
- */
-static void
-odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
- void *opaque_arg,
- void *raw_mbuf,
- unsigned i)
-{
- struct mbuf_ctor_arg *mb_ctor_arg;
- struct rte_mbuf *mb = raw_mbuf;
- struct odp_buffer_hdr_t *buf_hdr;
- struct mbuf_pool_ctor_arg *mbp_ctor_arg = rte_mempool_get_priv(mp);
-
- /* The rte_mbuf is at the begninning in all cases */
- mb_ctor_arg = (struct mbuf_ctor_arg *)opaque_arg;
- mb = (struct rte_mbuf *)raw_mbuf;
-
- RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
-
- memset(mb, 0, mp->elt_size);
-
- /* Start of buffer is just after the ODP type specific header
- * which contains in the very beginning the rte_mbuf struct */
- mb->buf_addr = (char *)mb + mb_ctor_arg->seg_buf_offset;
- mb->buf_physaddr = rte_mempool_virt2phy(mp, mb) +
- mb_ctor_arg->seg_buf_offset;
- mb->buf_len = mb_ctor_arg->seg_buf_size;
- mb->priv_size = rte_pktmbuf_priv_size(mp);
-
- /* keep some headroom between start of buffer and data */
- if (mb_ctor_arg->type == ODP_POOL_PACKET) {
- odp_packet_hdr_t *pkt_hdr;
- mb->data_off = RTE_PKTMBUF_HEADROOM;
- mb->nb_segs = 1;
- mb->port = 0xff;
- mb->vlan_tci = 0;
- pkt_hdr = (odp_packet_hdr_t *)raw_mbuf;
- pkt_hdr->uarea_size = mb_ctor_arg->pkt_uarea_size;
- } else {
- mb->data_off = 0;
- }
-
- /* init some constant fields */
- mb->pool = mp;
- mb->ol_flags = 0;
-
- /* Save index, might be useful for debugging purposes */
- buf_hdr = (struct odp_buffer_hdr_t *)raw_mbuf;
- buf_hdr->index = i;
- buf_hdr->handle.handle = (odp_buffer_t)buf_hdr;
- buf_hdr->pool_hdl = mbp_ctor_arg->pool_hdl;
- buf_hdr->type = mb_ctor_arg->type;
- buf_hdr->event_type = mb_ctor_arg->type;
-}
-
-#define CHECK_U16_OVERFLOW(X) do { \
- if (odp_unlikely(X > UINT16_MAX)) { \
- ODP_ERR("Invalid size: %d", X); \
- UNLOCK(&pool->s.lock); \
- return ODP_POOL_INVALID; \
- } \
-} while (0)
-
-static int check_params(odp_pool_param_t *params)
+static int check_params(const odp_pool_param_t *params)
{
odp_pool_capability_t capa;
- odp_pool_capability(&capa);
+ if (!params || odp_pool_capability(&capa) < 0)
+ return -1;
switch (params->type) {
case ODP_POOL_BUFFER:
+ if (params->buf.num == 0) {
+ _ODP_ERR("buf.num zero\n");
+ return -1;
+ }
+
if (params->buf.num > capa.buf.max_num) {
- printf("buf.num too large %u\n", params->buf.num);
+ _ODP_ERR("buf.num too large %u\n", params->buf.num);
return -1;
}
if (params->buf.size > capa.buf.max_size) {
- printf("buf.size too large %u\n", params->buf.size);
+ _ODP_ERR("buf.size too large %u\n", params->buf.size);
return -1;
}
if (params->buf.align > capa.buf.max_align) {
- printf("buf.align too large %u\n", params->buf.align);
+ _ODP_ERR("buf.align too large %u\n", params->buf.align);
+ return -1;
+ }
+
+ if (!_ODP_CHECK_IS_POWER2(params->buf.align)) {
+ _ODP_ERR("buf.align not power of two %u\n", params->buf.align);
+ return -1;
+ }
+
+ if (params->buf.cache_size > capa.buf.max_cache_size) {
+ _ODP_ERR("buf.cache_size too large %u\n", params->buf.cache_size);
+ return -1;
+ }
+
+ if (params->buf.uarea_size > capa.buf.max_uarea_size) {
+ _ODP_ERR("buf.uarea_size too large %u\n", params->buf.uarea_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.buf.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
return -1;
}
break;
case ODP_POOL_PACKET:
+ if (params->pkt.align > capa.pkt.max_align) {
+ _ODP_ERR("pkt.align too large %u\n", params->pkt.align);
+ return -1;
+ }
+
+ if (!_ODP_CHECK_IS_POWER2(params->pkt.align)) {
+ _ODP_ERR("pkt.align not power of two %u\n", params->pkt.align);
+ return -1;
+ }
+
+ if (params->pkt.num == 0) {
+ _ODP_ERR("pkt.num zero\n");
+ return -1;
+ }
+
if (params->pkt.num > capa.pkt.max_num) {
- printf("pkt.num too large %u\n", params->pkt.num);
+ _ODP_ERR("pkt.num too large %u\n", params->pkt.num);
+ return -1;
+ }
+ if (params->pkt.max_num > capa.pkt.max_num) {
+ _ODP_ERR("pkt.max_num too large %u\n", params->pkt.max_num);
return -1;
}
if (params->pkt.seg_len > capa.pkt.max_seg_len) {
- printf("pkt.seg_len too large %u\n",
- params->pkt.seg_len);
+ _ODP_ERR("pkt.seg_len too large %u\n", params->pkt.seg_len);
return -1;
}
if (params->pkt.uarea_size > capa.pkt.max_uarea_size) {
- printf("pkt.uarea_size too large %u\n",
- params->pkt.uarea_size);
+ _ODP_ERR("pkt.uarea_size too large %u\n", params->pkt.uarea_size);
+ return -1;
+ }
+
+ if (params->pkt.headroom > capa.pkt.max_headroom) {
+ _ODP_ERR("pkt.headroom too large %u\n", params->pkt.headroom);
+ return -1;
+ }
+
+ if (params->pkt.cache_size > capa.pkt.max_cache_size) {
+ _ODP_ERR("pkt.cache_size too large %u\n", params->pkt.cache_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.pkt.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
return -1;
}
break;
case ODP_POOL_TIMEOUT:
+ if (params->tmo.num == 0) {
+ _ODP_ERR("tmo.num zero\n");
+ return -1;
+ }
+
if (params->tmo.num > capa.tmo.max_num) {
- printf("tmo.num too large %u\n", params->tmo.num);
+ _ODP_ERR("tmo.num too large %u\n", params->tmo.num);
+ return -1;
+ }
+
+ if (params->tmo.cache_size > capa.tmo.max_cache_size) {
+ _ODP_ERR("tmo.cache_size too large %u\n", params->tmo.cache_size);
+ return -1;
+ }
+
+ if (params->tmo.uarea_size > capa.tmo.max_uarea_size) {
+ _ODP_ERR("tmo.uarea_size too large %u\n", params->tmo.uarea_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.tmo.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
+ break;
+
+ case ODP_POOL_VECTOR:
+ if (params->vector.num == 0) {
+ _ODP_ERR("vector.num zero\n");
+ return -1;
+ }
+
+ if (params->vector.num > capa.vector.max_num) {
+ _ODP_ERR("vector.num too large %u\n", params->vector.num);
+ return -1;
+ }
+
+ if (params->vector.max_size == 0) {
+ _ODP_ERR("vector.max_size zero\n");
+ return -1;
+ }
+
+ if (params->vector.max_size > capa.vector.max_size) {
+ _ODP_ERR("vector.max_size too large %u\n", params->vector.max_size);
return -1;
}
+
+ if (params->vector.cache_size > capa.vector.max_cache_size) {
+ _ODP_ERR("vector.cache_size too large %u\n", params->vector.cache_size);
+ return -1;
+ }
+
+ if (params->vector.uarea_size > capa.vector.max_uarea_size) {
+ _ODP_ERR("vector.uarea_size too large %u\n", params->vector.uarea_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.vector.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
default:
- printf("bad pool type %i\n", params->type);
+ _ODP_ERR("bad pool type %i\n", params->type);
return -1;
}
return 0;
}
-odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
+static unsigned int calc_cache_size(uint32_t pool_size, uint32_t max_num)
{
- struct mbuf_pool_ctor_arg mbp_ctor_arg;
- struct mbuf_ctor_arg mb_ctor_arg;
- odp_pool_t pool_hdl = ODP_POOL_INVALID;
- unsigned mb_size, i, cache_size;
- size_t hdr_size;
- pool_entry_t *pool;
- uint32_t buf_align, blk_size, headroom, tailroom, min_seg_len;
- uint32_t max_len, min_align;
- char pool_name[ODP_POOL_NAME_LEN];
- char *rte_name = NULL;
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- unsigned j;
-#endif
+ unsigned int cache_size;
+ unsigned int max_supported = pool_size / 1.5;
+ int num_threads = odp_global_ro.init_param.num_control +
+ odp_global_ro.init_param.num_worker;
- if (check_params(params))
- return ODP_POOL_INVALID;
+ if (max_num == 0)
+ return 0;
- if (name == NULL) {
- pool_name[0] = 0;
- } else {
- strncpy(pool_name, name, ODP_POOL_NAME_LEN - 1);
- pool_name[ODP_POOL_NAME_LEN - 1] = 0;
+ cache_size = RTE_MIN(max_num, max_supported);
+
+ while (cache_size) {
+ if ((pool_size % cache_size) == 0)
+ break;
+ cache_size--;
}
- /* Find an unused buffer pool slot and initalize it as requested */
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- uint32_t num;
- struct rte_mempool *mp;
+ if (odp_unlikely(cache_size == 0)) {
+ cache_size = RTE_MIN(max_num, max_supported);
+ _ODP_DBG("Using nonoptimal cache size: %d\n", cache_size);
+ }
+
+ /* Cache size of one exposes DPDK implementation bug */
+ if (cache_size == 1)
+ cache_size = 0;
- pool = get_pool_entry(i);
+ _ODP_DBG("Cache_size: %d\n", cache_size);
+
+ if (num_threads && cache_size) {
+ unsigned int total_cache_size = num_threads * cache_size;
+
+ if (total_cache_size >= pool_size)
+ _ODP_DBG("Entire pool fits into thread local caches. "
+ "Pool starvation may occur if the pool is used "
+ "by multiple threads.\n");
+ }
+
+ return cache_size;
+}
+
+static void format_pool_name(const char *name, char *rte_name)
+{
+ int i = 0;
+
+ /* Use pid and counter to make name unique */
+ do {
+ snprintf(rte_name, RTE_MEMPOOL_NAMESIZE, POOL_NAME_FORMAT,
+ (odp_instance_t)odp_global_ro.main_pid, i++, name);
+ rte_name[RTE_MEMPOOL_NAMESIZE - 1] = 0;
+ } while (rte_mempool_lookup(rte_name) != NULL);
+}
- LOCK(&pool->s.lock);
- if (pool->s.rte_mempool != NULL) {
- UNLOCK(&pool->s.lock);
+static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt)
+{
+ odp_shm_t shm;
+ char uarea_name[ODP_SHM_NAME_LEN];
+
+ pool->uarea_shm = ODP_SHM_INVALID;
+
+ if (uarea_size == 0) {
+ pool->param_uarea_size = 0;
+ pool->uarea_size = 0;
+ pool->uarea_shm_size = 0;
+ return 0;
+ }
+
+ snprintf(uarea_name, ODP_SHM_NAME_LEN, "_odp_pool_%03i_uarea_%s",
+ pool->pool_idx, pool->name);
+ uarea_name[ODP_SHM_NAME_LEN - 1] = 0;
+
+ pool->param_uarea_size = uarea_size;
+ pool->uarea_size = _ODP_ROUNDUP_CACHE_LINE(uarea_size);
+ pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
+
+ shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ pool->uarea_shm = shm;
+ pool->uarea_base_addr = odp_shm_addr(shm);
+ return 0;
+}
+
+/* If unused pool found, return it locked */
+static pool_t *get_unused_pool(void)
+{
+ pool_t *pool;
+
+ for (int i = 0; i < CONFIG_POOLS; i++) {
+ pool = _odp_pool_entry_from_idx(i);
+ LOCK(&pool->lock);
+
+ if (pool->rte_mempool != NULL) {
+ UNLOCK(&pool->lock);
continue;
}
- switch (params->type) {
- case ODP_POOL_BUFFER:
- buf_align = params->buf.align;
- blk_size = params->buf.size;
-
- /* Validate requested buffer alignment */
- if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
- buf_align !=
- ROUNDDOWN_POWER2(buf_align, buf_align)) {
- UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
- }
-
- /* Set correct alignment based on input request */
- if (buf_align == 0)
- buf_align = ODP_CACHE_LINE_SIZE;
- else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN)
- buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
-
- if (params->buf.align != 0)
- blk_size = ROUNDUP_ALIGN(blk_size,
- buf_align);
-
- hdr_size = sizeof(odp_buffer_hdr_t);
- CHECK_U16_OVERFLOW(blk_size);
- mbp_ctor_arg.pkt.mbuf_data_room_size = blk_size;
- num = params->buf.num;
- ODP_DBG("type: buffer name: %s num: "
- "%u size: %u align: %u\n", pool_name, num,
- params->buf.size, params->buf.align);
- break;
- case ODP_POOL_PACKET:
- headroom = CONFIG_PACKET_HEADROOM;
- tailroom = CONFIG_PACKET_TAILROOM;
- min_seg_len = CONFIG_PACKET_SEG_LEN_MIN;
- min_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
-
- blk_size = min_seg_len;
- if (params->pkt.seg_len > blk_size)
- blk_size = params->pkt.seg_len;
- if (params->pkt.len > blk_size)
- blk_size = params->pkt.len;
- /* Make sure at least one max len packet fits in the
- * pool.
- */
- max_len = 0;
- if (params->pkt.max_len != 0)
- max_len = params->pkt.max_len;
- if ((max_len + blk_size) / blk_size > params->pkt.num)
- blk_size = (max_len + params->pkt.num) /
- params->pkt.num;
- blk_size = ROUNDUP_ALIGN(headroom + blk_size +
- tailroom, min_align);
- /* Segment size minus headroom might be rounded down by
- * the driver to the nearest multiple of 1024. Round it
- * up here to make sure the requested size still going
- * to fit there without segmentation.
- */
- blk_size = ROUNDUP_ALIGN(blk_size - headroom,
- min_seg_len) + headroom;
-
- hdr_size = sizeof(odp_packet_hdr_t) +
- params->pkt.uarea_size;
- mb_ctor_arg.pkt_uarea_size = params->pkt.uarea_size;
- CHECK_U16_OVERFLOW(blk_size);
- mbp_ctor_arg.pkt.mbuf_data_room_size = blk_size;
- num = params->pkt.num;
-
- ODP_DBG("type: packet, name: %s, "
- "num: %u, len: %u, blk_size: %u, "
- "uarea_size %d, hdr_size %d\n",
- pool_name, num, params->pkt.len, blk_size,
- params->pkt.uarea_size, hdr_size);
- break;
- case ODP_POOL_TIMEOUT:
- hdr_size = sizeof(odp_timeout_hdr_t);
- mbp_ctor_arg.pkt.mbuf_data_room_size = 0;
- num = params->tmo.num;
- ODP_DBG("type: tmo name: %s num: %u\n",
- pool_name, num);
- break;
- default:
- ODP_ERR("Bad type %i\n",
- params->type);
- UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
- break;
- }
+ return pool;
+ }
- mb_ctor_arg.seg_buf_offset =
- (uint16_t)ROUNDUP_CACHE_LINE(hdr_size);
- mb_ctor_arg.seg_buf_size = mbp_ctor_arg.pkt.mbuf_data_room_size;
- mb_ctor_arg.type = params->type;
- mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
- mbp_ctor_arg.pool_hdl = pool->s.pool_hdl;
- mbp_ctor_arg.pkt.mbuf_priv_size = mb_ctor_arg.seg_buf_offset -
- sizeof(struct rte_mbuf);
+ return NULL;
+}
- ODP_DBG("Metadata size: %u, mb_size %d\n",
- mb_ctor_arg.seg_buf_offset, mb_size);
- cache_size = 0;
-#if RTE_MEMPOOL_CACHE_MAX_SIZE > 0
- j = ceil((double)num / RTE_MEMPOOL_CACHE_MAX_SIZE);
- j = RTE_MAX(j, 2UL);
- for (; j <= (num / 2); ++j)
- if ((num % j) == 0) {
- cache_size = num / j;
- break;
- }
- if (odp_unlikely(cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
- (uint32_t) cache_size * 1.5 > num)) {
- ODP_ERR("cache_size calc failure: %d\n", cache_size);
- cache_size = 0;
- }
-#endif
- ODP_DBG("cache_size %d\n", cache_size);
-
- if (strlen(pool_name) > RTE_MEMPOOL_NAMESIZE - 1) {
- ODP_ERR("Max pool name size: %u. Trimming %u long, name collision might happen!\n",
- RTE_MEMPOOL_NAMESIZE - 1, strlen(pool_name));
- rte_name = malloc(RTE_MEMPOOL_NAMESIZE);
- snprintf(rte_name, RTE_MEMPOOL_NAMESIZE - 1, "%s",
- pool_name);
- }
-
- pool->s.rte_mempool =
- rte_mempool_create(rte_name ? rte_name : pool_name,
- num,
- mb_size,
- cache_size,
- sizeof(struct mbuf_pool_ctor_arg),
- odp_dpdk_mbuf_pool_ctor,
- &mbp_ctor_arg,
- odp_dpdk_mbuf_ctor,
- &mb_ctor_arg,
- rte_socket_id(),
- 0);
- free(rte_name);
- if (pool->s.rte_mempool == NULL) {
- ODP_ERR("Cannot init DPDK mbuf pool: %s\n",
- rte_strerror(rte_errno));
- UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
- }
- /* found free pool */
- if (name == NULL) {
- pool->s.name[0] = 0;
- } else {
- strncpy(pool->s.name, name,
- ODP_POOL_NAME_LEN - 1);
- pool->s.name[ODP_POOL_NAME_LEN - 1] = 0;
- }
-
- pool->s.params = *params;
- mp = pool->s.rte_mempool;
- ODP_DBG("Header/element/trailer size: %u/%u/%u, "
- "total pool size: %lu\n",
- mp->header_size, mp->elt_size, mp->trailer_size,
- (unsigned long)((mp->header_size + mp->elt_size +
- mp->trailer_size) * num));
- UNLOCK(&pool->s.lock);
- pool_hdl = pool->s.pool_hdl;
+static inline uint16_t get_mbuf_priv_size(uint16_t len)
+{
+ const uint16_t priv_size = len - sizeof(struct rte_mbuf);
+
+ return _ODP_ROUNDUP_ALIGN(priv_size, RTE_MBUF_PRIV_ALIGN);
+}
+
+static void init_obj_priv_data(struct rte_mempool *mp ODP_UNUSED, void *arg, void *mbuf,
+ unsigned int i)
+{
+ struct priv_data_t *priv_data = arg;
+ struct rte_mbuf *mb = mbuf;
+ _odp_event_hdr_t *event_hdr = (_odp_event_hdr_t *)mbuf;
+ pool_t *pool = priv_data->pool;
+ void *uarea = pool->uarea_base_addr + i * pool->uarea_size;
+ void **obj_uarea;
+
+ if (priv_data->type != ODP_POOL_PACKET)
+ /* No need for headroom in non-packet objects */
+ mb->data_off = 0;
+
+ event_hdr->hdr.index = i;
+ event_hdr->hdr.pool = _odp_pool_handle(pool);
+ event_hdr->hdr.type = priv_data->type;
+ event_hdr->hdr.event_type = priv_data->event_type;
+ event_hdr->hdr.subtype = ODP_EVENT_NO_SUBTYPE;
+
+ switch (priv_data->type) {
+ case ODP_POOL_BUFFER:
+ obj_uarea = &((odp_buffer_hdr_t *)mbuf)->uarea_addr;
break;
+ case ODP_POOL_PACKET:
+ obj_uarea = &((odp_packet_hdr_t *)mbuf)->uarea_addr;
+ break;
+ case ODP_POOL_TIMEOUT:
+ obj_uarea = &((odp_timeout_hdr_t *)mbuf)->uarea_addr;
+ break;
+ case ODP_POOL_VECTOR:
+ obj_uarea = &((odp_event_vector_hdr_t *)mbuf)->uarea_addr;
+ break;
+ default:
+ _ODP_ABORT("Invalid pool type: %i\n", priv_data->type);
}
- return pool_hdl;
+ *obj_uarea = uarea;
+
+ if (uarea && pool->params.uarea_init.init_fn)
+ pool->params.uarea_init.init_fn(uarea, pool->param_uarea_size,
+ pool->params.uarea_init.args, i);
+
+ if (priv_data->type == ODP_POOL_BUFFER || priv_data->type == ODP_POOL_PACKET) {
+ mb->buf_len -= _ODP_EV_ENDMARK_SIZE;
+ _odp_event_endmark_set(_odp_event_from_mbuf(mb));
+ }
}
-odp_pool_t odp_pool_lookup(const char *name)
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2)
{
- struct rte_mempool *mp = NULL;
+ pool_t *pool = get_unused_pool();
+ uint32_t num, cache_size, priv_size, align = 0, data_size, seg_size = 0, uarea_size = 0;
+ const uint32_t hroom = RTE_PKTMBUF_HEADROOM, trailer = _ODP_EV_ENDMARK_SIZE;
+ struct priv_data_t priv_data;
+ struct rte_mempool *mp;
+ char rte_name[RTE_MEMPOOL_NAMESIZE];
odp_pool_t pool_hdl = ODP_POOL_INVALID;
- int i;
- mp = rte_mempool_lookup(name);
- if (mp == NULL)
+ if (pool == NULL)
return ODP_POOL_INVALID;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool_entry_t *pool = get_pool_entry(i);
- LOCK(&pool->s.lock);
- if (pool->s.rte_mempool != mp) {
- UNLOCK(&pool->s.lock);
- continue;
- }
- UNLOCK(&pool->s.lock);
- pool_hdl = pool->s.pool_hdl;
+ memset(&pool->memset_mark, 0, sizeof(pool_t) - offsetof(pool_t, memset_mark));
+ priv_data.pool = pool;
+ priv_data.type = params->type;
+
+ if (name)
+ snprintf(pool->name, ODP_POOL_NAME_LEN, "%s", name);
+
+ switch (params->type) {
+ case ODP_POOL_BUFFER:
+ num = params->buf.num;
+ cache_size = params->buf.cache_size;
+ priv_size = get_mbuf_priv_size(sizeof(odp_buffer_hdr_t));
+ align = params->buf.align > 0 ? params->buf.align : ODP_CACHE_LINE_SIZE;
+ align = _ODP_MAX(align, (uint32_t)CONFIG_BUFFER_ALIGN_MIN);
+ data_size = _ODP_ROUNDUP_ALIGN(params->buf.size + trailer, align);
+ uarea_size = params->buf.uarea_size;
+ priv_data.event_type = ODP_EVENT_BUFFER;
+ break;
+ case ODP_POOL_PACKET:
+ num = params->pkt.num;
+ cache_size = params->pkt.cache_size;
+ priv_size = get_mbuf_priv_size(sizeof(odp_packet_hdr_t));
+ align = params->pkt.align > 0 ? params->pkt.align : CONFIG_BUFFER_ALIGN_MIN;
+ align = _ODP_MAX(align, (uint32_t)CONFIG_BUFFER_ALIGN_MIN);
+ data_size = _ODP_MAX(params->pkt.seg_len, (uint32_t)CONFIG_PACKET_SEG_LEN_MIN);
+ data_size = _ODP_MAX(data_size, params->pkt.len);
+
+ if (params->pkt.max_len > 0 &&
+ ROUNDUP_DIV(params->pkt.max_len, data_size) > num)
+ data_size = ROUNDUP_DIV(params->pkt.max_len, num);
+
+ data_size = _ODP_ROUNDUP_ALIGN(hroom + data_size + CONFIG_PACKET_TAILROOM +
+ trailer, align);
+ /* Segment size minus headroom might be rounded down by the driver (e.g. ixgbe) to
+ * the nearest multiple of 1024. Round it up here to make sure the requested size
+ * is still going to fit without segmentation. */
+ data_size = _ODP_ROUNDUP_ALIGN(data_size - hroom, CONFIG_PACKET_SEG_LEN_MIN) +
+ hroom;
+ data_size = _ODP_MIN(data_size, (uint16_t)UINT16_MAX);
+ seg_size = data_size - trailer;
+ uarea_size = params->pkt.uarea_size;
+ priv_data.event_type = ODP_EVENT_PACKET;
+ break;
+ case ODP_POOL_TIMEOUT:
+ num = params->tmo.num;
+ cache_size = params->tmo.cache_size;
+ priv_size = get_mbuf_priv_size(sizeof(odp_timeout_hdr_t));
+ data_size = 0;
+ uarea_size = params->tmo.uarea_size;
+ priv_data.event_type = ODP_EVENT_TIMEOUT;
+ break;
+ case ODP_POOL_VECTOR:
+ num = params->vector.num;
+ cache_size = params->vector.cache_size;
+ priv_size = get_mbuf_priv_size(sizeof(odp_event_vector_hdr_t) +
+ params->vector.max_size * sizeof(odp_packet_t));
+ data_size = 0;
+ uarea_size = params->vector.uarea_size;
+ priv_data.event_type = ODP_EVENT_PACKET_VECTOR;
+ break;
+ default:
+ UNLOCK(&pool->lock);
+ _ODP_ERR("Bad pool type %i\n", params->type);
+ return ODP_POOL_INVALID;
+ }
+
+ _ODP_DBG("Pool type: %d, name: %s, num: %u, priv area size: %u, alignment: %u, "
+ "data room size: %u, segment size: %u, uarea_size: %u\n", priv_data.type,
+ pool->name, num, priv_size, align, data_size, seg_size, uarea_size);
+
+ if (priv_size > UINT16_MAX || data_size > UINT16_MAX) {
+ UNLOCK(&pool->lock);
+ _ODP_ERR("Invalid element size(s), private: %u, data room: %u (max: %u)\n",
+ priv_size, data_size, UINT16_MAX);
+ return ODP_POOL_INVALID;
+ }
+
+ format_pool_name(pool->name, rte_name);
+ mp = rte_pktmbuf_pool_create(rte_name, num, calc_cache_size(num, cache_size), priv_size,
+ data_size, rte_socket_id());
+
+ if (mp == NULL) {
+ UNLOCK(&pool->lock);
+ _ODP_ERR("Cannot init DPDK mbuf pool: %s\n", rte_strerror(rte_errno));
+ return ODP_POOL_INVALID;
}
+
+ if (reserve_uarea(pool, uarea_size, num)) {
+ UNLOCK(&pool->lock);
+ _ODP_ERR("User area SHM reserve failed\n");
+ rte_mempool_free(mp);
+ return ODP_POOL_INVALID;
+ }
+
+ pool->rte_mempool = mp;
+ pool->seg_len = seg_size;
+ pool->type_2 = type_2;
+ pool->type = params->type;
+ pool->params = *params;
+ pool->trailer_size = trailer;
+
+ rte_mempool_obj_iter(mp, init_obj_priv_data, &priv_data);
+
+ UNLOCK(&pool->lock);
+ pool_hdl = _odp_pool_handle(pool);
+
return pool_hdl;
}
-
-static odp_buffer_t buffer_alloc(pool_entry_t *pool)
+odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
{
- odp_buffer_t buffer;
+ if (check_params(params))
+ return ODP_POOL_INVALID;
- if (odp_unlikely(pool->s.params.type != ODP_POOL_BUFFER &&
- pool->s.params.type != ODP_POOL_TIMEOUT)) {
- rte_errno = EINVAL;
- return ODP_BUFFER_INVALID;
- }
+ return _odp_pool_create(name, params, params->type);
+}
- buffer = (odp_buffer_t)rte_ctrlmbuf_alloc(pool->s.rte_mempool);
+odp_pool_t odp_pool_lookup(const char *name)
+{
+ uint32_t i;
+ pool_t *pool;
- if ((struct rte_mbuf *)buffer == NULL) {
- rte_errno = ENOMEM;
- return ODP_BUFFER_INVALID;
- } else {
- buf_hdl_to_hdr(buffer)->next = NULL;
- return buffer;
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool = _odp_pool_entry_from_idx(i);
+
+ LOCK(&pool->lock);
+ if (strcmp(name, pool->name) == 0) {
+ /* Found it */
+ UNLOCK(&pool->lock);
+ return _odp_pool_handle(pool);
+ }
+ UNLOCK(&pool->lock);
}
+
+ return ODP_POOL_INVALID;
}
odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
{
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
+ odp_event_t event;
+ pool_t *pool;
+
+ _ODP_ASSERT(ODP_POOL_INVALID != pool_hdl);
- return buffer_alloc(pool);
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
+
+ event = _odp_event_alloc(pool);
+ if (odp_likely(event != ODP_EVENT_INVALID))
+ return odp_buffer_from_event(event);
+
+ return ODP_BUFFER_INVALID;
}
int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
{
- int i;
- pool_entry_t *pool = odp_pool_to_entry(pool_hdl);
+ pool_t *pool;
- for (i = 0; i < num; i++) {
- buf[i] = buffer_alloc(pool);
- if (buf[i] == ODP_BUFFER_INVALID)
- return rte_errno == ENOMEM ? i : -EINVAL;
+ _ODP_ASSERT(ODP_POOL_INVALID != pool_hdl);
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
+
+ return _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)buf, num);
+}
+
+static const char *get_short_type_str(odp_pool_type_t type)
+{
+ switch (type) {
+ case ODP_POOL_BUFFER:
+ return "B";
+ case ODP_POOL_PACKET:
+ return "P";
+ case ODP_POOL_TIMEOUT:
+ return "T";
+ case ODP_POOL_VECTOR:
+ return "V";
+ case ODP_POOL_DMA_COMPL:
+ return "D";
+ case ODP_POOL_ML_COMPL:
+ return "M";
+ default:
+ return "-";
}
- return i;
}
-void odp_buffer_free(odp_buffer_t buf)
+void odp_pool_print(odp_pool_t pool_hdl)
{
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf;
+ pool_t *pool = _odp_pool_entry(pool_hdl);
- rte_ctrlmbuf_free(mbuf);
+ rte_mempool_dump(stdout, pool->rte_mempool);
}
-void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
+void odp_pool_print_all(void)
{
- int i;
+ uint64_t available;
+ uint32_t i, index, tot, cache_size;
+ uint32_t elt_size, elt_len = 0;
+ uint8_t type, ext;
+ const int col_width = 24;
+ const char *name, *type_c;
+
+ _ODP_PRINT("\nList of all pools\n");
+ _ODP_PRINT("-----------------\n");
+ _ODP_PRINT(" idx %-*s type free tot cache elt_len ext\n", col_width, "name");
+
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
+
+ LOCK(&pool->lock);
+
+ if (pool->rte_mempool == NULL) {
+ UNLOCK(&pool->lock);
+ continue;
+ }
+
+ available = rte_mempool_avail_count(pool->rte_mempool);
+ cache_size = pool->rte_mempool->cache_size;
+ ext = pool->pool_ext;
+ index = pool->pool_idx;
+ name = pool->name;
+ tot = pool->rte_mempool->size;
+ type = pool->type;
+ elt_size = pool->rte_mempool->elt_size;
+
+ UNLOCK(&pool->lock);
- for (i = 0; i < num; i++) {
- struct rte_mbuf *mbuf = (struct rte_mbuf *)buf[i];
+ if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET)
+ elt_len = elt_size;
- rte_ctrlmbuf_free(mbuf);
+ type_c = get_short_type_str(pool->type_2);
+
+ _ODP_PRINT("%4u %-*s %s %6" PRIu64 " %6" PRIu32 " %6" PRIu32 " %8" PRIu32 " "
+ "%" PRIu8 "\n", index, col_width, name, type_c, available, tot,
+ cache_size, elt_len, ext);
}
+ _ODP_PRINT("\n");
}
-void odp_pool_print(odp_pool_t pool_hdl)
+static void mempool_addr_range(struct rte_mempool *mp ODP_UNUSED, void *opaque,
+ struct rte_mempool_memhdr *memhdr,
+ unsigned int mem_idx ODP_UNUSED)
{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- rte_mempool_dump(stdout, pool->s.rte_mempool);
+ struct mem_cb_arg_t *args = (struct mem_cb_arg_t *)opaque;
+ uintptr_t min_addr = (uintptr_t)memhdr->addr;
+ uintptr_t max_addr = min_addr + memhdr->len - 1;
+
+ if (!args->min_data_addr || min_addr < args->min_data_addr)
+ args->min_data_addr = min_addr;
+ if (!args->max_data_addr || max_addr > args->max_data_addr)
+ args->max_data_addr = max_addr;
}
int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+ struct mem_cb_arg_t args;
if (pool == NULL || info == NULL)
return -1;
- info->name = pool->s.name;
- info->params = pool->s.params;
+ memset(info, 0, sizeof(odp_pool_info_t));
+
+ info->type = pool->type_2;
+ info->name = pool->name;
+
+ if (pool->pool_ext) {
+ info->pool_ext = 1;
+ info->pool_ext_param = pool->ext_param;
+
+ } else if (pool->type_2 == ODP_POOL_DMA_COMPL) {
+ info->dma_pool_param.num = pool->params.buf.num;
+ info->dma_pool_param.uarea_size = pool->params.buf.uarea_size;
+ info->dma_pool_param.cache_size = pool->params.buf.cache_size;
+
+ } else if (pool->type_2 == ODP_POOL_ML_COMPL) {
+ info->ml_pool_param.num = pool->params.buf.num;
+ info->ml_pool_param.uarea_size = pool->params.buf.uarea_size;
+ info->ml_pool_param.cache_size = pool->params.buf.cache_size;
+
+ } else {
+ info->params = pool->params;
+ }
+
+ if (pool->type == ODP_POOL_PACKET)
+ info->pkt.max_num = pool->rte_mempool->size;
+
+ memset(&args, 0, sizeof(struct mem_cb_arg_t));
+ rte_mempool_mem_iter(pool->rte_mempool, mempool_addr_range, &args);
+ info->min_data_addr = args.min_data_addr;
+ info->max_data_addr = args.max_data_addr;
return 0;
}
-/*
- * DPDK doesn't support pool destroy at the moment. Instead we should improve
- * odp_pool_create() to try to reuse pools
- */
int odp_pool_destroy(odp_pool_t pool_hdl)
{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- struct rte_mempool *mp;
+ pool_t *pool;
- if ((mp = rte_mempool_lookup(pool->s.name)) == NULL) {
- ODP_ERR("Can't find pool with this name!\n");
+ if (pool_hdl == ODP_POOL_INVALID) {
+ _ODP_ERR("Invalid pool handle\n");
return -1;
}
- rte_mempool_free(mp);
- pool->s.rte_mempool = NULL;
- /* The pktio supposed to be closed by now */
- return 0;
-}
+ pool = _odp_pool_entry(pool_hdl);
+ if (pool->rte_mempool == NULL) {
+ _ODP_ERR("No rte_mempool handle available\n");
+ return -1;
+ }
-odp_pool_t odp_buffer_pool(odp_buffer_t buf)
-{
- return buf_hdl_to_hdr(buf)->pool_hdl;
+ rte_mempool_free(pool->rte_mempool);
+ pool->rte_mempool = NULL;
+
+ if (pool->uarea_shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->uarea_shm);
+
+ return 0;
}
void odp_pool_param_init(odp_pool_param_t *params)
{
memset(params, 0, sizeof(odp_pool_param_t));
+ params->pkt.headroom = RTE_PKTMBUF_HEADROOM;
+ params->buf.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ params->pkt.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ params->tmo.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ params->vector.cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
}
uint64_t odp_pool_to_u64(odp_pool_t hdl)
@@ -645,3 +1008,498 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl)
return _odp_pri(hdl);
}
+unsigned int odp_pool_max_index(void)
+{
+ return CONFIG_POOLS - 1;
+}
+
+int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ _ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+ if (odp_unlikely(stats == NULL)) {
+ _ODP_ERR("Output buffer NULL\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ /* Zero everything else but per thread statistics */
+ memset(stats, 0, offsetof(odp_pool_stats_t, thread));
+
+ if (pool->params.stats.bit.available)
+ stats->available = rte_mempool_avail_count(pool->rte_mempool);
+
+ return 0;
+}
+
+int odp_pool_stats_selected(odp_pool_t pool_hdl, odp_pool_stats_selected_t *stats,
+ const odp_pool_stats_opt_t *opt)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ _ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+ if (odp_unlikely(stats == NULL)) {
+ _ODP_ERR("Output buffer NULL\n");
+ return -1;
+ }
+ if (odp_unlikely(opt == NULL)) {
+ _ODP_ERR("Pool counters NULL\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ if (odp_unlikely(opt->all & ~pool->params.stats.all)) {
+ _ODP_ERR("Trying to read disabled counter\n");
+ return -1;
+ }
+
+ if (opt->bit.available)
+ stats->available = rte_mempool_avail_count(pool->rte_mempool);
+
+ return 0;
+}
+
+int odp_pool_stats_reset(odp_pool_t pool_hdl ODP_UNUSED)
+{
+ return 0;
+}
+
+/*
+ * No actual head pointer alignment requirement. Anyway, require even byte
+ * address.
+ */
+#define EXT_MIN_HEAD_ALIGN 2
+
+/*
+ * Round up the space we reserve for objhdr up to cache line size. The rte_mbuf
+ * that comes after this must be cache line aligned.
+ */
+#define SIZEOF_OBJHDR _ODP_ROUNDUP_CACHE_LINE(sizeof(struct rte_mempool_objhdr))
+
+int odp_pool_ext_capability(odp_pool_type_t type,
+ odp_pool_ext_capability_t *capa)
+{
+ odp_pool_stats_opt_t supported_stats;
+
+ _ODP_ASSERT(capa != NULL);
+
+ switch (type) {
+ case ODP_POOL_PACKET:
+ break;
+ case ODP_POOL_BUFFER:
+ case ODP_POOL_TIMEOUT:
+ case ODP_POOL_VECTOR:
+ case ODP_POOL_DMA_COMPL:
+ case ODP_POOL_ML_COMPL:
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+ return 0;
+ default:
+ _ODP_ERR("Invalid pool type: %d\n", type);
+ return -1;
+ }
+
+ supported_stats.all = 0;
+
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+
+ capa->type = type;
+ capa->max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
+ capa->min_cache_size = 0;
+ capa->max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+ capa->stats.all = supported_stats.all;
+
+ capa->pkt.max_num_buf = _odp_pool_glb->config.pkt_max_num;
+ capa->pkt.max_buf_size = MAX_SIZE;
+ capa->pkt.odp_header_size = SIZEOF_OBJHDR + sizeof(odp_packet_hdr_t);
+ capa->pkt.odp_trailer_size = _ODP_EV_ENDMARK_SIZE;
+ capa->pkt.min_mem_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_buf_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_head_align = EXT_MIN_HEAD_ALIGN;
+ capa->pkt.buf_size_aligned = 0;
+ capa->pkt.max_headroom = RTE_PKTMBUF_HEADROOM;
+ capa->pkt.max_headroom_size = RTE_PKTMBUF_HEADROOM;
+ capa->pkt.max_segs_per_pkt = PKT_MAX_SEGS;
+ capa->pkt.max_uarea_size = MAX_UAREA_SIZE;
+ capa->pkt.uarea_persistence = true;
+
+ return 0;
+}
+
+void odp_pool_ext_param_init(odp_pool_type_t type, odp_pool_ext_param_t *param)
+{
+ uint32_t default_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
+
+ memset(param, 0, sizeof(odp_pool_ext_param_t));
+
+ if (type != ODP_POOL_PACKET)
+ return;
+
+ param->type = ODP_POOL_PACKET;
+ param->cache_size = default_cache_size;
+ param->pkt.headroom = RTE_PKTMBUF_HEADROOM;
+}
+
+static int check_pool_ext_param(const odp_pool_ext_param_t *param)
+{
+ odp_pool_ext_capability_t capa;
+ uint32_t head_offset = SIZEOF_OBJHDR + sizeof(odp_packet_hdr_t) +
+ param->pkt.app_header_size;
+
+ if (param->type != ODP_POOL_PACKET) {
+ _ODP_ERR("Pool type not supported\n");
+ return -1;
+ }
+
+ if (odp_pool_ext_capability(param->type, &capa)) {
+ _ODP_ERR("Capa failed\n");
+ return -1;
+ }
+
+ if (param->cache_size > capa.max_cache_size) {
+ _ODP_ERR("Too large cache size %u\n", param->cache_size);
+ return -1;
+ }
+
+ if (param->stats.all != capa.stats.all) {
+ _ODP_ERR("Pool statistics not supported\n");
+ return -1;
+ }
+
+ if (param->pkt.num_buf > capa.pkt.max_num_buf) {
+ _ODP_ERR("Too many packet buffers\n");
+ return -1;
+ }
+
+ if (param->pkt.buf_size > capa.pkt.max_buf_size) {
+ _ODP_ERR("Too large packet buffer size %u\n", param->pkt.buf_size);
+ return -1;
+ }
+
+ if (param->pkt.uarea_size > capa.pkt.max_uarea_size) {
+ _ODP_ERR("Too large user area size %u\n", param->pkt.uarea_size);
+ return -1;
+ }
+
+ if (param->pkt.headroom > capa.pkt.max_headroom) {
+ _ODP_ERR("Too large headroom size\n");
+ return -1;
+ }
+
+ if (head_offset % capa.pkt.min_head_align) {
+ _ODP_ERR("Head pointer not %u byte aligned\n", capa.pkt.min_head_align);
+ return -1;
+ }
+
+ return 0;
+}
+
+odp_pool_t odp_pool_ext_create(const char *name,
+ const odp_pool_ext_param_t *params)
+{
+ odp_pool_t pool_hdl = ODP_POOL_INVALID;
+ unsigned int i, cache_size;
+ size_t hdr_size, priv_size;
+ pool_t *pool;
+ uint32_t buf_size, blk_size;
+ char pool_name[ODP_POOL_NAME_LEN];
+ char rte_name[RTE_MEMPOOL_NAMESIZE];
+
+ if (check_pool_ext_param(params))
+ return ODP_POOL_INVALID;
+
+ if (name == NULL) {
+ pool_name[0] = 0;
+ } else {
+ strncpy(pool_name, name, ODP_POOL_NAME_LEN - 1);
+ pool_name[ODP_POOL_NAME_LEN - 1] = 0;
+ }
+
+ /* Find an unused buffer pool slot and initialize it as requested */
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ uint32_t num;
+ struct rte_mempool *mp;
+
+ pool = _odp_pool_entry_from_idx(i);
+
+ LOCK(&pool->lock);
+ if (pool->rte_mempool != NULL) {
+ UNLOCK(&pool->lock);
+ continue;
+ }
+
+ memset(&pool->memset_mark, 0,
+ sizeof(pool_t) - offsetof(pool_t, memset_mark));
+
+ hdr_size = sizeof(odp_packet_hdr_t) + params->pkt.app_header_size;
+ priv_size = hdr_size - sizeof(struct rte_mbuf);
+ buf_size = params->pkt.buf_size;
+ blk_size = buf_size - SIZEOF_OBJHDR - hdr_size;
+ num = params->pkt.num_buf;
+
+ _ODP_DBG("type: packet, name: %s, num: %u, len: %u, blk_size: %u, "
+ "uarea_size: %u, hdr_size: %zu\n",
+ pool_name, num, buf_size, blk_size,
+ params->pkt.uarea_size, hdr_size);
+
+ cache_size = params->cache_size;
+ cache_size = calc_cache_size(num, cache_size);
+
+ format_pool_name(pool_name, rte_name);
+
+ mp = rte_mempool_create_empty(
+ rte_name, num, blk_size, cache_size,
+ sizeof(struct rte_pktmbuf_pool_private),
+ rte_socket_id(), 0);
+
+ if (mp == NULL) {
+ _ODP_ERR("Failed to create empty DPDK packet pool\n");
+ goto error;
+ }
+
+ if (rte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(),
+ NULL)) {
+ _ODP_ERR("Failed setting mempool operations\n");
+ rte_mempool_free(mp);
+ goto error;
+ }
+
+ struct rte_pktmbuf_pool_private mbp_priv = {
+ .mbuf_data_room_size = blk_size,
+ .mbuf_priv_size = priv_size,
+ };
+
+ rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+ /*
+ * Would like to call rte_mempool_ops_alloc(), but it doesn't
+ * appear to be included in the libraries provided by Ubuntu or
+ * Fedora.
+ */
+ if (rte_mempool_get_ops(mp->ops_index)->alloc(mp)) {
+ _ODP_ERR("Mempool alloc operation failed\n");
+ rte_mempool_free(mp);
+ goto error;
+ }
+
+ pool->ext_param = *params;
+ pool->ext_head_offset = hdr_size;
+ pool->trailer_size = _ODP_EV_ENDMARK_SIZE;
+ pool->num = num;
+ pool->num_populated = 0;
+ pool->params.pkt.uarea_size = params->pkt.uarea_size;
+ pool->params.type = params->type;
+ pool->pool_ext = 1;
+ pool->seg_len = blk_size - pool->trailer_size;
+ pool->type = params->type;
+ strcpy(pool->name, pool_name);
+
+ if (reserve_uarea(pool, params->pkt.uarea_size, num)) {
+ _ODP_ERR("User area SHM reserve failed\n");
+ rte_mempool_free(mp);
+ goto error;
+ }
+
+ pool->rte_mempool = mp;
+ _ODP_DBG("Header/element/trailer size: %u/%u/%u, total pool size: %lu\n",
+ mp->header_size, mp->elt_size, mp->trailer_size,
+ (unsigned long)((mp->header_size + mp->elt_size +
+ mp->trailer_size) * num));
+ UNLOCK(&pool->lock);
+ pool_hdl = _odp_pool_handle(pool);
+ break;
+ }
+
+ return pool_hdl;
+
+error:
+ UNLOCK(&pool->lock);
+ return ODP_POOL_INVALID;
+}
+
+/* External memory element initializer
+ *
+ * This is a combination of rte_pktmbuf_init in rte_mbuf.c and testpmd_mbuf_ctor in testpmd.c
+ */
+static void init_ext_obj(struct rte_mempool *mp, void *arg, void *mbuf, unsigned int i)
+{
+ struct mbuf_ctor_arg *mb_ctor_arg = arg;
+ struct rte_mbuf *mb = mbuf;
+ pool_t *pool = mb_ctor_arg->pool;
+ void *uarea = pool->uarea_base_addr + i * pool->uarea_size;
+ _odp_event_hdr_t *event_hdr = (_odp_event_hdr_t *)mbuf;
+ void **obj_uarea;
+ odp_pool_ext_param_t *p = &pool->ext_param;
+ uint32_t app_hdr_offset = sizeof(odp_packet_hdr_t);
+ uint32_t app_hdr_size = p->pkt.app_header_size;
+ uint32_t buf_size = p->pkt.buf_size;
+
+ RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
+ memset(mb, 0, app_hdr_offset);
+ memset((uint8_t *)mb + app_hdr_offset + app_hdr_size, 0,
+ buf_size - app_hdr_offset - app_hdr_size);
+ /* Start of buffer is just after the ODP type specific header
+ * which contains in the very beginning the rte_mbuf struct */
+ mb->buf_addr = (char *)mb + mb_ctor_arg->seg_buf_offset;
+ mb->buf_iova = rte_mempool_virt2iova(mb) + mb_ctor_arg->seg_buf_offset;
+ mb->buf_len = mb_ctor_arg->seg_buf_size;
+ mb->priv_size = rte_pktmbuf_priv_size(mp);
+
+ /* Keep some headroom between start of buffer and data */
+ if (mb_ctor_arg->type == ODP_POOL_PACKET) {
+ mb->data_off = RTE_PKTMBUF_HEADROOM;
+ mb->port = 0xff;
+ mb->vlan_tci = 0;
+ } else {
+ mb->data_off = 0;
+ }
+
+ /* Init some constant fields */
+ mb->pool = mp;
+ mb->nb_segs = 1;
+ mb->ol_flags = 0;
+ rte_mbuf_refcnt_set(mb, 1);
+ mb->next = NULL;
+ /* Save index, might be useful for debugging purposes */
+ event_hdr->hdr.index = i;
+ event_hdr->hdr.pool = _odp_pool_handle(pool);
+ event_hdr->hdr.type = mb_ctor_arg->type;
+ event_hdr->hdr.event_type = mb_ctor_arg->event_type;
+ event_hdr->hdr.subtype = ODP_EVENT_NO_SUBTYPE;
+
+ switch (mb_ctor_arg->type) {
+ case ODP_POOL_BUFFER:
+ obj_uarea = &((odp_buffer_hdr_t *)mbuf)->uarea_addr;
+ break;
+ case ODP_POOL_PACKET:
+ obj_uarea = &((odp_packet_hdr_t *)mbuf)->uarea_addr;
+ break;
+ case ODP_POOL_TIMEOUT:
+ obj_uarea = &((odp_timeout_hdr_t *)mbuf)->uarea_addr;
+ break;
+ case ODP_POOL_VECTOR:
+ obj_uarea = &((odp_event_vector_hdr_t *)mbuf)->uarea_addr;
+ break;
+ default:
+ _ODP_ABORT("Invalid pool type: %i\n", mb_ctor_arg->type);
+ }
+
+ *obj_uarea = uarea;
+
+ if (uarea && pool->ext_param.uarea_init.init_fn)
+ pool->ext_param.uarea_init.init_fn(uarea, pool->param_uarea_size,
+ pool->ext_param.uarea_init.args, i);
+
+ if (mb_ctor_arg->type == ODP_POOL_BUFFER || mb_ctor_arg->type == ODP_POOL_PACKET) {
+ mb->buf_len -= _ODP_EV_ENDMARK_SIZE;
+ _odp_event_endmark_set(_odp_event_from_mbuf(mb));
+ }
+}
+
+int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size,
+ uint32_t num, uint32_t flags)
+{
+ pool_t *pool;
+ uint32_t num_populated;
+
+ if (pool_hdl == ODP_POOL_INVALID) {
+ _ODP_ERR("Bad pool handle\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ if (pool->type != ODP_POOL_PACKET || pool->pool_ext == 0) {
+ _ODP_ERR("Bad pool type\n");
+ return -1;
+ }
+
+ if (buf_size != pool->ext_param.pkt.buf_size) {
+ _ODP_ERR("Bad buffer size\n");
+ return -1;
+ }
+
+ num_populated = pool->num_populated;
+
+ if (num_populated + num > pool->num) {
+ _ODP_ERR("Trying to over populate the pool\n");
+ return -1;
+ }
+
+ if ((num_populated + num == pool->num) &&
+ !(flags & ODP_POOL_POPULATE_DONE)) {
+ _ODP_ERR("Missing ODP_POOL_POPULATE_DONE flag\n");
+ return -1;
+ }
+
+ if ((num_populated + num < pool->num) && flags) {
+ _ODP_ERR("Unexpected flags: 0x%x\n", flags);
+ return -1;
+ }
+
+ struct rte_mempool *mp = pool->rte_mempool;
+
+ for (uint32_t i = 0; i < num; i++) {
+ struct rte_mempool_objhdr *hdr;
+ struct rte_mempool_memhdr *memhdr;
+ struct rte_mbuf *mb =
+ (struct rte_mbuf *)((uintptr_t)buf[i] + SIZEOF_OBJHDR);
+ const odp_pool_ext_param_t *params = &pool->ext_param;
+ struct mbuf_ctor_arg mb_ctor_arg;
+
+ /*
+ * rte_mbuf must be cache line aligned, so that is our
+ * requirement also for buffers.
+ */
+ if ((uintptr_t)buf[i] & (ODP_CACHE_LINE_SIZE - 1)) {
+ _ODP_ERR("Buffer address (%p) does not meet alignment requirements\n",
+ buf[i]);
+ return -1;
+ }
+
+ if (rte_mempool_ops_enqueue_bulk(mp, (void *const *)&mb, 1) < 0) {
+ _ODP_ERR("Failed to enqueue buffer to rte_mempool\n");
+ return -1;
+ }
+
+ /*
+ * Since we don't know anything about the caller's memory areas,
+ * or even the page size, make a memhdr for each buffer.
+ */
+ memhdr = rte_zmalloc(NULL, sizeof(*memhdr), 0);
+
+ if (!memhdr) {
+ _ODP_ERR("Failed to allocate rte_mempool_memhdr\n");
+ return -1;
+ }
+
+ memhdr->mp = mp;
+ memhdr->addr = mb;
+ memhdr->iova = rte_mem_virt2iova(mb);
+ memhdr->len = buf_size;
+ STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
+ mp->nb_mem_chunks++;
+
+ hdr = RTE_PTR_SUB(mb, sizeof(*hdr));
+ hdr->mp = mp;
+ hdr->iova = rte_mem_virt2iova(mb);
+ STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
+ mp->populated_size++;
+
+ mb_ctor_arg.seg_buf_offset = sizeof(odp_packet_hdr_t) +
+ params->pkt.app_header_size;
+ mb_ctor_arg.seg_buf_size = pool->seg_len + pool->trailer_size;
+ mb_ctor_arg.type = params->type;
+ mb_ctor_arg.event_type = pool->type;
+ mb_ctor_arg.pool = pool;
+ init_ext_obj(mp, (void *)&mb_ctor_arg, (void *)mb, pool->num_populated);
+ pool->num_populated++;
+ }
+
+ return 0;
+}
diff --git a/platform/linux-dpdk/odp_queue_basic.c b/platform/linux-dpdk/odp_queue_basic.c
new file mode 100644
index 000000000..4b9dee891
--- /dev/null
+++ b/platform/linux-dpdk/odp_queue_basic.c
@@ -0,0 +1,1257 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/queue.h>
+#include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/std_types.h>
+#include <odp/api/sync.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/traffic_mngr.h>
+
+#include <odp/api/plat/queue_inline_types.h>
+#include <odp/api/plat/sync_inlines.h>
+#include <odp/api/plat/ticketlock_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
+#include <odp_timer_internal.h>
+
+#include <inttypes.h>
+#include <string.h>
+
+#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->lock))
+#define UNLOCK(queue_ptr) odp_ticketlock_unlock(&((queue_ptr)->lock))
+#define LOCK_INIT(queue_ptr) odp_ticketlock_init(&((queue_ptr)->lock))
+
+#define MIN_QUEUE_SIZE (32 - 1)
+#define MAX_QUEUE_SIZE ((1 * 1024 * 1024) - 1)
+
+static int queue_init(queue_entry_t *queue, const char *name,
+ const odp_queue_param_t *param);
+
+queue_global_t *_odp_queue_glb;
+extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
+
+static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
+{
+ memset(capa, 0, sizeof(odp_queue_capability_t));
+
+ /* Reserve some queues for internal use */
+ capa->max_queues = CONFIG_MAX_QUEUES - CONFIG_INTERNAL_QUEUES;
+ capa->plain.max_num = CONFIG_MAX_PLAIN_QUEUES;
+ capa->plain.max_size = _odp_queue_glb->config.max_queue_size;
+ capa->plain.lockfree.max_num = _odp_queue_glb->queue_lf_num;
+ capa->plain.lockfree.max_size = _odp_queue_glb->queue_lf_size;
+
+ return 0;
+}
+
+static int read_config_file(queue_global_t *_odp_queue_glb)
+{
+ const char *str;
+ uint32_t val_u32;
+ int val = 0;
+
+ _ODP_PRINT("Queue config:\n");
+
+ str = "queue_basic.max_queue_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ val_u32 = val;
+
+ if (val_u32 > MAX_QUEUE_SIZE || val_u32 < MIN_QUEUE_SIZE) {
+ _ODP_ERR("Bad value %s = %u\n", str, val_u32);
+ return -1;
+ }
+
+ _odp_queue_glb->config.max_queue_size = val_u32;
+ _ODP_PRINT(" %s: %u\n", str, val_u32);
+
+ str = "queue_basic.default_queue_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ val_u32 = val;
+
+ if (val_u32 > _odp_queue_glb->config.max_queue_size ||
+ val_u32 < MIN_QUEUE_SIZE) {
+ _ODP_ERR("Bad value %s = %u\n", str, val_u32);
+ return -1;
+ }
+
+ _odp_queue_glb->config.default_queue_size = val_u32;
+ _ODP_PRINT(" %s: %u\n\n", str, val_u32);
+
+ return 0;
+}
+
+static int queue_init_global(void)
+{
+ uint32_t i;
+ odp_shm_t shm;
+ uint32_t lf_size = 0;
+ queue_lf_func_t *lf_func;
+ odp_queue_capability_t capa;
+
+ _ODP_DBG("Starts...\n");
+
+ /* Fill in queue entry field offsets for inline functions */
+ memset(&_odp_queue_inline_offset, 0,
+ sizeof(_odp_queue_inline_offset_t));
+ _odp_queue_inline_offset.context = offsetof(queue_entry_t, param.context);
+
+ shm = odp_shm_reserve("_odp_queue_basic_global",
+ sizeof(queue_global_t),
+ sizeof(queue_entry_t), 0);
+
+ _odp_queue_glb = odp_shm_addr(shm);
+
+ if (_odp_queue_glb == NULL)
+ return -1;
+
+ memset(_odp_queue_glb, 0, sizeof(queue_global_t));
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ /* init locks */
+ queue_entry_t *queue = qentry_from_index(i);
+
+ LOCK_INIT(queue);
+ queue->index = i;
+ queue->handle = (odp_queue_t)queue;
+ }
+
+ if (read_config_file(_odp_queue_glb)) {
+ odp_shm_free(shm);
+ return -1;
+ }
+
+ _odp_queue_glb->queue_gbl_shm = shm;
+ _odp_queue_glb->queue_ring_shm = ODP_SHM_INVALID;
+ _odp_queue_glb->ring_data = NULL;
+
+ lf_func = &_odp_queue_glb->queue_lf_func;
+ _odp_queue_glb->queue_lf_num = _odp_queue_lf_init_global(&lf_size, lf_func);
+ _odp_queue_glb->queue_lf_size = lf_size;
+
+ queue_capa(&capa, 0);
+
+ _ODP_DBG("... done.\n");
+ _ODP_DBG(" queue_entry_t size %zu\n", sizeof(queue_entry_t));
+ _ODP_DBG(" max num queues %u\n", capa.max_queues);
+ _ODP_DBG(" max queue size %u\n", capa.plain.max_size);
+ _ODP_DBG(" max num lockfree %u\n", capa.plain.lockfree.max_num);
+ _ODP_DBG(" max lockfree size %u\n\n", capa.plain.lockfree.max_size);
+
+ return 0;
+}
+
+static int queue_init_local(void)
+{
+ return 0;
+}
+
+static int queue_term_local(void)
+{
+ return 0;
+}
+
+static int queue_term_global(void)
+{
+ int ret = 0;
+ queue_entry_t *queue;
+ int i;
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ queue = qentry_from_index(i);
+ LOCK(queue);
+ if (queue->status != QUEUE_STATUS_FREE) {
+ _ODP_ERR("Not destroyed queue: %s\n", queue->name);
+ ret = -1;
+ }
+ UNLOCK(queue);
+ }
+
+ _odp_queue_lf_term_global();
+
+ if (odp_shm_free(_odp_queue_glb->queue_gbl_shm)) {
+ _ODP_ERR("shm free failed\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int queue_capability(odp_queue_capability_t *capa)
+{
+ return queue_capa(capa, 1);
+}
+
+static odp_queue_type_t queue_type(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->type;
+}
+
+static odp_schedule_sync_t queue_sched_type(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.sync;
+}
+
+static odp_schedule_prio_t queue_sched_prio(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.prio;
+}
+
+static odp_schedule_group_t queue_sched_group(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.group;
+}
+
+static uint32_t queue_lock_count(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+ queue->param.sched.lock_count : 0;
+}
+
+/* Create a queue.
+ *
+ * Validates parameters (priority range for scheduled queues, size limits
+ * per blocking mode), then claims the first free entry in the index range
+ * reserved for the requested type. Returns ODP_QUEUE_INVALID on any
+ * failure. NULL param selects defaults via odp_queue_param_init(). */
+static odp_queue_t queue_create(const char *name,
+				const odp_queue_param_t *param)
+{
+	uint32_t i;
+	uint32_t max_idx;
+	queue_entry_t *queue;
+	void *queue_lf;
+	odp_queue_type_t type;
+	odp_queue_param_t default_param;
+	odp_queue_t handle = ODP_QUEUE_INVALID;
+
+	if (param == NULL) {
+		odp_queue_param_init(&default_param);
+		param = &default_param;
+	}
+
+	type = param->type;
+
+	if (type == ODP_QUEUE_TYPE_SCHED) {
+		if (param->sched.prio < odp_schedule_min_prio() ||
+		    param->sched.prio > odp_schedule_max_prio()) {
+			_ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
+			return ODP_QUEUE_INVALID;
+		}
+	}
+
+	if (param->nonblocking == ODP_BLOCKING) {
+		if (param->size > _odp_queue_glb->config.max_queue_size)
+			return ODP_QUEUE_INVALID;
+	} else if (param->nonblocking == ODP_NONBLOCKING_LF) {
+		/* Only plain type lock-free queues supported */
+		if (type != ODP_QUEUE_TYPE_PLAIN)
+			return ODP_QUEUE_INVALID;
+		if (param->size > _odp_queue_glb->queue_lf_size)
+			return ODP_QUEUE_INVALID;
+	} else {
+		/* Wait-free queues not supported */
+		return ODP_QUEUE_INVALID;
+	}
+
+	if (type == ODP_QUEUE_TYPE_SCHED) {
+		/* Start scheduled queue indices from zero to enable direct
+		 * mapping to scheduler implementation indices. */
+		i = 0;
+		max_idx = CONFIG_MAX_SCHED_QUEUES;
+	} else {
+		i = CONFIG_MAX_SCHED_QUEUES;
+		/* All internal queues are of type plain */
+		max_idx = CONFIG_MAX_QUEUES;
+	}
+
+	/* Linear scan for a free entry; status is re-checked under the
+	 * lock since the first check is a racy fast path. */
+	for (; i < max_idx; i++) {
+		queue = qentry_from_index(i);
+
+		if (queue->status != QUEUE_STATUS_FREE)
+			continue;
+
+		LOCK(queue);
+		if (queue->status == QUEUE_STATUS_FREE) {
+			if (queue_init(queue, name, param)) {
+				UNLOCK(queue);
+				return ODP_QUEUE_INVALID;
+			}
+
+			/* Lock-free plain queue: switch the enq/deq ops to
+			 * the lock-free implementation. */
+			if (!queue->spsc &&
+			    param->nonblocking == ODP_NONBLOCKING_LF) {
+				queue_lf_func_t *lf_fn;
+
+				lf_fn = &_odp_queue_glb->queue_lf_func;
+
+				queue_lf = _odp_queue_lf_create(queue);
+
+				if (queue_lf == NULL) {
+					UNLOCK(queue);
+					return ODP_QUEUE_INVALID;
+				}
+				queue->queue_lf = queue_lf;
+
+				queue->enqueue = lf_fn->enq;
+				queue->enqueue_multi = lf_fn->enq_multi;
+				queue->dequeue = lf_fn->deq;
+				queue->dequeue_multi = lf_fn->deq_multi;
+				queue->orig_dequeue_multi = lf_fn->deq_multi;
+			}
+
+			/* Scheduled queues start outside of scheduling until
+			 * the first enqueue (see _sched_queue_enq_multi()). */
+			if (type == ODP_QUEUE_TYPE_SCHED)
+				queue->status = QUEUE_STATUS_NOTSCHED;
+			else
+				queue->status = QUEUE_STATUS_READY;
+
+			handle = queue->handle;
+			UNLOCK(queue);
+			break;
+		}
+		UNLOCK(queue);
+	}
+
+	if (handle == ODP_QUEUE_INVALID)
+		return ODP_QUEUE_INVALID;
+
+	if (type == ODP_QUEUE_TYPE_SCHED) {
+		/* Register the new queue with the scheduler. */
+		if (_odp_sched_fn->create_queue(queue->index,
+						&queue->param.sched)) {
+			/* NOTE(review): status is reset to FREE here without
+			 * holding the queue lock — presumably safe because no
+			 * other thread holds the handle yet; confirm. */
+			queue->status = QUEUE_STATUS_FREE;
+			_ODP_ERR("schedule queue init failed\n");
+			return ODP_QUEUE_INVALID;
+		}
+	}
+
+	return handle;
+}
+
+/* Create 'num' queues in one call.
+ *
+ * When share_param is true, param[0] is used for all queues, otherwise
+ * param[i] is used for queue i. Returns the number of queues created
+ * (may be < num on partial success), or -1 if the very first create
+ * fails. Successfully created handles are stored in queue[]. */
+static int queue_create_multi(const char *name[], const odp_queue_param_t param[],
+			      odp_bool_t share_param, odp_queue_t queue[], int num)
+{
+	int i;
+
+	_ODP_ASSERT(param != NULL);
+	_ODP_ASSERT(queue != NULL);
+	_ODP_ASSERT(num > 0);
+
+	for (i = 0; i < num; i++) {
+		odp_queue_t cur_queue;
+		const char *cur_name = name != NULL ? name[i] : NULL;
+		const odp_queue_param_t *cur_param = share_param ? &param[0] : &param[i];
+
+		cur_queue = queue_create(cur_name, cur_param);
+		if (cur_queue == ODP_QUEUE_INVALID)
+			return (i == 0) ? -1 : i;
+
+		queue[i] = cur_queue;
+	}
+	return i;
+}
+
+/* Scheduler interface: set queue status (e.g. SCHED/NOTSCHED) under the
+ * queue lock. Called by the scheduler implementation, not by the API. */
+void _odp_sched_queue_set_status(uint32_t queue_index, int status)
+{
+	queue_entry_t *queue = qentry_from_index(queue_index);
+
+	LOCK(queue);
+
+	queue->status = status;
+
+	UNLOCK(queue);
+}
+
+/* Destroy a queue.
+ *
+ * Fails (-1) on an invalid handle, an already free/destroyed queue, or a
+ * non-empty queue. A scheduled queue still known to the scheduler is only
+ * marked DESTROYED; the scheduler frees it later from
+ * _odp_sched_queue_deq(). */
+static int queue_destroy(odp_queue_t handle)
+{
+	int empty;
+	queue_entry_t *queue;
+
+	queue = qentry_from_handle(handle);
+
+	if (handle == ODP_QUEUE_INVALID)
+		return -1;
+
+	LOCK(queue);
+	if (queue->status == QUEUE_STATUS_FREE) {
+		UNLOCK(queue);
+		_ODP_ERR("queue \"%s\" already free\n", queue->name);
+		return -1;
+	}
+	if (queue->status == QUEUE_STATUS_DESTROYED) {
+		UNLOCK(queue);
+		_ODP_ERR("queue \"%s\" already destroyed\n", queue->name);
+		return -1;
+	}
+
+	/* Emptiness check depends on which ring implementation backs the
+	 * queue: spsc, single-threaded (sched) or mpmc (plain MT). */
+	if (queue->spsc)
+		empty = ring_spsc_is_empty(queue->ring_spsc);
+	else if (queue->type == ODP_QUEUE_TYPE_SCHED)
+		empty = ring_st_is_empty(queue->ring_st);
+	else
+		empty = ring_mpmc_is_empty(queue->ring_mpmc);
+
+	if (!empty) {
+		UNLOCK(queue);
+		_ODP_ERR("queue \"%s\" not empty\n", queue->name);
+		return -1;
+	}
+	if (queue->spsc)
+		ring_spsc_free(queue->ring_spsc);
+	else if (queue->type == ODP_QUEUE_TYPE_SCHED)
+		ring_st_free(queue->ring_st);
+	else
+		ring_mpmc_free(queue->ring_mpmc);
+
+	switch (queue->status) {
+	case QUEUE_STATUS_READY:
+		queue->status = QUEUE_STATUS_FREE;
+		break;
+	case QUEUE_STATUS_NOTSCHED:
+		queue->status = QUEUE_STATUS_FREE;
+		_odp_sched_fn->destroy_queue(queue->index);
+		break;
+	case QUEUE_STATUS_SCHED:
+		/* Queue is still in scheduling */
+		queue->status = QUEUE_STATUS_DESTROYED;
+		break;
+	default:
+		_ODP_ABORT("Unexpected queue status\n");
+	}
+
+	if (queue->queue_lf)
+		_odp_queue_lf_destroy(queue->queue_lf);
+
+	UNLOCK(queue);
+
+	return 0;
+}
+
+/* Destroy 'num' queues. Returns num on full success, the count of queues
+ * destroyed before the first failure, or the first error code if the
+ * first destroy fails. */
+static int queue_destroy_multi(odp_queue_t handle[], int num)
+{
+	int i;
+
+	_ODP_ASSERT(handle != NULL);
+	_ODP_ASSERT(num > 0);
+
+	for (i = 0; i < num; i++) {
+		int ret = queue_destroy(handle[i]);
+
+		if (ret)
+			return (i == 0) ? ret : i;
+	}
+
+	return i;
+}
+
+/* Set user context pointer. Full memory barriers before and after the
+ * store make prior writes to the context visible before the pointer is
+ * published. 'len' is unused in this implementation. */
+static int queue_context_set(odp_queue_t handle, void *context,
+			     uint32_t len ODP_UNUSED)
+{
+	odp_mb_full();
+	qentry_from_handle(handle)->param.context = context;
+	odp_mb_full();
+	return 0;
+}
+
+/* Find a queue by name. Linear scan over all entries; the status check
+ * is an unlocked fast path, the name compare is done under the lock.
+ * Returns ODP_QUEUE_INVALID when no active queue matches. */
+static odp_queue_t queue_lookup(const char *name)
+{
+	uint32_t i;
+
+	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+		queue_entry_t *queue = qentry_from_index(i);
+
+		if (queue->status == QUEUE_STATUS_FREE ||
+		    queue->status == QUEUE_STATUS_DESTROYED)
+			continue;
+
+		LOCK(queue);
+		if (strcmp(name, queue->name) == 0) {
+			/* found it */
+			UNLOCK(queue);
+			return queue->handle;
+		}
+		UNLOCK(queue);
+	}
+
+	return ODP_QUEUE_INVALID;
+}
+
+/* Enqueue up to 'num' events to a plain (MPMC ring) queue. Ordered
+ * context enqueues are diverted to the scheduler first; otherwise events
+ * go straight to the ring. Returns the number of events enqueued. */
+static inline int _plain_queue_enq_multi(odp_queue_t handle,
+					 _odp_event_hdr_t *event_hdr[], int num)
+{
+	queue_entry_t *queue;
+	int ret, num_enq;
+	ring_mpmc_t ring_mpmc;
+
+	queue = qentry_from_handle(handle);
+	ring_mpmc = queue->ring_mpmc;
+
+	/* Ordered scheduling context may buffer/reorder the enqueue */
+	if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
+		return ret;
+
+	num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)event_hdr, num);
+
+	return num_enq;
+}
+
+/* Dequeue up to 'num' events from a plain (MPMC ring) queue. Returns the
+ * number of events dequeued. */
+static inline int _plain_queue_deq_multi(odp_queue_t handle,
+					 _odp_event_hdr_t *event_hdr[], int num)
+{
+	int num_deq;
+	queue_entry_t *queue;
+	ring_mpmc_t ring_mpmc;
+
+	queue = qentry_from_handle(handle);
+	ring_mpmc = queue->ring_mpmc;
+
+	num_deq = ring_mpmc_deq_multi(ring_mpmc, (void **)event_hdr, num);
+
+	return num_deq;
+}
+
+/* Function-table wrapper for multi-event plain queue enqueue. */
+static int plain_queue_enq_multi(odp_queue_t handle,
+				 _odp_event_hdr_t *event_hdr[], int num)
+{
+	return _plain_queue_enq_multi(handle, event_hdr, num);
+}
+
+/* Single-event plain queue enqueue: 0 on success, -1 on failure. */
+static int plain_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+	int ret;
+
+	ret = _plain_queue_enq_multi(handle, &event_hdr, 1);
+
+	if (ret == 1)
+		return 0;
+	else
+		return -1;
+}
+
+/* Function-table wrapper for multi-event plain queue dequeue. */
+static int plain_queue_deq_multi(odp_queue_t handle,
+				 _odp_event_hdr_t *event_hdr[], int num)
+{
+	return _plain_queue_deq_multi(handle, event_hdr, num);
+}
+
+/* Single-event plain queue dequeue: event header or NULL when empty. */
+static _odp_event_hdr_t *plain_queue_deq(odp_queue_t handle)
+{
+	_odp_event_hdr_t *event_hdr = NULL;
+	int ret;
+
+	ret = _plain_queue_deq_multi(handle, &event_hdr, 1);
+
+	if (ret == 1)
+		return event_hdr;
+	else
+		return NULL;
+}
+
+/* Default enq/deq stubs installed by queue_init(). They log an error and
+ * fail, catching any operation that is unsupported for the queue's
+ * configured mode (e.g. dequeue from a scheduled queue). */
+static int error_enqueue(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+	(void)event_hdr;
+
+	_ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+	return -1;
+}
+
+static int error_enqueue_multi(odp_queue_t handle,
+			       _odp_event_hdr_t *event_hdr[], int num)
+
+{
+	(void)event_hdr;
+	(void)num;
+
+	_ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+	return -1;
+}
+
+static _odp_event_hdr_t *error_dequeue(odp_queue_t handle)
+{
+	_ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+	return NULL;
+}
+
+static int error_dequeue_multi(odp_queue_t handle,
+			       _odp_event_hdr_t *event_hdr[], int num)
+{
+	(void)event_hdr;
+	(void)num;
+
+	_ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+	return -1;
+}
+
+/* odp_queue_param_init(): zero the struct and apply implementation
+ * defaults (plain, MT-safe, blocking, order keeping; default scheduler
+ * priority/sync/group for the sched sub-struct). */
+static void queue_param_init(odp_queue_param_t *params)
+{
+	memset(params, 0, sizeof(odp_queue_param_t));
+	params->type = ODP_QUEUE_TYPE_PLAIN;
+	params->enq_mode = ODP_QUEUE_OP_MT;
+	params->deq_mode = ODP_QUEUE_OP_MT;
+	params->nonblocking = ODP_BLOCKING;
+	params->order = ODP_QUEUE_ORDER_KEEP;
+	params->sched.prio = odp_schedule_default_prio();
+	params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
+	params->sched.group = ODP_SCHED_GROUP_ALL;
+}
+
+/* odp_queue_info(): copy name pointer and creation parameters of an
+ * active queue into 'info'. Returns -1 on NULL info, out-of-range
+ * handle, or a free/destroyed queue. Copy is made under the lock. */
+static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
+{
+	uint32_t queue_id;
+	queue_entry_t *queue;
+	int status;
+
+	if (odp_unlikely(info == NULL)) {
+		_ODP_ERR("Unable to store info, NULL ptr given\n");
+		return -1;
+	}
+
+	queue_id = queue_to_index(handle);
+
+	if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+		_ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
+		return -1;
+	}
+
+	queue = qentry_from_index(queue_id);
+
+	LOCK(queue);
+	status = queue->status;
+
+	if (odp_unlikely(status == QUEUE_STATUS_FREE ||
+			 status == QUEUE_STATUS_DESTROYED)) {
+		UNLOCK(queue);
+		_ODP_ERR("Invalid queue status:%d\n", status);
+		return -1;
+	}
+
+	info->name = queue->name;
+	info->param = queue->param;
+
+	UNLOCK(queue);
+
+	return 0;
+}
+
+/* odp_queue_print(): dump human-readable state of one queue to the log.
+ * Validates the handle and status first, then prints configuration,
+ * scheduler parameters, pktio bindings, timer count and the backing
+ * ring implementation (lf/spsc/st/mpmc) with fill level. All reads are
+ * done under the queue lock. */
+static void queue_print(odp_queue_t handle)
+{
+	odp_pktio_info_t pktio_info;
+	queue_entry_t *queue;
+	uint32_t queue_id;
+	int status, prio;
+	int max_prio = odp_schedule_max_prio();
+
+	queue_id = queue_to_index(handle);
+
+	if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+		_ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
+		return;
+	}
+
+	queue = qentry_from_index(queue_id);
+
+	LOCK(queue);
+	status = queue->status;
+
+	if (odp_unlikely(status == QUEUE_STATUS_FREE ||
+			 status == QUEUE_STATUS_DESTROYED)) {
+		UNLOCK(queue);
+		_ODP_ERR("Invalid queue status:%d\n", status);
+		return;
+	}
+	_ODP_PRINT("\nQueue info\n");
+	_ODP_PRINT("----------\n");
+	_ODP_PRINT(" handle %p\n", (void *)queue->handle);
+	_ODP_PRINT(" index %" PRIu32 "\n", queue_id);
+	_ODP_PRINT(" name %s\n", queue->name);
+	_ODP_PRINT(" enq mode %s\n",
+		   queue->param.enq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+		   (queue->param.enq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+		    (queue->param.enq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+		     "unknown")));
+	_ODP_PRINT(" deq mode %s\n",
+		   queue->param.deq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+		   (queue->param.deq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+		    (queue->param.deq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+		     "unknown")));
+	_ODP_PRINT(" non-blocking %s\n",
+		   queue->param.nonblocking == ODP_BLOCKING ? "ODP_BLOCKING" :
+		   (queue->param.nonblocking == ODP_NONBLOCKING_LF ? "ODP_NONBLOCKING_LF" :
+		    (queue->param.nonblocking == ODP_NONBLOCKING_WF ? "ODP_NONBLOCKING_WF" :
+		     "unknown")));
+	_ODP_PRINT(" type %s\n",
+		   queue->type == ODP_QUEUE_TYPE_PLAIN ? "ODP_QUEUE_TYPE_PLAIN" :
+		   (queue->type == ODP_QUEUE_TYPE_SCHED ? "ODP_QUEUE_TYPE_SCHED" : "unknown"));
+	if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+		_ODP_PRINT(" sync %s\n",
+			   queue->param.sched.sync == ODP_SCHED_SYNC_PARALLEL ?
+			   "ODP_SCHED_SYNC_PARALLEL" :
+			   (queue->param.sched.sync == ODP_SCHED_SYNC_ATOMIC ?
+			    "ODP_SCHED_SYNC_ATOMIC" :
+			    (queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+			     "ODP_SCHED_SYNC_ORDERED" : "unknown")));
+		prio = queue->param.sched.prio;
+		/* Internal prio is inverted relative to the API value */
+		_ODP_PRINT(" priority %i (%i in API)\n", max_prio - prio, prio);
+		_ODP_PRINT(" group %i\n", queue->param.sched.group);
+		if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+			_ODP_PRINT(" spread %i\n", _odp_sched_basic_get_spread(queue_id));
+	}
+	if (queue->pktin.pktio != ODP_PKTIO_INVALID) {
+		if (!odp_pktio_info(queue->pktin.pktio, &pktio_info))
+			_ODP_PRINT(" pktin %s\n", pktio_info.name);
+	}
+	if (queue->pktout.pktio != ODP_PKTIO_INVALID) {
+		if (!odp_pktio_info(queue->pktout.pktio, &pktio_info))
+			_ODP_PRINT(" pktout %s\n", pktio_info.name);
+	}
+	_ODP_PRINT(" timers %" PRIu64 "\n",
+		   odp_atomic_load_u64(&queue->num_timers));
+	_ODP_PRINT(" status %s\n",
+		   queue->status == QUEUE_STATUS_READY ? "ready" :
+		   (queue->status == QUEUE_STATUS_NOTSCHED ? "not scheduled" :
+		    (queue->status == QUEUE_STATUS_SCHED ? "scheduled" : "unknown")));
+	_ODP_PRINT(" param.size %" PRIu32 "\n", queue->param.size);
+	if (queue->queue_lf) {
+		_ODP_PRINT(" implementation queue_lf\n");
+		_ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+			   _odp_queue_lf_length(queue->queue_lf), _odp_queue_lf_max_length());
+	} else if (queue->spsc) {
+		_ODP_PRINT(" implementation ring_spsc\n");
+		_ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+			   ring_spsc_length(queue->ring_spsc),
+			   ring_spsc_max_length(queue->ring_spsc));
+	} else if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+		_ODP_PRINT(" implementation ring_st\n");
+		_ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+			   ring_st_length(queue->ring_st),
+			   ring_st_max_length(queue->ring_st));
+	} else {
+		_ODP_PRINT(" implementation ring_mpmc\n");
+		_ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+			   ring_mpmc_length(queue->ring_mpmc),
+			   ring_mpmc_max_length(queue->ring_mpmc));
+	}
+	_ODP_PRINT("\n");
+
+	UNLOCK(queue);
+}
+
+/* odp_queue_print_all(): print a one-line summary per active queue.
+ * Per-queue state is snapshotted under the lock, then formatted and
+ * printed after unlocking to keep the critical section short. */
+static void queue_print_all(void)
+{
+	uint32_t i, index, len, max_len;
+	const char *name;
+	int status;
+	odp_queue_type_t type;
+	odp_nonblocking_t blocking;
+	odp_queue_op_mode_t enq_mode;
+	odp_queue_op_mode_t deq_mode;
+	odp_queue_order_t order;
+	const char *status_str;
+	const char *bl_str;
+	char type_c, enq_c, deq_c, order_c, sync_c;
+	const int col_width = 24;
+	int prio = 0;
+	int spr = 0;
+	odp_schedule_sync_t sync = ODP_SCHED_SYNC_PARALLEL;
+	odp_schedule_group_t grp = ODP_SCHED_GROUP_INVALID;
+
+	_ODP_PRINT("\nList of all queues\n");
+	_ODP_PRINT("------------------\n");
+	_ODP_PRINT(" idx %-*s type stat blk enq deq ord len max_len sync prio grp",
+		   col_width, "name");
+	if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+		_ODP_PRINT(" spr\n");
+	else
+		_ODP_PRINT("\n");
+
+	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+		queue_entry_t *queue = qentry_from_index(i);
+
+		/* Unlocked fast path: skip free/destroyed entries */
+		if (queue->status < QUEUE_STATUS_READY)
+			continue;
+
+		LOCK(queue);
+
+		status = queue->status;
+		index = queue->index;
+		name = queue->name;
+		type = queue->type;
+		blocking = queue->param.nonblocking;
+		enq_mode = queue->param.enq_mode;
+		deq_mode = queue->param.deq_mode;
+		order = queue->param.order;
+
+		if (queue->queue_lf) {
+			len = _odp_queue_lf_length(queue->queue_lf);
+			max_len = _odp_queue_lf_max_length();
+		} else if (queue->spsc) {
+			len = ring_spsc_length(queue->ring_spsc);
+			max_len = ring_spsc_max_length(queue->ring_spsc);
+		} else if (type == ODP_QUEUE_TYPE_SCHED) {
+			len = ring_st_length(queue->ring_st);
+			max_len = ring_st_max_length(queue->ring_st);
+			prio = queue->param.sched.prio;
+			grp = queue->param.sched.group;
+			sync = queue->param.sched.sync;
+			if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+				spr = _odp_sched_basic_get_spread(index);
+		} else {
+			len = ring_mpmc_length(queue->ring_mpmc);
+			max_len = ring_mpmc_max_length(queue->ring_mpmc);
+		}
+
+		UNLOCK(queue);
+
+		/* Re-check: queue may have been freed while unlocked */
+		if (status < QUEUE_STATUS_READY)
+			continue;
+
+		status_str = (status == QUEUE_STATUS_READY) ? "R" :
+			     ((status == QUEUE_STATUS_SCHED) ? "S" : "NS");
+
+		type_c = (type == ODP_QUEUE_TYPE_PLAIN) ? 'P' : 'S';
+
+		bl_str = (blocking == ODP_BLOCKING) ? "B" :
+			 ((blocking == ODP_NONBLOCKING_LF) ? "LF" : "WF");
+
+		enq_c = (enq_mode == ODP_QUEUE_OP_MT) ? 'S' :
+			((enq_mode == ODP_QUEUE_OP_MT_UNSAFE) ? 'U' : 'D');
+
+		deq_c = (deq_mode == ODP_QUEUE_OP_MT) ? 'S' :
+			((deq_mode == ODP_QUEUE_OP_MT_UNSAFE) ? 'U' : 'D');
+
+		order_c = (order == ODP_QUEUE_ORDER_KEEP) ? 'K' : 'I';
+
+		_ODP_PRINT("%4u %-*s %c %2s %2s", index, col_width, name, type_c,
+			   status_str, bl_str);
+		_ODP_PRINT(" %c %c %c %6u %6u", enq_c, deq_c, order_c, len, max_len);
+
+		if (type == ODP_QUEUE_TYPE_SCHED) {
+			sync_c = (sync == ODP_SCHED_SYNC_PARALLEL) ? 'P' :
+				 ((sync == ODP_SCHED_SYNC_ATOMIC) ? 'A' : 'O');
+			/* Print prio level matching odp_schedule_print() output */
+			prio = odp_schedule_max_prio() - prio;
+
+			_ODP_PRINT(" %c %4i %3i", sync_c, prio, grp);
+
+			if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+				_ODP_PRINT(" %3i", spr);
+		}
+
+		_ODP_PRINT("\n");
+	}
+
+	_ODP_PRINT("\n");
+}
+
+/* Enqueue up to 'num' events to a scheduled (single-threaded ring)
+ * queue. If the queue was not in scheduling, it is moved to SCHED state
+ * and registered with the scheduler after the lock is released. Returns
+ * the number of events enqueued. */
+static inline int _sched_queue_enq_multi(odp_queue_t handle,
+					 _odp_event_hdr_t *event_hdr[], int num)
+{
+	int sched = 0;
+	int ret;
+	queue_entry_t *queue;
+	int num_enq;
+	ring_st_t ring_st;
+
+	queue = qentry_from_handle(handle);
+	ring_st = queue->ring_st;
+
+	/* Ordered scheduling context may buffer/reorder the enqueue */
+	if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
+		return ret;
+
+	LOCK(queue);
+
+	num_enq = ring_st_enq_multi(ring_st, (void **)event_hdr, num);
+
+	if (odp_unlikely(num_enq == 0)) {
+		UNLOCK(queue);
+		return 0;
+	}
+
+	if (queue->status == QUEUE_STATUS_NOTSCHED) {
+		queue->status = QUEUE_STATUS_SCHED;
+		sched = 1;
+	}
+
+	UNLOCK(queue);
+
+	/* Add queue to scheduling */
+	if (sched && _odp_sched_fn->sched_queue(queue->index))
+		_ODP_ABORT("schedule_queue failed\n");
+
+	return num_enq;
+}
+
+/* Scheduler interface: dequeue up to max_num events from a scheduled
+ * queue. Returns the number dequeued, 0 when empty, or -1 for a bad or
+ * destroyed queue. When update_status is set, an empty SCHED queue is
+ * moved back to NOTSCHED (removed from scheduling). A DESTROYED queue
+ * found here is finally freed and reported to the scheduler, completing
+ * the deferred destroy started in queue_destroy(). */
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
+			 int update_status)
+{
+	int num_deq, status;
+	ring_st_t ring_st;
+	queue_entry_t *queue = qentry_from_index(queue_index);
+
+	ring_st = queue->ring_st;
+
+	LOCK(queue);
+
+	status = queue->status;
+
+	if (odp_unlikely(status < QUEUE_STATUS_READY)) {
+		/* Bad queue, or queue has been destroyed.
+		 * Inform scheduler about a destroyed queue. */
+		if (queue->status == QUEUE_STATUS_DESTROYED) {
+			queue->status = QUEUE_STATUS_FREE;
+			_odp_sched_fn->destroy_queue(queue_index);
+		}
+
+		UNLOCK(queue);
+		return -1;
+	}
+
+	num_deq = ring_st_deq_multi(ring_st, (void **)ev, max_num);
+
+	if (num_deq == 0) {
+		/* Already empty queue */
+		if (update_status && status == QUEUE_STATUS_SCHED)
+			queue->status = QUEUE_STATUS_NOTSCHED;
+
+		UNLOCK(queue);
+
+		return 0;
+	}
+
+	UNLOCK(queue);
+
+	return num_deq;
+}
+
+/* Function-table wrapper for multi-event scheduled queue enqueue. */
+static int sched_queue_enq_multi(odp_queue_t handle,
+				 _odp_event_hdr_t *event_hdr[], int num)
+{
+	return _sched_queue_enq_multi(handle, event_hdr, num);
+}
+
+/* Single-event scheduled queue enqueue: 0 on success, -1 on failure. */
+static int sched_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+	int ret;
+
+	ret = _sched_queue_enq_multi(handle, &event_hdr, 1);
+
+	if (ret == 1)
+		return 0;
+	else
+		return -1;
+}
+
+/* Scheduler interface: test if a scheduled queue is empty. Returns 1 if
+ * empty (side effect: an empty SCHED queue is dropped back to NOTSCHED),
+ * 0 if it holds events, -1 for a bad/destroyed queue. */
+int _odp_sched_queue_empty(uint32_t queue_index)
+{
+	queue_entry_t *queue = qentry_from_index(queue_index);
+	int ret = 0;
+
+	LOCK(queue);
+
+	if (odp_unlikely(queue->status < QUEUE_STATUS_READY)) {
+		/* Bad queue, or queue has been destroyed. */
+		UNLOCK(queue);
+		return -1;
+	}
+
+	if (ring_st_is_empty(queue->ring_st)) {
+		/* Already empty queue. Update status. */
+		if (queue->status == QUEUE_STATUS_SCHED)
+			queue->status = QUEUE_STATUS_NOTSCHED;
+
+		ret = 1;
+	}
+
+	UNLOCK(queue);
+
+	return ret;
+}
+
+/* Initialize a claimed queue entry from creation parameters.
+ *
+ * Copies name/params, validates ordered lock count, selects the backing
+ * ring implementation (spsc / mpmc / st) and installs the matching
+ * enq/deq function pointers. Called from queue_create() with the queue
+ * lock held. Returns 0 on success, -1 on failure. */
+static int queue_init(queue_entry_t *queue, const char *name,
+		      const odp_queue_param_t *param)
+{
+	uint32_t queue_size;
+	odp_queue_type_t queue_type;
+	int spsc;
+
+	queue_type = param->type;
+
+	if (name == NULL) {
+		queue->name[0] = 0;
+	} else {
+		strncpy(queue->name, name, ODP_QUEUE_NAME_LEN - 1);
+		queue->name[ODP_QUEUE_NAME_LEN - 1] = 0;
+	}
+	memcpy(&queue->param, param, sizeof(odp_queue_param_t));
+	if (queue->param.sched.lock_count > _odp_sched_fn->max_ordered_locks())
+		return -1;
+
+	/* Application may not dequeue directly from a scheduled queue */
+	if (queue_type == ODP_QUEUE_TYPE_SCHED)
+		queue->param.deq_mode = ODP_QUEUE_OP_DISABLED;
+
+	queue->type = queue_type;
+	odp_atomic_init_u64(&queue->num_timers, 0);
+
+	queue->pktin = PKTIN_INVALID;
+	queue->pktout = PKTOUT_INVALID;
+
+	queue_size = param->size;
+	if (queue_size == 0)
+		queue_size = _odp_queue_glb->config.default_queue_size;
+
+	if (queue_size < MIN_QUEUE_SIZE)
+		queue_size = MIN_QUEUE_SIZE;
+
+	if (queue_size > _odp_queue_glb->config.max_queue_size) {
+		_ODP_ERR("Too large queue size %u\n", queue_size);
+		return -1;
+	}
+
+	/* Ring size must be larger than queue_size */
+	if (_ODP_CHECK_IS_POWER2(queue_size))
+		queue_size++;
+
+	/* Round up if not already a power of two */
+	queue_size = _ODP_ROUNDUP_POWER2_U32(queue_size);
+
+	/* Single-producer / single-consumer plain queue has simple and
+	 * lock-free implementation */
+	spsc = (queue_type == ODP_QUEUE_TYPE_PLAIN) &&
+	       (param->enq_mode == ODP_QUEUE_OP_MT_UNSAFE) &&
+	       (param->deq_mode == ODP_QUEUE_OP_MT_UNSAFE);
+
+	queue->spsc = spsc;
+	queue->queue_lf = NULL;
+
+	/* Default to error functions */
+	queue->enqueue = error_enqueue;
+	queue->enqueue_multi = error_enqueue_multi;
+	queue->dequeue = error_dequeue;
+	queue->dequeue_multi = error_dequeue_multi;
+	queue->orig_dequeue_multi = error_dequeue_multi;
+
+	if (spsc) {
+		_odp_queue_spsc_init(queue, queue_size);
+	} else {
+		if (queue_type == ODP_QUEUE_TYPE_PLAIN) {
+			queue->enqueue = plain_queue_enq;
+			queue->enqueue_multi = plain_queue_enq_multi;
+			queue->dequeue = plain_queue_deq;
+			queue->dequeue_multi = plain_queue_deq_multi;
+			queue->orig_dequeue_multi = plain_queue_deq_multi;
+
+			queue->ring_mpmc = ring_mpmc_create(queue->name, queue_size);
+			if (queue->ring_mpmc == NULL) {
+				_ODP_ERR("Creating MPMC ring failed\n");
+				return -1;
+			}
+		} else {
+			/* Scheduled queue: dequeue stays disabled (error
+			 * stubs); scheduler pulls events internally. */
+			queue->enqueue = sched_queue_enq;
+			queue->enqueue_multi = sched_queue_enq_multi;
+
+			queue->ring_st = ring_st_create(queue->name, queue_size);
+			if (queue->ring_st == NULL) {
+				_ODP_ERR("Creating ST ring failed\n");
+				return -1;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* odp_queue_to_u64(): handle as a printable integer. */
+static uint64_t queue_to_u64(odp_queue_t hdl)
+{
+	return _odp_pri(hdl);
+}
+
+/* Internal pktio interface: get/set the pktin/pktout binding of a queue.
+ * Setters are not locked; presumably called only during pktio setup
+ * before the queue is in active use — confirm against callers. */
+static odp_pktout_queue_t queue_get_pktout(odp_queue_t handle)
+{
+	queue_entry_t *qentry = qentry_from_handle(handle);
+
+	return qentry->pktout;
+}
+
+static void queue_set_pktout(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+	queue_entry_t *qentry = qentry_from_handle(handle);
+
+	qentry->pktout.pktio = pktio;
+	qentry->pktout.index = index;
+}
+
+static odp_pktin_queue_t queue_get_pktin(odp_queue_t handle)
+{
+	queue_entry_t *qentry = qentry_from_handle(handle);
+
+	return qentry->pktin;
+}
+
+static void queue_set_pktin(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+	queue_entry_t *qentry = qentry_from_handle(handle);
+
+	qentry->pktin.pktio = pktio;
+	qentry->pktin.index = index;
+}
+
+/* Internal interface: override individual enq/deq function pointers
+ * (used e.g. by pktio). NULL arguments leave the current function. */
+static void queue_set_enq_deq_func(odp_queue_t handle,
+				   queue_enq_fn_t enq,
+				   queue_enq_multi_fn_t enq_multi,
+				   queue_deq_fn_t deq,
+				   queue_deq_multi_fn_t deq_multi)
+{
+	queue_entry_t *qentry = qentry_from_handle(handle);
+
+	if (enq)
+		qentry->enqueue = enq;
+
+	if (enq_multi)
+		qentry->enqueue_multi = enq_multi;
+
+	if (deq)
+		qentry->dequeue = deq;
+
+	if (deq_multi)
+		qentry->dequeue_multi = deq_multi;
+}
+
+/* Dequeue via the original (pre-override) multi-dequeue function. */
+static int queue_orig_multi(odp_queue_t handle,
+			    _odp_event_hdr_t **event_hdr, int num)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+
+	return queue->orig_dequeue_multi(handle, event_hdr, num);
+}
+
+/* odp_queue_enq_multi(): enqueue up to QUEUE_MULTI_MAX events via the
+ * queue's installed enqueue function. Returns number enqueued. */
+static int queue_api_enq_multi(odp_queue_t handle,
+			       const odp_event_t ev[], int num)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+
+	if (odp_unlikely(num == 0))
+		return 0;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	return queue->enqueue_multi(handle,
+				    (_odp_event_hdr_t **)(uintptr_t)ev, num);
+}
+
+/* Track number of timers targeting this queue; a non-zero count makes
+ * the dequeue paths below run inline timer processing. */
+static void queue_timer_add(odp_queue_t handle)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+
+	odp_atomic_inc_u64(&queue->num_timers);
+}
+
+static void queue_timer_rem(odp_queue_t handle)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+
+	odp_atomic_dec_u64(&queue->num_timers);
+}
+
+/* odp_queue_enq(): single event enqueue. 0 on success, <0 on failure. */
+static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+
+	return queue->enqueue(handle,
+			      (_odp_event_hdr_t *)(uintptr_t)ev);
+}
+
+/* odp_queue_deq_multi(): dequeue up to QUEUE_MULTI_MAX events, then run
+ * inline timers when enabled and timers target this queue. */
+static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+	int ret;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	ret = queue->dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
+
+	if (odp_global_rw->inline_timers &&
+	    odp_atomic_load_u64(&queue->num_timers))
+		timer_run(ret ? 2 : 1);
+
+	return ret;
+}
+
+/* odp_queue_deq(): single event dequeue (ODP_EVENT_INVALID when empty),
+ * with the same inline timer handling as the multi variant. */
+static odp_event_t queue_api_deq(odp_queue_t handle)
+{
+	queue_entry_t *queue = qentry_from_handle(handle);
+	odp_event_t ev = (odp_event_t)queue->dequeue(handle);
+
+	if (odp_global_rw->inline_timers &&
+	    odp_atomic_load_u64(&queue->num_timers))
+		timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
+
+	return ev;
+}
+
+/* API functions */
+_odp_queue_api_fn_t _odp_queue_basic_api = {
+ .queue_create = queue_create,
+ .queue_create_multi = queue_create_multi,
+ .queue_destroy = queue_destroy,
+ .queue_destroy_multi = queue_destroy_multi,
+ .queue_lookup = queue_lookup,
+ .queue_capability = queue_capability,
+ .queue_context_set = queue_context_set,
+ .queue_enq = queue_api_enq,
+ .queue_enq_multi = queue_api_enq_multi,
+ .queue_deq = queue_api_deq,
+ .queue_deq_multi = queue_api_deq_multi,
+ .queue_type = queue_type,
+ .queue_sched_type = queue_sched_type,
+ .queue_sched_prio = queue_sched_prio,
+ .queue_sched_group = queue_sched_group,
+ .queue_lock_count = queue_lock_count,
+ .queue_to_u64 = queue_to_u64,
+ .queue_param_init = queue_param_init,
+ .queue_info = queue_info,
+ .queue_print = queue_print,
+ .queue_print_all = queue_print_all
+
+};
+
+/* Functions towards internal components */
+queue_fn_t _odp_queue_basic_fn = {
+ .init_global = queue_init_global,
+ .term_global = queue_term_global,
+ .init_local = queue_init_local,
+ .term_local = queue_term_local,
+ .get_pktout = queue_get_pktout,
+ .set_pktout = queue_set_pktout,
+ .get_pktin = queue_get_pktin,
+ .set_pktin = queue_set_pktin,
+ .set_enq_deq_fn = queue_set_enq_deq_func,
+ .orig_deq_multi = queue_orig_multi,
+ .timer_add = queue_timer_add,
+ .timer_rem = queue_timer_rem
+};
diff --git a/platform/linux-dpdk/odp_queue_eventdev.c b/platform/linux-dpdk/odp_queue_eventdev.c
new file mode 100644
index 000000000..c0928bfd2
--- /dev/null
+++ b/platform/linux-dpdk/odp_queue_eventdev.c
@@ -0,0 +1,1359 @@
+/* Copyright (c) 2019-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_eventdev_internal.h>
+#include <odp/api/hints.h>
+#include <odp/api/queue.h>
+#include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/sync.h>
+#include <odp/api/plat/queue_inline_types.h>
+#include <odp/api/plat/ticketlock_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_event_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
+#include <odp_timer_internal.h>
+
+#include <rte_config.h>
+#include <rte_eventdev.h>
+#include <rte_service.h>
+
+#include <inttypes.h>
+#include <string.h>
+#include <unistd.h>
+
+#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->lock))
+#define UNLOCK(queue_ptr) odp_ticketlock_unlock(&((queue_ptr)->lock))
+#define LOCK_INIT(queue_ptr) odp_ticketlock_init(&((queue_ptr)->lock))
+
+#define MIN_QUEUE_SIZE 8
+#define DEFAULT_QUEUE_SIZE (4 * 1024)
+#define MAX_QUEUE_SIZE (8 * 1024)
+
+#define EVENT_QUEUE_FLOWS 32
+
+#define QUEUE_STATUS_FREE 0
+#define QUEUE_STATUS_READY 1
+#define QUEUE_STATUS_SCHED 2
+
+/* Number of priority levels */
+#define NUM_PRIO 8
+
+/* Thread local eventdev context */
+__thread eventdev_local_t _odp_eventdev_local;
+
+/* Global eventdev context */
+eventdev_global_t *_odp_eventdev_gbl;
+
+extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
+
+/* Handle is a direct pointer to the queue entry; read its index. */
+static inline uint32_t queue_to_index(odp_queue_t handle)
+{
+	queue_entry_t *qentry = (queue_entry_t *)(uintptr_t)handle;
+
+	return qentry->index;
+}
+
+static int queue_init(queue_entry_t *queue, const char *name,
+		      const odp_queue_param_t *param);
+
+/* Return the number of eventdev queues configured for 'sync' and write
+ * the first queue id of that range to *first_id. Eventdev queue ids are
+ * laid out as [atomic | parallel | ordered]. Aborts on invalid sync. */
+static uint8_t event_queue_ids(odp_schedule_sync_t sync, uint8_t *first_id)
+{
+	*first_id = 0;
+	if (sync == ODP_SCHED_SYNC_ATOMIC)
+		return _odp_eventdev_gbl->event_queue.num_atomic;
+
+	*first_id += _odp_eventdev_gbl->event_queue.num_atomic;
+	if (sync == ODP_SCHED_SYNC_PARALLEL)
+		return _odp_eventdev_gbl->event_queue.num_parallel;
+
+	*first_id += _odp_eventdev_gbl->event_queue.num_parallel;
+	if (sync == ODP_SCHED_SYNC_ORDERED)
+		return _odp_eventdev_gbl->event_queue.num_ordered;
+
+	_ODP_ABORT("Invalid schedule sync type\n");
+	return 0;
+}
+
+/* Read eventdev scheduler options from the libconfig file into the
+ * global state. Every option is mandatory; a missing key fails the
+ * whole init (-1). Values are printed as they are read. */
+static int read_config_file(eventdev_global_t *eventdev)
+{
+	const char *str;
+	int val = 0;
+
+	_ODP_PRINT("\nScheduler config\n----------------\n");
+
+	str = "sched_eventdev.num_atomic_queues";
+	if (!_odp_libconfig_lookup_int(str, &val)) {
+		_ODP_ERR("Config option '%s' not found.\n", str);
+		return -1;
+	}
+	_ODP_PRINT("%s: %i\n", str, val);
+	eventdev->event_queue.num_atomic = val;
+
+	str = "sched_eventdev.num_ordered_queues";
+	if (!_odp_libconfig_lookup_int(str, &val)) {
+		_ODP_ERR("Config option '%s' not found.\n", str);
+		return -1;
+	}
+	_ODP_PRINT("%s: %i\n", str, val);
+	eventdev->event_queue.num_ordered = val;
+
+	str = "sched_eventdev.num_parallel_queues";
+	if (!_odp_libconfig_lookup_int(str, &val)) {
+		_ODP_ERR("Config option '%s' not found.\n", str);
+		return -1;
+	}
+	/* NOTE(review): "\n\n" here inserts a blank line before the last
+	 * option is printed — possibly unintended; left as-is. */
+	_ODP_PRINT("%s: %i\n\n", str, val);
+	eventdev->event_queue.num_parallel = val;
+
+	str = "sched_eventdev.num_ports";
+	if (!_odp_libconfig_lookup_int(str, &val)) {
+		_ODP_ERR("Config option '%s' not found.\n", str);
+		return -1;
+	}
+	_ODP_PRINT("%s: %i\n\n", str, val);
+	eventdev->num_event_ports = val;
+
+	return 0;
+}
+
+/* Queue capability for the eventdev implementation. Lock-free plain
+ * queues are not supported (max_num/max_size 0). 'sched' is unused. */
+static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
+{
+	memset(capa, 0, sizeof(odp_queue_capability_t));
+
+	/* Reserve some queues for internal use */
+	capa->max_queues = CONFIG_MAX_QUEUES;
+	capa->plain.max_num = CONFIG_MAX_PLAIN_QUEUES;
+	capa->plain.max_size = _odp_eventdev_gbl->plain_config.max_queue_size - 1;
+	capa->plain.lockfree.max_num = 0;
+	capa->plain.lockfree.max_size = 0;
+
+	return 0;
+}
+
+/* Dump rte_event_dev_info fields to the ODP log (debug aid at init). */
+static void print_dev_info(const struct rte_event_dev_info *info)
+{
+	_ODP_PRINT("\nEvent device info\n"
+		   "-----------------\n"
+		   "driver name: %s\n"
+		   "min_dequeue_timeout_ns: %" PRIu32 "\n"
+		   "max_dequeue_timeout_ns: %" PRIu32 "\n"
+		   "dequeue_timeout_ns: %" PRIu32 "\n"
+		   "max_event_queues: %" PRIu8 "\n"
+		   "max_event_queue_flows: %" PRIu32 "\n"
+		   "max_event_queue_priority_levels: %" PRIu8 "\n"
+		   "max_event_priority_levels: %" PRIu8 "\n"
+		   "max_event_ports: %" PRIu8 "\n"
+		   "max_event_port_dequeue_depth: %" PRIu8 "\n"
+		   "max_event_port_enqueue_depth: %" PRIu32 "\n"
+		   "max_num_events: %" PRId32 "\n"
+		   "event_dev_cap: %" PRIu32 "\n",
+		   info->driver_name,
+		   info->min_dequeue_timeout_ns,
+		   info->max_dequeue_timeout_ns,
+		   info->dequeue_timeout_ns,
+		   info->max_event_queues,
+		   info->max_event_queue_flows,
+		   info->max_event_queue_priority_levels,
+		   info->max_event_priority_levels,
+		   info->max_event_ports,
+		   info->max_event_port_dequeue_depth,
+		   info->max_event_port_enqueue_depth,
+		   info->max_num_events,
+		   info->event_dev_cap);
+}
+
+/* Map a DPDK service to the least loaded service lcore.
+ *
+ * Unmaps the service from every service core first, then maps it to the
+ * core currently running the fewest services. Returns -1 when no
+ * service cores exist or mapping fails. */
+int _odp_service_setup(uint32_t service_id)
+{
+	uint32_t cores[RTE_MAX_LCORE];
+	uint32_t lcore = 0;
+	int32_t num_cores;
+	int32_t num_serv;
+	int32_t min_num_serv = INT32_MAX;
+
+	if (!rte_service_lcore_count()) {
+		_ODP_ERR("No service cores available\n");
+		return -1;
+	}
+
+	/* Use the service core with the smallest number of running services */
+	num_cores = rte_service_lcore_list(cores, RTE_MAX_LCORE);
+	while (num_cores--) {
+		rte_service_map_lcore_set(service_id, cores[num_cores], 0);
+		num_serv = rte_service_lcore_count_services(cores[num_cores]);
+		if (num_serv < min_num_serv) {
+			lcore = cores[num_cores];
+			min_num_serv = num_serv;
+		}
+	}
+	if (rte_service_map_lcore_set(service_id, lcore, 1)) {
+		_ODP_ERR("Unable to map service to core\n");
+		return -1;
+	}
+	return 0;
+}
+
+/* Decide how many eventdev queues to configure. When the config file
+ * set no counts, the device's max_event_queues is divided evenly among
+ * atomic/ordered/parallel. Returns the total queue count. */
+static int alloc_queues(eventdev_global_t *eventdev,
+			const struct rte_event_dev_info *info)
+{
+	int num_queues;
+
+	if (!eventdev->event_queue.num_atomic &&
+	    !eventdev->event_queue.num_ordered &&
+	    !eventdev->event_queue.num_parallel) {
+		uint8_t queue_per_type = info->max_event_queues / 3;
+
+		/* Divide eventdev queues evenly to ODP queue types */
+		eventdev->event_queue.num_atomic = queue_per_type;
+		eventdev->event_queue.num_ordered = queue_per_type;
+		eventdev->event_queue.num_parallel = queue_per_type;
+
+		num_queues = 3 * queue_per_type;
+	} else {
+		num_queues = eventdev->event_queue.num_atomic +
+			     eventdev->event_queue.num_ordered +
+			     eventdev->event_queue.num_parallel;
+	}
+
+	return num_queues;
+}
+
+/* Configure 'num_queues' eventdev queues starting at first_queue_id for
+ * the given ODP sync mode: tag each entry, fetch the default rte queue
+ * config, set the schedule type and (for atomic/ordered) flow count,
+ * then apply it with rte_event_queue_setup(). Returns 0/-1. */
+static int setup_queues(uint8_t dev_id, uint8_t first_queue_id,
+			uint8_t num_queues, uint32_t num_flows,
+			odp_schedule_sync_t sync)
+{
+	uint8_t i, j;
+
+	for (i = first_queue_id, j = 0; j < num_queues; i++, j++) {
+		struct rte_event_queue_conf queue_conf;
+		queue_entry_t *queue = qentry_from_index(i);
+
+		queue->sync = sync;
+
+		if (rte_event_queue_default_conf_get(dev_id, i, &queue_conf)) {
+			_ODP_ERR("rte_event_queue_default_conf_get failed\n");
+			return -1;
+		}
+		queue_conf.schedule_type = event_schedule_type(sync);
+
+		/* Ordered queues implemented using atomic queues */
+		if (sync == ODP_SCHED_SYNC_ATOMIC ||
+		    sync == ODP_SCHED_SYNC_ORDERED)
+			queue_conf.nb_atomic_flows = num_flows;
+
+		if (rte_event_queue_setup(dev_id, i, &queue_conf)) {
+			_ODP_ERR("rte_event_queue_setup failed\n");
+			return -1;
+		}
+	}
+	return 0;
+}
+
+static int configure_queues(uint8_t dev_id, uint32_t num_flows)
+{
+ uint8_t first_queue_id;
+ uint8_t num_queues;
+
+ num_queues = event_queue_ids(ODP_SCHED_SYNC_ATOMIC, &first_queue_id);
+ if (setup_queues(dev_id, first_queue_id, num_queues, num_flows,
+ ODP_SCHED_SYNC_ATOMIC))
+ return -1;
+
+ num_queues = event_queue_ids(ODP_SCHED_SYNC_PARALLEL, &first_queue_id);
+ if (setup_queues(dev_id, first_queue_id, num_queues, num_flows,
+ ODP_SCHED_SYNC_PARALLEL))
+ return -1;
+
+ num_queues = event_queue_ids(ODP_SCHED_SYNC_ORDERED, &first_queue_id);
+ if (setup_queues(dev_id, first_queue_id, num_queues, num_flows,
+ ODP_SCHED_SYNC_ORDERED))
+ return -1;
+
+ return 0;
+}
+
+static int queue_is_linked(uint8_t dev_id, uint8_t queue_id)
+{
+ uint8_t i;
+
+ for (i = 0; i < _odp_eventdev_gbl->config.nb_event_ports; i++) {
+ uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
+ int num_links;
+ int j;
+
+ num_links = rte_event_port_links_get(dev_id, i, queues,
+ priorities);
+ for (j = 0; j < num_links; j++) {
+ if (queues[j] == queue_id)
+ return 1;
+ }
+ }
+ return 0;
+}
+
/* Dummy link all unlinked queues to port zero to pass eventdev start.
 *
 * rte_event_dev_start() requires every queue to be linked to at least one
 * port. Queues that were linked are recorded in dummy_linked_queues[] so
 * _odp_dummy_unlink_queues() can undo them after start.
 *
 * Returns the number of dummy links created, or -1 on failure. */
int _odp_dummy_link_queues(uint8_t dev_id, uint8_t dummy_linked_queues[], int num)
{
	uint8_t priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	uint8_t queue_id;
	int ret;
	int num_linked = 0;

	for (queue_id = 0; queue_id < num; queue_id++) {
		/* Skip queues that already have a real port link */
		if (queue_is_linked(dev_id, queue_id))
			continue;

		ret = rte_event_port_link(dev_id, 0, &queue_id, &priority, 1);
		if (ret != 1) {
			_ODP_ERR("rte_event_port_link failed: %d\n", ret);
			return -1;
		}
		dummy_linked_queues[num_linked++] = queue_id;
	}
	return num_linked;
}
+
/* Remove the dummy links to port zero created by _odp_dummy_link_queues().
 *
 * Returns 0 on success, -1 if unlinking any queue fails. */
int _odp_dummy_unlink_queues(uint8_t dev_id, uint8_t dummy_linked_queues[], int num)
{
	int idx;

	for (idx = 0; idx < num; idx++) {
		int ret = rte_event_port_unlink(dev_id, 0,
						&dummy_linked_queues[idx], 1);

		if (ret < 0) {
			_ODP_ERR("rte_event_port_unlink failed\n");
			return -1;
		}
	}

	return 0;
}
+
+static int configure_ports(uint8_t dev_id,
+ const struct rte_event_dev_config *dev_conf)
+{
+ struct rte_event_port_conf port_conf;
+ uint8_t i;
+
+ for (i = 0; i < dev_conf->nb_event_ports; i++) {
+ if (rte_event_port_default_conf_get(dev_id, i, &port_conf)) {
+ _ODP_ERR("rte_event_port_default_conf_get failed\n");
+ return -1;
+ }
+
+ port_conf.new_event_threshold = dev_conf->nb_events_limit;
+ port_conf.dequeue_depth = dev_conf->nb_event_port_dequeue_depth;
+ port_conf.enqueue_depth = dev_conf->nb_event_port_enqueue_depth;
+
+ if (rte_event_port_setup(dev_id, i, &port_conf)) {
+ _ODP_ERR("rte_event_port_setup failed\n");
+ return -1;
+ }
+ }
+ return 0;
+}
+
/* Initialize eventdev device 0: read configuration, configure queues and
 * ports, start the device and initialize scheduling group state.
 *
 * The sequence is order sensitive: ports and queues must be configured
 * before start, and eventdev requires every queue to be linked to a port
 * at start time (hence the temporary dummy links to port zero).
 *
 * Returns 0 on success, -1 on any failure. */
static int init_event_dev(void)
{
	uint32_t num_flows;
	uint8_t dev_id = 0;
	uint8_t rx_adapter_id = 0;
	uint8_t dummy_links[RTE_EVENT_MAX_QUEUES_PER_DEV];
	struct rte_event_dev_info info;
	struct rte_event_dev_config config;
	int num_dummy_links;
	int ret;
	int i;

	if (rte_event_dev_count() < 1) {
		_ODP_ERR("No eventdev devices found\n");
		return -1;
	}

	if (read_config_file(_odp_eventdev_gbl))
		return -1;

	/* Global state: device/adapter ids, locks and per-thread ports */
	_odp_eventdev_gbl->dev_id = dev_id;
	_odp_eventdev_gbl->rx_adapter.id = rx_adapter_id;
	_odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_INIT;
	odp_ticketlock_init(&_odp_eventdev_gbl->rx_adapter.lock);
	odp_atomic_init_u32(&_odp_eventdev_gbl->num_started, 0);

	odp_ticketlock_init(&_odp_eventdev_gbl->port_lock);
	for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
		_odp_eventdev_gbl->port[i].linked = 0;

	if (rte_event_dev_info_get(dev_id, &info)) {
		_ODP_ERR("rte_event_dev_info_get failed\n");
		return -1;
	}
	print_dev_info(&info);

	/* Clamp priority levels; without QoS capability only one level */
	_odp_eventdev_gbl->num_prio = RTE_MIN(NUM_PRIO,
					      info.max_event_queue_priority_levels);
	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)) {
		_ODP_PRINT("  Only one QoS level supported!\n");
		_odp_eventdev_gbl->num_prio = 1;
	}

	memset(&config, 0, sizeof(struct rte_event_dev_config));
	config.dequeue_timeout_ns = 0;
	config.nb_events_limit  = info.max_num_events;
	config.nb_event_queues = alloc_queues(_odp_eventdev_gbl, &info);

	config.nb_event_ports = RTE_MIN(ODP_THREAD_COUNT_MAX,
					(int)info.max_event_ports);
	/* RX adapter requires additional port which is reserved when
	 * rte_event_eth_rx_adapter_queue_add() is called. */
	config.nb_event_ports -= 1;
	if (_odp_eventdev_gbl->num_event_ports &&
	    _odp_eventdev_gbl->num_event_ports < config.nb_event_ports)
		config.nb_event_ports = _odp_eventdev_gbl->num_event_ports;

	/* Cap flow count and burst depths to what the device supports */
	num_flows = (EVENT_QUEUE_FLOWS < info.max_event_queue_flows) ?
			EVENT_QUEUE_FLOWS : info.max_event_queue_flows;
	config.nb_event_queue_flows = num_flows;
	config.nb_event_port_dequeue_depth = (MAX_SCHED_BURST <
			info.max_event_port_dequeue_depth) ? MAX_SCHED_BURST :
			info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = (MAX_SCHED_BURST <
			info.max_event_port_enqueue_depth) ? MAX_SCHED_BURST :
			info.max_event_port_enqueue_depth;
	/* RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT not supported by the SW
	 * eventdev */
	config.event_dev_cfg = 0;

	ret = rte_event_dev_configure(dev_id, &config);
	if (ret < 0) {
		_ODP_ERR("rte_event_dev_configure failed\n");
		return -1;
	}
	_odp_eventdev_gbl->config = config;
	_odp_eventdev_gbl->num_event_ports = config.nb_event_ports;

	if (configure_ports(dev_id, &config)) {
		_ODP_ERR("Configuring eventdev ports failed\n");
		return -1;
	}

	if (configure_queues(dev_id, num_flows)) {
		_ODP_ERR("Configuring eventdev queues failed\n");
		return -1;
	}

	/* Eventdev requires that each queue is linked to at least one
	 * port at startup. */
	num_dummy_links = _odp_dummy_link_queues(dev_id, dummy_links,
						 config.nb_event_queues);

	if (!(info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED)) {
		/* Device needs a service core to run its scheduler */
		uint32_t service_id;

		ret = rte_event_dev_service_id_get(dev_id, &service_id);
		if (ret) {
			_ODP_ERR("Unable to retrieve service ID\n");
			return -1;
		}
		if (_odp_service_setup(service_id)) {
			_ODP_ERR("Failed to setup service core\n");
			return -1;
		}
	}

	if (rte_event_dev_start(dev_id)) {
		_ODP_ERR("rte_event_dev_start failed\n");
		return -1;
	}

	/* Unlink all ports from queues. Thread specific ports will be linked
	 * when the application calls schedule/enqueue for the first time. */
	if (_odp_dummy_unlink_queues(dev_id, dummy_links, num_dummy_links)) {
		rte_event_dev_stop(dev_id);
		rte_event_dev_close(dev_id);
		return -1;
	}

	/* Scheduling groups */
	odp_ticketlock_init(&_odp_eventdev_gbl->grp_lock);

	for (i = 0; i < NUM_SCHED_GRPS; i++) {
		memset(_odp_eventdev_gbl->grp[i].name, 0,
		       ODP_SCHED_GROUP_NAME_LEN);
		odp_thrmask_zero(&_odp_eventdev_gbl->grp[i].mask);
	}

	/* Predefined groups are always allocated */
	_odp_eventdev_gbl->grp[ODP_SCHED_GROUP_ALL].allocated = 1;
	_odp_eventdev_gbl->grp[ODP_SCHED_GROUP_WORKER].allocated = 1;
	_odp_eventdev_gbl->grp[ODP_SCHED_GROUP_CONTROL].allocated = 1;

	odp_thrmask_setall(&_odp_eventdev_gbl->mask_all);

	return 0;
}
+
/* Global queue subsystem init: reserve shared memory for the eventdev
 * global state, initialize the event device and per-entry locks, and
 * record queue size limits.
 *
 * Returns 0 on success, -1 on failure. */
static int queue_init_global(void)
{
	uint32_t max_queue_size;
	uint32_t i;
	odp_shm_t shm;
	odp_queue_capability_t capa;

	_ODP_DBG("Queue init global\n");

	/* Fill in queue entry field offsets for inline functions */
	memset(&_odp_queue_inline_offset, 0,
	       sizeof(_odp_queue_inline_offset_t));
	_odp_queue_inline_offset.context = offsetof(queue_entry_t, param.context);

	shm = odp_shm_reserve("_odp_queue_eventdev_global",
			      sizeof(eventdev_global_t),
			      ODP_CACHE_LINE_SIZE, 0);

	_odp_eventdev_gbl = odp_shm_addr(shm);

	if (_odp_eventdev_gbl == NULL)
		return -1;

	memset(_odp_eventdev_gbl, 0, sizeof(eventdev_global_t));
	/* Keep shm handle for freeing in queue_term_global() */
	_odp_eventdev_gbl->shm = shm;

	if (init_event_dev())
		return -1;

	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
		/* init locks */
		queue_entry_t *queue = qentry_from_index(i);

		LOCK_INIT(queue);
		queue->index = i;
	}

	/* Scheduled queue size is bounded by the eventdev event limit */
	max_queue_size = _odp_eventdev_gbl->config.nb_events_limit;
	_odp_eventdev_gbl->plain_config.default_queue_size = DEFAULT_QUEUE_SIZE;
	_odp_eventdev_gbl->plain_config.max_queue_size = MAX_QUEUE_SIZE;
	_odp_eventdev_gbl->sched_config.max_queue_size = max_queue_size;

	queue_capa(&capa, 0);

	_ODP_DBG("  queue_entry_t size %zu\n", sizeof(queue_entry_t));
	_ODP_DBG("  max num queues     %u\n", capa.max_queues);
	_ODP_DBG("  max plain queue size %u\n", capa.plain.max_size);
	_ODP_DBG("  max num lockfree   %u\n", capa.plain.lockfree.max_num);
	_ODP_DBG("  max lockfree size  %u\n\n", capa.plain.lockfree.max_size);

	return 0;
}
+
+static int queue_init_local(void)
+{
+ int thread_id = odp_thread_id();
+
+ memset(&_odp_eventdev_local, 0, sizeof(eventdev_local_t));
+
+ _ODP_ASSERT(thread_id <= UINT8_MAX);
+ _odp_eventdev_local.port_id = thread_id;
+ _odp_eventdev_local.paused = 0;
+ _odp_eventdev_local.started = 0;
+
+ return 0;
+}
+
+static int queue_term_local(void)
+{
+ return 0;
+}
+
+static int queue_term_global(void)
+{
+ int ret = 0;
+ queue_entry_t *queue;
+ int i;
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ queue = qentry_from_index(i);
+ LOCK(queue);
+ if (queue->status != QUEUE_STATUS_FREE) {
+ _ODP_ERR("Not destroyed queue: %s\n", queue->name);
+ ret = -1;
+ }
+ UNLOCK(queue);
+ }
+
+ if (_odp_rx_adapter_close())
+ ret = -1;
+
+ rte_event_dev_stop(_odp_eventdev_gbl->dev_id);
+
+ /* Fix for DPDK 17.11 sync bug */
+ sleep(1);
+
+ if (rte_event_dev_close(_odp_eventdev_gbl->dev_id)) {
+ _ODP_ERR("Failed to close event device\n");
+ ret = -1;
+ }
+
+ if (odp_shm_free(_odp_eventdev_gbl->shm)) {
+ _ODP_ERR("Shm free failed for evendev\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int queue_capability(odp_queue_capability_t *capa)
+{
+ return queue_capa(capa, 1);
+}
+
+static odp_queue_type_t queue_type(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->type;
+}
+
+static odp_schedule_sync_t queue_sched_type(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.sync;
+}
+
+static odp_schedule_prio_t queue_sched_prio(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.prio;
+}
+
+static odp_schedule_group_t queue_sched_group(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.group;
+}
+
+static uint32_t queue_lock_count(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+ queue->param.sched.lock_count : 0;
+}
+
/* Create a queue of the requested type.
 *
 * Scheduled queues use entry indices [0, RTE_EVENT_MAX_QUEUES_PER_DEV)
 * so the index maps directly to an eventdev queue id; plain queues use
 * the remaining indices. A free entry whose pre-assigned sync type
 * matches the request is claimed under its lock.
 *
 * Returns the new handle, or ODP_QUEUE_INVALID on bad parameters or
 * when no suitable free entry exists. */
static odp_queue_t queue_create(const char *name,
				const odp_queue_param_t *param)
{
	uint32_t i;
	uint32_t max_idx;
	queue_entry_t *queue;
	odp_queue_type_t type;
	odp_queue_param_t default_param;
	odp_queue_t handle = ODP_QUEUE_INVALID;

	if (param == NULL) {
		odp_queue_param_init(&default_param);
		param = &default_param;
	}

	type = param->type;

	if (type == ODP_QUEUE_TYPE_SCHED) {
		if (param->sched.prio < odp_schedule_min_prio() ||
		    param->sched.prio > odp_schedule_max_prio()) {
			_ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
			return ODP_QUEUE_INVALID;
		}
		if (param->size > _odp_eventdev_gbl->sched_config.max_queue_size)
			return ODP_QUEUE_INVALID;
	} else {
		if (param->size > _odp_eventdev_gbl->plain_config.max_queue_size)
			return ODP_QUEUE_INVALID;
	}

	/* Only blocking queues supported */
	if (param->nonblocking != ODP_BLOCKING)
		return ODP_QUEUE_INVALID;

	/* First RTE_EVENT_MAX_QUEUES_PER_DEV IDs are mapped directly
	 * to eventdev queue IDs */
	if (type == ODP_QUEUE_TYPE_SCHED) {
		/* Start scheduled queue indices from zero to enable direct
		 * mapping to scheduler implementation indices. */
		i = 0;
		max_idx = RTE_EVENT_MAX_QUEUES_PER_DEV;
	} else {
		i = RTE_EVENT_MAX_QUEUES_PER_DEV;
		/* All internal queues are of type plain */
		max_idx = CONFIG_MAX_QUEUES;
	}

	for (; i < max_idx; i++) {
		queue = qentry_from_index(i);

		/* Unlocked pre-check; confirmed again under the lock */
		if (queue->status != QUEUE_STATUS_FREE)
			continue;

		/* Scheduled entries carry a fixed eventdev sync type that
		 * must match the request */
		if (type == ODP_QUEUE_TYPE_SCHED &&
		    queue->sync != param->sched.sync)
			continue;

		LOCK(queue);
		if (queue->status == QUEUE_STATUS_FREE) {
			if (queue_init(queue, name, param)) {
				UNLOCK(queue);
				_ODP_ERR("Queue init failed\n");
				return ODP_QUEUE_INVALID;
			}

			if (type == ODP_QUEUE_TYPE_SCHED)
				queue->status = QUEUE_STATUS_SCHED;
			else
				queue->status = QUEUE_STATUS_READY;
			handle = queue_from_qentry(queue);
			UNLOCK(queue);
			break;
		}
		UNLOCK(queue);
	}

	if (handle == ODP_QUEUE_INVALID) {
		_ODP_ERR("No free queues left\n");
		return ODP_QUEUE_INVALID;
	}

	if (type == ODP_QUEUE_TYPE_SCHED) {
		/* Register the queue with the scheduler implementation */
		if (_odp_sched_fn->create_queue(queue->index,
						&queue->param.sched)) {
			queue->status = QUEUE_STATUS_FREE;
			_ODP_ERR("schedule queue init failed\n");
			return ODP_QUEUE_INVALID;
		}
	}

	return handle;
}
+
+static int queue_destroy(odp_queue_t handle)
+{
+ queue_entry_t *queue;
+
+ queue = qentry_from_handle(handle);
+
+ if (handle == ODP_QUEUE_INVALID)
+ return -1;
+
+ LOCK(queue);
+ if (queue->status == QUEUE_STATUS_FREE) {
+ UNLOCK(queue);
+ _ODP_ERR("queue \"%s\" already free\n", queue->name);
+ return -1;
+ }
+ if (queue->type == ODP_QUEUE_TYPE_PLAIN) {
+ if (ring_mpmc_is_empty(queue->ring_mpmc) == 0) {
+ UNLOCK(queue);
+ _ODP_ERR("queue \"%s\" not empty\n", queue->name);
+ return -1;
+ }
+ ring_mpmc_free(queue->ring_mpmc);
+ }
+
+ switch (queue->status) {
+ case QUEUE_STATUS_READY:
+ queue->status = QUEUE_STATUS_FREE;
+ break;
+ case QUEUE_STATUS_SCHED:
+ queue->status = QUEUE_STATUS_FREE;
+ _odp_sched_fn->destroy_queue(queue->index);
+ break;
+ default:
+ _ODP_ABORT("Unexpected queue status\n");
+ }
+
+ UNLOCK(queue);
+
+ return 0;
+}
+
+static int queue_context_set(odp_queue_t handle, void *context,
+ uint32_t len ODP_UNUSED)
+{
+ odp_mb_full();
+ qentry_from_handle(handle)->param.context = context;
+ odp_mb_full();
+ return 0;
+}
+
+static odp_queue_t queue_lookup(const char *name)
+{
+ uint32_t i;
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ queue_entry_t *queue = qentry_from_index(i);
+
+ if (queue->status == QUEUE_STATUS_FREE)
+ continue;
+
+ LOCK(queue);
+ if (strcmp(name, queue->name) == 0) {
+ /* found it */
+ UNLOCK(queue);
+ return queue_from_qentry(queue);
+ }
+ UNLOCK(queue);
+ }
+
+ return ODP_QUEUE_INVALID;
+}
+
+static inline int _plain_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ queue_entry_t *queue;
+ int num_enq;
+ ring_mpmc_t ring_mpmc;
+
+ queue = qentry_from_handle(handle);
+ ring_mpmc = queue->ring_mpmc;
+
+ num_enq = ring_mpmc_enq_multi(ring_mpmc, (void **)event_hdr, num);
+
+ return num_enq;
+}
+
+static inline int _plain_queue_deq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ int num_deq;
+ queue_entry_t *queue;
+ ring_mpmc_t ring_mpmc;
+
+ queue = qentry_from_handle(handle);
+ ring_mpmc = queue->ring_mpmc;
+
+ num_deq = ring_mpmc_deq_multi(ring_mpmc, (void **)event_hdr, num);
+
+ return num_deq;
+}
+
+static int plain_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ return _plain_queue_enq_multi(handle, event_hdr, num);
+}
+
+static int plain_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ int ret;
+
+ ret = _plain_queue_enq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+static int plain_queue_deq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ return _plain_queue_deq_multi(handle, event_hdr, num);
+}
+
+static _odp_event_hdr_t *plain_queue_deq(odp_queue_t handle)
+{
+ _odp_event_hdr_t *event_hdr = NULL;
+ int ret;
+
+ ret = _plain_queue_deq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return event_hdr;
+ else
+ return NULL;
+}
+
+static int error_enqueue(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ (void)event_hdr;
+
+ _ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return -1;
+}
+
+static int error_enqueue_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+
+{
+ (void)event_hdr;
+ (void)num;
+
+ _ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return -1;
+}
+
+static _odp_event_hdr_t *error_dequeue(odp_queue_t handle)
+{
+ _ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return NULL;
+}
+
+static int error_dequeue_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ (void)event_hdr;
+ (void)num;
+
+ _ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return -1;
+}
+
+static void queue_param_init(odp_queue_param_t *params)
+{
+ memset(params, 0, sizeof(odp_queue_param_t));
+ params->type = ODP_QUEUE_TYPE_PLAIN;
+ params->enq_mode = ODP_QUEUE_OP_MT;
+ params->deq_mode = ODP_QUEUE_OP_MT;
+ params->nonblocking = ODP_BLOCKING;
+ params->sched.prio = odp_schedule_default_prio();
+ params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ params->sched.group = ODP_SCHED_GROUP_ALL;
+}
+
/* Copy a queue's name pointer and parameters into info.
 *
 * Returns 0 on success, -1 on NULL info, out-of-range handle or a free
 * queue entry. */
static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
{
	uint32_t queue_id;
	queue_entry_t *queue;
	int status;

	if (odp_unlikely(info == NULL)) {
		_ODP_ERR("Unable to store info, NULL ptr given\n");
		return -1;
	}

	queue_id = queue_to_index(handle);

	if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
		_ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
		return -1;
	}

	queue = qentry_from_index(queue_id);

	LOCK(queue);
	status = queue->status;

	if (odp_unlikely(status == QUEUE_STATUS_FREE)) {
		UNLOCK(queue);
		_ODP_ERR("Invalid queue status:%d\n", status);
		return -1;
	}

	/* Copied under the lock so name and param are consistent */
	info->name = queue->name;
	info->param = queue->param;

	UNLOCK(queue);

	return 0;
}
+
+static void queue_print(odp_queue_t handle)
+{
+ odp_pktio_info_t pktio_info;
+ queue_entry_t *queue;
+ uint32_t queue_id;
+ int status;
+
+ queue_id = queue_to_index(handle);
+
+ if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+ _ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
+ return;
+ }
+
+ queue = qentry_from_index(queue_id);
+
+ LOCK(queue);
+ status = queue->status;
+
+ if (odp_unlikely(status == QUEUE_STATUS_FREE)) {
+ UNLOCK(queue);
+ _ODP_ERR("Invalid queue status:%d\n", status);
+ return;
+ }
+ _ODP_PRINT("\nQueue info\n");
+ _ODP_PRINT("----------\n");
+ _ODP_PRINT(" handle %p\n", (void *)handle);
+ _ODP_PRINT(" index %" PRIu32 "\n", queue->index);
+ _ODP_PRINT(" name %s\n", queue->name);
+ _ODP_PRINT(" enq mode %s\n",
+ queue->param.enq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+ (queue->param.enq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+ (queue->param.enq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+ "unknown")));
+ _ODP_PRINT(" deq mode %s\n",
+ queue->param.deq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+ (queue->param.deq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+ (queue->param.deq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+ "unknown")));
+ _ODP_PRINT(" non-blocking %s\n",
+ queue->param.nonblocking == ODP_BLOCKING ? "ODP_BLOCKING" :
+ (queue->param.nonblocking == ODP_NONBLOCKING_LF ? "ODP_NONBLOCKING_LF" :
+ (queue->param.nonblocking == ODP_NONBLOCKING_WF ? "ODP_NONBLOCKING_WF" :
+ "unknown")));
+ _ODP_PRINT(" type %s\n",
+ queue->type == ODP_QUEUE_TYPE_PLAIN ? "ODP_QUEUE_TYPE_PLAIN" :
+ (queue->type == ODP_QUEUE_TYPE_SCHED ? "ODP_QUEUE_TYPE_SCHED" : "unknown"));
+ if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+ _ODP_PRINT(" sync %s\n",
+ queue->param.sched.sync == ODP_SCHED_SYNC_PARALLEL ?
+ "ODP_SCHED_SYNC_PARALLEL" :
+ (queue->param.sched.sync == ODP_SCHED_SYNC_ATOMIC ?
+ "ODP_SCHED_SYNC_ATOMIC" :
+ (queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+ "ODP_SCHED_SYNC_ORDERED" : "unknown")));
+ _ODP_PRINT(" priority %d\n", queue->param.sched.prio);
+ _ODP_PRINT(" group %d\n", queue->param.sched.group);
+ }
+ if (queue->pktin.pktio != ODP_PKTIO_INVALID) {
+ if (!odp_pktio_info(queue->pktin.pktio, &pktio_info))
+ _ODP_PRINT(" pktin %s\n", pktio_info.name);
+ }
+ if (queue->pktout.pktio != ODP_PKTIO_INVALID) {
+ if (!odp_pktio_info(queue->pktout.pktio, &pktio_info))
+ _ODP_PRINT(" pktout %s\n", pktio_info.name);
+ }
+ _ODP_PRINT(" timers %" PRIu64 "\n",
+ odp_atomic_load_u64(&queue->num_timers));
+ _ODP_PRINT(" status %s\n",
+ queue->status == QUEUE_STATUS_READY ? "ready" :
+ (queue->status == QUEUE_STATUS_SCHED ? "scheduled" : "clearunknown"));
+ _ODP_PRINT(" param.size %" PRIu32 "\n", queue->param.size);
+ if (queue->type == ODP_QUEUE_TYPE_PLAIN) {
+ _ODP_PRINT(" implementation ring_mpmc\n");
+ _ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+ ring_mpmc_length(queue->ring_mpmc),
+ ring_mpmc_max_length(queue->ring_mpmc));
+ } else {
+ _ODP_PRINT(" implementation eventdev\n");
+ }
+ _ODP_PRINT("\n");
+
+ UNLOCK(queue);
+}
+
/* Enqueue events to a scheduled queue via the eventdev.
 *
 * Queue attributes (sync type, id, priority) are read under the queue
 * lock, then the burst is submitted as RTE_EVENT_OP_NEW events from this
 * thread's event port.
 *
 * Returns the number of events enqueued, 0 if this thread has no valid
 * event port, or -1 on bad queue status.
 *
 * NOTE(review): assumes num <= CONFIG_BURST_SIZE (size of ev[]) — callers
 * appear to cap num at QUEUE_MULTI_MAX; confirm the two limits agree. */
static inline int _sched_queue_enq_multi(odp_queue_t handle,
					 _odp_event_hdr_t *event_hdr[], int num)
{
	queue_entry_t *queue;
	struct rte_event ev[CONFIG_BURST_SIZE];
	uint16_t num_enq = 0;
	uint8_t dev_id = _odp_eventdev_gbl->dev_id;
	uint8_t port_id = _odp_eventdev_local.port_id;
	uint8_t sched;
	uint8_t queue_id;
	uint8_t priority;
	int i;

	queue = qentry_from_handle(handle);

	LOCK(queue);

	if (odp_unlikely(queue->status != QUEUE_STATUS_SCHED)) {
		UNLOCK(queue);
		_ODP_ERR("Bad queue status\n");
		return -1;
	}

	sched = event_schedule_type(queue->param.sched.sync);
	queue_id = queue->index;
	priority = queue->eventdev.prio;

	UNLOCK(queue);

	/* Thread id beyond the configured port count has no event port */
	if (odp_unlikely(port_id >= _odp_eventdev_gbl->num_event_ports)) {
		_ODP_ERR("Max %" PRIu8 " scheduled workers supported\n",
			 _odp_eventdev_gbl->num_event_ports);
		return 0;
	}

	for (i = 0; i < num; i++) {
		ev[i].flow_id = 0;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].sched_type = sched;
		ev[i].queue_id = queue_id;
		ev[i].event_type = RTE_EVENT_TYPE_CPU;
		ev[i].sub_event_type = 0;
		ev[i].priority = priority;
		ev[i].mbuf = &event_hdr[i]->mb;
	}

	num_enq = rte_event_enqueue_new_burst(dev_id, port_id, ev, num);

	return num_enq;
}
+
+static int sched_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ return _sched_queue_enq_multi(handle, event_hdr, num);
+}
+
+static int sched_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ int ret;
+
+ ret = _sched_queue_enq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
/* Initialize a claimed queue entry from creation parameters.
 *
 * Caller holds the entry lock. Plain queues get an MPMC ring sized up
 * from param->size; scheduled queues get eventdev enqueue functions and
 * a priority converted to eventdev's inverted scale.
 *
 * Returns 0 on success, -1 on bad lock count, oversized queue or ring
 * allocation failure. */
static int queue_init(queue_entry_t *queue, const char *name,
		      const odp_queue_param_t *param)
{
	uint32_t queue_size;
	odp_queue_type_t queue_type;

	queue_type = param->type;

	if (name == NULL) {
		queue->name[0] = 0;
	} else {
		strncpy(queue->name, name, ODP_QUEUE_NAME_LEN - 1);
		queue->name[ODP_QUEUE_NAME_LEN - 1] = 0;
	}
	memcpy(&queue->param, param, sizeof(odp_queue_param_t));
	if (queue->param.sched.lock_count > _odp_sched_fn->max_ordered_locks())
		return -1;

	/* Convert ODP priority to eventdev priority:
	 * ODP_SCHED_PRIO_HIGHEST == RTE_EVENT_DEV_PRIORITY_LOWEST */
	queue->eventdev.prio = odp_schedule_max_prio() - param->sched.prio;

	/* Scheduled queues are dequeued only through the scheduler */
	if (queue_type == ODP_QUEUE_TYPE_SCHED)
		queue->param.deq_mode = ODP_QUEUE_OP_DISABLED;

	queue->type = queue_type;
	odp_atomic_init_u64(&queue->num_timers, 0);

	queue->pktin = PKTIN_INVALID;
	queue->pktout = PKTOUT_INVALID;

	queue_size = param->size;
	if (queue_size == 0)
		queue_size = _odp_eventdev_gbl->plain_config.default_queue_size;

	if (queue_size < MIN_QUEUE_SIZE)
		queue_size = MIN_QUEUE_SIZE;

	if (queue_type == ODP_QUEUE_TYPE_PLAIN &&
	    queue_size > _odp_eventdev_gbl->plain_config.max_queue_size) {
		_ODP_ERR("Too large queue size %u\n", queue_size);
		return -1;
	}

	/* Ring size must be larger than queue_size (the ring holds one
	 * element less than its size), so bump an exact power of two up
	 * before rounding */
	if (_ODP_CHECK_IS_POWER2(queue_size))
		queue_size++;

	/* Round up if not already a power of two */
	queue_size = _ODP_ROUNDUP_POWER2_U32(queue_size);

	/* Default to error functions */
	queue->enqueue = error_enqueue;
	queue->enqueue_multi = error_enqueue_multi;
	queue->dequeue = error_dequeue;
	queue->dequeue_multi = error_dequeue_multi;
	queue->orig_dequeue_multi = error_dequeue_multi;

	if (queue_type == ODP_QUEUE_TYPE_PLAIN) {
		queue->enqueue = plain_queue_enq;
		queue->enqueue_multi = plain_queue_enq_multi;
		queue->dequeue = plain_queue_deq;
		queue->dequeue_multi = plain_queue_deq_multi;
		queue->orig_dequeue_multi = plain_queue_deq_multi;

		queue->ring_mpmc = ring_mpmc_create(queue->name, queue_size);
		if (queue->ring_mpmc == NULL) {
			_ODP_ERR("Creating MPMC ring failed\n");
			return -1;
		}
	} else {
		queue->enqueue = sched_queue_enq;
		queue->enqueue_multi = sched_queue_enq_multi;
	}
	return 0;
}
+
+static uint64_t queue_to_u64(odp_queue_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+static odp_pktout_queue_t queue_get_pktout(odp_queue_t handle)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ return qentry->pktout;
+}
+
+static void queue_set_pktout(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ qentry->pktout.pktio = pktio;
+ qentry->pktout.index = index;
+}
+
+static odp_pktin_queue_t queue_get_pktin(odp_queue_t handle)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ return qentry->pktin;
+}
+
+static void queue_set_pktin(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ qentry->pktin.pktio = pktio;
+ qentry->pktin.index = index;
+}
+
+static void queue_set_enq_deq_func(odp_queue_t handle,
+ queue_enq_fn_t enq,
+ queue_enq_multi_fn_t enq_multi,
+ queue_deq_fn_t deq,
+ queue_deq_multi_fn_t deq_multi)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ if (enq)
+ qentry->enqueue = enq;
+
+ if (enq_multi)
+ qentry->enqueue_multi = enq_multi;
+
+ if (deq)
+ qentry->dequeue = deq;
+
+ if (deq_multi)
+ qentry->dequeue_multi = deq_multi;
+}
+
+static int queue_orig_multi(odp_queue_t handle,
+ _odp_event_hdr_t **event_hdr, int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->orig_dequeue_multi(handle, event_hdr, num);
+}
+
+static int queue_api_enq_multi(odp_queue_t handle,
+ const odp_event_t ev[], int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ if (odp_unlikely(num == 0))
+ return 0;
+
+ if (num > QUEUE_MULTI_MAX)
+ num = QUEUE_MULTI_MAX;
+
+ return queue->enqueue_multi(handle, (_odp_event_hdr_t **)(uintptr_t)ev, num);
+}
+
+static void queue_timer_add(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ odp_atomic_inc_u64(&queue->num_timers);
+}
+
+static void queue_timer_rem(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ odp_atomic_dec_u64(&queue->num_timers);
+}
+
+static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->enqueue(handle, (_odp_event_hdr_t *)(uintptr_t)ev);
+}
+
+static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+ int ret;
+
+ if (num > QUEUE_MULTI_MAX)
+ num = QUEUE_MULTI_MAX;
+
+ ret = queue->dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->num_timers))
+ timer_run(ret ? 2 : 1);
+
+ return ret;
+}
+
+static odp_event_t queue_api_deq(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+ odp_event_t ev = (odp_event_t)queue->dequeue(handle);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->num_timers))
+ timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
+
+ return ev;
+}
+
+/* API functions */
+_odp_queue_api_fn_t _odp_queue_eventdev_api = {
+ .queue_create = queue_create,
+ .queue_destroy = queue_destroy,
+ .queue_lookup = queue_lookup,
+ .queue_capability = queue_capability,
+ .queue_context_set = queue_context_set,
+ .queue_enq = queue_api_enq,
+ .queue_enq_multi = queue_api_enq_multi,
+ .queue_deq = queue_api_deq,
+ .queue_deq_multi = queue_api_deq_multi,
+ .queue_type = queue_type,
+ .queue_sched_type = queue_sched_type,
+ .queue_sched_prio = queue_sched_prio,
+ .queue_sched_group = queue_sched_group,
+ .queue_lock_count = queue_lock_count,
+ .queue_to_u64 = queue_to_u64,
+ .queue_param_init = queue_param_init,
+ .queue_info = queue_info,
+ .queue_print = queue_print
+};
+
+/* Functions towards internal components */
+queue_fn_t _odp_queue_eventdev_fn = {
+ .init_global = queue_init_global,
+ .term_global = queue_term_global,
+ .init_local = queue_init_local,
+ .term_local = queue_term_local,
+ .get_pktout = queue_get_pktout,
+ .set_pktout = queue_set_pktout,
+ .get_pktin = queue_get_pktin,
+ .set_pktin = queue_set_pktin,
+ .set_enq_deq_fn = queue_set_enq_deq_func,
+ .orig_deq_multi = queue_orig_multi,
+ .timer_add = queue_timer_add,
+ .timer_rem = queue_timer_rem
+};
diff --git a/platform/linux-dpdk/odp_queue_if.c b/platform/linux-dpdk/odp_queue_if.c
new file mode 100644
index 000000000..efe3eb155
--- /dev/null
+++ b/platform/linux-dpdk/odp_queue_if.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+
+#include <odp_queue_if.h>
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <odp/api/align.h>
+#include <odp/api/plat/queue_inline_types.h>
+
+#include <odp/visibility_begin.h>
+
/* Queue entry field offsets consumed by inline API functions (ABI visible) */
_odp_queue_inline_offset_t _odp_queue_inline_offset ODP_ALIGNED_CACHE;
/* Active queue API dispatch table, selected in _odp_queue_init_global() */
const _odp_queue_api_fn_t *_odp_queue_api;

#include <odp/visibility_end.h>

/* Implementations provided by the basic and eventdev queue backends */
extern const _odp_queue_api_fn_t _odp_queue_basic_api;
extern const queue_fn_t _odp_queue_basic_fn;

extern const _odp_queue_api_fn_t _odp_queue_eventdev_api;
extern const queue_fn_t _odp_queue_eventdev_fn;

/* Internal queue interface dispatch table, selected alongside the API table */
const queue_fn_t *_odp_queue_fn;
+
+odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
+{
+ return _odp_queue_api->queue_create(name, param);
+}
+
+int odp_queue_create_multi(const char *name[], const odp_queue_param_t param[],
+ odp_bool_t share_param, odp_queue_t queue[], int num)
+{
+ return _odp_queue_api->queue_create_multi(name, param, share_param,
+ queue, num);
+}
+
+int odp_queue_destroy(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_destroy(queue);
+}
+
+int odp_queue_destroy_multi(odp_queue_t queue[], int num)
+{
+ return _odp_queue_api->queue_destroy_multi(queue, num);
+}
+
+odp_queue_t odp_queue_lookup(const char *name)
+{
+ return _odp_queue_api->queue_lookup(name);
+}
+
+int odp_queue_capability(odp_queue_capability_t *capa)
+{
+ return _odp_queue_api->queue_capability(capa);
+}
+
+int odp_queue_context_set(odp_queue_t queue, void *context, uint32_t len)
+{
+ return _odp_queue_api->queue_context_set(queue, context, len);
+}
+
+odp_queue_type_t odp_queue_type(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_type(queue);
+}
+
+odp_schedule_sync_t odp_queue_sched_type(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_sched_type(queue);
+}
+
+odp_schedule_prio_t odp_queue_sched_prio(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_sched_prio(queue);
+}
+
+odp_schedule_group_t odp_queue_sched_group(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_sched_group(queue);
+}
+
+uint32_t odp_queue_lock_count(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_lock_count(queue);
+}
+
+uint64_t odp_queue_to_u64(odp_queue_t hdl)
+{
+ return _odp_queue_api->queue_to_u64(hdl);
+}
+
+void odp_queue_param_init(odp_queue_param_t *param)
+{
+ _odp_queue_api->queue_param_init(param);
+}
+
+int odp_queue_info(odp_queue_t queue, odp_queue_info_t *info)
+{
+ return _odp_queue_api->queue_info(queue, info);
+}
+
+void odp_queue_print(odp_queue_t queue)
+{
+ _odp_queue_api->queue_print(queue);
+}
+
+void odp_queue_print_all(void)
+{
+ _odp_queue_api->queue_print_all();
+}
+
+int _odp_queue_init_global(void)
+{
+ const char *sched = getenv("ODP_SCHEDULER");
+
+ if (sched == NULL || !strcmp(sched, "default"))
+ sched = _ODP_SCHEDULE_DEFAULT;
+
+ if (!strcmp(sched, "basic") || !strcmp(sched, "sp")) {
+ _odp_queue_fn = &_odp_queue_basic_fn;
+ _odp_queue_api = &_odp_queue_basic_api;
+ } else if (!strcmp(sched, "eventdev")) {
+ _odp_queue_fn = &_odp_queue_eventdev_fn;
+ _odp_queue_api = &_odp_queue_eventdev_api;
+ } else {
+ _ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
+ return -1;
+ }
+
+ return _odp_queue_fn->init_global();
+}
+
+int _odp_queue_term_global(void)
+{
+ return _odp_queue_fn->term_global();
+}
diff --git a/platform/linux-dpdk/odp_queue_spsc.c b/platform/linux-dpdk/odp_queue_spsc.c
new file mode 100644
index 000000000..61dde0ed0
--- /dev/null
+++ b/platform/linux-dpdk/odp_queue_spsc.c
@@ -0,0 +1,95 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include <odp/api/hints.h>
+
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_queue_basic_internal.h>
+
+static inline int spsc_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ queue_entry_t *queue;
+ ring_spsc_t ring_spsc;
+
+ queue = qentry_from_handle(handle);
+ ring_spsc = queue->ring_spsc;
+
+ if (odp_unlikely(queue->status < QUEUE_STATUS_READY)) {
+ _ODP_ERR("Bad queue status\n");
+ return -1;
+ }
+
+ return ring_spsc_enq_multi(ring_spsc, (void **)event_hdr, num);
+}
+
+static inline int spsc_deq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ queue_entry_t *queue;
+ ring_spsc_t ring_spsc;
+
+ queue = qentry_from_handle(handle);
+ ring_spsc = queue->ring_spsc;
+
+ if (odp_unlikely(queue->status < QUEUE_STATUS_READY)) {
+ /* Bad queue, or queue has been destroyed. */
+ return -1;
+ }
+
+ return ring_spsc_deq_multi(ring_spsc, (void **)event_hdr, num);
+}
+
+static int queue_spsc_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+ int num)
+{
+ return spsc_enq_multi(handle, event_hdr, num);
+}
+
+static int queue_spsc_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ int ret;
+
+ ret = spsc_enq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+static int queue_spsc_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+ int num)
+{
+ return spsc_deq_multi(handle, event_hdr, num);
+}
+
+static _odp_event_hdr_t *queue_spsc_deq(odp_queue_t handle)
+{
+ _odp_event_hdr_t *event_hdr = NULL;
+ int ret;
+
+ ret = spsc_deq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return event_hdr;
+ else
+ return NULL;
+}
+
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
+{
+ queue->enqueue = queue_spsc_enq;
+ queue->dequeue = queue_spsc_deq;
+ queue->enqueue_multi = queue_spsc_enq_multi;
+ queue->dequeue_multi = queue_spsc_deq_multi;
+ queue->orig_dequeue_multi = queue_spsc_deq_multi;
+
+ queue->ring_spsc = ring_spsc_create(queue->name, queue_size);
+ if (queue->ring_spsc == NULL)
+ _ODP_ABORT("Creating SPSC ring failed\n");
+}
diff --git a/platform/linux-dpdk/odp_schedule_eventdev.c b/platform/linux-dpdk/odp_schedule_eventdev.c
new file mode 100644
index 000000000..4ef8a51b7
--- /dev/null
+++ b/platform/linux-dpdk/odp_schedule_eventdev.c
@@ -0,0 +1,1131 @@
+/* Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/cpu.h>
+#include <odp/api/event.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/queue.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/thrmask.h>
+
+#include <odp/api/plat/schedule_inline_types.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_eventdev_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_schedule_if.h>
+#include <odp_timer_internal.h>
+
+#include <rte_config.h>
+#include <rte_ethdev.h>
+#include <rte_eventdev.h>
+#include <rte_event_eth_rx_adapter.h>
+#include <rte_service.h>
+
+#include <inttypes.h>
+#include <string.h>
+
/* Start of named groups in group mask arrays */
#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)

/* Number of scheduling groups */
#define NUM_SCHED_GRPS 32

/* pktio handles are 1-based: handle = index + 1 */
static inline odp_pktio_t index_to_pktio(int pktio_index)
{
	return (odp_pktio_t)(uintptr_t)pktio_index + 1;
}

/* Map an eventdev queue id (== ODP queue index) back to its handle */
static inline odp_queue_t queue_id_to_queue(uint8_t queue_id)
{
	return queue_from_qentry(qentry_from_index(queue_id));
}

/* Events are carried directly in mbufs on this platform; a plain cast
 * converts between the two representations. */
static odp_event_t mbuf_to_event(struct rte_mbuf *mbuf)
{
	return (odp_event_t)mbuf;
}
+
+static int link_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_ids[],
+ uint8_t priorities[], uint16_t nb_links, uint8_t link_now)
+{
+ int ret;
+
+ odp_ticketlock_lock(&_odp_eventdev_gbl->port_lock);
+
+ if (!_odp_eventdev_gbl->port[port_id].linked && !link_now) {
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
+ return 0;
+ }
+
+ ret = rte_event_port_link(dev_id, port_id, queue_ids, priorities,
+ nb_links);
+ if (ret < 0 || (queue_ids && ret != nb_links)) {
+ _ODP_ERR("rte_event_port_link failed: %d\n", ret);
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
+ return ret;
+ }
+
+ _odp_eventdev_gbl->port[port_id].linked = 1;
+
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
+
+ return ret;
+}
+
/* Unlink event queues from an event port under port_lock.
 *
 * queue_ids == NULL unlinks all queues and marks the port fully
 * unlinked. After a successful unlink request, busy-waits until the
 * device reports no unlinks in progress, so events are no longer being
 * delivered to the port when this returns. Returns 0 when the port was
 * already unlinked, the last DPDK call's status otherwise. */
static int unlink_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_ids[],
		       uint16_t nb_links)
{
	int ret;

	odp_ticketlock_lock(&_odp_eventdev_gbl->port_lock);

	if (!_odp_eventdev_gbl->port[port_id].linked) {
		odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
		return 0;
	}

	ret = rte_event_port_unlink(dev_id, port_id, queue_ids, nb_links);
	if (ret < 0) {
		_ODP_ERR("rte_event_port_unlink failed: %d\n", ret);
		odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);
		return ret;
	}

	/* Wait for the unlink to complete before reporting success */
	do {
		ret = rte_event_port_unlinks_in_progress(dev_id, port_id);
		if (ret < 0) {
			_ODP_ERR("rte_event_port_unlinks_in_progress failed: "
				 "%d\n", ret);
			break;
		}
		odp_cpu_pause();
	} while (ret > 0);

	if (queue_ids == NULL)
		_odp_eventdev_gbl->port[port_id].linked = 0;

	odp_ticketlock_unlock(&_odp_eventdev_gbl->port_lock);

	return ret;
}
+
/* (Re)link this thread's event port to every queue of every allocated
 * scheduling group the thread belongs to, forcing the link (link_now).
 * Called lazily from the schedule loop when the port is found unlinked.
 *
 * Also counts the first successful resume per thread in num_started,
 * which schedule_pktio_start() uses to warn about late pktio starts.
 * Returns 0 on success (including "nothing to link"), -1 on a partial
 * or failed link. */
static int resume_scheduling(uint8_t dev_id, uint8_t port_id)
{
	uint8_t queue_ids[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int nb_links = 0;
	int ret;
	int i;

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	/* Collect all queues of groups this thread is a member of */
	for (i = 0; i < NUM_SCHED_GRPS; i++) {
		int j;

		if (!_odp_eventdev_gbl->grp[i].allocated ||
		    !odp_thrmask_isset(&_odp_eventdev_gbl->grp[i].mask,
				       _odp_eventdev_local.port_id))
			continue;

		for (j = 0; j < RTE_EVENT_MAX_QUEUES_PER_DEV; j++) {
			queue_entry_t *queue = _odp_eventdev_gbl->grp[i].queue[j];

			if (!queue)
				continue;

			queue_ids[nb_links] = queue->index;
			priorities[nb_links] = queue->eventdev.prio;
			nb_links++;
		}
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);

	if (!nb_links)
		return 0;

	ret = link_port(dev_id, port_id, queue_ids, priorities, nb_links, 1);
	if (ret != nb_links)
		return -1;

	if (_odp_eventdev_local.started == 0) {
		odp_atomic_inc_u32(&_odp_eventdev_gbl->num_started);
		_odp_eventdev_local.started = 1;
	}

	return 0;
}
+
/* Link (or unlink, when 'unlink' is set) all queues of a scheduling
 * group to/from the ports of every thread in 'mask'.
 *
 * Caller must hold grp_lock (callers schedule_group_join/leave do).
 * Non-forced link_port() is used, so threads whose ports are currently
 * unlinked are skipped until they resume scheduling themselves.
 * Returns 0 on success, -1 on any port link/unlink failure. */
static int link_group(int group, const odp_thrmask_t *mask, odp_bool_t unlink)
{
	odp_thrmask_t new_mask;
	uint8_t dev_id = _odp_eventdev_gbl->dev_id;
	uint8_t queue_ids[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int nb_links = 0;
	int ret;
	int thr;
	int i;

	/* Gather the group's current queues */
	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
		queue_entry_t *queue = _odp_eventdev_gbl->grp[group].queue[i];

		if (queue == NULL)
			continue;

		queue_ids[nb_links] = queue->index;
		priorities[nb_links] = queue->eventdev.prio;
		nb_links++;
	}

	odp_thrmask_copy(&new_mask, mask);

	/* Thread id is used directly as the event port id */
	thr = odp_thrmask_first(&new_mask);
	while (thr >= 0) {
		uint8_t port_id = thr;

		thr = odp_thrmask_next(&new_mask, thr);

		if (unlink)
			ret = unlink_port(dev_id, port_id, queue_ids, nb_links);
		else
			ret = link_port(dev_id, port_id, queue_ids, priorities,
					nb_links, 0);
		if (ret < 0) {
			_ODP_ERR("Modifying port links failed\n");
			return -1;
		}
	}

	return 0;
}
+
/* Create the event eth RX adapter and record whether the eventdev can
 * take per-RX-queue configuration (MULTI_EVENTQ capability); without it
 * all pktin queues must share one adapter queue (single_queue mode).
 * The adapter port configuration mirrors the eventdev's configured
 * limits. Leaves the adapter in RX_ADAPTER_STOPPED state.
 * Returns 0 on success, -1 on any DPDK failure. */
static int rx_adapter_create(uint8_t dev_id, uint8_t rx_adapter_id,
			     const struct rte_event_dev_config *config)
{
	struct rte_event_port_conf port_config;
	uint32_t capa;
	int ret;

	ret = rte_event_eth_rx_adapter_caps_get(dev_id, rx_adapter_id, &capa);
	if (ret) {
		_ODP_ERR("rte_event_eth_rx_adapter_caps_get failed: %d\n", ret);
		return -1;
	}
	if ((capa & RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ) == 0)
		_odp_eventdev_gbl->rx_adapter.single_queue = 1;

	memset(&port_config, 0, sizeof(struct rte_event_port_conf));
	port_config.new_event_threshold = config->nb_events_limit;
	port_config.dequeue_depth = config->nb_event_port_dequeue_depth;
	port_config.enqueue_depth = config->nb_event_port_enqueue_depth;
	ret = rte_event_eth_rx_adapter_create(rx_adapter_id, dev_id,
					      &port_config);
	if (ret) {
		_ODP_ERR("rte_event_eth_rx_adapter_create failed: %d\n", ret);
		return -1;
	}

	_odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_STOPPED;

	return 0;
}
+
/* Register a pktio's input queues with the RX adapter so received
 * packets are injected into the eventdev as scheduled events.
 *
 * The SW eventdev requires every event queue to have a linked port when
 * queues are added, so unlinked queues are temporarily dummy-linked for
 * the duration of the call. In single_queue mode only one adapter queue
 * (rx_queue_id -1 == all RX queues) is added and the loop exits early.
 * Returns 0 on success, -1 on failure. */
static int rx_adapter_add_queues(uint8_t rx_adapter_id, uint8_t port_id,
				 int num_pktin, int pktin_idx[],
				 odp_queue_t queues[])
{
	int num_dummy_links = _odp_eventdev_gbl->config.nb_event_queues;
	uint8_t dummy_links[num_dummy_links]; /* VLA sized by configured queues */
	int ret = 0;
	int i;

	/* SW eventdev requires that all queues have ports linked */
	num_dummy_links = _odp_dummy_link_queues(_odp_eventdev_gbl->dev_id, dummy_links,
						 num_dummy_links);

	for (i = 0; i < num_pktin; i++) {
		queue_entry_t *queue = qentry_from_handle(queues[i]);
		struct rte_event_eth_rx_adapter_queue_conf qconf;
		struct rte_event ev;
		int32_t rx_queue_id = pktin_idx[i];

		/* Template event: delivered packets target this ODP queue */
		memset(&ev, 0, sizeof(struct rte_event));
		ev.queue_id = queue->index;
		ev.flow_id = 0;
		ev.priority = queue->eventdev.prio;
		ev.sched_type = event_schedule_type(queue->param.sched.sync);

		memset(&qconf, 0,
		       sizeof(struct rte_event_eth_rx_adapter_queue_conf));
		qconf.ev = ev;
		qconf.rx_queue_flags = 0;
		qconf.servicing_weight = 1;

		if (_odp_eventdev_gbl->rx_adapter.single_queue)
			rx_queue_id = -1;

		ret = rte_event_eth_rx_adapter_queue_add(rx_adapter_id, port_id,
							 rx_queue_id, &qconf);
		if (ret) {
			_ODP_ERR("rte_event_eth_rx_adapter_queue_add failed\n");
			return -1;
		}

		if (_odp_eventdev_gbl->rx_adapter.single_queue)
			break;
	}

	if (_odp_dummy_unlink_queues(_odp_eventdev_gbl->dev_id, dummy_links,
				     num_dummy_links))
		return -1;

	return ret;
}
+
/* Stop the RX adapter (if it was ever created) and close all ethdev
 * ports, returning the adapter to its initial state.
 * Returns 0 on success, -1 if stopping the adapter failed. */
int _odp_rx_adapter_close(void)
{
	uint16_t port_id;
	uint8_t rx_adapter_id = _odp_eventdev_gbl->rx_adapter.id;
	int ret = 0;

	/* Adapter was never created: nothing to do */
	if (_odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT)
		return ret;

	if (_odp_eventdev_gbl->rx_adapter.status != RX_ADAPTER_STOPPED &&
	    rte_event_eth_rx_adapter_stop(rx_adapter_id)) {
		_ODP_ERR("Failed to stop RX adapter\n");
		ret = -1;
	}

	RTE_ETH_FOREACH_DEV(port_id) {
		rte_eth_dev_close(port_id);
	}

	_odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_INIT;

	return ret;
}

/* Detach all RX queues of one ethdev port from the adapter and stop
 * the port. Used when a single pktio is stopped. */
void _odp_rx_adapter_port_stop(uint16_t port_id)
{
	uint8_t rx_adapter_id = _odp_eventdev_gbl->rx_adapter.id;

	if (rte_event_eth_rx_adapter_queue_del(rx_adapter_id, port_id, -1))
		_ODP_ERR("Failed to delete RX queue\n");

	rte_eth_dev_stop(port_id);
}
+
/* Scheduler lifecycle hooks: eventdev setup happens elsewhere (in the
 * eventdev init code), so these are no-ops apart from logging. */

static int schedule_init_global(void)
{
	_ODP_DBG("Using eventdev scheduler\n");
	return 0;
}

static int schedule_init_local(void)
{
	return 0;
}

static int schedule_term_local(void)
{
	return 0;
}

static int schedule_term_global(void)
{
	return 0;
}

/* Ordered locks are not implemented; a single dummy lock is reported. */
static uint32_t schedule_max_ordered_locks(void)
{
	return 1;
}

/* Priority range: 0 (lowest) .. num_prio - 1 (highest), default in the
 * middle of the range. */
static inline int schedule_min_prio(void)
{
	return 0;
}

static inline int schedule_max_prio(void)
{
	return _odp_eventdev_gbl->num_prio - 1;
}

static inline int schedule_default_prio(void)
{
	return schedule_max_prio() / 2;
}
+
/* Register a newly created scheduled queue with its scheduling group
 * and link it (non-forced) to the port of every thread currently in the
 * group. Returns 0 on success, -1 on an invalid group. */
static int schedule_create_queue(uint32_t qi,
				 const odp_schedule_param_t *sched_param)
{
	queue_entry_t *queue = qentry_from_index(qi);
	odp_thrmask_t mask;
	uint8_t dev_id = _odp_eventdev_gbl->dev_id;
	uint8_t queue_id = queue->index;
	uint8_t priority = queue->eventdev.prio;
	int thr;

	if (sched_param->group < 0 || sched_param->group >= NUM_SCHED_GRPS) {
		_ODP_ERR("Bad schedule group\n");
		return -1;
	}

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	_odp_eventdev_gbl->grp[sched_param->group].queue[queue_id] = queue;

	mask = _odp_eventdev_gbl->grp[sched_param->group].mask;
	thr = odp_thrmask_first(&mask);
	while (0 <= thr) {
		/* NOTE(review): link_port() failures are ignored here;
		 * an unlinked thread resumes links lazily in the
		 * schedule loop — confirm this is the intent. */
		link_port(dev_id, thr, &queue_id, &priority, 1, 0);
		thr = odp_thrmask_next(&mask, thr);
	}
	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);

	return 0;
}
+
/* Remove a scheduled queue from its group and unlink it from the port
 * of every thread in the group's mask. */
static void schedule_destroy_queue(uint32_t qi)
{
	queue_entry_t *queue = qentry_from_index(qi);
	odp_thrmask_t mask;
	odp_schedule_group_t group = queue->param.sched.group;
	uint8_t dev_id = _odp_eventdev_gbl->dev_id;
	uint8_t queue_id = queue->index;
	int thr;

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	_odp_eventdev_gbl->grp[group].queue[queue_id] = NULL;

	mask = _odp_eventdev_gbl->grp[group].mask;
	thr = odp_thrmask_first(&mask);
	while (0 <= thr) {
		unlink_port(dev_id, thr, &queue_id, 1);
		thr = odp_thrmask_next(&mask, thr);
	}
	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
}
+
/* Attach a started pktio's input queues to the eventdev RX adapter.
 * Creates the adapter on first use, registers the pktin queues, sets up
 * the RX service core (SW eventdev) if one is needed, and starts the
 * adapter. Aborts on any failure, as scheduling cannot work without the
 * adapter. */
static void schedule_pktio_start(int pktio_index, int num_pktin,
				 int pktin_idx[], odp_queue_t queue[])
{
	pktio_entry_t *entry = get_pktio_entry(index_to_pktio(pktio_index));
	uint16_t port_id = _odp_dpdk_pktio_port_id(entry);
	uint8_t rx_adapter_id = _odp_eventdev_gbl->rx_adapter.id;

	/* All eventdev pktio devices should be started before calling
	 * odp_schedule(). This is due to the SW eventdev requirement that all
	 * event queues are linked when rte_event_eth_rx_adapter_queue_add() is
	 * called. */
	if (odp_atomic_load_u32(&_odp_eventdev_gbl->num_started))
		_ODP_PRINT("All ODP pktio devices used by the scheduler should "
			   "be started before calling odp_schedule() for the first time.\n");

	_odp_eventdev_gbl->pktio[port_id] = entry;

	odp_ticketlock_lock(&_odp_eventdev_gbl->rx_adapter.lock);

	/* Lazily create the adapter on the first pktio start */
	if (_odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_INIT &&
	    rx_adapter_create(_odp_eventdev_gbl->dev_id, rx_adapter_id,
			      &_odp_eventdev_gbl->config))
		_ODP_ABORT("Creating eventdev RX adapter failed\n");

	if (rx_adapter_add_queues(rx_adapter_id, port_id, num_pktin, pktin_idx,
				  queue))
		_ODP_ABORT("Adding RX adapter queues failed\n");

	if (_odp_eventdev_gbl->rx_adapter.status == RX_ADAPTER_STOPPED) {
		uint32_t service_id = 0;
		int ret;

		/* -ESRCH means the adapter needs no service core (HW) */
		ret = rte_event_eth_rx_adapter_service_id_get(rx_adapter_id,
							      &service_id);
		if (ret && ret != -ESRCH) {
			_ODP_ABORT("Unable to retrieve service ID\n");
		} else if (!ret) {
			if (_odp_service_setup(service_id))
				_ODP_ABORT("Unable to start RX service\n");
		}

		if (rte_event_eth_rx_adapter_start(rx_adapter_id))
			_ODP_ABORT("Unable to start RX adapter\n");

		_odp_eventdev_gbl->rx_adapter.status = RX_ADAPTER_RUNNING;
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->rx_adapter.lock);
}
+
/* Post-classification dispatch: split a received packet burst into
 * packets to be returned to the scheduler and packets whose classifier
 * set a destination queue (dst_queue input flag).
 *
 * Destination-queue packets are gathered in arrival order into ev[] and
 * enqueued per consecutive run of the same destination queue; events
 * that fail to enqueue are freed. The remaining packets are compacted
 * to the front of packets[]; returns their count. */
static inline int classify_pkts(odp_packet_t packets[], int num)
{
	odp_packet_t pkt;
	odp_packet_hdr_t *pkt_hdr;
	int i, num_rx, num_ev, num_dst;
	odp_queue_t cur_queue;
	odp_event_t ev[num];
	odp_queue_t dst[num];     /* one entry per run of equal dst queues */
	int dst_idx[num];         /* start index of each run within ev[] */

	num_rx = 0;
	num_dst = 0;
	num_ev = 0;

	/* Some compilers need this dummy initialization */
	cur_queue = ODP_QUEUE_INVALID;

	for (i = 0; i < num; i++) {
		pkt = packets[i];
		pkt_hdr = packet_hdr(pkt);

		if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
			/* Sort events for enqueue multi operation(s) */
			if (odp_unlikely(num_dst == 0)) {
				num_dst = 1;
				cur_queue = pkt_hdr->dst_queue;
				dst[0] = cur_queue;
				dst_idx[0] = 0;
			}

			ev[num_ev] = odp_packet_to_event(pkt);

			/* Start a new run when the destination changes */
			if (cur_queue != pkt_hdr->dst_queue) {
				cur_queue = pkt_hdr->dst_queue;
				dst[num_dst] = cur_queue;
				dst_idx[num_dst] = num_ev;
				num_dst++;
			}

			num_ev++;
			continue;
		}
		packets[num_rx++] = pkt;
	}

	/* Optimization for the common case */
	if (odp_likely(num_dst == 0))
		return num_rx;

	for (i = 0; i < num_dst; i++) {
		int num_enq, ret;
		int idx = dst_idx[i];

		if (i == (num_dst - 1))
			num_enq = num_ev - idx;
		else
			num_enq = dst_idx[i + 1] - idx;

		ret = odp_queue_enq_multi(dst[i], &ev[idx], num_enq);

		if (ret < 0)
			ret = 0;

		/* Events that could not be enqueued are dropped */
		if (ret < num_enq)
			odp_event_free_multi(&ev[idx + ret], num_enq - ret);
	}

	return num_rx;
}
+
/* Convert a burst of raw rte_events into ODP events from a single
 * origin queue.
 *
 * Only events sharing the first event's queue_id are consumed; the tail
 * with a different queue_id is stashed in the per-thread cache for the
 * next call, because the ODP schedule API reports one origin queue per
 * call. Fresh packets from the RX adapter are run through packet input
 * processing (and the classifier, when enabled) before being returned.
 * Stores the origin queue in *out_queue (if non-NULL) and returns the
 * number of events written to out_ev[]. */
static inline uint16_t event_input(struct rte_event ev[], odp_event_t out_ev[],
				   uint16_t nb_events, odp_queue_t *out_queue)
{
	struct rte_mbuf *pkt_table[nb_events];
	uint16_t num_pkts = 0;
	uint16_t num_events = 0;
	uint16_t i;
	uint8_t first_queue;

	if (odp_unlikely(nb_events == 0))
		return 0;

	first_queue = ev[0].queue_id;

	for (i = 0; i < nb_events; i++) {
		struct rte_event *event = &ev[i];

		if (odp_unlikely(event->queue_id != first_queue)) {
			uint16_t cache_idx, j;

			/* Stash the remaining events for later calls */
			_odp_eventdev_local.cache.idx = 0;
			for (j = i; j < nb_events; j++) {
				cache_idx = _odp_eventdev_local.cache.count;
				_odp_eventdev_local.cache.event[cache_idx] = ev[j];
				_odp_eventdev_local.cache.count++;
			}
			break;
		}

		/* Packets have to be initialized */
		if (event->event_type == RTE_EVENT_TYPE_ETH_RX_ADAPTER) {
			pkt_table[num_pkts++] = event->mbuf;
			continue;
		}

		out_ev[num_events++] = mbuf_to_event(event->mbuf);
	}

	if (num_pkts) {
		pktio_entry_t *entry = _odp_eventdev_gbl->pktio[pkt_table[0]->port];

		num_pkts = _odp_input_pkts(entry, (odp_packet_t *)pkt_table, num_pkts);

		if (!odp_global_ro.init_param.not_used.feat.cls)
			num_pkts = classify_pkts((odp_packet_t *)pkt_table,
						 num_pkts);

		for (i = 0; i < num_pkts; i++)
			out_ev[num_events++] = mbuf_to_event(pkt_table[i]);
	}

	if (out_queue && num_events)
		*out_queue = queue_id_to_queue(first_queue);

	return num_events;
}
+
/* Fetch consecutive events from the same queue from cache */
static inline uint16_t input_cached(odp_event_t out_ev[], unsigned int max_num,
				    odp_queue_t *out_queue)
{
	struct rte_event ev[max_num];
	uint16_t idx = _odp_eventdev_local.cache.idx;
	uint16_t i;
	uint8_t first_queue = _odp_eventdev_local.cache.event[idx].queue_id;

	/* Drain cached events until the origin queue changes or max_num
	 * is reached */
	for (i = 0; i < max_num && _odp_eventdev_local.cache.count; i++) {
		uint16_t cache_idx = _odp_eventdev_local.cache.idx;
		struct rte_event *event = &_odp_eventdev_local.cache.event[cache_idx];

		if (odp_unlikely(event->queue_id != first_queue))
			break;

		_odp_eventdev_local.cache.idx++;
		_odp_eventdev_local.cache.count--;
		ev[i] = *event;
	}

	/* All ev[0..i) share a queue_id, so event_input() consumes them
	 * fully without re-caching */
	return event_input(ev, out_ev, i, out_queue);
}
+
/* Core scheduling loop shared by all odp_schedule*() variants.
 *
 * Serves cached events first; otherwise polls the eventdev in a loop,
 * running timers between polls, honoring ODP_SCHED_WAIT /
 * ODP_SCHED_NO_WAIT / a timeout in ns. Lazily relinks this thread's
 * port when it is found unlinked and the thread is not paused.
 * Returns the number of events stored in out_ev[] (0 on timeout). */
static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
				odp_event_t out_ev[], unsigned int max_num)
{
	odp_time_t next;
	struct rte_event ev[max_num];
	int first = 1;
	uint16_t num_deq;
	uint8_t dev_id = _odp_eventdev_gbl->dev_id;
	uint8_t port_id = _odp_eventdev_local.port_id;

	/* Thread id doubles as the event port id, so only the first
	 * num_event_ports threads may schedule */
	if (odp_unlikely(port_id >= _odp_eventdev_gbl->num_event_ports)) {
		_ODP_ERR("Max %" PRIu8 " scheduled workers supported\n",
			 _odp_eventdev_gbl->num_event_ports);
		return 0;
	}

	/* Check that port is linked */
	if (odp_unlikely(!_odp_eventdev_gbl->port[port_id].linked &&
			 !_odp_eventdev_local.paused)) {
		if (resume_scheduling(dev_id, port_id))
			return 0;
	}

	if (odp_unlikely(max_num > MAX_SCHED_BURST))
		max_num = MAX_SCHED_BURST;

	if (odp_unlikely(_odp_eventdev_local.cache.count)) {
		num_deq = input_cached(out_ev, max_num, out_queue);
	} else {
		while (1) {
			num_deq = rte_event_dequeue_burst(dev_id, port_id, ev,
							  max_num, 0);
			if (num_deq) {
				num_deq = event_input(ev, out_ev, num_deq,
						      out_queue);
				timer_run(2);
				/* Classifier may enqueue events back to
				 * eventdev */
				if (odp_unlikely(num_deq == 0))
					continue;
				break;
			}
			timer_run(1);

			if (wait == ODP_SCHED_WAIT)
				continue;

			if (wait == ODP_SCHED_NO_WAIT)
				return 0;

			/* Compute the deadline lazily, only once a wait
			 * is actually needed */
			if (first) {
				next = odp_time_add_ns(odp_time_local(), wait);
				first = 0;
				continue;
			}

			if (odp_time_cmp(next, odp_time_local()) < 0)
				return 0;
		}
	}

	return num_deq;
}
+
+static odp_event_t schedule(odp_queue_t *out_queue, uint64_t wait)
+{
+ odp_event_t ev;
+
+ ev = ODP_EVENT_INVALID;
+
+ schedule_loop(out_queue, wait, &ev, 1);
+
+ return ev;
+}
+
+static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
+ odp_event_t events[], int num)
+{
+ return schedule_loop(out_queue, wait, events, num);
+}
+
+static int schedule_multi_wait(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
+{
+ return schedule_loop(out_queue, ODP_SCHED_WAIT, events, num);
+}
+
+static int schedule_multi_no_wait(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
+{
+ return schedule_loop(out_queue, ODP_SCHED_NO_WAIT, events, num);
+}
+
/* Pause scheduling on this thread by unlinking all queues from its
 * event port. The paused flag stops the schedule loop from relinking. */
static void schedule_pause(void)
{
	if (unlink_port(_odp_eventdev_gbl->dev_id,
			_odp_eventdev_local.port_id, NULL, 0) < 0)
		_ODP_ERR("Unable to pause scheduling\n");

	_odp_eventdev_local.paused = 1;
}

/* Re-link this thread's port to its groups' queues and clear the
 * paused flag. */
static void schedule_resume(void)
{
	if (resume_scheduling(_odp_eventdev_gbl->dev_id, _odp_eventdev_local.port_id))
		_ODP_ERR("Unable to resume scheduling\n");

	_odp_eventdev_local.paused = 0;
}

/* Context release is handled by the eventdev itself on the next
 * dequeue, so these are no-ops. */
static void schedule_release_atomic(void)
{
	/* Nothing to do */
}

static void schedule_release_ordered(void)
{
	/* Nothing to do */
}

/* Wait times are expressed directly in nanoseconds: identity mapping. */
static uint64_t schedule_wait_time(uint64_t ns)
{
	return ns;
}
+
/* Overwrite a group's thread mask. Caller must hold grp_lock. */
static inline void grp_update_mask(int grp, const odp_thrmask_t *new_mask)
{
	odp_thrmask_copy(&_odp_eventdev_gbl->grp[grp].mask, new_mask);
}

/* Add a thread to a predefined (non-named) group's mask. Port links are
 * established lazily when the thread schedules. Returns 0 on success,
 * -1 for a named/invalid group. */
static int schedule_thr_add(odp_schedule_group_t group, int thr)
{
	odp_thrmask_t mask;
	odp_thrmask_t new_mask;

	if (group < 0 || group >= SCHED_GROUP_NAMED)
		return -1;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, thr);

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	odp_thrmask_or(&new_mask, &_odp_eventdev_gbl->grp[group].mask, &mask);
	grp_update_mask(group, &new_mask);

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);

	return 0;
}

/* Remove a thread from a predefined group's mask and unlink all queues
 * from its port. Returns 0 on success, -1 for a named/invalid group. */
static int schedule_thr_rem(odp_schedule_group_t group, int thr)
{
	odp_thrmask_t mask;
	odp_thrmask_t new_mask;

	if (group < 0 || group >= SCHED_GROUP_NAMED)
		return -1;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, thr);
	/* new_mask = all threads except 'thr' */
	odp_thrmask_xor(&new_mask, &mask, &_odp_eventdev_gbl->mask_all);

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	odp_thrmask_and(&new_mask, &_odp_eventdev_gbl->grp[group].mask,
			&new_mask);
	grp_update_mask(group, &new_mask);

	unlink_port(_odp_eventdev_gbl->dev_id, thr, NULL, 0);

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);

	return 0;
}
+
/* Prefetch is a no-op hint on this implementation. */
static void schedule_prefetch(int num)
{
	(void)num;
}

static int schedule_num_prio(void)
{
	return _odp_eventdev_gbl->num_prio;
}

/* Only the named group slots are available to applications. */
static int schedule_num_grps(void)
{
	return NUM_SCHED_GRPS - SCHED_GROUP_NAMED;
}
+
+static odp_schedule_group_t schedule_group_create(const char *name,
+ const odp_thrmask_t *mask)
+{
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+ int i;
+
+ odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);
+
+ for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
+ if (!_odp_eventdev_gbl->grp[i].allocated) {
+ char *grp_name = _odp_eventdev_gbl->grp[i].name;
+
+ if (name == NULL) {
+ grp_name[0] = 0;
+ } else {
+ strncpy(grp_name, name,
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
+ }
+
+ grp_update_mask(i, mask);
+ group = (odp_schedule_group_t)i;
+ _odp_eventdev_gbl->grp[i].allocated = 1;
+ break;
+ }
+ }
+
+ odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
+ return group;
+}
+
/* Free a named group: clear its mask and name and mark it unallocated.
 * Returns 0 on success, -1 for an invalid or unallocated group. */
static int schedule_group_destroy(odp_schedule_group_t group)
{
	odp_thrmask_t zero;
	int ret;

	odp_thrmask_zero(&zero);

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
	    _odp_eventdev_gbl->grp[group].allocated) {
		grp_update_mask(group, &zero);
		memset(_odp_eventdev_gbl->grp[group].name, 0,
		       ODP_SCHED_GROUP_NAME_LEN);
		_odp_eventdev_gbl->grp[group].allocated = 0;
		ret = 0;
	} else {
		ret = -1;
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
	return ret;
}

/* Find a named group by exact name match. Returns the group handle or
 * ODP_SCHED_GROUP_INVALID.
 * NOTE(review): the 'allocated' flag is not checked here — looking up
 * an empty name would match a free slot (names are zeroed on destroy);
 * confirm this is acceptable. */
static odp_schedule_group_t schedule_group_lookup(const char *name)
{
	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
	int i;

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
		if (strcmp(name, _odp_eventdev_gbl->grp[i].name) == 0) {
			group = (odp_schedule_group_t)i;
			break;
		}
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
	return group;
}
+
/* Add the threads in 'mask' to a named group: update the group mask and
 * link the group's queues to the ports of only the newly added threads
 * (link_mask = mask minus threads already in the group).
 * Returns 0 on success, -1 for an invalid/unallocated group or link
 * failure. */
static int schedule_group_join(odp_schedule_group_t group,
			       const odp_thrmask_t *mask)
{
	int ret = 0;

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
	    _odp_eventdev_gbl->grp[group].allocated) {
		odp_thrmask_t new_mask;
		odp_thrmask_t link_mask;

		/* link_mask: requested threads not already members */
		odp_thrmask_and(&link_mask, &_odp_eventdev_gbl->grp[group].mask,
				mask);
		odp_thrmask_xor(&link_mask, &link_mask, mask);
		odp_thrmask_or(&new_mask, &_odp_eventdev_gbl->grp[group].mask,
			       mask);
		grp_update_mask(group, &new_mask);

		ret = link_group(group, &link_mask, 0);
	} else {
		ret = -1;
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
	return ret;
}

/* Remove the threads in 'mask' from a named group: shrink the group
 * mask and unlink the group's queues from the ports of the threads that
 * actually were members. Returns 0 on success, -1 otherwise. */
static int schedule_group_leave(odp_schedule_group_t group,
				const odp_thrmask_t *mask)
{
	odp_thrmask_t new_mask;
	odp_thrmask_t unlink_mask;
	int ret = 0;

	/* new_mask: complement of 'mask'; unlink_mask: 'mask' limited to
	 * valid threads */
	odp_thrmask_xor(&new_mask, mask, &_odp_eventdev_gbl->mask_all);
	odp_thrmask_and(&unlink_mask, mask, &_odp_eventdev_gbl->mask_all);

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
	    _odp_eventdev_gbl->grp[group].allocated) {
		odp_thrmask_and(&unlink_mask, &_odp_eventdev_gbl->grp[group].mask,
				&unlink_mask);
		odp_thrmask_and(&new_mask, &_odp_eventdev_gbl->grp[group].mask,
				&new_mask);
		grp_update_mask(group, &new_mask);

		ret = link_group(group, &unlink_mask, 1);
	} else {
		ret = -1;
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
	return ret;
}
+
/* Copy out a named group's current thread mask.
 * Returns 0 on success, -1 for an invalid/unallocated group. */
static int schedule_group_thrmask(odp_schedule_group_t group,
				  odp_thrmask_t *thrmask)
{
	int ret;

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
	    _odp_eventdev_gbl->grp[group].allocated) {
		*thrmask = _odp_eventdev_gbl->grp[group].mask;
		ret = 0;
	} else {
		ret = -1;
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
	return ret;
}

/* Fill in a named group's info (name pointer into global storage and a
 * copy of the mask). Returns 0 on success, -1 otherwise. */
static int schedule_group_info(odp_schedule_group_t group,
			       odp_schedule_group_info_t *info)
{
	int ret;

	odp_ticketlock_lock(&_odp_eventdev_gbl->grp_lock);

	if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
	    _odp_eventdev_gbl->grp[group].allocated) {
		info->name = _odp_eventdev_gbl->grp[group].name;
		info->thrmask = _odp_eventdev_gbl->grp[group].mask;
		ret = 0;
	} else {
		ret = -1;
	}

	odp_ticketlock_unlock(&_odp_eventdev_gbl->grp_lock);
	return ret;
}
+
/* Ordered-lock API stubs: ordering is maintained by the eventdev, and
 * explicit ordered locks are not supported, so all of these are
 * no-ops. */

static void schedule_order_lock(uint32_t lock_index)
{
	(void)lock_index;
}

static void schedule_order_unlock(uint32_t lock_index)
{
	(void)lock_index;
}

static void schedule_order_unlock_lock(uint32_t unlock_index,
				       uint32_t lock_index)
{
	(void)unlock_index;
	(void)lock_index;
}

static void schedule_order_lock_start(uint32_t lock_index)
{
	(void)lock_index;
}

static void schedule_order_lock_wait(uint32_t lock_index)
{
	(void)lock_index;
}

static void order_lock(void)
{
	/* Nothing to do */
}

static void order_unlock(void)
{
	/* Nothing to do */
}
+
/* Report scheduler capabilities derived from the eventdev's configured
 * limits. max_queues is bounded by both the build-time maximum and the
 * largest per-sync-type eventdev queue count. Always returns 0. */
static int schedule_capability(odp_schedule_capability_t *capa)
{
	uint16_t max_sched;

	memset(capa, 0, sizeof(odp_schedule_capability_t));

	max_sched = RTE_MAX(RTE_MAX(_odp_eventdev_gbl->event_queue.num_atomic,
				    _odp_eventdev_gbl->event_queue.num_ordered),
			    _odp_eventdev_gbl->event_queue.num_parallel);
	capa->max_queues = RTE_MIN(CONFIG_MAX_SCHED_QUEUES, max_sched);
	capa->max_queue_size = _odp_eventdev_gbl->config.nb_events_limit;
	capa->max_ordered_locks = schedule_max_ordered_locks();
	capa->max_groups = schedule_num_grps();
	capa->max_prios = odp_schedule_num_prio();

	return 0;
}

/* Default schedule config: all queues available, half the maximum
 * queue size, flow ids disabled. */
static void schedule_config_init(odp_schedule_config_t *config)
{
	odp_schedule_capability_t capa;

	schedule_capability(&capa);

	memset(config, 0, sizeof(odp_schedule_config_t));

	config->num_queues = capa.max_queues;
	config->queue_size = capa.max_queue_size / 2;
	config->max_flow_id = 0;
}

/* The eventdev was already configured at init time; nothing to apply. */
static int schedule_config(const odp_schedule_config_t *config)
{
	(void)config;

	return 0;
}
+
/* Dump basic scheduler information for debugging. */
static void schedule_print(void)
{
	odp_schedule_capability_t capa;

	(void)schedule_capability(&capa);

	_ODP_PRINT("\nScheduler debug info\n");
	_ODP_PRINT("--------------------\n");
	_ODP_PRINT("  scheduler: eventdev\n");
	_ODP_PRINT("  max groups: %u\n", capa.max_groups);
	_ODP_PRINT("  max priorities: %u\n", capa.max_prios);
	_ODP_PRINT("\n");
}

/* Forward declaration: the API table is defined below but referenced
 * by sched_api(). */
const _odp_schedule_api_fn_t _odp_schedule_eventdev_api;

/* Accessor used by the scheduler interface to fetch this
 * implementation's API table. */
static const _odp_schedule_api_fn_t *sched_api(void)
{
	return &_odp_schedule_eventdev_api;
}
+
/* Fill in scheduler interface (internal hooks used by queue/pktio
 * code). Hooks without an eventdev counterpart are left NULL. */
const schedule_fn_t _odp_schedule_eventdev_fn = {
	.pktio_start = schedule_pktio_start,
	.thr_add = schedule_thr_add,
	.thr_rem = schedule_thr_rem,
	.num_grps = schedule_num_grps,
	.create_queue = schedule_create_queue,
	.destroy_queue = schedule_destroy_queue,
	.sched_queue = NULL,
	.ord_enq_multi = NULL,
	.init_global = schedule_init_global,
	.term_global = schedule_term_global,
	.init_local = schedule_init_local,
	.term_local = schedule_term_local,
	.order_lock = order_lock,
	.order_unlock = order_unlock,
	.max_ordered_locks = schedule_max_ordered_locks,
	.get_config = NULL,
	.sched_api = sched_api,
};

/* Fill in scheduler API calls (public odp_schedule_* dispatch table) */
const _odp_schedule_api_fn_t _odp_schedule_eventdev_api = {
	.schedule_wait_time = schedule_wait_time,
	.schedule_capability = schedule_capability,
	.schedule_config_init = schedule_config_init,
	.schedule_config = schedule_config,
	.schedule = schedule,
	.schedule_multi = schedule_multi,
	.schedule_multi_wait = schedule_multi_wait,
	.schedule_multi_no_wait = schedule_multi_no_wait,
	.schedule_pause = schedule_pause,
	.schedule_resume = schedule_resume,
	.schedule_release_atomic = schedule_release_atomic,
	.schedule_release_ordered = schedule_release_ordered,
	.schedule_prefetch = schedule_prefetch,
	.schedule_min_prio = schedule_min_prio,
	.schedule_max_prio = schedule_max_prio,
	.schedule_default_prio = schedule_default_prio,
	.schedule_num_prio = schedule_num_prio,
	.schedule_group_create = schedule_group_create,
	.schedule_group_destroy = schedule_group_destroy,
	.schedule_group_lookup = schedule_group_lookup,
	.schedule_group_join = schedule_group_join,
	.schedule_group_leave = schedule_group_leave,
	.schedule_group_thrmask = schedule_group_thrmask,
	.schedule_group_info = schedule_group_info,
	.schedule_order_lock = schedule_order_lock,
	.schedule_order_unlock = schedule_order_unlock,
	.schedule_order_unlock_lock = schedule_order_unlock_lock,
	.schedule_order_lock_start = schedule_order_lock_start,
	.schedule_order_lock_wait = schedule_order_lock_wait,
	.schedule_order_wait = order_lock,
	.schedule_print = schedule_print
};
diff --git a/platform/linux-dpdk/odp_schedule_if.c b/platform/linux-dpdk/odp_schedule_if.c
new file mode 100644
index 000000000..dbb098e8b
--- /dev/null
+++ b/platform/linux-dpdk/odp_schedule_if.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+
+#include <odp/api/plat/schedule_inline_types.h>
+
+#include <odp_schedule_if.h>
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+
+/* Required for _ODP_SCHED_ID_EVENTDEV */
+#include <odp_eventdev_internal.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+/* Enable visibility to inline headers */
+#include <odp/visibility_begin.h>
+
+const _odp_schedule_api_fn_t *_odp_sched_api;
+
+int _odp_schedule_configured(void)
+{
+ return odp_global_rw->schedule_configured;
+}
+
+#include <odp/visibility_end.h>
+
+extern const schedule_fn_t _odp_schedule_sp_fn;
+extern const schedule_fn_t _odp_schedule_basic_fn;
+extern const schedule_fn_t _odp_schedule_eventdev_fn;
+const schedule_fn_t *_odp_sched_fn;
+int _odp_sched_id;
+
+int odp_schedule_capability(odp_schedule_capability_t *capa)
+{
+ return _odp_sched_api->schedule_capability(capa);
+}
+
+void odp_schedule_config_init(odp_schedule_config_t *config)
+{
+ memset(config, 0, sizeof(*config));
+
+ _odp_sched_api->schedule_config_init(config);
+}
+
+int odp_schedule_config(const odp_schedule_config_t *config)
+{
+ int ret;
+ odp_schedule_config_t defconfig;
+
+ if (odp_global_rw->schedule_configured) {
+ _ODP_ERR("Scheduler has been configured already\n");
+ return -1;
+ }
+
+ if (!config) {
+ odp_schedule_config_init(&defconfig);
+ config = &defconfig;
+ }
+
+ ret = _odp_sched_api->schedule_config(config);
+
+ if (ret >= 0)
+ odp_global_rw->schedule_configured = 1;
+
+ return ret;
+}
+
+int odp_schedule_min_prio(void)
+{
+ return _odp_sched_api->schedule_min_prio();
+}
+
+int odp_schedule_max_prio(void)
+{
+ return _odp_sched_api->schedule_max_prio();
+}
+
+int odp_schedule_default_prio(void)
+{
+ return _odp_sched_api->schedule_default_prio();
+}
+
+int odp_schedule_num_prio(void)
+{
+ return _odp_sched_api->schedule_num_prio();
+}
+
+odp_schedule_group_t odp_schedule_group_create(const char *name,
+ const odp_thrmask_t *mask)
+{
+ return _odp_sched_api->schedule_group_create(name, mask);
+}
+
+int odp_schedule_group_destroy(odp_schedule_group_t group)
+{
+ return _odp_sched_api->schedule_group_destroy(group);
+}
+
+odp_schedule_group_t odp_schedule_group_lookup(const char *name)
+{
+ return _odp_sched_api->schedule_group_lookup(name);
+}
+
+int odp_schedule_group_join(odp_schedule_group_t group,
+ const odp_thrmask_t *mask)
+{
+ return _odp_sched_api->schedule_group_join(group, mask);
+}
+
+int odp_schedule_group_leave(odp_schedule_group_t group,
+ const odp_thrmask_t *mask)
+{
+ return _odp_sched_api->schedule_group_leave(group, mask);
+}
+
+int odp_schedule_group_thrmask(odp_schedule_group_t group,
+ odp_thrmask_t *thrmask)
+{
+ return _odp_sched_api->schedule_group_thrmask(group, thrmask);
+}
+
+int odp_schedule_group_info(odp_schedule_group_t group,
+ odp_schedule_group_info_t *info)
+{
+ return _odp_sched_api->schedule_group_info(group, info);
+}
+
+void odp_schedule_print(void)
+{
+ _odp_sched_api->schedule_print();
+}
+
+/* Select and initialize the scheduler implementation. The choice comes from
+ * the ODP_SCHEDULER environment variable ("basic", "sp" or "eventdev");
+ * unset or "default" selects the compile-time default scheduler. */
+int _odp_schedule_init_global(void)
+{
+	const char *sched = getenv("ODP_SCHEDULER");
+
+	if (sched == NULL || !strcmp(sched, "default"))
+		sched = _ODP_SCHEDULE_DEFAULT;
+
+	_ODP_PRINT("Using scheduler '%s'\n", sched);
+
+	if (!strcmp(sched, "basic")) {
+		_odp_sched_id = _ODP_SCHED_ID_BASIC;
+		_odp_sched_fn = &_odp_schedule_basic_fn;
+	} else if (!strcmp(sched, "sp")) {
+		_odp_sched_id = _ODP_SCHED_ID_SP;
+		_odp_sched_fn = &_odp_schedule_sp_fn;
+	} else if (!strcmp(sched, "eventdev")) {
+		_odp_sched_id = _ODP_SCHED_ID_EVENTDEV;
+		_odp_sched_fn = &_odp_schedule_eventdev_fn;
+	} else {
+		/* NOTE(review): _ODP_ABORT presumably terminates the process,
+		 * which would make this return unreachable — confirm. */
+		_ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
+		return -1;
+	}
+
+	if (_odp_sched_fn->init_global())
+		return -1;
+
+	/* Publish the selected implementation's API table; the odp_schedule_*
+	 * wrappers above dispatch through _odp_sched_api. */
+	_odp_sched_api = _odp_sched_fn->sched_api();
+
+	return 0;
+}
+
+int _odp_schedule_term_global(void)
+{
+ return _odp_sched_fn->term_global();
+}
diff --git a/platform/linux-dpdk/odp_shared_memory.c b/platform/linux-dpdk/odp_shared_memory.c
new file mode 100644
index 000000000..398771708
--- /dev/null
+++ b/platform/linux-dpdk/odp_shared_memory.c
@@ -0,0 +1,590 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/debug.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/spinlock.h>
+
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_macros_internal.h>
+#include <odp_shm_internal.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <inttypes.h>
+
+#include <rte_config.h>
+#include <rte_errno.h>
+#include <rte_lcore.h>
+#include <rte_memzone.h>
+
+/* Supported ODP_SHM_* flags */
+#define SUPPORTED_SHM_FLAGS (ODP_SHM_EXPORT | ODP_SHM_HP | ODP_SHM_SINGLE_VA)
+
+#define SHM_MAX_ALIGN (0x80000000)
+#define SHM_BLOCK_NAME "%" PRIu64 "-%d-%s"
+#define SHM_MAX_NB_BLOCKS (CONFIG_INTERNAL_SHM_BLOCKS + CONFIG_SHM_BLOCKS)
+
+ODP_STATIC_ASSERT(ODP_SHM_NAME_LEN >= RTE_MEMZONE_NAMESIZE,
+ "ODP_SHM_NAME_LEN < RTE_MEMZONE_NAMESIZE");
+
+typedef enum {
+ SHM_TYPE_LOCAL = 0,
+ SHM_TYPE_REMOTE
+} shm_type_t;
+
+/**
+ * Memory zone descriptor
+ *
+ * This struct is stored inside DPDK memzone to make it available for
+ * odp_shm_import().
+ */
+typedef struct {
+ /* Shared memory flags */
+ uint32_t flags;
+} shm_zone_t;
+
+/**
+ * Memory block descriptor
+ */
+typedef struct {
+ /* DPDK memzone. If != NULL, the shm block is interpreted as reserved. */
+ const struct rte_memzone *mz;
+ /* User requested SHM size */
+ uint64_t size;
+ /* Memory block type */
+ shm_type_t type;
+ /* Memory block name */
+ char name[ODP_SHM_NAME_LEN];
+
+} shm_block_t;
+
+/**
+ * Table of blocks describing allocated shared memory. This table is visible to
+ * every ODP thread (linux process or pthreads). It is allocated shared at odp
+ * init time and is therefore inherited by all.
+ */
+typedef struct {
+ odp_spinlock_t lock;
+ shm_block_t block[SHM_MAX_NB_BLOCKS];
+} shm_table_t;
+
+static shm_table_t *shm_tbl;
+
+/**
+ * Check if DPDK memzone name has been used already
+ */
+static odp_bool_t mz_name_used(const char *name)
+{
+ int idx;
+
+ for (idx = 0; idx < SHM_MAX_NB_BLOCKS; idx++) {
+ if (shm_tbl->block[idx].mz &&
+ strncmp(name, shm_tbl->block[idx].mz->name,
+ RTE_MEMZONE_NAMESIZE) == 0)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Convert ODP shm name into unique DPDK memzone name
+ */
+/* NOTE(review): odp_shm_import() rebuilds this name with counter value 0
+ * (see its snprintf call), so a block whose name needed counter > 0 here
+ * (duplicate ODP names) cannot be found by import — confirm acceptable. */
+static void name_to_mz_name(const char *name, char *mz_name)
+{
+	int i = 0;
+
+	/* Use pid and counter to make name unique; retry with an incremented
+	 * counter until the generated memzone name is unused. */
+	do {
+		snprintf(mz_name, RTE_MEMZONE_NAMESIZE, SHM_BLOCK_NAME,
+			 (odp_instance_t)odp_global_ro.main_pid, i++, name);
+		mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+	} while (mz_name_used(mz_name));
+}
+
+/**
+ * Return a pointer to shm zone descriptor stored at the end of DPDK memzone
+ */
+static shm_zone_t *shm_zone(const struct rte_memzone *mz)
+{
+	/* The descriptor lives in the last sizeof(shm_zone_t) bytes of the
+	 * memzone; odp_shm_reserve() reserves that extra space on purpose. */
+	return (shm_zone_t *)(uintptr_t)((uint8_t *)mz->addr + mz->len - sizeof(shm_zone_t));
+}
+
+static shm_block_t *mz_to_shm_block(const struct rte_memzone *mz)
+{
+ for (int i = 0; i < SHM_MAX_NB_BLOCKS; i++) {
+ if (shm_tbl->block[i].mz == mz)
+ return &shm_tbl->block[i];
+ }
+ return NULL;
+}
+
+static int find_free_block(void)
+{
+ int idx;
+
+ for (idx = 0; idx < SHM_MAX_NB_BLOCKS; idx++) {
+ if (shm_tbl->block[idx].mz == NULL)
+ return idx;
+ }
+ return -1;
+}
+
+static inline uint32_t handle_to_idx(odp_shm_t shm)
+{
+ return _odp_typeval(shm) - 1;
+}
+
+static inline odp_shm_t idx_to_handle(uint32_t idx)
+{
+ return _odp_cast_scalar(odp_shm_t, idx + 1);
+}
+
+/* Return 1 when the handle maps to a reserved block, else print an error and
+ * return 0. A zero handle wraps to idx -1 via the uint32_t-to-int conversion
+ * and is rejected by the range check. Callers hold shm_tbl->lock. */
+static inline odp_bool_t handle_is_valid(odp_shm_t shm)
+{
+	int idx = handle_to_idx(shm);
+
+	if (idx < 0 || idx >= SHM_MAX_NB_BLOCKS ||
+	    shm_tbl->block[idx].mz == NULL) {
+		_ODP_ERR("Invalid odp_shm_t handle: %" PRIu64 "\n", odp_shm_to_u64(shm));
+		return 0;
+	}
+	return 1;
+}
+
+/* Allocate and initialize the process-shared SHM block table. Must run in the
+ * main ODP process/thread: the anonymous MAP_SHARED mapping is inherited by
+ * all threads and processes created afterwards. */
+int _odp_shm_init_global(const odp_init_t *init ODP_UNUSED)
+{
+	void *addr;
+
+	/* Only the original main thread of the main process may initialize */
+	if ((getpid() != odp_global_ro.main_pid) ||
+	    (syscall(SYS_gettid) != getpid())) {
+		/* Fix: message ended in "!\n." which printed a stray '.' */
+		_ODP_ERR("shm_init_global() must be performed by the main ODP process!\n");
+		return -1;
+	}
+
+	/* Allocate space for the internal shared mem block table */
+	addr = mmap(NULL, sizeof(shm_table_t), PROT_READ | PROT_WRITE,
+		    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+	if (addr == MAP_FAILED) {
+		_ODP_ERR("Unable to mmap the shm block table\n");
+		return -1;
+	}
+
+	shm_tbl = addr;
+	memset(shm_tbl, 0, sizeof(shm_table_t));
+
+	odp_spinlock_init(&shm_tbl->lock);
+
+	return 0;
+}
+
+int _odp_shm_init_local(void)
+{
+ return 0;
+}
+
+/* Tear down the SHM block table. Must run in the main ODP process. Blocks
+ * that were never freed are released here (with an error print), then the
+ * process-shared table mapping itself is unmapped. */
+int _odp_shm_term_global(void)
+{
+	shm_block_t *block;
+	int idx;
+
+	/* Only the original main thread of the main process may terminate */
+	if ((getpid() != odp_global_ro.main_pid) ||
+	    (syscall(SYS_gettid) != getpid())) {
+		/* Fix: message ended in "!\n." which printed a stray '.' */
+		_ODP_ERR("shm_term_global() must be performed by the main ODP process!\n");
+		return -1;
+	}
+
+	/* Cleanup possibly non freed memory (and complain a bit) */
+	for (idx = 0; idx < SHM_MAX_NB_BLOCKS; idx++) {
+		block = &shm_tbl->block[idx];
+		if (block->mz) {
+			_ODP_ERR("block '%s' was never freed (cleaning up...)\n", block->name);
+			rte_memzone_free(block->mz);
+		}
+	}
+	/* Free the shared memory block table */
+	if (munmap(shm_tbl, sizeof(shm_table_t)) < 0) {
+		_ODP_ERR("Unable to munmap the shm block table\n");
+		return -1;
+	}
+	return 0;
+}
+
+int _odp_shm_term_local(void)
+{
+ return 0;
+}
+
+int odp_shm_capability(odp_shm_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_shm_capability_t));
+
+ capa->max_blocks = CONFIG_SHM_BLOCKS;
+ capa->max_size = 0;
+ capa->max_align = SHM_MAX_ALIGN;
+ capa->flags = SUPPORTED_SHM_FLAGS;
+
+ return 0;
+}
+
+odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
+ uint32_t flags)
+{
+ shm_block_t *block;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ uint32_t mz_flags = RTE_MEMZONE_1GB | RTE_MEMZONE_SIZE_HINT_ONLY;
+ int idx;
+ uint32_t supported_flgs = SUPPORTED_SHM_FLAGS;
+
+ if (flags & ~supported_flgs) {
+ _ODP_ERR("Unsupported SHM flag: %" PRIx32 "\n", flags);
+ return ODP_SHM_INVALID;
+ }
+
+ if (align > SHM_MAX_ALIGN) {
+ _ODP_ERR("Align too large: %" PRIu64 "\n", align);
+ return ODP_SHM_INVALID;
+ }
+
+ /* DPDK requires alignment to be power of two */
+ if (!_ODP_CHECK_IS_POWER2(align))
+ align = _ODP_ROUNDUP_POWER2_U32(align);
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ idx = find_free_block();
+ if (idx < 0) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ _ODP_ERR("No free SHM blocks left\n");
+ return ODP_SHM_INVALID;
+ }
+ block = &shm_tbl->block[idx];
+
+ /* DPDK requires unique memzone names */
+ name_to_mz_name(name, mz_name);
+ /* Reserve extra space for storing shm zone descriptor */
+ mz = rte_memzone_reserve_aligned(mz_name, size + sizeof(shm_zone_t),
+ rte_socket_id(), mz_flags, align);
+ if (mz == NULL) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ _ODP_ERR("Reserving DPDK memzone '%s' failed: %s\n", mz_name,
+ rte_strerror(rte_errno));
+ return ODP_SHM_INVALID;
+ }
+
+ block->mz = mz;
+ snprintf(block->name, ODP_SHM_NAME_LEN, "%s", name);
+ block->name[ODP_SHM_NAME_LEN - 1] = 0;
+ block->type = SHM_TYPE_LOCAL;
+ block->size = size;
+
+ /* Note: ODP_SHM_PROC/ODP_SHM_SINGLE_VA flags are currently ignored. */
+ shm_zone(mz)->flags = flags;
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return idx_to_handle(idx);
+}
+
+odp_shm_t odp_shm_import(const char *remote_name, odp_instance_t odp_inst,
+ const char *local_name)
+{
+ shm_block_t *block;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int idx;
+
+ snprintf(mz_name, RTE_MEMZONE_NAMESIZE, SHM_BLOCK_NAME, odp_inst, 0,
+ remote_name);
+ mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0;
+
+ mz = rte_memzone_lookup(mz_name);
+ if (mz == NULL) {
+ _ODP_ERR("Unable to find remote SHM block: %s\n", remote_name);
+ return ODP_SHM_INVALID;
+ }
+
+ if (!(shm_zone(mz)->flags & ODP_SHM_EXPORT)) {
+ _ODP_ERR("Not exported SHM block!\n");
+ return ODP_SHM_INVALID;
+ }
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ idx = find_free_block();
+ if (idx < 0) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ _ODP_ERR("No free SHM blocks left\n");
+ return ODP_SHM_INVALID;
+ }
+ block = &shm_tbl->block[idx];
+
+ block->mz = mz;
+ snprintf(block->name, ODP_SHM_NAME_LEN, "%s", local_name);
+ block->name[ODP_SHM_NAME_LEN - 1] = 0;
+ block->type = SHM_TYPE_REMOTE;
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return idx_to_handle(idx);
+}
+
+int odp_shm_free(odp_shm_t shm)
+{
+ shm_block_t *block;
+ int ret = 0;
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ if (!handle_is_valid(shm)) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ return -1;
+ }
+
+ block = &shm_tbl->block[handle_to_idx(shm)];
+
+ /* Only the creator of memzone can free it */
+ if (block->type == SHM_TYPE_LOCAL)
+ ret = rte_memzone_free(block->mz);
+
+ block->mz = NULL;
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return ret;
+}
+
+odp_shm_t odp_shm_lookup(const char *name)
+{
+ int idx;
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ for (idx = 0; idx < SHM_MAX_NB_BLOCKS; idx++) {
+ if (shm_tbl->block[idx].mz &&
+ strncmp(name, shm_tbl->block[idx].name,
+ ODP_SHM_NAME_LEN) == 0) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ return idx_to_handle(idx);
+ }
+ }
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return ODP_SHM_INVALID;
+}
+
+void *odp_shm_addr(odp_shm_t shm)
+{
+ void *addr;
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ if (!handle_is_valid(shm)) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ return NULL;
+ }
+
+ addr = shm_tbl->block[handle_to_idx(shm)].mz->addr;
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return addr;
+}
+
+int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info)
+{
+ shm_block_t *block;
+ int idx = handle_to_idx(shm);
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ if (!handle_is_valid(shm)) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ return -1;
+ }
+
+ block = &shm_tbl->block[idx];
+
+ memset(info, 0, sizeof(odp_shm_info_t));
+
+ info->name = block->name;
+ info->addr = block->mz->addr;
+ info->size = block->size;
+ info->page_size = block->mz->hugepage_sz;
+ info->flags = shm_zone(block->mz)->flags;
+ info->num_seg = 1;
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return 0;
+}
+
+int odp_shm_segment_info(odp_shm_t shm, uint32_t index, uint32_t num,
+ odp_shm_segment_info_t seg_info[])
+{
+ shm_block_t *block;
+ int idx = handle_to_idx(shm);
+ phys_addr_t pa;
+
+ if (index != 0 || num != 1) {
+ _ODP_ERR("Only single segment supported (%u, %u)\n", index, num);
+ return -1;
+ }
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ if (!handle_is_valid(shm)) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ return -1;
+ }
+
+ block = &shm_tbl->block[idx];
+ pa = rte_mem_virt2phy(block->mz->addr);
+
+ seg_info[0].addr = (uintptr_t)block->mz->addr;
+ seg_info[0].iova = block->mz->iova != RTE_BAD_IOVA ? block->mz->iova : ODP_SHM_IOVA_INVALID;
+ seg_info[0].pa = pa != RTE_BAD_IOVA ? pa : ODP_SHM_PA_INVALID;
+ seg_info[0].len = block->size;
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return 0;
+}
+
+typedef struct {
+ odp_system_meminfo_t *info;
+ odp_system_memblock_t *memblock;
+ int32_t blocks;
+ int32_t max_num;
+
+} memzone_walker_data_t;
+
+/* rte_memzone_walk() callback: accumulate system-wide totals for every DPDK
+ * memzone and fill one odp_system_memblock_t entry per zone, up to the
+ * caller-provided max_num. */
+static void walk_memzone(const struct rte_memzone *mz, void *arg)
+{
+	memzone_walker_data_t *data = arg;
+	shm_block_t *block = mz_to_shm_block(mz);
+	odp_system_memblock_t *memblock;
+	int32_t cur = data->blocks;
+	const char *name;
+	int name_len;
+
+	data->info->total_mapped += mz->len;
+	data->blocks++;
+
+	if (block != NULL) {
+		/* Zone reserved via odp_shm_reserve(): difference between the
+		 * memzone length and user-requested size counts as overhead */
+		name = block->name;
+		data->info->total_used += block->size;
+		data->info->total_overhead += mz->len - block->size;
+	} else { /* DPDK internal reservations */
+		name = mz->name;
+		data->info->total_used += mz->len;
+	}
+
+	/* Totals keep accumulating past max_num; only entry filling stops */
+	if (cur >= data->max_num)
+		return;
+	memblock = &data->memblock[cur];
+
+	name_len = strlen(name);
+	if (name_len >= ODP_SYSTEM_MEMBLOCK_NAME_LEN)
+		name_len = ODP_SYSTEM_MEMBLOCK_NAME_LEN - 1;
+
+	memcpy(memblock->name, name, name_len);
+	memblock->name[name_len] = 0;
+
+	memblock->addr = (uintptr_t)mz->addr;
+	/* NOTE(review): per-block 'used' is the full memzone length even for
+	 * ODP blocks, while 'total_used' above adds only block->size — confirm
+	 * this asymmetry is intended. */
+	memblock->used = mz->len;
+	memblock->overhead = block != NULL ? mz->len - block->size : 0;
+	memblock->page_size = mz->hugepage_sz;
+}
+
+int32_t odp_system_meminfo(odp_system_meminfo_t *info, odp_system_memblock_t memblock[],
+ int32_t max_num)
+{
+ memzone_walker_data_t walker_data;
+
+ memset(info, 0, sizeof(odp_system_meminfo_t));
+ memset(&walker_data, 0, sizeof(memzone_walker_data_t));
+ walker_data.max_num = max_num;
+ walker_data.info = info;
+ walker_data.memblock = memblock;
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ rte_memzone_walk(walk_memzone, (void *)&walker_data);
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ return walker_data.blocks;
+}
+
+void odp_shm_print_all(void)
+{
+ shm_block_t *block;
+ int idx;
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ _ODP_PRINT("\nShared memory blocks\n--------------------\n");
+
+ for (idx = 0; idx < SHM_MAX_NB_BLOCKS; idx++) {
+ block = &shm_tbl->block[idx];
+ if (block->mz == NULL)
+ continue;
+ _ODP_PRINT(" %s: addr: %p, len: %" PRIu64 " page size: %" PRIu64 "\n",
+ block->name, block->mz->addr,
+ block->size, block->mz->hugepage_sz);
+ }
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+
+ _ODP_PRINT("\nDPDK memzones\n-------------\n");
+ rte_memzone_dump(stdout);
+ _ODP_PRINT("\n");
+}
+
+void odp_shm_print(odp_shm_t shm)
+{
+ shm_block_t *block;
+ int idx = handle_to_idx(shm);
+
+ odp_spinlock_lock(&shm_tbl->lock);
+
+ if (!handle_is_valid(shm)) {
+ odp_spinlock_unlock(&shm_tbl->lock);
+ return;
+ }
+
+ block = &shm_tbl->block[idx];
+
+ _ODP_PRINT("\nSHM block info\n--------------\n");
+ _ODP_PRINT(" name: %s\n", block->name);
+ _ODP_PRINT(" type: %s\n", block->type == SHM_TYPE_LOCAL ? "local" : "remote");
+ _ODP_PRINT(" flags: 0x%x\n", shm_zone(block->mz)->flags);
+ _ODP_PRINT(" start: %p\n", block->mz->addr);
+ _ODP_PRINT(" len: %" PRIu64 "\n", block->size);
+ _ODP_PRINT(" page size: %" PRIu64 "\n", block->mz->hugepage_sz);
+ _ODP_PRINT(" NUMA ID: %" PRIi32 "\n", block->mz->socket_id);
+ _ODP_PRINT("\n");
+
+ odp_spinlock_unlock(&shm_tbl->lock);
+}
+
+uint64_t odp_shm_to_u64(odp_shm_t hdl)
+{
+ return _odp_pri(hdl);
+}
diff --git a/platform/linux-dpdk/odp_std_api.c b/platform/linux-dpdk/odp_std_api.c
new file mode 100644
index 000000000..251e6f874
--- /dev/null
+++ b/platform/linux-dpdk/odp_std_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/std.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/std_inlines.h>
diff --git a/platform/linux-dpdk/odp_std_clib.c b/platform/linux-dpdk/odp_std_clib.c
deleted file mode 100644
index 3f52c3a99..000000000
--- a/platform/linux-dpdk/odp_std_clib.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <rte_memcpy.h>
-
-#include <odp/api/std_clib.h>
-
-#if ODP_ABI_COMPAT == 0
-#include <odp/visibility_begin.h>
-#endif
-
-#if defined(__arm__) || defined(__aarch64__)
-static void *_rte_memcpy(void *dst, const void *src, size_t n)
-{
- return rte_memcpy(dst, src, n);
-}
-
-void* (*const dpdk_memcpy)(void*, const void*, size_t) = &_rte_memcpy;
-#else
-void* (*const dpdk_memcpy)(void*, const void*, size_t) = &rte_memcpy;
-#endif
-
-#if ODP_ABI_COMPAT == 0
-#include <odp/visibility_end.h>
-#else
-#include <odp/api/plat/std_clib_inlines.h>
-#endif
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/platform/linux-dpdk/odp_system_info.c b/platform/linux-dpdk/odp_system_info.c
new file mode 100644
index 000000000..d9cddcb40
--- /dev/null
+++ b/platform/linux-dpdk/odp_system_info.c
@@ -0,0 +1,559 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2020-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/system_info.h>
+#include <odp/api/version.h>
+#include <odp_global_data.h>
+#include <odp_sysinfo_internal.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_config_internal.h>
+#include <odp/api/align.h>
+#include <odp/api/cpu.h>
+#include <odp_packet_internal.h>
+
+#include <errno.h>
+#include <string.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <ctype.h>
+
+#include <rte_string_fns.h>
+#include <rte_version.h>
+
+/* sysconf */
+#include <unistd.h>
+#include <sys/sysinfo.h>
+
+/* opendir, readdir */
+#include <sys/types.h>
+#include <dirent.h>
+
+#define CACHE_LNSZ_FILE \
+ "/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size"
+
+/*
+ * Analysis of /sys/devices/system/cpu/ files
+ */
+static int read_cache_line_size(void)
+{
+ FILE *file;
+ char str[128];
+ int size = 0;
+
+ file = fopen(CACHE_LNSZ_FILE, "rt");
+ if (file == NULL) {
+ /* File not found */
+ _ODP_WARN("Unable to read host CPU cache line size. "
+ "Using ODP_CACHE_LINE_SIZE instead.\n");
+ return ODP_CACHE_LINE_SIZE;
+ }
+
+ if (fgets(str, sizeof(str), file) != NULL) {
+ /* Read cache line size */
+ if (sscanf(str, "%i", &size) != 1)
+ size = 0;
+ }
+
+ fclose(file);
+
+ return size;
+}
+
+static uint64_t default_huge_page_size(void)
+{
+ char str[1024];
+ unsigned long sz;
+ FILE *file;
+
+ file = fopen("/proc/meminfo", "rt");
+ if (!file)
+ return 0;
+
+ while (fgets(str, sizeof(str), file) != NULL) {
+ if (sscanf(str, "Hugepagesize: %8lu kB", &sz) == 1) {
+ _ODP_DBG("default hp size is %lu kB\n", sz);
+ fclose(file);
+ return (uint64_t)sz * 1024;
+ }
+ }
+
+ _ODP_ERR("unable to get default hp size\n");
+ fclose(file);
+ return 0;
+}
+
+/*
+ * returns a malloced string containing the name of the directory for
+ * huge pages of a given size (0 for default)
+ * largely "inspired" by dpdk:
+ * lib/librte_eal/linuxapp/eal/eal_hugepage_info.c: get_hugepage_dir
+ *
+ * Analysis of /proc/mounts
+ */
+static char *get_hugepage_dir(uint64_t hugepage_sz)
+{
+	enum proc_mount_fieldnames {
+		DEVICE = 0,
+		MOUNTPT,
+		FSTYPE,
+		OPTIONS,
+		_FIELDNAME_MAX
+	};
+	static uint64_t default_size;
+	const char *proc_mounts = "/proc/mounts";
+	const char *hugetlbfs_str = "hugetlbfs";
+	/* Fix: sizeof(ptr) - 1 measured the pointer size (7 on 64-bit), not
+	 * the string length. For "pagesize=" (length 9) the offset below then
+	 * landed inside the option text and rte_str_to_size() parsed garbage,
+	 * so mounts with an explicit pagesize= option could never match. */
+	const size_t htlbfs_str_len = strlen(hugetlbfs_str);
+	const char *pagesize_opt = "pagesize=";
+	const size_t pagesize_opt_len = strlen(pagesize_opt);
+	const char split_tok = ' ';
+	char *tokens[_FIELDNAME_MAX];
+	char buf[BUFSIZ];
+	char *retval = NULL;
+	const char *pagesz_str;
+	uint64_t pagesz;
+	FILE *fd = fopen(proc_mounts, "r");
+
+	if (fd == NULL)
+		return NULL;
+
+	/* Cache the system default huge page size on first call */
+	if (default_size == 0)
+		default_size = default_huge_page_size();
+
+	if (hugepage_sz == 0)
+		hugepage_sz = default_size;
+
+	while (fgets(buf, sizeof(buf), fd)) {
+		if (rte_strsplit(buf, sizeof(buf), tokens,
+				 _FIELDNAME_MAX, split_tok) != _FIELDNAME_MAX) {
+			_ODP_ERR("Error parsing %s\n", proc_mounts);
+			break; /* return NULL */
+		}
+
+		/* is this hugetlbfs? */
+		if (!strncmp(tokens[FSTYPE], hugetlbfs_str, htlbfs_str_len)) {
+			pagesz_str = strstr(tokens[OPTIONS], pagesize_opt);
+
+			/* No explicit size, default page size is compared */
+			if (pagesz_str == NULL) {
+				if (hugepage_sz == default_size) {
+					retval = strdup(tokens[MOUNTPT]);
+					break;
+				}
+			} else {
+				/* there is an explicit page size, so check it */
+				pagesz = rte_str_to_size(&pagesz_str[pagesize_opt_len]);
+				if (pagesz == hugepage_sz) {
+					retval = strdup(tokens[MOUNTPT]);
+					break;
+				}
+			}
+		} /* end if strncmp hugetlbfs */
+	} /* end while fgets */
+
+	fclose(fd);
+	return retval;
+}
+
+/*
+ * Analysis of /sys/devices/system/cpu/cpu%d/cpufreq/ files
+ */
+/* Read one value from a per-CPU cpufreq sysfs file and return it in Hz.
+ * sysfs cpufreq values are expressed in kHz, hence the * 1000. Returns 0
+ * when the file is missing or unreadable; callers fall back then. */
+static uint64_t read_cpufreq(const char *filename, int id)
+{
+	char path[256], buffer[256], *endptr = NULL;
+	FILE *file;
+	uint64_t ret = 0;
+
+	snprintf(path, sizeof(path),
+		 "/sys/devices/system/cpu/cpu%d/cpufreq/%s", id, filename);
+
+	file = fopen(path, "r");
+	if (file == NULL)
+		return ret;
+
+	if (fgets(buffer, sizeof(buffer), file) != NULL)
+		ret = strtoull(buffer, &endptr, 0) * 1000;
+
+	fclose(file);
+
+	return ret;
+}
+
+static inline uint64_t cpu_hz_current(int id)
+{
+ uint64_t cur_hz = read_cpufreq("cpuinfo_cur_freq", id);
+
+ if (!cur_hz)
+ cur_hz = odp_cpu_arch_hz_current(id);
+
+ return cur_hz;
+}
+
+static inline uint64_t cpu_hz_static(int id)
+{
+ return odp_global_ro.system_info.cpu_hz[id];
+}
+
+/*
+ * Analysis of /sys/devices/system/cpu/ files
+ */
+static int system_cache_line(system_info_t *sysinfo)
+{
+ int ret;
+
+ ret = read_cache_line_size();
+ if (ret == 0) {
+ _ODP_ERR("read_cache_line_size failed.\n");
+ return -1;
+ }
+
+ sysinfo->cache_line_size = ret;
+
+ if (ret != ODP_CACHE_LINE_SIZE)
+ _ODP_WARN("Host CPU cache line size and ODP_CACHE_LINE_SIZE don't match.\n");
+
+ return 0;
+}
+
+/*
+ * Huge page information
+ */
+static int system_hp(hugepage_info_t *hugeinfo)
+{
+ hugeinfo->default_huge_page_size = default_huge_page_size();
+
+ /* default_huge_page_dir may be NULL if no huge page support */
+ hugeinfo->default_huge_page_dir = get_hugepage_dir(0);
+
+ return 0;
+}
+
+static int read_config_file(void)
+{
+ const char *str;
+ int val = 0;
+
+ str = "system.cpu_mhz";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.default_cpu_hz = (uint64_t)val * 1000000;
+
+ str = "system.cpu_mhz_max";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.default_cpu_hz_max = (uint64_t)val * 1000000;
+
+ str = "system.cpu_hz_static";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.cpu_hz_static = !!val;
+
+ return 0;
+}
+
+static void print_compiler_info(void)
+{
+ _ODP_PRINT("Compiler defines:\n");
+ _ODP_PRINT(" __GCC_ATOMIC_LLONG_LOCK_FREE: %d\n", __GCC_ATOMIC_LLONG_LOCK_FREE);
+ _ODP_PRINT(" __GCC_ATOMIC_LONG_LOCK_FREE: %d\n", __GCC_ATOMIC_LONG_LOCK_FREE);
+ _ODP_PRINT(" __GCC_ATOMIC_INT_LOCK_FREE: %d\n", __GCC_ATOMIC_INT_LOCK_FREE);
+ _ODP_PRINT(" __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16: ");
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+ _ODP_PRINT("1\n");
+#else
+ _ODP_PRINT("0\n");
+#endif
+ _ODP_PRINT("\n");
+}
+
+/*
+ * System info initialisation
+ */
+int _odp_system_info_init(void)
+{
+ int num_cpus;
+ int i;
+ FILE *file;
+
+ memset(&odp_global_ro.system_info, 0, sizeof(system_info_t));
+
+ odp_global_ro.system_info.page_size = ODP_PAGE_SIZE;
+
+ /* Read default CPU Hz values from config file */
+ if (read_config_file())
+ return -1;
+
+ /* Check that CONFIG_NUM_CPU_IDS is large enough */
+ num_cpus = get_nprocs_conf();
+ if (num_cpus > CONFIG_NUM_CPU_IDS)
+ _ODP_ERR("Unable to handle all %d "
+ "CPU IDs. Increase CONFIG_NUM_CPU_IDS value.\n",
+ num_cpus);
+
+ /* Read and save all CPU frequencies for static mode */
+ if (odp_global_ro.system_info.cpu_hz_static)
+ for (i = 0; i < CONFIG_NUM_CPU_IDS; i++)
+ odp_global_ro.system_info.cpu_hz[i] = cpu_hz_current(i);
+
+ /* By default, read max frequency from a cpufreq file */
+ for (i = 0; i < CONFIG_NUM_CPU_IDS; i++) {
+ uint64_t cpu_hz_max = read_cpufreq("cpuinfo_max_freq", i);
+
+ if (cpu_hz_max)
+ odp_global_ro.system_info.cpu_hz_max[i] = cpu_hz_max;
+ }
+
+ file = fopen("/proc/cpuinfo", "rt");
+ if (file != NULL) {
+ /* Read CPU model, and set max cpu frequency
+ * if not set from cpufreq. */
+ _odp_cpuinfo_parser(file, &odp_global_ro.system_info);
+ fclose(file);
+ } else {
+ _odp_dummy_cpuinfo(&odp_global_ro.system_info);
+ }
+
+ if (system_cache_line(&odp_global_ro.system_info))
+ return -1;
+
+ system_hp(&odp_global_ro.hugepage_info);
+
+ print_compiler_info();
+
+ return 0;
+}
+
+/*
+ * System info termination
+ */
+int _odp_system_info_term(void)
+{
+ free(odp_global_ro.hugepage_info.default_huge_page_dir);
+
+ return 0;
+}
+
+/*
+ *************************
+ * Public access functions
+ *************************
+ */
+
+uint64_t odp_cpu_hz(void)
+{
+ int id = odp_cpu_id();
+
+ if (odp_global_ro.system_info.cpu_hz_static)
+ return cpu_hz_static(id);
+ return cpu_hz_current(id);
+}
+
+uint64_t odp_cpu_hz_id(int id)
+{
+ _ODP_ASSERT(id >= 0 && id < CONFIG_NUM_CPU_IDS);
+
+ if (odp_global_ro.system_info.cpu_hz_static)
+ return cpu_hz_static(id);
+ return cpu_hz_current(id);
+}
+
+uint64_t odp_cpu_hz_max(void)
+{
+ return odp_cpu_hz_max_id(0);
+}
+
+uint64_t odp_cpu_hz_max_id(int id)
+{
+ if (id >= 0 && id < CONFIG_NUM_CPU_IDS)
+ return odp_global_ro.system_info.cpu_hz_max[id];
+ else
+ return 0;
+}
+
+uint64_t odp_sys_huge_page_size(void)
+{
+ return odp_global_ro.hugepage_info.default_huge_page_size;
+}
+
+/* qsort() comparator: orders huge page sizes ascending */
+static int pagesz_compare(const void *pagesz1, const void *pagesz2)
+{
+	const uint64_t val1 = *(const uint64_t *)pagesz1;
+	const uint64_t val2 = *(const uint64_t *)pagesz2;
+
+	if (val1 < val2)
+		return -1;
+	if (val1 > val2)
+		return 1;
+	return 0;
+}
+
+int odp_sys_huge_page_size_all(uint64_t size[], int num)
+{
+ DIR *dir;
+ struct dirent *entry;
+ int pagesz_num = 0;
+ int saved = 0;
+
+ /* See: kernel.org: hugetlbpage.txt */
+ dir = opendir("/sys/kernel/mm/hugepages");
+ if (!dir) {
+ _ODP_PRINT("Failed to open /sys/kernel/mm/hugepages: %s\n", strerror(errno));
+ return 0;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ unsigned long sz;
+
+ if (sscanf(entry->d_name, "hugepages-%8lukB", &sz) == 1) {
+ if (size != NULL && saved < num)
+ size[saved++] = sz * 1024;
+ pagesz_num++;
+ }
+ }
+ closedir(dir);
+
+ if (size != NULL && saved > 1)
+ qsort(size, saved, sizeof(uint64_t), pagesz_compare);
+
+ return pagesz_num;
+}
+
+uint64_t odp_sys_page_size(void)
+{
+ return odp_global_ro.system_info.page_size;
+}
+
+const char *odp_cpu_model_str(void)
+{
+ return odp_cpu_model_str_id(0);
+}
+
+const char *odp_cpu_model_str_id(int id)
+{
+ if (id >= 0 && id < CONFIG_NUM_CPU_IDS)
+ return odp_global_ro.system_info.model_str[id];
+ else
+ return NULL;
+}
+
+int odp_sys_cache_line_size(void)
+{
+ return odp_global_ro.system_info.cache_line_size;
+}
+
+int odp_cpu_count(void)
+{
+ return odp_global_ro.num_cpus_installed;
+}
+
+int odp_system_info(odp_system_info_t *info)
+{
+ system_info_t *sys_info = &odp_global_ro.system_info;
+
+ memset(info, 0, sizeof(odp_system_info_t));
+
+ info->cpu_arch = sys_info->cpu_arch;
+ info->cpu_isa_sw = sys_info->cpu_isa_sw;
+ info->cpu_isa_hw = sys_info->cpu_isa_hw;
+
+ return 0;
+}
+
+/* Print a summary of ODP/DPDK versions, CPU model/frequency, cache line size
+ * and the available CPU mask, followed by the arch-specific details. */
+void odp_sys_info_print(void)
+{
+	int len, num_cpu;
+	int max_len = 512;
+	odp_cpumask_t cpumask;
+	char cpumask_str[ODP_CPUMASK_STR_SIZE];
+	char str[max_len];
+
+	memset(cpumask_str, 0, sizeof(cpumask_str));
+
+	num_cpu = odp_cpumask_all_available(&cpumask);
+	odp_cpumask_to_str(&cpumask, cpumask_str, ODP_CPUMASK_STR_SIZE);
+
+	len = snprintf(str, max_len, "\n"
+		       "ODP system info\n"
+		       "---------------\n"
+		       "ODP API version: %s\n"
+		       "ODP impl name: %s\n"
+		       "ODP impl details: %s\n"
+		       "DPDK version: %d.%d.%d\n"
+		       "CPU model: %s\n"
+		       "CPU freq (hz): %" PRIu64 "\n"
+		       "Cache line size: %i\n"
+		       "CPU count: %i\n"
+		       "CPU mask: %s\n"
+		       "\n",
+		       odp_version_api_str(),
+		       odp_version_impl_name(),
+		       odp_version_impl_str(),
+		       RTE_VER_YEAR, RTE_VER_MONTH, RTE_VER_MINOR,
+		       odp_cpu_model_str(),
+		       odp_cpu_hz_max(),
+		       odp_sys_cache_line_size(),
+		       num_cpu, cpumask_str);
+
+	/* Fix: snprintf() returns the untruncated length, which can be
+	 * >= max_len when the output is truncated (or negative on error);
+	 * writing str[len] then indexed past the end of the buffer. Clamp
+	 * first (snprintf() already NUL-terminates within the buffer). */
+	if (len < 0)
+		len = 0;
+	else if (len >= max_len)
+		len = max_len - 1;
+	str[len] = '\0';
+	_ODP_PRINT("%s", str);
+
+	_odp_sys_info_print_arch();
+}
+
+void odp_sys_config_print(void)
+{
+ /* Print ODP_CONFIG_FILE default and override values */
+ if (_odp_libconfig_print())
+ _ODP_ERR("Config file print failed\n");
+
+ _ODP_PRINT("\n\nodp_config_internal.h values:\n"
+ "-----------------------------\n");
+ _ODP_PRINT("CONFIG_NUM_CPU_IDS: %i\n", CONFIG_NUM_CPU_IDS);
+ _ODP_PRINT("CONFIG_INTERNAL_QUEUES: %i\n", CONFIG_INTERNAL_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_PLAIN_QUEUES: %i\n", CONFIG_MAX_PLAIN_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_SCHED_QUEUES: %i\n", CONFIG_MAX_SCHED_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_QUEUES: %i\n", CONFIG_MAX_QUEUES);
+ _ODP_PRINT("CONFIG_QUEUE_MAX_ORD_LOCKS: %i\n", CONFIG_QUEUE_MAX_ORD_LOCKS);
+ _ODP_PRINT("CONFIG_MAX_DMA_SESSIONS: %i\n", CONFIG_MAX_DMA_SESSIONS);
+ _ODP_PRINT("CONFIG_INTERNAL_STASHES: %i\n", CONFIG_INTERNAL_STASHES);
+ _ODP_PRINT("CONFIG_MAX_STASHES: %i\n", CONFIG_MAX_STASHES);
+ _ODP_PRINT("CONFIG_PKTIO_ENTRIES: %i\n", CONFIG_PKTIO_ENTRIES);
+ _ODP_PRINT("CONFIG_BUFFER_ALIGN_MIN: %i\n", CONFIG_BUFFER_ALIGN_MIN);
+ _ODP_PRINT("CONFIG_BUFFER_ALIGN_MAX: %i\n", CONFIG_BUFFER_ALIGN_MAX);
+ _ODP_PRINT("CONFIG_PACKET_TAILROOM: %i\n", CONFIG_PACKET_TAILROOM);
+ _ODP_PRINT("CONFIG_PACKET_SEG_SIZE: %i\n", CONFIG_PACKET_SEG_SIZE);
+ _ODP_PRINT("CONFIG_PACKET_MAX_SEG_LEN: %i\n", CONFIG_PACKET_MAX_SEG_LEN);
+ _ODP_PRINT("CONFIG_PACKET_SEG_LEN_MIN: %i\n", CONFIG_PACKET_SEG_LEN_MIN);
+ _ODP_PRINT("CONFIG_PACKET_VECTOR_MAX_SIZE: %i\n", CONFIG_PACKET_VECTOR_MAX_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_SHM_BLOCKS: %i\n", CONFIG_INTERNAL_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_SHM_BLOCKS: %i\n", CONFIG_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_BURST_SIZE: %i\n", CONFIG_BURST_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_POOLS: %i\n", CONFIG_INTERNAL_POOLS);
+ _ODP_PRINT("CONFIG_POOLS: %i\n", CONFIG_POOLS);
+ _ODP_PRINT("CONFIG_POOL_MAX_NUM: %i\n", CONFIG_POOL_MAX_NUM);
+ _ODP_PRINT("CONFIG_IPSEC_MAX_NUM_SA: %i\n", CONFIG_IPSEC_MAX_NUM_SA);
+ _ODP_PRINT("CONFIG_ML_MAX_MODELS: %i\n", CONFIG_ML_MAX_MODELS);
+ _ODP_PRINT("CONFIG_ML_MAX_INPUTS: %i\n", CONFIG_ML_MAX_INPUTS);
+ _ODP_PRINT("CONFIG_ML_MAX_OUTPUTS: %i\n", CONFIG_ML_MAX_OUTPUTS);
+ _ODP_PRINT("\n");
+}
diff --git a/platform/linux-dpdk/odp_thread.c b/platform/linux-dpdk/odp_thread.c
index b9a811e4a..10b5de5e0 100644
--- a/platform/linux-dpdk/odp_thread.c
+++ b/platform/linux-dpdk/odp_thread.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,16 +8,23 @@
#include <odp_posix_extensions.h>
#include <sched.h>
+#include <odp/api/atomic.h>
#include <odp/api/thread.h>
#include <odp/api/thrmask.h>
-#include <odp_internal.h>
#include <odp/api/spinlock.h>
+#include <odp_init_internal.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
#include <odp/api/shared_memory.h>
#include <odp/api/align.h>
#include <odp/api/cpu.h>
#include <odp_schedule_if.h>
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp_thread_internal.h>
+#include <odp_libconfig_internal.h>
+
+#include <rte_config.h>
#include <rte_lcore.h>
#include <string.h>
@@ -24,41 +32,51 @@
#include <stdlib.h>
typedef struct {
- int thr;
- int cpu;
- odp_thread_type_t type;
-} thread_state_t;
-
-
-typedef struct {
- thread_state_t thr[ODP_THREAD_COUNT_MAX];
+ _odp_thread_state_t thr[ODP_THREAD_COUNT_MAX];
struct {
odp_thrmask_t all;
odp_thrmask_t worker;
odp_thrmask_t control;
};
-
+ odp_atomic_u64_t thrmask_all_epoch;
uint32_t num;
uint32_t num_worker;
uint32_t num_control;
+ uint32_t num_max;
odp_spinlock_t lock;
+ odp_shm_t shm;
} thread_globals_t;
-
/* Globals */
static thread_globals_t *thread_globals;
+#include <odp/visibility_begin.h>
/* Thread local */
-static __thread thread_state_t *this_thread;
+__thread _odp_thread_state_t *_odp_this_thread;
+#include <odp/visibility_end.h>
-int odp_thread_init_global(void)
+int _odp_thread_init_global(void)
{
odp_shm_t shm;
- shm = odp_shm_reserve("odp_thread_globals",
+ int num_max = 0;
+ const char *str = "system.thread_count_max";
+
+ if (!_odp_libconfig_lookup_int(str, &num_max)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ if (num_max <= 0) {
+ _ODP_ERR("Config option '%s' not valid.\n", str);
+ return -1;
+ }
+ if (num_max > ODP_THREAD_COUNT_MAX)
+ num_max = ODP_THREAD_COUNT_MAX;
+
+ shm = odp_shm_reserve("_odp_thread_global",
sizeof(thread_globals_t),
ODP_CACHE_LINE_SIZE, 0);
@@ -68,33 +86,67 @@ int odp_thread_init_global(void)
return -1;
memset(thread_globals, 0, sizeof(thread_globals_t));
+
+ thread_globals->shm = shm;
+
odp_spinlock_init(&thread_globals->lock);
+ odp_atomic_init_u64(&thread_globals->thrmask_all_epoch, 0);
+ thread_globals->num_max = num_max;
+ _ODP_PRINT("System config:\n");
+ _ODP_PRINT(" system.thread_count_max: %d\n\n", num_max);
return 0;
}
-int odp_thread_term_global(void)
+int _odp_thread_term_global(void)
{
- int ret;
+ int ret, num;
+
+ odp_spinlock_lock(&thread_globals->lock);
+ num = thread_globals->num;
+ odp_spinlock_unlock(&thread_globals->lock);
- ret = odp_shm_free(odp_shm_lookup("odp_thread_globals"));
+ if (num)
+ _ODP_ERR("%i threads have not called odp_term_local().\n", num);
+
+ ret = odp_shm_free(thread_globals->shm);
if (ret < 0)
- ODP_ERR("shm free failed for odp_thread_globals");
+ _ODP_ERR("shm free failed for _odp_thread_global");
return ret;
}
+uint64_t _odp_thread_thrmask_epoch(void)
+{
+ return odp_atomic_load_u64(&thread_globals->thrmask_all_epoch);
+}
+
+int _odp_thread_cpu_ids(unsigned int cpu_ids[], int max_num)
+{
+ odp_thrmask_t *all = &thread_globals->all;
+ int num_cpus = 0;
+ uint32_t thr;
+
+ for (thr = 0; num_cpus < max_num && thr < thread_globals->num_max; thr++) {
+ if (odp_thrmask_isset(all, thr))
+ cpu_ids[num_cpus++] = thread_globals->thr[thr].cpu;
+ }
+
+ return num_cpus;
+}
+
static int alloc_id(odp_thread_type_t type)
{
int thr;
odp_thrmask_t *all = &thread_globals->all;
- if (thread_globals->num >= ODP_THREAD_COUNT_MAX)
+ if (thread_globals->num >= thread_globals->num_max)
return -1;
- for (thr = 0; thr < ODP_THREAD_COUNT_MAX; thr++) {
+ for (thr = 0; thr < (int)thread_globals->num_max; thr++) {
if (odp_thrmask_isset(all, thr) == 0) {
odp_thrmask_set(all, thr);
+ odp_atomic_inc_u64(&thread_globals->thrmask_all_epoch);
if (type == ODP_THREAD_WORKER) {
odp_thrmask_set(&thread_globals->worker, thr);
@@ -116,13 +168,14 @@ static int free_id(int thr)
{
odp_thrmask_t *all = &thread_globals->all;
- if (thr < 0 || thr >= ODP_THREAD_COUNT_MAX)
+ if (thr < 0 || thr >= (int)thread_globals->num_max)
return -1;
if (odp_thrmask_isset(all, thr) == 0)
return -1;
odp_thrmask_clr(all, thr);
+ odp_atomic_inc_u64(&thread_globals->thrmask_all_epoch);
if (thread_globals->thr[thr].type == ODP_THREAD_WORKER) {
odp_thrmask_clr(&thread_globals->worker, thr);
@@ -136,25 +189,38 @@ static int free_id(int thr)
return thread_globals->num;
}
-int odp_thread_init_local(odp_thread_type_t type)
+int _odp_thread_init_local(odp_thread_type_t type)
{
int id;
int cpu;
- struct rte_config *cfg = rte_eal_get_configuration();
+ int group_all, group_worker, group_control;
+
+ group_all = 1;
+ group_worker = 1;
+ group_control = 1;
+
+ if (_odp_sched_fn->get_config) {
+ schedule_config_t schedule_config;
+
+ _odp_sched_fn->get_config(&schedule_config);
+ group_all = schedule_config.group_enable.all;
+ group_worker = schedule_config.group_enable.worker;
+ group_control = schedule_config.group_enable.control;
+ }
odp_spinlock_lock(&thread_globals->lock);
id = alloc_id(type);
odp_spinlock_unlock(&thread_globals->lock);
if (id < 0) {
- ODP_ERR("Too many threads\n");
+ _ODP_ERR("Too many threads\n");
return -1;
}
cpu = sched_getcpu();
if (cpu < 0) {
- ODP_ERR("getcpu failed\n");
+ _ODP_ERR("getcpu failed\n");
return -1;
}
@@ -162,70 +228,92 @@ int odp_thread_init_local(odp_thread_type_t type)
thread_globals->thr[id].cpu = cpu;
thread_globals->thr[id].type = type;
RTE_PER_LCORE(_lcore_id) = cpu;
- if (cfg->lcore_role[cpu] == ROLE_RTE)
- ODP_ERR("There is a thread already running on core %d\n", cpu);
- cfg->lcore_role[cpu] = ROLE_RTE;
- this_thread = &thread_globals->thr[id];
+ _odp_this_thread = &thread_globals->thr[id];
+
+ if (group_all)
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
- sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
+ if (type == ODP_THREAD_WORKER && group_worker)
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
- if (type == ODP_THREAD_WORKER)
- sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
- else if (type == ODP_THREAD_CONTROL)
- sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
+ if (type == ODP_THREAD_CONTROL && group_control)
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
return 0;
}
-int odp_thread_term_local(void)
+int _odp_thread_term_local(void)
{
int num;
- int id = this_thread->thr;
- odp_thread_type_t type = this_thread->type;
+ int group_all, group_worker, group_control;
+ int id = _odp_this_thread->thr;
+ odp_thread_type_t type = _odp_this_thread->type;
- sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+ group_all = 1;
+ group_worker = 1;
+ group_control = 1;
+
+ if (_odp_sched_fn->get_config) {
+ schedule_config_t schedule_config;
+
+ _odp_sched_fn->get_config(&schedule_config);
+ group_all = schedule_config.group_enable.all;
+ group_worker = schedule_config.group_enable.worker;
+ group_control = schedule_config.group_enable.control;
+ }
- if (type == ODP_THREAD_WORKER)
- sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
- else if (type == ODP_THREAD_CONTROL)
- sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
+ if (group_all)
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+
+ if (type == ODP_THREAD_WORKER && group_worker)
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
+
+ if (type == ODP_THREAD_CONTROL && group_control)
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
+
+ _odp_this_thread = NULL;
odp_spinlock_lock(&thread_globals->lock);
num = free_id(id);
odp_spinlock_unlock(&thread_globals->lock);
if (num < 0) {
- ODP_ERR("failed to free thread id %i", id);
+ _ODP_ERR("failed to free thread id %i", id);
return -1;
}
return num; /* return a number of threads left */
}
-int odp_thread_id(void)
+int odp_thread_count(void)
{
- return this_thread->thr;
+ return thread_globals->num;
}
-int odp_thread_count(void)
+int odp_thread_control_count(void)
{
- return thread_globals->num;
+ return thread_globals->num_control;
+}
+
+int odp_thread_worker_count(void)
+{
+ return thread_globals->num_worker;
}
int odp_thread_count_max(void)
{
- return ODP_THREAD_COUNT_MAX;
+ return thread_globals->num_max;
}
-odp_thread_type_t odp_thread_type(void)
+int odp_thread_control_count_max(void)
{
- return this_thread->type;
+ return thread_globals->num_max;
}
-int odp_cpu_id(void)
+int odp_thread_worker_count_max(void)
{
- return this_thread->cpu;
+ return thread_globals->num_max;
}
int odp_thrmask_worker(odp_thrmask_t *mask)
diff --git a/platform/linux-dpdk/odp_time.c b/platform/linux-dpdk/odp_time.c
index c1ad973cc..fbee02df1 100644
--- a/platform/linux-dpdk/odp_time.c
+++ b/platform/linux-dpdk/odp_time.c
@@ -1,350 +1,74 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2024, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_posix_extensions.h>
+#include <odp/api/plat/time_inlines.h>
-#include <time.h>
-#include <odp/api/time.h>
-#include <odp/api/hints.h>
#include <odp_debug_internal.h>
-#include <odp_time_internal.h>
-#include <rte_cycles.h>
-#include <string.h>
-
-typedef union {
- odp_time_t ex;
- struct timespec in;
-} _odp_time_t;
-
-static odp_time_t start_time;
-static time_handler_t time_handler;
-double tick_per_nsec;
-double nsec_per_tick;
-
-static inline uint64_t time_local_res_dpdk(void)
-{
- return rte_get_timer_hz();
-}
-
-static inline
-uint64_t time_to_ns(odp_time_t time)
-{
- uint64_t ns;
-
- ns = time.tv_sec * ODP_TIME_SEC_IN_NS;
- ns += time.tv_nsec;
-
- return ns;
-}
-
-static inline uint64_t time_to_ns_dpdk(odp_time_t time)
-{
- return (time.tv_sec * nsec_per_tick);
-}
-
-static inline odp_time_t time_diff(odp_time_t t2, odp_time_t t1)
-{
- odp_time_t time;
+#include <odp_init_internal.h>
- time.tv_sec = t2.tv_sec - t1.tv_sec;
- time.tv_nsec = t2.tv_nsec - t1.tv_nsec;
-
- if (time.tv_nsec < 0) {
- time.tv_nsec += ODP_TIME_SEC_IN_NS;
- --time.tv_sec;
- }
-
- return time;
-}
-
-static inline odp_time_t time_diff_dpdk(odp_time_t t2, odp_time_t t1)
-{
- odp_time_t time;
+#include <rte_config.h>
+#include <rte_cycles.h>
- time.tv_sec = t2.tv_sec - t1.tv_sec;
- return time;
-}
+#include <inttypes.h>
+#include <string.h>
-static inline odp_time_t time_curr(void)
-{
- int ret;
- _odp_time_t time;
+#define YEAR_IN_SEC (365 * 24 * 3600)
- ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time.in);
- if (odp_unlikely(ret != 0))
- ODP_ABORT("clock_gettime failed\n");
- return time.ex;
-}
+#include <odp/visibility_begin.h>
-static inline odp_time_t time_curr_dpdk(void)
-{
- odp_time_t time;
+_odp_time_global_t _odp_time_glob;
- time.tv_sec = rte_get_timer_cycles();
- return time;
-}
+#include <odp/visibility_end.h>
-static inline odp_time_t time_local(void)
+int _odp_time_init_global(void)
{
+ uint64_t diff, years;
odp_time_t time;
- time = time_handler.time_curr();
- return time_handler.time_diff(time, start_time);
-}
+ memset(&_odp_time_glob, 0, sizeof(_odp_time_global_t));
-static inline int time_cmp(odp_time_t t2, odp_time_t t1)
-{
- if (t2.tv_sec < t1.tv_sec)
- return -1;
-
- if (t2.tv_sec > t1.tv_sec)
- return 1;
-
- return t2.tv_nsec - t1.tv_nsec;
-}
+#ifdef RTE_LIBEAL_USE_HPET
+ if (rte_eal_hpet_init(1) != 0)
+ _ODP_WARN("HPET init failed. Using TSC time.\n");
+#endif
-static inline int time_cmp_dpdk(odp_time_t t2, odp_time_t t1)
-{
- if (t2.tv_sec < t1.tv_sec)
+ _odp_time_glob.freq_hz = rte_get_timer_hz();
+ _odp_time_glob.start_cycles = rte_get_timer_cycles();
+ if (_odp_time_glob.start_cycles == 0) {
+ _ODP_ERR("Initializing start cycles failed.\n");
return -1;
- if (t2.tv_sec > t1.tv_sec)
- return 1;
- return 0;
-}
-
-static inline odp_time_t time_sum(odp_time_t t1, odp_time_t t2)
-{
- odp_time_t time;
-
- time.tv_sec = t2.tv_sec + t1.tv_sec;
- time.tv_nsec = t2.tv_nsec + t1.tv_nsec;
-
- if (time.tv_nsec >= (long)ODP_TIME_SEC_IN_NS) {
- time.tv_nsec -= ODP_TIME_SEC_IN_NS;
- ++time.tv_sec;
}
- return time;
-}
-
-static inline odp_time_t time_sum_dpdk(odp_time_t t1, odp_time_t t2)
-{
- odp_time_t time;
-
- time.tv_sec = t2.tv_sec + t1.tv_sec;
- return time;
-}
-
-static inline odp_time_t time_local_from_ns(uint64_t ns)
-{
- odp_time_t time;
-
- time.tv_sec = ns / ODP_TIME_SEC_IN_NS;
- time.tv_nsec = ns - time.tv_sec * ODP_TIME_SEC_IN_NS;
-
- return time;
-}
-
-static inline odp_time_t time_local_from_ns_dpdk(uint64_t ns)
-{
- odp_time_t time;
- time.tv_sec = ns * tick_per_nsec;
- return time;
-}
-
-static inline void time_wait_until(odp_time_t time)
-{
- odp_time_t cur;
-
- do {
- cur = time_local();
- } while (time_handler.time_cmp(time, cur) > 0);
-}
-
-static inline uint64_t time_local_res(void)
-{
- int ret;
- struct timespec tres;
-
- ret = clock_getres(CLOCK_MONOTONIC_RAW, &tres);
- if (odp_unlikely(ret != 0))
- ODP_ABORT("clock_getres failed\n");
-
- return ODP_TIME_SEC_IN_NS / (uint64_t)tres.tv_nsec;
-}
-
-odp_time_t odp_time_local(void)
-{
- return time_local();
-}
-
-odp_time_t odp_time_global(void)
-{
- return time_local();
-}
+ time.u64 = _odp_time_glob.start_cycles;
+ _odp_time_glob.start_ns = _odp_time_to_ns(time);
-odp_time_t odp_time_diff(odp_time_t t2, odp_time_t t1)
-{
- return time_handler.time_diff(t2, t1);
-}
+ /* Make sure that counters will not wrap */
+ diff = UINT64_MAX - _odp_time_glob.start_cycles;
+ years = (diff / _odp_time_glob.freq_hz) / YEAR_IN_SEC;
-uint64_t odp_time_to_ns(odp_time_t time)
-{
- return time_handler.time_to_ns(time);
-}
-
-odp_time_t odp_time_local_from_ns(uint64_t ns)
-{
- return time_handler.time_local_from_ns(ns);
-}
-
-odp_time_t odp_time_global_from_ns(uint64_t ns)
-{
- return time_handler.time_local_from_ns(ns);
-}
-
-int odp_time_cmp(odp_time_t t2, odp_time_t t1)
-{
- return time_handler.time_cmp(t2, t1);
-}
-
-odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
-{
- return time_handler.time_sum(t1, t2);
-}
-
-uint64_t odp_time_local_res(void)
-{
- return time_handler.time_local_res();
-}
-
-uint64_t odp_time_global_res(void)
-{
- return time_handler.time_local_res();
-}
-
-void odp_time_wait_ns(uint64_t ns)
-{
- odp_time_t cur = time_local();
- odp_time_t wait = time_handler.time_local_from_ns(ns);
- odp_time_t end_time = time_handler.time_sum(cur, wait);
-
- time_wait_until(end_time);
-}
-
-void odp_time_wait_until(odp_time_t time)
-{
- return time_wait_until(time);
-}
-
-static uint64_t time_to_u64(odp_time_t time)
-{
- int ret;
- struct timespec tres;
- uint64_t resolution;
-
- ret = clock_getres(CLOCK_MONOTONIC_RAW, &tres);
- if (odp_unlikely(ret != 0))
- ODP_ABORT("clock_getres failed\n");
-
- resolution = (uint64_t)tres.tv_nsec;
-
- return time_handler.time_to_ns(time) / resolution;
-}
-
-static uint64_t time_to_u64_dpdk(odp_time_t time)
-{
- return time.tv_sec;
-}
-
-uint64_t odp_time_to_u64(odp_time_t time)
-{
- return time_handler.time_to_u64(time);
-}
-
-static odp_bool_t is_invariant_tsc_supported(void)
-{
- FILE *file;
- char *line = NULL;
- size_t len = 0;
- odp_bool_t nonstop_tsc = false;
- odp_bool_t constant_tsc = false;
- odp_bool_t ret = false;
-
- file = fopen("/proc/cpuinfo", "rt");
- while (getline(&line, &len, file) != -1) {
- if (strstr(line, "flags") != NULL) {
- if (strstr(line, "constant_tsc") != NULL)
- constant_tsc = true;
- if (strstr(line, "nonstop_tsc") != NULL)
- nonstop_tsc = true;
-
- if (constant_tsc && nonstop_tsc)
- ret = true;
- else
- ret = false;
-
- free(line);
- fclose(file);
- return ret;
- }
+ if (years < 10) {
+ _ODP_ERR("Time counter would wrap in 10 years: %" PRIu64 "\n",
+ _odp_time_glob.start_cycles);
+ return -1;
}
- free(line);
- fclose(file);
- return false;
-}
-static inline odp_bool_t is_dpdk_timer_cycles_support(void)
-{
- if (is_invariant_tsc_supported() == true)
- return true;
+ diff = UINT64_MAX - _odp_time_glob.start_ns;
+ years = (diff / ODP_TIME_SEC_IN_NS) / YEAR_IN_SEC;
-#ifdef RTE_LIBEAL_USE_HPET
- if (rte_eal_hpet_init(1) == 0)
- return true;
-#endif
- return false;
-}
-
-int odp_time_init_global(void)
-{
- if (is_dpdk_timer_cycles_support()) {
- time_handler.time_to_ns = time_to_ns_dpdk;
- time_handler.time_diff = time_diff_dpdk;
- time_handler.time_curr = time_curr_dpdk;
- time_handler.time_cmp = time_cmp_dpdk;
- time_handler.time_sum = time_sum_dpdk;
- time_handler.time_local_from_ns = time_local_from_ns_dpdk;
- time_handler.time_local_res = time_local_res_dpdk;
- time_handler.time_to_u64 = time_to_u64_dpdk;
- tick_per_nsec = (double)time_local_res_dpdk() /
- (double)ODP_TIME_SEC_IN_NS;
- nsec_per_tick = (double)ODP_TIME_SEC_IN_NS /
- (double)time_local_res_dpdk();
- } else {
- time_handler.time_to_ns = time_to_ns;
- time_handler.time_diff = time_diff;
- time_handler.time_curr = time_curr;
- time_handler.time_cmp = time_cmp;
- time_handler.time_sum = time_sum;
- time_handler.time_local_from_ns = time_local_from_ns;
- time_handler.time_local_res = time_local_res;
- time_handler.time_to_u64 = time_to_u64;
- }
-
- start_time = time_handler.time_curr();
- if (time_handler.time_cmp(start_time, ODP_TIME_NULL) == 0) {
- ODP_ABORT("initiate odp time failed\n");
+ if (years < 10) {
+ _ODP_ERR("Time in nsec would wrap in 10 years: %" PRIu64 "\n",
+ _odp_time_glob.start_ns);
return -1;
- } else {
- return 0;
}
+
+ return 0;
}
-int odp_time_term_global(void)
+int _odp_time_term_global(void)
{
return 0;
}
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
new file mode 100644
index 000000000..f4c190aad
--- /dev/null
+++ b/platform/linux-dpdk/odp_timer.c
@@ -0,0 +1,1433 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2019-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/deprecated.h>
+#include <odp/api/queue.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/std.h>
+#include <odp/api/thread.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/time.h>
+#include <odp/api/timer.h>
+
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/timer_inlines.h>
+
+#include <odp/api/plat/timer_inline_types.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_print_internal.h>
+#include <odp_queue_if.h>
+#include <odp_ring_u32_internal.h>
+#include <odp_thread_internal.h>
+#include <odp_timer_internal.h>
+
+#include <rte_cycles.h>
+#include <rte_timer.h>
+
+#include <inttypes.h>
+#include <stdio.h>
+#include <string.h>
+
+/* One second in nanoseconds (equivalently, 1 GHz expressed in Hz) */
+#define GIGA_HZ 1000000000
+
+/* Timer states */
+#define NOT_TICKING 0
+#define EXPIRED 1
+#define TICKING 2
+
+/* Maximum number of timer pools */
+#define MAX_TIMER_POOLS 8
+
+/* Maximum ring size for storing timer pool timers. Must be a power of two. */
+#define MAX_TIMER_RING_SIZE (32 * 1024)
+
+/* Maximum number of timers per timer pool. Validation test expects 2000 timers
+ * per thread and up to 32 threads. */
+#define MAX_TIMERS (MAX_TIMER_RING_SIZE - 1)
+
+ODP_STATIC_ASSERT(MAX_TIMERS < MAX_TIMER_RING_SIZE,
+ "MAX_TIMER_RING_SIZE too small");
+
+/* Special expiration tick used for detecting final periodic timer events */
+#define PERIODIC_CANCELLED ((uint64_t)0xFFFFFFFFFFFFFFFF)
+
+/* Max timeout in capability. One year in nsec (0x0070 09D3 2DA3 0000). */
+#define MAX_TMO_NS (365 * 24 * 3600 * ODP_TIME_SEC_IN_NS)
+
+/* Actual resolution depends on application polling frequency. Promise
+ * 10 usec resolution. */
+#define MAX_RES_NS 10000
+#define MAX_RES_HZ (GIGA_HZ / MAX_RES_NS)
+
+/* Limit minimum supported timeout in timer (CPU) cycles. Timer setup, polling,
+ * timer management, timeout enqueue, etc takes about this many CPU cycles.
+ * It does not make sense to set up shorter timeouts than this. */
+#define MIN_TMO_CYCLES 2000
+
+/* Duration of a spin loop */
+#define WAIT_SPINS 30
+
+/* Minimum periodic timer base frequency */
+#define MIN_BASE_HZ 1
+
+/* Maximum periodic timer base frequency */
+#define MAX_BASE_HZ MAX_RES_HZ
+
+/* Maximum periodic timer multiplier */
+#define MAX_MULTIPLIER 1000000
+
+/* Maximum number of periodic timers per pool */
+#define MAX_PERIODIC_TIMERS 100
+
+/* Periodic tick fractional part accumulator size */
+#define ACC_SIZE (1ull << 32)
+
+typedef struct {
+ odp_ticketlock_t lock;
+ uint64_t tick;
+ const void *user_ptr;
+ odp_queue_t queue;
+ odp_event_t tmo_event;
+ struct timer_pool_s *timer_pool;
+ int state;
+ uint32_t timer_idx;
+
+ /* Period of periodic timer in ticks, includes PERIODIC_CANCELLED flag. */
+ uint64_t periodic_ticks;
+ /* Periodic ticks fractional part. */
+ uint32_t periodic_ticks_frac;
+ /* Periodic ticks fractional part accumulator. */
+ uint32_t periodic_ticks_frac_acc;
+
+ struct rte_timer rte_timer;
+
+} timer_entry_t;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+typedef struct timer_pool_s {
+ timer_entry_t timer[MAX_TIMER_RING_SIZE];
+
+ struct {
+ uint32_t ring_mask;
+
+ ring_u32_t ring_hdr;
+ uint32_t ring_data[MAX_TIMER_RING_SIZE];
+
+ } free_timer;
+
+ odp_timer_pool_param_t param;
+ char name[ODP_TIMER_POOL_NAME_LEN + 1];
+ int used;
+ odp_ticketlock_t lock;
+ uint32_t cur_timers;
+ uint32_t hwm_timers;
+ double base_freq;
+ uint64_t max_multiplier;
+ uint8_t periodic;
+
+} timer_pool_t;
+#pragma GCC diagnostic pop
+
+/* Wrappers for alternative DPDK timer implementation */
+typedef int (*timer_stop_fn)(struct rte_timer *tim);
+typedef int (*timer_manage_fn)(void);
+typedef int (*timer_reset_fn)(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type, unsigned int tim_lcore,
+ rte_timer_cb_t fct, void *arg);
+
+typedef struct timer_ops_t {
+ timer_stop_fn stop;
+ timer_manage_fn manage;
+ timer_reset_fn reset;
+} timer_ops_t;
+
+typedef struct {
+ timer_pool_t timer_pool[MAX_TIMER_POOLS];
+ odp_shm_t shm;
+ odp_ticketlock_t lock;
+ volatile uint64_t wait_counter;
+ uint64_t poll_interval_nsec;
+ odp_time_t poll_interval_time;
+ int num_timer_pools;
+ int poll_interval;
+ uint32_t data_id;
+ uint8_t use_alternate;
+ timer_ops_t ops;
+
+} timer_global_t;
+
+typedef struct timer_local_t {
+ odp_time_t last_run;
+ uint64_t thrmask_epoch;
+ int run_cnt;
+ int num_poll_cores;
+ unsigned int poll_cores[ODP_THREAD_COUNT_MAX];
+
+} timer_local_t;
+
+/* Points to timer global data */
+static timer_global_t *timer_global;
+
+/* Timer thread local data */
+static __thread timer_local_t timer_local;
+
+#include <odp/visibility_begin.h>
+
+/* Fill in timeout header field offsets for inline functions */
+const _odp_timeout_inline_offset_t
+_odp_timeout_inline_offset ODP_ALIGNED_CACHE = {
+ .expiration = offsetof(odp_timeout_hdr_t, expiration),
+ .timer = offsetof(odp_timeout_hdr_t, timer),
+ .user_ptr = offsetof(odp_timeout_hdr_t, user_ptr),
+ .uarea_addr = offsetof(odp_timeout_hdr_t, uarea_addr)
+};
+
+/* Global data for inline functions */
+_odp_timer_global_t _odp_timer_glob;
+
+#include <odp/visibility_end.h>
+
+static void timer_cb(struct rte_timer *rte_timer, void *arg ODP_UNUSED)
+{
+ timer_entry_t *timer = rte_timer->arg;
+ odp_event_t event;
+ odp_queue_t queue;
+
+ odp_ticketlock_lock(&timer->lock);
+
+ if (timer->state != TICKING) {
+ _ODP_ERR("Timer has been cancelled or freed.\n");
+ odp_ticketlock_unlock(&timer->lock);
+ return;
+ }
+
+ queue = timer->queue;
+ event = timer->tmo_event;
+ timer->state = EXPIRED;
+
+ if (!timer->timer_pool->periodic)
+ timer->tmo_event = ODP_EVENT_INVALID;
+
+ odp_ticketlock_unlock(&timer->lock);
+
+ if (odp_unlikely(odp_queue_enq(queue, event))) {
+ _ODP_ERR("Timeout event enqueue failed.\n");
+ odp_event_free(event);
+ }
+}
+
+static void timer_alt_manage_cb(struct rte_timer *rte_timer)
+{
+ timer_cb(rte_timer, NULL);
+}
+
+static inline int timer_stop(struct rte_timer *tim)
+{
+ return rte_timer_stop(tim);
+}
+
+static inline int timer_alt_stop(struct rte_timer *tim)
+{
+ return rte_timer_alt_stop(timer_global->data_id, tim);
+}
+
+static inline int timer_manage(void)
+{
+ return rte_timer_manage();
+}
+
+static inline int timer_alt_manage(void)
+{
+ uint64_t thrmask_epoch = _odp_thread_thrmask_epoch();
+
+ if (odp_unlikely(timer_local.thrmask_epoch != thrmask_epoch)) {
+ int cpu_ids = _odp_thread_cpu_ids(timer_local.poll_cores,
+ ODP_THREAD_COUNT_MAX);
+
+ timer_local.num_poll_cores = cpu_ids;
+ timer_local.thrmask_epoch = thrmask_epoch;
+ }
+
+ return rte_timer_alt_manage(timer_global->data_id,
+ timer_local.poll_cores,
+ timer_local.num_poll_cores,
+ timer_alt_manage_cb);
+}
+
+static inline int timer_reset(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type, unsigned int tim_lcore,
+ rte_timer_cb_t fct, void *arg)
+{
+ return rte_timer_reset(tim, ticks, type, tim_lcore, fct, arg);
+}
+
+static inline int timer_alt_reset(struct rte_timer *tim, uint64_t ticks,
+ enum rte_timer_type type,
+ unsigned int tim_lcore, rte_timer_cb_t fct,
+ void *arg)
+{
+ return rte_timer_alt_reset(timer_global->data_id, tim, ticks, type,
+ tim_lcore, fct, arg);
+}
+
+static inline timer_pool_t *timer_pool_from_hdl(odp_timer_pool_t hdl)
+{
+ return (timer_pool_t *)(uintptr_t)hdl;
+}
+
+static inline odp_timer_pool_t timer_pool_to_hdl(timer_pool_t *tp)
+{
+ return (odp_timer_pool_t)tp;
+}
+
+static inline timer_entry_t *timer_from_hdl(odp_timer_t timer_hdl)
+{
+ return (timer_entry_t *)(uintptr_t)timer_hdl;
+}
+
+static uint64_t max_multiplier_capa(double freq)
+{
+ uint64_t mult;
+
+ if (freq < MIN_BASE_HZ)
+ return 0;
+
+ mult = MAX_BASE_HZ / freq;
+ if (mult > MAX_MULTIPLIER)
+ mult = MAX_MULTIPLIER;
+
+ return mult;
+}
+
+int _odp_timer_init_global(const odp_init_t *params)
+{
+ odp_shm_t shm;
+ const char *conf_str;
+ int val = 0;
+
+ /* Timers are not polled until at least one timer pool has been
+ * created. */
+ odp_global_rw->inline_timers = false;
+
+ if (params && params->not_used.feat.timer) {
+ timer_global = NULL;
+ return 0;
+ }
+
+ shm = odp_shm_reserve("_odp_timer_global", sizeof(timer_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ _ODP_ERR("Global data alloc (%zu bytes) failed\n", sizeof(timer_global_t));
+ return -1;
+ }
+
+ timer_global = odp_shm_addr(shm);
+ memset(timer_global, 0, sizeof(timer_global_t));
+
+ timer_global->shm = shm;
+ odp_ticketlock_init(&timer_global->lock);
+
+ _ODP_PRINT("\nTimer config:\n");
+
+ conf_str = "timer.inline_poll_interval";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ odp_shm_free(shm);
+ return -1;
+ }
+ timer_global->poll_interval = val;
+ _ODP_PRINT(" %s: %d\n", conf_str, val);
+
+ conf_str = "timer.inline_poll_interval_nsec";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ odp_shm_free(shm);
+ return -1;
+ }
+ timer_global->poll_interval_nsec = val;
+ timer_global->poll_interval_time =
+ odp_time_global_from_ns(timer_global->poll_interval_nsec);
+ _ODP_PRINT(" %s: %d\n", conf_str, val);
+
+ conf_str = "timer.alternate";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ odp_shm_free(shm);
+ return -1;
+ }
+ timer_global->use_alternate = !!val;
+ _ODP_PRINT(" %s: %" PRIu8 "\n", conf_str, timer_global->use_alternate);
+
+ _ODP_PRINT("\n");
+
+ if (rte_timer_subsystem_init()) {
+ _ODP_ERR("Initializing DPDK timer library failed\n");
+ odp_shm_free(shm);
+ return -1;
+ }
+
+ if (timer_global->use_alternate) {
+ if (rte_timer_data_alloc(&timer_global->data_id)) {
+ _ODP_ERR("Failed to allocate DPDK timer data instance\n");
+ odp_shm_free(shm);
+ return -1;
+ }
+ timer_global->ops.stop = timer_alt_stop;
+ timer_global->ops.manage = timer_alt_manage;
+ timer_global->ops.reset = timer_alt_reset;
+ } else {
+ timer_global->ops.stop = timer_stop;
+ timer_global->ops.manage = timer_manage;
+ timer_global->ops.reset = timer_reset;
+ }
+
+ _odp_timer_glob.freq_hz = rte_get_timer_hz();
+ if (_odp_timer_glob.freq_hz == 0) {
+ _ODP_ERR("Reading timer frequency failed\n");
+ odp_shm_free(shm);
+ return -1;
+ }
+
+ return 0;
+}
+
+int _odp_timer_term_global(void)
+{
+ if (timer_global && timer_global->use_alternate) {
+ if (rte_timer_data_dealloc(timer_global->data_id)) {
+ _ODP_ERR("Failed to deallocate DPDK timer data instance\n");
+ return -1;
+ }
+ }
+ rte_timer_subsystem_finalize();
+
+ if (timer_global && odp_shm_free(timer_global->shm)) {
+ _ODP_ERR("Shm free failed for odp_timer\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int _odp_timer_init_local(void)
+{
+ timer_local.last_run = odp_time_global_from_ns(0);
+ timer_local.run_cnt = 1;
+
+ return 0;
+}
+
+int _odp_timer_term_local(void)
+{
+ return 0;
+}
+
+void _odp_timer_run_inline(int dec)
+{
+ int poll_interval = (dec == TIMER_SCAN_FORCE) ? 0 : timer_global->poll_interval;
+ odp_time_t now;
+ int ret;
+
+ /* Rate limit how often this thread checks the timer pools. */
+
+ if (poll_interval > 1) {
+ timer_local.run_cnt -= dec;
+ if (timer_local.run_cnt > 0)
+ return;
+ timer_local.run_cnt = poll_interval;
+ }
+
+ now = odp_time_global();
+
+ if (poll_interval > 1) {
+ odp_time_t period = odp_time_diff(now, timer_local.last_run);
+
+ if (odp_time_cmp(period, timer_global->poll_interval_time) < 0)
+ return;
+ timer_local.last_run = now;
+ }
+
+ /* Check timer pools */
+ ret = timer_global->ops.manage();
+ if (odp_unlikely(ret))
+ _ODP_ERR("RTE timer manage failed: %d\n", ret);
+}
+
+static inline uint64_t tmo_ticks_to_ns_round_up(uint64_t tmo_ticks)
+{
+ uint64_t tmo_ns = odp_timer_tick_to_ns(NULL, tmo_ticks);
+
+ /* Make sure the ns value will not be rounded down when converted back
+ * to ticks. */
+ while (odp_timer_ns_to_tick(NULL, tmo_ns) < tmo_ticks)
+ tmo_ns++;
+
+ return tmo_ns;
+}
+
+int odp_timer_capability(odp_timer_clk_src_t clk_src,
+ odp_timer_capability_t *capa)
+{
+ uint64_t min_tmo = tmo_ticks_to_ns_round_up(MIN_TMO_CYCLES);
+
+ if (clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", clk_src);
+ return -1;
+ }
+
+ memset(capa, 0, sizeof(odp_timer_capability_t));
+
+ capa->max_pools_combined = MAX_TIMER_POOLS;
+ capa->max_pools = MAX_TIMER_POOLS;
+ capa->max_timers = MAX_TIMERS;
+ capa->periodic.max_pools = MAX_TIMER_POOLS;
+ capa->periodic.max_timers = MAX_PERIODIC_TIMERS;
+ capa->highest_res_ns = MAX_RES_NS;
+ capa->max_res.res_ns = MAX_RES_NS;
+ capa->max_res.res_hz = MAX_RES_HZ;
+ capa->max_res.min_tmo = min_tmo;
+ capa->max_res.max_tmo = MAX_TMO_NS;
+ capa->max_tmo.res_ns = MAX_RES_NS;
+ capa->max_tmo.res_hz = MAX_RES_HZ;
+ capa->max_tmo.min_tmo = min_tmo;
+ capa->max_tmo.max_tmo = MAX_TMO_NS;
+ capa->queue_type_sched = true;
+ capa->queue_type_plain = true;
+
+ capa->periodic.min_base_freq_hz.integer = MIN_BASE_HZ;
+ capa->periodic.max_base_freq_hz.integer = MAX_BASE_HZ;
+
+ return 0;
+}
+
+int odp_timer_res_capability(odp_timer_clk_src_t clk_src,
+ odp_timer_res_capability_t *res_capa)
+{
+ uint64_t min_tmo = tmo_ticks_to_ns_round_up(MIN_TMO_CYCLES);
+
+ if (clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", clk_src);
+ return -1;
+ }
+
+ if (res_capa->min_tmo) {
+ _ODP_ERR("Only res_ns or max_tmo based queries supported\n");
+ return -1;
+ }
+
+ if (res_capa->res_ns || res_capa->res_hz) {
+ if (res_capa->res_ns && res_capa->res_ns < MAX_RES_NS) {
+ _ODP_DBG("Timeout resolution capability (res_ns) exceeded\n");
+ return -1;
+ }
+ if (res_capa->res_hz && res_capa->res_hz > MAX_RES_HZ) {
+ _ODP_DBG("Timeout resolution capability (res_hz) exceeded\n");
+ return -1;
+ }
+ res_capa->min_tmo = min_tmo;
+ res_capa->max_tmo = MAX_TMO_NS;
+ } else { /* max_tmo */
+ if (res_capa->max_tmo > MAX_TMO_NS) {
+ _ODP_DBG("Maximum relative timeout capability (max_tmo) exceeded\n");
+ return -1;
+ }
+ res_capa->min_tmo = min_tmo;
+ res_capa->res_ns = MAX_RES_NS;
+ res_capa->res_hz = MAX_RES_HZ;
+ }
+
+ return 0;
+}
+
/* Query periodic timer capability for the requested base frequency.
 *
 * On success updates capa with the supported max_multiplier and res_ns for
 * the requested base_freq_hz. Returns 1 when the exact base frequency is
 * supported (any frequency in [MIN_BASE_HZ, MAX_BASE_HZ] is), -1 when the
 * frequency, multiplier or resolution request cannot be met.
 */
int odp_timer_periodic_capability(odp_timer_clk_src_t clk_src,
				  odp_timer_periodic_capability_t *capa)
{
	double freq;
	uint64_t multiplier;

	if (clk_src != ODP_CLOCK_DEFAULT) {
		_ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", clk_src);
		return -1;
	}

	freq = odp_fract_u64_to_dbl(&capa->base_freq_hz);
	if (freq < MIN_BASE_HZ || freq > MAX_BASE_HZ) {
		_ODP_ERR("Base frequency not supported (min: %f, max %f)\n",
			 (double)MIN_BASE_HZ, (double)MAX_BASE_HZ);
		return -1;
	}

	multiplier = max_multiplier_capa(freq);

	/* Requested multiplier beyond what this base frequency allows. */
	if (capa->max_multiplier > multiplier)
		return -1;

	/* Smaller res_ns means finer resolution than supported. */
	if (capa->res_ns && capa->res_ns < MAX_RES_NS)
		return -1;

	/* Update capa with supported values */
	capa->max_multiplier = multiplier;
	capa->res_ns = MAX_RES_NS;

	/* All base frequencies within the range are supported */
	return 1;
}
+
/* Initialize timer pool parameters to their default values:
 * single-shot timers on the default clock source, expiring after
 * (not before) the requested time. All other fields are zeroed. */
void odp_timer_pool_param_init(odp_timer_pool_param_t *param)
{
	memset(param, 0, sizeof(odp_timer_pool_param_t));
	param->timer_type = ODP_TIMER_TYPE_SINGLE;
	param->clk_src = ODP_CLOCK_DEFAULT;
	param->exp_mode = ODP_TIMER_EXP_AFTER;
}
+
+odp_timer_pool_t odp_timer_pool_create(const char *name,
+ const odp_timer_pool_param_t *param)
+{
+ timer_pool_t *timer_pool;
+ timer_entry_t *timer;
+ uint32_t i, num_timers;
+ uint64_t res_ns, nsec_per_scan;
+ uint64_t max_multiplier = 0;
+ double base_freq = 0.0;
+ int periodic = (param->timer_type == ODP_TIMER_TYPE_PERIODIC) ? 1 : 0;
+
+ if (odp_global_ro.init_param.not_used.feat.timer) {
+ _ODP_ERR("Trying to use disabled ODP feature.\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", param->clk_src);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->timer_type != ODP_TIMER_TYPE_SINGLE &&
+ param->timer_type != ODP_TIMER_TYPE_PERIODIC) {
+ _ODP_ERR("Bad timer type %i\n", param->timer_type);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if ((param->res_ns && param->res_hz) ||
+ (param->res_ns == 0 && param->res_hz == 0)) {
+ _ODP_ERR("Invalid timeout resolution\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->res_hz == 0 && param->res_ns < MAX_RES_NS) {
+ _ODP_ERR("Too high resolution\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->res_ns == 0 && param->res_hz > MAX_RES_HZ) {
+ _ODP_ERR("Too high resolution\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->num_timers > MAX_TIMERS) {
+ _ODP_ERR("Too many timers\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ num_timers = param->num_timers;
+
+ if (param->res_ns)
+ res_ns = param->res_ns;
+ else
+ res_ns = GIGA_HZ / param->res_hz;
+
+ if (periodic) {
+ uint64_t max_capa, min_period_ns;
+
+ base_freq = odp_fract_u64_to_dbl(&param->periodic.base_freq_hz);
+ max_multiplier = param->periodic.max_multiplier;
+
+ if (base_freq < MIN_BASE_HZ || base_freq > MAX_BASE_HZ) {
+ _ODP_ERR("Bad base frequency: %f\n", base_freq);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ max_capa = max_multiplier_capa(base_freq);
+
+ if (max_multiplier == 0 || max_multiplier > max_capa) {
+ _ODP_ERR("Bad max multiplier: %" PRIu64 "\n", max_multiplier);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ min_period_ns = GIGA_HZ / (base_freq * max_multiplier);
+
+ if (res_ns > min_period_ns)
+ res_ns = min_period_ns;
+ }
+
+ /* Scan timer pool twice during resolution interval */
+ if (res_ns > ODP_TIME_USEC_IN_NS)
+ nsec_per_scan = res_ns / 2;
+ else
+ nsec_per_scan = res_ns;
+
+ /* Ring size must larger than param->num_timers */
+ if (_ODP_CHECK_IS_POWER2(num_timers))
+ num_timers++;
+ num_timers = _ODP_ROUNDUP_POWER2_U32(num_timers);
+
+ odp_ticketlock_lock(&timer_global->lock);
+
+ if (timer_global->num_timer_pools >= MAX_TIMER_POOLS) {
+ odp_ticketlock_unlock(&timer_global->lock);
+ _ODP_DBG("No more free timer pools\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ for (i = 0; i < MAX_TIMER_POOLS; i++) {
+ timer_pool = &timer_global->timer_pool[i];
+
+ if (timer_pool->used == 0) {
+ timer_pool->used = 1;
+ break;
+ }
+ }
+ timer_global->num_timer_pools++;
+
+ /* Enable inline timer polling */
+ if (timer_global->num_timer_pools == 1)
+ odp_global_rw->inline_timers = true;
+
+ /* Increase poll rate to match the highest resolution */
+ if (timer_global->poll_interval_nsec > nsec_per_scan) {
+ timer_global->poll_interval_nsec = nsec_per_scan;
+ timer_global->poll_interval_time =
+ odp_time_global_from_ns(nsec_per_scan);
+ }
+
+ odp_ticketlock_unlock(&timer_global->lock);
+ if (name) {
+ strncpy(timer_pool->name, name,
+ ODP_TIMER_POOL_NAME_LEN);
+ timer_pool->name[ODP_TIMER_POOL_NAME_LEN] = 0;
+ }
+
+ timer_pool->param = *param;
+ timer_pool->param.res_ns = res_ns;
+
+ timer_pool->periodic = periodic;
+ timer_pool->base_freq = base_freq;
+ timer_pool->max_multiplier = max_multiplier;
+
+ ring_u32_init(&timer_pool->free_timer.ring_hdr);
+ timer_pool->free_timer.ring_mask = num_timers - 1;
+
+ odp_ticketlock_init(&timer_pool->lock);
+ timer_pool->cur_timers = 0;
+ timer_pool->hwm_timers = 0;
+
+ for (i = 0; i < timer_pool->free_timer.ring_mask; i++) {
+ timer = &timer_pool->timer[i];
+ memset(timer, 0, sizeof(timer_entry_t));
+
+ odp_ticketlock_init(&timer->lock);
+ rte_timer_init(&timer->rte_timer);
+ timer->rte_timer.arg = timer;
+ timer->timer_pool = timer_pool;
+ timer->timer_idx = i;
+
+ ring_u32_enq(&timer_pool->free_timer.ring_hdr,
+ timer_pool->free_timer.ring_mask, i);
+ }
+
+ return timer_pool_to_hdl(timer_pool);
+}
+
/* No-op: pools are fully started by odp_timer_pool_create(). */
void odp_timer_pool_start(void)
{
	/* Nothing to do */
}
+
+int odp_timer_pool_start_multi(odp_timer_pool_t timer_pool[], int num)
+{
+ _ODP_ASSERT(timer_pool != NULL);
+ _ODP_ASSERT(num > 0);
+ if (ODP_DEBUG) {
+ for (int i = 0; i < num; i++)
+ _ODP_ASSERT(timer_pool[i] != ODP_TIMER_POOL_INVALID);
+ }
+
+ /* Nothing to do here, timer pools are started by the create call. */
+ return num;
+}
+
/* Destroy a timer pool: release its slot for reuse and disable inline
 * timer polling when the last pool goes away. The caller is responsible
 * for having freed all timers first (not verified here). */
void odp_timer_pool_destroy(odp_timer_pool_t tp)
{
	timer_pool_t *timer_pool = timer_pool_from_hdl(tp);

	odp_ticketlock_lock(&timer_global->lock);

	timer_pool->used = 0;
	timer_global->num_timer_pools--;

	/* Disable inline timer polling */
	if (timer_global->num_timer_pools == 0)
		odp_global_rw->inline_timers = false;

	odp_ticketlock_unlock(&timer_global->lock);
}
+
+int odp_timer_sample_ticks(odp_timer_pool_t tp[], uint64_t tick[], uint64_t clk_count[], int num)
+{
+ uint64_t now;
+ int i;
+
+ if (num <= 0 || num > MAX_TIMER_POOLS) {
+ _ODP_ERR("Bad number of timer pools: %i\n", num);
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (odp_unlikely(tp[i] == ODP_TIMER_POOL_INVALID)) {
+ _ODP_ERR("Invalid timer pool\n");
+ return -1;
+ }
+ }
+
+ now = rte_get_timer_cycles();
+
+ for (i = 0; i < num; i++) {
+ tick[i] = now;
+
+ if (clk_count)
+ clk_count[i] = 0;
+ }
+
+ return 0;
+}
+
/* Fill in runtime information about a timer pool.
 *
 * Tick frequency comes from the DPDK timer clock. The nanoseconds-per-tick
 * value is reported as integer plus a numer/denom fraction when the division
 * is not exact. Returns 0 on success, -1 on invalid handle.
 */
int odp_timer_pool_info(odp_timer_pool_t tp,
			odp_timer_pool_info_t *info)
{
	timer_pool_t *timer_pool;
	uint64_t freq_hz = rte_get_timer_hz();

	if (odp_unlikely(tp == ODP_TIMER_POOL_INVALID)) {
		_ODP_ERR("Invalid timer pool.\n");
		return -1;
	}

	timer_pool = timer_pool_from_hdl(tp);

	memset(info, 0, sizeof(odp_timer_pool_info_t));
	info->param = timer_pool->param;
	info->cur_timers = timer_pool->cur_timers;
	info->hwm_timers = timer_pool->hwm_timers;
	info->name = timer_pool->name;

	info->tick_info.freq.integer = freq_hz;
	info->tick_info.nsec.integer = ODP_TIME_SEC_IN_NS / freq_hz;
	/* Remainder of the division expressed as a fraction of freq_hz. */
	if (ODP_TIME_SEC_IN_NS % freq_hz) {
		info->tick_info.nsec.numer = ODP_TIME_SEC_IN_NS - (info->tick_info.nsec.integer *
								   freq_hz);
		info->tick_info.nsec.denom = freq_hz;
	}
	/* Leave source clock information to zero as there is no direct link
	 * between a source clock signal and a timer tick. */

	return 0;
}
+
/* Return the timer pool handle as a printable 64-bit integer. */
uint64_t odp_timer_pool_to_u64(odp_timer_pool_t tp)
{
	return _odp_pri(tp);
}
+
/* Allocate a timer from a pool and bind it to a destination queue.
 *
 * A free timer index is dequeued from the pool's free ring; the entry is
 * initialized without an event (armed later by a start call). The queue's
 * timer count is incremented so inline scheduling can poll it. Returns
 * ODP_TIMER_INVALID on bad arguments or when the pool is exhausted.
 */
odp_timer_t odp_timer_alloc(odp_timer_pool_t tp,
			    odp_queue_t queue,
			    const void *user_ptr)
{
	uint32_t timer_idx;
	timer_entry_t *timer;
	timer_pool_t *timer_pool = timer_pool_from_hdl(tp);

	if (odp_unlikely(tp == ODP_TIMER_POOL_INVALID)) {
		_ODP_ERR("Invalid timer pool.\n");
		return ODP_TIMER_INVALID;
	}

	if (odp_unlikely(queue == ODP_QUEUE_INVALID)) {
		_ODP_ERR("%s: Invalid queue handle.\n", timer_pool->name);
		return ODP_TIMER_INVALID;
	}

	/* Empty ring means all timers are in use. */
	if (ring_u32_deq(&timer_pool->free_timer.ring_hdr,
			 timer_pool->free_timer.ring_mask,
			 &timer_idx) == 0)
		return ODP_TIMER_INVALID;

	timer = &timer_pool->timer[timer_idx];

	timer->state = NOT_TICKING;
	timer->user_ptr = user_ptr;
	timer->queue = queue;
	timer->tmo_event = ODP_EVENT_INVALID;

	/* Add timer to queue */
	_odp_queue_fn->timer_add(queue);

	/* Update usage statistics under the pool lock. */
	odp_ticketlock_lock(&timer_pool->lock);

	timer_pool->cur_timers++;

	if (timer_pool->cur_timers > timer_pool->hwm_timers)
		timer_pool->hwm_timers = timer_pool->cur_timers;

	odp_ticketlock_unlock(&timer_pool->lock);

	return (odp_timer_t)timer;
}
+
/* Free a timer back to its pool.
 *
 * Fails (-1) if the timer is still active (TICKING). On success the queue's
 * timer count is decremented and the timer index is returned to the pool's
 * free ring. Returns 0 on success.
 */
int odp_timer_free(odp_timer_t timer_hdl)
{
	timer_entry_t *timer = timer_from_hdl(timer_hdl);
	timer_pool_t *timer_pool = timer->timer_pool;
	uint32_t timer_idx = timer->timer_idx;

	odp_ticketlock_lock(&timer->lock);

	if (odp_unlikely(timer->state == TICKING)) {
		odp_ticketlock_unlock(&timer->lock);
		_ODP_ERR("Timer is active\n");
		return -1;
	}

	/* Remove timer from queue */
	_odp_queue_fn->timer_rem(timer->queue);

	odp_ticketlock_unlock(&timer->lock);

	odp_ticketlock_lock(&timer_pool->lock);

	timer_pool->cur_timers--;

	odp_ticketlock_unlock(&timer_pool->lock);

	/* Make the index available for the next odp_timer_alloc(). */
	ring_u32_enq(&timer_pool->free_timer.ring_hdr,
		     timer_pool->free_timer.ring_mask, timer_idx);

	return 0;
}
+
+static inline odp_timeout_hdr_t *timeout_to_hdr(odp_timeout_t tmo)
+{
+ return (odp_timeout_hdr_t *)(uintptr_t)tmo;
+}
+
/* Arm (or re-arm) a timer.
 *
 * tick is absolute or relative in DPDK timer cycles. event is the timeout
 * event to deliver, or ODP_EVENT_INVALID to keep the timer's current event
 * (restart/periodic ack case). Returns ODP_TIMER_SUCCESS, ODP_TIMER_TOO_NEAR
 * or ODP_TIMER_FAIL.
 *
 * When rte_timer reset fails because another core is running the expiration
 * callback, the set is retried after a short spin if the callback is known
 * to have started (state EXPIRED); otherwise the arm attempt is abandoned.
 */
static inline int timer_set(odp_timer_t timer_hdl, uint64_t tick, odp_event_t event, int absolute)
{
	odp_event_t tmo_event;
	uint64_t cur_tick, rel_tick, abs_tick;
	timer_entry_t *timer = timer_from_hdl(timer_hdl);
	int num_retry = 0;
	unsigned int lcore = rte_lcore_id();

retry:
	cur_tick = rte_get_timer_cycles();

	if (absolute) {
		abs_tick = tick;
		rel_tick = abs_tick - cur_tick;

		/* Absolute time already passed: expire as soon as possible. */
		if (odp_unlikely(abs_tick < cur_tick))
			rel_tick = 0;
	} else {
		rel_tick = tick;
		abs_tick = rel_tick + cur_tick;
	}

	if (rel_tick < MIN_TMO_CYCLES) {
		_ODP_DBG("Too early\n");
		_ODP_DBG("  cur_tick %" PRIu64 ", abs_tick %" PRIu64 "\n", cur_tick, abs_tick);
		_ODP_DBG("  num_retry %i\n", num_retry);
		return ODP_TIMER_TOO_NEAR;
	}

	odp_ticketlock_lock(&timer->lock);

	if (timer->tmo_event == ODP_EVENT_INVALID) {
		if (odp_unlikely(event == ODP_EVENT_INVALID)) {
			odp_ticketlock_unlock(&timer->lock);
			/* Event missing, or timer already expired and
			 * enqueued the event. */
			return ODP_TIMER_FAIL;
		}
	} else {
		/* Check that timer was not active */
		if (odp_unlikely(event != ODP_EVENT_INVALID)) {
			_ODP_ERR("Timer was already active\n");
			odp_ticketlock_unlock(&timer->lock);
			return ODP_TIMER_FAIL;
		}
	}

	if (odp_unlikely(timer_global->ops.reset(&timer->rte_timer, rel_tick,
						 SINGLE, lcore, timer_cb, timer))) {
		int do_retry = 0;

		/* Another core is currently running the callback function.
		 * State is:
		 * - TICKING, when callback has not yet started
		 * - EXPIRED, when callback has not yet finished, or this cpu
		 *   does not yet see that it has been finished
		 */

		if (timer->state == EXPIRED)
			do_retry = 1;

		odp_ticketlock_unlock(&timer->lock);

		if (do_retry) {
			/* Timer has been expired, wait and retry until DPDK on
			 * this CPU sees it. */
			int i;

			/* Dummy work; the counter write keeps the loop from
			 * being optimized away. */
			for (i = 0; i < WAIT_SPINS; i++)
				timer_global->wait_counter++;

			num_retry++;
			goto retry;
		}

		/* Timer was just about to expire. Too late to reset this timer.
		 * Return code is NOEVENT, even when application did give
		 * an event. */
		return ODP_TIMER_FAIL;
	}

	if (event != ODP_EVENT_INVALID)
		timer->tmo_event = event;

	tmo_event = timer->tmo_event;
	timer->tick = abs_tick;
	timer->state = TICKING;

	/* Stamp timeout events with expiration metadata for the receiver. */
	if (odp_event_type(tmo_event) == ODP_EVENT_TIMEOUT) {
		odp_timeout_hdr_t *timeout_hdr;

		timeout_hdr = timeout_to_hdr((odp_timeout_t)tmo_event);
		timeout_hdr->expiration = abs_tick;
		timeout_hdr->user_ptr = timer->user_ptr;
		timeout_hdr->timer = (odp_timer_t)timer;
	}

	odp_ticketlock_unlock(&timer->lock);
	return ODP_TIMER_SUCCESS;
}
+
+int odp_timer_start(odp_timer_t timer, const odp_timer_start_t *start_param)
+{
+ odp_event_t tmo_ev = start_param->tmo_ev;
+ int abs = start_param->tick_type == ODP_TIMER_TICK_ABS;
+ int ret;
+
+ ret = timer_set(timer, start_param->tick, tmo_ev, abs);
+ if (odp_unlikely(ret != ODP_TIMER_SUCCESS))
+ return ret;
+
+ return ODP_TIMER_SUCCESS;
+}
+
+int odp_timer_restart(odp_timer_t timer, const odp_timer_start_t *start_param)
+{
+ int abs = start_param->tick_type == ODP_TIMER_TICK_ABS;
+
+ /* Reset timer without changing the event */
+ return timer_set(timer, start_param->tick, ODP_EVENT_INVALID, abs);
+}
+
/* Start a periodic timer.
 *
 * The period is base_freq * freq_multiplier; its tick count is stored with a
 * fixed-point fractional part (ACC_SIZE scale) so that accumulated rounding
 * error is corrected one tick at a time in odp_timer_periodic_ack().
 * first_tick, when non-zero, is an absolute first expiration time.
 */
int odp_timer_periodic_start(odp_timer_t timer_hdl,
			     const odp_timer_periodic_start_t *start_param)
{
	uint64_t period_ns;
	uint64_t first_tick;
	odp_event_t tmo_ev = start_param->tmo_ev;
	timer_entry_t *timer = timer_from_hdl(timer_hdl);
	timer_pool_t *tp = timer->timer_pool;
	uint64_t multiplier = start_param->freq_multiplier;
	double freq = multiplier * tp->base_freq;
	double period_ns_dbl;
	int absolute;
	int ret;

	if (odp_unlikely(!tp->periodic)) {
		_ODP_ERR("Not a periodic timer\n");
		return ODP_TIMER_FAIL;
	}

	if (odp_unlikely(multiplier == 0 || multiplier > tp->max_multiplier)) {
		_ODP_ERR("Bad frequency multiplier: %" PRIu64 "\n", multiplier);
		return ODP_TIMER_FAIL;
	}

	if (odp_unlikely(odp_event_type(tmo_ev) != ODP_EVENT_TIMEOUT)) {
		_ODP_ERR("Event type is not timeout\n");
		return ODP_TIMER_FAIL;
	}

	period_ns_dbl = (double)ODP_TIME_SEC_IN_NS / freq;
	period_ns = period_ns_dbl;

	/* Period truncated to zero nanoseconds: frequency too high. */
	if (period_ns == 0) {
		_ODP_ERR("Too high periodic timer frequency: %f\n", freq);
		return ODP_TIMER_FAIL;
	}

	timer->periodic_ticks = odp_timer_ns_to_tick(timer_pool_to_hdl(tp), period_ns);
	/* Fractional tick remainder, scaled by ACC_SIZE. */
	timer->periodic_ticks_frac = (period_ns_dbl - period_ns) * ACC_SIZE;
	timer->periodic_ticks_frac_acc = 0;

	first_tick = timer->periodic_ticks;
	absolute = 0;

	if (start_param->first_tick) {
		first_tick = start_param->first_tick;
		absolute = 1;
	}

	ret = timer_set(timer_hdl, first_tick, tmo_ev, absolute);
	if (odp_unlikely(ret != ODP_TIMER_SUCCESS))
		return ret;

	return ODP_TIMER_SUCCESS;
}
+
/* Acknowledge a periodic timer timeout and re-arm for the next period.
 *
 * Returns 0 on success (timer re-armed, or delayed timeout re-enqueued for
 * catch-up), 2 when the timer has been cancelled and this was the last
 * event, -1 on error.
 *
 * NOTE(review): periodic_ticks and tmo_event are accessed here without
 * taking timer->lock — presumably safe because only the event owner calls
 * ack; confirm against the cancel path.
 */
int odp_timer_periodic_ack(odp_timer_t timer_hdl, odp_event_t tmo_ev)
{
	uint64_t abs_tick, acc;
	odp_timeout_t tmo = odp_timeout_from_event(tmo_ev);
	timer_entry_t *timer = timer_from_hdl(timer_hdl);
	odp_timeout_hdr_t *timeout_hdr;
	int ret;

	if (odp_unlikely(odp_event_type(tmo_ev) != ODP_EVENT_TIMEOUT)) {
		_ODP_ERR("Event type is not timeout\n");
		return -1;
	}

	abs_tick = timer->periodic_ticks;

	/* Cancelled: drop ownership of the event and tell the caller this
	 * was the final timeout. */
	if (odp_unlikely(abs_tick == PERIODIC_CANCELLED)) {
		timer->tmo_event = ODP_EVENT_INVALID;
		return 2;
	}

	/* Accumulate the fractional part of the period; add one whole tick
	 * when the accumulator overflows ACC_SIZE. */
	acc = (uint64_t)timer->periodic_ticks_frac_acc + (uint64_t)timer->periodic_ticks_frac;

	if (acc >= ACC_SIZE) {
		abs_tick++;
		acc -= ACC_SIZE;
	}

	timer->periodic_ticks_frac_acc = acc;

	/* Next expiration is relative to the previous one, not to now. */
	timeout_hdr = timeout_to_hdr(tmo);
	abs_tick += timeout_hdr->expiration;
	timeout_hdr->expiration = abs_tick;

	ret = timer_set(timer_hdl, abs_tick, ODP_EVENT_INVALID, 1);
	if (odp_likely(ret == ODP_TIMER_SUCCESS))
		return 0;

	/* Send delayed timeout immediately to catch-up */
	if (ret == ODP_TIMER_TOO_NEAR) {
		if (odp_unlikely(odp_queue_enq(timer->queue, tmo_ev))) {
			_ODP_ERR("Failed to enqueue catch-up timeout event\n");
			return -1;
		}
		return 0;
	}
	_ODP_ERR("Failed to re-arm periodic timer: %d\n", ret);
	return -1;
}
+
/* Cancel a single-shot timer.
 *
 * On success the owned timeout event is returned through tmo_ev and the
 * timer goes back to NOT_TICKING. Returns ODP_TIMER_TOO_NEAR when the timer
 * already expired (or its callback is running on another core) and
 * ODP_TIMER_FAIL when it was never armed.
 */
int odp_timer_cancel(odp_timer_t timer_hdl, odp_event_t *tmo_ev)
{
	timer_entry_t *timer = timer_from_hdl(timer_hdl);

	odp_ticketlock_lock(&timer->lock);

	/* States below TICKING: NOT_TICKING or EXPIRED. */
	if (odp_unlikely(timer->state < TICKING)) {
		int state = timer->state;

		odp_ticketlock_unlock(&timer->lock);

		if (state == EXPIRED)
			return ODP_TIMER_TOO_NEAR;
		return ODP_TIMER_FAIL;
	}

	if (odp_unlikely(timer_global->ops.stop(&timer->rte_timer))) {
		/* Another core runs timer callback function. */
		odp_ticketlock_unlock(&timer->lock);
		return ODP_TIMER_TOO_NEAR;
	}

	/* Stopped in time: hand the event back to the caller. */
	*tmo_ev = timer->tmo_event;
	timer->tmo_event = ODP_EVENT_INVALID;
	timer->state = NOT_TICKING;

	odp_ticketlock_unlock(&timer->lock);
	return ODP_TIMER_SUCCESS;
}
+
+int odp_timer_periodic_cancel(odp_timer_t timer_hdl)
+{
+ timer_pool_t *tp;
+ timer_entry_t *timer;
+ odp_event_t event;
+ int ret;
+
+ if (odp_unlikely(timer_hdl == ODP_TIMER_INVALID)) {
+ _ODP_ERR("Bad timer handle\n");
+ return -1;
+ }
+
+ timer = timer_from_hdl(timer_hdl);
+ tp = timer->timer_pool;
+ event = timer->tmo_event;
+
+ if (odp_unlikely(!tp->periodic)) {
+ _ODP_ERR("Not a periodic timer\n");
+ return -1;
+ }
+
+ odp_ticketlock_lock(&timer->lock);
+
+ ret = timer_global->ops.stop(&timer->rte_timer);
+
+ /* Mark timer cancelled, so that a following ack call stops restarting it. */
+ timer->periodic_ticks = PERIODIC_CANCELLED;
+
+ /* Timer successfully cancelled, so send the final event manually. */
+ if (ret == 0 && timer->state == TICKING) {
+ timer->state = NOT_TICKING;
+ timer->tmo_event = ODP_EVENT_INVALID;
+ if (odp_unlikely(odp_queue_enq(timer->queue, event))) {
+ _ODP_ERR("Failed to enqueue final timeout event\n");
+ _odp_event_free(event);
+ odp_ticketlock_unlock(&timer->lock);
+ return -1;
+ }
+ }
+
+ odp_ticketlock_unlock(&timer->lock);
+
+ return 0;
+}
+
+uint64_t odp_timer_to_u64(odp_timer_t timer_hdl)
+{
+ return (uint64_t)(uintptr_t)timer_hdl;
+}
+
+uint64_t odp_timeout_to_u64(odp_timeout_t tmo)
+{
+ return (uint64_t)(uintptr_t)tmo;
+}
+
/* Deprecated: tell whether a received timeout is still "fresh", i.e. its
 * timer has not been cancelled or re-armed since the timeout was sent. */
int ODP_DEPRECATE(odp_timeout_fresh)(odp_timeout_t tmo)
{
	timer_entry_t *timer;
	odp_timeout_hdr_t *timeout_hdr = timeout_to_hdr(tmo);

	/* Timeout not connected to a timer */
	if (odp_unlikely(timeout_hdr->timer == ODP_TIMER_INVALID))
		return 0;

	timer = timer_from_hdl(timeout_hdr->timer);

	/* Periodic timers stay fresh until cancelled. */
	if (timer->timer_pool->periodic)
		return timer->periodic_ticks != PERIODIC_CANCELLED;

	/* Check if timer has been reused after timeout sent. */
	return timeout_hdr->expiration == timer->tick;
}
+
+odp_timeout_t odp_timeout_alloc(odp_pool_t pool_hdl)
+{
+ odp_timeout_hdr_t *timeout_hdr;
+ odp_event_t event;
+ pool_t *pool;
+
+ _ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_TIMEOUT);
+
+ event = _odp_event_alloc(pool);
+ if (odp_unlikely(event == ODP_EVENT_INVALID))
+ return ODP_TIMEOUT_INVALID;
+
+ timeout_hdr = timeout_to_hdr(odp_timeout_from_event(event));
+ timeout_hdr->timer = ODP_TIMER_INVALID;
+
+ return odp_timeout_from_event(event);
+}
+
+int odp_timeout_alloc_multi(odp_pool_t pool_hdl, odp_timeout_t tmo[], int num)
+{
+ pool_t *pool;
+ int ret;
+
+ _ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+ _ODP_ASSERT(tmo != NULL);
+ _ODP_ASSERT(num > 0);
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_TIMEOUT);
+
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)tmo, num);
+
+ for (int i = 0; i < ret; i++)
+ timeout_to_hdr(tmo[i])->timer = ODP_TIMER_INVALID;
+
+ return ret;
+}
+
+void odp_timeout_free(odp_timeout_t tmo)
+{
+ _odp_event_free(odp_timeout_to_event(tmo));
+}
+
/* Return multiple timeout events to their pool(s) in one batch call. */
void odp_timeout_free_multi(odp_timeout_t tmo[], int num)
{
	_ODP_ASSERT(tmo != NULL);
	_ODP_ASSERT(num > 0);

	_odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)tmo, num);
}
+
/* Print debug information about a timer pool, followed by DPDK timer
 * library statistics (from the alternate or default timer API, whichever
 * is in use). */
void odp_timer_pool_print(odp_timer_pool_t timer_pool)
{
	timer_pool_t *tp;
	int len = 0;
	int max_len = 512;
	int n = max_len - 1; /* reserve one byte for the terminator */
	char str[max_len];

	if (timer_pool == ODP_TIMER_POOL_INVALID) {
		_ODP_ERR("Bad timer pool handle\n");
		return;
	}

	tp = timer_pool_from_hdl(timer_pool);

	/* Build the whole report in a buffer for one atomic print. */
	len += _odp_snprint(&str[len], n - len, "Timer pool info\n");
	len += _odp_snprint(&str[len], n - len, "---------------\n");
	len += _odp_snprint(&str[len], n - len, "  handle          0x%" PRIx64 "\n",
			    odp_timer_pool_to_u64(timer_pool));
	len += _odp_snprint(&str[len], n - len, "  name            %s\n", tp->name);
	len += _odp_snprint(&str[len], n - len, "  num timers      %u\n", tp->cur_timers);
	len += _odp_snprint(&str[len], n - len, "  hwm timers      %u\n", tp->hwm_timers);
	len += _odp_snprint(&str[len], n - len, "  num tp          %i\n",
			    timer_global->num_timer_pools);
	len += _odp_snprint(&str[len], n - len, "  periodic        %" PRIu8 "\n", tp->periodic);
	str[len] = 0;

	_ODP_PRINT("%s\n", str);

	_ODP_PRINT("DPDK timer statistics\n---------------------\n");
	if (timer_global->use_alternate)
		rte_timer_alt_dump_stats(timer_global->data_id, stdout);
	else
		rte_timer_dump_stats(stdout);
	_ODP_PRINT("\n");
}
+
/* Print debug information about a single timer. */
void odp_timer_print(odp_timer_t timer_hdl)
{
	timer_entry_t *timer = timer_from_hdl(timer_hdl);
	int len = 0;
	int max_len = 512;
	int n = max_len - 1; /* reserve one byte for the terminator */
	char str[max_len];

	if (timer_hdl == ODP_TIMER_INVALID) {
		_ODP_ERR("Bad timer handle\n");
		return;
	}

	/* Build the whole report in a buffer for one atomic print. */
	len += _odp_snprint(&str[len], n - len, "Timer info\n");
	len += _odp_snprint(&str[len], n - len, "----------\n");
	len += _odp_snprint(&str[len], n - len, "  handle          0x%" PRIx64 "\n",
			    odp_timer_to_u64(timer_hdl));
	len += _odp_snprint(&str[len], n - len, "  timer pool      0x%" PRIx64 "\n",
			    odp_timer_pool_to_u64(timer_pool_to_hdl(timer->timer_pool)));
	len += _odp_snprint(&str[len], n - len, "  timer index     %" PRIu32 "\n", timer->timer_idx);
	len += _odp_snprint(&str[len], n - len, "  dest queue      0x%" PRIx64 "\n",
			    odp_queue_to_u64(timer->queue));
	len += _odp_snprint(&str[len], n - len, "  user ptr        %p\n", timer->user_ptr);
	len += _odp_snprint(&str[len], n - len, "  state           %s\n",
			    (timer->state == NOT_TICKING) ? "not ticking" :
			    (timer->state == EXPIRED ? "expired" : "ticking"));
	len += _odp_snprint(&str[len], n - len, "  periodic ticks  %" PRIu64 "\n",
			    timer->periodic_ticks);
	str[len] = 0;

	_ODP_PRINT("%s\n", str);
}
+
/* Print debug information about a timeout event, including its timer and
 * timer pool when the timeout is connected to one. */
void odp_timeout_print(odp_timeout_t tmo)
{
	const odp_timeout_hdr_t *tmo_hdr;
	odp_timer_t timer;
	int len = 0;
	int max_len = 512;
	int n = max_len - 1; /* reserve one byte for the terminator */
	char str[max_len];

	if (tmo == ODP_TIMEOUT_INVALID) {
		_ODP_ERR("Bad timeout handle\n");
		return;
	}

	tmo_hdr = timeout_to_hdr(tmo);
	timer = tmo_hdr->timer;

	/* Build the whole report in a buffer for one atomic print. */
	len += _odp_snprint(&str[len], n - len, "Timeout info\n");
	len += _odp_snprint(&str[len], n - len, "------------\n");
	len += _odp_snprint(&str[len], n - len, "  handle          0x%" PRIx64 "\n",
			    odp_timeout_to_u64(tmo));
	len += _odp_snprint(&str[len], n - len, "  expiration      %" PRIu64 "\n",
			    tmo_hdr->expiration);
	len += _odp_snprint(&str[len], n - len, "  user ptr        %p\n", tmo_hdr->user_ptr);
	len += _odp_snprint(&str[len], n - len, "  user area       %p\n", tmo_hdr->uarea_addr);

	/* Timer details only when the timeout is connected to a timer. */
	if (timer != ODP_TIMER_INVALID) {
		timer_entry_t *timer_entry = timer_from_hdl(timer);
		timer_pool_t *tp = timer_entry->timer_pool;

		len += _odp_snprint(&str[len], n - len, "  timer pool      0x%" PRIx64 "\n",
				    odp_timer_pool_to_u64(timer_pool_to_hdl(tp)));
		len += _odp_snprint(&str[len], n - len, "  timer           0x%" PRIx64 "\n",
				    odp_timer_to_u64(timer));
		len += _odp_snprint(&str[len], n - len, "  timer index     %u\n",
				    timer_entry->timer_idx);
		len += _odp_snprint(&str[len], n - len, "  periodic        %i\n", tp->periodic);
	}
	str[len] = 0;

	_ODP_PRINT("%s\n", str);
}
diff --git a/platform/linux-dpdk/test/.gitignore b/platform/linux-dpdk/test/.gitignore
new file mode 100644
index 000000000..88eb4dce8
--- /dev/null
+++ b/platform/linux-dpdk/test/.gitignore
@@ -0,0 +1,3 @@
+*.log
+*.trs
+*.env
diff --git a/platform/linux-dpdk/test/Makefile.am b/platform/linux-dpdk/test/Makefile.am
new file mode 100644
index 000000000..2a33bfbcd
--- /dev/null
+++ b/platform/linux-dpdk/test/Makefile.am
@@ -0,0 +1,48 @@
+include $(top_srcdir)/test/Makefile.inc
+TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation
+
+if WITH_OPENSSL
+TESTS_ENVIRONMENT += WITH_OPENSSL=1
+else
+TESTS_ENVIRONMENT += WITH_OPENSSL=0
+endif
+
+SUBDIRS =
+TESTS =
+
+if test_vald
+TESTS += validation/api/pktio/pktio_run.sh
+
+test_SCRIPTS = $(dist_check_SCRIPTS)
+
+SUBDIRS += validation/api/pktio \
+ example \
+ performance
+
+if WITH_ML
+TESTS += validation/api/ml/ml_linux$(EXEEXT)
+SUBDIRS += validation/api/ml
+endif
+
+else
+#performance tests refer to pktio_env
+if test_perf
+SUBDIRS += validation/api/pktio \
+ performance
+endif
+endif
+
+TEST_EXTENSIONS = .sh
+
+TESTNAME = linux-dpdk
+
+TESTENV = tests-$(TESTNAME).env
+
+test_DATA = $(TESTENV)
+
+DISTCLEANFILES = $(TESTENV)
+.PHONY: $(TESTENV)
+$(TESTENV):
+ echo "TESTS=\"$(TESTS)\"" > $@
+ echo "$(TESTS_ENVIRONMENT)" >> $@
+ echo "$(LOG_COMPILER)" >> $@
diff --git a/platform/linux-dpdk/test/crypto.conf b/platform/linux-dpdk/test/crypto.conf
new file mode 100644
index 000000000..97fdea6f5
--- /dev/null
+++ b/platform/linux-dpdk/test/crypto.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-dpdk"
+config_file_version = "0.1.26"
+
+system: {
+ # One crypto queue pair is required per thread for lockless operation
+ thread_count_max = 8
+}
diff --git a/platform/linux-dpdk/test/default-timer.conf b/platform/linux-dpdk/test/default-timer.conf
new file mode 100644
index 000000000..3219854de
--- /dev/null
+++ b/platform/linux-dpdk/test/default-timer.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-dpdk"
+config_file_version = "0.1.26"
+
+timer: {
+ # Use DPDK default timer API based implementation
+ alternate = 0
+}
diff --git a/platform/linux-dpdk/test/example/Makefile.am b/platform/linux-dpdk/test/example/Makefile.am
new file mode 100644
index 000000000..947647cd4
--- /dev/null
+++ b/platform/linux-dpdk/test/example/Makefile.am
@@ -0,0 +1,11 @@
+SUBDIRS = \
+ classifier \
+ generator \
+ ipsec_api \
+ ipsec_crypto \
+ l2fwd_simple \
+ l3fwd \
+ packet \
+ ping \
+ simple_pipeline \
+ switch
diff --git a/platform/linux-dpdk/test/example/classifier/Makefile.am b/platform/linux-dpdk/test/example/classifier/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/classifier/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/classifier/pktio_env b/platform/linux-dpdk/test/example/classifier/pktio_env
new file mode 100644
index 000000000..8b390092c
--- /dev/null
+++ b/platform/linux-dpdk/test/example/classifier/pktio_env
@@ -0,0 +1,47 @@
#!/bin/sh
#
# Copyright (c) 2020, Marvell
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Script to setup interfaces used for running application on odp-dpdk.
#
# The default behavior is to create one pcap interface which uses
# udp64.pcap to inject traffic.
#
# Network set-up
# +---------+            +-----------+
# |pcap intf| IF0<---> | Classifier|
# +---------+            +-----------+
#

# $(...) instead of legacy backticks; -print -quit stops at the first match.
PCAP_IN=$(find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit)
echo "using PCAP in=${PCAP_IN}"

IF0=0
TIME_OUT_VAL=1
CPASS_COUNT_ARG1=100
CPASS_COUNT_ARG2=100

export ODP_PLATFORM_PARAMS="--no-pci \
--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null"

# $BASH_SOURCE is empty under plain sh, so this check only triggers in bash.
if [ "$0" = "$BASH_SOURCE" ]; then
    echo "Error: Platform specific env file has to be sourced."
fi

validate_result()
{
    return 0
}

setup_interfaces()
{
    return 0
}

cleanup_interfaces()
{
    return 0
}
diff --git a/platform/linux-dpdk/test/example/generator/Makefile.am b/platform/linux-dpdk/test/example/generator/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/generator/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/generator/pktio_env b/platform/linux-dpdk/test/example/generator/pktio_env
new file mode 100644
index 000000000..cea715a5b
--- /dev/null
+++ b/platform/linux-dpdk/test/example/generator/pktio_env
@@ -0,0 +1,34 @@
#!/bin/sh
#
# Copyright (C) 2020, Marvell
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Script to setup interfaces used for running application on odp-dpdk.
#
# Generator uses a loop interface to validate udp mode.
#
# Network set-up
# IF0 ---> null:0

IF0=null:0

# $BASH_SOURCE is empty under plain sh, so this check only triggers in bash.
if [ "$0" = "$BASH_SOURCE" ]; then
	echo "Error: Platform specific env file has to be sourced."
fi

# No validation needed for the null device.
validate_result()
{
	return 0
}

# Null device needs no setup.
setup_interfaces()
{
	return 0
}

# Null device needs no cleanup.
cleanup_interfaces()
{
	return 0
}
diff --git a/platform/linux-dpdk/test/example/ipsec_api/Makefile.am b/platform/linux-dpdk/test/example/ipsec_api/Makefile.am
new file mode 100644
index 000000000..2535ad466
--- /dev/null
+++ b/platform/linux-dpdk/test/example/ipsec_api/Makefile.am
@@ -0,0 +1,21 @@
+EXTRA_DIST = pktio_env
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-dpdk/test/example/ipsec_api/pktio_env b/platform/linux-dpdk/test/example/ipsec_api/pktio_env
new file mode 100644
index 000000000..3267bd4cd
--- /dev/null
+++ b/platform/linux-dpdk/test/example/ipsec_api/pktio_env
@@ -0,0 +1,72 @@
#!/bin/sh
#
# Copyright (C) 2021, Marvell
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Script to setup interfaces used for running application on linux-dpdk.
#
# ipsec_api application uses two loop devices loop0 and loop1.
#

# POSIX '=' instead of the bash-only '==' ('[ x == y ]' errors under dash).
# $BASH_SOURCE is empty under plain sh, so this check only triggers in bash.
if [ "$0" = "$BASH_SOURCE" ]; then
	echo "Error: Platform specific env file has to be sourced."
	exit 1
fi

# Absolute path to the .env file.
LINUX_ENV_PATH=$PWD/../../platform/linux-dpdk/test

TESTENV="tests-linux-dpdk.env"

if [ -f "$LINUX_ENV_PATH/$TESTENV" ]; then
	# '.' is the portable spelling of bash's 'source'.
	. "$LINUX_ENV_PATH/$TESTENV"
else
	echo "BUG: unable to find $TESTENV!"
	echo "$TESTENV has to be in following directory: "
	echo "	$LINUX_ENV_PATH"
	exit 1
fi

# Skip IPsec example tests when there's no OpenSSL.
if [ -n "$WITH_OPENSSL" ] && [ ${WITH_OPENSSL} -eq 0 ]; then
	echo "Crypto not supported. Skipping."
	exit 77
fi

# Skip live and router mode tests.
if [ ${IPSEC_APP_MODE} -eq 1 ] || [ ${IPSEC_APP_MODE} -eq 2 ]; then
	echo "IPsec Live / Router mode test. Skipping."
	exit 77
fi

IF0=p7p1
IF1=p8p1

NEXT_HOP_MAC0=08:00:27:76:B5:E0
NEXT_HOP_MAC1=08:00:27:F5:8B:DB

LIF0=loop1
LIF1=loop2

IF_LIST=$LIF0,$LIF1
ROUTE_IF_INB=$LIF0
ROUTE_IF_OUTB=$LIF1
OUT_IF=$LIF1
IN_IF=$LIF0

validate_result()
{
	return 0
}

setup_interfaces()
{
	return 0
}

cleanup_interfaces()
{
	return 0
}
diff --git a/platform/linux-dpdk/test/example/ipsec_crypto/Makefile.am b/platform/linux-dpdk/test/example/ipsec_crypto/Makefile.am
new file mode 100644
index 000000000..2535ad466
--- /dev/null
+++ b/platform/linux-dpdk/test/example/ipsec_crypto/Makefile.am
@@ -0,0 +1,21 @@
+EXTRA_DIST = pktio_env
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-dpdk/test/example/ipsec_crypto/pktio_env b/platform/linux-dpdk/test/example/ipsec_crypto/pktio_env
new file mode 100644
index 000000000..1c6e7d172
--- /dev/null
+++ b/platform/linux-dpdk/test/example/ipsec_crypto/pktio_env
@@ -0,0 +1,72 @@
+#!/bin/sh
+#
+# Copyright (C) 2021, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# ipsec_crypto application uses two loop devices loop1 and loop2.
+#
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+ exit 1
+fi
+
+# Absolute path to the .env file.
+LINUX_ENV_PATH=$PWD/../../platform/linux-dpdk/test
+
+TESTENV="tests-linux-dpdk.env"
+
+if [ -f $LINUX_ENV_PATH/$TESTENV ]; then
+ source $LINUX_ENV_PATH/$TESTENV
+else
+ echo "BUG: unable to find $TESTENV!"
+ echo "$TESTENV has to be in following directory: "
+ echo " $LINUX_ENV_PATH"
+ exit 1
+fi
+
+# Skip IPsec example tests when there's no OpenSSL.
+if [ -n "$WITH_OPENSSL" ] && [ ${WITH_OPENSSL} -eq 0 ]; then
+ echo "Crypto not supported. Skipping."
+ exit 77
+fi
+
+# Skip live and router mode tests.
+if [ ${IPSEC_APP_MODE} -eq 1 ] || [ ${IPSEC_APP_MODE} -eq 2 ]; then
+ echo "Live / Router mode test. Skipping."
+ exit 77
+fi
+
+IF0=p7p1
+IF1=p8p1
+
+NEXT_HOP_MAC0=08:00:27:76:B5:E0
+NEXT_HOP_MAC1=08:00:27:F5:8B:DB
+
+LIF0=loop1
+LIF1=loop2
+
+IF_LIST=$LIF0,$LIF1
+ROUTE_IF_INB=$LIF0
+ROUTE_IF_OUTB=$LIF1
+OUT_IF=$LIF1
+IN_IF=$LIF0
+
+validate_result()
+{
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/l2fwd_simple/Makefile.am b/platform/linux-dpdk/test/example/l2fwd_simple/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/l2fwd_simple/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/l2fwd_simple/pktio_env b/platform/linux-dpdk/test/example/l2fwd_simple/pktio_env
new file mode 100644
index 000000000..d1cb6d84a
--- /dev/null
+++ b/platform/linux-dpdk/test/example/l2fwd_simple/pktio_env
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# For linux-dpdk the default behavior is to create two pcap interfaces which
+# use udp64.pcap file to inject traffic. An output pcap file is generated by
+# the second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+IF0=0
+IF1=1
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null \
+--vdev net_pcap1,rx_pcap=${PCAP_IN},tx_pcap=pcapout.pcap"
+
+echo "Using PCAP_IN = ${PCAP_IN}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ # DPDK PCAP vdev may add some extra data to the end of the output file
+ # in process mode if the interface is started after fork.
+ if [ `stat -c %s pcapout.pcap` -lt `stat -c %s ${PCAP_IN}` ]; then
+ echo "File sizes disagree"
+ exit 1
+ fi
+
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/l3fwd/Makefile.am b/platform/linux-dpdk/test/example/l3fwd/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/l3fwd/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/l3fwd/pktio_env b/platform/linux-dpdk/test/example/l3fwd/pktio_env
new file mode 100644
index 000000000..c9e35f09a
--- /dev/null
+++ b/platform/linux-dpdk/test/example/l3fwd/pktio_env
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# For linux-dpdk the default behavior is to create two pcap interfaces of which
+# the first one uses udp64.pcap to inject traffic. An output pcap file is
+# generated by the second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT="pcapout.pcap"
+PCAP_EMPTY=`find . ${TEST_DIR} $(dirname $0) -name empty.pcap -print -quit`
+PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
+IF0=0
+IF1=1
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null \
+--vdev net_pcap1,rx_pcap=${PCAP_EMPTY},tx_pcap=${PCAP_OUT}"
+
+echo "Using PCAP_IN = ${PCAP_IN}, PCAP_OUT = ${PCAP_OUT}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
+ if [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
+ echo "in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ exit 1
+ fi
+
+ echo "Pass: in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/packet/Makefile.am b/platform/linux-dpdk/test/example/packet/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/packet/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/packet/pktio_env b/platform/linux-dpdk/test/example/packet/pktio_env
new file mode 100644
index 000000000..0f6a1c233
--- /dev/null
+++ b/platform/linux-dpdk/test/example/packet/pktio_env
@@ -0,0 +1,55 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# For linux-dpdk the default behavior is to create two pcap interfaces which
+# use udp64.pcap file to inject traffic. An output pcap file is generated by
+# the second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT="pcapout.pcap"
+PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
+IF0=0
+IF1=1
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null \
+--vdev net_pcap1,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT}"
+
+echo "Using PCAP in=${PCAP_IN}:out=${PCAP_OUT} size %${PCAP_IN_SIZE}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
+ if [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
+ echo "Error: in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ exit 1
+ fi
+
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/ping/Makefile.am b/platform/linux-dpdk/test/example/ping/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/ping/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/ping/pktio_env b/platform/linux-dpdk/test/example/ping/pktio_env
new file mode 100644
index 000000000..a8dba6e9a
--- /dev/null
+++ b/platform/linux-dpdk/test/example/ping/pktio_env
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# For linux-dpdk the default behavior is to create one pcap interface which uses
+# udp64.pcap file for incoming test traffic. A separate output pcap file is
+# generated for transmitted packets.
+#
+# Network set-up
+# IF0
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name icmp_echo_req.pcap -print -quit`
+PCAP_OUT="pcapout.pcap"
+PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
+IF0=0
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT}"
+
+echo "Using PCAP in=${PCAP_IN}:out=${PCAP_OUT} size %${PCAP_IN_SIZE}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
+ if [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
+ echo "Error: in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ exit 1
+ fi
+
+ echo "pcap in size:${PCAP_IN_SIZE} pcap out size:${PCAP_OUT_SIZE}"
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+	echo "pktio: setting up test interface $IF0."
+ return 0
+}
+
+cleanup_interfaces()
+{
+	echo "pktio: cleaning up test interface $IF0."
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/simple_pipeline/Makefile.am b/platform/linux-dpdk/test/example/simple_pipeline/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/simple_pipeline/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/simple_pipeline/pktio_env b/platform/linux-dpdk/test/example/simple_pipeline/pktio_env
new file mode 100644
index 000000000..c2cccffbc
--- /dev/null
+++ b/platform/linux-dpdk/test/example/simple_pipeline/pktio_env
@@ -0,0 +1,52 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# For linux-dpdk the default behavior is to create two pcap interfaces which
+# use udp64.pcap file to inject traffic. An output pcap file is generated by
+# the second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+IF0=0
+IF1=1
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null \
+--vdev net_pcap1,rx_pcap=${PCAP_IN},tx_pcap=pcapout.pcap"
+
+echo "Using PCAP_IN = ${PCAP_IN}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ if [ `stat -c %s pcapout.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
+ echo "File sizes disagree"
+ exit 1
+ fi
+
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-dpdk/test/example/switch/Makefile.am b/platform/linux-dpdk/test/example/switch/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-dpdk/test/example/switch/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-dpdk/test/example/switch/pktio_env b/platform/linux-dpdk/test/example/switch/pktio_env
new file mode 100644
index 000000000..8daca3c3e
--- /dev/null
+++ b/platform/linux-dpdk/test/example/switch/pktio_env
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# Copyright (c) 2020, Nokia
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-dpdk.
+#
+# For linux-dpdk the default behavior is to create four pcap interfaces of which
+# the first one uses udp64.pcap file to inject traffic. The other three
+# interfaces generate individual output pcap files.
+#
+# Network set-up
+# IF0 |---> IF1
+# |---> IF2
+# |---> IF3
+
+NUM_RX_PORT=3
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_EMPTY=`find . ${TEST_DIR} $(dirname $0) -name empty.pcap -print -quit`
+IF0=0
+IF1=1
+IF2=2
+IF3=3
+
+export ODP_PLATFORM_PARAMS="--no-pci \
+--vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=/dev/null \
+--vdev net_pcap1,rx_pcap=${PCAP_EMPTY},tx_pcap=pcapout1.pcap \
+--vdev net_pcap2,rx_pcap=${PCAP_EMPTY},tx_pcap=pcapout2.pcap \
+--vdev net_pcap3,rx_pcap=${PCAP_EMPTY},tx_pcap=pcapout3.pcap"
+
+echo "Switch test using PCAP_IN = ${PCAP_IN}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ for i in `seq 1 $NUM_RX_PORT`;
+ do
+ if [ `stat -c %s pcapout${i}.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
+ echo "Error: Output file $i size not matching"
+ exit 1
+ fi
+ rm -f pcapout${i}.pcap
+ done
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1, $IF2, $IF3."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1, $IF2, $IF3."
+ return 0
+}
diff --git a/platform/linux-dpdk/test/performance/Makefile.am b/platform/linux-dpdk/test/performance/Makefile.am
new file mode 100644
index 000000000..4070f09f2
--- /dev/null
+++ b/platform/linux-dpdk/test/performance/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = dmafwd
diff --git a/platform/linux-dpdk/test/performance/dmafwd/Makefile.am b/platform/linux-dpdk/test/performance/dmafwd/Makefile.am
new file mode 100644
index 000000000..91d42cc74
--- /dev/null
+++ b/platform/linux-dpdk/test/performance/dmafwd/Makefile.am
@@ -0,0 +1,18 @@
+EXTRA_DIST = pktio_env
+
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-dpdk/test/performance/dmafwd/pktio_env b/platform/linux-dpdk/test/performance/dmafwd/pktio_env
new file mode 100644
index 000000000..7135ca17d
--- /dev/null
+++ b/platform/linux-dpdk/test/performance/dmafwd/pktio_env
@@ -0,0 +1,59 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT=dmafwd_out.pcap
+IF0=0
+DUMP=tcpdump
+
+export ODP_PLATFORM_PARAMS="--no-pci --vdev net_pcap0,rx_pcap=${PCAP_IN},tx_pcap=${PCAP_OUT}"
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "ERROR: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ local RET=0
+
+ if command -v ${DUMP}; then
+ local VALIN=valin
+ local VALOUT=valout
+
+ ${DUMP} -r ${PCAP_IN} -t -x > ${VALIN}
+ ${DUMP} -r ${PCAP_OUT} -t -x > ${VALOUT}
+ diff ${VALIN} ${VALOUT}
+ RET=$?
+ rm -f ${VALIN}
+ rm -f ${VALOUT}
+ else
+ echo "WARNING: No ${DUMP} available, using \"stat\" for diff"
+ local SZIN=$(stat -c %s ${PCAP_IN})
+ local SZOUT=$(stat -c %s ${PCAP_OUT})
+
+ if [ ${SZIN} -ne ${SZOUT} ]; then
+ RET=1
+ fi
+ fi
+
+ rm -f ${PCAP_OUT}
+
+ if [ $RET -ne 0 ]; then
+ echo "ERROR: Input and output captures do not match, exiting"
+ exit 1
+ fi
+
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-dpdk/test/process-mode.conf b/platform/linux-dpdk/test/process-mode.conf
new file mode 100644
index 000000000..827eb6074
--- /dev/null
+++ b/platform/linux-dpdk/test/process-mode.conf
@@ -0,0 +1,7 @@
+# Mandatory fields
+odp_implementation = "linux-dpdk"
+config_file_version = "0.1.26"
+
+dpdk: {
+ process_mode_memory_mb = 1024
+}
diff --git a/platform/linux-dpdk/test/sched-basic.conf b/platform/linux-dpdk/test/sched-basic.conf
new file mode 100644
index 000000000..2c11cb419
--- /dev/null
+++ b/platform/linux-dpdk/test/sched-basic.conf
@@ -0,0 +1,14 @@
+# Mandatory fields
+odp_implementation = "linux-dpdk"
+config_file_version = "0.1.26"
+
+# Test scheduler with an odd spread value and without dynamic load balance
+sched_basic: {
+ prio_spread = 3
+ load_balance = 0
+ order_stash_size = 0
+ powersave: {
+ poll_time_nsec = 5000
+ sleep_time_nsec = 50000
+ }
+}
diff --git a/platform/linux-dpdk/test/stash-custom.conf b/platform/linux-dpdk/test/stash-custom.conf
new file mode 100644
index 000000000..62f314c4e
--- /dev/null
+++ b/platform/linux-dpdk/test/stash-custom.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-dpdk"
+config_file_version = "0.1.26"
+
+# Test overflow safe stash variant
+stash: {
+ strict_size = 0
+}
diff --git a/platform/linux-dpdk/test/validation/api/Makefile.inc b/platform/linux-dpdk/test/validation/api/Makefile.inc
new file mode 100644
index 000000000..cda6237ea
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/Makefile.inc
@@ -0,0 +1 @@
+include $(top_srcdir)/test/validation/api/Makefile.inc
diff --git a/platform/linux-dpdk/test/validation/api/ml/.gitignore b/platform/linux-dpdk/test/validation/api/ml/.gitignore
new file mode 100644
index 000000000..e31f902c4
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/.gitignore
@@ -0,0 +1 @@
+ml_linux
diff --git a/platform/linux-dpdk/test/validation/api/ml/Makefile.am b/platform/linux-dpdk/test/validation/api/ml/Makefile.am
new file mode 100644
index 000000000..40910d5c6
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/Makefile.am
@@ -0,0 +1,29 @@
+include ../Makefile.inc
+
+test_PROGRAMS = ml_linux
+ml_linux_SOURCES = ../../../../../linux-generic/test/validation/api/ml/ml_linux.c
+
+EXTRA_DIST = \
+ batch_add.onnx \
+ simple_linear.onnx
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-dpdk/test/validation/api/ml/README.md b/platform/linux-dpdk/test/validation/api/ml/README.md
new file mode 120000
index 000000000..d121a477d
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/README.md
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/README.md \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/ml/batch_add.onnx b/platform/linux-dpdk/test/validation/api/ml/batch_add.onnx
new file mode 120000
index 000000000..b827c0e58
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/batch_add.onnx
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/batch_add.onnx \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/ml/batch_add_gen.py b/platform/linux-dpdk/test/validation/api/ml/batch_add_gen.py
new file mode 120000
index 000000000..695b6d399
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/batch_add_gen.py
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/batch_add_gen.py \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/ml/gen_models.sh b/platform/linux-dpdk/test/validation/api/ml/gen_models.sh
new file mode 120000
index 000000000..e9b25d58f
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/gen_models.sh
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/gen_models.sh \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/ml/requirements.txt b/platform/linux-dpdk/test/validation/api/ml/requirements.txt
new file mode 120000
index 000000000..b94d5d389
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/requirements.txt
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/requirements.txt \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/ml/simple_linear.onnx b/platform/linux-dpdk/test/validation/api/ml/simple_linear.onnx
new file mode 120000
index 000000000..f471922d1
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/simple_linear.onnx
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/simple_linear.onnx \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/ml/simple_linear_gen.py b/platform/linux-dpdk/test/validation/api/ml/simple_linear_gen.py
new file mode 120000
index 000000000..53fb4f6ed
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/ml/simple_linear_gen.py
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/ml/simple_linear_gen.py \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/pktio/.gitignore b/platform/linux-dpdk/test/validation/api/pktio/.gitignore
new file mode 120000
index 000000000..42a9f364e
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/pktio/.gitignore
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/pktio/.gitignore \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/pktio/Makefile.am b/platform/linux-dpdk/test/validation/api/pktio/Makefile.am
new file mode 100644
index 000000000..e2664feb3
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/pktio/Makefile.am
@@ -0,0 +1,24 @@
+dist_check_SCRIPTS = pktio_env \
+ pktio_run.sh
+
+test_SCRIPTS = $(dist_check_SCRIPTS)
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-dpdk/test/validation/api/pktio/pktio_env b/platform/linux-dpdk/test/validation/api/pktio/pktio_env
new file mode 120000
index 000000000..161505ff9
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/pktio/pktio_env
@@ -0,0 +1 @@
+../../../../../linux-generic/test/validation/api/pktio/pktio_env \ No newline at end of file
diff --git a/platform/linux-dpdk/test/validation/api/pktio/pktio_run.sh b/platform/linux-dpdk/test/validation/api/pktio/pktio_run.sh
new file mode 100755
index 000000000..641b39493
--- /dev/null
+++ b/platform/linux-dpdk/test/validation/api/pktio/pktio_run.sh
@@ -0,0 +1,105 @@
+#!/bin/sh
+#
+# Copyright (c) 2015-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Proceed the pktio tests. This script expects at least one argument:
+# setup) setup the pktio test environment
+# cleanup) cleanup the pktio test environment
+# run) run the pktio tests (setup, run, cleanup)
+# extra arguments are passed unchanged to the test itself (pktio_main)
+# Without arguments, "run" is assumed and no extra argument is passed to the
+# test (legacy mode).
+#
+
+# directories where pktio_main binary can be found:
+# -in the validation dir when running make check (intree or out of tree)
+# -in the script directory, when running after 'make install', or
+# -in the validation when running standalone (./pktio_run) intree.
+# -in the current directory.
+# running stand alone out of tree requires setting PATH
+PATH=${TEST_DIR}/api/pktio:$PATH
+PATH=$(dirname $0):$PATH
+PATH=$(dirname $0)/../../../../../../test/validation/api/pktio:$PATH
+PATH=.:$PATH
+
+pktio_main_path=$(which pktio_main${EXEEXT})
+if [ -x "$pktio_main_path" ] ; then
+	echo "running with pktio_main: $pktio_main_path"
+else
+	echo "cannot find pktio_main: please set your PATH for it."
+fi
+
+# directory where platform test sources are, including scripts
+TEST_SRC_DIR=$(dirname $0)
+
+# exit codes expected by automake for skipped tests
+TEST_SKIPPED=77
+
+# Use installed pktio env or for make check take it from platform directory
+if [ -f "./pktio_env" ]; then
+ . ./pktio_env
+elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
+ . ${TEST_SRC_DIR}/pktio_env
+else
+ echo "BUG: unable to find pktio_env!"
+ echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test."
+ echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
+ exit 1
+fi
+
+run_test()
+{
+ local ret=0
+
+ pktio_main${EXEEXT} $*
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+ if [ $ret -ne 0 ]; then
+ echo "!!! FAILED !!!"
+ fi
+
+ return $ret
+}
+
+run()
+{
+ # need to be root to run tests with real interfaces
+ if [ "$(id -u)" != "0" ]; then
+ exit $ret
+ fi
+
+ if [ "$ODP_PKTIO_IF0" = "" ]; then
+ # no interfaces specified, use default veth interfaces
+ # setup by the pktio_env script
+ setup_pktio_env clean
+ if [ $? != 0 ]; then
+ echo "Failed to setup test environment, skipping test."
+ exit $TEST_SKIPPED
+ fi
+ export ODP_PLATFORM_PARAMS="--no-pci --vdev net_pcap0,iface=$IF0 --vdev net_pcap1,iface=$IF1"
+ export ODP_PKTIO_IF0=0
+ export ODP_PKTIO_IF1=1
+ fi
+
+ run_test
+ ret=$?
+
+ exit $ret
+}
+
+if [ $# != 0 ]; then
+ action=$1
+ shift
+fi
+
+case "$action" in
+ setup) setup_pktio_env ;;
+ cleanup) cleanup_pktio_env ;;
+ run) run ;;
+ *) run ;;
+esac
diff --git a/platform/linux-dpdk/test/wrapper-script.sh b/platform/linux-dpdk/test/wrapper-script.sh
new file mode 100755
index 000000000..c8cbae844
--- /dev/null
+++ b/platform/linux-dpdk/test/wrapper-script.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+export ODP_PLATFORM_PARAMS=${ODP_PLATFORM_PARAMS:-"--vdev=crypto_openssl --vdev=crypto_null"}
+# where to mount huge pages
+export HUGEPAGEDIR=${HUGEPAGEDIR:-/mnt/huge}
+# exit codes expected by automake for skipped tests
+TEST_SKIPPED=77
+
+# Make sure huge pages are released when a unit test crashes "make check"
+trap ctrl_c INT
+
+ctrl_c() {
+ echo "** Trapped CTRL-C"
+ if grep -qs "$HUGEPAGEDIR" /proc/mounts; then
+ echo "** Umounting hugetlbfs"
+ sleep 1 && sudo umount -a -t hugetlbfs
+ fi
+}
+
+function mount_and_reserve() {
+ export PATH_NR="/sys/devices/system/node/node0/hugepages/hugepages-${SIZE_KB}kB/nr_hugepages"
+ export PATH_FREE="/sys/devices/system/node/node0/hugepages/hugepages-${SIZE_KB}kB/free_hugepages"
+ if grep -qs "$HUGEPAGEDIR" /proc/mounts; then
+ echo "Umounting hugetlbfs from previous use!"
+ sudo umount -a -t hugetlbfs
+ fi
+ echo "Trying $SIZE pages"
+ sudo mount -t hugetlbfs -o pagesize=$SIZE nodev $HUGEPAGEDIR 2>/dev/null
+ res=$?
+ if [ $res -ne 0 ]; then
+ echo "ERROR: can't mount hugepages"
+ return $res
+ fi
+ sudo sh -c "echo $RESERVE > $PATH_NR"
+ if [ `cat $PATH_NR` -lt 1 ]; then
+ echo "Failed to reserve at least 1 huge page!"
+ return 1
+ else
+ echo "Total number: `cat $PATH_NR`"
+ echo "Free pages: `cat $PATH_FREE`"
+ fi
+}
+
+if [ ! -d $HUGEPAGEDIR ]; then
+ sudo mkdir -p $HUGEPAGEDIR
+fi
+
+# Need to be root to use DPDK
+if [ "$(id -u)" != "0" ]; then
+ echo "DPDK needs root privileges"
+ exit $TEST_SKIPPED
+fi
+
+echo "Mounting hugetlbfs"
+export SIZE=2MB
+export SIZE_KB=2048
+export RESERVE=768
+mount_and_reserve
+res=$?
+if [ $res -ne 0 ]; then
+ echo "ERROR: can't mount hugepages"
+ exit $res
+fi
+echo "running $1!"
+$1
+res=$?
+echo "Unmounting hugetlbfs"
+sleep 0.3 && sudo umount -a -t hugetlbfs
+exit $res
+