aboutsummaryrefslogtreecommitdiff
path: root/platform/linux-generic
diff options
context:
space:
mode:
Diffstat (limited to 'platform/linux-generic')
-rw-r--r--platform/linux-generic/.gitignore3
-rw-r--r--platform/linux-generic/Makefile.am659
-rw-r--r--platform/linux-generic/Makefile.inc2
-rw-r--r--platform/linux-generic/README86
-rw-r--r--platform/linux-generic/_ishmphy.c207
-rw-r--r--platform/linux-generic/arch/aarch64/cpu_flags.c1052
-rw-r--r--platform/linux-generic/arch/aarch64/cpu_flags.h20
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/atomic.h12
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h278
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/cpu.h27
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/cpu_inlines.h60
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/hash_crc32.h103
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/sync_inlines.h31
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/time_cpu.h53
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/time_inlines.h7
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h47
-rw-r--r--platform/linux-generic/arch/aarch64/odp_atomic.c56
-rw-r--r--platform/linux-generic/arch/aarch64/odp_atomic.h325
-rw-r--r--platform/linux-generic/arch/aarch64/odp_cpu.h202
-rw-r--r--platform/linux-generic/arch/aarch64/odp_cpu_cycles.c48
-rw-r--r--platform/linux-generic/arch/aarch64/odp_crypto_armv8.c896
-rw-r--r--platform/linux-generic/arch/aarch64/odp_random.h166
-rw-r--r--platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c396
-rw-r--r--platform/linux-generic/arch/aarch64/odp_wait_until.h100
-rw-r--r--platform/linux-generic/arch/arm/odp/api/abi/cpu.h23
-rw-r--r--platform/linux-generic/arch/arm/odp/api/abi/cpu_inlines.h (renamed from platform/linux-generic/arch/arm/odp/api/cpu_arch.h)14
-rw-r--r--platform/linux-generic/arch/arm/odp_cpu.h87
-rw-r--r--platform/linux-generic/arch/arm/odp_sysinfo_parse.c34
-rw-r--r--platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h100
-rw-r--r--platform/linux-generic/arch/common/odp_time_cpu.c74
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/atomic_generic.h276
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/atomic_inlines.h7
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/cpu.h23
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/cpu_generic.h34
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/cpu_inlines.h24
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/hash_crc32.h37
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/sync_inlines.h31
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/time_inlines.h46
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/wait_until.h5
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h25
-rw-r--r--platform/linux-generic/arch/default/odp/api/cpu_arch.h24
-rw-r--r--platform/linux-generic/arch/default/odp_atomic.c (renamed from platform/linux-generic/odp_atomic.c)26
-rw-r--r--platform/linux-generic/arch/default/odp_atomic.h114
-rw-r--r--platform/linux-generic/arch/default/odp_cpu.h26
-rw-r--r--platform/linux-generic/arch/default/odp_cpu_arch.c48
-rw-r--r--platform/linux-generic/arch/default/odp_cpu_cycles.c (renamed from platform/linux-generic/arch/arm/odp_cpu_arch.c)27
-rw-r--r--platform/linux-generic/arch/default/odp_hash_crc32.c496
-rw-r--r--platform/linux-generic/arch/default/odp_random.c33
-rw-r--r--platform/linux-generic/arch/default/odp_random.h41
-rw-r--r--platform/linux-generic/arch/default/odp_sysinfo_parse.c25
-rw-r--r--platform/linux-generic/arch/default/odp_time.c112
-rw-r--r--platform/linux-generic/arch/default/odp_wait_until.h53
-rw-r--r--platform/linux-generic/arch/mips64/odp/api/cpu_arch.h32
-rw-r--r--platform/linux-generic/arch/mips64/odp_cpu_arch.c31
-rw-r--r--platform/linux-generic/arch/mips64/odp_sysinfo_parse.c64
-rw-r--r--platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h25
-rw-r--r--platform/linux-generic/arch/powerpc/odp/api/cpu_arch.h24
-rw-r--r--platform/linux-generic/arch/powerpc/odp_cpu_arch.c48
-rw-r--r--platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c23
-rw-r--r--platform/linux-generic/arch/x86/cpu_flags.c378
-rw-r--r--platform/linux-generic/arch/x86/cpu_flags.h21
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/cpu.h23
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/cpu_inlines.h46
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/cpu_rdtsc.h (renamed from platform/linux-generic/arch/x86/odp_cpu_arch.c)20
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/hash_crc32.h77
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/sync_inlines.h31
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/time_cpu.h35
-rw-r--r--platform/linux-generic/arch/x86/odp/api/abi/time_inlines.h7
-rw-r--r--platform/linux-generic/arch/x86/odp/api/cpu_arch.h29
-rw-r--r--platform/linux-generic/arch/x86/odp_cpu.h14
-rw-r--r--platform/linux-generic/arch/x86/odp_cpu_cycles.c21
-rw-r--r--platform/linux-generic/arch/x86/odp_random.h160
-rw-r--r--platform/linux-generic/arch/x86/odp_sysinfo_parse.c86
-rw-r--r--platform/linux-generic/arch/x86/odp_time_cpu.c106
l---------platform/linux-generic/check-globals.sh1
-rw-r--r--platform/linux-generic/doc/platform_specific.dox4
-rw-r--r--platform/linux-generic/dumpconfig/.gitignore1
-rw-r--r--platform/linux-generic/dumpconfig/Makefile.am10
-rw-r--r--platform/linux-generic/dumpconfig/dumpconfig.c43
-rw-r--r--platform/linux-generic/example/Makefile.am5
-rw-r--r--platform/linux-generic/example/ml/.gitignore5
-rw-r--r--platform/linux-generic/example/ml/Makefile.am46
-rw-r--r--platform/linux-generic/example/ml/README.md94
-rw-r--r--platform/linux-generic/example/ml/example_digit.csv1
-rw-r--r--platform/linux-generic/example/ml/mnist-12.onnxbin0 -> 26143 bytes
-rw-r--r--platform/linux-generic/example/ml/mnist.c300
-rw-r--r--platform/linux-generic/example/ml/model_explorer.c88
-rw-r--r--platform/linux-generic/example/ml/model_read.c47
-rw-r--r--platform/linux-generic/example/ml/model_read.h29
-rwxr-xr-xplatform/linux-generic/example/ml/odp_ml_run_mnist.sh9
-rwxr-xr-xplatform/linux-generic/example/ml/odp_ml_run_model_explorer.sh8
-rwxr-xr-xplatform/linux-generic/example/ml/odp_ml_run_simple_linear.sh8
-rw-r--r--platform/linux-generic/example/ml/simple_linear.c281
-rw-r--r--platform/linux-generic/example/ml/simple_linear.onnxbin0 -> 214 bytes
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/align.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/atomic.h96
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/barrier.h (renamed from platform/linux-generic/include/odp/api/plat/barrier_types.h)6
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/buffer.h28
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/buffer_types.h40
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/byteorder.h (renamed from platform/linux-generic/include/odp/api/plat/byteorder_types.h)16
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/classification.h42
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/comp.h34
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/cpumask.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/crypto.h29
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/crypto_types.h (renamed from platform/linux-generic/include/odp/api/plat/crypto_types.h)20
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/debug.h68
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/dma.h27
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/dma_types.h42
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/errno.h18
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/event.h29
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/event_types.h60
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/hash.h21
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/init.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/ipsec.h32
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h41
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/ml_types.h45
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet.h28
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet_flags.h26
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet_io.h29
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h (renamed from platform/linux-generic/include/odp/api/plat/packet_io_types.h)26
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/packet_types.h108
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/pool.h28
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/pool_types.h42
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/proto_stats.h27
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h40
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/queue.h27
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/queue_types.h (renamed from platform/linux-generic/include/odp/api/plat/queue_types.h)16
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/random.h (renamed from platform/linux-generic/include/odp/api/cpu.h)12
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/rwlock.h10
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/rwlock_recursive.h10
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/schedule.h28
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/schedule_types.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/shared_memory.h (renamed from platform/linux-generic/include/odp/api/plat/shared_memory_types.h)19
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/spinlock.h10
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/spinlock_recursive.h10
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/stash.h24
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/stash_types.h38
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/std.h (renamed from platform/linux-generic/include/odp/api/packet_io_stats.h)10
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/std_types.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/sync.h (renamed from platform/linux-generic/include/odp/api/hash.h)14
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/thread.h8
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/thread_types.h5
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/thrmask.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/ticketlock.h (renamed from platform/linux-generic/include/odp/api/plat/ticketlock_types.h)22
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/time.h8
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/time_types.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/timer.h8
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/timer_types.h (renamed from platform/linux-generic/include/odp/api/plat/timer_types.h)16
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/traffic_mngr.h7
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/version.h7
-rw-r--r--platform/linux-generic/include/_ishm_internal.h52
-rw-r--r--platform/linux-generic/include/ishmphy_internal.h2
-rw-r--r--platform/linux-generic/include/odp/api/align.h58
-rw-r--r--platform/linux-generic/include/odp/api/atomic.h42
-rw-r--r--platform/linux-generic/include/odp/api/barrier.h31
-rw-r--r--platform/linux-generic/include/odp/api/buffer.h39
-rw-r--r--platform/linux-generic/include/odp/api/byteorder.h43
-rw-r--r--platform/linux-generic/include/odp/api/classification.h46
-rw-r--r--platform/linux-generic/include/odp/api/compiler.h34
-rw-r--r--platform/linux-generic/include/odp/api/cpumask.h28
-rw-r--r--platform/linux-generic/include/odp/api/crypto.h41
-rw-r--r--platform/linux-generic/include/odp/api/debug.h48
-rw-r--r--platform/linux-generic/include/odp/api/errno.h27
-rw-r--r--platform/linux-generic/include/odp/api/event.h36
-rw-r--r--platform/linux-generic/include/odp/api/hints.h34
-rw-r--r--platform/linux-generic/include/odp/api/init.h36
-rw-r--r--platform/linux-generic/include/odp/api/packet.h38
-rw-r--r--platform/linux-generic/include/odp/api/packet_flags.h31
-rw-r--r--platform/linux-generic/include/odp/api/packet_io.h41
-rw-r--r--platform/linux-generic/include/odp/api/plat/atomic_inlines.h452
-rw-r--r--platform/linux-generic/include/odp/api/plat/atomic_types.h88
-rw-r--r--platform/linux-generic/include/odp/api/plat/buffer_inline_types.h37
-rw-r--r--platform/linux-generic/include/odp/api/plat/buffer_inlines.h84
-rw-r--r--platform/linux-generic/include/odp/api/plat/buffer_types.h50
-rw-r--r--platform/linux-generic/include/odp/api/plat/byteorder_inlines.h55
-rw-r--r--platform/linux-generic/include/odp/api/plat/classification_types.h49
-rw-r--r--platform/linux-generic/include/odp/api/plat/cpu_inlines.h61
-rw-r--r--platform/linux-generic/include/odp/api/plat/cpumask_types.h54
-rw-r--r--platform/linux-generic/include/odp/api/plat/crypto_inlines.h70
-rw-r--r--platform/linux-generic/include/odp/api/plat/debug_inlines.h124
-rw-r--r--platform/linux-generic/include/odp/api/plat/dma_inlines.h135
-rw-r--r--platform/linux-generic/include/odp/api/plat/event_inline_types.h45
-rw-r--r--platform/linux-generic/include/odp/api/plat/event_inlines.h199
-rw-r--r--platform/linux-generic/include/odp/api/plat/event_types.h54
-rw-r--r--platform/linux-generic/include/odp/api/plat/event_validation_external.h111
-rw-r--r--platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h51
-rw-r--r--platform/linux-generic/include/odp/api/plat/hash_inlines.h47
-rw-r--r--platform/linux-generic/include/odp/api/plat/init_types.h35
-rw-r--r--platform/linux-generic/include/odp/api/plat/ipsec_inlines.h58
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h252
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_flag_inlines_api.h41
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_inline_types.h176
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_inlines.h692
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_inlines_api.h113
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_io_inlines.h41
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_types.h157
-rw-r--r--platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h105
-rw-r--r--platform/linux-generic/include/odp/api/plat/pool_inline_types.h47
-rw-r--r--platform/linux-generic/include/odp/api/plat/pool_inlines.h41
-rw-r--r--platform/linux-generic/include/odp/api/plat/pool_types.h55
-rw-r--r--platform/linux-generic/include/odp/api/plat/queue_inline_types.h73
-rw-r--r--platform/linux-generic/include/odp/api/plat/queue_inlines.h72
-rw-r--r--platform/linux-generic/include/odp/api/plat/rwlock_inlines.h105
-rw-r--r--platform/linux-generic/include/odp/api/plat/rwlock_recursive_inlines.h142
-rw-r--r--platform/linux-generic/include/odp/api/plat/rwlock_recursive_types.h38
-rw-r--r--platform/linux-generic/include/odp/api/plat/rwlock_types.h37
-rw-r--r--platform/linux-generic/include/odp/api/plat/schedule_inline_types.h68
-rw-r--r--platform/linux-generic/include/odp/api/plat/schedule_inlines.h135
-rw-r--r--platform/linux-generic/include/odp/api/plat/schedule_types.h62
-rw-r--r--platform/linux-generic/include/odp/api/plat/spinlock_inlines.h65
-rw-r--r--platform/linux-generic/include/odp/api/plat/spinlock_recursive_inlines.h98
-rw-r--r--platform/linux-generic/include/odp/api/plat/spinlock_recursive_types.h36
-rw-r--r--platform/linux-generic/include/odp/api/plat/spinlock_types.h34
-rw-r--r--platform/linux-generic/include/odp/api/plat/static_inline.h.in43
-rw-r--r--platform/linux-generic/include/odp/api/plat/std_inlines.h (renamed from platform/linux-generic/include/odp/api/plat/std_clib_inlines.h)25
-rw-r--r--platform/linux-generic/include/odp/api/plat/strong_types.h12
-rw-r--r--platform/linux-generic/include/odp/api/plat/sync_inlines.h45
-rw-r--r--platform/linux-generic/include/odp/api/plat/thread_inline_types.h36
-rw-r--r--platform/linux-generic/include/odp/api/plat/thread_inlines.h51
-rw-r--r--platform/linux-generic/include/odp/api/plat/thread_types.h34
-rw-r--r--platform/linux-generic/include/odp/api/plat/thrmask_types.h48
-rw-r--r--platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h88
-rw-r--r--platform/linux-generic/include/odp/api/plat/ticketlock_inlines_api.h36
-rw-r--r--platform/linux-generic/include/odp/api/plat/time_inlines.h191
-rw-r--r--platform/linux-generic/include/odp/api/plat/time_types.h43
-rw-r--r--platform/linux-generic/include/odp/api/plat/timer_inline_types.h40
-rw-r--r--platform/linux-generic/include/odp/api/plat/timer_inlines.h106
-rw-r--r--platform/linux-generic/include/odp/api/plat/traffic_mngr_types.h185
-rw-r--r--platform/linux-generic/include/odp/api/plat/version_types.h30
-rw-r--r--platform/linux-generic/include/odp/api/pool.h37
-rw-r--r--platform/linux-generic/include/odp/api/queue.h44
-rw-r--r--platform/linux-generic/include/odp/api/random.h34
-rw-r--r--platform/linux-generic/include/odp/api/rwlock.h28
-rw-r--r--platform/linux-generic/include/odp/api/rwlock_recursive.h28
-rw-r--r--platform/linux-generic/include/odp/api/schedule.h36
-rw-r--r--platform/linux-generic/include/odp/api/schedule_types.h28
-rw-r--r--platform/linux-generic/include/odp/api/shared_memory.h36
-rw-r--r--platform/linux-generic/include/odp/api/spinlock.h28
-rw-r--r--platform/linux-generic/include/odp/api/spinlock_recursive.h28
-rw-r--r--platform/linux-generic/include/odp/api/std_clib.h28
-rw-r--r--platform/linux-generic/include/odp/api/std_types.h42
-rw-r--r--platform/linux-generic/include/odp/api/sync.h39
-rw-r--r--platform/linux-generic/include/odp/api/system_info.h29
-rw-r--r--platform/linux-generic/include/odp/api/thread.h28
-rw-r--r--platform/linux-generic/include/odp/api/thrmask.h36
-rw-r--r--platform/linux-generic/include/odp/api/ticketlock.h32
-rw-r--r--platform/linux-generic/include/odp/api/time.h31
-rw-r--r--platform/linux-generic/include/odp/api/timer.h40
-rw-r--r--platform/linux-generic/include/odp/api/traffic_mngr.h35
-rw-r--r--platform/linux-generic/include/odp/api/version.h27
-rw-r--r--platform/linux-generic/include/odp/drv/README2
-rw-r--r--platform/linux-generic/include/odp/drv/compiler.h34
-rw-r--r--platform/linux-generic/include/odp/drv/std_types.h42
-rw-r--r--platform/linux-generic/include/odp/visibility_begin.h15
-rw-r--r--platform/linux-generic/include/odp/visibility_end.h15
-rw-r--r--platform/linux-generic/include/odp_align_internal.h67
-rw-r--r--platform/linux-generic/include/odp_atomic_internal.h514
-rw-r--r--platform/linux-generic/include/odp_bitmap_internal.h317
-rw-r--r--platform/linux-generic/include/odp_bitset.h96
-rw-r--r--platform/linux-generic/include/odp_buffer_inlines.h35
-rw-r--r--platform/linux-generic/include/odp_buffer_internal.h108
-rw-r--r--platform/linux-generic/include/odp_chksum_internal.h204
-rw-r--r--platform/linux-generic/include/odp_classification_datamodel.h216
-rw-r--r--platform/linux-generic/include/odp_classification_inlines.h377
-rw-r--r--platform/linux-generic/include/odp_classification_internal.h286
-rw-r--r--platform/linux-generic/include/odp_config_internal.h146
-rw-r--r--platform/linux-generic/include/odp_crypto_internal.h97
-rw-r--r--platform/linux-generic/include/odp_debug_internal.h77
-rw-r--r--platform/linux-generic/include/odp_errno_define.h4
-rw-r--r--platform/linux-generic/include/odp_ethtool_rss.h65
-rw-r--r--platform/linux-generic/include/odp_ethtool_stats.h31
-rw-r--r--platform/linux-generic/include/odp_event_internal.h100
-rw-r--r--platform/linux-generic/include/odp_event_validation_internal.h52
-rw-r--r--platform/linux-generic/include/odp_event_vector_internal.h81
-rw-r--r--platform/linux-generic/include/odp_fdserver_internal.h (renamed from platform/linux-generic/include/_fdserver_internal.h)4
-rw-r--r--platform/linux-generic/include/odp_forward_typedefs_internal.h7
-rw-r--r--platform/linux-generic/include/odp_global_data.h105
-rw-r--r--platform/linux-generic/include/odp_init_internal.h115
-rw-r--r--platform/linux-generic/include/odp_internal.h136
-rw-r--r--platform/linux-generic/include/odp_ipsec_internal.h413
-rw-r--r--platform/linux-generic/include/odp_ishmphy_internal.h (renamed from platform/linux-generic/include/_ishmphy_internal.h)8
-rw-r--r--platform/linux-generic/include/odp_ishmpool_internal.h54
-rw-r--r--platform/linux-generic/include/odp_libconfig_internal.h41
-rw-r--r--platform/linux-generic/include/odp_llqueue.h324
-rw-r--r--platform/linux-generic/include/odp_macros_internal.h103
-rw-r--r--platform/linux-generic/include/odp_ml_fp16.h23
-rw-r--r--platform/linux-generic/include/odp_name_table_internal.h3
-rw-r--r--platform/linux-generic/include/odp_packet_dpdk.h116
-rw-r--r--platform/linux-generic/include/odp_packet_internal.h477
-rw-r--r--platform/linux-generic/include/odp_packet_io_internal.h373
-rw-r--r--platform/linux-generic/include/odp_packet_io_ipc_internal.h56
-rw-r--r--platform/linux-generic/include/odp_packet_io_queue.h50
-rw-r--r--platform/linux-generic/include/odp_packet_io_ring_internal.h589
-rw-r--r--platform/linux-generic/include/odp_packet_io_stats.h42
-rw-r--r--platform/linux-generic/include/odp_packet_io_stats_common.h23
-rw-r--r--platform/linux-generic/include/odp_packet_netmap.h68
-rw-r--r--platform/linux-generic/include/odp_packet_socket.h179
-rw-r--r--platform/linux-generic/include/odp_packet_tap.h21
-rw-r--r--platform/linux-generic/include/odp_parse_internal.h120
-rw-r--r--platform/linux-generic/include/odp_pcapng.h29
-rw-r--r--platform/linux-generic/include/odp_pkt_queue_internal.h8
-rw-r--r--platform/linux-generic/include/odp_pool_internal.h176
-rw-r--r--platform/linux-generic/include/odp_posix_extensions.h2
-rw-r--r--platform/linux-generic/include/odp_print_internal.h22
-rw-r--r--platform/linux-generic/include/odp_queue_basic_internal.h126
-rw-r--r--platform/linux-generic/include/odp_queue_if.h73
-rw-r--r--platform/linux-generic/include/odp_queue_internal.h114
-rw-r--r--platform/linux-generic/include/odp_queue_lf.h37
-rw-r--r--platform/linux-generic/include/odp_queue_scalable_internal.h104
-rw-r--r--platform/linux-generic/include/odp_random_openssl_internal.h23
-rw-r--r--platform/linux-generic/include/odp_random_std_internal.h24
-rw-r--r--platform/linux-generic/include/odp_ring_common.h22
-rw-r--r--platform/linux-generic/include/odp_ring_internal.h267
-rw-r--r--platform/linux-generic/include/odp_ring_mpmc_internal.h350
-rw-r--r--platform/linux-generic/include/odp_ring_mpmc_u32_internal.h25
-rw-r--r--platform/linux-generic/include/odp_ring_mpmc_u64_internal.h25
-rw-r--r--platform/linux-generic/include/odp_ring_ptr_internal.h25
-rw-r--r--platform/linux-generic/include/odp_ring_spsc_internal.h130
-rw-r--r--platform/linux-generic/include/odp_ring_st_internal.h111
-rw-r--r--platform/linux-generic/include/odp_ring_u32_internal.h25
-rw-r--r--platform/linux-generic/include/odp_ring_u64_internal.h25
-rw-r--r--platform/linux-generic/include/odp_schedule_if.h100
-rw-r--r--platform/linux-generic/include/odp_schedule_scalable.h151
-rw-r--r--platform/linux-generic/include/odp_schedule_scalable_config.h55
-rw-r--r--platform/linux-generic/include/odp_schedule_scalable_ordered.h126
-rw-r--r--platform/linux-generic/include/odp_shm_internal.h38
-rw-r--r--platform/linux-generic/include/odp_socket_common.h73
-rw-r--r--platform/linux-generic/include/odp_sorted_list_internal.h2
-rw-r--r--platform/linux-generic/include/odp_sysfs_stats.h31
-rw-r--r--platform/linux-generic/include/odp_sysinfo_internal.h45
-rw-r--r--platform/linux-generic/include/odp_timer_internal.h46
-rw-r--r--platform/linux-generic/include/odp_timer_wheel_internal.h2
-rw-r--r--platform/linux-generic/include/odp_traffic_mngr_internal.h61
-rw-r--r--platform/linux-generic/include/odp_types_internal.h24
-rw-r--r--platform/linux-generic/include/protocols/eth.h8
-rw-r--r--platform/linux-generic/include/protocols/ip.h17
-rw-r--r--platform/linux-generic/include/protocols/ipsec.h14
-rw-r--r--platform/linux-generic/include/protocols/sctp.h51
-rw-r--r--platform/linux-generic/include/protocols/tcp.h5
-rw-r--r--platform/linux-generic/include/protocols/thash.h111
-rw-r--r--platform/linux-generic/include/protocols/udp.h8
-rw-r--r--platform/linux-generic/libodp-linux.pc.in12
-rw-r--r--platform/linux-generic/m4/configure.m4116
-rw-r--r--platform/linux-generic/m4/odp_cpu.m435
-rw-r--r--platform/linux-generic/m4/odp_crypto.m450
-rw-r--r--platform/linux-generic/m4/odp_dpdk.m472
-rw-r--r--platform/linux-generic/m4/odp_event_validation.m423
-rw-r--r--platform/linux-generic/m4/odp_ipc.m49
-rw-r--r--platform/linux-generic/m4/odp_ipsec_mb.m419
-rw-r--r--platform/linux-generic/m4/odp_libconfig.m436
-rw-r--r--platform/linux-generic/m4/odp_ml.m446
-rw-r--r--platform/linux-generic/m4/odp_netmap.m442
-rw-r--r--platform/linux-generic/m4/odp_openssl.m453
-rw-r--r--platform/linux-generic/m4/odp_pcap.m422
-rw-r--r--platform/linux-generic/m4/odp_pcapng.m420
-rw-r--r--platform/linux-generic/m4/odp_pthread.m413
-rw-r--r--platform/linux-generic/m4/odp_schedule.m413
-rw-r--r--platform/linux-generic/m4/odp_scheduler.m411
-rw-r--r--platform/linux-generic/m4/odp_wfe.m414
-rw-r--r--platform/linux-generic/m4/odp_xdp.m415
-rw-r--r--platform/linux-generic/miniz/miniz.c619
-rw-r--r--platform/linux-generic/miniz/miniz.h363
-rw-r--r--platform/linux-generic/miniz/miniz_common.h68
-rw-r--r--platform/linux-generic/miniz/miniz_tdef.c1564
-rw-r--r--platform/linux-generic/miniz/miniz_tdef.h183
-rw-r--r--platform/linux-generic/miniz/miniz_tinfl.c725
-rw-r--r--platform/linux-generic/miniz/miniz_tinfl.h146
-rw-r--r--platform/linux-generic/odp_atomic_api.c11
-rw-r--r--platform/linux-generic/odp_barrier.c9
-rw-r--r--platform/linux-generic/odp_bitmap.c315
-rw-r--r--platform/linux-generic/odp_buffer.c84
-rw-r--r--platform/linux-generic/odp_buffer_api.c11
-rw-r--r--platform/linux-generic/odp_byteorder_api.c (renamed from platform/linux-generic/odp_byteorder.c)7
-rw-r--r--platform/linux-generic/odp_chksum.c14
-rw-r--r--platform/linux-generic/odp_classification.c1854
-rw-r--r--platform/linux-generic/odp_comp.c680
-rw-r--r--platform/linux-generic/odp_cpu.c16
-rw-r--r--platform/linux-generic/odp_cpu_api.c11
-rw-r--r--platform/linux-generic/odp_cpumask.c62
-rw-r--r--platform/linux-generic/odp_cpumask_task.c115
-rw-r--r--platform/linux-generic/odp_crypto.c1132
-rw-r--r--platform/linux-generic/odp_crypto_api.c11
-rw-r--r--platform/linux-generic/odp_crypto_ipsecmb.c895
-rw-r--r--platform/linux-generic/odp_crypto_null.c510
-rw-r--r--platform/linux-generic/odp_crypto_openssl.c2830
-rw-r--r--platform/linux-generic/odp_dma.c864
-rw-r--r--platform/linux-generic/odp_dma_api.c11
-rw-r--r--platform/linux-generic/odp_errno.c15
-rw-r--r--platform/linux-generic/odp_event.c108
-rw-r--r--platform/linux-generic/odp_event_api.c11
-rw-r--r--platform/linux-generic/odp_event_validation.c260
-rw-r--r--platform/linux-generic/odp_fdserver.c (renamed from platform/linux-generic/_fdserver.c)266
-rw-r--r--platform/linux-generic/odp_hash.c489
-rw-r--r--platform/linux-generic/odp_hash_api.c11
-rw-r--r--platform/linux-generic/odp_hash_crc_gen.c248
-rw-r--r--platform/linux-generic/odp_impl.c32
-rw-r--r--platform/linux-generic/odp_init.c766
-rw-r--r--platform/linux-generic/odp_ipsec.c2725
-rw-r--r--platform/linux-generic/odp_ipsec_api.c11
-rw-r--r--platform/linux-generic/odp_ipsec_events.c175
-rw-r--r--platform/linux-generic/odp_ipsec_sad.c1307
-rw-r--r--platform/linux-generic/odp_ishm.c (renamed from platform/linux-generic/_ishm.c)1501
-rw-r--r--platform/linux-generic/odp_ishmphy.c145
-rw-r--r--platform/linux-generic/odp_ishmpool.c659
-rw-r--r--platform/linux-generic/odp_libconfig.c344
-rw-r--r--platform/linux-generic/odp_ml.c2646
-rw-r--r--platform/linux-generic/odp_ml_fp16.c425
-rw-r--r--platform/linux-generic/odp_ml_null.c232
-rw-r--r--platform/linux-generic/odp_ml_quantize.c79
-rw-r--r--platform/linux-generic/odp_name_table.c109
-rw-r--r--platform/linux-generic/odp_packet.c2693
-rw-r--r--platform/linux-generic/odp_packet_api.c15
-rw-r--r--platform/linux-generic/odp_packet_flags.c235
-rw-r--r--platform/linux-generic/odp_packet_flags_api.c12
-rw-r--r--platform/linux-generic/odp_packet_io.c2802
-rw-r--r--platform/linux-generic/odp_packet_io_api.c11
-rw-r--r--platform/linux-generic/odp_packet_vector.c142
-rw-r--r--platform/linux-generic/odp_parse.c482
-rw-r--r--platform/linux-generic/odp_pcapng.c606
-rw-r--r--platform/linux-generic/odp_pkt_queue.c141
-rw-r--r--platform/linux-generic/odp_pool.c2027
-rw-r--r--platform/linux-generic/odp_pool_api.c11
-rw-r--r--platform/linux-generic/odp_pool_mem_src_ops.c22
-rw-r--r--platform/linux-generic/odp_print.c47
-rw-r--r--platform/linux-generic/odp_queue.c772
-rw-r--r--platform/linux-generic/odp_queue_api.c11
-rw-r--r--platform/linux-generic/odp_queue_basic.c1301
-rw-r--r--platform/linux-generic/odp_queue_if.c146
-rw-r--r--platform/linux-generic/odp_queue_lf.c370
-rw-r--r--platform/linux-generic/odp_queue_scalable.c1201
-rw-r--r--platform/linux-generic/odp_queue_spsc.c136
-rw-r--r--platform/linux-generic/odp_random.c66
-rw-r--r--platform/linux-generic/odp_random_openssl.c41
-rw-r--r--platform/linux-generic/odp_random_std.c106
-rw-r--r--platform/linux-generic/odp_rwlock.c74
-rw-r--r--platform/linux-generic/odp_rwlock_api.c10
-rw-r--r--platform/linux-generic/odp_rwlock_recursive.c107
-rw-r--r--platform/linux-generic/odp_rwlock_recursive_api.c10
-rw-r--r--platform/linux-generic/odp_schedule.c1258
-rw-r--r--platform/linux-generic/odp_schedule_api.c11
-rw-r--r--platform/linux-generic/odp_schedule_basic.c2412
-rw-r--r--platform/linux-generic/odp_schedule_if.c147
-rw-r--r--platform/linux-generic/odp_schedule_iquery.c1521
-rw-r--r--platform/linux-generic/odp_schedule_scalable.c2209
-rw-r--r--platform/linux-generic/odp_schedule_scalable_ordered.c370
-rw-r--r--platform/linux-generic/odp_schedule_sp.c499
-rw-r--r--platform/linux-generic/odp_shared_memory.c60
-rw-r--r--platform/linux-generic/odp_sorted_list.c17
-rw-r--r--platform/linux-generic/odp_spinlock.c40
-rw-r--r--platform/linux-generic/odp_spinlock_api.c10
-rw-r--r--platform/linux-generic/odp_spinlock_recursive.c70
-rw-r--r--platform/linux-generic/odp_spinlock_recursive_api.c10
-rw-r--r--platform/linux-generic/odp_stash.c932
-rw-r--r--platform/linux-generic/odp_std.c19
-rw-r--r--platform/linux-generic/odp_std_api.c11
-rw-r--r--platform/linux-generic/odp_std_clib.c10
-rw-r--r--platform/linux-generic/odp_sync_api.c (renamed from platform/linux-generic/odp_sync.c)7
-rw-r--r--platform/linux-generic/odp_system_info.c379
-rw-r--r--platform/linux-generic/odp_thread.c156
-rw-r--r--platform/linux-generic/odp_thread_api.c12
-rw-r--r--platform/linux-generic/odp_thrmask.c2
-rw-r--r--platform/linux-generic/odp_ticketlock.c19
-rw-r--r--platform/linux-generic/odp_ticketlock_api.c11
-rw-r--r--platform/linux-generic/odp_time.c213
-rw-r--r--platform/linux-generic/odp_time_api.c11
-rw-r--r--platform/linux-generic/odp_timer.c2311
-rw-r--r--platform/linux-generic/odp_timer_api.c11
-rw-r--r--platform/linux-generic/odp_timer_wheel.c67
-rw-r--r--platform/linux-generic/odp_traffic_mngr.c1798
-rw-r--r--platform/linux-generic/odp_version.c2
-rw-r--r--platform/linux-generic/odp_weak.c4
-rw-r--r--platform/linux-generic/pktio/dpdk.c2603
-rw-r--r--platform/linux-generic/pktio/ethtool.c164
-rw-r--r--platform/linux-generic/pktio/ethtool_rss.c253
-rw-r--r--platform/linux-generic/pktio/io_ops.c30
-rw-r--r--platform/linux-generic/pktio/ipc.c791
-rw-r--r--platform/linux-generic/pktio/loop.c786
-rw-r--r--platform/linux-generic/pktio/netmap.c972
-rw-r--r--platform/linux-generic/pktio/null.c216
-rw-r--r--platform/linux-generic/pktio/pcap.c373
-rw-r--r--platform/linux-generic/pktio/pktio_common.c171
-rw-r--r--platform/linux-generic/pktio/ring.c660
-rw-r--r--platform/linux-generic/pktio/socket.c1040
-rw-r--r--platform/linux-generic/pktio/socket_common.c297
-rw-r--r--platform/linux-generic/pktio/socket_mmap.c952
-rw-r--r--platform/linux-generic/pktio/socket_xdp.c1249
-rw-r--r--platform/linux-generic/pktio/stats/ethtool_stats.c281
-rw-r--r--platform/linux-generic/pktio/stats/packet_io_stats.c192
-rw-r--r--platform/linux-generic/pktio/stats/sysfs_stats.c204
-rw-r--r--platform/linux-generic/pktio/sysfs.c77
-rw-r--r--platform/linux-generic/pktio/tap.c383
-rw-r--r--platform/linux-generic/test/.gitignore3
-rw-r--r--platform/linux-generic/test/Makefile.am58
-rw-r--r--platform/linux-generic/test/example/Makefile.am11
-rw-r--r--platform/linux-generic/test/example/classifier/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/classifier/pktio_env44
-rw-r--r--platform/linux-generic/test/example/generator/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/generator/pktio_env34
-rw-r--r--platform/linux-generic/test/example/ipsec_api/Makefile.am21
-rw-r--r--platform/linux-generic/test/example/ipsec_api/pktio_env77
-rw-r--r--platform/linux-generic/test/example/ipsec_crypto/Makefile.am21
-rw-r--r--platform/linux-generic/test/example/ipsec_crypto/pktio_env77
-rw-r--r--platform/linux-generic/test/example/l2fwd_simple/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/l2fwd_simple/pktio_env47
-rw-r--r--platform/linux-generic/test/example/l3fwd/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/l3fwd/pktio_env51
-rw-r--r--platform/linux-generic/test/example/packet/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/packet/pktio_env50
-rw-r--r--platform/linux-generic/test/example/ping/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/ping/pktio_env50
-rw-r--r--platform/linux-generic/test/example/simple_pipeline/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/simple_pipeline/pktio_env47
-rw-r--r--platform/linux-generic/test/example/switch/Makefile.am1
-rw-r--r--platform/linux-generic/test/example/switch/pktio_env54
-rw-r--r--platform/linux-generic/test/inline-timer.conf8
-rw-r--r--platform/linux-generic/test/packet_align.conf21
-rw-r--r--platform/linux-generic/test/performance/Makefile.am1
-rw-r--r--platform/linux-generic/test/performance/dmafwd/Makefile.am18
-rw-r--r--platform/linux-generic/test/performance/dmafwd/pktio_env57
-rw-r--r--platform/linux-generic/test/pktio_ipc/.gitignore2
-rw-r--r--platform/linux-generic/test/pktio_ipc/Makefile.am31
-rw-r--r--platform/linux-generic/test/pktio_ipc/ipc_common.c170
-rw-r--r--platform/linux-generic/test/pktio_ipc/ipc_common.h99
-rw-r--r--platform/linux-generic/test/pktio_ipc/pktio_ipc1.c381
-rw-r--r--platform/linux-generic/test/pktio_ipc/pktio_ipc2.c268
-rwxr-xr-xplatform/linux-generic/test/pktio_ipc/pktio_ipc_run.sh85
-rw-r--r--platform/linux-generic/test/process-mode.conf9
-rw-r--r--platform/linux-generic/test/sched-basic.conf13
-rw-r--r--platform/linux-generic/test/stash-custom.conf8
-rw-r--r--platform/linux-generic/test/validation/api/Makefile.inc1
-rw-r--r--platform/linux-generic/test/validation/api/ml/.gitignore1
-rw-r--r--platform/linux-generic/test/validation/api/ml/Makefile.am34
-rw-r--r--platform/linux-generic/test/validation/api/ml/README.md23
-rw-r--r--platform/linux-generic/test/validation/api/ml/batch_add.onnxbin0 -> 144 bytes
-rw-r--r--platform/linux-generic/test/validation/api/ml/batch_add_gen.py32
-rwxr-xr-xplatform/linux-generic/test/validation/api/ml/gen_models.sh14
-rw-r--r--platform/linux-generic/test/validation/api/ml/ml_linux.c1167
-rw-r--r--platform/linux-generic/test/validation/api/ml/requirements.txt2
-rw-r--r--platform/linux-generic/test/validation/api/ml/simple_linear.onnxbin0 -> 214 bytes
-rw-r--r--platform/linux-generic/test/validation/api/ml/simple_linear_gen.py34
-rw-r--r--platform/linux-generic/test/validation/api/pktio/.gitignore2
-rw-r--r--platform/linux-generic/test/validation/api/pktio/Makefile.am32
-rw-r--r--platform/linux-generic/test/validation/api/pktio/pktio_env120
-rwxr-xr-xplatform/linux-generic/test/validation/api/pktio/pktio_run.sh117
-rwxr-xr-xplatform/linux-generic/test/validation/api/pktio/pktio_run_dpdk.sh95
-rwxr-xr-xplatform/linux-generic/test/validation/api/pktio/pktio_run_pcap.sh39
-rwxr-xr-xplatform/linux-generic/test/validation/api/pktio/pktio_run_tap.sh119
-rw-r--r--platform/linux-generic/test/validation/api/shmem/.gitignore3
-rw-r--r--platform/linux-generic/test/validation/api/shmem/Makefile.am14
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_common.h24
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_linux.c330
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_linux.h9
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_odp1.c91
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_odp1.h7
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_odp2.c105
-rw-r--r--platform/linux-generic/test/validation/api/shmem/shmem_odp2.h7
556 files changed, 73511 insertions, 23418 deletions
diff --git a/platform/linux-generic/.gitignore b/platform/linux-generic/.gitignore
index 909756a1f..16e788a90 100644
--- a/platform/linux-generic/.gitignore
+++ b/platform/linux-generic/.gitignore
@@ -1 +1,2 @@
-include/odp/api/plat/static_inline.h
+libodp-linux.pc
+odp_libconfig_config.h
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 9e8a0064e..11cdb4c64 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -1,233 +1,506 @@
# Uncomment this if you need to change the CUSTOM_STR string
-#export CUSTOM_STR=https://git.linaro.org/lng/odp.git
+#export CUSTOM_STR=https://github.com/OpenDataPlane/odp.git
include $(top_srcdir)/platform/Makefile.inc
-include $(top_srcdir)/platform/@with_platform@/Makefile.inc
-
-AM_CFLAGS += -I$(srcdir)/include
-AM_CFLAGS += -I$(top_srcdir)/include
-AM_CFLAGS += -I$(top_srcdir)/include/odp/arch/@ARCH_ABI@
-AM_CFLAGS += -I$(top_builddir)/include
-AM_CFLAGS += -Iinclude
-
-include_HEADERS = \
- $(top_srcdir)/include/odp.h \
- $(top_srcdir)/include/odp_api.h
-
-odpincludedir= $(includedir)/odp
-odpinclude_HEADERS = \
- $(srcdir)/include/odp/visibility_begin.h \
- $(srcdir)/include/odp/visibility_end.h
-
-odpapiincludedir= $(includedir)/odp/api
-odpapiinclude_HEADERS = \
- $(srcdir)/include/odp/api/align.h \
- $(srcdir)/include/odp/api/atomic.h \
- $(srcdir)/include/odp/api/barrier.h \
- $(srcdir)/include/odp/api/buffer.h \
- $(srcdir)/include/odp/api/byteorder.h \
- $(srcdir)/include/odp/api/classification.h \
- $(srcdir)/include/odp/api/compiler.h \
- $(srcdir)/include/odp/api/cpu.h \
- $(srcdir)/include/odp/api/cpumask.h \
- $(srcdir)/include/odp/api/crypto.h \
- $(srcdir)/include/odp/api/debug.h \
- $(srcdir)/include/odp/api/errno.h \
- $(srcdir)/include/odp/api/event.h \
- $(srcdir)/include/odp/api/hash.h \
- $(srcdir)/include/odp/api/hints.h \
- $(srcdir)/include/odp/api/init.h \
- $(srcdir)/include/odp/api/packet_flags.h \
- $(srcdir)/include/odp/api/packet.h \
- $(srcdir)/include/odp/api/packet_io.h \
- $(srcdir)/include/odp/api/packet_io_stats.h \
- $(srcdir)/include/odp/api/pool.h \
- $(srcdir)/include/odp/api/queue.h \
- $(srcdir)/include/odp/api/random.h \
- $(srcdir)/include/odp/api/rwlock.h \
- $(srcdir)/include/odp/api/rwlock_recursive.h \
- $(srcdir)/include/odp/api/schedule.h \
- $(srcdir)/include/odp/api/schedule_types.h \
- $(srcdir)/include/odp/api/shared_memory.h \
- $(srcdir)/include/odp/api/spinlock.h \
- $(srcdir)/include/odp/api/spinlock_recursive.h \
- $(srcdir)/include/odp/api/std_clib.h \
- $(srcdir)/include/odp/api/std_types.h \
- $(srcdir)/include/odp/api/sync.h \
- $(srcdir)/include/odp/api/system_info.h \
- $(srcdir)/include/odp/api/thread.h \
- $(srcdir)/include/odp/api/thrmask.h \
- $(srcdir)/include/odp/api/ticketlock.h \
- $(srcdir)/include/odp/api/time.h \
- $(srcdir)/include/odp/api/timer.h \
- $(srcdir)/include/odp/api/traffic_mngr.h \
- $(srcdir)/include/odp/api/version.h \
- $(srcdir)/arch/@ARCH_DIR@/odp/api/cpu_arch.h
+lib_LTLIBRARIES += $(LIB)/libodp-linux.la
+AM_CPPFLAGS = $(ODP_INCLUDES)
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_builddir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/@ARCH_DIR@
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/default
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/common
+
+AM_CPPFLAGS += $(OPENSSL_CPPFLAGS)
+AM_CPPFLAGS += $(ORT_CPPFLAGS)
+
+AM_CFLAGS += $(AARCH64CRYPTO_CFLAGS)
+AM_CFLAGS += $(DPDK_CFLAGS)
+AM_CFLAGS += $(LIBCONFIG_CFLAGS)
+AM_CFLAGS += $(LIBXDP_CFLAGS)
+
+DISTCLEANFILES = include/odp_libconfig_config.h
+include/odp_libconfig_config.h: $(top_builddir)/$(rel_default_config_path) $(top_builddir)/config.status
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
+
+odpapiabiarchincludedir = $(archincludedir)/odp/api/abi
+odpapiabiarchinclude_HEADERS =
+
+if !ODP_ABI_COMPAT
odpapiplatincludedir= $(includedir)/odp/api/plat
odpapiplatinclude_HEADERS = \
- $(builddir)/include/odp/api/plat/static_inline.h \
- $(srcdir)/include/odp/api/plat/atomic_inlines.h \
- $(srcdir)/include/odp/api/plat/atomic_types.h \
- $(srcdir)/include/odp/api/plat/barrier_types.h \
- $(srcdir)/include/odp/api/plat/buffer_types.h \
- $(srcdir)/include/odp/api/plat/byteorder_inlines.h \
- $(srcdir)/include/odp/api/plat/byteorder_types.h \
- $(srcdir)/include/odp/api/plat/classification_types.h \
- $(srcdir)/include/odp/api/plat/cpumask_types.h \
- $(srcdir)/include/odp/api/plat/crypto_types.h \
- $(srcdir)/include/odp/api/plat/event_types.h \
- $(srcdir)/include/odp/api/plat/init_types.h \
- $(srcdir)/include/odp/api/plat/packet_flag_inlines.h \
- $(srcdir)/include/odp/api/plat/packet_flag_inlines_api.h \
- $(srcdir)/include/odp/api/plat/packet_inlines.h \
- $(srcdir)/include/odp/api/plat/packet_inlines_api.h \
- $(srcdir)/include/odp/api/plat/packet_types.h \
- $(srcdir)/include/odp/api/plat/packet_io_types.h \
- $(srcdir)/include/odp/api/plat/pool_types.h \
- $(srcdir)/include/odp/api/plat/queue_types.h \
- $(srcdir)/include/odp/api/plat/rwlock_types.h \
- $(srcdir)/include/odp/api/plat/rwlock_recursive_types.h \
- $(srcdir)/include/odp/api/plat/schedule_types.h \
- $(srcdir)/include/odp/api/plat/shared_memory_types.h \
- $(srcdir)/include/odp/api/plat/spinlock_types.h \
- $(srcdir)/include/odp/api/plat/spinlock_recursive_types.h \
- $(srcdir)/include/odp/api/plat/std_clib_inlines.h \
- $(srcdir)/include/odp/api/plat/strong_types.h \
- $(srcdir)/include/odp/api/plat/sync_inlines.h \
- $(srcdir)/include/odp/api/plat/thread_types.h \
- $(srcdir)/include/odp/api/plat/thrmask_types.h \
- $(srcdir)/include/odp/api/plat/ticketlock_inlines.h \
- $(srcdir)/include/odp/api/plat/ticketlock_inlines_api.h \
- $(srcdir)/include/odp/api/plat/ticketlock_types.h \
- $(srcdir)/include/odp/api/plat/time_types.h \
- $(srcdir)/include/odp/api/plat/timer_types.h \
- $(srcdir)/include/odp/api/plat/traffic_mngr_types.h \
- $(srcdir)/include/odp/api/plat/version_types.h
-
-odpdrvincludedir = $(includedir)/odp/drv
-odpdrvinclude_HEADERS = \
- $(srcdir)/include/odp/drv/compiler.h
+ include/odp/api/plat/atomic_inlines.h \
+ include/odp/api/plat/buffer_inlines.h \
+ include/odp/api/plat/buffer_inline_types.h \
+ include/odp/api/plat/byteorder_inlines.h \
+ include/odp/api/plat/cpu_inlines.h \
+ include/odp/api/plat/crypto_inlines.h \
+ include/odp/api/plat/dma_inlines.h \
+ include/odp/api/plat/debug_inlines.h \
+ include/odp/api/plat/event_inlines.h \
+ include/odp/api/plat/event_inline_types.h \
+ include/odp/api/plat/event_validation_external.h \
+ include/odp/api/plat/event_vector_inline_types.h \
+ include/odp/api/plat/hash_inlines.h \
+ include/odp/api/plat/ipsec_inlines.h \
+ include/odp/api/plat/packet_flag_inlines.h \
+ include/odp/api/plat/packet_inline_types.h \
+ include/odp/api/plat/packet_inlines.h \
+ include/odp/api/plat/packet_vector_inlines.h \
+ include/odp/api/plat/packet_io_inlines.h \
+ include/odp/api/plat/pool_inlines.h \
+ include/odp/api/plat/pool_inline_types.h \
+ include/odp/api/plat/queue_inlines.h \
+ include/odp/api/plat/queue_inline_types.h \
+ include/odp/api/plat/rwlock_inlines.h \
+ include/odp/api/plat/rwlock_recursive_inlines.h \
+ include/odp/api/plat/schedule_inlines.h \
+ include/odp/api/plat/schedule_inline_types.h \
+ include/odp/api/plat/spinlock_inlines.h \
+ include/odp/api/plat/spinlock_recursive_inlines.h \
+ include/odp/api/plat/std_inlines.h \
+ include/odp/api/plat/strong_types.h \
+ include/odp/api/plat/sync_inlines.h \
+ include/odp/api/plat/thread_inlines.h \
+ include/odp/api/plat/thread_inline_types.h \
+ include/odp/api/plat/ticketlock_inlines.h \
+ include/odp/api/plat/time_inlines.h \
+ include/odp/api/plat/timer_inlines.h \
+ include/odp/api/plat/timer_inline_types.h
+
+odpapiabiarchinclude_HEADERS += \
+ include-abi/odp/api/abi/align.h \
+ include-abi/odp/api/abi/atomic.h \
+ include-abi/odp/api/abi/barrier.h \
+ include-abi/odp/api/abi/buffer.h \
+ include-abi/odp/api/abi/buffer_types.h \
+ include-abi/odp/api/abi/byteorder.h \
+ include-abi/odp/api/abi/classification.h \
+ include-abi/odp/api/abi/comp.h \
+ include-abi/odp/api/abi/cpumask.h \
+ include-abi/odp/api/abi/crypto.h \
+ include-abi/odp/api/abi/crypto_types.h \
+ include-abi/odp/api/abi/debug.h \
+ include-abi/odp/api/abi/dma.h \
+ include-abi/odp/api/abi/dma_types.h \
+ include-abi/odp/api/abi/errno.h \
+ include-abi/odp/api/abi/event.h \
+ include-abi/odp/api/abi/event_types.h \
+ include-abi/odp/api/abi/hash.h \
+ include-abi/odp/api/abi/init.h \
+ include-abi/odp/api/abi/ipsec.h \
+ include-abi/odp/api/abi/ipsec_types.h \
+ include-abi/odp/api/abi/ml_types.h \
+ include-abi/odp/api/abi/packet.h \
+ include-abi/odp/api/abi/packet_types.h \
+ include-abi/odp/api/abi/packet_flags.h \
+ include-abi/odp/api/abi/packet_io.h \
+ include-abi/odp/api/abi/packet_io_types.h \
+ include-abi/odp/api/abi/proto_stats.h \
+ include-abi/odp/api/abi/proto_stats_types.h \
+ include-abi/odp/api/abi/pool.h \
+ include-abi/odp/api/abi/pool_types.h \
+ include-abi/odp/api/abi/queue.h \
+ include-abi/odp/api/abi/queue_types.h \
+ include-abi/odp/api/abi/random.h \
+ include-abi/odp/api/abi/rwlock.h \
+ include-abi/odp/api/abi/rwlock_recursive.h \
+ include-abi/odp/api/abi/schedule.h \
+ include-abi/odp/api/abi/schedule_types.h \
+ include-abi/odp/api/abi/shared_memory.h \
+ include-abi/odp/api/abi/spinlock.h \
+ include-abi/odp/api/abi/spinlock_recursive.h \
+ include-abi/odp/api/abi/stash.h \
+ include-abi/odp/api/abi/stash_types.h \
+ include-abi/odp/api/abi/std.h \
+ include-abi/odp/api/abi/std_types.h \
+ include-abi/odp/api/abi/sync.h \
+ include-abi/odp/api/abi/thread.h \
+ include-abi/odp/api/abi/thread_types.h \
+ include-abi/odp/api/abi/thrmask.h \
+ include-abi/odp/api/abi/ticketlock.h \
+ include-abi/odp/api/abi/time.h \
+ include-abi/odp/api/abi/time_types.h \
+ include-abi/odp/api/abi/timer.h \
+ include-abi/odp/api/abi/timer_types.h \
+ include-abi/odp/api/abi/traffic_mngr.h \
+ include-abi/odp/api/abi/version.h
+endif
noinst_HEADERS = \
- ${srcdir}/include/_fdserver_internal.h \
- ${srcdir}/include/_ishm_internal.h \
- ${srcdir}/include/_ishmphy_internal.h \
- ${srcdir}/include/odp_align_internal.h \
- ${srcdir}/include/odp_atomic_internal.h \
- ${srcdir}/include/odp_buffer_inlines.h \
- ${srcdir}/include/odp_bitmap_internal.h \
- ${srcdir}/include/odp_buffer_internal.h \
- ${srcdir}/include/odp_classification_datamodel.h \
- ${srcdir}/include/odp_classification_inlines.h \
- ${srcdir}/include/odp_classification_internal.h \
- ${srcdir}/include/odp_config_internal.h \
- ${srcdir}/include/odp_crypto_internal.h \
- ${srcdir}/include/odp_debug_internal.h \
- ${srcdir}/include/odp_errno_define.h \
- ${srcdir}/include/odp_forward_typedefs_internal.h \
- ${srcdir}/include/odp_internal.h \
- ${srcdir}/include/odp_name_table_internal.h \
- ${srcdir}/include/odp_packet_internal.h \
- ${srcdir}/include/odp_packet_io_internal.h \
- ${srcdir}/include/odp_packet_io_ipc_internal.h \
- ${srcdir}/include/odp_packet_io_queue.h \
- ${srcdir}/include/odp_packet_io_ring_internal.h \
- ${srcdir}/include/odp_packet_netmap.h \
- ${srcdir}/include/odp_packet_dpdk.h \
- ${srcdir}/include/odp_packet_socket.h \
- ${srcdir}/include/odp_packet_tap.h \
- ${srcdir}/include/odp_pkt_queue_internal.h \
- ${srcdir}/include/odp_pool_internal.h \
- ${srcdir}/include/odp_posix_extensions.h \
- ${srcdir}/include/odp_queue_internal.h \
- ${srcdir}/include/odp_ring_internal.h \
- ${srcdir}/include/odp_schedule_if.h \
- ${srcdir}/include/odp_sorted_list_internal.h \
- ${srcdir}/include/odp_shm_internal.h \
- ${srcdir}/include/odp_timer_internal.h \
- ${srcdir}/include/odp_timer_wheel_internal.h \
- ${srcdir}/include/odp_traffic_mngr_internal.h \
- ${srcdir}/include/protocols/eth.h \
- ${srcdir}/include/protocols/ip.h \
- ${srcdir}/include/protocols/ipsec.h \
- ${srcdir}/include/protocols/tcp.h \
- ${srcdir}/include/protocols/udp.h \
- ${srcdir}/Makefile.inc
+ include/odp_atomic_internal.h \
+ include/odp_bitset.h \
+ include/odp_buffer_internal.h \
+ include/odp_chksum_internal.h \
+ include/odp_classification_datamodel.h \
+ include/odp_classification_internal.h \
+ include/odp_config_internal.h \
+ include/odp_debug_internal.h \
+ include/odp_errno_define.h \
+ include/odp_event_internal.h \
+ include/odp_event_validation_internal.h \
+ include/odp_fdserver_internal.h \
+ include/odp_forward_typedefs_internal.h \
+ include/odp_ml_fp16.h \
+ include/odp_global_data.h \
+ include/odp_init_internal.h \
+ include/odp_ipsec_internal.h \
+ include/odp_ishmphy_internal.h \
+ include/odp_ishmpool_internal.h \
+ include/odp_libconfig_internal.h \
+ include/odp_llqueue.h \
+ include/odp_macros_internal.h \
+ include/odp_name_table_internal.h \
+ include/odp_packet_dpdk.h \
+ include/odp_packet_internal.h \
+ include/odp_packet_io_internal.h \
+ include/odp_parse_internal.h \
+ include/odp_print_internal.h \
+ include/odp_socket_common.h \
+ include/odp_packet_io_stats_common.h \
+ include/odp_packet_io_stats.h \
+ include/odp_sysfs_stats.h \
+ include/odp_ethtool_stats.h \
+ include/odp_ethtool_rss.h \
+ include/odp_pcapng.h \
+ include/odp_pkt_queue_internal.h \
+ include/odp_pool_internal.h \
+ include/odp_posix_extensions.h \
+ include/odp_queue_if.h \
+ include/odp_queue_basic_internal.h \
+ include/odp_queue_lf.h \
+ include/odp_queue_scalable_internal.h \
+ include/odp_random_std_internal.h \
+ include/odp_random_openssl_internal.h \
+ include/odp_ring_common.h \
+ include/odp_ring_internal.h \
+ include/odp_ring_mpmc_internal.h \
+ include/odp_ring_mpmc_u32_internal.h \
+ include/odp_ring_mpmc_u64_internal.h \
+ include/odp_ring_ptr_internal.h \
+ include/odp_ring_spsc_internal.h \
+ include/odp_ring_st_internal.h \
+ include/odp_ring_u32_internal.h \
+ include/odp_ring_u64_internal.h \
+ include/odp_schedule_if.h \
+ include/odp_schedule_scalable_config.h \
+ include/odp_schedule_scalable.h \
+ include/odp_schedule_scalable_ordered.h \
+ include/odp_shm_internal.h \
+ include/odp_sorted_list_internal.h \
+ include/odp_sysinfo_internal.h \
+ include/odp_timer_internal.h \
+ include/odp_timer_wheel_internal.h \
+ include/odp_traffic_mngr_internal.h \
+ include/odp_types_internal.h \
+ include/odp_event_vector_internal.h \
+ include/protocols/eth.h \
+ include/protocols/ip.h \
+ include/protocols/ipsec.h \
+ include/protocols/sctp.h \
+ include/protocols/tcp.h \
+ include/protocols/thash.h \
+ include/protocols/udp.h
+BUILT_SOURCES = \
+ include/odp_libconfig_config.h
__LIB__libodp_linux_la_SOURCES = \
- _fdserver.c \
- _ishm.c \
- _ishmphy.c \
- odp_atomic.c \
odp_barrier.c \
- odp_bitmap.c \
odp_buffer.c \
- odp_byteorder.c \
+ odp_chksum.c \
odp_classification.c \
- odp_cpu.c \
+ odp_comp.c \
+ miniz/miniz.c miniz/miniz.h miniz/miniz_common.h \
+ miniz/miniz_tdef.c miniz/miniz_tdef.h \
+ miniz/miniz_tinfl.c miniz/miniz_tinfl.h \
odp_cpumask.c \
odp_cpumask_task.c \
- odp_crypto.c \
+ odp_dma.c \
odp_errno.c \
odp_event.c \
- odp_hash.c \
- odp_init.c \
+ odp_event_validation.c \
+ odp_fdserver.c \
+ odp_hash_crc_gen.c \
odp_impl.c \
+ odp_init.c \
+ odp_ipsec.c \
+ odp_ipsec_events.c \
+ odp_ipsec_sad.c \
+ odp_ishm.c \
+ odp_ishmphy.c \
+ odp_ishmpool.c \
+ odp_libconfig.c \
+ odp_ml_fp16.c \
+ odp_ml_quantize.c \
odp_name_table.c \
odp_packet.c \
+ odp_packet_vector.c \
odp_packet_flags.c \
odp_packet_io.c \
- pktio/ethtool.c \
- pktio/io_ops.c \
- pktio/ipc.c \
- pktio/pktio_common.c \
- pktio/loop.c \
- pktio/netmap.c \
- pktio/dpdk.c \
- pktio/socket.c \
- pktio/socket_mmap.c \
- pktio/sysfs.c \
- pktio/tap.c \
- pktio/ring.c \
+ odp_parse.c \
odp_pkt_queue.c \
+ odp_print.c \
odp_pool.c \
- odp_queue.c \
- odp_rwlock.c \
- odp_rwlock_recursive.c \
- odp_schedule.c \
+ odp_pool_mem_src_ops.c \
+ odp_queue_basic.c \
+ odp_queue_if.c \
+ odp_queue_lf.c \
+ odp_queue_scalable.c \
+ odp_queue_spsc.c \
+ odp_random.c \
+ odp_random_std.c \
+ odp_random_openssl.c \
+ odp_schedule_basic.c \
odp_schedule_if.c \
+ odp_schedule_scalable.c \
+ odp_schedule_scalable_ordered.c \
odp_schedule_sp.c \
- odp_schedule_iquery.c \
odp_shared_memory.c \
odp_sorted_list.c \
- odp_spinlock.c \
- odp_spinlock_recursive.c \
- odp_std_clib.c \
- odp_sync.c \
+ odp_stash.c \
+ odp_std.c \
odp_system_info.c \
+ odp_pcapng.c \
odp_thread.c \
odp_thrmask.c \
- odp_ticketlock.c \
- odp_time.c \
odp_timer.c \
odp_timer_wheel.c \
odp_traffic_mngr.c \
odp_version.c \
odp_weak.c \
- arch/@ARCH_DIR@/odp_cpu_arch.c \
- arch/@ARCH_DIR@/odp_sysinfo_parse.c
+ pktio/stats/ethtool_stats.c \
+ pktio/stats/sysfs_stats.c \
+ pktio/stats/packet_io_stats.c \
+ pktio/dpdk.c \
+ pktio/socket_common.c \
+ pktio/ethtool_rss.c \
+ pktio/io_ops.c \
+ pktio/ipc.c \
+ pktio/loop.c \
+ pktio/null.c \
+ pktio/pktio_common.c \
+ pktio/socket.c \
+ pktio/socket_mmap.c \
+ pktio/socket_xdp.c \
+ pktio/tap.c
+
+if WITH_OPENSSL_CRYPTO
+__LIB__libodp_linux_la_SOURCES += \
+ odp_crypto_openssl.c
+else
+if WITH_ARMV8_CRYPTO
+__LIB__libodp_linux_la_SOURCES += \
+ arch/aarch64/odp_crypto_armv8.c
+else
+if WITH_IPSECMB_CRYPTO
+__LIB__libodp_linux_la_SOURCES += \
+ odp_crypto_ipsecmb.c
+else
+__LIB__libodp_linux_la_SOURCES += \
+ odp_crypto_null.c
+endif
+endif
+endif
+
+if WITH_ML
+__LIB__libodp_linux_la_SOURCES += \
+ odp_ml.c
+else
+__LIB__libodp_linux_la_SOURCES += \
+ odp_ml_null.c
+endif
+
+if ODP_ABI_COMPAT
+__LIB__libodp_linux_la_SOURCES += \
+ odp_atomic_api.c \
+ odp_buffer_api.c \
+ odp_byteorder_api.c \
+ odp_cpu_api.c \
+ odp_crypto_api.c \
+ odp_dma_api.c \
+ odp_event_api.c \
+ odp_hash_api.c \
+ odp_ipsec_api.c \
+ odp_packet_api.c \
+ odp_packet_flags_api.c \
+ odp_packet_io_api.c \
+ odp_pool_api.c \
+ odp_queue_api.c \
+ odp_rwlock_api.c \
+ odp_rwlock_recursive_api.c \
+ odp_schedule_api.c \
+ odp_spinlock_api.c \
+ odp_spinlock_recursive_api.c \
+ odp_std_api.c \
+ odp_sync_api.c \
+ odp_thread_api.c \
+ odp_ticketlock_api.c \
+ odp_time_api.c \
+ odp_timer_api.c
+endif
+
+if ARCH_IS_ARM
+__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
+ arch/default/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/arm/odp_sysinfo_parse.c \
+ arch/default/odp_time.c
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/hash_crc32.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/arm/odp/api/abi/cpu_inlines.h \
+ arch/arm/odp/api/abi/cpu.h \
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/arm/odp_cpu.h \
+ arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_random.h \
+ arch/default/odp_wait_until.h
+endif
+if ARCH_IS_AARCH64
+__LIB__libodp_linux_la_SOURCES += arch/aarch64/odp_atomic.c \
+ arch/aarch64/odp_cpu_cycles.c \
+ arch/aarch64/cpu_flags.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/aarch64/odp_sysinfo_parse.c \
+ arch/common/odp_time_cpu.c
+odpapiabiarchinclude_HEADERS += arch/aarch64/odp/api/abi/hash_crc32.h \
+ arch/aarch64/odp/api/abi/time_cpu.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/aarch64/odp/api/abi/atomic_inlines.h \
+ arch/aarch64/odp/api/abi/atomic.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/aarch64/odp/api/abi/cpu_inlines.h \
+ arch/aarch64/odp/api/abi/cpu.h \
+ arch/aarch64/odp/api/abi/sync_inlines.h \
+ arch/common/odp/api/abi/time_cpu_inlines.h \
+ arch/aarch64/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/aarch64/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/aarch64/odp_atomic.h \
+ arch/aarch64/odp_cpu.h \
+ arch/aarch64/cpu_flags.h \
+ arch/aarch64/odp_random.h \
+ arch/aarch64/odp_wait_until.h
+endif
+if ARCH_IS_DEFAULT
+__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
+ arch/default/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/default/odp_sysinfo_parse.c \
+ arch/default/odp_time.c
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/hash_crc32.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/default/odp/api/abi/cpu_inlines.h \
+ arch/default/odp/api/abi/cpu.h \
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_random.h \
+ arch/default/odp_wait_until.h
+endif
+if ARCH_IS_POWERPC
+__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
+ arch/default/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/powerpc/odp_sysinfo_parse.c \
+ arch/default/odp_time.c
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/hash_crc32.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/default/odp/api/abi/cpu_generic.h \
+ arch/default/odp/api/abi/cpu_inlines.h \
+ arch/powerpc/odp/api/abi/cpu.h \
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_random.h \
+ arch/default/odp_wait_until.h
+endif
+if ARCH_IS_X86
+__LIB__libodp_linux_la_SOURCES += arch/default/odp_atomic.c \
+ arch/x86/cpu_flags.c \
+ arch/x86/odp_cpu_cycles.c \
+ arch/default/odp_hash_crc32.c \
+ arch/default/odp_random.c \
+ arch/x86/odp_sysinfo_parse.c \
+ arch/x86/odp_time_cpu.c \
+ arch/common/odp_time_cpu.c
+odpapiabiarchinclude_HEADERS += arch/x86/odp/api/abi/cpu_rdtsc.h \
+ arch/x86/odp/api/abi/hash_crc32.h \
+ arch/x86/odp/api/abi/time_cpu.h
+if !ODP_ABI_COMPAT
+odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
+ arch/default/odp/api/abi/atomic_inlines.h \
+ arch/x86/odp/api/abi/cpu_inlines.h \
+ arch/x86/odp/api/abi/cpu.h \
+ arch/x86/odp/api/abi/sync_inlines.h \
+ arch/common/odp/api/abi/time_cpu_inlines.h \
+ arch/x86/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
+endif
+noinst_HEADERS += arch/x86/cpu_flags.h \
+ arch/x86/odp_cpu.h \
+ arch/x86/odp_random.h \
+ arch/default/odp_atomic.h \
+ arch/default/odp_cpu.h \
+ arch/default/odp_wait_until.h
+endif
-if HAVE_PCAP
+if ODP_PKTIO_PCAP
__LIB__libodp_linux_la_SOURCES += pktio/pcap.c
endif
-# Create symlink for ABI header files. Application does not need to use the arch
-# specific include path for installed files.
-install-data-hook:
- if [ -h $(prefix)/include/odp/api/abi ]; then \
- : \
- else \
- $(LN_S) -rf $(prefix)/include/odp/arch/@ARCH_ABI@/odp/api/abi \
- $(prefix)/include/odp/api/abi; \
- fi
+__LIB__libodp_linux_la_LIBADD = $(AARCH64CRYPTO_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(ATOMIC_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(OPENSSL_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(LIBCONFIG_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(DPDK_LIBS_LIBODP)
+__LIB__libodp_linux_la_LIBADD += $(PTHREAD_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(TIMER_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(LIBXDP_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(IPSEC_MB_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(ORT_LIBS)
+
+if ODP_PKTIO_PCAP
+__LIB__libodp_linux_la_LIBADD += $(PCAP_LIBS)
+endif
+
+CHECK_GLOBALS_REGEX = " (odp_|_odp_|_deprecated_odp_|miniz_|mz_|tdefl_|tinfl_|mp_hdlr_init_odp_pool_ops)"
+
+TESTS_ENVIRONMENT = \
+ LIBTOOL="$(LIBTOOL)" \
+ NM="$(NM)" \
+ LIB="$(LIB)" \
+ lib_LTLIBRARIES="$(lib_LTLIBRARIES)" \
+ CHECK_GLOBALS_REGEX=$(CHECK_GLOBALS_REGEX)
+
+dist_check_SCRIPTS = check-globals.sh
+
+TESTS = $(dist_check_SCRIPTS)
diff --git a/platform/linux-generic/Makefile.inc b/platform/linux-generic/Makefile.inc
deleted file mode 100644
index 876519bef..000000000
--- a/platform/linux-generic/Makefile.inc
+++ /dev/null
@@ -1,2 +0,0 @@
-AM_CFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH_DIR)
-AM_CXXFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH_DIR)
diff --git a/platform/linux-generic/README b/platform/linux-generic/README
index 3e05dabf2..8fa50c127 100644
--- a/platform/linux-generic/README
+++ b/platform/linux-generic/README
@@ -1,17 +1,83 @@
-Copyright (c) 2014, Linaro Limited
+Copyright (c) 2014-2018, Linaro Limited
+Copyright (c) 2019-2023, Nokia
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
1. Intro
-
-OpenDataPlane implementation for Linux generic. Directory linux-generic contains ODP headers and implementation
-for linux-generic target. This drop does not target high
-performance. It is rather proof of ODP API functionality. It still uses
-linux-generic's SW scheduler.
+ OpenDataPlane API generic Linux implementation. Directory linux-generic
+ contains the header and source files and additional platform test scripts
+ for ODP linux-generic implementation.
2. Build
-# To compile ODP
-./bootstrap
-./configure
-make
+ See DEPENDENCIES file about system requirements and dependencies to external
+ libraries/packages. It contains also more detailed build instructions.
+
+ Generally, ODP is built with these three steps:
+ ./bootstrap
+ ./configure
+ make
+
+3. Configuration file
+ See config/README for application runtime configuration options.
+
+4. Packet I/O
+ When passing a packet I/O device name to odp_pktio_open() one can explicitly
+ specify the used implementation internal pktio type. The pktio type can be
+ selected by adding a pktio type prefix to the device name separated by a
+ colon (<pktio_type>:<if_name>).
+
+ E.g.
+ socket:eth1
+ socket_xdp:eth2
+
+ The supported pktio types are:
+ dpdk
+ ipc
+ loop
+ null
+ pcap
+ socket
+ socket_mmap
+ socket_xdp
+ tap
+
+5. Random data
+ On x86 ODP_RANDOM_TRUE type random data is generated using rdseed [1] via
+ compiler builtin functions. If OpenSSL is not available or its use for
+ generating random data is disabled with the --disable-openssl-rand
+ configure option, ODP_RANDOM_CRYPTO type random data is generated using
+ rdrand [1].
+
+ Note that there may be issues with the quality or security of rdrand and
+ rdseed. [2]
+
+6. Event validation
+ ODP linux-generic implementation supports additional fast path event
+ validity checks which are disabled by default to minimize overhead. These
+ checks can be enabled with --enable-event-validation [abort/warn] or
+ --enabled-debug=full configuration options.
+
+ Event validation adds additional endmark data to ODP buffers and packets,
+ which is used to detect data writes outside allowed areas. Endmarks are
+ checked by the implementation each time application calls one the following
+ API functions:
+ - odp_buffer_free() / odp_buffer_free_multi()
+ - odp_buffer_is_valid()
+ - odp_event_free() / odp_event_free_multi() / odp_event_free_sp()
+ - odp_event_is_valid()
+ - odp_packet_free() / odp_packet_free_multi() / odp_packet_free_sp()
+ - odp_packet_is_valid()
+ - odp_queue_enq() / odp_queue_enq_multi()
+
+ Event validation can function in two modes: abort (default) and warn. In
+ abort mode the application is terminated immediately if an event validity
+ check fails. In warn mode only an error log message is printed.
+
+7. References
+ [1] Intel Digital Random Number Generator (DRNG) Software Implementation
+ Guide. John P Mechalas, 17 October 2018.
+ https://www.intel.com/content/www/us/en/developer/articles/guide/intel-digital-random-number-generator-drng-software-implementation-guide.html
+
+ [2] RDRAND. Wikipedia, 29 September 2021.
+ https://en.wikipedia.org/wiki/RDRAND#Reception
diff --git a/platform/linux-generic/_ishmphy.c b/platform/linux-generic/_ishmphy.c
deleted file mode 100644
index d519af60c..000000000
--- a/platform/linux-generic/_ishmphy.c
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*
- * This file handles the lower end of the ishm memory allocator:
- * It performs the physical mappings.
- */
-#include <odp_posix_extensions.h>
-#include <odp_config_internal.h>
-#include <odp_internal.h>
-#include <odp/api/align.h>
-#include <odp/api/system_info.h>
-#include <odp/api/debug.h>
-#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
-#include <_ishm_internal.h>
-#include <_ishmphy_internal.h>
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <string.h>
-#include <errno.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-#include <_ishmphy_internal.h>
-
-static void *common_va_address;
-static uint64_t common_va_len;
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-/* Book some virtual address space
- * This function is called at odp_init_global() time to pre-book some
- * virtual address space inherited by all odpthreads (i.e. descendant
- * processes and threads) and later used to guarantee the unicity the
- * the mapping VA address when memory is reserver with the _ODP_ISHM_SINGLE_VA
- * flag.
- * returns the address of the mapping or NULL on error.
- */
-void *_odp_ishmphy_book_va(uintptr_t len, intptr_t align)
-{
- void *addr;
-
- addr = mmap(NULL, len + align, PROT_NONE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
- if (addr == MAP_FAILED) {
- ODP_ERR("_ishmphy_book_va failure\n");
- return NULL;
- }
-
- if (mprotect(addr, len, PROT_NONE))
- ODP_ERR("failure for protect\n");
-
- ODP_DBG("VA Reserved: %p, len=%p\n", addr, len + align);
-
- common_va_address = addr;
- common_va_len = len;
-
- /* return the nearest aligned address: */
- return (void *)(((uintptr_t)addr + align - 1) & (-align));
-}
-
-/* Un-book some virtual address space
- * This function is called at odp_term_global() time to unbook
- * the virtual address space booked by _ishmphy_book_va()
- */
-int _odp_ishmphy_unbook_va(void)
-{
- int ret;
-
- ret = munmap(common_va_address, common_va_len);
- if (ret)
- ODP_ERR("_unishmphy_book_va failure\n");
- return ret;
-}
-
-/*
- * do a mapping:
- * Performs a mapping of the provided file descriptor to the process VA
- * space. If the _ODP_ISHM_SINGLE_VA flag is set, 'start' is assumed to be
- * the VA address where the mapping is to be done.
- * If the flag is not set, a new VA address is taken.
- * returns the address of the mapping or NULL on error.
- */
-void *_odp_ishmphy_map(int fd, void *start, uint64_t size,
- int flags)
-{
- void *mapped_addr_tmp, *mapped_addr;
- int mmap_flags = 0;
-
- if (flags & _ODP_ISHM_SINGLE_VA) {
- if (!start) {
- ODP_ERR("failure: missing address\n");
- return NULL;
- }
- /* maps over fragment of reserved VA: */
- /* first, try a normal map. If that works, remap it where it
- * should (on the prereverved space), and remove the initial
- * normal mapping:
- * This is because it turned out that if a mapping fails
- * on a the prereserved virtual address space, then
- * the prereserved address space which was tried to be mapped
- * on becomes available to the kernel again! This was not
- * according to expectations: the assumption was that if a
- * mapping fails, the system should remain unchanged, but this
- * is obvioulsy not true (at least for huge pages when
- * exhausted).
- * So the strategy is to first map at a non reserved place
- * (which can then be freed and returned to the kernel on
- * failure) and peform a new map to the prereserved space on
- * success (which is then guaranteed to work).
- * The initial free maping can then be removed.
- */
- mapped_addr = MAP_FAILED;
- mapped_addr_tmp = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | mmap_flags, fd, 0);
- if (mapped_addr_tmp != MAP_FAILED) {
- /* If OK, do new map at right fixed location... */
- mapped_addr = mmap(start,
- size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED | mmap_flags,
- fd, 0);
- if (mapped_addr != start)
- ODP_ERR("new map failed:%s\n", strerror(errno));
- /* ... and remove initial mapping: */
- if (munmap(mapped_addr_tmp, size))
- ODP_ERR("munmap failed:%s\n", strerror(errno));
- }
- } else {
- /* just do a new mapping in the VA space: */
- mapped_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | mmap_flags, fd, 0);
- if ((mapped_addr >= common_va_address) &&
- ((char *)mapped_addr <
- (char *)common_va_address + common_va_len)) {
- ODP_ERR("VA SPACE OVERLAP!\n");
- }
- }
-
- if (mapped_addr == MAP_FAILED) {
- ODP_ERR("mmap failed:%s\n", strerror(errno));
- return NULL;
- }
-
- /* if locking is requested, lock it...*/
- if (flags & _ODP_ISHM_LOCK) {
- if (mlock(mapped_addr, size)) {
- if (munmap(mapped_addr, size))
- ODP_ERR("munmap failed:%s\n", strerror(errno));
- ODP_ERR("mlock failed:%s\n", strerror(errno));
- return NULL;
- }
- }
- return mapped_addr;
-}
-
-/* free a mapping:
- * If the _ODP_ISHM_SINGLE_VA flag was given at creation time the virtual
- * address range must be returned to the preoallocated "pool". this is
- * done by mapping non accessibly memory there (hence blocking the VA but
- * releasing the physical memory).
- * If the _ODP_ISHM_SINGLE_VA flag was not given, both physical memory and
- * virtual address space are realeased by calling the normal munmap.
- * return 0 on success or -1 on error.
- */
-int _odp_ishmphy_unmap(void *start, uint64_t len, int flags)
-{
- void *addr;
- int ret;
- int mmap_flgs;
-
- mmap_flgs = MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS | MAP_NORESERVE;
-
- /* if locking was requested, unlock...*/
- if (flags & _ODP_ISHM_LOCK)
- munlock(start, len);
-
- if (flags & _ODP_ISHM_SINGLE_VA) {
- /* map unnaccessible memory overwrites previous mapping
- * and free the physical memory, but guarantees to block
- * the VA range from other mappings
- */
- addr = mmap(start, len, PROT_NONE, mmap_flgs, -1, 0);
- if (addr == MAP_FAILED) {
- ODP_ERR("_ishmphy_free failure for ISHM_SINGLE_VA\n");
- return -1;
- }
- if (mprotect(start, len, PROT_NONE))
- ODP_ERR("_ishmphy_free failure for protect\n");
- return 0;
- }
-
- /* just release the mapping */
- ret = munmap(start, len);
- if (ret)
- ODP_ERR("_ishmphy_free failure: %s\n", strerror(errno));
- return ret;
-}
diff --git a/platform/linux-generic/arch/aarch64/cpu_flags.c b/platform/linux-generic/arch/aarch64/cpu_flags.c
new file mode 100644
index 000000000..9923e9306
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/cpu_flags.c
@@ -0,0 +1,1052 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/hints.h>
+
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+
+#include "cpu_flags.h"
+
+#include <asm/hwcap.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/auxv.h>
+
+typedef struct {
+ const char *name;
+ const uint64_t bit_mask;
+} hwcap_feat_flag_t;
+
+/* Linux HWCAP and HWCAP2 flags
+ *
+ * See https://docs.kernel.org/arch/arm64/elf_hwcaps.html for meaning of each flag.
+ */
+static hwcap_feat_flag_t hwcap_flags[] = {
+ {
+ /* Floating-point support for single-precision and double-precision types */
+ .name = "FEAT_FP",
+#ifdef HWCAP_FP
+ .bit_mask = HWCAP_FP,
+#endif
+ },
+
+ {
+ /* Advanced SIMD support for:
+ * - integer byte, halfword, word and doubleword element operations
+ * - single-precision and double-precision floating-point arithmetic */
+ .name = "ASIMD",
+#ifdef HWCAP_ASIMD
+ .bit_mask = HWCAP_ASIMD,
+#endif
+ },
+
+ {
+ /* Generic Timer is configured to generate events at approx. 10KHz */
+ .name = "EVTSTRM",
+#ifdef HWCAP_EVTSTRM
+ .bit_mask = HWCAP_EVTSTRM,
+#endif
+ },
+
+ {
+ /* Advanced SIMD AES Instructions */
+ .name = "FEAT_AES",
+#ifdef HWCAP_AES
+ .bit_mask = HWCAP_AES,
+#endif
+ },
+
+ {
+ /* Advanced SIMD PMULL Instructions */
+ .name = "FEAT_PMULL",
+#ifdef HWCAP_PMULL
+ .bit_mask = HWCAP_PMULL,
+#endif
+ },
+
+ {
+ /* Advanced SIMD SHA1 Instructions */
+ .name = "FEAT_SHA1",
+#ifdef HWCAP_SHA1
+ .bit_mask = HWCAP_SHA1,
+#endif
+ },
+
+ {
+ /* Advanced SIMD SHA256 Instructions */
+ .name = "FEAT_SHA256",
+#ifdef HWCAP_SHA2
+ .bit_mask = HWCAP_SHA2,
+#endif
+ },
+
+ {
+ /* CRC32 Instructions */
+ .name = "FEAT_CRC32",
+#ifdef HWCAP_CRC32
+ .bit_mask = HWCAP_CRC32,
+#endif
+ },
+
+ {
+ /* Large System Extensions */
+ .name = "FEAT_LSE",
+#ifdef HWCAP_ATOMICS
+ .bit_mask = HWCAP_ATOMICS,
+#endif
+ },
+
+ {
+ /* Half-precision Floating-point Data Processing Instructions */
+ .name = "FEAT_FP16",
+#ifdef HWCAP_FPHP
+ .bit_mask = HWCAP_FPHP,
+#endif
+ },
+
+ {
+ /* Advanced SIMD support with half-precision floating-point arithmetic */
+ .name = "ASIMDHP",
+#ifdef HWCAP_ASIMDHP
+ .bit_mask = HWCAP_ASIMDHP,
+#endif
+ },
+
+ {
+ /* Availability of EL0 Access to certain ID Registers */
+ .name = "CPUID",
+#ifdef HWCAP_CPUID
+ .bit_mask = HWCAP_CPUID,
+#endif
+ },
+
+ {
+ /* Rounding Double Multiply Accumulate Extensions */
+ .name = "FEAT_RDM",
+#ifdef HWCAP_ASIMDRDM
+ .bit_mask = HWCAP_ASIMDRDM,
+#endif
+ },
+
+ {
+ /* JavaScript FJCVTS Conversion Instructions */
+ .name = "FEAT_JSCVT",
+#ifdef HWCAP_JSCVT
+ .bit_mask = HWCAP_JSCVT,
+#endif
+ },
+
+ {
+ /* Floating-point FCMLA and FCADD Instructions */
+ .name = "FEAT_FCMA",
+#ifdef HWCAP_FCMA
+ .bit_mask = HWCAP_FCMA,
+#endif
+ },
+
+ {
+ /* Load-acquire RCpc Instructions */
+ .name = "FEAT_LRCPC",
+#ifdef HWCAP_LRCPC
+ .bit_mask = HWCAP_LRCPC,
+#endif
+ },
+
+ {
+ /* DC CVAP Instructions */
+ .name = "FEAT_DPB",
+#ifdef HWCAP_DCPOP
+ .bit_mask = HWCAP_DCPOP,
+#endif
+ },
+
+ {
+ /* Advanced SIMD EOR3, RAX1, XAR, and BCAX Instructions */
+ .name = "FEAT_SHA3",
+#ifdef HWCAP_SHA3
+ .bit_mask = HWCAP_SHA3,
+#endif
+ },
+
+ {
+ /* Advanced SIMD SM3 Instructions */
+ .name = "FEAT_SM3",
+#ifdef HWCAP_SM3
+ .bit_mask = HWCAP_SM3,
+#endif
+ },
+
+ {
+ /* Advanced SIMD SM4 Instructions */
+ .name = "FEAT_SM4",
+#ifdef HWCAP_SM4
+ .bit_mask = HWCAP_SM4,
+#endif
+ },
+
+ {
+ /* Advanced SIMD Int8 Dot Product Instructions */
+ .name = "FEAT_DotProd",
+#ifdef HWCAP_ASIMDDP
+ .bit_mask = HWCAP_ASIMDDP,
+#endif
+ },
+
+ {
+ /* Advanced SIMD SHA512 Instructions */
+ .name = "FEAT_SHA512",
+#ifdef HWCAP_SHA512
+ .bit_mask = HWCAP_SHA512,
+#endif
+ },
+
+ {
+ /* Scalable Vector Extensions */
+ .name = "FEAT_SVE",
+#ifdef HWCAP_SVE
+ .bit_mask = HWCAP_SVE,
+#endif
+ },
+
+ {
+ /* Half-precision Floating-point FMLAL Instructions */
+ .name = "FEAT_FHM",
+#ifdef HWCAP_ASIMDFHM
+ .bit_mask = HWCAP_ASIMDFHM,
+#endif
+ },
+
+ {
+ /* Data Independent Timing Instructions */
+ .name = "FEAT_DIT",
+#ifdef HWCAP_DIT
+ .bit_mask = HWCAP_DIT,
+#endif
+ },
+
+ {
+ /* Large System Extensions Version 2 */
+ .name = "FEAT_LSE2",
+#ifdef HWCAP_USCAT
+ .bit_mask = HWCAP_USCAT,
+#endif
+ },
+
+ {
+ /* Load-acquire RCpc Instructions Version 2 */
+ .name = "FEAT_LRCPC2",
+#ifdef HWCAP_ILRCPC
+ .bit_mask = HWCAP_ILRCPC,
+#endif
+ },
+
+ {
+ /* Condition Flag Manipulation Extensions */
+ .name = "FEAT_FlagM",
+#ifdef HWCAP_FLAGM
+ .bit_mask = HWCAP_FLAGM,
+#endif
+ },
+
+ {
+ /* Speculative Store Bypass Safe Instructions */
+ .name = "FEAT_SSBS2",
+#ifdef HWCAP_SSBS
+ .bit_mask = HWCAP_SSBS,
+#endif
+ },
+
+ {
+ /* Speculation Barrier Instructions */
+ .name = "FEAT_SB",
+#ifdef HWCAP_SB
+ .bit_mask = HWCAP_SB,
+#endif
+ },
+
+ {
+ /* Pointer Authentication Extensions */
+ .name = "FEAT_PAuth",
+#ifdef HWCAP_PACA
+ .bit_mask = HWCAP_PACA,
+#endif
+ },
+
+ {
+ /* Generic Authentication Extensions */
+ .name = "PACG",
+#ifdef HWCAP_PACG
+ .bit_mask = HWCAP_PACG,
+#endif
+ }
+};
+
+static hwcap_feat_flag_t hwcap2_flags[] = {
+ {
+ /* DC CVADP instructions */
+ .name = "FEAT_DPB2",
+#ifdef HWCAP2_DCPODP
+ .bit_mask = HWCAP2_DCPODP,
+#endif
+ },
+
+ {
+ /* Scalable Vector Extensions Version 2 */
+ .name = "FEAT_SVE2",
+#ifdef HWCAP2_SVE2
+ .bit_mask = HWCAP2_SVE2,
+#endif
+ },
+
+ {
+ /* SVE AES Instructions */
+ .name = "FEAT_SVE_AES",
+#ifdef HWCAP2_SVEAES
+ .bit_mask = HWCAP2_SVEAES,
+#endif
+ },
+
+ {
+ /* SVE PMULL Instructions */
+ .name = "FEAT_SVE_PMULL128",
+#ifdef HWCAP2_SVEPMULL
+ .bit_mask = HWCAP2_SVEPMULL,
+#endif
+ },
+
+ {
+ /* SVE Bit Permute Instructions */
+ .name = "FEAT_SVE_BitPerm",
+#ifdef HWCAP2_SVEBITPERM
+ .bit_mask = HWCAP2_SVEBITPERM,
+#endif
+ },
+
+ {
+ /* SVE SHA-3 Instructions */
+ .name = "FEAT_SVE_SHA3",
+#ifdef HWCAP2_SVESHA3
+ .bit_mask = HWCAP2_SVESHA3,
+#endif
+ },
+
+ {
+ /* SVE SM4 Instructions */
+ .name = "FEAT_SVE_SM4",
+#ifdef HWCAP2_SVESM4
+ .bit_mask = HWCAP2_SVESM4,
+#endif
+ },
+
+ {
+ /* Condition Flag Manipulation Extensions Version 2 */
+ .name = "FEAT_FlagM2",
+#ifdef HWCAP2_FLAGM2
+ .bit_mask = HWCAP2_FLAGM2,
+#endif
+ },
+
+ {
+ /* FRINT32Z, FRINT32X, FRINT64Z, and FRINT64X instructions */
+ .name = "FEAT_FRINTTS",
+#ifdef HWCAP2_FRINT
+ .bit_mask = HWCAP2_FRINT,
+#endif
+ },
+
+ {
+ /* SVE Int8 Matrix Multiplication Instructions */
+ .name = "SVEI8MM",
+#ifdef HWCAP2_SVEI8MM
+ .bit_mask = HWCAP2_SVEI8MM,
+#endif
+ },
+
+ {
+ /* SVE Single-precision Floating-point Matrix Multiply Instructions */
+ .name = "FEAT_F32MM",
+#ifdef HWCAP2_SVEF32MM
+ .bit_mask = HWCAP2_SVEF32MM,
+#endif
+ },
+
+ {
+ /* SVE Double-precision Floating-point Matrix Multiply Instructions */
+ .name = "FEAT_F64MM",
+#ifdef HWCAP2_SVEF64MM
+ .bit_mask = HWCAP2_SVEF64MM,
+#endif
+ },
+
+ {
+ /* SVE BFloat16 Instructions */
+ .name = "SVEBF16",
+#ifdef HWCAP2_SVEBF16
+ .bit_mask = HWCAP2_SVEBF16,
+#endif
+ },
+
+ {
+ /* Advanced SIMD and Floating-point Int8 Matrix Multiplication Instructions */
+ .name = "FEAT_I8MM",
+#ifdef HWCAP2_I8MM
+ .bit_mask = HWCAP2_I8MM,
+#endif
+ },
+
+ {
+ /* Advanced SIMD and Floating-point BFloat16 Instructions */
+ .name = "FEAT_BF16",
+#ifdef HWCAP2_BF16
+ .bit_mask = HWCAP2_BF16,
+#endif
+ },
+
+ {
+ /* Data Gathering Hint Extensions */
+ .name = "FEAT_DGH",
+#ifdef HWCAP2_DGH
+ .bit_mask = HWCAP2_DGH,
+#endif
+ },
+
+ {
+ /* Random Number Generation Extensions */
+ .name = "FEAT_RNG",
+#ifdef HWCAP2_RNG
+ .bit_mask = HWCAP2_RNG,
+#endif
+ },
+
+ {
+ /* Branch Target Identification Extensions */
+ .name = "FEAT_BTI",
+#ifdef HWCAP2_BTI
+ .bit_mask = HWCAP2_BTI,
+#endif
+ },
+
+ {
+ /* Full Memory Tagging Extensions */
+ .name = "FEAT_MTE2",
+#ifdef HWCAP2_MTE
+ .bit_mask = HWCAP2_MTE,
+#endif
+ },
+
+ {
+ .name = "ECV",
+#ifdef HWCAP2_ECV
+ .bit_mask = HWCAP2_ECV,
+#endif
+ },
+
+ {
+ .name = "AFP",
+#ifdef HWCAP2_AFP
+ .bit_mask = HWCAP2_AFP,
+#endif
+ },
+
+ {
+ .name = "RPRES",
+#ifdef HWCAP2_RPRES
+ .bit_mask = HWCAP2_RPRES,
+#endif
+ },
+
+ {
+ .name = "MTE3",
+#ifdef HWCAP2_MTE3
+ .bit_mask = HWCAP2_MTE3,
+#endif
+ },
+
+ {
+ .name = "SME",
+#ifdef HWCAP2_SME
+ .bit_mask = HWCAP2_SME,
+#endif
+ },
+
+ {
+ .name = "SME_I16I64",
+#ifdef HWCAP2_SME_I16I64
+ .bit_mask = HWCAP2_SME_I16I64,
+#endif
+ },
+
+ {
+ .name = "SME_F64F64",
+#ifdef HWCAP2_SME_F64F64
+ .bit_mask = HWCAP2_SME_F64F64,
+#endif
+ },
+
+ {
+ .name = "SME_I8I32",
+#ifdef HWCAP2_SME_I8I32
+ .bit_mask = HWCAP2_SME_I8I32,
+#endif
+ },
+
+ {
+ .name = "SME_F16F32",
+#ifdef HWCAP2_SME_F16F32
+ .bit_mask = HWCAP2_SME_F16F32,
+#endif
+ },
+
+ {
+ .name = "SME_B16F32",
+#ifdef HWCAP2_SME_B16F32
+ .bit_mask = HWCAP2_SME_B16F32,
+#endif
+ },
+
+ {
+ .name = "SME_F32F32",
+#ifdef HWCAP2_SME_F32F32
+ .bit_mask = HWCAP2_SME_F32F32,
+#endif
+ },
+
+ {
+ .name = "SME_FA64",
+#ifdef HWCAP2_SME_FA64
+ .bit_mask = HWCAP2_SME_FA64,
+#endif
+ },
+
+ {
+ .name = "WFXT",
+#ifdef HWCAP2_WFXT
+ .bit_mask = HWCAP2_WFXT,
+#endif
+ },
+
+ {
+ .name = "EBF16",
+#ifdef HWCAP2_EBF16
+ .bit_mask = HWCAP2_EBF16,
+#endif
+ },
+
+ {
+ .name = "SVE_EBF16",
+#ifdef HWCAP2_SVE_EBF16
+ .bit_mask = HWCAP2_SVE_EBF16,
+#endif
+ },
+
+ {
+ .name = "CSSC",
+#ifdef HWCAP2_CSSC
+ .bit_mask = HWCAP2_CSSC,
+#endif
+ },
+
+ {
+ .name = "RPRFM",
+#ifdef HWCAP2_RPRFM
+ .bit_mask = HWCAP2_RPRFM,
+#endif
+ },
+
+ {
+ .name = "SVE2P1",
+#ifdef HWCAP2_SVE2P1
+ .bit_mask = HWCAP2_SVE2P1,
+#endif
+ },
+
+ {
+ .name = "SME2",
+#ifdef HWCAP2_SME2
+ .bit_mask = HWCAP2_SME2,
+#endif
+ },
+
+ {
+ .name = "SME2P1",
+#ifdef HWCAP2_SME2P1
+ .bit_mask = HWCAP2_SME2P1,
+#endif
+ },
+
+ {
+ .name = "SME_I16I32",
+#ifdef HWCAP2_SME_I16I32
+ .bit_mask = HWCAP2_SME_I16I32,
+#endif
+ },
+
+ {
+ .name = "SME_BI32I32",
+#ifdef HWCAP2_SME_BI32I32
+ .bit_mask = HWCAP2_SME_BI32I32,
+#endif
+ },
+
+ {
+ .name = "SME_B16B16",
+#ifdef HWCAP2_SME_B16B16
+ .bit_mask = HWCAP2_SME_B16B16,
+#endif
+ },
+
+ {
+ .name = "SME_F16F16",
+#ifdef HWCAP2_SME_F16F16
+ .bit_mask = HWCAP2_SME_F16F16,
+#endif
+ },
+
+ {
+ .name = "MOPS",
+#ifdef HWCAP2_MOPS
+ .bit_mask = HWCAP2_MOPS,
+#endif
+ },
+};
+
+static void _odp_sys_info_print_acle_flags(void)
+{
+ const char *ndef = "n/a";
+
+ /* Avoid compiler warning about unused variable */
+ (void)ndef;
+
+ /* See ARM C Language Extensions documentation for details */
+ _ODP_PRINT("ARM FEATURES:\n");
+
+ _ODP_PRINT(" __ARM_ALIGN_MAX_PWR ");
+#ifdef __ARM_ALIGN_MAX_PWR
+ _ODP_PRINT("%i\n", __ARM_ALIGN_MAX_PWR);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_ALIGN_MAX_STACK_PWR ");
+#ifdef __ARM_ALIGN_MAX_STACK_PWR
+ _ODP_PRINT("%i\n", __ARM_ALIGN_MAX_STACK_PWR);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_ARCH ");
+#ifdef __ARM_ARCH
+ _ODP_PRINT("%i\n", __ARM_ARCH);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_ARCH_ISA_A64 ");
+#ifdef __ARM_ARCH_ISA_A64
+ _ODP_PRINT("%i\n", __ARM_ARCH_ISA_A64);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_BIG_ENDIAN ");
+#ifdef __ARM_BIG_ENDIAN
+ _ODP_PRINT("%i\n", __ARM_BIG_ENDIAN);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_BF16_FORMAT_ALTERNATIVE ");
+#ifdef __ARM_BF16_FORMAT_ALTERNATIVE
+ _ODP_PRINT("%i\n", __ARM_BF16_FORMAT_ALTERNATIVE);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_ATOMICS ");
+#ifdef __ARM_FEATURE_ATOMICS
+ _ODP_PRINT("%i\n", __ARM_FEATURE_ATOMICS);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_BF16 ");
+#ifdef __ARM_FEATURE_BF16
+ _ODP_PRINT("%i\n", __ARM_FEATURE_BF16);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_BTI_DEFAULT ");
+#ifdef __ARM_FEATURE_BTI_DEFAULT
+ _ODP_PRINT("%i\n", __ARM_FEATURE_BTI_DEFAULT);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_CDE ");
+#ifdef __ARM_FEATURE_CDE
+ _ODP_PRINT("%i\n", __ARM_FEATURE_CDE);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_CDE_COPROC ");
+#ifdef __ARM_FEATURE_CDE_COPROC
+ _ODP_PRINT("0x%X\n", __ARM_FEATURE_CDE_COPROC);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_CLZ ");
+#ifdef __ARM_FEATURE_CLZ
+ _ODP_PRINT("%i\n", __ARM_FEATURE_CLZ);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_COMPLEX ");
+#ifdef __ARM_FEATURE_COMPLEX
+ _ODP_PRINT("%i\n", __ARM_FEATURE_COMPLEX);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_COPROC ");
+#ifdef __ARM_FEATURE_COPROC
+ _ODP_PRINT("0x%X\n", __ARM_FEATURE_COPROC);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_CRC32 ");
+#ifdef __ARM_FEATURE_CRC32
+ _ODP_PRINT("%i\n", __ARM_FEATURE_CRC32);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_CRYPTO ");
+#ifdef __ARM_FEATURE_CRYPTO
+ _ODP_PRINT("%i\n", __ARM_FEATURE_CRYPTO);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_DIRECTED_ROUNDING ");
+#ifdef __ARM_FEATURE_DIRECTED_ROUNDING
+ _ODP_PRINT("%i\n", __ARM_FEATURE_DIRECTED_ROUNDING);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_DOTPROD ");
+#ifdef __ARM_FEATURE_DOTPROD
+ _ODP_PRINT("%i\n", __ARM_FEATURE_DOTPROD);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_FMA ");
+#ifdef __ARM_FEATURE_FMA
+ _ODP_PRINT("%i\n", __ARM_FEATURE_FMA);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_FP16_FML ");
+#ifdef __ARM_FEATURE_FP16_FML
+ _ODP_PRINT("%i\n", __ARM_FEATURE_FP16_FML);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_FRINT ");
+#ifdef __ARM_FEATURE_FRINT
+ _ODP_PRINT("%i\n", __ARM_FEATURE_FRINT);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_IDIV ");
+#ifdef __ARM_FEATURE_IDIV
+ _ODP_PRINT("%i\n", __ARM_FEATURE_IDIV);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_JCVT ");
+#ifdef __ARM_FEATURE_JCVT
+ _ODP_PRINT("%i\n", __ARM_FEATURE_JCVT);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_MATMUL_INT8 ");
+#ifdef __ARM_FEATURE_MATMUL_INT8
+ _ODP_PRINT("%i\n", __ARM_FEATURE_MATMUL_INT8);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_MEMORY_TAGGING ");
+#ifdef __ARM_FEATURE_MEMORY_TAGGING
+ _ODP_PRINT("%i\n", __ARM_FEATURE_MEMORY_TAGGING);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_MVE ");
+#ifdef __ARM_FEATURE_MVE
+ _ODP_PRINT("0x%X\n", __ARM_FEATURE_MVE);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_NUMERIC_MAXMIN ");
+#ifdef __ARM_FEATURE_NUMERIC_MAXMIN
+ _ODP_PRINT("%i\n", __ARM_FEATURE_NUMERIC_MAXMIN);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_PAC_DEFAULT ");
+#ifdef __ARM_FEATURE_PAC_DEFAULT
+ _ODP_PRINT("0x%X\n", __ARM_FEATURE_PAC_DEFAULT);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_QRDMX ");
+#ifdef __ARM_FEATURE_QRDMX
+ _ODP_PRINT("%i\n", __ARM_FEATURE_QRDMX);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_RNG ");
+#ifdef __ARM_FEATURE_RNG
+ _ODP_PRINT("%i\n", __ARM_FEATURE_RNG);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_SHA3 ");
+#ifdef __ARM_FEATURE_SHA3
+ _ODP_PRINT("%i\n", __ARM_FEATURE_SHA3);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_SHA512 ");
+#ifdef __ARM_FEATURE_SHA512
+ _ODP_PRINT("%i\n", __ARM_FEATURE_SHA512);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_SM3 ");
+#ifdef __ARM_FEATURE_SM3
+ _ODP_PRINT("%i\n", __ARM_FEATURE_SM3);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_SM4 ");
+#ifdef __ARM_FEATURE_SM4
+ _ODP_PRINT("%i\n", __ARM_FEATURE_SM4);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_TME ");
+#ifdef __ARM_FEATURE_TME
+ _ODP_PRINT("%i\n", __ARM_FEATURE_TME);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FEATURE_UNALIGNED ");
+#ifdef __ARM_FEATURE_UNALIGNED
+ _ODP_PRINT("%i\n", __ARM_FEATURE_UNALIGNED);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FP ");
+#ifdef __ARM_FP
+ _ODP_PRINT("0x%X\n", __ARM_FP);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FP_FAST ");
+#ifdef __ARM_FP_FAST
+ _ODP_PRINT("%i\n", __ARM_FP_FAST);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FP_FENV_ROUNDING ");
+#ifdef __ARM_FP_FENV_ROUNDING
+ _ODP_PRINT("%i\n", __ARM_FP_FENV_ROUNDING);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FP16_ARGS ");
+#ifdef __ARM_FP16_ARGS
+ _ODP_PRINT("%i\n", __ARM_FP16_ARGS);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FP16_FORMAT_ALTERNATIVE ");
+#ifdef __ARM_FP16_FORMAT_ALTERNATIVE
+ _ODP_PRINT("%i\n", __ARM_FP16_FORMAT_ALTERNATIVE);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_FP16_FORMAT_IEEE ");
+#ifdef __ARM_FP16_FORMAT_IEEE
+ _ODP_PRINT("%i\n", __ARM_FP16_FORMAT_IEEE);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_NEON ");
+#ifdef __ARM_NEON
+ _ODP_PRINT("%i\n", __ARM_NEON);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_NEON_FP ");
+#ifdef __ARM_NEON_FP
+ _ODP_PRINT("0x%X\n", __ARM_NEON_FP);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_PCS_AAPCS64 ");
+#ifdef __ARM_PCS_AAPCS64
+ _ODP_PRINT("%i\n", __ARM_PCS_AAPCS64);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_ROPI ");
+#ifdef __ARM_ROPI
+ _ODP_PRINT("%i\n", __ARM_ROPI);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_RWPI ");
+#ifdef __ARM_RWPI
+ _ODP_PRINT("%i\n", __ARM_RWPI);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_SIZEOF_MINIMAL_ENUM ");
+#ifdef __ARM_SIZEOF_MINIMAL_ENUM
+ _ODP_PRINT("%i\n", __ARM_SIZEOF_MINIMAL_ENUM);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" __ARM_SIZEOF_WCHAR_T ");
+#ifdef __ARM_SIZEOF_WCHAR_T
+ _ODP_PRINT("%i\n", __ARM_SIZEOF_WCHAR_T);
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT(" ARM ISA version: ");
+#if defined(__ARM_ARCH)
+ if (__ARM_ARCH < 8) {
+ _ODP_PRINT("v%i\n", __ARM_ARCH);
+ } else if (__ARM_ARCH == 8) {
+ /* Actually, this checks for new NEON instructions in
+ * v8.1, but is currently the only way to distinguish
+ * v8.0 and >=v8.1. */
+ #ifdef __ARM_FEATURE_QRDMX
+ _ODP_PRINT("v8.1 or higher\n");
+ #else
+ _ODP_PRINT("v8.0\n");
+ #endif
+ } else {
+ /* ACLE 2018 defines that from v8.1 onwards the value includes
+ * the minor version number: __ARM_ARCH = X * 100 + Y
+ * E.g. for Armv8.1 __ARM_ARCH = 801 */
+ int major = __ARM_ARCH / 100;
+ int minor = __ARM_ARCH - (major * 100);
+
+ _ODP_PRINT("v%i.%i\n", major, minor);
+ }
+#else
+ _ODP_PRINT("%s\n", ndef);
+#endif
+
+ _ODP_PRINT("\n");
+}
+
+static void _odp_sys_info_print_hwcap_flags(void)
+{
+ uint64_t hwcap, hwcap2;
+ uint32_t size, size2, i;
+
+ hwcap = getauxval(AT_HWCAP);
+ hwcap2 = getauxval(AT_HWCAP2);
+ size = _ODP_ARRAY_SIZE(hwcap_flags);
+ size2 = _ODP_ARRAY_SIZE(hwcap2_flags);
+
+ _ODP_PRINT("ARM FEATURES SUPPORTED BY HARDWARE:\n");
+
+ /* Supported HWCAP flags */
+ for (i = 0; i < size; i++)
+ if (hwcap & hwcap_flags[i].bit_mask)
+ _ODP_PRINT("%s ", hwcap_flags[i].name);
+
+ /* Supported HWCAP2 flags */
+ for (i = 0; i < size2; i++)
+ if (hwcap2 & hwcap2_flags[i].bit_mask)
+ _ODP_PRINT("%s ", hwcap2_flags[i].name);
+
+ _ODP_PRINT("\n\nARM FEATURES NOT SUPPORTED BY HARDWARE:\n");
+
+ /* Unsupported HWCAP flags */
+ for (i = 0; i < size; i++)
+ if (hwcap_flags[i].bit_mask && (hwcap & hwcap_flags[i].bit_mask) == 0)
+ _ODP_PRINT("%s ", hwcap_flags[i].name);
+
+ /* Unsupported HWCAP2 flags */
+ for (i = 0; i < size2; i++)
+ if (hwcap2_flags[i].bit_mask && (hwcap2 & hwcap2_flags[i].bit_mask) == 0)
+ _ODP_PRINT("%s ", hwcap2_flags[i].name);
+
+ _ODP_PRINT("\n\nARM FEATURES UNKNOWN TO LINUX VERSION:\n");
+ /* Unknown HWCAP flags */
+ for (i = 0; i < size; i++)
+ if (hwcap_flags[i].bit_mask == 0)
+ _ODP_PRINT("%s ", hwcap_flags[i].name);
+
+ /* Unknown HWCAP2 flags */
+ for (i = 0; i < size2; i++)
+ if (hwcap2_flags[i].bit_mask == 0)
+ _ODP_PRINT("%s ", hwcap2_flags[i].name);
+
+ _ODP_PRINT("\n\n");
+}
+
+void _odp_cpu_flags_print_all(void)
+{
+ _odp_sys_info_print_acle_flags();
+ _odp_sys_info_print_hwcap_flags();
+}
diff --git a/platform/linux-generic/arch/aarch64/cpu_flags.h b/platform/linux-generic/arch/aarch64/cpu_flags.h
new file mode 100644
index 000000000..177b1c44f
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/cpu_flags.h
@@ -0,0 +1,20 @@
+/* Copyright (c) 2021, ARM Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_FLAGS_H_
+#define ODP_PLAT_CPU_FLAGS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void _odp_cpu_flags_print_all(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/atomic.h b/platform/linux-generic/arch/aarch64/odp/api/abi/atomic.h
new file mode 100644
index 000000000..14cca3ca0
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/atomic.h
@@ -0,0 +1,12 @@
+/* Copyright (c) 2021, ARM Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifdef __ARM_FEATURE_ATOMICS
+#define _ODP_LOCK_FREE_128BIT_ATOMICS
+#endif
+
+#include <odp/api/abi-default/atomic.h>
+#include <odp/api/plat/atomic_inlines.h>
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h b/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h
new file mode 100644
index 000000000..3b0f94efe
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h
@@ -0,0 +1,278 @@
+/* Copyright (c) 2021, ARM Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_ATOMIC_INLINES_H_
+#define ODP_API_ABI_ATOMIC_INLINES_H_
+
+#include <odp/api/atomic.h>
+
+#ifdef _ODP_LOCK_FREE_128BIT_ATOMICS
+
+/**
+ * @internal
+ * Helper macro for lockless atomic CAS operations on 128-bit integers
+ * @param[in,out] atom Pointer to the 128-bit atomic variable
+ * @param oper CAS operation
+ * @param old_val Old value
+ * @param new_val New value to be swapped
+ * @return 1 for success and 0 for fail
+ */
+#define ATOMIC_CAS_OP_128(atom, oper, old_val, new_val, val) \
+__extension__ ({ \
+ odp_u128_t _val; \
+ odp_atomic_u128_t *_atom = atom; \
+ odp_u128_t *_old_val = old_val; \
+ odp_u128_t _new_val = new_val; \
+ odp_u128_t *ptr = (odp_u128_t *)(_atom); \
+ register uint64_t old0 __asm__ ("x0"); \
+ register uint64_t old1 __asm__ ("x1"); \
+ register uint64_t new0 __asm__ ("x2"); \
+ register uint64_t new1 __asm__ ("x3"); \
+ old0 = (uint64_t)(_old_val)->u64[0]; \
+ old1 = (uint64_t)(_old_val)->u64[1]; \
+ new0 = (uint64_t)(_new_val).u64[0]; \
+ new1 = (uint64_t)(_new_val).u64[1]; \
+ __asm__ volatile(oper " %[old0], %[old1], %[new0], %[new1], [%[ptr]]" \
+ : [old0] "+r" (old0), [old1] "+r" (old1) \
+ : [new0] "r" (new0), [new1] "r" (new1), \
+ [ptr] "r" (ptr) \
+ : "memory"); \
+ _val.u64[0] = old0; \
+ _val.u64[1] = old1; \
+ val = _val; \
+})
+
+#define ATOMIC_CAS_OP_128_NO_ORDER(atom, old_value, new_value, val) \
+ ATOMIC_CAS_OP_128(atom, "casp", old_value, new_value, val)
+
+#define ATOMIC_CAS_OP_128_ACQ(atom, old_value, new_value, val) \
+ ATOMIC_CAS_OP_128(atom, "caspa", old_value, new_value, val)
+
+#define ATOMIC_CAS_OP_128_REL(atom, old_value, new_value, val) \
+ ATOMIC_CAS_OP_128(atom, "caspl", old_value, new_value, val)
+
+#define ATOMIC_CAS_OP_128_ACQ_REL(atom, old_value, new_value, val) \
+ ATOMIC_CAS_OP_128(atom, "caspal", old_value, new_value, val)
+
+/* Non-atomic initialization; must not race with concurrent accesses to 'atom' */
+static inline void _odp_atomic_init_u128(odp_atomic_u128_t *atom, odp_u128_t new_val)
+{
+	atom->v = new_val;
+}
+
+/* Atomic 128-bit load implemented with CASP: a CAS whose expected value
+ * does not match writes nothing and returns the current value, and a
+ * matching CAS writes back the same value — either way the read is atomic. */
+static inline odp_u128_t _odp_atomic_load_u128(odp_atomic_u128_t *atom)
+{
+	odp_u128_t val, exp;
+
+	exp.u64[0] = 0;
+	exp.u64[1] = 0;
+	ATOMIC_CAS_OP_128_NO_ORDER(atom, &exp, exp, val);
+	return val;
+}
+
+/* Atomic 128-bit store implemented as a CAS retry loop (relaxed ordering) */
+static inline void _odp_atomic_store_u128(odp_atomic_u128_t *atom, odp_u128_t new_val)
+{
+	odp_u128_t old, val;
+
+	/* Possibly torn read; only used as the initial CAS expectation,
+	 * the loop below corrects it from the atomic CAS result. */
+	old = atom->v;
+
+	while (1) {
+		ATOMIC_CAS_OP_128_NO_ORDER(atom, &old, new_val, val);
+
+		/* CAS succeeded when the returned value equals the expected one */
+		if ((val.u64[0] == old.u64[0]) && (val.u64[1] == old.u64[1]))
+			return;
+
+		old = val;
+	}
+}
+
+/* 128-bit compare-and-swap, relaxed ordering.
+ * Returns 1 on success; on failure returns 0 and updates *old_val with
+ * the value currently stored in 'atom'. */
+static inline int _odp_atomic_cas_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+				       odp_u128_t new_val)
+{
+	odp_u128_t val;
+
+	ATOMIC_CAS_OP_128_NO_ORDER(atom, old_val, new_val, val);
+
+	if ((val.u64[0] == old_val->u64[0]) && (val.u64[1] == old_val->u64[1]))
+		return 1;
+
+	old_val->u64[0] = val.u64[0];
+	old_val->u64[1] = val.u64[1];
+
+	return 0;
+}
+
+/* As _odp_atomic_cas_u128() but using CASPA (acquire ordering) */
+static inline int _odp_atomic_cas_acq_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+					   odp_u128_t new_val)
+{
+	odp_u128_t val;
+
+	ATOMIC_CAS_OP_128_ACQ(atom, old_val, new_val, val);
+
+	if ((val.u64[0] == old_val->u64[0]) && (val.u64[1] == old_val->u64[1]))
+		return 1;
+
+	old_val->u64[0] = val.u64[0];
+	old_val->u64[1] = val.u64[1];
+
+	return 0;
+}
+
+/* As _odp_atomic_cas_u128() but using CASPL (release ordering) */
+static inline int _odp_atomic_cas_rel_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+					   odp_u128_t new_val)
+{
+	odp_u128_t val;
+
+	ATOMIC_CAS_OP_128_REL(atom, old_val, new_val, val);
+
+	if ((val.u64[0] == old_val->u64[0]) && (val.u64[1] == old_val->u64[1]))
+		return 1;
+
+	old_val->u64[0] = val.u64[0];
+	old_val->u64[1] = val.u64[1];
+
+	return 0;
+}
+
+/* As _odp_atomic_cas_u128() but using CASPAL (acquire + release ordering) */
+static inline int _odp_atomic_cas_acq_rel_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+					       odp_u128_t new_val)
+{
+	odp_u128_t val;
+
+	ATOMIC_CAS_OP_128_ACQ_REL(atom, old_val, new_val, val);
+
+	if ((val.u64[0] == old_val->u64[0]) && (val.u64[1] == old_val->u64[1]))
+		return 1;
+
+	old_val->u64[0] = val.u64[0];
+	old_val->u64[1] = val.u64[1];
+
+	return 0;
+}
+
+/*
+ * Relaxed read-modify-write operations using the Armv8.1-a LSE "store"
+ * forms (STADD/STUMAX/STUMIN), which discard the old value and carry no
+ * ordering — hence no "memory" clobber. Subtraction is implemented as
+ * addition of the (two's-complement) negated value.
+ */
+static inline void _odp_atomic_add_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+	__asm__ volatile("stadd %w[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val));
+}
+
+static inline void _odp_atomic_sub_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+	int32_t neg_val = (int32_t)-val;
+
+	__asm__ volatile("stadd %w[neg_val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [neg_val] "r" (neg_val));
+}
+
+static inline void _odp_atomic_inc_u32(odp_atomic_u32_t *atom)
+{
+	_odp_atomic_add_u32(atom, 1);
+}
+
+static inline void _odp_atomic_dec_u32(odp_atomic_u32_t *atom)
+{
+	_odp_atomic_sub_u32(atom, 1);
+}
+
+static inline void _odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+	__asm__ volatile("stadd %[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val));
+}
+
+static inline void _odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+	int64_t neg_val = (int64_t)-val;
+
+	__asm__ volatile("stadd %[neg_val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [neg_val] "r" (neg_val));
+}
+
+static inline void _odp_atomic_inc_u64(odp_atomic_u64_t *atom)
+{
+	_odp_atomic_add_u64(atom, 1);
+}
+
+static inline void _odp_atomic_dec_u64(odp_atomic_u64_t *atom)
+{
+	_odp_atomic_sub_u64(atom, 1);
+}
+
+/* Atomic unsigned maximum: atom->v = max(atom->v, val) */
+static inline void _odp_atomic_max_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+	__asm__ volatile("stumax %w[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val));
+}
+
+/* Atomic unsigned minimum: atom->v = min(atom->v, val) */
+static inline void _odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+	__asm__ volatile("stumin %w[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val));
+}
+
+static inline void _odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+	__asm__ volatile("stumax %[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val));
+}
+
+static inline void _odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+	__asm__ volatile("stumin %[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val));
+}
+
+/*
+ * Release-ordered add/sub using the STADDL form (the "L" suffix provides
+ * release semantics), so a "memory" clobber is required to stop the
+ * compiler reordering prior accesses past the operation.
+ */
+static inline void _odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+	__asm__ volatile("staddl %w[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val)
+			 : "memory");
+}
+
+static inline void _odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+	int32_t neg_val = (int32_t)-val;
+
+	__asm__ volatile("staddl %w[neg_val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [neg_val] "r" (neg_val)
+			 : "memory");
+}
+
+static inline void _odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+	__asm__ volatile("staddl %[val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [val] "r" (val)
+			 : "memory");
+}
+
+static inline void _odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+	int64_t neg_val = (int64_t)-val;
+
+	__asm__ volatile("staddl %[neg_val], %[atom]"
+			 : [atom] "+Q" (atom->v)
+			 : [neg_val] "r" (neg_val)
+			 : "memory");
+}
+
+#else /* !_ODP_LOCK_FREE_128BIT_ATOMICS */
+
+/* Use generic implementation */
+#include <odp/api/abi/atomic_generic.h>
+
+#endif
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/cpu.h b/platform/linux-generic/arch/aarch64/odp/api/abi/cpu.h
new file mode 100644
index 000000000..825ff19d4
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/cpu.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/autoheader_external.h>
+
+#ifndef ODP_CACHE_LINE_SIZE
+ #define ODP_CACHE_LINE_SIZE _ODP_CACHE_LINE_SIZE
+#endif
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/cpu_inlines.h b/platform/linux-generic/arch/aarch64/odp/api/abi/cpu_inlines.h
new file mode 100644
index 000000000..a26908e66
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/cpu_inlines.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ARCH_CPU_INLINES_H_
+#define ODP_ARCH_CPU_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/time_cpu.h>
+
+#include <stdint.h>
+
+/* CPU frequency is shifted to decrease integer division error */
+#define _ODP_CPU_FREQ_SHIFT 16
+
+typedef struct _odp_cpu_cycles_global_t {
+ uint64_t res;
+ uint64_t res_shifted;
+ uint64_t max;
+
+} _odp_cpu_cycles_global_t;
+
+extern _odp_cpu_cycles_global_t _odp_cpu_cycles_glob;
+
+static inline void _odp_cpu_pause(void)
+{
+	/* ISB flushes the pipeline and then restarts it, which is
+	 * guaranteed to stall the CPU for a number of cycles. It is used
+	 * here instead of YIELD, which only hints the CPU to switch to
+	 * another thread if possible and executes as a NOP otherwise.
+	 */
+	__asm volatile("isb" ::: "memory");
+}
+
+/* CPU cycle count derived from the generic timer: counter value scaled by
+ * the precomputed (cpu_hz << _ODP_CPU_FREQ_SHIFT) / timer_hz ratio. */
+static inline uint64_t _odp_cpu_cycles(void)
+{
+	return (_odp_time_cpu_global() * _odp_cpu_cycles_glob.res_shifted) >> _ODP_CPU_FREQ_SHIFT;
+}
+
+/* Smallest observable increment of _odp_cpu_cycles() */
+static inline uint64_t _odp_cpu_cycles_resolution(void)
+{
+	return _odp_cpu_cycles_glob.res;
+}
+
+/* Largest value _odp_cpu_cycles() can return before wrapping */
+static inline uint64_t _odp_cpu_cycles_max(void)
+{
+	return _odp_cpu_cycles_glob.max;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/hash_crc32.h b/platform/linux-generic/arch/aarch64/odp/api/abi/hash_crc32.h
new file mode 100644
index 000000000..fd7bf91c6
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/hash_crc32.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2021 ARM Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_HASH_CRC32_H_
+#define ODP_API_ABI_HASH_CRC32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+uint32_t _odp_hash_crc32_generic(const void *data, uint32_t data_len,
+ uint32_t init_val);
+uint32_t _odp_hash_crc32c_generic(const void *data, uint32_t data_len,
+ uint32_t init_val);
+
+#ifdef __ARM_FEATURE_CRC32
+
+#include <arm_acle.h>
+
+/* CRC-32 over 'data_len' bytes starting from 'init_val', accelerated with
+ * the Armv8 CRC32 instructions: 8-byte blocks first, then 4/2/1 byte tail. */
+static inline uint32_t _odp_hash_crc32(const void *data_ptr, uint32_t data_len,
+				       uint32_t init_val)
+{
+	uint32_t i;
+	uintptr_t pd = (uintptr_t)data_ptr;
+
+	for (i = 0; i < data_len / 8; i++) {
+		init_val = __crc32d(init_val, *(const uint64_t *)pd);
+		pd += 8;
+	}
+
+	if (data_len & 0x4) {
+		init_val = __crc32w(init_val, *(const uint32_t *)pd);
+		pd += 4;
+	}
+
+	if (data_len & 0x2) {
+		init_val = __crc32h(init_val, *(const uint16_t *)pd);
+		pd += 2;
+	}
+
+	if (data_len & 0x1)
+		init_val = __crc32b(init_val, *(const uint8_t *)pd);
+
+	return init_val;
+}
+
+/* CRC-32C (Castagnoli polynomial, __crc32c* intrinsics) variant of the above */
+static inline uint32_t _odp_hash_crc32c(const void *data, uint32_t data_len,
+					uint32_t init_val)
+{
+	uint32_t i;
+	uintptr_t pd = (uintptr_t)data;
+
+	for (i = 0; i < data_len / 8; i++) {
+		init_val = __crc32cd(init_val, *(const uint64_t *)pd);
+		pd += 8;
+	}
+
+	if (data_len & 0x4) {
+		init_val = __crc32cw(init_val, *(const uint32_t *)pd);
+		pd += 4;
+	}
+
+	if (data_len & 0x2) {
+		init_val = __crc32ch(init_val, *(const uint16_t *)pd);
+		pd += 2;
+	}
+
+	if (data_len & 0x1)
+		init_val = __crc32cb(init_val, *(const uint8_t *)pd);
+
+	return init_val;
+}
+
+#else /* __ARM_FEATURE_CRC32 */
+
+/*
+ * Fall back to software implementation
+ */
+
+/* Software CRC-32 fallback when the CPU lacks the CRC32 instructions */
+static inline uint32_t _odp_hash_crc32(const void *data, uint32_t data_len,
+				       uint32_t init_val)
+{
+	return _odp_hash_crc32_generic(data, data_len, init_val);
+}
+
+/* Software CRC-32C fallback when the CPU lacks the CRC32 instructions */
+static inline uint32_t _odp_hash_crc32c(const void *data, uint32_t data_len,
+					uint32_t init_val)
+{
+	return _odp_hash_crc32c_generic(data, data_len, init_val);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/sync_inlines.h b/platform/linux-generic/arch/aarch64/odp/api/abi/sync_inlines.h
new file mode 100644
index 000000000..3d42e7dd8
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/sync_inlines.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_ARCH_SYNC_INLINES_H_
+#define ODP_ARCH_SYNC_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Full-system data synchronization barrier ordering both loads and stores */
+static inline void _odp_mb_sync(void)
+{
+	__asm__ volatile("dsb sy" ::: "memory");
+}
+
+/* Full-system barrier ordering loads only (DSB LD) */
+static inline void _odp_mb_sync_load(void)
+{
+	__asm__ volatile("dsb ld" ::: "memory");
+}
+
+/* Full-system barrier ordering stores only (DSB ST) */
+static inline void _odp_mb_sync_store(void)
+{
+	__asm__ volatile("dsb st" ::: "memory");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/time_cpu.h b/platform/linux-generic/arch/aarch64/odp/api/abi/time_cpu.h
new file mode 100644
index 000000000..aba2799c7
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/time_cpu.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_TIME_CPU_H_
+#define ODP_API_ABI_TIME_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Read the generic timer virtual counter (CNTVCT_EL0) */
+static inline uint64_t _odp_time_cpu_global(void)
+{
+	uint64_t cntvct;
+
+	__asm__ volatile("mrs %0, cntvct_el0" : "=r"(cntvct) : : "memory");
+
+	return cntvct;
+}
+
+/* As above, but with a preceding ISB so that the counter read cannot be
+ * executed ahead of earlier instructions. */
+static inline uint64_t _odp_time_cpu_global_strict(void)
+{
+	uint64_t cntvct;
+
+	__asm__ volatile("isb" ::: "memory");
+	__asm__ volatile("mrs %0, cntvct_el0" : "=r"(cntvct) : : "memory");
+
+	return cntvct;
+}
+
+/* Counter frequency in Hz from CNTFRQ_EL0 */
+static inline uint64_t _odp_time_cpu_global_freq(void)
+{
+	uint64_t cntfrq;
+
+	__asm__ volatile("mrs %0, cntfrq_el0" : "=r"(cntfrq) : : );
+
+	return cntfrq;
+}
+
+/* The generic timer runs at a fixed frequency, so always constant */
+static inline int _odp_time_cpu_global_freq_is_const(void)
+{
+	return 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/time_inlines.h b/platform/linux-generic/arch/aarch64/odp/api/abi/time_inlines.h
new file mode 100644
index 000000000..331d1996f
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/time_inlines.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi/time_cpu_inlines.h>
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h b/platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h
new file mode 100644
index 000000000..73a3d476a
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 ARM Limited
+ */
+
+#ifndef ODP_API_ABI_WAIT_UNTIL_H_
+#define ODP_API_ABI_WAIT_UNTIL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/autoheader_external.h>
+
+#ifdef _ODP_WFE_LOCKS
+
+#include <stdint.h>
+
+#include <odp/api/atomic.h>
+
+/* Spin-wait with WFE until *addr equals 'expected', with acquire ordering
+ * on the final load. */
+static inline void
+_odp_wait_until_equal_acq_u32(odp_atomic_u32_t *addr, uint32_t expected)
+{
+	uint32_t value;
+	uint32_t *var = &addr->v;
+
+	/* SEVL sets the local event register so the first WFE falls
+	 * through instead of sleeping before the value is checked. */
+	__asm__ volatile("sevl" : : : "memory");
+	do {
+		__asm__ volatile("wfe" : : : "memory");
+		/* LDAXR provides the acquire ordering and arms the
+		 * exclusive monitor, so a write to this location by
+		 * another CPU generates the event that wakes WFE. */
+		__asm__ volatile("ldaxr %w0, [%1]"
+				 : "=&r" (value)
+				 : "r" (var)
+				 : "memory");
+	} while (expected != value);
+}
+
+#else /* !_ODP_WFE_LOCKS */
+
+/* Use generic implementation */
+#include <odp/api/abi/wait_until_generic.h>
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp_atomic.c b/platform/linux-generic/arch/aarch64/odp_atomic.c
new file mode 100644
index 000000000..c6b809768
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_atomic.c
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, ARM Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/atomic.h>
+
+/* Report which 64-bit atomic operations are lock-free.
+ * Returns 0 (none) or 2 (all); optionally fills 'atomic_op' with per-op
+ * flags. Note that 'init' is flagged 0 even in the lock-free case. */
+int odp_atomic_lock_free_u64(odp_atomic_op_t *atomic_op)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+	/* All operations have locks */
+	if (atomic_op)
+		atomic_op->all_bits = 0;
+
+	return 0;
+#else
+	/* All operations are lock-free */
+	if (atomic_op) {
+		atomic_op->all_bits = ~((uint32_t)0);
+		atomic_op->op.init = 0;
+	}
+
+	return 2;
+#endif
+}
+
+/* Report which 128-bit atomic operations are lock-free.
+ * Only load/store/cas are ever flagged: either via the CASP-based
+ * implementation, or via compiler-provided lock-free 16-byte atomics. */
+int odp_atomic_lock_free_u128(odp_atomic_op_t *atomic_op)
+{
+#ifdef _ODP_LOCK_FREE_128BIT_ATOMICS
+	if (atomic_op) {
+		atomic_op->all_bits = 0;
+		atomic_op->op.load = 1;
+		atomic_op->op.store = 1;
+		atomic_op->op.cas = 1;
+	}
+
+	return 2;
+#elif defined(__SIZEOF_INT128__)
+	if (__atomic_is_lock_free(16, NULL)) {
+		if (atomic_op) {
+			atomic_op->all_bits = 0;
+			atomic_op->op.load = 1;
+			atomic_op->op.store = 1;
+			atomic_op->op.cas = 1;
+		}
+		return 2;
+	}
+#endif
+	/* All operations have locks */
+	if (atomic_op)
+		atomic_op->all_bits = 0;
+
+	return 0;
+}
diff --git a/platform/linux-generic/arch/aarch64/odp_atomic.h b/platform/linux-generic/arch/aarch64/odp_atomic.h
new file mode 100644
index 000000000..d3b8ea4dc
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_atomic.h
@@ -0,0 +1,325 @@
+/* Copyright (c) 2017-2021, ARM Limited
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#error This file should not be included directly, please include odp_cpu.h
+#endif
+
+#include <odp_types_internal.h>
+#include <limits.h>
+
+#ifdef CONFIG_DMBSTR
+
+#define atomic_store_release(loc, val, ro) \
+do { \
+ _odp_release_barrier(ro); \
+ __atomic_store_n(loc, val, __ATOMIC_RELAXED); \
+} while (0)
+
+#else
+
+#define atomic_store_release(loc, val, ro) \
+ __atomic_store_n(loc, val, __ATOMIC_RELEASE)
+
+#endif /* CONFIG_DMBSTR */
+
+#define HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)
+#define HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \
+ (mo) == __ATOMIC_SEQ_CST)
+
+#define LL_MO(mo) (HAS_ACQ((mo)) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED)
+#define SC_MO(mo) (HAS_RLS((mo)) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED)
+
+#ifndef __ARM_FEATURE_QRDMX /* Feature only available in v8.1a and beyond */
+/* 128-bit CAS built from LDXP/STXP (load/store exclusive pair).
+ * Returns true on success; *exp is always updated with the atomically
+ * read value. */
+static inline bool
+__lockfree_compare_exchange_16(register _odp_u128_t *var, _odp_u128_t *exp,
+			       register _odp_u128_t neu, bool weak, int mo_success,
+			       int mo_failure)
+{
+	(void)weak; /* Always do strong CAS or we can't perform atomic read */
+	/* Ignore memory ordering for failure, memory order for
+	 * success must be stronger or equal. */
+	(void)mo_failure;
+	register _odp_u128_t old;
+	register _odp_u128_t expected;
+	int ll_mo = LL_MO(mo_success);
+	int sc_mo = SC_MO(mo_success);
+
+	expected = *exp;
+	/* Compiler barrier only; keeps the snapshot of *exp ordered */
+	__asm__ volatile("" ::: "memory");
+	do {
+		/* Atomicity of LLD is not guaranteed */
+		old = lld(var, ll_mo);
+		/* Must write back neu or old to verify atomicity of LLD */
+	} while (odp_unlikely(scd(var, old == expected ? neu : old, sc_mo)));
+	*exp = old; /* Always update, atomically read value */
+	return old == expected;
+}
+
+/* Atomic 128-bit exchange via an LDXP/STXP retry loop; returns the old value */
+static inline _odp_u128_t __lockfree_exchange_16(_odp_u128_t *var,
+						 _odp_u128_t neu, int mo)
+{
+	register _odp_u128_t old;
+	int ll_mo = LL_MO(mo);
+	int sc_mo = SC_MO(mo);
+
+	do {
+		/* Atomicity of LLD is not guaranteed */
+		old = lld(var, ll_mo);
+		/* Must successfully write back to verify atomicity of LLD */
+	} while (odp_unlikely(scd(var, neu, sc_mo)));
+	return old;
+}
+
+/* Atomic 128-bit fetch-and-AND via LDXP/STXP; returns the old value */
+static inline _odp_u128_t __lockfree_fetch_and_16(_odp_u128_t *var,
+						  _odp_u128_t mask, int mo)
+{
+	register _odp_u128_t old;
+	int ll_mo = LL_MO(mo);
+	int sc_mo = SC_MO(mo);
+
+	do {
+		/* Atomicity of LLD is not guaranteed */
+		old = lld(var, ll_mo);
+		/* Must successfully write back to verify atomicity of LLD */
+	} while (odp_unlikely(scd(var, old & mask, sc_mo)));
+	return old;
+}
+
+/* Atomic 128-bit fetch-and-OR via LDXP/STXP; returns the old value */
+static inline _odp_u128_t __lockfree_fetch_or_16(_odp_u128_t *var,
+						 _odp_u128_t mask, int mo)
+{
+	register _odp_u128_t old;
+	int ll_mo = LL_MO(mo);
+	int sc_mo = SC_MO(mo);
+
+	do {
+		/* Atomicity of LLD is not guaranteed */
+		old = lld(var, ll_mo);
+		/* Must successfully write back to verify atomicity of LLD */
+	} while (odp_unlikely(scd(var, old | mask, sc_mo)));
+	return old;
+}
+
+#else
+
+/* 128-bit CAS using the CASP family; 'mo' selects the instruction variant.
+ * Returns the value observed in *ptr (equals old_val on success).
+ * NOTE(review): __ATOMIC_SEQ_CST is not handled and hits abort() —
+ * callers must not pass it. */
+static inline _odp_u128_t cas_u128(_odp_u128_t *ptr, _odp_u128_t old_val,
+				   _odp_u128_t new_val, int mo)
+{
+	/* CASP instructions require that the first register number is paired */
+	register uint64_t old0 __asm__ ("x0");
+	register uint64_t old1 __asm__ ("x1");
+	register uint64_t new0 __asm__ ("x2");
+	register uint64_t new1 __asm__ ("x3");
+
+	old0 = (uint64_t)old_val;
+	old1 = (uint64_t)(old_val >> 64);
+	new0 = (uint64_t)new_val;
+	new1 = (uint64_t)(new_val >> 64);
+
+	if (mo == __ATOMIC_RELAXED) {
+		__asm__ volatile("casp %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+				 : [old0] "+r" (old0), [old1] "+r" (old1)
+				 : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
+				 : "memory");
+	} else if (mo == __ATOMIC_ACQUIRE) {
+		__asm__ volatile("caspa %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+				 : [old0] "+r" (old0), [old1] "+r" (old1)
+				 : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
+				 : "memory");
+	} else if (mo == __ATOMIC_ACQ_REL) {
+		__asm__ volatile("caspal %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+				 : [old0] "+r" (old0), [old1] "+r" (old1)
+				 : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
+				 : "memory");
+	} else if (mo == __ATOMIC_RELEASE) {
+		__asm__ volatile("caspl %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+				 : [old0] "+r" (old0), [old1] "+r" (old1)
+				 : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
+				 : "memory");
+	} else {
+		abort();
+	}
+
+	return ((_odp_u128_t)old0) | (((_odp_u128_t)old1) << 64);
+}
+
+/* CASP-based 128-bit CAS; the read-modify-write helpers below are CAS
+ * retry loops built on top of it. */
+static inline bool
+__lockfree_compare_exchange_16(register _odp_u128_t *var, _odp_u128_t *exp,
+			       register _odp_u128_t neu, bool weak, int mo_success,
+			       int mo_failure)
+{
+	(void)weak;
+	(void)mo_failure;
+	_odp_u128_t old;
+	_odp_u128_t expected;
+
+	expected = *exp;
+	old = cas_u128(var, expected, neu, mo_success);
+	*exp = old; /* Always update, atomically read value */
+	return old == expected;
+}
+
+/* Atomic 128-bit exchange; returns the old value */
+static inline _odp_u128_t __lockfree_exchange_16(_odp_u128_t *var,
+						 _odp_u128_t neu, int mo)
+{
+	_odp_u128_t old;
+	_odp_u128_t expected;
+
+	do {
+		expected = *var;
+		old = cas_u128(var, expected, neu, mo);
+	} while (old != expected);
+	return old;
+}
+
+/* Atomic 128-bit fetch-and-AND; returns the old value */
+static inline _odp_u128_t __lockfree_fetch_and_16(_odp_u128_t *var,
+						  _odp_u128_t mask, int mo)
+{
+	_odp_u128_t old;
+	_odp_u128_t expected;
+
+	do {
+		expected = *var;
+		old = cas_u128(var, expected, expected & mask, mo);
+	} while (old != expected);
+	return old;
+}
+
+/* Atomic 128-bit fetch-and-OR; returns the old value */
+static inline _odp_u128_t __lockfree_fetch_or_16(_odp_u128_t *var,
+						 _odp_u128_t mask, int mo)
+{
+	_odp_u128_t old;
+	_odp_u128_t expected;
+
+	do {
+		expected = *var;
+		old = cas_u128(var, expected, expected | mask, mo);
+	} while (old != expected);
+	return old;
+}
+
+#endif /* __ARM_FEATURE_QRDMX */
+
+/* Atomic 128-bit load implemented via a same-value CAS */
+static inline _odp_u128_t __lockfree_load_16(_odp_u128_t *var, int mo)
+{
+	_odp_u128_t old = *var; /* Possibly torn read */
+
+	/* Do CAS to ensure atomicity
+	 * Either CAS succeeds (writing back the same value)
+	 * Or CAS fails and returns the old value (atomic read)
+	 */
+	(void)__lockfree_compare_exchange_16(var, &old, old, false, mo, mo);
+	return old;
+}
+
+/* Relaxed atomic 128-bit load wrapper */
+static inline _odp_u128_t lockfree_load_u128(_odp_u128_t *atomic)
+{
+	return __lockfree_load_16((_odp_u128_t *)atomic, __ATOMIC_RELAXED);
+}
+
+/* Acquire+release 128-bit CAS wrapper; returns 1 on success, 0 otherwise */
+static inline int lockfree_cas_acq_rel_u128(_odp_u128_t *atomic,
+					    _odp_u128_t old_val,
+					    _odp_u128_t new_val)
+{
+	return __lockfree_compare_exchange_16((_odp_u128_t *)atomic,
+					      (_odp_u128_t *)&old_val,
+					      new_val,
+					      0,
+					      __ATOMIC_ACQ_REL,
+					      __ATOMIC_RELAXED);
+}
+
+/* 128-bit lock-free operations are always available on this arch */
+static inline int lockfree_check_u128(void)
+{
+	return 1;
+}
+
+/** Atomic bit set operations with memory ordering */
+#if defined(__SIZEOF_INT128__) && __SIZEOF_INT128__ == 16
+typedef _odp_u128_t bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_INT128__)
+
+#elif __GCC_ATOMIC_LLONG_LOCK_FREE == 2 && \
+ __SIZEOF_LONG_LONG__ != __SIZEOF_LONG__
+typedef unsigned long long bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_LONG_LONG__)
+
+#elif __GCC_ATOMIC_LONG_LOCK_FREE == 2 && __SIZEOF_LONG__ != __SIZEOF_INT__
+typedef unsigned long bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_LONG__)
+
+#elif __GCC_ATOMIC_INT_LOCK_FREE == 2
+typedef unsigned int bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_INT__)
+
+#else
+/* Target does not support lock-free atomic operations */
+typedef unsigned int bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_INT__)
+#endif
+
+#if ATOM_BITSET_SIZE <= 32
+
+/* Single-bit mask for bit position 'bit' of a bitset word */
+static inline bitset_t bitset_mask(uint32_t bit)
+{
+	return 1UL << bit;
+}
+
+#elif ATOM_BITSET_SIZE <= 64
+
+static inline bitset_t bitset_mask(uint32_t bit)
+{
+	return 1ULL << bit;
+}
+
+#elif ATOM_BITSET_SIZE <= 128
+
+/* 128-bit variant: build the mask in whichever 64-bit half holds the bit */
+static inline bitset_t bitset_mask(uint32_t bit)
+{
+	if (bit < 64)
+		return 1ULL << bit;
+	else
+		return (_odp_u128_t)(1ULL << (bit - 64)) << 64;
+}
+
+#else
+#error Unsupported size of bit sets (ATOM_BITSET_SIZE)
+#endif
+
+/* Atomic bitset operations, thin wrappers over the 128-bit lockfree helpers */
+static inline bitset_t atom_bitset_load(bitset_t *bs, int mo)
+{
+	return __lockfree_load_16(bs, mo);
+}
+
+static inline void atom_bitset_set(bitset_t *bs, uint32_t bit, int mo)
+{
+	(void)__lockfree_fetch_or_16(bs, bitset_mask(bit), mo);
+}
+
+static inline void atom_bitset_clr(bitset_t *bs, uint32_t bit, int mo)
+{
+	(void)__lockfree_fetch_and_16(bs, ~bitset_mask(bit), mo);
+}
+
+static inline bitset_t atom_bitset_xchg(bitset_t *bs, bitset_t neu, int mo)
+{
+	return __lockfree_exchange_16(bs, neu, mo);
+}
+
+static inline bitset_t atom_bitset_cmpxchg(bitset_t *bs, bitset_t *old,
+					   bitset_t neu, bool weak,
+					   int mo_success, int mo_failure)
+{
+	return __lockfree_compare_exchange_16(bs, old, neu, weak, mo_success,
+					      mo_failure);
+}
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H */
diff --git a/platform/linux-generic/arch/aarch64/odp_cpu.h b/platform/linux-generic/arch/aarch64/odp_cpu.h
new file mode 100644
index 000000000..ad8b36d87
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_cpu.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+
+#if !defined(__aarch64__)
+#error Use this file only when compiling for ARMv8 architecture
+#endif
+
+#include <odp_debug_internal.h>
+#include <odp_types_internal.h>
+
+/*
+ * Use LLD/SCD atomic primitives instead of lock-based code path in llqueue
+ * LLD/SCD is on ARM the fastest way to enqueue and dequeue elements from a
+ * linked list queue.
+ */
+#define CONFIG_LLDSCD
+
+/*
+ * Use DMB;STR instead of STRL on ARM
+ * On early ARMv8 implementations (e.g. Cortex-A57) this is noticeably more
+ * performant than using store-release.
+ * This also allows for load-only barriers (DMB ISHLD) which are much cheaper
+ * than a full barrier
+ */
+#define CONFIG_DMBSTR
+
+/* Only ARMv8 supports DMB ISHLD */
+/* A load only barrier is much cheaper than full barrier */
+#define _odp_release_barrier(ro) \
+do { \
+ if (ro) \
+ __asm__ volatile("dmb ishld" ::: "memory"); \
+ else \
+ __asm__ volatile("dmb ish" ::: "memory"); \
+} while (0)
+
+/* Load-exclusive byte (LDAXRB/LDXRB); arms the exclusive monitor.
+ * NOTE(review): return type is wider than the 8-bit datum; the loaded
+ * value always fits in 8 bits. */
+static inline uint16_t ll8(uint8_t *var, int mm)
+{
+	uint16_t old;
+
+	_ODP_ASSERT(mm == __ATOMIC_ACQUIRE || mm == __ATOMIC_RELAXED);
+
+	if (mm == __ATOMIC_ACQUIRE)
+		__asm__ volatile("ldaxrb %w0, [%1]"
+				 : "=&r" (old)
+				 : "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("ldxrb %w0, [%1]"
+				 : "=&r" (old)
+				 : "r" (var)
+				 : );
+	return old;
+}
+
+/* Load-exclusive 32-bit word (LDAXR/LDXR); arms the exclusive monitor */
+static inline uint32_t ll32(uint32_t *var, int mm)
+{
+	uint32_t old;
+
+	_ODP_ASSERT(mm == __ATOMIC_ACQUIRE || mm == __ATOMIC_RELAXED);
+
+	if (mm == __ATOMIC_ACQUIRE)
+		__asm__ volatile("ldaxr %w0, [%1]"
+				 : "=&r" (old)
+				 : "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("ldxr %w0, [%1]"
+				 : "=&r" (old)
+				 : "r" (var)
+				 : );
+	return old;
+}
+
+/* Store-exclusive 32-bit word (STLXR/STXR), paired with ll32().
+ * Return 0 on success, 1 on failure */
+static inline uint32_t sc32(uint32_t *var, uint32_t neu, int mm)
+{
+	uint32_t ret;
+
+	_ODP_ASSERT(mm == __ATOMIC_RELEASE || mm == __ATOMIC_RELAXED);
+
+	if (mm == __ATOMIC_RELEASE)
+		__asm__ volatile("stlxr %w0, %w1, [%2]"
+				 : "=&r" (ret)
+				 : "r" (neu), "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("stxr %w0, %w1, [%2]"
+				 : "=&r" (ret)
+				 : "r" (neu), "r" (var)
+				 : );
+	return ret;
+}
+
+/* Load-exclusive 64-bit word (LDAXR/LDXR); arms the exclusive monitor */
+static inline uint64_t ll64(uint64_t *var, int mm)
+{
+	uint64_t old;
+
+	_ODP_ASSERT(mm == __ATOMIC_ACQUIRE || mm == __ATOMIC_RELAXED);
+
+	if (mm == __ATOMIC_ACQUIRE)
+		__asm__ volatile("ldaxr %0, [%1]"
+				 : "=&r" (old)
+				 : "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("ldxr %0, [%1]"
+				 : "=&r" (old)
+				 : "r" (var)
+				 : );
+	return old;
+}
+
+/* Store-exclusive 64-bit word (STLXR/STXR), paired with ll64().
+ * Return 0 on success, 1 on failure */
+static inline uint32_t sc64(uint64_t *var, uint64_t neu, int mm)
+{
+	uint32_t ret;
+
+	_ODP_ASSERT(mm == __ATOMIC_RELEASE || mm == __ATOMIC_RELAXED);
+
+	if (mm == __ATOMIC_RELEASE)
+		__asm__ volatile("stlxr %w0, %1, [%2]"
+				 : "=&r" (ret)
+				 : "r" (neu), "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("stxr %w0, %1, [%2]"
+				 : "=&r" (ret)
+				 : "r" (neu), "r" (var)
+				 : );
+	return ret;
+}
+
+/* View of a 128-bit value as two 64-bit halves, as required by the
+ * LDXP/STXP register-pair instructions */
+union i128 {
+	_odp_u128_t i128;
+	int64_t i64[2];
+};
+
+/* Load-exclusive 128-bit pair (LDAXP/LDXP); arms the exclusive monitor.
+ * Note: the pair load itself is not guaranteed single-copy atomic —
+ * callers must confirm via a successful scd(). */
+static inline _odp_u128_t lld(_odp_u128_t *var, int mm)
+{
+	union i128 old;
+
+	_ODP_ASSERT(mm == __ATOMIC_ACQUIRE || mm == __ATOMIC_RELAXED);
+
+	if (mm == __ATOMIC_ACQUIRE)
+		__asm__ volatile("ldaxp %0, %1, [%2]"
+				 : "=&r" (old.i64[0]), "=&r" (old.i64[1])
+				 : "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("ldxp %0, %1, [%2]"
+				 : "=&r" (old.i64[0]), "=&r" (old.i64[1])
+				 : "r" (var)
+				 : );
+	return old.i128;
+}
+
+/* Store-exclusive 128-bit pair (STLXP/STXP), paired with lld().
+ * Return 0 on success, 1 on failure */
+static inline uint32_t scd(_odp_u128_t *var, _odp_u128_t neu, int mm)
+{
+	uint32_t ret;
+
+	_ODP_ASSERT(mm == __ATOMIC_RELEASE || mm == __ATOMIC_RELAXED);
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+	if (mm == __ATOMIC_RELEASE)
+		__asm__ volatile("stlxp %w0, %1, %2, [%3]"
+				 : "=&r" (ret)
+				 : "r" (((*(union i128 *)&neu)).i64[0]),
+				   "r" (((*(union i128 *)&neu)).i64[1]),
+				   "r" (var)
+				 : "memory");
+	else
+		__asm__ volatile("stxp %w0, %1, %2, [%3]"
+				 : "=&r" (ret)
+				 : "r" (((*(union i128 *)&neu)).i64[0]),
+				   "r" (((*(union i128 *)&neu)).i64[1]),
+				   "r" (var)
+				 : );
+#pragma GCC diagnostic pop
+	return ret;
+}
+
+#include "odp_atomic.h"
+#include "odp_wait_until.h"
+
+#ifdef __ARM_FEATURE_UNALIGNED
+#define _ODP_UNALIGNED 1
+#else
+#define _ODP_UNALIGNED 0
+#endif
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H */
diff --git a/platform/linux-generic/arch/aarch64/odp_cpu_cycles.c b/platform/linux-generic/arch/aarch64/odp_cpu_cycles.c
new file mode 100644
index 000000000..fba263ee4
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_cpu_cycles.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/cpu.h>
+
+#include <odp/api/abi/cpu_inlines.h>
+#include <odp/api/abi/time_cpu.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+
+#include <string.h>
+
+#include <odp/visibility_begin.h>
+
+_odp_cpu_cycles_global_t _odp_cpu_cycles_glob;
+
+#include <odp/visibility_end.h>
+
+/* Precompute the generic-timer -> CPU-cycle scaling factors used by the
+ * inlined _odp_cpu_cycles*() functions. Returns 0 on success, -1 if either
+ * frequency is unavailable. */
+int _odp_cpu_cycles_init_global(void)
+{
+	uint64_t cpu_hz, cpu_time_hz;
+
+	memset(&_odp_cpu_cycles_glob, 0, sizeof(_odp_cpu_cycles_global_t));
+
+	cpu_time_hz = _odp_time_cpu_global_freq();
+	if (cpu_time_hz == 0) {
+		_ODP_ERR("CPU time counter frequency not available\n");
+		return -1;
+	}
+
+	cpu_hz = odp_cpu_hz_max_id(0);
+	if (cpu_hz == 0) {
+		_ODP_ERR("CPU frequency not available\n");
+		return -1;
+	}
+
+	/* Ratio is kept left-shifted to reduce integer division error */
+	_odp_cpu_cycles_glob.res_shifted = (cpu_hz << _ODP_CPU_FREQ_SHIFT) / cpu_time_hz;
+
+	/* Resolution is at least one cycle per counter tick */
+	_odp_cpu_cycles_glob.res = cpu_hz > cpu_time_hz ?
+		(_odp_cpu_cycles_glob.res_shifted >> _ODP_CPU_FREQ_SHIFT) : 1;
+
+	/* Largest reportable value, rounded down to a multiple of res */
+	_odp_cpu_cycles_glob.max = (UINT64_MAX >> _ODP_CPU_FREQ_SHIFT) -
+		(UINT64_MAX % _odp_cpu_cycles_glob.res);
+
+	return 0;
+}
diff --git a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
new file mode 100644
index 000000000..52936dacf
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
@@ -0,0 +1,896 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, ARM Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/crypto.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/sync.h>
+#include <odp/api/debug.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/hints.h>
+#include <odp/api/random.h>
+
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/thread_inlines.h>
+
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_packet_internal.h>
+
+#include "AArch64cryptolib.h"
+
+#define MAX_SESSIONS 4000
+/* Length in bytes */
+#define ARM_CRYPTO_MAX_CIPHER_KEY_LENGTH 32
+#define ARM_CRYPTO_MAX_AUTH_KEY_LENGTH 32
+#define ARM_CRYPTO_MAX_IV_LENGTH 16
+#define ARM_CRYPTO_MAX_AAD_LENGTH 16
+#define ARM_CRYPTO_MAX_DATA_LENGTH 65536
+#define ARM_CRYPTO_MAX_DIGEST_LENGTH 16
+
+#define AES_GCM_IV_LEN 12
+ODP_STATIC_ASSERT(AES_GCM_IV_LEN <= ARM_CRYPTO_MAX_IV_LENGTH,
+ "AES_GCM_IV_LEN exceeds ARM_CRYPTO_MAX_IV_LENGTH");
+
+/*
+ * ARM crypto library may read up to 15 bytes past the end of input
+ * data and AAD and write up to 15 bytes past the end of output data.
+ */
+#define OOB_WRITE_LEN 16 /* rounded up to 16 bytes for efficiency */
+
+/*
+ * Data buffer size must be a multiple of 16, because the ARM crypto
+ * library will write full 16 byte blocks even if the last data block
+ * is not a full block.
+ */
+ODP_STATIC_ASSERT(ARM_CRYPTO_MAX_DATA_LENGTH % 16 == 0,
+ "Data buffer size not a multiple of 16");
+
+/*
+ * IV buffer size must be a multiple of 16, because the ARM crypto
+ * library will read in 16 byte blocks even if the last data block
+ * is not a full block.
+ */
+ODP_STATIC_ASSERT(ARM_CRYPTO_MAX_IV_LENGTH % 16 == 0,
+ "IV buffer size not a multiple of 16");
+
+/*
+ * Cipher algorithm capabilities
+ *
+ * Keep sorted: first by key length, then by IV length
+ */
+static const odp_crypto_cipher_capability_t cipher_capa_null[] = {
+{.key_len = 0, .iv_len = 0} };
+
+#ifdef __ARM_FEATURE_AES
+static const odp_crypto_cipher_capability_t cipher_capa_aes_gcm[] = {
+{.key_len = 16, .iv_len = AES_GCM_IV_LEN},
+{.key_len = 24, .iv_len = AES_GCM_IV_LEN},
+{.key_len = 32, .iv_len = AES_GCM_IV_LEN} };
+#endif
+
+/*
+ * Authentication algorithm capabilities
+ *
+ * Keep sorted: first by digest length, then by key length
+ */
+static const odp_crypto_auth_capability_t auth_capa_null[] = {
+{.digest_len = 0, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+#define AES_GCM_TAG_LEN 16
+
+#ifdef __ARM_FEATURE_AES
+static const odp_crypto_auth_capability_t auth_capa_aes_gcm[] = {
+{.digest_len = AES_GCM_TAG_LEN, .key_len = 0, .aad_len = {.min = 8, .max = 12, .inc = 4} } };
+#endif
+
+/** Forward declaration of session structure */
+typedef struct odp_crypto_generic_session_t odp_crypto_generic_session_t;
+
+/**
+ * Algorithm handler function prototype
+ *
+ * Processes one packet in place and records the result in the packet header.
+ */
+typedef
+void (*crypto_func_t)(odp_packet_t pkt,
+		      const odp_crypto_packet_op_param_t *param,
+		      odp_crypto_generic_session_t *session);
+
+/**
+ * Per crypto session data structure
+ */
+struct odp_crypto_generic_session_t {
+	/* Next session on the free list; NULL while the session is in use */
+	odp_crypto_generic_session_t *next;
+
+	/* Session creation parameters */
+	odp_crypto_session_param_t p;
+
+	struct {
+		/* Copy of the cipher key given at session creation */
+		uint8_t key_data[ARM_CRYPTO_MAX_CIPHER_KEY_LENGTH];
+	} cipher;
+
+	struct {
+		/* Copy of the authentication key */
+		uint8_t key[ARM_CRYPTO_MAX_AUTH_KEY_LENGTH];
+	} auth;
+
+	/* Selected algorithm handler (encrypt/decrypt/null) */
+	crypto_func_t func;
+	/* Index of this session within the global session table */
+	unsigned int idx;
+	/* Precomputed constants for the ARM crypto library */
+	armv8_cipher_constants_t cc;
+};
+
+typedef struct odp_crypto_global_s odp_crypto_global_t;
+
+/* Global crypto state: a fixed pool of sessions managed as a free list */
+struct odp_crypto_global_s {
+	/* Protects the free list */
+	odp_spinlock_t lock;
+	/* Head of the free session list */
+	odp_crypto_generic_session_t *free;
+	odp_crypto_generic_session_t sessions[MAX_SESSIONS];
+};
+
+/* Points into shared memory reserved in _odp_crypto_init_global() */
+static odp_crypto_global_t *global;
+
+/* Per-thread bounce buffer for segmented packets or packets without
+ * enough tailroom for the crypto library's out-of-bounds writes */
+typedef struct crypto_local_t {
+	uint8_t buffer[ARM_CRYPTO_MAX_DATA_LENGTH];
+} crypto_local_t;
+
+static __thread crypto_local_t local;
+
+/* Take one session from the global free list.
+ *
+ * Returns NULL when all MAX_SESSIONS sessions are in use. */
+static
+odp_crypto_generic_session_t *alloc_session(void)
+{
+	odp_crypto_generic_session_t *ses;
+
+	/* Pop the head of the free list under the global lock */
+	odp_spinlock_lock(&global->lock);
+	ses = global->free;
+	if (ses != NULL) {
+		global->free = ses->next;
+		ses->next = NULL;
+	}
+	odp_spinlock_unlock(&global->lock);
+
+	if (ses == NULL)
+		return NULL;
+
+	/* Session index equals its position within the global table */
+	ses->idx = ses - global->sessions;
+
+	return ses;
+}
+
+/* Return a session to the head of the global free list. The caller is
+ * responsible for wiping any sensitive contents first. */
+static
+void free_session(odp_crypto_generic_session_t *session)
+{
+	odp_spinlock_lock(&global->lock);
+	session->next = global->free;
+	global->free = session;
+	odp_spinlock_unlock(&global->lock);
+}
+
+/* Record the per-packet crypto completion status in the packet header. */
+static inline void set_crypto_op_result(odp_packet_t pkt,
+					odp_crypto_alg_err_t cipher_err,
+					odp_crypto_alg_err_t auth_err)
+{
+	odp_crypto_packet_result_t *result = &packet_hdr(pkt)->crypto_op_result;
+
+	result->cipher_status.alg_err = cipher_err;
+	result->auth_status.alg_err = auth_err;
+}
+
+/* Mark both cipher and auth stages as completed without errors. */
+static inline void set_crypto_op_result_ok(odp_packet_t pkt)
+{
+	set_crypto_op_result(pkt, ODP_CRYPTO_ALG_ERR_NONE, ODP_CRYPTO_ALG_ERR_NONE);
+}
+
+/* No-op cipher/auth handler: performs no transformation, only marks the
+ * operation as successful in the packet header. */
+static void
+null_crypto_routine(odp_packet_t pkt,
+		    const odp_crypto_packet_op_param_t *param ODP_UNUSED,
+		    odp_crypto_generic_session_t *session ODP_UNUSED)
+{
+	/* Fix: pkt was marked ODP_UNUSED even though it is used here */
+	set_crypto_op_result_ok(pkt);
+}
+
+/* Copy the AAD with a compile-time constant length so the compiler can
+ * inline the copy. Only the two lengths allowed by the capabilities
+ * (8 and 12 bytes) are accepted. */
+static inline void copy_aad(uint8_t *dst, const uint8_t *src, uint32_t len)
+{
+	_ODP_ASSERT(len == 8 || len == 12);
+
+	if (len == 12)
+		memcpy(dst, src, 12);
+	else
+		memcpy(dst, src, 8);
+}
+
+/* Encrypt and authenticate the cipher range with AES-GCM.
+ *
+ * Operates in place inside the packet when the range is contiguous and
+ * there is at least OOB_WRITE_LEN bytes of tailroom for the library's
+ * out-of-bounds writes; otherwise bounces the data through the per-thread
+ * buffer. On success the tag is written at hash_result_offset. */
+static
+void aes_gcm_encrypt(odp_packet_t pkt,
+		     const odp_crypto_packet_op_param_t *param,
+		     odp_crypto_generic_session_t *session)
+{
+	armv8_cipher_state_t cs = {
+		.counter = {
+			.d = {0, 0}
+		}
+	};
+	uint8_t iv_data[ARM_CRYPTO_MAX_IV_LENGTH];
+	uint64_t iv_bit_length = AES_GCM_IV_LEN * 8;
+	uint64_t plaintext_bit_length = param->cipher_range.length * 8;
+	uint64_t aad_bit_length = session->p.auth_aad_len * 8;
+	uint32_t in_pos = param->cipher_range.offset;
+	uint32_t in_len = param->cipher_range.length;
+	odp_bool_t continuous_data;
+	/* Fix: byte buffer for the OOB_WRITE_LEN bytes that the library may
+	 * clobber past the end of output (was uint16_t, which silently
+	 * reserved twice the needed space) */
+	uint8_t saved_tail[OOB_WRITE_LEN];
+	uint8_t tag[AES_GCM_TAG_LEN];
+	int rc;
+
+	/* Fail early if cipher_range is too large */
+	if (odp_unlikely(in_len > ARM_CRYPTO_MAX_DATA_LENGTH)) {
+		_ODP_DBG("ARM Crypto: Packet size too large for requested operation\n");
+		goto err;
+	}
+
+	/* The crypto lib may read 16 bytes. Copy to a big enough buffer */
+	_ODP_ASSERT(param->cipher_iv_ptr != NULL);
+	memcpy(iv_data, param->cipher_iv_ptr, AES_GCM_IV_LEN);
+
+	cs.constants = &session->cc;
+
+	rc = armv8_aes_gcm_set_counter(iv_data, iv_bit_length, &cs);
+	if (odp_unlikely(rc)) {
+		_ODP_DBG("ARM Crypto: Failure while setting nonce\n");
+		goto err;
+	}
+
+	/* Copy AAD in a stack to make sure that the ARM crypto library can
+	 * read it in 16 byte chunks. */
+	uint8_t aad[ARM_CRYPTO_MAX_AAD_LENGTH];
+
+	copy_aad(aad, param->aad_ptr, session->p.auth_aad_len);
+
+	uint32_t seg_len = 0;
+	uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);
+
+	if (odp_unlikely(odp_packet_is_segmented(pkt)) ||
+	    odp_unlikely(odp_packet_tailroom(pkt) < OOB_WRITE_LEN)) {
+		/* Packet is segmented or it may not be safe to read and write
+		 * beyond the end of packet data. Copy the cipher range to a
+		 * contiguous buffer. */
+		odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);
+
+		data = local.buffer;
+		continuous_data = false;
+	} else {
+		/* Save data that might get overwritten */
+		memcpy(saved_tail, data + in_len, OOB_WRITE_LEN);
+		continuous_data = true;
+	}
+
+	rc = armv8_enc_aes_gcm_from_state(&cs,
+					  aad, aad_bit_length,
+					  data, plaintext_bit_length,
+					  data,
+					  tag);
+	if (odp_unlikely(rc)) {
+		_ODP_DBG("ARM Crypto: AES GCM Encoding failed\n");
+		goto err;
+	}
+
+	if (odp_likely(continuous_data)) {
+		/* Restore the saved tail and write the tag in place */
+		memcpy(data + in_len, saved_tail, OOB_WRITE_LEN);
+		memcpy(data - in_pos + param->hash_result_offset,
+		       tag, AES_GCM_TAG_LEN);
+	} else {
+		odp_packet_copy_from_mem(pkt, in_pos, in_len, data);
+		odp_packet_copy_from_mem(pkt, param->hash_result_offset,
+					 AES_GCM_TAG_LEN, tag);
+	}
+
+	set_crypto_op_result_ok(pkt);
+	return;
+
+err:
+	set_crypto_op_result(pkt,
+			     ODP_CRYPTO_ALG_ERR_DATA_SIZE,
+			     ODP_CRYPTO_ALG_ERR_NONE);
+}
+
+/* Decrypt and verify the cipher range with AES-GCM.
+ *
+ * Mirror image of aes_gcm_encrypt(): the tag is read from
+ * hash_result_offset and verified by the library. Any failure (including
+ * tag mismatch) is reported as an ICV check error. */
+static
+void aes_gcm_decrypt(odp_packet_t pkt,
+		     const odp_crypto_packet_op_param_t *param,
+		     odp_crypto_generic_session_t *session)
+{
+	armv8_cipher_state_t cs = {
+		.counter = {
+			.d = {0, 0}
+		}
+	};
+	uint8_t iv_data[ARM_CRYPTO_MAX_IV_LENGTH];
+	uint8_t tag[AES_GCM_TAG_LEN];
+	uint64_t iv_bit_length = AES_GCM_IV_LEN * 8;
+	uint64_t plaintext_bit_length = param->cipher_range.length * 8;
+	uint64_t aad_bit_length = session->p.auth_aad_len * 8;
+	uint32_t in_pos = param->cipher_range.offset;
+	uint32_t in_len = param->cipher_range.length;
+	odp_bool_t continuous_data;
+	/* Fix: byte buffer for the OOB_WRITE_LEN bytes that the library may
+	 * clobber past the end of output (was uint16_t, which silently
+	 * reserved twice the needed space) */
+	uint8_t saved_tail[OOB_WRITE_LEN];
+	int rc;
+
+	/* Fail early if cipher_range is too large */
+	if (odp_unlikely(in_len > ARM_CRYPTO_MAX_DATA_LENGTH)) {
+		_ODP_DBG("ARM Crypto: Packet size too large for requested operation\n");
+		goto err;
+	}
+
+	/* The crypto lib may read 16 bytes. Copy to a big enough buffer */
+	_ODP_ASSERT(param->cipher_iv_ptr != NULL);
+	memcpy(iv_data, param->cipher_iv_ptr, AES_GCM_IV_LEN);
+
+	cs.constants = &session->cc;
+
+	rc = armv8_aes_gcm_set_counter(iv_data, iv_bit_length, &cs);
+	if (odp_unlikely(rc)) {
+		_ODP_DBG("ARM Crypto: Failure while setting nonce\n");
+		goto err;
+	}
+
+	/* Copy AAD in a stack to make sure that the ARM crypto library can
+	 * read it in 16 byte chunks. */
+	uint8_t aad[ARM_CRYPTO_MAX_AAD_LENGTH];
+
+	copy_aad(aad, param->aad_ptr, session->p.auth_aad_len);
+
+	uint32_t seg_len = 0;
+	uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);
+
+	if (odp_unlikely(odp_packet_is_segmented(pkt)) ||
+	    odp_unlikely(odp_packet_tailroom(pkt) < OOB_WRITE_LEN)) {
+		/* Packet is segmented or it may not be safe to read and write
+		 * beyond the end of packet data. Copy the cipher range to a
+		 * contiguous buffer. */
+		odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);
+		data = local.buffer;
+		/* Copy tag from the packet to a buffer */
+		odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+				       AES_GCM_TAG_LEN, tag);
+		continuous_data = false;
+	} else {
+		/* Save data that might get overwritten */
+		memcpy(saved_tail, data + in_len, OOB_WRITE_LEN);
+		/* Copy tag from the packet to a buffer */
+		memcpy(tag, data - in_pos + param->hash_result_offset, AES_GCM_TAG_LEN);
+		continuous_data = true;
+	}
+
+	rc = armv8_dec_aes_gcm_from_state(&cs,
+					  aad, aad_bit_length,
+					  data, plaintext_bit_length,
+					  tag,
+					  data);
+	if (odp_unlikely(rc)) {
+		_ODP_DBG("ARM Crypto: AES GCM Decoding failed\n");
+		goto err;
+	}
+
+	if (odp_likely(continuous_data))
+		memcpy(data + in_len, saved_tail, OOB_WRITE_LEN);
+	else
+		odp_packet_copy_from_mem(pkt, in_pos, in_len, data);
+
+	set_crypto_op_result_ok(pkt);
+	return;
+
+err:
+	set_crypto_op_result(pkt,
+			     ODP_CRYPTO_ALG_ERR_NONE,
+			     ODP_CRYPTO_ALG_ERR_ICV_CHECK);
+}
+
+/* Validate AES-GCM session parameters, store the key and select the
+ * direction specific handler. Returns 0 on success, -1 on bad parameters. */
+static int process_aes_gcm_param(odp_crypto_generic_session_t *session)
+{
+	uint32_t key_len = session->p.cipher_key.length;
+
+	/* Only AES-128/192/256 key sizes are supported */
+	if (key_len != 16 && key_len != 24 && key_len != 32)
+		return -1;
+
+	/* Only the 96-bit GCM IV is supported */
+	if (session->p.cipher_iv_len != AES_GCM_IV_LEN)
+		return -1;
+
+	if (key_len > ARM_CRYPTO_MAX_CIPHER_KEY_LENGTH)
+		return -1;
+
+	memcpy(session->cipher.key_data, session->p.cipher_key.data, key_len);
+
+	/* Select handler based on operation direction */
+	session->func = (ODP_CRYPTO_OP_ENCODE == session->p.op) ?
+			aes_gcm_encrypt : aes_gcm_decrypt;
+
+	return 0;
+}
+
+/* Report the crypto capabilities of this implementation. */
+int odp_crypto_capability(odp_crypto_capability_t *capa)
+{
+	if (capa == NULL)
+		return -1;
+
+	memset(capa, 0, sizeof(*capa));
+
+	/* Synchronous operation is the preferred mode */
+	capa->sync_mode = ODP_SUPPORT_PREFERRED;
+	capa->async_mode = ODP_SUPPORT_YES;
+	capa->queue_type_plain = 1;
+	capa->queue_type_sched = 1;
+
+	/* Null algorithms are always available */
+	capa->ciphers.bit.null = 1;
+	capa->auths.bit.null = 1;
+
+#ifdef __ARM_FEATURE_AES
+	/* AES-GCM is available when built with the Armv8 AES extension */
+	capa->ciphers.bit.aes_gcm = 1;
+	capa->auths.bit.aes_gcm = 1;
+#endif
+
+	capa->max_sessions = MAX_SESSIONS;
+
+	return 0;
+}
+
+/* Copy up to num_copy capability entries of the given cipher into dst.
+ * Returns the total number of entries, or -1 for unsupported algorithms. */
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+				 odp_crypto_cipher_capability_t dst[],
+				 int num_copy)
+{
+	const odp_crypto_cipher_capability_t *table;
+	int count;
+	const int elem_size = sizeof(odp_crypto_cipher_capability_t);
+
+	/* Select the capability table of the requested algorithm */
+	switch (cipher) {
+	case ODP_CIPHER_ALG_NULL:
+		table = cipher_capa_null;
+		count = sizeof(cipher_capa_null) / elem_size;
+		break;
+#ifdef __ARM_FEATURE_AES
+	case ODP_CIPHER_ALG_AES_GCM:
+		table = cipher_capa_aes_gcm;
+		count = sizeof(cipher_capa_aes_gcm) / elem_size;
+		break;
+#endif
+	default:
+		return -1;
+	}
+
+	/* Copy at most num_copy entries, but report the full count */
+	if (num_copy > count)
+		num_copy = count;
+
+	memcpy(dst, table, num_copy * elem_size);
+
+	return count;
+}
+
+/* Copy up to num_copy capability entries of the given auth algorithm into
+ * dst. Returns the total number of entries, or -1 when unsupported. */
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+			       odp_crypto_auth_capability_t dst[], int num_copy)
+{
+	const odp_crypto_auth_capability_t *table;
+	int count;
+	const int elem_size = sizeof(odp_crypto_auth_capability_t);
+
+	/* Select the capability table of the requested algorithm */
+	switch (auth) {
+	case ODP_AUTH_ALG_NULL:
+		table = auth_capa_null;
+		count = sizeof(auth_capa_null) / elem_size;
+		break;
+#ifdef __ARM_FEATURE_AES
+	case ODP_AUTH_ALG_AES_GCM:
+		table = auth_capa_aes_gcm;
+		count = sizeof(auth_capa_aes_gcm) / elem_size;
+		break;
+#endif
+	default:
+		return -1;
+	}
+
+	/* Copy at most num_copy entries, but report the full count */
+	if (num_copy > count)
+		num_copy = count;
+
+	memcpy(dst, table, num_copy * elem_size);
+
+	return count;
+}
+
+/**
+ * Create a crypto session
+ *
+ * Validates parameters, allocates a session from the global table and
+ * precomputes the AES-GCM constants. On any failure *status is set and
+ * *session_out is ODP_CRYPTO_SESSION_INVALID.
+ *
+ * Fix: a failure of armv8_aes_gcm_set_constants() was previously discarded
+ * because rc was unconditionally overwritten by process_aes_gcm_param().
+ */
+int
+odp_crypto_session_create(const odp_crypto_session_param_t *param,
+			  odp_crypto_session_t *session_out,
+			  odp_crypto_ses_create_err_t *status)
+{
+	int rc = 0;
+	odp_crypto_generic_session_t *session;
+
+	if (odp_global_ro.disable.crypto) {
+		_ODP_ERR("Crypto is disabled\n");
+		/* Dummy output to avoid compiler warning about uninitialized
+		 * variables */
+		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
+		*session_out = ODP_CRYPTO_SESSION_INVALID;
+		return -1;
+	}
+
+	/* Bit mode ranges are not supported by this implementation */
+	if (param->cipher_range_in_bits) {
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		*session_out = ODP_CRYPTO_SESSION_INVALID;
+		return -1;
+	}
+	if (param->auth_range_in_bits) {
+		*status = ODP_CRYPTO_SES_ERR_AUTH;
+		*session_out = ODP_CRYPTO_SESSION_INVALID;
+		return -1;
+	}
+	/* Out-of-place operation types are not supported */
+	if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP ||
+	    param->op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
+		*status = ODP_CRYPTO_SES_ERR_PARAMS;
+		*session_out = ODP_CRYPTO_SESSION_INVALID;
+		return -1;
+	}
+
+	/* Allocate memory for this session */
+	session = alloc_session();
+	if (NULL == session) {
+		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
+		goto err;
+	}
+
+	/* Copy parameters */
+	session->p = *param;
+
+	if (session->p.cipher_iv_len > ARM_CRYPTO_MAX_IV_LENGTH) {
+		_ODP_DBG("Maximum IV length exceeded\n");
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	if (session->p.auth_iv_len > ARM_CRYPTO_MAX_IV_LENGTH) {
+		_ODP_DBG("Maximum auth IV length exceeded\n");
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	/* Process based on cipher */
+	switch (param->cipher_alg) {
+	case ODP_CIPHER_ALG_NULL:
+		session->func = null_crypto_routine;
+		rc = 0;
+		break;
+	case ODP_CIPHER_ALG_AES_GCM:
+	{
+		/* Set cipher mode for AES-GCM */
+		armv8_cipher_mode_t mode = 0;
+
+		switch (param->cipher_key.length) {
+		case 16:
+			mode = AES_GCM_128;
+			break;
+		case 24:
+			mode = AES_GCM_192;
+			break;
+		case 32:
+			mode = AES_GCM_256;
+			break;
+		default:
+			rc = -1;
+			break;
+		}
+
+		/* AES-GCM requires to do both auth and
+		 * cipher at the same time */
+		if (param->auth_alg != ODP_AUTH_ALG_AES_GCM) {
+			rc = -1;
+		} else if (mode == AES_GCM_128 || mode == AES_GCM_192 ||
+			   mode == AES_GCM_256) {
+			if (armv8_aes_gcm_set_constants(mode,
+							session->p.auth_digest_len,
+							session->p.cipher_key.data,
+							&session->cc) != 0) {
+				_ODP_DBG("ARM Crypto: Failure in setting constants\n");
+				/* Keep the failure; do not overwrite rc */
+				rc = -1;
+			} else {
+				rc = process_aes_gcm_param(session);
+			}
+		} else {
+			rc = -1;
+		}
+		break;
+	}
+	default:
+		rc = -1;
+	}
+
+	/* Check result */
+	if (rc) {
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	/* Process based on auth */
+	switch (param->auth_alg) {
+	case ODP_AUTH_ALG_NULL:
+		if (param->cipher_alg == ODP_CIPHER_ALG_NULL)
+			rc = 0;
+		else
+			rc = -1;
+		break;
+	case ODP_AUTH_ALG_AES_GCM:
+		/* AES-GCM requires to do both auth and
+		 * cipher at the same time */
+		if (param->cipher_alg == ODP_CIPHER_ALG_AES_GCM)
+			rc = 0;
+		else
+			rc = -1;
+		break;
+	default:
+		rc = -1;
+	}
+
+	/* Check result */
+	if (rc) {
+		*status = ODP_CRYPTO_SES_ERR_AUTH;
+		goto err;
+	}
+
+	/* We're happy */
+	*session_out = (intptr_t)session;
+	*status = ODP_CRYPTO_SES_ERR_NONE;
+	return 0;
+
+err:
+	/* error status should be set at this moment */
+	if (session != NULL)
+		free_session(session);
+	*session_out = ODP_CRYPTO_SESSION_INVALID;
+	return -1;
+}
+
+/* Destroy a session: wipe key material and return it to the free list. */
+int odp_crypto_session_destroy(odp_crypto_session_t session)
+{
+	odp_crypto_generic_session_t *ses =
+		(odp_crypto_generic_session_t *)(intptr_t)session;
+
+	/* Clear potentially sensitive contents before recycling */
+	memset(ses, 0, sizeof(*ses));
+	free_session(ses);
+
+	return 0;
+}
+
+/* Reserve and initialize the global crypto session pool in shared memory.
+ * Returns 0 on success (also when crypto is disabled), -1 on failure. */
+int _odp_crypto_init_global(void)
+{
+	size_t mem_size;
+	odp_shm_t shm;
+	int idx;
+
+	if (odp_global_ro.disable.crypto) {
+		_ODP_PRINT("\nODP crypto is DISABLED\n");
+		return 0;
+	}
+
+	/* Calculate the memory size we need */
+	mem_size = sizeof(odp_crypto_global_t);
+
+	/* Allocate our globally shared memory */
+	shm = odp_shm_reserve("_odp_crypto_pool_armv8crypto", mem_size,
+			      ODP_CACHE_LINE_SIZE,
+			      0);
+	if (ODP_SHM_INVALID == shm) {
+		_ODP_ERR("unable to allocate crypto pool\n");
+		return -1;
+	}
+
+	global = odp_shm_addr(shm);
+
+	/* Clear it out */
+	memset(global, 0, mem_size);
+
+	/* Initialize free list and lock: push every session onto the list */
+	for (idx = 0; idx < MAX_SESSIONS; idx++) {
+		global->sessions[idx].next = global->free;
+		global->free = &global->sessions[idx];
+	}
+	odp_spinlock_init(&global->lock);
+
+	return 0;
+}
+
+/* Tear down the global crypto state. Complains (but still frees the shm)
+ * if any sessions are still allocated. */
+int _odp_crypto_term_global(void)
+{
+	int rc = 0;
+	int count = 0;
+	odp_crypto_generic_session_t *ses;
+
+	if (odp_global_ro.disable.crypto)
+		return 0;
+
+	/* All sessions must be back on the free list at termination */
+	ses = global->free;
+	while (ses != NULL) {
+		count++;
+		ses = ses->next;
+	}
+
+	if (count != MAX_SESSIONS) {
+		_ODP_ERR("crypto sessions still active\n");
+		rc = -1;
+	}
+
+	if (odp_shm_free(odp_shm_lookup("_odp_crypto_pool_armv8crypto")) < 0) {
+		_ODP_ERR("shm free failed for _odp_crypto_pool_armv8crypto\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/* Initialize this thread's crypto state (the bounce buffer). */
+int _odp_crypto_init_local(void)
+{
+	if (odp_global_ro.disable.crypto)
+		return 0;
+
+	/* Clear the per-thread bounce buffer */
+	memset(&local, 0, sizeof(crypto_local_t));
+
+	return 0;
+}
+
+/* Per-thread termination: nothing to release. */
+int _odp_crypto_term_local(void)
+{
+	return 0;
+}
+
+/* Reset session parameters to their default (all-zero) values. */
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
+{
+	memset(param, 0, sizeof(*param));
+}
+
+/* Expose the raw session handle value, e.g. for logging/debugging. */
+uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
+{
+	return (uint64_t)hdl;
+}
+
+/* Copy full packet data and metadata from src to dst.
+ * Returns 0 on success, -1 when either copy is not possible. */
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
+{
+	int md;
+
+	/* Check whether metadata can be transferred between the two pools */
+	md = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+					  odp_packet_pool(src));
+	if (odp_unlikely(md < 0)) {
+		_ODP_ERR("Unable to copy packet metadata\n");
+		return -1;
+	}
+
+	/* Copy the whole packet data range */
+	if (odp_unlikely(odp_packet_copy_from_pkt(dst, 0, src, 0,
+						  odp_packet_len(src)) < 0)) {
+		_ODP_ERR("Unable to copy packet data\n");
+		return -1;
+	}
+
+	_odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md);
+	return 0;
+}
+
+/* Resolve the output packet for a non-basic operation.
+ *
+ * Cases:
+ *  - pkt_in == pkt_out: in-place operation, nothing to do.
+ *  - pkt_out invalid: allocate from the session output pool; when the pool
+ *    equals the input packet's pool the input is reused, otherwise the
+ *    input is copied and freed.
+ *  - pkt_out valid: copy data and metadata into it and free the input.
+ * Returns ODP_PACKET_INVALID on failure (input packet not freed then). */
+static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
+				      odp_packet_t pkt_in,
+				      odp_packet_t pkt_out)
+{
+	int rc;
+
+	if (odp_likely(pkt_in == pkt_out))
+		return pkt_out;
+
+	if (pkt_out == ODP_PACKET_INVALID) {
+		odp_pool_t pool = session->p.output_pool;
+
+		_ODP_ASSERT(pool != ODP_POOL_INVALID);
+		if (pool == odp_packet_pool(pkt_in)) {
+			pkt_out = pkt_in;
+		} else {
+			pkt_out = odp_packet_copy(pkt_in, pool);
+			/* Free the input only after a successful copy */
+			if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+				odp_packet_free(pkt_in);
+		}
+		return pkt_out;
+	}
+	rc = copy_data_and_metadata(pkt_out, pkt_in);
+	if (odp_unlikely(rc < 0))
+		return ODP_PACKET_INVALID;
+
+	odp_packet_free(pkt_in);
+	return pkt_out;
+}
+
+/* Execute one crypto operation synchronously.
+ * Returns 0 on success (result packet in *pkt_out), -1 on failure. */
+static
+int crypto_int(odp_packet_t pkt_in,
+	       odp_packet_t *pkt_out,
+	       const odp_crypto_packet_op_param_t *param)
+{
+	odp_crypto_generic_session_t *ses =
+		(odp_crypto_generic_session_t *)(intptr_t)param->session;
+	odp_packet_t pkt;
+
+	if (odp_likely(ses->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
+		/* Basic operations are always in place */
+		pkt = pkt_in;
+	} else {
+		pkt = get_output_packet(ses, pkt_in, *pkt_out);
+		if (odp_unlikely(pkt == ODP_PACKET_INVALID))
+			return -1;
+	}
+
+	/* Run the session handler unless this packet requests a null
+	 * crypto pass-through */
+	if (!(ses->p.null_crypto_enable && param->null_crypto))
+		ses->func(pkt, param, ses);
+
+	packet_subtype_set(pkt, ODP_EVENT_PACKET_CRYPTO);
+
+	/* Synchronous, simply return results */
+	*pkt_out = pkt;
+
+	return 0;
+}
+
+/* Synchronous crypto operation on a burst of packets.
+ * Returns the number of packets processed before the first failure. */
+int odp_crypto_op(const odp_packet_t pkt_in[],
+		  odp_packet_t pkt_out[],
+		  const odp_crypto_packet_op_param_t param[],
+		  int num_pkt)
+{
+	int i;
+
+	for (i = 0; i < num_pkt; i++) {
+		odp_crypto_generic_session_t *ses =
+			(odp_crypto_generic_session_t *)(intptr_t)param[i].session;
+
+		_ODP_ASSERT(ODP_CRYPTO_SYNC == ses->p.op_mode);
+
+		if (crypto_int(pkt_in[i], &pkt_out[i], &param[i]) < 0)
+			break;
+	}
+
+	return i;
+}
+
+/* Asynchronous crypto operation: process each packet and enqueue the
+ * result to the session completion queue. Returns the number of packets
+ * consumed before the first failure. */
+int odp_crypto_op_enq(const odp_packet_t pkt_in[],
+		      const odp_packet_t pkt_out[],
+		      const odp_crypto_packet_op_param_t param[],
+		      int num_pkt)
+{
+	odp_packet_t pkt;
+	odp_event_t event;
+	odp_crypto_generic_session_t *session;
+	int i, rc;
+
+	for (i = 0; i < num_pkt; i++) {
+		session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
+		_ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
+		_ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
+
+		/* For non-basic op types the caller provides the output
+		 * packet. For basic operations crypto_int() ignores the
+		 * initial value; initialize it anyway so that an
+		 * indeterminate handle is never passed around (fix). */
+		if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+			pkt = pkt_out[i];
+		else
+			pkt = ODP_PACKET_INVALID;
+
+		rc = crypto_int(pkt_in[i], &pkt, &param[i]);
+		if (rc < 0)
+			break;
+
+		event = odp_packet_to_event(pkt);
+		if (odp_queue_enq(session->p.compl_queue, event)) {
+			odp_event_free(event);
+			break;
+		}
+	}
+
+	return i;
+}
diff --git a/platform/linux-generic/arch/aarch64/odp_random.h b/platform/linux-generic/arch/aarch64/odp_random.h
new file mode 100644
index 000000000..023e6c455
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_random.h
@@ -0,0 +1,166 @@
+/* Copyright (c) 2021, ARM Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_AARCH64_RANDOM_H_
+#define ODP_AARCH64_RANDOM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/random.h>
+#include <odp/autoheader_internal.h>
+
+#include <stdint.h>
+
+odp_random_kind_t _odp_random_max_kind_generic(void);
+int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len);
+int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len);
+
+#ifdef __ARM_FEATURE_RNG
+
+#if __ARM_FEATURE_UNALIGNED != 1
+#error This implementation requires unaligned access
+#endif
+
+/* Hardware RNG (FEAT_RNG) is available: true random data is supported. */
+static inline int _odp_random_max_kind(void)
+{
+	return ODP_RANDOM_TRUE;
+}
+
+static inline int _odp_rndr(uint64_t *v)
+{
+ int pass;
+
+ /* Return a 64-bit random number which is reseeded from the True Random
+ * Number source. If the hardware returns a genuine random number,
+ * PSTATE.NZCV is set to 0b0000. The NZCV condition flag is checked via
+ * the CSET instruction. If the hardware cannot return a genuine random
+ * number in a reasonable period of time, PSTATE.NZCV is set to 0b0100
+ * and the data value returned is 0. */
+ __asm__ volatile("mrs %0, s3_3_c2_c4_0\n\t"
+ "cset %w[pass], ne"
+ : "=r" (*v), [pass] "=r" (pass)
+ :
+ : "cc");
+
+ return pass;
+}
+
+/* Read one 64-bit value from the RNDRRS system register (reseed before
+ * read). Returns 1 on success, 0 when the caller should retry. */
+static inline int _odp_rndrrs(uint64_t *v)
+{
+	int pass;
+
+	/* Return a 64-bit random number which is reseeded from the True Random
+	 * Number source immediately before the read of the random number.
+	 * If the hardware returns a genuine random number, PSTATE.NZCV is
+	 * set to 0b0000. The NZCV condition flag is checked via the CSET
+	 * instruction. If the hardware cannot return a genuine random number
+	 * in a reasonable period of time, PSTATE.NZCV is set to 0b0100 and the
+	 * data value returned is 0. */
+	__asm__ volatile("mrs %0, s3_3_c2_c4_1\n\t"
+			 "cset %w[pass], ne"
+			 : "=r" (*v), [pass] "=r" (pass)
+			 :
+			 : "cc");
+
+	return pass;
+}
+
+/* Fill buf with len bytes of random data from RNDR. Unaligned stores are
+ * safe here: __ARM_FEATURE_UNALIGNED is verified above. Returns len. */
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+	uint8_t *out = buf;
+	uint32_t left = len;
+	uint64_t word;
+
+	/* Full 64-bit words first */
+	while (left >= 8) {
+		while (!_odp_rndr(&word))
+			;
+
+		*(uint64_t *)(uintptr_t)out = word;
+		out += 8;
+		left -= 8;
+	}
+
+	/* Remaining 1-7 bytes come from one extra random word */
+	if (left) {
+		while (!_odp_rndr(&word))
+			;
+
+		if (left & 4) {
+			*(uint32_t *)(uintptr_t)out = word & 0xffffffff;
+			word >>= 32;
+			out += 4;
+		}
+
+		if (left & 2) {
+			*(uint16_t *)(uintptr_t)out = word & 0xffff;
+			word >>= 16;
+			out += 2;
+		}
+
+		if (left & 1)
+			*out = word & 0xff;
+	}
+
+	return len;
+}
+
+/* Fill buf with len bytes of true random data from RNDRRS (reseeded per
+ * read). Unaligned stores are safe: __ARM_FEATURE_UNALIGNED is verified
+ * above. Returns len. */
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+	uint64_t temp;
+
+	/* Write full 64-bit words */
+	for (uint32_t i = 0; i < len / 8; i++) {
+		while (!_odp_rndrrs(&temp))
+			;
+
+		*(uint64_t *)(uintptr_t)buf = temp;
+		buf += 8;
+	}
+
+	/* Remaining 1-7 bytes from one extra random word */
+	if (len & 7) {
+		while (!_odp_rndrrs(&temp))
+			;
+
+		if (len & 4) {
+			*(uint32_t *)(uintptr_t)buf = temp & 0xffffffff;
+			temp >>= 32;
+			buf += 4;
+		}
+
+		if (len & 2) {
+			*(uint16_t *)(uintptr_t)buf = temp & 0xffff;
+			temp >>= 16;
+			buf += 2;
+		}
+
+		if (len & 1)
+			*buf = temp & 0xff;
+	}
+
+	return len;
+}
+
+#else
+
+/* No hardware RNG: fall back to the generic implementations. */
+static inline int _odp_random_max_kind(void)
+{
+	return _odp_random_max_kind_generic();
+}
+
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+	return _odp_random_crypto_data_generic(buf, len);
+}
+
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+	return _odp_random_true_data_generic(buf, len);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
new file mode 100644
index 000000000..f242c845e
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
@@ -0,0 +1,396 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2020-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <odp/api/hints.h>
+#include <odp_global_data.h>
+#include <odp_sysinfo_internal.h>
+#include <odp_debug_internal.h>
+#include "cpu_flags.h"
+
+#define TMP_STR_LEN 64
+
+/* Write the CPU implementer (vendor) name matching the MIDR_EL1
+ * implementer code into str; unknown codes are printed in hex. */
+static void aarch64_impl_str(char *str, int maxlen, int implementer)
+{
+	static const struct {
+		int id;
+		const char *name;
+	} vendor[] = {
+		{0x41, "ARM Limited"},
+		{0x42, "Broadcom Corporation"},
+		{0x43, "Marvell (Cavium) Inc."},
+		{0x44, "Digital Equipment Corporation"},
+		{0x46, "Fujitsu Ltd."},
+		{0x49, "Infineon Technologies AG"},
+		{0x4d, "Freescale Semiconductor Inc."},
+		{0x4e, "NVIDIA Corporation"},
+		{0x50, "Applied Micro Circuits Corporation"},
+		{0x51, "Qualcomm Inc."},
+		{0x56, "Marvell International Ltd."},
+		{0x69, "Intel Corporation"},
+		{0xc0, "Ampere Computing"}
+	};
+
+	for (size_t i = 0; i < sizeof(vendor) / sizeof(vendor[0]); i++) {
+		if (vendor[i].id == implementer) {
+			snprintf(str, maxlen, "%s", vendor[i].name);
+			return;
+		}
+	}
+
+	snprintf(str, maxlen, "UNKNOWN (0x%x)", implementer);
+}
+
+/* Decode MIDR_EL1 part/variant/revision fields into a human readable CPU
+ * name and the corresponding Arm ISA version. Covers ARM (0x41) and
+ * Marvell/Cavium (0x43) implementers; anything else falls back to raw
+ * hex output with ISA unknown. */
+static void aarch64_part_info(char *str, int maxlen, odp_cpu_arch_arm_t *cpu_isa, int implementer,
+			      int part, int variant, int revision)
+{
+	*cpu_isa = ODP_CPU_ARCH_ARM_UNKNOWN;
+
+	if (implementer == 0x41) {
+		/* Part numbers are specified in Main ID Register (MIDR_EL1) documentation */
+		switch (part) {
+		case 0xd02:
+			snprintf(str, maxlen, "Cortex-A34");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_0;
+			return;
+		case 0xd04:
+			snprintf(str, maxlen, "Cortex-A35");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_0;
+			return;
+		case 0xd03:
+			snprintf(str, maxlen, "Cortex-A53");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_0;
+			return;
+		case 0xd05:
+			snprintf(str, maxlen, "Cortex-A55");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd07:
+			snprintf(str, maxlen, "Cortex-A57");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_0;
+			return;
+		case 0xd06:
+			snprintf(str, maxlen, "Cortex-A65");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd08:
+			snprintf(str, maxlen, "Cortex-A72");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_0;
+			return;
+		case 0xd09:
+			snprintf(str, maxlen, "Cortex-A73");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_0;
+			return;
+		case 0xd0a:
+			snprintf(str, maxlen, "Cortex-A75");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd0b:
+			snprintf(str, maxlen, "Cortex-A76");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd0c:
+			snprintf(str, maxlen, "Neoverse N1");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd0e:
+			snprintf(str, maxlen, "Cortex-A76AE");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd0d:
+			snprintf(str, maxlen, "Cortex-A77");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd40:
+			snprintf(str, maxlen, "Neoverse V1");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_4;
+			return;
+		case 0xd41:
+			snprintf(str, maxlen, "Cortex-A78");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd42:
+			snprintf(str, maxlen, "Cortex-A78AE");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd44:
+			snprintf(str, maxlen, "Cortex-X1");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd46:
+			snprintf(str, maxlen, "Cortex-A510");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+			return;
+		case 0xd47:
+			snprintf(str, maxlen, "Cortex-A710");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+			return;
+		case 0xd48:
+			snprintf(str, maxlen, "Cortex-X2");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+			return;
+		case 0xd49:
+			snprintf(str, maxlen, "Neoverse N2");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+			return;
+		case 0xd4a:
+			snprintf(str, maxlen, "Neoverse E1");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd4b:
+			snprintf(str, maxlen, "Cortex-A78C");
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xd4d:
+			snprintf(str, maxlen, "Cortex-A715");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_0;
+			return;
+		case 0xd80:
+			snprintf(str, maxlen, "Cortex-A520");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_2;
+			return;
+		case 0xd81:
+			snprintf(str, maxlen, "Cortex-A720");
+			*cpu_isa = ODP_CPU_ARCH_ARMV9_2;
+			return;
+		default:
+			break;
+		}
+	} else if (implementer == 0x43) {
+		/* Marvell/Cavium OCTEON and ThunderX families */
+		switch (part) {
+		case 0xa1:
+			snprintf(str, maxlen, "CN88XX, Pass %i.%i", variant + 1, revision);
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_1;
+			return;
+		case 0xa2:
+			snprintf(str, maxlen, "CN81XX, Pass %i.%i", variant + 1, revision);
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_1;
+			return;
+		case 0xa3:
+			snprintf(str, maxlen, "CN83XX, Pass %i.%i", variant + 1, revision);
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_1;
+			return;
+		case 0xaf:
+			snprintf(str, maxlen, "CN99XX, Rev %c%i", 'A' + variant, revision);
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_1;
+			return;
+		case 0xb1:
+			snprintf(str, maxlen, "CN98XX, Rev %c%i", 'A' + variant, revision);
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		case 0xb2:
+			/* Handle B0 errata: variant and revision numbers show up as A1 */
+			if (variant == 0 && revision == 1)
+				snprintf(str, maxlen, "CN96XX, Rev B0");
+			else
+				snprintf(str, maxlen, "CN96XX, Rev %c%i", 'A' + variant, revision);
+
+			*cpu_isa = ODP_CPU_ARCH_ARMV8_2;
+			return;
+		default:
+			break;
+		}
+	}
+
+	/* Unrecognized part: print raw field values */
+	snprintf(str, maxlen, "part 0x%x, var 0x%x, rev 0x%x",
+		 part, variant, revision);
+}
+
+/* Determine the Arm ISA version this binary was compiled for, based on the
+ * compiler's ACLE predefined macros. Returns ODP_CPU_ARCH_ARM_UNKNOWN when
+ * the macros are absent or the value is not recognized. */
+static odp_cpu_arch_arm_t arm_isa_version(void)
+{
+#if defined(__ARM_ARCH)
+	/* Pre-ACLE-2018 encoding: plain major version number */
+	if (__ARM_ARCH == 8) {
+	#ifdef __ARM_FEATURE_QRDMX
+		/* v8.1 or higher */
+		return ODP_CPU_ARCH_ARMV8_1;
+	#else
+		return ODP_CPU_ARCH_ARMV8_0;
+	#endif
+	}
+
+	if (__ARM_ARCH == 9) {
+		/* v9.0 or higher */
+		return ODP_CPU_ARCH_ARMV9_0;
+	}
+
+	if (__ARM_ARCH >= 800) {
+		/* ACLE 2018 defines that from v8.1 onwards the value includes
+		 * the minor version number: __ARM_ARCH = X * 100 + Y
+		 * E.g. for Armv8.1 __ARM_ARCH = 801 */
+		int major = __ARM_ARCH / 100;
+		int minor = __ARM_ARCH - (major * 100);
+
+		if (major == 8) {
+			switch (minor) {
+			case 0:
+				return ODP_CPU_ARCH_ARMV8_0;
+			case 1:
+				return ODP_CPU_ARCH_ARMV8_1;
+			case 2:
+				return ODP_CPU_ARCH_ARMV8_2;
+			case 3:
+				return ODP_CPU_ARCH_ARMV8_3;
+			case 4:
+				return ODP_CPU_ARCH_ARMV8_4;
+			case 5:
+				return ODP_CPU_ARCH_ARMV8_5;
+			case 6:
+				return ODP_CPU_ARCH_ARMV8_6;
+			case 7:
+				return ODP_CPU_ARCH_ARMV8_7;
+			case 8:
+				return ODP_CPU_ARCH_ARMV8_8;
+			case 9:
+				return ODP_CPU_ARCH_ARMV8_9;
+			default:
+				return ODP_CPU_ARCH_ARM_UNKNOWN;
+			}
+		} else if (major == 9) {
+			switch (minor) {
+			case 0:
+				return ODP_CPU_ARCH_ARMV9_0;
+			case 1:
+				return ODP_CPU_ARCH_ARMV9_1;
+			case 2:
+				return ODP_CPU_ARCH_ARMV9_2;
+			case 3:
+				return ODP_CPU_ARCH_ARMV9_3;
+			default:
+				return ODP_CPU_ARCH_ARM_UNKNOWN;
+			}
+		}
+	}
+#endif
+	return ODP_CPU_ARCH_ARM_UNKNOWN;
+}
+
+/* Parse /proc/cpuinfo style input: accumulate the implementer, architecture,
+ * variant and part fields of each CPU block, and emit the model string when
+ * the final "CPU revision" line of the block is seen.
+ * NOTE(review): strchr(cur, ':') is assumed to succeed on every matched
+ * line (standard cpuinfo format) - a malformed line without ':' would
+ * dereference NULL; confirm input is always kernel-generated. */
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+{
+	char str[1024];
+	char impl_str[TMP_STR_LEN];
+	char part_str[TMP_STR_LEN];
+	const char *cur;
+	long int impl, arch, var, part, rev;
+	int id;
+
+	sysinfo->cpu_arch = ODP_CPU_ARCH_ARM;
+	sysinfo->cpu_isa_sw.arm = arm_isa_version();
+	/* Linux cpuinfo does not have detailed ISA version number (CPU architecture: 8) */
+	sysinfo->cpu_isa_hw.arm = ODP_CPU_ARCH_ARM_UNKNOWN;
+
+	strcpy(sysinfo->cpu_arch_str, "aarch64");
+
+	memset(impl_str, 0, sizeof(impl_str));
+	memset(part_str, 0, sizeof(part_str));
+
+	impl = 0;
+	arch = 0;
+	var = 0;
+	part = 0;
+	rev = 0;
+	id = 0;
+
+	while (fgets(str, sizeof(str), file) != NULL && id < CONFIG_NUM_CPU_IDS) {
+		/* Parse line by line a block of cpuinfo */
+		cur = strstr(str, "CPU implementer");
+
+		if (cur) {
+			cur = strchr(cur, ':');
+			impl = strtol(cur + 1, NULL, 16);
+			aarch64_impl_str(impl_str, TMP_STR_LEN, impl);
+			continue;
+		}
+
+		cur = strstr(str, "CPU architecture");
+
+		if (cur) {
+			cur = strchr(cur, ':');
+			arch = strtol(cur + 1, NULL, 10);
+			continue;
+		}
+
+		cur = strstr(str, "CPU variant");
+
+		if (cur) {
+			cur = strchr(cur, ':');
+			var = strtol(cur + 1, NULL, 16);
+			continue;
+		}
+
+		cur = strstr(str, "CPU part");
+
+		if (cur) {
+			cur = strchr(cur, ':');
+			part = strtol(cur + 1, NULL, 16);
+			continue;
+		}
+
+		cur = strstr(str, "CPU revision");
+
+		if (cur) {
+			odp_cpu_arch_arm_t cpu_isa;
+
+			cur = strchr(cur, ':');
+			rev = strtol(cur + 1, NULL, 10);
+
+			aarch64_part_info(part_str, TMP_STR_LEN, &cpu_isa, impl, part, var, rev);
+			sysinfo->cpu_isa_hw.arm = cpu_isa;
+
+			/* This is the last line about this cpu, update
+			 * model string. */
+			snprintf(sysinfo->model_str[id],
+				 sizeof(sysinfo->model_str[id]),
+				 "%s, %s, arch %li",
+				 impl_str, part_str, arch);
+
+			/* Some CPUs do not support cpufreq, use a dummy
+			 * max freq. */
+			if (sysinfo->cpu_hz_max[id] == 0) {
+				uint64_t hz = sysinfo->default_cpu_hz_max;
+
+				_ODP_WARN("CPU[%i] uses default max frequency of %" PRIu64 " "
+					  "Hz from config file\n", id, hz);
+				sysinfo->cpu_hz_max[id] = hz;
+			}
+
+			id++;
+		}
+	}
+
+	return 0;
+}
+
+/* Print architecture specific info: the detected CPU feature flags. */
+void _odp_sys_info_print_arch(void)
+{
+	_odp_cpu_flags_print_all();
+}
+
+/* Current CPU frequency detection is not implemented for aarch64; report
+ * the configured default frequency for every CPU id. */
+uint64_t odp_cpu_arch_hz_current(int id ODP_UNUSED)
+{
+	return odp_global_ro.system_info.default_cpu_hz;
+}
diff --git a/platform/linux-generic/arch/aarch64/odp_wait_until.h b/platform/linux-generic/arch/aarch64/odp_wait_until.h
new file mode 100644
index 000000000..eca3f9ce5
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp_wait_until.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2017 ARM Limited
+ * Copyright (c) 2017-2018 Linaro Limited
+ * Copyright (c) 2024 Nokia
+ */
+
+#ifndef ODP_AARCH64_WAIT_UNTIL_H_
+#define ODP_AARCH64_WAIT_UNTIL_H_
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#error This file should not be included directly, please include odp_cpu.h
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/cpu.h>
+
+#include <odp_cpu.h>
+
+#include <stdint.h>
+
+static inline void _odp_sevl(void)
+{
+ __asm__ volatile("sevl" : : : );
+}
+
+static inline int _odp_wfe(void)
+{
+ __asm__ volatile("wfe" : : : "memory");
+ return 1;
+}
+
+#define _odp_monitor_u8(addr, mo) ll8((addr), (mo))
+#define _odp_monitor_u32(addr, mo) ll32((addr), (mo))
+#define _odp_monitor_u64(addr, mo) ll64((addr), (mo))
+#define _odp_monitor_u128(addr, mo) lld((addr), (mo))
+
+#if ATOM_BITSET_SIZE <= 32
+static inline bitset_t _odp_bitset_monitor(bitset_t *bs, int mo)
+{
+ return _odp_monitor_u32(bs, mo);
+}
+#elif ATOM_BITSET_SIZE <= 64
+static inline bitset_t _odp_bitset_monitor(bitset_t *bs, int mo)
+{
+ return _odp_monitor_u64(bs, mo);
+}
+#elif ATOM_BITSET_SIZE <= 128
+static inline bitset_t _odp_bitset_monitor(bitset_t *bs, int mo)
+{
+ return _odp_monitor_u128(bs, mo);
+}
+#else
+#error Unsupported size of bit sets (ATOM_BITSET_SIZE)
+#endif
+
+/**
+ * The _odp_wait_until_eq_*() functions defined in this header are intended to
+ * be used only with the scalable scheduler and queue implementations. Even
+ * though these functions use standard non-atomic parameter types, the
+ * parameters must only be operated using atomic operations. If new functions
+ * are added to this file, they should use _odp_wait_until_equal_*() prefix and
+ * atomic parameter types.
+ */
+
+static inline void _odp_wait_until_eq_u32(uint32_t *val, uint32_t expected)
+{
+ _odp_sevl();
+ while (_odp_wfe() && _odp_monitor_u32(val, __ATOMIC_RELAXED) != expected)
+ odp_cpu_pause();
+}
+
+static inline void _odp_wait_until_eq_bitset(bitset_t *val, bitset_t expected)
+{
+	_odp_sevl();
+	while (_odp_wfe() && _odp_bitset_monitor(val, __ATOMIC_RELAXED) != expected)
+		odp_cpu_pause();
+}
+
+static inline void _odp_wait_until_eq_acq_u8(uint8_t *val, uint8_t expected)
+{
+ _odp_sevl();
+ while (_odp_wfe() && _odp_monitor_u8(val, __ATOMIC_ACQUIRE) != expected)
+ odp_cpu_pause();
+}
+
+static inline void _odp_wait_until_eq_acq_u32(uint32_t *val, uint32_t expected)
+{
+ _odp_sevl();
+ while (_odp_wfe() && _odp_monitor_u32(val, __ATOMIC_ACQUIRE) != expected)
+ odp_cpu_pause();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/arm/odp/api/abi/cpu.h b/platform/linux-generic/arch/arm/odp/api/abi/cpu.h
new file mode 100644
index 000000000..9224af9a0
--- /dev/null
+++ b/platform/linux-generic/arch/arm/odp/api/abi/cpu.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ODP_CACHE_LINE_SIZE 64
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/arm/odp/api/cpu_arch.h b/platform/linux-generic/arch/arm/odp/api/abi/cpu_inlines.h
index 7c75a690e..bf44806a0 100644
--- a/platform/linux-generic/arch/arm/odp/api/cpu_arch.h
+++ b/platform/linux-generic/arch/arm/odp/api/abi/cpu_inlines.h
@@ -1,19 +1,18 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef ODP_PLAT_CPU_ARCH_H_
-#define ODP_PLAT_CPU_ARCH_H_
+#ifndef ODP_ARCH_CPU_INLINES_H_
+#define ODP_ARCH_CPU_INLINES_H_
#ifdef __cplusplus
extern "C" {
#endif
-#define _ODP_CACHE_LINE_SIZE 64
-
-static inline void odp_cpu_pause(void)
+static inline void _odp_cpu_pause(void)
{
/* YIELD hints the CPU to switch to another thread if possible
* and executes as a NOP otherwise.
@@ -23,6 +22,9 @@ static inline void odp_cpu_pause(void)
__asm volatile("isb" ::: "memory");
}
+/* Use generic implementations for the rest of the functions */
+#include <odp/api/abi/cpu_generic.h>
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/arch/arm/odp_cpu.h b/platform/linux-generic/arch/arm/odp_cpu.h
new file mode 100644
index 000000000..6b2674736
--- /dev/null
+++ b/platform/linux-generic/arch/arm/odp_cpu.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+
+#if !defined(__arm__)
+#error Use this file only when compiling for ARM architecture
+#endif
+
+#include <odp_debug_internal.h>
+
+/*
+ * Use LLD/SCD atomic primitives instead of lock-based code path in llqueue
+ * LLD/SCD is on ARM the fastest way to enqueue and dequeue elements from a
+ * linked list queue.
+ */
+#define CONFIG_LLDSCD
+
+/*
+ * Use DMB;STR instead of STRL on ARM
+ * On early ARMv8 implementations (e.g. Cortex-A57) this is noticeably more
+ * performant than using store-release.
+ * This also allows for load-only barriers (DMB ISHLD) which are much cheaper
+ * than a full barrier
+ */
+#define CONFIG_DMBSTR
+
+static inline uint64_t lld(uint64_t *var, int mm)
+{
+ uint64_t old;
+
+ __asm__ volatile("ldrexd %0, %H0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : );
+ /* Barrier after an acquiring load */
+ if (mm == __ATOMIC_ACQUIRE)
+ __asm__ volatile("dmb" : : : "memory");
+ return old;
+}
+
+/* Return 0 on success, 1 on failure */
+static inline uint32_t scd(uint64_t *var, uint64_t neu, int mm)
+{
+ uint32_t ret;
+
+ /* Barrier before a releasing store */
+ if (mm == __ATOMIC_RELEASE)
+ __asm__ volatile("dmb" : : : "memory");
+ __asm__ volatile("strexd %0, %1, %H1, [%2]"
+ : "=&r" (ret)
+ : "r" (neu), "r" (var)
+ : );
+ return ret;
+}
+
+#ifdef CONFIG_DMBSTR
+
+#define atomic_store_release(loc, val, ro) \
+do { \
+ __atomic_thread_fence(__ATOMIC_RELEASE); \
+ __atomic_store_n(loc, val, __ATOMIC_RELAXED); \
+} while (0)
+
+#else
+
+#define atomic_store_release(loc, val, ro) \
+ __atomic_store_n(loc, val, __ATOMIC_RELEASE)
+
+#endif /* CONFIG_DMBSTR */
+
+#include "../default/odp_atomic.h"
+#include "../default/odp_wait_until.h"
+
+#ifdef __ARM_FEATURE_UNALIGNED
+#define _ODP_UNALIGNED 1
+#else
+#define _ODP_UNALIGNED 0
+#endif
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H */
diff --git a/platform/linux-generic/arch/arm/odp_sysinfo_parse.c b/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
index 53e2aaeaf..4cbe46d7c 100644
--- a/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/arm/odp_sysinfo_parse.c
@@ -1,27 +1,33 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_internal.h>
-#include <odp_debug_internal.h>
-#include <string.h>
+#include <odp_global_data.h>
+#include <odp_sysinfo_internal.h>
-int cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
{
- int i;
+ sysinfo->cpu_arch = ODP_CPU_ARCH_ARM;
+ sysinfo->cpu_isa_sw.arm = ODP_CPU_ARCH_ARM_UNKNOWN;
+ sysinfo->cpu_isa_hw.arm = ODP_CPU_ARCH_ARM_UNKNOWN;
- ODP_DBG("Warning: use dummy values for freq and model string\n");
- for (i = 0; i < MAX_CPU_NUMBER; i++) {
- sysinfo->cpu_hz_max[i] = 1400000000;
- strcpy(sysinfo->model_str[i], "UNKNOWN");
- }
+#if defined(__ARM_ARCH)
+ if (__ARM_ARCH == 6)
+ sysinfo->cpu_isa_sw.arm = ODP_CPU_ARCH_ARMV6;
+ else if (__ARM_ARCH == 7)
+ sysinfo->cpu_isa_sw.arm = ODP_CPU_ARCH_ARMV7;
+#endif
- return 0;
+ return _odp_dummy_cpuinfo(sysinfo);
}
-uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+void _odp_sys_info_print_arch(void)
{
- return 0;
+}
+
+uint64_t odp_cpu_arch_hz_current(int id ODP_UNUSED)
+{
+ return odp_global_ro.system_info.default_cpu_hz;
}
diff --git a/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h b/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h
new file mode 100644
index 000000000..553114666
--- /dev/null
+++ b/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ARCH_TIME_CPU_INLINES_H_
+#define ODP_ARCH_TIME_CPU_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/time_types.h>
+
+#include <odp/api/abi/time_cpu.h>
+
+#include <stdint.h>
+
+#define _ODP_TIME_GIGA_HZ 1000000000ULL
+
+typedef struct _odp_time_global_t {
+ uint64_t freq_hz;
+ uint64_t start_time;
+ uint64_t start_time_ns;
+
+} _odp_time_global_t;
+
+extern _odp_time_global_t _odp_time_glob;
+
+static inline odp_time_t _odp_time_cur(void)
+{
+ odp_time_t time;
+
+ time.count = _odp_time_cpu_global();
+ return time;
+}
+
+static inline odp_time_t _odp_time_cur_strict(void)
+{
+ odp_time_t time;
+
+ time.count = _odp_time_cpu_global_strict();
+ return time;
+}
+
+static inline uint64_t _odp_time_to_ns(odp_time_t time)
+{
+ uint64_t nsec;
+ uint64_t freq_hz = _odp_time_glob.freq_hz;
+ uint64_t count = time.count;
+ uint64_t sec = 0;
+
+ if (count >= freq_hz) {
+ sec = count / freq_hz;
+ count = count - sec * freq_hz;
+ }
+
+ nsec = (_ODP_TIME_GIGA_HZ * count) / freq_hz;
+
+ return (sec * _ODP_TIME_GIGA_HZ) + nsec;
+}
+
+static inline odp_time_t _odp_time_from_ns(uint64_t ns)
+{
+ odp_time_t time;
+ uint64_t count;
+ uint64_t freq_hz = _odp_time_glob.freq_hz;
+ uint64_t sec = 0;
+
+ if (ns >= ODP_TIME_SEC_IN_NS) {
+ sec = ns / ODP_TIME_SEC_IN_NS;
+ ns = ns - sec * ODP_TIME_SEC_IN_NS;
+ }
+
+ count = sec * freq_hz;
+ count += (ns * freq_hz) / ODP_TIME_SEC_IN_NS;
+
+ time.count = count;
+
+ return time;
+}
+
+static inline uint64_t _odp_time_res(void)
+{
+ return _odp_time_glob.freq_hz;
+}
+
+static inline void _odp_time_startup(odp_time_startup_t *startup)
+{
+ startup->global.count = _odp_time_glob.start_time;
+ startup->global_ns = _odp_time_glob.start_time_ns;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/common/odp_time_cpu.c b/platform/linux-generic/arch/common/odp_time_cpu.c
new file mode 100644
index 000000000..3c392de0c
--- /dev/null
+++ b/platform/linux-generic/arch/common/odp_time_cpu.c
@@ -0,0 +1,74 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/time_types.h>
+
+#include <odp/api/abi/time_cpu.h>
+#include <odp/api/abi/time_cpu_inlines.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+
+#define YEAR_IN_SEC (365 * 24 * 3600)
+
+#include <odp/visibility_begin.h>
+
+_odp_time_global_t _odp_time_glob;
+
+#include <odp/visibility_end.h>
+
+int _odp_time_init_global(void)
+{
+ uint64_t count, diff, years;
+ odp_time_t time;
+ _odp_time_global_t *global = &_odp_time_glob;
+
+ memset(global, 0, sizeof(_odp_time_global_t));
+
+ if (!_odp_time_cpu_global_freq_is_const())
+ return -1;
+
+ global->freq_hz = _odp_time_cpu_global_freq();
+ if (global->freq_hz == 0)
+ return -1;
+
+ _ODP_PRINT("HW time counter freq: %" PRIu64 " hz\n\n", global->freq_hz);
+
+ count = _odp_time_cpu_global();
+ time.count = count;
+ global->start_time = count;
+ global->start_time_ns = _odp_time_to_ns(time);
+
+ /* Make sure that counters will not wrap */
+ diff = UINT64_MAX - count;
+ years = (diff / global->freq_hz) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time counter would wrap in 10 years: %" PRIu64 "\n", count);
+ return -1;
+ }
+
+ diff = UINT64_MAX - global->start_time_ns;
+ years = (diff / ODP_TIME_SEC_IN_NS) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time in nsec would wrap in 10 years: %" PRIu64 "\n",
+ global->start_time_ns);
+ return -1;
+ }
+
+ return 0;
+}
+
+int _odp_time_term_global(void)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/arch/default/odp/api/abi/atomic_generic.h b/platform/linux-generic/arch/default/odp/api/abi/atomic_generic.h
new file mode 100644
index 000000000..c6ed86363
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/atomic_generic.h
@@ -0,0 +1,276 @@
+/* Copyright (c) 2021, ARM Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_ATOMIC_GENERIC_H_
+#define ODP_API_ABI_ATOMIC_GENERIC_H_
+
+#include <odp/api/atomic.h>
+
+static inline void _odp_atomic_add_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_sub_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_inc_u32(odp_atomic_u32_t *atom)
+{
+ (void)__atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_dec_u32(odp_atomic_u32_t *atom)
+{
+ (void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_max_u32(odp_atomic_u32_t *atom, uint32_t new_val)
+{
+ uint32_t old_val;
+
+ old_val = __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
+
+ while (new_val > old_val) {
+ if (__atomic_compare_exchange_n(&atom->v, &old_val, new_val, 0 /* strong */,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ break;
+ }
+}
+
+static inline void _odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t new_val)
+{
+ uint32_t old_val;
+
+ old_val = __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
+
+ while (new_val < old_val) {
+ if (__atomic_compare_exchange_n(&atom->v, &old_val, new_val, 0 /* strong */,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ break;
+ }
+}
+
+static inline void _odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+static inline void _odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+static inline void _odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_inc_u64(odp_atomic_u64_t *atom)
+{
+ (void)__atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_dec_u64(odp_atomic_u64_t *atom)
+{
+ (void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+static inline void _odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_val)
+{
+ uint64_t old_val;
+
+ old_val = __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
+
+ while (new_val > old_val) {
+ if (__atomic_compare_exchange_n(&atom->v, &old_val, new_val, 0 /* strong */,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ break;
+ }
+}
+
+static inline void _odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_val)
+{
+ uint64_t old_val;
+
+ old_val = __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
+
+ while (new_val < old_val) {
+ if (__atomic_compare_exchange_n(&atom->v, &old_val, new_val, 0 /* strong */,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED))
+ break;
+ }
+}
+
+#ifndef ODP_ATOMIC_U64_LOCK
+static inline void _odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+static inline void _odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
+}
+#endif
+
+#ifdef __SIZEOF_INT128__
+
+static inline void _odp_atomic_init_u128(odp_atomic_u128_t *atom, odp_u128_t val)
+{
+ atom->v = val;
+}
+
+static inline odp_u128_t _odp_atomic_load_u128(odp_atomic_u128_t *atom)
+{
+ union {
+ odp_u128_t val;
+ __int128_t i;
+ } u;
+
+ u.i = __atomic_load_n((__int128_t *)&atom->v, __ATOMIC_RELAXED);
+ return u.val;
+}
+
+static inline void _odp_atomic_store_u128(odp_atomic_u128_t *atom, odp_u128_t val)
+{
+ __atomic_store_n((__int128_t *)&atom->v, *(__int128_t *)&val, __ATOMIC_RELAXED);
+}
+
+static inline int _odp_atomic_cas_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return __atomic_compare_exchange_n((__int128_t *)&atom->v, (__int128_t *)old_val,
+ *(__int128_t *)&new_val, 0 /* strong */,
+ __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+}
+
+static inline int _odp_atomic_cas_acq_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return __atomic_compare_exchange_n((__int128_t *)&atom->v, (__int128_t *)old_val,
+ *(__int128_t *)&new_val, 0 /* strong */,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);
+}
+
+static inline int _odp_atomic_cas_rel_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return __atomic_compare_exchange_n((__int128_t *)&atom->v, (__int128_t *)old_val,
+ *(__int128_t *)&new_val, 0 /* strong */,
+ __ATOMIC_RELEASE, __ATOMIC_RELAXED);
+}
+
+static inline int _odp_atomic_cas_acq_rel_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return __atomic_compare_exchange_n((__int128_t *)&atom->v, (__int128_t *)old_val,
+ *(__int128_t *)&new_val, 0 /* strong */,
+ __ATOMIC_ACQ_REL, __ATOMIC_RELAXED);
+}
+
+#else /* Lock-based implementation */
+
+/**
+ * @internal
+ * 128 bit store operation expression for the ATOMIC_OP macro
+ */
+#define ATOMIC_STORE_OP_128(new_val) \
+({ \
+ (_atom)->v = (new_val); \
+})
+
+/**
+ * @internal
+ * 128 bit CAS operation expression for the ATOMIC_OP macro
+ */
+#define ATOMIC_CAS_OP_128(ret_ptr, old_val, new_val) \
+__extension__ ({ \
+ int *_ret_ptr = ret_ptr; \
+ odp_u128_t *_cas_old = old_val; \
+ odp_u128_t _cas_new = new_val; \
+ if (((_atom)->v.u64[0] == (_cas_old)->u64[0]) && \
+ ((_atom)->v.u64[1] == (_cas_old)->u64[1])) { \
+ (_atom)->v = (_cas_new); \
+ *(_ret_ptr) = 1; \
+ } else { \
+ *(_ret_ptr) = 0; \
+ } \
+})
+
+/**
+ * @internal
+ * Helper macro for lock-based atomic operations on 128-bit integers
+ * @param[in,out] atom Pointer to the 128-bit atomic variable
+ * @param expr Expression used update the variable.
+ * @return The old value of the variable.
+ */
+#define ATOMIC_OP_128(atom, expr) \
+__extension__ ({ \
+ odp_u128_t _old_val; \
+ odp_atomic_u128_t *_atom = atom; \
+ /* Loop while lock is already taken, stop when lock becomes clear */ \
+ while (__atomic_test_and_set(&(_atom)->lock, __ATOMIC_ACQUIRE)) \
+ (void)0; \
+ _old_val = (_atom)->v; \
+ (expr); /* Perform whatever update is desired */ \
+ __atomic_clear(&(_atom)->lock, __ATOMIC_RELEASE); \
+ _old_val; /* Return old value */ \
+})
+
+static inline void _odp_atomic_init_u128(odp_atomic_u128_t *atom, odp_u128_t val)
+{
+ atom->v.u64[0] = val.u64[0];
+ atom->v.u64[1] = val.u64[1];
+ atom->lock = 0;
+}
+
+static inline odp_u128_t _odp_atomic_load_u128(odp_atomic_u128_t *atom)
+{
+ return ATOMIC_OP_128(atom, (void)0);
+}
+
+static inline void _odp_atomic_store_u128(odp_atomic_u128_t *atom, odp_u128_t val)
+{
+ ATOMIC_OP_128(atom, ATOMIC_STORE_OP_128(val));
+}
+
+static inline int _odp_atomic_cas_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ int ret;
+
+ *old_val = ATOMIC_OP_128(atom, ATOMIC_CAS_OP_128(&ret, old_val, new_val));
+ return ret;
+}
+
+static inline int _odp_atomic_cas_acq_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return _odp_atomic_cas_u128(atom, old_val, new_val);
+}
+
+static inline int _odp_atomic_cas_rel_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return _odp_atomic_cas_u128(atom, old_val, new_val);
+}
+
+static inline int _odp_atomic_cas_acq_rel_u128(odp_atomic_u128_t *atom, odp_u128_t *old_val,
+ odp_u128_t new_val)
+{
+ return _odp_atomic_cas_u128(atom, old_val, new_val);
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/atomic_inlines.h b/platform/linux-generic/arch/default/odp/api/abi/atomic_inlines.h
new file mode 100644
index 000000000..f1072d11f
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/atomic_inlines.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi/atomic_generic.h>
diff --git a/platform/linux-generic/arch/default/odp/api/abi/cpu.h b/platform/linux-generic/arch/default/odp/api/abi/cpu.h
new file mode 100644
index 000000000..e09efdfcf
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/cpu.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ODP_CACHE_LINE_SIZE 64
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/cpu_generic.h b/platform/linux-generic/arch/default/odp/api/abi/cpu_generic.h
new file mode 100644
index 000000000..b75e65717
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/cpu_generic.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_GENERIC_H_
+#define ODP_API_ABI_CPU_GENERIC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+uint64_t _odp_cpu_cycles(void);
+int _odp_cpu_cycles_init_global(void);
+
+static inline uint64_t _odp_cpu_cycles_max(void)
+{
+ return UINT64_MAX;
+}
+
+static inline uint64_t _odp_cpu_cycles_resolution(void)
+{
+ return 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/cpu_inlines.h b/platform/linux-generic/arch/default/odp/api/abi/cpu_inlines.h
new file mode 100644
index 000000000..54aeae946
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/cpu_inlines.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ARCH_CPU_INLINES_H_
+#define ODP_ARCH_CPU_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void _odp_cpu_pause(void)
+{
+}
+
+#include <odp/api/abi/cpu_generic.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/hash_crc32.h b/platform/linux-generic/arch/default/odp/api/abi/hash_crc32.h
new file mode 100644
index 000000000..8759ed948
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/hash_crc32.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_HASH_CRC32_H_
+#define ODP_API_ABI_HASH_CRC32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+uint32_t _odp_hash_crc32_generic(const void *data, uint32_t data_len,
+ uint32_t init_val);
+uint32_t _odp_hash_crc32c_generic(const void *data, uint32_t data_len,
+ uint32_t init_val);
+
+static inline uint32_t _odp_hash_crc32(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ return _odp_hash_crc32_generic(data, data_len, init_val);
+}
+
+static inline uint32_t _odp_hash_crc32c(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ return _odp_hash_crc32c_generic(data, data_len, init_val);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/sync_inlines.h b/platform/linux-generic/arch/default/odp/api/abi/sync_inlines.h
new file mode 100644
index 000000000..bfbb3039f
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/sync_inlines.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_ARCH_SYNC_INLINES_H_
+#define ODP_ARCH_SYNC_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void _odp_mb_sync(void)
+{
+ __sync_synchronize();
+}
+
+static inline void _odp_mb_sync_load(void)
+{
+ __sync_synchronize();
+}
+
+static inline void _odp_mb_sync_store(void)
+{
+ __sync_synchronize();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h b/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h
new file mode 100644
index 000000000..ed0ffdb3f
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ARCH_TIME_INLINES_H_
+#define ODP_ARCH_TIME_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/time_types.h>
+
+#include <stdint.h>
+
+odp_time_t _odp_time_cur(void);
+uint64_t _odp_time_res(void);
+void _odp_time_startup(odp_time_startup_t *startup);
+
+static inline odp_time_t _odp_time_cur_strict(void)
+{
+ return _odp_time_cur();
+}
+
+static inline uint64_t _odp_time_to_ns(odp_time_t time)
+{
+ return time.nsec;
+}
+
+static inline odp_time_t _odp_time_from_ns(uint64_t ns)
+{
+ odp_time_t time;
+
+ time.nsec = ns;
+
+ return time;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/abi/wait_until.h b/platform/linux-generic/arch/default/odp/api/abi/wait_until.h
new file mode 100644
index 000000000..35e8d2566
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/wait_until.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 ARM Limited
+ */
+
+#include <odp/api/abi/wait_until_generic.h>
diff --git a/platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h b/platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h
new file mode 100644
index 000000000..3d3fce175
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 ARM Limited
+ */
+
+#ifndef ODP_API_ABI_WAIT_UNTIL_GENERIC_H_
+#define ODP_API_ABI_WAIT_UNTIL_GENERIC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/atomic.h>
+
+static inline void
+_odp_wait_until_equal_acq_u32(odp_atomic_u32_t *addr, uint32_t expected)
+{
+ while (odp_atomic_load_acq_u32(addr) != expected)
+ odp_cpu_pause();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp/api/cpu_arch.h b/platform/linux-generic/arch/default/odp/api/cpu_arch.h
deleted file mode 100644
index 22b1da2dd..000000000
--- a/platform/linux-generic/arch/default/odp/api/cpu_arch.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_CPU_ARCH_H_
-#define ODP_PLAT_CPU_ARCH_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define _ODP_CACHE_LINE_SIZE 64
-
-static inline void odp_cpu_pause(void)
-{
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/odp_atomic.c b/platform/linux-generic/arch/default/odp_atomic.c
index 0e40cda51..36fc5e8ea 100644
--- a/platform/linux-generic/odp_atomic.c
+++ b/platform/linux-generic/arch/default/odp_atomic.c
@@ -1,13 +1,11 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, ARM Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/atomic.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/atomic_inlines.h>
-#endif
int odp_atomic_lock_free_u64(odp_atomic_op_t *atomic_op)
{
@@ -27,3 +25,23 @@ int odp_atomic_lock_free_u64(odp_atomic_op_t *atomic_op)
return 2;
#endif
}
+
+int odp_atomic_lock_free_u128(odp_atomic_op_t *atomic_op)
+{
+#ifdef __SIZEOF_INT128__
+ if (__atomic_is_lock_free(16, NULL)) {
+ if (atomic_op) {
+ atomic_op->all_bits = 0;
+ atomic_op->op.load = 1;
+ atomic_op->op.store = 1;
+ atomic_op->op.cas = 1;
+ }
+ return 2;
+ }
+#endif
+ /* All operations have locks */
+ if (atomic_op)
+ atomic_op->all_bits = 0;
+
+ return 0;
+}
diff --git a/platform/linux-generic/arch/default/odp_atomic.h b/platform/linux-generic/arch/default/odp_atomic.h
new file mode 100644
index 000000000..4cfc6b4bd
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_atomic.h
@@ -0,0 +1,114 @@
+/* Copyright (c) 2021, ARM Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_DEFAULT_ATOMIC_H_
+#define ODP_DEFAULT_ATOMIC_H_
+
+#include <odp_types_internal.h>
+
+#ifdef __SIZEOF_INT128__
+
+static inline _odp_u128_t lockfree_load_u128(_odp_u128_t *atomic)
+{
+ return __atomic_load_n(atomic, __ATOMIC_RELAXED);
+}
+
+static inline int lockfree_cas_acq_rel_u128(_odp_u128_t *atomic,
+ _odp_u128_t old_val,
+ _odp_u128_t new_val)
+{
+ return __atomic_compare_exchange_n(atomic, &old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_RELAXED);
+}
+
+static inline int lockfree_check_u128(void)
+{
+ return __atomic_is_lock_free(16, NULL);
+}
+
+#endif
+
+#include <limits.h>
+
+/** Atomic bit set operations with memory ordering */
+#if __GCC_ATOMIC_LLONG_LOCK_FREE == 2 && \
+ __SIZEOF_LONG_LONG__ != __SIZEOF_LONG__
+typedef unsigned long long bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_LONG_LONG__)
+
+#elif __GCC_ATOMIC_LONG_LOCK_FREE == 2 && __SIZEOF_LONG__ != __SIZEOF_INT__
+typedef unsigned long bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_LONG__)
+
+#elif __GCC_ATOMIC_INT_LOCK_FREE == 2
+typedef unsigned int bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_INT__)
+
+#else
+/* Target does not support lock-free atomic operations */
+typedef unsigned int bitset_t;
+#define ATOM_BITSET_SIZE (CHAR_BIT * __SIZEOF_INT__)
+#endif
+
+#if ATOM_BITSET_SIZE <= 32
+
+static inline bitset_t bitset_mask(uint32_t bit)
+{
+ return 1UL << bit;
+}
+
+#elif ATOM_BITSET_SIZE <= 64
+
+static inline bitset_t bitset_mask(uint32_t bit)
+{
+ return 1ULL << bit;
+}
+
+#elif ATOM_BITSET_SIZE <= 128
+
+static inline bitset_t bitset_mask(uint32_t bit)
+{
+ if (bit < 64)
+ return 1ULL << bit;
+ else
+ return (_odp_u128_t)(1ULL << (bit - 64)) << 64;
+}
+
+#else
+#error Unsupported size of bit sets (ATOM_BITSET_SIZE)
+#endif
+
+static inline bitset_t atom_bitset_load(bitset_t *bs, int mo)
+{
+ return __atomic_load_n(bs, mo);
+}
+
+static inline void atom_bitset_set(bitset_t *bs, uint32_t bit, int mo)
+{
+ (void)__atomic_fetch_or(bs, bitset_mask(bit), mo);
+}
+
+static inline void atom_bitset_clr(bitset_t *bs, uint32_t bit, int mo)
+{
+ (void)__atomic_fetch_and(bs, ~bitset_mask(bit), mo);
+}
+
+static inline bitset_t atom_bitset_xchg(bitset_t *bs, bitset_t neu, int mo)
+{
+ return __atomic_exchange_n(bs, neu, mo);
+}
+
+static inline bitset_t atom_bitset_cmpxchg(bitset_t *bs, bitset_t *old,
+ bitset_t neu, bool weak,
+ int mo_success, int mo_failure)
+{
+ return __atomic_compare_exchange_n(bs, old, neu, weak, mo_success,
+ mo_failure);
+}
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp_cpu.h b/platform/linux-generic/arch/default/odp_cpu.h
new file mode 100644
index 000000000..6b10966c6
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_cpu.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_DEFAULT_CPU_H_
+#define ODP_DEFAULT_CPU_H_
+
+#ifndef _ODP_UNALIGNED
+#define _ODP_UNALIGNED 0
+#endif
+
+/******************************************************************************
+ * Atomics
+ *****************************************************************************/
+
+#define atomic_store_release(loc, val, ro) \
+ __atomic_store_n(loc, val, __ATOMIC_RELEASE)
+
+#include "odp_atomic.h"
+#include "odp_wait_until.h"
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp_cpu_arch.c b/platform/linux-generic/arch/default/odp_cpu_arch.c
deleted file mode 100644
index 2ac223e07..000000000
--- a/platform/linux-generic/arch/default/odp_cpu_arch.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_posix_extensions.h>
-
-#include <stdlib.h>
-#include <time.h>
-
-#include <odp/api/cpu.h>
-#include <odp/api/hints.h>
-#include <odp/api/system_info.h>
-#include <odp_debug_internal.h>
-
-#define GIGA 1000000000
-
-uint64_t odp_cpu_cycles(void)
-{
- struct timespec time;
- uint64_t sec, ns, hz, cycles;
- int ret;
-
- ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
-
- if (ret != 0)
- ODP_ABORT("clock_gettime failed\n");
-
- hz = odp_cpu_hz_max();
- sec = (uint64_t)time.tv_sec;
- ns = (uint64_t)time.tv_nsec;
-
- cycles = sec * hz;
- cycles += (ns * hz) / GIGA;
-
- return cycles;
-}
-
-uint64_t odp_cpu_cycles_max(void)
-{
- return UINT64_MAX;
-}
-
-uint64_t odp_cpu_cycles_resolution(void)
-{
- return 1;
-}
diff --git a/platform/linux-generic/arch/arm/odp_cpu_arch.c b/platform/linux-generic/arch/default/odp_cpu_cycles.c
index 2ac223e07..41436a672 100644
--- a/platform/linux-generic/arch/arm/odp_cpu_arch.c
+++ b/platform/linux-generic/arch/default/odp_cpu_cycles.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,17 +7,19 @@
#include <odp_posix_extensions.h>
+#include <stdint.h>
#include <stdlib.h>
#include <time.h>
-#include <odp/api/cpu.h>
-#include <odp/api/hints.h>
-#include <odp/api/system_info.h>
#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
#define GIGA 1000000000
-uint64_t odp_cpu_cycles(void)
+#include <odp/api/abi/cpu_generic.h>
+
+uint64_t _odp_cpu_cycles(void)
{
struct timespec time;
uint64_t sec, ns, hz, cycles;
@@ -25,9 +28,10 @@ uint64_t odp_cpu_cycles(void)
ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
if (ret != 0)
- ODP_ABORT("clock_gettime failed\n");
+ _ODP_ABORT("clock_gettime failed\n");
+
+ hz = odp_global_ro.system_info.cpu_hz_max[0];
- hz = odp_cpu_hz_max();
sec = (uint64_t)time.tv_sec;
ns = (uint64_t)time.tv_nsec;
@@ -37,12 +41,7 @@ uint64_t odp_cpu_cycles(void)
return cycles;
}
-uint64_t odp_cpu_cycles_max(void)
-{
- return UINT64_MAX;
-}
-
-uint64_t odp_cpu_cycles_resolution(void)
+int _odp_cpu_cycles_init_global(void)
{
- return 1;
+ return 0;
}
diff --git a/platform/linux-generic/arch/default/odp_hash_crc32.c b/platform/linux-generic/arch/default/odp_hash_crc32.c
new file mode 100644
index 000000000..f71c11909
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_hash_crc32.c
@@ -0,0 +1,496 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/std_types.h>
+
+#include <odp/api/abi/hash_crc32.h>
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Table generated with odp_hash_crc_gen64() */
+static const uint32_t crc32_table[256] ODP_ALIGNED_CACHE = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba,
+ 0x076dc419, 0x706af48f, 0xe963a535, 0x9e6495a3,
+ 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91,
+ 0x1db71064, 0x6ab020f2, 0xf3b97148, 0x84be41de,
+ 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec,
+ 0x14015c4f, 0x63066cd9, 0xfa0f3d63, 0x8d080df5,
+ 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b,
+ 0x35b5a8fa, 0x42b2986c, 0xdbbbc9d6, 0xacbcf940,
+ 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116,
+ 0x21b4f4b5, 0x56b3c423, 0xcfba9599, 0xb8bda50f,
+ 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d,
+ 0x76dc4190, 0x01db7106, 0x98d220bc, 0xefd5102a,
+ 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818,
+ 0x7f6a0dbb, 0x086d3d2d, 0x91646c97, 0xe6635c01,
+ 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457,
+ 0x65b0d9c6, 0x12b7e950, 0x8bbeb8ea, 0xfcb9887c,
+ 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2,
+ 0x4adfa541, 0x3dd895d7, 0xa4d1c46d, 0xd3d6f4fb,
+ 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9,
+ 0x5005713c, 0x270241aa, 0xbe0b1010, 0xc90c2086,
+ 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4,
+ 0x59b33d17, 0x2eb40d81, 0xb7bd5c3b, 0xc0ba6cad,
+ 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683,
+ 0xe3630b12, 0x94643b84, 0x0d6d6a3e, 0x7a6a5aa8,
+ 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe,
+ 0xf762575d, 0x806567cb, 0x196c3671, 0x6e6b06e7,
+ 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5,
+ 0xd6d6a3e8, 0xa1d1937e, 0x38d8c2c4, 0x4fdff252,
+ 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60,
+ 0xdf60efc3, 0xa867df55, 0x316e8eef, 0x4669be79,
+ 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f,
+ 0xc5ba3bbe, 0xb2bd0b28, 0x2bb45a92, 0x5cb36a04,
+ 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a,
+ 0x9c0906a9, 0xeb0e363f, 0x72076785, 0x05005713,
+ 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21,
+ 0x86d3d2d4, 0xf1d4e242, 0x68ddb3f8, 0x1fda836e,
+ 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c,
+ 0x8f659eff, 0xf862ae69, 0x616bffd3, 0x166ccf45,
+ 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db,
+ 0xaed16a4a, 0xd9d65adc, 0x40df0b66, 0x37d83bf0,
+ 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6,
+ 0xbad03605, 0xcdd70693, 0x54de5729, 0x23d967bf,
+ 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+static const uint32_t crc32c_tables[8][256] = {{
+ 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
+ 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
+ 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
+ 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
+ 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
+ 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
+ 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
+ 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
+ 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
+ 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
+ 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
+ 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
+ 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
+ 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
+ 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
+ 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
+ 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
+ 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
+ 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
+ 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
+ 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
+ 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
+ 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
+ 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
+ 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
+ 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
+ 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
+ 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
+ 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
+ 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
+ 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
+ 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
+},
+{
+ 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
+ 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
+ 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
+ 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
+ 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
+ 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
+ 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
+ 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
+ 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
+ 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
+ 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
+ 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
+ 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
+ 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
+ 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
+ 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
+ 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
+ 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
+ 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
+ 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
+ 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
+ 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
+ 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
+ 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
+ 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
+ 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
+ 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
+ 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
+ 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
+ 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
+ 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
+ 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
+},
+{
+ 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
+ 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
+ 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
+ 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
+ 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
+ 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
+ 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
+ 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
+ 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
+ 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
+ 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
+ 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
+ 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
+ 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
+ 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
+ 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
+ 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
+ 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
+ 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
+ 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
+ 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
+ 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
+ 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
+ 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
+ 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
+ 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
+ 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
+ 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
+ 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
+ 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
+ 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
+ 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
+},
+{
+ 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
+ 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
+ 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
+ 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
+ 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
+ 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
+ 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
+ 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
+ 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
+ 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
+ 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
+ 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
+ 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
+ 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
+ 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
+ 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
+ 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
+ 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
+ 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
+ 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
+ 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
+ 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
+ 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
+ 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
+ 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
+ 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
+ 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
+ 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
+ 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
+ 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
+ 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
+ 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
+},
+{
+ 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
+ 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
+ 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
+ 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
+ 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
+ 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
+ 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
+ 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
+ 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
+ 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
+ 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
+ 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
+ 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
+ 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
+ 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
+ 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
+ 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
+ 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
+ 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
+ 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
+ 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
+ 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
+ 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
+ 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
+ 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
+ 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
+ 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
+ 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
+ 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
+ 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
+ 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
+ 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
+},
+{
+ 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
+ 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
+ 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
+ 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
+ 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
+ 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
+ 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
+ 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
+ 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
+ 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
+ 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
+ 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
+ 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
+ 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
+ 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
+ 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
+ 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
+ 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
+ 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
+ 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
+ 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
+ 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
+ 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
+ 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
+ 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
+ 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
+ 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
+ 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
+ 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
+ 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
+ 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
+ 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
+},
+{
+ 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
+ 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
+ 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
+ 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
+ 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
+ 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
+ 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
+ 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
+ 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
+ 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
+ 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
+ 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
+ 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
+ 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
+ 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
+ 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
+ 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
+ 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
+ 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
+ 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
+ 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
+ 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
+ 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
+ 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
+ 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
+ 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
+ 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
+ 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
+ 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
+ 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
+ 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
+ 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
+},
+{
+ 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
+ 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
+ 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
+ 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
+ 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
+ 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
+ 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
+ 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
+ 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
+ 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
+ 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
+ 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
+ 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
+ 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
+ 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
+ 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
+ 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
+ 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
+ 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
+ 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
+ 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
+ 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
+ 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
+ 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
+ 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
+ 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
+ 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
+ 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
+ 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
+ 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
+ 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
+ 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
+} };
+
+#define CRC32_UPD(crc, n) \
+ (crc32c_tables[(n)][(crc) & 0xff] ^ \
+ crc32c_tables[(n) - 1][((crc) >> 8) & 0xff])
+
+static inline uint32_t crc32c_u8(uint8_t data, uint32_t init_val)
+{
+ uint32_t crc;
+
+ crc = init_val;
+ crc ^= data;
+
+ return crc32c_tables[0][crc & 0xff] ^ (crc >> 8);
+}
+
/* CRC32-C update for a 16-bit word using tables 1..0 */
static inline uint32_t crc32c_u16(uint16_t data, uint32_t init_val)
{
	uint32_t c = init_val ^ data;

	return CRC32_UPD(c, 1) ^ (c >> 16);
}
+
/* CRC32-C update for a 32-bit word using tables 3..0 */
static inline uint32_t crc32c_u32(uint32_t data, uint32_t init_val)
{
	uint32_t c = init_val ^ data;
	uint32_t low = CRC32_UPD(c, 3);
	uint32_t high = c >> 16;

	return low ^ CRC32_UPD(high, 1);
}
+
/*
 * CRC32-C update for a 64-bit word, processed as two 32-bit halves.
 *
 * The low half is folded with table levels 7..4 and the high half with
 * levels 3..0, which is equivalent to feeding all eight bytes through
 * the CRC one at a time (sliced-table scheme).
 *
 * NOTE(review): the u64/u32[2] union overlay assumes little-endian byte
 * order (u32[0] = least significant half) -- TODO confirm this matches
 * the table generation for big-endian targets.
 */
static inline uint32_t crc32c_u64(uint64_t data, uint32_t init_val)
{
	uint32_t crc, term1, term2;

	/* Union access is well-defined type punning in C */
	union {
		uint64_t u64;
		uint32_t u32[2];
	} d;

	d.u64 = data;

	crc = init_val;
	crc ^= d.u32[0];

	/* Fold the low 32 bits through tables 7..4 */
	term1 = CRC32_UPD(crc, 7);
	term2 = crc >> 16;
	crc = term1 ^ CRC32_UPD(term2, 5);
	/* Fold the high 32 bits through tables 3..0 */
	term1 = CRC32_UPD(d.u32[1], 3);
	term2 = d.u32[1] >> 16;
	crc ^= term1 ^ CRC32_UPD(term2, 1);

	return crc;
}
+
+#include <odp/visibility_begin.h>
+
+uint32_t _odp_hash_crc32_generic(const void *data_ptr, uint32_t data_len,
+ uint32_t init_val)
+{
+ uint32_t i, crc;
+ const uint8_t *byte = data_ptr;
+
+ crc = init_val;
+
+ for (i = 0; i < data_len; i++)
+ crc = crc32_table[(crc ^ byte[i]) & 0xff] ^ (crc >> 8);
+
+ return crc;
+}
+
/*
 * Table-driven CRC32-C over an arbitrary buffer.
 *
 * Consumes the data in 8-byte chunks, then handles the remaining
 * 4/2/1-byte tail based on the low bits of the length.
 *
 * NOTE(review): the wide dereferences below are type-punned and
 * potentially unaligned loads; this assumes the target tolerates such
 * accesses (true on the architectures this generic path targets) --
 * on strict-alignment/strict-aliasing builds memcpy-based loads would
 * be needed. TODO confirm.
 */
uint32_t _odp_hash_crc32c_generic(const void *data, uint32_t data_len,
				  uint32_t init_val)
{
	uint32_t i;
	uintptr_t pd = (uintptr_t)data;

	for (i = 0; i < data_len / 8; i++) {
		init_val = crc32c_u64(*(const uint64_t *)pd, init_val);
		pd += 8;
	}

	if (data_len & 0x4) {
		init_val = crc32c_u32(*(const uint32_t *)pd, init_val);
		pd += 4;
	}

	if (data_len & 0x2) {
		init_val = crc32c_u16(*(const uint16_t *)pd, init_val);
		pd += 2;
	}

	if (data_len & 0x1)
		init_val = crc32c_u8(*(const uint8_t *)pd, init_val);

	return init_val;
}
+
+#include <odp/visibility_end.h>
diff --git a/platform/linux-generic/arch/default/odp_random.c b/platform/linux-generic/arch/default/odp_random.c
new file mode 100644
index 000000000..18d2a45d2
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_random.c
@@ -0,0 +1,33 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_random.h>
+#include <odp/api/spec/random.h>
+
+#include <odp/visibility_begin.h>
+
/* Best random data quality available from the generic implementation:
 * only pseudo-random (BASIC) data is provided. */
odp_random_kind_t _odp_random_max_kind_generic(void)
{
	return ODP_RANDOM_BASIC;
}
+
/* True random data is not available from the generic implementation:
 * always fails with -1, matching _odp_random_max_kind_generic(). */
int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len)
{
	(void)len;
	(void)buf;

	return -1;
}
+
/* Cryptographic-quality random data is not available from the generic
 * implementation: always fails with -1. */
int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len)
{
	(void)len;
	(void)buf;

	return -1;
}
+
+#include <odp/visibility_end.h>
diff --git a/platform/linux-generic/arch/default/odp_random.h b/platform/linux-generic/arch/default/odp_random.h
new file mode 100644
index 000000000..215eb6d93
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_random.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_DEFAULT_RANDOM_H_
+#define ODP_DEFAULT_RANDOM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/random.h>
+
+#include <stdint.h>
+
+odp_random_kind_t _odp_random_max_kind_generic(void);
+int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len);
+int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len);
+
/* Inline wrapper used in non-ABI-compat mode: forwards to the generic
 * implementation. */
static inline odp_random_kind_t _odp_random_max_kind(void)
{
	return _odp_random_max_kind_generic();
}
+
/* Inline wrapper used in non-ABI-compat mode: forwards to the generic
 * implementation (which always fails with -1). */
static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
{
	return _odp_random_true_data_generic(buf, len);
}
+
/* Inline wrapper used in non-ABI-compat mode: forwards to the generic
 * implementation (which always fails with -1). */
static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
{
	return _odp_random_crypto_data_generic(buf, len);
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp_sysinfo_parse.c b/platform/linux-generic/arch/default/odp_sysinfo_parse.c
index 53e2aaeaf..11d33d576 100644
--- a/platform/linux-generic/arch/default/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/default/odp_sysinfo_parse.c
@@ -1,27 +1,22 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_internal.h>
-#include <odp_debug_internal.h>
-#include <string.h>
+#include <odp_global_data.h>
+#include <odp_sysinfo_internal.h>
-int cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
{
- int i;
-
- ODP_DBG("Warning: use dummy values for freq and model string\n");
- for (i = 0; i < MAX_CPU_NUMBER; i++) {
- sysinfo->cpu_hz_max[i] = 1400000000;
- strcpy(sysinfo->model_str[i], "UNKNOWN");
- }
+ return _odp_dummy_cpuinfo(sysinfo);
+}
- return 0;
+void _odp_sys_info_print_arch(void)
+{
}
-uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+uint64_t odp_cpu_arch_hz_current(int id ODP_UNUSED)
{
- return 0;
+ return odp_global_ro.system_info.default_cpu_hz;
}
diff --git a/platform/linux-generic/arch/default/odp_time.c b/platform/linux-generic/arch/default/odp_time.c
new file mode 100644
index 000000000..664a5deae
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_time.c
@@ -0,0 +1,112 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/time_types.h>
+
+#include <odp/api/abi/time_inlines.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+#include <time.h>
+
+#define YEAR_IN_SEC (365 * 24 * 3600)
+
+typedef struct _odp_time_global_t {
+ struct timespec start_time;
+ uint64_t start_time_ns;
+
+} _odp_time_global_t;
+
+_odp_time_global_t _odp_time_glob;
+
/* Convert a timespec to nanoseconds.
 * NOTE(review): assumes tv_sec * ODP_TIME_SEC_IN_NS fits in 64 bits
 * (holds for ~584 years of monotonic uptime); wrap headroom is checked
 * separately in _odp_time_init_global(). */
static inline uint64_t time_nsec(struct timespec *t)
{
	uint64_t nsec = (t->tv_sec * ODP_TIME_SEC_IN_NS) + t->tv_nsec;

	return nsec;
}
+
+#include <odp/visibility_begin.h>
+
+odp_time_t _odp_time_cur(void)
+{
+ int ret;
+ odp_time_t time;
+ struct timespec sys_time;
+
+ ret = clock_gettime(CLOCK_MONOTONIC_RAW, &sys_time);
+ if (odp_unlikely(ret != 0))
+ _ODP_ABORT("clock_gettime() failed\n");
+
+ time.nsec = time_nsec(&sys_time);
+
+ return time;
+}
+
+uint64_t _odp_time_res(void)
+{
+ int ret;
+ struct timespec tres;
+
+ ret = clock_getres(CLOCK_MONOTONIC_RAW, &tres);
+ if (odp_unlikely(ret != 0))
+ _ODP_ABORT("clock_getres() failed\n");
+
+ return ODP_TIME_SEC_IN_NS / (uint64_t)tres.tv_nsec;
+}
+
/* Report the instance startup time (captured by _odp_time_init_global())
 * in both odp_time_t and plain nanosecond form. */
void _odp_time_startup(odp_time_startup_t *startup)
{
	startup->global.nsec = _odp_time_glob.start_time_ns;
	startup->global_ns = _odp_time_glob.start_time_ns;
}
+
+#include <odp/visibility_end.h>
+
+int _odp_time_init_global(void)
+{
+ struct timespec *start_time;
+ uint64_t diff, years;
+ int ret = 0;
+ _odp_time_global_t *global = &_odp_time_glob;
+
+ memset(global, 0, sizeof(_odp_time_global_t));
+
+ start_time = &global->start_time;
+ start_time->tv_sec = 0;
+ start_time->tv_nsec = 0;
+
+ ret = clock_gettime(CLOCK_MONOTONIC_RAW, start_time);
+ if (ret)
+ _ODP_ERR("clock_gettime() failed: %d\n", ret);
+
+ global->start_time_ns = time_nsec(start_time);
+
+ diff = UINT64_MAX - global->start_time_ns;
+ years = (diff / ODP_TIME_SEC_IN_NS) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time in nsec would wrap in 10 years: %" PRIu64 "\n",
+ global->start_time_ns);
+ return -1;
+ }
+
+ return ret;
+}
+
/* Terminate the time module. Nothing to release; always succeeds. */
int _odp_time_term_global(void)
{
	return 0;
}
diff --git a/platform/linux-generic/arch/default/odp_wait_until.h b/platform/linux-generic/arch/default/odp_wait_until.h
new file mode 100644
index 000000000..c51f4355e
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp_wait_until.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2024 Nokia
+ */
+
+#ifndef ODP_DEFAULT_WAIT_UNTIL_H_
+#define ODP_DEFAULT_WAIT_UNTIL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/cpu_inlines.h>
+
+#include <stdint.h>
+
+/**
+ * The _odp_wait_until_eq_*() functions defined in this header are intended to
+ * be used only with the scalable scheduler and queue implementations. Even
+ * though these functions use standard non-atomic parameter types, the
+ * parameters must only be operated using atomic operations. If new functions
+ * are added to this file, they should use _odp_wait_until_equal_*() prefix and
+ * atomic parameter types.
+ */
+
/* Spin with relaxed loads until *val == expected. Per the note above,
 * writers must update *val with atomic operations. */
static inline void _odp_wait_until_eq_u32(uint32_t *val, uint32_t expected)
{
	while (__atomic_load_n(val, __ATOMIC_RELAXED) != expected)
		odp_cpu_pause();
}
+
/* Spin with relaxed loads until *val == expected.
 * NOTE(review): bitset_t is not declared by this header's includes --
 * presumably provided by the scalable scheduler headers that include
 * this file; verify against callers. */
static inline void _odp_wait_until_eq_bitset(bitset_t *val, bitset_t expected)
{
	while (__atomic_load_n(val, __ATOMIC_RELAXED) != expected)
		odp_cpu_pause();
}
+
/* Spin until *val == expected with acquire ordering, so reads after
 * this call observe writes made before the matching release store. */
static inline void _odp_wait_until_eq_acq_u8(uint8_t *val, uint8_t expected)
{
	while (__atomic_load_n(val, __ATOMIC_ACQUIRE) != expected)
		odp_cpu_pause();
}
+
/* Spin until *val == expected with acquire ordering (32-bit variant of
 * _odp_wait_until_eq_acq_u8()). */
static inline void _odp_wait_until_eq_acq_u32(uint32_t *val, uint32_t expected)
{
	while (__atomic_load_n(val, __ATOMIC_ACQUIRE) != expected)
		odp_cpu_pause();
}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/mips64/odp/api/cpu_arch.h b/platform/linux-generic/arch/mips64/odp/api/cpu_arch.h
deleted file mode 100644
index 3582b129b..000000000
--- a/platform/linux-generic/arch/mips64/odp/api/cpu_arch.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_CPU_ARCH_H_
-#define ODP_PLAT_CPU_ARCH_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#if defined __OCTEON__
-#define _ODP_CACHE_LINE_SIZE 128
-#else
-#error Please add support for your arch in cpu_arch.h
-#endif
-
-static inline void odp_cpu_pause(void)
-{
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/arch/mips64/odp_cpu_arch.c b/platform/linux-generic/arch/mips64/odp_cpu_arch.c
deleted file mode 100644
index 646acf9c1..000000000
--- a/platform/linux-generic/arch/mips64/odp_cpu_arch.c
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/cpu.h>
-#include <odp/api/hints.h>
-#include <odp/api/system_info.h>
-
-uint64_t odp_cpu_cycles(void)
-{
- #define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
- #define CVMX_TMP_STR2(x) #x
- uint64_t cycle;
-
- __asm__ __volatile__ ("rdhwr %[rt],$" CVMX_TMP_STR(31) :
- [rt] "=d" (cycle) : : "memory");
-
- return cycle;
-}
-
-uint64_t odp_cpu_cycles_max(void)
-{
- return UINT64_MAX;
-}
-
-uint64_t odp_cpu_cycles_resolution(void)
-{
- return 1;
-}
diff --git a/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c b/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
deleted file mode 100644
index 407264b7f..000000000
--- a/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_internal.h>
-#include <string.h>
-
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
-{
- char str[1024];
- char *pos;
- double mhz = 0.0;
- uint64_t hz;
- int model = 0;
- int count = 2;
- int id = 0;
-
- strcpy(sysinfo->cpu_arch_str, "mips64");
- while (fgets(str, sizeof(str), file) != NULL && id < MAX_CPU_NUMBER) {
- if (!mhz) {
- pos = strstr(str, "BogoMIPS");
-
- if (pos)
- if (sscanf(pos, "BogoMIPS : %lf", &mhz) == 1) {
- /* bogomips seems to be 2x freq */
- hz = (uint64_t)(mhz * 1000000.0 / 2.0);
- sysinfo->cpu_hz_max[id] = hz;
- count--;
- }
- }
-
- if (!model) {
- pos = strstr(str, "cpu model");
-
- if (pos) {
- int len;
-
- pos = strchr(str, ':');
- strncpy(sysinfo->model_str[id], pos + 2,
- sizeof(sysinfo->model_str[id]) - 1);
- len = strlen(sysinfo->model_str[id]);
- sysinfo->model_str[id][len - 1] = 0;
- model = 1;
- count--;
- }
- }
-
- if (count == 0) {
- mhz = 0.0;
- model = 0;
- count = 2;
- id++;
- }
- }
-
- return 0;
-}
-
-uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
-{
- return 0;
-}
diff --git a/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h b/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h
new file mode 100644
index 000000000..ecf56e82e
--- /dev/null
+++ b/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ODP_CACHE_LINE_SIZE 128
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
diff --git a/platform/linux-generic/arch/powerpc/odp/api/cpu_arch.h b/platform/linux-generic/arch/powerpc/odp/api/cpu_arch.h
deleted file mode 100644
index 22b1da2dd..000000000
--- a/platform/linux-generic/arch/powerpc/odp/api/cpu_arch.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_CPU_ARCH_H_
-#define ODP_PLAT_CPU_ARCH_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define _ODP_CACHE_LINE_SIZE 64
-
-static inline void odp_cpu_pause(void)
-{
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/arch/powerpc/odp_cpu_arch.c b/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
deleted file mode 100644
index 2ac223e07..000000000
--- a/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_posix_extensions.h>
-
-#include <stdlib.h>
-#include <time.h>
-
-#include <odp/api/cpu.h>
-#include <odp/api/hints.h>
-#include <odp/api/system_info.h>
-#include <odp_debug_internal.h>
-
-#define GIGA 1000000000
-
-uint64_t odp_cpu_cycles(void)
-{
- struct timespec time;
- uint64_t sec, ns, hz, cycles;
- int ret;
-
- ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
-
- if (ret != 0)
- ODP_ABORT("clock_gettime failed\n");
-
- hz = odp_cpu_hz_max();
- sec = (uint64_t)time.tv_sec;
- ns = (uint64_t)time.tv_nsec;
-
- cycles = sec * hz;
- cycles += (ns * hz) / GIGA;
-
- return cycles;
-}
-
-uint64_t odp_cpu_cycles_max(void)
-{
- return UINT64_MAX;
-}
-
-uint64_t odp_cpu_cycles_resolution(void)
-{
- return 1;
-}
diff --git a/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c b/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
index 3b88d55b6..2049cc42f 100644
--- a/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
@@ -1,13 +1,14 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_internal.h>
+#include <odp_global_data.h>
+#include <odp_sysinfo_internal.h>
#include <string.h>
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
{
char str[1024];
char *pos;
@@ -17,8 +18,12 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
int count = 2;
int id = 0;
+ sysinfo->cpu_arch = ODP_CPU_ARCH_PPC;
+ sysinfo->cpu_isa_sw.ppc = ODP_CPU_ARCH_PPC_UNKNOWN;
+ sysinfo->cpu_isa_hw.ppc = ODP_CPU_ARCH_PPC_UNKNOWN;
+
strcpy(sysinfo->cpu_arch_str, "powerpc");
- while (fgets(str, sizeof(str), file) != NULL && id < MAX_CPU_NUMBER) {
+ while (fgets(str, sizeof(str), file) != NULL && id < CONFIG_NUM_CPU_IDS) {
if (!mhz) {
pos = strstr(str, "clock");
@@ -38,7 +43,7 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
pos = strchr(str, ':');
strncpy(sysinfo->model_str[id], pos + 2,
- sizeof(sysinfo->model_str[id]) - 1);
+ MODEL_STR_SIZE - 1);
len = strlen(sysinfo->model_str[id]);
sysinfo->model_str[id][len - 1] = 0;
model = 1;
@@ -57,7 +62,11 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
return 0;
}
-uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+void _odp_sys_info_print_arch(void)
{
- return 0;
+}
+
+uint64_t odp_cpu_arch_hz_current(int id ODP_UNUSED)
+{
+ return odp_global_ro.system_info.default_cpu_hz;
}
diff --git a/platform/linux-generic/arch/x86/cpu_flags.c b/platform/linux-generic/arch/x86/cpu_flags.c
new file mode 100644
index 000000000..9211df002
--- /dev/null
+++ b/platform/linux-generic/arch/x86/cpu_flags.c
@@ -0,0 +1,378 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
+ */
+
+#include "cpu_flags.h"
+
+#include <odp/api/abi/time_cpu.h>
+
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+
+#include <cpuid.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+
+enum rte_cpu_flag_t {
+ /* (EAX 01h) ECX features*/
+ RTE_CPUFLAG_SSE3 = 0, /**< SSE3 */
+ RTE_CPUFLAG_PCLMULQDQ, /**< PCLMULQDQ */
+ RTE_CPUFLAG_DTES64, /**< DTES64 */
+ RTE_CPUFLAG_MONITOR, /**< MONITOR */
+ RTE_CPUFLAG_DS_CPL, /**< DS_CPL */
+ RTE_CPUFLAG_VMX, /**< VMX */
+ RTE_CPUFLAG_SMX, /**< SMX */
+ RTE_CPUFLAG_EIST, /**< EIST */
+ RTE_CPUFLAG_TM2, /**< TM2 */
+ RTE_CPUFLAG_SSSE3, /**< SSSE3 */
+ RTE_CPUFLAG_CNXT_ID, /**< CNXT_ID */
+ RTE_CPUFLAG_FMA, /**< FMA */
+ RTE_CPUFLAG_CMPXCHG16B, /**< CMPXCHG16B */
+ RTE_CPUFLAG_XTPR, /**< XTPR */
+ RTE_CPUFLAG_PDCM, /**< PDCM */
+ RTE_CPUFLAG_PCID, /**< PCID */
+ RTE_CPUFLAG_DCA, /**< DCA */
+ RTE_CPUFLAG_SSE4_1, /**< SSE4_1 */
+ RTE_CPUFLAG_SSE4_2, /**< SSE4_2 */
+ RTE_CPUFLAG_X2APIC, /**< X2APIC */
+ RTE_CPUFLAG_MOVBE, /**< MOVBE */
+ RTE_CPUFLAG_POPCNT, /**< POPCNT */
+ RTE_CPUFLAG_TSC_DEADLINE, /**< TSC_DEADLINE */
+ RTE_CPUFLAG_AES, /**< AES */
+ RTE_CPUFLAG_XSAVE, /**< XSAVE */
+ RTE_CPUFLAG_OSXSAVE, /**< OSXSAVE */
+ RTE_CPUFLAG_AVX, /**< AVX */
+ RTE_CPUFLAG_F16C, /**< F16C */
+ RTE_CPUFLAG_RDRAND, /**< RDRAND */
+ RTE_CPUFLAG_HYPERVISOR, /**< Running in a VM */
+
+ /* (EAX 01h) EDX features */
+ RTE_CPUFLAG_FPU, /**< FPU */
+ RTE_CPUFLAG_VME, /**< VME */
+ RTE_CPUFLAG_DE, /**< DE */
+ RTE_CPUFLAG_PSE, /**< PSE */
+ RTE_CPUFLAG_TSC, /**< TSC */
+ RTE_CPUFLAG_MSR, /**< MSR */
+ RTE_CPUFLAG_PAE, /**< PAE */
+ RTE_CPUFLAG_MCE, /**< MCE */
+ RTE_CPUFLAG_CX8, /**< CX8 */
+ RTE_CPUFLAG_APIC, /**< APIC */
+ RTE_CPUFLAG_SEP, /**< SEP */
+ RTE_CPUFLAG_MTRR, /**< MTRR */
+ RTE_CPUFLAG_PGE, /**< PGE */
+ RTE_CPUFLAG_MCA, /**< MCA */
+ RTE_CPUFLAG_CMOV, /**< CMOV */
+ RTE_CPUFLAG_PAT, /**< PAT */
+ RTE_CPUFLAG_PSE36, /**< PSE36 */
+ RTE_CPUFLAG_PSN, /**< PSN */
+ RTE_CPUFLAG_CLFSH, /**< CLFSH */
+ RTE_CPUFLAG_DS, /**< DS */
+ RTE_CPUFLAG_ACPI, /**< ACPI */
+ RTE_CPUFLAG_MMX, /**< MMX */
+ RTE_CPUFLAG_FXSR, /**< FXSR */
+ RTE_CPUFLAG_SSE, /**< SSE */
+ RTE_CPUFLAG_SSE2, /**< SSE2 */
+ RTE_CPUFLAG_SS, /**< SS */
+ RTE_CPUFLAG_HTT, /**< HTT */
+ RTE_CPUFLAG_TM, /**< TM */
+ RTE_CPUFLAG_PBE, /**< PBE */
+
+ /* (EAX 06h) EAX features */
+ RTE_CPUFLAG_DIGTEMP, /**< DIGTEMP */
+ RTE_CPUFLAG_TRBOBST, /**< TRBOBST */
+ RTE_CPUFLAG_ARAT, /**< ARAT */
+ RTE_CPUFLAG_PLN, /**< PLN */
+ RTE_CPUFLAG_ECMD, /**< ECMD */
+ RTE_CPUFLAG_PTM, /**< PTM */
+
+ /* (EAX 06h) ECX features */
+ RTE_CPUFLAG_MPERF_APERF_MSR, /**< MPERF_APERF_MSR */
+ RTE_CPUFLAG_ACNT2, /**< ACNT2 */
+ RTE_CPUFLAG_ENERGY_EFF, /**< ENERGY_EFF */
+
+ /* (EAX 07h, ECX 0h) EBX features */
+ RTE_CPUFLAG_FSGSBASE, /**< FSGSBASE */
+ RTE_CPUFLAG_BMI1, /**< BMI1 */
+ RTE_CPUFLAG_HLE, /**< Hardware Lock elision */
+ RTE_CPUFLAG_AVX2, /**< AVX2 */
+ RTE_CPUFLAG_SMEP, /**< SMEP */
+ RTE_CPUFLAG_BMI2, /**< BMI2 */
+ RTE_CPUFLAG_ERMS, /**< ERMS */
+ RTE_CPUFLAG_INVPCID, /**< INVPCID */
+ RTE_CPUFLAG_RTM, /**< Transactional memory */
+ RTE_CPUFLAG_AVX512F, /**< AVX512F */
+ RTE_CPUFLAG_RDSEED, /**< RDSEED instruction */
+
+ /* (EAX 80000001h) ECX features */
+ RTE_CPUFLAG_LAHF_SAHF, /**< LAHF_SAHF */
+ RTE_CPUFLAG_LZCNT, /**< LZCNT */
+
+ /* (EAX 80000001h) EDX features */
+ RTE_CPUFLAG_SYSCALL, /**< SYSCALL */
+ RTE_CPUFLAG_XD, /**< XD */
+ RTE_CPUFLAG_1GB_PG, /**< 1GB_PG */
+ RTE_CPUFLAG_RDTSCP, /**< RDTSCP */
+ RTE_CPUFLAG_EM64T, /**< EM64T */
+
+ /* (EAX 80000007h) EDX features */
+ RTE_CPUFLAG_INVTSC, /**< INVTSC */
+
+ RTE_CPUFLAG_AVX512DQ, /**< AVX512 Doubleword and Quadword */
+ RTE_CPUFLAG_AVX512IFMA, /**< AVX512 Integer Fused Multiply-Add */
+ RTE_CPUFLAG_AVX512CD, /**< AVX512 Conflict Detection*/
+ RTE_CPUFLAG_AVX512BW, /**< AVX512 Byte and Word */
+ RTE_CPUFLAG_AVX512VL, /**< AVX512 Vector Length */
+ RTE_CPUFLAG_AVX512VBMI, /**< AVX512 Vector Bit Manipulation */
+ RTE_CPUFLAG_AVX512VBMI2, /**< AVX512 Vector Bit Manipulation 2 */
+ RTE_CPUFLAG_GFNI, /**< Galois Field New Instructions */
+ RTE_CPUFLAG_VAES, /**< Vector AES */
+ RTE_CPUFLAG_VPCLMULQDQ, /**< Vector Carry-less Multiply */
+ RTE_CPUFLAG_AVX512VNNI,
+ /**< AVX512 Vector Neural Network Instructions */
+ RTE_CPUFLAG_AVX512BITALG, /**< AVX512 Bit Algorithms */
+ RTE_CPUFLAG_AVX512VPOPCNTDQ, /**< AVX512 Vector Popcount */
+ RTE_CPUFLAG_CLDEMOTE, /**< Cache Line Demote */
+ RTE_CPUFLAG_MOVDIRI, /**< Direct Store Instructions */
+ RTE_CPUFLAG_MOVDIR64B, /**< Direct Store Instructions 64B */
+ RTE_CPUFLAG_AVX512VP2INTERSECT, /**< AVX512 Two Register Intersection */
+
+ RTE_CPUFLAG_WAITPKG, /**< UMONITOR/UMWAIT/TPAUSE */
+
+ /* The last item */
+ RTE_CPUFLAG_NUMFLAGS, /**< This should always be the last! */
+};
+
+enum cpu_register_t {
+ RTE_REG_EAX = 0,
+ RTE_REG_EBX,
+ RTE_REG_ECX,
+ RTE_REG_EDX,
+};
+
+typedef uint32_t cpuid_registers_t[4];
+
+/**
+ * Struct to hold a processor feature entry
+ */
+struct feature_entry {
+ uint32_t leaf; /**< cpuid leaf */
+ uint32_t subleaf; /**< cpuid subleaf */
+ uint32_t reg; /**< cpuid register */
+ uint32_t bit; /**< cpuid register bit */
+#define CPU_FLAG_NAME_MAX_LEN 64
+ char name[CPU_FLAG_NAME_MAX_LEN]; /**< String for printing */
+};
+
+#define FEAT_DEF(name, leaf, subleaf, reg, bit) \
+ [RTE_CPUFLAG_##name] = {leaf, subleaf, reg, bit, #name },
+
+static const struct feature_entry cpu_feature_table[] = {
+ FEAT_DEF(SSE3, 0x00000001, 0, RTE_REG_ECX, 0)
+ FEAT_DEF(PCLMULQDQ, 0x00000001, 0, RTE_REG_ECX, 1)
+ FEAT_DEF(DTES64, 0x00000001, 0, RTE_REG_ECX, 2)
+ FEAT_DEF(MONITOR, 0x00000001, 0, RTE_REG_ECX, 3)
+ FEAT_DEF(DS_CPL, 0x00000001, 0, RTE_REG_ECX, 4)
+ FEAT_DEF(VMX, 0x00000001, 0, RTE_REG_ECX, 5)
+ FEAT_DEF(SMX, 0x00000001, 0, RTE_REG_ECX, 6)
+ FEAT_DEF(EIST, 0x00000001, 0, RTE_REG_ECX, 7)
+ FEAT_DEF(TM2, 0x00000001, 0, RTE_REG_ECX, 8)
+ FEAT_DEF(SSSE3, 0x00000001, 0, RTE_REG_ECX, 9)
+ FEAT_DEF(CNXT_ID, 0x00000001, 0, RTE_REG_ECX, 10)
+ FEAT_DEF(FMA, 0x00000001, 0, RTE_REG_ECX, 12)
+ FEAT_DEF(CMPXCHG16B, 0x00000001, 0, RTE_REG_ECX, 13)
+ FEAT_DEF(XTPR, 0x00000001, 0, RTE_REG_ECX, 14)
+ FEAT_DEF(PDCM, 0x00000001, 0, RTE_REG_ECX, 15)
+ FEAT_DEF(PCID, 0x00000001, 0, RTE_REG_ECX, 17)
+ FEAT_DEF(DCA, 0x00000001, 0, RTE_REG_ECX, 18)
+ FEAT_DEF(SSE4_1, 0x00000001, 0, RTE_REG_ECX, 19)
+ FEAT_DEF(SSE4_2, 0x00000001, 0, RTE_REG_ECX, 20)
+ FEAT_DEF(X2APIC, 0x00000001, 0, RTE_REG_ECX, 21)
+ FEAT_DEF(MOVBE, 0x00000001, 0, RTE_REG_ECX, 22)
+ FEAT_DEF(POPCNT, 0x00000001, 0, RTE_REG_ECX, 23)
+ FEAT_DEF(TSC_DEADLINE, 0x00000001, 0, RTE_REG_ECX, 24)
+ FEAT_DEF(AES, 0x00000001, 0, RTE_REG_ECX, 25)
+ FEAT_DEF(XSAVE, 0x00000001, 0, RTE_REG_ECX, 26)
+ FEAT_DEF(OSXSAVE, 0x00000001, 0, RTE_REG_ECX, 27)
+ FEAT_DEF(AVX, 0x00000001, 0, RTE_REG_ECX, 28)
+ FEAT_DEF(F16C, 0x00000001, 0, RTE_REG_ECX, 29)
+ FEAT_DEF(RDRAND, 0x00000001, 0, RTE_REG_ECX, 30)
+ FEAT_DEF(HYPERVISOR, 0x00000001, 0, RTE_REG_ECX, 31)
+
+ FEAT_DEF(FPU, 0x00000001, 0, RTE_REG_EDX, 0)
+ FEAT_DEF(VME, 0x00000001, 0, RTE_REG_EDX, 1)
+ FEAT_DEF(DE, 0x00000001, 0, RTE_REG_EDX, 2)
+ FEAT_DEF(PSE, 0x00000001, 0, RTE_REG_EDX, 3)
+ FEAT_DEF(TSC, 0x00000001, 0, RTE_REG_EDX, 4)
+ FEAT_DEF(MSR, 0x00000001, 0, RTE_REG_EDX, 5)
+ FEAT_DEF(PAE, 0x00000001, 0, RTE_REG_EDX, 6)
+ FEAT_DEF(MCE, 0x00000001, 0, RTE_REG_EDX, 7)
+ FEAT_DEF(CX8, 0x00000001, 0, RTE_REG_EDX, 8)
+ FEAT_DEF(APIC, 0x00000001, 0, RTE_REG_EDX, 9)
+ FEAT_DEF(SEP, 0x00000001, 0, RTE_REG_EDX, 11)
+ FEAT_DEF(MTRR, 0x00000001, 0, RTE_REG_EDX, 12)
+ FEAT_DEF(PGE, 0x00000001, 0, RTE_REG_EDX, 13)
+ FEAT_DEF(MCA, 0x00000001, 0, RTE_REG_EDX, 14)
+ FEAT_DEF(CMOV, 0x00000001, 0, RTE_REG_EDX, 15)
+ FEAT_DEF(PAT, 0x00000001, 0, RTE_REG_EDX, 16)
+ FEAT_DEF(PSE36, 0x00000001, 0, RTE_REG_EDX, 17)
+ FEAT_DEF(PSN, 0x00000001, 0, RTE_REG_EDX, 18)
+ FEAT_DEF(CLFSH, 0x00000001, 0, RTE_REG_EDX, 19)
+ FEAT_DEF(DS, 0x00000001, 0, RTE_REG_EDX, 21)
+ FEAT_DEF(ACPI, 0x00000001, 0, RTE_REG_EDX, 22)
+ FEAT_DEF(MMX, 0x00000001, 0, RTE_REG_EDX, 23)
+ FEAT_DEF(FXSR, 0x00000001, 0, RTE_REG_EDX, 24)
+ FEAT_DEF(SSE, 0x00000001, 0, RTE_REG_EDX, 25)
+ FEAT_DEF(SSE2, 0x00000001, 0, RTE_REG_EDX, 26)
+ FEAT_DEF(SS, 0x00000001, 0, RTE_REG_EDX, 27)
+ FEAT_DEF(HTT, 0x00000001, 0, RTE_REG_EDX, 28)
+ FEAT_DEF(TM, 0x00000001, 0, RTE_REG_EDX, 29)
+ FEAT_DEF(PBE, 0x00000001, 0, RTE_REG_EDX, 31)
+
+ FEAT_DEF(DIGTEMP, 0x00000006, 0, RTE_REG_EAX, 0)
+ FEAT_DEF(TRBOBST, 0x00000006, 0, RTE_REG_EAX, 1)
+ FEAT_DEF(ARAT, 0x00000006, 0, RTE_REG_EAX, 2)
+ FEAT_DEF(PLN, 0x00000006, 0, RTE_REG_EAX, 4)
+ FEAT_DEF(ECMD, 0x00000006, 0, RTE_REG_EAX, 5)
+ FEAT_DEF(PTM, 0x00000006, 0, RTE_REG_EAX, 6)
+
+ FEAT_DEF(MPERF_APERF_MSR, 0x00000006, 0, RTE_REG_ECX, 0)
+ FEAT_DEF(ACNT2, 0x00000006, 0, RTE_REG_ECX, 1)
+ FEAT_DEF(ENERGY_EFF, 0x00000006, 0, RTE_REG_ECX, 3)
+
+ FEAT_DEF(FSGSBASE, 0x00000007, 0, RTE_REG_EBX, 0)
+ FEAT_DEF(BMI1, 0x00000007, 0, RTE_REG_EBX, 3)
+ FEAT_DEF(HLE, 0x00000007, 0, RTE_REG_EBX, 4)
+ FEAT_DEF(AVX2, 0x00000007, 0, RTE_REG_EBX, 5)
+ FEAT_DEF(SMEP, 0x00000007, 0, RTE_REG_EBX, 7)
+ FEAT_DEF(BMI2, 0x00000007, 0, RTE_REG_EBX, 8)
+ FEAT_DEF(ERMS, 0x00000007, 0, RTE_REG_EBX, 9)
+ FEAT_DEF(INVPCID, 0x00000007, 0, RTE_REG_EBX, 10)
+ FEAT_DEF(RTM, 0x00000007, 0, RTE_REG_EBX, 11)
+ FEAT_DEF(AVX512F, 0x00000007, 0, RTE_REG_EBX, 16)
+ FEAT_DEF(AVX512DQ, 0x00000007, 0, RTE_REG_EBX, 17)
+ FEAT_DEF(RDSEED, 0x00000007, 0, RTE_REG_EBX, 18)
+ FEAT_DEF(AVX512IFMA, 0x00000007, 0, RTE_REG_EBX, 21)
+ FEAT_DEF(AVX512CD, 0x00000007, 0, RTE_REG_EBX, 28)
+ FEAT_DEF(AVX512BW, 0x00000007, 0, RTE_REG_EBX, 30)
+ FEAT_DEF(AVX512VL, 0x00000007, 0, RTE_REG_EBX, 31)
+
+ FEAT_DEF(AVX512VBMI, 0x00000007, 0, RTE_REG_ECX, 1)
+ FEAT_DEF(WAITPKG, 0x00000007, 0, RTE_REG_ECX, 5)
+ FEAT_DEF(AVX512VBMI2, 0x00000007, 0, RTE_REG_ECX, 6)
+ FEAT_DEF(GFNI, 0x00000007, 0, RTE_REG_ECX, 8)
+ FEAT_DEF(VAES, 0x00000007, 0, RTE_REG_ECX, 9)
+ FEAT_DEF(VPCLMULQDQ, 0x00000007, 0, RTE_REG_ECX, 10)
+ FEAT_DEF(AVX512VNNI, 0x00000007, 0, RTE_REG_ECX, 11)
+ FEAT_DEF(AVX512BITALG, 0x00000007, 0, RTE_REG_ECX, 12)
+ FEAT_DEF(AVX512VPOPCNTDQ, 0x00000007, 0, RTE_REG_ECX, 14)
+ FEAT_DEF(CLDEMOTE, 0x00000007, 0, RTE_REG_ECX, 25)
+ FEAT_DEF(MOVDIRI, 0x00000007, 0, RTE_REG_ECX, 27)
+ FEAT_DEF(MOVDIR64B, 0x00000007, 0, RTE_REG_ECX, 28)
+
+ FEAT_DEF(AVX512VP2INTERSECT, 0x00000007, 0, RTE_REG_EDX, 8)
+
+ FEAT_DEF(LAHF_SAHF, 0x80000001, 0, RTE_REG_ECX, 0)
+ FEAT_DEF(LZCNT, 0x80000001, 0, RTE_REG_ECX, 4)
+
+ FEAT_DEF(SYSCALL, 0x80000001, 0, RTE_REG_EDX, 11)
+ FEAT_DEF(XD, 0x80000001, 0, RTE_REG_EDX, 20)
+ FEAT_DEF(1GB_PG, 0x80000001, 0, RTE_REG_EDX, 26)
+ FEAT_DEF(RDTSCP, 0x80000001, 0, RTE_REG_EDX, 27)
+ FEAT_DEF(EM64T, 0x80000001, 0, RTE_REG_EDX, 29)
+
+ FEAT_DEF(INVTSC, 0x80000007, 0, RTE_REG_EDX, 8)
+};
+
/*
 * Check whether a CPU feature flag is set on the running CPU.
 *
 * Returns 1 if the feature is present, 0 if absent (or the cpuid leaf
 * is not supported by this CPU), and a negative errno on bad input.
 */
static int cpu_get_flag_enabled(enum rte_cpu_flag_t feature)
{
	const struct feature_entry *feat;
	cpuid_registers_t regs;
	unsigned int maxleaf;

	if (feature >= RTE_CPUFLAG_NUMFLAGS)
		/* Flag does not match anything in the feature tables */
		return -ENOENT;

	feat = &cpu_feature_table[feature];

	if (!feat->leaf)
		/* This entry in the table wasn't filled out! */
		return -EFAULT;

	/* Highest supported leaf within the same (basic/extended) range */
	maxleaf = __get_cpuid_max(feat->leaf & 0x80000000, NULL);

	if (maxleaf < feat->leaf)
		return 0;

	/* __cpuid_count() writes EAX/EBX/ECX/EDX into the given lvalues */
	__cpuid_count(feat->leaf, feat->subleaf,
		      regs[RTE_REG_EAX], regs[RTE_REG_EBX],
		      regs[RTE_REG_ECX], regs[RTE_REG_EDX]);

	/* check if the feature is enabled */
	return (regs[feat->reg] >> feat->bit) & 1;
}
+
+static const char *cpu_get_flag_name(enum rte_cpu_flag_t feature)
+{
+ if (feature >= RTE_CPUFLAG_NUMFLAGS)
+ return NULL;
+ return cpu_feature_table[feature].name;
+}
+
+void _odp_cpu_flags_print_all(void)
+{
+ int len, i;
+ int max_str = 1024;
+ int max_len = max_str - 1;
+ char str[max_str];
+
+ len = snprintf(str, max_len, "\nCPU features supported:\n");
+
+ for (i = 0; i < RTE_CPUFLAG_NUMFLAGS; i++) {
+ if (cpu_get_flag_enabled(i) > 0)
+ len += snprintf(&str[len], max_len - len, "%s ",
+ cpu_get_flag_name(i));
+ }
+
+ len += snprintf(&str[len], max_len - len,
+ "\n\nCPU features NOT supported:\n");
+
+ for (i = 0; i < RTE_CPUFLAG_NUMFLAGS; i++) {
+ if (cpu_get_flag_enabled(i) <= 0)
+ len += snprintf(&str[len], max_len - len, "%s ",
+ cpu_get_flag_name(i));
+ }
+
+ len += snprintf(&str[len], max_len - len, "\n\n");
+
+ str[len] = '\0';
+ _ODP_PRINT("%s", str);
+}
+
/*
 * Report whether the TSC frequency is constant.
 *
 * Always returns 1 on x86: either the kernel info or the INVTSC CPU
 * flag confirms an invariant TSC, or constancy is assumed based on the
 * architecture (with a logged warning).
 */
int _odp_time_cpu_global_freq_is_const(void)
{
	if (odp_global_ro.system_info.cpu_constant_tsc ||
	    cpu_get_flag_enabled(RTE_CPUFLAG_INVTSC) > 0)
		return 1;

	/* Could not confirm, but assume constancy anyway (see above) */
	_ODP_ERR("WARN: assuming constant TSC based on CPU arch, but could not confirm from CPU "
		 "flags\n");

	return 1;
}
+
+int _odp_cpu_flags_has_rdtsc(void)
+{
+ if (cpu_get_flag_enabled(RTE_CPUFLAG_TSC) > 0)
+ return 1;
+
+ return 0;
+}
diff --git a/platform/linux-generic/arch/x86/cpu_flags.h b/platform/linux-generic/arch/x86/cpu_flags.h
new file mode 100644
index 000000000..8d485dbfa
--- /dev/null
+++ b/platform/linux-generic/arch/x86/cpu_flags.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_FLAGS_H_
+#define ODP_PLAT_CPU_FLAGS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void _odp_cpu_flags_print_all(void);
+int _odp_cpu_flags_has_rdtsc(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp/api/abi/cpu.h b/platform/linux-generic/arch/x86/odp/api/abi/cpu.h
new file mode 100644
index 000000000..9224af9a0
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/api/abi/cpu.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_CPU_H_
+#define ODP_API_ABI_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define ODP_CACHE_LINE_SIZE 64
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/cpu_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp/api/abi/cpu_inlines.h b/platform/linux-generic/arch/x86/odp/api/abi/cpu_inlines.h
new file mode 100644
index 000000000..4b542a577
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/api/abi/cpu_inlines.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ARCH_CPU_INLINES_H_
+#define ODP_ARCH_CPU_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <odp/api/abi/cpu_rdtsc.h>
+
+static inline void _odp_cpu_pause(void)
+{
+#ifdef __SSE2__
+ __asm__ __volatile__ ("pause");
+#else
+ __asm__ __volatile__ ("rep; nop");
+#endif
+}
+
+static inline uint64_t _odp_cpu_cycles(void)
+{
+ return _odp_cpu_rdtsc();
+}
+
+static inline uint64_t _odp_cpu_cycles_max(void)
+{
+ return UINT64_MAX;
+}
+
+static inline uint64_t _odp_cpu_cycles_resolution(void)
+{
+ return 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp_cpu_arch.c b/platform/linux-generic/arch/x86/odp/api/abi/cpu_rdtsc.h
index c8cf27b65..ccc5f0f36 100644
--- a/platform/linux-generic/arch/x86/odp_cpu_arch.c
+++ b/platform/linux-generic/arch/x86/odp/api/abi/cpu_rdtsc.h
@@ -1,11 +1,15 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/cpu.h>
-uint64_t odp_cpu_cycles(void)
+#ifndef ODP_ARCH_CPU_RDTSC_H_
+#define ODP_ARCH_CPU_RDTSC_H_
+
+#include <stdint.h>
+
+static inline uint64_t _odp_cpu_rdtsc(void)
{
union {
uint64_t tsc_64;
@@ -22,12 +26,4 @@ uint64_t odp_cpu_cycles(void)
return tsc.tsc_64;
}
-uint64_t odp_cpu_cycles_max(void)
-{
- return UINT64_MAX;
-}
-
-uint64_t odp_cpu_cycles_resolution(void)
-{
- return 1;
-}
+#endif
diff --git a/platform/linux-generic/arch/x86/odp/api/abi/hash_crc32.h b/platform/linux-generic/arch/x86/odp/api/abi/hash_crc32.h
new file mode 100644
index 000000000..c2c71bcb7
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/api/abi/hash_crc32.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_HASH_CRC32_H_
+#define ODP_API_ABI_HASH_CRC32_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+uint32_t _odp_hash_crc32_generic(const void *data, uint32_t data_len,
+ uint32_t init_val);
+uint32_t _odp_hash_crc32c_generic(const void *data, uint32_t data_len,
+ uint32_t init_val);
+
+static inline uint32_t _odp_hash_crc32(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ return _odp_hash_crc32_generic(data, data_len, init_val);
+}
+
+#ifdef __SSE4_2__
+
+static inline uint32_t _odp_hash_crc32c(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ uint32_t i;
+ uintptr_t pd = (uintptr_t)data;
+
+#ifdef __x86_64__
+ for (i = 0; i < data_len / 8; i++) {
+ init_val = (uint32_t)__builtin_ia32_crc32di(init_val, *(const uint64_t *)pd);
+ pd += 8;
+ }
+
+ if (data_len & 0x4) {
+ init_val = __builtin_ia32_crc32si(init_val, *(const uint32_t *)pd);
+ pd += 4;
+ }
+#else
+ for (i = 0; i < data_len / 4; i++) {
+ init_val = __builtin_ia32_crc32si(init_val, *(const uint32_t *)pd);
+ pd += 4;
+ }
+#endif
+
+ if (data_len & 0x2) {
+ init_val = __builtin_ia32_crc32hi(init_val, *(const uint16_t *)pd);
+ pd += 2;
+ }
+
+ if (data_len & 0x1)
+ init_val = __builtin_ia32_crc32qi(init_val, *(const uint8_t *)pd);
+
+ return init_val;
+}
+
+#else
+
+static inline uint32_t _odp_hash_crc32c(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ return _odp_hash_crc32c_generic(data, data_len, init_val);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp/api/abi/sync_inlines.h b/platform/linux-generic/arch/x86/odp/api/abi/sync_inlines.h
new file mode 100644
index 000000000..bebe6b571
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/api/abi/sync_inlines.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_ARCH_SYNC_INLINES_H_
+#define ODP_ARCH_SYNC_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void _odp_mb_sync(void)
+{
+ __asm__ volatile("mfence" ::: "memory");
+}
+
+static inline void _odp_mb_sync_load(void)
+{
+ __asm__ volatile("lfence" ::: "memory");
+}
+
+static inline void _odp_mb_sync_store(void)
+{
+ __asm__ volatile("sfence" ::: "memory");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp/api/abi/time_cpu.h b/platform/linux-generic/arch/x86/odp/api/abi/time_cpu.h
new file mode 100644
index 000000000..baf79ad3f
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/api/abi/time_cpu.h
@@ -0,0 +1,35 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ARCH_TIME_CPU_H_
+#define ODP_ARCH_TIME_CPU_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <odp/api/abi/cpu_rdtsc.h>
+
+static inline uint64_t _odp_time_cpu_global(void)
+{
+ return _odp_cpu_rdtsc();
+}
+
+static inline uint64_t _odp_time_cpu_global_strict(void)
+{
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+ return _odp_cpu_rdtsc();
+}
+
+int _odp_time_cpu_global_freq_is_const(void);
+uint64_t _odp_time_cpu_global_freq(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp/api/abi/time_inlines.h b/platform/linux-generic/arch/x86/odp/api/abi/time_inlines.h
new file mode 100644
index 000000000..331d1996f
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/api/abi/time_inlines.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi/time_cpu_inlines.h>
diff --git a/platform/linux-generic/arch/x86/odp/api/cpu_arch.h b/platform/linux-generic/arch/x86/odp/api/cpu_arch.h
deleted file mode 100644
index 44e6b30ed..000000000
--- a/platform/linux-generic/arch/x86/odp/api/cpu_arch.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_CPU_ARCH_H_
-#define ODP_PLAT_CPU_ARCH_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define _ODP_CACHE_LINE_SIZE 64
-
-static inline void odp_cpu_pause(void)
-{
-#ifdef __SSE2__
- __asm__ __volatile__ ("pause");
-#else
- __asm__ __volatile__ ("rep; nop");
-#endif
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/arch/x86/odp_cpu.h b/platform/linux-generic/arch/x86/odp_cpu.h
new file mode 100644
index 000000000..8f8f22daf
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp_cpu.h
@@ -0,0 +1,14 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_X86_CPU_H_
+#define ODP_X86_CPU_H_
+
+#define _ODP_UNALIGNED 1
+
+#include <default/odp_cpu.h>
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp_cpu_cycles.c b/platform/linux-generic/arch/x86/odp_cpu_cycles.c
new file mode 100644
index 000000000..2624af0f6
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp_cpu_cycles.c
@@ -0,0 +1,21 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/cpu.h>
+
+#include "cpu_flags.h"
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+
+int _odp_cpu_cycles_init_global(void)
+{
+ if (_odp_cpu_flags_has_rdtsc() == 0) {
+ _ODP_ERR("RDTSC instruction not supported\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/platform/linux-generic/arch/x86/odp_random.h b/platform/linux-generic/arch/x86/odp_random.h
new file mode 100644
index 000000000..54628038e
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp_random.h
@@ -0,0 +1,160 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * These functions implement ODP_RANDOM_CRYPTO random data using rdrand [1],
+ * and ODP_RANDOM_TRUE random data using rdseed [1], via compiler builtin
+ * functions.
+ *
+ * Note that there may be issues with the quality or security of rdrand and
+ * rdseed. [2]
+ *
+ * [1] Intel Digital Random Number Generator (DRNG) Software Implementation
+ * Guide. John P Mechalas, 17 October 2018.
+ * https://www.intel.com/content/www/us/en/developer/articles/guide/intel-digital-random-number-generator-drng-software-implementation-guide.html
+ *
+ * [2] RDRAND. Wikipedia, 29 September 2021.
+ * https://en.wikipedia.org/wiki/RDRAND#Reception
+ */
+
+#ifndef ODP_X86_RANDOM_H_
+#define ODP_X86_RANDOM_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/spec/random.h>
+
+#include <stdint.h>
+
+odp_random_kind_t _odp_random_max_kind_generic(void);
+int32_t _odp_random_true_data_generic(uint8_t *buf, uint32_t len);
+int32_t _odp_random_crypto_data_generic(uint8_t *buf, uint32_t len);
+
+#ifdef __RDRND__
+
+static inline int _odp_random_max_kind(void)
+{
+#ifdef __RDSEED__
+ return ODP_RANDOM_TRUE;
+#else
+ return ODP_RANDOM_CRYPTO;
+#endif
+}
+
+#else
+
+static inline int _odp_random_max_kind(void)
+{
+ return _odp_random_max_kind_generic();
+}
+
+#endif
+
+#ifdef __RDSEED__
+
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+#ifdef __x86_64__
+ for (uint32_t i = 0; i < len / 8; i++) {
+ while (!__builtin_ia32_rdseed_di_step((unsigned long long *)buf))
+ ;
+ buf += 8;
+ }
+
+ if (len & 4) {
+ while (!__builtin_ia32_rdseed_si_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#else
+ for (uint32_t i = 0; i < len / 4; i++) {
+ while (!__builtin_ia32_rdseed_si_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#endif
+ if (len & 2) {
+ while (!__builtin_ia32_rdseed_hi_step((unsigned short int *)buf))
+ ;
+ buf += 2;
+ }
+
+ if (len & 1) {
+ uint16_t w;
+
+ while (!__builtin_ia32_rdseed_hi_step(&w))
+ ;
+ *((uint8_t *)buf) = w & 0xff;
+ }
+
+ return len;
+}
+
+#else
+
+static inline int32_t _odp_random_true_data(uint8_t *buf, uint32_t len)
+{
+ return _odp_random_true_data_generic(buf, len);
+}
+
+#endif
+
+#ifdef __RDRND__
+
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+#ifdef __x86_64__
+ for (uint32_t i = 0; i < len / 8; i++) {
+ while (!__builtin_ia32_rdrand64_step((unsigned long long *)buf))
+ ;
+ buf += 8;
+ }
+
+ if (len & 4) {
+ while (!__builtin_ia32_rdrand32_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#else
+ for (uint32_t i = 0; i < len / 4; i++) {
+ while (!__builtin_ia32_rdrand32_step((unsigned int *)buf))
+ ;
+ buf += 4;
+ }
+#endif
+ if (len & 2) {
+ while (!__builtin_ia32_rdrand16_step((unsigned short int *)buf))
+ ;
+ buf += 2;
+ }
+
+ if (len & 1) {
+ uint16_t w;
+
+ while (!__builtin_ia32_rdrand16_step(&w))
+ ;
+ *((uint8_t *)buf) = w & 0xff;
+ }
+
+ return len;
+}
+
+#else
+
+static inline int32_t _odp_random_crypto_data(uint8_t *buf, uint32_t len)
+{
+ return _odp_random_crypto_data_generic(buf, len);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
index 96127ec67..3cbdb2037 100644
--- a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
@@ -1,44 +1,94 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_internal.h>
+#include <odp_sysinfo_internal.h>
+#include "cpu_flags.h"
#include <string.h>
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo)
{
char str[1024];
- char *pos;
+ char *pos, *pos_end;
double ghz = 0.0;
+ double mhz = 0.0;
uint64_t hz;
int id = 0;
+ bool freq_set = false;
+
+ sysinfo->cpu_arch = ODP_CPU_ARCH_X86;
+ sysinfo->cpu_isa_sw.x86 = ODP_CPU_ARCH_X86_UNKNOWN;
+ sysinfo->cpu_isa_hw.x86 = ODP_CPU_ARCH_X86_UNKNOWN;
+
+ #if defined __x86_64 || defined __x86_64__
+ sysinfo->cpu_isa_sw.x86 = ODP_CPU_ARCH_X86_64;
+ #elif defined __i686 || defined __i686__
+ sysinfo->cpu_isa_sw.x86 = ODP_CPU_ARCH_X86_I686;
+ #endif
strcpy(sysinfo->cpu_arch_str, "x86");
- while (fgets(str, sizeof(str), file) != NULL && id < MAX_CPU_NUMBER) {
+ while (fgets(str, sizeof(str), file) != NULL && id < CONFIG_NUM_CPU_IDS) {
+ if (strstr(str, "flags") && strstr(str, "constant_tsc")) {
+ sysinfo->cpu_constant_tsc = 1;
+ continue;
+ }
+
pos = strstr(str, "model name");
if (pos) {
- pos = strchr(str, ':');
+ freq_set = false;
+
+ /* Copy model name between : and @ characters */
+ pos = strchr(str, ':');
+ pos_end = strchr(str, '@');
+ if (pos == NULL)
+ continue;
+
+ if (pos_end != NULL)
+ *(pos_end - 1) = '\0';
+
strncpy(sysinfo->model_str[id], pos + 2,
- sizeof(sysinfo->model_str[id]) - 1);
-
- pos = strchr(sysinfo->model_str[id], '@');
- if (pos) {
- *(pos - 1) = '\0';
- if (sscanf(pos, "@ %lfGHz", &ghz) == 1) {
- hz = (uint64_t)(ghz * 1000000000.0);
- sysinfo->cpu_hz_max[id] = hz;
- }
+ MODEL_STR_SIZE - 1);
+
+ if (sysinfo->cpu_hz_max[id]) {
+ freq_set = true;
+ id++;
+ continue;
+ }
+
+ /* max frequency needs to be set */
+ if (pos_end != NULL &&
+ sscanf(pos_end, "@ %lfGHz", &ghz) == 1) {
+ hz = (uint64_t)(ghz * 1000000000.0);
+ sysinfo->cpu_hz_max[id++] = hz;
+ freq_set = true;
+ }
+ } else if (!freq_set &&
+ strstr(str, "bogomips") != NULL) {
+ pos = strchr(str, ':');
+ if (pos == NULL)
+ continue;
+
+ if (sscanf(pos + 2, "%lf", &mhz) == 1) {
+ /* On typical x86 BogoMIPS is freq * 2 */
+ hz = (uint64_t)(mhz * 1000000.0 / 2);
+ sysinfo->cpu_hz_max[id++] = hz;
+ freq_set = true;
}
- id++;
}
}
return 0;
}
-uint64_t odp_cpu_hz_current(int id)
+void _odp_sys_info_print_arch(void)
+{
+ _odp_cpu_flags_print_all();
+}
+
+uint64_t odp_cpu_arch_hz_current(int id)
{
char str[1024];
FILE *file;
@@ -47,6 +97,8 @@ uint64_t odp_cpu_hz_current(int id)
double mhz = 0.0;
file = fopen("/proc/cpuinfo", "rt");
+ if (!file)
+ return 0;
/* find the correct processor instance */
while (fgets(str, sizeof(str), file) != NULL) {
diff --git a/platform/linux-generic/arch/x86/odp_time_cpu.c b/platform/linux-generic/arch/x86/odp_time_cpu.c
new file mode 100644
index 000000000..ab897296d
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp_time_cpu.c
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2024 Nokia
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/hints.h>
+#include <odp/api/time_types.h>
+
+#include <odp/api/abi/time_cpu.h>
+
+#include <odp_debug_internal.h>
+
+#include <time.h>
+#include <errno.h>
+#include <string.h>
+
+static int nwait(uint64_t nsec)
+{
+ struct timespec ts1, ts2;
+ uint64_t diff;
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts1))
+ return 1;
+
+ do {
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts2))
+ return 1;
+
+ diff = (ts2.tv_sec - ts1.tv_sec) * ODP_TIME_SEC_IN_NS +
+ ts2.tv_nsec - ts1.tv_nsec;
+ } while (diff < nsec);
+
+ return 0;
+}
+
+static void sort(uint64_t values[], int num)
+{
+ for (int n = 0; n < num; n++) {
+ for (int i = n + 1; i < num; i++) {
+ if (values[i] < values[n]) {
+ uint64_t tmp = values[i];
+
+ values[i] = values[n];
+ values[n] = tmp;
+ }
+ }
+ }
+}
+
+static uint64_t median(uint64_t values[], int num)
+{
+ sort(values, num);
+ if (num % 2 == 0)
+ return (values[num / 2 - 1] + values[num / 2]) / 2;
+ else
+ return values[num / 2];
+}
+
+/* Measure TSC frequency. */
+uint64_t _odp_time_cpu_global_freq(void)
+{
+ struct timespec ts1, ts2;
+ uint64_t t1, t2, ts_nsec, cycles;
+ int i;
+ const int rounds = 6; /* first round is warmup */
+ int warm_up = 1;
+ uint64_t hz[rounds];
+
+ for (i = 0; i < rounds; i++) {
+ uint64_t wait_nsec = ODP_TIME_SEC_IN_NS / 50;
+
+ if (warm_up)
+ wait_nsec = ODP_TIME_SEC_IN_NS / 1000;
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts1))
+ goto err_out;
+
+ t1 = _odp_time_cpu_global();
+
+ if (nwait(wait_nsec))
+ goto err_out;
+
+ if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts2))
+ goto err_out;
+
+ t2 = _odp_time_cpu_global();
+
+ ts_nsec = (ts2.tv_sec - ts1.tv_sec) * ODP_TIME_SEC_IN_NS;
+ ts_nsec += ts2.tv_nsec - ts1.tv_nsec;
+
+ cycles = t2 - t1;
+
+ hz[i] = (cycles * ODP_TIME_SEC_IN_NS) / ts_nsec;
+
+ if (warm_up)
+ warm_up = 0;
+ }
+
+ return median(&hz[1], rounds - 1);
+
+err_out:
+ _ODP_ERR("clock_gettime() failed (%s)\n", strerror(errno));
+ return 0;
+}
diff --git a/platform/linux-generic/check-globals.sh b/platform/linux-generic/check-globals.sh
new file mode 120000
index 000000000..c999a29ef
--- /dev/null
+++ b/platform/linux-generic/check-globals.sh
@@ -0,0 +1 @@
+../../scripts/check-globals.sh \ No newline at end of file
diff --git a/platform/linux-generic/doc/platform_specific.dox b/platform/linux-generic/doc/platform_specific.dox
index e116ec617..be0486666 100644
--- a/platform/linux-generic/doc/platform_specific.dox
+++ b/platform/linux-generic/doc/platform_specific.dox
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -28,7 +28,7 @@
* to odp_init_local() is actually fully defined by these
* requirements: It has to be the value returned by the
* unique call to odp_init_global() made by one single
- * acsendant of the current process.
+ * ancestor of the current process.
*/
/**
diff --git a/platform/linux-generic/dumpconfig/.gitignore b/platform/linux-generic/dumpconfig/.gitignore
new file mode 100644
index 000000000..44752b565
--- /dev/null
+++ b/platform/linux-generic/dumpconfig/.gitignore
@@ -0,0 +1 @@
+odp_linuxgen_dumpconfig
diff --git a/platform/linux-generic/dumpconfig/Makefile.am b/platform/linux-generic/dumpconfig/Makefile.am
new file mode 100644
index 000000000..933424f0a
--- /dev/null
+++ b/platform/linux-generic/dumpconfig/Makefile.am
@@ -0,0 +1,10 @@
+include $(top_srcdir)/Makefile.inc
+
+AM_CPPFLAGS = -I$(top_builddir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/include
+
+bin_PROGRAMS = odp_linuxgen_dumpconfig
+
+odp_linuxgen_dumpconfig_SOURCES = dumpconfig.c
+
+TESTS = odp_linuxgen_dumpconfig
diff --git a/platform/linux-generic/dumpconfig/dumpconfig.c b/platform/linux-generic/dumpconfig/dumpconfig.c
new file mode 100644
index 000000000..a04f5c2dd
--- /dev/null
+++ b/platform/linux-generic/dumpconfig/dumpconfig.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <odp_libconfig_config.h>
+
+int main(void)
+{
+ unsigned int i;
+ const char *filename;
+ FILE *f;
+ char c;
+
+ printf("# Builtin platform config\n\n");
+ for (i = 0; i < sizeof(config_builtin); i++)
+ printf("%c", config_builtin[i]);
+
+ filename = getenv("ODP_CONFIG_FILE");
+ if (filename == NULL)
+ return 0;
+
+ printf("# Overridden section with ODP_CONFIG_FILE=%s\n\n", filename);
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+ fprintf(stderr, "Error: open file %s\n", filename);
+ return -1;
+ }
+
+ while (1) {
+ c = fgetc(f);
+ if (feof(f))
+ break;
+ printf("%c", c);
+ }
+
+ fclose(f);
+ return 0;
+}
diff --git a/platform/linux-generic/example/Makefile.am b/platform/linux-generic/example/Makefile.am
new file mode 100644
index 000000000..84f337387
--- /dev/null
+++ b/platform/linux-generic/example/Makefile.am
@@ -0,0 +1,5 @@
+SUBDIRS =
+
+if WITH_ML
+SUBDIRS += ml
+endif
diff --git a/platform/linux-generic/example/ml/.gitignore b/platform/linux-generic/example/ml/.gitignore
new file mode 100644
index 000000000..d845f6bb5
--- /dev/null
+++ b/platform/linux-generic/example/ml/.gitignore
@@ -0,0 +1,5 @@
+model_explorer
+simple_linear
+mnist
+*.log
+*.trs
diff --git a/platform/linux-generic/example/ml/Makefile.am b/platform/linux-generic/example/ml/Makefile.am
new file mode 100644
index 000000000..3692b704e
--- /dev/null
+++ b/platform/linux-generic/example/ml/Makefile.am
@@ -0,0 +1,46 @@
+include $(top_srcdir)/example/Makefile.inc
+
+LDADD += -lm
+
+bin_PROGRAMS = model_explorer simple_linear mnist
+
+simple_linear_SOURCES = simple_linear.c model_read.c model_read.h
+model_explorer_SOURCES = model_explorer.c model_read.c model_read.h
+mnist_SOURCES = mnist.c model_read.c model_read.h
+
+EXTRA_DIST = \
+ odp_ml_run_mnist.sh \
+ example_digit.csv \
+ mnist-12.onnx \
+ odp_ml_run_model_explorer.sh \
+ odp_ml_run_simple_linear.sh \
+ simple_linear.onnx \
+ README.md
+
+if test_example
+TESTS = \
+ odp_ml_run_mnist.sh \
+ odp_ml_run_model_explorer.sh \
+ odp_ml_run_simple_linear.sh
+endif
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/example/ml/README.md b/platform/linux-generic/example/ml/README.md
new file mode 100644
index 000000000..fc6a57c0a
--- /dev/null
+++ b/platform/linux-generic/example/ml/README.md
@@ -0,0 +1,94 @@
+# ML examples
+
+Machine Learning API examples demonstrate how to use ODP ML API in different tasks:
+for example simple linear computation and predicting a handwritten digit in
+a given image.
+
+## Simple Linear
+
+This example runs on a very simple model of form y = 3 * x + 4 where x is given
+as the second argument.
+
+### Generate model
+
+```bash
+python3 <odp_directory>/platform/linux-generic/test/validation/api/ml/simple_linear_gen.py
+```
+
+### Run simple linear
+
+```bash
+$ ./simple_linear 3
+.
+.
+.
+y = 3 * 3 + 4: 13
+.
+```
+
+Or run the program with multiple threads, each thread running inference on one x given in
+the input. Thus, the number of threads equals the count of numbers in the second argument.
+
+```bash
+$ ./simple_linear [2,4,5]
+.
+.
+.
+y = 3 * 2 + 4: 10
+y = 3 * 5 + 4: 19
+y = 3 * 4 + 4: 16
+.
+```
+
+## MNIST
+
+This example predicts a handwritten digit in a given image. Refer to
+https://github.com/onnx/models/tree/main/validated/vision/classification/mnist
+for more information. The model file is from
+https://github.com/onnx/models/raw/main/validated/vision/classification/mnist/model/mnist-12.onnx
+(SPDX-License-Identifier: MIT).
+
+### Prepare input data
+
+The input image is stored in a csv file which contains, comma separated, the
+digit label (a number from 0 to 9) and the 784 pixel values (a number from 0 to
+255). Pixel order is left to right and then top down. The MNIST dataset is
+available in this format at https://www.kaggle.com/oddrationale/mnist-in-csv.
+
+### Run mnist
+
+```bash
+$ ./mnist mnist-12.onnx example_digit.csv
+.
+.
+.
+predicted_digit: 4, expected_digit: 4
+.
+```
+
+## Model Explorer
+
+The example prints basic model information.
+
+### Run model_explorer
+
+```bash
+$ ./model_explorer simple_linear.onnx
+.
+.
+.
+Model info
+----------
+ Model handle: 0x7fe8426ce1d8
+ Name: model-explorer
+ Model version: 1
+ Model interface version: 0
+ Index: 0
+ Number of inputs: 1
+ Input[0]: Name: x, Data_type: int32, Shape: static [1], Size: 4
+ Number of outputs: 1
+ Output[0]: Name: y, Data_type: int32, Shape: static [1], Size: 4
+.
+.
+.
+```
diff --git a/platform/linux-generic/example/ml/example_digit.csv b/platform/linux-generic/example/ml/example_digit.csv
new file mode 100644
index 000000000..2ab0f4a0c
--- /dev/null
+++ b/platform/linux-generic/example/ml/example_digit.csv
@@ -0,0 +1 @@
+4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,55,9,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,36,215,98,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,36,249,144,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,34,246,148,0,0,0,0,0,0,0,7,8,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,39,255,139,0,0,0,0,0,0,2,95,117,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,51,255,97,0,0,0,0,0,0,8,203,211,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,70,255,58,0,0,0,0,0,0,13,238,167,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,111,255,23,0,0,0,0,0,0,24,255,110,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,209,222,1,0,0,0,0,0,0,62,255,51,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,55,255,125,0,0,0,0,0,0,0,117,255,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,4,164,255,60,0,0,0,0,0,0,0,171,230,12,0,0,0,0,0,0,0,0,0,0,0,0,0,0,24,235,255,178,120,89,74,72,72,72,74,246,241,121,141,153,148,83,1,0,0,0,0,0,0,0,0,0,6,121,231,255,255,255,255,255,255,255,255,255,255,255,255,255,253,173,14,0,0,0,0,0,0,0,0,0,0,1,19,44,63,76,83,83,83,83,100,255,192,66,52,45,46,34,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,39,255,138,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,68,255,113,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,104,255,84,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,147,255,52,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,8,190,255,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,25,229,210,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,50,255,117,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,91,255,34,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,49,120,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/platform/linux-generic/example/ml/mnist-12.onnx b/platform/linux-generic/example/ml/mnist-12.onnx
new file mode 100644
index 000000000..6661bfe3c
--- /dev/null
+++ b/platform/linux-generic/example/ml/mnist-12.onnx
Binary files differ
diff --git a/platform/linux-generic/example/ml/mnist.c b/platform/linux-generic/example/ml/mnist.c
new file mode 100644
index 000000000..4c1066302
--- /dev/null
+++ b/platform/linux-generic/example/ml/mnist.c
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp_api.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+#include <inttypes.h>
+
+#include "model_read.h"
+
+/**
+ * About MNIST model used in this example.
+ *
+ * The model predicts handwritten digits. It has one input and one output whose
+ * detailed information is as follows:
+ *
+ * Input:
+ * Name: Input3, type: float32, shape: [1, 1, 28, 28]
+ *
+ * Output:
+ * Name: Plus214_Output_0, type: float32, shape: [1, 10]
+ *
+ * Refer to https://github.com/onnx/models/tree/main/validated/vision/classification/mnist
+ * for more information about the model.
+ *
+ * The model outputs the likelihood of each number before softmax, so we need to
+ * map the output to probabilities across the 10 classes with softmax function.
+ *
+ * In this example, the input image is stored in example_digit.csv file, which
+ * contains, comma separated, the digit label (a number from 0 to 9) and the 784
+ * pixel values (a number from 0 to 255). Pixel order is first left to right and
+ * then top down. The MNIST dataset is available in this format at
+ * https://www.kaggle.com/oddrationale/mnist-in-csv.
+ */
+
+#define MAX_MODEL_SIZE 30000
+#define INPUT_NUM_ELEMS 784 /* Total shape for input: 1 * 1 * 28 * 28 */
+#define OUTPUT_NUM_ELEMS 10 /* Total shape for output: 1 * 10 */
+
+static int read_digit_csv(const char *file_name, uint8_t *expected_digit, float *pixels)
+{
+ char *tmp;
+ char *token;
+ char *end;
+ FILE *digit_file;
+ size_t size, num_elem;
+ const char *delim = ","; /* Delimiter */
+ size_t num_pixel = 0;
+
+ /* Get the model file size in bytes */
+ digit_file = fopen(file_name, "rb");
+ fseek(digit_file, 0, SEEK_END);
+ size = ftell(digit_file);
+ rewind(digit_file);
+
+ tmp = malloc(size);
+ memset(tmp, 0, size);
+ num_elem = fread(tmp, size, 1, digit_file);
+
+ fclose(digit_file);
+ if (num_elem != 1) {
+ printf("Read digit file failed\n");
+ free(tmp);
+ return -1;
+ }
+
+ /* Get the first token which is the expected digit */
+ token = strtok(tmp, delim);
+ *expected_digit = (uint8_t)strtol(token, &end, 10);
+ if ((*expected_digit > 9) || (end == token)/*No numeric character*/) {
+ printf("Invalid digit %u or no numeric character available\n",
+ *expected_digit);
+ free(tmp);
+ return -1;
+ }
+
+ /* The rest 784 numbers are pixel values */
+ token = strtok(NULL, delim);
+ while (token != NULL) {
+ pixels[num_pixel] = strtof(token, NULL);
+ num_pixel++;
+ token = strtok(NULL, delim);
+ }
+
+ if (num_pixel != INPUT_NUM_ELEMS) {
+ printf("Wrong number of pixels: %zu (expected:784)\n", num_pixel);
+ free(tmp);
+ return -1;
+ }
+
+ free(tmp);
+ return 0;
+}
+
+static int prepare_run_params(const char *file_name, uint8_t *expected_digit,
+ odp_ml_data_seg_t *input, odp_ml_data_seg_t *output)
+{
+ input->size = INPUT_NUM_ELEMS * sizeof(float);
+ input->addr = malloc(input->size);
+ memset(input->addr, 0, input->size);
+
+ if (read_digit_csv(file_name, expected_digit, input->addr)) {
+ free(input->addr);
+ return -1;
+ }
+
+ output->size = OUTPUT_NUM_ELEMS * sizeof(float);
+ output->addr = malloc(output->size);
+ memset(output->addr, 0, output->size);
+
+ return 0;
+}
+
+static float array_max(float *arr, uint8_t arr_len)
+{
+ float max = arr[0];
+
+ for (size_t i = 1; i < arr_len; i++) {
+ if (arr[i] > max)
+ max = arr[i];
+ }
+
+ return max;
+}
+
+static void softmax(float *input, uint8_t input_len)
+{
+ float rowmax = array_max(input, input_len);
+
+ float input_exp[input_len];
+ float sum = 0.0f;
+
+ for (size_t i = 0; i != input_len; ++i) {
+ input_exp[i] = exp(input[i] - rowmax);
+ sum += input_exp[i];
+ }
+
+ for (size_t i = 0; i != input_len; ++i)
+ input[i] = input_exp[i] / sum;
+}
+
+static uint8_t index_of_max(float *arr, uint8_t arr_len)
+{
+ uint8_t i = 0;
+ uint8_t max_index = 0;
+ float max = arr[0];
+
+ for (i = 1; i < arr_len; i++) {
+ if (arr[i] > max) {
+ max = arr[i];
+ max_index = i;
+ }
+ }
+
+ return max_index;
+}
+
+int main(int argc, char *argv[])
+{
+ const char *model_file;
+ const char *input_file;
+ float *probabilities;
+ uint8_t expected_digit;
+ uint8_t predicted_digit;
+ odp_instance_t inst;
+ odp_ml_data_t data;
+ odp_ml_model_t ml_model;
+ odp_ml_data_seg_t input;
+ odp_ml_data_seg_t output;
+ odp_ml_capability_t capa;
+ odp_ml_config_t ml_config;
+ odp_ml_model_param_t model_param;
+ int ret = 0;
+
+ if (argc != 3) {
+ printf("Please provide an input image file for classification.\n"
+ "\nUsage:\n"
+ " %s model_file input_image\n"
+ "\nThis example classifies digit written on the input image.\n\n",
+ argv[0]);
+ return -1;
+ }
+
+ model_file = argv[1];
+ input_file = argv[2];
+
+ if (odp_init_global(&inst, NULL, NULL)) {
+ printf("Global init failed.\n");
+ return -1;
+ }
+
+ if (odp_init_local(inst, ODP_THREAD_CONTROL)) {
+ printf("Local init failed.\n");
+ return -1;
+ }
+
+ if (odp_ml_capability(&capa)) {
+ printf("odp_ml_capability() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ if (MAX_MODEL_SIZE > capa.max_model_size) {
+ printf("Configured max model size %d exceeds max mode size %" PRIu64 " in capa\n",
+ MAX_MODEL_SIZE, capa.max_model_size);
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_config_init(&ml_config);
+ ml_config.max_model_size = MAX_MODEL_SIZE;
+ ml_config.load_mode_mask = ODP_ML_COMPL_MODE_SYNC;
+ ml_config.run_mode_mask = ODP_ML_COMPL_MODE_SYNC;
+
+ if (odp_ml_config(&ml_config)) {
+ printf("odp_ml_config() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_model_param_init(&model_param);
+ if (read_model_from_file(model_file, &model_param)) {
+ printf("Read model file failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ ml_model = odp_ml_model_create("mnist", &model_param);
+ free(model_param.model);
+ if (ml_model == ODP_ML_MODEL_INVALID) {
+ printf("odp_ml_model_create() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_model_print(ml_model);
+
+ if (odp_ml_model_load(ml_model, NULL)) {
+ printf("odp_ml_model_load() failed\n");
+ ret = -1;
+ goto destroy_model;
+ }
+
+ data.num_input_seg = 1;
+ data.num_output_seg = 1;
+ data.input_seg = &input;
+ data.output_seg = &output;
+ if (prepare_run_params(input_file, &expected_digit, &input, &output)) {
+ printf("prepare_run_params() failed\n");
+ ret = -1;
+ goto unload;
+ }
+
+ if (odp_ml_run(ml_model, &data, NULL) != 1) {
+ printf("odp_ml_model_run() failed\n");
+ ret = -1;
+ goto free_model_io;
+ }
+
+ probabilities = output.addr;
+
+ /* Post-process the model output */
+ softmax(probabilities, OUTPUT_NUM_ELEMS);
+ predicted_digit = index_of_max(probabilities, OUTPUT_NUM_ELEMS);
+ printf("predicted_digit: %u, expected_digit: %u\n", predicted_digit, expected_digit);
+
+free_model_io:
+ free(input.addr);
+ free(output.addr);
+
+unload:
+ if (odp_ml_model_unload(ml_model, NULL)) {
+ printf("odp_ml_model_unload() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+destroy_model:
+ /* Destroy the model */
+ if (odp_ml_model_destroy(ml_model)) {
+ printf("odp_ml_model_destroy() failed\n");
+ ret = -1;
+ }
+
+odp_term:
+ if (odp_term_local()) {
+ printf("Local term failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(inst)) {
+ printf("Global term failed.\n");
+ return -1;
+ }
+
+ return ret;
+}
diff --git a/platform/linux-generic/example/ml/model_explorer.c b/platform/linux-generic/example/ml/model_explorer.c
new file mode 100644
index 000000000..bd449b032
--- /dev/null
+++ b/platform/linux-generic/example/ml/model_explorer.c
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp_api.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "model_read.h"
+
+/**
+ * Read basic model information, e.g. inputs/outputs.
+ */
+
+int main(int argc, char *argv[])
+{
+ odp_instance_t inst;
+ odp_ml_model_t ml_model;
+ odp_ml_capability_t capa;
+ odp_ml_config_t ml_config;
+ odp_ml_model_param_t model_param;
+ int ret = 0;
+
+ if (argc != 2) {
+ printf("Please specify model path\n"
+ "\nUsage:\n"
+ " %s model_path\n"
+ "\nThis example prints model information\n\n",
+ argv[0]);
+ return -1;
+ }
+
+ if (odp_init_global(&inst, NULL, NULL)) {
+ printf("Global init failed.\n");
+ return -1;
+ }
+
+ if (odp_init_local(inst, ODP_THREAD_CONTROL)) {
+ printf("Local init failed.\n");
+ return -1;
+ }
+
+ if (odp_ml_capability(&capa)) {
+ printf("odp_ml_capability() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_config_init(&ml_config);
+ ml_config.max_model_size = capa.max_model_size;
+ ml_config.load_mode_mask = ODP_ML_COMPL_MODE_SYNC;
+ ml_config.run_mode_mask = ODP_ML_COMPL_MODE_SYNC;
+
+ if (odp_ml_config(&ml_config)) {
+ printf("odp_ml_config() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_model_param_init(&model_param);
+ if (read_model_from_file(argv[1], &model_param)) {
+ ret = -1;
+ goto odp_term;
+ }
+
+ ml_model = odp_ml_model_create("model-explorer", &model_param);
+ free(model_param.model);
+ if (ml_model == ODP_ML_MODEL_INVALID) {
+ printf("odp_ml_model_create failed.\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_model_print(ml_model);
+
+odp_term:
+ if (odp_term_local()) {
+ printf("Local term failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(inst)) {
+ printf("Global term failed.\n");
+ return -1;
+ }
+
+ return ret;
+}
diff --git a/platform/linux-generic/example/ml/model_read.c b/platform/linux-generic/example/ml/model_read.c
new file mode 100644
index 000000000..7aa20bf35
--- /dev/null
+++ b/platform/linux-generic/example/ml/model_read.c
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <odp_api.h>
+
+#include "model_read.h"
+
+int read_model_from_file(const char *file_name, odp_ml_model_param_t *model_param)
+{
+ FILE *model_file;
+ /* Number of elements successfully read */
+ size_t num_elem;
+
+ /* Get the model file size in bytes */
+ model_file = fopen(file_name, "rb");
+ if (model_file == NULL) {
+ perror("Failed to open model file");
+ return -1;
+ }
+
+ fseek(model_file, 0, SEEK_END);
+ model_param->size = ftell(model_file);
+ rewind(model_file);
+
+	/* Allocate memory for model buffer */
+	model_param->model = malloc(model_param->size);
+	if (!model_param->model) {
+		printf("Allocating memory for model buffer failed\n");
+		fclose(model_file);
+		return -1;
+	}
+
+ /* Read the model file */
+ num_elem = fread(model_param->model, model_param->size, 1, model_file);
+ fclose(model_file);
+ if (num_elem != 1) {
+ printf("Read model file failed\n");
+ free(model_param->model);
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/platform/linux-generic/example/ml/model_read.h b/platform/linux-generic/example/ml/model_read.h
new file mode 100644
index 000000000..df2062d5f
--- /dev/null
+++ b/platform/linux-generic/example/ml/model_read.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_MODEL_READ_H_
+#define ODP_MODEL_READ_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+
+/**
+ * Read model binaries from model file
+ *
+ * @param file_name The name of model file
+ * @param model_param Model parameter where model content and size are read to
+ *
+ * @retval 0 on success
+ * @retval < 0 on failure
+ */
+int read_model_from_file(const char *file_name, odp_ml_model_param_t *model_param);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/example/ml/odp_ml_run_mnist.sh b/platform/linux-generic/example/ml/odp_ml_run_mnist.sh
new file mode 100755
index 000000000..f83d6f60d
--- /dev/null
+++ b/platform/linux-generic/example/ml/odp_ml_run_mnist.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+#
+set -e
+
+# wget https://github.com/onnx/models/raw/main/validated/vision/classification/mnist/model/mnist-12.onnx
+./mnist${EXEEXT} mnist-12.onnx example_digit.csv
diff --git a/platform/linux-generic/example/ml/odp_ml_run_model_explorer.sh b/platform/linux-generic/example/ml/odp_ml_run_model_explorer.sh
new file mode 100755
index 000000000..7f9fed5a6
--- /dev/null
+++ b/platform/linux-generic/example/ml/odp_ml_run_model_explorer.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+#
+set -e
+
+./model_explorer${EXEEXT} simple_linear.onnx
diff --git a/platform/linux-generic/example/ml/odp_ml_run_simple_linear.sh b/platform/linux-generic/example/ml/odp_ml_run_simple_linear.sh
new file mode 100755
index 000000000..b394b61a8
--- /dev/null
+++ b/platform/linux-generic/example/ml/odp_ml_run_simple_linear.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+#
+set -e
+
+./simple_linear${EXEEXT} [2,4,5]
diff --git a/platform/linux-generic/example/ml/simple_linear.c b/platform/linux-generic/example/ml/simple_linear.c
new file mode 100644
index 000000000..3417219c7
--- /dev/null
+++ b/platform/linux-generic/example/ml/simple_linear.c
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <inttypes.h>
+
+#include "model_read.h"
+
+/**
+ * About model simple_linear.onnx used in this example.
+ *
+ * Model info:
+ * Inputs: name: x, type: int32, shape: [1]
+ * Outputs: name: y, type: int32, shape: [1]
+ *
+ * The model is of form y = 3 * x + 4 where x is given as the second argument.
+ * Thus when x = 5, the output y should be 19.
+ */
+
+#define NUM_INPUTS 1
+#define NUM_OUTPUTS 1
+#define MAX_NUM_WORKERS 10
+#define MAX_MODEL_SIZE 500
+
+typedef struct infer_param_t {
+ int32_t x;
+ odp_ml_model_t ml_model;
+} infer_param_t;
+
+typedef struct {
+ odp_shm_t shm;
+ /* Thread specific arguments */
+ infer_param_t infer_param[MAX_NUM_WORKERS];
+} thread_args_t;
+
+/* Global pointer to thread_args */
+static thread_args_t *thread_args;
+
+static int run_inference(void *infer_param)
+{
+ int32_t y;
+ odp_ml_data_t data;
+ odp_ml_data_seg_t input;
+ odp_ml_data_seg_t output;
+ infer_param_t *param = (infer_param_t *)infer_param;
+
+ data.num_input_seg = NUM_INPUTS;
+ data.input_seg = &input;
+ input.addr = &param->x;
+ input.size = sizeof(int32_t);
+
+ data.num_output_seg = NUM_OUTPUTS;
+ data.output_seg = &output;
+ output.addr = &y;
+ output.size = sizeof(int32_t);
+
+ while (1) {
+ int ret = odp_ml_run(param->ml_model, &data, NULL);
+
+ if (ret == 1)
+ break;
+
+		if (ret < 0) {
+			ODPH_ERR("odp_ml_run() failed: %d\n", ret);
+ return -1;
+ }
+ }
+
+ printf("y = 3 * %d + 4: %d\n", param->x, y);
+
+ return 0;
+}
+
+static int parse_argv1(char *argv1, uint32_t *num, int32_t *x)
+{
+ char *token;
+ int i;
+
+ if (!strstr(argv1, "[")) {
+ *num = 1;
+ *x = strtol(argv1, NULL, 10);
+ return 0;
+ }
+
+ token = strtok(argv1, "[,]");
+ if (token == NULL) {
+ ODPH_ERR("Invalid argv[1]\n");
+ return -1;
+ }
+ x[0] = strtol(token, NULL, 10);
+
+	for (i = 0; i < MAX_NUM_WORKERS - 1; i++) {
+		token = strtok(NULL, "[,]");
+		if (token == NULL)
+			break;
+
+		x[i + 1] = strtol(token, NULL, 10);
+	}
+
+	if (i == MAX_NUM_WORKERS - 1 && strtok(NULL, "[,]") != NULL) {
+		ODPH_ERR("Too much xs, maximum number is: %d\n", MAX_NUM_WORKERS);
+		return -1;
+	}
+
+ *num = i + 1;
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ odp_shm_t shm;
+ int num_workers;
+ odp_instance_t inst;
+ odp_cpumask_t cpumask;
+ odp_ml_model_t ml_model;
+ odp_ml_capability_t capa;
+ odp_ml_config_t ml_config;
+ int32_t x[MAX_NUM_WORKERS];
+ odp_ml_model_param_t model_param;
+ odph_thread_t thread_tbl[MAX_NUM_WORKERS];
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[MAX_NUM_WORKERS];
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ int ret = 0;
+ uint32_t num = 0;
+
+ if (argc != 2) {
+ ODPH_ERR("Please specify x\n"
+ "\nUsage:\n"
+ " %s x\n"
+ "\nThis example runs inference on model y = 3x + 4\n\n",
+ argv[0]);
+ return -1;
+ }
+
+ if (parse_argv1(argv[1], &num, x))
+ return -1;
+
+ if (odp_init_global(&inst, NULL, NULL)) {
+ ODPH_ERR("Global init failed.\n");
+ return -1;
+ }
+
+ if (odp_init_local(inst, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("Local init failed.\n");
+ return -1;
+ }
+
+ if (odp_ml_capability(&capa)) {
+ ODPH_ERR("odp_ml_capability() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ if (MAX_MODEL_SIZE > capa.max_model_size) {
+		ODPH_ERR("Configured max model size %d exceeds max model size %" PRIu64 " in capa\n",
+			 MAX_MODEL_SIZE, capa.max_model_size);
+ ret = -1;
+ goto odp_term;
+ }
+
+ /* Set ML configuration parameter */
+ odp_ml_config_init(&ml_config);
+ ml_config.max_model_size = MAX_MODEL_SIZE;
+ ml_config.load_mode_mask = ODP_ML_COMPL_MODE_SYNC;
+ ml_config.run_mode_mask = ODP_ML_COMPL_MODE_SYNC;
+
+ if (odp_ml_config(&ml_config)) {
+ ODPH_ERR("odp_ml_config() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_model_param_init(&model_param);
+ if (read_model_from_file("simple_linear.onnx", &model_param)) {
+ ret = -1;
+ goto odp_term;
+ }
+
+ ml_model = odp_ml_model_create("simple linear", &model_param);
+ free(model_param.model);
+ if (ml_model == ODP_ML_MODEL_INVALID) {
+ ODPH_ERR("odp_ml_model_create() failed\n");
+ ret = -1;
+ goto odp_term;
+ }
+
+ odp_ml_model_print(ml_model);
+ odp_ml_print();
+
+ if (odp_ml_model_load(ml_model, NULL)) {
+ ODPH_ERR("odp_ml_model_load() failed\n");
+ ret = -1;
+ goto destroy_model;
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("_thread_args", sizeof(thread_args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("Error: shared mem reserve failed.\n");
+ ret = -1;
+ goto unload;
+ }
+
+ thread_args = odp_shm_addr(shm);
+ if (thread_args == NULL) {
+ ODPH_ERR("Error: shared mem alloc failed.\n");
+ ret = -1;
+ goto free_shm;
+ }
+	memset(thread_args, 0, sizeof(thread_args_t));
+	thread_args->shm = shm;
+
+ /* Prepare inference parameter */
+ for (uint32_t i = 0; i < num; i++) {
+ thread_args->infer_param[i].x = x[i];
+ thread_args->infer_param[i].ml_model = ml_model;
+ }
+
+ num_workers = odp_cpumask_default_worker(&cpumask, num);
+ (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+
+ printf("num worker threads: %i\n", num_workers);
+ printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf("cpu mask: %s\n", cpumaskstr);
+
+ /* Create and init worker threads */
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = inst;
+ thr_common.cpumask = &cpumask;
+
+ for (int i = 0; i < num_workers; ++i) {
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = run_inference;
+ thr_param[i].arg = &thread_args->infer_param[i];
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ }
+
+ odph_thread_create(thread_tbl, &thr_common, thr_param, num_workers);
+
+ odph_thread_join(thread_tbl, num_workers);
+
+free_shm:
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("Error: shm free global data\n");
+ return -1;
+ }
+
+unload:
+ /* Unload a model */
+	if (odp_ml_model_unload(ml_model, NULL)) {
+		ODPH_ERR("odp_ml_model_unload() failed\n");
+ ret = -1;
+ }
+
+destroy_model:
+ if (odp_ml_model_destroy(ml_model)) {
+ ODPH_ERR("odp_ml_model_destroy() failed\n");
+ ret = -1;
+ }
+
+odp_term:
+ if (odp_term_local()) {
+ ODPH_ERR("Local term failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(inst)) {
+ ODPH_ERR("Global term failed.\n");
+ return -1;
+ }
+
+ return ret;
+}
diff --git a/platform/linux-generic/example/ml/simple_linear.onnx b/platform/linux-generic/example/ml/simple_linear.onnx
new file mode 100644
index 000000000..45c4b95b9
--- /dev/null
+++ b/platform/linux-generic/example/ml/simple_linear.onnx
Binary files differ
diff --git a/platform/linux-generic/include-abi/odp/api/abi/align.h b/platform/linux-generic/include-abi/odp/api/abi/align.h
new file mode 100644
index 000000000..7fa343078
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/align.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/align.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/atomic.h b/platform/linux-generic/include-abi/odp/api/abi/atomic.h
new file mode 100644
index 000000000..4f481f913
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/atomic.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP atomic operations
+ */
+
+#ifndef ODP_API_ABI_ATOMIC_H_
+#define ODP_API_ABI_ATOMIC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/align.h>
+
+/**
+ * @internal
+ * Atomic 32-bit unsigned integer
+ */
+typedef struct ODP_ALIGNED(sizeof(uint32_t)) odp_atomic_u32_s {
+ uint32_t v; /**< Actual storage for the atomic variable */
+} odp_atomic_u32_t;
+
+#if __GCC_ATOMIC_LLONG_LOCK_FREE >= 2
+
+/**
+ * @internal
+ * Atomic 64-bit unsigned integer
+ */
+typedef struct ODP_ALIGNED(sizeof(uint64_t)) odp_atomic_u64_s {
+ uint64_t v; /**< Actual storage for the atomic variable */
+} odp_atomic_u64_t;
+
+#else
+
+#define ODP_ATOMIC_U64_LOCK 1
+
+/**
+ * @internal
+ * Atomic 64-bit unsigned integer
+ */
+typedef struct ODP_ALIGNED(sizeof(uint64_t)) odp_atomic_u64_s {
+ uint64_t v; /**< Actual storage for the atomic variable */
+ /* Some architectures do not support lock-free operations on 64-bit
+ * data types. We use a spin lock to ensure atomicity. */
+ char lock; /**< Spin lock (if needed) used to ensure atomic access */
+} odp_atomic_u64_t;
+
+#endif
+
+#if defined(__SIZEOF_INT128__) || defined(_ODP_LOCK_FREE_128BIT_ATOMICS)
+
+/**
+ * @internal
+ * Atomic 128-bit unsigned integer
+ */
+typedef struct ODP_ALIGNED(sizeof(odp_u128_t)) odp_atomic_u128_s {
+ odp_u128_t v; /**< Actual storage for the atomic variable */
+} odp_atomic_u128_t;
+
+#else
+
+/**
+ * @internal
+ * Atomic 128-bit unsigned integer
+ */
+typedef struct ODP_ALIGNED(sizeof(odp_u128_t)) odp_atomic_u128_s {
+ odp_u128_t v; /**< Actual storage for the atomic variable */
+ /* Some architectures do not support lock-free operations on 128-bit
+ * data types. We use a spin lock to ensure atomicity. */
+ char lock; /**< Spin lock (if needed) used to ensure atomic access */
+} odp_atomic_u128_t;
+
+#endif
+
+/** @addtogroup odp_atomic
+ * @{
+ */
+
+#include <odp/api/plat/atomic_inlines.h>
+
+/**
+ * @}
+ */
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/barrier_types.h b/platform/linux-generic/include-abi/odp/api/abi/barrier.h
index 00b383cc6..cff12c577 100644
--- a/platform/linux-generic/include/odp/api/plat/barrier_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/barrier.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,8 +10,8 @@
* ODP barrier
*/
-#ifndef ODP_BARRIER_TYPES_H_
-#define ODP_BARRIER_TYPES_H_
+#ifndef ODP_ABI_BARRIER_H_
+#define ODP_ABI_BARRIER_H_
#ifdef __cplusplus
extern "C" {
diff --git a/platform/linux-generic/include-abi/odp/api/abi/buffer.h b/platform/linux-generic/include-abi/odp/api/abi/buffer.h
new file mode 100644
index 000000000..a6309fe39
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/buffer.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP buffer
+ */
+
+#ifndef ODP_API_ABI_BUFFER_H_
+#define ODP_API_ABI_BUFFER_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined API functions */
+#include <odp/api/plat/buffer_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/buffer_types.h b/platform/linux-generic/include-abi/odp/api/abi/buffer_types.h
new file mode 100644
index 000000000..63067268c
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/buffer_types.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP buffer types
+ */
+
+#ifndef ODP_API_ABI_BUFFER_TYPES_H_
+#define ODP_API_ABI_BUFFER_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_buffer
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_buffer_t);
+
+#define ODP_BUFFER_INVALID _odp_cast_scalar(odp_buffer_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/byteorder_types.h b/platform/linux-generic/include-abi/odp/api/abi/byteorder.h
index 20d52bf8f..c0b5ebe0c 100644
--- a/platform/linux-generic/include/odp/api/plat/byteorder_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/byteorder.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,23 +10,25 @@
* ODP byteorder
*/
-#ifndef ODP_BYTEORDER_TYPES_H_
-#define ODP_BYTEORDER_TYPES_H_
+#ifndef ODP_API_ABI_BYTEORDER_H_
+#define ODP_API_ABI_BYTEORDER_H_
#ifdef __cplusplus
extern "C" {
#endif
+#include <odp/api/std_types.h>
+
#ifndef __BYTE_ORDER__
-#error __BYTE_ORDER not defined!
+#error __BYTE_ORDER__ not defined!
#endif
#ifndef __ORDER_BIG_ENDIAN__
-#error __BIG_ENDIAN not defined!
+#error __ORDER_BIG_ENDIAN__ not defined!
#endif
#ifndef __ORDER_LITTLE_ENDIAN__
-#error __LITTLE_ENDIAN not defined!
+#error __ORDER_LITTLE_ENDIAN__ not defined!
#endif
/* for use with type checkers such as sparse */
@@ -73,6 +75,8 @@ typedef uint64_t __odp_bitwise odp_u64be_t;
typedef uint16_t __odp_bitwise odp_u16sum_t;
typedef uint32_t __odp_bitwise odp_u32sum_t;
+#include <odp/api/plat/byteorder_inlines.h>
+
/**
* @}
*/
diff --git a/platform/linux-generic/include-abi/odp/api/abi/classification.h b/platform/linux-generic/include-abi/odp/api/abi/classification.h
new file mode 100644
index 000000000..d63763dbd
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/classification.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP classification descriptor
+ */
+
+#ifndef ODP_API_ABI_CLASSIFICATION_H_
+#define ODP_API_ABI_CLASSIFICATION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_classification
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_cos_t);
+#define ODP_COS_INVALID _odp_cast_scalar(odp_cos_t, 0)
+
+typedef ODP_HANDLE_T(odp_pmr_t);
+#define ODP_PMR_INVALID _odp_cast_scalar(odp_pmr_t, 0)
+
+#define ODP_COS_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/comp.h b/platform/linux-generic/include-abi/odp/api/abi/comp.h
new file mode 100644
index 000000000..45681e961
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/comp.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_COMP_H_
+#define ODP_ABI_COMP_H_
+
+#include <odp/api/plat/strong_types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @addtogroup odp_compression
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_comp_session_t);
+
+#define ODP_COMP_SESSION_INVALID _odp_cast_scalar(odp_comp_session_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/cpumask.h b/platform/linux-generic/include-abi/odp/api/abi/cpumask.h
new file mode 100644
index 000000000..c64bf2a69
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/cpumask.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/cpumask.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/crypto.h b/platform/linux-generic/include-abi/odp/api/abi/crypto.h
new file mode 100644
index 000000000..bef725c28
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/crypto.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP crypto
+ */
+
+#ifndef ODP_API_ABI_CRYPTO_H_
+#define ODP_API_ABI_CRYPTO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined API functions */
+#include <odp/api/plat/crypto_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/crypto_types.h b/platform/linux-generic/include-abi/odp/api/abi/crypto_types.h
index 2cc747eb2..b1e4aa5ae 100644
--- a/platform/linux-generic/include/odp/api/plat/crypto_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/crypto_types.h
@@ -1,43 +1,39 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
* ODP crypto
*/
-#ifndef ODP_CRYPTO_TYPES_H_
-#define ODP_CRYPTO_TYPES_H_
+#ifndef ODP_API_ABI_CRYPTO_TYPES_H_
+#define ODP_API_ABI_CRYPTO_TYPES_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/crypto.h>
-#else
+#include <odp/api/std_types.h>
+
+#include <odp/api/plat/strong_types.h>
-/** @ingroup odp_crypto
+/** @addtogroup odp_crypto
* @{
*/
#define ODP_CRYPTO_SESSION_INVALID (0xffffffffffffffffULL)
typedef uint64_t odp_crypto_session_t;
-typedef ODP_HANDLE_T(odp_crypto_compl_t);
/**
* @}
*/
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/debug.h b/platform/linux-generic/include-abi/odp/api/abi/debug.h
new file mode 100644
index 000000000..a3a86e64a
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/debug.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP atomic operations
+ */
+
+#ifndef ODP_API_ABI_DEBUG_H_
+#define ODP_API_ABI_DEBUG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal _Static_assert was only added in GCC 4.6 and the C++ version
+ * static_assert for g++ 6 and above. Provide a weak replacement for previous
+ * versions.
+ */
+#define _odp_merge(a, b) a##b
+/** @internal */
+#define _odp_label(a) _odp_merge(_ODP_SASSERT_, a)
+/** @internal */
+#define _ODP_SASSERT _odp_label(__COUNTER__)
+/** @internal */
+#define _ODP_SASSERT_ENUM(e) { _ODP_SASSERT = 1 / !!(e) }
+/** @internal */
+#define _odp_static_assert(e, s) enum _ODP_SASSERT_ENUM(e)
+
+#if defined(__clang__)
+#if defined(__cplusplus)
+#if !__has_feature(cxx_static_assert) && !defined(static_assert)
+/** @internal */
+#define static_assert(e, s) _odp_static_assert(e, s)
+#endif
+#elif !__has_feature(c_static_assert) && !defined(_Static_assert)
+/** @internal */
+#define _Static_assert(e, s) _odp_static_assert(e, s)
+#endif
+
+#elif defined(__GNUC__)
+#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6)) || \
+ (__GNUC__ < 6 && defined(__cplusplus))
+#if defined(__cplusplus)
+#if !defined(static_assert)
+/** @internal */
+#define static_assert(e, s) _odp_static_assert(e, s)
+#endif
+#elif !defined(_Static_assert)
+/** @internal */
+#define _Static_assert(e, s) _odp_static_assert(e, s)
+#endif
+#endif
+
+#endif
+
+#include <odp/api/abi-default/debug.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/dma.h b/platform/linux-generic/include-abi/odp/api/abi/dma.h
new file mode 100644
index 000000000..60798143c
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/dma.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP DMA
+ */
+
+#ifndef ODP_API_ABI_DMA_H_
+#define ODP_API_ABI_DMA_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/dma_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/dma_types.h b/platform/linux-generic/include-abi/odp/api/abi/dma_types.h
new file mode 100644
index 000000000..d5bee0374
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/dma_types.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_API_ABI_DMA_TYPES_H_
+#define ODP_API_ABI_DMA_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_dma
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_dma_t);
+
+#define ODP_DMA_INVALID _odp_cast_scalar(odp_dma_t, 0)
+
+typedef uint32_t odp_dma_transfer_id_t;
+
+#define ODP_DMA_TRANSFER_ID_INVALID ((odp_dma_transfer_id_t)0)
+
+typedef ODP_HANDLE_T(odp_dma_compl_t);
+
+#define ODP_DMA_COMPL_INVALID _odp_cast_scalar(odp_dma_compl_t, 0)
+
+#define ODP_DMA_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/errno.h b/platform/linux-generic/include-abi/odp/api/abi/errno.h
new file mode 100644
index 000000000..6215a0676
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/errno.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2020, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP errno
+ */
+
+#ifndef ODP_API_ABI_ERRNO_H_
+#define ODP_API_ABI_ERRNO_H_
+
+#include <odp/api/abi-default/errno.h>
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/event.h b/platform/linux-generic/include-abi/odp/api/abi/event.h
new file mode 100644
index 000000000..e059f318c
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/event.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP event
+ */
+
+#ifndef ODP_API_ABI_EVENT_H_
+#define ODP_API_ABI_EVENT_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/event_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/event_types.h b/platform/linux-generic/include-abi/odp/api/abi/event_types.h
new file mode 100644
index 000000000..01ee66cd3
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/event_types.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event type definitions
+ */
+
+#ifndef ODP_API_ABI_EVENT_TYPES_H_
+#define ODP_API_ABI_EVENT_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_event
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_event_t);
+
+#define ODP_EVENT_INVALID _odp_cast_scalar(odp_event_t, 0)
+
+typedef enum odp_event_type_t {
+ ODP_EVENT_BUFFER = 1,
+ ODP_EVENT_PACKET = 2,
+ ODP_EVENT_TIMEOUT = 3,
+ ODP_EVENT_IPSEC_STATUS = 5,
+ ODP_EVENT_PACKET_VECTOR = 6,
+ ODP_EVENT_PACKET_TX_COMPL = 7,
+ ODP_EVENT_DMA_COMPL = 8,
+ ODP_EVENT_ML_COMPL = 9
+} odp_event_type_t;
+
+typedef enum odp_event_subtype_t {
+ ODP_EVENT_NO_SUBTYPE = 0,
+ ODP_EVENT_PACKET_BASIC = 1,
+ ODP_EVENT_PACKET_CRYPTO = 2,
+ ODP_EVENT_PACKET_IPSEC = 3,
+ ODP_EVENT_PACKET_COMP = 4,
+ ODP_EVENT_ML_COMPL_LOAD = 5,
+ ODP_EVENT_ML_COMPL_RUN = 6
+} odp_event_subtype_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/hash.h b/platform/linux-generic/include-abi/odp/api/abi/hash.h
new file mode 100644
index 000000000..b132d7eb4
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/hash.h
@@ -0,0 +1,21 @@
+/* Copyright (c) 2020, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP hash
+ */
+
+#ifndef ODP_API_ABI_HASH_H_
+#define ODP_API_ABI_HASH_H_
+
+#include <odp/api/abi-default/hash.h>
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/hash_inlines.h>
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/init.h b/platform/linux-generic/include-abi/odp/api/abi/init.h
new file mode 100644
index 000000000..7ad523fde
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/init.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/init.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/ipsec.h b/platform/linux-generic/include-abi/odp/api/abi/ipsec.h
new file mode 100644
index 000000000..1817e5564
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/ipsec.h
@@ -0,0 +1,32 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP IPsec
+ */
+
+#ifndef ODP_API_ABI_IPSEC_H_
+#define ODP_API_ABI_IPSEC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined API functions */
+#include <odp/api/plat/ipsec_inlines.h>
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h b/platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h
new file mode 100644
index 000000000..1c5501997
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/ipsec_types.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP IPsec platform specific types
+ */
+
+#ifndef ODP_API_ABI_IPSEC_TYPES_H_
+#define ODP_API_ABI_IPSEC_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_ipsec
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_ipsec_sa_t);
+
+#define ODP_IPSEC_SA_INVALID _odp_cast_scalar(odp_ipsec_sa_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/ml_types.h b/platform/linux-generic/include-abi/odp/api/abi/ml_types.h
new file mode 100644
index 000000000..0fdb7a8dc
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/ml_types.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2023 Nokia
+ */
+
+#ifndef ODP_API_ABI_ML_TYPES_H_
+#define ODP_API_ABI_ML_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @internal Implementation specific ML parameters */
+struct _odp_ml_model_extra_param_t {
+ /** @internal Dummy field to avoid empty struct */
+ char dummy;
+};
+
+/** @addtogroup odp_ml
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_ml_model_t);
+typedef ODP_HANDLE_T(odp_ml_compl_t);
+typedef struct _odp_ml_model_extra_param_t odp_ml_model_extra_param_t;
+
+#define ODP_ML_MODEL_INVALID _odp_cast_scalar(odp_ml_model_t, 0)
+#define ODP_ML_COMPL_INVALID _odp_cast_scalar(odp_ml_compl_t, 0)
+
+#define ODP_ML_MODEL_NAME_LEN 64
+#define ODP_ML_MODEL_IO_NAME_LEN 64
+#define ODP_ML_SHAPE_NAME_LEN 16
+#define ODP_ML_EXTRA_STAT_NAME_LEN 64
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet.h b/platform/linux-generic/include-abi/odp/api/abi/packet.h
new file mode 100644
index 000000000..5703141d4
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_H_
+#define ODP_API_ABI_PACKET_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_flags.h b/platform/linux-generic/include-abi/odp/api/abi/packet_flags.h
new file mode 100644
index 000000000..8e7b88ca2
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_flags.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_FLAGS_H_
+#define ODP_API_ABI_PACKET_FLAGS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/packet_flag_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_io.h b/platform/linux-generic/include-abi/odp/api/abi/packet_io.h
new file mode 100644
index 000000000..c7c7b2faa
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_io.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP Packet IO
+ */
+
+#ifndef ODP_API_ABI_PACKET_IO_H_
+#define ODP_API_ABI_PACKET_IO_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/packet_io_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_io_types.h b/platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h
index 5a45321fb..76b162020 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_io_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_io_types.h
@@ -1,18 +1,18 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
- * ODP Packet IO
+ * ODP Packet IO types
*/
-#ifndef ODP_PACKET_IO_TYPES_H_
-#define ODP_PACKET_IO_TYPES_H_
+#ifndef ODP_API_ABI_PACKET_IO_TYPES_H_
+#define ODP_API_ABI_PACKET_IO_TYPES_H_
#ifdef __cplusplus
extern "C" {
@@ -22,11 +22,11 @@ extern "C" {
#include <odp/api/plat/strong_types.h>
/** @addtogroup odp_packet_io
- * Operations on a packet.
* @{
*/
typedef ODP_HANDLE_T(odp_pktio_t);
+typedef ODP_HANDLE_T(odp_lso_profile_t);
/** @internal */
typedef struct odp_pktin_queue_t {
@@ -41,17 +41,19 @@ typedef struct odp_pktout_queue_t {
} odp_pktout_queue_t;
#define ODP_PKTIO_INVALID _odp_cast_scalar(odp_pktio_t, 0)
+#define ODP_LSO_PROFILE_INVALID _odp_cast_scalar(odp_lso_profile_t, 0)
+
+#define ODP_PKTIO_MAX_INDEX 63
#define ODP_PKTIO_MACADDR_MAXSIZE 16
#define ODP_PKTIN_NO_WAIT 0
-#define ODP_PKTIN_WAIT UINT64_MAX
-/** Get printable format of odp_pktio_t */
-static inline uint64_t odp_pktio_to_u64(odp_pktio_t hdl)
-{
- return _odp_pri(hdl);
-}
+#define ODP_PKTIN_MAX_QUEUES 64
+
+#define ODP_PKTOUT_MAX_QUEUES 64
+
+#define ODP_PKTIO_STATS_EXTRA_NAME_LEN 64
/**
* @}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_types.h b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h
new file mode 100644
index 000000000..90b2af107
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_types.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_API_ABI_PACKET_TYPES_H_
+#define ODP_API_ABI_PACKET_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_packet
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_packet_t);
+
+#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
+
+#define ODP_PACKET_OFFSET_INVALID 0xffff
+
+typedef ODP_HANDLE_T(odp_packet_seg_t);
+
+#define ODP_PACKET_SEG_INVALID _odp_cast_scalar(odp_packet_seg_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_buf_t);
+
+#define ODP_PACKET_BUF_INVALID _odp_cast_scalar(odp_packet_buf_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_vector_t);
+
+#define ODP_PACKET_VECTOR_INVALID _odp_cast_scalar(odp_packet_vector_t, 0)
+
+typedef ODP_HANDLE_T(odp_packet_tx_compl_t);
+
+#define ODP_PACKET_TX_COMPL_INVALID _odp_cast_scalar(odp_packet_tx_compl_t, 0)
+
+
+
+typedef enum {
+ ODP_PACKET_GREEN = 0,
+ ODP_PACKET_YELLOW = 1,
+ ODP_PACKET_RED = 2,
+ ODP_PACKET_ALL_COLORS = 3,
+} odp_packet_color_t;
+
+typedef enum {
+ ODP_PACKET_CHKSUM_UNKNOWN = 0,
+ ODP_PACKET_CHKSUM_BAD,
+ ODP_PACKET_CHKSUM_OK
+} odp_packet_chksum_status_t;
+
+typedef struct odp_packet_parse_result_flag_t {
+ union {
+ uint64_t all;
+
+ struct {
+ uint64_t has_error : 1;
+ uint64_t has_l2_error : 1;
+ uint64_t has_l3_error : 1;
+ uint64_t has_l4_error : 1;
+ uint64_t has_l2 : 1;
+ uint64_t has_l3 : 1;
+ uint64_t has_l4 : 1;
+ uint64_t has_eth : 1;
+ uint64_t has_eth_bcast : 1;
+ uint64_t has_eth_mcast : 1;
+ uint64_t has_jumbo : 1;
+ uint64_t has_vlan : 1;
+ uint64_t has_vlan_qinq : 1;
+ uint64_t has_arp : 1;
+ uint64_t has_ipv4 : 1;
+ uint64_t has_ipv6 : 1;
+ uint64_t has_ip_bcast : 1;
+ uint64_t has_ip_mcast : 1;
+ uint64_t has_ipfrag : 1;
+ uint64_t has_ipopt : 1;
+ uint64_t has_ipsec : 1;
+ uint64_t has_udp : 1;
+ uint64_t has_tcp : 1;
+ uint64_t has_sctp : 1;
+ uint64_t has_icmp : 1;
+ };
+ };
+
+} odp_packet_parse_result_flag_t;
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/pool.h b/platform/linux-generic/include-abi/odp/api/abi/pool.h
new file mode 100644
index 000000000..d8a80197c
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/pool.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP pool
+ */
+
+#ifndef ODP_API_ABI_POOL_H_
+#define ODP_API_ABI_POOL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined API functions */
+#include <odp/api/plat/pool_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/pool_types.h b/platform/linux-generic/include-abi/odp/api/abi/pool_types.h
new file mode 100644
index 000000000..77b0ff638
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/pool_types.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP pool
+ */
+
+#ifndef ODP_API_ABI_POOL_TYPES_H_
+#define ODP_API_ABI_POOL_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_pool
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_pool_t);
+
+#define ODP_POOL_INVALID _odp_cast_scalar(odp_pool_t, 0)
+
+#define ODP_POOL_NAME_LEN 32
+
+#define ODP_POOL_MAX_THREAD_STATS 128
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h b/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h
new file mode 100644
index 000000000..d81035df2
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/proto_stats.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats
+ */
+
+#ifndef ODP_API_ABI_PROTO_STATS_H_
+#define ODP_API_ABI_PROTO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Placeholder for inlined functions for non-ABI compat mode */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h b/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h
new file mode 100644
index 000000000..d9db29188
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/proto_stats_types.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2021, Marvell
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP proto stats types
+ */
+
+#ifndef ODP_API_ABI_PROTO_STATS_TYPES_H_
+#define ODP_API_ABI_PROTO_STATS_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_proto_stats
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_proto_stats_t);
+
+#define ODP_PROTO_STATS_INVALID _odp_cast_scalar(odp_proto_stats_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/queue.h b/platform/linux-generic/include-abi/odp/api/abi/queue.h
new file mode 100644
index 000000000..6c34123df
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/queue.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP queue
+ */
+
+#ifndef ODP_API_ABI_QUEUE_H_
+#define ODP_API_ABI_QUEUE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/queue_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/queue_types.h b/platform/linux-generic/include-abi/odp/api/abi/queue_types.h
index 1561e2239..4eff762bd 100644
--- a/platform/linux-generic/include/odp/api/plat/queue_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/queue_types.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,22 +11,17 @@
* ODP queue
*/
-#ifndef ODP_QUEUE_TYPES_H_
-#define ODP_QUEUE_TYPES_H_
+#ifndef ODP_API_ABI_QUEUE_TYPES_H_
+#define ODP_API_ABI_QUEUE_TYPES_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/queue.h>
-#else
-
#include <odp/api/std_types.h>
#include <odp/api/plat/strong_types.h>
-/** @ingroup odp_queue
+/** @addtogroup odp_queue
* @{
*/
@@ -39,8 +35,6 @@ typedef ODP_HANDLE_T(odp_queue_t);
* @}
*/
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp/api/cpu.h b/platform/linux-generic/include-abi/odp/api/abi/random.h
index d49c782b1..07714a47e 100644
--- a/platform/linux-generic/include/odp/api/cpu.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/random.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,19 +7,17 @@
/**
* @file
*
- * ODP CPU
+ * ODP random
*/
-#ifndef ODP_PLAT_CPU_H_
-#define ODP_PLAT_CPU_H_
+#ifndef ODP_API_ABI_RANDOM_H_
+#define ODP_API_ABI_RANDOM_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <odp/api/cpu_arch.h>
-
-#include <odp/api/spec/cpu.h>
+/* Empty placeholder header for function inlining */
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/rwlock.h b/platform/linux-generic/include-abi/odp/api/abi/rwlock.h
new file mode 100644
index 000000000..78d7c1a4a
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/rwlock.h
@@ -0,0 +1,10 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/rwlock.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/rwlock_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/rwlock_recursive.h b/platform/linux-generic/include-abi/odp/api/abi/rwlock_recursive.h
new file mode 100644
index 000000000..ab7150605
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/rwlock_recursive.h
@@ -0,0 +1,10 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/rwlock_recursive.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/rwlock_recursive_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/schedule.h b/platform/linux-generic/include-abi/odp/api/abi/schedule.h
new file mode 100644
index 000000000..bb28886b0
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/schedule.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Schedule
+ */
+
+#ifndef ODP_API_ABI_SCHEDULE_H_
+#define ODP_API_ABI_SCHEDULE_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Inlined API functions */
+#include <odp/api/plat/schedule_inlines.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/schedule_types.h b/platform/linux-generic/include-abi/odp/api/abi/schedule_types.h
new file mode 100644
index 000000000..d5164ff79
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/schedule_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/schedule_types.h>
diff --git a/platform/linux-generic/include/odp/api/plat/shared_memory_types.h b/platform/linux-generic/include-abi/odp/api/abi/shared_memory.h
index 2c5b4ed2e..bfcb9ebe5 100644
--- a/platform/linux-generic/include/odp/api/plat/shared_memory_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/shared_memory.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -11,38 +11,33 @@
* ODP shared memory
*/
-#ifndef ODP_SHARED_MEMORY_TYPES_H_
-#define ODP_SHARED_MEMORY_TYPES_H_
+#ifndef ODP_API_ABI_SHARED_MEMORY_H_
+#define ODP_API_ABI_SHARED_MEMORY_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/shared_memory.h>
-#else
-
#include <odp/api/std_types.h>
#include <odp/api/plat/strong_types.h>
-/** @ingroup odp_shared_memory
+/** @addtogroup odp_shared_memory
* @{
*/
typedef ODP_HANDLE_T(odp_shm_t);
#define ODP_SHM_INVALID _odp_cast_scalar(odp_shm_t, 0)
-#define ODP_SHM_NULL ODP_SHM_INVALID
#define ODP_SHM_NAME_LEN 32
+#define ODP_SHM_IOVA_INVALID ((uint64_t)-1)
+#define ODP_SHM_PA_INVALID ODP_SHM_IOVA_INVALID
+
/**
* @}
*/
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/spinlock.h b/platform/linux-generic/include-abi/odp/api/abi/spinlock.h
new file mode 100644
index 000000000..d1e5fa1e9
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/spinlock.h
@@ -0,0 +1,10 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/spinlock.h>
+
+/* Include inlined versions of API functions */
+#include <odp/api/plat/spinlock_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/spinlock_recursive.h b/platform/linux-generic/include-abi/odp/api/abi/spinlock_recursive.h
new file mode 100644
index 000000000..cdcbae1b4
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/spinlock_recursive.h
@@ -0,0 +1,10 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/spinlock_recursive.h>
+
+/* Include inlined versions of API functions */
+#include <odp/api/plat/spinlock_recursive_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/stash.h b/platform/linux-generic/include-abi/odp/api/abi/stash.h
new file mode 100644
index 000000000..69bf989d6
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/stash.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ */
+
+#ifndef ODP_API_ABI_STASH_H_
+#define ODP_API_ABI_STASH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Empty placeholder header for inline functions */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/stash_types.h b/platform/linux-generic/include-abi/odp/api/abi/stash_types.h
new file mode 100644
index 000000000..2a4115886
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/stash_types.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ */
+
+#ifndef ODP_API_ABI_STASH_TYPES_H_
+#define ODP_API_ABI_STASH_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+
+/** @addtogroup odp_stash
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_stash_t);
+
+#define ODP_STASH_INVALID _odp_cast_scalar(odp_stash_t, 0)
+
+#define ODP_STASH_NAME_LEN 32
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/packet_io_stats.h b/platform/linux-generic/include-abi/odp/api/abi/std.h
index a9cd9535e..201fca18e 100644
--- a/platform/linux-generic/include/odp/api/packet_io_stats.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/std.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,17 +7,17 @@
/**
* @file
*
- * ODP packet IO stats
+ * ODP std
*/
-#ifndef ODP_PLAT_PACKET_IO_STATS_H_
-#define ODP_PLAT_PACKET_IO_STATS_H_
+#ifndef ODP_API_ABI_STD_H_
+#define ODP_API_ABI_STD_H_
#ifdef __cplusplus
extern "C" {
#endif
-#include <odp/api/spec/packet_io_stats.h>
+#include <odp/api/plat/std_inlines.h>
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/std_types.h b/platform/linux-generic/include-abi/odp/api/abi/std_types.h
new file mode 100644
index 000000000..594e6f9dd
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/std_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/std_types.h>
diff --git a/platform/linux-generic/include/odp/api/hash.h b/platform/linux-generic/include-abi/odp/api/abi/sync.h
index 332029ed0..276514b58 100644
--- a/platform/linux-generic/include/odp/api/hash.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/sync.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,26 +7,26 @@
/**
* @file
*
- * ODP Hash function
+ * ODP barrier
*/
-#ifndef ODP_PLAT_HASH_H_
-#define ODP_PLAT_HASH_H_
+#ifndef ODP_API_ABI_SYNC_H_
+#define ODP_API_ABI_SYNC_H_
#ifdef __cplusplus
extern "C" {
#endif
-/** @ingroup odp_hash
+/** @addtogroup odp_barrier
* @{
*/
+#include <odp/api/plat/sync_inlines.h>
+
/**
* @}
*/
-#include <odp/api/spec/hash.h>
-
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/thread.h b/platform/linux-generic/include-abi/odp/api/abi/thread.h
new file mode 100644
index 000000000..14c074b95
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/thread.h
@@ -0,0 +1,8 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Inlined API functions */
+#include <odp/api/plat/thread_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/thread_types.h b/platform/linux-generic/include-abi/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/thrmask.h b/platform/linux-generic/include-abi/odp/api/abi/thrmask.h
new file mode 100644
index 000000000..ab05cd83f
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/thrmask.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/thrmask.h>
diff --git a/platform/linux-generic/include/odp/api/plat/ticketlock_types.h b/platform/linux-generic/include-abi/odp/api/abi/ticketlock.h
index 81d479d61..b621bea7e 100644
--- a/platform/linux-generic/include/odp/api/plat/ticketlock_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/ticketlock.h
@@ -1,18 +1,17 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
* ODP ticketlock
*/
-#ifndef ODP_TICKETLOCK_TYPES_H_
-#define ODP_TICKETLOCK_TYPES_H_
+#ifndef ODP_API_ABI_TICKETLOCK_H_
+#define ODP_API_ABI_TICKETLOCK_H_
#ifdef __cplusplus
extern "C" {
@@ -20,13 +19,22 @@ extern "C" {
#include <odp/api/atomic.h>
+/** @addtogroup odp_locks
+ * @{
+ */
+
/** @internal */
-struct odp_ticketlock_s {
+typedef struct odp_ticketlock_s {
odp_atomic_u32_t next_ticket; /**< Next ticket */
odp_atomic_u32_t cur_ticket; /**< Current ticket */
-};
+} odp_ticketlock_t;
-typedef struct odp_ticketlock_s odp_ticketlock_t;
+/* Include inlined versions of API functions */
+#include <odp/api/plat/ticketlock_inlines.h>
+
+/**
+ * @}
+ */
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/time.h b/platform/linux-generic/include-abi/odp/api/abi/time.h
new file mode 100644
index 000000000..62c7e2b67
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/time.h
@@ -0,0 +1,8 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Inlined API functions */
+#include <odp/api/plat/time_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/time_types.h b/platform/linux-generic/include-abi/odp/api/abi/time_types.h
new file mode 100644
index 000000000..cba80f508
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/time_types.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/time_types.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/timer.h b/platform/linux-generic/include-abi/odp/api/abi/timer.h
new file mode 100644
index 000000000..0a3b3a9cc
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/timer.h
@@ -0,0 +1,8 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* Inlined functions for non-ABI compat mode */
+#include <odp/api/plat/timer_inlines.h>
diff --git a/platform/linux-generic/include/odp/api/plat/timer_types.h b/platform/linux-generic/include-abi/odp/api/abi/timer_types.h
index 8821bed60..6cfa37a36 100644
--- a/platform/linux-generic/include/odp/api/plat/timer_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/timer_types.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -11,8 +11,8 @@
* ODP timer service
*/
-#ifndef ODP_TIMER_TYPES_H_
-#define ODP_TIMER_TYPES_H_
+#ifndef ODP_API_ABI_TIMER_TYPES_H_
+#define ODP_API_ABI_TIMER_TYPES_H_
#ifdef __cplusplus
extern "C" {
@@ -24,21 +24,19 @@ extern "C" {
* @{
**/
-struct odp_timer_pool_s; /**< Forward declaration */
+typedef ODP_HANDLE_T(odp_timer_pool_t);
-typedef struct odp_timer_pool_s *odp_timer_pool_t;
-
-#define ODP_TIMER_POOL_INVALID NULL
+#define ODP_TIMER_POOL_INVALID _odp_cast_scalar(odp_timer_pool_t, 0)
#define ODP_TIMER_POOL_NAME_LEN 32
typedef ODP_HANDLE_T(odp_timer_t);
-#define ODP_TIMER_INVALID _odp_cast_scalar(odp_timer_t, 0xffffffff)
+#define ODP_TIMER_INVALID _odp_cast_scalar(odp_timer_t, 0)
typedef ODP_HANDLE_T(odp_timeout_t);
-#define ODP_TIMEOUT_INVALID _odp_cast_scalar(odp_timeout_t, 0xffffffff)
+#define ODP_TIMEOUT_INVALID _odp_cast_scalar(odp_timeout_t, 0)
/**
* @}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/traffic_mngr.h b/platform/linux-generic/include-abi/odp/api/abi/traffic_mngr.h
new file mode 100644
index 000000000..0e6bc7982
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/traffic_mngr.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/traffic_mngr.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/version.h b/platform/linux-generic/include-abi/odp/api/abi/version.h
new file mode 100644
index 000000000..429d4f3fa
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/version.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/version.h>
diff --git a/platform/linux-generic/include/_ishm_internal.h b/platform/linux-generic/include/_ishm_internal.h
deleted file mode 100644
index c7c330774..000000000
--- a/platform/linux-generic/include/_ishm_internal.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_ISHM_INTERNAL_H_
-#define ODP_ISHM_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/types.h>
-
-/* flags available at ishm_reserve: */
-#define _ODP_ISHM_SINGLE_VA 1
-#define _ODP_ISHM_LOCK 2
-#define _ODP_ISHM_EXPORT 4 /*create export descr file in /tmp */
-
-/**
- * Shared memory block info
- */
-typedef struct _odp_ishm_info_t {
- const char *name; /**< Block name */
- void *addr; /**< Block address */
- uint64_t size; /**< Block size in bytes */
- uint64_t page_size; /**< Memory page size */
- uint32_t flags; /**< _ODP_ISHM_* flags */
- uint32_t user_flags;/**< user specific flags */
-} _odp_ishm_info_t;
-
-int _odp_ishm_reserve(const char *name, uint64_t size, int fd, uint32_t align,
- uint32_t flags, uint32_t user_flags);
-int _odp_ishm_free_by_index(int block_index);
-int _odp_ishm_free_by_name(const char *name);
-int _odp_ishm_free_by_address(void *addr);
-void *_odp_ishm_lookup_by_index(int block_index);
-int _odp_ishm_lookup_by_name(const char *name);
-int _odp_ishm_lookup_by_address(void *addr);
-int _odp_ishm_find_exported(const char *remote_name,
- pid_t external_odp_pid,
- const char *local_name);
-void *_odp_ishm_address(int block_index);
-int _odp_ishm_info(int block_index, _odp_ishm_info_t *info);
-int _odp_ishm_status(const char *title);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/ishmphy_internal.h b/platform/linux-generic/include/ishmphy_internal.h
index 0bc4207af..3ed29f40a 100644
--- a/platform/linux-generic/include/ishmphy_internal.h
+++ b/platform/linux-generic/include/ishmphy_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/platform/linux-generic/include/odp/api/align.h b/platform/linux-generic/include/odp/api/align.h
deleted file mode 100644
index c238b80af..000000000
--- a/platform/linux-generic/include/odp/api/align.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP alignments
- */
-
-#ifndef ODP_PLAT_ALIGN_H_
-#define ODP_PLAT_ALIGN_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/cpu_arch.h>
-
-/** @ingroup odp_compiler_optim
- * @{
- */
-
-#ifdef __GNUC__
-
-#define ODP_ALIGNED(x) __attribute__((__aligned__(x)))
-
-#define ODP_PACKED __attribute__((__packed__))
-
-#define ODP_OFFSETOF(type, member) __builtin_offsetof(type, member)
-
-#define ODP_FIELD_SIZEOF(type, member) sizeof(((type *)0)->member)
-
-#else
-#error Non-gcc compatible compiler
-#endif
-
-#define ODP_CACHE_LINE_SIZE _ODP_CACHE_LINE_SIZE
-
-#define ODP_PAGE_SIZE 4096
-
-#define ODP_ALIGNED_CACHE ODP_ALIGNED(ODP_CACHE_LINE_SIZE)
-
-#define ODP_ALIGNED_PAGE ODP_ALIGNED(ODP_PAGE_SIZE)
-
-/**
- * @}
- */
-
-#include <odp/api/spec/align.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/atomic.h b/platform/linux-generic/include/odp/api/atomic.h
deleted file mode 100644
index 7886cb4ea..000000000
--- a/platform/linux-generic/include/odp/api/atomic.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP atomic operations
- */
-
-#ifndef ODP_PLAT_ATOMIC_H_
-#define ODP_PLAT_ATOMIC_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/align.h>
-#include <odp/api/plat/atomic_types.h>
-
-/** @ingroup odp_atomic
- * @{
- */
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/atomic_inlines.h>
-#endif
-
-/**
- * @}
- */
-
-#include <odp/api/spec/atomic.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/barrier.h b/platform/linux-generic/include/odp/api/barrier.h
deleted file mode 100644
index ab1b77562..000000000
--- a/platform/linux-generic/include/odp/api/barrier.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP execution barriers
- */
-
-#ifndef ODP_PLAT_BARRIER_H_
-#define ODP_PLAT_BARRIER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/atomic.h>
-#include <odp/api/plat/shared_memory_types.h>
-#include <odp/api/plat/barrier_types.h>
-
-#include <odp/api/spec/barrier.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/buffer.h b/platform/linux-generic/include/odp/api/buffer.h
deleted file mode 100644
index 81dbf1280..000000000
--- a/platform/linux-generic/include/odp/api/buffer.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP buffer descriptor
- */
-
-#ifndef ODP_PLAT_BUFFER_H_
-#define ODP_PLAT_BUFFER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/event_types.h>
-#include <odp/api/plat/buffer_types.h>
-#include <odp/api/plat/pool_types.h>
-
-/** @ingroup odp_buffer
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/buffer.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/byteorder.h b/platform/linux-generic/include/odp/api/byteorder.h
deleted file mode 100644
index ec3d0eef7..000000000
--- a/platform/linux-generic/include/odp/api/byteorder.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP byteorder
- */
-
-#ifndef ODP_PLAT_BYTEORDER_H_
-#define ODP_PLAT_BYTEORDER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/byteorder_types.h>
-#include <odp/api/compiler.h>
-
-/** @ingroup odp_compiler_optim
- * @{
- */
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/byteorder_inlines.h>
-#endif
-
-/**
- * @}
- */
-
-#include <odp/api/spec/byteorder.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/classification.h b/platform/linux-generic/include/odp/api/classification.h
deleted file mode 100644
index 2ba6eb0eb..000000000
--- a/platform/linux-generic/include/odp/api/classification.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP classification descriptor
- */
-
-#ifndef ODP_PLAT_CLASSIFICATION_H_
-#define ODP_PLAT_CLASSIFICATION_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/pool_types.h>
-#include <odp/api/plat/classification_types.h>
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/plat/packet_io_types.h>
-#include <odp/api/plat/queue_types.h>
-
-/** @ingroup odp_classification
- * @{
- */
-
-/* REMOVE THESE FROM API SPEC. Typedefs needed only for suppressing Doxygen
- * warning. */
-typedef void odp_flowsig_t;
-typedef void odp_cos_flow_set_t;
-
-/**
- * @}
- */
-
-#include <odp/api/spec/classification.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/compiler.h b/platform/linux-generic/include/odp/api/compiler.h
deleted file mode 100644
index 5249d5d62..000000000
--- a/platform/linux-generic/include/odp/api/compiler.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Compiler related
- */
-
-#ifndef ODP_PLAT_COMPILER_H_
-#define ODP_PLAT_COMPILER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @ingroup odp_compiler_optim
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/compiler.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/cpumask.h b/platform/linux-generic/include/odp/api/cpumask.h
deleted file mode 100644
index 325ea52ed..000000000
--- a/platform/linux-generic/include/odp/api/cpumask.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP CPU masks and enumeration
- */
-
-#ifndef ODP_PLAT_CPUMASK_H_
-#define ODP_PLAT_CPUMASK_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/cpumask_types.h>
-
-#include <odp/api/spec/cpumask.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/crypto.h b/platform/linux-generic/include/odp/api/crypto.h
deleted file mode 100644
index 4f65932aa..000000000
--- a/platform/linux-generic/include/odp/api/crypto.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP crypto
- */
-
-#ifndef ODP_PLAT_CRYPTO_H_
-#define ODP_PLAT_CRYPTO_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/plat/crypto_types.h>
-#include <odp/api/plat/buffer_types.h>
-#include <odp/api/plat/pool_types.h>
-#include <odp/api/queue.h>
-
-/** @ingroup odp_crypto
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/crypto.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/debug.h b/platform/linux-generic/include/odp/api/debug.h
deleted file mode 100644
index 7db143395..000000000
--- a/platform/linux-generic/include/odp/api/debug.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP debug
- */
-
-#ifndef ODP_PLAT_DEBUG_H_
-#define ODP_PLAT_DEBUG_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/spec/debug.h>
-
-#if defined(__GNUC__) && !defined(__clang__)
-
-#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
-
-/**
- * @internal _Static_assert was only added in GCC 4.6. Provide a weak replacement
- * for previous versions.
- */
-#define _Static_assert(e, s) (extern int (*static_assert_checker(void)) \
- [sizeof(struct { unsigned int error_if_negative:(e) ? 1 : -1; })])
-
-#endif
-
-#endif
-
-/**
- * @internal Compile time assertion macro. Fails compilation and outputs 'msg'
- * if condition 'cond' is false. Macro definition is empty when compiler is not
- * supported or the compiler does not support static assertion.
- */
-#define ODP_STATIC_ASSERT(cond, msg) _Static_assert(cond, msg)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/errno.h b/platform/linux-generic/include/odp/api/errno.h
deleted file mode 100644
index f70d84df0..000000000
--- a/platform/linux-generic/include/odp/api/errno.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP errno API
- */
-
-#ifndef ODP_PLAT_ERRNO_H_
-#define ODP_PLAT_ERRNO_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-#include <odp/api/spec/errno.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/event.h b/platform/linux-generic/include/odp/api/event.h
deleted file mode 100644
index 55931b625..000000000
--- a/platform/linux-generic/include/odp/api/event.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP event
- */
-
-#ifndef ODP_PLAT_EVENT_H_
-#define ODP_PLAT_EVENT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/event_types.h>
-
-/** @ingroup odp_event
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/event.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/hints.h b/platform/linux-generic/include/odp/api/hints.h
deleted file mode 100644
index 3ba1ba95e..000000000
--- a/platform/linux-generic/include/odp/api/hints.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP compiler hints
- */
-
-#ifndef ODP_PLAT_HINTS_H_
-#define ODP_PLAT_HINTS_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @ingroup odp_compiler_optim
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/hints.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/init.h b/platform/linux-generic/include/odp/api/init.h
deleted file mode 100644
index 1d9f59a6f..000000000
--- a/platform/linux-generic/include/odp/api/init.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP initialization.
- */
-
-#ifndef ODP_PLAT_INIT_H_
-#define ODP_PLAT_INIT_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/init_types.h>
-
-/** @ingroup odp_initialization
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/init.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/packet.h b/platform/linux-generic/include/odp/api/packet.h
deleted file mode 100644
index eff408035..000000000
--- a/platform/linux-generic/include/odp/api/packet.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP packet descriptor
- */
-
-#ifndef ODP_PLAT_PACKET_H_
-#define ODP_PLAT_PACKET_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/event_types.h>
-#include <odp/api/plat/packet_io_types.h>
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/plat/buffer_types.h>
-#include <odp/api/plat/pool_types.h>
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/packet_inlines.h>
-#endif
-
-#include <odp/api/spec/packet.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/packet_flags.h b/platform/linux-generic/include/odp/api/packet_flags.h
deleted file mode 100644
index 1e55af823..000000000
--- a/platform/linux-generic/include/odp/api/packet_flags.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP packet flags
- */
-
-#ifndef ODP_PLAT_PACKET_FLAGS_H_
-#define ODP_PLAT_PACKET_FLAGS_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/packet_flag_inlines.h>
-#endif
-
-#include <odp/api/spec/packet_flags.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/packet_io.h b/platform/linux-generic/include/odp/api/packet_io.h
deleted file mode 100644
index 76c7dfeda..000000000
--- a/platform/linux-generic/include/odp/api/packet_io.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP Packet IO
- */
-
-#ifndef ODP_PLAT_PACKET_IO_H_
-#define ODP_PLAT_PACKET_IO_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/pool_types.h>
-#include <odp/api/plat/classification_types.h>
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/plat/packet_io_types.h>
-#include <odp/api/plat/queue_types.h>
-
-/** @ingroup odp_packet_io
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/packet_io.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/atomic_inlines.h b/platform/linux-generic/include/odp/api/plat/atomic_inlines.h
index 03b2884fd..e47559102 100644
--- a/platform/linux-generic/include/odp/api/plat/atomic_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/atomic_inlines.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,6 +14,69 @@
#ifndef _ODP_PLAT_ATOMIC_INLINES_H_
#define _ODP_PLAT_ATOMIC_INLINES_H_
+#include <odp/api/abi/atomic_inlines.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_atomic_init_u32 __odp_atomic_init_u32
+ #define odp_atomic_load_u32 __odp_atomic_load_u32
+ #define odp_atomic_store_u32 __odp_atomic_store_u32
+ #define odp_atomic_fetch_add_u32 __odp_atomic_fetch_add_u32
+ #define odp_atomic_add_u32 __odp_atomic_add_u32
+ #define odp_atomic_fetch_sub_u32 __odp_atomic_fetch_sub_u32
+ #define odp_atomic_sub_u32 __odp_atomic_sub_u32
+ #define odp_atomic_fetch_inc_u32 __odp_atomic_fetch_inc_u32
+ #define odp_atomic_inc_u32 __odp_atomic_inc_u32
+ #define odp_atomic_fetch_dec_u32 __odp_atomic_fetch_dec_u32
+ #define odp_atomic_dec_u32 __odp_atomic_dec_u32
+ #define odp_atomic_cas_u32 __odp_atomic_cas_u32
+ #define odp_atomic_xchg_u32 __odp_atomic_xchg_u32
+ #define odp_atomic_load_acq_u32 __odp_atomic_load_acq_u32
+ #define odp_atomic_store_rel_u32 __odp_atomic_store_rel_u32
+ #define odp_atomic_add_rel_u32 __odp_atomic_add_rel_u32
+ #define odp_atomic_sub_rel_u32 __odp_atomic_sub_rel_u32
+ #define odp_atomic_cas_acq_u32 __odp_atomic_cas_acq_u32
+ #define odp_atomic_cas_rel_u32 __odp_atomic_cas_rel_u32
+ #define odp_atomic_cas_acq_rel_u32 __odp_atomic_cas_acq_rel_u32
+ #define odp_atomic_max_u32 __odp_atomic_max_u32
+ #define odp_atomic_min_u32 __odp_atomic_min_u32
+ #define odp_atomic_init_u64 __odp_atomic_init_u64
+ #define odp_atomic_load_u64 __odp_atomic_load_u64
+ #define odp_atomic_store_u64 __odp_atomic_store_u64
+ #define odp_atomic_fetch_add_u64 __odp_atomic_fetch_add_u64
+ #define odp_atomic_add_u64 __odp_atomic_add_u64
+ #define odp_atomic_fetch_sub_u64 __odp_atomic_fetch_sub_u64
+ #define odp_atomic_sub_u64 __odp_atomic_sub_u64
+ #define odp_atomic_fetch_inc_u64 __odp_atomic_fetch_inc_u64
+ #define odp_atomic_inc_u64 __odp_atomic_inc_u64
+ #define odp_atomic_fetch_dec_u64 __odp_atomic_fetch_dec_u64
+ #define odp_atomic_dec_u64 __odp_atomic_dec_u64
+ #define odp_atomic_cas_u64 __odp_atomic_cas_u64
+ #define odp_atomic_xchg_u64 __odp_atomic_xchg_u64
+ #define odp_atomic_load_acq_u64 __odp_atomic_load_acq_u64
+ #define odp_atomic_store_rel_u64 __odp_atomic_store_rel_u64
+ #define odp_atomic_add_rel_u64 __odp_atomic_add_rel_u64
+ #define odp_atomic_sub_rel_u64 __odp_atomic_sub_rel_u64
+ #define odp_atomic_cas_acq_u64 __odp_atomic_cas_acq_u64
+ #define odp_atomic_cas_rel_u64 __odp_atomic_cas_rel_u64
+ #define odp_atomic_cas_acq_rel_u64 __odp_atomic_cas_acq_rel_u64
+ #define odp_atomic_max_u64 __odp_atomic_max_u64
+ #define odp_atomic_min_u64 __odp_atomic_min_u64
+ #define odp_atomic_init_u128 __odp_atomic_init_u128
+ #define odp_atomic_load_u128 __odp_atomic_load_u128
+ #define odp_atomic_store_u128 __odp_atomic_store_u128
+ #define odp_atomic_cas_u128 __odp_atomic_cas_u128
+ #define odp_atomic_cas_acq_u128 __odp_atomic_cas_acq_u128
+ #define odp_atomic_cas_rel_u128 __odp_atomic_cas_rel_u128
+ #define odp_atomic_cas_acq_rel_u128 __odp_atomic_cas_acq_rel_u128
+
+#else
+ #define _ODP_INLINE
+#endif
+
_ODP_INLINE void odp_atomic_init_u32(odp_atomic_u32_t *atom, uint32_t val)
{
__atomic_store_n(&atom->v, val, __ATOMIC_RELAXED);
@@ -36,7 +100,7 @@ _ODP_INLINE uint32_t odp_atomic_fetch_add_u32(odp_atomic_u32_t *atom,
_ODP_INLINE void odp_atomic_add_u32(odp_atomic_u32_t *atom, uint32_t val)
{
- (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
+ _odp_atomic_add_u32(atom, val);
}
_ODP_INLINE uint32_t odp_atomic_fetch_sub_u32(odp_atomic_u32_t *atom,
@@ -47,7 +111,7 @@ _ODP_INLINE uint32_t odp_atomic_fetch_sub_u32(odp_atomic_u32_t *atom,
_ODP_INLINE void odp_atomic_sub_u32(odp_atomic_u32_t *atom, uint32_t val)
{
- (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
+ _odp_atomic_sub_u32(atom, val);
}
_ODP_INLINE uint32_t odp_atomic_fetch_inc_u32(odp_atomic_u32_t *atom)
@@ -57,7 +121,7 @@ _ODP_INLINE uint32_t odp_atomic_fetch_inc_u32(odp_atomic_u32_t *atom)
_ODP_INLINE void odp_atomic_inc_u32(odp_atomic_u32_t *atom)
{
- (void)__atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
+ _odp_atomic_inc_u32(atom);
}
_ODP_INLINE uint32_t odp_atomic_fetch_dec_u32(odp_atomic_u32_t *atom)
@@ -67,7 +131,7 @@ _ODP_INLINE uint32_t odp_atomic_fetch_dec_u32(odp_atomic_u32_t *atom)
_ODP_INLINE void odp_atomic_dec_u32(odp_atomic_u32_t *atom)
{
- (void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
+ _odp_atomic_dec_u32(atom);
}
_ODP_INLINE int odp_atomic_cas_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
@@ -85,307 +149,413 @@ _ODP_INLINE uint32_t odp_atomic_xchg_u32(odp_atomic_u32_t *atom,
return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
}
-_ODP_INLINE void odp_atomic_max_u32(odp_atomic_u32_t *atom, uint32_t new_max)
+_ODP_INLINE void odp_atomic_max_u32(odp_atomic_u32_t *atom, uint32_t val)
{
- uint32_t old_val;
-
- old_val = odp_atomic_load_u32(atom);
-
- while (new_max > old_val) {
- if (odp_atomic_cas_u32(atom, &old_val, new_max))
- break;
- }
+ _odp_atomic_max_u32(atom, val);
}
-_ODP_INLINE void odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t new_min)
+_ODP_INLINE void odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t val)
{
- uint32_t old_val;
+ _odp_atomic_min_u32(atom, val);
+}
- old_val = odp_atomic_load_u32(atom);
+#ifdef ODP_ATOMIC_U64_LOCK
- while (new_min < old_val) {
- if (odp_atomic_cas_u32(atom, &old_val, new_min))
- break;
- }
-}
+/**
+ * @internal
+ * CAS operation expression for the ATOMIC_OP macro
+ */
+#define ATOMIC_CAS_OP(ret_ptr, old_val, new_val) \
+__extension__ ({ \
+ if (atom->v == (old_val)) { \
+ atom->v = (new_val); \
+ *(ret_ptr) = 1; \
+ } else { \
+ *(ret_ptr) = 0; \
+ } \
+})
+
+/**
+ * @internal
+ * Helper macro for lock-based atomic operations on 64-bit integers
+ * @param[in,out] atom Pointer to the 64-bit atomic variable
+ * @param expr Expression used update the variable.
+ * @return The old value of the variable.
+ */
+#define ATOMIC_OP(atom, expr) \
+__extension__ ({ \
+ uint64_t _old_val; \
+ /* Loop while lock is already taken, stop when lock becomes clear */ \
+ while (__atomic_test_and_set(&(atom)->lock, __ATOMIC_ACQUIRE)) \
+ (void)0; \
+ _old_val = (atom)->v; \
+ (expr); /* Perform whatever update is desired */ \
+ __atomic_clear(&(atom)->lock, __ATOMIC_RELEASE); \
+ _old_val; /* Return old value */ \
+})
_ODP_INLINE void odp_atomic_init_u64(odp_atomic_u64_t *atom, uint64_t val)
{
atom->v = val;
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
__atomic_clear(&atom->lock, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_load_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, (void)0);
-#else
- return __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_store_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v = val);
-#else
- __atomic_store_n(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_add_u64(odp_atomic_u64_t *atom,
uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v += val);
-#else
- return __atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v += val);
-#else
- (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_sub_u64(odp_atomic_u64_t *atom,
uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v -= val);
-#else
- return __atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v -= val);
-#else
- (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_inc_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v++);
-#else
- return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_inc_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v++);
-#else
- (void)__atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_dec_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v--);
-#else
- return __atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_dec_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v--);
-#else
- (void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE int odp_atomic_cas_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
int ret;
*old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
return ret;
-#else
- return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
- 0 /* strong */,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_xchg_u64(odp_atomic_u64_t *atom,
uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v = new_val);
-#else
- return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
-#endif
}
-_ODP_INLINE void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_max)
+_ODP_INLINE uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom)
+{
+ return ATOMIC_OP(atom, (void)0);
+}
+
+_ODP_INLINE void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)ATOMIC_OP(atom, atom->v = val);
+}
+
+_ODP_INLINE void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)ATOMIC_OP(atom, atom->v += val);
+}
+
+_ODP_INLINE void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)ATOMIC_OP(atom, atom->v -= val);
+}
+
+_ODP_INLINE int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
+{
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+}
+
+_ODP_INLINE int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
+{
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+}
+
+_ODP_INLINE int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val,
+ uint64_t new_val)
+{
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+}
+
+_ODP_INLINE void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_val)
{
uint64_t old_val;
old_val = odp_atomic_load_u64(atom);
- while (new_max > old_val) {
- if (odp_atomic_cas_u64(atom, &old_val, new_max))
+ while (new_val > old_val) {
+ if (odp_atomic_cas_u64(atom, &old_val, new_val))
break;
}
}
-_ODP_INLINE void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_min)
+_ODP_INLINE void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_val)
{
uint64_t old_val;
old_val = odp_atomic_load_u64(atom);
- while (new_min < old_val) {
- if (odp_atomic_cas_u64(atom, &old_val, new_min))
+ while (new_val < old_val) {
+ if (odp_atomic_cas_u64(atom, &old_val, new_val))
break;
}
}
-_ODP_INLINE uint32_t odp_atomic_load_acq_u32(odp_atomic_u32_t *atom)
+#else /* !ODP_ATOMIC_U64_LOCK */
+
+_ODP_INLINE void odp_atomic_init_u64(odp_atomic_u64_t *atom, uint64_t val)
{
- return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
+ atom->v = val;
}
-_ODP_INLINE void odp_atomic_store_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+_ODP_INLINE uint64_t odp_atomic_load_u64(odp_atomic_u64_t *atom)
{
- __atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
+ return __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
}
-_ODP_INLINE void odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+_ODP_INLINE void odp_atomic_store_u64(odp_atomic_u64_t *atom, uint64_t val)
{
- (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
+ __atomic_store_n(&atom->v, val, __ATOMIC_RELAXED);
}
-_ODP_INLINE void odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+_ODP_INLINE uint64_t odp_atomic_fetch_add_u64(odp_atomic_u64_t *atom,
+ uint64_t val)
{
- (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
+ return __atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
}
-_ODP_INLINE int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom,
- uint32_t *old_val, uint32_t new_val)
+_ODP_INLINE void odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
{
- return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
- 0 /* strong */,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED);
+ _odp_atomic_add_u64(atom, val);
}
-_ODP_INLINE int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom,
- uint32_t *old_val, uint32_t new_val)
+_ODP_INLINE uint64_t odp_atomic_fetch_sub_u64(odp_atomic_u64_t *atom,
+ uint64_t val)
{
- return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
- 0 /* strong */,
- __ATOMIC_RELEASE,
- __ATOMIC_RELAXED);
+ return __atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
}
-_ODP_INLINE int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom,
- uint32_t *old_val,
- uint32_t new_val)
+_ODP_INLINE void odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ _odp_atomic_sub_u64(atom, val);
+}
+
+_ODP_INLINE uint64_t odp_atomic_fetch_inc_u64(odp_atomic_u64_t *atom)
+{
+ return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_inc_u64(odp_atomic_u64_t *atom)
+{
+ _odp_atomic_inc_u64(atom);
+}
+
+_ODP_INLINE uint64_t odp_atomic_fetch_dec_u64(odp_atomic_u64_t *atom)
+{
+ return __atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_dec_u64(odp_atomic_u64_t *atom)
+{
+ _odp_atomic_dec_u64(atom);
+}
+
+_ODP_INLINE int odp_atomic_cas_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val)
{
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
- __ATOMIC_ACQ_REL,
+ __ATOMIC_RELAXED,
__ATOMIC_RELAXED);
}
+_ODP_INLINE uint64_t odp_atomic_xchg_u64(odp_atomic_u64_t *atom,
+ uint64_t new_val)
+{
+ return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
+}
+
_ODP_INLINE uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- return ATOMIC_OP(atom, (void)0);
-#else
return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
-#endif
}
_ODP_INLINE void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP(atom, atom->v = val);
-#else
__atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
-#endif
}
_ODP_INLINE void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP(atom, atom->v += val);
-#else
- (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
-#endif
+ _odp_atomic_add_rel_u64(atom, val);
}
_ODP_INLINE void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP(atom, atom->v -= val);
-#else
- (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
-#endif
+ _odp_atomic_sub_rel_u64(atom, val);
}
_ODP_INLINE int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom,
uint64_t *old_val, uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- int ret;
- *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
- return ret;
-#else
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
__ATOMIC_ACQUIRE,
__ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom,
uint64_t *old_val, uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- int ret;
- *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
- return ret;
-#else
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom,
uint64_t *old_val,
uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- int ret;
- *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
- return ret;
-#else
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
__ATOMIC_ACQ_REL,
__ATOMIC_RELAXED);
-#endif
}
+_ODP_INLINE void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ _odp_atomic_max_u64(atom, val);
+}
+
+_ODP_INLINE void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ _odp_atomic_min_u64(atom, val);
+}
+
+#endif /* !ODP_ATOMIC_U64_LOCK */
+
+_ODP_INLINE uint32_t odp_atomic_load_acq_u32(odp_atomic_u32_t *atom)
+{
+ return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
+}
+
+_ODP_INLINE void odp_atomic_store_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ __atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+_ODP_INLINE void odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ _odp_atomic_add_rel_u32(atom, val);
+}
+
+_ODP_INLINE void odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+{
+ _odp_atomic_sub_rel_u32(atom, val);
+}
+
+_ODP_INLINE int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELEASE,
+ __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val,
+ uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_init_u128(odp_atomic_u128_t *atom, odp_u128_t val)
+{
+ _odp_atomic_init_u128(atom, val);
+}
+
+_ODP_INLINE odp_u128_t odp_atomic_load_u128(odp_atomic_u128_t *atom)
+{
+ return _odp_atomic_load_u128(atom);
+}
+
+_ODP_INLINE void odp_atomic_store_u128(odp_atomic_u128_t *atom, odp_u128_t val)
+{
+ _odp_atomic_store_u128(atom, val);
+}
+
+_ODP_INLINE int odp_atomic_cas_u128(odp_atomic_u128_t *atom,
+ odp_u128_t *old_val, odp_u128_t new_val)
+{
+ return _odp_atomic_cas_u128(atom, old_val, new_val);
+}
+
+_ODP_INLINE int odp_atomic_cas_acq_u128(odp_atomic_u128_t *atom,
+ odp_u128_t *old_val, odp_u128_t new_val)
+{
+ return _odp_atomic_cas_acq_u128(atom, old_val, new_val);
+}
+
+_ODP_INLINE int odp_atomic_cas_rel_u128(odp_atomic_u128_t *atom,
+ odp_u128_t *old_val, odp_u128_t new_val)
+{
+ return _odp_atomic_cas_rel_u128(atom, old_val, new_val);
+}
+
+_ODP_INLINE int odp_atomic_cas_acq_rel_u128(odp_atomic_u128_t *atom,
+ odp_u128_t *old_val, odp_u128_t new_val)
+{
+ return _odp_atomic_cas_acq_rel_u128(atom, old_val, new_val);
+}
+
+/** @endcond */
+
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/atomic_types.h b/platform/linux-generic/include/odp/api/plat/atomic_types.h
deleted file mode 100644
index a674ac997..000000000
--- a/platform/linux-generic/include/odp/api/plat/atomic_types.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP atomic operations
- */
-
-#ifndef ODP_ATOMIC_TYPES_H_
-#define ODP_ATOMIC_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/align.h>
-
-/**
- * @internal
- * Atomic 64-bit unsigned integer
- */
-struct odp_atomic_u64_s {
- uint64_t v; /**< Actual storage for the atomic variable */
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- /* Some architectures do not support lock-free operations on 64-bit
- * data types. We use a spin lock to ensure atomicity. */
- char lock; /**< Spin lock (if needed) used to ensure atomic access */
-#endif
-} ODP_ALIGNED(sizeof(uint64_t)); /* Enforce alignement! */
-
-/**
- * @internal
- * Atomic 32-bit unsigned integer
- */
-struct odp_atomic_u32_s {
- uint32_t v; /**< Actual storage for the atomic variable */
-} ODP_ALIGNED(sizeof(uint32_t)); /* Enforce alignement! */
-
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
-
-/**
- * @internal
- * CAS operation expression for the ATOMIC_OP macro
- */
-#define ATOMIC_CAS_OP(ret_ptr, old_val, new_val) \
-({ \
- if (atom->v == (old_val)) { \
- atom->v = (new_val); \
- *(ret_ptr) = 1; \
- } else { \
- *(ret_ptr) = 0; \
- } \
-})
-
-/**
- * @internal
- * Helper macro for lock-based atomic operations on 64-bit integers
- * @param[in,out] atom Pointer to the 64-bit atomic variable
- * @param expr Expression used update the variable.
- * @return The old value of the variable.
- */
-#define ATOMIC_OP(atom, expr) \
-({ \
- uint64_t _old_val; \
- /* Loop while lock is already taken, stop when lock becomes clear */ \
- while (__atomic_test_and_set(&(atom)->lock, __ATOMIC_ACQUIRE)) \
- (void)0; \
- _old_val = (atom)->v; \
- (expr); /* Perform whatever update is desired */ \
- __atomic_clear(&(atom)->lock, __ATOMIC_RELEASE); \
- _old_val; /* Return old value */ \
-})
-#endif
-
-typedef struct odp_atomic_u64_s odp_atomic_u64_t;
-
-typedef struct odp_atomic_u32_s odp_atomic_u32_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h b/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h
new file mode 100644
index 000000000..f64a176f5
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/buffer_inline_types.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_BUFFER_INLINE_TYPES_H_
+#define ODP_PLAT_BUFFER_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Buffer header field accessors */
+#define _odp_buffer_get(buffer_hdr, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)buffer_hdr + \
+ _odp_buffer_inline_offset.field))
+
+/* Buffer header field offsets for inline functions */
+typedef struct _odp_buffer_inline_offset_t {
+ uint16_t uarea_addr;
+
+} _odp_buffer_inline_offset_t;
+
+extern const _odp_buffer_inline_offset_t _odp_buffer_inline_offset;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/buffer_inlines.h b/platform/linux-generic/include/odp/api/plat/buffer_inlines.h
new file mode 100644
index 000000000..75ef36cf3
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/buffer_inlines.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2019-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_BUFFER_INLINES_H_
+#define ODP_PLAT_BUFFER_INLINES_H_
+
+#include <odp/api/buffer_types.h>
+#include <odp/api/event.h>
+#include <odp/api/pool_types.h>
+
+#include <odp/api/plat/buffer_inline_types.h>
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/event_inline_types.h>
+#include <odp/api/plat/pool_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_buffer_from_event __odp_buffer_from_event
+ #define odp_buffer_from_event_multi __odp_buffer_from_event_multi
+ #define odp_buffer_to_event __odp_buffer_to_event
+ #define odp_buffer_to_event_multi __odp_buffer_to_event_multi
+ #define odp_buffer_addr __odp_buffer_addr
+ #define odp_buffer_size __odp_buffer_size
+ #define odp_buffer_pool __odp_buffer_pool
+ #define odp_buffer_user_area __odp_buffer_user_area
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_buffer_t odp_buffer_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_BUFFER);
+
+ return (odp_buffer_t)ev;
+}
+
+_ODP_INLINE void odp_buffer_from_event_multi(odp_buffer_t buf[], const odp_event_t ev[], int num)
+{
+ for (int i = 0; i < num; i++)
+ buf[i] = odp_buffer_from_event(ev[i]);
+}
+
+_ODP_INLINE odp_event_t odp_buffer_to_event(odp_buffer_t buf)
+{
+ return (odp_event_t)buf;
+}
+
+_ODP_INLINE void odp_buffer_to_event_multi(const odp_buffer_t buf[], odp_event_t ev[], int num)
+{
+ for (int i = 0; i < num; i++)
+ ev[i] = odp_buffer_to_event(buf[i]);
+}
+
+_ODP_INLINE void *odp_buffer_addr(odp_buffer_t buf)
+{
+ return _odp_event_hdr_field((odp_event_t)buf, void *, base_data);
+}
+
+_ODP_INLINE uint32_t odp_buffer_size(odp_buffer_t buf)
+{
+ odp_pool_t pool = _odp_event_hdr_field(buf, odp_pool_t, pool);
+
+ return _odp_pool_get(pool, uint32_t, seg_len);
+}
+
+_ODP_INLINE odp_pool_t odp_buffer_pool(odp_buffer_t buf)
+{
+ return _odp_event_hdr_field(buf, odp_pool_t, pool);
+}
+
+_ODP_INLINE void *odp_buffer_user_area(odp_buffer_t buf)
+{
+ return _odp_buffer_get(buf, void *, uarea_addr);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/buffer_types.h b/platform/linux-generic/include/odp/api/plat/buffer_types.h
deleted file mode 100644
index 809768f3d..000000000
--- a/platform/linux-generic/include/odp/api/plat/buffer_types.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP buffer descriptor
- */
-
-#ifndef ODP_BUFFER_TYPES_H_
-#define ODP_BUFFER_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/buffer.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_buffer
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_buffer_t);
-
-#define ODP_BUFFER_INVALID _odp_cast_scalar(odp_buffer_t, 0xffffffff)
-
-typedef ODP_HANDLE_T(odp_buffer_seg_t);
-
-#define ODP_SEGMENT_INVALID ((odp_buffer_seg_t)ODP_BUFFER_INVALID)
-
-/**
- * @}
- */
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h b/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h
index e7818904b..31d2f1db9 100644
--- a/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,9 +17,54 @@
extern "C" {
#endif
-/** @ingroup odp_compiler_optim
- * @{
+#include <odp/api/abi/byteorder.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef __odp_force
+#define __odp_force
+#endif
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_be_to_cpu_16 __odp_be_to_cpu_16
+ #define odp_be_to_cpu_32 __odp_be_to_cpu_32
+ #define odp_be_to_cpu_64 __odp_be_to_cpu_64
+ #define odp_cpu_to_be_16 __odp_cpu_to_be_16
+ #define odp_cpu_to_be_32 __odp_cpu_to_be_32
+ #define odp_cpu_to_be_64 __odp_cpu_to_be_64
+ #define odp_le_to_cpu_16 __odp_le_to_cpu_16
+ #define odp_le_to_cpu_32 __odp_le_to_cpu_32
+ #define odp_le_to_cpu_64 __odp_le_to_cpu_64
+ #define odp_cpu_to_le_16 __odp_cpu_to_le_16
+ #define odp_cpu_to_le_32 __odp_cpu_to_le_32
+ #define odp_cpu_to_le_64 __odp_cpu_to_le_64
+#else
+ #define _ODP_INLINE
+#endif
+
+/** @internal GNU compiler version */
+#define GCC_VERSION (__GNUC__ * 10000 \
+ + __GNUC_MINOR__ * 100 \
+ + __GNUC_PATCHLEVEL__)
+
+/**
+ * @internal
+ * Compiler __builtin_bswap16() is not available on all platforms
+ * until GCC 4.8.0 - work around this by offering __odp_builtin_bswap16()
+ * Don't use this function directly, instead see odp_byteorder.h
*/
+#if GCC_VERSION < 40800
+/*
+ * We have to explicitly cast back to uint16_t because clang promotes the
+ * left side of << operator to int.
+ */
+#define __odp_builtin_bswap16(u16) ((uint16_t)(((u16)&0x00ff) << 8) | \
+ (((u16)&0xff00) >> 8))
+#else
+#define __odp_builtin_bswap16(u16) __builtin_bswap16(u16)
+#endif
_ODP_INLINE uint16_t odp_be_to_cpu_16(odp_u16be_t be16)
{
@@ -129,9 +174,7 @@ _ODP_INLINE odp_u64le_t odp_cpu_to_le_64(uint64_t cpu64)
#endif
}
-/**
- * @}
- */
+/** @endcond */
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp/api/plat/classification_types.h b/platform/linux-generic/include/odp/api/plat/classification_types.h
deleted file mode 100644
index d210feb0c..000000000
--- a/platform/linux-generic/include/odp/api/plat/classification_types.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP classification descriptor
- */
-
-#ifndef ODP_CLASSIFICATION_TYPES_H_
-#define ODP_CLASSIFICATION_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/classification.h>
-#else
-
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_classification
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_cos_t);
-#define ODP_COS_INVALID _odp_cast_scalar(odp_cos_t, ~0)
-
-typedef ODP_HANDLE_T(odp_pmr_t);
-#define ODP_PMR_INVAL _odp_cast_scalar(odp_pmr_t, ~0)
-
-#define ODP_COS_NAME_LEN 32
-
-/**
- * @}
- */
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/cpu_inlines.h b/platform/linux-generic/include/odp/api/plat/cpu_inlines.h
new file mode 100644
index 000000000..bb1b89154
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/cpu_inlines.h
@@ -0,0 +1,61 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_INLINES_H_
+#define ODP_PLAT_CPU_INLINES_H_
+
+#include <odp/api/hints.h>
+
+#include <odp/api/abi/cpu_inlines.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_cpu_pause __odp_cpu_pause
+ #define odp_cpu_cycles __odp_cpu_cycles
+ #define odp_cpu_cycles_max __odp_cpu_cycles_max
+ #define odp_cpu_cycles_resolution __odp_cpu_cycles_resolution
+ #define odp_cpu_cycles_diff __odp_cpu_cycles_diff
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void odp_cpu_pause(void)
+{
+ _odp_cpu_pause();
+}
+
+_ODP_INLINE uint64_t odp_cpu_cycles_max(void)
+{
+ return _odp_cpu_cycles_max();
+}
+
+_ODP_INLINE uint64_t odp_cpu_cycles_resolution(void)
+{
+ return _odp_cpu_cycles_resolution();
+}
+
+_ODP_INLINE uint64_t odp_cpu_cycles(void)
+{
+ return _odp_cpu_cycles();
+}
+
+_ODP_INLINE uint64_t odp_cpu_cycles_diff(uint64_t c2, uint64_t c1)
+{
+ if (odp_likely(c2 >= c1))
+ return c2 - c1;
+
+ return c2 + (odp_cpu_cycles_max() - c1) + _odp_cpu_cycles_resolution();
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/cpumask_types.h b/platform/linux-generic/include/odp/api/plat/cpumask_types.h
deleted file mode 100644
index c2727a46c..000000000
--- a/platform/linux-generic/include/odp/api/plat/cpumask_types.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP CPU masks and enumeration
- */
-
-#ifndef ODP_CPUMASK_TYPES_H_
-#define ODP_CPUMASK_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odp_cpumask
- * @{
- */
-
-#include <odp/api/std_types.h>
-#include <odp/api/align.h>
-
-#define ODP_CPUMASK_SIZE 1024
-
-#define ODP_CPUMASK_STR_SIZE ((ODP_CPUMASK_SIZE + 3) / 4 + 3)
-
-/**
- * CPU mask
- *
- * Don't access directly, use access functions.
- */
-typedef struct odp_cpumask_t {
- /** @private CPU mask storage
- *
- * This is private to the implementation.
- * Don't access directly, use access functions.
- */
- uint8_t _u8[ODP_CPUMASK_SIZE / 8];
-} odp_cpumask_t ODP_ALIGNED(8);
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/crypto_inlines.h b/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
new file mode 100644
index 000000000..f350edfea
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
@@ -0,0 +1,70 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CRYPTO_INLINES_H_
+#define ODP_PLAT_CRYPTO_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/crypto_types.h>
+#include <odp/api/event.h>
+#include <odp/api/packet.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/packet_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_crypto_packet_from_event __odp_crypto_packet_from_event
+ #define odp_crypto_packet_to_event __odp_crypto_packet_to_event
+ #define odp_crypto_result __odp_crypto_result
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_packet_t odp_crypto_packet_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+ _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_CRYPTO);
+
+ return odp_packet_from_event(ev);
+}
+
+_ODP_INLINE odp_event_t odp_crypto_packet_to_event(odp_packet_t pkt)
+{
+ return odp_packet_to_event(pkt);
+}
+
+_ODP_INLINE int odp_crypto_result(odp_crypto_packet_result_t *result, odp_packet_t pkt)
+{
+ odp_crypto_packet_result_t *op_result;
+ odp_bool_t ok;
+
+ _ODP_ASSERT(odp_packet_subtype(pkt) == ODP_EVENT_PACKET_CRYPTO);
+
+ op_result = _odp_pkt_get_ptr(pkt, odp_crypto_packet_result_t, crypto_op);
+
+ ok = op_result->cipher_status.alg_err == ODP_CRYPTO_ALG_ERR_NONE &&
+ op_result->auth_status.alg_err == ODP_CRYPTO_ALG_ERR_NONE;
+
+ if (result)
+ *result = *op_result;
+
+ return ok ? 0 : -1;
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/debug_inlines.h b/platform/linux-generic/include/odp/api/plat/debug_inlines.h
new file mode 100644
index 000000000..0755b1fda
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/debug_inlines.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Debug inlines
+ *
+ * @warning These definitions are not part of ODP API, they are for
+ * implementation internal use only.
+ */
+
+#ifndef ODP_DEBUG_INLINES_H_
+#define ODP_DEBUG_INLINES_H_
+
+#include <odp/autoheader_external.h>
+
+#include <odp/api/hints.h>
+#include <odp/api/init.h>
+
+#include <odp/api/plat/thread_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#pragma GCC diagnostic push
+
+#ifdef __clang__
+#pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#endif
+
+extern odp_log_func_t ODP_PRINTF_FORMAT(2, 3) _odp_log_fn;
+extern odp_abort_func_t _odp_abort_fn;
+
+#define _ODP_LOG_FN(level, ...) \
+ do { \
+ if (_odp_this_thread && _odp_this_thread->log_fn) \
+ _odp_this_thread->log_fn(level, ##__VA_ARGS__); \
+ else \
+ _odp_log_fn(level, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * ODP LOG macro.
+ */
+#define _ODP_LOG(level, prefix, fmt, ...) \
+ _ODP_LOG_FN(level, "%s: %s:%d:%s(): " fmt, prefix, \
+ __FILE__, __LINE__, __func__, ##__VA_ARGS__)
+
+/**
+ * Runtime assertion-macro - aborts if 'cond' is false.
+ */
+#define _ODP_ASSERT(cond) \
+ do { if ((ODP_DEBUG == 1) && (!(cond))) { \
+ _ODP_ERR("%s\n", #cond); \
+ _odp_abort_fn(); } \
+ } while (0)
+
+/*
+ * Print debug message to log, if ODP_DEBUG_PRINT flag is set (ignores CONFIG_DEBUG_LEVEL).
+ */
+#define _ODP_DBG(...) \
+ do { \
+ if (ODP_DEBUG_PRINT == 1) \
+ __extension__ ({ \
+ _ODP_LOG(ODP_LOG_DBG, "DBG", ##__VA_ARGS__); \
+ }); \
+ } while (0)
+
+/**
+ * Log warning message.
+ */
+#define _ODP_WARN(...) \
+ do { \
+ __extension__ ({ \
+ _ODP_LOG(ODP_LOG_WARN, "WARN", ##__VA_ARGS__); \
+ }); \
+ } while (0)
+
+/**
+ * Log error message.
+ */
+#define _ODP_ERR(...) \
+ do { \
+ __extension__ ({ \
+ _ODP_LOG(ODP_LOG_ERR, "ERR", ##__VA_ARGS__); \
+ }); \
+ } while (0)
+
+/**
+ * Log abort message and then stop execution (by default call abort()).
+ * This function should not return.
+ */
+#define _ODP_ABORT(...) \
+ do { \
+ __extension__ ({ \
+ _ODP_LOG(ODP_LOG_ABORT, "ABORT", ##__VA_ARGS__); \
+ }); \
+ _odp_abort_fn(); \
+ } while (0)
+
+/**
+ * Log print message when the application calls one of the ODP APIs
+ * specifically for dumping internal data.
+ */
+#define _ODP_PRINT(...) \
+ _ODP_LOG_FN(ODP_LOG_PRINT, ##__VA_ARGS__)
+
+#pragma GCC diagnostic pop
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/dma_inlines.h b/platform/linux-generic/include/odp/api/plat/dma_inlines.h
new file mode 100644
index 000000000..84b5fef5b
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/dma_inlines.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_DMA_INLINES_H_
+#define ODP_PLAT_DMA_INLINES_H_
+
+#include <odp/api/buffer.h>
+#include <odp/api/dma_types.h>
+#include <odp/api/event_types.h>
+#include <odp/api/hints.h>
+#include <odp/api/pool_types.h>
+#include <odp/api/queue_types.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/event_inline_types.h>
+
+#include <stdint.h>
+#include <string.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_dma_compl_from_event __odp_dma_compl_from_event
+ #define odp_dma_compl_to_event __odp_dma_compl_to_event
+ #define odp_dma_compl_user_area __odp_dma_compl_user_area
+ #define odp_dma_compl_result __odp_dma_compl_result
+ #define odp_dma_transfer_param_init __odp_dma_transfer_param_init
+ #define odp_dma_compl_param_init __odp_dma_compl_param_init
+ #define odp_dma_compl_alloc __odp_dma_compl_alloc
+ #define odp_dma_compl_free __odp_dma_compl_free
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_dma_compl_t odp_dma_compl_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(_odp_event_hdr_field(ev, int8_t, event_type) == ODP_EVENT_DMA_COMPL);
+
+ return (odp_dma_compl_t)(uintptr_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_dma_compl_to_event(odp_dma_compl_t dma_compl)
+{
+ return (odp_event_t)(uintptr_t)dma_compl;
+}
+
+_ODP_INLINE void *odp_dma_compl_user_area(odp_dma_compl_t dma_compl)
+{
+ return odp_buffer_user_area((odp_buffer_t)(uintptr_t)dma_compl);
+}
+
+_ODP_INLINE int odp_dma_compl_result(odp_dma_compl_t dma_compl, odp_dma_result_t *result_out)
+{
+ odp_dma_result_t *result;
+ odp_buffer_t buf = (odp_buffer_t)(uintptr_t)dma_compl;
+
+ if (odp_unlikely(dma_compl == ODP_DMA_COMPL_INVALID)) {
+ _ODP_ERR("Bad DMA compl handle\n");
+ return -1;
+ }
+
+ result = (odp_dma_result_t *)odp_buffer_addr(buf);
+
+ if (result_out)
+ *result_out = *result;
+
+ return result->success ? 0 : -1;
+}
+
+_ODP_INLINE void odp_dma_transfer_param_init(odp_dma_transfer_param_t *trs_param)
+{
+ memset(trs_param, 0, sizeof(odp_dma_transfer_param_t));
+
+ trs_param->src_format = ODP_DMA_FORMAT_ADDR;
+ trs_param->dst_format = ODP_DMA_FORMAT_ADDR;
+ trs_param->num_src = 1;
+ trs_param->num_dst = 1;
+}
+
+_ODP_INLINE void odp_dma_compl_param_init(odp_dma_compl_param_t *compl_param)
+{
+ memset(compl_param, 0, sizeof(odp_dma_compl_param_t));
+
+ compl_param->queue = ODP_QUEUE_INVALID;
+ compl_param->event = ODP_EVENT_INVALID;
+ compl_param->transfer_id = ODP_DMA_TRANSFER_ID_INVALID;
+}
+
+_ODP_INLINE odp_dma_compl_t odp_dma_compl_alloc(odp_pool_t pool)
+{
+ odp_buffer_t buf;
+ odp_event_t ev;
+ odp_dma_result_t *result;
+ int8_t *ev_type;
+
+ buf = odp_buffer_alloc(pool);
+ if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+ return ODP_DMA_COMPL_INVALID;
+
+ result = (odp_dma_result_t *)odp_buffer_addr(buf);
+ memset(result, 0, sizeof(odp_dma_result_t));
+
+ ev = odp_buffer_to_event(buf);
+ ev_type = _odp_event_hdr_ptr(ev, int8_t, event_type);
+ *ev_type = ODP_EVENT_DMA_COMPL;
+
+ return (odp_dma_compl_t)(uintptr_t)buf;
+}
+
+_ODP_INLINE void odp_dma_compl_free(odp_dma_compl_t dma_compl)
+{
+ int8_t *ev_type;
+ odp_event_t ev;
+ odp_buffer_t buf = (odp_buffer_t)(uintptr_t)dma_compl;
+
+ if (odp_unlikely(dma_compl == ODP_DMA_COMPL_INVALID)) {
+ _ODP_ERR("Bad DMA compl handle\n");
+ return;
+ }
+
+ ev = odp_buffer_to_event(buf);
+ ev_type = _odp_event_hdr_ptr(ev, int8_t, event_type);
+ *ev_type = ODP_EVENT_BUFFER;
+
+ odp_buffer_free(buf);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/event_inline_types.h b/platform/linux-generic/include/odp/api/plat/event_inline_types.h
new file mode 100644
index 000000000..cbf01588f
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/event_inline_types.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_EVENT_INLINE_TYPES_H_
+#define ODP_PLAT_EVENT_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Event header field accessors */
+#define _odp_event_hdr_field(event_hdr, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)event_hdr + \
+ _odp_event_inline_offset.field))
+#define _odp_event_hdr_ptr(event_hdr, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)event_hdr + \
+ _odp_event_inline_offset.field))
+
+/* Event header field offsets for inline functions */
+typedef struct _odp_event_inline_offset_t {
+ uint16_t event_type;
+ uint16_t base_data;
+ uint16_t subtype;
+ uint16_t flow_id;
+ uint16_t pool;
+
+} _odp_event_inline_offset_t;
+
+extern const _odp_event_inline_offset_t _odp_event_inline_offset;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/event_inlines.h b/platform/linux-generic/include/odp/api/plat/event_inlines.h
new file mode 100644
index 000000000..990575166
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/event_inlines.h
@@ -0,0 +1,199 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_EVENT_INLINES_H_
+#define ODP_PLAT_EVENT_INLINES_H_
+
+#include <odp/api/buffer_types.h>
+#include <odp/api/event_types.h>
+#include <odp/api/packet_types.h>
+#include <odp/api/pool_types.h>
+#include <odp/api/timer_types.h>
+
+#include <odp/api/plat/buffer_inline_types.h>
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/event_inline_types.h>
+#include <odp/api/plat/event_vector_inline_types.h>
+#include <odp/api/plat/packet_inline_types.h>
+#include <odp/api/plat/timer_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_event_type __odp_event_type
+ #define odp_event_type_multi __odp_event_type_multi
+ #define odp_event_pool __odp_event_pool
+ #define odp_event_user_area __odp_event_user_area
+ #define odp_event_user_area_and_flag __odp_event_user_area_and_flag
+ #define odp_event_subtype __odp_event_subtype
+ #define odp_event_types __odp_event_types
+ #define odp_event_types_multi __odp_event_types_multi
+ #define odp_event_flow_id __odp_event_flow_id
+ #define odp_event_flow_id_set __odp_event_flow_id_set
+#else
+ #define _ODP_INLINE
+#endif
+
+static inline odp_event_type_t __odp_event_type_get(odp_event_t event)
+{
+ int8_t type;
+
+ type = _odp_event_hdr_field(event, int8_t, event_type);
+
+ return (odp_event_type_t)type;
+}
+
+static inline odp_event_subtype_t __odp_event_subtype_get(odp_event_t event)
+{
+ int8_t type;
+
+ type = _odp_event_hdr_field(event, int8_t, subtype);
+
+ return (odp_event_subtype_t)type;
+}
+
+_ODP_INLINE odp_event_type_t odp_event_type(odp_event_t event)
+{
+ return __odp_event_type_get(event);
+}
+
+_ODP_INLINE int odp_event_type_multi(const odp_event_t event[], int num,
+ odp_event_type_t *type_out)
+{
+ int i;
+ odp_event_type_t type = __odp_event_type_get(event[0]);
+
+ for (i = 1; i < num; i++) {
+ if (__odp_event_type_get(event[i]) != type)
+ break;
+ }
+
+ *type_out = type;
+
+ return i;
+}
+
+_ODP_INLINE odp_pool_t odp_event_pool(odp_event_t event)
+{
+ const odp_event_type_t type = __odp_event_type_get(event);
+
+ switch (type) {
+ case ODP_EVENT_BUFFER:
+ case ODP_EVENT_PACKET:
+ case ODP_EVENT_PACKET_VECTOR:
+ return _odp_event_hdr_field(event, odp_pool_t, pool);
+ default:
+ return ODP_POOL_INVALID;
+ }
+}
+
+_ODP_INLINE void *odp_event_user_area(odp_event_t event)
+{
+ const odp_event_type_t type = __odp_event_type_get(event);
+
+ switch (type) {
+ case ODP_EVENT_BUFFER:
+ case ODP_EVENT_ML_COMPL:
+ case ODP_EVENT_DMA_COMPL:
+ return _odp_buffer_get((odp_buffer_t)event, void *, uarea_addr);
+ case ODP_EVENT_PACKET:
+ return _odp_pkt_get((odp_packet_t)event, void *, user_area);
+ case ODP_EVENT_PACKET_VECTOR:
+ return _odp_event_vect_get((odp_packet_vector_t)event, void *, uarea_addr);
+ case ODP_EVENT_TIMEOUT:
+ return _odp_timeout_hdr_field((odp_timeout_t)event, void *, uarea_addr);
+ default:
+ return NULL;
+ }
+}
+
+_ODP_INLINE void *odp_event_user_area_and_flag(odp_event_t event, int *flag)
+{
+ const odp_event_type_t type = __odp_event_type_get(event);
+
+ _ODP_ASSERT(flag != NULL);
+
+ switch (type) {
+ case ODP_EVENT_BUFFER:
+ case ODP_EVENT_DMA_COMPL:
+ case ODP_EVENT_ML_COMPL:
+ *flag = -1;
+ return _odp_buffer_get((odp_buffer_t)event, void *, uarea_addr);
+ case ODP_EVENT_PACKET:
+ {
+ _odp_packet_flags_t pkt_flags;
+ odp_packet_t pkt = (odp_packet_t)event;
+
+ pkt_flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ *flag = pkt_flags.user_flag;
+
+ return _odp_pkt_get(pkt, void *, user_area);
+ }
+ case ODP_EVENT_PACKET_VECTOR:
+ {
+ _odp_event_vector_flags_t pktv_flags;
+ odp_packet_vector_t pktv = (odp_packet_vector_t)event;
+
+ pktv_flags.all_flags = _odp_event_vect_get(pktv, uint32_t, flags);
+ *flag = pktv_flags.user_flag;
+
+ return _odp_event_vect_get(pktv, void *, uarea_addr);
+ }
+ case ODP_EVENT_TIMEOUT:
+ *flag = -1;
+ return _odp_timeout_hdr_field((odp_timeout_t)event, void *, uarea_addr);
+ default:
+ *flag = -1;
+ return NULL;
+ }
+}
+
+_ODP_INLINE odp_event_subtype_t odp_event_subtype(odp_event_t event)
+{
+ return __odp_event_subtype_get(event);
+}
+
+_ODP_INLINE odp_event_type_t odp_event_types(odp_event_t event,
+ odp_event_subtype_t *subtype)
+{
+ odp_event_type_t event_type = __odp_event_type_get(event);
+
+ *subtype = __odp_event_subtype_get(event);
+
+ return event_type;
+}
+
+_ODP_INLINE void odp_event_types_multi(const odp_event_t event[], odp_event_type_t type[],
+ odp_event_subtype_t subtype[], int num)
+{
+ for (int i = 0; i < num; i++)
+ type[i] = __odp_event_type_get(event[i]);
+
+ if (subtype == NULL)
+ return;
+
+ for (int i = 0; i < num; i++)
+ subtype[i] = __odp_event_subtype_get(event[i]);
+}
+
+_ODP_INLINE uint32_t odp_event_flow_id(odp_event_t event)
+{
+ return _odp_event_hdr_field(event, uint8_t, flow_id);
+}
+
+_ODP_INLINE void odp_event_flow_id_set(odp_event_t event, uint32_t id)
+{
+ uint8_t *flow_id = _odp_event_hdr_ptr(event, uint8_t, flow_id);
+
+ *flow_id = (uint8_t)id;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/event_types.h b/platform/linux-generic/include/odp/api/plat/event_types.h
deleted file mode 100644
index a1aa0e452..000000000
--- a/platform/linux-generic/include/odp/api/plat/event_types.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP event
- */
-
-#ifndef ODP_EVENT_TYPES_H_
-#define ODP_EVENT_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/event.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_event
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_event_t);
-
-#define ODP_EVENT_INVALID _odp_cast_scalar(odp_event_t, 0xffffffff)
-
-typedef enum odp_event_type_t {
- ODP_EVENT_BUFFER = 1,
- ODP_EVENT_PACKET = 2,
- ODP_EVENT_TIMEOUT = 3,
- ODP_EVENT_CRYPTO_COMPL = 4,
-} odp_event_type_t;
-
-/**
- * @}
- */
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/event_validation_external.h b/platform/linux-generic/include/odp/api/plat/event_validation_external.h
new file mode 100644
index 000000000..7f5c0364f
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/event_validation_external.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event validation
+ *
+ * @warning These definitions are not part of ODP API, they are for
+ * implementation internal use only.
+ */
+
+#ifndef ODP_EVENT_VALIDATION_EXTERNAL_H_
+#define ODP_EVENT_VALIDATION_EXTERNAL_H_
+
+#include <odp/autoheader_external.h>
+
+#include <odp/api/buffer_types.h>
+#include <odp/api/event_types.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** Enumerations for identifying ODP API functions */
+typedef enum {
+ _ODP_EV_BUFFER_FREE = 0,
+ _ODP_EV_BUFFER_FREE_MULTI,
+ _ODP_EV_BUFFER_IS_VALID,
+ _ODP_EV_EVENT_FREE,
+ _ODP_EV_EVENT_FREE_MULTI,
+ _ODP_EV_EVENT_FREE_SP,
+ _ODP_EV_EVENT_IS_VALID,
+ _ODP_EV_PACKET_FREE,
+ _ODP_EV_PACKET_FREE_MULTI,
+ _ODP_EV_PACKET_FREE_SP,
+ _ODP_EV_PACKET_IS_VALID,
+ _ODP_EV_QUEUE_ENQ,
+ _ODP_EV_QUEUE_ENQ_MULTI,
+ _ODP_EV_MAX
+} _odp_ev_id_t;
+
+/* Implementation internal event validation functions */
+#if _ODP_EVENT_VALIDATION
+
+int _odp_event_validate(odp_event_t event, _odp_ev_id_t id);
+
+int _odp_event_validate_multi(const odp_event_t event[], int num, _odp_ev_id_t id);
+
+int _odp_buffer_validate(odp_buffer_t buf, _odp_ev_id_t ev_id);
+
+int _odp_buffer_validate_multi(const odp_buffer_t buf[], int num, _odp_ev_id_t ev_id);
+
+int _odp_packet_validate(odp_packet_t pkt, _odp_ev_id_t ev_id);
+
+int _odp_packet_validate_multi(const odp_packet_t pkt[], int num, _odp_ev_id_t ev_id);
+
+#else
+
+static inline int _odp_event_validate(odp_event_t event ODP_UNUSED, _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_event_validate_multi(const odp_event_t event[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_buffer_validate(odp_buffer_t buf ODP_UNUSED, _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_buffer_validate_multi(const odp_buffer_t buf[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_packet_validate(odp_packet_t pkt ODP_UNUSED, _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+static inline int _odp_packet_validate_multi(const odp_packet_t pkt[] ODP_UNUSED,
+ int num ODP_UNUSED,
+ _odp_ev_id_t ev_id ODP_UNUSED)
+{
+ return 0;
+}
+
+#endif /* _ODP_EVENT_VALIDATION */
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
new file mode 100644
index 000000000..773f5171c
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_EVENT_VECTOR_INLINE_TYPES_H_
+#define ODP_PLAT_EVENT_VECTOR_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+typedef union {
+ uint32_t all_flags;
+
+ struct {
+ uint32_t user_flag : 1;
+ };
+
+} _odp_event_vector_flags_t;
+
+/* Event vector field accessors */
+#define _odp_event_vect_get(vect, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)vect + _odp_event_vector_inline.field))
+#define _odp_event_vect_get_ptr(vect, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)vect + _odp_event_vector_inline.field))
+
+/* Event vector header field offsets for inline functions */
+typedef struct _odp_event_vector_inline_offset_t {
+ uint16_t packet;
+ uint16_t pool;
+ uint16_t size;
+ uint16_t uarea_addr;
+ uint16_t flags;
+
+} _odp_event_vector_inline_offset_t;
+
+extern const _odp_event_vector_inline_offset_t _odp_event_vector_inline;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_PLAT_EVENT_VECTOR_INLINE_TYPES_H_ */
diff --git a/platform/linux-generic/include/odp/api/plat/hash_inlines.h b/platform/linux-generic/include/odp/api/plat/hash_inlines.h
new file mode 100644
index 000000000..b38a34d53
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/hash_inlines.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_HASH_INLINES_H_
+#define ODP_PLAT_HASH_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/hash_crc32.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_hash_crc32 __odp_hash_crc32
+ #define odp_hash_crc32c __odp_hash_crc32c
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE uint32_t odp_hash_crc32(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ return _odp_hash_crc32(data, data_len, init_val);
+}
+
+_ODP_INLINE uint32_t odp_hash_crc32c(const void *data, uint32_t data_len,
+ uint32_t init_val)
+{
+ return _odp_hash_crc32c(data, data_len, init_val);
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/init_types.h b/platform/linux-generic/include/odp/api/plat/init_types.h
deleted file mode 100644
index 888b04a70..000000000
--- a/platform/linux-generic/include/odp/api/plat/init_types.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP initialization.
- */
-
-#ifndef ODP_INIT_TYPES_H_
-#define ODP_INIT_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-
-typedef uint64_t odp_instance_t;
-
-/**
- * @internal platform specific data
- */
-typedef struct odp_platform_init_t {
- int ipc_ns; /**< Name space for ipc shared objects. */
-} odp_platform_init_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/ipsec_inlines.h b/platform/linux-generic/include/odp/api/plat/ipsec_inlines.h
new file mode 100644
index 000000000..1d1f6ec61
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/ipsec_inlines.h
@@ -0,0 +1,58 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_IPSEC_INLINES_H_
+#define ODP_PLAT_IPSEC_INLINES_H_
+
+#include <odp/api/event.h>
+#include <odp/api/ipsec_types.h>
+#include <odp/api/packet.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/packet_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_ipsec_packet_from_event __odp_ipsec_packet_from_event
+ #define odp_ipsec_packet_to_event __odp_ipsec_packet_to_event
+ #define odp_ipsec_result __odp_ipsec_result
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_packet_t odp_ipsec_packet_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+ _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_IPSEC);
+
+ return odp_packet_from_event(ev);
+}
+
+_ODP_INLINE odp_event_t odp_ipsec_packet_to_event(odp_packet_t pkt)
+{
+ return odp_packet_to_event(pkt);
+}
+
+_ODP_INLINE int odp_ipsec_result(odp_ipsec_packet_result_t *result, odp_packet_t pkt)
+{
+ odp_ipsec_packet_result_t *res;
+
+ _ODP_ASSERT(result != NULL);
+ _ODP_ASSERT(odp_packet_subtype(pkt) == ODP_EVENT_PACKET_IPSEC);
+
+ res = _odp_pkt_get_ptr(pkt, odp_ipsec_packet_result_t, ipsec_ctx);
+
+ *result = *res;
+
+ return 0;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
index 2de04c814..9330d89f8 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, Linaro Limited
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,21 +14,53 @@
#ifndef _ODP_PLAT_PACKET_FLAG_INLINES_H_
#define _ODP_PLAT_PACKET_FLAG_INLINES_H_
-#include <odp/api/plat/packet_types.h>
+#include <odp/api/abi/packet_types.h>
+#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/hints.h>
-/** @internal Inline function offsets */
-extern const _odp_packet_inline_offset_t _odp_packet_inline;
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
-/** @internal Inline function @param pkt @return */
static inline uint64_t _odp_packet_input_flags(odp_packet_t pkt)
{
- return *(uint64_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.input_flags);
+ return _odp_pkt_get(pkt, uint64_t, input_flags);
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_l2(odp_packet_t pkt)
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_packet_has_l2 __odp_packet_has_l2
+ #define odp_packet_has_l3 __odp_packet_has_l3
+ #define odp_packet_has_l4 __odp_packet_has_l4
+ #define odp_packet_has_eth __odp_packet_has_eth
+ #define odp_packet_has_jumbo __odp_packet_has_jumbo
+ #define odp_packet_has_flow_hash __odp_packet_has_flow_hash
+ #define odp_packet_has_ts __odp_packet_has_ts
+ #define odp_packet_has_ipsec __odp_packet_has_ipsec
+ #define odp_packet_has_eth_bcast __odp_packet_has_eth_bcast
+ #define odp_packet_has_eth_mcast __odp_packet_has_eth_mcast
+ #define odp_packet_has_vlan __odp_packet_has_vlan
+ #define odp_packet_has_vlan_qinq __odp_packet_has_vlan_qinq
+ #define odp_packet_has_arp __odp_packet_has_arp
+ #define odp_packet_has_ipv4 __odp_packet_has_ipv4
+ #define odp_packet_has_ipv6 __odp_packet_has_ipv6
+ #define odp_packet_has_ip_bcast __odp_packet_has_ip_bcast
+ #define odp_packet_has_ip_mcast __odp_packet_has_ip_mcast
+ #define odp_packet_has_ipfrag __odp_packet_has_ipfrag
+ #define odp_packet_has_ipopt __odp_packet_has_ipopt
+ #define odp_packet_has_udp __odp_packet_has_udp
+ #define odp_packet_has_tcp __odp_packet_has_tcp
+ #define odp_packet_has_sctp __odp_packet_has_sctp
+ #define odp_packet_has_icmp __odp_packet_has_icmp
+ #define odp_packet_has_error __odp_packet_has_error
+ #define odp_packet_has_l2_error __odp_packet_has_l2_error
+ #define odp_packet_has_l3_error __odp_packet_has_l3_error
+ #define odp_packet_has_l4_error __odp_packet_has_l4_error
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE int odp_packet_has_l2(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -35,8 +68,23 @@ static inline int _odp_packet_has_l2(odp_packet_t pkt)
return flags.l2;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_eth(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_l3(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.l3;
+}
+
+_ODP_INLINE int odp_packet_has_l4(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.l4;
+}
+
+_ODP_INLINE int odp_packet_has_eth(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -44,8 +92,7 @@ static inline int _odp_packet_has_eth(odp_packet_t pkt)
return flags.eth;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_jumbo(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_jumbo(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -53,8 +100,7 @@ static inline int _odp_packet_has_jumbo(odp_packet_t pkt)
return flags.jumbo;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_flow_hash(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_flow_hash(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -62,8 +108,7 @@ static inline int _odp_packet_has_flow_hash(odp_packet_t pkt)
return flags.flow_hash;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_has_ts(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_has_ts(odp_packet_t pkt)
{
_odp_packet_input_flags_t flags;
@@ -71,20 +116,171 @@ static inline int _odp_packet_has_ts(odp_packet_t pkt)
return flags.timestamp;
}
-/* Include inlined versions of API functions */
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
+_ODP_INLINE int odp_packet_has_ipsec(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
-/** @ingroup odp_packet
- * @{
- */
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipsec;
+}
-#include <odp/api/plat/packet_flag_inlines_api.h>
+_ODP_INLINE int odp_packet_has_eth_bcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
-/**
- * @}
- */
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.eth_bcast;
+}
-#endif
+_ODP_INLINE int odp_packet_has_eth_mcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.eth_mcast;
+}
+
+_ODP_INLINE int odp_packet_has_vlan(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.vlan;
+}
+
+_ODP_INLINE int odp_packet_has_vlan_qinq(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.vlan_qinq;
+}
+
+_ODP_INLINE int odp_packet_has_arp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.arp;
+}
+
+_ODP_INLINE int odp_packet_has_ipv4(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipv4;
+}
+
+_ODP_INLINE int odp_packet_has_ipv6(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipv6;
+}
+
+_ODP_INLINE int odp_packet_has_ip_bcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ip_bcast;
+}
+
+_ODP_INLINE int odp_packet_has_ip_mcast(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ip_mcast;
+}
+
+_ODP_INLINE int odp_packet_has_ipfrag(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipfrag;
+}
+
+_ODP_INLINE int odp_packet_has_ipopt(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.ipopt;
+}
+
+_ODP_INLINE int odp_packet_has_udp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.udp;
+}
+
+_ODP_INLINE int odp_packet_has_tcp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.tcp;
+}
+
+_ODP_INLINE int odp_packet_has_sctp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.sctp;
+}
+
+_ODP_INLINE int odp_packet_has_icmp(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t flags;
+
+ flags.all = _odp_packet_input_flags(pkt);
+ return flags.icmp;
+}
+
+_ODP_INLINE int odp_packet_has_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ return flags.all.error != 0;
+}
+
+_ODP_INLINE int odp_packet_has_l2_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ /* L2 parsing is always done by default and hence
+ no additional check is required. */
+ return flags.snap_len_err;
+}
+
+_ODP_INLINE int odp_packet_has_l3_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.ip_err;
+}
+
+_ODP_INLINE int odp_packet_has_l4_error(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.tcp_err | flags.udp_err;
+}
+
+/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines_api.h b/platform/linux-generic/include/odp/api/plat/packet_flag_inlines_api.h
deleted file mode 100644
index f4e143aa0..000000000
--- a/platform/linux-generic/include/odp/api/plat/packet_flag_inlines_api.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Packet inline functions
- */
-
-#ifndef _ODP_PLAT_PACKET_FLAG_INLINES_API_H_
-#define _ODP_PLAT_PACKET_FLAG_INLINES_API_H_
-
-_ODP_INLINE int odp_packet_has_l2(odp_packet_t pkt)
-{
- return _odp_packet_has_l2(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_eth(odp_packet_t pkt)
-{
- return _odp_packet_has_eth(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_jumbo(odp_packet_t pkt)
-{
- return _odp_packet_has_jumbo(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_flow_hash(odp_packet_t pkt)
-{
- return _odp_packet_has_flow_hash(pkt);
-}
-
-_ODP_INLINE int odp_packet_has_ts(odp_packet_t pkt)
-{
- return _odp_packet_has_ts(pkt);
-}
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
new file mode 100644
index 000000000..691965624
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
@@ -0,0 +1,176 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP packet descriptor
+ */
+
+#ifndef ODP_PACKET_INLINE_TYPES_H_
+#define ODP_PACKET_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Packet field accessor */
+#define _odp_pkt_get(pkt, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+
+#define _odp_pkt_get_ptr(pkt, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+
+/* Packet header field offsets for inline functions */
+typedef struct _odp_packet_inline_offset_t {
+ uint16_t seg_data;
+ uint16_t seg_len;
+ uint16_t seg_next;
+ uint16_t frame_len;
+ uint16_t headroom;
+ uint16_t tailroom;
+ uint16_t pool;
+ uint16_t input;
+ uint16_t seg_count;
+ uint16_t user_ptr;
+ uint16_t user_area;
+ uint16_t l2_offset;
+ uint16_t l3_offset;
+ uint16_t l4_offset;
+ uint16_t flow_hash;
+ uint16_t timestamp;
+ uint16_t input_flags;
+ uint16_t flags;
+ uint16_t cls_mark;
+ uint16_t ipsec_ctx;
+ uint16_t crypto_op;
+
+} _odp_packet_inline_offset_t;
+
+extern const _odp_packet_inline_offset_t _odp_packet_inline;
+
+/* Packet input & protocol flags */
+typedef union {
+ /* All input flags */
+ uint64_t all;
+
+ /* Individual input flags */
+ struct {
+ uint64_t dst_queue:1; /* Dst queue present */
+ uint64_t cls_mark: 1; /* Classifier mark value present*/
+
+ uint64_t flow_hash:1; /* Flow hash present */
+ uint64_t timestamp:1; /* Timestamp present */
+
+ uint64_t l2:1; /* known L2 protocol present */
+ uint64_t l3:1; /* known L3 protocol present */
+ uint64_t l4:1; /* known L4 protocol present */
+
+ uint64_t eth:1; /* Ethernet */
+ uint64_t eth_bcast:1; /* Ethernet broadcast */
+ uint64_t eth_mcast:1; /* Ethernet multicast */
+ uint64_t jumbo:1; /* Jumbo frame */
+ uint64_t vlan:1; /* VLAN hdr found */
+ uint64_t vlan_qinq:1; /* Stacked VLAN found, QinQ */
+
+ uint64_t snap:1; /* SNAP */
+ uint64_t arp:1; /* ARP */
+
+ uint64_t ipv4:1; /* IPv4 */
+ uint64_t ipv6:1; /* IPv6 */
+ uint64_t ip_bcast:1; /* IP broadcast */
+ uint64_t ip_mcast:1; /* IP multicast */
+ uint64_t ipfrag:1; /* IP fragment */
+ uint64_t ipopt:1; /* IP optional headers */
+
+ uint64_t ipsec:1; /* IPSec packet. Required by the
+ odp_packet_has_ipsec_set() func. */
+ uint64_t ipsec_ah:1; /* IPSec authentication header */
+ uint64_t ipsec_esp:1; /* IPSec encapsulating security
+ payload */
+ uint64_t udp:1; /* UDP */
+ uint64_t tcp:1; /* TCP */
+ uint64_t sctp:1; /* SCTP */
+ uint64_t icmp:1; /* ICMP */
+ uint64_t no_next_hdr:1; /* No Next Header */
+
+ uint64_t color:2; /* Packet color for traffic mgmt */
+ uint64_t nodrop:1; /* Drop eligibility status */
+
+ uint64_t l3_chksum_done:1; /* L3 checksum validation done */
+ uint64_t l4_chksum_done:1; /* L4 checksum validation done */
+ uint64_t ipsec_udp:1; /* UDP-encapsulated IPsec packet */
+ uint64_t udp_chksum_zero:1; /* UDP header had 0 as chksum */
+ };
+
+} _odp_packet_input_flags_t;
+
+/*
+ * Additional packet flags
+ */
+typedef union {
+ /* All flags */
+ uint32_t all_flags;
+
+ struct {
+ uint32_t reserved1: 4;
+
+ /*
+ * Init flags
+ */
+ uint32_t user_ptr_set: 1; /* User has set a non-NULL value */
+ uint32_t user_flag: 1;
+
+ /*
+ * Packet output flags
+ */
+ uint32_t lso: 1; /* LSO requested */
+ uint32_t payload_off: 1; /* Payload offset is valid */
+ uint32_t l3_chksum_set: 1; /* L3 chksum bit is valid */
+ uint32_t l3_chksum: 1; /* L3 chksum override */
+ uint32_t l4_chksum_set: 1; /* L4 chksum bit is valid */
+ uint32_t l4_chksum: 1; /* L4 chksum override */
+ uint32_t ts_set: 1; /* Set Tx timestamp */
+ uint32_t tx_compl_ev: 1; /* Tx completion event requested */
+ uint32_t tx_compl_poll: 1; /* Tx completion poll requested */
+ uint32_t free_ctrl: 1; /* Don't free option */
+ uint32_t tx_aging: 1; /* Packet aging at Tx requested */
+ uint32_t shaper_len_adj: 8; /* Adjustment for traffic mgr */
+
+ /*
+ * Error flags
+ */
+ uint32_t snap_len_err: 1; /* Snap length error */
+ uint32_t ip_err: 1; /* IP error */
+ uint32_t l3_chksum_err: 1; /* L3 checksum error */
+ uint32_t tcp_err: 1; /* TCP error */
+ uint32_t udp_err: 1; /* UDP error */
+ uint32_t sctp_err: 1; /* SCTP error */
+ uint32_t l4_chksum_err: 1; /* L4 checksum error */
+ };
+
+ /* Flag groups */
+ struct {
+ uint32_t reserved2: 4;
+ uint32_t other: 21; /* All other flags */
+ uint32_t error: 7; /* All error flags */
+ } all;
+
+} _odp_packet_flags_t;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
index 3dd643fe2..2dd74fa29 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2017, Linaro Limited
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,176 +14,671 @@
#ifndef _ODP_PLAT_PACKET_INLINES_H_
#define _ODP_PLAT_PACKET_INLINES_H_
-#include <odp/api/plat/packet_types.h>
-#include <odp/api/pool.h>
-#include <odp/api/packet_io.h>
+#include <odp/api/event.h>
#include <odp/api/hints.h>
+#include <odp/api/packet_types.h>
+#include <odp/api/pool_types.h>
+#include <odp/api/time_types.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/packet_io_inlines.h>
+#include <odp/api/plat/packet_inline_types.h>
+#include <odp/api/plat/pool_inline_types.h>
+#include <odp/api/plat/event_inline_types.h>
+
+#include <stdint.h>
+#include <string.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_packet_data __odp_packet_data
+ #define odp_packet_seg_len __odp_packet_seg_len
+ #define odp_packet_data_seg_len __odp_packet_data_seg_len
+ #define odp_packet_len __odp_packet_len
+ #define odp_packet_headroom __odp_packet_headroom
+ #define odp_packet_tailroom __odp_packet_tailroom
+ #define odp_packet_pool __odp_packet_pool
+ #define odp_packet_input __odp_packet_input
+ #define odp_packet_input_set __odp_packet_input_set
+ #define odp_packet_input_index __odp_packet_input_index
+ #define odp_packet_num_segs __odp_packet_num_segs
+ #define odp_packet_user_ptr __odp_packet_user_ptr
+ #define odp_packet_user_ptr_set __odp_packet_user_ptr_set
+ #define odp_packet_user_area __odp_packet_user_area
+ #define odp_packet_user_area_size __odp_packet_user_area_size
+ #define odp_packet_user_flag __odp_packet_user_flag
+ #define odp_packet_user_flag_set __odp_packet_user_flag_set
+ #define odp_packet_l2_offset __odp_packet_l2_offset
+ #define odp_packet_l3_offset __odp_packet_l3_offset
+ #define odp_packet_l4_offset __odp_packet_l4_offset
+ #define odp_packet_l2_offset_set __odp_packet_l2_offset_set
+ #define odp_packet_l3_offset_set __odp_packet_l3_offset_set
+ #define odp_packet_l4_offset_set __odp_packet_l4_offset_set
+ #define odp_packet_l2_ptr __odp_packet_l2_ptr
+ #define odp_packet_l3_ptr __odp_packet_l3_ptr
+ #define odp_packet_l4_ptr __odp_packet_l4_ptr
+ #define odp_packet_l2_type __odp_packet_l2_type
+ #define odp_packet_l3_type __odp_packet_l3_type
+ #define odp_packet_l4_type __odp_packet_l4_type
+ #define odp_packet_l3_chksum_status __odp_packet_l3_chksum_status
+ #define odp_packet_l4_chksum_status __odp_packet_l4_chksum_status
+ #define odp_packet_l3_chksum_insert __odp_packet_l3_chksum_insert
+ #define odp_packet_l4_chksum_insert __odp_packet_l4_chksum_insert
+ #define odp_packet_flow_hash __odp_packet_flow_hash
+ #define odp_packet_ts __odp_packet_ts
+ #define odp_packet_ts_set __odp_packet_ts_set
+ #define odp_packet_ts_request __odp_packet_ts_request
+ #define odp_packet_head __odp_packet_head
+ #define odp_packet_is_segmented __odp_packet_is_segmented
+ #define odp_packet_first_seg __odp_packet_first_seg
+ #define odp_packet_seg_data __odp_packet_seg_data
+ #define odp_packet_seg_data_len __odp_packet_seg_data_len
+ #define odp_packet_next_seg __odp_packet_next_seg
+ #define odp_packet_prefetch __odp_packet_prefetch
+ #define odp_packet_copy_from_mem __odp_packet_copy_from_mem
+ #define odp_packet_copy_to_mem __odp_packet_copy_to_mem
+ #define odp_packet_from_event __odp_packet_from_event
+ #define odp_packet_to_event __odp_packet_to_event
+ #define odp_packet_from_event_multi __odp_packet_from_event_multi
+ #define odp_packet_to_event_multi __odp_packet_to_event_multi
+ #define odp_packet_subtype __odp_packet_subtype
+ #define odp_packet_tx_compl_from_event __odp_packet_tx_compl_from_event
+ #define odp_packet_tx_compl_to_event __odp_packet_tx_compl_to_event
+ #define odp_packet_color __odp_packet_color
+ #define odp_packet_drop_eligible __odp_packet_drop_eligible
+ #define odp_packet_shaper_len_adjust __odp_packet_shaper_len_adjust
+ #define odp_packet_cls_mark __odp_packet_cls_mark
+ #define odp_packet_buf_data_len __odp_packet_buf_data_len
+ #define odp_packet_buf_size __odp_packet_buf_size
+ #define odp_packet_buf_head __odp_packet_buf_head
+ #define odp_packet_buf_data_offset __odp_packet_buf_data_offset
+ #define odp_packet_buf_data_set __odp_packet_buf_data_set
+ #define odp_packet_buf_from_head __odp_packet_buf_from_head
+
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+void *_odp_packet_map(void *pkt_ptr, uint32_t offset, uint32_t *seg_len,
+ odp_packet_seg_t *seg);
-/** @internal Inline function offsets */
-extern const _odp_packet_inline_offset_t _odp_packet_inline;
+int _odp_packet_copy_from_mem_seg(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, const void *src);
-#if ODP_ABI_COMPAT == 1
-/** @internal Inline function @param seg @return */
-static inline uint32_t _odp_packet_seg_to_ndx(odp_packet_seg_t seg)
+int _odp_packet_copy_to_mem_seg(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, void *dst);
+
+_ODP_INLINE void *odp_packet_data(odp_packet_t pkt)
{
- return _odp_typeval(seg);
+ return _odp_pkt_get(pkt, void *, seg_data);
}
-/** @internal Inline function @param ndx @return */
-static inline odp_packet_seg_t _odp_packet_seg_from_ndx(uint32_t ndx)
+_ODP_INLINE uint32_t odp_packet_seg_len(odp_packet_t pkt)
{
- return _odp_cast_scalar(odp_packet_seg_t, ndx);
+ return _odp_pkt_get(pkt, uint32_t, seg_len);
+}
+
+_ODP_INLINE void *odp_packet_data_seg_len(odp_packet_t pkt,
+ uint32_t *seg_len)
+{
+ *seg_len = odp_packet_seg_len(pkt);
+ return odp_packet_data(pkt);
+}
+
+_ODP_INLINE uint32_t odp_packet_len(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint32_t, frame_len);
+}
+
+_ODP_INLINE uint32_t odp_packet_headroom(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint16_t, headroom);
+}
+
+_ODP_INLINE uint32_t odp_packet_tailroom(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint16_t, tailroom);
+}
+
+_ODP_INLINE odp_pool_t odp_packet_pool(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, odp_pool_t, pool);
+}
+
+_ODP_INLINE odp_pktio_t odp_packet_input(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, odp_pktio_t, input);
+}
+
+_ODP_INLINE void odp_packet_input_set(odp_packet_t pkt, odp_pktio_t pktio)
+{
+ odp_pktio_t *pktio_ptr = _odp_pkt_get_ptr(pkt, odp_pktio_t, input);
+
+ *pktio_ptr = pktio;
+}
+
+_ODP_INLINE int odp_packet_input_index(odp_packet_t pkt)
+{
+ odp_pktio_t pktio = odp_packet_input(pkt);
+
+ return odp_pktio_index(pktio);
+}
+
+_ODP_INLINE int odp_packet_num_segs(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint8_t, seg_count);
+}
+
+_ODP_INLINE void *odp_packet_user_ptr(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ if (flags.user_ptr_set == 0)
+ return NULL;
+
+ return _odp_pkt_get(pkt, void *, user_ptr);
+}
+
+_ODP_INLINE void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ptr)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+ const void **user_ptr = _odp_pkt_get_ptr(pkt, const void *, user_ptr);
+
+ if (odp_unlikely(ptr == NULL)) {
+ flags->user_ptr_set = 0;
+ return;
+ }
+
+ *user_ptr = ptr;
+ flags->user_ptr_set = 1;
+}
+
+_ODP_INLINE void *odp_packet_user_area(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, void *, user_area);
+}
+
+_ODP_INLINE uint32_t odp_packet_user_area_size(odp_packet_t pkt)
+{
+ void *pool = _odp_pkt_get(pkt, void *, pool);
+
+ return _odp_pool_get(pool, uint32_t, uarea_size);
}
-#endif
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_data(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_user_flag(odp_packet_t pkt)
{
- return *(void **)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.data);
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.user_flag;
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_seg_len(odp_packet_t pkt)
+_ODP_INLINE void odp_packet_user_flag_set(odp_packet_t pkt, int val)
{
- return *(uint32_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.seg_len);
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->user_flag = !!val;
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_len(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_l2_offset(odp_packet_t pkt)
{
- return *(uint32_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.frame_len);
+ return _odp_pkt_get(pkt, uint16_t, l2_offset);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_headroom(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_l3_offset(odp_packet_t pkt)
{
- return *(uint32_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.headroom);
+ return _odp_pkt_get(pkt, uint16_t, l3_offset);
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_tailroom(odp_packet_t pkt)
+_ODP_INLINE uint32_t odp_packet_l4_offset(odp_packet_t pkt)
{
- return *(uint32_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.tailroom);
+ return _odp_pkt_get(pkt, uint16_t, l4_offset);
}
-/** @internal Inline function @param pkt @return */
-static inline odp_pool_t _odp_packet_pool(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_l2_offset_set(odp_packet_t pkt, uint32_t offset)
{
- return *(odp_pool_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.pool);
+ uint16_t *l2_offset = _odp_pkt_get_ptr(pkt, uint16_t, l2_offset);
+ _odp_packet_input_flags_t *input_flags = _odp_pkt_get_ptr(pkt, _odp_packet_input_flags_t,
+ input_flags);
+
+ if (odp_unlikely(offset >= odp_packet_len(pkt)))
+ return -1;
+
+ input_flags->l2 = 1;
+ *l2_offset = (uint16_t)offset;
+ return 0;
}
-/** @internal Inline function @param pkt @return */
-static inline odp_pktio_t _odp_packet_input(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_l3_offset_set(odp_packet_t pkt, uint32_t offset)
{
- return *(odp_pktio_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.input);
+ uint16_t *l3_offset = _odp_pkt_get_ptr(pkt, uint16_t, l3_offset);
+
+ if (odp_unlikely(offset >= odp_packet_len(pkt)))
+ return -1;
+
+ *l3_offset = (uint16_t)offset;
+ return 0;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_num_segs(odp_packet_t pkt)
+_ODP_INLINE int odp_packet_l4_offset_set(odp_packet_t pkt, uint32_t offset)
{
- return *(uint8_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.segcount);
+ uint16_t *l4_offset = _odp_pkt_get_ptr(pkt, uint16_t, l4_offset);
+
+ if (odp_unlikely(offset >= odp_packet_len(pkt)))
+ return -1;
+
+ *l4_offset = (uint16_t)offset;
+ return 0;
}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_user_ptr(odp_packet_t pkt)
+_ODP_INLINE void *odp_packet_l2_ptr(odp_packet_t pkt, uint32_t *len)
{
- return *(void **)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.user_ptr);
+ uint32_t offset = odp_packet_l2_offset(pkt);
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+
+ if (odp_unlikely(offset >= seg_len)) {
+ void *pkt_hdr = (void *)pkt;
+
+ return _odp_packet_map(pkt_hdr, offset, len, NULL);
+ }
+
+ if (len)
+ *len = seg_len - offset;
+
+ return data + offset;
}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_user_area(odp_packet_t pkt)
+_ODP_INLINE void *odp_packet_l3_ptr(odp_packet_t pkt, uint32_t *len)
{
- return *(void **)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.user_area);
+ uint32_t offset = odp_packet_l3_offset(pkt);
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+
+ if (odp_unlikely(offset >= seg_len)) {
+ void *pkt_hdr = (void *)pkt;
+
+ return _odp_packet_map(pkt_hdr, offset, len, NULL);
+ }
+
+ if (len)
+ *len = seg_len - offset;
+
+ return data + offset;
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_user_area_size(odp_packet_t pkt)
+_ODP_INLINE void *odp_packet_l4_ptr(odp_packet_t pkt, uint32_t *len)
{
- return *(uint32_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.user_area_size);
+ uint32_t offset = odp_packet_l4_offset(pkt);
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+
+ if (odp_unlikely(offset >= seg_len)) {
+ void *pkt_hdr = (void *)pkt;
+
+ return _odp_packet_map(pkt_hdr, offset, len, NULL);
+ }
+
+ if (len)
+ *len = seg_len - offset;
+
+ return data + offset;
}
-/** @internal Inline function @param pkt @return */
-static inline uint32_t _odp_packet_flow_hash(odp_packet_t pkt)
+_ODP_INLINE odp_proto_l2_type_t odp_packet_l2_type(odp_packet_t pkt)
{
- return *(uint32_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.flow_hash);
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return input_flags.eth ? ODP_PROTO_L2_TYPE_ETH : ODP_PROTO_L2_TYPE_NONE;
}
-/** @internal Inline function @param pkt @return */
-static inline odp_time_t _odp_packet_ts(odp_packet_t pkt)
+_ODP_INLINE odp_proto_l3_type_t odp_packet_l3_type(odp_packet_t pkt)
{
- return *(odp_time_t *)(uintptr_t)((uint8_t *)pkt +
- _odp_packet_inline.timestamp);
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (input_flags.ipv4)
+ return ODP_PROTO_L3_TYPE_IPV4;
+ else if (input_flags.ipv6)
+ return ODP_PROTO_L3_TYPE_IPV6;
+ else if (input_flags.arp)
+ return ODP_PROTO_L3_TYPE_ARP;
+
+ return ODP_PROTO_L3_TYPE_NONE;
}
-/** @internal Inline function @param pkt @return */
-static inline void *_odp_packet_head(odp_packet_t pkt)
+_ODP_INLINE odp_proto_l4_type_t odp_packet_l4_type(odp_packet_t pkt)
{
- return (uint8_t *)_odp_packet_data(pkt) - _odp_packet_headroom(pkt);
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (input_flags.tcp)
+ return ODP_PROTO_L4_TYPE_TCP;
+ else if (input_flags.udp)
+ return ODP_PROTO_L4_TYPE_UDP;
+ else if (input_flags.sctp)
+ return ODP_PROTO_L4_TYPE_SCTP;
+ else if (input_flags.ipsec_ah)
+ return ODP_PROTO_L4_TYPE_AH;
+ else if (input_flags.ipsec_esp)
+ return ODP_PROTO_L4_TYPE_ESP;
+ else if (input_flags.icmp && input_flags.ipv4)
+ return ODP_PROTO_L4_TYPE_ICMPV4;
+ else if (input_flags.icmp && input_flags.ipv6)
+ return ODP_PROTO_L4_TYPE_ICMPV6;
+ else if (input_flags.no_next_hdr)
+ return ODP_PROTO_L4_TYPE_NO_NEXT;
+
+ return ODP_PROTO_L4_TYPE_NONE;
}
-/** @internal Inline function @param pkt @return */
-static inline int _odp_packet_is_segmented(odp_packet_t pkt)
+_ODP_INLINE odp_packet_chksum_status_t odp_packet_l3_chksum_status(odp_packet_t pkt)
{
- return _odp_packet_num_segs(pkt) > 1;
+ _odp_packet_flags_t flags;
+ _odp_packet_input_flags_t input_flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (!input_flags.l3_chksum_done)
+ return ODP_PACKET_CHKSUM_UNKNOWN;
+
+ if (flags.l3_chksum_err)
+ return ODP_PACKET_CHKSUM_BAD;
+
+ return ODP_PACKET_CHKSUM_OK;
}
-/** @internal Inline function @param pkt @return */
-static inline odp_packet_seg_t _odp_packet_first_seg(odp_packet_t pkt)
+_ODP_INLINE odp_packet_chksum_status_t odp_packet_l4_chksum_status(odp_packet_t pkt)
{
- (void)pkt;
+ _odp_packet_flags_t flags;
+ _odp_packet_input_flags_t input_flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ if (!input_flags.l4_chksum_done)
+ return ODP_PACKET_CHKSUM_UNKNOWN;
+
+ if (flags.l4_chksum_err)
+ return ODP_PACKET_CHKSUM_BAD;
- return _odp_packet_seg_from_ndx(0);
+ return ODP_PACKET_CHKSUM_OK;
}
-/** @internal Inline function @param pkt @return */
-static inline odp_packet_seg_t _odp_packet_last_seg(odp_packet_t pkt)
+_ODP_INLINE void odp_packet_l3_chksum_insert(odp_packet_t pkt, int insert)
{
- return _odp_packet_seg_from_ndx(_odp_packet_num_segs(pkt) - 1);
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->l3_chksum_set = 1;
+ flags->l3_chksum = !!insert;
+}
+
+_ODP_INLINE void odp_packet_l4_chksum_insert(odp_packet_t pkt, int insert)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->l4_chksum_set = 1;
+ flags->l4_chksum = !!insert;
}
-/** @internal Inline function @param pkt @param seg @return */
-static inline odp_packet_seg_t _odp_packet_next_seg(odp_packet_t pkt,
- odp_packet_seg_t seg)
+_ODP_INLINE uint32_t odp_packet_flow_hash(odp_packet_t pkt)
{
- if (odp_unlikely(_odp_packet_seg_to_ndx(seg) >=
- _odp_packet_seg_to_ndx(_odp_packet_last_seg(pkt))))
+ return _odp_pkt_get(pkt, uint32_t, flow_hash);
+}
+
+_ODP_INLINE odp_time_t odp_packet_ts(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, odp_time_t, timestamp);
+}
+
+_ODP_INLINE void odp_packet_ts_set(odp_packet_t pkt, odp_time_t timestamp)
+{
+ odp_time_t *ts = _odp_pkt_get_ptr(pkt, odp_time_t, timestamp);
+ _odp_packet_input_flags_t *input_flags = _odp_pkt_get_ptr(pkt, _odp_packet_input_flags_t,
+ input_flags);
+
+ *ts = timestamp;
+ input_flags->timestamp = 1;
+}
+
+_ODP_INLINE void odp_packet_ts_request(odp_packet_t pkt, int enable)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->ts_set = !!enable;
+}
+
+_ODP_INLINE void *odp_packet_head(odp_packet_t pkt)
+{
+ return (uint8_t *)odp_packet_data(pkt) - odp_packet_headroom(pkt);
+}
+
+_ODP_INLINE int odp_packet_is_segmented(odp_packet_t pkt)
+{
+ return _odp_pkt_get(pkt, uint8_t, seg_count) > 1;
+}
+
+_ODP_INLINE odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt)
+{
+ return (odp_packet_seg_t)pkt;
+}
+
+_ODP_INLINE void *odp_packet_seg_data(odp_packet_t pkt ODP_UNUSED,
+ odp_packet_seg_t seg)
+{
+ return _odp_pkt_get((odp_packet_t)seg, void *, seg_data);
+}
+
+_ODP_INLINE uint32_t odp_packet_seg_data_len(odp_packet_t pkt ODP_UNUSED,
+ odp_packet_seg_t seg)
+{
+ return _odp_pkt_get((odp_packet_t)seg, uint32_t, seg_len);
+}
+
+_ODP_INLINE odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt ODP_UNUSED,
+ odp_packet_seg_t seg)
+{
+ void *next_seg = _odp_pkt_get((odp_packet_t)seg, void *, seg_next);
+
+ if (odp_unlikely(next_seg == NULL))
return ODP_PACKET_SEG_INVALID;
- return seg + 1;
+ return (odp_packet_seg_t)next_seg;
}
-/** @internal Inline function @param pkt @param offset @param len */
-static inline void _odp_packet_prefetch(odp_packet_t pkt, uint32_t offset,
+_ODP_INLINE void odp_packet_prefetch(odp_packet_t pkt, uint32_t offset,
uint32_t len)
{
- (void)pkt; (void)offset; (void)len;
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+ (void)len;
+
+ if (odp_unlikely(offset >= seg_len))
+ return;
+
+ odp_prefetch(data + offset);
}
-/* Include inlined versions of API functions */
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
+_ODP_INLINE int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, const void *src)
+{
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
-/** @ingroup odp_packet
- * @{
- */
+ if (odp_unlikely(offset + len > seg_len))
+ return _odp_packet_copy_from_mem_seg(pkt, offset, len, src);
-#include <odp/api/plat/packet_inlines_api.h>
+ memcpy(data + offset, src, len);
-/**
- * @}
- */
+ return 0;
+}
-#endif
+_ODP_INLINE int odp_packet_copy_to_mem(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, void *dst)
+{
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint8_t *data = (uint8_t *)odp_packet_data(pkt);
+
+ if (odp_unlikely(offset + len > seg_len))
+ return _odp_packet_copy_to_mem_seg(pkt, offset, len, dst);
+
+ memcpy(dst, data + offset, len);
+
+ return 0;
+}
+
+_ODP_INLINE odp_packet_t odp_packet_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+
+ return (odp_packet_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_packet_to_event(odp_packet_t pkt)
+{
+ return (odp_event_t)pkt;
+}
+
+_ODP_INLINE void odp_packet_from_event_multi(odp_packet_t pkt[],
+ const odp_event_t ev[],
+ int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ pkt[i] = odp_packet_from_event(ev[i]);
+}
+
+_ODP_INLINE void odp_packet_to_event_multi(const odp_packet_t pkt[],
+ odp_event_t ev[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ ev[i] = odp_packet_to_event(pkt[i]);
+}
+
+_ODP_INLINE odp_event_subtype_t odp_packet_subtype(odp_packet_t pkt)
+{
+ return (odp_event_subtype_t)_odp_event_hdr_field((odp_event_t)(uintptr_t)pkt,
+ int8_t, subtype);
+}
+
+_ODP_INLINE odp_packet_tx_compl_t odp_packet_tx_compl_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET_TX_COMPL);
+
+ return (odp_packet_tx_compl_t)(uintptr_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_packet_tx_compl_to_event(odp_packet_tx_compl_t tx_compl)
+{
+ return (odp_event_t)(uintptr_t)tx_compl;
+}
+
+_ODP_INLINE odp_packet_color_t odp_packet_color(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return (odp_packet_color_t)input_flags.color;
+}
+
+_ODP_INLINE odp_bool_t odp_packet_drop_eligible(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return !input_flags.nodrop;
+}
+
+_ODP_INLINE int8_t odp_packet_shaper_len_adjust(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return (int8_t)flags.shaper_len_adj;
+}
+
+_ODP_INLINE uint64_t odp_packet_cls_mark(odp_packet_t pkt)
+{
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = _odp_pkt_get(pkt, uint64_t, input_flags);
+
+ return input_flags.cls_mark ? _odp_pkt_get(pkt, uint16_t, cls_mark) : 0;
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
+{
+ return _odp_pkt_get(pkt_buf, uint32_t, seg_len);
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
+
+ return _odp_pool_get(pool, uint32_t, ext_pkt_buf_size) -
+ _odp_pool_get(pool, uint32_t, ext_head_offset) -
+ _odp_pool_get(pool, uint32_t, trailer_size);
+}
+
+_ODP_INLINE void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return NULL;
+
+ return (uint8_t *)(uintptr_t)pkt_buf + head_offset;
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
+{
+ void *buf_head = odp_packet_buf_head(pkt_buf);
+
+ return (uint32_t)((uintptr_t)_odp_pkt_get(pkt_buf, void *, seg_data) - (uintptr_t)buf_head);
+}
+
+_ODP_INLINE void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset,
+ uint32_t data_len)
+{
+ uint8_t *head = (uint8_t *)odp_packet_buf_head(pkt_buf);
+ uint32_t *seg_len = _odp_pkt_get_ptr(pkt_buf, uint32_t, seg_len);
+ void **seg_data = _odp_pkt_get_ptr(pkt_buf, void *, seg_data);
+
+ *seg_len = data_len;
+ *seg_data = head + data_offset;
+}
+
+_ODP_INLINE odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool, void *head)
+{
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return ODP_PACKET_BUF_INVALID;
+
+ return (odp_packet_buf_t)((uintptr_t)head - head_offset);
+}
+
+/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines_api.h b/platform/linux-generic/include/odp/api/plat/packet_inlines_api.h
deleted file mode 100644
index 233bc8761..000000000
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines_api.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/* Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Packet inline functions
- */
-
-#ifndef _ODP_PLAT_PACKET_INLINES_API_H_
-#define _ODP_PLAT_PACKET_INLINES_API_H_
-
-_ODP_INLINE void *odp_packet_data(odp_packet_t pkt)
-{
- return _odp_packet_data(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_seg_len(odp_packet_t pkt)
-{
- return _odp_packet_seg_len(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_len(odp_packet_t pkt)
-{
- return _odp_packet_len(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_headroom(odp_packet_t pkt)
-{
- return _odp_packet_headroom(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_tailroom(odp_packet_t pkt)
-{
- return _odp_packet_tailroom(pkt);
-}
-
-_ODP_INLINE odp_pool_t odp_packet_pool(odp_packet_t pkt)
-{
- return _odp_packet_pool(pkt);
-}
-
-_ODP_INLINE odp_pktio_t odp_packet_input(odp_packet_t pkt)
-{
- return _odp_packet_input(pkt);
-}
-
-_ODP_INLINE int odp_packet_num_segs(odp_packet_t pkt)
-{
- return _odp_packet_num_segs(pkt);
-}
-
-_ODP_INLINE void *odp_packet_user_ptr(odp_packet_t pkt)
-{
- return _odp_packet_user_ptr(pkt);
-}
-
-_ODP_INLINE void *odp_packet_user_area(odp_packet_t pkt)
-{
- return _odp_packet_user_area(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_user_area_size(odp_packet_t pkt)
-{
- return _odp_packet_user_area_size(pkt);
-}
-
-_ODP_INLINE uint32_t odp_packet_flow_hash(odp_packet_t pkt)
-{
- return _odp_packet_flow_hash(pkt);
-}
-
-_ODP_INLINE odp_time_t odp_packet_ts(odp_packet_t pkt)
-{
- return _odp_packet_ts(pkt);
-}
-
-_ODP_INLINE void *odp_packet_head(odp_packet_t pkt)
-{
- return _odp_packet_head(pkt);
-}
-
-_ODP_INLINE int odp_packet_is_segmented(odp_packet_t pkt)
-{
- return _odp_packet_is_segmented(pkt);
-}
-
-_ODP_INLINE odp_packet_seg_t odp_packet_first_seg(odp_packet_t pkt)
-{
- return _odp_packet_first_seg(pkt);
-}
-
-_ODP_INLINE odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt)
-{
- return _odp_packet_last_seg(pkt);
-}
-
-_ODP_INLINE odp_packet_seg_t odp_packet_next_seg(odp_packet_t pkt,
- odp_packet_seg_t seg)
-{
- return _odp_packet_next_seg(pkt, seg);
-}
-
-_ODP_INLINE void odp_packet_prefetch(odp_packet_t pkt, uint32_t offset,
- uint32_t len)
-{
- return _odp_packet_prefetch(pkt, offset, len);
-}
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_io_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_io_inlines.h
new file mode 100644
index 000000000..a3d1d0d61
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/packet_io_inlines.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_PACKET_IO_INLINES_H_
+#define ODP_PLAT_PACKET_IO_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/packet_io_types.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_pktio_index __odp_pktio_index
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE int odp_pktio_index(odp_pktio_t pktio)
+{
+ return (int)(uintptr_t)pktio - 1;
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_types.h b/platform/linux-generic/include/odp/api/plat/packet_types.h
deleted file mode 100644
index 7e3c51e6c..000000000
--- a/platform/linux-generic/include/odp/api/plat/packet_types.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP packet descriptor
- */
-
-#ifndef ODP_PACKET_TYPES_H_
-#define ODP_PACKET_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/packet.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @ingroup odp_packet
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_packet_t);
-
-#define ODP_PACKET_INVALID _odp_cast_scalar(odp_packet_t, 0)
-
-#define ODP_PACKET_OFFSET_INVALID (0x0fffffff)
-
-typedef uint8_t odp_packet_seg_t;
-
-static inline uint8_t _odp_packet_seg_to_ndx(odp_packet_seg_t seg)
-{
- return (uint8_t)seg;
-}
-
-static inline odp_packet_seg_t _odp_packet_seg_from_ndx(uint8_t ndx)
-{
- return (odp_packet_seg_t)ndx;
-}
-
-#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)-1)
-
-typedef enum {
- ODP_PACKET_GREEN = 0,
- ODP_PACKET_YELLOW = 1,
- ODP_PACKET_RED = 2,
- ODP_PACKET_ALL_COLORS = 3,
-} odp_packet_color_t;
-
-#define ODP_NUM_PACKET_COLORS 3
-
-/**
- * @}
- */
-
-#endif
-
-/** @internal Packet header field offsets for inline functions */
-typedef struct _odp_packet_inline_offset_t {
- /** @internal field offset */
- size_t data;
- /** @internal field offset */
- size_t seg_len;
- /** @internal field offset */
- size_t frame_len;
- /** @internal field offset */
- size_t headroom;
- /** @internal field offset */
- size_t tailroom;
- /** @internal field offset */
- size_t pool;
- /** @internal field offset */
- size_t input;
- /** @internal field offset */
- size_t segcount;
- /** @internal field offset */
- size_t user_ptr;
- /** @internal field offset */
- size_t user_area;
- /** @internal field offset */
- size_t user_area_size;
- /** @internal field offset */
- size_t flow_hash;
- /** @internal field offset */
- size_t timestamp;
- /** @internal field offset */
- size_t input_flags;
-
-} _odp_packet_inline_offset_t;
-
-/** @internal Packet input & protocol flags */
-typedef union {
- /** All input flags */
- uint64_t all;
-
- struct {
- uint64_t parsed_l2:1; /**< L2 parsed */
- uint64_t dst_queue:1; /**< Dst queue present */
-
- uint64_t flow_hash:1; /**< Flow hash present */
- uint64_t timestamp:1; /**< Timestamp present */
-
- uint64_t l2:1; /**< known L2 protocol present */
- uint64_t l3:1; /**< known L3 protocol present */
- uint64_t l4:1; /**< known L4 protocol present */
-
- uint64_t eth:1; /**< Ethernet */
- uint64_t eth_bcast:1; /**< Ethernet broadcast */
- uint64_t eth_mcast:1; /**< Ethernet multicast */
- uint64_t jumbo:1; /**< Jumbo frame */
- uint64_t vlan:1; /**< VLAN hdr found */
- uint64_t vlan_qinq:1; /**< Stacked VLAN found, QinQ */
-
- uint64_t snap:1; /**< SNAP */
- uint64_t arp:1; /**< ARP */
-
- uint64_t ipv4:1; /**< IPv4 */
- uint64_t ipv6:1; /**< IPv6 */
- uint64_t ip_bcast:1; /**< IP broadcast */
- uint64_t ip_mcast:1; /**< IP multicast */
- uint64_t ipfrag:1; /**< IP fragment */
- uint64_t ipopt:1; /**< IP optional headers */
-
- uint64_t ipsec:1; /**< IPSec packet. Required by the
- odp_packet_has_ipsec_set() func. */
- uint64_t ipsec_ah:1; /**< IPSec authentication header */
- uint64_t ipsec_esp:1; /**< IPSec encapsulating security
- payload */
- uint64_t udp:1; /**< UDP */
- uint64_t tcp:1; /**< TCP */
- uint64_t tcpopt:1; /**< TCP options present */
- uint64_t sctp:1; /**< SCTP */
- uint64_t icmp:1; /**< ICMP */
-
- uint64_t color:2; /**< Packet color for traffic mgmt */
- uint64_t nodrop:1; /**< Drop eligibility status */
- };
-
-} _odp_packet_input_flags_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
new file mode 100644
index 000000000..2f8e0a709
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2020-2022, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * Packet vector inline functions
+ */
+
+#ifndef _ODP_PLAT_PACKET_VECTOR_INLINES_H_
+#define _ODP_PLAT_PACKET_VECTOR_INLINES_H_
+
+#include <odp/api/event.h>
+#include <odp/api/packet_types.h>
+#include <odp/api/pool_types.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/event_vector_inline_types.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_packet_vector_from_event __odp_packet_vector_from_event
+ #define odp_packet_vector_to_event __odp_packet_vector_to_event
+ #define odp_packet_vector_tbl __odp_packet_vector_tbl
+ #define odp_packet_vector_pool __odp_packet_vector_pool
+ #define odp_packet_vector_size __odp_packet_vector_size
+ #define odp_packet_vector_size_set __odp_packet_vector_size_set
+ #define odp_packet_vector_user_area __odp_packet_vector_user_area
+ #define odp_packet_vector_user_flag __odp_packet_vector_user_flag
+ #define odp_packet_vector_user_flag_set __odp_packet_vector_user_flag_set
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_packet_vector_t odp_packet_vector_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET_VECTOR);
+
+ return (odp_packet_vector_t)ev;
+}
+
+_ODP_INLINE odp_event_t odp_packet_vector_to_event(odp_packet_vector_t pktv)
+{
+ return (odp_event_t)pktv;
+}
+
+_ODP_INLINE uint32_t odp_packet_vector_tbl(odp_packet_vector_t pktv, odp_packet_t **pkt_tbl)
+{
+ *pkt_tbl = _odp_event_vect_get_ptr(pktv, odp_packet_t, packet);
+
+ return _odp_event_vect_get(pktv, uint32_t, size);
+}
+
+_ODP_INLINE odp_pool_t odp_packet_vector_pool(odp_packet_vector_t pktv)
+{
+ return _odp_event_vect_get(pktv, odp_pool_t, pool);
+}
+
+_ODP_INLINE uint32_t odp_packet_vector_size(odp_packet_vector_t pktv)
+{
+ return _odp_event_vect_get(pktv, uint32_t, size);
+}
+
+_ODP_INLINE void odp_packet_vector_size_set(odp_packet_vector_t pktv, uint32_t size)
+{
+ uint32_t *vector_size = _odp_event_vect_get_ptr(pktv, uint32_t, size);
+
+ *vector_size = size;
+}
+
+_ODP_INLINE void *odp_packet_vector_user_area(odp_packet_vector_t pktv)
+{
+ return _odp_event_vect_get(pktv, void *, uarea_addr);
+}
+
+_ODP_INLINE int odp_packet_vector_user_flag(odp_packet_vector_t pktv)
+{
+ _odp_event_vector_flags_t flags;
+
+ flags.all_flags = _odp_event_vect_get(pktv, uint32_t, flags);
+
+ return flags.user_flag;
+}
+
+_ODP_INLINE void odp_packet_vector_user_flag_set(odp_packet_vector_t pktv, int val)
+{
+ _odp_event_vector_flags_t *flags = _odp_event_vect_get_ptr(pktv, _odp_event_vector_flags_t,
+ flags);
+
+ flags->user_flag = !!val;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
new file mode 100644
index 000000000..fbff7eda7
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP pool inline types
+ */
+
+#ifndef ODP_POOL_INLINE_TYPES_H_
+#define ODP_POOL_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/** Pool field accessor */
+#define _odp_pool_get(pool, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)pool + _odp_pool_inline.field))
+
+/** Pool header field offsets for inline functions */
+typedef struct _odp_pool_inline_offset_t {
+ uint16_t index;
+ uint16_t seg_len;
+ uint16_t uarea_size;
+ uint16_t trailer_size;
+ uint16_t ext_head_offset;
+ uint16_t ext_pkt_buf_size;
+
+} _odp_pool_inline_offset_t;
+
+extern const _odp_pool_inline_offset_t _odp_pool_inline;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/pool_inlines.h b/platform/linux-generic/include/odp/api/plat/pool_inlines.h
new file mode 100644
index 000000000..58d66fad2
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/pool_inlines.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_POOL_INLINES_H_
+#define ODP_PLAT_POOL_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/pool_types.h>
+
+#include <odp/api/plat/pool_inline_types.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_pool_index __odp_pool_index
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE int odp_pool_index(odp_pool_t pool)
+{
+ return (int)_odp_pool_get(pool, uint32_t, index);
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/pool_types.h b/platform/linux-generic/include/odp/api/plat/pool_types.h
deleted file mode 100644
index 8bc816d4e..000000000
--- a/platform/linux-generic/include/odp/api/plat/pool_types.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP pool
- */
-
-#ifndef ODP_POOL_TYPES_H_
-#define ODP_POOL_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/abi/pool.h>
-#else
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-#include <odp/api/plat/event_types.h>
-
-/** @ingroup odp_pool
- * @{
- */
-
-typedef ODP_HANDLE_T(odp_pool_t);
-
-#define ODP_POOL_INVALID _odp_cast_scalar(odp_pool_t, 0xffffffff)
-
-#define ODP_POOL_NAME_LEN 32
-
-typedef enum odp_pool_type_t {
- ODP_POOL_BUFFER = ODP_EVENT_BUFFER,
- ODP_POOL_PACKET = ODP_EVENT_PACKET,
- ODP_POOL_TIMEOUT = ODP_EVENT_TIMEOUT,
-} odp_pool_type_t;
-
-/**
- * @}
- */
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
new file mode 100644
index 000000000..593942072
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_QUEUE_INLINE_TYPES_H_
+#define ODP_PLAT_QUEUE_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <odp/api/event_types.h>
+#include <odp/api/queue_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Queue entry field accessor */
+#define _odp_qentry_field(qentry, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)qentry + \
+ _odp_queue_inline_offset.field))
+
+/* Queue entry field offsets for inline functions */
+typedef struct _odp_queue_inline_offset_t {
+ uint16_t context;
+
+} _odp_queue_inline_offset_t;
+
+/* Queue API functions */
+typedef struct {
+ odp_queue_t (*queue_create)(const char *name,
+ const odp_queue_param_t *param);
+ int (*queue_create_multi)(const char *name[],
+ const odp_queue_param_t param[],
+ odp_bool_t share_param, odp_queue_t queue[],
+ int num);
+ int (*queue_destroy)(odp_queue_t queue);
+ int (*queue_destroy_multi)(odp_queue_t queue[], int num);
+ odp_queue_t (*queue_lookup)(const char *name);
+ int (*queue_capability)(odp_queue_capability_t *capa);
+ int (*queue_context_set)(odp_queue_t queue, void *context,
+ uint32_t len);
+ int (*queue_enq)(odp_queue_t queue, odp_event_t ev);
+ int (*queue_enq_multi)(odp_queue_t queue, const odp_event_t events[],
+ int num);
+ odp_event_t (*queue_deq)(odp_queue_t queue);
+ int (*queue_deq_multi)(odp_queue_t queue, odp_event_t events[],
+ int num);
+ odp_queue_type_t (*queue_type)(odp_queue_t queue);
+ odp_schedule_sync_t (*queue_sched_type)(odp_queue_t queue);
+ odp_schedule_prio_t (*queue_sched_prio)(odp_queue_t queue);
+ odp_schedule_group_t (*queue_sched_group)(odp_queue_t queue);
+ uint32_t (*queue_lock_count)(odp_queue_t queue);
+ uint64_t (*queue_to_u64)(odp_queue_t queue);
+ void (*queue_param_init)(odp_queue_param_t *param);
+ int (*queue_info)(odp_queue_t queue, odp_queue_info_t *info);
+ void (*queue_print)(odp_queue_t queue);
+ void (*queue_print_all)(void);
+
+} _odp_queue_api_fn_t;
+
+extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inlines.h b/platform/linux-generic/include/odp/api/plat/queue_inlines.h
new file mode 100644
index 000000000..609c0c9e4
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/queue_inlines.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_QUEUE_INLINES_H_
+#define ODP_PLAT_QUEUE_INLINES_H_
+
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/event_validation_external.h>
+#include <odp/api/plat/queue_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+extern const _odp_queue_api_fn_t *_odp_queue_api;
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_queue_context __odp_queue_context
+ #define odp_queue_enq __odp_queue_enq
+ #define odp_queue_enq_multi __odp_queue_enq_multi
+ #define odp_queue_deq __odp_queue_deq
+ #define odp_queue_deq_multi __odp_queue_deq_multi
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void *odp_queue_context(odp_queue_t handle)
+{
+ void *context;
+ void *qentry = (void *)handle;
+
+ context = _odp_qentry_field(qentry, void *, context);
+
+ return context;
+}
+
+_ODP_INLINE int odp_queue_enq(odp_queue_t queue, odp_event_t ev)
+{
+ if (odp_unlikely(_odp_event_validate(ev, _ODP_EV_QUEUE_ENQ)))
+ return -1;
+
+ return _odp_queue_api->queue_enq(queue, ev);
+}
+
+_ODP_INLINE int odp_queue_enq_multi(odp_queue_t queue,
+ const odp_event_t events[], int num)
+{
+ if (odp_unlikely(_odp_event_validate_multi(events, num, _ODP_EV_QUEUE_ENQ_MULTI)))
+ return -1;
+
+ return _odp_queue_api->queue_enq_multi(queue, events, num);
+}
+
+_ODP_INLINE odp_event_t odp_queue_deq(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_deq(queue);
+}
+
+_ODP_INLINE int odp_queue_deq_multi(odp_queue_t queue,
+ odp_event_t events[], int num)
+{
+ return _odp_queue_api->queue_deq_multi(queue, events, num);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/rwlock_inlines.h b/platform/linux-generic/include/odp/api/plat/rwlock_inlines.h
new file mode 100644
index 000000000..0bb9d8c6e
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/rwlock_inlines.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_RWLOCK_INLINES_H_
+#define ODP_PLAT_RWLOCK_INLINES_H_
+
+#include <odp/api/atomic.h>
+#include <odp/api/cpu.h>
+
+#include <odp/api/abi/rwlock.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_rwlock_init __odp_rwlock_init
+ #define odp_rwlock_read_lock __odp_rwlock_read_lock
+ #define odp_rwlock_read_trylock __odp_rwlock_read_trylock
+ #define odp_rwlock_read_unlock __odp_rwlock_read_unlock
+ #define odp_rwlock_write_lock __odp_rwlock_write_lock
+ #define odp_rwlock_write_trylock __odp_rwlock_write_trylock
+ #define odp_rwlock_write_unlock __odp_rwlock_write_unlock
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void odp_rwlock_init(odp_rwlock_t *rwlock)
+{
+ odp_atomic_init_u32(&rwlock->cnt, 0);
+}
+
+_ODP_INLINE void odp_rwlock_read_lock(odp_rwlock_t *rwlock)
+{
+ uint32_t cnt;
+ int is_locked = 0;
+
+ while (is_locked == 0) {
+ cnt = odp_atomic_load_u32(&rwlock->cnt);
+		/* negative count: a writer holds the lock, wait for release */
+ if ((int32_t)cnt < 0) {
+ odp_cpu_pause();
+ continue;
+ }
+ is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt, &cnt, cnt + 1);
+ }
+}
+
+_ODP_INLINE int odp_rwlock_read_trylock(odp_rwlock_t *rwlock)
+{
+ uint32_t cnt = odp_atomic_load_u32(&rwlock->cnt);
+
+ while (cnt != (uint32_t)-1) {
+ if (odp_atomic_cas_acq_u32(&rwlock->cnt, &cnt, cnt + 1))
+ return 1;
+ }
+
+ return 0;
+}
+
+_ODP_INLINE void odp_rwlock_read_unlock(odp_rwlock_t *rwlock)
+{
+ odp_atomic_sub_rel_u32(&rwlock->cnt, 1);
+}
+
+_ODP_INLINE void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
+{
+ uint32_t cnt;
+ int is_locked = 0;
+
+ while (is_locked == 0) {
+ uint32_t zero = 0;
+
+ cnt = odp_atomic_load_u32(&rwlock->cnt);
+		/* non-zero count: lock is held by readers or a writer, wait */
+ if (cnt != 0) {
+ odp_cpu_pause();
+ continue;
+ }
+ is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt, &zero, (uint32_t)-1);
+ }
+}
+
+_ODP_INLINE int odp_rwlock_write_trylock(odp_rwlock_t *rwlock)
+{
+ uint32_t zero = 0;
+
+ return odp_atomic_cas_acq_u32(&rwlock->cnt, &zero, (uint32_t)-1);
+}
+
+_ODP_INLINE void odp_rwlock_write_unlock(odp_rwlock_t *rwlock)
+{
+ odp_atomic_store_rel_u32(&rwlock->cnt, 0);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/rwlock_recursive_inlines.h b/platform/linux-generic/include/odp/api/plat/rwlock_recursive_inlines.h
new file mode 100644
index 000000000..21ad4be4a
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/rwlock_recursive_inlines.h
@@ -0,0 +1,142 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_RWLOCK_RECURSIVE_INLINES_H_
+#define ODP_PLAT_RWLOCK_RECURSIVE_INLINES_H_
+
+#include <odp/api/rwlock.h>
+#include <odp/api/thread.h>
+
+#include <odp/api/abi/rwlock_recursive.h>
+
+#include <odp/api/plat/debug_inlines.h>
+
+#include <stdint.h>
+#include <string.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_rwlock_recursive_init __odp_rwlock_recursive_init
+ #define odp_rwlock_recursive_read_lock __odp_rwlock_recursive_read_lock
+ #define odp_rwlock_recursive_read_trylock __odp_rwlock_recursive_read_trylock
+ #define odp_rwlock_recursive_read_unlock __odp_rwlock_recursive_read_unlock
+ #define odp_rwlock_recursive_write_lock __odp_rwlock_recursive_write_lock
+ #define odp_rwlock_recursive_write_trylock __odp_rwlock_recursive_write_trylock
+ #define odp_rwlock_recursive_write_unlock __odp_rwlock_recursive_write_unlock
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void odp_rwlock_recursive_init(odp_rwlock_recursive_t *rlock)
+{
+ memset(rlock, 0, sizeof(odp_rwlock_recursive_t));
+ odp_rwlock_init(&rlock->lock);
+ rlock->wr_owner = -1;
+}
+
+/* Multiple readers can recurse the lock concurrently */
+_ODP_INLINE void odp_rwlock_recursive_read_lock(odp_rwlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ if (rlock->rd_cnt[thr]) {
+ _ODP_ASSERT(rlock->rd_cnt[thr] < UINT8_MAX);
+ rlock->rd_cnt[thr]++;
+ return;
+ }
+
+ odp_rwlock_read_lock(&rlock->lock);
+ rlock->rd_cnt[thr] = 1;
+}
+
+/* Multiple readers can recurse the lock concurrently */
+_ODP_INLINE int odp_rwlock_recursive_read_trylock(odp_rwlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ if (rlock->rd_cnt[thr]) {
+ _ODP_ASSERT(rlock->rd_cnt[thr] < UINT8_MAX);
+ rlock->rd_cnt[thr]++;
+ return 1;
+ }
+
+ if (odp_rwlock_read_trylock(&rlock->lock)) {
+ rlock->rd_cnt[thr] = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+_ODP_INLINE void odp_rwlock_recursive_read_unlock(odp_rwlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ _ODP_ASSERT(rlock->rd_cnt[thr]);
+ rlock->rd_cnt[thr]--;
+
+ if (rlock->rd_cnt[thr] > 0)
+ return;
+
+ odp_rwlock_read_unlock(&rlock->lock);
+}
+
+/* Only one writer can recurse the lock */
+_ODP_INLINE void odp_rwlock_recursive_write_lock(odp_rwlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ if (rlock->wr_owner == thr) {
+ _ODP_ASSERT(rlock->wr_cnt < UINT32_MAX);
+ rlock->wr_cnt++;
+ return;
+ }
+
+ odp_rwlock_write_lock(&rlock->lock);
+ rlock->wr_owner = thr;
+ rlock->wr_cnt = 1;
+}
+
+/* Only one writer can recurse the lock */
+_ODP_INLINE int odp_rwlock_recursive_write_trylock(odp_rwlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ if (rlock->wr_owner == thr) {
+ _ODP_ASSERT(rlock->wr_cnt < UINT32_MAX);
+ rlock->wr_cnt++;
+ return 1;
+ }
+
+ if (odp_rwlock_write_trylock(&rlock->lock)) {
+ rlock->wr_owner = thr;
+ rlock->wr_cnt = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+_ODP_INLINE void odp_rwlock_recursive_write_unlock(odp_rwlock_recursive_t *rlock)
+{
+ _ODP_ASSERT(rlock->wr_cnt);
+ rlock->wr_cnt--;
+
+ if (rlock->wr_cnt > 0)
+ return;
+
+ rlock->wr_owner = -1;
+ odp_rwlock_write_unlock(&rlock->lock);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/rwlock_recursive_types.h b/platform/linux-generic/include/odp/api/plat/rwlock_recursive_types.h
deleted file mode 100644
index 36f9204ac..000000000
--- a/platform/linux-generic/include/odp/api/plat/rwlock_recursive_types.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP recursive read/write lock
- */
-
-#ifndef ODP_RWLOCK_RECURSIVE_TYPES_H_
-#define ODP_RWLOCK_RECURSIVE_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/rwlock.h>
-#include <odp/api/std_types.h>
-#include <odp/api/thread.h>
-
-/** @internal */
-struct odp_rwlock_recursive_s {
- odp_rwlock_t lock; /**< the lock */
- int wr_owner; /**< write owner thread */
- uint32_t wr_cnt; /**< write recursion count */
- uint8_t rd_cnt[ODP_THREAD_COUNT_MAX]; /**< read recursion count */
-};
-
-typedef struct odp_rwlock_recursive_s odp_rwlock_recursive_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/rwlock_types.h b/platform/linux-generic/include/odp/api/plat/rwlock_types.h
deleted file mode 100644
index f7dc04496..000000000
--- a/platform/linux-generic/include/odp/api/plat/rwlock_types.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP rwlock
- */
-
-#ifndef ODP_RWLOCK_TYPES_H_
-#define ODP_RWLOCK_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/atomic.h>
-
-/** @internal */
-struct odp_rwlock_s {
- odp_atomic_u32_t cnt; /**< lock count
- 0 lock not taken
- -1 write lock taken
- >0 read lock(s) taken */
-};
-
-typedef struct odp_rwlock_s odp_rwlock_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/schedule_inline_types.h b/platform/linux-generic/include/odp/api/plat/schedule_inline_types.h
new file mode 100644
index 000000000..92089c9a0
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/schedule_inline_types.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_SCHEDULE_INLINE_TYPES_H_
+#define ODP_PLAT_SCHEDULE_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/event_types.h>
+#include <odp/api/queue_types.h>
+#include <odp/api/schedule_types.h>
+#include <odp/api/thrmask.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Schedule API functions */
+typedef struct {
+ uint64_t (*schedule_wait_time)(uint64_t ns);
+ int (*schedule_capability)(odp_schedule_capability_t *capa);
+ void (*schedule_config_init)(odp_schedule_config_t *config);
+ int (*schedule_config)(const odp_schedule_config_t *config);
+ odp_event_t (*schedule)(odp_queue_t *from, uint64_t wait);
+ int (*schedule_multi)(odp_queue_t *from, uint64_t wait, odp_event_t events[], int num);
+ int (*schedule_multi_wait)(odp_queue_t *from, odp_event_t events[], int num);
+ int (*schedule_multi_no_wait)(odp_queue_t *from, odp_event_t events[], int num);
+ void (*schedule_pause)(void);
+ void (*schedule_resume)(void);
+ void (*schedule_release_atomic)(void);
+ void (*schedule_release_ordered)(void);
+ void (*schedule_prefetch)(int num);
+ int (*schedule_min_prio)(void);
+ int (*schedule_max_prio)(void);
+ int (*schedule_default_prio)(void);
+ int (*schedule_num_prio)(void);
+ odp_schedule_group_t (*schedule_group_create)(const char *name, const odp_thrmask_t *mask);
+ int (*schedule_group_destroy)(odp_schedule_group_t group);
+ odp_schedule_group_t (*schedule_group_lookup)(const char *name);
+ int (*schedule_group_join)(odp_schedule_group_t group, const odp_thrmask_t *mask);
+ int (*schedule_group_leave)(odp_schedule_group_t group, const odp_thrmask_t *mask);
+ int (*schedule_group_thrmask)(odp_schedule_group_t group, odp_thrmask_t *mask);
+ int (*schedule_group_info)(odp_schedule_group_t group, odp_schedule_group_info_t *info);
+ void (*schedule_order_lock)(uint32_t lock_index);
+ void (*schedule_order_unlock)(uint32_t lock_index);
+ void (*schedule_order_unlock_lock)(uint32_t unlock_index, uint32_t lock_index);
+ void (*schedule_order_lock_start)(uint32_t lock_index);
+ void (*schedule_order_lock_wait)(uint32_t lock_index);
+ void (*schedule_order_wait)(void);
+ void (*schedule_print)(void);
+
+} _odp_schedule_api_fn_t;
+
+/* Scheduler configuration status */
+int _odp_schedule_configured(void);
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/schedule_inlines.h b/platform/linux-generic/include/odp/api/plat/schedule_inlines.h
new file mode 100644
index 000000000..733b068d9
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/schedule_inlines.h
@@ -0,0 +1,135 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_SCHEDULE_INLINES_H_
+#define ODP_PLAT_SCHEDULE_INLINES_H_
+
+#include <odp/api/event_types.h>
+#include <odp/api/queue_types.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/schedule_inline_types.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+extern const _odp_schedule_api_fn_t *_odp_sched_api;
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_schedule __odp_schedule
+ #define odp_schedule_multi __odp_schedule_multi
+ #define odp_schedule_multi_wait __odp_schedule_multi_wait
+ #define odp_schedule_multi_no_wait __odp_schedule_multi_no_wait
+ #define odp_schedule_wait_time __odp_schedule_wait_time
+ #define odp_schedule_pause __odp_schedule_pause
+ #define odp_schedule_resume __odp_schedule_resume
+ #define odp_schedule_release_atomic __odp_schedule_release_atomic
+ #define odp_schedule_release_ordered __odp_schedule_release_ordered
+ #define odp_schedule_prefetch __odp_schedule_prefetch
+ #define odp_schedule_order_lock __odp_schedule_order_lock
+ #define odp_schedule_order_unlock __odp_schedule_order_unlock
+ #define odp_schedule_order_unlock_lock __odp_schedule_order_unlock_lock
+ #define odp_schedule_order_lock_start __odp_schedule_order_lock_start
+ #define odp_schedule_order_lock_wait __odp_schedule_order_lock_wait
+ #define odp_schedule_order_wait __odp_schedule_order_wait
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
+{
+ _ODP_ASSERT(_odp_schedule_configured());
+
+ return _odp_sched_api->schedule(from, wait);
+}
+
+_ODP_INLINE int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[], int num)
+{
+ _ODP_ASSERT(_odp_schedule_configured());
+
+ return _odp_sched_api->schedule_multi(from, wait, events, num);
+}
+
+_ODP_INLINE int odp_schedule_multi_wait(odp_queue_t *from, odp_event_t events[], int num)
+{
+ _ODP_ASSERT(_odp_schedule_configured());
+
+ return _odp_sched_api->schedule_multi_wait(from, events, num);
+}
+
+_ODP_INLINE int odp_schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[], int num)
+{
+ _ODP_ASSERT(_odp_schedule_configured());
+
+ return _odp_sched_api->schedule_multi_no_wait(from, events, num);
+}
+
+_ODP_INLINE uint64_t odp_schedule_wait_time(uint64_t ns)
+{
+ return _odp_sched_api->schedule_wait_time(ns);
+}
+
+_ODP_INLINE void odp_schedule_pause(void)
+{
+ _odp_sched_api->schedule_pause();
+}
+
+_ODP_INLINE void odp_schedule_resume(void)
+{
+ _odp_sched_api->schedule_resume();
+}
+
+_ODP_INLINE void odp_schedule_release_atomic(void)
+{
+ _odp_sched_api->schedule_release_atomic();
+}
+
+_ODP_INLINE void odp_schedule_release_ordered(void)
+{
+ _odp_sched_api->schedule_release_ordered();
+}
+
+_ODP_INLINE void odp_schedule_prefetch(int num)
+{
+ _odp_sched_api->schedule_prefetch(num);
+}
+
+_ODP_INLINE void odp_schedule_order_lock(uint32_t lock_index)
+{
+ _odp_sched_api->schedule_order_lock(lock_index);
+}
+
+_ODP_INLINE void odp_schedule_order_unlock(uint32_t lock_index)
+{
+ _odp_sched_api->schedule_order_unlock(lock_index);
+}
+
+_ODP_INLINE void odp_schedule_order_unlock_lock(uint32_t unlock_index, uint32_t lock_index)
+{
+ _odp_sched_api->schedule_order_unlock_lock(unlock_index, lock_index);
+}
+
+_ODP_INLINE void odp_schedule_order_lock_start(uint32_t lock_index)
+{
+ _odp_sched_api->schedule_order_lock_start(lock_index);
+}
+
+_ODP_INLINE void odp_schedule_order_lock_wait(uint32_t lock_index)
+{
+ _odp_sched_api->schedule_order_lock_wait(lock_index);
+}
+
+_ODP_INLINE void odp_schedule_order_wait(void)
+{
+ _odp_sched_api->schedule_order_wait();
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/schedule_types.h b/platform/linux-generic/include/odp/api/plat/schedule_types.h
deleted file mode 100644
index 535fd6d05..000000000
--- a/platform/linux-generic/include/odp/api/plat/schedule_types.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP schedule
- */
-
-#ifndef ODP_SCHEDULE_TYPES_H_
-#define ODP_SCHEDULE_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odp_scheduler
- * @{
- */
-
-#define ODP_SCHED_WAIT UINT64_MAX
-#define ODP_SCHED_NO_WAIT 0
-
-typedef int odp_schedule_prio_t;
-
-#define ODP_SCHED_PRIO_HIGHEST 0
-
-#define ODP_SCHED_PRIO_NORMAL 4
-
-#define ODP_SCHED_PRIO_LOWEST 7
-
-#define ODP_SCHED_PRIO_DEFAULT ODP_SCHED_PRIO_NORMAL
-
-typedef int odp_schedule_sync_t;
-
-#define ODP_SCHED_SYNC_PARALLEL 0
-#define ODP_SCHED_SYNC_ATOMIC 1
-#define ODP_SCHED_SYNC_ORDERED 2
-
-typedef int odp_schedule_group_t;
-
-/* These must be kept in sync with thread_globals_t in odp_thread.c */
-#define ODP_SCHED_GROUP_INVALID -1
-#define ODP_SCHED_GROUP_ALL 0
-#define ODP_SCHED_GROUP_WORKER 1
-#define ODP_SCHED_GROUP_CONTROL 2
-
-#define ODP_SCHED_GROUP_NAME_LEN 32
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/spinlock_inlines.h b/platform/linux-generic/include/odp/api/plat/spinlock_inlines.h
new file mode 100644
index 000000000..a04c43f88
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/spinlock_inlines.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_SPINLOCK_INLINES_H_
+#define ODP_PLAT_SPINLOCK_INLINES_H_
+
+#include <odp/api/cpu.h>
+
+#include <odp/api/abi/spinlock.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_spinlock_init __odp_spinlock_init
+ #define odp_spinlock_lock __odp_spinlock_lock
+ #define odp_spinlock_trylock __odp_spinlock_trylock
+ #define odp_spinlock_unlock __odp_spinlock_unlock
+ #define odp_spinlock_is_locked __odp_spinlock_is_locked
+
+ #include <odp/api/plat/cpu_inlines.h>
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void odp_spinlock_init(odp_spinlock_t *spinlock)
+{
+ __atomic_clear(&spinlock->lock, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_spinlock_lock(odp_spinlock_t *spinlock)
+{
+ /* While the lock is already taken... */
+ while (__atomic_test_and_set(&spinlock->lock, __ATOMIC_ACQUIRE))
+ /* ...spin reading the flag (relaxed MM),
+ * the loop will exit when the lock becomes available
+ * and we will retry the TAS operation above */
+ while (__atomic_load_n(&spinlock->lock, __ATOMIC_RELAXED))
+ odp_cpu_pause();
+}
+
+_ODP_INLINE int odp_spinlock_trylock(odp_spinlock_t *spinlock)
+{
+ return (__atomic_test_and_set(&spinlock->lock, __ATOMIC_ACQUIRE) == 0);
+}
+
+_ODP_INLINE void odp_spinlock_unlock(odp_spinlock_t *spinlock)
+{
+ __atomic_clear(&spinlock->lock, __ATOMIC_RELEASE);
+}
+
+_ODP_INLINE int odp_spinlock_is_locked(odp_spinlock_t *spinlock)
+{
+ return __atomic_load_n(&spinlock->lock, __ATOMIC_RELAXED) != 0;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/spinlock_recursive_inlines.h b/platform/linux-generic/include/odp/api/plat/spinlock_recursive_inlines.h
new file mode 100644
index 000000000..e795353f4
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/spinlock_recursive_inlines.h
@@ -0,0 +1,98 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_SPINLOCK_RECURSIVE_INLINES_H_
+#define ODP_PLAT_SPINLOCK_RECURSIVE_INLINES_H_
+
+#include <odp/api/spinlock.h>
+#include <odp/api/thread.h>
+
+#include <odp/api/abi/spinlock_recursive.h>
+
+#include <odp/api/plat/debug_inlines.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_spinlock_recursive_init __odp_spinlock_recursive_init
+ #define odp_spinlock_recursive_lock __odp_spinlock_recursive_lock
+ #define odp_spinlock_recursive_trylock __odp_spinlock_recursive_trylock
+ #define odp_spinlock_recursive_unlock __odp_spinlock_recursive_unlock
+ #define odp_spinlock_recursive_is_locked __odp_spinlock_recursive_is_locked
+
+ #include <odp/api/plat/spinlock_inlines.h>
+ #include <odp/api/plat/thread_inlines.h>
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE void odp_spinlock_recursive_init(odp_spinlock_recursive_t *rlock)
+{
+ odp_spinlock_init(&rlock->lock);
+ rlock->owner = -1;
+ rlock->cnt = 0;
+}
+
+_ODP_INLINE void odp_spinlock_recursive_lock(odp_spinlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ if (rlock->owner == thr) {
+ _ODP_ASSERT(rlock->cnt < UINT32_MAX);
+ rlock->cnt++;
+ return;
+ }
+
+ odp_spinlock_lock(&rlock->lock);
+ rlock->owner = thr;
+ rlock->cnt = 1;
+}
+
+_ODP_INLINE int odp_spinlock_recursive_trylock(odp_spinlock_recursive_t *rlock)
+{
+ int thr = odp_thread_id();
+
+ if (rlock->owner == thr) {
+ _ODP_ASSERT(rlock->cnt < UINT32_MAX);
+ rlock->cnt++;
+ return 1;
+ }
+
+ if (odp_spinlock_trylock(&rlock->lock)) {
+ rlock->owner = thr;
+ rlock->cnt = 1;
+ return 1;
+ }
+
+ return 0;
+}
+
+_ODP_INLINE void odp_spinlock_recursive_unlock(odp_spinlock_recursive_t *rlock)
+{
+ _ODP_ASSERT(rlock->cnt);
+ rlock->cnt--;
+
+ if (rlock->cnt > 0)
+ return;
+
+ rlock->owner = -1;
+ odp_spinlock_unlock(&rlock->lock);
+}
+
+_ODP_INLINE int odp_spinlock_recursive_is_locked(odp_spinlock_recursive_t *rlock)
+{
+ return odp_thread_id() == rlock->owner ? 1 : odp_spinlock_is_locked(&rlock->lock);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/spinlock_recursive_types.h b/platform/linux-generic/include/odp/api/plat/spinlock_recursive_types.h
deleted file mode 100644
index c5a1adff9..000000000
--- a/platform/linux-generic/include/odp/api/plat/spinlock_recursive_types.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP recursive spinlock
- */
-
-#ifndef ODP_SPINLOCK_RECURSIVE_TYPES_H_
-#define ODP_SPINLOCK_RECURSIVE_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/spinlock.h>
-#include <odp/api/std_types.h>
-
-/** @internal */
-struct odp_spinlock_recursive_s {
- odp_spinlock_t lock; /**< the lock */
- int owner; /**< thread owning the lock */
- uint32_t cnt; /**< recursion count */
-};
-
-typedef struct odp_spinlock_recursive_s odp_spinlock_recursive_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/spinlock_types.h b/platform/linux-generic/include/odp/api/plat/spinlock_types.h
deleted file mode 100644
index f38ece6af..000000000
--- a/platform/linux-generic/include/odp/api/plat/spinlock_types.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP spinlock
- */
-
-#ifndef ODP_SPINLOCK_TYPES_H_
-#define ODP_SPINLOCK_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-
-/** @internal */
-struct odp_spinlock_s {
- char lock; /**< lock flag, should match odp_atomic_flag_t */
-};
-
-typedef struct odp_spinlock_s odp_spinlock_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/static_inline.h.in b/platform/linux-generic/include/odp/api/plat/static_inline.h.in
deleted file mode 100644
index 3cf004347..000000000
--- a/platform/linux-generic/include/odp/api/plat/static_inline.h.in
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Macro for static inline functions
- */
-
-#ifndef ODP_PLAT_STATIC_INLINE_H_
-#define ODP_PLAT_STATIC_INLINE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * @internal
- * @def ODP_ABI_COMPAT
- * Control ABI compatibility
- */
-
-/**
- * @internal
- * @def _ODP_INLINE
- * Define a function as inlined or not inlined (for ABI compatibility)
- */
-#if @ODP_ABI_COMPAT@
-#define ODP_ABI_COMPAT 1
-#define _ODP_INLINE
-#else
-#define ODP_ABI_COMPAT 0
-#define _ODP_INLINE static inline
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/std_clib_inlines.h b/platform/linux-generic/include/odp/api/plat/std_inlines.h
index 8f505d079..3f6a7e9d4 100644
--- a/platform/linux-generic/include/odp/api/plat/std_clib_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/std_inlines.h
@@ -1,19 +1,26 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifndef ODP_PLAT_STD_CLIB_INLINE_H_
-#define ODP_PLAT_STD_CLIB_INLINE_H_
+#ifndef ODP_PLAT_STD_INLINE_H_
+#define ODP_PLAT_STD_INLINE_H_
-#ifdef __cplusplus
-extern "C" {
-#endif
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
-#include <odp/api/spec/std_types.h>
#include <string.h>
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_memcpy __odp_memcpy
+ #define odp_memset __odp_memset
+ #define odp_memcmp __odp_memcmp
+#else
+ #define _ODP_INLINE
+#endif
+
_ODP_INLINE void *odp_memcpy(void *dst, const void *src, size_t num)
{
return memcpy(dst, src, num);
@@ -29,8 +36,6 @@ _ODP_INLINE int odp_memcmp(const void *ptr1, const void *ptr2, size_t num)
return memcmp(ptr1, ptr2, num);
}
-#ifdef __cplusplus
-}
-#endif
+/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/strong_types.h b/platform/linux-generic/include/odp/api/plat/strong_types.h
index a53d76352..6b61c7c27 100644
--- a/platform/linux-generic/include/odp/api/plat/strong_types.h
+++ b/platform/linux-generic/include/odp/api/plat/strong_types.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -15,9 +15,13 @@
#ifndef STRONG_TYPES_H_
#define STRONG_TYPES_H_
+#include <odp/api/std_types.h>
+
/** Use strong typing for ODP types */
#ifdef __cplusplus
-#define ODP_HANDLE_T(type) struct _##type { uint8_t unused_dummy_var; } *type
+/* Allow type to be expanded before concatenation with underscore */
+#define _ODP_HANDLE_T(type) struct _##type { uint8_t unused_dummy_var; } *type
+#define ODP_HANDLE_T(type) _ODP_HANDLE_T(type)
#else
#define odp_handle_t struct { uint8_t unused_dummy_var; } *
/** C/C++ helper macro for strong typing */
@@ -25,10 +29,10 @@
#endif
/** Internal macro to get value of an ODP handle */
-#define _odp_typeval(handle) ((uint32_t)(uintptr_t)(handle))
+#define _odp_typeval(handle) ((uintptr_t)(handle))
/** Internal macro to get printable value of an ODP handle */
-#define _odp_pri(handle) ((uint64_t)_odp_typeval(handle))
+#define _odp_pri(handle) ((uint64_t)(uintptr_t)(handle))
/** Internal macro to convert a scalar to a typed handle */
#define _odp_cast_scalar(type, val) ((type)(uintptr_t)(val))
diff --git a/platform/linux-generic/include/odp/api/plat/sync_inlines.h b/platform/linux-generic/include/odp/api/plat/sync_inlines.h
index 76eb68107..b3a88b629 100644
--- a/platform/linux-generic/include/odp/api/plat/sync_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/sync_inlines.h
@@ -1,7 +1,6 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
*/
/**
@@ -13,13 +12,26 @@
#ifndef ODP_PLAT_SYNC_INLINE_H_
#define ODP_PLAT_SYNC_INLINE_H_
+#include <odp/api/abi/sync_inlines.h>
+
#ifdef __cplusplus
extern "C" {
#endif
-/** @ingroup odp_barrier
- * @{
- */
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_mb_release __odp_mb_release
+ #define odp_mb_acquire __odp_mb_acquire
+ #define odp_mb_full __odp_mb_full
+ #define odp_mb_sync __odp_mb_sync
+ #define odp_mb_sync_load __odp_mb_sync_load
+ #define odp_mb_sync_store __odp_mb_sync_store
+#else
+ #define _ODP_INLINE
+#endif
_ODP_INLINE void odp_mb_release(void)
{
@@ -36,9 +48,22 @@ _ODP_INLINE void odp_mb_full(void)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
}
-/**
- * @}
- */
+_ODP_INLINE void odp_mb_sync(void)
+{
+ _odp_mb_sync();
+}
+
+_ODP_INLINE void odp_mb_sync_load(void)
+{
+ _odp_mb_sync_load();
+}
+
+_ODP_INLINE void odp_mb_sync_store(void)
+{
+ _odp_mb_sync_store();
+}
+
+/** @endcond */
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp/api/plat/thread_inline_types.h b/platform/linux-generic/include/odp/api/plat/thread_inline_types.h
new file mode 100644
index 000000000..d24263fa7
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/thread_inline_types.h
@@ -0,0 +1,36 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_THREAD_INLINE_TYPES_H_
+#define ODP_PLAT_THREAD_INLINE_TYPES_H_
+
+#include <odp/api/init.h>
+#include <odp/api/thread_types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+typedef struct {
+ odp_log_func_t log_fn;
+ odp_thread_type_t type;
+ int thr;
+ int cpu;
+
+} _odp_thread_state_t;
+
+extern __thread _odp_thread_state_t *_odp_this_thread;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/thread_inlines.h b/platform/linux-generic/include/odp/api/plat/thread_inlines.h
new file mode 100644
index 000000000..2b6957064
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/thread_inlines.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_THREAD_INLINES_H_
+#define ODP_PLAT_THREAD_INLINES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/thread_types.h>
+
+#include <odp/api/plat/thread_inline_types.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_thread_id __odp_thread_id
+ #define odp_thread_type __odp_thread_type
+ #define odp_cpu_id __odp_cpu_id
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE int odp_thread_id(void)
+{
+ return _odp_this_thread->thr;
+}
+
+_ODP_INLINE odp_thread_type_t odp_thread_type(void)
+{
+ return _odp_this_thread->type;
+}
+
+_ODP_INLINE int odp_cpu_id(void)
+{
+ return _odp_this_thread->cpu;
+}
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/thread_types.h b/platform/linux-generic/include/odp/api/plat/thread_types.h
deleted file mode 100644
index 33af45983..000000000
--- a/platform/linux-generic/include/odp/api/plat/thread_types.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP thread
- */
-
-#ifndef ODP_THREAD_TYPES_H_
-#define ODP_THREAD_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odp_thread
- * @{
- */
-
-#define ODP_THREAD_COUNT_MAX 128
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/thrmask_types.h b/platform/linux-generic/include/odp/api/plat/thrmask_types.h
deleted file mode 100644
index 5d93890c1..000000000
--- a/platform/linux-generic/include/odp/api/plat/thrmask_types.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP thread masks
- */
-
-#ifndef ODP_THRMASK_TYPES_H_
-#define ODP_THRMASK_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odp_thread
- * @{
- */
-
-#include <odp/api/cpumask.h>
-
-/**
- * Minimum size of output buffer for odp_thrmask_to_str()
- */
-#define ODP_THRMASK_STR_SIZE ODP_CPUMASK_STR_SIZE
-
-/**
- * Thread mask
- *
- * Don't access directly, use access functions.
- */
-typedef struct odp_thrmask_t {
- odp_cpumask_t m; /**< @private Mask*/
-} odp_thrmask_t;
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h b/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h
index ecbea7c4d..b596d1609 100644
--- a/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h
@@ -1,30 +1,42 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-/**
- * @file
- *
- * Ticketlock inline functions
- */
-
#ifndef _ODP_PLAT_TICKETLOCK_INLINES_H_
#define _ODP_PLAT_TICKETLOCK_INLINES_H_
#include <odp/api/atomic.h>
-#include <odp/api/sync.h>
#include <odp/api/cpu.h>
-#include <odp/api/plat/ticketlock_types.h>
+#include <odp/api/abi/ticketlock.h>
+#include <odp/api/abi/wait_until.h>
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_ticketlock_init __odp_ticketlock_init
+ #define odp_ticketlock_lock __odp_ticketlock_lock
+ #define odp_ticketlock_trylock __odp_ticketlock_trylock
+ #define odp_ticketlock_unlock __odp_ticketlock_unlock
+ #define odp_ticketlock_is_locked __odp_ticketlock_is_locked
+ /* Inline atomic functions */
+ #include <odp/api/plat/atomic_inlines.h>
+ #include <odp/api/plat/cpu_inlines.h>
+#else
+ #undef _ODP_INLINE
+ #define _ODP_INLINE
+#endif
-/** @internal
- * Acquire ticket lock.
- *
- * @param ticketlock Pointer to a ticket lock
- */
-static inline void _odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
+_ODP_INLINE void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
+{
+ odp_atomic_init_u32(&ticketlock->next_ticket, 0);
+ odp_atomic_init_u32(&ticketlock->cur_ticket, 0);
+}
+
+_ODP_INLINE void odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
{
uint32_t ticket;
@@ -35,19 +47,10 @@ static inline void _odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
/* Spin waiting for our turn. Use load-acquire so that we acquire
* all stores from the previous lock owner */
- while (ticket != odp_atomic_load_acq_u32(&ticketlock->cur_ticket))
- odp_cpu_pause();
+ _odp_wait_until_equal_acq_u32(&ticketlock->cur_ticket, ticket);
}
-/** @internal
- * Try to acquire ticket lock.
- *
- * @param tklock Pointer to a ticket lock
- *
- * @retval 1 lock acquired
- * @retval 0 lock not acquired
- */
-static inline int _odp_ticketlock_trylock(odp_ticketlock_t *tklock)
+_ODP_INLINE int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
{
/* We read 'next_ticket' and 'cur_ticket' non-atomically which should
* not be a problem as they are not independent of each other.
@@ -75,12 +78,7 @@ static inline int _odp_ticketlock_trylock(odp_ticketlock_t *tklock)
return 0;
}
-/** @internal
- * Release ticket lock
- *
- * @param ticketlock Pointer to a ticket lock
- */
-static inline void _odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
+_ODP_INLINE void odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
{
/* Release the lock by incrementing 'cur_ticket'. As we are the
* lock owner and thus the only thread that is allowed to write
@@ -92,15 +90,7 @@ static inline void _odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
odp_atomic_store_rel_u32(&ticketlock->cur_ticket, cur + 1);
}
-/** @internal
- * Check if ticket lock is locked
- *
- * @param ticketlock Pointer to a ticket lock
- *
- * @retval 1 the lock is busy (locked)
- * @retval 0 the lock is available (unlocked)
- */
-static inline int _odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
+_ODP_INLINE int odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
{
/* Compare 'cur_ticket' with 'next_ticket'. Ideally we should read
* both variables atomically but the information can become stale
@@ -111,20 +101,6 @@ static inline int _odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
odp_atomic_load_u32(&ticketlock->next_ticket);
}
-/* Include inlined versions of API functions */
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-
-/** @ingroup odp_locks
- * @{
- */
-
-#include <odp/api/plat/ticketlock_inlines_api.h>
-
-/**
- * @}
- */
-
-#endif
+/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/ticketlock_inlines_api.h b/platform/linux-generic/include/odp/api/plat/ticketlock_inlines_api.h
deleted file mode 100644
index 5efe696ff..000000000
--- a/platform/linux-generic/include/odp/api/plat/ticketlock_inlines_api.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Ticketlock inline functions
- */
-
-#ifndef _ODP_PLAT_TICKETLOCK_INLINES_API_H_
-#define _ODP_PLAT_TICKETLOCK_INLINES_API_H_
-
-_ODP_INLINE void odp_ticketlock_lock(odp_ticketlock_t *lock)
-{
- return _odp_ticketlock_lock(lock);
-}
-
-_ODP_INLINE int odp_ticketlock_trylock(odp_ticketlock_t *lock)
-{
- return _odp_ticketlock_trylock(lock);
-}
-
-_ODP_INLINE void odp_ticketlock_unlock(odp_ticketlock_t *lock)
-{
- _odp_ticketlock_unlock(lock);
-}
-
-_ODP_INLINE int odp_ticketlock_is_locked(odp_ticketlock_t *lock)
-{
- return _odp_ticketlock_is_locked(lock);
-}
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/time_inlines.h b/platform/linux-generic/include/odp/api/plat/time_inlines.h
new file mode 100644
index 000000000..8ead06f7b
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/time_inlines.h
@@ -0,0 +1,191 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_TIME_INLINES_H_
+#define ODP_PLAT_TIME_INLINES_H_
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/time_types.h>
+
+#include <odp/api/abi/time_inlines.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_time_local __odp_time_local
+ #define odp_time_global __odp_time_global
+ #define odp_time_to_ns __odp_time_to_ns
+ #define odp_time_local_ns __odp_time_local_ns
+ #define odp_time_global_ns __odp_time_global_ns
+
+ #define odp_time_local_strict __odp_time_local_strict
+ #define odp_time_global_strict __odp_time_global_strict
+ #define odp_time_local_strict_ns __odp_time_local_strict_ns
+ #define odp_time_global_strict_ns __odp_time_global_strict_ns
+
+ #define odp_time_cmp __odp_time_cmp
+ #define odp_time_diff __odp_time_diff
+ #define odp_time_diff_ns __odp_time_diff_ns
+ #define odp_time_add_ns __odp_time_add_ns
+ #define odp_time_sum __odp_time_sum
+
+ #define odp_time_local_from_ns __odp_time_local_from_ns
+ #define odp_time_global_from_ns __odp_time_global_from_ns
+
+ #define odp_time_local_res __odp_time_local_res
+ #define odp_time_global_res __odp_time_global_res
+
+ #define odp_time_wait_ns __odp_time_wait_ns
+ #define odp_time_wait_until __odp_time_wait_until
+ #define odp_time_startup __odp_time_startup
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_time_t odp_time_local(void)
+{
+ return _odp_time_cur();
+}
+
+_ODP_INLINE odp_time_t odp_time_global(void)
+{
+ return _odp_time_cur();
+}
+
+_ODP_INLINE odp_time_t odp_time_local_strict(void)
+{
+ return _odp_time_cur_strict();
+}
+
+_ODP_INLINE odp_time_t odp_time_global_strict(void)
+{
+ return _odp_time_cur_strict();
+}
+
+_ODP_INLINE uint64_t odp_time_local_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur());
+}
+
+_ODP_INLINE uint64_t odp_time_global_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur());
+}
+
+_ODP_INLINE uint64_t odp_time_local_strict_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur_strict());
+}
+
+_ODP_INLINE uint64_t odp_time_global_strict_ns(void)
+{
+ return _odp_time_to_ns(_odp_time_cur_strict());
+}
+
+_ODP_INLINE uint64_t odp_time_to_ns(odp_time_t time)
+{
+ return _odp_time_to_ns(time);
+}
+
+_ODP_INLINE int odp_time_cmp(odp_time_t t2, odp_time_t t1)
+{
+ if (odp_likely(t2.u64 > t1.u64))
+ return 1;
+
+ if (t2.u64 < t1.u64)
+ return -1;
+
+ return 0;
+}
+
+_ODP_INLINE odp_time_t odp_time_diff(odp_time_t t2, odp_time_t t1)
+{
+ odp_time_t time;
+
+ time.u64 = t2.u64 - t1.u64;
+
+ return time;
+}
+
+_ODP_INLINE uint64_t odp_time_diff_ns(odp_time_t t2, odp_time_t t1)
+{
+ odp_time_t time;
+
+ time.u64 = t2.u64 - t1.u64;
+
+ return odp_time_to_ns(time);
+}
+
+_ODP_INLINE odp_time_t odp_time_add_ns(odp_time_t time, uint64_t ns)
+{
+ odp_time_t t = _odp_time_from_ns(ns);
+
+ t.u64 += time.u64;
+
+ return t;
+}
+
+_ODP_INLINE odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
+{
+ odp_time_t time;
+
+ time.u64 = t1.u64 + t2.u64;
+
+ return time;
+}
+
+_ODP_INLINE odp_time_t odp_time_local_from_ns(uint64_t ns)
+{
+ return _odp_time_from_ns(ns);
+}
+
+_ODP_INLINE odp_time_t odp_time_global_from_ns(uint64_t ns)
+{
+ return _odp_time_from_ns(ns);
+}
+
+_ODP_INLINE uint64_t odp_time_local_res(void)
+{
+ return _odp_time_res();
+}
+
+_ODP_INLINE uint64_t odp_time_global_res(void)
+{
+ return _odp_time_res();
+}
+
+_ODP_INLINE void odp_time_wait_until(odp_time_t time)
+{
+ odp_time_t cur;
+
+ do {
+ cur = _odp_time_cur();
+ } while (odp_time_cmp(time, cur) > 0);
+}
+
+_ODP_INLINE void odp_time_wait_ns(uint64_t ns)
+{
+ odp_time_t cur = _odp_time_cur();
+ odp_time_t wait = _odp_time_from_ns(ns);
+ odp_time_t end_time = odp_time_sum(cur, wait);
+
+ odp_time_wait_until(end_time);
+}
+
+_ODP_INLINE void odp_time_startup(odp_time_startup_t *startup)
+{
+ _odp_time_startup(startup);
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/time_types.h b/platform/linux-generic/include/odp/api/plat/time_types.h
deleted file mode 100644
index 4847f3b1f..000000000
--- a/platform/linux-generic/include/odp/api/plat/time_types.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP time service
- */
-
-#ifndef ODP_TIME_TYPES_H_
-#define ODP_TIME_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odp_time
- * @{
- **/
-
-/**
- * @internal Time structure used to isolate odp-linux implementation from
- * the linux timespec structure, which is dependent on POSIX extension level.
- */
-typedef struct odp_time_t {
- int64_t tv_sec; /**< @internal Seconds */
- int64_t tv_nsec; /**< @internal Nanoseconds */
-} odp_time_t;
-
-#define ODP_TIME_NULL ((odp_time_t){0, 0})
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/timer_inline_types.h b/platform/linux-generic/include/odp/api/plat/timer_inline_types.h
new file mode 100644
index 000000000..330cbe4ce
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/timer_inline_types.h
@@ -0,0 +1,40 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_TIMER_INLINE_TYPES_H_
+#define ODP_PLAT_TIMER_INLINE_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+/* Timeout header field accessor */
+#define _odp_timeout_hdr_field(hdr, cast, field) \
+ (*(cast *)(uintptr_t)((uint8_t *)hdr + \
+ _odp_timeout_inline_offset.field))
+
+/* Timeout header field offsets for inline functions */
+typedef struct _odp_timeout_inline_offset_t {
+ uint16_t expiration;
+ uint16_t timer;
+ uint16_t user_ptr;
+ uint16_t uarea_addr;
+
+} _odp_timeout_inline_offset_t;
+
+extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset;
+
+/** @endcond */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/timer_inlines.h b/platform/linux-generic/include/odp/api/plat/timer_inlines.h
new file mode 100644
index 000000000..d2982079f
--- /dev/null
+++ b/platform/linux-generic/include/odp/api/plat/timer_inlines.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_TIMER_INLINES_H_
+#define ODP_PLAT_TIMER_INLINES_H_
+
+#include <odp/api/event.h>
+#include <odp/api/timer_types.h>
+
+#include <odp/api/abi/time_inlines.h>
+
+#include <odp/api/plat/debug_inlines.h>
+#include <odp/api/plat/timer_inline_types.h>
+
+#include <stdint.h>
+
+/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+
+#ifndef _ODP_NO_INLINE
+ /* Inline functions by default */
+ #define _ODP_INLINE static inline
+ #define odp_timeout_timer __odp_timeout_timer
+ #define odp_timeout_tick __odp_timeout_tick
+ #define odp_timeout_user_ptr __odp_timeout_user_ptr
+ #define odp_timeout_user_area __odp_timeout_user_area
+ #define odp_timer_current_tick __odp_timer_current_tick
+ #define odp_timer_tick_to_ns __odp_timer_tick_to_ns
+ #define odp_timer_ns_to_tick __odp_timer_ns_to_tick
+ #define odp_timeout_from_event __odp_timeout_from_event
+ #define odp_timeout_from_event_multi __odp_timeout_from_event_multi
+ #define odp_timeout_to_event __odp_timeout_to_event
+#else
+ #define _ODP_INLINE
+#endif
+
+_ODP_INLINE odp_timer_t odp_timeout_timer(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, odp_timer_t, timer);
+}
+
+_ODP_INLINE uint64_t odp_timeout_tick(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, uint64_t, expiration);
+}
+
+_ODP_INLINE void *odp_timeout_user_ptr(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, void *, user_ptr);
+}
+
+_ODP_INLINE void *odp_timeout_user_area(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, void *, uarea_addr);
+}
+
+_ODP_INLINE uint64_t odp_timer_current_tick(odp_timer_pool_t tpid)
+{
+ (void)tpid;
+
+ /* This is equal to odp_time_global_ns(). Cannot call inlined API function from here. */
+ return _odp_time_to_ns(_odp_time_cur());
+}
+
+_ODP_INLINE uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tp, uint64_t ticks)
+{
+ (void)tp;
+
+ /* Timer ticks in API are nsec */
+ return ticks;
+}
+
+_ODP_INLINE uint64_t odp_timer_ns_to_tick(odp_timer_pool_t tp, uint64_t ns)
+{
+ (void)tp;
+
+ /* Timer ticks in API are nsec */
+ return ns;
+}
+
+_ODP_INLINE odp_timeout_t odp_timeout_from_event(odp_event_t ev)
+{
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
+
+ return (odp_timeout_t)ev;
+}
+
+_ODP_INLINE void odp_timeout_from_event_multi(odp_timeout_t tmo[], const odp_event_t ev[], int num)
+{
+ for (int i = 0; i < num; i++) {
+ _ODP_ASSERT(odp_event_type(ev[i]) == ODP_EVENT_TIMEOUT);
+
+ tmo[i] = odp_timeout_from_event(ev[i]);
+ }
+}
+
+_ODP_INLINE odp_event_t odp_timeout_to_event(odp_timeout_t tmo)
+{
+ return (odp_event_t)tmo;
+}
+
+/** @endcond */
+
+#endif
diff --git a/platform/linux-generic/include/odp/api/plat/traffic_mngr_types.h b/platform/linux-generic/include/odp/api/plat/traffic_mngr_types.h
deleted file mode 100644
index b766afecd..000000000
--- a/platform/linux-generic/include/odp/api/plat/traffic_mngr_types.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP traffic mngr
- */
-
-#ifndef ODP_TRAFFIC_MNGR_TYPES_H_
-#define ODP_TRAFFIC_MNGR_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/strong_types.h>
-
-/** @addtogroup odp_traffic_mngr
- * Macros and operations on a TM system.
- * @{
- */
-
-/** The ODP_TM_MAX_NUM_SYSTEMS constant specifies the maximum number of TM
- * systems that may be created. On some platforms this might be much more
- * limited to as little as one hardware TM system.
- */
-#define ODP_TM_MAX_NUM_SYSTEMS 64
-
-/** The ODP_TM_MAX_PRIORITIES constant specifies the largest range of
- * priorities that any TM system can support. All strict priority values MUST
- * in the range 0..ODP_TM_MAX_PRIORITIES-1.
- */
-#define ODP_TM_MAX_PRIORITIES 16
-
-/** The ODP_TM MAX_LEVELS constant specifies the largest range of
- * tm_node levels that any TM system can support. Hence all tm_node level
- * values MUST be in the range 0..ODP_TM_MAX_LEVELS-1. Smaller tm_node
- * levels are associated with tm_nodes closer to the TM system egress.
- */
-#define ODP_TM_MAX_LEVELS 8
-
-/**
- * The smallest SCHED weight is 1 (i.e. 0 is not a legal WFQ/WRR value).
- */
-#define ODP_TM_MIN_SCHED_WEIGHT 1
-
-/** The ODP_TM_MAX_SCHED_WEIGHT constant is the largest weight any TM system
- * can support (at least from a configuration standpoint). A given TM system
- * could have a smaller value.
- */
-#define ODP_TM_MAX_SCHED_WEIGHT 255
-
-/** The ODP_TM_MAX_TM_QUEUES constant is the largest number of tm_queues
- * that can handled by any one TM system.
- */
-#define ODP_TM_MAX_TM_QUEUES (16 * 1024 * 1024)
-
-/** The ODP_TM_MAX_NUM_OUTPUTS constant is the largest number of outputs that
- * can be configured for any one TM system.
- */
-#define ODP_TM_MAX_NUM_OUTPUTS 256
-
-/** The ODP_TM_MAX_NUM_TM_NODES constant is the largest number of tm_nodes that
- * can be in existence for any one TM system.
- */
-#define ODP_TM_MAX_NUM_TM_NODES (1024 * 1024)
-
-/** The ODP_TM_MAX_TM_NODE_FANIN constant is the largest number of fan-in
- * "inputs" that can be simultaneously connected to a single tm_node.
- * *TBD* Does this need to be as large as ODP_TM_MAX_TM_QUEUES? *TBD*
- */
-#define ODP_TM_MAX_TM_NODE_FANIN (1024 * 1024)
-
-/** The ODP_TM_MIN_SHAPER_BW constant is the smallest amount of bandwidth that
- * can a shaper's peak or commit rate can be set to. It is in units of
- * 1000 bytes/second so that it and the ODP_TM_MAX_SHAPER_BW can both fit in
- * 32 bits.
- */
-#define ODP_TM_MIN_SHAPER_BW 1
-
-/** The ODP_TM_MAX_SHAPER_BW constant is the largest amound of bandwidth that
- * any shaper's peak or commit rate can be set to. It is in units of
- * 1000 bytes/second so that it and the ODP_TM_MIN_SHAPER_BW can both fit in
- * 32 bits.
- */
-#define ODP_TM_MAX_SHAPER_BW 12500000
-
-/** The ODP_NUM_SHAPER_COLORS constant just counts the number of enumeration
- * values defined in the odp_tm_shaper_color_t type.
- */
-#define ODP_NUM_SHAPER_COLORS 3
-
-/** The INVALID_PRIORITY constant is used when one needs to indicate an
- * invalid priority value.
- */
-#define ODP_TM_INVALID_PRIORITY 255
-
-/** The odp_tm_percent_t type is used when specifying fields that are
- * percentages. It is a fixed point integer whose units are 1/100 of a
- * percent. Hence 100% is represented as the integer value 10000. Note
- * that because it is often used as a ratio of the current queue value and
- * maximum queue threshold, it can be > 100%, but in any event will never
- * be larger than 500% (i.e. it MUST be capped at 50000).
- */
-typedef uint16_t odp_tm_percent_t;
-
-/** The odp_tm_handle_t type is a generic type that can stand for any of the
- * other ODP_TM handle types.
- */
-typedef uint64_t odp_tm_handle_t;
-
-/** Each odp_tm_t value represents a specific TM system. Almost all
- * functions in this API require a odp_tm_t value - either directly
- * as a function parameter or indirectly by having another ODP TM handle value
- * as a function parameter.
- */
-typedef odp_tm_handle_t odp_tm_t;
-
-/** Each odp_tm_queue_t value is an opaque ODP handle representing a specific
- * tm_queue within a specific TM system.
- */
-typedef odp_tm_handle_t odp_tm_queue_t;
-
-/** Each odp_tm_node_t value is an opaque ODP handle representing a specific
- * tm_node within a specific TM system.
- */
-typedef odp_tm_handle_t odp_tm_node_t;
-
-/** Each odp_tm_shaper_t value is an opaque ODP handle representing a specific
- * shaper profile usable across all TM systems described by this API. A given
- * shaper profile can then be attached to any tm_queue or tm_node.
- */
-typedef odp_tm_handle_t odp_tm_shaper_t;
-
-/** Each odp_tm_sched_t value is an opaque ODP handle representing a specific
- * tm_node scheduler profile usable across all TM systems described by this
- * API. A given tm_node scheduler profile can then be attached to any tm_node.
- */
-typedef odp_tm_handle_t odp_tm_sched_t;
-
-/** Each odp_tm_threshold_t value is an opaque ODP handle representing a
- * specific queue threshold profile usable across all TM systems described by
- * this API. A given queue threshold profile can then be attached to any
- * tm_queue or tm_node.
- */
-typedef odp_tm_handle_t odp_tm_threshold_t;
-
-/** Each odp_tm_wred_t value is an opaque ODP handle representing a specific
- * WRED profile usable across all TM systems described by this API. A given
- * WRED profile can then be attached to any tm_queue or tm_node.
- */
-typedef odp_tm_handle_t odp_tm_wred_t;
-
-/** The ODP_TM_INVALID constant can be used with any ODP TM handle type and
- * indicates that this value does NOT represent a valid TM object.
- */
-#define ODP_TM_INVALID 0
-
-/**
- * @def ODP_TM_ROOT
- * Constant that is used to refer to the egress/root node of the TM subsystem's
- * tree/hierarchy of nodes.
- */
-#define ODP_TM_ROOT ((odp_tm_handle_t)-1)
-
-/** Get printable format of odp_queue_t */
-static inline uint64_t odp_tm_handle_to_u64(odp_tm_handle_t hdl)
-{
- return hdl;
-}
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/plat/version_types.h b/platform/linux-generic/include/odp/api/plat/version_types.h
deleted file mode 100644
index e3327eb33..000000000
--- a/platform/linux-generic/include/odp/api/plat/version_types.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_VERSION_TYPESH_
-#define ODP_VERSION_TYPESH_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @internal Version string expand */
-#define ODP_VERSION_STR_EXPAND(x) #x
-
-/** @internal Version to string */
-#define ODP_VERSION_TO_STR(x) ODP_VERSION_STR_EXPAND(x)
-
-/** @internal API version string */
-#define ODP_VERSION_API_STR \
-ODP_VERSION_TO_STR(ODP_VERSION_API_GENERATION) "." \
-ODP_VERSION_TO_STR(ODP_VERSION_API_MAJOR) "." \
-ODP_VERSION_TO_STR(ODP_VERSION_API_MINOR)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/pool.h b/platform/linux-generic/include/odp/api/pool.h
deleted file mode 100644
index d712b6501..000000000
--- a/platform/linux-generic/include/odp/api/pool.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP pool
- */
-
-#ifndef ODP_PLAT_POOL_H_
-#define ODP_PLAT_POOL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @ingroup odp_buffer
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/plat/pool_types.h>
-#include <odp/api/plat/shared_memory_types.h>
-#include <odp/api/plat/event_types.h>
-#include <odp/api/spec/pool.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/queue.h b/platform/linux-generic/include/odp/api/queue.h
deleted file mode 100644
index adceafbd1..000000000
--- a/platform/linux-generic/include/odp/api/queue.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP queue
- */
-
-#ifndef ODP_PLAT_QUEUE_H_
-#define ODP_PLAT_QUEUE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/event_types.h>
-#include <odp/api/plat/queue_types.h>
-#include <odp/api/plat/buffer_types.h>
-#include <odp/api/plat/pool_types.h>
-
-/** @ingroup odp_queue
- * @{
- */
-
-/* REMOVE FROM API SPEC. Typedef needed only for suppressing Doxygen
- * warning. */
-typedef void odp_queue_group_t;
-
-/**
- * @}
- */
-
-#include <odp/api/spec/queue.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/random.h b/platform/linux-generic/include/odp/api/random.h
deleted file mode 100644
index c8529b3f9..000000000
--- a/platform/linux-generic/include/odp/api/random.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP random number API
- */
-
-#ifndef ODP_PLAT_RANDOM_H_
-#define ODP_PLAT_RANDOM_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @ingroup odp_random ODP RANDOM
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/random.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/rwlock.h b/platform/linux-generic/include/odp/api/rwlock.h
deleted file mode 100644
index 4a86173f5..000000000
--- a/platform/linux-generic/include/odp/api/rwlock.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP RW Locks
- */
-
-#ifndef ODP_PLAT_RWLOCK_H_
-#define ODP_PLAT_RWLOCK_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/rwlock_types.h>
-
-#include <odp/api/spec/rwlock.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* ODP_RWLOCK_H_ */
diff --git a/platform/linux-generic/include/odp/api/rwlock_recursive.h b/platform/linux-generic/include/odp/api/rwlock_recursive.h
deleted file mode 100644
index 4a081532d..000000000
--- a/platform/linux-generic/include/odp/api/rwlock_recursive.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP resursive read/write lock
- */
-
-#ifndef ODP_PLAT_RWLOCK_RECURSIVE_H_
-#define ODP_PLAT_RWLOCK_RECURSIVE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/rwlock_recursive_types.h>
-
-#include <odp/api/spec/rwlock_recursive.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/schedule.h b/platform/linux-generic/include/odp/api/schedule.h
deleted file mode 100644
index 002648517..000000000
--- a/platform/linux-generic/include/odp/api/schedule.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP schedule
- */
-
-#ifndef ODP_PLAT_SCHEDULE_H_
-#define ODP_PLAT_SCHEDULE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/schedule_types.h>
-
-/** @ingroup odp_scheduler
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/schedule.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/schedule_types.h b/platform/linux-generic/include/odp/api/schedule_types.h
deleted file mode 100644
index 536007d66..000000000
--- a/platform/linux-generic/include/odp/api/schedule_types.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP schedule
- */
-
-#ifndef ODP_PLAT_SCHEDULE_TYPES_H_
-#define ODP_PLAT_SCHEDULE_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/schedule_types.h>
-
-#include <odp/api/spec/schedule_types.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/shared_memory.h b/platform/linux-generic/include/odp/api/shared_memory.h
deleted file mode 100644
index affc290e7..000000000
--- a/platform/linux-generic/include/odp/api/shared_memory.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2013-2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP shared memory
- */
-
-#ifndef ODP_PLAT_SHARED_MEMORY_H_
-#define ODP_PLAT_SHARED_MEMORY_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/shared_memory_types.h>
-
-/** @ingroup odp_shared_memory
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/shared_memory.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/spinlock.h b/platform/linux-generic/include/odp/api/spinlock.h
deleted file mode 100644
index 830f4edd2..000000000
--- a/platform/linux-generic/include/odp/api/spinlock.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP spinlock
- */
-
-#ifndef ODP_PLAT_SPINLOCK_H_
-#define ODP_PLAT_SPINLOCK_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/spinlock_types.h>
-
-#include <odp/api/spec/spinlock.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/spinlock_recursive.h b/platform/linux-generic/include/odp/api/spinlock_recursive.h
deleted file mode 100644
index d97b0173a..000000000
--- a/platform/linux-generic/include/odp/api/spinlock_recursive.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP resursive spinlock
- */
-
-#ifndef ODP_PLAT_SPINLOCK_RECURSIVE_H_
-#define ODP_PLAT_SPINLOCK_RECURSIVE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/spinlock_recursive_types.h>
-
-#include <odp/api/spec/spinlock_recursive.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/std_clib.h b/platform/linux-generic/include/odp/api/std_clib.h
deleted file mode 100644
index fea472543..000000000
--- a/platform/linux-generic/include/odp/api/std_clib.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PLAT_STD_CLIB_H_
-#define ODP_PLAT_STD_CLIB_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/spec/std_types.h>
-#include <string.h>
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/std_clib_inlines.h>
-#endif
-
-#include <odp/api/spec/std_clib.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/std_types.h b/platform/linux-generic/include/odp/api/std_types.h
deleted file mode 100644
index b61f33f4c..000000000
--- a/platform/linux-generic/include/odp/api/std_types.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Standard C language types and definitions for ODP.
- */
-
-#ifndef ODP_PLAT_STD_TYPES_H_
-#define ODP_PLAT_STD_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* uint64_t, uint32_t, etc */
-#include <stdint.h>
-
-/* true and false for odp_bool_t */
-#include <stdbool.h>
-
-/** @addtogroup odp_system ODP SYSTEM
- * @{
- */
-
-typedef int odp_bool_t;
-
-/**
- * @}
- */
-
-#include <odp/api/spec/std_types.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/sync.h b/platform/linux-generic/include/odp/api/sync.h
deleted file mode 100644
index e1afcc722..000000000
--- a/platform/linux-generic/include/odp/api/sync.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP synchronisation
- */
-
-#ifndef ODP_PLAT_SYNC_H_
-#define ODP_PLAT_SYNC_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @ingroup odp_barrier
- * @{
- */
-
-#include <odp/api/plat/static_inline.h>
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/sync_inlines.h>
-#endif
-
-/**
- * @}
- */
-
-#include <odp/api/spec/sync.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/system_info.h b/platform/linux-generic/include/odp/api/system_info.h
deleted file mode 100644
index 36ddc814b..000000000
--- a/platform/linux-generic/include/odp/api/system_info.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP system information
- */
-
-#ifndef ODP_PLAT_SYSTEM_INFO_H_
-#define ODP_PLAT_SYSTEM_INFO_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-
-
-#include <odp/api/spec/system_info.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/thread.h b/platform/linux-generic/include/odp/api/thread.h
deleted file mode 100644
index c54abc890..000000000
--- a/platform/linux-generic/include/odp/api/thread.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP thread API
- */
-
-#ifndef ODP_PLAT_THREAD_H_
-#define ODP_PLAT_THREAD_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/thread_types.h>
-
-#include <odp/api/spec/thread.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/thrmask.h b/platform/linux-generic/include/odp/api/thrmask.h
deleted file mode 100644
index b1c207775..000000000
--- a/platform/linux-generic/include/odp/api/thrmask.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP thread masks
- */
-
-#ifndef ODP_PLAT_THRMASK_H_
-#define ODP_PLAT_THRMASK_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/thrmask_types.h>
-
-/** @ingroup odp_thread
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/thrmask.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/ticketlock.h b/platform/linux-generic/include/odp/api/ticketlock.h
deleted file mode 100644
index e0f5d81fd..000000000
--- a/platform/linux-generic/include/odp/api/ticketlock.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP ticketlock
- */
-
-#ifndef ODP_PLAT_TICKETLOCK_H_
-#define ODP_PLAT_TICKETLOCK_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/ticketlock_types.h>
-
-#if ODP_ABI_COMPAT == 0
-#include <odp/api/plat/ticketlock_inlines.h>
-#endif
-
-#include <odp/api/spec/ticketlock.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/time.h b/platform/linux-generic/include/odp/api/time.h
deleted file mode 100644
index 8d1c33e68..000000000
--- a/platform/linux-generic/include/odp/api/time.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP time
- */
-
-#ifndef ODP_PLAT_TIME_H_
-#define ODP_PLAT_TIME_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-
-
-
-#include <odp/api/plat/time_types.h>
-#include <odp/api/spec/time.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/timer.h b/platform/linux-generic/include/odp/api/timer.h
deleted file mode 100644
index 1450727f8..000000000
--- a/platform/linux-generic/include/odp/api/timer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP timer service
- */
-
-#ifndef ODP_PLAT_TIMER_H_
-#define ODP_PLAT_TIMER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/plat/event_types.h>
-#include <odp/api/plat/pool_types.h>
-#include <odp/api/plat/queue_types.h>
-#include <odp/api/plat/timer_types.h>
-
-/** @ingroup odp_timer
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/spec/timer.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/traffic_mngr.h b/platform/linux-generic/include/odp/api/traffic_mngr.h
deleted file mode 100644
index 3e6f5fbbe..000000000
--- a/platform/linux-generic/include/odp/api/traffic_mngr.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP Traffic manager
- */
-
-#ifndef ODP_PLAT_TRAFFIC_MNGR_H_
-#define ODP_PLAT_TRAFFIC_MNGR_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @ingroup odp_traffic_mngr
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/api/plat/traffic_mngr_types.h>
-#include <odp/api/spec/traffic_mngr.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/api/version.h b/platform/linux-generic/include/odp/api/version.h
deleted file mode 100644
index fc4ea5865..000000000
--- a/platform/linux-generic/include/odp/api/version.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP version
- */
-
-#ifndef ODP_PLAT_VERSION_H_
-#define ODP_PLAT_VERSION_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/plat/version_types.h>
-#include <odp/api/spec/version.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/drv/README b/platform/linux-generic/include/odp/drv/README
deleted file mode 100644
index fd38e8e57..000000000
--- a/platform/linux-generic/include/odp/drv/README
+++ /dev/null
@@ -1,2 +0,0 @@
-This directory contains the files defining the ODP driver interface,
-for linux-generic.
diff --git a/platform/linux-generic/include/odp/drv/compiler.h b/platform/linux-generic/include/odp/drv/compiler.h
deleted file mode 100644
index 24e84c554..000000000
--- a/platform/linux-generic/include/odp/drv/compiler.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Compiler related
- */
-
-#ifndef ODPDRV_PLAT_COMPILER_H_
-#define ODPDRV_PLAT_COMPILER_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/** @addtogroup odpdrv_compiler_optim ODPDRV COMPILER / OPTIMIZATION
- * @{
- */
-
-/**
- * @}
- */
-
-#include <odp/drv/spec/compiler.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/drv/std_types.h b/platform/linux-generic/include/odp/drv/std_types.h
deleted file mode 100644
index 4fe4affda..000000000
--- a/platform/linux-generic/include/odp/drv/std_types.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Standard C language types and definitions for ODP driver interface.
- */
-
-#ifndef ODPDRV_PLAT_STD_TYPES_H_
-#define ODPDRV_PLAT_STD_TYPES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <inttypes.h>
-#include <limits.h>
-
-/** @addtogroup odpdrv_system ODPDRV SYSTEM
- * @{
- */
-
-typedef int odpdrv_bool_t;
-
-/**
- * @}
- */
-
-#include <odp/drv/spec/std_types.h>
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp/visibility_begin.h b/platform/linux-generic/include/odp/visibility_begin.h
deleted file mode 100644
index 1bbb43def..000000000
--- a/platform/linux-generic/include/odp/visibility_begin.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*
- * @file
- *
- * Linker visibility directives
- */
-
-#if __GNUC__ >= 4
-#pragma GCC visibility push(default)
-#endif
diff --git a/platform/linux-generic/include/odp/visibility_end.h b/platform/linux-generic/include/odp/visibility_end.h
deleted file mode 100644
index 748af5103..000000000
--- a/platform/linux-generic/include/odp/visibility_end.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*
- * @file
- *
- * Linker visibility directives
- */
-
-#if __GNUC__ >= 4
-#pragma GCC visibility pop
-#endif
diff --git a/platform/linux-generic/include/odp_align_internal.h b/platform/linux-generic/include/odp_align_internal.h
deleted file mode 100644
index 61ff200c5..000000000
--- a/platform/linux-generic/include/odp_align_internal.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP internal alignments
- */
-
-#ifndef ODP_ALIGN_INTERNAL_H_
-#define ODP_ALIGN_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/align.h>
-#include <stdint.h>
-
-/* Macros to calculate ODP_ROUNDUP_POWER2_U32() in five rounds of shift
- * and OR operations. */
-#define _RSHIFT_U32(x, y) (((uint32_t)(x)) >> (y))
-#define _POW2_U32_R1(x) (((uint32_t)(x)) | _RSHIFT_U32(x, 1))
-#define _POW2_U32_R2(x) (_POW2_U32_R1(x) | _RSHIFT_U32(_POW2_U32_R1(x), 2))
-#define _POW2_U32_R3(x) (_POW2_U32_R2(x) | _RSHIFT_U32(_POW2_U32_R2(x), 4))
-#define _POW2_U32_R4(x) (_POW2_U32_R3(x) | _RSHIFT_U32(_POW2_U32_R3(x), 8))
-#define _POW2_U32_R5(x) (_POW2_U32_R4(x) | _RSHIFT_U32(_POW2_U32_R4(x), 16))
-
-/* Round up a uint32_t value 'x' to the next power of two.
- *
- * The value is not round up, if it's already a power of two (including 1).
- * The value must be larger than 0 and not exceed 0x80000000.
- */
-#define ROUNDUP_POWER2_U32(x) \
- ((((uint32_t)(x)) > 0x80000000) ? 0 : (_POW2_U32_R5(x - 1) + 1))
-
-/*
- * Round up 'x' to alignment 'align'
- */
-#define ROUNDUP_ALIGN(x, align)\
- ((align) * (((x) + (align) - 1) / (align)))
-
-/*
- * Round up 'x' to cache line size alignment
- */
-#define ROUNDUP_CACHE_LINE(x)\
- ROUNDUP_ALIGN(x, ODP_CACHE_LINE_SIZE)
-
-/*
- * Round down 'x' to 'align' alignment, which is a power of two
- */
-#define ROUNDDOWN_POWER2(x, align)\
- ((x) & (~((align) - 1)))
-
-/*
- * Check if value is a power of two
- */
-#define CHECK_IS_POWER2(x) ((((x) - 1) & (x)) == 0)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_atomic_internal.h b/platform/linux-generic/include/odp_atomic_internal.h
index dca2175ce..6de8cd485 100644
--- a/platform/linux-generic/include/odp_atomic_internal.h
+++ b/platform/linux-generic/include/odp_atomic_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -20,6 +20,7 @@
#include <odp/api/align.h>
#include <odp/api/hints.h>
#include <odp/api/atomic.h>
+#include <odp_types_internal.h>
#include <stdbool.h>
#ifdef __cplusplus
@@ -27,14 +28,6 @@ extern "C" {
#endif
/**
- * Pointer atomic type
- */
-typedef struct {
- void *v; /**< Actual storage for the atomic variable */
-} _odp_atomic_ptr_t
-ODP_ALIGNED(sizeof(void *)); /* Enforce alignement! */
-
-/**
* Atomic flag (boolean) type
* @Note this is not the same as a plain boolean type.
* _odp_atomic_flag_t is guaranteed to be able to operate on atomically.
@@ -59,469 +52,9 @@ typedef enum {
_ODP_MEMMODEL_RLS = __ATOMIC_RELEASE,
/** Acquire&release memory ordering, synchronize with acquire loads and release
* stores in another (one other) thread */
- _ODP_MEMMODEL_ACQ_RLS = __ATOMIC_ACQ_REL,
-/** Sequential consistent memory ordering, synchronize with acquire loads and
- * release stores in all threads */
- _ODP_MEMMODEL_SC = __ATOMIC_SEQ_CST
-} _odp_memmodel_t;
-
-/*****************************************************************************
- * Operations on 32-bit atomics
- * _odp_atomic_u32_load_mm - return current value
- * _odp_atomic_u32_store_mm - no return value
- * _odp_atomic_u32_xchg_mm - return old value
- * _odp_atomic_u32_cmp_xchg_strong_mm - return bool
- * _odp_atomic_u32_fetch_add_mm - return old value
- * _odp_atomic_u32_add_mm - no return value
- * _odp_atomic_u32_fetch_sub_mm - return old value
- * _odp_atomic_u32_sub_mm - no return value
- *****************************************************************************/
-
-/**
- * Atomic load of 32-bit atomic variable
- *
- * @param atom Pointer to a 32-bit atomic variable
- * @param mmodel Memory ordering associated with the load operation
- *
- * @return Value of the variable
- */
-static inline uint32_t _odp_atomic_u32_load_mm(const odp_atomic_u32_t *atom,
- _odp_memmodel_t mmodel)
-{
- return __atomic_load_n(&atom->v, mmodel);
-}
-
-/**
- * Atomic store to 32-bit atomic variable
- *
- * @param[out] atom Pointer to a 32-bit atomic variable
- * @param val Value to store in the atomic variable
- * @param mmodel Memory order associated with the store operation
- */
-static inline void _odp_atomic_u32_store_mm(odp_atomic_u32_t *atom,
- uint32_t val,
- _odp_memmodel_t mmodel)
-{
- __atomic_store_n(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic exchange (swap) of 32-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 32-bit atomic variable
- * @param val New value to store in the atomic variable
- * @param mmodel Memory order associated with the exchange operation
- *
- * @return Old value of the variable
- */
-static inline uint32_t _odp_atomic_u32_xchg_mm(odp_atomic_u32_t *atom,
- uint32_t val,
- _odp_memmodel_t mmodel)
-
-{
- return __atomic_exchange_n(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic compare and exchange (swap) of 32-bit atomic variable
- * "Strong" semantics, will not fail spuriously.
- *
- * @param[in,out] atom Pointer to a 32-bit atomic variable
- * @param[in,out] exp Pointer to expected value (updated on failure)
- * @param val New value to write
- * @param success Memory order associated with a successful compare-and-swap
- * operation
- * @param failure Memory order associated with a failed compare-and-swap
- * operation
- *
- * @retval 1 exchange successul
- * @retval 0 exchange failed and '*exp' updated with current value
- */
-static inline int _odp_atomic_u32_cmp_xchg_strong_mm(
- odp_atomic_u32_t *atom,
- uint32_t *exp,
- uint32_t val,
- _odp_memmodel_t success,
- _odp_memmodel_t failure)
-{
- return __atomic_compare_exchange_n(&atom->v, exp, val,
- false/*strong*/, success, failure);
-}
-
-/**
- * Atomic fetch and add of 32-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 32-bit atomic variable
- * @param val Value to add to the atomic variable
- * @param mmodel Memory order associated with the add operation
- *
- * @return Value of the atomic variable before the addition
- */
-static inline uint32_t _odp_atomic_u32_fetch_add_mm(odp_atomic_u32_t *atom,
- uint32_t val,
- _odp_memmodel_t mmodel)
-{
- return __atomic_fetch_add(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic add of 32-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 32-bit atomic variable
- * @param val Value to add to the atomic variable
- * @param mmodel Memory order associated with the add operation
- */
-static inline void _odp_atomic_u32_add_mm(odp_atomic_u32_t *atom,
- uint32_t val,
- _odp_memmodel_t mmodel)
+ _ODP_MEMMODEL_ACQ_RLS = __ATOMIC_ACQ_REL
-{
- (void)__atomic_fetch_add(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic fetch and subtract of 32-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 32-bit atomic variable
- * @param val Value to subtract from the atomic variable
- * @param mmodel Memory order associated with the subtract operation
- *
- * @return Value of the atomic variable before the subtraction
- */
-static inline uint32_t _odp_atomic_u32_fetch_sub_mm(odp_atomic_u32_t *atom,
- uint32_t val,
- _odp_memmodel_t mmodel)
-{
- return __atomic_fetch_sub(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic subtract of 32-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 32-bit atomic variable
- * @param val Value to subtract from the atomic variable
- * @param mmodel Memory order associated with the subtract operation
- */
-static inline void _odp_atomic_u32_sub_mm(odp_atomic_u32_t *atom,
- uint32_t val,
- _odp_memmodel_t mmodel)
-
-{
- (void)__atomic_fetch_sub(&atom->v, val, mmodel);
-}
-
-/*****************************************************************************
- * Operations on 64-bit atomics
- * _odp_atomic_u64_load_mm - return current value
- * _odp_atomic_u64_store_mm - no return value
- * _odp_atomic_u64_xchg_mm - return old value
- * _odp_atomic_u64_cmp_xchg_strong_mm - return bool
- * _odp_atomic_u64_fetch_add_mm - return old value
- * _odp_atomic_u64_add_mm - no return value
- * _odp_atomic_u64_fetch_sub_mm - return old value
- * _odp_atomic_u64_sub_mm - no return value
- *****************************************************************************/
-
-/* Check if the compiler support lock-less atomic operations on 64-bit types */
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
-/**
- * @internal
- * Helper macro for lock-based atomic operations on 64-bit integers
- * @param[in,out] atom Pointer to the 64-bit atomic variable
- * @param expr Expression used update the variable.
- * @param mm Memory order to use.
- * @return The old value of the variable.
- */
-#define ATOMIC_OP_MM(atom, expr, mm) \
-({ \
- uint64_t old_val; \
- /* Loop while lock is already taken, stop when lock becomes clear */ \
- while (__atomic_test_and_set(&(atom)->lock, \
- (mm) == _ODP_MEMMODEL_SC ? \
- __ATOMIC_SEQ_CST : __ATOMIC_ACQUIRE)) \
- (void)0; \
- old_val = (atom)->v; \
- (expr); /* Perform whatever update is desired */ \
- __atomic_clear(&(atom)->lock, \
- (mm) == _ODP_MEMMODEL_SC ? \
- __ATOMIC_SEQ_CST : __ATOMIC_RELEASE); \
- old_val; /* Return old value */ \
-})
-#endif
-
-/**
- * Atomic load of 64-bit atomic variable
- *
- * @param atom Pointer to a 64-bit atomic variable
- * @param mmodel Memory order associated with the load operation
- *
- * @return Value of the variable
- */
-static inline uint64_t _odp_atomic_u64_load_mm(odp_atomic_u64_t *atom,
- _odp_memmodel_t mmodel)
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- return ATOMIC_OP_MM(atom, (void)0, mmodel);
-#else
- return __atomic_load_n(&atom->v, mmodel);
-#endif
-}
-
-/**
- * Atomic store to 64-bit atomic variable
- *
- * @param[out] atom Pointer to a 64-bit atomic variable
- * @param val Value to write to the atomic variable
- * @param mmodel Memory order associated with the store operation
- */
-static inline void _odp_atomic_u64_store_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP_MM(atom, atom->v = val, mmodel);
-#else
- __atomic_store_n(&atom->v, val, mmodel);
-#endif
-}
-
-/**
- * Atomic exchange (swap) of 64-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 64-bit atomic variable
- * @param val New value to write to the atomic variable
- * @param mmodel Memory order associated with the exchange operation
- *
- * @return Old value of variable
- */
-static inline uint64_t _odp_atomic_u64_xchg_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
-
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- return ATOMIC_OP_MM(atom, atom->v = val, mmodel);
-#else
- return __atomic_exchange_n(&atom->v, val, mmodel);
-#endif
-}
-
-/**
- * Atomic compare and exchange (swap) of 64-bit atomic variable
- * "Strong" semantics, will not fail spuriously.
- *
- * @param[in,out] atom Pointer to a 64-bit atomic variable
- * @param[in,out] exp Pointer to expected value (updated on failure)
- * @param val New value to write
- * @param success Memory order associated with a successful compare-and-swap
- * operation
- * @param failure Memory order associated with a failed compare-and-swap
- * operation
- *
- * @retval 1 exchange successful
- * @retval 0 exchange failed and '*exp' updated with current value
- */
-static inline int _odp_atomic_u64_cmp_xchg_strong_mm(odp_atomic_u64_t *atom,
- uint64_t *exp,
- uint64_t val,
- _odp_memmodel_t success,
- _odp_memmodel_t failure)
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- /* Possibly we are a bit pessimistic with the memory models */
- odp_bool_t ret_succ;
- /* Loop while lock is already taken, stop when lock becomes clear */
- while (__atomic_test_and_set(&(atom)->lock,
- (success) == _ODP_MEMMODEL_SC ?
- __ATOMIC_SEQ_CST : __ATOMIC_ACQUIRE))
- (void)0;
- if (atom->v == *exp) {
- atom->v = val;
- ret_succ = 1;
- } else {
- *exp = atom->v;
- ret_succ = 0;
- }
- __atomic_clear(&(atom)->lock,
- (ret_succ ? success : failure) == _ODP_MEMMODEL_SC ?
- __ATOMIC_SEQ_CST : __ATOMIC_RELEASE);
- return ret_succ;
-#else
- return __atomic_compare_exchange_n(&atom->v, exp, val,
- false/*strong*/, success, failure);
-#endif
-}
-
-/**
- * Atomic fetch and add of 64-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 64-bit atomic variable
- * @param val Value to add to the atomic variable
- * @param mmodel Memory order associated with the add operation
- *
- * @return Value of the atomic variable before the addition
- */
-static inline uint64_t _odp_atomic_u64_fetch_add_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- return ATOMIC_OP_MM(atom, atom->v += val, mmodel);
-#else
- return __atomic_fetch_add(&atom->v, val, mmodel);
-#endif
-}
-
-/**
- * Atomic add of 64-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 64-bit atomic variable
- * @param val Value to add to the atomic variable
- * @param mmodel Memory order associated with the add operation.
- */
-static inline void _odp_atomic_u64_add_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
-
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP_MM(atom, atom->v += val, mmodel);
-#else
- (void)__atomic_fetch_add(&atom->v, val, mmodel);
-#endif
-}
-
-/**
- * Atomic fetch and subtract of 64-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 64-bit atomic variable
- * @param val Value to subtract from the atomic variable
- * @param mmodel Memory order associated with the subtract operation
- *
- * @return Value of the atomic variable before the subtraction
- */
-static inline uint64_t _odp_atomic_u64_fetch_sub_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- return ATOMIC_OP_MM(atom, atom->v -= val, mmodel);
-#else
- return __atomic_fetch_sub(&atom->v, val, mmodel);
-#endif
-}
-
-/**
- * Atomic subtract of 64-bit atomic variable
- *
- * @param[in,out] atom Pointer to a 64-bit atomic variable
- * @param val Value to subtract from the atomic variable
- * @param mmodel Memory order associated with the subtract operation
- */
-static inline void _odp_atomic_u64_sub_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
-
-{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP_MM(atom, atom->v -= val, mmodel);
-#else
- (void)__atomic_fetch_sub(&atom->v, val, mmodel);
-#endif
-}
-
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
-#undef ATOMIC_OP_MM
-#endif
-
-/*****************************************************************************
- * Operations on pointer atomics
- * _odp_atomic_ptr_init - no return value
- * _odp_atomic_ptr_load - return current value
- * _odp_atomic_ptr_store - no return value
- * _odp_atomic_ptr_xchg - return old value
- *****************************************************************************/
-
-/**
- * Initialization of pointer atomic variable
- *
- * @param[out] atom Pointer to a pointer atomic variable
- * @param val Value to initialize the variable with
- */
-static inline void _odp_atomic_ptr_init(_odp_atomic_ptr_t *atom, void *val)
-{
- __atomic_store_n(&atom->v, val, __ATOMIC_RELAXED);
-}
-
-/**
- * Atomic load of pointer atomic variable
- *
- * @param atom Pointer to a pointer atomic variable
- * @param mmodel Memory order associated with the load operation
- *
- * @return Value of the variable
- */
-static inline void *_odp_atomic_ptr_load(const _odp_atomic_ptr_t *atom,
- _odp_memmodel_t mmodel)
-{
- return __atomic_load_n(&atom->v, mmodel);
-}
-
-/**
- * Atomic store to pointer atomic variable
- *
- * @param[out] atom Pointer to a pointer atomic variable
- * @param val Value to write to the atomic variable
- * @param mmodel Memory order associated with the store operation
- */
-static inline void _odp_atomic_ptr_store(_odp_atomic_ptr_t *atom,
- void *val,
- _odp_memmodel_t mmodel)
-{
- __atomic_store_n(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic exchange (swap) of pointer atomic variable
- *
- * @param[in,out] atom Pointer to a pointer atomic variable
- * @param val New value to write
- * @param mmodel Memory order associated with the exchange operation
- *
- * @return Old value of variable
- */
-static inline void *_odp_atomic_ptr_xchg(_odp_atomic_ptr_t *atom,
- void *val,
- _odp_memmodel_t mmodel)
-{
- return __atomic_exchange_n(&atom->v, val, mmodel);
-}
-
-/**
- * Atomic compare and exchange (swap) of pointer atomic variable
- * "Strong" semantics, will not fail spuriously.
- *
- * @param[in,out] atom Pointer to a pointer atomic variable
- * @param[in,out] exp Pointer to expected value (updated on failure)
- * @param val New value to write
- * @param success Memory order associated with a successful compare-and-swap
- * operation
- * @param failure Memory order associated with a failed compare-and-swap
- * operation
- *
- * @retval 1 exchange successful
- * @retval 0 exchange failed and '*exp' updated with current value
- */
-static inline int _odp_atomic_ptr_cmp_xchg_strong(
- _odp_atomic_ptr_t *atom,
- void **exp,
- void *val,
- _odp_memmodel_t success,
- _odp_memmodel_t failure)
-{
- return __atomic_compare_exchange_n(&atom->v, exp, val,
- false/*strong*/, success, failure);
-}
+} _odp_memmodel_t;
/*****************************************************************************
* Operations on flag atomics
@@ -541,7 +74,7 @@ static inline int _odp_atomic_ptr_cmp_xchg_strong(
* @param val The initial value of the variable
*/
static inline void _odp_atomic_flag_init(_odp_atomic_flag_t *flag,
- odp_bool_t val)
+ odp_bool_t val)
{
__atomic_clear(flag, __ATOMIC_RELAXED);
if (val)
@@ -605,13 +138,11 @@ static inline void _odp_atomic_flag_clear(_odp_atomic_flag_t *flag)
#endif
#ifdef ODP_ATOMIC_U128
-/** An unsigned 128-bit (16-byte) scalar type */
-typedef __int128 _uint128_t;
/** Atomic 128-bit type */
-typedef struct {
- _uint128_t v; /**< Actual storage for the atomic variable */
-} _odp_atomic_u128_t ODP_ALIGNED(16);
+typedef struct ODP_ALIGNED(16) {
+ _odp_u128_t v; /**< Actual storage for the atomic variable */
+} _odp_atomic_u128_t;
/**
* 16-byte atomic exchange operation
@@ -622,37 +153,12 @@ typedef struct {
* @param mmodel Memory model associated with the exchange operation
*/
static inline void _odp_atomic_u128_xchg_mm(_odp_atomic_u128_t *ptr,
- _uint128_t *val,
- _uint128_t *old,
+ _odp_u128_t *val,
+ _odp_u128_t *old,
_odp_memmodel_t mm)
{
__atomic_exchange(&ptr->v, val, old, mm);
}
-
-/**
- * Atomic compare and exchange (swap) of 16-byte atomic variable
- * "Strong" semantics, will not fail spuriously.
- *
- * @param ptr Pointer to a 16-byte atomic variable
- * @param exp Pointer to expected value (updated on failure)
- * @param val Pointer to new value to write
- * @param succ Memory model associated with a successful compare-and-swap
- * operation
- * @param fail Memory model associated with a failed compare-and-swap
- * operation
- *
- * @retval 1 exchange successul
- * @retval 0 exchange failed and '*exp' updated with current value
- */
-static inline int _odp_atomic_u128_cmp_xchg_mm(_odp_atomic_u128_t *ptr,
- _uint128_t *exp,
- _uint128_t *val,
- _odp_memmodel_t succ,
- _odp_memmodel_t fail)
-{
- return __atomic_compare_exchange(&ptr->v, exp, val,
- false/*strong*/, succ, fail);
-}
#endif
/**
diff --git a/platform/linux-generic/include/odp_bitmap_internal.h b/platform/linux-generic/include/odp_bitmap_internal.h
deleted file mode 100644
index 1be4d0287..000000000
--- a/platform/linux-generic/include/odp_bitmap_internal.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP generic bitmap types and operations.
- */
-
-#ifndef ODP_BITMAP_INTERNAL_H_
-#define ODP_BITMAP_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-#include <stdbool.h>
-#include <string.h>
-#include <odp/api/hints.h>
-
-/* Generate unique identifier for instantiated class */
-#define TOKENIZE(template, line) \
- template ## _ ## line ## _ ## __COUNTER__
-
-/* Array size in general */
-#define ARRAY_SIZE(array) (sizeof(array) / sizeof(array[0]))
-
-#define BITS_PER_BYTE (8)
-#define BITS_PER_LONG __WORDSIZE
-#define BYTES_PER_LONG (BITS_PER_LONG / BITS_PER_BYTE)
-
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BITS_TO_LONGS(nr) BIT_WORD(nr + BITS_PER_LONG - 1)
-
-#define BITMAP_FIRST_WORD_MASK(start) \
- (~0UL << ((start) & (BITS_PER_LONG - 1)))
-#define BITMAP_LAST_WORD_MASK(nbits) \
- (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
-
-/* WAPL bitmap base class */
-typedef struct {
- unsigned int nwords;
- unsigned int *pl;
- unsigned long *ul;
-} wapl_bitmap_t;
-
-/*
- * Word-Aligned Position List (WAPL) bitmap, which actually
- * is not a compression, but with an extra list of non-empty
- * word positions.
- *
- * WAPL accelerates bitwise operations and iterations by
- * applying only to non-empty positions instead of walking
- * through the whole bitmap.
- *
- * WAPL uses [1 ~ N] instead of [0 ~ N - 1] as position
- * values and an extra 0 as end indicator for position list.
- * This is the reason to allocate one extra room below.
- */
-#define instantiate_wapl_bitmap(line, nbits) \
- struct TOKENIZE(wapl_bitmap, line) { \
- unsigned int pl[BITS_TO_LONGS(nbits) + 1]; \
- unsigned long ul[BITS_TO_LONGS(nbits) + 1]; \
- }
-
-#define WAPL_BITMAP(nbits) instantiate_wapl_bitmap(__LINE__, nbits)
-
-/*
- * Upcast any derived WAPL bitmap class to its base class
- */
-#define __wapl_upcast(base, derived) \
- do { \
- __typeof__(derived) p = derived; \
- base.pl = p->pl; \
- base.ul = p->ul; \
- base.nwords = ARRAY_SIZE(p->ul) - 1; \
- } while (0)
-
-/*
- * WAPL base class bitmap operations
- */
-void __wapl_bitmap_and(wapl_bitmap_t *dst,
- wapl_bitmap_t *src, wapl_bitmap_t *and);
-
-void __wapl_bitmap_or(wapl_bitmap_t *dst, wapl_bitmap_t *or);
-
-void __wapl_bitmap_set(wapl_bitmap_t *map, unsigned int bit);
-
-void __wapl_bitmap_clear(wapl_bitmap_t *map, unsigned int bit);
-
-/*
- * Generic WAPL bitmap operations
- */
-#define wapl_bitmap_zero(map) \
- ({ \
- __typeof__(map) p = map; \
- memset((void *)p, 0, sizeof(__typeof__(*p))); \
- })
-
-#define wapl_bitmap_copy(dst, src) \
- ({ \
- __typeof__(dst) d = dst; \
- __typeof__(src) s = src; \
- if (d != s) \
- memcpy((void *)d, (void *)s, \
- sizeof(__typeof__(*d))); \
- })
-
-#define wapl_bitmap_and(dst, src, and) \
- ({ \
- wapl_bitmap_t d, s, a; \
- __wapl_upcast(d, dst); \
- __wapl_upcast(s, src); \
- __wapl_upcast(a, and); \
- __wapl_bitmap_and(&d, &s, &a); \
- })
-
-#define wapl_bitmap_or(dst, src, or) \
- ({ \
- wapl_bitmap_t d, o; \
- wapl_bitmap_copy(dst, src); \
- __wapl_upcast(d, dst); \
- __wapl_upcast(o, or); \
- __wapl_bitmap_or(&d, &o); \
- })
-
-#define wapl_bitmap_set(map, bit) \
- ({ \
- wapl_bitmap_t b; \
- __wapl_upcast(b, map); \
- __wapl_bitmap_set(&b, bit); \
- })
-
-#define wapl_bitmap_clear(map, bit) \
- ({ \
- wapl_bitmap_t b; \
- __wapl_upcast(b, map); \
- __wapl_bitmap_clear(&b, bit); \
- })
-
-/*
- * Round robin iterator runs upon a WAPL bitmap:
- *
- * wapl_bitmap_iterator(iterator, WAPL bitmap);
- * for (iterator->start(); iterator->has_next(); ) {
- * unsigned int bit_index = iterator->next();
- * ...operations on this bit index...
- * }
- */
-typedef struct wapl_bitmap_iterator {
- int _start, _next, _nbits;
- wapl_bitmap_t _base;
-
- void (*start)(struct wapl_bitmap_iterator *this);
- bool (*has_next)(struct wapl_bitmap_iterator *this);
- unsigned int (*next)(struct wapl_bitmap_iterator *this);
-} wapl_bitmap_iterator_t;
-
-/*
- * WAPL bitmap iterator constructor
- */
-void __wapl_bitmap_iterator(wapl_bitmap_iterator_t *this);
-
-/*
- * Generic constructor accepts any derived WAPL bitmap class
- */
-#define wapl_bitmap_iterator(iterator, map) \
- ({ \
- __typeof__(iterator) __it = iterator; \
- __wapl_upcast(__it->_base, map); \
- __wapl_bitmap_iterator(__it); \
- })
-
-/* Sparse bitmap base class */
-typedef struct {
- unsigned int nbits;
- unsigned int *last, *pl, *il;
-} sparse_bitmap_t;
-
-/*
- * Sparse bitmap, lists all bit indexes directly as an array.
- * Expected to be significantly straightforward iteration.
- */
-#define instantiate_sparse_bitmap(line, nbits) \
- struct TOKENIZE(sparse_bitmap, line) { \
- unsigned int last; \
- unsigned int pl[nbits]; \
- unsigned int il[nbits]; \
- }
-
-#define SPARSE_BITMAP(nbits) instantiate_sparse_bitmap(__LINE__, nbits)
-
-/*
- * Upcast any derived sparse bitmap class to its base class
- */
-#define __sparse_upcast(base, derived) \
- do { \
- __typeof__(derived) p = derived; \
- base.pl = p->pl; \
- base.il = p->il; \
- base.last = &p->last; \
- base.nbits = ARRAY_SIZE(p->il); \
- } while (0)
-
-/*
- * Sparse base class bitmap operations
- */
-void __sparse_bitmap_set(sparse_bitmap_t *map, unsigned int bit);
-
-void __sparse_bitmap_clear(sparse_bitmap_t *map, unsigned int bit);
-
-/*
- * Generic sparse bitmap operations
- */
-#define sparse_bitmap_zero(map) \
- ({ \
- __typeof__(map) p = map; \
- memset((void *)p, 0, sizeof(__typeof__(*p))); \
- })
-
-#define sparse_bitmap_set(map, bit) \
- ({ \
- sparse_bitmap_t b; \
- __sparse_upcast(b, map); \
- __sparse_bitmap_set(&b, bit); \
- })
-
-#define sparse_bitmap_clear(map, bit) \
- ({ \
- sparse_bitmap_t b; \
- __sparse_upcast(b, map); \
- __sparse_bitmap_clear(&b, bit); \
- })
-
-/*
- * Round robin iterator runs upon a sparse bitmap:
- *
- * sparse_bitmap_iterator(iterator, SPARSE bitmap);
- * for (iterator->start(); iterator->has_next(); ) {
- * unsigned int bit_index = iterator->next();
- * ...operations on this bit index...
- * }
- */
-typedef struct sparse_bitmap_iterator {
- int _start, _next, _nbits;
- sparse_bitmap_t _base;
-
- void (*start)(struct sparse_bitmap_iterator *this);
- bool (*has_next)(struct sparse_bitmap_iterator *this);
- unsigned int (*next)(struct sparse_bitmap_iterator *this);
-} sparse_bitmap_iterator_t;
-
-/*
- * Sparse bitmap iterator constructor
- */
-void __sparse_bitmap_iterator(sparse_bitmap_iterator_t *this);
-
-/*
- * Generic constructor accepts any derived sparse bitmap class.
- */
-#define sparse_bitmap_iterator(iterator, map) \
- ({ \
- __typeof__(iterator) __it = iterator; \
- __sparse_upcast(__it->_base, map); \
- __sparse_bitmap_iterator(__it); \
- })
-
-/*
- * Raw bitmap atomic set and clear.
- */
-void raw_bitmap_set(unsigned long *map, unsigned int bit);
-
-void raw_bitmap_clear(unsigned long *map, unsigned int bit);
-
-/*
- * It will enter infinite loop incase that all bits are zero,
- * so please make sure the bitmap at least has one set.
- */
-static inline int __bitmap_wraparound_next(
- unsigned long *addr, unsigned int nbits, int start)
-{
- unsigned long tmp;
-
- if (start >= (int)nbits)
- start = 0;
-
- tmp = addr[BIT_WORD(start)];
-
- /* Handle 1st word. */
- tmp &= BITMAP_FIRST_WORD_MASK(start);
- start = start & ~(BITS_PER_LONG - 1);
-
- while (!tmp) {
- start += BITS_PER_LONG;
- if (start >= (int)nbits)
- start = 0;
-
- tmp = addr[BIT_WORD(start)];
- }
-
- start += __builtin_ffsl(tmp) - 1;
- return start;
-}
-
-/**
- * @}
- */
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_bitset.h b/platform/linux-generic/include/odp_bitset.h
new file mode 100644
index 000000000..e55b9ef1a
--- /dev/null
+++ b/platform/linux-generic/include/odp_bitset.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_BITSET_H_
+#define _ODP_BITSET_H_
+
+#include <odp_cpu.h>
+
+#include <limits.h>
+
+/******************************************************************************
+ * bitset abstract data type
+ *****************************************************************************/
+/* This could be a struct of scalars to support larger bit sets */
+
+/*
+ * Size of atomic bit set. This limits the max number of threads,
+ * scheduler groups and reorder windows. On ARMv8/64-bit and x86-64, the
+ * (lock-free) max is 128
+ */
+
+#if ATOM_BITSET_SIZE <= 32
+
+/* Return first-bit-set with StdC ffs() semantics */
+static inline uint32_t bitset_ffs(bitset_t b)
+{
+ return __builtin_ffsl(b);
+}
+
+#elif ATOM_BITSET_SIZE <= 64
+
+/* Return first-bit-set with StdC ffs() semantics */
+static inline uint32_t bitset_ffs(bitset_t b)
+{
+ return __builtin_ffsll(b);
+}
+
+#elif ATOM_BITSET_SIZE <= 128
+
+/* Return first-bit-set with StdC ffs() semantics */
+static inline uint32_t bitset_ffs(bitset_t b)
+{
+ if ((uint64_t)b != 0)
+ return __builtin_ffsll((uint64_t)b);
+ else if ((b >> 64) != 0)
+ return __builtin_ffsll((uint64_t)(b >> 64)) + 64;
+ else
+ return 0;
+}
+
+#else
+#error Unsupported size of bit sets (ATOM_BITSET_SIZE)
+#endif
+
+/* Return a & ~b */
+static inline bitset_t bitset_andn(bitset_t a, bitset_t b)
+{
+ return a & ~b;
+}
+
+static inline bool bitset_is_eql(bitset_t a, bitset_t b)
+{
+ return a == b;
+}
+
+static inline bitset_t bitset_clr(bitset_t bs, uint32_t bit)
+{
+ return bs & ~bitset_mask(bit);
+}
+
+static inline bitset_t bitset_set(bitset_t bs, uint32_t bit)
+{
+ return bs | bitset_mask(bit);
+}
+
+static inline bitset_t bitset_null(void)
+{
+ return 0U;
+}
+
+static inline bool bitset_is_null(bitset_t a)
+{
+ return a == 0U;
+}
+
+static inline bool bitset_is_set(bitset_t a, uint32_t bit)
+{
+ return (a & bitset_mask(bit)) != 0;
+}
+
+#endif
diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h
deleted file mode 100644
index cf817d907..000000000
--- a/platform/linux-generic/include/odp_buffer_inlines.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * Inline functions for ODP buffer mgmt routines - implementation internal
- */
-
-#ifndef ODP_BUFFER_INLINES_H_
-#define ODP_BUFFER_INLINES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp_buffer_internal.h>
-
-odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf);
-void _odp_buffer_event_type_set(odp_buffer_t buf, int ev);
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf);
-
-static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr)
-{
- return hdr->handle.handle;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index 076abe96e..676b9f116 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -24,101 +24,41 @@ extern "C" {
#include <odp/api/buffer.h>
#include <odp/api/debug.h>
#include <odp/api/align.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
#include <odp/api/byteorder.h>
#include <odp/api/thread.h>
#include <odp/api/event.h>
-#include <odp_forward_typedefs_internal.h>
-#include <odp_schedule_if.h>
+#include <odp_event_internal.h>
#include <stddef.h>
-typedef union odp_buffer_bits_t {
- odp_buffer_t handle;
-
- union {
- uint32_t u32;
-
- struct {
- uint32_t pool_id: 8;
- uint32_t index: 24;
- };
- };
-} odp_buffer_bits_t;
-
-#define BUFFER_BURST_SIZE CONFIG_BURST_SIZE
-
-/* Common buffer header */
-struct odp_buffer_hdr_t {
- /* Handle union */
- odp_buffer_bits_t handle;
-
- /* Initial buffer data pointer and length */
- uint8_t *base_data;
- uint8_t *buf_end;
-
- /* Max data size */
- uint32_t size;
-
- /* Pool type */
- int8_t type;
-
- /* Burst counts */
- uint8_t burst_num;
- uint8_t burst_first;
-
- /* Segment count */
- uint8_t segcount;
-
- /* Segments */
- struct {
- void *hdr;
- uint8_t *data;
- uint32_t len;
- } seg[CONFIG_PACKET_MAX_SEGS];
-
- /* Next buf in a list */
- struct odp_buffer_hdr_t *next;
-
- /* User context pointer or u64 */
- union {
- uint64_t buf_u64;
- void *buf_ctx;
- const void *buf_cctx; /* const alias for ctx */
- };
+/* Internal buffer header */
+typedef struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
/* User area pointer */
- void *uarea_addr;
-
- /* User area size */
- uint32_t uarea_size;
-
- /* Event type. Maybe different than pool type (crypto compl event) */
- int8_t event_type;
+ void *uarea_addr;
- /* Burst table */
- struct odp_buffer_hdr_t *burst[BUFFER_BURST_SIZE];
+ /* Data */
+ uint8_t data[];
+} odp_buffer_hdr_t;
- /* Used only if _ODP_PKTIO_IPC is set.
- * ipc mapped process can not walk over pointers,
- * offset has to be used */
- uint64_t ipc_data_offset;
+/* Buffer header size is critical for performance. Ensure that it does not accidentally
+ * grow over cache line size. Note that ODP_ALIGNED_CACHE rounds up struct size to a multiple of
+ * ODP_CACHE_LINE_SIZE. */
+ODP_STATIC_ASSERT(sizeof(odp_buffer_hdr_t) <= ODP_CACHE_LINE_SIZE, "BUFFER_HDR_SIZE_ERROR");
- /* Pool handle */
- odp_pool_t pool_hdl;
-
- /* Data or next header */
- uint8_t data[0];
-};
-
-ODP_STATIC_ASSERT(CONFIG_PACKET_MAX_SEGS < 256,
- "CONFIG_PACKET_MAX_SEGS_TOO_LARGE");
+static inline odp_buffer_hdr_t *_odp_buf_hdr(odp_buffer_t buf)
+{
+ return (odp_buffer_hdr_t *)(uintptr_t)buf;
+}
-ODP_STATIC_ASSERT(BUFFER_BURST_SIZE < 256, "BUFFER_BURST_SIZE_TOO_LARGE");
+static inline void _odp_buffer_subtype_set(odp_buffer_t buffer, int subtype)
+{
+ odp_buffer_hdr_t *buf_hdr = _odp_buf_hdr(buffer);
-/* Forward declarations */
-int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount);
-void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount);
+ buf_hdr->event_hdr.subtype = subtype;
+}
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_chksum_internal.h b/platform/linux-generic/include/odp_chksum_internal.h
new file mode 100644
index 000000000..e589ecb94
--- /dev/null
+++ b/platform/linux-generic/include/odp_chksum_internal.h
@@ -0,0 +1,204 @@
+/* Copyright (c) 2020, 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_CHKSUM_INTERNAL_H_
+#define ODP_CHKSUM_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/hints.h>
+#include <odp/api/byteorder.h>
+#include <odp_cpu.h>
+#include <stdint.h>
+
+/*
+ * Compute the final Internet checksum (RFC 1071) based on a partial
+ * sum. A partial sum can be obtained e.g. by calling
+ * chksum_partial().
+ */
+static inline uint16_t chksum_finalize(uint64_t sum)
+{
+ sum = (sum >> 32) + (sum & 0xffffffff);
+ sum = (sum >> 16) + (sum & 0xffff);
+ /*
+ * The final & 0xffff is intentionally omitted, the extra bits
+ * are discarded by the implicit cast to the return type.
+ */
+ return (sum >> 16) + sum;
+}
+
+/*
+ * Compute a partial checksum. Several partial checksums may be summed
+ * together. The final checksum may be obtained by calling
+ * chksum_finalize(). Parameter offset is the offset of this segment
+ * of data from the start of IP header.
+ *
+ * This implementation
+ *
+ * - Accepts unaligned data.
+ *
+ * - Accepts data at any byte offset from the start of IP header,
+ * including odd offsets.
+ *
+ * - Uses unaligned memory access only if available.
+ *
+ * - Is optimized (for skylake, cn96, a53) by trial and error.
+ *
+ * The following did not improve performance (in synthetic tests):
+ *
+ * - 2 or 4 sub-sums in the main loop (to break dependency chains).
+ *
+ * - Aligning to 8 bytes instead of 4 (for ldp instruction). This
+ * makes the main loop faster on a53 (only), but the extra
+ * conditional branch has its cost.
+ *
+ * - __builtin_assume_aligned().
+ */
+static uint64_t chksum_partial(const void *addr, uint32_t len, uint32_t offset)
+{
+ const uint8_t *b;
+#if _ODP_UNALIGNED
+ /*
+ * _ODP_UNALIGNED does not guarantee that all possible ways of
+ * accessing memory can be unaligned. Make the compiler aware
+ * of the possible unalignment so that it does not generate
+ * instructions (such as LDM of AArch32) that require higher
+ * alignment than one byte.
+ */
+ typedef uint32_t x_uint32_t ODP_ALIGNED(1);
+ typedef uint16_t x_uint16_t ODP_ALIGNED(1);
+#else
+ /* In this case we can use normal types as we align manually. */
+ typedef uint32_t x_uint32_t;
+ typedef uint16_t x_uint16_t;
+#endif
+ const x_uint16_t *w;
+ const x_uint32_t *d;
+ uint64_t sum = 0;
+
+ /*
+ * Offset is either even or odd, the rest of it doesn't
+ * matter.
+ */
+ offset &= 1;
+
+ if (_ODP_UNALIGNED) {
+ /*
+ * We have efficient unaligned access. Just read
+ * dwords starting at the given address.
+ */
+ d = (const x_uint32_t *)addr;
+ } else {
+ /*
+ * We must avoid unaligned access, so align to 4 bytes
+ * by summing up the first up to 3 bytes.
+ */
+ b = (const uint8_t *)addr;
+
+ if (odp_unlikely((uintptr_t)b & 1) && len >= 1) {
+ /*
+ * Align to 2 bytes by handling an odd
+ * byte. Since addr is unaligned, the first
+ * byte goes into the second byte of the sum.
+ */
+ sum += odp_cpu_to_be_16(*b++);
+ len -= 1;
+
+ /* An odd byte negates the effect of offset. */
+ offset ^= 1;
+ }
+
+ /*
+ * This cast increases alignment, but it's OK, since
+ * we've made sure that the pointer value is aligned.
+ */
+ w = (const x_uint16_t *)(uintptr_t)b;
+
+ if ((uintptr_t)w & 2 && len >= 2) {
+			/* Align to 4 bytes by handling an odd word. */
+ sum += *w++;
+ len -= 2;
+ }
+
+ /* Increases alignment. */
+ d = (const x_uint32_t *)(uintptr_t)w;
+ }
+
+ while (len >= 32) {
+ /* 8 dwords or 32 bytes per round. */
+
+ sum += *d++;
+ sum += *d++;
+ sum += *d++;
+ sum += *d++;
+
+ sum += *d++;
+ sum += *d++;
+ sum += *d++;
+ sum += *d++;
+
+ len -= 32;
+ }
+
+ /* Last up to 7 dwords. */
+ switch (len >> 2) {
+ case 7:
+ sum += *d++;
+ /* FALLTHROUGH */
+ case 6:
+ sum += *d++;
+ /* FALLTHROUGH */
+ case 5:
+ sum += *d++;
+ /* FALLTHROUGH */
+ case 4:
+ sum += *d++;
+ /* FALLTHROUGH */
+ case 3:
+ sum += *d++;
+ /* FALLTHROUGH */
+ case 2:
+ sum += *d++;
+ /* FALLTHROUGH */
+ case 1:
+ sum += *d++;
+ /* FALLTHROUGH */
+ default:
+ break;
+ }
+
+ len &= 3;
+
+ w = (const x_uint16_t *)d;
+ if (len > 1) {
+ /* Last word. */
+ sum += *w++;
+ len -= 2;
+ }
+
+ if (len) {
+ /* Last byte. */
+ b = (const uint8_t *)w;
+ sum += odp_cpu_to_be_16((uint16_t)*b << 8);
+ }
+
+ /*
+ * If offset is odd, our sum is byte-flipped and we need to
+ * flip odd and even bytes.
+ */
+ if (odp_unlikely(offset))
+ sum = ((sum & 0xff00ff00ff00ff) << 8) | ((sum & 0xff00ff00ff00ff00) >> 8);
+
+ return sum;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index 9df756bf9..c042a5308 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -1,10 +1,9 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -21,128 +20,147 @@ extern "C" {
#include <odp/api/spinlock.h>
#include <odp/api/classification.h>
+#include <odp/api/debug.h>
+
+#include <odp_macros_internal.h>
#include <odp_pool_internal.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
-#include <odp_queue_internal.h>
+#include <odp_queue_if.h>
+
#include <protocols/ip.h>
/* Maximum Class Of Service Entry */
-#define ODP_COS_MAX_ENTRY 64
+#define CLS_COS_MAX_ENTRY 64
+/* Invalid CoS index */
+#define CLS_COS_IDX_NONE CLS_COS_MAX_ENTRY
/* Maximum PMR Entry */
-#define ODP_PMR_MAX_ENTRY 256
+#define CLS_PMR_MAX_ENTRY 256
/* Maximum PMR Terms in a PMR Set */
-#define ODP_PMRTERM_MAX 8
+#define CLS_PMRTERM_MAX 8
/* Maximum PMRs attached in PKTIO Level */
-#define ODP_PMR_PER_COS_MAX 8
-/* L2 Priority Bits */
-#define ODP_COS_L2_QOS_BITS 3
-/* Max L2 QoS value */
-#define ODP_COS_MAX_L2_QOS (1 << ODP_COS_L2_QOS_BITS)
-/* L2 DSCP Bits */
-#define ODP_COS_L3_QOS_BITS 6
-/* Max L3 QoS Value */
-#define ODP_COS_MAX_L3_QOS (1 << ODP_COS_L3_QOS_BITS)
-/* Max PMR Term bits */
-#define ODP_PMR_TERM_BYTES_MAX 16
-
-/**
-Packet Matching Rule Term Value
+#define CLS_PMR_PER_COS_MAX 8
+/* Max PMR Term size */
+#define MAX_PMR_TERM_SIZE 16
+/* Max queues per class of service */
+#define CLS_COS_QUEUE_MAX 32
+/* Max number of implementation created queues */
+#define CLS_QUEUE_GROUP_MAX (CLS_COS_MAX_ENTRY * CLS_COS_QUEUE_MAX)
+
+/* CoS index is stored in odp_packet_hdr_t */
+ODP_STATIC_ASSERT(CLS_COS_MAX_ENTRY <= UINT16_MAX, "CoS_does_not_fit_16_bits");
+
+typedef union {
+ /* All proto fields */
+ uint32_t all;
+
+ struct {
+ uint32_t ipv4:1;
+ uint32_t ipv6:1;
+ uint32_t udp:1;
+ uint32_t tcp:1;
+ };
+} odp_cls_hash_proto_t;
-Stores the Term and Value mapping for a PMR.
-The maximum size of value currently supported in 64 bits
-**/
+/*
+ * Term and value mapping for a PMR
+ */
typedef struct pmr_term_value {
- odp_cls_pmr_term_t term; /* PMR Term */
- odp_bool_t range_term; /* True if range, false if match */
+ /* PMR Term */
+ odp_cls_pmr_term_t term;
+
+ /* True if range, false if match */
+ odp_bool_t range_term;
+
union {
+ /* Match value and mask */
struct {
- /** Value to be matched */
- uint64_t value;
- /** Masked set of bits to be matched */
- uint64_t mask;
+ /* Value to be matched. Arrays are used with custom and
+ * IPv6 address terms. */
+ union {
+ uint64_t value;
+ uint8_t value_u8[MAX_PMR_TERM_SIZE];
+ uint64_t value_u64[2];
+ };
+
+ /* Mask for the data to be matched */
+ union {
+ uint64_t mask;
+ uint8_t mask_u8[MAX_PMR_TERM_SIZE];
+ uint64_t mask_u64[2];
+ };
+
} match;
+
+ /* Range values */
struct {
- /** Start value of the range */
- uint64_t val_start;
- /** End value of the range */
- uint64_t val_end;
+ /* Start value of the range */
+ union {
+ uint64_t start;
+ uint8_t start_u8[MAX_PMR_TERM_SIZE];
+ uint64_t start_u64[2];
+ };
+
+ /* End value of the range */
+ union {
+ uint64_t end;
+ uint8_t end_u8[MAX_PMR_TERM_SIZE];
+ uint64_t end_u64[2];
+ };
+
} range;
- struct {
- _odp_ipv6_addr_t addr;
- _odp_ipv6_addr_t mask;
- } match_ipv6;
- struct {
- _odp_ipv6_addr_t addr_start;
- _odp_ipv6_addr_t addr_end;
- } range_ipv6;
+
};
- uint32_t offset; /**< Offset if term == ODP_PMR_CUSTOM_FRAME */
- uint32_t val_sz; /**< Size of the value to be matched */
+
+ /* Offset used with custom PMR */
+ uint32_t offset;
+
+ /* Size of the value to be matched */
+ uint32_t val_sz;
+
} pmr_term_value_t;
/*
Class Of Service
*/
-struct cos_s {
- queue_entry_t *queue; /* Associated Queue */
- odp_pool_t pool; /* Associated Buffer pool */
- union pmr_u *pmr[ODP_PMR_PER_COS_MAX]; /* Chained PMR */
- union cos_u *linked_cos[ODP_PMR_PER_COS_MAX]; /* Chained CoS with PMR*/
+typedef struct ODP_ALIGNED_CACHE cos_s {
uint32_t valid; /* validity Flag */
- odp_cls_drop_t drop_policy; /* Associated Drop Policy */
+ odp_atomic_u32_t num_rule; /* num of PMRs attached with this CoS */
+ struct pmr_s *pmr[CLS_PMR_PER_COS_MAX]; /* Chained PMR */
+ struct cos_s *linked_cos[CLS_PMR_PER_COS_MAX]; /* Chained CoS with PMR*/
+ odp_bool_t stats_enable;
+ odp_cos_action_t action; /* Action */
+ odp_queue_t queue; /* Associated Queue */
+ uint32_t num_queue;
+ odp_pool_t pool; /* Associated Buffer pool */
+ uint8_t index;
+ bool queue_group;
+ odp_cls_hash_proto_t hash_proto;
+ odp_pktin_vector_config_t vector; /* Packet vector config */
size_t headroom; /* Headroom for this CoS */
odp_spinlock_t lock; /* cos lock */
- odp_atomic_u32_t num_rule; /* num of PMRs attached with this CoS */
+ odp_queue_param_t queue_param;
char name[ODP_COS_NAME_LEN]; /* name */
-};
-
-typedef union cos_u {
- struct cos_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct cos_s))];
+ struct {
+ odp_atomic_u64_t discards;
+ odp_atomic_u64_t packets;
+ } stats, queue_stats[CLS_COS_QUEUE_MAX];
} cos_t;
-
-/**
-Packet Matching Rule
-
-**/
-struct pmr_s {
+/* Pattern Matching Rule */
+typedef struct ODP_ALIGNED_CACHE pmr_s {
uint32_t valid; /* Validity Flag */
- odp_atomic_u32_t count; /* num of packets matching this rule */
uint32_t num_pmr; /* num of PMR Term Values*/
+ uint16_t mark;
+ pmr_term_value_t pmr_term_value[CLS_PMRTERM_MAX];
+ /* List of associated PMR Terms */
odp_spinlock_t lock; /* pmr lock*/
cos_t *src_cos; /* source CoS where PMR is attached */
- pmr_term_value_t pmr_term_value[ODP_PMRTERM_MAX];
- /* List of associated PMR Terms */
-};
-
-typedef union pmr_u {
- struct pmr_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pmr_s))];
} pmr_t;
-/**
-L2 QoS and CoS Map
-
-This structure holds the mapping between L2 QoS value and
-corresponding cos_t object
-**/
-typedef struct pmr_l2_cos {
- odp_spinlock_t lock; /* pmr_l2_cos lock */
- cos_t *cos[ODP_COS_MAX_L2_QOS]; /* Array of CoS objects */
-} pmr_l2_cos_t;
-
-/**
-L3 QoS and CoS Map
-
-This structure holds the mapping between L3 QoS value and
-corresponding cos_t object
-**/
-typedef struct pmr_l3_cos {
- odp_spinlock_t lock; /* pmr_l3_cos lock */
- cos_t *cos[ODP_COS_MAX_L3_QOS]; /* Array of CoS objects */
-} pmr_l3_cos_t;
+typedef struct ODP_ALIGNED_CACHE {
+ odp_queue_t queue[CLS_QUEUE_GROUP_MAX];
+} _cls_queue_grp_tbl_t;
/**
Linux Generic Classifier
@@ -153,9 +171,6 @@ the classifier configuration value.
typedef struct classifier {
cos_t *error_cos; /* Associated Error CoS */
cos_t *default_cos; /* Associated Default CoS */
- uint32_t l3_precedence; /* L3 QoS precedence */
- pmr_l2_cos_t l2_cos_table; /* L2 QoS-CoS table map */
- pmr_l3_cos_t l3_cos_table; /* L3 Qos-CoS table map */
size_t headroom; /* Pktio Headroom */
size_t skip; /* Pktio Skip Offset */
} classifier_t;
@@ -164,16 +179,27 @@ typedef struct classifier {
Class of Service Table
**/
typedef struct odp_cos_table {
- cos_t cos_entry[ODP_COS_MAX_ENTRY];
+ cos_t cos_entry[CLS_COS_MAX_ENTRY];
} cos_tbl_t;
/**
PMR table
**/
typedef struct pmr_tbl {
- pmr_t pmr[ODP_PMR_MAX_ENTRY];
+ pmr_t pmr[CLS_PMR_MAX_ENTRY];
} pmr_tbl_t;
+/**
+Classifier global data
+**/
+typedef struct cls_global_t {
+ cos_tbl_t cos_tbl;
+ pmr_tbl_t pmr_tbl;
+ _cls_queue_grp_tbl_t queue_grp_tbl;
+ odp_shm_t shm;
+
+} cls_global_t;
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_classification_inlines.h b/platform/linux-generic/include/odp_classification_inlines.h
deleted file mode 100644
index 2747db8cc..000000000
--- a/platform/linux-generic/include/odp_classification_inlines.h
+++ /dev/null
@@ -1,377 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP Classification Inlines
- * Classification Inlines Functions
- */
-#ifndef __ODP_CLASSIFICATION_INLINES_H_
-#define __ODP_CLASSIFICATION_INLINES_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/debug.h>
-#include <protocols/eth.h>
-#include <protocols/ip.h>
-#include <protocols/ipsec.h>
-#include <protocols/udp.h>
-#include <protocols/tcp.h>
-#include <odp_packet_internal.h>
-#include <stdio.h>
-#include <inttypes.h>
-
-/* PMR term value verification function
-These functions verify the given PMR term value with the value in the packet
-These following functions return 1 on success and 0 on failure
-*/
-
-static inline int verify_pmr_packet_len(odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- if (term_value->match.value == (packet_len(pkt_hdr) &
- term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_ip_proto(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ipv4hdr_t *ip;
- uint8_t proto;
- if (!pkt_hdr->p.input_flags.ipv4)
- return 0;
- ip = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
- proto = ip->proto;
- if (term_value->match.value == (proto & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_ipv4_saddr(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ipv4hdr_t *ip;
- uint32_t ipaddr;
- if (!pkt_hdr->p.input_flags.ipv4)
- return 0;
- ip = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
- ipaddr = odp_be_to_cpu_32(ip->src_addr);
- if (term_value->match.value == (ipaddr & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_ipv4_daddr(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ipv4hdr_t *ip;
- uint32_t ipaddr;
- if (!pkt_hdr->p.input_flags.ipv4)
- return 0;
- ip = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
- ipaddr = odp_be_to_cpu_32(ip->dst_addr);
- if (term_value->match.value == (ipaddr & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_tcp_sport(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint16_t sport;
- const _odp_tcphdr_t *tcp;
- if (!pkt_hdr->p.input_flags.tcp)
- return 0;
- tcp = (const _odp_tcphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
- sport = odp_be_to_cpu_16(tcp->src_port);
- if (term_value->match.value == (sport & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_tcp_dport(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint16_t dport;
- const _odp_tcphdr_t *tcp;
- if (!pkt_hdr->p.input_flags.tcp)
- return 0;
- tcp = (const _odp_tcphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
- dport = odp_be_to_cpu_16(tcp->dst_port);
- if (term_value->match.value == (dport & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_udp_dport(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint16_t dport;
- const _odp_udphdr_t *udp;
- if (!pkt_hdr->p.input_flags.udp)
- return 0;
- udp = (const _odp_udphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
- dport = odp_be_to_cpu_16(udp->dst_port);
- if (term_value->match.value == (dport & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_udp_sport(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint16_t sport;
- const _odp_udphdr_t *udp;
-
- if (!pkt_hdr->p.input_flags.udp)
- return 0;
- udp = (const _odp_udphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
- sport = odp_be_to_cpu_16(udp->src_port);
- if (term_value->match.value == (sport & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_dmac(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint64_t dmac = 0;
- uint64_t dmac_be = 0;
- const _odp_ethhdr_t *eth;
-
- if (!packet_hdr_has_eth(pkt_hdr))
- return 0;
-
- eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
- memcpy(&dmac_be, eth->dst.addr, _ODP_ETHADDR_LEN);
- dmac = odp_be_to_cpu_64(dmac_be);
- /* since we are converting a 48 bit ethernet address from BE to cpu
- format using odp_be_to_cpu_64() the last 16 bits needs to be right
- shifted */
- if (dmac_be != dmac)
- dmac = dmac >> (64 - (_ODP_ETHADDR_LEN * 8));
-
- if (term_value->match.value == (dmac & term_value->match.mask))
- return 1;
- return 0;
-}
-
-static inline int verify_pmr_ipv6_saddr(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ipv6hdr_t *ipv6;
- uint64_t addr[2];
-
- if (!packet_hdr_has_ipv6(pkt_hdr))
- return 0;
-
- ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
-
- addr[0] = ipv6->src_addr.u64[0];
- addr[1] = ipv6->src_addr.u64[1];
-
- /* 128 bit address is processed as two 64 bit value
- * for bitwise AND operation */
- addr[0] = addr[0] & term_value->match_ipv6.mask.u64[0];
- addr[1] = addr[1] & term_value->match_ipv6.mask.u64[1];
-
- if (!memcmp(addr, term_value->match_ipv6.addr.u8, _ODP_IPV6ADDR_LEN))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_ipv6_daddr(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ipv6hdr_t *ipv6;
- uint64_t addr[2];
-
- if (!packet_hdr_has_ipv6(pkt_hdr))
- return 0;
- ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
- addr[0] = ipv6->dst_addr.u64[0];
- addr[1] = ipv6->dst_addr.u64[1];
-
- /* 128 bit address is processed as two 64 bit value
- * for bitwise AND operation */
- addr[0] = addr[0] & term_value->match_ipv6.mask.u64[0];
- addr[1] = addr[1] & term_value->match_ipv6.mask.u64[1];
-
- if (!memcmp(addr, term_value->match_ipv6.addr.u8, _ODP_IPV6ADDR_LEN))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_vlan_id_0(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ethhdr_t *eth;
- const _odp_vlanhdr_t *vlan;
- uint16_t tci;
- uint16_t vlan_id;
-
- if (!pkt_hdr->p.input_flags.vlan_qinq)
- return 0;
-
- eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
- vlan = (const _odp_vlanhdr_t *)(eth + 1);
- tci = odp_be_to_cpu_16(vlan->tci);
- vlan_id = tci & 0x0fff;
-
- if (term_value->match.value == (vlan_id & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_vlan_id_x(const uint8_t *pkt_addr ODP_UNUSED,
- odp_packet_hdr_t *pkt_hdr ODP_UNUSED,
- pmr_term_value_t *term_value ODP_UNUSED)
-{
- const _odp_ethhdr_t *eth;
- const _odp_vlanhdr_t *vlan;
- uint16_t tci;
- uint16_t vlan_id;
-
- if (!pkt_hdr->p.input_flags.vlan_qinq)
- return 0;
-
- eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
- vlan = (const _odp_vlanhdr_t *)(eth + 1);
- vlan++;
- tci = odp_be_to_cpu_16(vlan->tci);
- vlan_id = tci & 0x0fff;
-
- if (term_value->match.value == (vlan_id & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_ipsec_spi(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint32_t spi;
-
- pkt_addr += pkt_hdr->p.l4_offset;
-
- if (pkt_hdr->p.input_flags.ipsec_ah) {
- const _odp_ahhdr_t *ahhdr = (const _odp_ahhdr_t *)pkt_addr;
-
- spi = odp_be_to_cpu_32(ahhdr->spi);
- } else if (pkt_hdr->p.input_flags.ipsec_esp) {
- const _odp_esphdr_t *esphdr = (const _odp_esphdr_t *)pkt_addr;
-
- spi = odp_be_to_cpu_32(esphdr->spi);
- } else {
- return 0;
- }
-
- if (term_value->match.value == (spi & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_ld_vni(const uint8_t *pkt_addr ODP_UNUSED,
- odp_packet_hdr_t *pkt_hdr ODP_UNUSED,
- pmr_term_value_t *term_value ODP_UNUSED)
-{
- ODP_UNIMPLEMENTED();
- return 0;
-}
-
-static inline int verify_pmr_custom_frame(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- uint64_t val = 0;
- uint32_t offset = term_value->offset;
- uint32_t val_sz = term_value->val_sz;
-
- ODP_ASSERT(val_sz <= ODP_PMR_TERM_BYTES_MAX);
-
- if (packet_len(pkt_hdr) <= offset + val_sz)
- return 0;
-
- memcpy(&val, pkt_addr + offset, val_sz);
- if (term_value->match.value == (val & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_eth_type_0(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ethhdr_t *eth;
- uint16_t ethtype;
-
- if (!pkt_hdr->p.input_flags.vlan_qinq)
- return 0;
-
- eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
- ethtype = odp_be_to_cpu_16(eth->type);
-
- if (term_value->match.value == (ethtype & term_value->match.mask))
- return 1;
-
- return 0;
-}
-
-static inline int verify_pmr_eth_type_x(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
-{
- const _odp_ethhdr_t *eth;
- uint16_t ethtype;
- const _odp_vlanhdr_t *vlan;
-
- if (!pkt_hdr->p.input_flags.vlan_qinq)
- return 0;
-
- eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
- vlan = (const _odp_vlanhdr_t *)(eth + 1);
- ethtype = odp_be_to_cpu_16(vlan->type);
-
- if (term_value->match.value == (ethtype & term_value->match.mask))
- return 1;
-
- return 0;
-}
-#ifdef __cplusplus
-}
-#endif
-#endif
diff --git a/platform/linux-generic/include/odp_classification_internal.h b/platform/linux-generic/include/odp_classification_internal.h
index 78eaac904..7841e64fb 100644
--- a/platform/linux-generic/include/odp_classification_internal.h
+++ b/platform/linux-generic/include/odp_classification_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -19,124 +19,234 @@
extern "C" {
#endif
+#include <odp/api/atomic.h>
#include <odp/api/classification.h>
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/pool.h>
#include <odp/api/queue.h>
+#include <odp/api/std_types.h>
+
+#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
-#include <odp/api/packet_io.h>
#include <odp_packet_io_internal.h>
#include <odp_classification_datamodel.h>
-/** Classification Internal function **/
+#include <stdint.h>
-/**
-@internal
-match_qos_cos
+extern cls_global_t *_odp_cls_global;
-Select a CoS for the given Packet based on QoS values
-This function returns the COS object matching the L2 and L3 QoS
-based on the l3_preference value of the pktio
-**/
-cos_t *match_qos_cos(pktio_entry_t *entry, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr);
-/**
-@internal
+static inline cos_t *_odp_cos_entry_from_idx(uint32_t ndx)
+{
+ return &_odp_cls_global->cos_tbl.cos_entry[ndx];
+}
-Packet Classifier
+static inline int _odp_cos_queue_idx(const cos_t *cos, odp_queue_t queue)
+{
+ uint32_t i, tbl_idx;
+ int queue_idx = -1;
+
+ if (cos->num_queue == 1) {
+ if (odp_unlikely(cos->queue != queue))
+ return -1;
+ return 0;
+ }
+
+ tbl_idx = cos->index * CLS_COS_QUEUE_MAX;
+ for (i = 0; i < cos->num_queue; i++) {
+ if (_odp_cls_global->queue_grp_tbl.queue[tbl_idx + i] == queue) {
+ queue_idx = i;
+ break;
+ }
+ }
+ return queue_idx;
+}
-Start function for Packet Classifier
-This function calls Classifier module internal functions for a given packet and
-selects destination queue and packet pool based on selected PMR and CoS.
-**/
-int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
- uint16_t pkt_len, uint32_t seg_len, odp_pool_t *pool,
- odp_packet_hdr_t *pkt_hdr);
+static inline void _odp_cos_queue_stats_add(cos_t *cos, odp_queue_t queue,
+ uint64_t packets, uint64_t discards)
+{
+ int queue_idx = _odp_cos_queue_idx(cos, queue);
-/**
-Packet IO classifier init
+ if (odp_unlikely(queue_idx < 0)) {
+ _ODP_ERR("Queue not attached to the CoS\n");
+ return;
+ }
-This function does initialization of classifier object associated with pktio.
-This function should be called during pktio initialization.
-**/
-int pktio_classifier_init(pktio_entry_t *pktio);
+ if (packets)
+ odp_atomic_add_u64(&cos->queue_stats[queue_idx].packets, packets);
+ if (discards)
+ odp_atomic_add_u64(&cos->queue_stats[queue_idx].discards, discards);
+}
+
+static inline void _odp_cos_vector_enq(odp_queue_t queue, odp_event_t events[], uint32_t num,
+ cos_t *cos)
+{
+ odp_packet_vector_t pktv;
+ const odp_pool_t pool = cos->vector.pool;
+ const uint32_t max_size = cos->vector.max_size;
+ uint32_t num_enq;
+ int num_pktv = (num + max_size - 1) / max_size;
+ int ret;
+ int i;
+ odp_packet_vector_t pktv_tbl[num_pktv];
+ odp_event_t event_tbl[num_pktv];
+
+ for (i = 0; i < num_pktv; i++) {
+ pktv = odp_packet_vector_alloc(pool);
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ break;
+ pktv_tbl[i] = pktv;
+ event_tbl[i] = odp_packet_vector_to_event(pktv);
+ }
+ if (odp_unlikely(i == 0)) {
+ odp_event_free_multi(events, num);
+ _odp_cos_queue_stats_add(cos, queue, 0, num);
+ return;
+ }
+ num_pktv = i;
+ num_enq = 0;
+ for (i = 0; i < num_pktv; i++) {
+ odp_packet_t *pkt_tbl;
+ int pktv_size = max_size;
+
+ pktv = pktv_tbl[i];
+
+ if (num_enq + max_size > num)
+ pktv_size = num - num_enq;
+
+ odp_packet_vector_tbl(pktv, &pkt_tbl);
+ odp_packet_from_event_multi(pkt_tbl, &events[num_enq], pktv_size);
+ odp_packet_vector_size_set(pktv, pktv_size);
+ num_enq += pktv_size;
+ }
+
+ ret = odp_queue_enq_multi(queue, event_tbl, num_pktv);
+ if (odp_likely(ret == num_pktv)) {
+ _odp_cos_queue_stats_add(cos, queue, num_enq, num - num_enq);
+ } else {
+ uint32_t enqueued;
+
+ if (ret < 0)
+ ret = 0;
+ enqueued = max_size * ret;
+ _odp_cos_queue_stats_add(cos, queue, enqueued, num - enqueued);
+ odp_event_free_multi(&event_tbl[ret], num_pktv - ret);
+ }
+}
/**
-@internal
-match_pmr_cos
+ * Enqueue packets into destination CoS
+ */
+static inline void _odp_cos_enq(uint16_t cos_id, odp_queue_t dst, odp_packet_t packets[], int num)
+{
+ _ODP_ASSERT(cos_id != CLS_COS_IDX_NONE);
+ _ODP_ASSERT(dst != ODP_QUEUE_INVALID);
+
+ cos_t *cos = _odp_cos_entry_from_idx(cos_id);
+
+ if (num < 2 || !cos->vector.enable) {
+ int ret = odp_queue_enq_multi(dst, (odp_event_t *)packets, num);
+
+ if (odp_unlikely(ret != num)) {
+ if (ret < 0)
+ ret = 0;
+
+ odp_packet_free_multi(&packets[ret], num - ret);
+ }
+ _odp_cos_queue_stats_add(cos, dst, ret, num - ret);
+ } else {
+ _odp_cos_vector_enq(dst, (odp_event_t *)packets, num, cos);
+ }
+}
-Match a PMR chain with a Packet and return matching CoS
-This function gets called recursively to check the chained PMR Term value
-with the packet.
+/**
+ * Enqueue all remaining packets in 'packets' array
+ */
+static inline void _odp_cls_enq_all(odp_packet_t packets[], int num)
+{
+ odp_packet_hdr_t *prev_hdr;
+ odp_packet_hdr_t *latest_hdr = packet_hdr(packets[num - 1]);
+
+ if (num < 2) {
+ _odp_cos_enq(latest_hdr->cos, latest_hdr->dst_queue, packets, 1);
+ return;
+ }
+
+ prev_hdr = packet_hdr(packets[num - 2]);
+
+ if (prev_hdr->dst_queue == latest_hdr->dst_queue && prev_hdr->cos == latest_hdr->cos) {
+ _odp_cos_enq(prev_hdr->cos, prev_hdr->dst_queue, packets, num);
+ } else {
+ _odp_cos_enq(prev_hdr->cos, prev_hdr->dst_queue, packets, num - 1);
+ _odp_cos_enq(latest_hdr->cos, latest_hdr->dst_queue, &packets[num - 1], 1);
+ }
+}
-**/
-cos_t *match_pmr_cos(cos_t *cos, const uint8_t *pkt_addr, pmr_t *pmr,
- odp_packet_hdr_t *hdr);
/**
-@internal
-CoS associated with L3 QoS value
+ * Enqueue packets into classifier destination queue
+ *
+ * Called by pktio devices for each received packet when classifier has been enabled. Postpones the
+ * actual enqueue operation and stores packets in 'packets' array until destination queue or CoS
+ * change, or 'last' flag is set.
+ *
+ * @param[out] packets Packet array to be enqueued
+ * @param num Number of handles in 'packets' array
+ * @param last Enqueue all packets
+ *
+ * @return Number of packets remaining in 'packets' array
+ */
+static inline int _odp_cls_enq(odp_packet_t packets[], int num, odp_bool_t last)
+{
+ odp_packet_hdr_t *prev_hdr, *latest_hdr;
-This function returns the CoS associated with L3 QoS value
-**/
-cos_t *match_qos_l3_cos(pmr_l3_cos_t *l3_cos, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr);
+ _ODP_ASSERT(num > 0);
-/**
-@internal
-CoS associated with L2 QoS value
+ if (last) {
+ _odp_cls_enq_all(packets, num);
+ return 0;
+ }
-This function returns the CoS associated with L2 QoS value
-**/
-cos_t *match_qos_l2_cos(pmr_l2_cos_t *l2_cos, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr);
-/**
-@internal
-Flow Signature Calculation
+ /* Only one packet, so postpone enqueue */
+ if (num < 2)
+ return num;
-This function calculates the Flow Signature for a packet based on
-CoS and updates in Packet Meta Data
-**/
-int update_flow_signature(uint8_t *pkt_addr, cos_t *cos);
+ prev_hdr = packet_hdr(packets[num - 2]);
+ latest_hdr = packet_hdr(packets[num - 1]);
-/**
-@internal
-Allocate a odp_pmr_t Handle
-*/
-odp_pmr_t alloc_pmr(pmr_t **pmr);
+ /* Postpone enqueue if destination queue and CoS have not changed */
+ if (prev_hdr->dst_queue == latest_hdr->dst_queue && prev_hdr->cos == latest_hdr->cos)
+ return num;
-/**
-@internal
-Pointer to pmr_t Handle
-This function checks for validity of odp_pmr_t Handle
-*/
-pmr_t *get_pmr_entry(odp_pmr_t pmr_id);
+ /* Perform previously postponed enqueue operation and move the last packet (different
+ * destination) in 'packets' array to be the first entry */
+ _odp_cos_enq(prev_hdr->cos, prev_hdr->dst_queue, packets, num - 1);
+ packets[0] = packets[num - 1];
-/**
-@internal
-Pointer to pmr_t Handle
-*/
-pmr_t *get_pmr_entry_internal(odp_pmr_t pmr_id);
+ return 1;
+}
-/**
-@internal
-Pointer to odp_cos_t Handle
-*/
-cos_t *get_cos_entry(odp_cos_t cos_id);
+/** Classification Internal function **/
/**
@internal
-Pointer to odp_cos_t Handle
-This function checks for validity of odp_cos_t Handle
-*/
-cos_t *get_cos_entry_internal(odp_cos_t cos_id);
+
+Packet Classifier
+
+Start function for Packet Classifier
+This function calls Classifier module internal functions for a given packet and
+selects destination queue and packet pool based on selected PMR and CoS.
+**/
+int _odp_cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
+ odp_pool_t *pool, odp_packet_hdr_t *pkt_hdr);
/**
-@internal
-Verify PMR with a Packet
+Packet IO classifier init
-This function goes through each PMR_TERM value in pmr_t structure and
-calls verification function for each term.Returns 1 if PMR matches or 0
-Otherwise.
+This function does initialization of classifier object associated with pktio.
+This function should be called during pktio initialization.
**/
-int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr);
+int _odp_pktio_classifier_init(pktio_entry_t *pktio);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index e7d84c904..89d89936c 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -12,32 +13,71 @@ extern "C" {
#endif
/*
- * Maximum number of pools
+ * Maximum number of supported CPU identifiers. The maximum supported CPU ID is
+ * CONFIG_NUM_CPU_IDS - 1. Note that the maximum number of ODP threads is
+ * defined by ODP_THREAD_COUNT_MAX.
*/
-#define ODP_CONFIG_POOLS 64
+#define CONFIG_NUM_CPU_IDS 256
+
+/*
+ * Maximum number of packet IO resources
+ */
+#define CONFIG_PKTIO_ENTRIES 64
+
+/*
+ * Pools reserved for internal usage, 1 for IPsec status events and one per packet
+ * I/O for TX completion
+ */
+#define CONFIG_INTERNAL_POOLS (1 + CONFIG_PKTIO_ENTRIES)
+
+/*
+ * Maximum number of pools.
+ */
+#define CONFIG_POOLS 128
+
+/*
+ * Queues reserved for ODP internal use
+ */
+#define CONFIG_INTERNAL_QUEUES 64
+
+/*
+ * Maximum number of plain ODP queues
+ */
+#define CONFIG_MAX_PLAIN_QUEUES 1024
+
+/*
+ * Maximum number of scheduled ODP queues
+ *
+ * Must be a power of two.
+ */
+#define CONFIG_MAX_SCHED_QUEUES 1024
/*
* Maximum number of queues
*/
-#define ODP_CONFIG_QUEUES 1024
+#define CONFIG_MAX_QUEUES (CONFIG_INTERNAL_QUEUES + \
+ CONFIG_MAX_PLAIN_QUEUES + \
+ CONFIG_MAX_SCHED_QUEUES)
/*
* Maximum number of ordered locks per queue
*/
-#define CONFIG_QUEUE_MAX_ORD_LOCKS 4
+#define CONFIG_QUEUE_MAX_ORD_LOCKS 2
/*
- * Maximum number of packet IO resources
+ * Maximum number of DMA sessions
*/
-#define ODP_CONFIG_PKTIO_ENTRIES 64
+#define CONFIG_MAX_DMA_SESSIONS 32
/*
- * Minimum buffer alignment
- *
- * This defines the minimum supported buffer alignment. Requests for values
- * below this will be rounded up to this value.
+ * Stashes reserved for internal usage
+ */
+#define CONFIG_INTERNAL_STASHES CONFIG_MAX_DMA_SESSIONS
+
+/*
+ * Maximum number of stashes
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MIN 64
+#define CONFIG_MAX_STASHES 2048
/*
* Maximum buffer alignment
@@ -45,7 +85,7 @@ extern "C" {
* This defines the maximum supported buffer alignment. Requests for values
* above this will fail.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
+#define CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
/*
* Default packet headroom
@@ -56,10 +96,10 @@ extern "C" {
* size e.g. due to HW or a protocol specific alignment requirement.
*
* @internal In odp-linux implementation:
- * The default value (66) allows a 1500-byte packet to be received into a single
- * segment with Ethernet offset alignment and room for some header expansion.
+ * The default value (128) allows a 1500-byte packet to be received into a
+ * single segment with room for some header expansion.
*/
-#define CONFIG_PACKET_HEADROOM 66
+#define CONFIG_PACKET_HEADROOM 128
/*
* Default packet tailroom
@@ -73,14 +113,9 @@ extern "C" {
#define CONFIG_PACKET_TAILROOM 0
/*
- * Maximum number of segments per packet
- */
-#define CONFIG_PACKET_MAX_SEGS 6
-
-/*
* Maximum packet segment size including head- and tailrooms
*/
-#define CONFIG_PACKET_SEG_SIZE (8 * 1024)
+#define CONFIG_PACKET_SEG_SIZE (60 * 1024)
/* Maximum data length in a segment
*
@@ -98,13 +133,26 @@ extern "C" {
* defined segment length (seg_len in odp_pool_param_t) will be rounded up into
* this value.
*/
-#define CONFIG_PACKET_SEG_LEN_MIN CONFIG_PACKET_MAX_SEG_LEN
+#define CONFIG_PACKET_SEG_LEN_MIN ((2 * 1024) + \
+ CONFIG_PACKET_HEADROOM + \
+ CONFIG_PACKET_TAILROOM)
-/* Maximum number of shared memory blocks.
+/*
+ * Number of shared memory blocks reserved for implementation internal use.
*
- * This the the number of separate SHM areas that can be reserved concurrently
+ * Each pool requires three blocks (buffers, ring, user area), 20 blocks
+ * are reserved for per ODP module global data and one block per packet I/O is
+ * reserved for TX completion usage.
*/
-#define ODP_CONFIG_SHM_BLOCKS (ODP_CONFIG_POOLS + 48)
+#define CONFIG_INTERNAL_SHM_BLOCKS ((CONFIG_POOLS * 3) + 20 + CONFIG_PKTIO_ENTRIES)
+
+/*
+ * Maximum number of shared memory blocks.
+ *
+ * This is the number of separate SHM blocks that an application can reserve
+ * concurrently.
+ */
+#define CONFIG_SHM_BLOCKS 64
/*
* Maximum event burst size
@@ -112,27 +160,53 @@ extern "C" {
* This controls the burst size on various enqueue, dequeue, etc calls. Large
* burst size improves throughput, but may degrade QoS (increase latency).
*/
-#define CONFIG_BURST_SIZE 16
+#define CONFIG_BURST_SIZE 32
/*
- * Maximum number of events in a pool
+ * Maximum number of events in a pool. Power of two minus one results optimal
+ * memory usage for the ring.
*/
-#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024)
+#define CONFIG_POOL_MAX_NUM ((1024 * 1024) - 1)
/*
* Maximum number of events in a thread local pool cache
*/
-#define CONFIG_POOL_CACHE_SIZE 256
+#define CONFIG_POOL_CACHE_MAX_SIZE 256
+
+/* Maximum packet vector size */
+#define CONFIG_PACKET_VECTOR_MAX_SIZE 256
+
+/* Enable pool statistics collection */
+#define CONFIG_POOL_STATISTICS 1
+
+/*
+ * Maximum number of IPsec SAs. The actual maximum number can be further
+ * limited by the number of sessions supported by the crypto subsystem and
+ * is reported by odp_ipsec_capability().
+ */
+#define CONFIG_IPSEC_MAX_NUM_SA 4000
/*
- * Size of the virtual address space pre-reserver for ISHM
+ * Use 128-bit atomics for timer implementation (if available)
*
- * This is just virtual space preallocation size, not memory allocation.
- * This address space is used by ISHM to map things at a common address in
- * all ODP threads (when the _ODP_ISHM_SINGLE_VA flag is used).
- * In bytes.
+ * On some platforms 128-bit atomic operations may be available, but the
+ * implementation of used 128-bit GCC built-in functions (e.g.
+ * __atomic_compare_exchange_n) utilizes expensive locking. Set to zero to use
+ * ODP lock based implementation instead.
*/
-#define ODP_CONFIG_ISHM_VA_PREALLOC_SZ (536870912L)
+#define CONFIG_TIMER_128BIT_ATOMICS 1
+
+/* Enable timer scan performance benchmark. This works with inline enabled. */
+#define CONFIG_TIMER_PROFILE_INLINE 0
+
+/* Maximum number of ML models that can be created or loaded. */
+#define CONFIG_ML_MAX_MODELS 4
+
+/* Maximum number of inputs for a ML model. */
+#define CONFIG_ML_MAX_INPUTS 4
+
+/* Maximum number of outputs for a ML model. */
+#define CONFIG_ML_MAX_OUTPUTS 4
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_crypto_internal.h b/platform/linux-generic/include/odp_crypto_internal.h
deleted file mode 100644
index f85b76eaa..000000000
--- a/platform/linux-generic/include/odp_crypto_internal.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_CRYPTO_INTERNAL_H_
-#define ODP_CRYPTO_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <openssl/des.h>
-#include <openssl/aes.h>
-#include <openssl/evp.h>
-
-#define MAX_IV_LEN 64
-#define OP_RESULT_MAGIC 0x91919191
-
-/** Forward declaration of session structure */
-typedef struct odp_crypto_generic_session odp_crypto_generic_session_t;
-
-/**
- * Algorithm handler function prototype
- */
-typedef
-odp_crypto_alg_err_t (*crypto_func_t)(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session);
-
-/**
- * Per crypto session data structure
- */
-struct odp_crypto_generic_session {
- struct odp_crypto_generic_session *next;
-
- /* Session creation parameters */
- odp_crypto_session_param_t p;
-
- odp_bool_t do_cipher_first;
-
- struct {
- /* Copy of session IV data */
- uint8_t iv_data[MAX_IV_LEN];
-
- union {
- struct {
- DES_key_schedule ks1;
- DES_key_schedule ks2;
- DES_key_schedule ks3;
- } des;
- struct {
- AES_KEY key;
- } aes;
- struct {
- EVP_CIPHER_CTX *ctx;
- } aes_gcm;
- } data;
- crypto_func_t func;
- } cipher;
-
- struct {
- union {
- struct {
- uint8_t key[16];
- uint32_t bytes;
- } md5;
- struct {
- uint8_t key[32];
- uint32_t bytes;
- } sha256;
- } data;
- crypto_func_t func;
- } auth;
-};
-
-/**
- * Per packet operation result
- */
-typedef struct odp_crypto_generic_op_result {
- uint32_t magic;
- odp_crypto_op_result_t result;
-} odp_crypto_generic_op_result_t;
-
-/**
- * Per session creation operation result
- */
-typedef struct odp_crypto_generic_session_result {
- odp_crypto_ses_create_err_t rc;
- odp_crypto_session_t session;
-} odp_crypto_generic_session_result_t;
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_debug_internal.h b/platform/linux-generic/include/odp_debug_internal.h
index 02ae87a90..d1fc0d0ba 100644
--- a/platform/linux-generic/include/odp_debug_internal.h
+++ b/platform/linux-generic/include/odp_debug_internal.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -16,72 +17,58 @@
#ifndef ODP_DEBUG_INTERNAL_H_
#define ODP_DEBUG_INTERNAL_H_
+#include <odp/autoheader_external.h>
+
+#include <odp/api/debug.h>
+
+#include <odp/api/plat/debug_inlines.h>
+
#include <stdio.h>
#include <stdlib.h>
-#include <odp/api/debug.h>
-#include <odp_internal.h>
+
#ifdef __cplusplus
extern "C" {
#endif
-/** @addtogroup odp_ver_abt_log_dbg
- * @{
- */
+#pragma GCC diagnostic push
-/**
- * Runtime assertion-macro - aborts if 'cond' is false.
- */
-#define ODP_ASSERT(cond) \
- do { if ((ODP_DEBUG == 1) && (!(cond))) { \
- ODP_ERR("%s\n", #cond); \
- odp_global_data.abort_fn(); } \
- } while (0)
+#ifdef __clang__
+#pragma GCC diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
+#endif
+
+/* Debug level configure option. Zero is the highest level. Value of N prints debug messages from
+ * level 0 to N. */
+#define CONFIG_DEBUG_LEVEL 0
/**
* This macro is used to indicate when a given function is not implemented
*/
#define ODP_UNIMPLEMENTED() \
- odp_global_data.log_fn(ODP_LOG_UNIMPLEMENTED, \
+ _ODP_LOG_FN(ODP_LOG_UNIMPLEMENTED, \
"%s:%d:The function %s() is not implemented\n", \
__FILE__, __LINE__, __func__)
-/**
- * Log debug message if ODP_DEBUG_PRINT flag is set.
+
+/*
+ * Print debug message to log, if ODP_DEBUG_PRINT flag is set and CONFIG_DEBUG_LEVEL is high enough.
*/
-#define ODP_DBG(fmt, ...) \
+#define ODP_DBG_LVL(level, ...) \
do { \
- if (ODP_DEBUG_PRINT == 1) \
- ODP_LOG(ODP_LOG_DBG, fmt, ##__VA_ARGS__);\
+ if (ODP_DEBUG_PRINT == 1 && CONFIG_DEBUG_LEVEL >= (level)) \
+ __extension__ ({ \
+ _ODP_LOG(ODP_LOG_DBG, "DBG", ##__VA_ARGS__); \
+ }); \
} while (0)
-/**
- * Log error message.
- */
-#define ODP_ERR(fmt, ...) \
- ODP_LOG(ODP_LOG_ERR, fmt, ##__VA_ARGS__)
-
-/**
- * Log abort message and then stop execution (by default call abort()).
- * This function should not return.
+/*
+ * Same as ODP_DBG_LVL() but does not add file/line/function name prefix
*/
-#define ODP_ABORT(fmt, ...) \
+#define ODP_DBG_RAW(level, ...) \
do { \
- ODP_LOG(ODP_LOG_ABORT, fmt, ##__VA_ARGS__); \
- odp_global_data.abort_fn(); \
+ if (ODP_DEBUG_PRINT == 1 && CONFIG_DEBUG_LEVEL >= (level)) \
+ _ODP_LOG_FN(ODP_LOG_DBG, ##__VA_ARGS__); \
} while (0)
-/**
- * ODP LOG macro.
- */
-#define ODP_LOG(level, fmt, ...) \
- odp_global_data.log_fn(level, "%s:%d:%s():" fmt, __FILE__, \
- __LINE__, __func__, ##__VA_ARGS__)
-
-/**
- * Log print message when the application calls one of the ODP APIs
- * specifically for dumping internal data.
- */
-#define ODP_PRINT(fmt, ...) \
- odp_global_data.log_fn(ODP_LOG_PRINT, " " fmt, ##__VA_ARGS__)
+#pragma GCC diagnostic pop
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_errno_define.h b/platform/linux-generic/include/odp_errno_define.h
index 94c30e793..3f97618b0 100644
--- a/platform/linux-generic/include/odp_errno_define.h
+++ b/platform/linux-generic/include/odp_errno_define.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, Linaro Limited
+/* Copyright (c) 2017-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,7 +17,7 @@
extern "C" {
#endif
-extern __thread int __odp_errno;
+extern __thread int _odp_errno;
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_ethtool_rss.h b/platform/linux-generic/include/odp_ethtool_rss.h
new file mode 100644
index 000000000..66221aa51
--- /dev/null
+++ b/platform/linux-generic/include/odp_ethtool_rss.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ETHTOOL_RSS_H_
+#define ODP_ETHTOOL_RSS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/packet_io.h>
+
+/**
+ * Get enabled RSS hash protocols of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param hash_proto[out] Hash protocols
+ *
+ * @returns Number enabled hash protocols
+ */
+int _odp_rss_conf_get_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto);
+
+/**
+ * Get supported RSS hash protocols of a packet socket
+ *
+ * Can be both read and modified.
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param hash_proto[out] Hash protocols
+ *
+ * @returns Number of supported hash protocols
+ */
+int _odp_rss_conf_get_supported_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto);
+
+/**
+ * Set RSS hash protocols of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param hash_proto Hash protocols
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int _odp_rss_conf_set_fd(int fd, const char *name,
+ const odp_pktin_hash_proto_t *proto);
+
+/**
+ * Print enabled RSS hash protocols
+ *
+ * @param hash_proto Hash protocols
+ */
+void _odp_rss_conf_print(const odp_pktin_hash_proto_t *hash_proto);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_ETHTOOL_RSS_H_ */
diff --git a/platform/linux-generic/include/odp_ethtool_stats.h b/platform/linux-generic/include/odp_ethtool_stats.h
new file mode 100644
index 000000000..2888d1c81
--- /dev/null
+++ b/platform/linux-generic/include/odp_ethtool_stats.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ETHTOOL_H_
+#define ODP_ETHTOOL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+#include <odp/api/packet_io_stats.h>
+
+/**
+ * Get ethtool statistics of a packet socket
+ */
+int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats);
+
+int _odp_ethtool_extra_stat_info(int fd, const char *name, odp_pktio_extra_stat_info_t info[],
+ int num);
+int _odp_ethtool_extra_stats(int fd, const char *name, uint64_t stats[], int num);
+int _odp_ethtool_extra_stat_counter(int fd, const char *name, uint32_t id, uint64_t *stat);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_ETHTOOL_H_ */
diff --git a/platform/linux-generic/include/odp_event_internal.h b/platform/linux-generic/include/odp_event_internal.h
new file mode 100644
index 000000000..1b85d64fc
--- /dev/null
+++ b/platform/linux-generic/include/odp_event_internal.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_INTERNAL_H_
+#define ODP_EVENT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/debug.h>
+#include <odp/api/event.h>
+#include <odp/api/pool_types.h>
+#include <odp/api/std_types.h>
+
+#include <odp_config_internal.h>
+
+/* Combined pool and event index */
+typedef union _odp_event_index_t {
+ uint32_t u32;
+
+ struct {
+ uint32_t pool :8;
+ uint32_t event :24;
+ };
+} _odp_event_index_t;
+
+/* Check that pool index fit into bit field */
+ODP_STATIC_ASSERT(CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
+
+/* Check that buffer index fit into bit field */
+ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
+
+/* Type size limits number of flow IDs supported */
+#define BUF_HDR_MAX_FLOW_ID 255
+
+/* Common header for all event types without alignment constraints. */
+typedef struct _odp_event_hdr_t {
+ /* Initial buffer data pointer */
+ uint8_t *base_data;
+
+ /* Pool handle */
+ odp_pool_t pool;
+
+ /* --- Mostly read only data --- */
+
+ /* Initial buffer tail pointer and endmark location (if enabled) */
+ uint8_t *buf_end;
+
+ /* Combined pool and event index */
+ _odp_event_index_t index;
+
+ /* Pool type */
+ int8_t type;
+
+ /* Event type. Maybe different than pool type (crypto compl event) */
+ int8_t event_type;
+
+ /* Event subtype */
+ int8_t subtype;
+
+ /* Event flow id */
+ uint8_t flow_id;
+
+} _odp_event_hdr_t;
+
+static inline odp_event_t _odp_event_from_hdr(_odp_event_hdr_t *hdr)
+{
+ return (odp_event_t)hdr;
+}
+
+static inline _odp_event_hdr_t *_odp_event_hdr(odp_event_t event)
+{
+ return (_odp_event_hdr_t *)(uintptr_t)event;
+}
+
+static inline void _odp_event_type_set(odp_event_t event, int ev)
+{
+ _odp_event_hdr(event)->event_type = ev;
+}
+
+static inline uint64_t *_odp_event_endmark_get_ptr(odp_event_t event)
+{
+ return (uint64_t *)(uintptr_t)_odp_event_hdr(event)->buf_end;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_event_validation_internal.h b/platform/linux-generic/include/odp_event_validation_internal.h
new file mode 100644
index 000000000..f4ac16f31
--- /dev/null
+++ b/platform/linux-generic/include/odp_event_validation_internal.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_EVENT_VALIDATION_INTERNAL_H_
+#define ODP_EVENT_VALIDATION_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/autoheader_external.h>
+
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/event_validation_external.h>
+
+#include <odp_event_internal.h>
+
+#include <stdint.h>
+
+#if _ODP_EVENT_VALIDATION
+
+#define _ODP_EV_ENDMARK_VAL 0xDEADBEEFDEADBEEF
+#define _ODP_EV_ENDMARK_SIZE (sizeof(uint64_t))
+
+static inline void _odp_event_endmark_set(odp_event_t event)
+{
+ uint64_t *endmark_ptr;
+
+ endmark_ptr = _odp_event_endmark_get_ptr(event);
+ *endmark_ptr = _ODP_EV_ENDMARK_VAL;
+}
+
+#else
+
+#define _ODP_EV_ENDMARK_VAL 0
+#define _ODP_EV_ENDMARK_SIZE 0
+
+static inline void _odp_event_endmark_set(odp_event_t event ODP_UNUSED)
+{
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/platform/linux-generic/include/odp_event_vector_internal.h b/platform/linux-generic/include/odp_event_vector_internal.h
new file mode 100644
index 000000000..39f9daf04
--- /dev/null
+++ b/platform/linux-generic/include/odp_event_vector_internal.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP event vector descriptor - implementation internal
+ */
+
+#ifndef ODP_EVENT_VECTOR_INTERNAL_H_
+#define ODP_EVENT_VECTOR_INTERNAL_H_
+
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+#include <odp/api/packet.h>
+
+#include <odp/api/plat/event_vector_inline_types.h>
+
+#include <odp_event_internal.h>
+
+#include <stdint.h>
+
+/**
+ * Internal event vector header
+ */
+typedef struct ODP_ALIGNED_CACHE odp_event_vector_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
+
+ /* User area pointer */
+ void *uarea_addr;
+
+ /* Event vector size */
+ uint32_t size;
+
+ /* Flags */
+ _odp_event_vector_flags_t flags;
+
+ /* Vector of packet handles */
+ odp_packet_t packet[];
+
+} odp_event_vector_hdr_t;
+
+/* Vector header size is critical for performance. Ensure that it does not accidentally
+ * grow over cache line size. */
+ODP_STATIC_ASSERT(sizeof(odp_event_vector_hdr_t) <= ODP_CACHE_LINE_SIZE,
+ "EVENT_VECTOR_HDR_SIZE_ERROR");
+
+/**
+ * Return the vector header
+ */
+static inline odp_event_vector_hdr_t *_odp_packet_vector_hdr(odp_packet_vector_t pktv)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)pktv;
+}
+
+/**
+ * Return the event header
+ */
+static inline _odp_event_hdr_t *_odp_packet_vector_to_event_hdr(odp_packet_vector_t pktv)
+{
+ return (_odp_event_hdr_t *)(uintptr_t)&_odp_packet_vector_hdr(pktv)->event_hdr;
+}
+
+/**
+ * Free packet vector and contained packets
+ */
+static inline void _odp_packet_vector_free_full(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ if (pktv_hdr->size)
+ odp_packet_free_multi(pktv_hdr->packet, pktv_hdr->size);
+
+ odp_packet_vector_free(pktv);
+}
+
+#endif /* ODP_EVENT_VECTOR_INTERNAL_H_ */
diff --git a/platform/linux-generic/include/_fdserver_internal.h b/platform/linux-generic/include/odp_fdserver_internal.h
index 22b280287..0b0a9bb0e 100644
--- a/platform/linux-generic/include/_fdserver_internal.h
+++ b/platform/linux-generic/include/odp_fdserver_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -12,7 +12,7 @@ extern "C" {
#endif
/*
- * the following enum defines the different contextes by which the
+ * the following enum defines the different contexts by which the
* FD server may be used: In the FD server, the keys used to store/retrieve
* a file descriptor are actually context based:
* Both the context and the key are stored at fd registration time,
diff --git a/platform/linux-generic/include/odp_forward_typedefs_internal.h b/platform/linux-generic/include/odp_forward_typedefs_internal.h
index f8832f777..350ad6a36 100644
--- a/platform/linux-generic/include/odp_forward_typedefs_internal.h
+++ b/platform/linux-generic/include/odp_forward_typedefs_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,7 +10,7 @@
* ODP forward typedefs - implementation internal
*
* This needs to be a separate file because it is needed by both
- * odp_queue_internal.h and odp_buffer_internal.h and clang prohibits forward
+ * odp_queue_internal.h and odp_queue_lf.h and clang prohibits forward
* "redefining" typedefs. Note that this file can be extended with additional
* forward typedefs as needed.
*/
@@ -22,8 +22,7 @@
extern "C" {
#endif
-typedef struct odp_buffer_hdr_t odp_buffer_hdr_t;
-typedef union queue_entry_u queue_entry_t;
+typedef struct queue_entry_s queue_entry_t;
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_global_data.h b/platform/linux-generic/include/odp_global_data.h
new file mode 100644
index 000000000..2a87192df
--- /dev/null
+++ b/platform/linux-generic/include/odp_global_data.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_GLOBAL_DATA_H_
+#define ODP_GLOBAL_DATA_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/cpumask.h>
+#include <odp/api/init.h>
+#include <odp/api/random.h>
+#include <odp/api/system_info.h>
+#include <odp/api/std_types.h>
+
+#include <odp_config_internal.h>
+
+#include <libconfig.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#define MODEL_STR_SIZE 128
+#define UID_MAXLEN 30
+
+typedef struct {
+ uint64_t cpu_hz_max[CONFIG_NUM_CPU_IDS];
+ uint64_t cpu_hz[CONFIG_NUM_CPU_IDS];
+ uint64_t default_cpu_hz_max;
+ uint64_t default_cpu_hz;
+ uint64_t page_size;
+ int cache_line_size;
+ uint8_t cpu_hz_static;
+ uint8_t cpu_constant_tsc;
+ odp_cpu_arch_t cpu_arch;
+ odp_cpu_arch_isa_t cpu_isa_sw;
+ odp_cpu_arch_isa_t cpu_isa_hw;
+ char cpu_arch_str[128];
+ char model_str[CONFIG_NUM_CPU_IDS][MODEL_STR_SIZE];
+} system_info_t;
+
+typedef struct {
+ uint64_t default_huge_page_size;
+ char *default_huge_page_dir;
+} hugepage_info_t;
+
+/* Read-only global data. Members should not be modified after global init
+ * to enable process mode support. */
+typedef struct odp_global_data_ro_t {
+ odp_init_t init_param;
+ /* directory for odp mapped files */
+ char *shm_dir;
+ /* overload default with env */
+ int shm_dir_from_env;
+ uint64_t shm_max_memory;
+ uint64_t shm_max_size;
+ int shm_single_va;
+ pid_t main_pid;
+ pid_t fdserver_pid;
+ char uid[UID_MAXLEN];
+ system_info_t system_info;
+ hugepage_info_t hugepage_info;
+ odp_cpumask_t all_cpus;
+ odp_cpumask_t control_cpus;
+ odp_cpumask_t worker_cpus;
+ int num_cpus_installed;
+ uint8_t has_config_rt;
+ config_t libconfig_default;
+ config_t libconfig_runtime;
+
+ /* Disabled features during global init */
+ struct {
+ uint8_t compress;
+ uint8_t crypto;
+ uint8_t dma;
+ uint8_t ipsec;
+ uint8_t stash;
+ uint8_t traffic_mngr;
+ uint8_t ml;
+
+ } disable;
+
+} odp_global_data_ro_t;
+
+/* Modifiable global data. Memory region is shared and synchronized amongst all
+ * worker processes. */
+typedef struct odp_global_data_rw_t {
+ odp_bool_t dpdk_initialized;
+ odp_bool_t inline_timers;
+ odp_bool_t schedule_configured;
+
+} odp_global_data_rw_t;
+
+extern odp_global_data_ro_t odp_global_ro;
+extern odp_global_data_rw_t *odp_global_rw;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_init_internal.h b/platform/linux-generic/include/odp_init_internal.h
new file mode 100644
index 000000000..ca5d68c87
--- /dev/null
+++ b/platform/linux-generic/include/odp_init_internal.h
@@ -0,0 +1,115 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_INIT_INTERNAL_H_
+#define ODP_INIT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/init.h>
+#include <odp/api/thread.h>
+
+int _odp_cpumask_init_global(const odp_init_t *params);
+int _odp_cpumask_term_global(void);
+
+int _odp_system_info_init(void);
+int _odp_system_info_term(void);
+
+int _odp_thread_init_global(void);
+int _odp_thread_init_local(odp_thread_type_t type);
+int _odp_thread_term_local(void);
+int _odp_thread_term_global(void);
+
+int _odp_pcapng_init_global(void);
+int _odp_pcapng_term_global(void);
+
+int _odp_pool_init_global(void);
+int _odp_pool_init_local(void);
+int _odp_pool_term_global(void);
+int _odp_pool_term_local(void);
+
+int _odp_event_validation_init_global(void);
+int _odp_event_validation_term_global(void);
+
+int _odp_queue_init_global(void);
+int _odp_queue_term_global(void);
+
+int _odp_schedule_init_global(void);
+int _odp_schedule_term_global(void);
+
+int _odp_pktio_init_global(void);
+int _odp_pktio_term_global(void);
+int _odp_pktio_init_local(void);
+
+int _odp_classification_init_global(void);
+int _odp_classification_term_global(void);
+
+int _odp_queue_init_global(void);
+int _odp_queue_term_global(void);
+
+int _odp_random_init_local(void);
+int _odp_random_term_local(void);
+
+int _odp_crypto_init_global(void);
+int _odp_crypto_term_global(void);
+int _odp_crypto_init_local(void);
+int _odp_crypto_term_local(void);
+
+int _odp_comp_init_global(void);
+int _odp_comp_term_global(void);
+
+int _odp_timer_init_global(const odp_init_t *params);
+int _odp_timer_init_local(void);
+int _odp_timer_term_global(void);
+int _odp_timer_term_local(void);
+
+int _odp_time_init_global(void);
+int _odp_time_term_global(void);
+
+int _odp_tm_init_global(void);
+int _odp_tm_term_global(void);
+
+int _odp_int_name_tbl_init_global(void);
+int _odp_int_name_tbl_term_global(void);
+
+int _odp_fdserver_init_global(void);
+int _odp_fdserver_term_global(void);
+
+int _odp_ishm_init_global(const odp_init_t *init);
+int _odp_ishm_init_local(void);
+int _odp_ishm_term_global(void);
+int _odp_ishm_term_local(void);
+
+int _odp_ipsec_init_global(void);
+int _odp_ipsec_term_global(void);
+
+int _odp_ipsec_sad_init_global(void);
+int _odp_ipsec_sad_term_global(void);
+
+int _odp_ipsec_events_init_global(void);
+int _odp_ipsec_events_term_global(void);
+
+int _odp_cpu_cycles_init_global(void);
+
+int _odp_hash_init_global(void);
+int _odp_hash_term_global(void);
+
+int _odp_stash_init_global(void);
+int _odp_stash_term_global(void);
+
+int _odp_dma_init_global(void);
+int _odp_dma_term_global(void);
+
+int _odp_ml_init_global(void);
+int _odp_ml_term_global(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h
deleted file mode 100644
index e1267cff7..000000000
--- a/platform/linux-generic/include/odp_internal.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP HW system information
- */
-
-#ifndef ODP_INTERNAL_H_
-#define ODP_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/init.h>
-#include <odp/api/cpumask.h>
-#include <odp/api/thread.h>
-#include <odp_errno_define.h>
-#include <stdio.h>
-#include <sys/types.h>
-
-#define MAX_CPU_NUMBER 128
-
-typedef struct {
- uint64_t cpu_hz_max[MAX_CPU_NUMBER];
- uint64_t page_size;
- int cache_line_size;
- int cpu_count;
- char cpu_arch_str[128];
- char model_str[MAX_CPU_NUMBER][128];
-} system_info_t;
-
-typedef struct {
- uint64_t default_huge_page_size;
- char *default_huge_page_dir;
-} hugepage_info_t;
-
-struct odp_global_data_s {
- pid_t main_pid;
- odp_log_func_t log_fn;
- odp_abort_func_t abort_fn;
- system_info_t system_info;
- hugepage_info_t hugepage_info;
- odp_cpumask_t control_cpus;
- odp_cpumask_t worker_cpus;
- int num_cpus_installed;
-};
-
-enum init_stage {
- NO_INIT = 0, /* No init stages completed */
- CPUMASK_INIT,
- TIME_INIT,
- SYSINFO_INIT,
- FDSERVER_INIT,
- ISHM_INIT,
- THREAD_INIT,
- POOL_INIT,
- QUEUE_INIT,
- SCHED_INIT,
- PKTIO_INIT,
- TIMER_INIT,
- CRYPTO_INIT,
- CLASSIFICATION_INIT,
- TRAFFIC_MNGR_INIT,
- NAME_TABLE_INIT,
- ALL_INIT /* All init stages completed */
-};
-
-extern struct odp_global_data_s odp_global_data;
-
-int _odp_term_global(enum init_stage stage);
-int _odp_term_local(enum init_stage stage);
-
-int odp_cpumask_init_global(const odp_init_t *params);
-int odp_cpumask_term_global(void);
-
-int odp_system_info_init(void);
-int odp_system_info_term(void);
-
-int odp_thread_init_global(void);
-int odp_thread_init_local(odp_thread_type_t type);
-int odp_thread_term_local(void);
-int odp_thread_term_global(void);
-
-int odp_pool_init_global(void);
-int odp_pool_init_local(void);
-int odp_pool_term_global(void);
-int odp_pool_term_local(void);
-
-int odp_pktio_init_global(void);
-int odp_pktio_term_global(void);
-int odp_pktio_init_local(void);
-
-int odp_classification_init_global(void);
-int odp_classification_term_global(void);
-
-int odp_queue_init_global(void);
-int odp_queue_term_global(void);
-
-int odp_crypto_init_global(void);
-int odp_crypto_term_global(void);
-
-int odp_timer_init_global(void);
-int odp_timer_term_global(void);
-int odp_timer_disarm_all(void);
-
-int odp_time_init_global(void);
-int odp_time_term_global(void);
-
-int odp_tm_init_global(void);
-int odp_tm_term_global(void);
-
-int _odp_int_name_tbl_init_global(void);
-int _odp_int_name_tbl_term_global(void);
-
-int _odp_fdserver_init_global(void);
-int _odp_fdserver_term_global(void);
-
-int _odp_ishm_init_global(void);
-int _odp_ishm_init_local(void);
-int _odp_ishm_term_global(void);
-int _odp_ishm_term_local(void);
-
-int cpuinfo_parser(FILE *file, system_info_t *sysinfo);
-uint64_t odp_cpu_hz_current(int id);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_ipsec_internal.h b/platform/linux-generic/include/odp_ipsec_internal.h
new file mode 100644
index 000000000..b97aa7031
--- /dev/null
+++ b/platform/linux-generic/include/odp_ipsec_internal.h
@@ -0,0 +1,413 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2018, 2020-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP internal IPsec routines
+ */
+
+#ifndef ODP_IPSEC_INTERNAL_H_
+#define ODP_IPSEC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/atomic.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/event.h>
+#include <odp/api/ipsec.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/std_types.h>
+
+#include <odp/api/plat/strong_types.h>
+
+#include <protocols/ip.h>
+#include <stdint.h>
+
+/** @addtogroup odp_ipsec
+ * @{
+ */
+
+typedef ODP_HANDLE_T(ipsec_status_t);
+
+#define ODP_IPSEC_STATUS_INVALID \
+ _odp_cast_scalar(ipsec_status_t, 0xffffffff)
+
+typedef struct ipsec_sa_s ipsec_sa_t;
+
+/**
+ * @internal Get ipsec_status handle from event
+ *
+ * Converts an ODP_EVENT_IPSEC_STATUS type event to an IPsec status event.
+ *
+ * @param ev Event handle
+ *
+ * @return IPsec status handle
+ *
+ * @see odp_event_type()
+ */
+ipsec_status_t _odp_ipsec_status_from_event(odp_event_t ev);
+
+/**
+ * @internal Free IPsec status event
+ *
+ * Frees the IPsec status event back into the ipsec_status pool it was
+ * allocated from.
+ *
+ * @param status IPsec status handle
+ */
+void _odp_ipsec_status_free(ipsec_status_t status);
+
+/**
+ * @internal Send ODP_IPSEC_STATUS event
+ *
+ * Sends the ipsec_status event using provided information
+ *
+ * @param queue destination queue
+ * @param id status id
+ * @param sa SA respective to the operation
+ * @param result status value
+ * @param warn generated warning
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int _odp_ipsec_status_send(odp_queue_t queue,
+ odp_ipsec_status_id_t id,
+ odp_ipsec_sa_t sa,
+ int result,
+ odp_ipsec_warn_t warn);
+
+#define IPSEC_MAX_IV_LEN 16 /**< Maximum cipher IV length in bytes */
+
+#define IPSEC_MAX_SALT_LEN 4 /**< Maximum salt length in bytes */
+
+#define CBC_SALT_LEN 8
+#define CBC_IV_LEN (CBC_SALT_LEN + sizeof(uint64_t))
+
+#define IPSEC_SEQ_HI_LEN 4 /**< ESN Higher bits length in bytes */
+
+/* The minimum supported AR window size */
+#define IPSEC_AR_WIN_SIZE_MIN 32
+
+/* The maximum supported AR window size */
+#define IPSEC_AR_WIN_SIZE_MAX 4096
+
+/* For a 64-bit bucket size */
+#define IPSEC_AR_WIN_BUCKET_BITS 6
+#define IPSEC_AR_WIN_BUCKET_SIZE (1 << IPSEC_AR_WIN_BUCKET_BITS)
+#define IPSEC_AR_WIN_BITLOC_MASK (IPSEC_AR_WIN_BUCKET_SIZE - 1)
+
+/*
+ * We need one extra bucket in addition to the buckets that contain
+ * part of the window.
+ */
+#define IPSEC_AR_WIN_NUM_BUCKETS(window_size) \
+ (((window_size) - 1) / IPSEC_AR_WIN_BUCKET_SIZE + 2)
+
+/* Maximum number of buckets */
+#define IPSEC_AR_WIN_BUCKET_MAX \
+ IPSEC_AR_WIN_NUM_BUCKETS(IPSEC_AR_WIN_SIZE_MAX)
+
+struct ipsec_sa_s {
+ odp_atomic_u32_t state ODP_ALIGNED_CACHE;
+
+ /*
+ * State that gets updated very frequently. Grouped separately
+ * to avoid false cache line sharing with other data.
+ */
+ struct ODP_ALIGNED_CACHE {
+ /* Statistics for soft/hard expiration */
+ odp_atomic_u64_t bytes;
+ odp_atomic_u64_t packets;
+
+ union {
+ struct {
+ /* AR window lock */
+ odp_spinlock_t lock;
+
+ /* AR window top sequence number */
+ odp_atomic_u64_t wintop_seq;
+
+ /* AR window bucket array */
+ uint64_t bucket_arr[IPSEC_AR_WIN_BUCKET_MAX];
+ } in;
+
+ struct {
+ /*
+ * 64-bit sequence number that is also used as
+ * CTR/GCM IV
+ */
+ odp_atomic_u64_t seq;
+ } out;
+ };
+ } hot;
+
+ uint32_t ipsec_sa_idx;
+ odp_ipsec_sa_t ipsec_sa_hdl;
+
+ odp_ipsec_protocol_t proto;
+ uint32_t spi;
+
+ odp_ipsec_mode_t mode;
+
+ /* Limits */
+ uint64_t soft_limit_bytes;
+ uint64_t soft_limit_packets;
+ uint64_t hard_limit_bytes;
+ uint64_t hard_limit_packets;
+
+ odp_crypto_session_t session;
+ void *context;
+ odp_queue_t queue;
+
+ uint32_t icv_len;
+ uint32_t esp_iv_len;
+ uint32_t esp_pad_mask;
+
+ union {
+ uint8_t salt[IPSEC_MAX_SALT_LEN];
+ uint8_t cbc_salt[CBC_SALT_LEN];
+ };
+ uint32_t salt_length;
+ odp_ipsec_lookup_mode_t lookup_mode;
+
+ union {
+ unsigned flags;
+ struct {
+ unsigned inbound : 1;
+ unsigned dec_ttl : 1;
+ unsigned copy_dscp : 1;
+ unsigned copy_df : 1;
+ unsigned copy_flabel : 1;
+ unsigned aes_ctr_iv : 1;
+ unsigned udp_encap : 1;
+ unsigned esn : 1;
+ unsigned insert_seq_hi : 1;
+
+ /* Only for outbound */
+ unsigned use_counter_iv : 1;
+ unsigned use_cbc_iv : 1;
+ unsigned tun_ipv4 : 1;
+
+ /* Only for inbound */
+ unsigned antireplay : 1;
+ };
+ };
+
+ union {
+ struct {
+ odp_ipsec_ip_version_t lookup_ver;
+
+ /* Anti-replay window management. */
+ struct {
+ /* Number of buckets for AR window */
+ uint16_t num_buckets;
+
+ /* AR window size */
+ uint32_t win_size;
+ } ar;
+
+ union {
+ odp_u32be_t lookup_dst_ipv4;
+ uint8_t lookup_dst_ipv6[_ODP_IPV6ADDR_LEN];
+ };
+ } in;
+
+ struct {
+ odp_ipsec_frag_mode_t frag_mode;
+ odp_atomic_u32_t mtu;
+
+ union {
+ struct {
+ odp_ipsec_ipv4_param_t param;
+ odp_u32be_t src_ip;
+ odp_u32be_t dst_ip;
+ } tun_ipv4;
+ struct {
+ odp_ipsec_ipv6_param_t param;
+ uint8_t src_ip[_ODP_IPV6ADDR_LEN];
+ uint8_t dst_ip[_ODP_IPV6ADDR_LEN];
+ } tun_ipv6;
+ };
+ } out;
+ };
+
+ struct {
+ odp_atomic_u64_t proto_err;
+ odp_atomic_u64_t auth_err;
+ odp_atomic_u64_t antireplay_err;
+ odp_atomic_u64_t alg_err;
+ odp_atomic_u64_t mtu_err;
+ odp_atomic_u64_t hard_exp_bytes_err;
+ odp_atomic_u64_t hard_exp_pkts_err;
+
+		/*
+		 * Track error packets and bytes after the lifetime check is
+		 * done. Required since the stats tracking lifetime is used
+		 * for SA success packet stats.
+		 */
+ */
+ odp_atomic_u64_t post_lifetime_err_pkts;
+ odp_atomic_u64_t post_lifetime_err_bytes;
+ } stats;
+
+ uint32_t next_sa;
+
+ /* Data stored solely for odp_ipsec_sa_info() */
+ struct {
+ odp_cipher_alg_t cipher_alg;
+ uint32_t cipher_key_len;
+ uint32_t cipher_key_extra_len;
+
+ odp_auth_alg_t auth_alg;
+ uint32_t auth_key_len;
+ uint32_t auth_key_extra_len;
+
+ uint32_t icv_len;
+ uint32_t context_len;
+ union {
+ struct {
+ uint32_t antireplay_ws;
+ } in;
+ struct{
+ uint32_t mtu;
+ } out;
+ };
+ } sa_info;
+
+ /*
+ * Flag to check if the SA soft expiry status event was already
+ * sent. This field is applicable only for the soft expiry status
+ * event that gets generated for IPsec SAs configured in inline
+ * outbound mode.
+ */
+ odp_atomic_u32_t soft_expiry_notified;
+};
+
+/**
+ * IPSEC Security Association (SA) lookup parameters
+ */
+typedef struct odp_ipsec_sa_lookup_s {
+ /** IPSEC protocol: ESP or AH */
+ odp_ipsec_protocol_t proto;
+
+ /** SPI value */
+ uint32_t spi;
+
+ /** IP protocol version */
+ odp_ipsec_ip_version_t ver;
+
+ /** IP destination address (NETWORK ENDIAN) */
+ void *dst_addr;
+} ipsec_sa_lookup_t;
+
+/** IPSEC AAD */
+typedef struct ODP_PACKED {
+	/** Security Parameter Index */
+ odp_u32be_t spi;
+
+	/** Sequence Number */
+ union {
+ odp_u32be_t seq_no;
+ odp_u64be_t seq_no64;
+ };
+} ipsec_aad_t;
+
+/* Return IV length required for the cipher for IPsec use */
+uint32_t _odp_ipsec_cipher_iv_len(odp_cipher_alg_t cipher);
+
+/* Return digest length required for the cipher for IPsec use */
+uint32_t _odp_ipsec_auth_digest_len(odp_auth_alg_t auth);
+
+/* Return the maximum number of SAs supported by the implementation */
+uint32_t _odp_ipsec_max_num_sa(void);
+
+/*
+ * Get SA entry from handle without obtaining a reference
+ */
+ipsec_sa_t *_odp_ipsec_sa_entry_from_hdl(odp_ipsec_sa_t sa);
+
+/**
+ * Obtain SA reference
+ */
+ipsec_sa_t *_odp_ipsec_sa_use(odp_ipsec_sa_t sa);
+
+/**
+ * Release SA reference
+ */
+void _odp_ipsec_sa_unuse(ipsec_sa_t *ipsec_sa);
+
+/**
+ * Lookup SA corresponding to inbound packet pkt
+ */
+ipsec_sa_t *_odp_ipsec_sa_lookup(const ipsec_sa_lookup_t *lookup);
+
+/**
+ * Run pre-check on SA usage statistics.
+ *
+ * @retval <0 if hard limits were breached
+ */
+int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa,
+ odp_ipsec_op_status_t *status);
+
+/**
+ * Update SA lifetime counters, filling respective status for the packet.
+ *
+ * @retval <0 if hard limits were breached
+ */
+int _odp_ipsec_sa_lifetime_update(ipsec_sa_t *ipsec_sa, uint32_t len,
+ odp_ipsec_op_status_t *status);
+
+/* Run pre-check on sequence number of the packet.
+ *
+ * @retval <0 if the packet falls out of window
+ */
+int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint64_t seq,
+ odp_ipsec_op_status_t *status);
+
+/* Run check on sequence number of the packet and update window if necessary.
+ *
+ * @retval <0 if the packet falls out of window
+ */
+int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint64_t seq,
+ odp_ipsec_op_status_t *status);
+
+/**
+ * Allocate an IPv4 ID for an outgoing packet.
+ */
+uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa);
+
+/**
+ * Try inline IPsec processing of provided packet.
+ *
+ * @retval 0 if packet was processed and will be queued using IPsec inline
+ *           processing
+ */
+int _odp_ipsec_try_inline(odp_packet_t *pkt);
+
+/**
+ * Populate number of packets and bytes of data successfully processed by the SA
+ * in the odp_ipsec_stats_t structure passed.
+ *
+ */
+void _odp_ipsec_sa_stats_pkts(ipsec_sa_t *sa, odp_ipsec_stats_t *stats);
+
+/**
+ * Return true if IPsec operates in sync mode in the given direction.
+ */
+odp_bool_t _odp_ipsec_is_sync_mode(odp_ipsec_dir_t dir);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/_ishmphy_internal.h b/platform/linux-generic/include/odp_ishmphy_internal.h
index 4fe560fd2..2bc9911ce 100644
--- a/platform/linux-generic/include/_ishmphy_internal.h
+++ b/platform/linux-generic/include/odp_ishmphy_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,9 +13,9 @@ extern "C" {
#include <stdint.h>
-void *_odp_ishmphy_book_va(uintptr_t len, intptr_t align);
-int _odp_ishmphy_unbook_va(void);
-void *_odp_ishmphy_map(int fd, void *start, uint64_t size, int flags);
+void *_odp_ishmphy_reserve_single_va(uint64_t len, int fd);
+int _odp_ishmphy_free_single_va(void);
+void *_odp_ishmphy_map(int fd, uint64_t size, uint64_t offset, int flags);
int _odp_ishmphy_unmap(void *start, uint64_t len, int flags);
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_ishmpool_internal.h b/platform/linux-generic/include/odp_ishmpool_internal.h
new file mode 100644
index 000000000..d5a0ccd47
--- /dev/null
+++ b/platform/linux-generic/include/odp_ishmpool_internal.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ISHMBUDDY_INTERNAL_H_
+#define ODP_ISHMBUDDY_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+#include <odp/api/spinlock.h>
+
+typedef struct _odp_ishm_pool_ctrl_t {
+ uint32_t element_sz; /* 0 for buddy pools, >0 for slab. */
+	int ishm_blk_idx; /* the block index returned by _ishm_reserve()*/
+ odp_spinlock_t lock; /* for pool access mutex */
+ void *user_addr; /* user pool area ('real user pool') */
+ union {
+ struct { /* things needed for buddy pools: */
+ uint8_t order; /* pool is 2^order bytes long */
+ uint8_t min_order; /*alloc won't go below 2^min_order*/
+ void **free_heads; /* 'order' free list heads. */
+ uint8_t *alloced_order; /* size of blocks, 0=free */
+ };
+ struct { /* things needed for slab pools: */
+ void *free_head; /* free element list head */
+ uint64_t nb_elem;/* total number of elements in pool */
+ };
+ };
+} _odp_ishm_pool_ctrl_t;
+
+typedef struct _odp_ishm_pool_t {
+ _odp_ishm_pool_ctrl_t ctrl; /* control part */
+ uint8_t mem[1]; /* area for heads, saved alloc'd orders, data*/
+} _odp_ishm_pool_t;
+
+_odp_ishm_pool_t *_odp_ishm_pool_create(const char *pool_name,
+ uint64_t size,
+ uint64_t min_alloc,
+ uint64_t max_alloc, int flags);
+int _odp_ishm_pool_destroy(_odp_ishm_pool_t *pool);
+void *_odp_ishm_pool_alloc(_odp_ishm_pool_t *pool, uint64_t size);
+int _odp_ishm_pool_free(_odp_ishm_pool_t *pool, void *addr);
+void _odp_ishm_pool_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_libconfig_internal.h b/platform/linux-generic/include/odp_libconfig_internal.h
new file mode 100644
index 000000000..3f051547b
--- /dev/null
+++ b/platform/linux-generic/include/odp_libconfig_internal.h
@@ -0,0 +1,41 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * Common libconfig functions
+ */
+
+#ifndef ODP_LIBCONFIG_INTERNAL_H_
+#define ODP_LIBCONFIG_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int _odp_libconfig_init_global(void);
+int _odp_libconfig_term_global(void);
+
+int _odp_libconfig_lookup_int(const char *path, int *value);
+int _odp_libconfig_lookup_str(const char *path, char *value,
+ unsigned int str_size);
+int _odp_libconfig_lookup_array(const char *path, int value[], int max_num);
+int _odp_libconfig_lookup_array_str(const char *path, char **value,
+ int max_count, unsigned int max_str);
+
+int _odp_libconfig_lookup_ext_int(const char *base_path,
+ const char *local_path,
+ const char *name,
+ int *value);
+
+int _odp_libconfig_print(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_llqueue.h b/platform/linux-generic/include/odp_llqueue.h
new file mode 100644
index 000000000..29810ebf3
--- /dev/null
+++ b/platform/linux-generic/include/odp_llqueue.h
@@ -0,0 +1,324 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_LLQUEUE_H_
+#define ODP_LLQUEUE_H_
+
+#include <odp/api/cpu.h>
+#include <odp/api/hints.h>
+#include <odp/api/spinlock.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_types_internal.h>
+#include <odp_cpu.h>
+
+#include <stdint.h>
+#include <stdlib.h>
+
+/******************************************************************************
+ * Linked list queues
+ *****************************************************************************/
+
+struct llqueue;
+struct llnode;
+
+static struct llnode *llq_head(struct llqueue *llq);
+static void llqueue_init(struct llqueue *llq);
+static void llq_enqueue(struct llqueue *llq, struct llnode *node);
+static struct llnode *llq_dequeue(struct llqueue *llq);
+static odp_bool_t llq_dequeue_cond(struct llqueue *llq, struct llnode *exp);
+static odp_bool_t llq_cond_rotate(struct llqueue *llq, struct llnode *node);
+static odp_bool_t llq_on_queue(struct llnode *node);
+
+/******************************************************************************
+ * The implementation(s)
+ *****************************************************************************/
+
+#define SENTINEL ((void *)~(uintptr_t)0)
+#define MAX_SPIN_COUNT 1000
+
+#ifdef CONFIG_LLDSCD
+/* Implement queue operations using double-word LL/SC */
+
+/* The scalar equivalent of a double pointer */
+#if __SIZEOF_PTRDIFF_T__ == 4
+typedef uint64_t dintptr_t;
+#endif
+#if __SIZEOF_PTRDIFF_T__ == 8
+typedef _odp_u128_t dintptr_t;
+#endif
+
+struct llnode {
+ struct llnode *next;
+};
+
+union llht {
+ struct {
+ struct llnode *head, *tail;
+ } st;
+ dintptr_t ui;
+};
+
+struct llqueue {
+ union llht u;
+};
+
+static inline struct llnode *llq_head(struct llqueue *llq)
+{
+ return __atomic_load_n(&llq->u.st.head, __ATOMIC_RELAXED);
+}
+
+static inline void llqueue_init(struct llqueue *llq)
+{
+ llq->u.st.head = NULL;
+ llq->u.st.tail = NULL;
+}
+
+static inline void llq_enqueue(struct llqueue *llq, struct llnode *node)
+{
+ union llht old, neu;
+
+ _ODP_ASSERT(node->next == NULL);
+ node->next = SENTINEL;
+ do {
+ old.ui = lld(&llq->u.ui, __ATOMIC_RELAXED);
+ neu.st.head = old.st.head == NULL ? node : old.st.head;
+ neu.st.tail = node;
+ } while (odp_unlikely(scd(&llq->u.ui, neu.ui, __ATOMIC_RELEASE)));
+ if (old.st.tail != NULL) {
+ /* List was not empty */
+ _ODP_ASSERT(old.st.tail->next == SENTINEL);
+ old.st.tail->next = node;
+ }
+}
+
+static inline struct llnode *llq_dequeue(struct llqueue *llq)
+{
+ struct llnode *head;
+ union llht old, neu;
+
+ /* llq_dequeue() may be used in a busy-waiting fashion
+ * Read head using plain load to avoid disturbing remote LL/SC
+ */
+ head = __atomic_load_n(&llq->u.st.head, __ATOMIC_ACQUIRE);
+ if (head == NULL)
+ return NULL;
+ /* Read head->next before LL to minimize cache miss latency
+ * in LL/SC below
+ */
+ (void)__atomic_load_n(&head->next, __ATOMIC_RELAXED);
+
+ do {
+restart_loop:
+ old.ui = lld(&llq->u.ui, __ATOMIC_RELAXED);
+ if (odp_unlikely(old.st.head == NULL)) {
+ /* Empty list */
+ return NULL;
+ } else if (odp_unlikely(old.st.head == old.st.tail)) {
+ /* Single-element in list */
+ neu.st.head = NULL;
+ neu.st.tail = NULL;
+ } else {
+ /* Multi-element list, dequeue head */
+ struct llnode *next;
+ int spin_count = 0;
+
+ /* Wait until llq_enqueue() has written true next
+ * pointer
+ */
+ while ((next = __atomic_load_n(&old.st.head->next,
+ __ATOMIC_RELAXED)) ==
+ SENTINEL) {
+ odp_cpu_pause();
+ if (++spin_count >= MAX_SPIN_COUNT)
+ goto restart_loop;
+ }
+ neu.st.head = next;
+ neu.st.tail = old.st.tail;
+ }
+ } while (odp_unlikely(scd(&llq->u.ui, neu.ui, __ATOMIC_RELAXED)));
+ old.st.head->next = NULL;
+ return old.st.head;
+}
+
+static inline odp_bool_t llq_dequeue_cond(struct llqueue *llq,
+ struct llnode *exp)
+{
+ union llht old, neu;
+
+ do {
+restart_loop:
+ old.ui = lld(&llq->u.ui, __ATOMIC_ACQUIRE);
+ if (odp_unlikely(old.st.head == NULL || old.st.head != exp)) {
+ /* Empty list or wrong head */
+ return false;
+ } else if (odp_unlikely(old.st.head == old.st.tail)) {
+ /* Single-element in list */
+ neu.st.head = NULL;
+ neu.st.tail = NULL;
+ } else {
+ /* Multi-element list, dequeue head */
+ struct llnode *next;
+ int spin_count = 0;
+
+ /* Wait until llq_enqueue() has written true next
+ * pointer */
+ while ((next = __atomic_load_n(&old.st.head->next,
+ __ATOMIC_RELAXED)) ==
+ SENTINEL) {
+ odp_cpu_pause();
+ if (++spin_count >= MAX_SPIN_COUNT)
+ goto restart_loop;
+ }
+
+ neu.st.head = next;
+ neu.st.tail = old.st.tail;
+ }
+ } while (odp_unlikely(scd(&llq->u.ui, neu.ui, __ATOMIC_RELAXED)));
+ old.st.head->next = NULL;
+ return true;
+}
+
+/* If 'node' is a head of llq then move it to tail */
+static inline odp_bool_t llq_cond_rotate(struct llqueue *llq,
+ struct llnode *node)
+{
+ /* Difficult to make this into a single atomic operation
+ * Instead use existing primitives.
+ */
+ if (odp_likely(llq_dequeue_cond(llq, node))) {
+ llq_enqueue(llq, node);
+ return true;
+ }
+ return false;
+}
+
+static inline odp_bool_t llq_on_queue(struct llnode *node)
+{
+ return node->next != NULL;
+}
+
+#else
+/* Implement queue operations protected by a spin lock */
+
+struct llnode {
+ struct llnode *next;
+};
+
+struct llqueue {
+ struct llnode *head, *tail;
+ odp_spinlock_t lock;
+};
+
+static inline struct llnode *llq_head(struct llqueue *llq)
+{
+ return __atomic_load_n(&llq->head, __ATOMIC_RELAXED);
+}
+
+static inline void llqueue_init(struct llqueue *llq)
+{
+ llq->head = NULL;
+ llq->tail = NULL;
+ odp_spinlock_init(&llq->lock);
+}
+
+static inline void llq_enqueue(struct llqueue *llq, struct llnode *node)
+{
+ _ODP_ASSERT(node->next == NULL);
+ node->next = SENTINEL;
+
+ odp_spinlock_lock(&llq->lock);
+ if (llq->head == NULL) {
+ llq->head = node;
+ llq->tail = node;
+ } else {
+ llq->tail->next = node;
+ llq->tail = node;
+ }
+ odp_spinlock_unlock(&llq->lock);
+}
+
+static inline struct llnode *llq_dequeue(struct llqueue *llq)
+{
+ struct llnode *head;
+ struct llnode *node = NULL;
+
+ head = __atomic_load_n(&llq->head, __ATOMIC_RELAXED);
+ if (head == NULL)
+ return NULL;
+
+ odp_spinlock_lock(&llq->lock);
+ if (llq->head != NULL) {
+ node = llq->head;
+ if (llq->head == llq->tail) {
+ _ODP_ASSERT(node->next == SENTINEL);
+ llq->head = NULL;
+ llq->tail = NULL;
+ } else {
+ _ODP_ASSERT(node->next != SENTINEL);
+ llq->head = node->next;
+ }
+ node->next = NULL;
+ }
+ odp_spinlock_unlock(&llq->lock);
+ return node;
+}
+
+static inline odp_bool_t llq_dequeue_cond(struct llqueue *llq,
+ struct llnode *node)
+{
+ odp_bool_t success = false;
+
+ odp_spinlock_lock(&llq->lock);
+ if (odp_likely(llq->head != NULL && llq->head == node)) {
+ success = true;
+ if (llq->head == llq->tail) {
+ _ODP_ASSERT(node->next == SENTINEL);
+ llq->head = NULL;
+ llq->tail = NULL;
+ } else {
+ _ODP_ASSERT(node->next != SENTINEL);
+ llq->head = node->next;
+ }
+ node->next = NULL;
+ }
+ odp_spinlock_unlock(&llq->lock);
+ return success;
+}
+
+/* If 'node' is a head of llq then move it to tail */
+static inline odp_bool_t llq_cond_rotate(struct llqueue *llq,
+ struct llnode *node)
+{
+ odp_bool_t success = false;
+
+ odp_spinlock_lock(&llq->lock);
+ if (odp_likely(llq->head == node)) {
+ success = true;
+ if (llq->tail != node) {
+ _ODP_ASSERT(node->next != SENTINEL);
+ llq->head = node->next;
+ llq->tail->next = node;
+ llq->tail = node;
+ node->next = SENTINEL;
+ }
+ /* Else 'node' is only element on list => nothing to do */
+ }
+ odp_spinlock_unlock(&llq->lock);
+ return success;
+}
+
+static inline odp_bool_t llq_on_queue(struct llnode *node)
+{
+ return node->next != NULL;
+}
+
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_macros_internal.h b/platform/linux-generic/include/odp_macros_internal.h
new file mode 100644
index 000000000..047e550f9
--- /dev/null
+++ b/platform/linux-generic/include/odp_macros_internal.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2022-2024, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP miscellaneous macros
+ */
+
+#ifndef ODP_MACROS_INTERNAL_H_
+#define ODP_MACROS_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+
+#include <stdint.h>
+
+#define _ODP_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define _ODP_MIN(a, b) \
+ __extension__ ({ \
+ __typeof__(a) min_a = (a); \
+ __typeof__(b) min_b = (b); \
+ min_a < min_b ? min_a : min_b; \
+ })
+
+#define _ODP_MAX(a, b) \
+ __extension__ ({ \
+ __typeof__(a) max_a = (a); \
+ __typeof__(b) max_b = (b); \
+ max_a > max_b ? max_a : max_b; \
+ })
+
+#define _ODP_MIN3(a, b, c) \
+__extension__ ({ \
+ __typeof__(a) min3_a = (a); \
+ __typeof__(b) min3_b = (b); \
+ __typeof__(c) min3_c = (c); \
+ (min3_a < min3_b ? (min3_a < min3_c ? min3_a : min3_c) : \
+ (min3_b < min3_c ? min3_b : min3_c)); \
+})
+
+#define _ODP_MAX3(a, b, c) \
+__extension__ ({ \
+ __typeof__(a) max3_a = (a); \
+ __typeof__(b) max3_b = (b); \
+ __typeof__(c) max3_c = (c); \
+ (max3_a > max3_b ? (max3_a > max3_c ? max3_a : max3_c) : \
+ (max3_b > max3_c ? max3_b : max3_c)); \
+})
+
+/* Macros to calculate ODP_ROUNDUP_POWER2_U32() in five rounds of shift
+ * and OR operations. */
+#define __ODP_RSHIFT_U32(x, y) (((uint32_t)(x)) >> (y))
+#define __ODP_POW2_U32_R1(x) (((uint32_t)(x)) | __ODP_RSHIFT_U32(x, 1))
+#define __ODP_POW2_U32_R2(x) (__ODP_POW2_U32_R1(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R1(x), 2))
+#define __ODP_POW2_U32_R3(x) (__ODP_POW2_U32_R2(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R2(x), 4))
+#define __ODP_POW2_U32_R4(x) (__ODP_POW2_U32_R3(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R3(x), 8))
+#define __ODP_POW2_U32_R5(x) (__ODP_POW2_U32_R4(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R4(x), 16))
+
+/* Round up a uint32_t value 'x' to the next power of two.
+ *
+ * The value is not rounded up if it's already a power of two (including 1).
+ * The value must be larger than 0 and not exceed 0x80000000.
+ */
+#define _ODP_ROUNDUP_POWER2_U32(x) \
+ ((((uint32_t)(x)) > 0x80000000) ? 0 : (__ODP_POW2_U32_R5(x - 1) + 1))
+
+/*
+ * Round up 'x' to alignment 'align'
+ */
+#define _ODP_ROUNDUP_ALIGN(x, align)\
+ ((align) * (((x) + (align) - 1) / (align)))
+
+/*
+ * Round up 'x' to cache line size alignment
+ */
+#define _ODP_ROUNDUP_CACHE_LINE(x)\
+ _ODP_ROUNDUP_ALIGN(x, ODP_CACHE_LINE_SIZE)
+
+/*
+ * Round down 'x' to 'align' alignment, which is a power of two
+ */
+#define _ODP_ROUNDDOWN_POWER2(x, align)\
+ ((x) & (~((align) - 1)))
+
+/*
+ * Check if value is a power of two
+ */
+#define _ODP_CHECK_IS_POWER2(x) ((((x) - 1) & (x)) == 0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ml_fp16.h b/platform/linux-generic/include/odp_ml_fp16.h
new file mode 100644
index 000000000..476028cb4
--- /dev/null
+++ b/platform/linux-generic/include/odp_ml_fp16.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_ML_FP16_H_
+#define ODP_ML_FP16_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+uint16_t _odp_float32_to_float16(float x);
+float _odp_float16_to_float32(uint16_t f16);
+uint16_t _odp_float32_to_bfloat16(float x);
+float _odp_bfloat16_to_float32(uint16_t f16);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_ML_FP16_H_ */
diff --git a/platform/linux-generic/include/odp_name_table_internal.h b/platform/linux-generic/include/odp_name_table_internal.h
index 52b202ca2..9101acfa8 100644
--- a/platform/linux-generic/include/odp_name_table_internal.h
+++ b/platform/linux-generic/include/odp_name_table_internal.h
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,7 +14,6 @@ extern "C" {
#endif
#include <stdint.h>
-#include <odp_api.h>
typedef enum {
ODP_COS_HANDLE,
diff --git a/platform/linux-generic/include/odp_packet_dpdk.h b/platform/linux-generic/include/odp_packet_dpdk.h
index 4d7e0fc47..23b1677a5 100644
--- a/platform/linux-generic/include/odp_packet_dpdk.h
+++ b/platform/linux-generic/include/odp_packet_dpdk.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,64 +8,73 @@
#ifndef ODP_PACKET_DPDK_H
#define ODP_PACKET_DPDK_H
+#include <stdint.h>
+
#include <odp/api/packet_io.h>
-#include <odp/api/pool.h>
-#include <odp/api/ticketlock.h>
-#include <net/if.h>
+#include <odp_packet_internal.h>
+#include <odp_parse_internal.h>
-#ifdef ODP_PKTIO_DPDK
-#include <rte_config.h>
#include <rte_mbuf.h>
+#include <rte_version.h>
-#define DPDK_MEMORY_MB 512
-#define DPDK_NB_MBUF 16384
-#define DPDK_MBUF_BUF_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
-#define DPDK_MEMPOOL_CACHE_SIZE 64
-#define DPDK_NM_RX_DESC 128
-#define DPDK_NM_TX_DESC 512
-
-ODP_STATIC_ASSERT((DPDK_NB_MBUF % DPDK_MEMPOOL_CACHE_SIZE == 0) &&
- (DPDK_MEMPOOL_CACHE_SIZE <= RTE_MEMPOOL_CACHE_MAX_SIZE) &&
- (DPDK_MEMPOOL_CACHE_SIZE <= DPDK_MBUF_BUF_SIZE * 10 / 15)
- , "DPDK mempool cache size failure");
+#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
+ #define RTE_MBUF_F_RX_IP_CKSUM_MASK PKT_RX_IP_CKSUM_MASK
+ #define RTE_MBUF_F_RX_IP_CKSUM_GOOD PKT_RX_IP_CKSUM_GOOD
+ #define RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN PKT_RX_IP_CKSUM_UNKNOWN
+ #define RTE_MBUF_F_RX_L4_CKSUM_MASK PKT_RX_L4_CKSUM_MASK
+ #define RTE_MBUF_F_RX_L4_CKSUM_GOOD PKT_RX_L4_CKSUM_GOOD
+ #define RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN PKT_RX_L4_CKSUM_UNKNOWN
#endif
-#define DPDK_IXGBE_MIN_RX_BURST 4
-
-/** Cache for storing packets */
-struct pkt_cache_t {
- /** array for storing extra RX packets */
- struct rte_mbuf *pkt[DPDK_IXGBE_MIN_RX_BURST];
- unsigned idx; /**< head of cache */
- unsigned count; /**< packets in cache */
-};
-
-typedef union {
- struct pkt_cache_t s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pkt_cache_t))];
-} pkt_cache_t ODP_ALIGNED_CACHE;
-
-/** Packet IO using DPDK interface */
-typedef struct {
- odp_pool_t pool; /**< pool to alloc packets from */
- struct rte_mempool *pkt_pool; /**< DPDK packet pool */
- odp_pktio_capability_t capa; /**< interface capabilities */
- uint32_t data_room; /**< maximum packet length */
- uint16_t mtu; /**< maximum transmission unit */
- /** DPDK packet pool name (pktpool_<ifname>) */
- char pool_name[IF_NAMESIZE + 8];
- /** Use system call to get/set vdev promisc mode */
- odp_bool_t vdev_sysc_promisc;
- uint8_t port_id; /**< DPDK port identifier */
- unsigned min_rx_burst; /**< minimum RX burst size */
- odp_pktin_hash_proto_t hash; /**< Packet input hash protocol */
- odp_bool_t lockless_rx; /**< no locking for rx */
- odp_bool_t lockless_tx; /**< no locking for tx */
- odp_ticketlock_t rx_lock[PKTIO_MAX_QUEUES]; /**< RX queue locks */
- odp_ticketlock_t tx_lock[PKTIO_MAX_QUEUES]; /**< TX queue locks */
- /** cache for storing extra RX packets */
- pkt_cache_t rx_cache[PKTIO_MAX_QUEUES];
-} pkt_dpdk_t;
+#define IP4_CSUM_RESULT(ol_flags) ((ol_flags) & RTE_MBUF_F_RX_IP_CKSUM_MASK)
+#define L4_CSUM_RESULT(ol_flags) ((ol_flags) & RTE_MBUF_F_RX_L4_CKSUM_MASK)
+
+/** Packet parser using DPDK interface */
+static inline
+int _odp_dpdk_packet_parse_common(odp_packet_hdr_t *pkt_hdr, const uint8_t *ptr,
+ uint32_t frame_len, uint32_t seg_len,
+ struct rte_mbuf *mbuf, int layer,
+ odp_pktin_config_opt_t pktin_cfg)
+{
+ packet_parser_t *prs = &pkt_hdr->p;
+ uint64_t mbuf_ol = mbuf->ol_flags;
+
+ if (odp_unlikely(layer == ODP_PROTO_LAYER_NONE))
+ return 0;
+
+ /* Assume valid L2 header, no CRC/FCS check in SW */
+ prs->l2_offset = 0;
+
+ if (layer >= ODP_PROTO_LAYER_L3) {
+ int ip_chksum = IP4_CSUM_RESULT(mbuf_ol);
+
+ if (ip_chksum == RTE_MBUF_F_RX_IP_CKSUM_GOOD) {
+ prs->input_flags.l3_chksum_done = 1;
+ } else if (ip_chksum != RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN) {
+ prs->input_flags.l3_chksum_done = 1;
+ prs->flags.l3_chksum_err = 1;
+ }
+ }
+
+ if (layer >= ODP_PROTO_LAYER_L4) {
+ int l4_chksum = L4_CSUM_RESULT(mbuf_ol);
+
+ if (l4_chksum == RTE_MBUF_F_RX_L4_CKSUM_GOOD) {
+ prs->input_flags.l4_chksum_done = 1;
+ } else if (l4_chksum != RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN) {
+ prs->input_flags.l4_chksum_done = 1;
+ prs->flags.l4_chksum_err = 1;
+ }
+ }
+
+ pktin_cfg.bit.ipv4_chksum = 0;
+ pktin_cfg.bit.udp_chksum = 0;
+ pktin_cfg.bit.tcp_chksum = 0;
+ pktin_cfg.bit.sctp_chksum = 0;
+
+ return _odp_packet_parse_common(pkt_hdr, ptr, frame_len, seg_len, layer,
+ pktin_cfg);
+}
#endif
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 0a9f17799..0b03aa211 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,98 +19,56 @@ extern "C" {
#endif
#include <odp/api/align.h>
+#include <odp/api/atomic.h>
#include <odp/api/debug.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
-#include <odp_buffer_inlines.h>
+#include <odp/api/hints.h>
#include <odp/api/packet.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
-#include <odp_crypto_internal.h>
-#include <odp/api/plat/packet_types.h>
+#include <odp/api/comp.h>
+#include <odp/api/std.h>
+
+#include <odp/api/plat/packet_inline_types.h>
-/** Minimum segment length expected by packet_parse_common() */
-#define PACKET_PARSE_SEG_LEN 96
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_ipsec_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_if.h>
+#include <odp_config_internal.h>
+#include <stdint.h>
+#include <string.h>
ODP_STATIC_ASSERT(sizeof(_odp_packet_input_flags_t) == sizeof(uint64_t),
"INPUT_FLAGS_SIZE_ERROR");
-/**
- * Packet error flags
- */
-typedef union {
- /* All error flags */
- uint32_t all;
-
- struct {
- /* Bitfield flags for each detected error */
- uint32_t app_error:1; /**< Error bit for application use */
- uint32_t frame_len:1; /**< Frame length error */
- uint32_t snap_len:1; /**< Snap length error */
- uint32_t l2_chksum:1; /**< L2 checksum error, checks TBD */
- uint32_t ip_err:1; /**< IP error, checks TBD */
- uint32_t tcp_err:1; /**< TCP error, checks TBD */
- uint32_t udp_err:1; /**< UDP error, checks TBD */
- };
-} error_flags_t;
+ODP_STATIC_ASSERT(sizeof(_odp_packet_flags_t) == sizeof(uint32_t),
+ "PACKET_FLAGS_SIZE_ERROR");
-ODP_STATIC_ASSERT(sizeof(error_flags_t) == sizeof(uint32_t),
- "ERROR_FLAGS_SIZE_ERROR");
+/* Maximum number of segments per packet */
+#define PKT_MAX_SEGS 255
-/**
- * Packet output flags
- */
-typedef union {
- /* All output flags */
- uint32_t all;
-
- struct {
- /** adjustment for traffic mgr */
- uint32_t shaper_len_adj:8;
-
- /* Bitfield flags for each output option */
- uint32_t l3_chksum_set:1; /**< L3 chksum bit is valid */
- uint32_t l3_chksum:1; /**< L3 chksum override */
- uint32_t l4_chksum_set:1; /**< L3 chksum bit is valid */
- uint32_t l4_chksum:1; /**< L4 chksum override */
- };
-} output_flags_t;
-
-ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t),
- "OUTPUT_FLAGS_SIZE_ERROR");
-
-/**
- * Protocol stack layers
- */
-typedef enum {
- LAYER_NONE = 0,
- LAYER_L1,
- LAYER_L2,
- LAYER_L3,
- LAYER_L4,
- LAYER_ALL
-} layer_t;
+ODP_STATIC_ASSERT(PKT_MAX_SEGS < UINT16_MAX, "PACKET_MAX_SEGS_ERROR");
/**
* Packet parser metadata
*/
typedef struct {
+ /* Packet input flags */
_odp_packet_input_flags_t input_flags;
- error_flags_t error_flags;
- output_flags_t output_flags;
- uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */
- uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */
- uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+ /* Other flags */
+ _odp_packet_flags_t flags;
- uint32_t l3_len; /**< Layer 3 length */
- uint32_t l4_len; /**< Layer 4 length */
+ /* offset to L2 hdr, e.g. Eth */
+ uint16_t l2_offset;
- uint16_t ethtype; /**< EtherType */
- uint8_t ip_proto; /**< IP protocol */
- uint8_t parsed_layers; /**< Highest parsed protocol stack layer */
+ /* offset to L3 hdr, e.g. IPv4, IPv6 */
+ uint16_t l3_offset;
+ /* offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */
+ uint16_t l4_offset;
} packet_parser_t;
/**
@@ -119,46 +78,107 @@ typedef struct {
* packet_init(). Because of this any new fields added must be reviewed for
* initialization requirements.
*/
-typedef struct {
- /* common buffer header */
- odp_buffer_hdr_t buf_hdr;
+typedef struct ODP_ALIGNED_CACHE odp_packet_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
- /*
- * Following members are initialized by packet_init()
- */
+ /* Segment data start */
+ uint8_t *seg_data;
packet_parser_t p;
+ /* --- 64-byte cache line boundary --- */
+
odp_pktio_t input;
+ /* Next header which continues the segment list */
+ struct odp_packet_hdr_t *seg_next;
+
+ /* Total packet length */
uint32_t frame_len;
- uint32_t headroom;
- uint32_t tailroom;
- /*
- * Members below are not initialized by packet_init()
- */
+ /* Segment data length */
+ uint32_t seg_len;
+
+ /* Total segment count */
+ uint16_t seg_count;
+
+ uint16_t headroom;
+
+ uint16_t tailroom;
+
+ /* Classifier handle index */
+ uint16_t cos;
+
+ /* Used as classifier destination queue, in IPsec inline input processing and as Tx
+ * completion event queue. */
+ odp_queue_t dst_queue;
+
+ /* Reference count */
+ odp_atomic_u32_t ref_cnt;
/* Flow hash value */
uint32_t flow_hash;
+ /* User area pointer */
+ void *uarea_addr;
+
+ /* User context pointer */
+ const void *user_ptr;
+
+ /* --- 64-byte cache line boundary --- */
+
/* Timestamp value */
odp_time_t timestamp;
- /* Classifier destination queue */
- odp_queue_t dst_queue;
+ /* Classifier mark */
+ uint16_t cls_mark;
+
+ /* Offset to payload start */
+ uint16_t payload_offset;
+
+ /* Max payload size in a LSO segment */
+ uint16_t lso_max_payload;
+
+ /* Packet aging drop timeout before enqueue. Once enqueued holds the maximum age (time of
+ * request + requested drop timeout). */
+ uint64_t tx_aging_ns;
+
+ /* Tx completion poll completion identifier */
+ uint32_t tx_compl_id;
- /* Result for crypto */
- odp_crypto_generic_op_result_t op_result;
+ /* LSO profile index */
+ uint8_t lso_profile_idx;
+
+ /* Pktio where packet is used as a memory source */
+ uint8_t ms_pktio_idx;
+
+ union {
+ /* Result for crypto packet op */
+ odp_crypto_packet_result_t crypto_op_result;
+
+ /* Context for IPsec */
+ odp_ipsec_packet_result_t ipsec_ctx;
+
+ /* Result for comp packet op */
+ odp_comp_packet_result_t comp_op_result;
+ };
/* Packet data storage */
- uint8_t data[0];
+ uint8_t data[];
+
} odp_packet_hdr_t;
+/* Packet header size is critical for performance. Ensure that it does not accidentally
+ * grow over 256 bytes. */
+ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 256, "PACKET_HDR_SIZE_ERROR");
+
+ODP_STATIC_ASSERT(CONFIG_PKTIO_ENTRIES < UINT8_MAX, "MS_PKTIO_IDX_SIZE_ERROR");
+
/**
* Return the packet header
*/
-static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt)
+static inline odp_packet_hdr_t *packet_hdr(odp_packet_t pkt)
{
return (odp_packet_hdr_t *)(uintptr_t)pkt;
}
@@ -168,81 +188,258 @@ static inline odp_packet_t packet_handle(odp_packet_hdr_t *pkt_hdr)
return (odp_packet_t)pkt_hdr;
}
-static inline void copy_packet_parser_metadata(odp_packet_hdr_t *src_hdr,
- odp_packet_hdr_t *dst_hdr)
+static inline _odp_event_hdr_t *packet_to_event_hdr(odp_packet_t pkt)
{
- dst_hdr->p = src_hdr->p;
+ return (_odp_event_hdr_t *)(uintptr_t)&packet_hdr(pkt)->event_hdr;
}
-static inline void copy_packet_cls_metadata(odp_packet_hdr_t *src_hdr,
- odp_packet_hdr_t *dst_hdr)
+static inline odp_packet_t packet_from_event_hdr(_odp_event_hdr_t *event_hdr)
{
- dst_hdr->p = src_hdr->p;
- dst_hdr->dst_queue = src_hdr->dst_queue;
- dst_hdr->flow_hash = src_hdr->flow_hash;
- dst_hdr->timestamp = src_hdr->timestamp;
- dst_hdr->op_result = src_hdr->op_result;
+ return (odp_packet_t)(uintptr_t)event_hdr;
}
-static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+static inline uint32_t packet_first_seg_len(odp_packet_hdr_t *pkt_hdr)
+{
+ return pkt_hdr->seg_len;
+}
+
+static inline odp_packet_hdr_t *packet_last_seg(odp_packet_hdr_t *hdr)
{
- int last = pkt_hdr->buf_hdr.segcount - 1;
+ while (hdr->seg_next != NULL)
+ hdr = hdr->seg_next;
- pkt_hdr->tailroom += len;
- pkt_hdr->frame_len -= len;
- pkt_hdr->buf_hdr.seg[last].len -= len;
+ return hdr;
}
-static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr)
+static inline void packet_subtype_set(odp_packet_t pkt, int subtype)
{
- return pkt_hdr->frame_len;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->event_hdr.subtype = subtype;
}
-static inline void packet_set_len(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+/**
+ * Initialize packet
+ */
+static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len)
{
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
+ uint32_t seg_len;
+ int num = pkt_hdr->seg_count;
+
+ if (odp_likely(num == 1)) {
+ seg_len = len;
+ pkt_hdr->seg_len = len;
+ } else {
+ odp_packet_hdr_t *last;
+
+ seg_len = len - ((num - 1) * pool->seg_len);
+
+ /* Last segment data length */
+ last = packet_last_seg(pkt_hdr);
+ last->seg_len = seg_len;
+ }
+
+ /* Clear all flags. Resets also return value of cls_mark, user_ptr, etc. */
+ pkt_hdr->p.input_flags.all = 0;
+ pkt_hdr->p.flags.all_flags = 0;
+
+ pkt_hdr->p.l2_offset = 0;
+ pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+
+ /*
+ * Packet headroom is set from the pool's headroom
+ * Packet tailroom is rounded up to fill the last
+ * segment occupied by the allocated length.
+ */
pkt_hdr->frame_len = len;
+ pkt_hdr->headroom = pool->headroom;
+ pkt_hdr->tailroom = pool->seg_len - seg_len + pool->tailroom;
+
+ if (odp_unlikely(pkt_hdr->event_hdr.subtype != ODP_EVENT_PACKET_BASIC))
+ pkt_hdr->event_hdr.subtype = ODP_EVENT_PACKET_BASIC;
+
+ pkt_hdr->input = ODP_PKTIO_INVALID;
+}
+
+/**
+ * Check if copying packet metadata between pools is possible
+ *
+ * @retval 0 when possible without user area copy
+ * @retval >0 when possible with user area copy
+ * @retval <0 when not possible
+ */
+static inline int _odp_packet_copy_md_possible(odp_pool_t dst_pool,
+ odp_pool_t src_pool)
+{
+ const pool_t *src_hdr;
+ const pool_t *dst_hdr;
+
+ if (src_pool == dst_pool)
+ return 0;
+
+ src_hdr = _odp_pool_entry(src_pool);
+ dst_hdr = _odp_pool_entry(dst_pool);
+
+ if (dst_hdr->param_uarea_size < src_hdr->param_uarea_size)
+ return -1;
+
+ return 1;
+}
+
+/**
+ * Copy packet metadata
+ *
+ * This function is assumed to never fail. Use _odp_packet_copy_md_possible() to
+ * check beforehand that copying packet metadata between source and destination
+ * packet pools is possible.
+ *
+ * @param uarea_copy Copy user area data. If false, user area pointers
+ * are swapped between the packet headers (allowed
+ * only when packets are from the same pool).
+ */
+static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
+ odp_packet_hdr_t *src_hdr,
+ odp_bool_t uarea_copy)
+{
+ int8_t subtype = src_hdr->event_hdr.subtype;
+
+ /* Lengths and segmentation data are not copied:
+ * .frame_len
+ * .headroom
+ * .tailroom
+ * .seg_data
+ * .seg_next
+ * .seg_len
+ * .seg_count
+ */
+ dst_hdr->input = src_hdr->input;
+ dst_hdr->event_hdr.subtype = subtype;
+ dst_hdr->dst_queue = src_hdr->dst_queue;
+ dst_hdr->cos = src_hdr->cos;
+ dst_hdr->cls_mark = src_hdr->cls_mark;
+ dst_hdr->user_ptr = src_hdr->user_ptr;
+
+ if (src_hdr->p.input_flags.flow_hash)
+ dst_hdr->flow_hash = src_hdr->flow_hash;
+
+ if (src_hdr->p.input_flags.timestamp)
+ dst_hdr->timestamp = src_hdr->timestamp;
+
+ if (src_hdr->p.flags.lso) {
+ dst_hdr->lso_max_payload = src_hdr->lso_max_payload;
+ dst_hdr->lso_profile_idx = src_hdr->lso_profile_idx;
+ }
+
+ if (src_hdr->p.flags.payload_off)
+ dst_hdr->payload_offset = src_hdr->payload_offset;
+
+ dst_hdr->p = src_hdr->p;
+
+ if (src_hdr->uarea_addr) {
+ if (uarea_copy) {
+ const pool_t *src_pool = _odp_pool_entry(src_hdr->event_hdr.pool);
+ const pool_t *dst_pool = _odp_pool_entry(dst_hdr->event_hdr.pool);
+ const uint32_t src_uarea_size = src_pool->param_uarea_size;
+ const uint32_t dst_uarea_size = dst_pool->param_uarea_size;
+
+ _ODP_ASSERT(dst_hdr->uarea_addr != NULL);
+ _ODP_ASSERT(dst_uarea_size >= src_uarea_size);
+
+ memcpy(dst_hdr->uarea_addr, src_hdr->uarea_addr, src_uarea_size);
+ } else {
+ void *src_uarea = src_hdr->uarea_addr;
+
+ /* If user area exists, packets should always be from the same pool, so
+ * user area pointers can simply be swapped. */
+ _ODP_ASSERT(dst_hdr->event_hdr.pool == src_hdr->event_hdr.pool);
+
+ src_hdr->uarea_addr = dst_hdr->uarea_addr;
+ dst_hdr->uarea_addr = src_uarea;
+ }
+ }
+
+ if (odp_unlikely(subtype != ODP_EVENT_PACKET_BASIC)) {
+ if (subtype == ODP_EVENT_PACKET_IPSEC)
+ dst_hdr->ipsec_ctx = src_hdr->ipsec_ctx;
+ else if (subtype == ODP_EVENT_PACKET_CRYPTO)
+ dst_hdr->crypto_op_result = src_hdr->crypto_op_result;
+ else if (subtype == ODP_EVENT_PACKET_COMP)
+ dst_hdr->comp_op_result = src_hdr->comp_op_result;
+ }
}
-static inline int packet_parse_l2_not_done(packet_parser_t *prs)
+static inline void _odp_packet_copy_cls_md(odp_packet_hdr_t *dst_hdr,
+ odp_packet_hdr_t *src_hdr)
{
- return !prs->input_flags.parsed_l2;
+ dst_hdr->p = src_hdr->p;
+ dst_hdr->dst_queue = src_hdr->dst_queue;
+ dst_hdr->cos = src_hdr->cos;
+ dst_hdr->cls_mark = src_hdr->cls_mark;
}
-static inline int packet_parse_not_complete(odp_packet_hdr_t *pkt_hdr)
+static inline void *packet_data(odp_packet_hdr_t *pkt_hdr)
{
- return pkt_hdr->p.parsed_layers != LAYER_ALL;
+ return pkt_hdr->seg_data;
}
-/* Forward declarations */
-int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt);
+static inline void push_head(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ pkt_hdr->headroom -= len;
+ pkt_hdr->frame_len += len;
+ pkt_hdr->seg_data -= len;
+ pkt_hdr->seg_len += len;
+}
-/* Packet alloc of pktios */
-int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
- odp_packet_t pkt[], int max_num);
+static inline void pull_head(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ pkt_hdr->headroom += len;
+ pkt_hdr->frame_len -= len;
+ pkt_hdr->seg_data += len;
+ pkt_hdr->seg_len -= len;
+}
-/* Fill in parser metadata for L2 */
-void packet_parse_l2(packet_parser_t *prs, uint32_t frame_len);
+static inline void pull_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ odp_packet_hdr_t *last = packet_last_seg(pkt_hdr);
-/* Perform packet parse up to a given protocol layer */
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr, layer_t layer);
+ pkt_hdr->tailroom += len;
+ pkt_hdr->frame_len -= len;
+ last->seg_len -= len;
+}
-/* Reset parser metadata for a new parse */
-void packet_parse_reset(odp_packet_hdr_t *pkt_hdr);
+static inline uint32_t packet_len(odp_packet_hdr_t *pkt_hdr)
+{
+ return pkt_hdr->frame_len;
+}
-/* Convert a packet handle to a buffer handle */
-odp_buffer_t _odp_packet_to_buffer(odp_packet_t pkt);
+static inline void packet_set_len(odp_packet_hdr_t *pkt_hdr, uint32_t len)
+{
+ pkt_hdr->frame_len = len;
+}
-/* Convert a buffer handle to a packet handle */
-odp_packet_t _odp_packet_from_buffer(odp_buffer_t buf);
+/* Packet alloc of pktios */
+int _odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
+ odp_packet_t pkt[], int max_num);
-static inline int packet_hdr_has_l2(odp_packet_hdr_t *pkt_hdr)
+/* Reset parser metadata for a new parse */
+static inline void packet_parse_reset(odp_packet_hdr_t *pkt_hdr, int all)
{
- return pkt_hdr->p.input_flags.l2;
+ pkt_hdr->p.input_flags.all = 0;
+ pkt_hdr->p.l2_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+
+ if (all)
+ pkt_hdr->p.flags.all_flags = 0;
+ else /* Keep user ptr and pktout flags */
+ pkt_hdr->p.flags.all.error = 0;
}
-static inline void packet_hdr_has_l2_set(odp_packet_hdr_t *pkt_hdr, int val)
+static inline int packet_hdr_has_l2(odp_packet_hdr_t *pkt_hdr)
{
- pkt_hdr->p.input_flags.l2 = val;
+ return pkt_hdr->p.input_flags.l2;
}
static inline int packet_hdr_has_eth(odp_packet_hdr_t *pkt_hdr)
@@ -255,6 +452,13 @@ static inline int packet_hdr_has_ipv6(odp_packet_hdr_t *pkt_hdr)
return pkt_hdr->p.input_flags.ipv6;
}
+static inline void packet_set_flow_hash(odp_packet_hdr_t *pkt_hdr,
+ uint32_t flow_hash)
+{
+ pkt_hdr->flow_hash = flow_hash;
+ pkt_hdr->p.input_flags.flow_hash = 1;
+}
+
static inline void packet_set_ts(odp_packet_hdr_t *pkt_hdr, odp_time_t *ts)
{
if (ts != NULL) {
@@ -263,10 +467,19 @@ static inline void packet_set_ts(odp_packet_hdr_t *pkt_hdr, odp_time_t *ts)
}
}
-int packet_parse_common(packet_parser_t *pkt_hdr, const uint8_t *ptr,
- uint32_t pkt_len, uint32_t seg_len, layer_t layer);
+int _odp_packet_set_data(odp_packet_t pkt, uint32_t offset,
+ uint8_t c, uint32_t len);
+
+int _odp_packet_cmp_data(odp_packet_t pkt, uint32_t offset,
+ const void *s, uint32_t len);
+
+int _odp_packet_ipv4_chksum_insert(odp_packet_t pkt);
+int _odp_packet_tcp_chksum_insert(odp_packet_t pkt);
+int _odp_packet_udp_chksum_insert(odp_packet_t pkt);
+int _odp_packet_sctp_chksum_insert(odp_packet_t pkt);
-int _odp_cls_parse(odp_packet_hdr_t *pkt_hdr, const uint8_t *parseptr);
+int _odp_packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
+ odp_pktin_config_opt_t opt, uint64_t l4_part_sum);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index 89bb6f3a3..6c8a2305b 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -18,22 +18,36 @@
extern "C" {
#endif
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
#include <odp/api/spinlock.h>
#include <odp/api/ticketlock.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/packet_io_inlines.h>
+
+#include <odp/autoheader_internal.h>
#include <odp_classification_datamodel.h>
-#include <odp_align_internal.h>
+#include <odp_config_internal.h>
#include <odp_debug_internal.h>
-#include <odp_packet_io_ring_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_stats_common.h>
+#include <odp_queue_if.h>
-#include <odp_config_internal.h>
-#include <odp/api/hints.h>
+#include <inttypes.h>
+#include <linux/if_ether.h>
#include <net/if.h>
+#include <string.h>
+#include <sys/select.h>
+
+#define PKTIO_LSO_PROFILES 16
+/* Assume at least Ethernet header per each segment */
+#define PKTIO_LSO_MIN_PAYLOAD_OFFSET 14
+#define PKTIO_LSO_MAX_PAYLOAD_OFFSET 128
+/* Allow 64 kB packet to be split into about 1kB segments */
+#define PKTIO_LSO_MAX_SEGMENTS 64
-#define PKTIO_MAX_QUEUES 64
-#include <odp_packet_socket.h>
-#include <odp_packet_netmap.h>
-#include <odp_packet_tap.h>
-#include <odp_packet_dpdk.h>
+ODP_STATIC_ASSERT(PKTIO_LSO_PROFILES < UINT8_MAX, "PKTIO_LSO_PROFILES_ERROR");
#define PKTIO_NAME_LEN 256
@@ -49,83 +63,43 @@ extern "C" {
/* Forward declaration */
struct pktio_if_ops;
-typedef struct {
- odp_queue_t loopq; /**< loopback queue for "loop" device */
- odp_bool_t promisc; /**< promiscuous mode state */
-} pkt_loop_t;
-
-#ifdef HAVE_PCAP
-typedef struct {
- char *fname_rx; /**< name of pcap file for rx */
- char *fname_tx; /**< name of pcap file for tx */
- void *rx; /**< rx pcap handle */
- void *tx; /**< tx pcap handle */
- void *tx_dump; /**< tx pcap dumper handle */
- odp_pool_t pool; /**< rx pool */
- unsigned char *buf; /**< per-pktio temp buffer */
- int loops; /**< number of times to loop rx pcap */
- int loop_cnt; /**< number of loops completed */
- odp_bool_t promisc; /**< promiscuous mode state */
-} pkt_pcap_t;
+#if defined(_ODP_PKTIO_XDP) && ODP_CACHE_LINE_SIZE == 128
+#define PKTIO_PRIVATE_SIZE 33792
+#elif defined(_ODP_PKTIO_XDP)
+#define PKTIO_PRIVATE_SIZE 29696
+#else
+#define PKTIO_PRIVATE_SIZE 9216
#endif
-typedef struct {
- /* TX */
- struct {
- _ring_t *send; /**< ODP ring for IPC msg packets
- indexes transmitted to shared
- memory */
- _ring_t *free; /**< ODP ring for IPC msg packets
- indexes already processed by remote
- process */
- } tx;
- /* RX */
- struct {
- _ring_t *recv; /**< ODP ring for IPC msg packets
- indexes received from shared
- memory (from remote process) */
- _ring_t *free; /**< ODP ring for IPC msg packets
- indexes already processed by
- current process */
- } rx; /* slave */
- void *pool_base; /**< Remote pool base addr */
- void *pool_mdata_base; /**< Remote pool mdata base addr */
- uint64_t pkt_size; /**< Packet size in remote pool */
- odp_pool_t pool; /**< Pool of main process */
- enum {
- PKTIO_TYPE_IPC_MASTER = 0, /**< Master is the process which
- creates shm */
- PKTIO_TYPE_IPC_SLAVE /**< Slave is the process which
- connects to shm */
- } type; /**< define if it's master or slave process */
- odp_atomic_u32_t ready; /**< 1 - pktio is ready and can recv/send
- packet, 0 - not yet ready */
- void *pinfo;
- odp_shm_t pinfo_shm;
- odp_shm_t remote_pool_shm; /**< shm of remote pool get with
- _ipc_map_remote_pool() */
-} _ipc_pktio_t;
-
-struct pktio_entry {
+typedef struct ODP_ALIGNED_CACHE {
const struct pktio_if_ops *ops; /**< Implementation specific methods */
/* These two locks together lock the whole pktio device */
odp_ticketlock_t rxl; /**< RX ticketlock */
odp_ticketlock_t txl; /**< TX ticketlock */
- int cls_enabled; /**< is classifier enabled */
+ odp_proto_layer_t parse_layer;
+ uint16_t pktin_frame_offset;
+
+ struct {
+ union {
+ uint8_t all_flags;
+
+ struct {
+ /* Pktout checksum offload */
+ uint8_t chksum_insert : 1;
+ /* Classifier */
+ uint8_t cls : 1;
+ /* Tx timestamp */
+ uint8_t tx_ts : 1;
+ /* Tx completion events */
+ uint8_t tx_compl : 1;
+ /* Packet aging */
+ uint8_t tx_aging : 1;
+ };
+ };
+ } enabled;
+
odp_pktio_t handle; /**< pktio handle */
- union {
- pkt_loop_t pkt_loop; /**< Using loopback for IO */
- pkt_sock_t pkt_sock; /**< using socket API for IO */
- pkt_sock_mmap_t pkt_sock_mmap; /**< using socket mmap
- * API for IO */
- pkt_netmap_t pkt_nm; /**< using netmap API for IO */
- pkt_dpdk_t pkt_dpdk; /**< using DPDK for IO */
-#ifdef HAVE_PCAP
- pkt_pcap_t pkt_pcap; /**< Using pcap for IO */
-#endif
- pkt_tap_t pkt_tap; /**< using TAP for IO */
- _ipc_pktio_t ipc; /**< IPC pktio data */
- };
+ unsigned char pkt_priv[PKTIO_PRIVATE_SIZE] ODP_ALIGNED_CACHE;
enum {
/* Not allocated */
PKTIO_STATE_FREE = 0,
@@ -146,43 +120,76 @@ struct pktio_entry {
} state;
odp_pktio_config_t config; /**< Device configuration */
classifier_t cls; /**< classifier linked with this pktio*/
- odp_pktio_stats_t stats; /**< statistic counters for pktio */
- enum {
- STATS_SYSFS = 0,
- STATS_ETHTOOL,
- STATS_UNSUPPORTED
- } stats_type;
+ /* Driver level statistics counters */
+ odp_pktio_stats_t stats;
+ /* Statistics counters used also outside drivers */
+ struct {
+ odp_atomic_u64_t in_discards;
+ odp_atomic_u64_t in_errors;
+ odp_atomic_u64_t out_discards;
+ } stats_extra;
+ /* Latest Tx timestamp */
+ odp_atomic_u64_t tx_ts;
+ pktio_stats_type_t stats_type;
char name[PKTIO_NAME_LEN]; /**< name of pktio provided to
- pktio_open() */
-
+ internal pktio_open() calls */
+ char full_name[PKTIO_NAME_LEN]; /**< original pktio name passed to
+ odp_pktio_open() and returned by
+ odp_pktio_info() */
odp_pool_t pool;
odp_pktio_param_t param;
+ odp_pktio_capability_t capa; /**< Packet IO capabilities */
+
+ /* Pool for Tx completion events */
+ odp_pool_t tx_compl_pool;
+ /* Status map SHM handle */
+ odp_shm_t tx_compl_status_shm;
+ /* Status map for Tx completion identifiers */
+ odp_atomic_u32_t *tx_compl_status;
/* Storage for queue handles
* Multi-queue support is pktio driver specific */
- unsigned num_in_queue;
- unsigned num_out_queue;
+ uint32_t num_in_queue;
+ uint32_t num_out_queue;
struct {
odp_queue_t queue;
odp_pktin_queue_t pktin;
- } in_queue[PKTIO_MAX_QUEUES];
+ odp_pktin_vector_config_t vector;
+ } in_queue[ODP_PKTIN_MAX_QUEUES];
struct {
odp_queue_t queue;
odp_pktout_queue_t pktout;
- } out_queue[PKTIO_MAX_QUEUES];
-};
+ } out_queue[ODP_PKTOUT_MAX_QUEUES];
-typedef union {
- struct pktio_entry s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pktio_entry))];
} pktio_entry_t;
typedef struct {
+ odp_lso_profile_param_t param;
+ int used;
+ uint8_t index;
+
+} lso_profile_t;
+
+/* Global variables */
+typedef struct {
odp_spinlock_t lock;
- pktio_entry_t entries[ODP_CONFIG_PKTIO_ENTRIES];
-} pktio_table_t;
+ odp_shm_t shm;
+
+ struct {
+ /* Frame start offset from base pointer at packet input */
+ uint16_t pktin_frame_offset;
+ /* Pool size for potential completion events */
+ uint32_t tx_compl_pool_size;
+ } config;
+
+ pktio_entry_t entries[CONFIG_PKTIO_ENTRIES];
+
+ lso_profile_t lso_profile[PKTIO_LSO_PROFILES];
+ int num_lso_profiles;
+
+} pktio_global_t;
typedef struct pktio_if_ops {
const char *name;
@@ -197,17 +204,36 @@ typedef struct pktio_if_ops {
int (*stop)(pktio_entry_t *pktio_entry);
int (*stats)(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats);
int (*stats_reset)(pktio_entry_t *pktio_entry);
- uint64_t (*pktin_ts_res)(pktio_entry_t *pktio_entry);
- odp_time_t (*pktin_ts_from_ns)(pktio_entry_t *pktio_entry, uint64_t ns);
+ int (*pktin_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats);
+ int (*pktout_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats);
+ int (*extra_stat_info)(pktio_entry_t *pktio_entry, odp_pktio_extra_stat_info_t info[],
+ int num);
+ int (*extra_stats)(pktio_entry_t *pktio_entry, uint64_t stats[], int num);
+ int (*extra_stat_counter)(pktio_entry_t *pktio_entry, uint32_t id, uint64_t *stat);
+ uint64_t (*pktio_ts_res)(pktio_entry_t *pktio_entry);
+ odp_time_t (*pktio_ts_from_ns)(pktio_entry_t *pktio_entry, uint64_t ns);
+ odp_time_t (*pktio_time)(pktio_entry_t *pktio_entry, odp_time_t *global_ts);
int (*recv)(pktio_entry_t *entry, int index, odp_packet_t packets[],
int num);
+ int (*recv_tmo)(pktio_entry_t *entry, int index, odp_packet_t packets[],
+ int num, uint64_t wait_usecs);
+ int (*recv_mq_tmo)(pktio_entry_t *entry[], int index[], uint32_t num_q,
+ odp_packet_t packets[], int num, uint32_t *from,
+ uint64_t wait_usecs);
+ int (*fd_set)(pktio_entry_t *entry, int index, fd_set *readfds);
int (*send)(pktio_entry_t *entry, int index,
const odp_packet_t packets[], int num);
- uint32_t (*mtu_get)(pktio_entry_t *pktio_entry);
+ uint32_t (*maxlen_get)(pktio_entry_t *pktio_entry);
+ int (*maxlen_set)(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output);
int (*promisc_mode_set)(pktio_entry_t *pktio_entry, int enable);
int (*promisc_mode_get)(pktio_entry_t *pktio_entry);
int (*mac_get)(pktio_entry_t *pktio_entry, void *mac_addr);
+ int (*mac_set)(pktio_entry_t *pktio_entry, const void *mac_addr);
int (*link_status)(pktio_entry_t *pktio_entry);
+ int (*link_info)(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info);
int (*capability)(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa);
int (*config)(pktio_entry_t *pktio_entry,
@@ -218,68 +244,127 @@ typedef struct pktio_if_ops {
const odp_pktout_queue_param_t *p);
} pktio_if_ops_t;
-extern void *pktio_entry_ptr[];
+typedef struct {
+ const void *user_ptr;
+} _odp_pktio_tx_compl_t;
-static inline int pktio_to_id(odp_pktio_t pktio)
-{
- return _odp_typeval(pktio) - 1;
-}
+extern void *_odp_pktio_entry_ptr[];
static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
{
+ int idx;
+
if (odp_unlikely(pktio == ODP_PKTIO_INVALID))
return NULL;
- if (odp_unlikely(_odp_typeval(pktio) > ODP_CONFIG_PKTIO_ENTRIES)) {
- ODP_DBG("pktio limit %d/%d exceed\n",
- _odp_typeval(pktio), ODP_CONFIG_PKTIO_ENTRIES);
+ if (odp_unlikely(_odp_typeval(pktio) > CONFIG_PKTIO_ENTRIES)) {
+ _ODP_DBG("pktio limit %" PRIuPTR "/%d exceed\n",
+ _odp_typeval(pktio), CONFIG_PKTIO_ENTRIES);
return NULL;
}
- return pktio_entry_ptr[pktio_to_id(pktio)];
+ idx = odp_pktio_index(pktio);
+
+ return _odp_pktio_entry_ptr[idx];
}
static inline int pktio_cls_enabled(pktio_entry_t *entry)
{
- return entry->s.cls_enabled;
+ return entry->enabled.cls;
}
-static inline void pktio_cls_enabled_set(pktio_entry_t *entry, int ena)
+static inline int _odp_pktio_tx_ts_enabled(pktio_entry_t *entry)
{
- entry->s.cls_enabled = ena;
+ return entry->enabled.tx_ts;
}
-/*
- * Dummy single queue implementations of multi-queue API
- */
-int single_capability(odp_pktio_capability_t *capa);
-int single_input_queues_config(pktio_entry_t *entry,
- const odp_pktin_queue_param_t *param);
-int single_output_queues_config(pktio_entry_t *entry,
- const odp_pktout_queue_param_t *param);
-int single_recv_queue(pktio_entry_t *entry, int index, odp_packet_t packets[],
- int num);
-int single_send_queue(pktio_entry_t *entry, int index,
- const odp_packet_t packets[], int num);
-
-extern const pktio_if_ops_t netmap_pktio_ops;
-extern const pktio_if_ops_t dpdk_pktio_ops;
-extern const pktio_if_ops_t sock_mmsg_pktio_ops;
-extern const pktio_if_ops_t sock_mmap_pktio_ops;
-extern const pktio_if_ops_t loopback_pktio_ops;
-#ifdef HAVE_PCAP
-extern const pktio_if_ops_t pcap_pktio_ops;
+static inline int _odp_pktio_tx_compl_enabled(const pktio_entry_t *entry)
+{
+ return entry->enabled.tx_compl;
+}
+
+static inline int _odp_pktio_tx_aging_enabled(pktio_entry_t *entry)
+{
+ return entry->enabled.tx_aging;
+}
+
+static inline void _odp_pktio_tx_ts_set(pktio_entry_t *entry)
+{
+ odp_time_t ts_val = odp_time_global();
+
+ odp_atomic_store_u64(&entry->tx_ts, ts_val.u64);
+}
+
+extern const pktio_if_ops_t _odp_dpdk_pktio_ops;
+extern const pktio_if_ops_t _odp_sock_xdp_pktio_ops;
+extern const pktio_if_ops_t _odp_sock_mmsg_pktio_ops;
+extern const pktio_if_ops_t _odp_sock_mmap_pktio_ops;
+extern const pktio_if_ops_t _odp_loopback_pktio_ops;
+#ifdef _ODP_PKTIO_PCAP
+extern const pktio_if_ops_t _odp_pcap_pktio_ops;
#endif
-extern const pktio_if_ops_t tap_pktio_ops;
-extern const pktio_if_ops_t ipc_pktio_ops;
-extern const pktio_if_ops_t * const pktio_if_ops[];
-
-int sysfs_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats);
-int sock_stats_fd(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats,
- int fd);
-int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd);
+extern const pktio_if_ops_t _odp_tap_pktio_ops;
+extern const pktio_if_ops_t _odp_null_pktio_ops;
+extern const pktio_if_ops_t _odp_ipc_pktio_ops;
+extern const pktio_if_ops_t * const _odp_pktio_if_ops[];
+
+/**
+ * Try interrupt-driven receive
+ *
+ * @param queues Pktin queues
+ * @param num_q Number of queues
+ * @param packets Output packet slots
+ * @param num Number of output packet slots
+ * @param from Queue from which the call received packets
+ * @param usecs Microseconds to wait
+ * @param trial_successful Will receive information whether trial was successful
+ *
+ * @return >=0 on success, number of packets received
+ * @return <0 on failure
+ */
+int _odp_sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
+ uint32_t num_q, uint32_t *from,
+ odp_packet_t packets[], int num,
+ uint64_t usecs,
+ int *trial_successful);
+
+/* Setup PKTOUT with single queue for TM */
+int _odp_pktio_pktout_tm_config(odp_pktio_t pktio_hdl,
+ odp_pktout_queue_t *queue, bool reconf);
+
+/* LSO functions shared with TM */
+odp_lso_profile_t _odp_lso_prof_from_idx(uint8_t idx);
+
+int _odp_lso_num_packets(odp_packet_t packet, const odp_packet_lso_opt_t *lso_opt,
+ uint32_t *len_out, uint32_t *left_over_out);
+
+int _odp_lso_create_packets(odp_packet_t packet, const odp_packet_lso_opt_t *lso_opt,
+ uint32_t payload_len, uint32_t left_over_len,
+ odp_packet_t pkt_out[], int num_pkt);
+
+void _odp_pktio_process_tx_compl(const pktio_entry_t *entry, const odp_packet_t packets[],
+ int num);
+
+static inline int _odp_pktio_packet_to_pool(odp_packet_t *pkt,
+ odp_packet_hdr_t **pkt_hdr,
+ odp_pool_t new_pool)
+{
+ odp_packet_t new_pkt;
+
+ if (odp_likely(new_pool == odp_packet_pool(*pkt)))
+ return 0;
+
+ new_pkt = odp_packet_copy(*pkt, new_pool);
+
+ if (odp_unlikely(new_pkt == ODP_PACKET_INVALID))
+ return 1;
+
+ odp_packet_free(*pkt);
+ *pkt = new_pkt;
+ *pkt_hdr = packet_hdr(new_pkt);
+
+ return 0;
+}
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_packet_io_ipc_internal.h b/platform/linux-generic/include/odp_packet_io_ipc_internal.h
deleted file mode 100644
index 7cd294886..000000000
--- a/platform/linux-generic/include/odp_packet_io_ipc_internal.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/packet_io.h>
-#include <odp_packet_io_internal.h>
-#include <odp/api/packet.h>
-#include <odp_packet_internal.h>
-#include <odp_internal.h>
-#include <odp/api/shared_memory.h>
-
-#include <string.h>
-#include <unistd.h>
-#include <stdlib.h>
-
-/* IPC packet I/O over shared memory ring */
-#include <odp_packet_io_ring_internal.h>
-
-/* number of odp buffers in odp ring queue */
-#define PKTIO_IPC_ENTRIES 4096
-
-/* that struct is exported to shared memory, so that processes can find
- * each other.
- */
-struct pktio_info {
- struct {
- /* number of buffer*/
- int num;
- /* size of packet/segment in remote pool */
- uint32_t block_size;
- /* offset from shared memory block start
- * to pool *base_addr in remote process.
- * (odp-linux pool specific) */
- size_t base_addr_offset;
- char pool_name[ODP_POOL_NAME_LEN];
- /* 1 if master finished creation of all shared objects */
- int init_done;
- } master;
- struct {
- /* offset from shared memory block start
- * to pool *base_addr in remote process.
- * (odp-linux pool specific) */
- size_t base_addr_offset;
- void *base_addr;
- uint32_t block_size;
- char pool_name[ODP_POOL_NAME_LEN];
- /* pid of the slave process written to shm and
- * used by master to look up memory created by
- * slave
- */
- int pid;
- int init_done;
- } slave;
-} ODP_PACKED;
diff --git a/platform/linux-generic/include/odp_packet_io_queue.h b/platform/linux-generic/include/odp_packet_io_queue.h
deleted file mode 100644
index d1d4b2251..000000000
--- a/platform/linux-generic/include/odp_packet_io_queue.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP packet IO - implementation internal
- */
-
-#ifndef ODP_PACKET_IO_QUEUE_H_
-#define ODP_PACKET_IO_QUEUE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp_queue_internal.h>
-#include <odp_buffer_internal.h>
-#include <odp_config_internal.h>
-
-/** Max nbr of pkts to receive in one burst (keep same as QUEUE_MULTI_MAX) */
-#define ODP_PKTIN_QUEUE_MAX_BURST CONFIG_BURST_SIZE
-/* pktin_deq_multi() depends on the condition: */
-ODP_STATIC_ASSERT(ODP_PKTIN_QUEUE_MAX_BURST >= QUEUE_MULTI_MAX,
- "ODP_PKTIN_DEQ_MULTI_MAX_ERROR");
-
-int pktin_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
-odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *queue);
-
-int pktin_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
-int pktin_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
-
-
-int pktout_enqueue(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
-odp_buffer_hdr_t *pktout_dequeue(queue_entry_t *queue);
-
-int pktout_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num);
-int pktout_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_packet_io_ring_internal.h b/platform/linux-generic/include/odp_packet_io_ring_internal.h
deleted file mode 100644
index d044f9319..000000000
--- a/platform/linux-generic/include/odp_packet_io_ring_internal.h
+++ /dev/null
@@ -1,589 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Derived from FreeBSD's bufring.c
- *
- **************************************************************************
- *
- * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. The name of Kip Macy nor the names of other
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- ***************************************************************************/
-
-/**
- * ODP Ring
- *
- * The Ring Manager is a fixed-size queue, implemented as a table of
- * pointers. Head and tail pointers are modified atomically, allowing
- * concurrent access to it. It has the following features:
- *
- * - FIFO (First In First Out)
- * - Maximum size is fixed; the pointers are stored in a table.
- * - Lockless implementation.
- * - Multi- or single-consumer dequeue.
- * - Multi- or single-producer enqueue.
- * - Bulk dequeue.
- * - Bulk enqueue.
- *
- * Note: the ring implementation is not preemptable. A lcore must not
- * be interrupted by another task that uses the same ring.
- *
- */
-
-#ifndef _RING_H_
-#define _RING_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/std_types.h>
-#include <odp/api/hints.h>
-#include <odp/api/atomic.h>
-#include <errno.h>
-#include <sys/queue.h>
-#include <odp_debug_internal.h>
-
-enum _ring_queue_behavior {
- _RING_QUEUE_FIXED = 0, /**< Enq/Deq a fixed number
- of items from a ring */
- _RING_QUEUE_VARIABLE /**< Enq/Deq as many items
- a possible from ring */
-};
-
-#define _RING_NAMESIZE 32 /**< The maximum length of a ring name. */
-
-/**
- * An ODP ring structure.
- *
- * The producer and the consumer have a head and a tail index. The particularity
- * of these index is that they are not between 0 and size(ring). These indexes
- * are between 0 and 2^32, and we mask their value when we access the ring[]
- * field. Thanks to this assumption, we can do subtractions between 2 index
- * values in a modulo-32bit base: that's why the overflow of the indexes is not
- * a problem.
- */
-typedef struct _ring {
- /** @private Next in list. */
- TAILQ_ENTRY(_ring) next;
-
- /** @private Name of the ring. */
- char name[_RING_NAMESIZE];
- /** @private Flags supplied at creation. */
- int flags;
-
- /** @private Producer */
- struct _prod {
- uint32_t watermark; /* Maximum items */
- uint32_t sp_enqueue; /* True, if single producer. */
- uint32_t size; /* Size of ring. */
- uint32_t mask; /* Mask (size-1) of ring. */
- volatile uint32_t head; /* Producer head. */
- volatile uint32_t tail; /* Producer tail. */
- } prod ODP_ALIGNED_CACHE;
-
- /** @private Consumer */
- struct _cons {
- uint32_t sc_dequeue; /* True, if single consumer. */
- uint32_t size; /* Size of the ring. */
- uint32_t mask; /* Mask (size-1) of ring. */
- volatile uint32_t head; /* Consumer head. */
- volatile uint32_t tail; /* Consumer tail. */
- } cons ODP_ALIGNED_CACHE;
-
- /** @private Memory space of ring starts here. */
- void *ring[0] ODP_ALIGNED_CACHE;
-} _ring_t;
-
-/* The default enqueue is "single-producer".*/
-#define _RING_F_SP_ENQ (1 << 0)
-/* The default dequeue is "single-consumer".*/
-#define _RING_F_SC_DEQ (1 << 1)
-/* If set - ring is visible from different processes.
- * Default is thread visible.*/
-#define _RING_SHM_PROC (1 << 2)
- /* Do not link ring to linked list. */
-#define _RING_NO_LIST (1 << 3)
-/* Quota exceed for burst ops */
-#define _RING_QUOT_EXCEED (1 << 31)
-/* Ring size mask */
-#define _RING_SZ_MASK (unsigned)(0x0fffffff)
-
-/**
- * Create a new ring named *name* in memory.
- *
- * This function uses odp_shm_reserve() to allocate memory. Its size is
- * set to *count*, which must be a power of two. Water marking is
- * disabled by default. Note that the real usable ring size is count-1
- * instead of count.
- *
- * @param name
- * The name of the ring.
- * @param count
- * The size of the ring (must be a power of 2).
- * @param flags
- * An OR of the following:
- * - RING_F_SP_ENQ: If this flag is set, the default behavior when
- * using ``odph_ring_enqueue()`` or ``odph_ring_enqueue_bulk()``
- * is "single-producer". Otherwise, it is "multi-producers".
- * - RING_F_SC_DEQ: If this flag is set, the default behavior when
- * using ``odph_ring_dequeue()`` or ``odph_ring_dequeue_bulk()``
- * is "single-consumer". Otherwise, it is "multi-consumers".
- * @return
- * On success, the pointer to the new allocated ring. NULL on error with
- * odp_errno set appropriately. Possible errno values include:
- * - EINVAL - count provided is not a power of 2
- * - ENOSPC - the maximum number of memzones has already been allocated
- * - EEXIST - a memzone with the same name already exists
- * - ENOMEM - no appropriate memory area found in which to create memzone
- */
-_ring_t *_ring_create(const char *name, unsigned count,
- unsigned flags);
-
-/**
- * Destroy the ring created with *name*.
- *
- * @param name name of the ring to be destroyed.
- * @return 0 on success and negative value on error.
- */
-int _ring_destroy(const char *name);
-
-/**
- * Change the high water mark.
- *
- * If *count* is 0, water marking is disabled. Otherwise, it is set to the
- * *count* value. The *count* value must be greater than 0 and less
- * than the ring size.
- *
- * This function can be called at any time (not necessarily at
- * initialization).
- *
- * @param r Pointer to the ring structure.
- * @param count New water mark value.
- * @return 0: Success; water mark changed.
- * -EINVAL: Invalid water mark value.
- */
-int _ring_set_water_mark(_ring_t *r, unsigned count);
-
-/**
- * Dump the status of the ring to the console.
- *
- * @param r A pointer to the ring structure.
- */
-void _ring_dump(const _ring_t *r);
-
-/**
- * Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @param behavior
- * ODPH_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * ODPH_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
- * @return
- * Depend on the behavior value
- * if behavior = ODPH_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = ODPH_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
- */
-int ___ring_mp_do_enqueue(_ring_t *r, void * const *obj_table,
- unsigned n,
- enum _ring_queue_behavior behavior);
-
-/**
- * Enqueue several objects on a ring (NOT multi-producers safe).
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @param behavior
- * ODPH_RING_QUEUE_FIXED: Enqueue a fixed number of items from a ring
- * ODPH_RING_QUEUE_VARIABLE: Enqueue as many items a possible from ring
- * @return
- * Depend on the behavior value
- * if behavior = ODPH_RING_QUEUE_FIXED
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- * if behavior = ODPH_RING_QUEUE_VARIABLE
- * - n: Actual number of objects enqueued.
- */
-int ___ring_sp_do_enqueue(_ring_t *r, void * const *obj_table,
- unsigned n,
- enum _ring_queue_behavior behavior);
-
-/**
- * Dequeue several objects from a ring (multi-consumers safe). When
- * the request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table.
- * @param behavior
- * ODPH_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * ODPH_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
- * @return
- * Depend on the behavior value
- * if behavior = ODPH_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = ODPH_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
- */
-
-int ___ring_mc_do_dequeue(_ring_t *r, void **obj_table,
- unsigned n,
- enum _ring_queue_behavior behavior);
-
-/**
- * Dequeue several objects from a ring (NOT multi-consumers safe).
- * When the request objects are more than the available objects, only dequeue
- * the actual number of objects
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table.
- * @param behavior
- * ODPH_RING_QUEUE_FIXED: Dequeue a fixed number of items from a ring
- * ODPH_RING_QUEUE_VARIABLE: Dequeue as many items a possible from ring
- * @return
- * Depend on the behavior value
- * if behavior = ODPH_RING_QUEUE_FIXED
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- * if behavior = ODPH_RING_QUEUE_VARIABLE
- * - n: Actual number of objects dequeued.
- */
-int ___ring_sc_do_dequeue(_ring_t *r, void **obj_table,
- unsigned n,
- enum _ring_queue_behavior behavior);
-
-/**
- * Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @return
- * - 0: Success; objects enqueue.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue, no object is enqueued.
- */
-int _ring_mp_enqueue_bulk(_ring_t *r, void * const *obj_table,
- unsigned n);
-
-/**
- * Enqueue several objects on a ring (NOT multi-producers safe).
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @return
- * - 0: Success; objects enqueued.
- * - -EDQUOT: Quota exceeded. The objects have been enqueued, but the
- * high water mark is exceeded.
- * - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
- */
-int _ring_sp_enqueue_bulk(_ring_t *r, void * const *obj_table,
- unsigned n);
-
-/**
- * Dequeue several objects from a ring (multi-consumers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table.
- * @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- */
-int _ring_mc_dequeue_bulk(_ring_t *r, void **obj_table, unsigned n);
-
-/**
- * Dequeue several objects from a ring (NOT multi-consumers safe).
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table,
- * must be strictly positive.
- * @return
- * - 0: Success; objects dequeued.
- * - -ENOENT: Not enough entries in the ring to dequeue; no object is
- * dequeued.
- */
-int _ring_sc_dequeue_bulk(_ring_t *r, void **obj_table, unsigned n);
-
-/**
- * Test if a ring is full.
- *
- * @param r
- * A pointer to the ring structure.
- * @return
- * - 1: The ring is full.
- * - 0: The ring is not full.
- */
-int _ring_full(const _ring_t *r);
-
-/**
- * Test if a ring is empty.
- *
- * @param r
- * A pointer to the ring structure.
- * @return
- * - 1: The ring is empty.
- * - 0: The ring is not empty.
- */
-int _ring_empty(const _ring_t *r);
-
-/**
- * Return the number of entries in a ring.
- *
- * @param r
- * A pointer to the ring structure.
- * @return
- * The number of entries in the ring.
- */
-unsigned _ring_count(const _ring_t *r);
-
-/**
- * Return the number of free entries in a ring.
- *
- * @param r
- * A pointer to the ring structure.
- * @return
- * The number of free entries in the ring.
- */
-unsigned _ring_free_count(const _ring_t *r);
-
-/**
- * search ring by name
- * @param name ring name to search
- * @return pointer to ring otherwise NULL
- */
-_ring_t *_ring_lookup(const char *name);
-
-/**
- * Enqueue several objects on the ring (multi-producers safe).
- *
- * This function uses a "compare and set" instruction to move the
- * producer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @return
- * - n: Actual number of objects enqueued.
- */
-int _ring_mp_enqueue_burst(_ring_t *r, void * const *obj_table,
- unsigned n);
-
-/**
- * Enqueue several objects on a ring (NOT multi-producers safe).
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @return
- * - n: Actual number of objects enqueued.
- */
-int _ring_sp_enqueue_burst(_ring_t *r, void * const *obj_table,
- unsigned n);
-/**
- * Enqueue several objects on a ring.
- *
- * This function calls the multi-producer or the single-producer
- * version depending on the default behavior that was specified at
- * ring creation time (see flags).
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects).
- * @param n
- * The number of objects to add in the ring from the obj_table.
- * @return
- * - n: Actual number of objects enqueued.
- */
-int _ring_enqueue_burst(_ring_t *r, void * const *obj_table,
- unsigned n);
-
-/**
- * Dequeue several objects from a ring (multi-consumers safe). When the request
- * objects are more than the available objects, only dequeue the actual number
- * of objects
- *
- * This function uses a "compare and set" instruction to move the
- * consumer index atomically.
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table.
- * @return
- * - n: Actual number of objects dequeued, 0 if ring is empty
- */
-int _ring_mc_dequeue_burst(_ring_t *r, void **obj_table, unsigned n);
-
-/**
- * Dequeue several objects from a ring (NOT multi-consumers safe).When the
- * request objects are more than the available objects, only dequeue the
- * actual number of objects
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table.
- * @return
- * - n: Actual number of objects dequeued, 0 if ring is empty
- */
-int _ring_sc_dequeue_burst(_ring_t *r, void **obj_table, unsigned n);
-
-/**
- * Dequeue multiple objects from a ring up to a maximum number.
- *
- * This function calls the multi-consumers or the single-consumer
- * version, depending on the default behaviour that was specified at
- * ring creation time (see flags).
- *
- * @param r
- * A pointer to the ring structure.
- * @param obj_table
- * A pointer to a table of void * pointers (objects) that will be filled.
- * @param n
- * The number of objects to dequeue from the ring to the obj_table.
- * @return
- * - Number of objects dequeued, or a negative error code on error
- */
-int _ring_dequeue_burst(_ring_t *r, void **obj_table, unsigned n);
-
-/**
- * dump the status of all rings on the console
- */
-void _ring_list_dump(void);
-
-/**
- * initialise ring tailq
- */
-void _ring_tailq_init(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_packet_io_stats.h b/platform/linux-generic/include/odp_packet_io_stats.h
new file mode 100644
index 000000000..c1b37fb29
--- /dev/null
+++ b/platform/linux-generic/include/odp_packet_io_stats.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PACKET_IO_STATS_H_
+#define ODP_PACKET_IO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/std_types.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/packet_io_stats.h>
+#include <odp_packet_io_internal.h>
+#include <odp_packet_io_stats_common.h>
+
+int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats,
+ int fd);
+int _odp_sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd);
+
+void _odp_sock_stats_capa(pktio_entry_t *pktio_entry,
+ odp_pktio_capability_t *capa);
+
+int _odp_sock_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num,
+ int fd);
+int _odp_sock_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[], int num,
+ int fd);
+int _odp_sock_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat, int fd);
+
+pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_PACKET_IO_STATS_H_ */
diff --git a/platform/linux-generic/include/odp_packet_io_stats_common.h b/platform/linux-generic/include/odp_packet_io_stats_common.h
new file mode 100644
index 000000000..19bd7c448
--- /dev/null
+++ b/platform/linux-generic/include/odp_packet_io_stats_common.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PACKET_IO_STATS_COMMON_H_
+#define ODP_PACKET_IO_STATS_COMMON_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef enum {
+ STATS_SYSFS = 0,
+ STATS_ETHTOOL,
+ STATS_UNSUPPORTED
+} pktio_stats_type_t;
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_PACKET_IO_STATS_COMMON_H_ */
diff --git a/platform/linux-generic/include/odp_packet_netmap.h b/platform/linux-generic/include/odp_packet_netmap.h
deleted file mode 100644
index a6f68d569..000000000
--- a/platform/linux-generic/include/odp_packet_netmap.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PACKET_NETMAP_H
-#define ODP_PACKET_NETMAP_H
-
-#include <odp/api/align.h>
-#include <odp/api/debug.h>
-#include <odp/api/packet_io.h>
-#include <odp/api/pool.h>
-#include <odp/api/ticketlock.h>
-#include <odp_align_internal.h>
-
-#include <linux/if_ether.h>
-#include <net/if.h>
-
-#define NM_MAX_DESC 64
-
-/** Ring for mapping pktin/pktout queues to netmap descriptors */
-struct netmap_ring_t {
- unsigned first; /**< Index of first netmap descriptor */
- unsigned last; /**< Index of last netmap descriptor */
- unsigned num; /**< Number of netmap descriptors */
- /** Netmap metadata for the device */
- struct nm_desc *desc[NM_MAX_DESC];
- unsigned cur; /**< Index of current netmap descriptor */
- odp_ticketlock_t lock; /**< Queue lock */
-};
-
-typedef union {
- struct netmap_ring_t s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct netmap_ring_t))];
-} netmap_ring_t ODP_ALIGNED_CACHE;
-
-/** Netmap ring slot */
-typedef struct {
- char *buf; /**< Slot buffer pointer */
- uint16_t len; /**< Slot length */
-} netmap_slot_t;
-
-/** Packet socket using netmap mmaped rings for both Rx and Tx */
-typedef struct {
- odp_pool_t pool; /**< pool to alloc packets from */
- size_t max_frame_len; /**< buf_size - sizeof(pkt_hdr) */
- uint32_t if_flags; /**< interface flags */
- uint32_t mtu; /**< maximum transmission unit */
- int sockfd; /**< control socket */
- unsigned char if_mac[ETH_ALEN]; /**< eth mac address */
- char nm_name[IF_NAMESIZE + 7]; /**< netmap:<ifname> */
- char if_name[IF_NAMESIZE]; /**< interface name used in ioctl */
- odp_bool_t is_virtual; /**< nm virtual port (VALE/pipe) */
- odp_pktio_capability_t capa; /**< interface capabilities */
- uint32_t num_rx_rings; /**< number of nm rx rings */
- uint32_t num_tx_rings; /**< number of nm tx rings */
- unsigned num_rx_desc_rings; /**< number of rx descriptor rings */
- unsigned num_tx_desc_rings; /**< number of tx descriptor rings */
- odp_bool_t lockless_rx; /**< no locking for rx */
- odp_bool_t lockless_tx; /**< no locking for tx */
- /** mapping of pktin queues to netmap rx descriptors */
- netmap_ring_t rx_desc_ring[PKTIO_MAX_QUEUES];
- /** mapping of pktout queues to netmap tx descriptors */
- netmap_ring_t tx_desc_ring[PKTIO_MAX_QUEUES];
-} pkt_netmap_t;
-
-#endif
diff --git a/platform/linux-generic/include/odp_packet_socket.h b/platform/linux-generic/include/odp_packet_socket.h
deleted file mode 100644
index dbfc9f1f8..000000000
--- a/platform/linux-generic/include/odp_packet_socket.h
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * Copyright (c) 2013, Nokia Solutions and Networks
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PACKET_SOCKET_H
-#define ODP_PACKET_SOCKET_H
-
-#include <linux/if_packet.h>
-#include <linux/if_ether.h>
-#include <sys/socket.h>
-#include <string.h>
-#include <stddef.h>
-
-#include <odp/api/align.h>
-#include <odp/api/buffer.h>
-#include <odp/api/debug.h>
-#include <odp/api/pool.h>
-#include <odp/api/packet.h>
-#include <odp/api/packet_io.h>
-
-#include <linux/version.h>
-
-/*
- * Packet socket config:
- */
-
-/** Max receive (Rx) burst size*/
-#define ODP_PACKET_SOCKET_MAX_BURST_RX 32
-/** Max transmit (Tx) burst size*/
-#define ODP_PACKET_SOCKET_MAX_BURST_TX 32
-
-/*
- * This makes sure that building for kernels older than 3.1 works
- * and a fanout requests fails (for invalid packet socket option)
- * in runtime if requested
- */
-#ifndef PACKET_FANOUT
-#define PACKET_FANOUT 18
-#define PACKET_FANOUT_HASH 0
-#endif /* PACKET_FANOUT */
-
-typedef struct {
- int sockfd; /**< socket descriptor */
- odp_pool_t pool; /**< pool to alloc packets from */
- uint32_t mtu; /**< maximum transmission unit */
- unsigned char if_mac[ETH_ALEN]; /**< IF eth mac addr */
- uint8_t *cache_ptr[ODP_PACKET_SOCKET_MAX_BURST_RX];
- odp_shm_t shm;
-} pkt_sock_t;
-
-/** packet mmap ring */
-struct ring {
- struct iovec *rd;
- unsigned frame_num;
- int rd_num;
-
- int sock;
- int type;
- int version;
- uint8_t *mm_space;
- size_t mm_len;
- size_t rd_len;
- int flen;
-
- struct tpacket_req req;
-};
-
-ODP_STATIC_ASSERT(offsetof(struct ring, mm_space) <= ODP_CACHE_LINE_SIZE,
- "ERR_STRUCT_RING");
-
-/** Packet socket using mmap rings for both Rx and Tx */
-typedef struct {
- /** Packet mmap ring for Rx */
- struct ring rx_ring ODP_ALIGNED_CACHE;
- /** Packet mmap ring for Tx */
- struct ring tx_ring ODP_ALIGNED_CACHE;
-
- int sockfd ODP_ALIGNED_CACHE;
- odp_pool_t pool;
- size_t frame_offset; /**< frame start offset from start of pkt buf */
- uint8_t *mmap_base;
- unsigned mmap_len;
- unsigned char if_mac[ETH_ALEN];
- struct sockaddr_ll ll;
- int fanout;
-} pkt_sock_mmap_t;
-
-static inline void
-ethaddr_copy(unsigned char mac_dst[], unsigned char mac_src[])
-{
- memcpy(mac_dst, mac_src, ETH_ALEN);
-}
-
-static inline int
-ethaddrs_equal(unsigned char mac_a[], unsigned char mac_b[])
-{
- return !memcmp(mac_a, mac_b, ETH_ALEN);
-}
-
-/**
- * Read the MAC address from a packet socket
- */
-int mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[]);
-
-/**
- * Read the MTU from a packet socket
- */
-uint32_t mtu_get_fd(int fd, const char *name);
-
-/**
- * Enable/Disable promisc mode for a packet socket
- */
-int promisc_mode_set_fd(int fd, const char *name, int enable);
-
-/**
- * Return promisc mode of a packet socket
- */
-int promisc_mode_get_fd(int fd, const char *name);
-
-/**
- * Return link status of a packet socket (up/down)
- */
-int link_status_fd(int fd, const char *name);
-
-/**
- * Get enabled RSS hash protocols of a packet socket
- *
- * @param fd Socket file descriptor
- * @param name Interface name
- * @param hash_proto[out] Hash protocols
- *
- * @returns Number enabled hash protocols
- */
-int rss_conf_get_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto);
-
-/**
- * Get supported RSS hash protocols of a packet socket
- *
- * Can be both read and modified.
- *
- * @param fd Socket file descriptor
- * @param name Interface name
- * @param hash_proto[out] Hash protocols
- *
- * @returns Number of supported hash protocols
- */
-int rss_conf_get_supported_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto);
-
-/**
- * Set RSS hash protocols of a packet socket
- *
- * @param fd Socket file descriptor
- * @param name Interface name
- * @param hash_proto Hash protocols
- *
- * @retval 0 on success
- * @retval <0 on failure
- */
-int rss_conf_set_fd(int fd, const char *name,
- const odp_pktin_hash_proto_t *proto);
-
-/**
- * Print enabled RSS hash protocols
- *
- * @param hash_proto Hash protocols
- */
-void rss_conf_print(const odp_pktin_hash_proto_t *hash_proto);
-
-/**
- * Get ethtool statistics of a packet socket
- */
-int ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats);
-
-#endif
diff --git a/platform/linux-generic/include/odp_packet_tap.h b/platform/linux-generic/include/odp_packet_tap.h
deleted file mode 100644
index a90bfbce0..000000000
--- a/platform/linux-generic/include/odp_packet_tap.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2015, Ilya Maximets <i.maximets@samsung.com>
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_PACKET_TAP_H_
-#define ODP_PACKET_TAP_H_
-
-#include <odp/api/pool.h>
-
-typedef struct {
- int fd; /**< file descriptor for tap interface*/
- int skfd; /**< socket descriptor */
- uint32_t mtu; /**< cached mtu */
- unsigned char if_mac[ETH_ALEN]; /**< MAC address of pktio side (not a
- MAC address of kernel interface)*/
- odp_pool_t pool; /**< pool to alloc packets from */
-} pkt_tap_t;
-
-#endif
diff --git a/platform/linux-generic/include/odp_parse_internal.h b/platform/linux-generic/include/odp_parse_internal.h
new file mode 100644
index 000000000..8b3fb480b
--- /dev/null
+++ b/platform/linux-generic/include/odp_parse_internal.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PARSE_INTERNAL_H_
+#define ODP_PARSE_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_packet_internal.h>
+#include <odp_macros_internal.h>
+#include <protocols/eth.h>
+#include <protocols/ip.h>
+#include <protocols/sctp.h>
+#include <protocols/tcp.h>
+#include <protocols/udp.h>
+#include <odp/api/plat/packet_inline_types.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/packet_types.h>
+#include <stdint.h>
+
+/*
+ * In the worst case we look at the Ethernet header, 8 bytes of LLC/SNAP
+ * header and two VLAN tags in the same packet.
+ */
+#define PARSE_ETH_BYTES (sizeof(_odp_ethhdr_t) + 8 + 2 * sizeof(_odp_vlanhdr_t))
+#define PARSE_IPV4_BYTES (0xfU * 4) /* max IPv4 header length with options */
+/*
+ * Peek 2 bytes beyond IPv6 base header without length check if there are
+ * extension headers.
+ */
+#define PARSE_IPV6_BYTES (sizeof(_odp_ipv6hdr_t) + 2)
+#define PARSE_TCP_BYTES (sizeof(_odp_tcphdr_t))
+/*
+ * In the worst case we look at the UDP header and 4 bytes of the UDP
+ * payload (the non-ESP marker to distinguish IKE packets from ESP packets).
+ */
+#define PARSE_UDP_BYTES (sizeof(_odp_udphdr_t) + 4)
+#define PARSE_SCTP_BYTES (sizeof(_odp_sctphdr_t))
+
+/* _odp_packet_parse_common_l3_l4() requires up to this many bytes. */
+#define PARSE_L3_L4_BYTES (_ODP_MAX(PARSE_IPV4_BYTES, PARSE_IPV6_BYTES) + \
+ _ODP_MAX3(PARSE_TCP_BYTES, PARSE_UDP_BYTES, PARSE_SCTP_BYTES))
+
+/* _odp_packet_parse_common() requires up to this many bytes. */
+#define PARSE_BYTES (PARSE_ETH_BYTES + PARSE_L3_L4_BYTES)
+
+uint16_t _odp_parse_eth(packet_parser_t *prs, const uint8_t **parseptr,
+ uint32_t *offset, uint32_t frame_len);
+
+/*
+ * Parse common L3 and L4 packet headers up to given layer
+ *
+ * See _odp_packet_parse_common(). Requires up to PARSE_L3_L4_BYTES bytes of
+ * contiguous packet data.
+ *
+ * - offset is the offset of the first byte of the data pointed to by parseptr
+ * - seg_end is the maximum offset that can be accessed plus one
+ */
+int _odp_packet_parse_common_l3_l4(packet_parser_t *prs,
+ const uint8_t *parseptr, uint32_t offset,
+ uint32_t frame_len, uint32_t seg_end,
+ int layer, uint16_t ethtype,
+ uint64_t *l4_part_sum,
+ odp_pktin_config_opt_t opt);
+
+/**
+ * Parse common packet headers up to given layer
+ *
+ * Requires up to PARSE_BYTES bytes of contiguous packet data. Also parse
+ * metadata must be already initialized.
+ *
+ * Returns 0 on success, 1 on packet errors, and -1 if the packet should be
+ * dropped.
+ */
+static inline int _odp_packet_parse_common(odp_packet_hdr_t *pkt_hdr,
+ const uint8_t *ptr,
+ uint32_t frame_len, uint32_t seg_len,
+ int layer,
+ odp_pktin_config_opt_t opt)
+{
+ int r;
+ uint32_t offset;
+ uint16_t ethtype;
+ const uint8_t *parseptr;
+ packet_parser_t *prs = &pkt_hdr->p;
+ uint64_t l4_part_sum = 0;
+
+ parseptr = ptr;
+ offset = 0;
+
+ if (odp_unlikely(layer == ODP_PROTO_LAYER_NONE))
+ return 0;
+
+ /* Assume valid L2 header, no CRC/FCS check in SW */
+ prs->l2_offset = offset;
+
+ ethtype = _odp_parse_eth(prs, &parseptr, &offset, frame_len);
+
+ r = _odp_packet_parse_common_l3_l4(prs, parseptr, offset, frame_len,
+ seg_len, layer, ethtype,
+ &l4_part_sum, opt);
+
+ if (!r && layer >= ODP_PROTO_LAYER_L4)
+ r = _odp_packet_l4_chksum(pkt_hdr, opt, l4_part_sum);
+
+ return r;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_pcapng.h b/platform/linux-generic/include/odp_pcapng.h
new file mode 100644
index 000000000..6f2a3dda5
--- /dev/null
+++ b/platform/linux-generic/include/odp_pcapng.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PCAPNG_H_
+#define ODP_PCAPNG_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/packet.h>
+#include <odp_packet_io_internal.h>
+
+#include <stdint.h>
+
+int _odp_pcapng_start(pktio_entry_t *entry);
+void _odp_pcapng_stop(pktio_entry_t *entry);
+int _odp_pcapng_dump_pkts(pktio_entry_t *entry, int qidx,
+ const odp_packet_t packets[], int num);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ODP_PCAPNG_H_ */
diff --git a/platform/linux-generic/include/odp_pkt_queue_internal.h b/platform/linux-generic/include/odp_pkt_queue_internal.h
index 83375d02c..03e5b575e 100644
--- a/platform/linux-generic/include/odp_pkt_queue_internal.h
+++ b/platform/linux-generic/include/odp_pkt_queue_internal.h
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,8 +13,9 @@
extern "C" {
#endif
+#include <odp/api/packet.h>
+
#include <stdint.h>
-#include <odp_api.h>
typedef uint64_t _odp_int_queue_pool_t;
typedef uint32_t _odp_int_pkt_queue_t;
@@ -43,6 +44,9 @@ _odp_int_queue_pool_t _odp_queue_pool_create(uint32_t max_num_queues,
_odp_int_pkt_queue_t _odp_pkt_queue_create(_odp_int_queue_pool_t queue_pool);
+void _odp_pkt_queue_destroy(_odp_int_queue_pool_t queue_pool,
+ _odp_int_pkt_queue_t pkt_queue);
+
int _odp_pkt_queue_append(_odp_int_queue_pool_t queue_pool,
_odp_int_pkt_queue_t pkt_queue,
odp_packet_t pkt);
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index ebb779da2..2c33bb4a2 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -18,55 +18,95 @@
extern "C" {
#endif
+#include <odp/api/atomic.h>
#include <odp/api/shared_memory.h>
#include <odp/api/ticketlock.h>
+#include <odp/api/align.h>
#include <odp_buffer_internal.h>
+#include <odp_event_internal.h>
#include <odp_config_internal.h>
-#include <odp_ring_internal.h>
+#include <odp_ring_ptr_internal.h>
#include <odp/api/plat/strong_types.h>
-typedef struct pool_cache_t {
- uint32_t num;
+#define _ODP_POOL_MEM_SRC_DATA_SIZE 128
- odp_buffer_t buf[CONFIG_POOL_CACHE_SIZE];
+typedef struct ODP_ALIGNED_CACHE pool_cache_t {
+ /* Number of buffers in cache */
+ odp_atomic_u32_t cache_num;
+ /* Cached buffers */
+ _odp_event_hdr_t *event_hdr[CONFIG_POOL_CACHE_MAX_SIZE];
-} pool_cache_t ODP_ALIGNED_CACHE;
+} pool_cache_t;
-/* Buffer header ring */
-typedef struct {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+/* Event header ring */
+typedef struct ODP_ALIGNED_CACHE {
/* Ring header */
- ring_t hdr;
+ ring_ptr_t hdr;
/* Ring data: buffer handles */
- uint32_t buf[CONFIG_POOL_MAX_NUM];
+ _odp_event_hdr_t *event_hdr[CONFIG_POOL_MAX_NUM + 1];
+
+ /* Index to pointer look-up table for external memory pool */
+ _odp_event_hdr_t *event_hdr_by_index[];
-} pool_ring_t ODP_ALIGNED_CACHE;
+} pool_ring_t;
+#pragma GCC diagnostic pop
+
+struct _odp_pool_mem_src_ops_t;
typedef struct pool_t {
odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+ uint32_t pool_idx;
+ uint8_t reserved;
+ /* Everything under this mark are memset() to zero on pool create */
+ uint8_t memset_mark;
+ uint8_t type;
+ uint8_t pool_ext;
char name[ODP_POOL_NAME_LEN];
odp_pool_param_t params;
- odp_pool_t pool_hdl;
- uint32_t pool_idx;
uint32_t ring_mask;
+ uint32_t cache_size;
+ uint32_t burst_size;
odp_shm_t shm;
odp_shm_t uarea_shm;
- int reserved;
+ uint64_t shm_size;
+ uint64_t uarea_shm_size;
uint32_t num;
uint32_t align;
uint32_t headroom;
uint32_t tailroom;
- uint32_t data_size;
- uint32_t max_len;
+ uint32_t seg_len;
uint32_t max_seg_len;
+ uint32_t max_len;
+ uint32_t param_uarea_size;
uint32_t uarea_size;
uint32_t block_size;
- uint32_t shm_size;
- uint32_t uarea_shm_size;
+ uint32_t block_offset;
+ uint32_t num_populated;
+ uint32_t trailer_size;
uint8_t *base_addr;
+ uint8_t *max_addr;
uint8_t *uarea_base_addr;
+ odp_pool_type_t type_2;
+ odp_pool_ext_param_t ext_param;
+ uint32_t ext_head_offset;
+ uint32_t skipped_blocks;
+ uint8_t mem_from_huge_pages;
+ const struct _odp_pool_mem_src_ops_t *mem_src_ops;
+ /* Private area for memory source operations */
+ uint8_t mem_src_data[_ODP_POOL_MEM_SRC_DATA_SIZE] ODP_ALIGNED_CACHE;
+
+ struct ODP_ALIGNED_CACHE {
+ odp_atomic_u64_t alloc_ops;
+ odp_atomic_u64_t alloc_fails;
+ odp_atomic_u64_t free_ops;
+ odp_atomic_u64_t cache_alloc_ops;
+ odp_atomic_u64_t cache_free_ops;
+ } stats;
pool_cache_t local_cache[ODP_THREAD_COUNT_MAX];
@@ -75,56 +115,98 @@ typedef struct pool_t {
} pool_t;
-typedef struct pool_table_t {
- pool_t pool[ODP_CONFIG_POOLS];
+typedef struct pool_global_t {
+ pool_t pool[CONFIG_POOLS];
odp_shm_t shm;
-} pool_table_t;
-extern pool_table_t *pool_tbl;
+ struct {
+ uint32_t pkt_max_len;
+ uint32_t pkt_max_num;
+ uint32_t local_cache_size;
+ uint32_t burst_size;
+ uint32_t pkt_base_align;
+ uint32_t buf_min_align;
+ } config;
+
+} pool_global_t;
+
+/* Operations for when ODP packet pool is used as a memory source for e.g. zero-copy packet IO
+ * purposes */
+typedef struct _odp_pool_mem_src_ops_t {
+ /* Name of the ops provider */
+ const char *name;
+ /* Signal if ops provider is an active user for the pool as a memory source */
+ odp_bool_t (*is_active)(void);
+ /* Force disable for the ops provider (for now, if one active memory source user is found,
+ * others are disabled) */
+ void (*force_disable)(void);
+ /* Adjust pool block sizes as required by memory consumer */
+ void (*adjust_size)(uint8_t *data, uint32_t *block_size, uint32_t *block_offset,
+ uint32_t *flags);
+ /* Bind the pool as a memory source */
+ int (*bind)(uint8_t *data, pool_t *pool);
+ /* Unbind the pool as a memory source */
+ void (*unbind)(uint8_t *data);
+} _odp_pool_mem_src_ops_t;
+
+extern pool_global_t *_odp_pool_glb;
+
+static inline pool_t *_odp_pool_entry_from_idx(uint32_t pool_idx)
+{
+ return &_odp_pool_glb->pool[pool_idx];
+}
-static inline pool_t *pool_entry(uint32_t pool_idx)
+static inline pool_t *_odp_pool_entry(odp_pool_t pool_hdl)
{
- return &pool_tbl->pool[pool_idx];
+ return (pool_t *)(uintptr_t)pool_hdl;
}
-static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl)
+static inline odp_pool_t _odp_pool_handle(pool_t *pool)
{
- return &pool_tbl->pool[_odp_typeval(pool_hdl)];
+ return (odp_pool_t)(uintptr_t)pool;
}
-static inline odp_buffer_hdr_t *pool_buf_hdl_to_hdr(pool_t *pool,
- odp_buffer_t buf)
+static inline _odp_event_hdr_t *event_hdr_from_index(pool_t *pool,
+ uint32_t event_idx)
{
- odp_buffer_bits_t handle;
- uint32_t index, block_offset;
- odp_buffer_hdr_t *buf_hdr;
+ uint64_t block_offset;
+ _odp_event_hdr_t *event_hdr;
- handle.handle = buf;
- index = handle.index;
- block_offset = index * pool->block_size;
+ block_offset = (event_idx * (uint64_t)pool->block_size) +
+ pool->block_offset;
/* clang requires cast to uintptr_t */
- buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
+ event_hdr = (_odp_event_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
- return buf_hdr;
+ return event_hdr;
}
-static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
+static inline _odp_event_hdr_t *_odp_event_hdr_from_index_u32(uint32_t u32)
{
- odp_buffer_bits_t handle;
- uint32_t pool_id;
+ _odp_event_index_t index;
+ uint32_t pool_idx, event_idx;
pool_t *pool;
- handle.handle = buf;
- pool_id = handle.pool_id;
- pool = pool_entry(pool_id);
+ index.u32 = u32;
+ pool_idx = index.pool;
+ event_idx = index.event;
+ pool = _odp_pool_entry_from_idx(pool_idx);
+
+ return event_hdr_from_index(pool, event_idx);
+}
+
+odp_event_t _odp_event_alloc(pool_t *pool);
+int _odp_event_alloc_multi(pool_t *pool, _odp_event_hdr_t *event_hdr[], int num);
+void _odp_event_free_multi(_odp_event_hdr_t *event_hdr[], int num_free);
+int _odp_event_is_valid(odp_event_t event);
- return pool_buf_hdl_to_hdr(pool, buf);
+static inline void _odp_event_free(odp_event_t event)
+{
+ _odp_event_free_multi((_odp_event_hdr_t **)&event, 1);
}
-int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
- odp_buffer_hdr_t *buf_hdr[], int num);
-void buffer_free_multi(const odp_buffer_t buf[], int num_free);
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_posix_extensions.h b/platform/linux-generic/include/odp_posix_extensions.h
index 2c468b4b6..93c8fdb6c 100644
--- a/platform/linux-generic/include/odp_posix_extensions.h
+++ b/platform/linux-generic/include/odp_posix_extensions.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/platform/linux-generic/include/odp_print_internal.h b/platform/linux-generic/include/odp_print_internal.h
new file mode 100644
index 000000000..949a1cc70
--- /dev/null
+++ b/platform/linux-generic/include/odp_print_internal.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PRINT_INTERNAL_H_
+#define ODP_PRINT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+
+int _odp_snprint(char *str, size_t size, const char *format, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_queue_basic_internal.h b/platform/linux-generic/include/odp_queue_basic_internal.h
new file mode 100644
index 000000000..3cdcf8600
--- /dev/null
+++ b/platform/linux-generic/include/odp_queue_basic_internal.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_QUEUE_BASIC_INTERNAL_H_
+#define ODP_QUEUE_BASIC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+#include <odp/api/queue.h>
+#include <odp/api/shared_memory.h>
+#include <odp_forward_typedefs_internal.h>
+#include <odp_queue_if.h>
+#include <odp_buffer_internal.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/ticketlock.h>
+#include <odp_config_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ring_mpmc_u32_internal.h>
+#include <odp_ring_st_internal.h>
+#include <odp_ring_spsc_internal.h>
+#include <odp_queue_lf.h>
+
+#define QUEUE_STATUS_FREE 0
+#define QUEUE_STATUS_DESTROYED 1
+#define QUEUE_STATUS_READY 2
+#define QUEUE_STATUS_NOTSCHED 3
+#define QUEUE_STATUS_SCHED 4
+
+typedef struct ODP_ALIGNED_CACHE queue_entry_s {
+ /* The first cache line is read only */
+ queue_enq_fn_t enqueue ODP_ALIGNED_CACHE;
+ queue_deq_fn_t dequeue;
+ queue_enq_multi_fn_t enqueue_multi;
+ queue_deq_multi_fn_t dequeue_multi;
+ uint32_t *ring_data;
+ uint32_t ring_mask;
+ uint32_t index;
+ odp_queue_t handle;
+ odp_queue_type_t type;
+
+ /* MPMC ring (2 cache lines). */
+ ring_mpmc_u32_t ring_mpmc;
+
+ odp_ticketlock_t lock;
+ union {
+ ring_st_t ring_st;
+ ring_spsc_t ring_spsc;
+ };
+
+ odp_atomic_u64_t num_timers;
+ int status;
+
+ queue_deq_multi_fn_t orig_dequeue_multi;
+ odp_queue_param_t param;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ void *queue_lf;
+ int spsc;
+ char name[ODP_QUEUE_NAME_LEN];
+} queue_entry_t;
+
+typedef struct queue_global_t {
+ queue_entry_t queue[CONFIG_MAX_QUEUES];
+ uint32_t *ring_data;
+ uint32_t queue_lf_num;
+ uint32_t queue_lf_size;
+ queue_lf_func_t queue_lf_func;
+ odp_shm_t queue_gbl_shm;
+ odp_shm_t queue_ring_shm;
+
+ struct {
+ uint32_t max_queue_size;
+ uint32_t default_queue_size;
+ } config;
+
+} queue_global_t;
+
+extern queue_global_t *_odp_queue_glb;
+
+static inline uint32_t queue_to_index(odp_queue_t handle)
+{
+ queue_entry_t *qentry = (queue_entry_t *)(uintptr_t)handle;
+
+ return qentry->index;
+}
+
+static inline queue_entry_t *qentry_from_index(uint32_t queue_id)
+{
+ return &_odp_queue_glb->queue[queue_id];
+}
+
+static inline odp_queue_t queue_from_index(uint32_t queue_id)
+{
+ return (odp_queue_t)qentry_from_index(queue_id);
+}
+
+static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
+{
+ return (queue_entry_t *)(uintptr_t)handle;
+}
+
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
+
+/* Functions for schedulers */
+void _odp_sched_queue_set_status(uint32_t queue_index, int status);
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
+ int update_status);
+int _odp_sched_queue_empty(uint32_t queue_index);
+
+/* Functions by schedulers */
+int _odp_sched_basic_get_spread(uint32_t queue_index);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_queue_if.h b/platform/linux-generic/include/odp_queue_if.h
new file mode 100644
index 000000000..ed4ec4e61
--- /dev/null
+++ b/platform/linux-generic/include/odp_queue_if.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2017, ARM Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_QUEUE_IF_H_
+#define ODP_QUEUE_IF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/queue.h>
+#include <odp/api/schedule.h>
+#include <odp/api/packet_io.h>
+
+#include <odp_event_internal.h>
+#include <odp_forward_typedefs_internal.h>
+
+#define QUEUE_MULTI_MAX CONFIG_BURST_SIZE
+
+typedef int (*queue_init_global_fn_t)(void);
+typedef int (*queue_term_global_fn_t)(void);
+typedef int (*queue_init_local_fn_t)(void);
+typedef int (*queue_term_local_fn_t)(void);
+typedef int (*queue_enq_fn_t)(odp_queue_t queue, _odp_event_hdr_t *event_hdr);
+typedef int (*queue_enq_multi_fn_t)(odp_queue_t queue,
+ _odp_event_hdr_t **event_hdr, int num);
+typedef _odp_event_hdr_t *(*queue_deq_fn_t)(odp_queue_t queue);
+typedef int (*queue_deq_multi_fn_t)(odp_queue_t queue,
+ _odp_event_hdr_t **event_hdr, int num);
+typedef odp_pktout_queue_t (*queue_get_pktout_fn_t)(odp_queue_t queue);
+typedef void (*queue_set_pktout_fn_t)(odp_queue_t queue, odp_pktio_t pktio,
+ int index);
+typedef odp_pktin_queue_t (*queue_get_pktin_fn_t)(odp_queue_t queue);
+typedef void (*queue_set_pktin_fn_t)(odp_queue_t queue, odp_pktio_t pktio,
+ int index);
+typedef void (*queue_set_enq_deq_fn_t)(odp_queue_t queue,
+ queue_enq_fn_t enq,
+ queue_enq_multi_fn_t enq_multi,
+ queue_deq_fn_t deq,
+ queue_deq_multi_fn_t deq_multi);
+typedef void (*queue_timer_add_fn_t)(odp_queue_t queue);
+typedef void (*queue_timer_rem_fn_t)(odp_queue_t queue);
+
+/* Queue functions towards other internal components */
+typedef struct {
+ queue_init_global_fn_t init_global;
+ queue_term_global_fn_t term_global;
+ queue_init_local_fn_t init_local;
+ queue_term_local_fn_t term_local;
+ queue_get_pktout_fn_t get_pktout;
+ queue_set_pktout_fn_t set_pktout;
+ queue_get_pktin_fn_t get_pktin;
+ queue_set_pktin_fn_t set_pktin;
+ queue_set_enq_deq_fn_t set_enq_deq_fn;
+ queue_timer_add_fn_t timer_add;
+ queue_timer_rem_fn_t timer_rem;
+
+ /* Original queue dequeue multi function (before override). May be used
+ * by an overriding dequeue function. */
+ queue_deq_multi_fn_t orig_deq_multi;
+} queue_fn_t;
+
+extern const queue_fn_t *_odp_queue_fn;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
deleted file mode 100644
index 560f826e5..000000000
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP queue - implementation internal
- */
-
-#ifndef ODP_QUEUE_INTERNAL_H_
-#define ODP_QUEUE_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/queue.h>
-#include <odp_forward_typedefs_internal.h>
-#include <odp_schedule_if.h>
-#include <odp_buffer_internal.h>
-#include <odp_align_internal.h>
-#include <odp/api/packet_io.h>
-#include <odp/api/align.h>
-#include <odp/api/hints.h>
-#include <odp/api/ticketlock.h>
-#include <odp_config_internal.h>
-
-#define QUEUE_MULTI_MAX CONFIG_BURST_SIZE
-
-#define QUEUE_STATUS_FREE 0
-#define QUEUE_STATUS_DESTROYED 1
-#define QUEUE_STATUS_READY 2
-#define QUEUE_STATUS_NOTSCHED 3
-#define QUEUE_STATUS_SCHED 4
-
-
-/* forward declaration */
-union queue_entry_u;
-
-typedef int (*enq_func_t)(union queue_entry_u *, odp_buffer_hdr_t *);
-typedef odp_buffer_hdr_t *(*deq_func_t)(union queue_entry_u *);
-
-typedef int (*enq_multi_func_t)(union queue_entry_u *,
- odp_buffer_hdr_t **, int);
-typedef int (*deq_multi_func_t)(union queue_entry_u *,
- odp_buffer_hdr_t **, int);
-
-struct queue_entry_s {
- odp_ticketlock_t lock ODP_ALIGNED_CACHE;
-
- odp_buffer_hdr_t *head;
- odp_buffer_hdr_t *tail;
- int status;
-
- struct {
- odp_atomic_u64_t ctx; /**< Current ordered context id */
- odp_atomic_u64_t next_ctx; /**< Next unallocated context id */
- /** Array of ordered locks */
- odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
- } ordered ODP_ALIGNED_CACHE;
-
- enq_func_t enqueue ODP_ALIGNED_CACHE;
- deq_func_t dequeue;
- enq_multi_func_t enqueue_multi;
- deq_multi_func_t dequeue_multi;
-
- uint32_t index;
- odp_queue_t handle;
- odp_queue_type_t type;
- odp_queue_param_t param;
- odp_pktin_queue_t pktin;
- odp_pktout_queue_t pktout;
- char name[ODP_QUEUE_NAME_LEN];
-};
-
-union queue_entry_u {
- struct queue_entry_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
-};
-
-
-queue_entry_t *get_qentry(uint32_t queue_id);
-
-int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr);
-odp_buffer_hdr_t *queue_deq(queue_entry_t *queue);
-
-int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
-int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num);
-
-void queue_lock(queue_entry_t *queue);
-void queue_unlock(queue_entry_t *queue);
-
-static inline uint32_t queue_to_id(odp_queue_t handle)
-{
- return _odp_typeval(handle) - 1;
-}
-
-static inline queue_entry_t *queue_to_qentry(odp_queue_t handle)
-{
- uint32_t queue_id;
-
- queue_id = queue_to_id(handle);
- return get_qentry(queue_id);
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_queue_lf.h b/platform/linux-generic/include/odp_queue_lf.h
new file mode 100644
index 000000000..9419812cf
--- /dev/null
+++ b/platform/linux-generic/include/odp_queue_lf.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_QUEUE_LF_H_
+#define ODP_QUEUE_LF_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_queue_if.h>
+
+/* Lock-free queue functions */
+typedef struct {
+ queue_enq_fn_t enq;
+ queue_enq_multi_fn_t enq_multi;
+ queue_deq_fn_t deq;
+ queue_deq_multi_fn_t deq_multi;
+
+} queue_lf_func_t;
+
+uint32_t _odp_queue_lf_init_global(uint32_t *queue_lf_size,
+ queue_lf_func_t *lf_func);
+void _odp_queue_lf_term_global(void);
+void *_odp_queue_lf_create(queue_entry_t *queue);
+void _odp_queue_lf_destroy(void *queue_lf);
+uint32_t _odp_queue_lf_length(void *queue_lf);
+uint32_t _odp_queue_lf_max_length(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_queue_scalable_internal.h b/platform/linux-generic/include/odp_queue_scalable_internal.h
new file mode 100644
index 000000000..dccc2aef9
--- /dev/null
+++ b/platform/linux-generic/include/odp_queue_scalable_internal.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_QUEUE_SCALABLE_INTERNAL_H_
+#define ODP_QUEUE_SCALABLE_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/plat/strong_types.h>
+#include <odp/api/queue.h>
+#include <odp_forward_typedefs_internal.h>
+#include <odp_queue_if.h>
+#include <odp_event_internal.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/ticketlock.h>
+#include <odp_config_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_schedule_scalable.h>
+#include <odp_schedule_scalable_ordered.h>
+
+#define QUEUE_STATUS_FREE 0
+#define QUEUE_STATUS_DESTROYED 1
+#define QUEUE_STATUS_READY 2
+
+struct ODP_ALIGNED_CACHE queue_entry_s {
+ sched_elem_t sched_elem;
+
+ odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+ odp_atomic_u64_t num_timers;
+ int status;
+
+ queue_enq_fn_t enqueue ODP_ALIGNED_CACHE;
+ queue_deq_fn_t dequeue;
+ queue_enq_multi_fn_t enqueue_multi;
+ queue_deq_multi_fn_t dequeue_multi;
+ queue_deq_multi_fn_t orig_dequeue_multi;
+
+ uint32_t index;
+ odp_queue_t handle;
+ odp_queue_type_t type;
+ odp_queue_param_t param;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ char name[ODP_QUEUE_NAME_LEN];
+};
+
+int _odp_queue_deq(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num);
+int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num);
+int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num);
+int _odp_queue_enq_sp(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num);
+queue_entry_t *_odp_qentry_from_ext(odp_queue_t handle);
+
+/* Round up memory size to next cache line size to
+ * align all memory addresses on cache line boundary.
+ */
+static inline void *shm_pool_alloc_align(_odp_ishm_pool_t *pool, uint32_t size)
+{
+ void *addr;
+
+ addr = _odp_ishm_pool_alloc(pool, _ODP_ROUNDUP_CACHE_LINE(size));
+ _ODP_ASSERT(((uintptr_t)addr & (ODP_CACHE_LINE_SIZE - 1)) == 0);
+
+ return addr;
+}
+
+static inline uint32_t queue_to_id(odp_queue_t handle)
+{
+ return _odp_qentry_from_ext(handle)->index;
+}
+
+static inline queue_entry_t *qentry_from_int(odp_queue_t handle)
+{
+ return (queue_entry_t *)(uintptr_t)handle;
+}
+
+static inline odp_queue_t qentry_to_int(queue_entry_t *qentry)
+{
+ return (odp_queue_t)qentry;
+}
+
+static inline odp_queue_t queue_get_handle(queue_entry_t *queue)
+{
+ return queue->handle;
+}
+
+static inline reorder_window_t *queue_get_rwin(queue_entry_t *queue)
+{
+ return queue->sched_elem.rwin;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_random_openssl_internal.h b/platform/linux-generic/include/odp_random_openssl_internal.h
new file mode 100644
index 000000000..5cb9006d1
--- /dev/null
+++ b/platform/linux-generic/include/odp_random_openssl_internal.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RANDOM_OPENSSL_INTERNAL_H_
+#define ODP_RANDOM_OPENSSL_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+int32_t _odp_random_openssl_data(uint8_t *buf, uint32_t len);
+int _odp_random_openssl_init_local(void);
+int _odp_random_openssl_term_local(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_RANDOM_OPENSSL_INTERNAL_H_ */
diff --git a/platform/linux-generic/include/odp_random_std_internal.h b/platform/linux-generic/include/odp_random_std_internal.h
new file mode 100644
index 000000000..fb350fd22
--- /dev/null
+++ b/platform/linux-generic/include/odp_random_std_internal.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RANDOM_STD_INTERNAL_H_
+#define ODP_RANDOM_STD_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+int32_t _odp_random_std_test_data(uint8_t *buf, uint32_t len, uint64_t *seed);
+int32_t _odp_random_std_data(uint8_t *buf, uint32_t len);
+int _odp_random_std_init_local(void);
+int _odp_random_std_term_local(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_RANDOM_STD_INTERNAL_H_ */
diff --git a/platform/linux-generic/include/odp_ring_common.h b/platform/linux-generic/include/odp_ring_common.h
new file mode 100644
index 000000000..a2d9e4be5
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_common.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2019-2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_COMMON_H_
+#define ODP_RING_COMMON_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _ODP_RING_TYPE_U32 1
+#define _ODP_RING_TYPE_U64 2
+#define _ODP_RING_TYPE_PTR 3
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index 55fedeb3a..296a87116 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -1,9 +1,13 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+/* This header should NOT be included directly. There are no include guards for
+ * the function definitions! */
+
#ifndef ODP_RING_INTERNAL_H_
#define ODP_RING_INTERNAL_H_
@@ -11,86 +15,174 @@
extern "C" {
#endif
+#include <odp/api/align.h>
#include <odp/api/atomic.h>
+#include <odp/api/cpu.h>
#include <odp/api/hints.h>
-#include <odp_align_internal.h>
-/* Ring empty, not a valid data value. */
-#define RING_EMPTY ((uint32_t)-1)
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/cpu_inlines.h>
+
+#include <odp_ring_common.h>
-/* Ring of uint32_t data
+/* Generic ring implementation
*
* Ring stores head and tail counters. Ring indexes are formed from these
* counters with a mask (mask = ring_size - 1), which requires that ring size
* must be a power of two. Also ring size must be larger than the maximum
- * number of data items that will be stored on it (there's no check against
- * overwriting). */
-typedef struct {
+ * number of data items that will be stored on it as write operations are
+ * assumed to succeed eventually (after readers complete their current
+ * operations). */
+
+struct ring_common {
/* Writer head and tail */
- odp_atomic_u32_t w_head;
- odp_atomic_u32_t w_tail;
- uint8_t pad[ODP_CACHE_LINE_SIZE - (2 * sizeof(odp_atomic_u32_t))];
+ struct ODP_ALIGNED_CACHE {
+ odp_atomic_u32_t w_head;
+ odp_atomic_u32_t w_tail;
+ };
/* Reader head and tail */
- odp_atomic_u32_t r_head;
- odp_atomic_u32_t r_tail;
+ struct ODP_ALIGNED_CACHE {
+ odp_atomic_u32_t r_head;
+ odp_atomic_u32_t r_tail;
+ };
+};
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_common r;
+ uint32_t data[];
+} ring_u32_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_common r;
+ uint64_t data[];
+} ring_u64_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_common r;
+ void *data[];
+} ring_ptr_t;
+
+/* 32-bit CAS with memory order selection */
+static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
+ uint32_t new_val, int mo_success, int mo_failure)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ mo_success,
+ mo_failure);
+}
- uint32_t data[0];
-} ring_t ODP_ALIGNED_CACHE;
+#endif /* End of include guards */
+
+#undef _ring_gen_t
+#undef _ring_data_t
+#undef _RING_INIT
+#undef _RING_DEQ
+#undef _RING_DEQ_MULTI
+#undef _RING_DEQ_BATCH
+#undef _RING_ENQ
+#undef _RING_ENQ_MULTI
+#undef _RING_LEN
+
+/* Remap generic types and function names to ring data type specific ones. One
+ * should never use the generic names (e.g. _RING_INIT) directly. */
+
+#if _ODP_RING_TYPE == _ODP_RING_TYPE_U32
+ #define _ring_gen_t ring_u32_t
+ #define _ring_data_t uint32_t
+
+ #define _RING_INIT ring_u32_init
+ #define _RING_DEQ ring_u32_deq
+ #define _RING_DEQ_MULTI ring_u32_deq_multi
+ #define _RING_DEQ_BATCH ring_u32_deq_batch
+ #define _RING_ENQ ring_u32_enq
+ #define _RING_ENQ_MULTI ring_u32_enq_multi
+ #define _RING_LEN ring_u32_len
+#elif _ODP_RING_TYPE == _ODP_RING_TYPE_U64
+ #define _ring_gen_t ring_u64_t
+ #define _ring_data_t uint64_t
+
+ #define _RING_INIT ring_u64_init
+ #define _RING_DEQ ring_u64_deq
+ #define _RING_DEQ_MULTI ring_u64_deq_multi
+ #define _RING_DEQ_BATCH ring_u64_deq_batch
+ #define _RING_ENQ ring_u64_enq
+ #define _RING_ENQ_MULTI ring_u64_enq_multi
+ #define _RING_LEN ring_u64_len
+#elif _ODP_RING_TYPE == _ODP_RING_TYPE_PTR
+ #define _ring_gen_t ring_ptr_t
+ #define _ring_data_t void *
+
+ #define _RING_INIT ring_ptr_init
+ #define _RING_DEQ ring_ptr_deq
+ #define _RING_DEQ_MULTI ring_ptr_deq_multi
+ #define _RING_DEQ_BATCH ring_ptr_deq_batch
+ #define _RING_ENQ ring_ptr_enq
+ #define _RING_ENQ_MULTI ring_ptr_enq_multi
+ #define _RING_LEN ring_ptr_len
+#endif
/* Initialize ring */
-static inline void ring_init(ring_t *ring)
+static inline void _RING_INIT(_ring_gen_t *ring)
{
- odp_atomic_init_u32(&ring->w_head, 0);
- odp_atomic_init_u32(&ring->w_tail, 0);
- odp_atomic_init_u32(&ring->r_head, 0);
- odp_atomic_init_u32(&ring->r_tail, 0);
+ odp_atomic_init_u32(&ring->r.w_head, 0);
+ odp_atomic_init_u32(&ring->r.w_tail, 0);
+ odp_atomic_init_u32(&ring->r.r_head, 0);
+ odp_atomic_init_u32(&ring->r.r_tail, 0);
}
/* Dequeue data from the ring head */
-static inline uint32_t ring_deq(ring_t *ring, uint32_t mask)
+static inline uint32_t _RING_DEQ(_ring_gen_t *ring, uint32_t mask,
+ _ring_data_t *data)
{
uint32_t head, tail, new_head;
- uint32_t data;
- head = odp_atomic_load_u32(&ring->r_head);
+ /* Load/CAS acquire of r_head ensures that w_tail load happens after
+ * r_head load, and thus head value is always behind or equal to tail
+ * value. */
+ head = odp_atomic_load_acq_u32(&ring->r.r_head);
/* Move reader head. This thread owns data at the new head. */
do {
- tail = odp_atomic_load_u32(&ring->w_tail);
+ tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
if (head == tail)
- return RING_EMPTY;
+ return 0;
new_head = head + 1;
- } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head,
- new_head) == 0));
+ } while (odp_unlikely(cas_mo_u32(&ring->r.r_head, &head, new_head,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) == 0));
- /* Read queue index */
- data = ring->data[new_head & mask];
+ /* Read data. */
+ *data = ring->data[new_head & mask];
/* Wait until other readers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != head))
odp_cpu_pause();
- /* Now update the reader tail */
- odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+ /* Update the tail. Writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
- return data;
+ return 1;
}
/* Dequeue multiple data from the ring head. Num is smaller than ring size. */
-static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
- uint32_t data[], uint32_t num)
+static inline uint32_t _RING_DEQ_MULTI(_ring_gen_t *ring, uint32_t mask,
+ _ring_data_t data[], uint32_t num)
{
uint32_t head, tail, new_head, i;
- head = odp_atomic_load_u32(&ring->r_head);
+ /* Load/CAS acquire of r_head ensures that w_tail load happens after
+ * r_head load, and thus head value is always behind or equal to tail
+ * value. */
+ head = odp_atomic_load_acq_u32(&ring->r.r_head);
/* Move reader head. This thread owns data at the new head. */
do {
- tail = odp_atomic_load_u32(&ring->w_tail);
+ tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
/* Ring is empty */
if (head == tail)
@@ -102,59 +194,108 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
new_head = head + num;
- } while (odp_unlikely(odp_atomic_cas_acq_u32(&ring->r_head, &head,
- new_head) == 0));
+ } while (odp_unlikely(cas_mo_u32(&ring->r.r_head, &head, new_head,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) == 0));
+
+ /* Read data. */
+ for (i = 0; i < num; i++)
+ data[i] = ring->data[(head + 1 + i) & mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != head))
+ odp_cpu_pause();
+
+ /* Update the tail. Writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
+
+ return num;
+}
+
+/* Dequeue batch of data (0 or num) from the ring head. Num is smaller than ring size. */
+static inline uint32_t _RING_DEQ_BATCH(_ring_gen_t *ring, uint32_t mask,
+ _ring_data_t data[], uint32_t num)
+{
+ uint32_t head, tail, new_head, i;
+
+ /* Load/CAS acquire of r_head ensures that w_tail load happens after
+ * r_head load, and thus head value is always behind or equal to tail
+ * value. */
+ head = odp_atomic_load_acq_u32(&ring->r.r_head);
+
+ /* Move reader head. This thread owns data at the new head. */
+ do {
+ tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
+
+ /* Not enough data available */
+ if ((tail - head) < num)
+ return 0;
+
+ new_head = head + num;
+
+ } while (odp_unlikely(cas_mo_u32(&ring->r.r_head, &head, new_head,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) == 0));
- /* Read queue index */
+ /* Read data. */
for (i = 0; i < num; i++)
data[i] = ring->data[(head + 1 + i) & mask];
/* Wait until other readers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) != head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != head))
odp_cpu_pause();
- /* Now update the reader tail */
- odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+ /* Update the tail. Writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
return num;
}
/* Enqueue data into the ring tail */
-static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data)
+static inline void _RING_ENQ(_ring_gen_t *ring, uint32_t mask,
+ _ring_data_t data)
{
uint32_t old_head, new_head;
+ uint32_t size = mask + 1;
/* Reserve a slot in the ring for writing */
- old_head = odp_atomic_fetch_inc_u32(&ring->w_head);
+ old_head = odp_atomic_fetch_inc_u32(&ring->r.w_head);
new_head = old_head + 1;
- /* Ring is full. Wait for the last reader to finish. */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head))
+ /* Wait for the last reader to finish. This prevents overwrite when
+ * a reader has been left behind (e.g. due to an interrupt) and is
+ * still reading the same slot. */
+ while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r.r_tail)
+ >= size))
odp_cpu_pause();
/* Write data */
ring->data[new_head & mask] = data;
/* Wait until other writers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.w_tail) != old_head))
odp_cpu_pause();
- /* Now update the writer tail */
- odp_atomic_store_rel_u32(&ring->w_tail, new_head);
+ /* Release the new writer tail, readers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.w_tail, new_head);
}
/* Enqueue multiple data into the ring tail. Num is smaller than ring size. */
-static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[],
- uint32_t num)
+static inline void _RING_ENQ_MULTI(_ring_gen_t *ring, uint32_t mask,
+ _ring_data_t data[], uint32_t num)
{
uint32_t old_head, new_head, i;
+ uint32_t size = mask + 1;
/* Reserve a slot in the ring for writing */
- old_head = odp_atomic_fetch_add_u32(&ring->w_head, num);
+ old_head = odp_atomic_fetch_add_u32(&ring->r.w_head, num);
new_head = old_head + 1;
- /* Ring is full. Wait for the last reader to finish. */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->r_tail) == new_head))
+ /* Wait for the last reader to finish. This prevents overwrite when
+ * a reader has been left behind (e.g. due to an interrupt) and is
+ * still reading these slots. */
+ while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r.r_tail)
+ >= size))
odp_cpu_pause();
/* Write data */
@@ -162,15 +303,21 @@ static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[],
ring->data[(new_head + i) & mask] = data[i];
/* Wait until other writers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.w_tail) != old_head))
odp_cpu_pause();
- /* Now update the writer tail */
- odp_atomic_store_rel_u32(&ring->w_tail, old_head + num);
+ /* Release the new writer tail, readers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.w_tail, old_head + num);
}
-#ifdef __cplusplus
+static inline uint32_t _RING_LEN(_ring_gen_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->r.r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->r.w_tail);
+
+ return tail - head;
}
-#endif
+#ifdef __cplusplus
+}
#endif
diff --git a/platform/linux-generic/include/odp_ring_mpmc_internal.h b/platform/linux-generic/include/odp_ring_mpmc_internal.h
new file mode 100644
index 000000000..3526b3866
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_mpmc_internal.h
@@ -0,0 +1,350 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_INTERNAL_H_
+#define ODP_RING_MPMC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+#include <odp/api/atomic.h>
+#include <odp/api/cpu.h>
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/cpu_inlines.h>
+
+#include <odp_ring_common.h>
+
+/* Ring of uint32_t/uint64_t data
+ *
+ * Ring stores head and tail counters. Ring indexes are formed from these
+ * counters with a mask (mask = ring_size - 1), which requires that ring size
+ * must be a power of two.
+ *
+ * The following figures depict an example where a ring is being simultaneously
+ * enqueued to and dequeued from. Ring slots containing data are marked with
+ * letter D, empty slots with E, and slots being modified with X.
+ *
+ * Ring status before enq/deq operations.
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * | E | E | D | D | D | D | D | D | E | E | E | E | E | E | E | E |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * ^ ^
+ * | |
+ * r_head w_head
+ * r_tail w_tail
+ *
+ * Ring status while being enqueued and dequeued.
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * | E | E | X | X | D | D | D | D | X | X | X | E | E | E | E | E |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * ^ ^ ^ ^
+ * | | | |
+ * r_tail r_head w_tail w_head
+ *
+ * Ring status after enq/deq operations.
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * | E | E | E | E | D | D | D | D | D | D | D | E | E | E | E | E |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * ^ ^
+ * | |
+ * r_head w_head
+ * r_tail w_tail
+ */
+
+struct ring_mpmc_common {
+ odp_atomic_u32_t r_head ODP_ALIGNED_CACHE;
+ odp_atomic_u32_t r_tail;
+
+ odp_atomic_u32_t w_head ODP_ALIGNED_CACHE;
+ odp_atomic_u32_t w_tail;
+};
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_mpmc_common r;
+} ring_mpmc_u32_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct ring_mpmc_common r;
+} ring_mpmc_u64_t;
+
+static inline int ring_mpmc_cas_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+}
+
+#endif /* End of include guards */
+
+#undef _ring_mpmc_gen_t
+#undef _ring_mpmc_data_t
+#undef _RING_MPMC_INIT
+#undef _RING_MPMC_DEQ_MULTI
+#undef _RING_MPMC_ENQ_MULTI
+#undef _RING_MPMC_DEQ_BATCH
+#undef _RING_MPMC_ENQ_BATCH
+#undef _RING_MPMC_IS_EMPTY
+#undef _RING_MPMC_LEN
+
+/* This header should NOT be included directly. There are no include guards for
+ * the following types and function definitions! */
+#ifndef _ODP_RING_TYPE
+#error Include type specific (u32/u64) ring header instead of this common file.
+#endif
+
+#if _ODP_RING_TYPE == _ODP_RING_TYPE_U32
+ #define _ring_mpmc_gen_t ring_mpmc_u32_t
+ #define _ring_mpmc_data_t uint32_t
+
+ #define _RING_MPMC_INIT ring_mpmc_u32_init
+ #define _RING_MPMC_DEQ_MULTI ring_mpmc_u32_deq_multi
+ #define _RING_MPMC_ENQ_MULTI ring_mpmc_u32_enq_multi
+ #define _RING_MPMC_DEQ_BATCH ring_mpmc_u32_deq_batch
+ #define _RING_MPMC_ENQ_BATCH ring_mpmc_u32_enq_batch
+ #define _RING_MPMC_IS_EMPTY ring_mpmc_u32_is_empty
+ #define _RING_MPMC_LEN ring_mpmc_u32_len
+#elif _ODP_RING_TYPE == _ODP_RING_TYPE_U64
+ #define _ring_mpmc_gen_t ring_mpmc_u64_t
+ #define _ring_mpmc_data_t uint64_t
+
+ #define _RING_MPMC_INIT ring_mpmc_u64_init
+ #define _RING_MPMC_DEQ_MULTI ring_mpmc_u64_deq_multi
+ #define _RING_MPMC_ENQ_MULTI ring_mpmc_u64_enq_multi
+ #define _RING_MPMC_DEQ_BATCH ring_mpmc_u64_deq_batch
+ #define _RING_MPMC_ENQ_BATCH ring_mpmc_u64_enq_batch
+ #define _RING_MPMC_IS_EMPTY ring_mpmc_u64_is_empty
+ #define _RING_MPMC_LEN ring_mpmc_u64_len
+#endif
+
+/* Initialize ring */
+static inline void _RING_MPMC_INIT(_ring_mpmc_gen_t *ring)
+{
+ odp_atomic_init_u32(&ring->r.w_head, 0);
+ odp_atomic_init_u32(&ring->r.w_tail, 0);
+ odp_atomic_init_u32(&ring->r.r_head, 0);
+ odp_atomic_init_u32(&ring->r.r_tail, 0);
+}
+
+/* Dequeue data from the ring head */
+static inline uint32_t _RING_MPMC_DEQ_MULTI(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ _ring_mpmc_data_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, w_tail, num_data, i;
+
+ /* Load acquires ensure that w_tail load happens after r_head load,
+ * and thus r_head value is always behind or equal to w_tail value.
+ * When CAS operation succeeds, this thread owns data between old
+ * and new r_head. */
+ do {
+ old_head = odp_atomic_load_acq_u32(&ring->r.r_head);
+ odp_prefetch(&ring_data[(old_head + 1) & ring_mask]);
+ w_tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
+ num_data = w_tail - old_head;
+
+ /* Ring is empty */
+ if (num_data == 0)
+ return 0;
+
+ /* Try to take all available */
+ if (num > num_data)
+ num = num_data;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.r_head, &old_head,
+ new_head) == 0));
+
+ /* Read data. This will not move above load acquire of r_head. */
+ for (i = 0; i < num; i++)
+ data[i] = ring_data[(old_head + 1 + i) & ring_mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new reader tail, writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
+
+ return num;
+}
+
+/* Dequeue num or 0 data from the ring head */
+static inline uint32_t _RING_MPMC_DEQ_BATCH(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ _ring_mpmc_data_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, w_tail, num_data, i;
+
+ /* Load acquires ensure that w_tail load happens after r_head load,
+ * and thus r_head value is always behind or equal to w_tail value.
+ * When CAS operation succeeds, this thread owns data between old
+ * and new r_head. */
+ do {
+ old_head = odp_atomic_load_acq_u32(&ring->r.r_head);
+ odp_prefetch(&ring_data[(old_head + 1) & ring_mask]);
+ w_tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
+ num_data = w_tail - old_head;
+
+ /* Not enough data available */
+ if (num_data < num)
+ return 0;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.r_head, &old_head,
+ new_head) == 0));
+
+ /* Read data. This will not move above load acquire of r_head. */
+ for (i = 0; i < num; i++)
+ data[i] = ring_data[(old_head + 1 + i) & ring_mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new reader tail, writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
+
+ return num;
+}
+
+/* Enqueue multiple data into the ring tail */
+static inline uint32_t _RING_MPMC_ENQ_MULTI(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ const _ring_mpmc_data_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, r_tail, num_free, i;
+ uint32_t size = ring_mask + 1;
+
+ /* The CAS operation guarantees that w_head value is up to date. Load
+ * acquire is used to ensure that r_tail is read after w_head. This
+ * guarantees that w_head - r_tail <= size. Any additional delay in
+ * reading r_tail makes the subtraction result only smaller. This
+ * avoids returning zero when the ring is not actually full.
+ *
+ * When CAS operation succeeds, this thread owns data between old and
+ * new w_head. */
+ do {
+ old_head = odp_atomic_load_acq_u32(&ring->r.w_head);
+ r_tail = odp_atomic_load_acq_u32(&ring->r.r_tail);
+
+ num_free = size - (old_head - r_tail);
+
+ /* Ring is full */
+ if (num_free == 0)
+ return 0;
+
+ /* Try to use all available */
+ if (num > num_free)
+ num = num_free;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.w_head, &old_head,
+ new_head) == 0));
+
+ /* Write data. This will not move above load acquire of w_head. */
+ for (i = 0; i < num; i++)
+ ring_data[(old_head + 1 + i) & ring_mask] = data[i];
+
+ /* Wait until other writers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.w_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new writer tail, readers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.w_tail, new_head);
+
+ return num;
+}
+
+/* Enqueue num or 0 data into the ring tail */
+static inline uint32_t _RING_MPMC_ENQ_BATCH(_ring_mpmc_gen_t *ring,
+ _ring_mpmc_data_t *ring_data,
+ uint32_t ring_mask,
+ const _ring_mpmc_data_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, r_tail, num_free, i;
+ uint32_t size = ring_mask + 1;
+
+ /* The CAS operation guarantees that w_head value is up to date. Load
+ * acquire is used to ensure that r_tail is read after w_head. This
+ * guarantees that w_head - r_tail <= size. Any additional delay in
+ * reading r_tail makes the subtraction result only smaller. This
+ * avoids returning zero when the ring is not actually full.
+ *
+ * When CAS operation succeeds, this thread owns data between old and
+ * new w_head. */
+ do {
+ old_head = odp_atomic_load_acq_u32(&ring->r.w_head);
+ r_tail = odp_atomic_load_acq_u32(&ring->r.r_tail);
+
+ num_free = size - (old_head - r_tail);
+
+ /* Not enough free space available */
+ if (num_free < num)
+ return 0;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r.w_head, &old_head,
+ new_head) == 0));
+
+ /* Write data. This will not move above load acquire of w_head. */
+ for (i = 0; i < num; i++)
+ ring_data[(old_head + 1 + i) & ring_mask] = data[i];
+
+ /* Wait until other writers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.w_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new writer tail, readers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.w_tail, new_head);
+
+ return num;
+}
+
+/* Check if ring is empty */
+static inline int _RING_MPMC_IS_EMPTY(_ring_mpmc_gen_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->r.r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->r.w_tail);
+
+ return head == tail;
+}
+
+/* Return current ring length */
+static inline uint32_t _RING_MPMC_LEN(_ring_mpmc_gen_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->r.r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->r.w_tail);
+
+ return tail - head;
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h b/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h
new file mode 100644
index 000000000..4699b5b47
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_mpmc_u32_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_U32_INTERNAL_H_
+#define ODP_RING_MPMC_U32_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_U32
+
+#include <odp_ring_mpmc_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h b/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h
new file mode 100644
index 000000000..e7bf31a94
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_mpmc_u64_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_U64_INTERNAL_H_
+#define ODP_RING_MPMC_U64_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_U64
+
+#include <odp_ring_mpmc_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_ptr_internal.h b/platform/linux-generic/include/odp_ring_ptr_internal.h
new file mode 100644
index 000000000..13b2b2fbf
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_ptr_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_PTR_INTERNAL_H_
+#define ODP_RING_PTR_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_PTR
+
+#include <odp_ring_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_spsc_internal.h b/platform/linux-generic/include/odp_ring_spsc_internal.h
new file mode 100644
index 000000000..23e7d8868
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_spsc_internal.h
@@ -0,0 +1,130 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_SPSC_INTERNAL_H_
+#define ODP_RING_SPSC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+#include <odp/api/atomic.h>
+#include <odp/api/plat/atomic_inlines.h>
+
+/* Lock-free ring for single-producer / single-consumer usage.
+ *
+ * Thread doing an operation may be different each time, but the same operation
+ * (enq- or dequeue) must not be called concurrently. The next thread may call
+ * the same operation only when it's sure that the previous thread have returned
+ * from the call, or will never return back to finish the call when interrupted
+ * during the call.
+ *
+ * Enqueue and dequeue operations can be done concurrently.
+ */
+typedef struct {
+ odp_atomic_u32_t head;
+ odp_atomic_u32_t tail;
+
+} ring_spsc_t;
+
+/* Initialize ring. Ring size must be a power of two. */
+static inline void ring_spsc_init(ring_spsc_t *ring)
+{
+ odp_atomic_init_u32(&ring->head, 0);
+ odp_atomic_init_u32(&ring->tail, 0);
+}
+
+/* Dequeue data from the ring head. Max_num is smaller than ring size.*/
+static inline uint32_t ring_spsc_deq_multi(ring_spsc_t *ring,
+ uint32_t *ring_data,
+ uint32_t ring_mask, uint32_t data[],
+ uint32_t max_num)
+{
+ uint32_t head, tail, idx;
+ uint32_t num, i;
+
+ tail = odp_atomic_load_acq_u32(&ring->tail);
+ head = odp_atomic_load_u32(&ring->head);
+ num = tail - head;
+
+ /* Empty */
+ if (num == 0)
+ return 0;
+
+ if (num > max_num)
+ num = max_num;
+
+ idx = head & ring_mask;
+
+ for (i = 0; i < num; i++) {
+ data[i] = ring_data[idx];
+ idx = (idx + 1) & ring_mask;
+ }
+
+ odp_atomic_store_rel_u32(&ring->head, head + num);
+
+ return num;
+}
+
+/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
+static inline uint32_t ring_spsc_enq_multi(ring_spsc_t *ring,
+ uint32_t *ring_data,
+ uint32_t ring_mask,
+ const uint32_t data[],
+ uint32_t num_data)
+{
+ uint32_t head, tail, size, idx;
+ uint32_t num, i;
+
+ head = odp_atomic_load_acq_u32(&ring->head);
+ tail = odp_atomic_load_u32(&ring->tail);
+ size = ring_mask + 1;
+ num = size - (tail - head);
+
+ /* Full */
+ if (num == 0)
+ return 0;
+
+ if (num > num_data)
+ num = num_data;
+
+ idx = tail & ring_mask;
+
+ for (i = 0; i < num; i++) {
+ ring_data[idx] = data[i];
+ idx = (idx + 1) & ring_mask;
+ }
+
+ odp_atomic_store_rel_u32(&ring->tail, tail + num);
+
+ return num;
+}
+
+/* Check if ring is empty */
+static inline int ring_spsc_is_empty(ring_spsc_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->head);
+ uint32_t tail = odp_atomic_load_u32(&ring->tail);
+
+ return head == tail;
+}
+
+/* Return current ring length */
+static inline uint32_t ring_spsc_length(ring_spsc_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->head);
+ uint32_t tail = odp_atomic_load_u32(&ring->tail);
+
+ return tail - head;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_st_internal.h b/platform/linux-generic/include/odp_ring_st_internal.h
new file mode 100644
index 000000000..406d043b5
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_st_internal.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_ST_INTERNAL_H_
+#define ODP_RING_ST_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+
+/* Basic ring for single thread usage. Operations must be synchronized by using
+ * locks (or other means), when multiple threads use the same ring. Only the
+ * free-running head/tail indices are stored here; the data array is owned by
+ * the caller and passed to each operation. */
+typedef struct {
+ uint32_t head; /* Next index to dequeue from */
+ uint32_t tail; /* Next index to enqueue to */
+} ring_st_t;
+
+/* Initialize ring to empty. Ring size must be a power of two, since the
+ * free-running indices are masked with (size - 1) on each access. */
+static inline void ring_st_init(ring_st_t *ring)
+{
+ ring->head = 0;
+ ring->tail = 0;
+}
+
+/* Dequeue up to max_num values from the ring head. Max_num is smaller than
+ * ring size. Returns the number of values actually dequeued (0 when empty).
+ * Single threaded variant: plain loads/stores, no atomics. */
+static inline uint32_t ring_st_deq_multi(ring_st_t *ring, uint32_t *ring_data,
+ uint32_t ring_mask, uint32_t data[],
+ uint32_t max_num)
+{
+ uint32_t head, tail, idx;
+ uint32_t num, i;
+
+ head = ring->head;
+ tail = ring->tail;
+ num = tail - head;
+
+ /* Empty */
+ if (num == 0)
+ return 0;
+
+ if (num > max_num)
+ num = max_num;
+
+ idx = head & ring_mask;
+
+ for (i = 0; i < num; i++) {
+ data[i] = ring_data[idx];
+ idx = (idx + 1) & ring_mask;
+ }
+
+ ring->head = head + num;
+
+ return num;
+}
+
+/* Enqueue up to num_data values into the ring tail. Num_data is smaller than
+ * ring size. Returns the number of values actually stored (0 when full).
+ * Single threaded variant: plain loads/stores, no atomics. */
+static inline uint32_t ring_st_enq_multi(ring_st_t *ring, uint32_t *ring_data,
+ uint32_t ring_mask,
+ const uint32_t data[],
+ uint32_t num_data)
+{
+ uint32_t head, tail, size, idx;
+ uint32_t num, i;
+
+ head = ring->head;
+ tail = ring->tail;
+ size = ring_mask + 1;
+ num = size - (tail - head);
+
+ /* Full */
+ if (num == 0)
+ return 0;
+
+ if (num > num_data)
+ num = num_data;
+
+ idx = tail & ring_mask;
+
+ for (i = 0; i < num; i++) {
+ ring_data[idx] = data[i];
+ idx = (idx + 1) & ring_mask;
+ }
+
+ ring->tail = tail + num;
+
+ return num;
+}
+
+/* Check if ring is empty (head has caught up with tail) */
+static inline int ring_st_is_empty(ring_st_t *ring)
+{
+ return ring->head == ring->tail;
+}
+
+/* Return current number of stored values. Unsigned subtraction keeps the
+ * length correct when the free-running indices wrap around. */
+static inline uint32_t ring_st_length(ring_st_t *ring)
+{
+ return ring->tail - ring->head;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_u32_internal.h b/platform/linux-generic/include/odp_ring_u32_internal.h
new file mode 100644
index 000000000..baa02e4ca
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_u32_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_U32_INTERNAL_H_
+#define ODP_RING_U32_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_U32
+
+#include <odp_ring_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_u64_internal.h b/platform/linux-generic/include/odp_ring_u64_internal.h
new file mode 100644
index 000000000..4b4c7b1b5
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_u64_internal.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_U64_INTERNAL_H_
+#define ODP_RING_U64_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_ring_common.h>
+
+#undef _ODP_RING_TYPE
+#define _ODP_RING_TYPE _ODP_RING_TYPE_U64
+
+#include <odp_ring_internal.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 530d157f2..07f935bc9 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -12,40 +13,63 @@ extern "C" {
#endif
#include <odp/api/queue.h>
-#include <odp_queue_internal.h>
#include <odp/api/schedule.h>
+#include <odp/api/plat/schedule_inline_types.h>
-typedef void (*schedule_pktio_start_fn_t)(int pktio_index, int num_in_queue,
- int in_queue_idx[]);
+#include <odp_event_internal.h>
+#include <odp_queue_if.h>
+
+#define _ODP_SCHED_ID_BASIC 0
+#define _ODP_SCHED_ID_SP 1
+#define _ODP_SCHED_ID_SCALABLE 2
+
+/* Scheduler identifier */
+extern int _odp_sched_id;
+
+typedef struct schedule_config_t {
+ struct {
+ int all;
+ int worker;
+ int control;
+ } group_enable;
+
+} schedule_config_t;
+
+typedef void (*schedule_pktio_start_fn_t)(int pktio_index,
+ int num_in_queue,
+ int in_queue_idx[],
+ odp_queue_t odpq[]);
typedef int (*schedule_thr_add_fn_t)(odp_schedule_group_t group, int thr);
typedef int (*schedule_thr_rem_fn_t)(odp_schedule_group_t group, int thr);
typedef int (*schedule_num_grps_fn_t)(void);
-typedef int (*schedule_init_queue_fn_t)(uint32_t queue_index,
- const odp_schedule_param_t *sched_param
- );
+typedef int (*schedule_create_queue_fn_t)(uint32_t queue_index,
+ const odp_schedule_param_t *param);
typedef void (*schedule_destroy_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_sched_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_unsched_queue_fn_t)(uint32_t queue_index);
-typedef int (*schedule_ord_enq_multi_fn_t)(uint32_t queue_index,
- void *buf_hdr[], int num, int *ret);
+typedef int (*schedule_ord_enq_multi_fn_t)(odp_queue_t queue, void *event_hdr[],
+ int num, int *ret);
typedef int (*schedule_init_global_fn_t)(void);
typedef int (*schedule_term_global_fn_t)(void);
typedef int (*schedule_init_local_fn_t)(void);
typedef int (*schedule_term_local_fn_t)(void);
typedef void (*schedule_order_lock_fn_t)(void);
typedef void (*schedule_order_unlock_fn_t)(void);
-typedef unsigned (*schedule_max_ordered_locks_fn_t)(void);
-typedef void (*schedule_save_context_fn_t)(queue_entry_t *queue);
+typedef void (*schedule_order_unlock_lock_fn_t)(void);
+typedef void (*schedule_order_lock_start_fn_t)(void);
+typedef void (*schedule_order_lock_wait_fn_t)(void);
+typedef uint32_t (*schedule_max_ordered_locks_fn_t)(void);
+typedef void (*schedule_get_config_fn_t)(schedule_config_t *config);
+typedef const _odp_schedule_api_fn_t *(*schedule_sched_api_fn_t)(void);
typedef struct schedule_fn_t {
schedule_pktio_start_fn_t pktio_start;
schedule_thr_add_fn_t thr_add;
schedule_thr_rem_fn_t thr_rem;
schedule_num_grps_fn_t num_grps;
- schedule_init_queue_fn_t init_queue;
+ schedule_create_queue_fn_t create_queue;
schedule_destroy_queue_fn_t destroy_queue;
schedule_sched_queue_fn_t sched_queue;
- schedule_unsched_queue_fn_t unsched_queue;
schedule_ord_enq_multi_fn_t ord_enq_multi;
schedule_init_global_fn_t init_global;
schedule_term_global_fn_t term_global;
@@ -53,52 +77,22 @@ typedef struct schedule_fn_t {
schedule_term_local_fn_t term_local;
schedule_order_lock_fn_t order_lock;
schedule_order_unlock_fn_t order_unlock;
+ schedule_order_lock_start_fn_t start_order_lock;
+ schedule_order_lock_wait_fn_t wait_order_lock;
+ schedule_order_unlock_lock_fn_t order_unlock_lock;
schedule_max_ordered_locks_fn_t max_ordered_locks;
- schedule_save_context_fn_t save_context;
+ schedule_get_config_fn_t get_config;
+ schedule_sched_api_fn_t sched_api;
+
} schedule_fn_t;
/* Interface towards the scheduler */
-extern const schedule_fn_t *sched_fn;
+extern const schedule_fn_t *_odp_sched_fn;
/* Interface for the scheduler */
-int sched_cb_pktin_poll(int pktio_index, int num_queue, int index[]);
-void sched_cb_pktio_stop_finalize(int pktio_index);
-int sched_cb_num_pktio(void);
-int sched_cb_num_queues(void);
-int sched_cb_queue_prio(uint32_t queue_index);
-int sched_cb_queue_grp(uint32_t queue_index);
-int sched_cb_queue_is_ordered(uint32_t queue_index);
-int sched_cb_queue_is_atomic(uint32_t queue_index);
-odp_queue_t sched_cb_queue_handle(uint32_t queue_index);
-void sched_cb_queue_destroy_finalize(uint32_t queue_index);
-int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num);
-int sched_cb_queue_empty(uint32_t queue_index);
-
-/* API functions */
-typedef struct {
- uint64_t (*schedule_wait_time)(uint64_t);
- odp_event_t (*schedule)(odp_queue_t *, uint64_t);
- int (*schedule_multi)(odp_queue_t *, uint64_t, odp_event_t [], int);
- void (*schedule_pause)(void);
- void (*schedule_resume)(void);
- void (*schedule_release_atomic)(void);
- void (*schedule_release_ordered)(void);
- void (*schedule_prefetch)(int);
- int (*schedule_num_prio)(void);
- odp_schedule_group_t (*schedule_group_create)(const char *,
- const odp_thrmask_t *);
- int (*schedule_group_destroy)(odp_schedule_group_t);
- odp_schedule_group_t (*schedule_group_lookup)(const char *);
- int (*schedule_group_join)(odp_schedule_group_t, const odp_thrmask_t *);
- int (*schedule_group_leave)(odp_schedule_group_t,
- const odp_thrmask_t *);
- int (*schedule_group_thrmask)(odp_schedule_group_t, odp_thrmask_t *);
- int (*schedule_group_info)(odp_schedule_group_t,
- odp_schedule_group_info_t *);
- void (*schedule_order_lock)(unsigned);
- void (*schedule_order_unlock)(unsigned);
-
-} schedule_api_t;
+int _odp_sched_cb_pktin_poll(int pktio_index, int pktin_index,
+ _odp_event_hdr_t *hdr_tbl[], int num);
+void _odp_sched_cb_pktio_stop_finalize(int pktio_index);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_schedule_scalable.h b/platform/linux-generic/include/odp_schedule_scalable.h
new file mode 100644
index 000000000..28c0a9894
--- /dev/null
+++ b/platform/linux-generic/include/odp_schedule_scalable.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SCHEDULE_SCALABLE_H
+#define ODP_SCHEDULE_SCALABLE_H
+
+#include <odp/api/align.h>
+#include <odp/api/schedule.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_event_internal.h>
+#include <odp_schedule_scalable_config.h>
+#include <odp_schedule_scalable_ordered.h>
+#include <odp_llqueue.h>
+
+/*
+ * Define scalable scheduler internal maximum priority count
+ * ODP_SCHED_PRIO_NUM as it is not compile-time constant at API
+ * level. The current API for this is odp_schedule_num_prio().
+ * The other schedulers also define this internally as NUM_PRIO.
+ *
+ * One additional priority level for idle pktin queues.
+ * This is only for internal use and not visible to the user.
+ */
+#define ODP_SCHED_PRIO_PKTIN 8
+#define ODP_SCHED_PRIO_NUM 9
+
+typedef struct ODP_ALIGNED_CACHE {
+ union {
+ struct {
+ struct llqueue llq;
+ uint32_t prio;
+ };
+ char line[ODP_CACHE_LINE_SIZE];
+ };
+} sched_queue_t;
+
+#define TICKET_INVALID (uint16_t)(~0U)
+
+typedef struct ODP_ALIGNED(sizeof(uint64_t)) {
+ int32_t numevts;
+ uint16_t wrr_budget;
+ uint8_t cur_ticket;
+ uint8_t nxt_ticket;
+} qschedstate_t;
+
+typedef uint32_t ringidx_t;
+
+#ifdef CONFIG_SPLIT_PRODCONS
+#define SPLIT_PC ODP_ALIGNED_CACHE
+#else
+#define SPLIT_PC
+#endif
+
+#define ODP_NO_SCHED_QUEUE (ODP_SCHED_SYNC_ORDERED + 1)
+
+typedef struct ODP_ALIGNED_CACHE {
+ struct llnode node;
+ sched_queue_t *schedq;
+#ifdef CONFIG_QSCHST_LOCK
+ odp_ticketlock_t qschlock;
+#endif
+ qschedstate_t qschst;
+ uint8_t pop_deficit;
+ uint8_t qschst_type;
+ uint8_t pktio_idx;
+ uint8_t rx_queue;
+ uint16_t xoffset;
+ uint8_t sched_prio;
+ ringidx_t prod_read SPLIT_PC;
+ ringidx_t prod_write;
+ ringidx_t prod_mask;
+ _odp_event_hdr_t **prod_ring;
+ ringidx_t cons_write SPLIT_PC;
+ ringidx_t cons_read;
+ reorder_window_t *rwin;
+ void *user_ctx;
+#ifdef CONFIG_SPLIT_PRODCONS
+ _odp_event_hdr_t **cons_ring;
+ ringidx_t cons_mask;
+ uint16_t cons_type;
+#else
+#define cons_mask prod_mask
+#define cons_ring prod_ring
+#define cons_type qschst_type
+#endif
+ odp_schedule_group_t sched_grp;
+ uint32_t loop_check[CONFIG_NUM_CPU_IDS];
+} sched_elem_t;
+
+/* Number of scheduling groups */
+#define MAX_SCHED_GROUP (sizeof(sched_group_mask_t) * CHAR_BIT)
+
+typedef bitset_t sched_group_mask_t;
+
+typedef struct {
+ /* Threads currently associated with the sched group */
+ bitset_t thr_actual[ODP_SCHED_PRIO_NUM] ODP_ALIGNED_CACHE;
+ bitset_t thr_wanted;
+ /* Used to spread queues over schedq's */
+ uint32_t xcount[ODP_SCHED_PRIO_NUM];
+ /* Number of schedq's per prio */
+ uint32_t xfactor;
+ char name[ODP_SCHED_GROUP_NAME_LEN];
+ /* ODP_SCHED_PRIO_NUM * xfactor. Must be last. */
+ sched_queue_t schedq[1] ODP_ALIGNED_CACHE;
+} sched_group_t;
+
+/* Number of reorder contexts per thread */
+#define TS_RVEC_SIZE 16
+
+typedef struct ODP_ALIGNED_CACHE {
+ /* Atomic queue currently being processed or NULL */
+ sched_elem_t *atomq;
+ /* Schedq the currently processed queue was popped from */
+ sched_queue_t *src_schedq;
+ /* Current reorder context or NULL */
+ reorder_context_t *rctx;
+ uint8_t pause;
+ uint8_t out_of_order;
+ uint8_t tidx;
+ uint8_t pad;
+ uint32_t dequeued; /* Number of events dequeued from atomic queue */
+ uint16_t ticket; /* Ticket for atomic queue or TICKET_INVALID */
+ uint16_t num_schedq;
+ uint16_t sg_sem; /* Set when sg_wanted is modified by other thread */
+#define SCHEDQ_PER_THREAD (MAX_SCHED_GROUP * ODP_SCHED_PRIO_NUM)
+ sched_queue_t *schedq_list[SCHEDQ_PER_THREAD];
+ /* Current sched_group membership */
+ sched_group_mask_t sg_actual[ODP_SCHED_PRIO_NUM];
+ /* Future sched_group membership. */
+ sched_group_mask_t sg_wanted[ODP_SCHED_PRIO_NUM];
+ bitset_t priv_rvec_free;
+ /* Bitset of free entries in rvec[] */
+ bitset_t rvec_free ODP_ALIGNED_CACHE;
+ /* Reordering contexts to allocate from */
+ reorder_context_t rvec[TS_RVEC_SIZE] ODP_ALIGNED_CACHE;
+ uint32_t loop_cnt; /* Counter to check pktio ingress queue dead loop */
+} sched_scalable_thread_state_t;
+
+void _odp_sched_update_enq(sched_elem_t *q, uint32_t actual);
+void _odp_sched_update_enq_sp(sched_elem_t *q, uint32_t actual);
+sched_queue_t *_odp_sched_queue_add(odp_schedule_group_t grp, uint32_t prio);
+void _odp_sched_queue_rem(odp_schedule_group_t grp, uint32_t prio);
+
+#endif /* ODP_SCHEDULE_SCALABLE_H */
diff --git a/platform/linux-generic/include/odp_schedule_scalable_config.h b/platform/linux-generic/include/odp_schedule_scalable_config.h
new file mode 100644
index 000000000..3462d047b
--- /dev/null
+++ b/platform/linux-generic/include/odp_schedule_scalable_config.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SCHEDULE_SCALABLE_CONFIG_H_
+#define ODP_SCHEDULE_SCALABLE_CONFIG_H_
+
+/* Maximum number of events that can be stored in a queue */
+#define CONFIG_SCAL_QUEUE_SIZE 4096
+
+/*
+ * Default scaling factor for the scheduler group
+ *
+ * This scaling factor is used when the application creates a scheduler
+ * group with no worker threads.
+ */
+#define CONFIG_DEFAULT_XFACTOR 4
+
+/*
+ * Default weight (in events) for WRR in scalable scheduler
+ *
+ * This controls the per-queue weight for WRR between queues of the same
+ * priority in the scalable scheduler
+ * A higher value improves throughput while a lower value increases fairness
+ * and thus likely decreases latency
+ *
+ * If WRR is undesired, set the value to ~0 which will use the largest possible
+ * weight
+ *
+ * Note: an API for specifying this on a per-queue basis would be useful but is
+ * not yet available
+ */
+#define CONFIG_WRR_WEIGHT 64
+
+/*
+ * Split queue producer/consumer metadata into separate cache lines.
+ * This is beneficial on e.g. Cortex-A57 but not so much on A53.
+ */
+#define CONFIG_SPLIT_PRODCONS
+
+/*
+ * Use locks to protect queue (ring buffer) and scheduler state updates
+ * On x86, this decreases overhead noticeably.
+ */
+#if !defined(__arm__) && !defined(__aarch64__)
+#define CONFIG_QSCHST_LOCK
+/* Keep all ring buffer/qschst data together when using locks */
+#undef CONFIG_SPLIT_PRODCONS
+#endif
+
+#endif /* ODP_SCHEDULE_SCALABLE_CONFIG_H_ */
diff --git a/platform/linux-generic/include/odp_schedule_scalable_ordered.h b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
new file mode 100644
index 000000000..be4894f73
--- /dev/null
+++ b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SCHEDULE_SCALABLE_ORDERED_H
+#define ODP_SCHEDULE_SCALABLE_ORDERED_H
+
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp_bitset.h>
+#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ishmpool_internal.h>
+
+/* High level functioning of reordering
+ * Datastructures -
+ * Reorder Window - Every ordered queue is associated with a reorder window.
+ * Reorder window stores reorder contexts from threads that
+ * have completed processing out-of-order.
+ * Reorder Context - Reorder context consists of events that a thread
+ * wants to enqueue while processing a batch of events
+ * from an ordered queue.
+ *
+ * Algorithm -
+ * 1) Thread identifies the ordered queue.
+ * 2) It 'reserves a slot in the reorder window and dequeues the
+ * events' atomically. Atomicity is achieved by using a ticket-lock
+ * like design where the reorder window slot is the ticket.
+ * 3a) Upon order-release/next schedule call, the thread
+ * checks if it's slot (ticket) equals the head of the reorder window.
+ * If yes, enqueues the events to the destination queue till
+ * i) the reorder window is empty or
+ * ii) there is a gap in the reorder window
+ * If no, the reorder context is stored in the reorder window at
+ * the reserved slot.
+ * 3b) Upon the first enqueue, the thread checks if its slot (ticket)
+ * equals the head of the reorder window.
+ * If yes, enqueues the events immediately to the destination queue
+ * If no, these (and subsequent) events are stored in the reorder context
+ * (in the application given order)
+ */
+
+/* Head and change indicator variables are used to synchronise between
+ * concurrent insert operations in the reorder window. A thread performing
+ * an in-order insertion must be notified about the newly inserted
+ * reorder contexts so that it doesn’t halt the retire process too early.
+ * A thread performing an out-of-order insertion must correspondingly
+ * notify the thread doing in-order insertion of the new waiting reorder
+ * context, which may need to be handled by that thread.
+ *
+ * Also, an out-of-order insertion may become an in-order insertion if the
+ * thread doing an in-order insertion completes before this thread completes.
+ * We need a point of synchronisation where this knowledge and potential state
+ * change can be transferred between threads.
+ */
+typedef struct ODP_ALIGNED(sizeof(uint64_t)) hc {
+ /* First missing context */
+ uint32_t head;
+ /* Change indicator */
+ uint32_t chgi;
+} hc_t;
+
+/* Number of reorder contexts in the reorder window.
+ * Should be at least one per CPU.
+ */
+#define RWIN_SIZE 32
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(RWIN_SIZE), "RWIN_SIZE is not a power of 2");
+
+typedef struct reorder_context reorder_context_t;
+
+typedef struct reorder_window {
+ /* head and change indicator */
+ hc_t hc;
+ uint32_t winmask;
+ uint32_t tail;
+ uint32_t turn;
+ uint32_t olock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ uint32_t lock_count;
+ /* Reorder contexts in this window */
+ reorder_context_t *ring[RWIN_SIZE];
+} reorder_window_t;
+
+/* Number of events that can be stored in a reorder context.
+ * This size is chosen so that there is no space left unused at the end
+ * of the last cache line (for 64b architectures and 64b handles).
+ */
+#define RC_EVT_SIZE 18
+
+struct ODP_ALIGNED_CACHE reorder_context {
+ /* Reorder window to which this context belongs */
+ reorder_window_t *rwin;
+ /* Pointer to TS->rvec_free */
+ bitset_t *rvec_free;
+ /* Our slot number in the reorder window */
+ uint32_t sn;
+ uint8_t olock_flags;
+ /* Our index in thread_state rvec array */
+ uint8_t idx;
+ /* Used to link reorder contexts together */
+ uint8_t next_idx;
+ /* Current reorder context to save events in */
+ uint8_t cur_idx;
+ /* Number of events stored in this reorder context */
+ uint8_t numevts;
+ /* Events stored in this context */
+ _odp_event_hdr_t *events[RC_EVT_SIZE];
+ queue_entry_t *destq[RC_EVT_SIZE];
+};
+
+reorder_window_t *_odp_rwin_alloc(_odp_ishm_pool_t *pool,
+ unsigned int lock_count);
+int _odp_rwin_free(_odp_ishm_pool_t *pool, reorder_window_t *rwin);
+bool _odp_rwin_reserve(reorder_window_t *rwin, uint32_t *sn);
+bool _odp_rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn);
+void _odp_rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn);
+void _odp_rctx_init(reorder_context_t *rctx, uint16_t idx,
+ reorder_window_t *rwin, uint32_t sn);
+void _odp_rctx_release(reorder_context_t *rctx);
+int _odp_rctx_save(queue_entry_t *queue, _odp_event_hdr_t *event_hdr[], int num);
+
+#endif /* ODP_SCHEDULE_SCALABLE_ORDERED_H */
diff --git a/platform/linux-generic/include/odp_shm_internal.h b/platform/linux-generic/include/odp_shm_internal.h
index 8bd105d9f..9e51ffad1 100644
--- a/platform/linux-generic/include/odp_shm_internal.h
+++ b/platform/linux-generic/include/odp_shm_internal.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2019, Nokia
+ * Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -11,13 +12,40 @@
extern "C" {
#endif
+#include <sys/types.h>
+#include <inttypes.h>
+
#include <odp/api/shared_memory.h>
-#define SHM_DEVNAME_MAXLEN (ODP_SHM_NAME_LEN + 16)
-#define SHM_DEVNAME_FORMAT "/odp-%d-%s" /* /dev/shm/odp-<pid>-<name> */
+/* flags available at ishm_reserve: */
+#define _ODP_ISHM_SINGLE_VA 1
+#define _ODP_ISHM_LOCK 2
+#define _ODP_ISHM_EXPORT 4 /* create export descr file in /tmp */
+
+/**
+ * Shared memory block info
+ */
+typedef struct _odp_ishm_info_t {
+ const char *name; /**< Block name */
+ void *addr; /**< Block address */
+ uint64_t size; /**< Block size in bytes */
+ uint64_t page_size; /**< Memory page size */
+ uint32_t flags; /**< _ODP_ISHM_* flags */
+ uint32_t user_flags;/**< user specific flags */
+} _odp_ishm_info_t;
-#define _ODP_SHM_PROC_NOCREAT 0x40 /**< Do not create shm if not exist */
-#define _ODP_SHM_O_EXCL 0x80 /**< Do not create shm if exist */
+int _odp_ishm_reserve(const char *name, uint64_t size, int fd, uint32_t align,
+ uint64_t offset, uint32_t flags, uint32_t user_flags);
+int _odp_ishm_free_by_index(int block_index);
+int _odp_ishm_lookup_by_name(const char *name);
+int _odp_ishm_find_exported(const char *remote_name,
+ pid_t external_odp_pid,
+ const char *local_name);
+void *_odp_ishm_address(int block_index);
+int _odp_ishm_info(int block_index, _odp_ishm_info_t *info);
+int _odp_ishm_status(const char *title);
+int _odp_ishm_cleanup_files(const char *dirpath);
+void _odp_ishm_print(int block_index);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_socket_common.h b/platform/linux-generic/include/odp_socket_common.h
new file mode 100644
index 000000000..f48dabc3b
--- /dev/null
+++ b/platform/linux-generic/include/odp_socket_common.h
@@ -0,0 +1,73 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SOCKET_COMMON_H_
+#define ODP_SOCKET_COMMON_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/packet_io.h>
+#include <protocols/eth.h>
+
+#include <string.h>
+
+#define _ODP_SOCKET_MTU_MIN (68 + _ODP_ETHHDR_LEN)
+#define _ODP_SOCKET_MTU_MAX (9000 + _ODP_ETHHDR_LEN)
+
+/* Copy an Ethernet MAC address (_ODP_ETHADDR_LEN bytes) from mac_src to
+ * mac_dst. Buffers must not overlap (memcpy). */
+static inline void
+ethaddr_copy(unsigned char mac_dst[], unsigned char mac_src[])
+{
+ memcpy(mac_dst, mac_src, _ODP_ETHADDR_LEN);
+}
+
+/* Compare two Ethernet MAC addresses. Returns non-zero when all
+ * _ODP_ETHADDR_LEN bytes match, zero otherwise. */
+static inline int
+ethaddrs_equal(unsigned char mac_a[], unsigned char mac_b[])
+{
+ return !memcmp(mac_a, mac_b, _ODP_ETHADDR_LEN);
+}
+
+/**
+ * Read the MAC address from a packet socket
+ */
+int _odp_mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[]);
+
+/**
+ * Read the MTU from a packet socket
+ */
+uint32_t _odp_mtu_get_fd(int fd, const char *name);
+
+/**
+ * Set a packet socket MTU
+ */
+int _odp_mtu_set_fd(int fd, const char *name, int mtu);
+
+/**
+ * Enable/Disable promisc mode for a packet socket
+ */
+int _odp_promisc_mode_set_fd(int fd, const char *name, int enable);
+
+/**
+ * Return promisc mode of a packet socket
+ */
+int _odp_promisc_mode_get_fd(int fd, const char *name);
+
+/**
+ * Return link status of a packet socket (up/down)
+ */
+int _odp_link_status_fd(int fd, const char *name);
+
+/**
+ * Read link information from a packet socket
+ */
+int _odp_link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_SOCKET_COMMON_H_ */
diff --git a/platform/linux-generic/include/odp_sorted_list_internal.h b/platform/linux-generic/include/odp_sorted_list_internal.h
index 3266c4337..9e5a457dc 100644
--- a/platform/linux-generic/include/odp_sorted_list_internal.h
+++ b/platform/linux-generic/include/odp_sorted_list_internal.h
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/platform/linux-generic/include/odp_sysfs_stats.h b/platform/linux-generic/include/odp_sysfs_stats.h
new file mode 100644
index 000000000..0adb67c84
--- /dev/null
+++ b/platform/linux-generic/include/odp_sysfs_stats.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SYSFS_STATS_H_
+#define ODP_SYSFS_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/packet_io_stats.h>
+#include <odp_packet_io_internal.h>
+
+int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats);
+
+int _odp_sysfs_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num);
+int _odp_sysfs_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num);
+int _odp_sysfs_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* ODP_SYSFS_STATS_H_ */
diff --git a/platform/linux-generic/include/odp_sysinfo_internal.h b/platform/linux-generic/include/odp_sysinfo_internal.h
new file mode 100644
index 000000000..c14cf78d9
--- /dev/null
+++ b/platform/linux-generic/include/odp_sysinfo_internal.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SYSINFO_INTERNAL_H_
+#define ODP_SYSINFO_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_global_data.h>
+#include <odp_debug_internal.h>
+#include <inttypes.h>
+#include <string.h>
+
+int _odp_cpuinfo_parser(FILE *file, system_info_t *sysinfo);
+uint64_t odp_cpu_arch_hz_current(int id);
+void _odp_sys_info_print_arch(void);
+
+/* Fallback used when no real CPU info can be parsed for this architecture:
+ * marks the CPU architecture unknown and fills every CPU slot with the
+ * configured default max frequency and an "UNKNOWN" model string, warning
+ * about each substitution. Always returns 0 (success). */
+static inline int _odp_dummy_cpuinfo(system_info_t *sysinfo)
+{
+ uint64_t cpu_hz_max = sysinfo->default_cpu_hz_max;
+ int i;
+
+ sysinfo->cpu_arch = ODP_CPU_ARCH_UNKNOWN;
+
+ _ODP_WARN("Use dummy values for freq and model string\n");
+ for (i = 0; i < CONFIG_NUM_CPU_IDS; i++) {
+ _ODP_WARN("CPU[%i] uses default max frequency of "
+ "%" PRIu64 " Hz from config file\n", i, cpu_hz_max);
+ sysinfo->cpu_hz_max[i] = cpu_hz_max;
+ strcpy(sysinfo->model_str[i], "UNKNOWN");
+ }
+
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h
index 91b12c545..38192d917 100644
--- a/platform/linux-generic/include/odp_timer_internal.h
+++ b/platform/linux-generic/include/odp_timer_internal.h
@@ -1,10 +1,10 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
/**
* @file
*
@@ -16,23 +16,53 @@
#include <odp/api/align.h>
#include <odp/api/debug.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
#include <odp/api/timer.h>
+#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_pool_internal.h>
+
+/*
+ * Use as the argument to timer_run() to force a scan and to ignore rate
+ * limit.
+ */
+#define TIMER_SCAN_FORCE INT32_MAX
+
/**
* Internal Timeout header
*/
-typedef struct {
- /* common buffer header */
- odp_buffer_hdr_t buf_hdr;
+typedef struct ODP_ALIGNED_CACHE odp_timeout_hdr_t {
+ /* Common event header */
+ _odp_event_hdr_t event_hdr;
/* Requested expiration time */
uint64_t expiration;
+
/* User ptr inherited from parent timer */
- void *user_ptr;
+ const void *user_ptr;
+
+ /* User area pointer */
+ void *uarea_addr;
+
/* Parent timer */
odp_timer_t timer;
+
} odp_timeout_hdr_t;
+ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) <= ODP_CACHE_LINE_SIZE,
+ "TIMEOUT_HDR_SIZE_ERROR");
+
+/* A larger decrement value should be used after receiving events compared to
+ * an 'empty' call. */
+uint64_t _odp_timer_run_inline(int dec);
+
+/* Static inline wrapper to minimize modification of schedulers. Runs the
+ * inline timer scan only when inline timers are globally enabled; otherwise
+ * returns UINT64_MAX (i.e. no expiry information available). */
+static inline uint64_t timer_run(int dec)
+{
+ if (odp_global_rw->inline_timers)
+ return _odp_timer_run_inline(dec);
+
+ return UINT64_MAX;
+}
+
#endif
diff --git a/platform/linux-generic/include/odp_timer_wheel_internal.h b/platform/linux-generic/include/odp_timer_wheel_internal.h
index a60ab516c..c0c411b91 100644
--- a/platform/linux-generic/include/odp_timer_wheel_internal.h
+++ b/platform/linux-generic/include/odp_timer_wheel_internal.h
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
index 9f821fe4c..28744d8b1 100644
--- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
+++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -19,23 +19,22 @@
extern "C" {
#endif
-#include <pthread.h>
-#include <odp/api/traffic_mngr.h>
+#include <odp/api/barrier.h>
#include <odp/api/packet_io.h>
+#include <odp/api/traffic_mngr.h>
+
#include <odp_name_table_internal.h>
#include <odp_timer_wheel_internal.h>
#include <odp_pkt_queue_internal.h>
#include <odp_sorted_list_internal.h>
-#include <odp_internal.h>
#include <odp_debug_internal.h>
#include <odp_buffer_internal.h>
-#include <odp_queue_internal.h>
+#include <odp_queue_if.h>
#include <odp_packet_internal.h>
-typedef struct stat file_stat_t;
+#include <pthread.h>
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+typedef struct stat file_stat_t;
#define INPUT_WORK_RING_SIZE (16 * 1024)
@@ -87,13 +86,6 @@ typedef uint64_t tm_handle_t;
#define PF_REACHED_EGRESS 0x40
#define PF_ERROR 0x80
-typedef struct {
- uint32_t num_allocd;
- uint32_t num_used;
- uint32_t num_freed;
- void **array_ptrs; /* Ptr to an array of num_allocd void * ptrs. */
-} dynamic_tbl_t;
-
#define ODP_TM_NUM_PROFILES 4
typedef enum {
@@ -103,6 +95,11 @@ typedef enum {
TM_WRED_PROFILE
} profile_kind_t;
+typedef enum {
+ TM_STATUS_FREE = 0,
+ TM_STATUS_RESERVED
+} tm_status_t;
+
typedef struct tm_queue_obj_s tm_queue_obj_t;
typedef struct tm_node_obj_s tm_node_obj_t;
@@ -114,6 +111,7 @@ typedef struct {
_odp_int_name_t name_tbl_id;
odp_tm_threshold_t thresholds_profile;
uint32_t ref_cnt;
+ tm_status_t status;
} tm_queue_thresholds_t;
typedef struct {
@@ -126,6 +124,7 @@ typedef struct {
odp_tm_percent_t max_drop_prob;
odp_bool_t enable_wred;
odp_bool_t use_byte_fullness;
+ tm_status_t status;
} tm_wred_params_t;
typedef struct {
@@ -164,6 +163,7 @@ typedef struct {
uint32_t ref_cnt;
odp_tm_sched_mode_t sched_modes[ODP_TM_MAX_PRIORITIES];
uint16_t inverted_weights[ODP_TM_MAX_PRIORITIES];
+ tm_status_t status;
} tm_sched_params_t;
typedef enum {
@@ -199,6 +199,7 @@ typedef struct {
int8_t len_adjust;
odp_bool_t dual_rate;
odp_bool_t enabled;
+ tm_status_t status;
} tm_shaper_params_t;
typedef enum { NO_CALLBACK, UNDELAY_PKT } tm_shaper_callback_reason_t;
@@ -257,7 +258,7 @@ typedef struct {
uint8_t num_priorities;
uint8_t highest_priority;
uint8_t locked;
- tm_sched_state_t sched_states[0];
+ tm_sched_state_t sched_states[ODP_TM_MAX_PRIORITIES];
} tm_schedulers_obj_t;
struct tm_queue_obj_s {
@@ -268,7 +269,7 @@ struct tm_queue_obj_s {
uint32_t pkts_dequeued_cnt;
uint32_t pkts_consumed_cnt;
_odp_int_pkt_queue_t _odp_int_pkt_queue;
- tm_wred_node_t *tm_wred_node;
+ tm_wred_node_t tm_wred_node;
odp_packet_t pkt;
odp_packet_t sent_pkt;
uint32_t timer_seq;
@@ -286,13 +287,20 @@ struct tm_queue_obj_s {
uint8_t tm_idx;
uint8_t delayed_cnt;
uint8_t blocked_cnt;
- queue_entry_t tm_qentry;
+ odp_bool_t ordered_enqueue;
+ tm_status_t status;
+ /* Statistics for odp_tm_queue_stats_t */
+ struct {
+ odp_atomic_u64_t discards;
+ odp_atomic_u64_t errors;
+ odp_atomic_u64_t packets;
+ } stats;
};
struct tm_node_obj_s {
void *user_context;
- tm_wred_node_t *tm_wred_node;
- tm_schedulers_obj_t *schedulers_obj;
+ tm_wred_node_t tm_wred_node;
+ tm_schedulers_obj_t schedulers_obj;
tm_shaper_obj_t *fanin_list_head;
tm_shaper_obj_t *fanin_list_tail;
tm_shaper_obj_t shaper_obj;
@@ -305,6 +313,7 @@ struct tm_node_obj_s {
uint8_t level; /* Primarily for debugging */
uint8_t tm_idx;
uint8_t marked;
+ tm_status_t status;
};
typedef struct {
@@ -372,9 +381,8 @@ struct tm_system_s {
_odp_int_name_t name_tbl_id;
void *trace_buffer;
- uint32_t next_queue_num;
- tm_queue_obj_t **queue_num_tbl;
- input_work_queue_t *input_work_queue;
+ tm_queue_obj_t *queue_num_tbl[ODP_TM_MAX_TM_QUEUES];
+ input_work_queue_t input_work_queue;
tm_queue_cnts_t priority_queue_cnts;
tm_queue_cnts_t total_queue_cnts;
pkt_desc_t egress_pkt_desc;
@@ -383,7 +391,7 @@ struct tm_system_s {
_odp_timer_wheel_t _odp_int_timer_wheel;
_odp_int_sorted_pool_t _odp_int_sorted_pool;
- tm_node_obj_t *root_node;
+ tm_node_obj_t root_node;
odp_tm_egress_t egress;
odp_tm_requirements_t requirements;
odp_tm_capabilities_t capabilities;
@@ -400,6 +408,7 @@ struct tm_system_s {
uint8_t tm_idx;
uint8_t first_enq;
odp_bool_t is_idle;
+ tm_status_t status;
uint64_t shaper_green_cnt;
uint64_t shaper_yellow_cnt;
@@ -412,15 +421,13 @@ struct tm_system_s {
* while the input work queue is shared - timers are not. */
struct tm_system_group_s {
- tm_system_group_t *prev;
- tm_system_group_t *next;
-
odp_barrier_t tm_group_barrier;
tm_system_t *first_tm_system;
uint32_t num_tm_systems;
uint32_t first_enq;
pthread_t thread;
pthread_attr_t attr;
+ tm_status_t status;
};
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_types_internal.h b/platform/linux-generic/include/odp_types_internal.h
new file mode 100644
index 000000000..a97ac9cd4
--- /dev/null
+++ b/platform/linux-generic/include/odp_types_internal.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_TYPES_INTERNAL_H_
+#define ODP_TYPES_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __SIZEOF_INT128__
+
+__extension__ typedef unsigned __int128 _odp_u128_t;
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/protocols/eth.h b/platform/linux-generic/include/protocols/eth.h
index 6d00e7fb0..e66dd52c7 100644
--- a/platform/linux-generic/include/protocols/eth.h
+++ b/platform/linux-generic/include/protocols/eth.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,7 +17,11 @@
extern "C" {
#endif
-#include <odp_api.h>
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
+
+#include <stdint.h>
/** @addtogroup odp_header ODP HEADER
* @{
diff --git a/platform/linux-generic/include/protocols/ip.h b/platform/linux-generic/include/protocols/ip.h
index 20041f1c0..e145cbe25 100644
--- a/platform/linux-generic/include/protocols/ip.h
+++ b/platform/linux-generic/include/protocols/ip.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,7 +17,9 @@
extern "C" {
#endif
-#include <odp_api.h>
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
/** @addtogroup odp_header ODP HEADER
* @{
@@ -62,8 +64,8 @@ extern "C" {
/** @internal Returns IPv4 Don't fragment */
#define _ODP_IPV4HDR_FLAGS_DONT_FRAG(frag_offset) ((frag_offset) & 0x4000)
-/** @internal Returns IPv4 more fragments */
-#define _ODP_IPV4HDR_FLAGS_MORE_FRAGS(frag_offset) ((frag_offset) & 0x2000)
+/* IPv4 more fragments flag in the frag_offset field */
+#define _ODP_IPV4HDR_FRAG_OFFSET_MORE_FRAGS 0x2000
/** @internal Returns IPv4 fragment offset */
#define _ODP_IPV4HDR_FRAG_OFFSET(frag_offset) ((frag_offset) & 0x1fff)
@@ -157,13 +159,18 @@ typedef struct ODP_PACKED {
* IP protocol values (IPv4:'proto' or IPv6:'next_hdr')
* @{*/
#define _ODP_IPPROTO_HOPOPTS 0x00 /**< IPv6 hop-by-hop options */
-#define _ODP_IPPROTO_ICMP 0x01 /**< Internet Control Message Protocol (1) */
+#define _ODP_IPPROTO_ICMPV4 0x01 /**< Internet Control Message Protocol (1) */
+#define _ODP_IPPROTO_IPIP 0x04 /**< IP Encapsulation within IP (4) */
#define _ODP_IPPROTO_TCP 0x06 /**< Transmission Control Protocol (6) */
#define _ODP_IPPROTO_UDP 0x11 /**< User Datagram Protocol (17) */
+#define _ODP_IPPROTO_IPV6 0x29 /**< IPv6 Routing header (41) */
#define _ODP_IPPROTO_ROUTE 0x2B /**< IPv6 Routing header (43) */
#define _ODP_IPPROTO_FRAG 0x2C /**< IPv6 Fragment (44) */
#define _ODP_IPPROTO_AH 0x33 /**< Authentication Header (51) */
#define _ODP_IPPROTO_ESP 0x32 /**< Encapsulating Security Payload (50) */
+#define _ODP_IPPROTO_ICMPV6 0x3A /**< Internet Control Message Protocol (58) */
+#define _ODP_IPPROTO_NO_NEXT 0x3B /**< No Next Header (59) */
+#define _ODP_IPPROTO_DEST 0x3C /**< IPv6 Destination header (60) */
#define _ODP_IPPROTO_SCTP 0x84 /**< Stream Control Transmission protocol
(132) */
#define _ODP_IPPROTO_INVALID 0xFF /**< Reserved invalid by IANA */
diff --git a/platform/linux-generic/include/protocols/ipsec.h b/platform/linux-generic/include/protocols/ipsec.h
index 093177fba..0eb320330 100644
--- a/platform/linux-generic/include/protocols/ipsec.h
+++ b/platform/linux-generic/include/protocols/ipsec.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,7 +18,11 @@
extern "C" {
#endif
-#include <odp_api.h>
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
+
+#include <stdint.h>
/** @addtogroup odp_header ODP HEADER
* @{
@@ -34,7 +38,7 @@ extern "C" {
typedef struct ODP_PACKED {
odp_u32be_t spi; /**< Security Parameter Index */
odp_u32be_t seq_no; /**< Sequence Number */
- uint8_t iv[0]; /**< Initialization vector */
+ uint8_t iv[]; /**< Initialization vector */
} _odp_esphdr_t;
/** @internal Compile time assert */
@@ -47,7 +51,7 @@ ODP_STATIC_ASSERT(sizeof(_odp_esphdr_t) == _ODP_ESPHDR_LEN,
typedef struct ODP_PACKED {
uint8_t pad_len; /**< Padding length (0-255) */
uint8_t next_header; /**< Next header protocol */
- uint8_t icv[0]; /**< Integrity Check Value (optional) */
+ uint8_t icv[]; /**< Integrity Check Value (optional) */
} _odp_esptrl_t;
/** @internal Compile time assert */
@@ -63,7 +67,7 @@ typedef struct ODP_PACKED {
odp_u16be_t pad; /**< Padding (must be 0) */
odp_u32be_t spi; /**< Security Parameter Index */
odp_u32be_t seq_no; /**< Sequence Number */
- uint8_t icv[0]; /**< Integrity Check Value */
+ uint8_t icv[]; /**< Integrity Check Value */
} _odp_ahhdr_t;
/** @internal Compile time assert */
diff --git a/platform/linux-generic/include/protocols/sctp.h b/platform/linux-generic/include/protocols/sctp.h
new file mode 100644
index 000000000..0e7abb475
--- /dev/null
+++ b/platform/linux-generic/include/protocols/sctp.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP SCTP header
+ */
+
+#ifndef ODP_SCTP_H_
+#define ODP_SCTP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
+
+/** @addtogroup odp_header ODP HEADER
+ * @{
+ */
+
+/** SCTP header length */
+#define _ODP_SCTPHDR_LEN 12
+
+/** SCTP header */
+typedef struct ODP_PACKED {
+ odp_u16be_t src_port; /**< Source port */
+ odp_u16be_t dst_port; /**< Destination port */
+ odp_u32be_t tag; /**< Verification tag */
+ odp_u32be_t chksum; /**< SCTP header and data checksum */
+} _odp_sctphdr_t;
+
+/** @internal Compile time assert */
+ODP_STATIC_ASSERT(sizeof(_odp_sctphdr_t) == _ODP_SCTPHDR_LEN,
+ "_ODP_SCTPHDR_T__SIZE_ERROR");
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/protocols/tcp.h b/platform/linux-generic/include/protocols/tcp.h
index 114262e97..5b302be53 100644
--- a/platform/linux-generic/include/protocols/tcp.h
+++ b/platform/linux-generic/include/protocols/tcp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -18,7 +18,8 @@
extern "C" {
#endif
-#include <odp_api.h>
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
/** @addtogroup odp_header ODP HEADER
* @{
diff --git a/platform/linux-generic/include/protocols/thash.h b/platform/linux-generic/include/protocols/thash.h
new file mode 100644
index 000000000..7e1bec549
--- /dev/null
+++ b/platform/linux-generic/include/protocols/thash.h
@@ -0,0 +1,111 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Toeplitz hash function
+ */
+
+#ifndef ODPH_THASH_H_
+#define ODPH_THASH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
+
+#include <protocols/ip.h>
+
+#include <stdint.h>
+
+/** rss data type */
+typedef union {
+ uint8_t u8[40];
+ uint32_t u32[10];
+} rss_key;
+
+/** IPv4 tuple
+ *
+ */
+typedef struct thash_ipv4_tuple {
+ uint32_t src_addr;
+ uint32_t dst_addr;
+ union {
+ struct {
+ uint16_t sport;
+ uint16_t dport;
+ };
+ uint32_t sctp_tag;
+ };
+} thash_ipv4_tuple_t;
+
+/** IPv6 tuple */
+typedef struct thash_ipv6_tuple {
+ _odp_ipv6_addr_t src_addr;
+ _odp_ipv6_addr_t dst_addr;
+ union {
+ struct {
+ uint16_t sport;
+ uint16_t dport;
+ };
+ uint32_t sctp_tag;
+ };
+} thash_ipv6_tuple_t;
+
+/** Thash tuple union */
+typedef union {
+ thash_ipv4_tuple_t v4;
+ thash_ipv6_tuple_t v6;
+} thash_tuple_t;
+
+static inline
+void thash_load_ipv6_addr(const _odp_ipv6hdr_t *ipv6,
+ thash_tuple_t *tuple)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ *(tuple->v6.src_addr.u32 + i) =
+ odp_be_to_cpu_32(*(ipv6->src_addr.u32 + i));
+
+ *(tuple->v6.dst_addr.u32 + i) =
+ odp_be_to_cpu_32(*(ipv6->dst_addr.u32 + i));
+ }
+}
+
+static inline
+uint32_t thash_softrss(uint32_t *tuple, uint8_t len,
+ const rss_key key)
+{
+ uint32_t i, j, ret = 0;
+
+ for (j = 0; j < len; j++) {
+ for (i = 0; i < 32; i++) {
+ if (tuple[j] & (1 << (31 - i))) {
+ ret ^= odp_cpu_to_be_32(((const uint32_t *)
+ key.u32)[j]) << i | (uint32_t)((uint64_t)
+ (odp_cpu_to_be_32(((const uint32_t *)key.u32)
+ [j + 1])) >> (32 - i));
+ }
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/protocols/udp.h b/platform/linux-generic/include/protocols/udp.h
index 535aba855..2614d31ab 100644
--- a/platform/linux-generic/include/protocols/udp.h
+++ b/platform/linux-generic/include/protocols/udp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,7 +17,9 @@
extern "C" {
#endif
-#include <odp_api.h>
+#include <odp/api/align.h>
+#include <odp/api/byteorder.h>
+#include <odp/api/debug.h>
/** @addtogroup odp_header ODP HEADER
* @{
@@ -38,6 +40,8 @@ typedef struct ODP_PACKED {
ODP_STATIC_ASSERT(sizeof(_odp_udphdr_t) == _ODP_UDPHDR_LEN,
"_ODP_UDPHDR_T__SIZE_ERROR");
+#define _ODP_UDP_IPSEC_PORT 4500
+
/**
* @}
*/
diff --git a/platform/linux-generic/libodp-linux.pc.in b/platform/linux-generic/libodp-linux.pc.in
new file mode 100644
index 000000000..62589c1a3
--- /dev/null
+++ b/platform/linux-generic/libodp-linux.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: lib@ODP_LIB_NAME@
+Description: The ODP packet processing engine
+Version: @PKGCONFIG_VERSION@
+Requires.private: libconfig@AARCH64CRYPTO_PKG@
+Libs: -L${libdir} -l@ODP_LIB_NAME@ @ATOMIC_LIBS_NON_ABI_COMPAT@
+Libs.private: @OPENSSL_STATIC_LIBS@ @DPDK_LIBS@ @PCAP_LIBS@ @PTHREAD_LIBS@ @TIMER_LIBS@ @LIBXDP_LIBS@ -lpthread @ATOMIC_LIBS_ABI_COMPAT@ @IPSEC_MB_LIBS@ @ORT_LIBS@
+Cflags: -I${includedir}
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index d3e5528c1..3306849d2 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -1,40 +1,84 @@
-# Enable -fvisibility=hidden if using a gcc that supports it
-OLD_CFLAGS="$CFLAGS"
-AC_MSG_CHECKING([whether $CC supports -fvisibility=hidden])
-VISIBILITY_CFLAGS="-fvisibility=hidden"
-CFLAGS="$CFLAGS $VISIBILITY_CFLAGS"
-AC_LINK_IFELSE([AC_LANG_PROGRAM()], AC_MSG_RESULT([yes]),
- [VISIBILITY_CFLAGS=""; AC_MSG_RESULT([no])]);
-
-AC_SUBST(VISIBILITY_CFLAGS)
-# Restore CFLAGS; VISIBILITY_CFLAGS are added to it where needed.
-CFLAGS=$OLD_CFLAGS
-
-AC_MSG_CHECKING(for GCC atomic builtins)
-AC_LINK_IFELSE(
- [AC_LANG_SOURCE(
- [[int main() {
- int v = 1;
- __atomic_fetch_add(&v, 1, __ATOMIC_RELAXED);
- __atomic_fetch_sub(&v, 1, __ATOMIC_RELAXED);
- __atomic_store_n(&v, 1, __ATOMIC_RELAXED);
- __atomic_load_n(&v, __ATOMIC_RELAXED);
- return 0;
- }
- ]])],
- AC_MSG_RESULT(yes),
- AC_MSG_RESULT(no)
- echo "GCC-style __atomic builtins not supported by the compiler."
- echo "Use newer version. For gcc > 4.7.0"
- exit -1)
-
-m4_include([platform/linux-generic/m4/odp_pthread.m4])
-m4_include([platform/linux-generic/m4/odp_openssl.m4])
+ODP_IMPLEMENTATION_NAME="odp-linux"
+ODP_LIB_NAME="odp-linux"
+
+ODP_VISIBILITY
+ODP_ATOMIC
+
+ODP_PTHREAD
+ODP_TIMER
+m4_include([platform/linux-generic/m4/odp_cpu.m4])
+m4_include([platform/linux-generic/m4/odp_event_validation.m4])
m4_include([platform/linux-generic/m4/odp_pcap.m4])
-m4_include([platform/linux-generic/m4/odp_netmap.m4])
+m4_include([platform/linux-generic/m4/odp_scheduler.m4])
+
+AC_ARG_WITH([pcap],
+ [AS_HELP_STRING([--without-pcap],
+ [compile without PCAP [default=with] (linux-generic)])],
+ [],
+ [with_pcap=yes])
+have_pcap=no
+AS_IF([test "x$with_pcap" != xno],
+  [ODP_PCAP([with_pcap=yes],[with_pcap=no])])
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([ODP_PKTIO_PCAP], [test x$have_pcap = xyes])
+])
+
+m4_include([platform/linux-generic/m4/odp_libconfig.m4])
+m4_include([platform/linux-generic/m4/odp_openssl.m4])
+m4_include([platform/linux-generic/m4/odp_crypto.m4])
+m4_include([platform/linux-generic/m4/odp_ipsec_mb.m4])
+m4_include([platform/linux-generic/m4/odp_pcapng.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
-m4_include([platform/linux-generic/m4/odp_ipc.m4])
-m4_include([platform/linux-generic/m4/odp_schedule.m4])
+m4_include([platform/linux-generic/m4/odp_wfe.m4])
+m4_include([platform/linux-generic/m4/odp_xdp.m4])
+m4_include([platform/linux-generic/m4/odp_ml.m4])
+ODP_EVENT_VALIDATION
+ODP_SCHEDULER
+
+AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${AARCH64CRYPTO_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS} ${IPSEC_MB_LIBS} ${DPDK_LIBS_LT} ${LIBCLI_LIBS} ${LIBXDP_LIBS} ${ORT_LIBS}"])
+
+# Add text to the end of configure with platform specific settings.
+# Make sure it's aligned same as other lines in configure.ac.
+AS_VAR_APPEND([PLAT_CFG_TEXT], ["
+ event_validation: ${enable_event_validation}
+ openssl: ${with_openssl}
+ openssl_rand: ${openssl_rand}
+ crypto: ${with_crypto}
+ pcap: ${have_pcap}
+ pcapng: ${have_pcapng}
+ wfe_locks: ${use_wfe_locks}
+ ml_support: ${ml_support}
+ default_config_path: ${default_config_path}"])
+
+# Ignore Clang specific errors about fields with variable sized type not at the
+# end of a struct. This style is used by e.g. odp_packet_hdr_t and
+# odp_timeout_hdr_t.
+ODP_CHECK_CFLAG([-Wno-error=gnu-variable-sized-type-not-at-end])
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([PLATFORM_IS_LINUX_GENERIC],
+ [test "${with_platform}" = "linux-generic"])
AC_CONFIG_FILES([platform/linux-generic/Makefile
- platform/linux-generic/include/odp/api/plat/static_inline.h])
+ platform/linux-generic/libodp-linux.pc
+ platform/linux-generic/dumpconfig/Makefile
+ platform/linux-generic/example/Makefile
+ platform/linux-generic/example/ml/Makefile
+ platform/linux-generic/test/Makefile
+ platform/linux-generic/test/example/Makefile
+ platform/linux-generic/test/example/classifier/Makefile
+ platform/linux-generic/test/example/generator/Makefile
+ platform/linux-generic/test/example/ipsec_api/Makefile
+ platform/linux-generic/test/example/ipsec_crypto/Makefile
+ platform/linux-generic/test/example/l2fwd_simple/Makefile
+ platform/linux-generic/test/example/l3fwd/Makefile
+ platform/linux-generic/test/example/packet/Makefile
+ platform/linux-generic/test/example/ping/Makefile
+ platform/linux-generic/test/example/simple_pipeline/Makefile
+ platform/linux-generic/test/example/switch/Makefile
+ platform/linux-generic/test/validation/api/shmem/Makefile
+ platform/linux-generic/test/validation/api/pktio/Makefile
+ platform/linux-generic/test/validation/api/ml/Makefile
+ platform/linux-generic/test/performance/Makefile
+ platform/linux-generic/test/performance/dmafwd/Makefile
+ platform/linux-generic/test/pktio_ipc/Makefile])
+])
diff --git a/platform/linux-generic/m4/odp_cpu.m4 b/platform/linux-generic/m4/odp_cpu.m4
new file mode 100644
index 000000000..35a83faf6
--- /dev/null
+++ b/platform/linux-generic/m4/odp_cpu.m4
@@ -0,0 +1,35 @@
+##########################################################################
+# Set ODP_CACHE_LINE_SIZE define
+##########################################################################
+# Currently used only for aarch64
+if test "${ARCH_DIR}" = "aarch64"; then
+ cache_line_size=64
+ # Use default cache size if cross-compiling
+ if test $build = $host; then
+ cpu_implementer=""
+ cpu_part=""
+
+ AC_PROG_GREP
+ AC_PROG_SED
+ while read line; do
+ if echo $line | $GREP -q "CPU implementer"; then
+ cpu_implementer=`echo $line | $SED 's/.*\:\s*//'`
+ fi
+ if echo $line | $GREP -q "CPU part"; then
+ cpu_part=`echo $line | $SED 's/.*\:\s*//'`
+ fi
+ done < /proc/cpuinfo
+
+ # Cavium
+ if test "$cpu_implementer" == "0x43"; then
+ # ThunderX2 (0x0af) 64B, others 128B
+ if test "$cpu_part" == "0x0af"; then
+ cache_line_size=64;
+ else
+ cache_line_size=128;
+ fi
+ fi
+ fi
+ AC_DEFINE_UNQUOTED([_ODP_CACHE_LINE_SIZE], [$cache_line_size],
+ [Define cache line size])
+fi
diff --git a/platform/linux-generic/m4/odp_crypto.m4 b/platform/linux-generic/m4/odp_crypto.m4
new file mode 100644
index 000000000..1cec6edb4
--- /dev/null
+++ b/platform/linux-generic/m4/odp_crypto.m4
@@ -0,0 +1,50 @@
+# ODP_CRYPTO
+# ----------
+# Select default crypto implementation
+AC_ARG_WITH([crypto],
+ [AS_HELP_STRING([--with-crypto],
+ [Choose crypto implementation (openssl/armv8crypto/ipsecmb/null)]
+ [[default=openssl] (linux-generic)])],
+ [], [with_crypto=openssl])
+
+# Default to OpenSSL implementation if crypto is enabled
+AS_IF([test "x$with_crypto" = "xyes"], [with_crypto=openssl])
+
+# Default to Null implementation if crypto is disabled
+AS_IF([test "x$with_crypto" = "xno"], [with_crypto=null])
+AS_IF([test "x$with_crypto" = "xopenssl" -a "x$with_openssl" = "xno"], [with_crypto=null])
+
+AS_IF([test "x$with_crypto" != "xopenssl" -a "x$with_crypto" != "xarmv8crypto" -a "x$with_crypto" != "xipsecmb" -a "x$with_crypto" != "xnull"],
+ [AC_MSG_ERROR([Invalid crypto implementation name])])
+
+##########################################################################
+# OpenSSL implementation
+##########################################################################
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([WITH_OPENSSL_CRYPTO], [test "x$with_crypto" == "xopenssl"])
+])
+
+##########################################################################
+# ARMv8 Crypto library implementation
+##########################################################################
+AS_IF([test "x$with_crypto" == "xarmv8crypto"],
+ [PKG_CHECK_MODULES([AARCH64CRYPTO], [libAArch64crypto])
+ AARCH64CRYPTO_PKG=", libAArch64crypto"
+ AC_SUBST([AARCH64CRYPTO_PKG])])
+
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([WITH_ARMV8_CRYPTO], [test "x$with_crypto" == "xarmv8crypto"])
+])
+
+##########################################################################
+# Multi-buffer IPSec library implementation
+##########################################################################
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([WITH_IPSECMB_CRYPTO], [test "x$with_crypto" == "xipsecmb"])
+])
+
+##########################################################################
+# Null implementation
+##########################################################################
+AS_IF([test "x$with_crypto" == "xnull"],
+ [AC_MSG_WARN([Using null crypto. Strong cryptography is not available])])
diff --git a/platform/linux-generic/m4/odp_dpdk.m4 b/platform/linux-generic/m4/odp_dpdk.m4
index 30347dce8..dc3c9610e 100644
--- a/platform/linux-generic/m4/odp_dpdk.m4
+++ b/platform/linux-generic/m4/odp_dpdk.m4
@@ -2,43 +2,67 @@
# Enable DPDK support
##########################################################################
pktio_dpdk_support=no
-AC_ARG_ENABLE([dpdk_support],
- [ --enable-dpdk-support include dpdk IO support],
- [if test x$enableval = xyes; then
- pktio_dpdk_support=yes
- fi])
+
+AC_ARG_ENABLE([dpdk],
+ [AS_HELP_STRING([--enable-dpdk],
+ [enable DPDK support for Packet I/O [default=disabled] (linux-generic)])],
+ [pktio_dpdk_support=$enableval
+ DPDK_PATH=system])
+
+AC_ARG_WITH([dpdk-path],
+[AS_HELP_STRING([--with-dpdk-path=DIR],
+ [path to DPDK build directory [default=system] (linux-generic)])],
+ [DPDK_PATH="$withval"
+ pktio_dpdk_support=yes],[])
##########################################################################
-# Set optional DPDK path
+# Use shared DPDK library
##########################################################################
-AC_ARG_WITH([dpdk-path],
-AC_HELP_STRING([--with-dpdk-path=DIR path to dpdk build directory],
- [(or in the default path if not specified).]),
- [DPDK_PATH=$withval
- AM_CPPFLAGS="$AM_CPPFLAGS -msse4.2 -isystem $DPDK_PATH/include"
- AM_LDFLAGS="$AM_LDFLAGS -L$DPDK_PATH/lib"
- LIBS="$LIBS -ldpdk -ldl -lpcap"
- pktio_dpdk_support=yes],[])
+dpdk_shared=no
+AC_ARG_ENABLE([dpdk-shared],
+ [AS_HELP_STRING([--enable-dpdk-shared],
+ [use shared DPDK library [default=disabled] (linux-generic)])],
+ [if test x$enableval = xyes; then
+ dpdk_shared=yes
+ fi])
##########################################################################
-# Save and set temporary compilation flags
+# Enable zero-copy DPDK pktio
##########################################################################
-OLD_CPPFLAGS=$CPPFLAGS
-CPPFLAGS="$AM_CPPFLAGS $CPPFLAGS"
+zero_copy=0
+AC_ARG_ENABLE([dpdk-zero-copy],
+ [AS_HELP_STRING([--enable-dpdk-zero-copy],
+ [enable experimental zero-copy DPDK pktio mode [default=disabled] (linux-generic)])],
+ [if test x$enableval = xyes; then
+ zero_copy=1
+ fi])
##########################################################################
# Check for DPDK availability
+#
+# DPDK pmd drivers are not linked unless the --whole-archive option is
+# used. No spaces are allowed between the --whole-archive flags.
##########################################################################
if test x$pktio_dpdk_support = xyes
then
- AC_CHECK_HEADERS([rte_config.h], [],
- [AC_MSG_FAILURE(["can't find DPDK header"])])
- ODP_CFLAGS="$ODP_CFLAGS -DODP_PKTIO_DPDK"
+ ODP_DPDK([$DPDK_PATH], [$dpdk_shared], [],
+ [AC_MSG_FAILURE([can't find DPDK])])
+
+ case "${host}" in
+ i?86* | x86*)
+ DPDK_CFLAGS="${DPDK_CFLAGS} -msse4.2"
+ ;;
+ esac
+
+ ODP_CHECK_CFLAG([-Wno-error=cast-align])
+ AC_DEFINE([_ODP_PKTIO_DPDK], [1],
+ [Define to 1 to enable DPDK packet I/O support])
+ AC_DEFINE_UNQUOTED([_ODP_DPDK_ZERO_COPY], [$zero_copy],
+ [Define to 1 to enable DPDK zero copy support])
else
pktio_dpdk_support=no
fi
-##########################################################################
-# Restore old saved variables
-##########################################################################
-CPPFLAGS=$OLD_CPPFLAGS
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([PKTIO_DPDK], [test x$pktio_dpdk_support = xyes ])
+])
diff --git a/platform/linux-generic/m4/odp_event_validation.m4 b/platform/linux-generic/m4/odp_event_validation.m4
new file mode 100644
index 000000000..08bb8902e
--- /dev/null
+++ b/platform/linux-generic/m4/odp_event_validation.m4
@@ -0,0 +1,23 @@
+# ODP_EVENT_VALIDATION
+# --------------------
+# Select event validation level
+AC_DEFUN([ODP_EVENT_VALIDATION], [dnl
+AC_ARG_ENABLE([event-validation],
+ [AS_HELP_STRING([--enable-event-validation],
+ [enable event validation (warn/abort)
+ [default=disabled] (linux-generic)])],
+ [], [AS_IF([test "x$enable_debug" = "xfull"],
+ [enable_event_validation=yes], [enable_event_validation=no])])
+
+# Default to abort mode if validation is enabled
+AS_IF([test "x$enable_event_validation" = "xyes"],
+ [enable_event_validation="abort"])
+
+validation_level=0
+AS_IF([test "x$enable_event_validation" = "xwarn"], [validation_level=1])
+AS_IF([test "x$enable_event_validation" = "xyes" -o "x$enable_event_validation" = "xabort"],
+ [validation_level=2])
+
+AC_DEFINE_UNQUOTED([_ODP_EVENT_VALIDATION], [$validation_level],
+ [Define to 1 or 2 to enable event validation])
+]) # ODP_EVENT_VALIDATION
diff --git a/platform/linux-generic/m4/odp_ipc.m4 b/platform/linux-generic/m4/odp_ipc.m4
deleted file mode 100644
index 78217e221..000000000
--- a/platform/linux-generic/m4/odp_ipc.m4
+++ /dev/null
@@ -1,9 +0,0 @@
-##########################################################################
-# Enable IPC pktio support
-##########################################################################
-AC_ARG_ENABLE([pktio_ipc_support],
- [ --enable-pktio_ipc-support include ipc IO support],
- [if test x$enableval = xyes; then
- pktio_ipc_support=yes
- ODP_CFLAGS="$ODP_CFLAGS -D_ODP_PKTIO_IPC"
- fi])
diff --git a/platform/linux-generic/m4/odp_ipsec_mb.m4 b/platform/linux-generic/m4/odp_ipsec_mb.m4
new file mode 100644
index 000000000..3268d94c0
--- /dev/null
+++ b/platform/linux-generic/m4/odp_ipsec_mb.m4
@@ -0,0 +1,19 @@
+#########################################################################
+# Check for libIPSec_MB availability
+#########################################################################
+ipsecmb_support=no
+AC_CHECK_HEADERS([ipsec-mb.h],
+ [AC_CHECK_LIB([IPSec_MB], [init_mb_mgr_auto], [ipsecmb_support=yes],
+ [ipsecmb_support=no])],
+ [ipsecmb_support=no])
+
+AS_IF([test "x$with_crypto" = "xipsecmb" -a "x$ipsecmb_support" = "xno"],
+ [AC_MSG_ERROR([IPSec MB library not found on this platform])])
+
+if test "x$with_crypto" = "xipsecmb"; then
+ IPSEC_MB_LIBS="-lIPSec_MB"
+else
+ IPSEC_MB_LIBS=""
+fi
+
+AC_SUBST([IPSEC_MB_LIBS])
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
new file mode 100644
index 000000000..77095e0fe
--- /dev/null
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -0,0 +1,36 @@
+##########################################################################
+# Configuration file version
+##########################################################################
+m4_define([_odp_config_version_generation], [0])
+m4_define([_odp_config_version_major], [1])
+m4_define([_odp_config_version_minor], [28])
+
+m4_define([_odp_config_version],
+ [_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
+
+_ODP_CONFIG_VERSION_GENERATION=_odp_config_version_generation
+AC_SUBST(_ODP_CONFIG_VERSION_GENERATION)
+_ODP_CONFIG_VERSION_MAJOR=_odp_config_version_major
+AC_SUBST(_ODP_CONFIG_VERSION_MAJOR)
+_ODP_CONFIG_VERSION_MINOR=_odp_config_version_minor
+AC_SUBST(_ODP_CONFIG_VERSION_MINOR)
+
+##########################################################################
+# Set optional path for the default configuration file
+##########################################################################
+default_config_path="${srcdir}/config/odp-linux-generic.conf"
+
+AC_CHECK_PROGS([REALPATH], [realpath])
+AS_IF([test -z "$REALPATH"], [AC_MSG_ERROR([Could not find 'realpath'])])
+
+AC_ARG_WITH([config-file],
+AS_HELP_STRING([--with-config-file=FILE], [path to the default configuration file]
+ [(this file must include all configuration options)]
+ [[default=SRCDIR/config/odp-<platform>.conf]]),
+ [default_config_path=$withval], [])
+
+rel_default_config_path=`realpath --relative-to=$(pwd) ${default_config_path}`
+AC_SUBST(default_config_path)
+AC_SUBST(rel_default_config_path)
+
+ODP_LIBCONFIG([linux-generic], [$rel_default_config_path])
diff --git a/platform/linux-generic/m4/odp_ml.m4 b/platform/linux-generic/m4/odp_ml.m4
new file mode 100644
index 000000000..a7b9a4fd6
--- /dev/null
+++ b/platform/linux-generic/m4/odp_ml.m4
@@ -0,0 +1,46 @@
+##########################################################################
+# Onnxruntime library path and name
+##########################################################################
+# Optional configure parameter for a non-standard install prefix of onnxruntime
+AC_ARG_WITH([ort-path],
+ [AS_HELP_STRING([--with-ort-path=DIR],
+ [path to onnxruntime libs and headers [default=system]])],
+ [ort_path_given=yes
+ ORT_CPPFLAGS="-I$withval/include"
+ ORT_LIBS="-L$withval/lib"
+ ORT_RPATH="-R$withval/lib"],
+ [])
+
+##########################################################################
+# Save and set temporary compilation flags
+##########################################################################
+OLD_CPPFLAGS=$CPPFLAGS
+OLD_LIBS=$LIBS
+CPPFLAGS="$ORT_CPPFLAGS $CPPFLAGS"
+LIBS="$ORT_LIBS $LIBS"
+
+#########################################################################
+# If ort is available, enable ML API
+#########################################################################
+ml_support=no
+AC_CHECK_HEADERS([onnxruntime_c_api.h],
+ [AC_CHECK_LIB(onnxruntime, OrtGetApiBase, [ml_support=yes], [], [])],
+ [AS_IF([test "x$ort_path_given" = "xyes"],
+ [AC_MSG_ERROR([ort not found at the specified path (--with-ort-path)])])])
+
+AS_IF([test "x$ml_support" != "xno"],
+ [ORT_LIBS="$ORT_RPATH $ORT_LIBS -lonnxruntime -lm"],
+ [ORT_CPPFLAGS="" ORT_LIBS="-lm"])
+
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([WITH_ML], [test x$ml_support = xyes ])
+])
+
+##########################################################################
+# Restore old saved variables
+##########################################################################
+LIBS=$OLD_LIBS
+CPPFLAGS=$OLD_CPPFLAGS
+
+AC_SUBST([ORT_CPPFLAGS])
+AC_SUBST([ORT_LIBS])
diff --git a/platform/linux-generic/m4/odp_netmap.m4 b/platform/linux-generic/m4/odp_netmap.m4
deleted file mode 100644
index 880e9d587..000000000
--- a/platform/linux-generic/m4/odp_netmap.m4
+++ /dev/null
@@ -1,42 +0,0 @@
-##########################################################################
-# Enable netmap support
-##########################################################################
-netmap_support=no
-AC_ARG_ENABLE([netmap_support],
- [ --enable-netmap-support include netmap IO support],
- [if test x$enableval = xyes; then
- netmap_support=yes
- fi])
-
-##########################################################################
-# Set optional netmap path
-##########################################################################
-AC_ARG_WITH([netmap-path],
-AC_HELP_STRING([--with-netmap-path=DIR path to netmap root directory],
- [(or in the default path if not specified).]),
- [NETMAP_PATH=$withval
- AM_CPPFLAGS="$AM_CPPFLAGS -isystem $NETMAP_PATH/sys"
- netmap_support=yes],[])
-
-##########################################################################
-# Save and set temporary compilation flags
-##########################################################################
-OLD_CPPFLAGS=$CPPFLAGS
-CPPFLAGS="$AM_CPPFLAGS $CPPFLAGS"
-
-##########################################################################
-# Check for netmap availability
-##########################################################################
-if test x$netmap_support = xyes
-then
- AC_CHECK_HEADERS([net/netmap_user.h], [],
- [AC_MSG_FAILURE(["can't find netmap header"])])
- ODP_CFLAGS="$ODP_CFLAGS -DODP_NETMAP"
-else
- netmap_support=no
-fi
-
-##########################################################################
-# Restore old saved variables
-##########################################################################
-CPPFLAGS=$OLD_CPPFLAGS
diff --git a/platform/linux-generic/m4/odp_openssl.m4 b/platform/linux-generic/m4/odp_openssl.m4
index 2344914b7..3d3f13a81 100644
--- a/platform/linux-generic/m4/odp_openssl.m4
+++ b/platform/linux-generic/m4/odp_openssl.m4
@@ -1,32 +1,33 @@
##########################################################################
-# Set optional OpenSSL path
-##########################################################################
-AC_ARG_WITH([openssl-path],
-AC_HELP_STRING([--with-openssl-path=DIR path to openssl libs and headers],
- [(or in the default path if not specified).]),
- [OPENSSL_PATH=$withval
- AM_CPPFLAGS="$AM_CPPFLAGS -I$OPENSSL_PATH/include"
- AM_LDFLAGS="$AM_LDFLAGS -L$OPENSSL_PATH/lib"
- ],[])
+# Enable/disable usage of OpenSSL library
+##########################################################################
+AC_ARG_WITH([openssl],
+ [AS_HELP_STRING([--without-openssl],
+ [compile without OpenSSL (may result in disabled crypto and random support)]
+ [[default=with] (linux-generic)])],
+ [],
+ [with_openssl=yes])
+AS_IF([test "$with_openssl" != "no"],
+ [ODP_OPENSSL
+ have_openssl=1], [have_openssl=0])
+AM_CONDITIONAL([WITH_OPENSSL], [test x$with_openssl != xno])
+AC_DEFINE_UNQUOTED([_ODP_OPENSSL], [$have_openssl],
+ [Define to 1 to enable OpenSSL support])
##########################################################################
-# Save and set temporary compilation flags
+# Enable/disable usage of OpenSSL for random data
##########################################################################
-OLD_LDFLAGS=$LDFLAGS
-OLD_CPPFLAGS=$CPPFLAGS
-LDFLAGS="$AM_LDFLAGS $LDFLAGS"
-CPPFLAGS="$AM_CPPFLAGS $CPPFLAGS"
+have_openssl_rand=1
+AC_ARG_ENABLE([openssl-rand],
+ [AS_HELP_STRING([--disable-openssl-rand],
+ [disable OpenSSL random data (use arch-specific instead)]
+ [[default=enabled] (linux-generic)])],
+ [if test "x$enableval" = "xno"; then
+ have_openssl_rand=0
+ fi])
-##########################################################################
-# Check for OpenSSL availability
-##########################################################################
-AC_CHECK_LIB([crypto], [EVP_EncryptInit], [],
- [AC_MSG_FAILURE([OpenSSL libraries required])])
-AC_CHECK_HEADERS([openssl/des.h openssl/rand.h openssl/hmac.h openssl/evp.h], [],
- [AC_MSG_ERROR([OpenSSL headers required])])
+AS_IF([test "$have_openssl" != "1"], [have_openssl_rand=0])
+AS_IF([test "$have_openssl_rand" = "1"], [openssl_rand=yes], [openssl_rand=no])
-##########################################################################
-# Restore old saved variables
-##########################################################################
-LDFLAGS=$OLD_LDFLAGS
-CPPFLAGS=$OLD_CPPFLAGS
+AC_DEFINE_UNQUOTED([_ODP_OPENSSL_RAND], [$have_openssl_rand],
+ [Define to 1 to enable OpenSSL support])
diff --git a/platform/linux-generic/m4/odp_pcap.m4 b/platform/linux-generic/m4/odp_pcap.m4
index 0439c60b1..dd1c16e69 100644
--- a/platform/linux-generic/m4/odp_pcap.m4
+++ b/platform/linux-generic/m4/odp_pcap.m4
@@ -1,3 +1,7 @@
+# ODP_PCAP([ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND])
+# --------------------------------------------------
+AC_DEFUN([ODP_PCAP],
+[dnl
#########################################################################
# Check for libpcap availability
#########################################################################
@@ -8,7 +12,19 @@ AC_CHECK_HEADER(pcap/pcap.h,
[])],
[])
-if test $have_pcap == yes; then
- AM_CFLAGS="$AM_CFLAGS -DHAVE_PCAP"
- LIBS="$LIBS -lpcap"
+if test "$have_pcap" = "yes"; then
+ AC_DEFINE([_ODP_PKTIO_PCAP], 1,
+ [Define to 1 to enable pcap packet I/O support])
+ PCAP_LIBS="-lpcap"
+else
+ PCAP_LIBS=""
fi
+
+AC_SUBST([PCAP_LIBS])
+
+if test "x$have_pcap" = "xyes" ; then
+ m4_default([$1], [:])
+else
+ m4_default([$2], [:])
+fi
+]) # ODP_PCAP
diff --git a/platform/linux-generic/m4/odp_pcapng.m4 b/platform/linux-generic/m4/odp_pcapng.m4
new file mode 100644
index 000000000..6ad92db88
--- /dev/null
+++ b/platform/linux-generic/m4/odp_pcapng.m4
@@ -0,0 +1,20 @@
+##########################################################################
+# Enable PCAPNG support
+##########################################################################
+have_pcapng=no
+pcapng_support=0
+
+AC_ARG_ENABLE([pcapng-support],
+ [AS_HELP_STRING([--enable-pcapng-support],
+ [enable experimental tcpdump for pktios [default=disabled] (linux-generic)])],
+ have_pcapng=$enableval
+ [if test x$enableval = xyes; then
+ pcapng_support=1
+ fi])
+
+AC_DEFINE_UNQUOTED([_ODP_PCAPNG], [$pcapng_support],
+ [Define to 1 to enable pcapng support])
+
+AC_CONFIG_COMMANDS_PRE([dnl
+AM_CONDITIONAL([have_pcapng], [test x$have_pcapng = xyes])
+])
diff --git a/platform/linux-generic/m4/odp_pthread.m4 b/platform/linux-generic/m4/odp_pthread.m4
deleted file mode 100644
index 7f391039b..000000000
--- a/platform/linux-generic/m4/odp_pthread.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-##########################################################################
-# Check for pthreads availability
-##########################################################################
-
-AX_PTHREAD([CC="$PTHREAD_CC"], [
- echo "Error! We require pthreads to be available"
- exit -1
- ])
-LIBS="$PTHREAD_LIBS $LIBS"
-AM_CFLAGS="$AM_CFLAGS $PTHREAD_CFLAGS"
-AM_LDFLAGS="$AM_LDFLAGS $PTHREAD_LDFLAGS"
-
-AM_LDFLAGS="$AM_LDFLAGS -pthread -lrt"
diff --git a/platform/linux-generic/m4/odp_schedule.m4 b/platform/linux-generic/m4/odp_schedule.m4
deleted file mode 100644
index 91c19f21a..000000000
--- a/platform/linux-generic/m4/odp_schedule.m4
+++ /dev/null
@@ -1,13 +0,0 @@
-AC_ARG_ENABLE([schedule-sp],
- [ --enable-schedule-sp enable strict priority scheduler],
- [if test x$enableval = xyes; then
- schedule_sp_enabled=yes
- ODP_CFLAGS="$ODP_CFLAGS -DODP_SCHEDULE_SP"
- fi])
-
-AC_ARG_ENABLE([schedule-iquery],
- [ --enable-schedule-iquery enable interests query (sparse bitmap) scheduler],
- [if test x$enableval = xyes; then
- schedule_iquery_enabled=yes
- ODP_CFLAGS="$ODP_CFLAGS -DODP_SCHEDULE_IQUERY"
- fi])
diff --git a/platform/linux-generic/m4/odp_scheduler.m4 b/platform/linux-generic/m4/odp_scheduler.m4
new file mode 100644
index 000000000..c199ceed5
--- /dev/null
+++ b/platform/linux-generic/m4/odp_scheduler.m4
@@ -0,0 +1,11 @@
+# ODP_SCHEDULER
+# -------------
+# Select default scheduler
+AC_DEFUN([ODP_SCHEDULER], [dnl
+AC_ARG_ENABLE([scheduler-default],
+ [AS_HELP_STRING([--enable-scheduler-default],
+ [choose default scheduler [default=basic] (linux-generic)])],
+ [], [enable_scheduler_default=basic])
+AC_DEFINE_UNQUOTED([_ODP_SCHEDULE_DEFAULT], ["$enable_scheduler_default"],
+ [Define to name default scheduler])
+]) # ODP_SCHEDULER
diff --git a/platform/linux-generic/m4/odp_wfe.m4 b/platform/linux-generic/m4/odp_wfe.m4
new file mode 100644
index 000000000..f0f0542f7
--- /dev/null
+++ b/platform/linux-generic/m4/odp_wfe.m4
@@ -0,0 +1,14 @@
+##########################################################################
+# Enable/disable WFE based lock implementations
+##########################################################################
+use_wfe_locks=no
+AC_ARG_ENABLE([wfe-locks],
+ [AS_HELP_STRING([--enable-wfe-locks],
+ [enable WFE based lock implementations on aarch64]
+ [[default=disabled] (linux-generic)])],
+ [use_wfe_locks=$enableval])
+
+if test x$use_wfe_locks = xyes; then
+ AC_DEFINE([_ODP_WFE_LOCKS], [1],
+ [Define to 1 to enable WFE based lock implementations on aarch64])
+fi
diff --git a/platform/linux-generic/m4/odp_xdp.m4 b/platform/linux-generic/m4/odp_xdp.m4
new file mode 100644
index 000000000..dcfd39ed7
--- /dev/null
+++ b/platform/linux-generic/m4/odp_xdp.m4
@@ -0,0 +1,15 @@
+##########################################################################
+# Check for libxdp availability
+##########################################################################
+AC_ARG_ENABLE([xdp], AS_HELP_STRING([--enable-xdp],
+ [enable experimental XDP support for Packet I/O [default=disabled] (linux-generic)]))
+
+AS_IF([test "x$enable_xdp" = "xyes"], [
+ PKG_CHECK_MODULES([LIBXDP], [libxdp >= 1.2.3],
+ [
+ AC_DEFINE(_ODP_PKTIO_XDP, [1], [Define to 1 to enable xdp packet I/O support])
+ ],
+ [
+ AS_IF([test "x$enable_xdp" == "xyes"], [AC_MSG_ERROR([libxdp not found])])
+ ])
+])
diff --git a/platform/linux-generic/miniz/miniz.c b/platform/linux-generic/miniz/miniz.c
new file mode 100644
index 000000000..d0e39ec4a
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz.c
@@ -0,0 +1,619 @@
+/**************************************************************************
+ *
+ * Copyright 2013-2014 RAD Game Tools and Valve Software
+ * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "miniz.h"
+
+typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
+typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
+typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- zlib-style API's */
+
+mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
+{
+ mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
+ size_t block_len = buf_len % 5552;
+ if (!ptr)
+ return MZ_ADLER32_INIT;
+ while (buf_len)
+ {
+ for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
+ {
+ s1 += ptr[0], s2 += s1;
+ s1 += ptr[1], s2 += s1;
+ s1 += ptr[2], s2 += s1;
+ s1 += ptr[3], s2 += s1;
+ s1 += ptr[4], s2 += s1;
+ s1 += ptr[5], s2 += s1;
+ s1 += ptr[6], s2 += s1;
+ s1 += ptr[7], s2 += s1;
+ }
+ for (; i < block_len; ++i)
+ s1 += *ptr++, s2 += s1;
+ s1 %= 65521U, s2 %= 65521U;
+ buf_len -= block_len;
+ block_len = 5552;
+ }
+ return (s2 << 16) + s1;
+}
+
+/* Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/ */
+#if 0
+ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
+ {
+ static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
+ mz_uint32 crcu32 = (mz_uint32)crc;
+ if (!ptr)
+ return MZ_CRC32_INIT;
+ crcu32 = ~crcu32;
+ while (buf_len--)
+ {
+ mz_uint8 b = *ptr++;
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
+ }
+ return ~crcu32;
+ }
+#else
+/* Faster, but larger CPU cache footprint.
+ */
+mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
+{
+ static const mz_uint32 s_crc_table[256] =
+ {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535,
+ 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD,
+ 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D,
+ 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4,
+ 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
+ 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC,
+ 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB,
+ 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F,
+ 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB,
+ 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA,
+ 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE,
+ 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A,
+ 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409,
+ 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
+ 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739,
+ 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268,
+ 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0,
+ 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8,
+ 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF,
+ 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703,
+ 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7,
+ 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE,
+ 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
+ 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6,
+ 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D,
+ 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5,
+ 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605,
+ 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+ };
+
+ mz_uint32 crc32 = (mz_uint32)crc ^ 0xFFFFFFFF;
+ const mz_uint8 *pByte_buf = (const mz_uint8 *)ptr;
+
+ while (buf_len >= 4)
+ {
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF];
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[1]) & 0xFF];
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[2]) & 0xFF];
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[3]) & 0xFF];
+ pByte_buf += 4;
+ buf_len -= 4;
+ }
+
+ while (buf_len)
+ {
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF];
+ ++pByte_buf;
+ --buf_len;
+ }
+
+ return ~crc32;
+}
+#endif
+
+void mz_free(void *p)
+{
+ MZ_FREE(p);
+}
+
+void *miniz_def_alloc_func(void *opaque, size_t items, size_t size)
+{
+ (void)opaque, (void)items, (void)size;
+ return MZ_MALLOC(items * size);
+}
+void miniz_def_free_func(void *opaque, void *address)
+{
+ (void)opaque, (void)address;
+ MZ_FREE(address);
+}
+void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size)
+{
+ (void)opaque, (void)address, (void)items, (void)size;
+ return MZ_REALLOC(address, items * size);
+}
+
+const char *mz_version(void)
+{
+ return MZ_VERSION;
+}
+
+#ifndef MINIZ_NO_ZLIB_APIS
+
+int mz_deflateInit(mz_streamp pStream, int level)
+{
+ return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY);
+}
+
+int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy)
+{
+ tdefl_compressor *pComp;
+ mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
+
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
+ return MZ_PARAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = MZ_ADLER32_INIT;
+ pStream->msg = NULL;
+ pStream->reserved = 0;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ if (!pStream->zalloc)
+ pStream->zalloc = miniz_def_alloc_func;
+ if (!pStream->zfree)
+ pStream->zfree = miniz_def_free_func;
+
+ pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor));
+ if (!pComp)
+ return MZ_MEM_ERROR;
+
+ pStream->state = (struct mz_internal_state *)pComp;
+
+ if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY)
+ {
+ mz_deflateEnd(pStream);
+ return MZ_PARAM_ERROR;
+ }
+
+ return MZ_OK;
+}
+
+int mz_deflateReset(mz_streamp pStream)
+{
+ if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree))
+ return MZ_STREAM_ERROR;
+ pStream->total_in = pStream->total_out = 0;
+ tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags);
+ return MZ_OK;
+}
+
+int mz_deflate(mz_streamp pStream, int flush)
+{
+ size_t in_bytes, out_bytes;
+ mz_ulong orig_total_in, orig_total_out;
+ int mz_status = MZ_OK;
+
+ if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out))
+ return MZ_STREAM_ERROR;
+ if (!pStream->avail_out)
+ return MZ_BUF_ERROR;
+
+ if (flush == MZ_PARTIAL_FLUSH)
+ flush = MZ_SYNC_FLUSH;
+
+ if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE)
+ return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
+
+ orig_total_in = pStream->total_in;
+ orig_total_out = pStream->total_out;
+ for (;;)
+ {
+ tdefl_status defl_status;
+ in_bytes = pStream->avail_in;
+ out_bytes = pStream->avail_out;
+
+ defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush);
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
+
+ pStream->next_out += (mz_uint)out_bytes;
+ pStream->avail_out -= (mz_uint)out_bytes;
+ pStream->total_out += (mz_uint)out_bytes;
+
+ if (defl_status < 0)
+ {
+ mz_status = MZ_STREAM_ERROR;
+ break;
+ }
+ else if (defl_status == TDEFL_STATUS_DONE)
+ {
+ mz_status = MZ_STREAM_END;
+ break;
+ }
+ else if (!pStream->avail_out)
+ break;
+ else if ((!pStream->avail_in) && (flush != MZ_FINISH))
+ {
+ if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out))
+ break;
+ return MZ_BUF_ERROR; /* Can't make forward progress without some input.
+ */
+ }
+ }
+ return mz_status;
+}
+
+int mz_deflateEnd(mz_streamp pStream)
+{
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if (pStream->state)
+ {
+ pStream->zfree(pStream->opaque, pStream->state);
+ pStream->state = NULL;
+ }
+ return MZ_OK;
+}
+
+mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len)
+{
+ (void)pStream;
+ /* This is really over conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.) */
+ return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
+}
+
+int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level)
+{
+ int status;
+ mz_stream stream;
+ memset(&stream, 0, sizeof(stream));
+
+ /* In case mz_ulong is 64-bits (argh I hate longs). */
+ if ((source_len | *pDest_len) > 0xFFFFFFFFU)
+ return MZ_PARAM_ERROR;
+
+ stream.next_in = pSource;
+ stream.avail_in = (mz_uint32)source_len;
+ stream.next_out = pDest;
+ stream.avail_out = (mz_uint32)*pDest_len;
+
+ status = mz_deflateInit(&stream, level);
+ if (status != MZ_OK)
+ return status;
+
+ status = mz_deflate(&stream, MZ_FINISH);
+ if (status != MZ_STREAM_END)
+ {
+ mz_deflateEnd(&stream);
+ return (status == MZ_OK) ? MZ_BUF_ERROR : status;
+ }
+
+ *pDest_len = stream.total_out;
+ return mz_deflateEnd(&stream);
+}
+
+int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
+{
+ return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION);
+}
+
+mz_ulong mz_compressBound(mz_ulong source_len)
+{
+ return mz_deflateBound(NULL, source_len);
+}
+
+int mz_inflateInit2(mz_streamp pStream, int window_bits)
+{
+ inflate_state *pDecomp;
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))
+ return MZ_PARAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = 0;
+ pStream->msg = NULL;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ pStream->reserved = 0;
+ if (!pStream->zalloc)
+ pStream->zalloc = miniz_def_alloc_func;
+ if (!pStream->zfree)
+ pStream->zfree = miniz_def_free_func;
+
+ pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state));
+ if (!pDecomp)
+ return MZ_MEM_ERROR;
+
+ pStream->state = (struct mz_internal_state *)pDecomp;
+
+ tinfl_init(&pDecomp->m_decomp);
+ pDecomp->m_dict_ofs = 0;
+ pDecomp->m_dict_avail = 0;
+ pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
+ pDecomp->m_first_call = 1;
+ pDecomp->m_has_flushed = 0;
+ pDecomp->m_window_bits = window_bits;
+
+ return MZ_OK;
+}
+
+int mz_inflateInit(mz_streamp pStream)
+{
+ return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
+}
+
+int mz_inflateReset(mz_streamp pStream)
+{
+ inflate_state *pDecomp;
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = 0;
+ pStream->msg = NULL;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ pStream->reserved = 0;
+
+ pDecomp = (inflate_state *)pStream->state;
+
+ tinfl_init(&pDecomp->m_decomp);
+ pDecomp->m_dict_ofs = 0;
+ pDecomp->m_dict_avail = 0;
+ pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
+ pDecomp->m_first_call = 1;
+ pDecomp->m_has_flushed = 0;
+ /* pDecomp->m_window_bits = window_bits */;
+
+ return MZ_OK;
+}
+
+int mz_inflate(mz_streamp pStream, int flush)
+{
+ inflate_state *pState;
+ mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
+ size_t in_bytes, out_bytes, orig_avail_in;
+ tinfl_status status;
+
+ if ((!pStream) || (!pStream->state))
+ return MZ_STREAM_ERROR;
+ if (flush == MZ_PARTIAL_FLUSH)
+ flush = MZ_SYNC_FLUSH;
+ if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
+ return MZ_STREAM_ERROR;
+
+ pState = (inflate_state *)pStream->state;
+ if (pState->m_window_bits > 0)
+ decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
+ orig_avail_in = pStream->avail_in;
+
+ first_call = pState->m_first_call;
+ pState->m_first_call = 0;
+ if (pState->m_last_status < 0)
+ return MZ_DATA_ERROR;
+
+ if (pState->m_has_flushed && (flush != MZ_FINISH))
+ return MZ_STREAM_ERROR;
+ pState->m_has_flushed |= (flush == MZ_FINISH);
+
+ if ((flush == MZ_FINISH) && (first_call))
+ {
+ /* MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file. */
+ decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
+ in_bytes = pStream->avail_in;
+ out_bytes = pStream->avail_out;
+ status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags);
+ pState->m_last_status = status;
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tinfl_get_adler32(&pState->m_decomp);
+ pStream->next_out += (mz_uint)out_bytes;
+ pStream->avail_out -= (mz_uint)out_bytes;
+ pStream->total_out += (mz_uint)out_bytes;
+
+ if (status < 0)
+ return MZ_DATA_ERROR;
+ else if (status != TINFL_STATUS_DONE)
+ {
+ pState->m_last_status = TINFL_STATUS_FAILED;
+ return MZ_BUF_ERROR;
+ }
+ return MZ_STREAM_END;
+ }
+ /* flush != MZ_FINISH then we must assume there's more input. */
+ if (flush != MZ_FINISH)
+ decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
+
+ if (pState->m_dict_avail)
+ {
+ n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
+ memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
+ pStream->next_out += n;
+ pStream->avail_out -= n;
+ pStream->total_out += n;
+ pState->m_dict_avail -= n;
+ pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
+ return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
+ }
+
+ for (;;)
+ {
+ in_bytes = pStream->avail_in;
+ out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
+
+ status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
+ pState->m_last_status = status;
+
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tinfl_get_adler32(&pState->m_decomp);
+
+ pState->m_dict_avail = (mz_uint)out_bytes;
+
+ n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
+ memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
+ pStream->next_out += n;
+ pStream->avail_out -= n;
+ pStream->total_out += n;
+ pState->m_dict_avail -= n;
+ pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
+
+ if (status < 0)
+ return MZ_DATA_ERROR; /* Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). */
+ else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
+ return MZ_BUF_ERROR; /* Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. */
+ else if (flush == MZ_FINISH)
+ {
+ /* The output buffer MUST be large to hold the remaining uncompressed data when flush==MZ_FINISH. */
+ if (status == TINFL_STATUS_DONE)
+ return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
+ /* status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. */
+ else if (!pStream->avail_out)
+ return MZ_BUF_ERROR;
+ }
+ else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail))
+ break;
+ }
+
+ return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
+}
+
+int mz_inflateEnd(mz_streamp pStream)
+{
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if (pStream->state)
+ {
+ pStream->zfree(pStream->opaque, pStream->state);
+ pStream->state = NULL;
+ }
+ return MZ_OK;
+}
+
+int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
+{
+ mz_stream stream;
+ int status;
+ memset(&stream, 0, sizeof(stream));
+
+ /* In case mz_ulong is 64-bits (argh I hate longs). */
+ if ((source_len | *pDest_len) > 0xFFFFFFFFU)
+ return MZ_PARAM_ERROR;
+
+ stream.next_in = pSource;
+ stream.avail_in = (mz_uint32)source_len;
+ stream.next_out = pDest;
+ stream.avail_out = (mz_uint32)*pDest_len;
+
+ status = mz_inflateInit(&stream);
+ if (status != MZ_OK)
+ return status;
+
+ status = mz_inflate(&stream, MZ_FINISH);
+ if (status != MZ_STREAM_END)
+ {
+ mz_inflateEnd(&stream);
+ return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status;
+ }
+ *pDest_len = stream.total_out;
+
+ return mz_inflateEnd(&stream);
+}
+
+const char *mz_error(int err)
+{
+ static struct
+ {
+ int m_err;
+ const char *m_pDesc;
+ } s_error_descs[] =
+ {
+ { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" }
+ };
+ mz_uint i;
+ for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
+ if (s_error_descs[i].m_err == err)
+ return s_error_descs[i].m_pDesc;
+ return NULL;
+}
+
+#endif /*MINIZ_NO_ZLIB_APIS */
+
+#ifdef __cplusplus
+}
+#endif
+
+/*
+ This is free and unencumbered software released into the public domain.
+
+ Anyone is free to copy, modify, publish, use, compile, sell, or
+ distribute this software, either in source code form or as a compiled
+ binary, for any purpose, commercial or non-commercial, and by any
+ means.
+
+ In jurisdictions that recognize copyright laws, the author or authors
+ of this software dedicate any and all copyright interest in the
+ software to the public domain. We make this dedication for the benefit
+ of the public at large and to the detriment of our heirs and
+ successors. We intend this dedication to be an overt act of
+ relinquishment in perpetuity of all present and future rights to this
+ software under copyright law.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ For more information, please refer to <http://unlicense.org/>
+*/
diff --git a/platform/linux-generic/miniz/miniz.h b/platform/linux-generic/miniz/miniz.h
new file mode 100644
index 000000000..a2f90d907
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz.h
@@ -0,0 +1,363 @@
+/* miniz.c 2.1.0 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
+ See "unlicense" statement at the end of this file.
+ Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
+ Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
+
+ Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define
+ MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).
+
+ * Low-level Deflate/Inflate implementation notes:
+
+ Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or
+ greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses
+ approximately as well as zlib.
+
+ Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function
+ coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory
+ block large enough to hold the entire file.
+
+ The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation.
+
+ * zlib-style API notes:
+
+ miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in
+ zlib replacement in many apps:
+ The z_stream struct, optional memory allocation callbacks
+ deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
+ inflateInit/inflateInit2/inflate/inflateReset/inflateEnd
+ compress, compress2, compressBound, uncompress
+ CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines.
+ Supports raw deflate streams or standard zlib streams with adler-32 checking.
+
+ Limitations:
+ The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries.
+ I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but
+ there are no guarantees that miniz.c pulls this off perfectly.
+
+ * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by
+ Alex Evans. Supports 1-4 bytes/pixel images.
+
+ * ZIP archive API notes:
+
+ The ZIP archive API's were designed with simplicity and efficiency in mind, with just enough abstraction to
+ get the job done with minimal fuss. There are simple API's to retrieve file information, read files from
+ existing archives, create new archives, append new files to existing archives, or clone archive data from
+ one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h),
+ or you can specify custom file read/write callbacks.
+
+ - Archive reading: Just call this function to read a single file from a disk archive:
+
+ void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name,
+ size_t *pSize, mz_uint zip_flags);
+
+ For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central
+ directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files.
+
+ - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file:
+
+ int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags);
+
+ The locate operation can optionally check file comments too, which (as one example) can be used to identify
+ multiple versions of the same file in an archive. This function uses a simple linear search through the central
+ directory, so it's not very fast.
+
+ Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and
+ retrieve detailed info on each file by calling mz_zip_reader_file_stat().
+
+ - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data
+ to disk and builds an exact image of the central directory in memory. The central directory image is written
+ all at once at the end of the archive file when the archive is finalized.
+
+ The archive writer can optionally align each file's local header and file data to any power of 2 alignment,
+ which can be useful when the archive will be read from optical media. Also, the writer supports placing
+ arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still
+ readable by any ZIP tool.
+
+ - Archive appending: The simple way to add a single file to an archive is to call this function:
+
+ mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name,
+ const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);
+
+ The archive will be created if it doesn't already exist, otherwise it'll be appended to.
+ Note the appending is done in-place and is not an atomic operation, so if something goes wrong
+ during the operation it's possible the archive could be left without a central directory (although the local
+ file headers and file data will be fine, so the archive will be recoverable).
+
+ For more complex archive modification scenarios:
+ 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to
+ preserve into a new archive using the mz_zip_writer_add_from_zip_reader() function (which compiles the
+ compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and
+ you're done. This is safe but requires a bunch of temporary disk space or heap memory.
+
+ 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(),
+ append new files as needed, then finalize the archive which will write an updated central directory to the
+ original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a
+ possibility that the archive's central directory could be lost with this method if anything goes wrong, though.
+
+ - ZIP archive support limitations:
+ No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files.
+ Requires streams capable of seeking.
+
+ * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the
+ below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.
+
+ * Important: For best perf. be sure to customize the below macros for your target platform:
+ #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
+ #define MINIZ_LITTLE_ENDIAN 1
+ #define MINIZ_HAS_64BIT_REGISTERS 1
+
+ * On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz
+ uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files
+ (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
+*/
+#pragma once
+
+/* Defines to completely disable specific portions of miniz.c:
+ If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. */
+
+/* Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression API's. */
+/*#define MINIZ_NO_ZLIB_APIS */
+
+#include <stddef.h>
+
+#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
+/* MINIZ_X86_OR_X64_CPU is only used to help set the below macros. */
+#define MINIZ_X86_OR_X64_CPU 1
+#else
+#define MINIZ_X86_OR_X64_CPU 0
+#endif
+
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
+/* Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. */
+#define MINIZ_LITTLE_ENDIAN 1
+#else
+#define MINIZ_LITTLE_ENDIAN 0
+#endif
+
+/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses. */
+#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
+/* #define MINIZ_UNALIGNED_USE_MEMCPY */
+
+#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__) || defined (__arch64__)
+/* Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions). */
+#define MINIZ_HAS_64BIT_REGISTERS 1
+#else
+#define MINIZ_HAS_64BIT_REGISTERS 0
+#endif
+
+#include "miniz_common.h"
+#include "miniz_tdef.h"
+#include "miniz_tinfl.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- zlib-style API Definitions. */
+
+/* For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! */
+typedef unsigned long mz_ulong;
+
+/* mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've modified the MZ_MALLOC macro) to release a block allocated from the heap. */
+void mz_free(void *p);
+
+#define MZ_ADLER32_INIT (1)
+/* mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL. */
+mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
+
+#define MZ_CRC32_INIT (0)
+/* mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL. */
+mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
+
+/* Compression strategies. */
+enum
+{
+ MZ_DEFAULT_STRATEGY = 0,
+ MZ_FILTERED = 1,
+ MZ_HUFFMAN_ONLY = 2,
+ MZ_RLE = 3,
+ MZ_FIXED = 4
+};
+
+/* Method */
+#define MZ_DEFLATED 8
+
+/* Heap allocation callbacks.
+Note that mz_alloc_func parameter types purposely differ from zlib's: items/size is size_t, not unsigned long. */
+typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
+typedef void (*mz_free_func)(void *opaque, void *address);
+typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size);
+
+/* Compression levels: 0-9 are the standard zlib-style levels, 10 is best possible compression (not zlib compatible, and may be very slow), MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. */
+enum
+{
+ MZ_NO_COMPRESSION = 0,
+ MZ_BEST_SPEED = 1,
+ MZ_BEST_COMPRESSION = 9,
+ MZ_UBER_COMPRESSION = 10,
+ MZ_DEFAULT_LEVEL = 6,
+ MZ_DEFAULT_COMPRESSION = -1
+};
+
+#define MZ_VERSION "10.1.0"
+#define MZ_VERNUM 0xA100
+#define MZ_VER_MAJOR 10
+#define MZ_VER_MINOR 1
+#define MZ_VER_REVISION 0
+#define MZ_VER_SUBREVISION 0
+
+#ifndef MINIZ_NO_ZLIB_APIS
+
+/* Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The other values are for advanced use (refer to the zlib docs). */
+enum
+{
+ MZ_NO_FLUSH = 0,
+ MZ_PARTIAL_FLUSH = 1,
+ MZ_SYNC_FLUSH = 2,
+ MZ_FULL_FLUSH = 3,
+ MZ_FINISH = 4,
+ MZ_BLOCK = 5
+};
+
+/* Return status codes. MZ_PARAM_ERROR is non-standard. */
+enum
+{
+ MZ_OK = 0,
+ MZ_STREAM_END = 1,
+ MZ_NEED_DICT = 2,
+ MZ_ERRNO = -1,
+ MZ_STREAM_ERROR = -2,
+ MZ_DATA_ERROR = -3,
+ MZ_MEM_ERROR = -4,
+ MZ_BUF_ERROR = -5,
+ MZ_VERSION_ERROR = -6,
+ MZ_PARAM_ERROR = -10000
+};
+
+/* Window bits */
+#define MZ_DEFAULT_WINDOW_BITS 15
+
+struct mz_internal_state;
+
+/* Compression/decompression stream struct. */
+typedef struct mz_stream_s
+{
+ const unsigned char *next_in; /* pointer to next byte to read */
+ unsigned int avail_in; /* number of bytes available at next_in */
+ mz_ulong total_in; /* total number of bytes consumed so far */
+
+ unsigned char *next_out; /* pointer to next byte to write */
+ unsigned int avail_out; /* number of bytes that can be written to next_out */
+ mz_ulong total_out; /* total number of bytes produced so far */
+
+ char *msg; /* error msg (unused) */
+ struct mz_internal_state *state; /* internal state, allocated by zalloc/zfree */
+
+ mz_alloc_func zalloc; /* optional heap allocation function (defaults to malloc) */
+ mz_free_func zfree; /* optional heap free function (defaults to free) */
+ void *opaque; /* heap alloc function user pointer */
+
+ int data_type; /* data_type (unused) */
+ mz_ulong adler; /* adler32 of the source or uncompressed data */
+ mz_ulong reserved; /* not used */
+} mz_stream;
+
+typedef mz_stream *mz_streamp;
+
+/* Returns the version string of miniz.c. */
+const char *mz_version(void);
+
+/* mz_deflateInit() initializes a compressor with default options: */
+/* Parameters: */
+/* pStream must point to an initialized mz_stream struct. */
+/* level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. */
+/* level 1 enables a specially optimized compression function that's been optimized purely for performance, not ratio. */
+/* (This special func. is currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) */
+/* Return values: */
+/* MZ_OK on success. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+/* MZ_PARAM_ERROR if the input parameters are bogus. */
+/* MZ_MEM_ERROR on out of memory. */
+int mz_deflateInit(mz_streamp pStream, int level);
+
+/* mz_deflateInit2() is like mz_deflate(), except with more control: */
+/* Additional parameters: */
+/* method must be MZ_DEFLATED */
+/* window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no header or footer) */
+/* mem_level must be between [1, 9] (it's checked but ignored by miniz.c) */
+int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy);
+
+/* Quickly resets a compressor without having to reallocate anything. Same as calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). */
+int mz_deflateReset(mz_streamp pStream);
+
+/* mz_deflate() compresses the input to output, consuming as much of the input and producing as much output as possible. */
+/* Parameters: */
+/* pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */
+/* flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH. */
+/* Return values: */
+/* MZ_OK on success (when flushing, or if more input is needed but not available, and/or there's more output to be written but the output buffer is full). */
+/* MZ_STREAM_END if all input has been consumed and all output bytes have been written. Don't call mz_deflate() on the stream anymore. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+/* MZ_PARAM_ERROR if one of the parameters is invalid. */
+/* MZ_BUF_ERROR if no forward progress is possible because the input and/or output buffers are empty. (Fill up the input buffer or free up some output space and try again.) */
+int mz_deflate(mz_streamp pStream, int flush);
+
+/* mz_deflateEnd() deinitializes a compressor: */
+/* Return values: */
+/* MZ_OK on success. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+int mz_deflateEnd(mz_streamp pStream);
+
+/* mz_deflateBound() returns a (very) conservative upper bound on the amount of data that could be generated by deflate(), assuming flush is set to only MZ_NO_FLUSH or MZ_FINISH. */
+mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
+
+/* Single-call compression functions mz_compress() and mz_compress2(): */
+/* Returns MZ_OK on success, or one of the error codes from mz_deflate() on failure. */
+int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
+int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level);
+
+/* mz_compressBound() returns a (very) conservative upper bound on the amount of data that could be generated by calling mz_compress(). */
+mz_ulong mz_compressBound(mz_ulong source_len);
+
+/* Initializes a decompressor. */
+int mz_inflateInit(mz_streamp pStream);
+
+/* mz_inflateInit2() is like mz_inflateInit() with an additional option that controls the window size and whether or not the stream has been wrapped with a zlib header/footer: */
+/* window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate). */
+int mz_inflateInit2(mz_streamp pStream, int window_bits);
+
+/* Quickly resets a decompressor without having to reallocate anything. Same as calling mz_inflateEnd() followed by mz_inflateInit()/mz_inflateInit2(). */
+int mz_inflateReset(mz_streamp pStream);
+
+/* Decompresses the input stream to the output, consuming only as much of the input as needed, and writing as much to the output as possible. */
+/* Parameters: */
+/* pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */
+/* flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. */
+/* On the first call, if flush is MZ_FINISH it's assumed the input and output buffers are both sized large enough to decompress the entire stream in a single call (this is slightly faster). */
+/* MZ_FINISH implies that there are no more source bytes available beside what's already in the input buffer, and that the output buffer is large enough to hold the rest of the decompressed data. */
+/* Return values: */
+/* MZ_OK on success. Either more input is needed but not available, and/or there's more output to be written but the output buffer is full. */
+/* MZ_STREAM_END if all needed input has been consumed and all output bytes have been written. For zlib streams, the adler-32 of the decompressed data has also been verified. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+/* MZ_DATA_ERROR if the deflate stream is invalid. */
+/* MZ_PARAM_ERROR if one of the parameters is invalid. */
+/* MZ_BUF_ERROR if no forward progress is possible because the input buffer is empty but the inflater needs more input to continue, or if the output buffer is not large enough. Call mz_inflate() again */
+/* with more input data, or with more room in the output buffer (except when using single call decompression, described above). */
+int mz_inflate(mz_streamp pStream, int flush);
+
+/* Deinitializes a decompressor. */
+int mz_inflateEnd(mz_streamp pStream);
+
+/* Single-call decompression. */
+/* Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. */
+int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
+
+/* Returns a string description of the specified error code, or NULL if the error code is invalid. */
+const char *mz_error(int err);
+
+#endif /* MINIZ_NO_ZLIB_APIS */
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_common.h b/platform/linux-generic/miniz/miniz_common.h
new file mode 100644
index 000000000..0945775c8
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_common.h
@@ -0,0 +1,68 @@
+#pragma once
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* ------------------- Types and macros */
+typedef unsigned char mz_uint8;
+typedef signed short mz_int16;
+typedef unsigned short mz_uint16;
+typedef unsigned int mz_uint32;
+typedef unsigned int mz_uint;
+typedef int64_t mz_int64;
+typedef uint64_t mz_uint64;
+typedef int mz_bool;
+
+#define MZ_FALSE (0)
+#define MZ_TRUE (1)
+
+/* Works around MSVC's spammy "warning C4127: conditional expression is constant" message. */
+#ifdef _MSC_VER
+#define MZ_MACRO_END while (0, 0)
+#else
+#define MZ_MACRO_END while (0)
+#endif
+
+#define MZ_ASSERT(x) assert(x)
+
+#define MZ_MALLOC(x) NULL
+#define MZ_FREE(x) (void)x, ((void)0)
+#define MZ_REALLOC(p, x) NULL
+
+#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
+#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
+#else
+#define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
+#define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
+#endif
+
+#define MZ_READ_LE64(p) (((mz_uint64)MZ_READ_LE32(p)) | (((mz_uint64)MZ_READ_LE32((const mz_uint8 *)(p) + sizeof(mz_uint32))) << 32U))
+
+#ifdef _MSC_VER
+#define MZ_FORCEINLINE __forceinline
+#elif defined(__GNUC__)
+#define MZ_FORCEINLINE __inline__ __attribute__((__always_inline__))
+#else
+#define MZ_FORCEINLINE inline
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void *miniz_def_alloc_func(void *opaque, size_t items, size_t size);
+extern void miniz_def_free_func(void *opaque, void *address);
+extern void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size);
+
+#define MZ_UINT16_MAX (0xFFFFU)
+#define MZ_UINT32_MAX (0xFFFFFFFFU)
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tdef.c b/platform/linux-generic/miniz/miniz_tdef.c
new file mode 100644
index 000000000..477a1c5df
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tdef.c
@@ -0,0 +1,1564 @@
+/**************************************************************************
+ *
+ * Copyright 2013-2014 RAD Game Tools and Valve Software
+ * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "miniz_tdef.h"
+#include "miniz.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- Low-level Compression (independent from all decompression API's) */
+
+/* Purposely making these tables static for faster init and thread safety. */
+static const mz_uint16 s_tdefl_len_sym[256] =
+ {
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
+ 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
+ 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
+ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
+ };
+
+static const mz_uint8 s_tdefl_len_extra[256] =
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
+ };
+
+static const mz_uint8 s_tdefl_small_dist_sym[512] =
+ {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
+ };
+
+static const mz_uint8 s_tdefl_small_dist_extra[512] =
+ {
+ 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7
+ };
+
+static const mz_uint8 s_tdefl_large_dist_sym[128] =
+ {
+ 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
+ };
+
+static const mz_uint8 s_tdefl_large_dist_extra[128] =
+ {
+ 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
+ };
+
+/* Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values. */
+typedef struct
+{
+ mz_uint16 m_key, m_sym_index;
+} tdefl_sym_freq;
+static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1)
+{
+ mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
+ tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
+ MZ_CLEAR_OBJ(hist);
+ for (i = 0; i < num_syms; i++)
+ {
+ mz_uint freq = pSyms0[i].m_key;
+ hist[freq & 0xFF]++;
+ hist[256 + ((freq >> 8) & 0xFF)]++;
+ }
+ while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
+ total_passes--;
+ for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
+ {
+ const mz_uint32 *pHist = &hist[pass << 8];
+ mz_uint offsets[256], cur_ofs = 0;
+ for (i = 0; i < 256; i++)
+ {
+ offsets[i] = cur_ofs;
+ cur_ofs += pHist[i];
+ }
+ for (i = 0; i < num_syms; i++)
+ pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
+ {
+ tdefl_sym_freq *t = pCur_syms;
+ pCur_syms = pNew_syms;
+ pNew_syms = t;
+ }
+ }
+ return pCur_syms;
+}
+
+/* tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. */
+static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
+{
+ int root, leaf, next, avbl, used, dpth;
+ if (n == 0)
+ return;
+ else if (n == 1)
+ {
+ A[0].m_key = 1;
+ return;
+ }
+ A[0].m_key += A[1].m_key;
+ root = 0;
+ leaf = 2;
+ for (next = 1; next < n - 1; next++)
+ {
+ if (leaf >= n || A[root].m_key < A[leaf].m_key)
+ {
+ A[next].m_key = A[root].m_key;
+ A[root++].m_key = (mz_uint16)next;
+ }
+ else
+ A[next].m_key = A[leaf++].m_key;
+ if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key))
+ {
+ A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
+ A[root++].m_key = (mz_uint16)next;
+ }
+ else
+ A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
+ }
+ A[n - 2].m_key = 0;
+ for (next = n - 3; next >= 0; next--)
+ A[next].m_key = A[A[next].m_key].m_key + 1;
+ avbl = 1;
+ used = dpth = 0;
+ root = n - 2;
+ next = n - 1;
+ while (avbl > 0)
+ {
+ while (root >= 0 && (int)A[root].m_key == dpth)
+ {
+ used++;
+ root--;
+ }
+ while (avbl > used)
+ {
+ A[next--].m_key = (mz_uint16)(dpth);
+ avbl--;
+ }
+ avbl = 2 * used;
+ dpth++;
+ used = 0;
+ }
+}
+
+/* Limits canonical Huffman code table's max code size. */
+enum
+{
+ TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32
+};
+static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
+{
+ int i;
+ mz_uint32 total = 0;
+ if (code_list_len <= 1)
+ return;
+ for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
+ pNum_codes[max_code_size] += pNum_codes[i];
+ for (i = max_code_size; i > 0; i--)
+ total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
+ while (total != (1UL << max_code_size))
+ {
+ pNum_codes[max_code_size]--;
+ for (i = max_code_size - 1; i > 0; i--)
+ if (pNum_codes[i])
+ {
+ pNum_codes[i]--;
+ pNum_codes[i + 1] += 2;
+ break;
+ }
+ total--;
+ }
+}
+
+static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
+{
+ int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
+ mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
+ MZ_CLEAR_OBJ(num_codes);
+ if (static_table)
+ {
+ for (i = 0; i < table_len; i++)
+ num_codes[d->m_huff_code_sizes[table_num][i]]++;
+ }
+ else
+ {
+ tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
+ int num_used_syms = 0;
+ const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
+ for (i = 0; i < table_len; i++)
+ if (pSym_count[i])
+ {
+ syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
+ syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
+ }
+
+ pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
+ tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
+
+ for (i = 0; i < num_used_syms; i++)
+ num_codes[pSyms[i].m_key]++;
+
+ tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
+
+ MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
+ MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
+ for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
+ for (l = num_codes[i]; l > 0; l--)
+ d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
+ }
+
+ next_code[1] = 0;
+ for (j = 0, i = 2; i <= code_size_limit; i++)
+ next_code[i] = j = ((j + num_codes[i - 1]) << 1);
+
+ for (i = 0; i < table_len; i++)
+ {
+ mz_uint rev_code = 0, code, code_size;
+ if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
+ continue;
+ code = next_code[code_size]++;
+ for (l = code_size; l > 0; l--, code >>= 1)
+ rev_code = (rev_code << 1) | (code & 1);
+ d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
+ }
+}
+
+#define TDEFL_PUT_BITS(b, l) \
+ do \
+ { \
+ mz_uint bits = b; \
+ mz_uint len = l; \
+ MZ_ASSERT(bits <= ((1U << len) - 1U)); \
+ d->m_bit_buffer |= (bits << d->m_bits_in); \
+ d->m_bits_in += len; \
+ while (d->m_bits_in >= 8) \
+ { \
+ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
+ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
+ d->m_bit_buffer >>= 8; \
+ d->m_bits_in -= 8; \
+ } \
+ } \
+ MZ_MACRO_END
+
+#define TDEFL_RLE_PREV_CODE_SIZE() \
+ { \
+ if (rle_repeat_count) \
+ { \
+ if (rle_repeat_count < 3) \
+ { \
+ d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
+ while (rle_repeat_count--) \
+ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
+ } \
+ else \
+ { \
+ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 16; \
+ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \
+ } \
+ rle_repeat_count = 0; \
+ } \
+ }
+
+#define TDEFL_RLE_ZERO_CODE_SIZE() \
+ { \
+ if (rle_z_count) \
+ { \
+ if (rle_z_count < 3) \
+ { \
+ d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
+ while (rle_z_count--) \
+ packed_code_sizes[num_packed_code_sizes++] = 0; \
+ } \
+ else if (rle_z_count <= 10) \
+ { \
+ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 17; \
+ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \
+ } \
+ else \
+ { \
+ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 18; \
+ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
+ } \
+ rle_z_count = 0; \
+ } \
+ }
+
+static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+
+static void tdefl_start_dynamic_block(tdefl_compressor *d)
+{
+ int num_lit_codes, num_dist_codes, num_bit_lengths;
+ mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
+ mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;
+
+ d->m_huff_count[0][256] = 1;
+
+ tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
+ tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
+
+ for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
+ if (d->m_huff_code_sizes[0][num_lit_codes - 1])
+ break;
+ for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
+ if (d->m_huff_code_sizes[1][num_dist_codes - 1])
+ break;
+
+ memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
+ memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
+ total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
+ num_packed_code_sizes = 0;
+ rle_z_count = 0;
+ rle_repeat_count = 0;
+
+ memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
+ for (i = 0; i < total_code_sizes_to_pack; i++)
+ {
+ mz_uint8 code_size = code_sizes_to_pack[i];
+ if (!code_size)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ if (++rle_z_count == 138)
+ {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ }
+ }
+ else
+ {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ if (code_size != prev_code_size)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1);
+ packed_code_sizes[num_packed_code_sizes++] = code_size;
+ }
+ else if (++rle_repeat_count == 6)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ }
+ }
+ prev_code_size = code_size;
+ }
+ if (rle_repeat_count)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ }
+ else
+ {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ }
+
+ tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
+
+ TDEFL_PUT_BITS(2, 2);
+
+ TDEFL_PUT_BITS(num_lit_codes - 257, 5);
+ TDEFL_PUT_BITS(num_dist_codes - 1, 5);
+
+ for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
+ if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
+ break;
+ num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
+ TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
+ for (i = 0; (int)i < num_bit_lengths; i++)
+ TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
+
+ for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;)
+ {
+ mz_uint code = packed_code_sizes[packed_code_sizes_index++];
+ MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
+ TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
+ if (code >= 16)
+ TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
+ }
+}
+
+static void tdefl_start_static_block(tdefl_compressor *d)
+{
+ mz_uint i;
+ mz_uint8 *p = &d->m_huff_code_sizes[0][0];
+
+ for (i = 0; i <= 143; ++i)
+ *p++ = 8;
+ for (; i <= 255; ++i)
+ *p++ = 9;
+ for (; i <= 279; ++i)
+ *p++ = 7;
+ for (; i <= 287; ++i)
+ *p++ = 8;
+
+ memset(d->m_huff_code_sizes[1], 5, 32);
+
+ tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
+ tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
+
+ TDEFL_PUT_BITS(1, 2);
+}
+
+static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
+static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
+{
+ mz_uint flags;
+ mz_uint8 *pLZ_codes;
+ mz_uint8 *pOutput_buf = d->m_pOutput_buf;
+ mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
+ mz_uint64 bit_buffer = d->m_bit_buffer;
+ mz_uint bits_in = d->m_bits_in;
+
+#define TDEFL_PUT_BITS_FAST(b, l) \
+ { \
+ bit_buffer |= (((mz_uint64)(b)) << bits_in); \
+ bits_in += (l); \
+ }
+
+ flags = 1;
+ for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1)
+ {
+ if (flags == 1)
+ flags = *pLZ_codes++ | 0x100;
+
+ if (flags & 1)
+ {
+ mz_uint s0, s1, n0, n1, sym, num_extra_bits;
+ mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
+ pLZ_codes += 3;
+
+ MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
+
+ /* This sequence coaxes MSVC into using cmov's vs. jmp's. */
+ s0 = s_tdefl_small_dist_sym[match_dist & 511];
+ n0 = s_tdefl_small_dist_extra[match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[match_dist >> 8];
+ n1 = s_tdefl_large_dist_extra[match_dist >> 8];
+ sym = (match_dist < 512) ? s0 : s1;
+ num_extra_bits = (match_dist < 512) ? n0 : n1;
+
+ MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
+ }
+ else
+ {
+ mz_uint lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+
+ if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
+ {
+ flags >>= 1;
+ lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+
+ if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
+ {
+ flags >>= 1;
+ lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+ }
+ }
+ }
+
+ if (pOutput_buf >= d->m_pOutput_buf_end)
+ return MZ_FALSE;
+
+ *(mz_uint64 *)pOutput_buf = bit_buffer;
+ pOutput_buf += (bits_in >> 3);
+ bit_buffer >>= (bits_in & ~7);
+ bits_in &= 7;
+ }
+
+#undef TDEFL_PUT_BITS_FAST
+
+ d->m_pOutput_buf = pOutput_buf;
+ d->m_bits_in = 0;
+ d->m_bit_buffer = 0;
+
+ while (bits_in)
+ {
+ mz_uint32 n = MZ_MIN(bits_in, 16);
+ TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
+ bit_buffer >>= n;
+ bits_in -= n;
+ }
+
+ TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
+
+ return (d->m_pOutput_buf < d->m_pOutput_buf_end);
+}
+#else
+static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
+{
+ mz_uint flags;
+ mz_uint8 *pLZ_codes;
+
+ flags = 1;
+ for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1)
+ {
+ if (flags == 1)
+ flags = *pLZ_codes++ | 0x100;
+ if (flags & 1)
+ {
+ mz_uint sym, num_extra_bits;
+ mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
+ pLZ_codes += 3;
+
+ MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
+
+ if (match_dist < 512)
+ {
+ sym = s_tdefl_small_dist_sym[match_dist];
+ num_extra_bits = s_tdefl_small_dist_extra[match_dist];
+ }
+ else
+ {
+ sym = s_tdefl_large_dist_sym[match_dist >> 8];
+ num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
+ }
+ MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
+ }
+ else
+ {
+ mz_uint lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+ }
+ }
+
+ TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
+
+ return (d->m_pOutput_buf < d->m_pOutput_buf_end);
+}
+#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS */
+
+static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
+{
+ if (static_block)
+ tdefl_start_static_block(d);
+ else
+ tdefl_start_dynamic_block(d);
+ return tdefl_compress_lz_codes(d);
+}
+
+static int tdefl_flush_block(tdefl_compressor *d, int flush)
+{
+ mz_uint saved_bit_buf, saved_bits_in;
+ mz_uint8 *pSaved_output_buf;
+ mz_bool comp_block_succeeded = MZ_FALSE;
+ int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
+ mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;
+
+ d->m_pOutput_buf = pOutput_buf_start;
+ d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
+
+ MZ_ASSERT(!d->m_output_flush_remaining);
+ d->m_output_flush_ofs = 0;
+ d->m_output_flush_remaining = 0;
+
+ *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
+ d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
+
+ if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
+ {
+ TDEFL_PUT_BITS(0x78, 8);
+ TDEFL_PUT_BITS(0x01, 8);
+ }
+
+ TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
+
+ pSaved_output_buf = d->m_pOutput_buf;
+ saved_bit_buf = d->m_bit_buffer;
+ saved_bits_in = d->m_bits_in;
+
+ if (!use_raw_block)
+ comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));
+
+ /* If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. */
+ if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
+ ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size))
+ {
+ mz_uint i;
+ d->m_pOutput_buf = pSaved_output_buf;
+ d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
+ TDEFL_PUT_BITS(0, 2);
+ if (d->m_bits_in)
+ {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF)
+ {
+ TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
+ }
+ for (i = 0; i < d->m_total_lz_bytes; ++i)
+ {
+ TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
+ }
+ }
+ /* Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes. */
+ else if (!comp_block_succeeded)
+ {
+ d->m_pOutput_buf = pSaved_output_buf;
+ d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
+ tdefl_compress_block(d, MZ_TRUE);
+ }
+
+ if (flush)
+ {
+ if (flush == TDEFL_FINISH)
+ {
+ if (d->m_bits_in)
+ {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER)
+ {
+ mz_uint i, a = d->m_adler32;
+ for (i = 0; i < 4; i++)
+ {
+ TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
+ a <<= 8;
+ }
+ }
+ }
+ else
+ {
+ mz_uint i, z = 0;
+ TDEFL_PUT_BITS(0, 3);
+ if (d->m_bits_in)
+ {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ for (i = 2; i; --i, z ^= 0xFFFF)
+ {
+ TDEFL_PUT_BITS(z & 0xFFFF, 16);
+ }
+ }
+ }
+
+ MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
+
+ memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
+ memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
+
+ d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
+ d->m_pLZ_flags = d->m_lz_code_buf;
+ d->m_num_flags_left = 8;
+ d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
+ d->m_total_lz_bytes = 0;
+ d->m_block_index++;
+
+ if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0)
+ {
+ if (d->m_pPut_buf_func)
+ {
+ *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
+ if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
+ return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
+ }
+ else if (pOutput_buf_start == d->m_output_buf)
+ {
+ int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
+ memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
+ d->m_out_buf_ofs += bytes_to_copy;
+ if ((n -= bytes_to_copy) != 0)
+ {
+ d->m_output_flush_ofs = bytes_to_copy;
+ d->m_output_flush_remaining = n;
+ }
+ }
+ else
+ {
+ d->m_out_buf_ofs += n;
+ }
+ }
+
+ return d->m_output_flush_remaining;
+}
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+static mz_uint16 TDEFL_READ_UNALIGNED_WORD(const mz_uint8* p)
+{
+ mz_uint16 ret;
+ memcpy(&ret, p, sizeof(mz_uint16));
+ return ret;
+}
+static mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p)
+{
+ mz_uint16 ret;
+ memcpy(&ret, p, sizeof(mz_uint16));
+ return ret;
+}
+#else
+#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
+#define TDEFL_READ_UNALIGNED_WORD2(p) *(const mz_uint16 *)(p)
+#endif
+static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
+{
+ mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
+ mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
+ const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
+ mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD2(s);
+ MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
+ if (max_match_len <= match_len)
+ return;
+ for (;;)
+ {
+ for (;;)
+ {
+ if (--num_probes_left == 0)
+ return;
+#define TDEFL_PROBE \
+ next_probe_pos = d->m_next[probe_pos]; \
+ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
+ return; \
+ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
+ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
+ break;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ }
+ if (!dist)
+ break;
+ q = (const mz_uint16 *)(d->m_dict + probe_pos);
+ if (TDEFL_READ_UNALIGNED_WORD2(q) != s01)
+ continue;
+ p = s;
+ probe_len = 32;
+ do
+ {
+ } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
+ (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
+ if (!probe_len)
+ {
+ *pMatch_dist = dist;
+ *pMatch_len = MZ_MIN(max_match_len, (mz_uint)TDEFL_MAX_MATCH_LEN);
+ break;
+ }
+ else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len)
+ {
+ *pMatch_dist = dist;
+ if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len)
+ break;
+ c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
+ }
+ }
+}
+#else
+static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
+{
+ mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
+ mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
+ const mz_uint8 *s = d->m_dict + pos, *p, *q;
+ mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
+ MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
+ if (max_match_len <= match_len)
+ return;
+ for (;;)
+ {
+ for (;;)
+ {
+ if (--num_probes_left == 0)
+ return;
+#define TDEFL_PROBE \
+ next_probe_pos = d->m_next[probe_pos]; \
+ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
+ return; \
+ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
+ if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) \
+ break;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ }
+ if (!dist)
+ break;
+ p = s;
+ q = d->m_dict + probe_pos;
+ for (probe_len = 0; probe_len < max_match_len; probe_len++)
+ if (*p++ != *q++)
+ break;
+ if (probe_len > match_len)
+ {
+ *pMatch_dist = dist;
+ if ((*pMatch_len = match_len = probe_len) == max_match_len)
+ return;
+ c0 = d->m_dict[pos + match_len];
+ c1 = d->m_dict[pos + match_len - 1];
+ }
+ }
+}
+#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES */
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+/* Reads a 32-bit word from a possibly unaligned address. The memcpy form avoids
+   undefined behavior on strict-alignment targets; compilers lower it to a single
+   load where the hardware permits. */
+static mz_uint32 TDEFL_READ_UNALIGNED_WORD32(const mz_uint8* p)
+{
+    mz_uint32 ret;
+    memcpy(&ret, p, sizeof(mz_uint32));
+    return ret;
+}
+#else
+/* Direct unaligned load; only valid when the build explicitly allows it. */
+#define TDEFL_READ_UNALIGNED_WORD32(p) *(const mz_uint32 *)(p)
+#endif
+/* Fast greedy compressor path (probe count 1): single hash probe per position,
+   trigram hashing, 16-bit-word match extension. Selected only on little-endian
+   targets that allow unaligned loads. Returns MZ_FALSE when tdefl_flush_block
+   reported an error, MZ_TRUE otherwise. */
+static mz_bool tdefl_compress_fast(tdefl_compressor *d)
+{
+    /* Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio. */
+    mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
+    mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
+    mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
+
+    while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size)))
+    {
+        const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
+        /* Top up the lookahead from the source buffer. */
+        mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
+        mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
+        d->m_src_buf_left -= num_bytes_to_process;
+        lookahead_size += num_bytes_to_process;
+
+        while (num_bytes_to_process)
+        {
+            mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
+            memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
+            /* Mirror the dictionary's first MAX_MATCH_LEN-1 bytes past its end so
+               match comparisons can run past the wrap point without masking. */
+            if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+                memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
+            d->m_pSrc += n;
+            dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
+            num_bytes_to_process -= n;
+        }
+
+        dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
+        /* Without a flush request, wait until the lookahead is completely filled. */
+        if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
+            break;
+
+        while (lookahead_size >= 4)
+        {
+            mz_uint cur_match_dist, cur_match_len = 1;
+            mz_uint8 *pCur_dict = d->m_dict + cur_pos;
+            /* Hash the next 3 bytes and probe the level-1 hash table. */
+            mz_uint first_trigram = TDEFL_READ_UNALIGNED_WORD32(pCur_dict) & 0xFFFFFF;
+            mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
+            mz_uint probe_pos = d->m_hash[hash];
+            d->m_hash[hash] = (mz_uint16)lookahead_pos;
+
+            if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((TDEFL_READ_UNALIGNED_WORD32(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram))
+            {
+                /* Trigram hit: extend the match 8 bytes per iteration using 16-bit word compares. */
+                const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
+                const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
+                mz_uint32 probe_len = 32;
+                do
+                {
+                } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
+                         (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
+                cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
+                /* All 32 iterations compared equal: maximal match, or a degenerate self-match (dist 0). */
+                if (!probe_len)
+                    cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
+
+                if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)))
+                {
+                    /* Match too short (or a len-3 match too far away to be worth coding): emit a literal. */
+                    cur_match_len = 1;
+                    *pLZ_code_buf++ = (mz_uint8)first_trigram;
+                    *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+                    d->m_huff_count[0][(mz_uint8)first_trigram]++;
+                }
+                else
+                {
+                    mz_uint32 s0, s1;
+                    cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
+
+                    MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
+
+                    cur_match_dist--;
+
+                    /* Emit match record: 1 length byte + 2 distance bytes, flag bit = 1. */
+                    pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+                    /* NOTE(review): this copies sizeof(cur_match_dist) (an mz_uint, typically
+                       4 bytes) though only 2 are meaningful; byte [3] is rewritten by the next
+                       emit since the cursor only advances 3 — confirm against upstream miniz,
+                       which copies an mz_uint16 here. */
+                    memcpy(&pLZ_code_buf[1], &cur_match_dist, sizeof(cur_match_dist));
+#else
+                    *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
+#endif
+                    pLZ_code_buf += 3;
+                    *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
+
+                    s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
+                    s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
+                    d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
+
+                    d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
+                }
+            }
+            else
+            {
+                /* No trigram hit: emit a literal. */
+                *pLZ_code_buf++ = (mz_uint8)first_trigram;
+                *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+                d->m_huff_count[0][(mz_uint8)first_trigram]++;
+            }
+
+            if (--num_flags_left == 0)
+            {
+                num_flags_left = 8;
+                pLZ_flags = pLZ_code_buf++;
+            }
+
+            total_lz_bytes += cur_match_len;
+            lookahead_pos += cur_match_len;
+            dict_size = MZ_MIN(dict_size + cur_match_len, (mz_uint)TDEFL_LZ_DICT_SIZE);
+            cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
+            MZ_ASSERT(lookahead_size >= cur_match_len);
+            lookahead_size -= cur_match_len;
+
+            if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
+            {
+                /* Code buffer nearly full: spill local state back and flush a deflate block. */
+                int n;
+                d->m_lookahead_pos = lookahead_pos;
+                d->m_lookahead_size = lookahead_size;
+                d->m_dict_size = dict_size;
+                d->m_total_lz_bytes = total_lz_bytes;
+                d->m_pLZ_code_buf = pLZ_code_buf;
+                d->m_pLZ_flags = pLZ_flags;
+                d->m_num_flags_left = num_flags_left;
+                if ((n = tdefl_flush_block(d, 0)) != 0)
+                    return (n < 0) ? MZ_FALSE : MZ_TRUE;
+                total_lz_bytes = d->m_total_lz_bytes;
+                pLZ_code_buf = d->m_pLZ_code_buf;
+                pLZ_flags = d->m_pLZ_flags;
+                num_flags_left = d->m_num_flags_left;
+            }
+        }
+
+        /* Fewer than 4 bytes left in the lookahead: flush them out as literals. */
+        while (lookahead_size)
+        {
+            mz_uint8 lit = d->m_dict[cur_pos];
+
+            total_lz_bytes++;
+            *pLZ_code_buf++ = lit;
+            *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+            if (--num_flags_left == 0)
+            {
+                num_flags_left = 8;
+                pLZ_flags = pLZ_code_buf++;
+            }
+
+            d->m_huff_count[0][lit]++;
+
+            lookahead_pos++;
+            dict_size = MZ_MIN(dict_size + 1, (mz_uint)TDEFL_LZ_DICT_SIZE);
+            cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
+            lookahead_size--;
+
+            if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
+            {
+                int n;
+                d->m_lookahead_pos = lookahead_pos;
+                d->m_lookahead_size = lookahead_size;
+                d->m_dict_size = dict_size;
+                d->m_total_lz_bytes = total_lz_bytes;
+                d->m_pLZ_code_buf = pLZ_code_buf;
+                d->m_pLZ_flags = pLZ_flags;
+                d->m_num_flags_left = num_flags_left;
+                if ((n = tdefl_flush_block(d, 0)) != 0)
+                    return (n < 0) ? MZ_FALSE : MZ_TRUE;
+                total_lz_bytes = d->m_total_lz_bytes;
+                pLZ_code_buf = d->m_pLZ_code_buf;
+                pLZ_flags = d->m_pLZ_flags;
+                num_flags_left = d->m_num_flags_left;
+            }
+        }
+    }
+
+    /* Persist the cached locals back into the compressor state. */
+    d->m_lookahead_pos = lookahead_pos;
+    d->m_lookahead_size = lookahead_size;
+    d->m_dict_size = dict_size;
+    d->m_total_lz_bytes = total_lz_bytes;
+    d->m_pLZ_code_buf = pLZ_code_buf;
+    d->m_pLZ_flags = pLZ_flags;
+    d->m_num_flags_left = num_flags_left;
+    return MZ_TRUE;
+}
+#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */
+
+/* Emits one literal byte into the LZ code buffer: shifts a 0 bit into the current
+   flag byte, appends the literal, and bumps its Huffman frequency count. */
+static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
+{
+    d->m_huff_count[0][lit]++;
+    d->m_total_lz_bytes++;
+    *d->m_pLZ_code_buf++ = lit;
+    /* Flag bit 0 == literal (1 == match). */
+    *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
+    if (--d->m_num_flags_left == 0)
+    {
+        /* Current flag byte is full; reserve the next code byte as the new flag byte. */
+        d->m_num_flags_left = 8;
+        d->m_pLZ_flags = d->m_pLZ_code_buf++;
+    }
+}
+
+/* Emits a (length, distance) match into the LZ code buffer: one length byte plus
+   two little-endian distance bytes, shifts a 1 flag bit in, and updates the
+   length and distance Huffman frequency tables. */
+static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
+{
+    mz_uint32 s0, s1;
+
+    MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));
+
+    d->m_total_lz_bytes += match_len;
+
+    d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
+
+    /* Distances are stored biased by -1 so the full 32 KB dictionary fits in 16 bits. */
+    match_dist -= 1;
+    d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
+    d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
+    d->m_pLZ_code_buf += 3;
+
+    /* Flag bit 1 == match (0 == literal). */
+    *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
+    if (--d->m_num_flags_left == 0)
+    {
+        d->m_num_flags_left = 8;
+        d->m_pLZ_flags = d->m_pLZ_code_buf++;
+    }
+
+    s0 = s_tdefl_small_dist_sym[match_dist & 511];
+    s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
+    d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
+
+    /* Guard stays meaningful in release builds where MZ_ASSERT compiles out. */
+    if (match_len >= TDEFL_MIN_MATCH_LEN)
+        d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
+}
+
+/* Normal compressor path: hash-chain match finding via tdefl_find_match() plus a
+   one-step lazy (or greedy) parser. Returns MZ_FALSE when tdefl_flush_block
+   reported an error, MZ_TRUE otherwise. */
+static mz_bool tdefl_compress_normal(tdefl_compressor *d)
+{
+    const mz_uint8 *pSrc = d->m_pSrc;
+    size_t src_buf_left = d->m_src_buf_left;
+    tdefl_flush flush = d->m_flush;
+
+    while ((src_buf_left) || ((flush) && (d->m_lookahead_size)))
+    {
+        mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
+        /* Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN. */
+        if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1))
+        {
+            mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
+            /* Rolling 3-byte hash seeded with the two bytes already in the window at ins_pos. */
+            mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
+            mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
+            const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
+            src_buf_left -= num_bytes_to_process;
+            d->m_lookahead_size += num_bytes_to_process;
+            while (pSrc != pSrc_end)
+            {
+                mz_uint8 c = *pSrc++;
+                d->m_dict[dst_pos] = c;
+                /* Mirror the dictionary's first MAX_MATCH_LEN-1 bytes past its end so
+                   match comparisons can run past the wrap point. */
+                if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+                    d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
+                hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
+                d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
+                d->m_hash[hash] = (mz_uint16)(ins_pos);
+                dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
+                ins_pos++;
+            }
+        }
+        else
+        {
+            /* Startup path: not enough data to hash 3-byte groups yet; insert one byte at a time. */
+            while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
+            {
+                mz_uint8 c = *pSrc++;
+                mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
+                src_buf_left--;
+                d->m_dict[dst_pos] = c;
+                if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+                    d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
+                if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN)
+                {
+                    mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
+                    mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
+                    d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
+                    d->m_hash[hash] = (mz_uint16)(ins_pos);
+                }
+            }
+        }
+        d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
+        /* Without a flush request, wait until the lookahead is completely full before parsing. */
+        if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
+            break;
+
+        /* Simple lazy/greedy parsing state machine. */
+        len_to_move = 1;
+        cur_match_dist = 0;
+        cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
+        cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
+        if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS))
+        {
+            /* RLE mode: only search for runs of the previous byte (distance-1 matches). */
+            if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))
+            {
+                mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
+                cur_match_len = 0;
+                while (cur_match_len < d->m_lookahead_size)
+                {
+                    if (d->m_dict[cur_pos + cur_match_len] != c)
+                        break;
+                    cur_match_len++;
+                }
+                if (cur_match_len < TDEFL_MIN_MATCH_LEN)
+                    cur_match_len = 0;
+                else
+                    cur_match_dist = 1;
+            }
+        }
+        else
+        {
+            tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
+        }
+        /* Reject matches that cost more than they save (far len-3 matches), degenerate
+           self-matches, and short matches when filtering is requested. */
+        if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5)))
+        {
+            cur_match_dist = cur_match_len = 0;
+        }
+        if (d->m_saved_match_len)
+        {
+            /* Lazy parse: a match was deferred at the previous position; keep whichever is longer. */
+            if (cur_match_len > d->m_saved_match_len)
+            {
+                tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
+                if (cur_match_len >= 128)
+                {
+                    /* Long enough that further deferral cannot pay off: take it now. */
+                    tdefl_record_match(d, cur_match_len, cur_match_dist);
+                    d->m_saved_match_len = 0;
+                    len_to_move = cur_match_len;
+                }
+                else
+                {
+                    d->m_saved_lit = d->m_dict[cur_pos];
+                    d->m_saved_match_dist = cur_match_dist;
+                    d->m_saved_match_len = cur_match_len;
+                }
+            }
+            else
+            {
+                tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
+                len_to_move = d->m_saved_match_len - 1;
+                d->m_saved_match_len = 0;
+            }
+        }
+        else if (!cur_match_dist)
+            tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
+        else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128))
+        {
+            tdefl_record_match(d, cur_match_len, cur_match_dist);
+            len_to_move = cur_match_len;
+        }
+        else
+        {
+            /* Lazy: hold this match and see if the next position yields a longer one. */
+            d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
+            d->m_saved_match_dist = cur_match_dist;
+            d->m_saved_match_len = cur_match_len;
+        }
+        /* Move the lookahead forward by len_to_move bytes. */
+        d->m_lookahead_pos += len_to_move;
+        MZ_ASSERT(d->m_lookahead_size >= len_to_move);
+        d->m_lookahead_size -= len_to_move;
+        d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
+        /* Check if it's time to flush the current LZ codes to the internal output buffer. */
+        if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
+            ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))))
+        {
+            int n;
+            d->m_pSrc = pSrc;
+            d->m_src_buf_left = src_buf_left;
+            if ((n = tdefl_flush_block(d, 0)) != 0)
+                return (n < 0) ? MZ_FALSE : MZ_TRUE;
+        }
+    }
+
+    d->m_pSrc = pSrc;
+    d->m_src_buf_left = src_buf_left;
+    return MZ_TRUE;
+}
+
+/* Copies pending compressed bytes from the internal output buffer to the user's
+   output buffer (buffer mode) and updates the reported in/out byte counts. */
+static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
+{
+    if (d->m_pIn_buf_size)
+    {
+        /* Report how many input bytes have been consumed so far. */
+        *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
+    }
+
+    if (d->m_pOut_buf_size)
+    {
+        size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
+        memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
+        d->m_output_flush_ofs += (mz_uint)n;
+        d->m_output_flush_remaining -= (mz_uint)n;
+        d->m_out_buf_ofs += n;
+
+        *d->m_pOut_buf_size = d->m_out_buf_ofs;
+    }
+
+    /* DONE only once the final block is written and completely drained. */
+    return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
+}
+
+/* Main streaming entry point. Consumes up to *pIn_buf_size input bytes and writes
+   up to *pOut_buf_size compressed bytes; both counts are updated on return with
+   the amounts actually processed. Exactly one output mode must be active: either
+   the callback given to tdefl_init(), or the pOut_buf/pOut_buf_size pair here. */
+tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
+{
+    if (!d)
+    {
+        if (pIn_buf_size)
+            *pIn_buf_size = 0;
+        if (pOut_buf_size)
+            *pOut_buf_size = 0;
+        return TDEFL_STATUS_BAD_PARAM;
+    }
+
+    d->m_pIn_buf = pIn_buf;
+    d->m_pIn_buf_size = pIn_buf_size;
+    d->m_pOut_buf = pOut_buf;
+    d->m_pOut_buf_size = pOut_buf_size;
+    d->m_pSrc = (const mz_uint8 *)(pIn_buf);
+    d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
+    d->m_out_buf_ofs = 0;
+    d->m_flush = flush;
+
+    /* Reject: mixing callback and buffer output modes, calls after a prior error,
+       new data after TDEFL_FINISH, or nonzero sizes paired with NULL buffers. */
+    if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
+        (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf))
+    {
+        if (pIn_buf_size)
+            *pIn_buf_size = 0;
+        if (pOut_buf_size)
+            *pOut_buf_size = 0;
+        return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
+    }
+    d->m_wants_to_finish |= (flush == TDEFL_FINISH);
+
+    /* Drain compressed output still pending from a previous call before compressing more. */
+    if ((d->m_output_flush_remaining) || (d->m_finished))
+        return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+    /* Probe count 1 + greedy parsing with no special modes selects the fast path. */
+    if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
+        ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
+        ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
+    {
+        if (!tdefl_compress_fast(d))
+            return d->m_prev_return_status;
+    }
+    else
+#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */
+    {
+        if (!tdefl_compress_normal(d))
+            return d->m_prev_return_status;
+    }
+
+    if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
+        d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);
+
+    /* All input consumed and a flush requested: emit the flush/final block now. */
+    if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining))
+    {
+        if (tdefl_flush_block(d, flush) < 0)
+            return d->m_prev_return_status;
+        d->m_finished = (flush == TDEFL_FINISH);
+        if (flush == TDEFL_FULL_FLUSH)
+        {
+            /* A full flush resets the dictionary so subsequent output is independent. */
+            MZ_CLEAR_OBJ(d->m_hash);
+            MZ_CLEAR_OBJ(d->m_next);
+            d->m_dict_size = 0;
+        }
+    }
+
+    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
+}
+
+/* Callback-mode variant of tdefl_compress(): requires tdefl_init() to have been
+   given a non-NULL put-buffer callback; always consumes the entire input buffer. */
+tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
+{
+    MZ_ASSERT(d->m_pPut_buf_func);
+    return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
+}
+
+/* Initializes/resets the compressor state; performs no heap allocation (the
+   caller owns d). flags: low 12 bits = max dictionary probes per search, plus
+   the TDEFL_* option flags. Always returns TDEFL_STATUS_OKAY. */
+tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
+{
+    d->m_pPut_buf_func = pPut_buf_func;
+    d->m_pPut_buf_user = pPut_buf_user;
+    d->m_flags = (mz_uint)(flags);
+    /* Probe budgets: [0] while the current best match is short, [1] once it is >= 32 bytes. */
+    d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
+    d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
+    d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
+    /* Skipping these large memsets speeds up init but makes output depend on prior memory contents. */
+    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
+        MZ_CLEAR_OBJ(d->m_hash);
+    d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
+    d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
+    /* Byte 0 of the code buffer is reserved for the first flags byte. */
+    d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
+    d->m_pLZ_flags = d->m_lz_code_buf;
+    d->m_num_flags_left = 8;
+    d->m_pOutput_buf = d->m_output_buf;
+    d->m_pOutput_buf_end = d->m_output_buf;
+    d->m_prev_return_status = TDEFL_STATUS_OKAY;
+    d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
+    /* Adler-32 starts at 1 per the zlib specification. */
+    d->m_adler32 = 1;
+    d->m_pIn_buf = NULL;
+    d->m_pOut_buf = NULL;
+    d->m_pIn_buf_size = NULL;
+    d->m_pOut_buf_size = NULL;
+    d->m_flush = TDEFL_NO_FLUSH;
+    d->m_pSrc = NULL;
+    d->m_src_buf_left = 0;
+    d->m_out_buf_ofs = 0;
+    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
+        MZ_CLEAR_OBJ(d->m_dict);
+    memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
+    memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
+    return TDEFL_STATUS_OKAY;
+}
+
+/* Returns the status of the most recent tdefl_compress()/tdefl_compress_buffer() call. */
+tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
+{
+    return d->m_prev_return_status;
+}
+
+/* Returns the running Adler-32 of the source data (only maintained when the
+   TDEFL_WRITE_ZLIB_HEADER or TDEFL_COMPUTE_ADLER32 flag is set). */
+mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
+{
+    return d->m_adler32;
+}
+
+/* Compresses an in-memory block, streaming the output through the supplied
+   callback. Allocates one tdefl_compressor on the heap for the duration of the
+   call. Returns MZ_TRUE on success, MZ_FALSE on bad args, OOM, or failure. */
+mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
+{
+    tdefl_compressor *pComp;
+    mz_bool ok = MZ_FALSE;
+    /* A callback is mandatory; a NULL buffer is only acceptable when empty. */
+    if ((!pPut_buf_func) || ((buf_len) && (!pBuf)))
+        return MZ_FALSE;
+    pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
+    if (pComp != NULL)
+    {
+        if (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY)
+            ok = (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
+        MZ_FREE(pComp);
+    }
+    return ok;
+}
+
+/* Growable (or fixed) memory sink used by the mem-to-heap/mem-to-mem helpers. */
+typedef struct
+{
+    size_t m_size, m_capacity; /* bytes written so far / bytes allocated */
+    mz_uint8 *m_pBuf;          /* output storage (heap-owned when m_expandable) */
+    mz_bool m_expandable;      /* if false, writes past m_capacity fail instead of growing */
+} tdefl_output_buffer;
+
+/* tdefl_put_buf_func_ptr adapter that appends to a tdefl_output_buffer, doubling
+   its capacity (minimum 128 bytes) as needed when the buffer is expandable.
+   Returns MZ_FALSE when growth is disallowed or reallocation fails. */
+static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
+{
+    tdefl_output_buffer *pOut = (tdefl_output_buffer *)pUser;
+    size_t needed = pOut->m_size + len;
+    if (needed > pOut->m_capacity)
+    {
+        size_t cap = pOut->m_capacity;
+        mz_uint8 *pGrown;
+        if (!pOut->m_expandable)
+            return MZ_FALSE;
+        /* Double (starting from at least 128) until the write fits. */
+        do
+            cap = MZ_MAX(128U, cap << 1U);
+        while (needed > cap);
+        pGrown = (mz_uint8 *)MZ_REALLOC(pOut->m_pBuf, cap);
+        if (!pGrown)
+            return MZ_FALSE;
+        pOut->m_pBuf = pGrown;
+        pOut->m_capacity = cap;
+    }
+    memcpy((mz_uint8 *)pOut->m_pBuf + pOut->m_size, pBuf, len);
+    pOut->m_size = needed;
+    return MZ_TRUE;
+}
+
+/* Compresses a block in memory to a heap block allocated via MZ_MALLOC/MZ_REALLOC.
+   On success returns the compressed data (caller frees) and sets *pOut_len to its
+   size; returns NULL on failure. Fixes vs. original: return NULL (not the mz_bool
+   constant MZ_FALSE) from a pointer-returning function, and free the partially
+   grown output buffer when compression fails so it is not leaked. */
+void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
+{
+    tdefl_output_buffer out_buf;
+    MZ_CLEAR_OBJ(out_buf);
+    if (!pOut_len)
+        return NULL;
+    *pOut_len = 0;
+    out_buf.m_expandable = MZ_TRUE;
+    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
+    {
+        /* The putter may have allocated/grown m_pBuf before the failure. */
+        MZ_FREE(out_buf.m_pBuf);
+        return NULL;
+    }
+    *pOut_len = out_buf.m_size;
+    return out_buf.m_pBuf;
+}
+
+/* Compresses a block in memory into a caller-supplied fixed-size buffer.
+   Returns the compressed size, or 0 on failure (including output overflow). */
+size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
+{
+    tdefl_output_buffer out_buf;
+    MZ_CLEAR_OBJ(out_buf);
+    if (pOut_buf == NULL)
+        return 0;
+    out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
+    out_buf.m_capacity = out_buf_len;
+    /* m_expandable stays MZ_FALSE: fail rather than grow the caller's buffer. */
+    return tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags) ? out_buf.m_size : 0;
+}
+
+/* Per-level dictionary probe counts; index = compression level 0..10. */
+static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
+
+/* level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression and it's fine if throughput to fall off a cliff on some files). */
+/* Translates zlib-style (level, window_bits, strategy) parameters into a tdefl
+   flag word: probe count from the level, greedy parsing for low levels, zlib
+   header for positive window_bits, and strategy-specific modes. Level 0 forces
+   raw blocks and takes precedence over the strategy. */
+mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy)
+{
+    mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL];
+    if (level <= 3)
+        comp_flags |= TDEFL_GREEDY_PARSING_FLAG;
+    if (window_bits > 0)
+        comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
+
+    if (!level)
+    {
+        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
+    }
+    else
+    {
+        switch (strategy)
+        {
+            case MZ_FILTERED:
+                comp_flags |= TDEFL_FILTER_MATCHES;
+                break;
+            case MZ_HUFFMAN_ONLY:
+                /* Zero probes: no LZ matching at all. */
+                comp_flags &= ~TDEFL_MAX_PROBES_MASK;
+                break;
+            case MZ_FIXED:
+                comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
+                break;
+            case MZ_RLE:
+                comp_flags |= TDEFL_RLE_MATCHES;
+                break;
+            default:
+                break;
+        }
+    }
+
+    return comp_flags;
+}
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal) */
+#endif
+
+/* Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at
+ http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
+ This is actually a modification of Alex's original code so PNG files generated by this function pass pngcheck. */
+/* Encodes a raw image as a complete PNG file in a heap buffer (zlib-compressed
+   IDAT, filter type 0 on every scanline). Returns the buffer (caller mz_free()s)
+   and sets *pLen_out to the file size, or returns NULL on failure. */
+void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip)
+{
+    /* Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was defined. */
+    static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
+    tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
+    tdefl_output_buffer out_buf;
+    int i, bpl = w * num_chans, y, z;
+    mz_uint32 c;
+    /* NOTE(review): w/h/num_chans are not validated here; bpl and (1 + bpl) * h
+       below are int arithmetic and can overflow for very large images, and
+       chans[num_chans] assumes num_chans is 1..4 — confirm callers constrain these. */
+    *pLen_out = 0;
+    if (!pComp)
+        return NULL;
+    MZ_CLEAR_OBJ(out_buf);
+    out_buf.m_expandable = MZ_TRUE;
+    /* 57 = 41-byte header area + 16-byte footer written below. */
+    out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
+    if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity)))
+    {
+        MZ_FREE(pComp);
+        return NULL;
+    }
+    /* write dummy header */
+    for (z = 41; z; --z)
+        tdefl_output_buffer_putter(&z, 1, &out_buf);
+    /* compress image data */
+    tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
+    for (y = 0; y < h; ++y)
+    {
+        /* z is 0 after the countdown above: each scanline is prefixed with PNG filter type 0 (None). */
+        tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
+        tdefl_compress_buffer(pComp, (const mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH);
+    }
+    if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE)
+    {
+        MZ_FREE(pComp);
+        MZ_FREE(out_buf.m_pBuf);
+        return NULL;
+    }
+    /* write real header */
+    *pLen_out = out_buf.m_size - 41;
+    {
+        /* PNG color-type byte per channel count (index 1..4); indices 0 unused. */
+        static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
+        /* PNG signature + IHDR chunk + IDAT chunk header, patched in place below. */
+        mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d,
+                                0x0a, 0x1a, 0x0a, 0x00, 0x00,
+                                0x00, 0x0d, 0x49, 0x48, 0x44,
+                                0x52, 0x00, 0x00, 0x00, 0x00,
+                                0x00, 0x00, 0x00, 0x00, 0x08,
+                                0x00, 0x00, 0x00, 0x00, 0x00,
+                                0x00, 0x00, 0x00, 0x00, 0x00,
+                                0x00, 0x00, 0x49, 0x44, 0x41,
+                                0x54 };
+        /* Patch in big-endian width/height, color type, and the IDAT length. */
+        pnghdr[18] = (mz_uint8)(w >> 8);
+        pnghdr[19] = (mz_uint8)w;
+        pnghdr[22] = (mz_uint8)(h >> 8);
+        pnghdr[23] = (mz_uint8)h;
+        pnghdr[25] = chans[num_chans];
+        pnghdr[33] = (mz_uint8)(*pLen_out >> 24);
+        pnghdr[34] = (mz_uint8)(*pLen_out >> 16);
+        pnghdr[35] = (mz_uint8)(*pLen_out >> 8);
+        pnghdr[36] = (mz_uint8)*pLen_out;
+        /* IHDR CRC-32 covers the 17 bytes starting at the chunk type. */
+        c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
+        for (i = 0; i < 4; ++i, c <<= 8)
+            ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
+        memcpy(out_buf.m_pBuf, pnghdr, 41);
+    }
+    /* write footer (IDAT CRC-32, followed by IEND chunk) */
+    if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf))
+    {
+        *pLen_out = 0;
+        MZ_FREE(pComp);
+        MZ_FREE(out_buf.m_pBuf);
+        return NULL;
+    }
+    /* IDAT CRC-32 covers the chunk type (4 bytes before the data) plus the data. */
+    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4);
+    for (i = 0; i < 4; ++i, c <<= 8)
+        (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
+    /* compute final size of file, grab compressed data buffer and return */
+    *pLen_out += 57;
+    MZ_FREE(pComp);
+    return out_buf.m_pBuf;
+}
+/* Convenience wrapper: writes the PNG at compression level 6 with no Y flip. */
+void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out)
+{
+    /* Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib API's where #defined out) */
+    return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE);
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tdef.h b/platform/linux-generic/miniz/miniz_tdef.h
new file mode 100644
index 000000000..25448b6fa
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tdef.h
@@ -0,0 +1,183 @@
+#pragma once
+#include "miniz.h"
+#include "miniz_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* ------------------- Low-level Compression API Definitions */
+
+/* Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently). */
+#define TDEFL_LESS_MEMORY 0
+
+/* tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search): */
+/* TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression). */
+enum
+{
+ TDEFL_HUFFMAN_ONLY = 0,
+ TDEFL_DEFAULT_MAX_PROBES = 128,
+ TDEFL_MAX_PROBES_MASK = 0xFFF
+};
+
+/* TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data. */
+/* TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers). */
+/* TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing. */
+/* TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory). */
+/* TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) */
+/* TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. */
+/* TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. */
+/* TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. */
+/* The low 12 bits are reserved to control the max # of hash probes per dictionary lookup (see TDEFL_MAX_PROBES_MASK). */
+enum
+{
+ TDEFL_WRITE_ZLIB_HEADER = 0x01000,
+ TDEFL_COMPUTE_ADLER32 = 0x02000,
+ TDEFL_GREEDY_PARSING_FLAG = 0x04000,
+ TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
+ TDEFL_RLE_MATCHES = 0x10000,
+ TDEFL_FILTER_MATCHES = 0x20000,
+ TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
+ TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
+};
+
+/* High level compression functions: */
+/* tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc(). */
+/* On entry: */
+/* pSrc_buf, src_buf_len: Pointer and size of source block to compress. */
+/* flags: The max match finder probes (default is 128) logically OR'd against the above flags. Higher probes are slower but improve compression. */
+/* On return: */
+/* Function returns a pointer to the compressed data, or NULL on failure. */
+/* *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data. */
+/* The caller must free() the returned block when it's no longer needed. */
+void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
+
+/* tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory. */
+/* Returns 0 on failure. */
+size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
+
+/* Compresses an image to a compressed PNG file in memory. */
+/* On entry: */
+/* pImage, w, h, and num_chans describe the image to compress. num_chans may be 1, 2, 3, or 4. */
+/* The image pitch in bytes per scanline will be w*num_chans. The leftmost pixel on the top scanline is stored first in memory. */
+/* level may range from [0,10], use MZ_NO_COMPRESSION, MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc. or a decent default is MZ_DEFAULT_LEVEL */
+/* If flip is true, the image will be flipped on the Y axis (useful for OpenGL apps). */
+/* On return: */
+/* Function returns a pointer to the compressed data, or NULL on failure. */
+/* *pLen_out will be set to the size of the PNG image file. */
+/* The caller must mz_free() the returned heap block (which will typically be larger than *pLen_out) when it's no longer needed. */
+void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip);
+void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out);
+
+/* Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called TDEFL_OUT_BUF_SIZE at a time. */
+typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
+
+/* tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally. */
+mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
+
+/* Core tdefl dimensioning constants (fixed by the DEFLATE format). */
+enum
+{
+    TDEFL_MAX_HUFF_TABLES = 3,      /* literal/length, distance, and code-length tables */
+    TDEFL_MAX_HUFF_SYMBOLS_0 = 288, /* literal/length alphabet size */
+    TDEFL_MAX_HUFF_SYMBOLS_1 = 32,  /* distance alphabet size */
+    TDEFL_MAX_HUFF_SYMBOLS_2 = 19,  /* code-length alphabet size */
+    TDEFL_LZ_DICT_SIZE = 32768,     /* DEFLATE's maximum window size */
+    TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
+    TDEFL_MIN_MATCH_LEN = 3,
+    TDEFL_MAX_MATCH_LEN = 258
+};
+
+/* TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes). */
+#if TDEFL_LESS_MEMORY
+enum
+{
+ TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
+ TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
+ TDEFL_MAX_HUFF_SYMBOLS = 288,
+ TDEFL_LZ_HASH_BITS = 12,
+ TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
+ TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
+ TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
+};
+#else
+enum
+{
+ TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
+ TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
+ TDEFL_MAX_HUFF_SYMBOLS = 288,
+ TDEFL_LZ_HASH_BITS = 15,
+ TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
+ TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
+ TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
+};
+#endif
+
+/* The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions. */
+typedef enum {
+    TDEFL_STATUS_BAD_PARAM = -2,      /* invalid arguments or invalid call sequence */
+    TDEFL_STATUS_PUT_BUF_FAILED = -1, /* the output callback reported failure */
+    TDEFL_STATUS_OKAY = 0,            /* more input and/or output still pending */
+    TDEFL_STATUS_DONE = 1             /* stream finished and fully flushed */
+} tdefl_status;
+
+/* Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums */
+typedef enum {
+    TDEFL_NO_FLUSH = 0,
+    TDEFL_SYNC_FLUSH = 2,
+    TDEFL_FULL_FLUSH = 3, /* also resets the dictionary */
+    TDEFL_FINISH = 4
+} tdefl_flush;
+
+/* tdefl's compression state structure. */
+typedef struct
+{
+    tdefl_put_buf_func_ptr m_pPut_buf_func; /* output callback, or NULL for buffer-mode output */
+    void *m_pPut_buf_user;                  /* opaque user pointer handed to the callback */
+    mz_uint m_flags, m_max_probes[2];       /* init flags; per-state hash-chain probe budgets */
+    int m_greedy_parsing;                   /* nonzero: greedy parse; zero: one-step lazy parse */
+    mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
+    mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
+    mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer;
+    mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish;
+    tdefl_status m_prev_return_status;      /* result of the previous compress call */
+    const void *m_pIn_buf;                  /* current user input buffer (buffer mode) */
+    void *m_pOut_buf;                       /* current user output buffer (buffer mode) */
+    size_t *m_pIn_buf_size, *m_pOut_buf_size; /* updated with bytes consumed/produced */
+    tdefl_flush m_flush;                    /* flush mode of the in-progress call */
+    const mz_uint8 *m_pSrc;                 /* read cursor into the input buffer */
+    size_t m_src_buf_left, m_out_buf_ofs;
+    /* Dictionary is mirrored TDEFL_MAX_MATCH_LEN - 1 bytes past its end so matches may run past the wrap point. */
+    mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
+    mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS]; /* symbol frequencies for the current block */
+    mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+    mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+    mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE]; /* flag bytes + literal/match records awaiting Huffman coding */
+    mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];           /* hash-chain links */
+    mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];           /* hash-table chain heads */
+    mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];      /* staging buffer for compressed output */
+} tdefl_compressor;
+
+/* Initializes the compressor. */
+/* There is no corresponding deinit() function because the tdefl API's do not dynamically allocate memory. */
+/* pBut_buf_func: If NULL, output data will be supplied to the specified callback. In this case, the user should call the tdefl_compress_buffer() API for compression. */
+/* If pBut_buf_func is NULL the user should always call the tdefl_compress() API. */
+/* flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.) */
+tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
+
+/* Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible. */
+tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);
+
+/* tdefl_compress_buffer() is only usable when the tdefl_init() is called with a non-NULL tdefl_put_buf_func_ptr. */
+/* tdefl_compress_buffer() always consumes the entire input buffer. */
+tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush);
+
+tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
+mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
+
+/* Create tdefl_compress() flags given zlib-style compression parameters. */
+/* level may range from [0,10] (where 10 is absolute max compression, but may be much slower on some files) */
+/* window_bits may be -15 (raw deflate) or 15 (zlib) */
+/* strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, MZ_RLE, or MZ_FIXED */
+mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tinfl.c b/platform/linux-generic/miniz/miniz_tinfl.c
new file mode 100644
index 000000000..3dfa1d550
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tinfl.c
@@ -0,0 +1,725 @@
+/**************************************************************************
+ *
+ * Copyright 2013-2014 RAD Game Tools and Valve Software
+ * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "miniz_tinfl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- Low-level Decompression (completely independent from all compression API's) */
+
+#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
+#define TINFL_MEMSET(p, c, l) memset(p, c, l)
+
+#define TINFL_CR_BEGIN \
+ switch (r->m_state) \
+ { \
+ case 0:
+#define TINFL_CR_RETURN(state_index, result) \
+ do \
+ { \
+ status = result; \
+ r->m_state = state_index; \
+ goto common_exit; \
+ case state_index:; \
+ } \
+ MZ_MACRO_END
+#define TINFL_CR_RETURN_FOREVER(state_index, result) \
+ do \
+ { \
+ for (;;) \
+ { \
+ TINFL_CR_RETURN(state_index, result); \
+ } \
+ } \
+ MZ_MACRO_END
+#define TINFL_CR_FINISH }
+
+#define TINFL_GET_BYTE(state_index, c) \
+ do \
+ { \
+ while (pIn_buf_cur >= pIn_buf_end) \
+ { \
+ TINFL_CR_RETURN(state_index, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS); \
+ } \
+ c = *pIn_buf_cur++; \
+ } \
+ MZ_MACRO_END
+
+#define TINFL_NEED_BITS(state_index, n) \
+ do \
+ { \
+ mz_uint c; \
+ TINFL_GET_BYTE(state_index, c); \
+ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
+ num_bits += 8; \
+ } while (num_bits < (mz_uint)(n))
+#define TINFL_SKIP_BITS(state_index, n) \
+ do \
+ { \
+ if (num_bits < (mz_uint)(n)) \
+ { \
+ TINFL_NEED_BITS(state_index, n); \
+ } \
+ bit_buf >>= (n); \
+ num_bits -= (n); \
+ } \
+ MZ_MACRO_END
+#define TINFL_GET_BITS(state_index, b, n) \
+ do \
+ { \
+ if (num_bits < (mz_uint)(n)) \
+ { \
+ TINFL_NEED_BITS(state_index, n); \
+ } \
+ b = bit_buf & ((1 << (n)) - 1); \
+ bit_buf >>= (n); \
+ num_bits -= (n); \
+ } \
+ MZ_MACRO_END
+
+/* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2. */
+/* It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a */
+/* Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the */
+/* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
+#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
+ do \
+ { \
+ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
+ if (temp >= 0) \
+ { \
+ code_len = temp >> 9; \
+ if ((code_len) && (num_bits >= code_len)) \
+ break; \
+ } \
+ else if (num_bits > TINFL_FAST_LOOKUP_BITS) \
+ { \
+ code_len = TINFL_FAST_LOOKUP_BITS; \
+ do \
+ { \
+ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
+ } while ((temp < 0) && (num_bits >= (code_len + 1))); \
+ if (temp >= 0) \
+ break; \
+ } \
+ TINFL_GET_BYTE(state_index, c); \
+ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
+ num_bits += 8; \
+ } while (num_bits < 15);
+
+/* TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read */
+/* beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully */
+/* decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32. */
+/* The slow path is only executed at the very end of the input buffer. */
+/* v1.16: The original macro handled the case at the very end of the passed-in input buffer, but we also need to handle the case where the user passes in 1+zillion bytes */
+/* following the deflate data and our non-conservative read-ahead path won't kick in here on this code. This is much trickier. */
+#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
+ do \
+ { \
+ int temp; \
+ mz_uint code_len, c; \
+ if (num_bits < 15) \
+ { \
+ if ((pIn_buf_end - pIn_buf_cur) < 2) \
+ { \
+ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
+ } \
+ else \
+ { \
+ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
+ pIn_buf_cur += 2; \
+ num_bits += 16; \
+ } \
+ } \
+ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
+ code_len = temp >> 9, temp &= 511; \
+ else \
+ { \
+ code_len = TINFL_FAST_LOOKUP_BITS; \
+ do \
+ { \
+ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
+ } while (temp < 0); \
+ } \
+ sym = temp; \
+ bit_buf >>= code_len; \
+ num_bits -= code_len; \
+ } \
+ MZ_MACRO_END
+
+tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
+{
+ static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
+ static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };
+ static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
+ static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
+ static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+ static const int s_min_table_sizes[3] = { 257, 1, 4 };
+
+ tinfl_status status = TINFL_STATUS_FAILED;
+ mz_uint32 num_bits, dist, counter, num_extra;
+ tinfl_bit_buf_t bit_buf;
+ const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
+ mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
+ size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;
+
+ /* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). */
+ if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start))
+ {
+ *pIn_buf_size = *pOut_buf_size = 0;
+ return TINFL_STATUS_BAD_PARAM;
+ }
+
+ num_bits = r->m_num_bits;
+ bit_buf = r->m_bit_buf;
+ dist = r->m_dist;
+ counter = r->m_counter;
+ num_extra = r->m_num_extra;
+ dist_from_out_buf_start = r->m_dist_from_out_buf_start;
+ TINFL_CR_BEGIN
+
+ bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
+ r->m_z_adler32 = r->m_check_adler32 = 1;
+ if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
+ {
+ TINFL_GET_BYTE(1, r->m_zhdr0);
+ TINFL_GET_BYTE(2, r->m_zhdr1);
+ counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
+ if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
+ counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
+ if (counter)
+ {
+ TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
+ }
+ }
+
+ do
+ {
+ TINFL_GET_BITS(3, r->m_final, 3);
+ r->m_type = r->m_final >> 1;
+ if (r->m_type == 0)
+ {
+ TINFL_SKIP_BITS(5, num_bits & 7);
+ for (counter = 0; counter < 4; ++counter)
+ {
+ if (num_bits)
+ TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
+ else
+ TINFL_GET_BYTE(7, r->m_raw_header[counter]);
+ }
+ if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8))))
+ {
+ TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
+ }
+ while ((counter) && (num_bits))
+ {
+ TINFL_GET_BITS(51, dist, 8);
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ *pOut_buf_cur++ = (mz_uint8)dist;
+ counter--;
+ }
+ while (counter)
+ {
+ size_t n;
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ while (pIn_buf_cur >= pIn_buf_end)
+ {
+ TINFL_CR_RETURN(38, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS);
+ }
+ n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
+ TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
+ pIn_buf_cur += n;
+ pOut_buf_cur += n;
+ counter -= (mz_uint)n;
+ }
+ }
+ else if (r->m_type == 3)
+ {
+ TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
+ }
+ else
+ {
+ if (r->m_type == 1)
+ {
+ mz_uint8 *p = r->m_tables[0].m_code_size;
+ mz_uint i;
+ r->m_table_sizes[0] = 288;
+ r->m_table_sizes[1] = 32;
+ TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
+ for (i = 0; i <= 143; ++i)
+ *p++ = 8;
+ for (; i <= 255; ++i)
+ *p++ = 9;
+ for (; i <= 279; ++i)
+ *p++ = 7;
+ for (; i <= 287; ++i)
+ *p++ = 8;
+ }
+ else
+ {
+ for (counter = 0; counter < 3; counter++)
+ {
+ TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
+ r->m_table_sizes[counter] += s_min_table_sizes[counter];
+ }
+ MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
+ for (counter = 0; counter < r->m_table_sizes[2]; counter++)
+ {
+ mz_uint s;
+ TINFL_GET_BITS(14, s, 3);
+ r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
+ }
+ r->m_table_sizes[2] = 19;
+ }
+ for (; (int)r->m_type >= 0; r->m_type--)
+ {
+ int tree_next, tree_cur;
+ tinfl_huff_table *pTable;
+ mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16];
+ pTable = &r->m_tables[r->m_type];
+ MZ_CLEAR_OBJ(total_syms);
+ MZ_CLEAR_OBJ(pTable->m_look_up);
+ MZ_CLEAR_OBJ(pTable->m_tree);
+ for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
+ total_syms[pTable->m_code_size[i]]++;
+ used_syms = 0, total = 0;
+ next_code[0] = next_code[1] = 0;
+ for (i = 1; i <= 15; ++i)
+ {
+ used_syms += total_syms[i];
+ next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
+ }
+ if ((65536 != total) && (used_syms > 1))
+ {
+ TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
+ }
+ for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
+ {
+ mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index];
+ if (!code_size)
+ continue;
+ cur_code = next_code[code_size]++;
+ for (l = code_size; l > 0; l--, cur_code >>= 1)
+ rev_code = (rev_code << 1) | (cur_code & 1);
+ if (code_size <= TINFL_FAST_LOOKUP_BITS)
+ {
+ mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
+ while (rev_code < TINFL_FAST_LOOKUP_SIZE)
+ {
+ pTable->m_look_up[rev_code] = k;
+ rev_code += (1 << code_size);
+ }
+ continue;
+ }
+ if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)]))
+ {
+ pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next;
+ tree_cur = tree_next;
+ tree_next -= 2;
+ }
+ rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
+ for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
+ {
+ tree_cur -= ((rev_code >>= 1) & 1);
+ if (!pTable->m_tree[-tree_cur - 1])
+ {
+ pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
+ tree_cur = tree_next;
+ tree_next -= 2;
+ }
+ else
+ tree_cur = pTable->m_tree[-tree_cur - 1];
+ }
+ tree_cur -= ((rev_code >>= 1) & 1);
+ pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
+ }
+ if (r->m_type == 2)
+ {
+ for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);)
+ {
+ mz_uint s;
+ TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
+ if (dist < 16)
+ {
+ r->m_len_codes[counter++] = (mz_uint8)dist;
+ continue;
+ }
+ if ((dist == 16) && (!counter))
+ {
+ TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
+ }
+ num_extra = "\02\03\07"[dist - 16];
+ TINFL_GET_BITS(18, s, num_extra);
+ s += "\03\03\013"[dist - 16];
+ TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
+ counter += s;
+ }
+ if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
+ {
+ TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
+ }
+ TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]);
+ TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
+ }
+ }
+ for (;;)
+ {
+ mz_uint8 *pSrc;
+ for (;;)
+ {
+ if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
+ {
+ TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
+ if (counter >= 256)
+ break;
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ *pOut_buf_cur++ = (mz_uint8)counter;
+ }
+ else
+ {
+ int sym2;
+ mz_uint code_len;
+#if TINFL_USE_64BIT_BITBUF
+ if (num_bits < 30)
+ {
+ bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
+ pIn_buf_cur += 4;
+ num_bits += 32;
+ }
+#else
+ if (num_bits < 15)
+ {
+ bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
+ pIn_buf_cur += 2;
+ num_bits += 16;
+ }
+#endif
+ if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
+ code_len = sym2 >> 9;
+ else
+ {
+ code_len = TINFL_FAST_LOOKUP_BITS;
+ do
+ {
+ sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
+ } while (sym2 < 0);
+ }
+ counter = sym2;
+ bit_buf >>= code_len;
+ num_bits -= code_len;
+ if (counter & 256)
+ break;
+
+#if !TINFL_USE_64BIT_BITBUF
+ if (num_bits < 15)
+ {
+ bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
+ pIn_buf_cur += 2;
+ num_bits += 16;
+ }
+#endif
+ if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
+ code_len = sym2 >> 9;
+ else
+ {
+ code_len = TINFL_FAST_LOOKUP_BITS;
+ do
+ {
+ sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
+ } while (sym2 < 0);
+ }
+ bit_buf >>= code_len;
+ num_bits -= code_len;
+
+ pOut_buf_cur[0] = (mz_uint8)counter;
+ if (sym2 & 256)
+ {
+ pOut_buf_cur++;
+ counter = sym2;
+ break;
+ }
+ pOut_buf_cur[1] = (mz_uint8)sym2;
+ pOut_buf_cur += 2;
+ }
+ }
+ if ((counter &= 511) == 256)
+ break;
+
+ num_extra = s_length_extra[counter - 257];
+ counter = s_length_base[counter - 257];
+ if (num_extra)
+ {
+ mz_uint extra_bits;
+ TINFL_GET_BITS(25, extra_bits, num_extra);
+ counter += extra_bits;
+ }
+
+ TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
+ num_extra = s_dist_extra[dist];
+ dist = s_dist_base[dist];
+ if (num_extra)
+ {
+ mz_uint extra_bits;
+ TINFL_GET_BITS(27, extra_bits, num_extra);
+ dist += extra_bits;
+ }
+
+ dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
+ if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
+ {
+ TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
+ }
+
+ pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);
+
+ if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
+ {
+ while (counter--)
+ {
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
+ }
+ continue;
+ }
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+ else if ((counter >= 9) && (counter <= dist))
+ {
+ const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
+ do
+ {
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+ memcpy(pOut_buf_cur, pSrc, sizeof(mz_uint32)*2);
+#else
+ ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
+ ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
+#endif
+ pOut_buf_cur += 8;
+ } while ((pSrc += 8) < pSrc_end);
+ if ((counter &= 7) < 3)
+ {
+ if (counter)
+ {
+ pOut_buf_cur[0] = pSrc[0];
+ if (counter > 1)
+ pOut_buf_cur[1] = pSrc[1];
+ pOut_buf_cur += counter;
+ }
+ continue;
+ }
+ }
+#endif
+ while(counter>2)
+ {
+ pOut_buf_cur[0] = pSrc[0];
+ pOut_buf_cur[1] = pSrc[1];
+ pOut_buf_cur[2] = pSrc[2];
+ pOut_buf_cur += 3;
+ pSrc += 3;
+ counter -= 3;
+ }
+ if (counter > 0)
+ {
+ pOut_buf_cur[0] = pSrc[0];
+ if (counter > 1)
+ pOut_buf_cur[1] = pSrc[1];
+ pOut_buf_cur += counter;
+ }
+ }
+ }
+ } while (!(r->m_final & 1));
+
+ /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
+ /* I'm being super conservative here. A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. */
+ TINFL_SKIP_BITS(32, num_bits & 7);
+ while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
+ {
+ --pIn_buf_cur;
+ num_bits -= 8;
+ }
+ bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
+ MZ_ASSERT(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). */
+
+ if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
+ {
+ for (counter = 0; counter < 4; ++counter)
+ {
+ mz_uint s;
+ if (num_bits)
+ TINFL_GET_BITS(41, s, 8);
+ else
+ TINFL_GET_BYTE(42, s);
+ r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
+ }
+ }
+ TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
+
+ TINFL_CR_FINISH
+
+common_exit:
+ /* As long as we aren't telling the caller that we NEED more input to make forward progress: */
+ /* Put back any bytes from the bitbuf in case we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
+ /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop. */
+ if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS))
+ {
+ while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
+ {
+ --pIn_buf_cur;
+ num_bits -= 8;
+ }
+ }
+ r->m_num_bits = num_bits;
+ r->m_bit_buf = bit_buf & (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
+ r->m_dist = dist;
+ r->m_counter = counter;
+ r->m_num_extra = num_extra;
+ r->m_dist_from_out_buf_start = dist_from_out_buf_start;
+ *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
+ *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
+ if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
+ {
+ const mz_uint8 *ptr = pOut_buf_next;
+ size_t buf_len = *pOut_buf_size;
+ mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16;
+ size_t block_len = buf_len % 5552;
+ while (buf_len)
+ {
+ for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
+ {
+ s1 += ptr[0], s2 += s1;
+ s1 += ptr[1], s2 += s1;
+ s1 += ptr[2], s2 += s1;
+ s1 += ptr[3], s2 += s1;
+ s1 += ptr[4], s2 += s1;
+ s1 += ptr[5], s2 += s1;
+ s1 += ptr[6], s2 += s1;
+ s1 += ptr[7], s2 += s1;
+ }
+ for (; i < block_len; ++i)
+ s1 += *ptr++, s2 += s1;
+ s1 %= 65521U, s2 %= 65521U;
+ buf_len -= block_len;
+ block_len = 5552;
+ }
+ r->m_check_adler32 = (s2 << 16) + s1;
+ if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32))
+ status = TINFL_STATUS_ADLER32_MISMATCH;
+ }
+ return status;
+}
+
+/* Higher level helper functions. */
+void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
+{
+ tinfl_decompressor decomp;
+ void *pBuf = NULL, *pNew_buf;
+ size_t src_buf_ofs = 0, out_buf_capacity = 0;
+ *pOut_len = 0;
+ tinfl_init(&decomp);
+ for (;;)
+ {
+ size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
+ tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size,
+ (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
+ if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
+ {
+ MZ_FREE(pBuf);
+ *pOut_len = 0;
+ return NULL;
+ }
+ src_buf_ofs += src_buf_size;
+ *pOut_len += dst_buf_size;
+ if (status == TINFL_STATUS_DONE)
+ break;
+ new_out_buf_capacity = out_buf_capacity * 2;
+ if (new_out_buf_capacity < 128)
+ new_out_buf_capacity = 128;
+ pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
+ if (!pNew_buf)
+ {
+ MZ_FREE(pBuf);
+ *pOut_len = 0;
+ return NULL;
+ }
+ pBuf = pNew_buf;
+ out_buf_capacity = new_out_buf_capacity;
+ }
+ return pBuf;
+}
+
+size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
+{
+ tinfl_decompressor decomp;
+ tinfl_status status;
+ tinfl_init(&decomp);
+ status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
+ return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
+}
+
+int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
+{
+ int result = 0;
+ tinfl_decompressor decomp;
+ mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
+ size_t in_buf_ofs = 0, dict_ofs = 0;
+ if (!pDict)
+ return TINFL_STATUS_FAILED;
+ tinfl_init(&decomp);
+ for (;;)
+ {
+ size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
+ tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
+ (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
+ in_buf_ofs += in_buf_size;
+ if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
+ break;
+ if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
+ {
+ result = (status == TINFL_STATUS_DONE);
+ break;
+ }
+ dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
+ }
+ MZ_FREE(pDict);
+ *pIn_buf_size = in_buf_ofs;
+ return result;
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tinfl.h b/platform/linux-generic/miniz/miniz_tinfl.h
new file mode 100644
index 000000000..28ca15fcb
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tinfl.h
@@ -0,0 +1,146 @@
+#pragma once
+#include "miniz.h"
+#include "miniz_common.h"
+/* ------------------- Low-level Decompression API Definitions */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Decompression flags used by tinfl_decompress(). */
+/* TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream. */
+/* TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input. */
+/* TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB). */
+/* TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes. */
+enum
+{
+ TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
+ TINFL_FLAG_HAS_MORE_INPUT = 2,
+ TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
+ TINFL_FLAG_COMPUTE_ADLER32 = 8
+};
+
+/* High level decompression functions: */
+/* tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc(). */
+/* On entry: */
+/* pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress. */
+/* On return: */
+/* Function returns a pointer to the decompressed data, or NULL on failure. */
+/* *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data. */
+/* The caller must call mz_free() on the returned block when it's no longer needed. */
+void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
+
+/* tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory. */
+/* Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success. */
+#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
+size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
+
+/* tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer. */
+/* Returns 1 on success or 0 on failure. */
+typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
+int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
+
+struct tinfl_decompressor_tag;
+typedef struct tinfl_decompressor_tag tinfl_decompressor;
+
+/* Max size of LZ dictionary. */
+#define TINFL_LZ_DICT_SIZE 32768
+
+/* Return status. */
+typedef enum {
+    /* This flag indicates the inflator needs 1 or more input bytes to make forward progress, but the caller is indicating that no more are available. The compressed data */
+ /* is probably corrupted. If you call the inflator again with more bytes it'll try to continue processing the input but this is a BAD sign (either the data is corrupted or you called it incorrectly). */
+ /* If you call it again with no input you'll just get TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS again. */
+ TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS = -4,
+
+ /* This flag indicates that one or more of the input parameters was obviously bogus. (You can try calling it again, but if you get this error the calling code is wrong.) */
+ TINFL_STATUS_BAD_PARAM = -3,
+
+    /* This flag indicates the inflator is finished but the adler32 check of the uncompressed data didn't match. If you call it again it'll return TINFL_STATUS_DONE. */
+ TINFL_STATUS_ADLER32_MISMATCH = -2,
+
+    /* This flag indicates the inflator has somehow failed (bad code, corrupted input, etc.). If you call it again without resetting via tinfl_init() it'll just keep on returning the same status failure code. */
+ TINFL_STATUS_FAILED = -1,
+
+ /* Any status code less than TINFL_STATUS_DONE must indicate a failure. */
+
+ /* This flag indicates the inflator has returned every byte of uncompressed data that it can, has consumed every byte that it needed, has successfully reached the end of the deflate stream, and */
+ /* if zlib headers and adler32 checking enabled that it has successfully checked the uncompressed data's adler32. If you call it again you'll just get TINFL_STATUS_DONE over and over again. */
+ TINFL_STATUS_DONE = 0,
+
+ /* This flag indicates the inflator MUST have more input data (even 1 byte) before it can make any more forward progress, or you need to clear the TINFL_FLAG_HAS_MORE_INPUT */
+ /* flag on the next call if you don't have any more source data. If the source data was somehow corrupted it's also possible (but unlikely) for the inflator to keep on demanding input to */
+ /* proceed, so be sure to properly set the TINFL_FLAG_HAS_MORE_INPUT flag. */
+ TINFL_STATUS_NEEDS_MORE_INPUT = 1,
+
+ /* This flag indicates the inflator definitely has 1 or more bytes of uncompressed data available, but it cannot write this data into the output buffer. */
+ /* Note if the source compressed data was corrupted it's possible for the inflator to return a lot of uncompressed data to the caller. I've been assuming you know how much uncompressed data to expect */
+ /* (either exact or worst case) and will stop calling the inflator and fail after receiving too much. In pure streaming scenarios where you have no idea how many bytes to expect this may not be possible */
+ /* so I may need to add some code to address this. */
+ TINFL_STATUS_HAS_MORE_OUTPUT = 2
+} tinfl_status;
+
+/* Initializes the decompressor to its initial state. */
+#define tinfl_init(r) \
+ do \
+ { \
+ (r)->m_state = 0; \
+ } \
+ MZ_MACRO_END
+#define tinfl_get_adler32(r) (r)->m_check_adler32
+
+/* Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability. */
+/* This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output. */
+tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags);
+
+/* Internal/private bits follow. */
+enum
+{
+ TINFL_MAX_HUFF_TABLES = 3,
+ TINFL_MAX_HUFF_SYMBOLS_0 = 288,
+ TINFL_MAX_HUFF_SYMBOLS_1 = 32,
+ TINFL_MAX_HUFF_SYMBOLS_2 = 19,
+ TINFL_FAST_LOOKUP_BITS = 10,
+ TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
+};
+
+typedef struct
+{
+ mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
+ mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
+} tinfl_huff_table;
+
+#if MINIZ_HAS_64BIT_REGISTERS
+#define TINFL_USE_64BIT_BITBUF 1
+#else
+#define TINFL_USE_64BIT_BITBUF 0
+#endif
+
+#if TINFL_USE_64BIT_BITBUF
+typedef mz_uint64 tinfl_bit_buf_t;
+#define TINFL_BITBUF_SIZE (64)
+#else
+typedef mz_uint32 tinfl_bit_buf_t;
+#define TINFL_BITBUF_SIZE (32)
+#endif
+
+struct tinfl_decompressor_tag
+{
+ mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
+ tinfl_bit_buf_t m_bit_buf;
+ size_t m_dist_from_out_buf_start;
+ tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
+ mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
+};
+
+typedef struct
+{
+ tinfl_decompressor m_decomp;
+ mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
+ int m_window_bits;
+ mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
+ tinfl_status m_last_status;
+} inflate_state;
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/odp_atomic_api.c b/platform/linux-generic/odp_atomic_api.c
new file mode 100644
index 000000000..c56f2c53f
--- /dev/null
+++ b/platform/linux-generic/odp_atomic_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/atomic.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/atomic_inlines.h>
diff --git a/platform/linux-generic/odp_barrier.c b/platform/linux-generic/odp_barrier.c
index a2c62676b..3747713f6 100644
--- a/platform/linux-generic/odp_barrier.c
+++ b/platform/linux-generic/odp_barrier.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,6 +8,9 @@
#include <odp/api/sync.h>
#include <odp/api/cpu.h>
#include <odp/api/atomic.h>
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/sync_inlines.h>
+#include <odp/api/plat/cpu_inlines.h>
void odp_barrier_init(odp_barrier_t *barrier, int count)
{
@@ -32,7 +35,7 @@ void odp_barrier_wait(odp_barrier_t *barrier)
uint32_t count;
int wasless;
- odp_mb_full();
+ odp_mb_release();
count = odp_atomic_fetch_inc_u32(&barrier->bar);
wasless = count < barrier->count;
@@ -46,5 +49,5 @@ void odp_barrier_wait(odp_barrier_t *barrier)
odp_cpu_pause();
}
- odp_mb_full();
+ odp_mb_acquire();
}
diff --git a/platform/linux-generic/odp_bitmap.c b/platform/linux-generic/odp_bitmap.c
deleted file mode 100644
index a29b9ef28..000000000
--- a/platform/linux-generic/odp_bitmap.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <string.h>
-#include <unistd.h>
-#include <odp/api/std_types.h>
-#include <odp/api/byteorder.h>
-#include <odp_bitmap_internal.h>
-
-/*
- * WAPL base class bitmap operations
- */
-static inline void __wapl_add_pos(
- wapl_bitmap_t *map, unsigned int p)
-{
- unsigned int s, k = 0;
- unsigned int *pl = map->pl;
-
- while (pl[k] && p > pl[k])
- k++;
-
- if (p == pl[k])
- return;
-
- /* sorted insertion */
- for (; pl[k] && p < pl[k]; k++) {
- s = pl[k];
- pl[k] = p;
- p = s;
- }
-
- if (k < map->nwords)
- pl[k++] = p;
-
- pl[k] = 0;
-}
-
-static inline void __wapl_remove_pos(
- wapl_bitmap_t *map, unsigned int p)
-{
- unsigned int k = 0;
- unsigned int *pl = map->pl;
-
- while (pl[k] && p != pl[k])
- k++;
-
- for (; pl[k]; k++)
- pl[k] = pl[k + 1];
-}
-
-void __wapl_bitmap_and(wapl_bitmap_t *dst,
- wapl_bitmap_t *src, wapl_bitmap_t *and)
-{
- unsigned int k = 0, p;
- unsigned int *pl = src->pl;
-
- while ((p = *pl++) != 0) {
- dst->ul[p] = src->ul[p] & and->ul[p];
- if (dst->ul[p])
- dst->pl[k++] = p;
- }
-
- dst->pl[k] = 0;
-}
-
-void __wapl_bitmap_or(wapl_bitmap_t *dst, wapl_bitmap_t *or)
-{
- unsigned int p;
- unsigned int *pl = or->pl;
-
- while ((p = *pl++) != 0) {
- if (dst->ul[p] == 0)
- __wapl_add_pos(dst, p);
-
- dst->ul[p] |= or->ul[p];
- }
-}
-
-void __wapl_bitmap_set(wapl_bitmap_t *map, unsigned int bit)
-{
- unsigned int p = BIT_WORD(bit) + 1;
- unsigned long set = 1UL << (bit & (BITS_PER_LONG - 1));
-
- if (p > map->nwords)
- return;
-
- if (map->ul[p] == 0)
- __wapl_add_pos(map, p);
-
- map->ul[p] |= set;
-}
-
-void __wapl_bitmap_clear(wapl_bitmap_t *map, unsigned int bit)
-{
- unsigned int p = BIT_WORD(bit) + 1;
- unsigned long clear = 1UL << (bit & (BITS_PER_LONG - 1));
-
- if (p > map->nwords)
- return;
-
- map->ul[p] &= ~clear;
-
- if (map->ul[p] == 0)
- __wapl_remove_pos(map, p);
-}
-
-/*
- * WAPL bitmap iterator implementation
- */
-static void __wapl_iterator_start(wapl_bitmap_iterator_t *this)
-{
- this->_nbits = this->_base.nwords * BITS_PER_LONG;
-
- /* Advance to next queue index to start this
- * new round iteration.
- */
- if (this->_base.pl[0] == 0)
- this->_start = -1;
- else
- this->_start = __bitmap_wraparound_next(
- &this->_base.ul[1], this->_nbits, this->_start + 1);
-
- this->_next = this->_start;
-}
-
-static bool __wapl_iterator_has_next(wapl_bitmap_iterator_t *this)
-{
- return (this->_next != -1);
-}
-
-static unsigned int __wapl_iterator_next(wapl_bitmap_iterator_t *this)
-{
- int next = this->_next;
-
- this->_next = __bitmap_wraparound_next(
- &this->_base.ul[1], this->_nbits, this->_next + 1);
-
- if (this->_next == this->_start)
- this->_next = -1;
-
- return next;
-}
-
-void __wapl_bitmap_iterator(wapl_bitmap_iterator_t *this)
-{
- this->start = __wapl_iterator_start;
- this->has_next = __wapl_iterator_has_next;
- this->next = __wapl_iterator_next;
-
- this->_start = -1;
- this->_next = this->_start;
-}
-
-/*
- * Sparse base class bitmap operations
- */
-void __sparse_bitmap_set(sparse_bitmap_t *map, unsigned int bit)
-{
- unsigned int last = *map->last;
-
- /* Index exceeds */
- if (bit >= map->nbits)
- return;
-
- /* Full bitmap */
- if (last >= map->nbits)
- return;
-
- /* Bit was not set previously,
- * also record where we set the bit
- */
- if (!map->pl[bit]) {
- map->il[last++] = bit;
- map->pl[bit] = last;
-
- *map->last = last;
- }
-}
-
-void __sparse_bitmap_clear(sparse_bitmap_t *map, unsigned int bit)
-{
- unsigned int p, i;
- unsigned int last = *map->last;
-
- /* Index exceeds */
- if (bit >= map->nbits)
- return;
-
- /* Empty bitmap */
- if (last == 0)
- return;
-
- /* Bit was set previously */
- if (map->pl[bit]) {
- p = map->pl[bit] - 1;
- map->pl[bit] = 0;
-
- last--;
- *map->last = last;
-
- /* Fill the hole with the latest index */
- if (p < last) {
- i = map->il[last];
- map->pl[i] = p + 1;
- map->il[p] = i;
- }
- }
-}
-
-/*
- * Sparse bitmap iterator implementation
- */
-static void __sparse_iterator_start(sparse_bitmap_iterator_t *this)
-{
- this->_nbits = (int)*this->_base.last;
-
- /* Advance to next queue index to start this
- * new round iteration.
- */
- if (this->_nbits == 0)
- this->_start = -1;
- else
- this->_start = (this->_start + 1) & (this->_nbits - 1);
-
- this->_next = this->_start;
-}
-
-static bool __sparse_iterator_has_next(sparse_bitmap_iterator_t *this)
-{
- return (this->_next != -1);
-}
-
-static unsigned int __sparse_iterator_next(sparse_bitmap_iterator_t *this)
-{
- int next = this->_next;
-
- this->_next = (this->_next + 1) & (this->_nbits - 1);
- if (this->_next == this->_start)
- this->_next = -1;
-
- return this->_base.il[next];
-}
-
-void __sparse_bitmap_iterator(sparse_bitmap_iterator_t *this)
-{
- this->start = __sparse_iterator_start;
- this->has_next = __sparse_iterator_has_next;
- this->next = __sparse_iterator_next;
-
- this->_start = -1;
- this->_next = this->_start;
-}
-
-/*
- * Generic byte-width atomic set/clear
- */
-static inline void atomic_byte_set(
- unsigned char *addr, unsigned int bit)
-{
- unsigned char load, store;
- unsigned char set = 1 << (bit & (BITS_PER_BYTE - 1));
-
- do {
- load = *addr;
- store = load | set;
- } while (!__atomic_compare_exchange_n(addr, &load, store,
- 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
-}
-
-static inline void atomic_byte_clear(
- unsigned char *addr, unsigned int bit)
-{
- unsigned char load, store;
- unsigned char clear = 1 << (bit & (BITS_PER_BYTE - 1));
-
- do {
- load = *addr;
- store = load & ~clear;
- } while (!__atomic_compare_exchange_n(addr, &load, store,
- 0, __ATOMIC_RELEASE, __ATOMIC_RELAXED));
-}
-
-static inline unsigned char *__bit_byte(
- unsigned long *word, unsigned int bit)
-{
- unsigned int i;
- unsigned char *b;
-
- b = (unsigned char *)word;
-
- i = bit & (BITS_PER_LONG - 1);
- i = i / BITS_PER_BYTE;
-
-#if (ODP_BYTE_ORDER == ODP_BIG_ENDIAN)
- i = BYTES_PER_LONG - 1 - i;
-#endif
- return &b[i];
-}
-
-void raw_bitmap_set(unsigned long *map, unsigned int bit)
-{
- unsigned long *p = map + BIT_WORD(bit);
-
- atomic_byte_set(__bit_byte(p, bit), bit);
-}
-
-void raw_bitmap_clear(unsigned long *map, unsigned int bit)
-{
- unsigned long *p = map + BIT_WORD(bit);
-
- atomic_byte_clear(__bit_byte(p, bit), bit);
-}
diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c
index 88c8140b5..82bf61add 100644
--- a/platform/linux-generic/odp_buffer.c
+++ b/platform/linux-generic/odp_buffer.c
@@ -1,80 +1,62 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/align.h>
#include <odp/api/buffer.h>
+
+#include <odp/api/plat/buffer_inline_types.h>
+
#include <odp_pool_internal.h>
#include <odp_buffer_internal.h>
-#include <odp_buffer_inlines.h>
#include <odp_debug_internal.h>
+#include <odp_print_internal.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
-odp_buffer_t odp_buffer_from_event(odp_event_t ev)
-{
- return (odp_buffer_t)ev;
-}
+#include <odp/visibility_begin.h>
-odp_event_t odp_buffer_to_event(odp_buffer_t buf)
-{
- return (odp_event_t)buf;
-}
+/* Buffer header field offsets for inline functions */
+const _odp_buffer_inline_offset_t _odp_buffer_inline_offset ODP_ALIGNED_CACHE = {
+ .uarea_addr = offsetof(odp_buffer_hdr_t, uarea_addr)
+};
-void *odp_buffer_addr(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
-
- return hdr->seg[0].data;
-}
-
-uint32_t odp_buffer_size(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *hdr = buf_hdl_to_hdr(buf);
-
- return hdr->size;
-}
+#include <odp/visibility_end.h>
-int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf)
+void odp_buffer_print(odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
if (!odp_buffer_is_valid(buf)) {
- ODP_PRINT("Buffer is not valid.\n");
- return len;
+ _ODP_ERR("Buffer is not valid.\n");
+ return;
}
- hdr = buf_hdl_to_hdr(buf);
-
- len += snprintf(&str[len], n-len,
- "Buffer\n");
- len += snprintf(&str[len], n-len,
- " pool %" PRIu64 "\n",
- odp_pool_to_u64(hdr->pool_hdl));
- len += snprintf(&str[len], n-len,
- " addr %p\n", hdr->seg[0].data);
- len += snprintf(&str[len], n-len,
- " size %" PRIu32 "\n", hdr->size);
- len += snprintf(&str[len], n-len,
- " type %i\n", hdr->type);
-
- return len;
-}
-
-void odp_buffer_print(odp_buffer_t buf)
-{
- int max_len = 512;
- char str[max_len];
- int len;
-
- len = odp_buffer_snprint(str, max_len-1, buf);
+ hdr = _odp_buf_hdr(buf);
+
+ len += _odp_snprint(&str[len], n - len, "Buffer info\n");
+ len += _odp_snprint(&str[len], n - len, "-----------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_buffer_to_u64(buf));
+ len += _odp_snprint(&str[len], n - len, " pool index %u\n", hdr->event_hdr.index.pool);
+ len += _odp_snprint(&str[len], n - len, " buffer index %u\n",
+ hdr->event_hdr.index.event);
+ len += _odp_snprint(&str[len], n - len, " addr %p\n",
+ (void *)hdr->event_hdr.base_data);
+ len += _odp_snprint(&str[len], n - len, " size %u\n", odp_buffer_size(buf));
+ len += _odp_snprint(&str[len], n - len, " user area %p\n", hdr->uarea_addr);
str[len] = 0;
- ODP_PRINT("\n%s\n", str);
+ _ODP_PRINT("%s\n", str);
}
uint64_t odp_buffer_to_u64(odp_buffer_t hdl)
diff --git a/platform/linux-generic/odp_buffer_api.c b/platform/linux-generic/odp_buffer_api.c
new file mode 100644
index 000000000..01f99b158
--- /dev/null
+++ b/platform/linux-generic/odp_buffer_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2019, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/buffer.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/buffer_inlines.h>
diff --git a/platform/linux-generic/odp_byteorder.c b/platform/linux-generic/odp_byteorder_api.c
index a344c53f7..4b9f0da7a 100644
--- a/platform/linux-generic/odp_byteorder.c
+++ b/platform/linux-generic/odp_byteorder_api.c
@@ -1,10 +1,11 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/byteorder.h>
-#if ODP_ABI_COMPAT == 1
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
#include <odp/api/plat/byteorder_inlines.h>
-#endif
diff --git a/platform/linux-generic/odp_chksum.c b/platform/linux-generic/odp_chksum.c
new file mode 100644
index 000000000..a0336893e
--- /dev/null
+++ b/platform/linux-generic/odp_chksum.c
@@ -0,0 +1,14 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/chksum.h>
+#include <odp/api/std_types.h>
+#include <odp_chksum_internal.h>
+
+uint16_t odp_chksum_ones_comp16(const void *p, uint32_t len)
+{
+ return chksum_finalize(chksum_partial(p, len, 0));
+}
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 5d96b00b3..016a8f0c5 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,23 +7,34 @@
#include <odp/api/classification.h>
#include <odp/api/align.h>
-#include <odp/api/queue.h>
#include <odp/api/debug.h>
-#include <odp_internal.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/pool.h>
+#include <odp/api/queue.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/spinlock.h>
+
+#include <odp_init_internal.h>
#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
-#include <odp/api/packet_io.h>
#include <odp_packet_io_internal.h>
#include <odp_classification_datamodel.h>
-#include <odp_classification_inlines.h>
#include <odp_classification_internal.h>
-#include <odp/api/shared_memory.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
+#include <protocols/ipsec.h>
+#include <protocols/udp.h>
+#include <protocols/tcp.h>
+#include <protocols/thash.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
-#include <odp/api/spinlock.h>
+#include <inttypes.h>
+
+/* Debug level for per packet classification operations */
+#define CLS_DBG 3
+#define MAX_MARK UINT16_MAX
#define LOCK(a) odp_spinlock_lock(a)
#define UNLOCK(a) odp_spinlock_unlock(a)
@@ -30,100 +42,106 @@
static cos_tbl_t *cos_tbl;
static pmr_tbl_t *pmr_tbl;
+static _cls_queue_grp_tbl_t *queue_grp_tbl;
+
+cls_global_t *_odp_cls_global;
+
+static const rss_key default_rss = {
+ .u8 = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa,
+ }
+};
+
+static inline uint32_t _odp_cos_to_ndx(odp_cos_t cos)
+{
+ return _odp_typeval(cos) - 1;
+}
+
+static inline odp_cos_t _odp_cos_from_ndx(uint32_t ndx)
+{
+ return _odp_cast_scalar(odp_cos_t, ndx + 1);
+}
+
+static inline uint32_t _odp_pmr_to_ndx(odp_pmr_t pmr)
+{
+ return _odp_typeval(pmr) - 1;
+}
-cos_t *get_cos_entry_internal(odp_cos_t cos_id)
+static inline odp_pmr_t _odp_pmr_from_ndx(uint32_t ndx)
{
- return &cos_tbl->cos_entry[_odp_typeval(cos_id)];
+ return _odp_cast_scalar(odp_pmr_t, ndx + 1);
}
-pmr_t *get_pmr_entry_internal(odp_pmr_t pmr_id)
+static
+cos_t *get_cos_entry_internal(odp_cos_t cos)
{
- return &pmr_tbl->pmr[_odp_typeval(pmr_id)];
+ return &cos_tbl->cos_entry[_odp_cos_to_ndx(cos)];
}
-int odp_classification_init_global(void)
+static
+pmr_t *get_pmr_entry_internal(odp_pmr_t pmr)
{
- odp_shm_t cos_shm;
- odp_shm_t pmr_shm;
+ return &pmr_tbl->pmr[_odp_pmr_to_ndx(pmr)];
+}
+
+int _odp_classification_init_global(void)
+{
+ odp_shm_t shm;
int i;
- cos_shm = odp_shm_reserve("shm_odp_cos_tbl",
- sizeof(cos_tbl_t),
- sizeof(cos_t), 0);
+ shm = odp_shm_reserve("_odp_cls_global", sizeof(cls_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
- if (cos_shm == ODP_SHM_INVALID) {
- ODP_ERR("shm allocation failed for shm_odp_cos_tbl");
- goto error;
- }
+ _odp_cls_global = odp_shm_addr(shm);
+ memset(_odp_cls_global, 0, sizeof(cls_global_t));
- cos_tbl = odp_shm_addr(cos_shm);
- if (cos_tbl == NULL)
- goto error_cos;
+ _odp_cls_global->shm = shm;
+ cos_tbl = &_odp_cls_global->cos_tbl;
+ pmr_tbl = &_odp_cls_global->pmr_tbl;
+ queue_grp_tbl = &_odp_cls_global->queue_grp_tbl;
- memset(cos_tbl, 0, sizeof(cos_tbl_t));
- for (i = 0; i < ODP_COS_MAX_ENTRY; i++) {
+ for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
/* init locks */
- cos_t *cos =
- get_cos_entry_internal(_odp_cast_scalar(odp_cos_t, i));
- LOCK_INIT(&cos->s.lock);
- }
+ cos_t *cos = get_cos_entry_internal(_odp_cos_from_ndx(i));
- pmr_shm = odp_shm_reserve("shm_odp_pmr_tbl",
- sizeof(pmr_tbl_t),
- sizeof(pmr_t), 0);
-
- if (pmr_shm == ODP_SHM_INVALID) {
- ODP_ERR("shm allocation failed for shm_odp_pmr_tbl");
- goto error_cos;
+ LOCK_INIT(&cos->lock);
}
- pmr_tbl = odp_shm_addr(pmr_shm);
- if (pmr_tbl == NULL)
- goto error_pmr;
-
- memset(pmr_tbl, 0, sizeof(pmr_tbl_t));
- for (i = 0; i < ODP_PMR_MAX_ENTRY; i++) {
+ for (i = 0; i < CLS_PMR_MAX_ENTRY; i++) {
/* init locks */
- pmr_t *pmr =
- get_pmr_entry_internal(_odp_cast_scalar(odp_pmr_t, i));
- LOCK_INIT(&pmr->s.lock);
+ pmr_t *pmr = get_pmr_entry_internal(_odp_pmr_from_ndx(i));
+
+ LOCK_INIT(&pmr->lock);
}
return 0;
-
-error_pmr:
- odp_shm_free(pmr_shm);
-error_cos:
- odp_shm_free(cos_shm);
-error:
- return -1;
}
-int odp_classification_term_global(void)
+int _odp_classification_term_global(void)
{
- int ret = 0;
- int rc = 0;
-
- ret = odp_shm_free(odp_shm_lookup("shm_odp_cos_tbl"));
- if (ret < 0) {
- ODP_ERR("shm free failed for shm_odp_cos_tbl");
- rc = -1;
- }
-
- ret = odp_shm_free(odp_shm_lookup("shm_odp_pmr_tbl"));
- if (ret < 0) {
- ODP_ERR("shm free failed for shm_odp_pmr_tbl");
- rc = -1;
+ if (_odp_cls_global && odp_shm_free(_odp_cls_global->shm)) {
+ _ODP_ERR("shm free failed\n");
+ return -1;
}
- return rc;
+ return 0;
}
void odp_cls_cos_param_init(odp_cls_cos_param_t *param)
{
+ memset(param, 0, sizeof(odp_cls_cos_param_t));
+
param->queue = ODP_QUEUE_INVALID;
param->pool = ODP_POOL_INVALID;
- param->drop_policy = ODP_COS_DROP_NEVER;
+ param->num_queue = 1;
+ param->vector.enable = false;
+ odp_queue_param_init(&param->queue_param);
}
void odp_cls_pmr_param_init(odp_pmr_param_t *param)
@@ -133,45 +151,132 @@ void odp_cls_pmr_param_init(odp_pmr_param_t *param)
int odp_cls_capability(odp_cls_capability_t *capability)
{
- unsigned count = 0;
-
- for (int i = 0; i < ODP_PMR_MAX_ENTRY; i++)
- if (!pmr_tbl->pmr[i].s.valid)
- count++;
-
- capability->max_pmr_terms = ODP_PMR_MAX_ENTRY;
- capability->available_pmr_terms = count;
- capability->max_cos = ODP_COS_MAX_ENTRY;
+ memset(capability, 0, sizeof(odp_cls_capability_t));
+ capability->max_pmr = CLS_PMR_MAX_ENTRY;
+ capability->max_pmr_per_cos = CLS_PMR_PER_COS_MAX;
+ capability->max_terms_per_pmr = CLS_PMRTERM_MAX;
+ capability->max_cos = CLS_COS_MAX_ENTRY;
+ capability->max_cos_stats = capability->max_cos;
capability->pmr_range_supported = false;
capability->supported_terms.all_bits = 0;
+ capability->supported_terms.bit.len = 1;
+ capability->supported_terms.bit.ethtype_0 = 1;
+ capability->supported_terms.bit.ethtype_x = 1;
+ capability->supported_terms.bit.vlan_id_0 = 1;
+ capability->supported_terms.bit.vlan_id_x = 1;
+ capability->supported_terms.bit.vlan_pcp_0 = 1;
+ capability->supported_terms.bit.dmac = 1;
capability->supported_terms.bit.ip_proto = 1;
+ capability->supported_terms.bit.ip_dscp = 1;
capability->supported_terms.bit.udp_dport = 1;
capability->supported_terms.bit.udp_sport = 1;
capability->supported_terms.bit.tcp_dport = 1;
capability->supported_terms.bit.tcp_sport = 1;
capability->supported_terms.bit.sip_addr = 1;
capability->supported_terms.bit.dip_addr = 1;
+ capability->supported_terms.bit.sip6_addr = 1;
+ capability->supported_terms.bit.dip6_addr = 1;
+ capability->supported_terms.bit.ipsec_spi = 1;
+ capability->supported_terms.bit.custom_frame = 1;
+ capability->supported_terms.bit.custom_l3 = 1;
+ capability->random_early_detection = ODP_SUPPORT_NO;
+ capability->back_pressure = ODP_SUPPORT_NO;
+ capability->threshold_red.all_bits = 0;
+ capability->threshold_bp.all_bits = 0;
+ capability->max_hash_queues = CLS_COS_QUEUE_MAX;
+ capability->hash_protocols.proto.ipv4_udp = 1;
+ capability->hash_protocols.proto.ipv4_tcp = 1;
+ capability->hash_protocols.proto.ipv4 = 1;
+ capability->hash_protocols.proto.ipv6_udp = 1;
+ capability->hash_protocols.proto.ipv6_tcp = 1;
+ capability->hash_protocols.proto.ipv6 = 1;
+ capability->max_mark = MAX_MARK;
+ capability->stats.cos.counter.discards = 1;
+ capability->stats.cos.counter.packets = 1;
+ capability->stats.queue.counter.discards = 1;
+ capability->stats.queue.counter.packets = 1;
+
return 0;
}
-odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param)
+void odp_cls_pmr_create_opt_init(odp_pmr_create_opt_t *opt)
{
- int i, j;
- queue_entry_t *queue;
- odp_cls_drop_t drop_policy;
+ opt->terms = NULL;
+ opt->num_terms = 0;
+ opt->mark = 0;
+}
- /* Packets are dropped if Queue or Pool is invalid*/
- if (param->queue == ODP_QUEUE_INVALID)
- queue = NULL;
- else
- queue = queue_to_qentry(param->queue);
+static void _odp_cls_update_hash_proto(cos_t *cos,
+ odp_pktin_hash_proto_t hash_proto)
+{
+ if (hash_proto.proto.ipv4 || hash_proto.proto.ipv4_tcp ||
+ hash_proto.proto.ipv4_udp)
+ cos->hash_proto.ipv4 = 1;
+ if (hash_proto.proto.ipv6 || hash_proto.proto.ipv6_tcp ||
+ hash_proto.proto.ipv6_udp)
+ cos->hash_proto.ipv6 = 1;
+ if (hash_proto.proto.ipv4_tcp || hash_proto.proto.ipv6_tcp)
+ cos->hash_proto.tcp = 1;
+ if (hash_proto.proto.ipv4_udp || hash_proto.proto.ipv6_udp)
+ cos->hash_proto.udp = 1;
+}
+
+static inline void _cls_queue_unwind(uint32_t tbl_index, uint32_t j)
+{
+ while (j > 0)
+ odp_queue_destroy(queue_grp_tbl->queue[tbl_index + --j]);
+}
+
+odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_in)
+{
+ uint32_t i, j;
+ odp_queue_t queue;
+ cos_t *cos;
+ uint32_t tbl_index;
+ odp_cls_cos_param_t param = *param_in;
+
+ if (param.action == ODP_COS_ACTION_DROP) {
+ param.num_queue = 1;
+ param.queue = ODP_QUEUE_INVALID;
+ param.pool = ODP_POOL_INVALID;
+ param.vector.enable = false;
+ } else {
+ if (param.num_queue == 1 && param.queue == ODP_QUEUE_INVALID)
+ return ODP_COS_INVALID;
+ }
+
+ /* num_queue should not be zero */
+ if (param.num_queue > CLS_COS_QUEUE_MAX || param.num_queue < 1)
+ return ODP_COS_INVALID;
- drop_policy = param->drop_policy;
+ /* Validate packet vector parameters */
+ if (param.vector.enable) {
+ odp_pool_t pool = param.vector.pool;
+ odp_pool_info_t pool_info;
+
+ if (pool == ODP_POOL_INVALID || odp_pool_info(pool, &pool_info)) {
+ _ODP_ERR("invalid packet vector pool\n");
+ return ODP_COS_INVALID;
+ }
+ if (pool_info.params.type != ODP_POOL_VECTOR) {
+ _ODP_ERR("wrong pool type\n");
+ return ODP_COS_INVALID;
+ }
+ if (param.vector.max_size == 0) {
+ _ODP_ERR("vector.max_size is zero\n");
+ return ODP_COS_INVALID;
+ }
+ if (param.vector.max_size > pool_info.params.vector.max_size) {
+ _ODP_ERR("vector.max_size larger than pool max vector size\n");
+ return ODP_COS_INVALID;
+ }
+ }
- for (i = 0; i < ODP_COS_MAX_ENTRY; i++) {
- LOCK(&cos_tbl->cos_entry[i].s.lock);
- if (0 == cos_tbl->cos_entry[i].s.valid) {
- char *cos_name = cos_tbl->cos_entry[i].s.name;
+ for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
+ cos = &cos_tbl->cos_entry[i];
+ LOCK(&cos->lock);
+ if (0 == cos->valid) {
+ char *cos_name = cos->name;
if (name == NULL) {
cos_name[0] = 0;
@@ -179,65 +284,134 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param)
strncpy(cos_name, name, ODP_COS_NAME_LEN - 1);
cos_name[ODP_COS_NAME_LEN - 1] = 0;
}
- for (j = 0; j < ODP_PMR_PER_COS_MAX; j++) {
- cos_tbl->cos_entry[i].s.pmr[j] = NULL;
- cos_tbl->cos_entry[i].s.linked_cos[j] = NULL;
+ for (j = 0; j < CLS_PMR_PER_COS_MAX; j++) {
+ cos->pmr[j] = NULL;
+ cos->linked_cos[j] = NULL;
}
- cos_tbl->cos_entry[i].s.queue = queue;
- cos_tbl->cos_entry[i].s.pool = param->pool;
- cos_tbl->cos_entry[i].s.headroom = 0;
- cos_tbl->cos_entry[i].s.valid = 1;
- cos_tbl->cos_entry[i].s.drop_policy = drop_policy;
- odp_atomic_init_u32(&cos_tbl->cos_entry[i]
- .s.num_rule, 0);
- UNLOCK(&cos_tbl->cos_entry[i].s.lock);
- return _odp_cast_scalar(odp_cos_t, i);
+
+ cos->num_queue = param.num_queue;
+
+ if (param.num_queue > 1) {
+ cos->queue_param = param.queue_param;
+ cos->queue_group = true;
+ cos->queue = ODP_QUEUE_INVALID;
+ _odp_cls_update_hash_proto(cos,
+ param.hash_proto);
+ tbl_index = i * CLS_COS_QUEUE_MAX;
+ for (j = 0; j < param.num_queue; j++) {
+ char hq_name[ODP_QUEUE_NAME_LEN];
+
+ snprintf(hq_name, sizeof(hq_name), "_odp_cos_hq_%u_%u",
+ i, j);
+ queue = odp_queue_create(hq_name, &cos->queue_param);
+ if (queue == ODP_QUEUE_INVALID) {
+ /* unwind the queues */
+ _cls_queue_unwind(tbl_index, j);
+ UNLOCK(&cos->lock);
+ return ODP_COS_INVALID;
+ }
+ queue_grp_tbl->queue[tbl_index + j] =
+ queue;
+ }
+
+ } else {
+ cos->queue_group = false;
+ cos->queue = param.queue;
+ }
+
+ odp_atomic_init_u64(&cos->stats.discards, 0);
+ odp_atomic_init_u64(&cos->stats.packets, 0);
+
+ /* Initialize statistics counters */
+ for (j = 0; j < cos->num_queue; j++) {
+ odp_atomic_init_u64(&cos->queue_stats[j].discards, 0);
+ odp_atomic_init_u64(&cos->queue_stats[j].packets, 0);
+ }
+
+ cos->action = param.action;
+ cos->pool = param.pool;
+ cos->headroom = 0;
+ cos->valid = 1;
+ odp_atomic_init_u32(&cos->num_rule, 0);
+ cos->index = i;
+ cos->vector = param.vector;
+ cos->stats_enable = param.stats_enable;
+ UNLOCK(&cos->lock);
+ return _odp_cos_from_ndx(i);
}
- UNLOCK(&cos_tbl->cos_entry[i].s.lock);
+ UNLOCK(&cos->lock);
}
- ODP_ERR("ODP_COS_MAX_ENTRY reached");
+ _ODP_ERR("CLS_COS_MAX_ENTRY reached\n");
return ODP_COS_INVALID;
}
+int odp_cls_cos_create_multi(const char *name[], const odp_cls_cos_param_t param[],
+ odp_cos_t cos[], int num)
+{
+ int i;
+
+ _ODP_ASSERT(param != NULL);
+ _ODP_ASSERT(cos != NULL);
+
+ for (i = 0; i < num; i++) {
+ const char *cur_name = name != NULL ? name[i] : NULL;
+ odp_cos_t new_cos = odp_cls_cos_create(cur_name, &param[i]);
+
+ if (odp_unlikely(new_cos == ODP_COS_INVALID))
+ return (i == 0) ? -1 : i;
+
+ cos[i] = new_cos;
+ }
+ return i;
+}
+
+/*
+ * Allocate an odp_pmr_t Handle
+ */
+static
odp_pmr_t alloc_pmr(pmr_t **pmr)
{
int i;
- for (i = 0; i < ODP_PMR_MAX_ENTRY; i++) {
- LOCK(&pmr_tbl->pmr[i].s.lock);
- if (0 == pmr_tbl->pmr[i].s.valid) {
- pmr_tbl->pmr[i].s.valid = 1;
- odp_atomic_init_u32(&pmr_tbl->pmr[i].s.count, 0);
- pmr_tbl->pmr[i].s.num_pmr = 0;
+ for (i = 0; i < CLS_PMR_MAX_ENTRY; i++) {
+ LOCK(&pmr_tbl->pmr[i].lock);
+ if (0 == pmr_tbl->pmr[i].valid) {
+ pmr_tbl->pmr[i].valid = 1;
+ pmr_tbl->pmr[i].num_pmr = 0;
*pmr = &pmr_tbl->pmr[i];
/* return as locked */
- return _odp_cast_scalar(odp_pmr_t, i);
+ return _odp_pmr_from_ndx(i);
}
- UNLOCK(&pmr_tbl->pmr[i].s.lock);
+ UNLOCK(&pmr_tbl->pmr[i].lock);
}
- ODP_ERR("ODP_PMR_MAX_ENTRY reached");
- return ODP_PMR_INVAL;
+ _ODP_ERR("CLS_PMR_MAX_ENTRY reached\n");
+ return ODP_PMR_INVALID;
}
-cos_t *get_cos_entry(odp_cos_t cos_id)
+static
+cos_t *get_cos_entry(odp_cos_t cos)
{
- if (_odp_typeval(cos_id) >= ODP_COS_MAX_ENTRY ||
- cos_id == ODP_COS_INVALID)
+ uint32_t cos_id = _odp_cos_to_ndx(cos);
+
+ if (cos_id >= CLS_COS_MAX_ENTRY || cos == ODP_COS_INVALID)
return NULL;
- if (cos_tbl->cos_entry[_odp_typeval(cos_id)].s.valid == 0)
+ if (cos_tbl->cos_entry[cos_id].valid == 0)
return NULL;
- return &cos_tbl->cos_entry[_odp_typeval(cos_id)];
+ return &cos_tbl->cos_entry[cos_id];
}
-pmr_t *get_pmr_entry(odp_pmr_t pmr_id)
+static
+pmr_t *get_pmr_entry(odp_pmr_t pmr)
{
- if (_odp_typeval(pmr_id) >= ODP_PMR_MAX_ENTRY ||
- pmr_id == ODP_PMR_INVAL)
+ uint32_t pmr_id = _odp_pmr_to_ndx(pmr);
+
+ if (pmr_id >= CLS_PMR_MAX_ENTRY ||
+ pmr == ODP_PMR_INVALID)
return NULL;
- if (pmr_tbl->pmr[_odp_typeval(pmr_id)].s.valid == 0)
+ if (pmr_tbl->pmr[pmr_id].valid == 0)
return NULL;
- return &pmr_tbl->pmr[_odp_typeval(pmr_id)];
+ return &pmr_tbl->pmr[pmr_id];
}
int odp_cos_destroy(odp_cos_t cos_id)
@@ -245,28 +419,56 @@ int odp_cos_destroy(odp_cos_t cos_id)
cos_t *cos = get_cos_entry(cos_id);
if (NULL == cos) {
- ODP_ERR("Invalid odp_cos_t handle");
+ _ODP_ERR("Invalid odp_cos_t handle\n");
return -1;
}
- cos->s.valid = 0;
+ if (cos->queue_group)
+ _cls_queue_unwind(cos->index * CLS_COS_QUEUE_MAX, cos->num_queue);
+
+ cos->valid = 0;
return 0;
}
+int odp_cos_destroy_multi(odp_cos_t cos[], int num)
+{
+ int i;
+
+ _ODP_ASSERT(cos != NULL);
+ _ODP_ASSERT(num > 0);
+
+ for (i = 0; i < num; i++) {
+ int ret = odp_cos_destroy(cos[i]);
+
+ if (ret)
+ return (i == 0) ? ret : i;
+ }
+
+ return i;
+}
+
int odp_cos_queue_set(odp_cos_t cos_id, odp_queue_t queue_id)
{
cos_t *cos = get_cos_entry(cos_id);
if (cos == NULL) {
- ODP_ERR("Invalid odp_cos_t handle");
+ _ODP_ERR("Invalid odp_cos_t handle\n");
return -1;
}
+
+ if (queue_id == ODP_QUEUE_INVALID) {
+ _ODP_ERR("Invalid queue\n");
+ return -1;
+ }
+
+ if (cos->num_queue != 1) {
+ _ODP_ERR("Hashing enabled, cannot set queue\n");
+ return -1;
+ }
+
/* Locking is not required as intermittent stale
data during CoS modification is acceptable*/
- if (queue_id == ODP_QUEUE_INVALID)
- cos->s.queue = NULL;
- else
- cos->s.queue = queue_to_qentry(queue_id);
+ cos->queue = queue_id;
return 0;
}
@@ -275,60 +477,79 @@ odp_queue_t odp_cos_queue(odp_cos_t cos_id)
cos_t *cos = get_cos_entry(cos_id);
if (!cos) {
- ODP_ERR("Invalid odp_cos_t handle");
+ _ODP_ERR("Invalid odp_cos_t handle\n");
return ODP_QUEUE_INVALID;
}
- if (!cos->s.queue)
- return ODP_QUEUE_INVALID;
-
- return cos->s.queue->s.handle;
+ return cos->queue;
}
-int odp_cos_drop_set(odp_cos_t cos_id, odp_cls_drop_t drop_policy)
+uint32_t odp_cls_cos_num_queue(odp_cos_t cos_id)
{
cos_t *cos = get_cos_entry(cos_id);
if (!cos) {
- ODP_ERR("Invalid odp_cos_t handle");
- return -1;
+ _ODP_ERR("Invalid odp_cos_t handle\n");
+ return 0;
}
- /*Drop policy is not supported in v1.0*/
- cos->s.drop_policy = drop_policy;
- return 0;
+ return cos->num_queue;
}
-odp_cls_drop_t odp_cos_drop(odp_cos_t cos_id)
+uint32_t odp_cls_cos_queues(odp_cos_t cos_id, odp_queue_t queue[],
+ uint32_t num)
{
- cos_t *cos = get_cos_entry(cos_id);
+ uint32_t num_queues;
+ cos_t *cos;
+ uint32_t tbl_index;
+ uint32_t i;
+ cos = get_cos_entry(cos_id);
if (!cos) {
- ODP_ERR("Invalid odp_cos_t handle");
- return -1;
+ _ODP_ERR("Invalid odp_cos_t handle\n");
+ return 0;
+ }
+
+ if (cos->num_queue == 1) {
+ if (num == 0)
+ return 1;
+
+ queue[0] = cos->queue;
+ return 1;
}
- return cos->s.drop_policy;
+ if (num < cos->num_queue)
+ num_queues = num;
+ else
+ num_queues = cos->num_queue;
+
+ tbl_index = cos->index * CLS_COS_QUEUE_MAX;
+ for (i = 0; i < num_queues; i++)
+ queue[i] = queue_grp_tbl->queue[tbl_index + i];
+
+ return cos->num_queue;
}
int odp_pktio_default_cos_set(odp_pktio_t pktio_in, odp_cos_t default_cos)
{
pktio_entry_t *entry;
- cos_t *cos;
+ cos_t *cos = NULL;
entry = get_pktio_entry(pktio_in);
if (entry == NULL) {
- ODP_ERR("Invalid odp_pktio_t handle");
+ _ODP_ERR("Invalid odp_pktio_t handle\n");
return -1;
}
- cos = get_cos_entry(default_cos);
- if (cos == NULL) {
- ODP_ERR("Invalid odp_cos_t handle");
- return -1;
+
+ if (default_cos != ODP_COS_INVALID) {
+ cos = get_cos_entry(default_cos);
+ if (cos == NULL) {
+ _ODP_ERR("Invalid odp_cos_t handle\n");
+ return -1;
+ }
}
- entry->s.cls.default_cos = cos;
- pktio_cls_enabled_set(entry, 1);
+ entry->cls.default_cos = cos;
return 0;
}
@@ -339,31 +560,27 @@ int odp_pktio_error_cos_set(odp_pktio_t pktio_in, odp_cos_t error_cos)
entry = get_pktio_entry(pktio_in);
if (entry == NULL) {
- ODP_ERR("Invalid odp_pktio_t handle");
+ _ODP_ERR("Invalid odp_pktio_t handle\n");
return -1;
}
cos = get_cos_entry(error_cos);
if (cos == NULL) {
- ODP_ERR("Invalid odp_cos_t handle");
+ _ODP_ERR("Invalid odp_cos_t handle\n");
return -1;
}
- entry->s.cls.error_cos = cos;
+ entry->cls.error_cos = cos;
return 0;
}
int odp_pktio_skip_set(odp_pktio_t pktio_in, uint32_t offset)
{
- pktio_entry_t *entry = get_pktio_entry(pktio_in);
-
- if (entry == NULL) {
- ODP_ERR("Invalid odp_cos_t handle");
- return -1;
- }
+ (void)pktio_in;
+ (void)offset;
- entry->s.cls.skip = offset;
- return 0;
+ /* Skipping bytes before parsing is not supported */
+ return -ENOTSUP;
}
int odp_pktio_headroom_set(odp_pktio_t pktio_in, uint32_t headroom)
@@ -371,122 +588,103 @@ int odp_pktio_headroom_set(odp_pktio_t pktio_in, uint32_t headroom)
pktio_entry_t *entry = get_pktio_entry(pktio_in);
if (entry == NULL) {
- ODP_ERR("Invalid odp_pktio_t handle");
+ _ODP_ERR("Invalid odp_pktio_t handle\n");
return -1;
}
- entry->s.cls.headroom = headroom;
+ entry->cls.headroom = headroom;
return 0;
}
-int odp_cos_with_l2_priority(odp_pktio_t pktio_in,
- uint8_t num_qos,
- uint8_t qos_table[],
- odp_cos_t cos_table[])
+static int pmr_create_term(pmr_term_value_t *value,
+ const odp_pmr_param_t *param)
{
- pmr_l2_cos_t *l2_cos;
- uint32_t i;
- cos_t *cos;
- pktio_entry_t *entry = get_pktio_entry(pktio_in);
+ uint32_t size;
+ uint8_t i;
+ int custom = 0;
+ odp_cls_pmr_term_t term = param->term;
- if (entry == NULL) {
- ODP_ERR("Invalid odp_pktio_t handle");
+ if (param->range_term) {
+ _ODP_ERR("PMR value range not supported\n");
return -1;
}
- l2_cos = &entry->s.cls.l2_cos_table;
- LOCK(&l2_cos->lock);
- /* Update the L2 QoS table*/
- for (i = 0; i < num_qos; i++) {
- cos = get_cos_entry(cos_table[i]);
- if (cos != NULL) {
- if (ODP_COS_MAX_L2_QOS > qos_table[i])
- l2_cos->cos[qos_table[i]] = cos;
- }
- }
- pktio_cls_enabled_set(entry, 1);
- UNLOCK(&l2_cos->lock);
- return 0;
-}
+ value->term = term;
+ value->range_term = param->range_term;
-int odp_cos_with_l3_qos(odp_pktio_t pktio_in,
- uint32_t num_qos,
- uint8_t qos_table[],
- odp_cos_t cos_table[],
- odp_bool_t l3_preference)
-{
- pmr_l3_cos_t *l3_cos;
- uint32_t i;
- pktio_entry_t *entry = get_pktio_entry(pktio_in);
- cos_t *cos;
+ switch (term) {
+ case ODP_PMR_VLAN_PCP_0:
+ /* Fall through */
+ case ODP_PMR_IPPROTO:
+ /* Fall through */
+ case ODP_PMR_IP_DSCP:
+ size = 1;
+ break;
+
+ case ODP_PMR_ETHTYPE_0:
+ /* Fall through */
+ case ODP_PMR_ETHTYPE_X:
+ /* Fall through */
+ case ODP_PMR_VLAN_ID_0:
+ /* Fall through */
+ case ODP_PMR_VLAN_ID_X:
+ /* Fall through */
+ case ODP_PMR_UDP_DPORT:
+ /* Fall through */
+ case ODP_PMR_TCP_DPORT:
+ /* Fall through */
+ case ODP_PMR_UDP_SPORT:
+ /* Fall through */
+ case ODP_PMR_TCP_SPORT:
+ size = 2;
+ break;
+
+ case ODP_PMR_LEN:
+ /* Fall through */
+ case ODP_PMR_SIP_ADDR:
+ /* Fall through */
+ case ODP_PMR_DIP_ADDR:
+ /* Fall through */
+ case ODP_PMR_IPSEC_SPI:
+ /* Fall through */
+ case ODP_PMR_LD_VNI:
+ size = 4;
+ break;
+
+ case ODP_PMR_DMAC:
+ size = 6;
+ break;
- if (entry == NULL) {
- ODP_ERR("Invalid odp_pktio_t handle");
+ case ODP_PMR_SIP6_ADDR:
+ /* Fall through */
+ case ODP_PMR_DIP6_ADDR:
+ size = 16;
+ break;
+
+ case ODP_PMR_CUSTOM_FRAME:
+ /* Fall through */
+ case ODP_PMR_CUSTOM_L3:
+ custom = 1;
+ size = MAX_PMR_TERM_SIZE;
+ break;
+
+ default:
+ _ODP_ERR("Bad PMR term\n");
return -1;
}
- entry->s.cls.l3_precedence = l3_preference;
- l3_cos = &entry->s.cls.l3_cos_table;
-
- LOCK(&l3_cos->lock);
- /* Update the L3 QoS table*/
- for (i = 0; i < num_qos; i++) {
- cos = get_cos_entry(cos_table[i]);
- if (cos != NULL) {
- if (ODP_COS_MAX_L3_QOS > qos_table[i])
- l3_cos->cos[qos_table[i]] = cos;
- }
+ if ((!custom && param->val_sz != size) ||
+ (custom && param->val_sz > size)) {
+ _ODP_ERR("Bad PMR value size: %u\n", param->val_sz);
+ return -1;
}
- pktio_cls_enabled_set(entry, 1);
- UNLOCK(&l3_cos->lock);
- return 0;
-}
-static int odp_pmr_create_term(pmr_term_value_t *value,
- const odp_pmr_param_t *param)
-{
- value->term = param->term;
- value->range_term = param->range_term;
- uint8_t i;
+ memset(&value->match, 0, sizeof(value->match));
+ memcpy(&value->match.value, param->match.value, param->val_sz);
+ memcpy(&value->match.mask, param->match.mask, param->val_sz);
- switch (value->term) {
- case ODP_PMR_SIP6_ADDR:
- case ODP_PMR_DIP6_ADDR:
- if (!value->range_term) {
- memset(value->match_ipv6.addr.u8, 0, 16);
- memset(value->match_ipv6.mask.u8, 0, 16);
- memcpy(&value->match_ipv6.addr.u8, param->match.value,
- param->val_sz);
- memcpy(&value->match_ipv6.mask.u8, param->match.mask,
- param->val_sz);
- for (i = 0; i < 2; i++)
- value->match_ipv6.addr.u64[i] &=
- value->match_ipv6.mask.u64[i];
- } else {
- memset(value->range_ipv6.addr_start.u8, 0, 16);
- memset(value->range_ipv6.addr_end.u8, 0, 16);
- memcpy(&value->range_ipv6.addr_start.u8, param->range.val_start,
- param->val_sz);
- memcpy(&value->range_ipv6.addr_end.u8, param->range.val_end,
- param->val_sz);
- }
+ for (i = 0; i < param->val_sz; i++)
+ value->match.value_u8[i] &= value->match.mask_u8[i];
- break;
- default:
- if (!value->range_term) {
- value->match.value = 0;
- value->match.mask = 0;
- memcpy(&value->match.value, param->match.value, param->val_sz);
- memcpy(&value->match.mask, param->match.mask, param->val_sz);
- value->match.value &= value->match.mask;
- } else {
- value->range.val_start = 0;
- value->range.val_end = 0;
- memcpy(&value->range.val_start, param->range.val_start,
- param->val_sz);
- memcpy(&value->range.val_end, param->range.val_end,
- param->val_sz);
- }
- }
value->offset = param->offset;
value->val_sz = param->val_sz;
return 0;
@@ -500,91 +698,147 @@ int odp_cls_pmr_destroy(odp_pmr_t pmr_id)
uint8_t i;
pmr = get_pmr_entry(pmr_id);
- if (pmr == NULL || pmr->s.src_cos == NULL)
+ if (pmr == NULL || pmr->src_cos == NULL)
return -1;
- src_cos = pmr->s.src_cos;
- LOCK(&src_cos->s.lock);
- loc = odp_atomic_load_u32(&src_cos->s.num_rule);
+ src_cos = pmr->src_cos;
+ LOCK(&src_cos->lock);
+ loc = odp_atomic_load_u32(&src_cos->num_rule);
if (loc == 0)
goto no_rule;
loc -= 1;
for (i = 0; i <= loc; i++)
- if (src_cos->s.pmr[i] == pmr) {
- src_cos->s.pmr[i] = src_cos->s.pmr[loc];
- src_cos->s.linked_cos[i] = src_cos->s.linked_cos[loc];
+ if (src_cos->pmr[i] == pmr) {
+ src_cos->pmr[i] = src_cos->pmr[loc];
+ src_cos->linked_cos[i] = src_cos->linked_cos[loc];
}
- odp_atomic_dec_u32(&src_cos->s.num_rule);
+ odp_atomic_dec_u32(&src_cos->num_rule);
no_rule:
- pmr->s.valid = 0;
- UNLOCK(&src_cos->s.lock);
+ pmr->valid = 0;
+ UNLOCK(&src_cos->lock);
return 0;
}
-odp_pmr_t odp_cls_pmr_create(const odp_pmr_param_t *terms, int num_terms,
- odp_cos_t src_cos, odp_cos_t dst_cos)
+int odp_cls_pmr_destroy_multi(odp_pmr_t pmr[], int num)
+{
+ int i;
+
+ _ODP_ASSERT(pmr != NULL);
+ _ODP_ASSERT(num > 0);
+
+ for (i = 0; i < num; i++) {
+ int ret = odp_cls_pmr_destroy(pmr[i]);
+
+ if (ret)
+ return (i == 0) ? ret : i;
+ }
+
+ return i;
+}
+
+static odp_pmr_t cls_pmr_create(const odp_pmr_param_t *terms, int num_terms, uint16_t mark,
+ odp_cos_t src_cos, odp_cos_t dst_cos)
{
pmr_t *pmr;
int i;
odp_pmr_t id;
- int val_sz;
uint32_t loc;
cos_t *cos_src = get_cos_entry(src_cos);
cos_t *cos_dst = get_cos_entry(dst_cos);
if (NULL == cos_src || NULL == cos_dst) {
- ODP_ERR("Invalid input handle");
- return ODP_PMR_INVAL;
+ _ODP_ERR("Invalid odp_cos_t handle\n");
+ return ODP_PMR_INVALID;
}
- if (num_terms > ODP_PMRTERM_MAX) {
- ODP_ERR("no of terms greater than supported ODP_PMRTERM_MAX");
- return ODP_PMR_INVAL;
+ if (num_terms > CLS_PMRTERM_MAX) {
+ _ODP_ERR("no of terms greater than supported CLS_PMRTERM_MAX\n");
+ return ODP_PMR_INVALID;
}
- if (ODP_PMR_PER_COS_MAX == odp_atomic_load_u32(&cos_src->s.num_rule))
- return ODP_PMR_INVAL;
+ if (CLS_PMR_PER_COS_MAX == odp_atomic_load_u32(&cos_src->num_rule))
+ return ODP_PMR_INVALID;
id = alloc_pmr(&pmr);
/*if alloc_pmr is successful it returns with the acquired lock*/
- if (id == ODP_PMR_INVAL)
+ if (id == ODP_PMR_INVALID)
return id;
- pmr->s.num_pmr = num_terms;
+ pmr->num_pmr = num_terms;
for (i = 0; i < num_terms; i++) {
- val_sz = terms[i].val_sz;
- if (val_sz > ODP_PMR_TERM_BYTES_MAX) {
- pmr->s.valid = 0;
- return ODP_PMR_INVAL;
- }
- if (0 > odp_pmr_create_term(&pmr->s.pmr_term_value[i],
- &terms[i])) {
- UNLOCK(&pmr->s.lock);
- return ODP_PMR_INVAL;
+ if (pmr_create_term(&pmr->pmr_term_value[i], &terms[i])) {
+ pmr->valid = 0;
+ UNLOCK(&pmr->lock);
+ return ODP_PMR_INVALID;
}
}
- loc = odp_atomic_fetch_inc_u32(&cos_src->s.num_rule);
- cos_src->s.pmr[loc] = pmr;
- cos_src->s.linked_cos[loc] = cos_dst;
- pmr->s.src_cos = cos_src;
+ pmr->mark = mark;
- UNLOCK(&pmr->s.lock);
+ loc = odp_atomic_fetch_inc_u32(&cos_src->num_rule);
+ cos_src->pmr[loc] = pmr;
+ cos_src->linked_cos[loc] = cos_dst;
+ pmr->src_cos = cos_src;
+
+ UNLOCK(&pmr->lock);
return id;
}
+odp_pmr_t odp_cls_pmr_create(const odp_pmr_param_t *terms, int num_terms,
+ odp_cos_t src_cos, odp_cos_t dst_cos)
+{
+ return cls_pmr_create(terms, num_terms, 0, src_cos, dst_cos);
+}
+
+odp_pmr_t odp_cls_pmr_create_opt(const odp_pmr_create_opt_t *opt,
+ odp_cos_t src_cos, odp_cos_t dst_cos)
+{
+ if (opt == NULL) {
+ _ODP_ERR("Bad parameter\n");
+ return ODP_PMR_INVALID;
+ }
+
+ if (opt->mark > MAX_MARK) {
+ _ODP_ERR("Too large mark value: %" PRIu64 "\n", opt->mark);
+ return ODP_PMR_INVALID;
+ }
+
+ return cls_pmr_create(opt->terms, opt->num_terms, opt->mark, src_cos, dst_cos);
+}
+
+int odp_cls_pmr_create_multi(const odp_pmr_create_opt_t opt[], odp_cos_t src_cos[],
+ odp_cos_t dst_cos[], odp_pmr_t pmr[], int num)
+{
+ int i;
+
+ _ODP_ASSERT(opt != NULL);
+ _ODP_ASSERT(src_cos != NULL);
+ _ODP_ASSERT(dst_cos != NULL);
+ _ODP_ASSERT(pmr != NULL);
+
+ for (i = 0; i < num; i++) {
+ odp_pmr_t new_pmr = odp_cls_pmr_create_opt(&opt[i], src_cos[i], dst_cos[i]);
+
+ if (odp_unlikely(new_pmr == ODP_PMR_INVALID))
+ return (i == 0) ? -1 : i;
+
+ pmr[i] = new_pmr;
+ }
+ return i;
+}
+
int odp_cls_cos_pool_set(odp_cos_t cos_id, odp_pool_t pool)
{
cos_t *cos;
cos = get_cos_entry(cos_id);
if (cos == NULL) {
- ODP_ERR("Invalid odp_cos_t handle");
+ _ODP_ERR("Invalid odp_cos_t handle\n");
return -1;
}
- cos->s.pool = pool;
+ cos->pool = pool;
return 0;
}
@@ -595,30 +849,470 @@ odp_pool_t odp_cls_cos_pool(odp_cos_t cos_id)
cos = get_cos_entry(cos_id);
if (cos == NULL) {
- ODP_ERR("Invalid odp_cos_t handle");
+ _ODP_ERR("Invalid odp_cos_t handle\n");
return ODP_POOL_INVALID;
}
- return cos->s.pool;
+ return cos->pool;
+}
+
+static inline int verify_pmr_packet_len(odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ if (term_value->match.value == (packet_len(pkt_hdr) &
+ term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv4_proto(const _odp_ipv4hdr_t *ipv4, pmr_term_value_t *term_value)
+{
+ uint8_t proto;
+
+ proto = ipv4->proto;
+ if (term_value->match.value == (proto & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv6_next_hdr(const _odp_ipv6hdr_t *ipv6, pmr_term_value_t *term_value)
+{
+ uint8_t next_hdr;
+
+ next_hdr = ipv6->next_hdr;
+ if (term_value->match.value == (next_hdr & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv4_dscp(const _odp_ipv4hdr_t *ipv4, pmr_term_value_t *term_value)
+{
+ uint8_t dscp;
+
+ dscp = _ODP_IPV4HDR_DSCP(ipv4->tos);
+ if (term_value->match.value == (dscp & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv6_dscp(const _odp_ipv6hdr_t *ipv6, pmr_term_value_t *term_value)
+{
+ uint8_t dscp;
+
+ dscp = _ODP_IPV6HDR_DSCP(odp_be_to_cpu_32(ipv6->ver_tc_flow));
+ if (term_value->match.value == (dscp & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv4_saddr(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ipv4hdr_t *ip;
+ uint32_t ipaddr;
+
+ if (!pkt_hdr->p.input_flags.ipv4)
+ return 0;
+ ip = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+ ipaddr = ip->src_addr;
+ if (term_value->match.value == (ipaddr & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv4_daddr(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ipv4hdr_t *ip;
+ uint32_t ipaddr;
+
+ if (!pkt_hdr->p.input_flags.ipv4)
+ return 0;
+ ip = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+ ipaddr = ip->dst_addr;
+ if (term_value->match.value == (ipaddr & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_tcp_sport(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint16_t sport;
+ const _odp_tcphdr_t *tcp;
+
+ if (!pkt_hdr->p.input_flags.tcp)
+ return 0;
+ tcp = (const _odp_tcphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
+ sport = tcp->src_port;
+ if (term_value->match.value == (sport & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_tcp_dport(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint16_t dport;
+ const _odp_tcphdr_t *tcp;
+
+ if (!pkt_hdr->p.input_flags.tcp)
+ return 0;
+ tcp = (const _odp_tcphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
+ dport = tcp->dst_port;
+ if (term_value->match.value == (dport & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_udp_dport(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint16_t dport;
+ const _odp_udphdr_t *udp;
+
+ if (!pkt_hdr->p.input_flags.udp)
+ return 0;
+ udp = (const _odp_udphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
+ dport = udp->dst_port;
+ if (term_value->match.value == (dport & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_udp_sport(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint16_t sport;
+ const _odp_udphdr_t *udp;
+
+ if (!pkt_hdr->p.input_flags.udp)
+ return 0;
+ udp = (const _odp_udphdr_t *)(pkt_addr + pkt_hdr->p.l4_offset);
+ sport = udp->src_port;
+ if (term_value->match.value == (sport & term_value->match.mask))
+ return 1;
+
+ return 0;
}
-int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr)
+static inline int verify_pmr_dmac(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ uint16_t dmac[3];
+ uint16_t *mask = (uint16_t *)&term_value->match.mask;
+ uint16_t *value = (uint16_t *)&term_value->match.value;
+
+ if (!packet_hdr_has_eth(pkt_hdr))
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ memcpy(dmac, eth->dst.addr, _ODP_ETHADDR_LEN);
+ dmac[0] &= mask[0];
+ dmac[1] &= mask[1];
+ dmac[2] &= mask[2];
+
+ if (value[0] == dmac[0] && value[1] == dmac[1] && value[2] == dmac[2])
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv6_saddr(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ipv6hdr_t *ipv6;
+ uint64_t addr[2];
+
+ if (!packet_hdr_has_ipv6(pkt_hdr))
+ return 0;
+
+ ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+ memcpy(addr, ipv6->src_addr.u64, _ODP_IPV6ADDR_LEN);
+
+ addr[0] = addr[0] & term_value->match.mask_u64[0];
+ addr[1] = addr[1] & term_value->match.mask_u64[1];
+
+ if (addr[0] == term_value->match.value_u64[0] &&
+ addr[1] == term_value->match.value_u64[1])
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv6_daddr(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ipv6hdr_t *ipv6;
+ uint64_t addr[2];
+
+ if (!packet_hdr_has_ipv6(pkt_hdr))
+ return 0;
+
+ ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+ memcpy(addr, ipv6->dst_addr.u64, _ODP_IPV6ADDR_LEN);
+
+ addr[0] = addr[0] & term_value->match.mask_u64[0];
+ addr[1] = addr[1] & term_value->match.mask_u64[1];
+
+ if (addr[0] == term_value->match.value_u64[0] &&
+ addr[1] == term_value->match.value_u64[1])
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_vlan_id_0(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ const _odp_vlanhdr_t *vlan;
+ uint16_t tci;
+ uint16_t vlan_id;
+
+ if (!packet_hdr_has_eth(pkt_hdr) || !pkt_hdr->p.input_flags.vlan)
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ vlan = (const _odp_vlanhdr_t *)(eth + 1);
+ tci = vlan->tci;
+ vlan_id = tci & odp_cpu_to_be_16(0x0fff);
+
+ if (term_value->match.value == (vlan_id & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_vlan_id_x(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ const _odp_vlanhdr_t *vlan;
+ uint16_t tci;
+ uint16_t vlan_id;
+
+ if (!pkt_hdr->p.input_flags.vlan && !pkt_hdr->p.input_flags.vlan_qinq)
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ vlan = (const _odp_vlanhdr_t *)(eth + 1);
+
+ if (pkt_hdr->p.input_flags.vlan_qinq)
+ vlan++;
+
+ tci = vlan->tci;
+ vlan_id = tci & odp_cpu_to_be_16(0x0fff);
+
+ if (term_value->match.value == (vlan_id & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_vlan_pcp_0(const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ const _odp_vlanhdr_t *vlan;
+ uint16_t tci;
+ uint8_t pcp;
+
+ if (!packet_hdr_has_eth(pkt_hdr) || !pkt_hdr->p.input_flags.vlan)
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ vlan = (const _odp_vlanhdr_t *)(eth + 1);
+ tci = odp_be_to_cpu_16(vlan->tci);
+ pcp = tci >> _ODP_VLANHDR_PCP_SHIFT;
+
+ if (term_value->match.value == (pcp & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipsec_spi(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint32_t spi;
+
+ pkt_addr += pkt_hdr->p.l4_offset;
+
+ if (pkt_hdr->p.input_flags.ipsec_ah)
+ spi = ((const _odp_ahhdr_t *)pkt_addr)->spi;
+ else if (pkt_hdr->p.input_flags.ipsec_esp)
+ spi = ((const _odp_esphdr_t *)pkt_addr)->spi;
+ else
+ return 0;
+
+ if (term_value->match.value == (spi & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ld_vni(const uint8_t *pkt_addr ODP_UNUSED,
+ odp_packet_hdr_t *pkt_hdr ODP_UNUSED,
+ pmr_term_value_t *term_value ODP_UNUSED)
+{
+ ODP_UNIMPLEMENTED();
+ return 0;
+}
+
+static inline int verify_pmr_custom_frame(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint32_t i;
+ uint8_t val;
+ uint32_t offset = term_value->offset;
+ uint32_t val_sz = term_value->val_sz;
+
+ _ODP_ASSERT(val_sz <= MAX_PMR_TERM_SIZE);
+
+ if (packet_len(pkt_hdr) <= offset + val_sz)
+ return 0;
+
+ pkt_addr += offset;
+
+ for (i = 0; i < val_sz; i++) {
+ val = pkt_addr[i] & term_value->match.mask_u8[i];
+
+ if (val != term_value->match.value_u8[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int verify_pmr_custom_l3(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ uint32_t i;
+ uint8_t val;
+ uint32_t l3_offset = pkt_hdr->p.l3_offset;
+ uint32_t offset = l3_offset + term_value->offset;
+ uint32_t val_sz = term_value->val_sz;
+
+ _ODP_ASSERT(val_sz <= MAX_PMR_TERM_SIZE);
+
+ if (pkt_hdr->p.input_flags.l2 == 0 ||
+ l3_offset == ODP_PACKET_OFFSET_INVALID)
+ return 0;
+
+ if (packet_len(pkt_hdr) <= offset + val_sz)
+ return 0;
+
+ pkt_addr += offset;
+
+ for (i = 0; i < val_sz; i++) {
+ val = pkt_addr[i] & term_value->match.mask_u8[i];
+
+ if (val != term_value->match.value_u8[i])
+ return 0;
+ }
+
+ return 1;
+}
+
+static inline int verify_pmr_eth_type_0(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ uint16_t ethtype;
+
+ if (!packet_hdr_has_eth(pkt_hdr))
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ ethtype = eth->type;
+
+ if (term_value->match.value == (ethtype & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_eth_type_x(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ uint16_t ethtype;
+ const _odp_vlanhdr_t *vlan;
+
+ if (!pkt_hdr->p.input_flags.vlan && !pkt_hdr->p.input_flags.vlan_qinq)
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ vlan = (const _odp_vlanhdr_t *)(eth + 1);
+
+ if (pkt_hdr->p.input_flags.vlan_qinq)
+ vlan++;
+
+ ethtype = vlan->type;
+
+ if (term_value->match.value == (ethtype & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * This function goes through each PMR_TERM value in pmr_t structure and calls
+ * verification function for each term.Returns 1 if PMR matches or 0 otherwise.
+ */
+static int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr)
{
int pmr_failure = 0;
int num_pmr;
int i;
pmr_term_value_t *term_value;
+ const _odp_ipv4hdr_t *ipv4 = NULL;
+ const _odp_ipv6hdr_t *ipv6 = NULL;
/* Locking is not required as PMR rules for in-flight packets
delivery during a PMR change is indeterminate*/
- if (!pmr->s.valid)
+ if (!pmr->valid)
return 0;
- num_pmr = pmr->s.num_pmr;
+ num_pmr = pmr->num_pmr;
+
+ if (pkt_hdr->p.input_flags.ipv4)
+ ipv4 = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+ if (pkt_hdr->p.input_flags.ipv6)
+ ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
/* Iterate through list of PMR Term values in a pmr_t */
for (i = 0; i < num_pmr; i++) {
- term_value = &pmr->s.pmr_term_value[i];
+ term_value = &pmr->pmr_term_value[i];
switch (term_value->term) {
case ODP_PMR_LEN:
if (!verify_pmr_packet_len(pkt_hdr, term_value))
@@ -644,15 +1338,36 @@ int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr)
term_value))
pmr_failure = 1;
break;
+ case ODP_PMR_VLAN_PCP_0:
+ if (!verify_pmr_vlan_pcp_0(pkt_addr, pkt_hdr, term_value))
+ pmr_failure = 1;
+ break;
case ODP_PMR_DMAC:
if (!verify_pmr_dmac(pkt_addr, pkt_hdr,
term_value))
pmr_failure = 1;
break;
case ODP_PMR_IPPROTO:
- if (!verify_pmr_ip_proto(pkt_addr, pkt_hdr,
- term_value))
+ if (ipv4) {
+ if (!verify_pmr_ipv4_proto(ipv4, term_value))
+ pmr_failure = 1;
+ } else if (ipv6) {
+ if (!verify_pmr_ipv6_next_hdr(ipv6, term_value))
+ pmr_failure = 1;
+ } else {
pmr_failure = 1;
+ }
+ break;
+ case ODP_PMR_IP_DSCP:
+ if (ipv4) {
+ if (!verify_pmr_ipv4_dscp(ipv4, term_value))
+ pmr_failure = 1;
+ } else if (ipv6) {
+ if (!verify_pmr_ipv6_dscp(ipv6, term_value))
+ pmr_failure = 1;
+ } else {
+ pmr_failure = 1;
+ }
break;
case ODP_PMR_UDP_DPORT:
if (!verify_pmr_udp_dport(pkt_addr, pkt_hdr,
@@ -709,55 +1424,184 @@ int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr)
term_value))
pmr_failure = 1;
break;
+ case ODP_PMR_CUSTOM_L3:
+ if (!verify_pmr_custom_l3(pkt_addr, pkt_hdr,
+ term_value))
+ pmr_failure = 1;
+ break;
case ODP_PMR_INNER_HDR_OFF:
break;
+ default:
+ pmr_failure = 1;
+ break;
}
if (pmr_failure)
- return false;
+ return 0;
+ }
+ return 1;
+}
+
+static const char *format_pmr_name(odp_cls_pmr_term_t pmr_term)
+{
+ const char *name;
+
+ switch (pmr_term) {
+ case ODP_PMR_LEN:
+ name = "PMR_LEN";
+ break;
+ case ODP_PMR_ETHTYPE_0:
+ name = "PMR_ETHTYPE_0";
+ break;
+ case ODP_PMR_ETHTYPE_X:
+ name = "PMR_ETHTYPE_X";
+ break;
+ case ODP_PMR_VLAN_ID_0:
+ name = "PMR_VLAN_ID_0";
+ break;
+ case ODP_PMR_VLAN_ID_X:
+ name = "PMR_VLAN_ID_X";
+ break;
+ case ODP_PMR_VLAN_PCP_0:
+ name = "PMR_VLAN_PCP_0";
+ break;
+ case ODP_PMR_DMAC:
+ name = "PMR_DMAC";
+ break;
+ case ODP_PMR_IPPROTO:
+ name = "PMR_IPPROTO";
+ break;
+ case ODP_PMR_IP_DSCP:
+ name = "PMR_IP_DSCP";
+ break;
+ case ODP_PMR_UDP_DPORT:
+ name = "PMR_UDP_DPORT";
+ break;
+ case ODP_PMR_TCP_DPORT:
+ name = "PMR_TCP_DPORT";
+ break;
+ case ODP_PMR_UDP_SPORT:
+ name = "PMR_UDP_SPORT";
+ break;
+ case ODP_PMR_TCP_SPORT:
+ name = "PMR_TCP_SPORT";
+ break;
+ case ODP_PMR_SIP_ADDR:
+ name = "PMR_SIP_ADDR";
+ break;
+ case ODP_PMR_DIP_ADDR:
+ name = "PMR_DIP_ADDR";
+ break;
+ case ODP_PMR_SIP6_ADDR:
+ name = "PMR_SIP6_ADDR";
+ break;
+ case ODP_PMR_DIP6_ADDR:
+ name = "PMR_DIP6_ADDR";
+ break;
+ case ODP_PMR_IPSEC_SPI:
+ name = "PMR_IPSEC_SPI";
+ break;
+ case ODP_PMR_LD_VNI:
+ name = "PMR_LD_VNI";
+ break;
+ case ODP_PMR_CUSTOM_FRAME:
+ name = "PMR_CUSTOM_FRAME";
+ break;
+ case ODP_PMR_CUSTOM_L3:
+ name = "PMR_CUSTOM_L3";
+ break;
+ default:
+ name = "unknown";
+ break;
}
- odp_atomic_inc_u32(&pmr->s.count);
- return true;
+
+ return name;
}
-cos_t *match_pmr_cos(cos_t *cos, const uint8_t *pkt_addr, pmr_t *pmr,
- odp_packet_hdr_t *hdr)
+static inline void pmr_debug_print(pmr_t *pmr, cos_t *cos)
{
- cos_t *retcos;
uint32_t i;
+ const char *pmr_name;
+ const char *cos_name = cos->name;
+ uint32_t cos_index = cos->index;
+ uint32_t num_pmr = pmr->num_pmr;
+
+ if (ODP_DEBUG_PRINT == 0)
+ return;
+
+ if (num_pmr == 1) {
+ pmr_name = format_pmr_name(pmr->pmr_term_value[0].term);
+ ODP_DBG_RAW(CLS_DBG, " PMR matched: %s -> cos: %s(%u)\n", pmr_name, cos_name,
+ cos_index);
+ return;
+ }
- retcos = NULL;
+ ODP_DBG_RAW(CLS_DBG, " PMRs matched:");
+ for (i = 0; i < num_pmr; i++) {
+ pmr_name = format_pmr_name(pmr->pmr_term_value[i].term);
+ ODP_DBG_RAW(CLS_DBG, " %s", pmr_name);
+ }
- if (cos == NULL || pmr == NULL)
- return NULL;
+ ODP_DBG_RAW(CLS_DBG, " -> cos: %s(%u)\n", cos_name, cos_index);
+}
- if (!cos->s.valid)
- return NULL;
+/*
+ * Match a PMR chain with a Packet and return matching CoS
+ * This function performs a depth-first search in the CoS tree.
+ */
+static cos_t *match_pmr_cos(cos_t *cos, const uint8_t *pkt_addr, odp_packet_hdr_t *hdr)
+{
+ pmr_t *pmr_match = NULL;
- if (verify_pmr(pmr, pkt_addr, hdr)) {
- /** This gets called recursively to check all the PMRs in
- * a PMR chain */
- if (0 == odp_atomic_load_u32(&cos->s.num_rule))
- return cos;
+ while (1) {
+ uint32_t i, num_rule = odp_atomic_load_u32(&cos->num_rule);
+
+ for (i = 0; i < num_rule; i++) {
+ pmr_t *pmr = cos->pmr[i];
+ struct cos_s *linked_cos = cos->linked_cos[i];
+
+ if (odp_unlikely(!linked_cos->valid))
+ continue;
+
+ if (verify_pmr(pmr, pkt_addr, hdr)) {
+ /* PMR matched */
- for (i = 0; i < odp_atomic_load_u32(&cos->s.num_rule); i++) {
- retcos = match_pmr_cos(cos->s.linked_cos[i], pkt_addr,
- cos->s.pmr[i], hdr);
- if (!retcos)
- return cos;
+ pmr_match = pmr;
+ cos = linked_cos;
+
+ pmr_debug_print(pmr, cos);
+
+ if (cos->stats_enable)
+ odp_atomic_inc_u64(&cos->stats.packets);
+
+ break;
+ }
+ }
+
+ /* If no PMR matched, the current CoS is the best match. */
+ if (i == num_rule)
+ break;
+ }
+
+ if (pmr_match) {
+ hdr->p.input_flags.cls_mark = 0;
+ if (pmr_match->mark) {
+ hdr->p.input_flags.cls_mark = 1;
+ hdr->cls_mark = pmr_match->mark;
}
}
- return retcos;
+
+ return cos;
}
-int pktio_classifier_init(pktio_entry_t *entry)
+int _odp_pktio_classifier_init(pktio_entry_t *entry)
{
classifier_t *cls;
/* classifier lock should be acquired by the calling function */
if (entry == NULL)
return -1;
- cls = &entry->s.cls;
+ cls = &entry->cls;
cls->error_cos = NULL;
cls->default_cos = NULL;
cls->headroom = 0;
@@ -771,8 +1615,6 @@ Select a CoS for the given Packet based on pktio
This function will call all the PMRs associated with a pktio for
a given packet and will return the matched COS object.
-This function will check PMR, L2 and L3 QoS COS object associated
-with the PKTIO interface.
Returns the default cos if the packet does not match any PMR
Returns the error_cos if the packet has an error
@@ -781,38 +1623,40 @@ static inline cos_t *cls_select_cos(pktio_entry_t *entry,
const uint8_t *pkt_addr,
odp_packet_hdr_t *pkt_hdr)
{
- pmr_t *pmr;
cos_t *cos;
cos_t *default_cos;
- uint32_t i;
classifier_t *cls;
- cls = &entry->s.cls;
+ cls = &entry->cls;
default_cos = cls->default_cos;
- /* Check for lazy parse needed */
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
-
/* Return error cos for error packet */
- if (pkt_hdr->p.error_flags.all)
- return cls->error_cos;
+ if (pkt_hdr->p.flags.all.error) {
+ cos = cls->error_cos;
+ goto done;
+ }
+
/* Calls all the PMRs attached at the PKTIO level*/
- for (i = 0; i < odp_atomic_load_u32(&default_cos->s.num_rule); i++) {
- pmr = default_cos->s.pmr[i];
- cos = default_cos->s.linked_cos[i];
- cos = match_pmr_cos(cos, pkt_addr, pmr, pkt_hdr);
- if (cos)
+ if (default_cos && default_cos->valid) {
+ cos = match_pmr_cos(default_cos, pkt_addr, pkt_hdr);
+ if (cos && cos != default_cos)
return cos;
}
- cos = match_qos_cos(entry, pkt_addr, pkt_hdr);
- if (cos)
- return cos;
+ ODP_DBG_RAW(CLS_DBG, " No match -> default cos\n");
+ cos = cls->default_cos;
+
+done:
+ if (cos && cos->stats_enable)
+ odp_atomic_inc_u64(&cos->stats.packets);
- return cls->default_cos;
+ return cos;
}
+static uint32_t packet_rss_hash(odp_packet_hdr_t *pkt_hdr,
+ odp_cls_hash_proto_t hash_proto,
+ const uint8_t *base);
+
/**
* Classify packet
*
@@ -823,112 +1667,276 @@ static inline cos_t *cls_select_cos(pktio_entry_t *entry,
* @param pool[out] Packet pool
* @param pkt_hdr[out] Packet header
*
- * @retval 0 on success
- * @retval -EFAULT Bug
- * @retval -EINVAL Config error
+ * @retval 0 success
+ * @retval -1 drop packet and increment in_discards
+ * @retval 1 drop packet
*
* @note *base is not released
*/
-int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
- uint16_t pkt_len, uint32_t seg_len, odp_pool_t *pool,
- odp_packet_hdr_t *pkt_hdr)
+int _odp_cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
+ odp_pool_t *pool, odp_packet_hdr_t *pkt_hdr)
{
cos_t *cos;
+ uint32_t tbl_index;
+ uint32_t hash;
- packet_parse_reset(pkt_hdr);
- packet_set_len(pkt_hdr, pkt_len);
+ ODP_DBG_LVL(CLS_DBG, "Classify packet from %s\n", entry->full_name);
- packet_parse_common(&pkt_hdr->p, base, pkt_len, seg_len, LAYER_ALL);
cos = cls_select_cos(entry, base, pkt_hdr);
if (cos == NULL)
- return -EINVAL;
+ return -1;
+
+ if (cos->action == ODP_COS_ACTION_DROP)
+ return 1;
- if (cos->s.queue == NULL || cos->s.pool == ODP_POOL_INVALID)
- return -EFAULT;
+ *pool = cos->pool;
+ if (*pool == ODP_POOL_INVALID)
+ *pool = entry->pool;
- *pool = cos->s.pool;
pkt_hdr->p.input_flags.dst_queue = 1;
- pkt_hdr->dst_queue = cos->s.queue->s.handle;
+ pkt_hdr->cos = cos->index;
+ if (!cos->queue_group) {
+ pkt_hdr->dst_queue = cos->queue;
+ return 0;
+ }
+
+ hash = packet_rss_hash(pkt_hdr, cos->hash_proto, base);
+ /* CLS_COS_QUEUE_MAX is a power of 2 */
+ hash = hash & (CLS_COS_QUEUE_MAX - 1);
+ tbl_index = (cos->index * CLS_COS_QUEUE_MAX) + (hash %
+ cos->num_queue);
+ pkt_hdr->dst_queue = queue_grp_tbl->queue[tbl_index];
return 0;
}
-cos_t *match_qos_l3_cos(pmr_l3_cos_t *l3_cos, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr)
+static uint32_t packet_rss_hash(odp_packet_hdr_t *pkt_hdr,
+ odp_cls_hash_proto_t hash_proto,
+ const uint8_t *base)
{
- uint8_t dscp;
- cos_t *cos = NULL;
+ thash_tuple_t tuple;
const _odp_ipv4hdr_t *ipv4;
+ const _odp_udphdr_t *udp;
+ const _odp_tcphdr_t *tcp;
const _odp_ipv6hdr_t *ipv6;
+ uint32_t hash;
+ uint32_t tuple_len;
+
+ tuple_len = 0;
+ hash = 0;
+ if (pkt_hdr->p.input_flags.ipv4) {
+ if (hash_proto.ipv4) {
+ /* add ipv4 */
+ ipv4 = (const _odp_ipv4hdr_t *)(base +
+ pkt_hdr->p.l3_offset);
+ tuple.v4.src_addr = ipv4->src_addr;
+ tuple.v4.dst_addr = ipv4->dst_addr;
+ tuple_len += 2;
+ }
- if (hdr->p.input_flags.l3 && hdr->p.input_flags.ipv4) {
- ipv4 = (const _odp_ipv4hdr_t *)(pkt_addr + hdr->p.l3_offset);
- dscp = _ODP_IPV4HDR_DSCP(ipv4->tos);
- cos = l3_cos->cos[dscp];
- } else if (hdr->p.input_flags.l3 && hdr->p.input_flags.ipv6) {
- ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + hdr->p.l3_offset);
- dscp = _ODP_IPV6HDR_DSCP(ipv6->ver_tc_flow);
- cos = l3_cos->cos[dscp];
+ if (pkt_hdr->p.input_flags.tcp && hash_proto.tcp) {
+ /* add tcp */
+ tcp = (const _odp_tcphdr_t *)(base +
+ pkt_hdr->p.l4_offset);
+ tuple.v4.sport = tcp->src_port;
+ tuple.v4.dport = tcp->dst_port;
+ tuple_len += 1;
+ } else if (pkt_hdr->p.input_flags.udp && hash_proto.udp) {
+ /* add udp */
+ udp = (const _odp_udphdr_t *)(base +
+ pkt_hdr->p.l4_offset);
+ tuple.v4.sport = udp->src_port;
+ tuple.v4.dport = udp->dst_port;
+ tuple_len += 1;
+ }
+ } else if (pkt_hdr->p.input_flags.ipv6) {
+ if (hash_proto.ipv6) {
+ /* add ipv6 */
+ ipv6 = (const _odp_ipv6hdr_t *)(base +
+ pkt_hdr->p.l3_offset);
+ thash_load_ipv6_addr(ipv6, &tuple);
+ tuple_len += 8;
+ }
+ if (pkt_hdr->p.input_flags.tcp && hash_proto.tcp) {
+ tcp = (const _odp_tcphdr_t *)(base +
+ pkt_hdr->p.l4_offset);
+ tuple.v6.sport = tcp->src_port;
+ tuple.v6.dport = tcp->dst_port;
+ tuple_len += 1;
+ } else if (pkt_hdr->p.input_flags.udp && hash_proto.udp) {
+ /* add udp */
+ udp = (const _odp_udphdr_t *)(base +
+ pkt_hdr->p.l4_offset);
+ tuple.v6.sport = udp->src_port;
+ tuple.v6.dport = udp->dst_port;
+ tuple_len += 1;
+ }
}
+ if (tuple_len)
+ hash = thash_softrss((uint32_t *)&tuple,
+ tuple_len, default_rss);
+ return hash;
+}
- return cos;
+uint64_t odp_cos_to_u64(odp_cos_t hdl)
+{
+ return _odp_pri(hdl);
}
-cos_t *match_qos_l2_cos(pmr_l2_cos_t *l2_cos, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr)
+uint64_t odp_pmr_to_u64(odp_pmr_t hdl)
{
- cos_t *cos = NULL;
- const _odp_ethhdr_t *eth;
- const _odp_vlanhdr_t *vlan;
- uint16_t qos;
+ return _odp_pri(hdl);
+}
- if (packet_hdr_has_l2(hdr) && hdr->p.input_flags.vlan &&
- packet_hdr_has_eth(hdr)) {
- eth = (const _odp_ethhdr_t *)(pkt_addr + hdr->p.l2_offset);
- vlan = (const _odp_vlanhdr_t *)(eth + 1);
- qos = odp_be_to_cpu_16(vlan->tci);
- qos = ((qos >> 13) & 0x07);
- cos = l2_cos->cos[qos];
+int odp_cls_cos_stats(odp_cos_t hdl, odp_cls_cos_stats_t *stats)
+{
+ cos_t *cos = get_cos_entry(hdl);
+
+ if (odp_unlikely(cos == NULL)) {
+ _ODP_ERR("Invalid odp_cos_t handle\n");
+ return -1;
}
- return cos;
+
+ if (odp_unlikely(stats == NULL)) {
+ _ODP_ERR("Output structure NULL\n");
+ return -1;
+ }
+
+ memset(stats, 0, sizeof(*stats));
+ stats->discards = odp_atomic_load_u64(&cos->stats.discards);
+ stats->packets = odp_atomic_load_u64(&cos->stats.packets);
+
+ return 0;
}
-cos_t *match_qos_cos(pktio_entry_t *entry, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr)
+int odp_cls_queue_stats(odp_cos_t hdl, odp_queue_t queue,
+ odp_cls_queue_stats_t *stats)
{
- classifier_t *cls = &entry->s.cls;
- pmr_l2_cos_t *l2_cos;
- pmr_l3_cos_t *l3_cos;
- cos_t *cos;
+ cos_t *cos = get_cos_entry(hdl);
+ int queue_idx;
+
+ if (odp_unlikely(cos == NULL)) {
+ _ODP_ERR("Invalid odp_cos_t handle\n");
+ return -1;
+ }
- l2_cos = &cls->l2_cos_table;
- l3_cos = &cls->l3_cos_table;
+ if (odp_unlikely(stats == NULL)) {
+ _ODP_ERR("Output structure NULL\n");
+ return -1;
+ }
- if (cls->l3_precedence) {
- cos = match_qos_l3_cos(l3_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- cos = match_qos_l2_cos(l2_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- } else {
- cos = match_qos_l2_cos(l2_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- cos = match_qos_l3_cos(l3_cos, pkt_addr, hdr);
- if (cos)
- return cos;
+ queue_idx = _odp_cos_queue_idx(cos, queue);
+ if (odp_unlikely(queue_idx < 0)) {
+ _ODP_ERR("Invalid odp_queue_t handle\n");
+ return -1;
}
- return NULL;
+
+ memset(stats, 0, sizeof(odp_cls_queue_stats_t));
+ stats->discards = odp_atomic_load_u64(&cos->queue_stats[queue_idx].discards);
+ stats->packets = odp_atomic_load_u64(&cos->queue_stats[queue_idx].packets);
+
+ return 0;
}
-uint64_t odp_cos_to_u64(odp_cos_t hdl)
+static
+void print_cos_ident(cos_t *cos)
{
- return _odp_pri(hdl);
+ if (strlen(cos->name))
+ _ODP_PRINT("%s", cos->name);
+
+ _ODP_PRINT("(%" PRIu64 ")\n", odp_cos_to_u64(_odp_cos_from_ndx(cos->index)));
}
-uint64_t odp_pmr_to_u64(odp_pmr_t hdl)
+static
+void print_queue_ident(odp_queue_t q)
{
- return _odp_pri(hdl);
+ odp_queue_info_t info;
+
+ if (!odp_queue_info(q, &info) && strlen(info.name))
+ _ODP_PRINT(" %s\n", info.name);
+ else
+ _ODP_PRINT(" %" PRIx64 "\n", odp_queue_to_u64(q));
+}
+
+static
+void print_hex(const void *vp, int len)
+{
+ const uint8_t *p = vp;
+
+ for (int i = 0; i < len; i++)
+ _ODP_PRINT("%02x", *p++);
+}
+
+static
+void cls_print_cos(cos_t *cos)
+{
+ uint32_t tbl_index = cos->index * CLS_COS_QUEUE_MAX;
+ uint32_t num_rule = odp_atomic_load_u32(&cos->num_rule);
+ bool first = true;
+
+ _ODP_PRINT("cos: ");
+ print_cos_ident(cos);
+ _ODP_PRINT(" queues:\n");
+
+ if (!cos->queue_group) {
+ print_queue_ident(cos->queue);
+ } else {
+ for (uint32_t i = 0; i < cos->num_queue; i++)
+ print_queue_ident(queue_grp_tbl->queue[tbl_index + i]);
+ }
+
+ for (uint32_t j = 0; j < num_rule; j++) {
+ pmr_t *pmr = cos->pmr[j];
+
+ LOCK(&pmr->lock);
+ for (uint32_t k = 0; k < pmr->num_pmr; k++) {
+ pmr_term_value_t *v = &pmr->pmr_term_value[k];
+
+ if (first)
+ _ODP_PRINT(" rules: ");
+ else
+ _ODP_PRINT(" ");
+
+ first = false;
+
+ _ODP_PRINT("%s: ", format_pmr_name(v->term));
+
+ if (v->term == ODP_PMR_CUSTOM_FRAME ||
+ v->term == ODP_PMR_CUSTOM_L3)
+ _ODP_PRINT("offset:%" PRIu32 " ", v->offset);
+
+ if (v->range_term) {
+ _ODP_PRINT("<range>");
+ } else {
+ print_hex(v->match.value_u8, v->val_sz);
+ _ODP_PRINT(" ");
+ print_hex(v->match.mask_u8, v->val_sz);
+ }
+
+ _ODP_PRINT(" -> ");
+
+ if (pmr->mark)
+ _ODP_PRINT("mark:%" PRIu16 " ", pmr->mark);
+
+ print_cos_ident(cos->linked_cos[j]);
+ }
+ UNLOCK(&pmr->lock);
+ }
+}
+
+void odp_cls_print_all(void)
+{
+ _ODP_PRINT("\n"
+ "Classifier info\n"
+ "---------------\n\n");
+
+ for (uint32_t i = 0; i < CLS_COS_MAX_ENTRY; i++) {
+ cos_t *cos = &cos_tbl->cos_entry[i];
+
+ LOCK(&cos->lock);
+ if (cos->valid)
+ cls_print_cos(cos);
+ UNLOCK(&cos->lock);
+ }
}
diff --git a/platform/linux-generic/odp_comp.c b/platform/linux-generic/odp_comp.c
new file mode 100644
index 000000000..13999457f
--- /dev/null
+++ b/platform/linux-generic/odp_comp.c
@@ -0,0 +1,680 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <string.h>
+
+#include <odp/api/comp.h>
+#include <odp/api/event.h>
+#include <odp/api/packet.h>
+#include <odp/api/queue.h>
+
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_packet_internal.h>
+
+#include "miniz/miniz.h"
+
+#define MAX_SESSIONS 16
+#define MEM_LEVEL 8
+
+/** Forward declaration of session structure */
+typedef struct odp_comp_generic_session odp_comp_generic_session_t;
+
+#define to_gen_session(s) ((odp_comp_generic_session_t *)(intptr_t)(s))
+
+/**
+ * Algorithm handler function prototype
+ */
+typedef
+int (*comp_func_t)(odp_packet_t pkt_in,
+ odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *params,
+ odp_comp_generic_session_t *session);
+
+/**
+ * Per session data structure
+ */
+struct odp_comp_generic_session {
+ struct odp_comp_generic_session *next;
+ odp_comp_session_param_t params;
+ struct {
+ comp_func_t func;
+ mz_stream stream;
+ union {
+ tdefl_compressor comp;
+ inflate_state inflate;
+ } data;
+ } comp;
+};
+
+typedef struct odp_comp_global_s {
+ odp_spinlock_t lock;
+ odp_shm_t global_shm;
+ odp_comp_generic_session_t *free;
+ odp_comp_generic_session_t sessions[MAX_SESSIONS];
+} odp_comp_global_t;
+
+static odp_comp_global_t *global;
+
+static
+odp_comp_generic_session_t *alloc_session(void)
+{
+ odp_comp_generic_session_t *session = NULL;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ return session;
+}
+
+static
+void free_session(odp_comp_generic_session_t *session)
+{
+ odp_spinlock_lock(&global->lock);
+ session->next = global->free;
+ global->free = session;
+ odp_spinlock_unlock(&global->lock);
+}
+
+static int
+null_comp_routine(odp_packet_t pkt_in ODP_UNUSED,
+ odp_packet_t pkt_out ODP_UNUSED,
+ const odp_comp_packet_op_param_t *params ODP_UNUSED,
+ odp_comp_generic_session_t *session ODP_UNUSED)
+{
+ return 0;
+}
+
+static
+odp_comp_packet_result_t *get_op_result_from_packet(odp_packet_t pkt)
+{
+ return &(packet_hdr(pkt)->comp_op_result);
+}
+
+void odp_comp_session_param_init(odp_comp_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_comp_session_param_t));
+}
+
+static void process_input(odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *params,
+ odp_comp_generic_session_t *session,
+ odp_comp_packet_result_t *result,
+ odp_bool_t sync)
+{
+ mz_streamp streamp = &session->comp.stream;
+ int ret = 0;
+ uint8_t *out_data = NULL;
+ uint32_t out_len = 0;
+ uint32_t written = 0;
+ uint32_t start = 0;
+ uint32_t output_end = 0;
+ uint32_t space_avail = 0;
+ odp_packet_seg_t cur_seg = ODP_PACKET_SEG_INVALID;
+ odp_packet_data_range_t *res_data_range;
+ int finish = 0;
+
+ res_data_range = &result->output_data_range;
+
+ start = res_data_range->offset + res_data_range->length;
+ space_avail = params->out_data_range.length -
+ res_data_range->length;
+ output_end = space_avail + start;
+
+ do {
+ out_data =
+ odp_packet_offset(pkt_out, start, &out_len, &cur_seg);
+ _ODP_DBG("out_data %p seg_data_ptr %p out_len %d seg %p\n",
+ (void *)out_data, odp_packet_seg_data(pkt_out, cur_seg),
+ out_len, (void *)cur_seg);
+
+ if (0 == out_len) {
+ /* there are no more segments */
+ _ODP_DBG("Ran out of space. (streamp->avail_out) %d\n",
+ (streamp->avail_out));
+ result->status = ODP_COMP_STATUS_OUT_OF_SPACE_TERM;
+ break;
+ }
+
+ /* if segment length is greater than user given available
+ * space, then adjust output len
+ */
+ if (out_len > space_avail)
+ out_len = space_avail;
+
+ streamp->next_out = out_data;
+ streamp->avail_out = out_len;
+
+ _ODP_DBG("next_in %p, avail_in %d next_out %p avail_out %d, sync %d\n",
+ (const void *)streamp->next_in, streamp->avail_in,
+ (void *)streamp->next_out, streamp->avail_out, sync);
+
+ if (session->params.op == ODP_COMP_OP_COMPRESS)
+ ret = mz_deflate(streamp,
+ sync ? MZ_FINISH : MZ_NO_FLUSH);
+ else
+ ret = mz_inflate(streamp, MZ_NO_FLUSH);
+
+ _ODP_DBG("ret %d streamp->avail_out %d avail_in %d\n",
+ ret, streamp->avail_out, streamp->avail_in);
+
+ out_len = out_len - streamp->avail_out;
+ written += out_len;
+
+ /* increase next offset by amount of data written into
+ * output buffer and decrease available space by amount
+ * of space consumed.
+ */
+ start += out_len;
+ space_avail -= out_len;
+
+ _ODP_DBG("ret %d,written %d\n", ret, out_len);
+
+ if (ret == MZ_STREAM_END) {
+ if (session->params.op == ODP_COMP_OP_COMPRESS) {
+ /* required to continue processing of next pkt
+ with same stream */
+ mz_deflateReset(streamp);
+ } else {
+ mz_inflateReset(streamp);
+ }
+ finish = 1;
+ break;
+ }
+ if ((ret != MZ_BUF_ERROR) && (ret != MZ_OK)) {
+ _ODP_DBG("deflate failed. Err %s,ret %d"
+ "(streamp->avail_out) %d\n",
+ streamp->msg, ret, (streamp->avail_out));
+ result->status = ODP_COMP_STATUS_FAILURE;
+ return;
+ }
+ } while (!streamp->avail_out && (start < output_end));
+
+ res_data_range->length += written;
+
+ if ((!finish) && !(streamp->avail_out)) {
+ /* if write stopped as output exhausted,
+ return OUT_OF_SPACE_ERR
+ */
+ _ODP_DBG("Ran out of space. (out avail) %d,"
+ "to process %d\n", streamp->avail_out,
+ streamp->avail_in);
+ result->status = ODP_COMP_STATUS_OUT_OF_SPACE_TERM;
+ } else {
+ result->status = ODP_COMP_STATUS_SUCCESS;
+ }
+}
+
+/*
+ * Deflate routine to perform deflate based compression/decompression
+ *
+ * NOTE: Current implementation does not support in-place
+ */
+static int deflate_comp(odp_packet_t pkt_in,
+ odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *params,
+ odp_comp_generic_session_t *session)
+{
+ mz_streamp streamp;
+ uint8_t *data = NULL;
+ uint32_t len;
+ uint32_t in_len = 0;
+ uint32_t read = 0;
+ uint32_t consumed = 0;
+ odp_bool_t sync = false;
+ odp_packet_seg_t in_seg = ODP_PACKET_SEG_INVALID;
+ odp_comp_packet_result_t *result = get_op_result_from_packet(pkt_out);
+
+ _ODP_ASSERT(session != NULL);
+ _ODP_ASSERT(params != NULL);
+ _ODP_ASSERT(pkt_in != ODP_PACKET_INVALID);
+ _ODP_ASSERT(pkt_out != ODP_PACKET_INVALID);
+
+ streamp = &session->comp.stream;
+
+ /* Adjust pointer for beginning of area to compress.
+ Since we need to pass phys cont area so we need to deal with segments
+ here as packet inherently are segmented and segments may not be
+ contiguous.
+ */
+
+ read = params->in_data_range.offset;
+ len = params->in_data_range.length;
+
+ while (read < (len + params->in_data_range.offset)) {
+ data = odp_packet_offset(pkt_in,
+ read,
+ &in_len,
+ &in_seg);
+ _ODP_DBG("data %p in_len %d seg %p len %d\n",
+ (void *)data, in_len, (void *)in_seg, len);
+
+ if (in_len > len)
+ in_len = len;
+
+ /* tracker for data consumed from input */
+ consumed += in_len;
+ streamp->next_in = data;
+ streamp->avail_in = in_len;
+
+ if (consumed >= len) {
+ _ODP_DBG("This is last chunk\n");
+ sync = true;
+ }
+
+ process_input(pkt_out, params, session, result, sync);
+
+ if (result->status != ODP_COMP_STATUS_SUCCESS)
+ return -1;
+
+ read += in_len;
+ }
+
+ _ODP_DBG("Read %d Written %d\n", read, result->output_data_range.length);
+
+ return 0;
+}
+
+static void *comp_zalloc(void *opaque, size_t items, size_t size)
+{
+ odp_comp_generic_session_t *session = opaque;
+
+ if (items * size > sizeof(session->comp.data))
+ return NULL;
+ else
+ return &session->comp.data;
+}
+
+static void comp_zfree(void *opaque ODP_UNUSED, void *data ODP_UNUSED)
+{
+ /* Do nothing */
+}
+
+static int deflate_init(odp_comp_generic_session_t *session)
+{
+ mz_streamp streamp = &session->comp.stream;
+ uint32_t level;
+ uint32_t strategy;
+ int32_t window_bits;
+ uint32_t cl;
+ odp_comp_huffman_code_t cc;
+
+ /* optional check as such may not required */
+ _ODP_ASSERT(strcmp(mz_version(), MZ_VERSION) == 0);
+
+ memset(&session->comp.stream, 0, sizeof(mz_stream));
+
+ /* let zlib handles required memory allocations
+ we will identify if there any memory allocations issues that
+ may come b/w odp and zlib allocated memory
+ */
+ streamp->zalloc = comp_zalloc;
+ streamp->zfree = comp_zfree;
+ streamp->opaque = session;
+
+ switch (session->params.comp_algo) {
+ case ODP_COMP_ALG_ZLIB:
+ cl = session->params.alg_param.zlib.deflate.comp_level;
+ cc = session->params.alg_param.zlib.deflate.huffman_code;
+ window_bits = MZ_DEFAULT_WINDOW_BITS;
+ break;
+ case ODP_COMP_ALG_DEFLATE:
+ cl = session->params.alg_param.deflate.comp_level;
+ cc = session->params.alg_param.deflate.huffman_code;
+ window_bits = -MZ_DEFAULT_WINDOW_BITS;
+ break;
+ default:
+ return -1;
+ }
+
+ level = MZ_DEFAULT_COMPRESSION; /* Z_BEST_COMPRESSION; */
+ if (cl)
+ level = cl;
+
+ switch (cc) {
+ case ODP_COMP_HUFFMAN_DEFAULT:
+ case ODP_COMP_HUFFMAN_DYNAMIC:/*Z_HUFFMAN_ONLY */
+ strategy = MZ_DEFAULT_STRATEGY;
+ break;
+ case ODP_COMP_HUFFMAN_FIXED:
+ strategy = MZ_FIXED;
+ break;
+ default:
+ return -1;
+ }
+ _ODP_DBG(" level %d strategy %d window %d\n", level, strategy, window_bits);
+
+ if (ODP_COMP_OP_COMPRESS == session->params.op) {
+ if (mz_deflateInit2(streamp, level, MZ_DEFLATED, window_bits,
+ MEM_LEVEL, strategy) != MZ_OK) {
+ _ODP_DBG("Err in Deflate Initialization %s\n", streamp->msg);
+ return -1;
+ }
+ } else {
+ if (mz_inflateInit2(streamp, window_bits) != MZ_OK) {
+ _ODP_DBG("Err in Inflate Initialization %s\n", streamp->msg);
+ return -1;
+ }
+ }
+
+ session->comp.func = deflate_comp;
+
+ return 0;
+}
+
+static int term_def(odp_comp_generic_session_t *session)
+{
+ int rc = 0;
+ mz_streamp streamp = &session->comp.stream;
+
+ if (ODP_COMP_OP_COMPRESS == session->params.op) {
+ rc = mz_deflateEnd(streamp);
+
+ if (rc != MZ_OK) {
+ _ODP_ERR("deflateEnd failed. Err %s,rc %d\n", streamp->msg, rc);
+ /* we choose to just return 0 with error info */
+ }
+ } else {
+ rc = mz_inflateEnd(streamp);
+ if (rc != MZ_OK) {
+ _ODP_ERR("inflateEnd failed. Err %s\n", streamp->msg);
+ /* we choose to just return 0 with error info */
+ }
+ }
+
+ return 0;
+}
+
+odp_comp_session_t
+odp_comp_session_create(const odp_comp_session_param_t *params)
+{
+ odp_comp_generic_session_t *session;
+ int rc;
+
+ /* Allocate memory for this session */
+ session = alloc_session();
+ if (NULL == session)
+ return ODP_COMP_SESSION_INVALID;
+
+ /* Copy stuff over */
+ memcpy(&session->params, params, sizeof(*params));
+
+ /* Process based on compress */
+ switch (params->comp_algo) {
+ case ODP_COMP_ALG_NULL:
+ session->comp.func = null_comp_routine;
+ break;
+ case ODP_COMP_ALG_DEFLATE:
+ case ODP_COMP_ALG_ZLIB:
+ rc = deflate_init(session);
+ if (rc < 0)
+ goto cleanup;
+ break;
+ default:
+ rc = -1;
+ goto cleanup;
+ }
+
+ return (odp_comp_session_t)session;
+
+cleanup:
+ free_session(session);
+
+ return ODP_COMP_SESSION_INVALID;
+}
+
+int odp_comp_session_destroy(odp_comp_session_t session)
+{
+ odp_comp_generic_session_t *generic;
+ int32_t rc = 0;
+
+ generic = (odp_comp_generic_session_t *)(intptr_t)session;
+
+ switch (generic->params.comp_algo) {
+ case ODP_COMP_ALG_DEFLATE:
+ case ODP_COMP_ALG_ZLIB:
+ rc = term_def(generic);
+ break;
+ default:
+ break;
+ }
+ if (rc < 0) {
+ _ODP_ERR("Compression Unit could not be terminated\n");
+ return -1;
+ }
+
+ memset(generic, 0, sizeof(*generic));
+ free_session(generic);
+ return 0;
+}
+
+int odp_comp_capability(odp_comp_capability_t *capa)
+{
+ if (NULL == capa)
+ return -1;
+
+ /* Initialize comp capability structure */
+ memset(capa, 0, sizeof(odp_comp_capability_t));
+
+ capa->comp_algos.bit.null = 1;
+ capa->comp_algos.bit.deflate = 1;
+ capa->comp_algos.bit.zlib = 1;
+ capa->hash_algos.bit.none = 1;
+ capa->sync = ODP_SUPPORT_YES;
+ capa->async = ODP_SUPPORT_YES;
+ capa->max_sessions = MAX_SESSIONS;
+ return 0;
+}
+
+int
+odp_comp_alg_capability(odp_comp_alg_t comp,
+ odp_comp_alg_capability_t *capa)
+{
+ switch (comp) {
+ case ODP_COMP_ALG_ZLIB:
+ capa->hash_algo.all_bits = 0;
+ capa->hash_algo.bit.none = 1;
+ capa->max_level = MZ_BEST_COMPRESSION;
+ capa->compression_ratio = 50;
+ return 0;
+ case ODP_COMP_ALG_DEFLATE:
+ capa->hash_algo.all_bits = 0;
+ capa->hash_algo.bit.none = 1;
+ capa->max_level = MZ_BEST_COMPRESSION;
+ capa->compression_ratio = 50;
+ return 0;
+ default:
+ /* Error unsupported enum */
+ return -1;
+ }
+ return -1;
+}
+
+int
+odp_comp_hash_alg_capability(odp_comp_hash_alg_t hash,
+ odp_comp_hash_alg_capability_t *capa)
+{
+ (void)capa;
+ switch (hash) {
+ case ODP_COMP_HASH_ALG_NONE:
+ capa[0].digest_len = 0;
+ return 0;
+ default:
+ return -1;
+ }
+ return -1;
+}
+
+static int _odp_comp_single(odp_packet_t pkt_in, odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *param)
+{
+ odp_comp_generic_session_t *session;
+ odp_comp_packet_result_t *result;
+ int rc;
+
+ session = to_gen_session(param->session);
+ _ODP_ASSERT(session);
+ _ODP_ASSERT(pkt_in != ODP_PACKET_INVALID);
+ _ODP_ASSERT(pkt_out != ODP_PACKET_INVALID);
+
+ result = get_op_result_from_packet(pkt_out);
+ _ODP_ASSERT(result);
+
+ result->pkt_in = pkt_in;
+ result->output_data_range.offset = param->out_data_range.offset;
+ result->output_data_range.length = 0;
+
+ packet_subtype_set(pkt_out, ODP_EVENT_PACKET_COMP);
+
+ rc = session->comp.func(pkt_in, pkt_out, param, session);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int odp_comp_op(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+ int num_pkt, const odp_comp_packet_op_param_t param[])
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < num_pkt; i++) {
+ rc = _odp_comp_single(pkt_in[i], pkt_out[i], &param[i]);
+ if (rc < 0)
+ break;
+ }
+
+ return i;
+}
+
+int odp_comp_op_enq(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+ int num_pkt, const odp_comp_packet_op_param_t param[])
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < num_pkt; i++) {
+ odp_event_t event;
+ odp_comp_generic_session_t *session;
+
+ rc = _odp_comp_single(pkt_in[i], pkt_out[i], &param[i]);
+ if (rc < 0)
+ break;
+
+ event = odp_packet_to_event(pkt_out[i]);
+ session = to_gen_session(param[i].session);
+ if (odp_queue_enq(session->params.compl_queue, event)) {
+ odp_event_free(event);
+ break;
+ }
+ }
+
+ return i;
+}
+
+int odp_comp_result(odp_comp_packet_result_t *result,
+ odp_packet_t packet)
+{
+ odp_comp_packet_result_t *op_result;
+
+ _ODP_ASSERT(odp_event_subtype(odp_packet_to_event(packet))
+ == ODP_EVENT_PACKET_COMP);
+
+ op_result = get_op_result_from_packet(packet);
+ _ODP_DBG("Copy operational result back\n");
+ memcpy(result, op_result, sizeof(*result));
+ return 0;
+}
+
+int _odp_comp_init_global(void)
+{
+ size_t mem_size;
+ odp_shm_t shm;
+ int idx;
+
+ if (odp_global_ro.disable.compress) {
+ _ODP_PRINT("\nODP compress is DISABLED\n");
+ return 0;
+ }
+
+ /* Calculate the memory size we need */
+ mem_size = sizeof(*global);
+
+ /* Allocate our globally shared memory */
+ shm = odp_shm_reserve("_odp_comp_global", mem_size,
+ ODP_CACHE_LINE_SIZE, 0);
+
+ global = odp_shm_addr(shm);
+
+ /* Clear it out */
+ memset(global, 0, mem_size);
+ global->global_shm = shm;
+
+ /* Initialize free list and lock */
+ for (idx = 0; idx < MAX_SESSIONS; idx++) {
+ global->sessions[idx].next = global->free;
+ global->free = &global->sessions[idx];
+ }
+ odp_spinlock_init(&global->lock);
+
+ return 0;
+}
+
+int _odp_comp_term_global(void)
+{
+ int rc = 0;
+ int ret;
+ int count = 0;
+ odp_comp_generic_session_t *session;
+
+ if (odp_global_ro.disable.compress)
+ return 0;
+
+ for (session = global->free; session != NULL; session = session->next)
+ count++;
+
+ if (count != MAX_SESSIONS) {
+ _ODP_ERR("comp sessions still active\n");
+ rc = -1;
+ }
+
+ ret = odp_shm_free(global->global_shm);
+ if (ret < 0) {
+ _ODP_ERR("shm free failed for comp_pool\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+odp_packet_t odp_comp_packet_from_event(odp_event_t ev)
+{
+ /* This check not mandated by the API specification */
+ _ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+ _ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_COMP);
+
+ return odp_packet_from_event(ev);
+}
+
+odp_event_t odp_comp_packet_to_event(odp_packet_t pkt)
+{
+ return odp_packet_to_event(pkt);
+}
+
+/** Get printable format of odp_comp_session_t */
+uint64_t odp_comp_session_to_u64(odp_comp_session_t hdl)
+{
+ return _odp_pri(hdl);
+}
diff --git a/platform/linux-generic/odp_cpu.c b/platform/linux-generic/odp_cpu.c
deleted file mode 100644
index 282defd44..000000000
--- a/platform/linux-generic/odp_cpu.c
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/cpu.h>
-#include <odp/api/hints.h>
-
-uint64_t odp_cpu_cycles_diff(uint64_t c2, uint64_t c1)
-{
- if (odp_likely(c2 >= c1))
- return c2 - c1;
-
- return c2 + (odp_cpu_cycles_max() - c1) + 1;
-}
diff --git a/platform/linux-generic/odp_cpu_api.c b/platform/linux-generic/odp_cpu_api.c
new file mode 100644
index 000000000..503fbd4c7
--- /dev/null
+++ b/platform/linux-generic/odp_cpu_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/cpu.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/cpu_inlines.h>
diff --git a/platform/linux-generic/odp_cpumask.c b/platform/linux-generic/odp_cpumask.c
index 64559a6d5..7d7575f51 100644
--- a/platform/linux-generic/odp_cpumask.c
+++ b/platform/linux-generic/odp_cpumask.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,11 +7,12 @@
#include <odp_posix_extensions.h>
#include <sched.h>
-#include <pthread.h>
#include <odp/api/cpumask.h>
#include <odp/api/init.h>
#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
#include <stdlib.h>
#include <string.h>
@@ -227,33 +228,30 @@ int odp_cpumask_next(const odp_cpumask_t *mask, int cpu)
*/
static int get_available_cpus(void)
{
- int cpu_idnum;
cpu_set_t cpuset;
- int ret;
+ int cpu, ret;
- /* Clear the global cpumasks for control and worker CPUs */
- odp_cpumask_zero(&odp_global_data.control_cpus);
- odp_cpumask_zero(&odp_global_data.worker_cpus);
+ odp_cpumask_zero(&odp_global_ro.all_cpus);
CPU_ZERO(&cpuset);
ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
if (ret < 0) {
- ODP_ERR("Failed to get cpu affinity");
+ _ODP_ERR("Failed to get cpu affinity");
return -1;
}
- for (cpu_idnum = 0; cpu_idnum < CPU_SETSIZE - 1; cpu_idnum++) {
- if (CPU_ISSET(cpu_idnum, &cpuset)) {
- odp_global_data.num_cpus_installed++;
- /* Add the CPU to our default cpumasks */
- odp_cpumask_set(&odp_global_data.control_cpus,
- (int)cpu_idnum);
- odp_cpumask_set(&odp_global_data.worker_cpus,
- (int)cpu_idnum);
+ for (cpu = 0; cpu < CPU_SETSIZE - 1; cpu++) {
+ if (CPU_ISSET(cpu, &cpuset)) {
+ odp_global_ro.num_cpus_installed++;
+ odp_cpumask_set(&odp_global_ro.all_cpus, cpu);
}
}
+ /* Initialize control and worker masks with all CPUs */
+ odp_cpumask_copy(&odp_global_ro.control_cpus, &odp_global_ro.all_cpus);
+ odp_cpumask_copy(&odp_global_ro.worker_cpus, &odp_global_ro.all_cpus);
+
return 0;
}
@@ -265,8 +263,8 @@ static int get_available_cpus(void)
*/
static void init_default_control_cpumask(int worker_cpus_default)
{
- odp_cpumask_t *control_mask = &odp_global_data.control_cpus;
- odp_cpumask_t *worker_mask = &odp_global_data.worker_cpus;
+ odp_cpumask_t *control_mask = &odp_global_ro.control_cpus;
+ odp_cpumask_t *worker_mask = &odp_global_ro.worker_cpus;
int i;
/* (Bits for all available CPUs are SET in control cpumask) */
@@ -277,7 +275,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* If only one or two CPUs installed, use CPU 0 for control.
* Otherwise leave it for the kernel and start with CPU 1.
*/
- if (odp_global_data.num_cpus_installed < 3) {
+ if (odp_global_ro.num_cpus_installed < 3) {
/*
* If only two CPUS, use CPU 0 for control and
* use CPU 1 for workers.
@@ -290,7 +288,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* reserve remaining CPUs for workers
*/
odp_cpumask_clr(control_mask, 0);
- for (i = 2; i < odp_global_data.num_cpus_installed; i++)
+ for (i = 2; i < odp_global_ro.num_cpus_installed; i++)
if (odp_cpumask_isset(worker_mask, i))
odp_cpumask_clr(control_mask, i);
}
@@ -299,7 +297,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* The worker cpumask was specified so first ensure
* the control cpumask does not overlap any worker CPUs
*/
- for (i = 0; i < odp_global_data.num_cpus_installed; i++)
+ for (i = 0; i < odp_global_ro.num_cpus_installed; i++)
if (odp_cpumask_isset(worker_mask, i))
odp_cpumask_clr(control_mask, i);
@@ -307,7 +305,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* If only one or two CPUs installed,
* ensure availability of CPU 0 for control threads
*/
- if (odp_global_data.num_cpus_installed < 3) {
+ if (odp_global_ro.num_cpus_installed < 3) {
odp_cpumask_set(control_mask, 0);
odp_cpumask_clr(control_mask, 1);
} else {
@@ -333,8 +331,8 @@ static void init_default_control_cpumask(int worker_cpus_default)
*/
static void init_default_worker_cpumask(int control_cpus_default)
{
- odp_cpumask_t *control_mask = &odp_global_data.control_cpus;
- odp_cpumask_t *worker_mask = &odp_global_data.worker_cpus;
+ odp_cpumask_t *control_mask = &odp_global_ro.control_cpus;
+ odp_cpumask_t *worker_mask = &odp_global_ro.worker_cpus;
int i;
/* (Bits for all available CPUs are SET in worker cpumask) */
@@ -344,10 +342,10 @@ static void init_default_worker_cpumask(int control_cpus_default)
* The control cpumask was also unspecified...
* CPU 0 is only used for workers on uniprocessor systems
*/
- if (odp_global_data.num_cpus_installed > 1)
+ if (odp_global_ro.num_cpus_installed > 1)
odp_cpumask_clr(worker_mask, 0);
- if (odp_global_data.num_cpus_installed > 2)
+ if (odp_global_ro.num_cpus_installed > 2)
/*
* If three or more CPUs, reserve CPU 0 for kernel,
* reserve CPU 1 for control, and
@@ -359,7 +357,7 @@ static void init_default_worker_cpumask(int control_cpus_default)
* The control cpumask was specified so first ensure
* the worker cpumask does not overlap any control CPUs
*/
- for (i = 0; i < odp_global_data.num_cpus_installed; i++)
+ for (i = 0; i < odp_global_ro.num_cpus_installed; i++)
if (odp_cpumask_isset(control_mask, i))
odp_cpumask_clr(worker_mask, i);
@@ -367,7 +365,7 @@ static void init_default_worker_cpumask(int control_cpus_default)
* If only one CPU installed, use CPU 0 for workers
* even though it is used for control as well.
*/
- if (odp_global_data.num_cpus_installed < 2)
+ if (odp_global_ro.num_cpus_installed < 2)
odp_cpumask_set(worker_mask, 0);
else
odp_cpumask_clr(worker_mask, 0);
@@ -380,10 +378,10 @@ static void init_default_worker_cpumask(int control_cpus_default)
* It also allows the default cpumasks to be overridden by
* externally specified cpumasks passed in as initialization parameters.
*/
-int odp_cpumask_init_global(const odp_init_t *params)
+int _odp_cpumask_init_global(const odp_init_t *params)
{
- odp_cpumask_t *control_mask = &odp_global_data.control_cpus;
- odp_cpumask_t *worker_mask = &odp_global_data.worker_cpus;
+ odp_cpumask_t *control_mask = &odp_global_ro.control_cpus;
+ odp_cpumask_t *worker_mask = &odp_global_ro.worker_cpus;
odp_cpumask_t check_mask;
int control_cpus_default = 1;
int worker_cpus_default = 1;
@@ -449,7 +447,7 @@ int odp_cpumask_init_global(const odp_init_t *params)
}
}
-int odp_cpumask_term_global(void)
+int _odp_cpumask_term_global(void)
{
return 0;
}
diff --git a/platform/linux-generic/odp_cpumask_task.c b/platform/linux-generic/odp_cpumask_task.c
index 10885ce6b..a579b2e7e 100644
--- a/platform/linux-generic/odp_cpumask_task.c
+++ b/platform/linux-generic/odp_cpumask_task.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -6,83 +7,79 @@
#include <odp_posix_extensions.h>
-#include <sched.h>
-#include <pthread.h>
-
#include <odp/api/cpumask.h>
+
#include <odp_debug_internal.h>
+#include <odp_global_data.h>
-int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
+#include <sched.h>
+
+int odp_cpumask_default_worker(odp_cpumask_t *mask, int max_num)
{
- odp_cpumask_t overlap;
- int cpu, i;
-
- /*
- * If no user supplied number or it's too large, then attempt
- * to use all CPUs
- */
- cpu = odp_cpumask_count(&odp_global_data.worker_cpus);
- if (0 == num || cpu < num)
- num = cpu;
-
- /* build the mask, allocating down from highest numbered CPU */
+ int num, cpu, ret;
+ odp_cpumask_t *worker_cpus = &odp_global_ro.worker_cpus;
+
+ num = odp_cpumask_count(worker_cpus);
+
+ if (max_num && num > max_num)
+ num = max_num;
+
+ if (mask == NULL)
+ return num;
+
odp_cpumask_zero(mask);
- for (cpu = 0, i = CPU_SETSIZE - 1; i >= 0 && cpu < num; --i) {
- if (odp_cpumask_isset(&odp_global_data.worker_cpus, i)) {
- odp_cpumask_set(mask, i);
- cpu++;
+
+ /* Allocate down from the highest numbered CPU */
+ cpu = odp_cpumask_last(worker_cpus);
+ ret = num;
+
+ while (cpu >= 0 && num > 0) {
+ if (odp_cpumask_isset(worker_cpus, cpu)) {
+ odp_cpumask_set(mask, cpu);
+ num--;
}
- }
- odp_cpumask_and(&overlap, mask, &odp_global_data.control_cpus);
- if (odp_cpumask_count(&overlap))
- ODP_DBG("\n\tWorker CPUs overlap with control CPUs...\n"
- "\tthis will likely have a performance impact on the worker threads.\n");
+ cpu--;
+ }
- return cpu;
+ return ret;
}
-int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
+int odp_cpumask_default_control(odp_cpumask_t *mask, int max_num)
{
- odp_cpumask_t overlap;
- int cpu, i;
-
- /*
- * If no user supplied number then default to one control CPU.
- */
- if (0 == num) {
- num = 1;
- } else {
- /*
- * If user supplied number is too large, then attempt
- * to use all installed control CPUs
- */
- cpu = odp_cpumask_count(&odp_global_data.control_cpus);
- if (cpu < num)
- num = cpu;
- }
+ int num, cpu, last, ret;
+ odp_cpumask_t *control_cpus = &odp_global_ro.control_cpus;
+
+ num = odp_cpumask_count(control_cpus);
+
+ if (max_num && num > max_num)
+ num = max_num;
+
+ if (mask == NULL)
+ return num;
- /* build the mask, allocating upwards from lowest numbered CPU */
odp_cpumask_zero(mask);
- for (cpu = 0, i = 0; i < CPU_SETSIZE && cpu < num; i++) {
- if (odp_cpumask_isset(&odp_global_data.control_cpus, i)) {
- odp_cpumask_set(mask, i);
- cpu++;
+
+ /* Allocate up from the lowest numbered CPU */
+ cpu = odp_cpumask_first(control_cpus);
+ last = odp_cpumask_last(control_cpus);
+ ret = num;
+
+ while (cpu <= last && num > 0) {
+ if (odp_cpumask_isset(control_cpus, cpu)) {
+ odp_cpumask_set(mask, cpu);
+ num--;
}
- }
- odp_cpumask_and(&overlap, mask, &odp_global_data.worker_cpus);
- if (odp_cpumask_count(&overlap))
- ODP_DBG("\n\tControl CPUs overlap with worker CPUs...\n"
- "\tthis will likely have a performance impact on the worker threads.\n");
+ cpu++;
+ }
- return cpu;
+ return ret;
}
int odp_cpumask_all_available(odp_cpumask_t *mask)
{
- odp_cpumask_or(mask, &odp_global_data.worker_cpus,
- &odp_global_data.control_cpus);
+ odp_cpumask_copy(mask, &odp_global_ro.all_cpus);
- return odp_cpumask_count(mask);
+ return odp_global_ro.num_cpus_installed;
}
diff --git a/platform/linux-generic/odp_crypto.c b/platform/linux-generic/odp_crypto.c
deleted file mode 100644
index 54b222fd2..000000000
--- a/platform/linux-generic/odp_crypto.c
+++ /dev/null
@@ -1,1132 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_posix_extensions.h>
-#include <odp/api/crypto.h>
-#include <odp_internal.h>
-#include <odp/api/atomic.h>
-#include <odp/api/spinlock.h>
-#include <odp/api/sync.h>
-#include <odp/api/debug.h>
-#include <odp/api/align.h>
-#include <odp/api/shared_memory.h>
-#include <odp_crypto_internal.h>
-#include <odp_debug_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/random.h>
-#include <odp_packet_internal.h>
-
-#include <string.h>
-#include <stdlib.h>
-
-#include <openssl/des.h>
-#include <openssl/rand.h>
-#include <openssl/hmac.h>
-#include <openssl/evp.h>
-
-#define MAX_SESSIONS 32
-
-/*
- * Cipher algorithm capabilities
- *
- * Keep sorted: first by key length, then by IV length
- */
-static const odp_crypto_cipher_capability_t cipher_capa_des[] = {
-{.key_len = 24, .iv_len = 8} };
-
-static const odp_crypto_cipher_capability_t cipher_capa_trides_cbc[] = {
-{.key_len = 24, .iv_len = 8} };
-
-static const odp_crypto_cipher_capability_t cipher_capa_aes_cbc[] = {
-{.key_len = 16, .iv_len = 16} };
-
-static const odp_crypto_cipher_capability_t cipher_capa_aes_gcm[] = {
-{.key_len = 16, .iv_len = 12} };
-
-/*
- * Authentication algorithm capabilities
- *
- * Keep sorted: first by digest length, then by key length
- */
-static const odp_crypto_auth_capability_t auth_capa_md5_hmac[] = {
-{.digest_len = 12, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
-
-static const odp_crypto_auth_capability_t auth_capa_sha256_hmac[] = {
-{.digest_len = 16, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
-
-static const odp_crypto_auth_capability_t auth_capa_aes_gcm[] = {
-{.digest_len = 16, .key_len = 0, .aad_len = {.min = 8, .max = 12, .inc = 4} } };
-
-typedef struct odp_crypto_global_s odp_crypto_global_t;
-
-struct odp_crypto_global_s {
- odp_spinlock_t lock;
- odp_ticketlock_t **openssl_lock;
- odp_crypto_generic_session_t *free;
- odp_crypto_generic_session_t sessions[0];
-};
-
-static odp_crypto_global_t *global;
-
-static
-odp_crypto_generic_op_result_t *get_op_result_from_event(odp_event_t ev)
-{
- odp_packet_hdr_t *hdr = odp_packet_hdr(odp_packet_from_event(ev));
-
- return &hdr->op_result;
-}
-
-static
-odp_crypto_generic_session_t *alloc_session(void)
-{
- odp_crypto_generic_session_t *session = NULL;
-
- odp_spinlock_lock(&global->lock);
- session = global->free;
- if (session)
- global->free = session->next;
- odp_spinlock_unlock(&global->lock);
-
- return session;
-}
-
-static
-void free_session(odp_crypto_generic_session_t *session)
-{
- odp_spinlock_lock(&global->lock);
- session->next = global->free;
- global->free = session;
- odp_spinlock_unlock(&global->lock);
-}
-
-static odp_crypto_alg_err_t
-null_crypto_routine(odp_crypto_op_param_t *param ODP_UNUSED,
- odp_crypto_generic_session_t *session ODP_UNUSED)
-{
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t md5_gen(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint8_t *icv = data;
- uint32_t len = param->auth_range.length;
- uint8_t hash[EVP_MAX_MD_SIZE];
-
- /* Adjust pointer for beginning of area to auth */
- data += param->auth_range.offset;
- icv += param->hash_result_offset;
-
- /* Hash it */
- HMAC(EVP_md5(),
- session->auth.data.md5.key,
- 16,
- data,
- len,
- hash,
- NULL);
-
- /* Copy to the output location */
- memcpy(icv, hash, session->auth.data.md5.bytes);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t md5_check(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint8_t *icv = data;
- uint32_t len = param->auth_range.length;
- uint32_t bytes = session->auth.data.md5.bytes;
- uint8_t hash_in[EVP_MAX_MD_SIZE];
- uint8_t hash_out[EVP_MAX_MD_SIZE];
-
- /* Adjust pointer for beginning of area to auth */
- data += param->auth_range.offset;
- icv += param->hash_result_offset;
-
- /* Copy current value out and clear it before authentication */
- memset(hash_in, 0, sizeof(hash_in));
- memcpy(hash_in, icv, bytes);
- memset(icv, 0, bytes);
- memset(hash_out, 0, sizeof(hash_out));
-
- /* Hash it */
- HMAC(EVP_md5(),
- session->auth.data.md5.key,
- 16,
- data,
- len,
- hash_out,
- NULL);
-
- /* Verify match */
- if (0 != memcmp(hash_in, hash_out, bytes))
- return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
-
- /* Matched */
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t sha256_gen(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint8_t *icv = data;
- uint32_t len = param->auth_range.length;
- uint8_t hash[EVP_MAX_MD_SIZE];
-
- /* Adjust pointer for beginning of area to auth */
- data += param->auth_range.offset;
- icv += param->hash_result_offset;
-
- /* Hash it */
- HMAC(EVP_sha256(),
- session->auth.data.sha256.key,
- 32,
- data,
- len,
- hash,
- NULL);
-
- /* Copy to the output location */
- memcpy(icv, hash, session->auth.data.sha256.bytes);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t sha256_check(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint8_t *icv = data;
- uint32_t len = param->auth_range.length;
- uint32_t bytes = session->auth.data.sha256.bytes;
- uint8_t hash_in[EVP_MAX_MD_SIZE];
- uint8_t hash_out[EVP_MAX_MD_SIZE];
-
- /* Adjust pointer for beginning of area to auth */
- data += param->auth_range.offset;
- icv += param->hash_result_offset;
-
- /* Copy current value out and clear it before authentication */
- memset(hash_in, 0, sizeof(hash_in));
- memcpy(hash_in, icv, bytes);
- memset(icv, 0, bytes);
- memset(hash_out, 0, sizeof(hash_out));
-
- /* Hash it */
- HMAC(EVP_sha256(),
- session->auth.data.sha256.key,
- 32,
- data,
- len,
- hash_out,
- NULL);
-
- /* Verify match */
- if (0 != memcmp(hash_in, hash_out, bytes))
- return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
-
- /* Matched */
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t aes_encrypt(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint32_t len = param->cipher_range.length;
- unsigned char iv_enc[AES_BLOCK_SIZE];
- void *iv_ptr;
-
- if (param->override_iv_ptr)
- iv_ptr = param->override_iv_ptr;
- else if (session->p.iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-
- /*
- * Create a copy of the IV. The DES library modifies IV
- * and if we are processing packets on parallel threads
- * we could get corruption.
- */
- memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
-
- /* Adjust pointer for beginning of area to cipher */
- data += param->cipher_range.offset;
- /* Encrypt it */
- AES_cbc_encrypt(data, data, len, &session->cipher.data.aes.key,
- iv_enc, AES_ENCRYPT);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t aes_decrypt(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint32_t len = param->cipher_range.length;
- unsigned char iv_enc[AES_BLOCK_SIZE];
- void *iv_ptr;
-
- if (param->override_iv_ptr)
- iv_ptr = param->override_iv_ptr;
- else if (session->p.iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-
- /*
- * Create a copy of the IV. The DES library modifies IV
- * and if we are processing packets on parallel threads
- * we could get corruption.
- */
- memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
-
- /* Adjust pointer for beginning of area to cipher */
- data += param->cipher_range.offset;
- /* Encrypt it */
- AES_cbc_encrypt(data, data, len, &session->cipher.data.aes.key,
- iv_enc, AES_DECRYPT);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static int process_aes_param(odp_crypto_generic_session_t *session)
-{
- /* Verify IV len is either 0 or 16 */
- if (!((0 == session->p.iv.length) || (16 == session->p.iv.length)))
- return -1;
-
- /* Set function */
- if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
- session->cipher.func = aes_encrypt;
- AES_set_encrypt_key(session->p.cipher_key.data, 128,
- &session->cipher.data.aes.key);
- } else {
- session->cipher.func = aes_decrypt;
- AES_set_decrypt_key(session->p.cipher_key.data, 128,
- &session->cipher.data.aes.key);
- }
-
- return 0;
-}
-
-static
-odp_crypto_alg_err_t aes_gcm_encrypt(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint32_t plain_len = param->cipher_range.length;
- uint8_t *aad_head = data + param->auth_range.offset;
- uint8_t *aad_tail = data + param->cipher_range.offset +
- param->cipher_range.length;
- uint32_t auth_len = param->auth_range.length;
- unsigned char iv_enc[AES_BLOCK_SIZE];
- void *iv_ptr;
- uint8_t *tag = data + param->hash_result_offset;
-
- if (param->override_iv_ptr)
- iv_ptr = param->override_iv_ptr;
- else if (session->p.iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-
- /* All cipher data must be part of the authentication */
- if (param->auth_range.offset > param->cipher_range.offset ||
- param->auth_range.offset + auth_len <
- param->cipher_range.offset + plain_len)
- return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
-
- /*
- * Create a copy of the IV. The DES library modifies IV
- * and if we are processing packets on parallel threads
- * we could get corruption.
- */
- memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
-
- /* Adjust pointer for beginning of area to cipher/auth */
- uint8_t *plaindata = data + param->cipher_range.offset;
-
- /* Encrypt it */
- EVP_CIPHER_CTX *ctx = session->cipher.data.aes_gcm.ctx;
- int cipher_len = 0;
-
- EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, iv_enc);
-
- /* Authenticate header data (if any) without encrypting them */
- if (aad_head < plaindata) {
- EVP_EncryptUpdate(ctx, NULL, &cipher_len,
- aad_head, plaindata - aad_head);
- }
-
- EVP_EncryptUpdate(ctx, plaindata, &cipher_len,
- plaindata, plain_len);
- cipher_len = plain_len;
-
- /* Authenticate footer data (if any) without encrypting them */
- if (aad_head + auth_len > plaindata + plain_len) {
- EVP_EncryptUpdate(ctx, NULL, NULL, aad_tail,
- auth_len - (aad_tail - aad_head));
- }
-
- EVP_EncryptFinal_ex(ctx, plaindata + cipher_len, &cipher_len);
- EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, 16, tag);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t aes_gcm_decrypt(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint32_t cipher_len = param->cipher_range.length;
- uint8_t *aad_head = data + param->auth_range.offset;
- uint8_t *aad_tail = data + param->cipher_range.offset +
- param->cipher_range.length;
- uint32_t auth_len = param->auth_range.length;
- unsigned char iv_enc[AES_BLOCK_SIZE];
- void *iv_ptr;
- uint8_t *tag = data + param->hash_result_offset;
-
- if (param->override_iv_ptr)
- iv_ptr = param->override_iv_ptr;
- else if (session->p.iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-
- /* All cipher data must be part of the authentication */
- if (param->auth_range.offset > param->cipher_range.offset ||
- param->auth_range.offset + auth_len <
- param->cipher_range.offset + cipher_len)
- return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
-
- /*
- * Create a copy of the IV. The DES library modifies IV
- * and if we are processing packets on parallel threads
- * we could get corruption.
- */
- memcpy(iv_enc, iv_ptr, AES_BLOCK_SIZE);
-
- /* Adjust pointer for beginning of area to cipher/auth */
- uint8_t *cipherdata = data + param->cipher_range.offset;
- /* Encrypt it */
- EVP_CIPHER_CTX *ctx = session->cipher.data.aes_gcm.ctx;
- int plain_len = 0;
-
- EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, iv_enc);
-
- EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG, 16, tag);
-
- /* Authenticate header data (if any) without encrypting them */
- if (aad_head < cipherdata) {
- EVP_DecryptUpdate(ctx, NULL, &plain_len,
- aad_head, cipherdata - aad_head);
- }
-
- EVP_DecryptUpdate(ctx, cipherdata, &plain_len,
- cipherdata, cipher_len);
- plain_len = cipher_len;
-
- /* Authenticate footer data (if any) without encrypting them */
- if (aad_head + auth_len > cipherdata + cipher_len) {
- EVP_DecryptUpdate(ctx, NULL, NULL, aad_tail,
- auth_len - (aad_tail - aad_head));
- }
-
- if (EVP_DecryptFinal_ex(ctx, cipherdata + cipher_len, &plain_len) < 0)
- return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static int process_aes_gcm_param(odp_crypto_generic_session_t *session)
-{
- /* Verify Key len is 16 */
- if (session->p.cipher_key.length != 16)
- return -1;
-
- /* Set function */
- EVP_CIPHER_CTX *ctx =
- session->cipher.data.aes_gcm.ctx = EVP_CIPHER_CTX_new();
-
- if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
- session->cipher.func = aes_gcm_encrypt;
- EVP_EncryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, NULL, NULL);
- } else {
- session->cipher.func = aes_gcm_decrypt;
- EVP_DecryptInit_ex(ctx, EVP_aes_128_gcm(), NULL, NULL, NULL);
- }
-
- EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN,
- session->p.iv.length, NULL);
- if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
- EVP_EncryptInit_ex(ctx, NULL, NULL,
- session->p.cipher_key.data, NULL);
- } else {
- EVP_DecryptInit_ex(ctx, NULL, NULL,
- session->p.cipher_key.data, NULL);
- }
-
- return 0;
-}
-
-static
-odp_crypto_alg_err_t des_encrypt(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint32_t len = param->cipher_range.length;
- DES_cblock iv;
- void *iv_ptr;
-
- if (param->override_iv_ptr)
- iv_ptr = param->override_iv_ptr;
- else if (session->p.iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-
- /*
- * Create a copy of the IV. The DES library modifies IV
- * and if we are processing packets on parallel threads
- * we could get corruption.
- */
- memcpy(iv, iv_ptr, sizeof(iv));
-
- /* Adjust pointer for beginning of area to cipher */
- data += param->cipher_range.offset;
- /* Encrypt it */
- DES_ede3_cbc_encrypt(data,
- data,
- len,
- &session->cipher.data.des.ks1,
- &session->cipher.data.des.ks2,
- &session->cipher.data.des.ks3,
- &iv,
- 1);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static
-odp_crypto_alg_err_t des_decrypt(odp_crypto_op_param_t *param,
- odp_crypto_generic_session_t *session)
-{
- uint8_t *data = odp_packet_data(param->out_pkt);
- uint32_t len = param->cipher_range.length;
- DES_cblock iv;
- void *iv_ptr;
-
- if (param->override_iv_ptr)
- iv_ptr = param->override_iv_ptr;
- else if (session->p.iv.data)
- iv_ptr = session->cipher.iv_data;
- else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
-
- /*
- * Create a copy of the IV. The DES library modifies IV
- * and if we are processing packets on parallel threads
- * we could get corruption.
- */
- memcpy(iv, iv_ptr, sizeof(iv));
-
- /* Adjust pointer for beginning of area to cipher */
- data += param->cipher_range.offset;
-
- /* Decrypt it */
- DES_ede3_cbc_encrypt(data,
- data,
- len,
- &session->cipher.data.des.ks1,
- &session->cipher.data.des.ks2,
- &session->cipher.data.des.ks3,
- &iv,
- 0);
-
- return ODP_CRYPTO_ALG_ERR_NONE;
-}
-
-static int process_des_param(odp_crypto_generic_session_t *session)
-{
- /* Verify IV len is either 0 or 8 */
- if (!((0 == session->p.iv.length) || (8 == session->p.iv.length)))
- return -1;
-
- /* Set function */
- if (ODP_CRYPTO_OP_ENCODE == session->p.op)
- session->cipher.func = des_encrypt;
- else
- session->cipher.func = des_decrypt;
-
- /* Convert keys */
- DES_set_key((DES_cblock *)&session->p.cipher_key.data[0],
- &session->cipher.data.des.ks1);
- DES_set_key((DES_cblock *)&session->p.cipher_key.data[8],
- &session->cipher.data.des.ks2);
- DES_set_key((DES_cblock *)&session->p.cipher_key.data[16],
- &session->cipher.data.des.ks3);
-
- return 0;
-}
-
-static int process_md5_param(odp_crypto_generic_session_t *session,
- uint32_t bits)
-{
- /* Set function */
- if (ODP_CRYPTO_OP_ENCODE == session->p.op)
- session->auth.func = md5_gen;
- else
- session->auth.func = md5_check;
-
- /* Number of valid bytes */
- session->auth.data.md5.bytes = bits / 8;
-
- /* Convert keys */
- memcpy(session->auth.data.md5.key, session->p.auth_key.data, 16);
-
- return 0;
-}
-
-static int process_sha256_param(odp_crypto_generic_session_t *session,
- uint32_t bits)
-{
- /* Set function */
- if (ODP_CRYPTO_OP_ENCODE == session->p.op)
- session->auth.func = sha256_gen;
- else
- session->auth.func = sha256_check;
-
- /* Number of valid bytes */
- session->auth.data.sha256.bytes = bits / 8;
-
- /* Convert keys */
- memcpy(session->auth.data.sha256.key, session->p.auth_key.data, 32);
-
- return 0;
-}
-
-int odp_crypto_capability(odp_crypto_capability_t *capa)
-{
- if (NULL == capa)
- return -1;
-
- /* Initialize crypto capability structure */
- memset(capa, 0, sizeof(odp_crypto_capability_t));
-
- capa->ciphers.bit.null = 1;
- capa->ciphers.bit.des = 1;
- capa->ciphers.bit.trides_cbc = 1;
- capa->ciphers.bit.aes_cbc = 1;
- capa->ciphers.bit.aes_gcm = 1;
-
- capa->auths.bit.null = 1;
- capa->auths.bit.md5_hmac = 1;
- capa->auths.bit.sha256_hmac = 1;
- capa->auths.bit.aes_gcm = 1;
-
- /* Deprecated */
- capa->ciphers.bit.aes128_cbc = 1;
- capa->ciphers.bit.aes128_gcm = 1;
- capa->auths.bit.md5_96 = 1;
- capa->auths.bit.sha256_128 = 1;
- capa->auths.bit.aes128_gcm = 1;
-
- capa->max_sessions = MAX_SESSIONS;
-
- return 0;
-}
-
-int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
- odp_crypto_cipher_capability_t dst[],
- int num_copy)
-{
- const odp_crypto_cipher_capability_t *src;
- int num;
- int size = sizeof(odp_crypto_cipher_capability_t);
-
- switch (cipher) {
- case ODP_CIPHER_ALG_NULL:
- src = NULL;
- num = 0;
- break;
- case ODP_CIPHER_ALG_DES:
- src = cipher_capa_des;
- num = sizeof(cipher_capa_des) / size;
- break;
- case ODP_CIPHER_ALG_3DES_CBC:
- src = cipher_capa_trides_cbc;
- num = sizeof(cipher_capa_trides_cbc) / size;
- break;
- case ODP_CIPHER_ALG_AES_CBC:
- src = cipher_capa_aes_cbc;
- num = sizeof(cipher_capa_aes_cbc) / size;
- break;
- case ODP_CIPHER_ALG_AES_GCM:
- src = cipher_capa_aes_gcm;
- num = sizeof(cipher_capa_aes_gcm) / size;
- break;
- default:
- return -1;
- }
-
- if (num < num_copy)
- num_copy = num;
-
- memcpy(dst, src, num_copy * size);
-
- return num;
-}
-
-int odp_crypto_auth_capability(odp_auth_alg_t auth,
- odp_crypto_auth_capability_t dst[], int num_copy)
-{
- const odp_crypto_auth_capability_t *src;
- int num;
- int size = sizeof(odp_crypto_auth_capability_t);
-
- switch (auth) {
- case ODP_AUTH_ALG_NULL:
- src = NULL;
- num = 0;
- break;
- case ODP_AUTH_ALG_MD5_HMAC:
- src = auth_capa_md5_hmac;
- num = sizeof(auth_capa_md5_hmac) / size;
- break;
- case ODP_AUTH_ALG_SHA256_HMAC:
- src = auth_capa_sha256_hmac;
- num = sizeof(auth_capa_sha256_hmac) / size;
- break;
- case ODP_AUTH_ALG_AES_GCM:
- src = auth_capa_aes_gcm;
- num = sizeof(auth_capa_aes_gcm) / size;
- break;
- default:
- return -1;
- }
-
- if (num < num_copy)
- num_copy = num;
-
- memcpy(dst, src, num_copy * size);
-
- return num;
-}
-
-int
-odp_crypto_session_create(odp_crypto_session_param_t *param,
- odp_crypto_session_t *session_out,
- odp_crypto_ses_create_err_t *status)
-{
- int rc;
- odp_crypto_generic_session_t *session;
-
- /* Default to successful result */
- *status = ODP_CRYPTO_SES_CREATE_ERR_NONE;
-
- /* Allocate memory for this session */
- session = alloc_session();
- if (NULL == session) {
- *status = ODP_CRYPTO_SES_CREATE_ERR_ENOMEM;
- return -1;
- }
-
- /* Copy parameters */
- session->p = *param;
-
- /* Copy IV data */
- if (session->p.iv.data) {
- if (session->p.iv.length > MAX_IV_LEN) {
- ODP_DBG("Maximum IV length exceeded\n");
- return -1;
- }
-
- memcpy(session->cipher.iv_data, session->p.iv.data,
- session->p.iv.length);
- }
-
- /* Derive order */
- if (ODP_CRYPTO_OP_ENCODE == param->op)
- session->do_cipher_first = param->auth_cipher_text;
- else
- session->do_cipher_first = !param->auth_cipher_text;
-
- /* Process based on cipher */
- switch (param->cipher_alg) {
- case ODP_CIPHER_ALG_NULL:
- session->cipher.func = null_crypto_routine;
- rc = 0;
- break;
- case ODP_CIPHER_ALG_DES:
- case ODP_CIPHER_ALG_3DES_CBC:
- rc = process_des_param(session);
- break;
- case ODP_CIPHER_ALG_AES_CBC:
- /* deprecated */
- case ODP_CIPHER_ALG_AES128_CBC:
- rc = process_aes_param(session);
- break;
- case ODP_CIPHER_ALG_AES_GCM:
- /* deprecated */
- case ODP_CIPHER_ALG_AES128_GCM:
- /* AES-GCM requires to do both auth and
- * cipher at the same time */
- if (param->auth_alg == ODP_AUTH_ALG_AES_GCM ||
- param->auth_alg == ODP_AUTH_ALG_AES128_GCM)
- rc = process_aes_gcm_param(session);
- else
- rc = -1;
- break;
- default:
- rc = -1;
- }
-
- /* Check result */
- if (rc) {
- *status = ODP_CRYPTO_SES_CREATE_ERR_INV_CIPHER;
- return -1;
- }
-
- /* Process based on auth */
- switch (param->auth_alg) {
- case ODP_AUTH_ALG_NULL:
- session->auth.func = null_crypto_routine;
- rc = 0;
- break;
- case ODP_AUTH_ALG_MD5_HMAC:
- /* deprecated */
- case ODP_AUTH_ALG_MD5_96:
- rc = process_md5_param(session, 96);
- break;
- case ODP_AUTH_ALG_SHA256_HMAC:
- /* deprecated */
- case ODP_AUTH_ALG_SHA256_128:
- rc = process_sha256_param(session, 128);
- break;
- case ODP_AUTH_ALG_AES_GCM:
- /* deprecated */
- case ODP_AUTH_ALG_AES128_GCM:
- /* AES-GCM requires to do both auth and
- * cipher at the same time */
- if (param->cipher_alg == ODP_CIPHER_ALG_AES_GCM ||
- param->cipher_alg == ODP_CIPHER_ALG_AES128_GCM) {
- session->auth.func = null_crypto_routine;
- rc = 0;
- } else {
- rc = -1;
- }
- break;
- default:
- rc = -1;
- }
-
- /* Check result */
- if (rc) {
- *status = ODP_CRYPTO_SES_CREATE_ERR_INV_AUTH;
- return -1;
- }
-
- /* We're happy */
- *session_out = (intptr_t)session;
- return 0;
-}
-
-int odp_crypto_session_destroy(odp_crypto_session_t session)
-{
- odp_crypto_generic_session_t *generic;
-
- generic = (odp_crypto_generic_session_t *)(intptr_t)session;
- if (generic->p.cipher_alg == ODP_CIPHER_ALG_AES128_GCM ||
- generic->p.cipher_alg == ODP_CIPHER_ALG_AES_GCM)
- EVP_CIPHER_CTX_free(generic->cipher.data.aes_gcm.ctx);
- memset(generic, 0, sizeof(*generic));
- free_session(generic);
- return 0;
-}
-
-int
-odp_crypto_operation(odp_crypto_op_param_t *param,
- odp_bool_t *posted,
- odp_crypto_op_result_t *result)
-{
- odp_crypto_alg_err_t rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
- odp_crypto_alg_err_t rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
- odp_crypto_generic_session_t *session;
- odp_crypto_op_result_t local_result;
-
- session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
-
- /* Resolve output buffer */
- if (ODP_PACKET_INVALID == param->out_pkt &&
- ODP_POOL_INVALID != session->p.output_pool)
- param->out_pkt = odp_packet_alloc(session->p.output_pool,
- odp_packet_len(param->pkt));
-
- if (odp_unlikely(ODP_PACKET_INVALID == param->out_pkt)) {
- ODP_DBG("Alloc failed.\n");
- return -1;
- }
-
- if (param->pkt != param->out_pkt) {
- (void)odp_packet_copy_from_pkt(param->out_pkt,
- 0,
- param->pkt,
- 0,
- odp_packet_len(param->pkt));
- _odp_packet_copy_md_to_packet(param->pkt, param->out_pkt);
- odp_packet_free(param->pkt);
- param->pkt = ODP_PACKET_INVALID;
- }
-
- /* Invoke the functions */
- if (session->do_cipher_first) {
- rc_cipher = session->cipher.func(param, session);
- rc_auth = session->auth.func(param, session);
- } else {
- rc_auth = session->auth.func(param, session);
- rc_cipher = session->cipher.func(param, session);
- }
-
- /* Fill in result */
- local_result.ctx = param->ctx;
- local_result.pkt = param->out_pkt;
- local_result.cipher_status.alg_err = rc_cipher;
- local_result.cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
- local_result.auth_status.alg_err = rc_auth;
- local_result.auth_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
- local_result.ok =
- (rc_cipher == ODP_CRYPTO_ALG_ERR_NONE) &&
- (rc_auth == ODP_CRYPTO_ALG_ERR_NONE);
-
- /* If specified during creation post event to completion queue */
- if (ODP_QUEUE_INVALID != session->p.compl_queue) {
- odp_event_t completion_event;
- odp_crypto_generic_op_result_t *op_result;
-
- /* Linux generic will always use packet for completion event */
- completion_event = odp_packet_to_event(param->out_pkt);
- _odp_buffer_event_type_set(
- odp_buffer_from_event(completion_event),
- ODP_EVENT_CRYPTO_COMPL);
- /* Asynchronous, build result (no HW so no errors) and send it*/
- op_result = get_op_result_from_event(completion_event);
- op_result->magic = OP_RESULT_MAGIC;
- op_result->result = local_result;
- if (odp_queue_enq(session->p.compl_queue, completion_event)) {
- odp_event_free(completion_event);
- return -1;
- }
-
- /* Indicate to caller operation was async */
- *posted = 1;
- } else {
- /* Synchronous, simply return results */
- if (!result)
- return -1;
- *result = local_result;
-
- /* Indicate to caller operation was sync */
- *posted = 0;
- }
- return 0;
-}
-
-static void ODP_UNUSED openssl_thread_id(CRYPTO_THREADID ODP_UNUSED *id)
-{
- CRYPTO_THREADID_set_numeric(id, odp_thread_id());
-}
-
-static void ODP_UNUSED openssl_lock(int mode, int n,
- const char *file ODP_UNUSED,
- int line ODP_UNUSED)
-{
- if (mode & CRYPTO_LOCK)
- odp_ticketlock_lock((odp_ticketlock_t *)
- &global->openssl_lock[n]);
- else
- odp_ticketlock_unlock((odp_ticketlock_t *)
- &global->openssl_lock[n]);
-}
-
-int
-odp_crypto_init_global(void)
-{
- size_t mem_size;
- odp_shm_t shm;
- int idx;
- int nlocks = CRYPTO_num_locks();
-
- /* Calculate the memory size we need */
- mem_size = sizeof(*global);
- mem_size += (MAX_SESSIONS * sizeof(odp_crypto_generic_session_t));
- mem_size += nlocks * sizeof(odp_ticketlock_t);
-
- /* Allocate our globally shared memory */
- shm = odp_shm_reserve("crypto_pool", mem_size,
- ODP_CACHE_LINE_SIZE, 0);
-
- global = odp_shm_addr(shm);
-
- /* Clear it out */
- memset(global, 0, mem_size);
-
- /* Initialize free list and lock */
- for (idx = 0; idx < MAX_SESSIONS; idx++) {
- global->sessions[idx].next = global->free;
- global->free = &global->sessions[idx];
- }
- odp_spinlock_init(&global->lock);
-
- if (nlocks > 0) {
- global->openssl_lock =
- (odp_ticketlock_t **)&global->sessions[MAX_SESSIONS];
-
- for (idx = 0; idx < nlocks; idx++)
- odp_ticketlock_init((odp_ticketlock_t *)
- &global->openssl_lock[idx]);
-
- CRYPTO_THREADID_set_callback(openssl_thread_id);
- CRYPTO_set_locking_callback(openssl_lock);
- }
-
- return 0;
-}
-
-int odp_crypto_term_global(void)
-{
- int rc = 0;
- int ret;
- int count = 0;
- odp_crypto_generic_session_t *session;
-
- for (session = global->free; session != NULL; session = session->next)
- count++;
- if (count != MAX_SESSIONS) {
- ODP_ERR("crypto sessions still active\n");
- rc = -1;
- }
-
- CRYPTO_set_locking_callback(NULL);
- CRYPTO_set_id_callback(NULL);
-
- ret = odp_shm_free(odp_shm_lookup("crypto_pool"));
- if (ret < 0) {
- ODP_ERR("shm free failed for crypto_pool\n");
- rc = -1;
- }
-
- return rc;
-}
-
-odp_random_kind_t odp_random_max_kind(void)
-{
- return ODP_RANDOM_CRYPTO;
-}
-
-int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
-{
- int rc;
-
- switch (kind) {
- case ODP_RANDOM_BASIC:
- RAND_pseudo_bytes(buf, len);
- return len;
-
- case ODP_RANDOM_CRYPTO:
- rc = RAND_bytes(buf, len);
- return (1 == rc) ? (int)len /*success*/: -1 /*failure*/;
-
- case ODP_RANDOM_TRUE:
- default:
- return -1;
- }
-}
-
-int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
-{
- union {
- uint32_t rand_word;
- uint8_t rand_byte[4];
- } u;
- uint32_t i = 0, j;
- uint32_t seed32 = (*seed) & 0xffffffff;
-
- while (i < len) {
- u.rand_word = rand_r(&seed32);
-
- for (j = 0; j < 4 && i < len; j++, i++)
- *buf++ = u.rand_byte[j];
- }
-
- *seed = seed32;
- return len;
-}
-
-odp_crypto_compl_t odp_crypto_compl_from_event(odp_event_t ev)
-{
- /* This check not mandated by the API specification */
- if (odp_event_type(ev) != ODP_EVENT_CRYPTO_COMPL)
- ODP_ABORT("Event not a crypto completion");
- return (odp_crypto_compl_t)ev;
-}
-
-odp_event_t odp_crypto_compl_to_event(odp_crypto_compl_t completion_event)
-{
- return (odp_event_t)completion_event;
-}
-
-void
-odp_crypto_compl_result(odp_crypto_compl_t completion_event,
- odp_crypto_op_result_t *result)
-{
- odp_event_t ev = odp_crypto_compl_to_event(completion_event);
- odp_crypto_generic_op_result_t *op_result;
-
- op_result = get_op_result_from_event(ev);
-
- if (OP_RESULT_MAGIC != op_result->magic)
- ODP_ABORT();
-
- memcpy(result, &op_result->result, sizeof(*result));
-}
-
-void
-odp_crypto_compl_free(odp_crypto_compl_t completion_event)
-{
- _odp_buffer_event_type_set(
- odp_buffer_from_event((odp_event_t)completion_event),
- ODP_EVENT_PACKET);
-}
-
-void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
-{
- memset(param, 0, sizeof(odp_crypto_session_param_t));
-}
-
-uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
-{
- return (uint64_t)hdl;
-}
-
-uint64_t odp_crypto_compl_to_u64(odp_crypto_compl_t hdl)
-{
- return _odp_pri(hdl);
-}
diff --git a/platform/linux-generic/odp_crypto_api.c b/platform/linux-generic/odp_crypto_api.c
new file mode 100644
index 000000000..646472e2e
--- /dev/null
+++ b/platform/linux-generic/odp_crypto_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/crypto.h>
+
+/* Non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/crypto_inlines.h>
diff --git a/platform/linux-generic/odp_crypto_ipsecmb.c b/platform/linux-generic/odp_crypto_ipsecmb.c
new file mode 100644
index 000000000..e58844098
--- /dev/null
+++ b/platform/linux-generic/odp_crypto_ipsecmb.c
@@ -0,0 +1,895 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, ARM Limited
+ * Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+#include <odp/autoheader_internal.h>
+
+#include <odp/api/crypto.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/debug.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/hints.h>
+
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/thread_inlines.h>
+
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_packet_internal.h>
+
+#include <ipsec-mb.h>
+
/* Maximum number of concurrently allocated crypto sessions */
#define MAX_SESSIONS 4000
/* Length in bytes */
#define IPSEC_MB_CRYPTO_MAX_CIPHER_KEY_LENGTH 32
#define IPSEC_MB_CRYPTO_MAX_AUTH_KEY_LENGTH 32
#define IPSEC_MB_CRYPTO_MAX_DATA_LENGTH 65536
#define ZUC_DIGEST_LENGTH 4
#define SNOW3G_DIGEST_LENGTH 4

/* Name of the shared memory block holding the global session table */
#define ODP_CRYPTO_IPSEC_MB_SHM_NAME "_odp_crypto_ipsecmb"
/*
 * Cipher algorithm capabilities
 *
 * Keep sorted: first by key length, then by IV length
 */
/* Null cipher: pass-through, no key or IV */
static const odp_crypto_cipher_capability_t cipher_capa_null[] = {
{.key_len = 0, .iv_len = 0} };

/* ZUC-128 (16B key, 16B IV) and ZUC-256 (32B key, 25B IV) */
static const odp_crypto_cipher_capability_t cipher_capa_zuc_eea3[] = {
{.key_len = 16, .iv_len = 16},
{.key_len = 32, .iv_len = 25} };

static const odp_crypto_cipher_capability_t cipher_capa_snow3g_uea2[] = {
{.key_len = 16, .iv_len = 16} };

/*
 * Authentication algorithm capabilities
 *
 * Keep sorted: first by digest length, then by key length
 */
static const odp_crypto_auth_capability_t auth_capa_null[] = {
{.digest_len = 0, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };

/* ZUC EIA3: 4-byte tag for both 128- and 256-bit key variants */
static const odp_crypto_auth_capability_t auth_capa_zuc_eia3[] = {
{.digest_len = 4, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0},
 .iv_len = 16},
{.digest_len = 4, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0},
 .iv_len = 25} };

static const odp_crypto_auth_capability_t auth_capa_snow3g_uia2[] = {
{.digest_len = SNOW3G_DIGEST_LENGTH, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0},
 .iv_len = 16} };
+
/** Forward declaration of session structure */
typedef struct odp_crypto_generic_session_t odp_crypto_generic_session_t;

/**
 * Algorithm handler function prototype
 */
typedef odp_crypto_alg_err_t (*crypto_func_t)(odp_packet_t pkt,
					      const odp_crypto_packet_op_param_t *param,
					      odp_crypto_generic_session_t *session);

/**
 * Per crypto session data structure
 */
struct odp_crypto_generic_session_t {
	odp_crypto_generic_session_t *next;	/* Free-list link */

	/* Session creation parameters */
	odp_crypto_session_param_t p;

	odp_bool_t do_cipher_first;		/* Cipher-before-auth order */
	uint8_t null_crypto_enable :1;		/* Per-op null crypto allowed */

	struct {
		/* Union: ZUC uses the raw key bytes, SNOW3G the expanded
		 * key schedule — never both for the same session. */
		union {
			uint8_t key_data[IPSEC_MB_CRYPTO_MAX_CIPHER_KEY_LENGTH];
			snow3g_key_schedule_t key_sched;
		};
		crypto_func_t func;		/* Cipher handler */
	} cipher;

	struct {
		union {
			uint8_t key[IPSEC_MB_CRYPTO_MAX_AUTH_KEY_LENGTH];
			snow3g_key_schedule_t key_sched;
		};
		crypto_func_t func;		/* Auth handler */
	} auth;

	unsigned int idx;			/* Index in global session table */
};

typedef struct odp_crypto_global_s odp_crypto_global_t;

/* Global state: spinlock-protected free list over a fixed session table */
struct odp_crypto_global_s {
	odp_spinlock_t lock;
	odp_crypto_generic_session_t *free;
	odp_crypto_generic_session_t sessions[MAX_SESSIONS];
};

static odp_crypto_global_t *global;

/* Per-thread state: bounce buffer for segmented packets and the
 * thread's IPsec-MB manager instance */
typedef struct crypto_local_t {
	uint8_t buffer[IPSEC_MB_CRYPTO_MAX_DATA_LENGTH];
	IMB_MGR *mb_mgr;
} crypto_local_t;

static __thread crypto_local_t local;
+
+static
+odp_crypto_generic_session_t *alloc_session(void)
+{
+ odp_crypto_generic_session_t *session = NULL;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ if (!session)
+ return NULL;
+
+ session->idx = session - global->sessions;
+
+ return session;
+}
+
/* Return a session to the head of the global free list. */
static
void free_session(odp_crypto_generic_session_t *session)
{
	odp_spinlock_lock(&global->lock);
	session->next = global->free;
	global->free = session;
	odp_spinlock_unlock(&global->lock);
}
+
/* No-op handler installed when the cipher or auth algorithm is NULL. */
static odp_crypto_alg_err_t
null_crypto_routine(odp_packet_t pkt ODP_UNUSED,
		    const odp_crypto_packet_op_param_t *param ODP_UNUSED,
		    odp_crypto_generic_session_t *session ODP_UNUSED)
{
	return ODP_CRYPTO_ALG_ERR_NONE;
}
+
/* In-place ZUC EEA3 / SNOW3G UEA2 encryption/decryption over the cipher
 * range. Segmented ranges are bounced through the per-thread buffer. */
static
odp_crypto_alg_err_t ipsec_mb_cipher_op(odp_packet_t pkt,
					const odp_crypto_packet_op_param_t *param,
					odp_crypto_generic_session_t *session)
{
	IMB_MGR *mb_mgr = local.mb_mgr;
	const uint8_t *iv_ptr = param->cipher_iv_ptr;
	uint32_t in_pos = param->cipher_range.offset;
	uint32_t in_len = param->cipher_range.length;

	_ODP_ASSERT(iv_ptr != NULL);

	uint32_t seg_len = 0;
	uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);

	if (odp_unlikely(seg_len < in_len)) {
		/* The bounce buffer bounds the largest range we can handle */
		if (odp_unlikely(in_len > IPSEC_MB_CRYPTO_MAX_DATA_LENGTH))
			return ODP_CRYPTO_ALG_ERR_DATA_SIZE;

		/* Packet is segmented within the cipher range. Copy the cipher
		 * range to a contiguous buffer. */
		odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);

		data = local.buffer;
	}

	if (session->p.cipher_alg == ODP_CIPHER_ALG_ZUC_EEA3) {
		if (session->p.cipher_key.length == 16) {
			/* ZUC128 EEA3 */
			IMB_ZUC_EEA3_1_BUFFER(mb_mgr, session->cipher.key_data,
					      iv_ptr,
					      data,
					      data,
					      in_len);
		} else {
			/* Only 16 and 32 byte keys are supported
			 * ZUC256 EEA3 */
			IMB_ZUC256_EEA3_1_BUFFER(mb_mgr, session->cipher.key_data,
						 iv_ptr,
						 data,
						 data,
						 in_len);
		}
	} else {
		/* Only ODP_CIPHER_ALG_SNOW3G_UEA2 */
		IMB_SNOW3G_F8_1_BUFFER(mb_mgr, &session->cipher.key_sched,
				       iv_ptr,
				       data,
				       data,
				       in_len);
	}

	if (odp_unlikely(imb_get_errno(mb_mgr) != 0))
		return ODP_CRYPTO_ALG_ERR_DATA_SIZE;

	/* Write the transformed bounce buffer back into the packet */
	if (odp_unlikely(seg_len < in_len))
		odp_packet_copy_from_mem(pkt, in_pos, in_len, data);

	return ODP_CRYPTO_ALG_ERR_NONE;
}
+
+static int process_zuc_eea3_param(odp_crypto_generic_session_t *session)
+{
+ if (!((16 == session->p.cipher_key.length &&
+ 16 == session->p.cipher_iv_len) ||
+ (32 == session->p.cipher_key.length &&
+ 25 == session->p.cipher_iv_len)))
+ return -1;
+
+ memcpy(session->cipher.key_data, session->p.cipher_key.data,
+ session->p.cipher_key.length);
+
+ session->cipher.func = ipsec_mb_cipher_op;
+
+ return 0;
+}
+
/* Generate a 4-byte authentication tag (encode direction) with ZUC EIA3
 * or SNOW3G UIA2 and write it to the packet at hash_result_offset. */
static
odp_crypto_alg_err_t auth_ipsec_mb_gen(odp_packet_t pkt,
				       const odp_crypto_packet_op_param_t *param,
				       odp_crypto_generic_session_t *session)
{
	IMB_MGR *mb_mgr = local.mb_mgr;
	const uint8_t *iv_ptr = param->auth_iv_ptr;
	uint32_t in_pos = param->auth_range.offset;
	uint32_t in_len = param->auth_range.length;
	/* Tag fits a uint32_t: digest length is validated to 4 bytes at
	 * session creation */
	uint32_t auth_tag;

	_ODP_ASSERT(iv_ptr != NULL);

	uint32_t seg_len = 0;
	uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);

	if (odp_unlikely(seg_len < in_len)) {
		if (odp_unlikely(in_len > IPSEC_MB_CRYPTO_MAX_DATA_LENGTH))
			return ODP_CRYPTO_ALG_ERR_DATA_SIZE;

		/* Packet is segmented within the auth range. Copy the auth
		 * range to a contiguous buffer. */
		odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);

		data = local.buffer;
	}

	/* The library takes the length in bits, hence the * 8 below */
	if (session->p.auth_alg == ODP_AUTH_ALG_ZUC_EIA3) {
		if (session->p.auth_key.length == 16) {
			/* ZUC128 EIA3 */
			IMB_ZUC_EIA3_1_BUFFER(mb_mgr, session->auth.key,
					      iv_ptr,
					      data,
					      param->auth_range.length * 8,
					      &auth_tag);
		} else {
			/* Only 16 and 32 byte keys are supported
			 * ZUC256 EIA3 */
			IMB_ZUC256_EIA3_1_BUFFER(mb_mgr, session->auth.key,
						 iv_ptr,
						 data,
						 param->auth_range.length * 8,
						 &auth_tag);
		}
	} else {
		/* Only ODP_AUTH_ALG_SNOW3G_UIA2 */
		IMB_SNOW3G_F9_1_BUFFER(mb_mgr, &session->auth.key_sched,
				       iv_ptr,
				       data,
				       param->auth_range.length * 8,
				       &auth_tag);
	}

	if (odp_unlikely(imb_get_errno(mb_mgr) != 0))
		return ODP_CRYPTO_ALG_ERR_DATA_SIZE;

	/* Copy to the output location */
	odp_packet_copy_from_mem(pkt, param->hash_result_offset,
				 session->p.auth_digest_len,
				 &auth_tag);

	return ODP_CRYPTO_ALG_ERR_NONE;
}
+
/* Verify the authentication tag (decode direction): recompute it over the
 * auth range and compare with the tag stored at hash_result_offset. */
static
odp_crypto_alg_err_t auth_ipsec_mb_check(odp_packet_t pkt,
					 const odp_crypto_packet_op_param_t *param,
					 odp_crypto_generic_session_t *session)
{
	IMB_MGR *mb_mgr = local.mb_mgr;
	const uint8_t *iv_ptr = param->auth_iv_ptr;
	uint32_t in_pos = param->auth_range.offset;
	uint32_t in_len = param->auth_range.length;
	/* ZUC_DIGEST_LENGTH and SNOW3G_DIGEST_LENGTH are both 4, so a
	 * single constant covers both algorithms handled here */
	uint32_t bytes = ZUC_DIGEST_LENGTH;
	uint32_t hash_in;
	uint32_t hash_out;

	/* Copy current value out and clear it before authentication */
	odp_packet_copy_to_mem(pkt, param->hash_result_offset,
			       bytes, &hash_in);

	/* Zero the stored tag if it lies inside the authenticated range */
	if (odp_unlikely(session->p.hash_result_in_auth_range))
		_odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);

	_ODP_ASSERT(iv_ptr != NULL);

	uint32_t seg_len = 0;
	uint8_t *data = odp_packet_offset(pkt, in_pos, &seg_len, NULL);

	if (odp_unlikely(seg_len < in_len)) {
		if (odp_unlikely(in_len > IPSEC_MB_CRYPTO_MAX_DATA_LENGTH))
			return ODP_CRYPTO_ALG_ERR_DATA_SIZE;

		/* Packet is segmented within the auth range. Copy the auth
		 * range to a contiguous buffer. */
		odp_packet_copy_to_mem(pkt, in_pos, in_len, local.buffer);

		data = local.buffer;
	}

	/* The library takes the length in bits, hence the * 8 below */
	if (session->p.auth_alg == ODP_AUTH_ALG_ZUC_EIA3) {
		if (session->p.auth_key.length == 16) {
			/* ZUC128 EIA3 */
			IMB_ZUC_EIA3_1_BUFFER(mb_mgr, session->auth.key,
					      iv_ptr,
					      data,
					      param->auth_range.length * 8,
					      &hash_out);
		} else {
			/* Only 16 and 32 byte keys are supported
			 * ZUC256 EIA3 */
			IMB_ZUC256_EIA3_1_BUFFER(mb_mgr, session->auth.key,
						 iv_ptr,
						 data,
						 param->auth_range.length * 8,
						 &hash_out);
		}
	} else {
		/* Only ODP_AUTH_ALG_SNOW3G_UIA2 */
		IMB_SNOW3G_F9_1_BUFFER(mb_mgr, &session->auth.key_sched,
				       iv_ptr,
				       data,
				       param->auth_range.length * 8,
				       &hash_out);
	}

	if (odp_unlikely(imb_get_errno(mb_mgr) != 0))
		return ODP_CRYPTO_ALG_ERR_DATA_SIZE;

	/* Verify match */
	if (hash_in != hash_out)
		return ODP_CRYPTO_ALG_ERR_ICV_CHECK;

	return ODP_CRYPTO_ALG_ERR_NONE;
}
+
+static int process_auth_zuc_eia3_param(odp_crypto_generic_session_t *session)
+{
+ if (!((16 == session->p.auth_key.length &&
+ 16 == session->p.auth_iv_len) ||
+ (32 == session->p.auth_key.length &&
+ 25 == session->p.auth_iv_len)))
+ return -1;
+
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+ session->auth.func = auth_ipsec_mb_gen;
+ else
+ session->auth.func = auth_ipsec_mb_check;
+
+ if (session->p.auth_digest_len != ZUC_DIGEST_LENGTH)
+ return -1;
+
+ memcpy(session->auth.key, session->p.auth_key.data,
+ session->p.auth_key.length);
+
+ return 0;
+}
+
+static int process_snow3g_uea2_param(odp_crypto_generic_session_t *session)
+{
+ if (!(16 == session->p.cipher_key.length &&
+ 16 == session->p.cipher_iv_len))
+ return -1;
+
+ memcpy(session->cipher.key_data, session->p.cipher_key.data,
+ session->p.cipher_key.length);
+
+ session->cipher.func = ipsec_mb_cipher_op;
+
+ return IMB_SNOW3G_INIT_KEY_SCHED(local.mb_mgr, session->p.cipher_key.data,
+ &session->cipher.key_sched);
+}
+
+static int process_auth_snow3g_uia2_param(odp_crypto_generic_session_t *session)
+{
+ if (!(16 == session->p.auth_key.length &&
+ 16 == session->p.auth_iv_len))
+ return -1;
+
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+ session->auth.func = auth_ipsec_mb_gen;
+ else
+ session->auth.func = auth_ipsec_mb_check;
+
+ if (session->p.auth_digest_len != SNOW3G_DIGEST_LENGTH)
+ return -1;
+
+ memcpy(session->auth.key, session->p.auth_key.data,
+ session->p.auth_key.length);
+
+ return IMB_SNOW3G_INIT_KEY_SCHED(local.mb_mgr, session->p.auth_key.data,
+ &session->auth.key_sched);
+}
+
+int odp_crypto_capability(odp_crypto_capability_t *capa)
+{
+ if (NULL == capa)
+ return -1;
+
+ memset(capa, 0, sizeof(odp_crypto_capability_t));
+
+ capa->sync_mode = ODP_SUPPORT_PREFERRED;
+ capa->async_mode = ODP_SUPPORT_YES;
+ capa->queue_type_plain = 1;
+ capa->queue_type_sched = 1;
+
+ capa->ciphers.bit.null = 1;
+ capa->auths.bit.null = 1;
+
+ capa->ciphers.bit.zuc_eea3 = 1;
+ capa->auths.bit.zuc_eia3 = 1;
+
+ capa->ciphers.bit.snow3g_uea2 = 1;
+ capa->auths.bit.snow3g_uia2 = 1;
+
+ capa->max_sessions = MAX_SESSIONS;
+
+ return 0;
+}
+
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ const odp_crypto_cipher_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_cipher_capability_t);
+
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ src = cipher_capa_null;
+ num = sizeof(cipher_capa_null) / size;
+ break;
+ case ODP_CIPHER_ALG_ZUC_EEA3:
+ src = cipher_capa_zuc_eea3;
+ num = sizeof(cipher_capa_zuc_eea3) / size;
+ break;
+ case ODP_CIPHER_ALG_SNOW3G_UEA2:
+ src = cipher_capa_snow3g_uea2;
+ num = sizeof(cipher_capa_snow3g_uea2) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[], int num_copy)
+{
+ const odp_crypto_auth_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_auth_capability_t);
+
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ src = auth_capa_null;
+ num = sizeof(auth_capa_null) / size;
+ break;
+ case ODP_AUTH_ALG_ZUC_EIA3:
+ src = auth_capa_zuc_eia3;
+ num = sizeof(auth_capa_zuc_eia3) / size;
+ break;
+ case ODP_AUTH_ALG_SNOW3G_UIA2:
+ src = auth_capa_snow3g_uia2;
+ num = sizeof(auth_capa_snow3g_uia2) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
/* Create a crypto session: validate parameters, allocate a session slot,
 * and install per-algorithm cipher and auth handlers. On any failure the
 * session slot is released and *session_out is set to invalid. */
int
odp_crypto_session_create(const odp_crypto_session_param_t *param,
			  odp_crypto_session_t *session_out,
			  odp_crypto_ses_create_err_t *status)
{
	int rc = 0;
	odp_crypto_generic_session_t *session;

	if (odp_global_ro.disable.crypto) {
		_ODP_ERR("Crypto is disabled\n");
		/* Dummy output to avoid compiler warning about uninitialized
		 * variables */
		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}

	/* Bit-mode cipher/auth ranges are not supported here */
	if (param->cipher_range_in_bits) {
		*status = ODP_CRYPTO_SES_ERR_CIPHER;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}
	if (param->auth_range_in_bits) {
		*status = ODP_CRYPTO_SES_ERR_AUTH;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}
	/* Out-of-place operation types are not supported */
	if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP ||
	    param->op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
		*status = ODP_CRYPTO_SES_ERR_PARAMS;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}

	session = alloc_session();
	if (NULL == session) {
		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
		goto err;
	}

	/* Copy creation parameters into the session */
	session->p = *param;

	/* Derive order */
	if (ODP_CRYPTO_OP_ENCODE == param->op)
		session->do_cipher_first = param->auth_cipher_text;
	else
		session->do_cipher_first = !param->auth_cipher_text;

	/* Process based on cipher */
	switch (param->cipher_alg) {
	case ODP_CIPHER_ALG_NULL:
		session->cipher.func = null_crypto_routine;
		rc = 0;
		break;
	case ODP_CIPHER_ALG_ZUC_EEA3:
		rc = process_zuc_eea3_param(session);
		break;
	case ODP_CIPHER_ALG_SNOW3G_UEA2:
		rc = process_snow3g_uea2_param(session);
		break;
	default:
		rc = -1;
	}

	/* Per-operation null crypto is only usable in async mode */
	if (param->null_crypto_enable && param->op_mode == ODP_CRYPTO_SYNC)
		rc = -1;
	session->null_crypto_enable = !!param->null_crypto_enable;

	if (rc) {
		*status = ODP_CRYPTO_SES_ERR_CIPHER;
		goto err;
	}

	/* Process based on auth */
	switch (param->auth_alg) {
	case ODP_AUTH_ALG_NULL:
		session->auth.func = null_crypto_routine;
		rc = 0;
		break;
	case ODP_AUTH_ALG_ZUC_EIA3:
		rc = process_auth_zuc_eia3_param(session);
		break;
	case ODP_AUTH_ALG_SNOW3G_UIA2:
		rc = process_auth_snow3g_uia2_param(session);
		break;
	default:
		rc = -1;
	}

	if (rc) {
		*status = ODP_CRYPTO_SES_ERR_AUTH;
		goto err;
	}

	/* The session handle is the session's address */
	*session_out = (intptr_t)session;
	*status = ODP_CRYPTO_SES_ERR_NONE;
	return 0;

err:
	/* error status should be set at this moment */
	if (session != NULL)
		free_session(session);
	*session_out = ODP_CRYPTO_SESSION_INVALID;
	return -1;
}
+
+int odp_crypto_session_destroy(odp_crypto_session_t session)
+{
+ odp_crypto_generic_session_t *generic;
+
+ generic = (odp_crypto_generic_session_t *)(intptr_t)session;
+ memset(generic, 0, sizeof(*generic));
+ free_session(generic);
+ return 0;
+}
+
/* Reserve the global session table in shared memory and build the free
 * list. Returns 0 on success (including when crypto is disabled). */
int _odp_crypto_init_global(void)
{
	size_t mem_size;
	odp_shm_t shm;
	int idx;

	if (odp_global_ro.disable.crypto) {
		_ODP_PRINT("\nODP crypto is DISABLED\n");
		return 0;
	}

	/* Calculate the memory size we need */
	mem_size = sizeof(odp_crypto_global_t);

	/* Allocate our globally shared memory */
	shm = odp_shm_reserve(ODP_CRYPTO_IPSEC_MB_SHM_NAME, mem_size,
			      ODP_CACHE_LINE_SIZE,
			      0);
	if (ODP_SHM_INVALID == shm) {
		_ODP_ERR("unable to allocate crypto pool\n");
		return -1;
	}

	global = odp_shm_addr(shm);

	/* Clear it out */
	memset(global, 0, mem_size);

	/* Initialize free list and lock */
	for (idx = 0; idx < MAX_SESSIONS; idx++) {
		global->sessions[idx].next = global->free;
		global->free = &global->sessions[idx];
	}
	odp_spinlock_init(&global->lock);

	return 0;
}
+
/* Free the global session table. Reports an error (but continues) if any
 * sessions are still allocated at termination time. */
int _odp_crypto_term_global(void)
{
	int rc = 0;
	int ret;
	int count = 0;
	odp_crypto_generic_session_t *session;

	if (odp_global_ro.disable.crypto)
		return 0;

	/* All sessions should be back on the free list by now */
	for (session = global->free; session != NULL; session = session->next)
		count++;
	if (count != MAX_SESSIONS) {
		_ODP_ERR("crypto sessions still active\n");
		rc = -1;
	}

	ret = odp_shm_free(odp_shm_lookup(ODP_CRYPTO_IPSEC_MB_SHM_NAME));
	if (ret < 0) {
		_ODP_ERR("shm free failed for %s\n", ODP_CRYPTO_IPSEC_MB_SHM_NAME);
		rc = -1;
	}

	return rc;
}
+
/* Per-thread init: allocate this thread's IPsec-MB manager and initialize
 * it with the best implementation for the running CPU. */
int _odp_crypto_init_local(void)
{
	uint64_t flags = 0;

	if (odp_global_ro.disable.crypto)
		return 0;

	memset(&local, 0, sizeof(local));

	local.mb_mgr = alloc_mb_mgr(flags);
	if (local.mb_mgr == NULL)
		return -1;

	/* Auto-select the architecture-specific implementation */
	init_mb_mgr_auto(local.mb_mgr, NULL);

	return 0;
}
+
/* Per-thread teardown: release this thread's IPsec-MB manager. */
int _odp_crypto_term_local(void)
{
	if (odp_global_ro.disable.crypto)
		return 0;

	free_mb_mgr(local.mb_mgr);
	return 0;
}
+
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_crypto_session_param_t));
+}
+
/* Return the session handle as a plain integer (for debug printing). */
uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
{
	return (uint64_t)hdl;
}
+
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
+{
+ int md_copy;
+ int rc;
+
+ md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+ odp_packet_pool(src));
+ if (odp_unlikely(md_copy < 0)) {
+ _ODP_ERR("Unable to copy packet metadata\n");
+ return -1;
+ }
+
+ rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
+ if (odp_unlikely(rc < 0)) {
+ _ODP_ERR("Unable to copy packet data\n");
+ return -1;
+ }
+
+ _odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
+ return 0;
+}
+
/* Resolve the output packet for a non-basic operation. Either reuses the
 * input, copies it into the session's output pool, or copies data and
 * metadata into a caller-provided output packet. Consumes pkt_in on
 * success when a different packet is returned. */
static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
				      odp_packet_t pkt_in,
				      odp_packet_t pkt_out)
{
	int rc;

	/* In-place operation: nothing to do */
	if (odp_likely(pkt_in == pkt_out))
		return pkt_out;

	if (pkt_out == ODP_PACKET_INVALID) {
		odp_pool_t pool = session->p.output_pool;

		_ODP_ASSERT(pool != ODP_POOL_INVALID);
		if (pool == odp_packet_pool(pkt_in)) {
			/* Same pool: the input packet can serve as output */
			pkt_out = pkt_in;
		} else {
			pkt_out = odp_packet_copy(pkt_in, pool);
			if (odp_likely(pkt_out != ODP_PACKET_INVALID))
				odp_packet_free(pkt_in);
		}
		return pkt_out;
	}
	/* Caller-provided output packet */
	rc = copy_data_and_metadata(pkt_out, pkt_in);
	if (odp_unlikely(rc < 0))
		return ODP_PACKET_INVALID;

	odp_packet_free(pkt_in);
	return pkt_out;
}
+
/* Run one crypto operation on a packet: resolve the output packet, apply
 * cipher and auth in session order, and store per-packet results in the
 * packet header. Returns 0 on success, -1 on output packet failure. */
static
int crypto_int(odp_packet_t pkt_in,
	       odp_packet_t *pkt_out,
	       const odp_crypto_packet_op_param_t *param)
{
	odp_crypto_alg_err_t rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
	odp_crypto_alg_err_t rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
	odp_crypto_generic_session_t *session;
	odp_packet_t out_pkt;
	odp_crypto_packet_result_t *op_result;

	session = (odp_crypto_generic_session_t *)(intptr_t)param->session;

	if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
		/* Basic operations are always in place */
		out_pkt = pkt_in;
	} else {
		out_pkt = get_output_packet(session, pkt_in, *pkt_out);
		if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
			return -1;
	}

	/* Skip the algorithms entirely for a per-op null crypto request */
	if (odp_unlikely(session->null_crypto_enable && param->null_crypto))
		goto out;

	/* Invoke the crypto function */
	if (session->do_cipher_first) {
		rc_cipher = session->cipher.func(out_pkt, param, session);
		rc_auth = session->auth.func(out_pkt, param, session);
	} else {
		rc_auth = session->auth.func(out_pkt, param, session);
		rc_cipher = session->cipher.func(out_pkt, param, session);
	}

out:
	/* Record per-algorithm status in the packet header */
	packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
	op_result = &packet_hdr(out_pkt)->crypto_op_result;
	op_result->cipher_status.alg_err = rc_cipher;
	op_result->auth_status.alg_err = rc_auth;

	/* Synchronous, simply return results */
	*pkt_out = out_pkt;

	return 0;
}
+
+int odp_crypto_op(const odp_packet_t pkt_in[],
+ odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ int i, rc;
+ odp_crypto_generic_session_t *session;
+
+ for (i = 0; i < num_pkt; i++) {
+ session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
+ _ODP_ASSERT(ODP_CRYPTO_SYNC == session->p.op_mode);
+
+ rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
+ if (rc < 0)
+ break;
+ }
+
+ return i;
+}
+
+int odp_crypto_op_enq(const odp_packet_t pkt_in[],
+ const odp_packet_t pkt_out[],
+ const odp_crypto_packet_op_param_t param[],
+ int num_pkt)
+{
+ odp_packet_t pkt;
+ odp_event_t event;
+ odp_crypto_generic_session_t *session;
+ int i, rc;
+
+ for (i = 0; i < num_pkt; i++) {
+ session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
+ _ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
+ _ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);
+
+ if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
+ pkt = pkt_out[i];
+
+ rc = crypto_int(pkt_in[i], &pkt, &param[i]);
+ if (rc < 0)
+ break;
+
+ event = odp_packet_to_event(pkt);
+ if (odp_queue_enq(session->p.compl_queue, event)) {
+ odp_event_free(event);
+ break;
+ }
+ }
+
+ return i;
+}
diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c
new file mode 100644
index 000000000..6eda0f455
--- /dev/null
+++ b/platform/linux-generic/odp_crypto_null.c
@@ -0,0 +1,510 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+#include <odp/api/crypto.h>
+#include <odp_init_internal.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/sync.h>
+#include <odp/api/debug.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp/api/hints.h>
+#include <odp/api/random.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp_packet_internal.h>
+#include <odp/api/plat/queue_inlines.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/event_inlines.h>
+
/* Maximum number of concurrently allocated crypto sessions */
#define MAX_SESSIONS 32

/*
 * Cipher algorithm capabilities
 *
 * Keep sorted: first by key length, then by IV length
 */
static const odp_crypto_cipher_capability_t cipher_capa_null[] = {
{.key_len = 0, .iv_len = 0} };

/*
 * Authentication algorithm capabilities
 *
 * Keep sorted: first by digest length, then by key length
 */
static const odp_crypto_auth_capability_t auth_capa_null[] = {
{.digest_len = 0, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
/** Forward declaration of session structure */
typedef struct odp_crypto_generic_session_t odp_crypto_generic_session_t;

/**
 * Algorithm handler function prototype
 */
typedef
odp_crypto_alg_err_t (*crypto_func_t)(odp_packet_t pkt,
				      const odp_crypto_packet_op_param_t *param,
				      odp_crypto_generic_session_t *session);
typedef void (*crypto_init_func_t)(odp_crypto_generic_session_t *session);

/**
 * Per crypto session data structure
 */
struct odp_crypto_generic_session_t {
	odp_crypto_generic_session_t *next;	/* Free-list link */

	/* Session creation parameters */
	odp_crypto_session_param_t p;

	unsigned int idx;			/* Index in global session table */
};

typedef struct odp_crypto_global_s odp_crypto_global_t;

struct odp_crypto_global_s {
	odp_spinlock_t lock;
	odp_crypto_generic_session_t *free;
	odp_crypto_generic_session_t sessions[MAX_SESSIONS];

	/* These flags are cleared at alloc_session() */
	uint8_t ctx_valid[ODP_THREAD_COUNT_MAX][MAX_SESSIONS];

	/* NOTE(review): openssl_lock is never referenced in this file —
	 * looks like a leftover from the OpenSSL implementation; confirm
	 * and consider removing. */
	odp_ticketlock_t openssl_lock[];
};

static odp_crypto_global_t *global;
+
+static odp_crypto_global_t *global;
+
+static
+odp_crypto_generic_session_t *alloc_session(void)
+{
+ odp_crypto_generic_session_t *session = NULL;
+ unsigned int i;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ if (!session)
+ return NULL;
+
+ session->idx = session - global->sessions;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ global->ctx_valid[i][session->idx] = 0;
+
+ return session;
+}
+
/* Return a session to the head of the global free list. */
static
void free_session(odp_crypto_generic_session_t *session)
{
	odp_spinlock_lock(&global->lock);
	session->next = global->free;
	global->free = session;
	odp_spinlock_unlock(&global->lock);
}
+
+int odp_crypto_capability(odp_crypto_capability_t *capa)
+{
+ if (odp_global_ro.disable.crypto) {
+ _ODP_ERR("Crypto is disabled\n");
+ return -1;
+ }
+
+ if (NULL == capa)
+ return -1;
+
+ /* Initialize crypto capability structure */
+ memset(capa, 0, sizeof(odp_crypto_capability_t));
+
+ capa->sync_mode = ODP_SUPPORT_PREFERRED;
+ capa->async_mode = ODP_SUPPORT_YES;
+ capa->queue_type_plain = 1;
+ capa->queue_type_sched = 1;
+
+ capa->ciphers.bit.null = 1;
+
+ capa->auths.bit.null = 1;
+
+ capa->max_sessions = MAX_SESSIONS;
+
+ return 0;
+}
+
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+ odp_crypto_cipher_capability_t dst[],
+ int num_copy)
+{
+ const odp_crypto_cipher_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_cipher_capability_t);
+
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ src = cipher_capa_null;
+ num = sizeof(cipher_capa_null) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+ odp_crypto_auth_capability_t dst[], int num_copy)
+{
+ const odp_crypto_auth_capability_t *src;
+ int num;
+ int size = sizeof(odp_crypto_auth_capability_t);
+
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ src = auth_capa_null;
+ num = sizeof(auth_capa_null) / size;
+ break;
+ default:
+ return -1;
+ }
+
+ if (num < num_copy)
+ num_copy = num;
+
+ memcpy(dst, src, num_copy * size);
+
+ return num;
+}
+
/* Create a crypto session. Only the NULL cipher/auth algorithms and
 * byte-aligned, non-OOP operation types are accepted. On any failure the
 * session slot is released and *session_out is set to invalid. */
int
odp_crypto_session_create(const odp_crypto_session_param_t *param,
			  odp_crypto_session_t *session_out,
			  odp_crypto_ses_create_err_t *status)
{
	int rc;
	odp_crypto_generic_session_t *session;

	if (odp_global_ro.disable.crypto) {
		_ODP_ERR("Crypto is disabled\n");
		/* Dummy output to avoid compiler warning about uninitialized
		 * variables */
		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}

	/* Bit-mode cipher/auth ranges are not supported here */
	if (param->cipher_range_in_bits) {
		*status = ODP_CRYPTO_SES_ERR_CIPHER;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}
	if (param->auth_range_in_bits) {
		*status = ODP_CRYPTO_SES_ERR_AUTH;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}
	/* Out-of-place operation types are not supported */
	if (param->op_type == ODP_CRYPTO_OP_TYPE_OOP ||
	    param->op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
		*status = ODP_CRYPTO_SES_ERR_PARAMS;
		*session_out = ODP_CRYPTO_SESSION_INVALID;
		return -1;
	}

	/* Allocate memory for this session */
	session = alloc_session();
	if (NULL == session) {
		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
		goto err;
	}

	/* Copy parameters */
	session->p = *param;

	/* Process based on cipher */
	switch (param->cipher_alg) {
	case ODP_CIPHER_ALG_NULL:
		rc = 0;
		break;
	default:
		rc = -1;
	}

	/* Check result */
	if (rc) {
		*status = ODP_CRYPTO_SES_ERR_CIPHER;
		goto err;
	}

	/* Process based on auth */
	switch (param->auth_alg) {
	case ODP_AUTH_ALG_NULL:
		rc = 0;
		break;
	default:
		rc = -1;
	}

	/* Check result */
	if (rc) {
		*status = ODP_CRYPTO_SES_ERR_AUTH;
		goto err;
	}

	/* We're happy */
	*session_out = (intptr_t)session;
	*status = ODP_CRYPTO_SES_ERR_NONE;
	return 0;

err:
	/* error status should be set at this moment */
	if (session != NULL)
		free_session(session);
	*session_out = ODP_CRYPTO_SESSION_INVALID;
	return -1;
}
+
/* Destroy a session: wipe its contents and return the slot to the free list. */
int odp_crypto_session_destroy(odp_crypto_session_t session)
{
	odp_crypto_generic_session_t *generic;

	generic = (odp_crypto_generic_session_t *)(intptr_t)session;
	memset(generic, 0, sizeof(*generic));
	free_session(generic);
	return 0;
}
+
/* Reserve the global session table in shared memory and build the free
 * list. Returns 0 on success (including when crypto is disabled). */
int
_odp_crypto_init_global(void)
{
	size_t mem_size;
	odp_shm_t shm;
	int idx;

	if (odp_global_ro.disable.crypto) {
		_ODP_PRINT("\nODP crypto is DISABLED\n");
		return 0;
	}

	/* Calculate the memory size we need */
	mem_size = sizeof(odp_crypto_global_t);

	/* Allocate our globally shared memory */
	shm = odp_shm_reserve("_odp_crypto_null_global", mem_size,
			      ODP_CACHE_LINE_SIZE,
			      0);
	if (ODP_SHM_INVALID == shm) {
		_ODP_ERR("unable to allocate crypto pool\n");
		return -1;
	}

	global = odp_shm_addr(shm);

	/* Clear it out */
	memset(global, 0, mem_size);

	/* Initialize free list and lock */
	for (idx = 0; idx < MAX_SESSIONS; idx++) {
		global->sessions[idx].next = global->free;
		global->free = &global->sessions[idx];
	}
	odp_spinlock_init(&global->lock);

	return 0;
}
+
+int _odp_crypto_term_global(void)
+{
+ int rc = 0;
+ int ret;
+ int count = 0;
+ odp_crypto_generic_session_t *session;
+
+ if (odp_global_ro.disable.crypto)
+ return 0;
+
+ for (session = global->free; session != NULL; session = session->next)
+ count++;
+ if (count != MAX_SESSIONS) {
+ _ODP_ERR("crypto sessions still active\n");
+ rc = -1;
+ }
+
+ ret = odp_shm_free(odp_shm_lookup("_odp_crypto_null_global"));
+ if (ret < 0) {
+ _ODP_ERR("shm free failed for _odp_crypto_pool_null\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
/* Per-thread init: the null implementation keeps no per-thread state. */
int _odp_crypto_init_local(void)
{
	return 0;
}

/* Per-thread teardown: nothing to release. */
int _odp_crypto_term_local(void)
{
	return 0;
}
+
/* Initialize session parameters to their default (all-zero) values. */
void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
{
	memset(param, 0, sizeof(odp_crypto_session_param_t));
}

/* Return the session handle as a plain integer (for debug printing). */
uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
{
	return (uint64_t)hdl;
}
+
/* Copy both packet data and metadata from src to dst.
 * Returns 0 on success, -1 if either copy is not possible. */
static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
{
	int md_copy;
	int rc;

	md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
					       odp_packet_pool(src));
	if (odp_unlikely(md_copy < 0)) {
		_ODP_ERR("Unable to copy packet metadata\n");
		return -1;
	}

	rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
	if (odp_unlikely(rc < 0)) {
		_ODP_ERR("Unable to copy packet data\n");
		return -1;
	}

	_odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
	return 0;
}
+
+static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out)
+{
+ int rc;
+
+ if (odp_likely(pkt_in == pkt_out))
+ return pkt_out;
+
+ if (pkt_out == ODP_PACKET_INVALID) {
+ odp_pool_t pool = session->p.output_pool;
+
+ _ODP_ASSERT(pool != ODP_POOL_INVALID);
+ if (pool == odp_packet_pool(pkt_in)) {
+ pkt_out = pkt_in;
+ } else {
+ pkt_out = odp_packet_copy(pkt_in, pool);
+ if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+ odp_packet_free(pkt_in);
+ }
+ return pkt_out;
+ }
+ rc = copy_data_and_metadata(pkt_out, pkt_in);
+ if (odp_unlikely(rc < 0))
+ return ODP_PACKET_INVALID;
+
+ odp_packet_free(pkt_in);
+ return pkt_out;
+}
+
+static
+int crypto_int(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_crypto_generic_session_t *session;
+ odp_packet_t out_pkt;
+ odp_crypto_packet_result_t *op_result;
+
+ session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
+
+ if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
+ out_pkt = pkt_in;
+ } else {
+ out_pkt = get_output_packet(session, pkt_in, *pkt_out);
+ if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
+ return -1;
+ }
+
+ /* Fill in result */
+ packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
+ op_result = &packet_hdr(out_pkt)->crypto_op_result;
+ op_result->cipher_status.alg_err = ODP_CRYPTO_ALG_ERR_NONE;
+ op_result->auth_status.alg_err = ODP_CRYPTO_ALG_ERR_NONE;
+
+ /* Synchronous, simply return results */
+ *pkt_out = out_pkt;
+
+ return 0;
+}
+
/* Synchronous crypto operation on a batch of packets.
 * Returns the number of packets processed before the first failure. */
int odp_crypto_op(const odp_packet_t pkt_in[],
		  odp_packet_t pkt_out[],
		  const odp_crypto_packet_op_param_t param[],
		  int num_pkt)
{
	int i, rc;
	odp_crypto_generic_session_t *session;

	for (i = 0; i < num_pkt; i++) {
		session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
		_ODP_ASSERT(ODP_CRYPTO_SYNC == session->p.op_mode);

		rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
		if (rc < 0)
			break;
	}

	return i;
}
+
/* Asynchronous crypto operation: process each packet and enqueue the
 * result as an event on the session's completion queue.
 * Returns the number of packets consumed before the first failure. */
int odp_crypto_op_enq(const odp_packet_t pkt_in[],
		      const odp_packet_t pkt_out[],
		      const odp_crypto_packet_op_param_t param[],
		      int num_pkt)
{
	/* Defined initial value: pkt_out[] is only consulted for
	 * non-basic operation types */
	odp_packet_t pkt = ODP_PACKET_INVALID;
	odp_event_t event;
	odp_crypto_generic_session_t *session;
	int i, rc;

	for (i = 0; i < num_pkt; i++) {
		session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
		_ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
		_ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);

		if (session->p.op_type != ODP_CRYPTO_OP_TYPE_BASIC)
			pkt = pkt_out[i];

		rc = crypto_int(pkt_in[i], &pkt, &param[i]);
		if (rc < 0)
			break;

		/* Deliver the result packet on the completion queue */
		event = odp_packet_to_event(pkt);
		if (odp_queue_enq(session->p.compl_queue, event)) {
			odp_event_free(event);
			break;
		}
	}

	return i;
}
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c
new file mode 100644
index 000000000..879ce0b97
--- /dev/null
+++ b/platform/linux-generic/odp_crypto_openssl.c
@@ -0,0 +1,2830 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+#include <odp/api/crypto.h>
+#include <odp_init_internal.h>
+#include <odp/api/spinlock.h>
+#include <odp/api/sync.h>
+#include <odp/api/debug.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp_debug_internal.h>
+#include <odp/api/hints.h>
+#include <odp/api/random.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp_global_data.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/event_inlines.h>
+
+#include <string.h>
+#include <stdlib.h>
+
+#include <openssl/hmac.h>
+#include <openssl/cmac.h>
+#include <openssl/evp.h>
+#include <openssl/opensslv.h>
+
+#if (OPENSSL_VERSION_NUMBER >= 0x10100000L) && !defined(OPENSSL_NO_POLY1305)
+#define _ODP_HAVE_CHACHA20_POLY1305 1
+#else
+#define _ODP_HAVE_CHACHA20_POLY1305 0
+#endif
+
+/* Ignore warnings about APIs deprecated in OpenSSL 3.0 */
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#define MAX_SESSIONS 4000
+#define AES_BLOCK_SIZE 16
+#define AES_KEY_LENGTH 16
+
+/*
+ * Cipher algorithm capabilities
+ *
+ * Keep sorted: first by key length, then by IV length
+ */
+static const odp_crypto_cipher_capability_t cipher_capa_null[] = {
+{.key_len = 0, .iv_len = 0, .bit_mode = 1} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_trides_cbc[] = {
+{.key_len = 24, .iv_len = 8} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_trides_ecb[] = {
+{.key_len = 24} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_cbc[] = {
+{.key_len = 16, .iv_len = 16},
+{.key_len = 24, .iv_len = 16},
+{.key_len = 32, .iv_len = 16} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_ctr[] = {
+{.key_len = 16, .iv_len = 16},
+{.key_len = 24, .iv_len = 16},
+{.key_len = 32, .iv_len = 16} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_ecb[] = {
+{.key_len = 16},
+{.key_len = 24},
+{.key_len = 32} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_cfb128[] = {
+{.key_len = 16, .iv_len = 16},
+{.key_len = 24, .iv_len = 16},
+{.key_len = 32, .iv_len = 16} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_xts[] = {
+{.key_len = 32, .iv_len = 16},
+{.key_len = 64, .iv_len = 16} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_gcm[] = {
+{.key_len = 16, .iv_len = 12},
+{.key_len = 24, .iv_len = 12},
+{.key_len = 32, .iv_len = 12} };
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_ccm[] = {
+{.key_len = 16, .iv_len = 11},
+{.key_len = 16, .iv_len = 13},
+{.key_len = 24, .iv_len = 11},
+{.key_len = 24, .iv_len = 13},
+{.key_len = 32, .iv_len = 11},
+{.key_len = 32, .iv_len = 13} };
+
+#if _ODP_HAVE_CHACHA20_POLY1305
+static const odp_crypto_cipher_capability_t cipher_capa_chacha20_poly1305[] = {
+{.key_len = 32, .iv_len = 12} };
+#endif
+
+static const odp_crypto_cipher_capability_t cipher_capa_aes_eea2[] = {
+{.key_len = 16, .iv_len = 16, .bit_mode = 1} };
+
+/*
+ * Authentication algorithm capabilities
+ *
+ * Keep sorted: first by digest length, then by key length
+ */
+static const odp_crypto_auth_capability_t auth_capa_null[] = {
+{.digest_len = 0, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0}, .bit_mode = 1} };
+
+static const odp_crypto_auth_capability_t auth_capa_md5_hmac[] = {
+{.digest_len = 12, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha1_hmac[] = {
+{.digest_len = 12, .key_len = 20, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 20, .key_len = 20, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha224_hmac[] = {
+{.digest_len = 28, .key_len = 28, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha256_hmac[] = {
+{.digest_len = 16, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 32, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha384_hmac[] = {
+{.digest_len = 24, .key_len = 48, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 48, .key_len = 48, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha512_hmac[] = {
+{.digest_len = 32, .key_len = 64, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 64, .key_len = 64, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_aes_xcbc[] = {
+{.digest_len = 12, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_aes_gcm[] = {
+{.digest_len = 16, .key_len = 0, .aad_len = {.min = 8, .max = 12, .inc = 4} } };
+
+static const odp_crypto_auth_capability_t auth_capa_aes_ccm[] = {
+{.digest_len = 8, .key_len = 0, .aad_len = {.min = 8, .max = 12, .inc = 4} } };
+
+static const odp_crypto_auth_capability_t auth_capa_aes_gmac[] = {
+{.digest_len = 16, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0},
+ .iv_len = 12 },
+{.digest_len = 16, .key_len = 24, .aad_len = {.min = 0, .max = 0, .inc = 0},
+ .iv_len = 12 },
+{.digest_len = 16, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0},
+ .iv_len = 12 } };
+
+static const odp_crypto_auth_capability_t auth_capa_aes_cmac[] = {
+{.digest_len = 12, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 12, .key_len = 24, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 12, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 24, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+#if _ODP_HAVE_CHACHA20_POLY1305
+static const odp_crypto_auth_capability_t auth_capa_chacha20_poly1305[] = {
+{.digest_len = 16, .key_len = 0, .aad_len = {.min = 8, .max = 12, .inc = 4} } };
+#endif
+
+static const odp_crypto_auth_capability_t auth_capa_aes_eia2[] = {
+{.digest_len = 4, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0},
+ .iv_len = 8} };
+
+static const odp_crypto_auth_capability_t auth_capa_md5[] = {
+{.digest_len = 16, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha1[] = {
+{.digest_len = 20, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha224[] = {
+{.digest_len = 28, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha256[] = {
+{.digest_len = 32, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha384[] = {
+{.digest_len = 48, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+static const odp_crypto_auth_capability_t auth_capa_sha512[] = {
+{.digest_len = 64, .key_len = 0, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
+
+/** Forward declaration of session structure */
+typedef struct odp_crypto_generic_session_t odp_crypto_generic_session_t;
+
+/**
+ * Algorithm handler function prototype
+ */
+typedef
+odp_crypto_alg_err_t (*crypto_func_t)(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session);
+typedef void (*crypto_init_func_t)(odp_crypto_generic_session_t *session);
+
+/**
+ * Per crypto session data structure
+ */
+struct odp_crypto_generic_session_t {
+ odp_crypto_generic_session_t *next;
+
+ /* Session creation parameters */
+ odp_crypto_session_param_t p;
+
+ odp_bool_t do_cipher_first;
+ uint8_t cipher_range_in_bits : 1;
+ uint8_t auth_range_in_bits : 1;
+ uint8_t auth_range_used : 1;
+ uint8_t null_crypto_enable : 1;
+
+ struct {
+ uint8_t key_data[EVP_MAX_KEY_LENGTH];
+ const EVP_CIPHER *evp_cipher;
+ crypto_func_t func;
+ crypto_init_func_t init;
+ } cipher;
+
+ struct {
+ uint8_t key[EVP_MAX_KEY_LENGTH];
+ union {
+ const EVP_MD *evp_md;
+ const EVP_CIPHER *evp_cipher;
+ };
+ crypto_func_t func;
+ crypto_init_func_t init;
+ } auth;
+
+ unsigned idx;
+};
+
+typedef struct odp_crypto_global_s odp_crypto_global_t;
+
+struct odp_crypto_global_s {
+ odp_spinlock_t lock;
+ odp_crypto_generic_session_t *free;
+ odp_crypto_generic_session_t sessions[MAX_SESSIONS];
+
+ /* These flags are cleared at alloc_session() */
+ uint8_t ctx_valid[ODP_THREAD_COUNT_MAX][MAX_SESSIONS];
+
+ odp_ticketlock_t openssl_lock[];
+};
+
+static odp_crypto_global_t *global;
+
+typedef struct crypto_local_t {
+ EVP_MD_CTX *md_ctx[MAX_SESSIONS];
+ HMAC_CTX *hmac_ctx[MAX_SESSIONS];
+ CMAC_CTX *cmac_ctx[MAX_SESSIONS];
+ EVP_CIPHER_CTX *cipher_ctx[MAX_SESSIONS];
+ EVP_CIPHER_CTX *mac_cipher_ctx[MAX_SESSIONS];
+ uint8_t *ctx_valid;
+} crypto_local_t;
+
+static __thread crypto_local_t local;
+
+/* Lazily initialize this thread's per-session OpenSSL contexts.
+ * ctx_valid[] is cleared in alloc_session(), so the first use of a
+ * session on each thread re-runs the cipher/auth init functions. */
+static inline void crypto_init(odp_crypto_generic_session_t *session)
+{
+ if (local.ctx_valid[session->idx])
+ return;
+
+ session->cipher.init(session);
+ session->auth.init(session);
+
+ local.ctx_valid[session->idx] = 1;
+}
+
+/* Pop a session from the global free list (spinlock protected).
+ * Returns NULL when MAX_SESSIONS are already in use. Clears the
+ * per-thread ctx_valid flags so every thread re-inits its contexts. */
+static
+odp_crypto_generic_session_t *alloc_session(void)
+{
+ odp_crypto_generic_session_t *session = NULL;
+ unsigned i;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ if (!session)
+ return NULL;
+
+ /* Index derived from position in the global session array */
+ session->idx = session - global->sessions;
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ global->ctx_valid[i][session->idx] = 0;
+
+ return session;
+}
+
+/* Push a session back onto the global free list (spinlock protected). */
+static
+void free_session(odp_crypto_generic_session_t *session)
+{
+ odp_spinlock_lock(&global->lock);
+ session->next = global->free;
+ global->free = session;
+ odp_spinlock_unlock(&global->lock);
+}
+
+/* No-op cipher/auth handler used for ODP_CIPHER_ALG_NULL /
+ * ODP_AUTH_ALG_NULL sessions: always reports success. */
+static odp_crypto_alg_err_t
+null_crypto_routine(odp_packet_t pkt ODP_UNUSED,
+ const odp_crypto_packet_op_param_t *param ODP_UNUSED,
+ odp_crypto_generic_session_t *session ODP_UNUSED)
+{
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* No-op per-thread context init, paired with null_crypto_routine. */
+static void
+null_crypto_init_routine(odp_crypto_generic_session_t *session)
+{
+ (void)session;
+}
+
+/* Mimic new OpenSSL 1.1.y API: pre-1.1 exposed HMAC_CTX/EVP_MD_CTX as
+ * complete types, so emulate the 1.1 heap-allocating new/free pairs. */
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+static HMAC_CTX *HMAC_CTX_new(void)
+{
+ HMAC_CTX *ctx = malloc(sizeof(*ctx));
+
+ HMAC_CTX_init(ctx);
+ return ctx;
+}
+
+static void HMAC_CTX_free(HMAC_CTX *ctx)
+{
+ HMAC_CTX_cleanup(ctx);
+ free(ctx);
+}
+
+static EVP_MD_CTX *EVP_MD_CTX_new(void)
+{
+ EVP_MD_CTX *ctx = malloc(sizeof(*ctx));
+
+ EVP_MD_CTX_init(ctx);
+ return ctx;
+}
+
+static void EVP_MD_CTX_free(EVP_MD_CTX *ctx)
+{
+ EVP_MD_CTX_cleanup(ctx);
+ free(ctx);
+}
+#endif
+
+/* Per-thread init: bind the session's HMAC key and digest algorithm to
+ * this thread's HMAC context. Later ops re-init with NULL key (fast). */
+static void
+auth_hmac_init(odp_crypto_generic_session_t *session)
+{
+ HMAC_CTX *ctx = local.hmac_ctx[session->idx];
+
+ HMAC_Init_ex(ctx,
+ session->auth.key,
+ session->p.auth_key.length,
+ session->auth.evp_md,
+ NULL);
+}
+
+/* Compute the HMAC over the packet's auth_range, walking packet
+ * segments so non-contiguous packets are handled. Digest is written
+ * to 'hash' (caller provides >= EVP_MAX_MD_SIZE bytes). */
+static
+void packet_hmac(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session,
+ uint8_t *hash)
+{
+ HMAC_CTX *ctx = local.hmac_ctx[session->idx];
+ uint32_t offset = param->auth_range.offset;
+ uint32_t len = param->auth_range.length;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+
+ /* Reinitialize HMAC calculation without resetting the key */
+ HMAC_Init_ex(ctx, NULL, 0, NULL, NULL);
+
+ /* Hash it */
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ uint32_t maclen = len > seglen ? seglen : len;
+
+ HMAC_Update(ctx, mapaddr, maclen);
+ offset += maclen;
+ len -= maclen;
+ }
+
+ HMAC_Final(ctx, hash, NULL);
+}
+
+/* XOR one AES block (16 bytes) of 'op' into 'res' in place. */
+static void xor_block(uint8_t *res, const uint8_t *op)
+{
+ int i;
+
+ for (i = 0; i < AES_BLOCK_SIZE; i++)
+ res[i] ^= op[i];
+}
+
+/* XOR 'len' bytes of 'op' into 'res' in place (partial-block helper). */
+static void memxor(uint8_t *res, const uint8_t *op, size_t len)
+{
+ for (size_t i = 0; i < len; i++)
+ res[i] ^= op[i];
+}
+
+/* Compute AES-XCBC-MAC (RFC 3566 style, per process_aesxcbc_param's
+ * K1/K2/K3 derivation) over the packet's auth_range.
+ *
+ * 'e' is the running CBC state; 'eoff' tracks how many bytes of a
+ * partial input block have been XORed into it so far, allowing data
+ * that straddles packet segment boundaries to be accumulated.
+ * The final block is deliberately left unencrypted in the loops
+ * (note the strict '>' in the inner while) so it can be mixed with
+ * K2 (full final block) or 0x80-padded and mixed with K3 (partial)
+ * before the last encryption. Digest is written to 'hash'.
+ */
+static
+void packet_aes_xcbc_mac(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session,
+ uint8_t *hash)
+{
+ uint8_t e[AES_BLOCK_SIZE] = {0};
+ size_t eoff = 0;
+ uint32_t offset = param->auth_range.offset;
+ uint32_t len = param->auth_range.length;
+ uint32_t seglen = 0;
+ uint32_t datalen = 0;
+ int dummy_len = 0;
+ EVP_CIPHER_CTX *ctx;
+ void *mapaddr;
+ uint8_t *data = NULL;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+ _ODP_ASSERT(session != NULL);
+ _ODP_ASSERT(sizeof(session->auth.key) >= 3 * AES_KEY_LENGTH);
+
+ /* K1 (first AES_KEY_LENGTH bytes of auth.key) is the MAC key */
+ ctx = EVP_CIPHER_CTX_new();
+ EVP_EncryptInit_ex(ctx, session->auth.evp_cipher,
+ NULL, session->auth.key, NULL);
+
+ while (len > 0) {
+ mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ datalen = seglen >= len ? len : seglen;
+ data = (uint8_t *)mapaddr;
+ offset += datalen;
+ len -= datalen;
+ if (eoff != 0) {
+ if (eoff + datalen > AES_BLOCK_SIZE) {
+ /* bytes needed to fill the partial block */
+ uint32_t remaining_len = AES_BLOCK_SIZE - eoff;
+
+ memxor(e + eoff, data, remaining_len);
+ datalen -= remaining_len;
+ data += remaining_len;
+ eoff = 0;
+ EVP_EncryptUpdate(ctx,
+ e, &dummy_len, e, sizeof(e));
+ } else {
+ memxor(e + eoff, data, datalen);
+ eoff += datalen;
+ continue;
+ }
+ }
+ while (datalen > AES_BLOCK_SIZE) {
+ xor_block(e, data);
+ EVP_EncryptUpdate(ctx, e, &dummy_len, e, sizeof(e));
+ data += AES_BLOCK_SIZE;
+ datalen -= AES_BLOCK_SIZE;
+ }
+ /* Segmentation handle */
+ if (datalen > 0) {
+ memxor(e, data, datalen);
+ eoff = datalen;
+ }
+ }
+
+ if (eoff == AES_BLOCK_SIZE) {
+ xor_block(e, session->auth.key + AES_KEY_LENGTH);
+ } else {
+ e[eoff] ^= 0x80;
+ xor_block(e, session->auth.key + AES_KEY_LENGTH * 2);
+ }
+ EVP_EncryptUpdate(ctx, hash, &dummy_len, e, sizeof(e));
+ EVP_CIPHER_CTX_free(ctx);
+}
+
+/* Generate mode: compute AES-XCBC-MAC and write the (possibly
+ * truncated) digest into the packet at hash_result_offset. */
+static
+odp_crypto_alg_err_t auth_xcbcmac_gen(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint8_t hash[EVP_MAX_MD_SIZE];
+
+ /* Hash it */
+ packet_aes_xcbc_mac(pkt, param, session, hash);
+
+ /* Copy to the output location */
+ odp_packet_copy_from_mem(pkt,
+ param->hash_result_offset,
+ session->p.auth_digest_len,
+ hash);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Verify mode: recompute the MAC (zeroing the in-packet digest first
+ * when it lies inside the auth range) and compare against the
+ * received digest. */
+static odp_crypto_alg_err_t
+auth_xcbcmac_check(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint32_t bytes = session->p.auth_digest_len;
+ uint8_t hash_in[EVP_MAX_MD_SIZE];
+ uint8_t hash_out[EVP_MAX_MD_SIZE];
+
+ /* Copy current value out and clear it before authentication */
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ bytes, hash_in);
+
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);
+
+ /* Hash it */
+ packet_aes_xcbc_mac(pkt, param, session, hash_out);
+
+ /* Verify match */
+ if (0 != memcmp(hash_in, hash_out, bytes))
+ return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+ /* Matched */
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session setup for AES-XCBC-MAC: select gen/check handler and derive
+ * the three RFC 3566 subkeys K1/K2/K3 (constants 0x01../0x02../0x03..
+ * encrypted with the session key), stored back-to-back in auth.key. */
+static int process_aesxcbc_param(odp_crypto_generic_session_t *session,
+ const EVP_CIPHER *cipher)
+{
+ uint32_t k1[4] = { 0x01010101, 0x01010101, 0x01010101, 0x01010101 };
+ uint32_t k2[4] = { 0x02020202, 0x02020202, 0x02020202, 0x02020202 };
+ uint32_t k3[4] = { 0x03030303, 0x03030303, 0x03030303, 0x03030303 };
+ EVP_CIPHER_CTX *ctx;
+ int dummy_len = 0;
+
+ /* Set function */
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+ session->auth.func = auth_xcbcmac_gen;
+ else
+ session->auth.func = auth_xcbcmac_check;
+ session->auth.init = null_crypto_init_routine;
+
+ session->auth.evp_cipher = cipher;
+ ctx = EVP_CIPHER_CTX_new();
+ EVP_EncryptInit_ex(ctx, session->auth.evp_cipher, NULL,
+ session->p.auth_key.data, NULL);
+ /* K1 = 0x01010101010101010101010101010101 encrypted with Key K */
+ EVP_EncryptUpdate(ctx, session->auth.key,
+ &dummy_len, (uint8_t *)k1, AES_BLOCK_SIZE);
+
+ /* K2 = 0x02020202020202020202020202020202 encrypted with Key K */
+ EVP_EncryptUpdate(ctx, session->auth.key + AES_KEY_LENGTH,
+ &dummy_len, (uint8_t *)k2, AES_BLOCK_SIZE);
+
+ /* K3 = 0x03030303030303030303030303030303 encrypted with Key K */
+ EVP_EncryptUpdate(ctx, session->auth.key + AES_KEY_LENGTH * 2,
+ &dummy_len, (uint8_t *)k3, AES_BLOCK_SIZE);
+
+ EVP_CIPHER_CTX_free(ctx);
+ return 0;
+}
+
+/* Generate mode: compute HMAC over auth_range and write the
+ * (possibly truncated) digest into the packet. */
+static
+odp_crypto_alg_err_t auth_hmac_gen(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint8_t hash[EVP_MAX_MD_SIZE];
+
+ /* Hash it */
+ packet_hmac(pkt, param, session, hash);
+
+ /* Copy to the output location */
+ odp_packet_copy_from_mem(pkt,
+ param->hash_result_offset,
+ session->p.auth_digest_len,
+ hash);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Verify mode: recompute HMAC (zeroing the in-packet digest first when
+ * it lies inside the auth range) and compare against the received one. */
+static
+odp_crypto_alg_err_t auth_hmac_check(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint32_t bytes = session->p.auth_digest_len;
+ uint8_t hash_in[EVP_MAX_MD_SIZE];
+ uint8_t hash_out[EVP_MAX_MD_SIZE];
+
+ /* Copy current value out and clear it before authentication */
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ bytes, hash_in);
+
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);
+
+ /* Hash it */
+ packet_hmac(pkt, param, session, hash_out);
+
+ /* Verify match */
+ if (0 != memcmp(hash_in, hash_out, bytes))
+ return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+ /* Matched */
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Per-thread init: bind the session's CMAC key and cipher to this
+ * thread's CMAC context. Later ops re-init with NULL key (fast). */
+static void
+auth_cmac_init(odp_crypto_generic_session_t *session)
+{
+ CMAC_CTX *ctx = local.cmac_ctx[session->idx];
+
+ CMAC_Init(ctx,
+ session->auth.key,
+ session->p.auth_key.length,
+ session->auth.evp_cipher,
+ NULL);
+}
+
+/* Compute CMAC over the packet's auth_range, walking packet segments.
+ * Digest is written to 'hash' (caller provides >= EVP_MAX_MD_SIZE). */
+static
+void packet_cmac(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session,
+ uint8_t *hash)
+{
+ CMAC_CTX *ctx = local.cmac_ctx[session->idx];
+ uint32_t offset = param->auth_range.offset;
+ uint32_t len = param->auth_range.length;
+ size_t outlen;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+
+ /* Reinitialize CMAC calculation without resetting the key */
+ CMAC_Init(ctx, NULL, 0, NULL, NULL);
+
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ uint32_t maclen = len > seglen ? seglen : len;
+
+ CMAC_Update(ctx, mapaddr, maclen);
+ offset += maclen;
+ len -= maclen;
+ }
+
+ CMAC_Final(ctx, hash, &outlen);
+}
+
+/* Generate mode: compute CMAC and write the (possibly truncated)
+ * digest into the packet at hash_result_offset. */
+static
+odp_crypto_alg_err_t auth_cmac_gen(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint8_t hash[EVP_MAX_MD_SIZE];
+
+ /* Hash it */
+ packet_cmac(pkt, param, session, hash);
+
+ /* Copy to the output location */
+ odp_packet_copy_from_mem(pkt,
+ param->hash_result_offset,
+ session->p.auth_digest_len,
+ hash);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Verify mode: recompute CMAC (zeroing the in-packet digest first when
+ * it lies inside the auth range) and compare against the received one. */
+static
+odp_crypto_alg_err_t auth_cmac_check(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint32_t bytes = session->p.auth_digest_len;
+ uint8_t hash_in[EVP_MAX_MD_SIZE];
+ uint8_t hash_out[EVP_MAX_MD_SIZE];
+
+ /* Copy current value out and clear it before authentication */
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ bytes, hash_in);
+
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);
+
+ /* Hash it */
+ packet_cmac(pkt, param, session, hash_out);
+
+ /* Verify match */
+ if (0 != memcmp(hash_in, hash_out, bytes))
+ return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+ /* Matched */
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* CMAC for AES-EIA2 (128-EIA2 3GPP integrity): same as packet_cmac but
+ * the IV (COUNT/BEARER/DIRECTION block) is prepended to the MACed data.
+ * Returns an odp_crypto_alg_err_t value as int: DATA_SIZE on
+ * CMAC_Final failure, otherwise NONE. */
+static
+int packet_cmac_eia2(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session,
+ uint8_t *hash)
+{
+ CMAC_CTX *ctx = local.cmac_ctx[session->idx];
+ const void *iv_ptr = param->auth_iv_ptr;
+ uint32_t offset = param->auth_range.offset;
+ uint32_t len = param->auth_range.length;
+ size_t outlen;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+
+ /* Reinitialize CMAC calculation without resetting the key */
+ CMAC_Init(ctx, NULL, 0, NULL, NULL);
+
+ CMAC_Update(ctx, iv_ptr, session->p.auth_iv_len);
+
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ uint32_t maclen = len > seglen ? seglen : len;
+
+ CMAC_Update(ctx, mapaddr, maclen);
+ offset += maclen;
+ len -= maclen;
+ }
+
+ if (1 != CMAC_Final(ctx, hash, &outlen))
+ return ODP_CRYPTO_ALG_ERR_DATA_SIZE;
+ else
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Generate mode for AES-EIA2: compute the IV-prefixed CMAC and write
+ * the truncated digest into the packet. Propagates DATA_SIZE errors
+ * from packet_cmac_eia2(). */
+static
+odp_crypto_alg_err_t auth_cmac_eia2_gen(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t
+ *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint8_t hash[EVP_MAX_MD_SIZE];
+ int ret;
+
+ /* Hash it */
+ ret = packet_cmac_eia2(pkt, param, session, hash);
+ if (ret != ODP_CRYPTO_ALG_ERR_NONE)
+ return ret;
+
+ /* Copy to the output location */
+ odp_packet_copy_from_mem(pkt,
+ param->hash_result_offset,
+ session->p.auth_digest_len,
+ hash);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Verify mode for AES-EIA2: recompute the IV-prefixed CMAC (zeroing
+ * the in-packet digest first when it lies inside the auth range) and
+ * compare against the received digest. */
+static
+odp_crypto_alg_err_t auth_cmac_eia2_check(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t
+ *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint32_t bytes = session->p.auth_digest_len;
+ uint8_t hash_in[EVP_MAX_MD_SIZE];
+ uint8_t hash_out[EVP_MAX_MD_SIZE];
+ int ret;
+
+ /* Copy current value out and clear it before authentication */
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ bytes, hash_in);
+
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);
+
+ /* Hash it */
+ ret = packet_cmac_eia2(pkt, param, session, hash_out);
+ if (ret != ODP_CRYPTO_ALG_ERR_NONE)
+ return ret;
+
+ /* Verify match */
+ if (0 != memcmp(hash_in, hash_out, bytes))
+ return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+ /* Matched */
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Compute a plain (unkeyed) message digest over the packet's
+ * auth_range, walking packet segments. Used for MD5/SHA-* algorithms
+ * without HMAC. Digest is written to 'hash'. */
+static
+void packet_digest(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session,
+ uint8_t *hash)
+{
+ EVP_MD_CTX *ctx = local.md_ctx[session->idx];
+ uint32_t offset = param->auth_range.offset;
+ uint32_t len = param->auth_range.length;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+
+ EVP_DigestInit_ex(ctx,
+ session->auth.evp_md,
+ NULL);
+
+ /* Hash it */
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ uint32_t maclen = len > seglen ? seglen : len;
+
+ EVP_DigestUpdate(ctx, mapaddr, maclen);
+ offset += maclen;
+ len -= maclen;
+ }
+
+ EVP_DigestFinal_ex(ctx, hash, NULL);
+}
+
+/* Generate mode: compute the plain digest and write it into the
+ * packet at hash_result_offset. */
+static
+odp_crypto_alg_err_t auth_digest_gen(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint8_t hash[EVP_MAX_MD_SIZE];
+
+ /* Hash it */
+ packet_digest(pkt, param, session, hash);
+
+ /* Copy to the output location */
+ odp_packet_copy_from_mem(pkt,
+ param->hash_result_offset,
+ session->p.auth_digest_len,
+ hash);
+
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Verify mode: recompute the plain digest (zeroing the in-packet
+ * digest first when it lies inside the auth range) and compare. */
+static
+odp_crypto_alg_err_t auth_digest_check(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t
+ *param,
+ odp_crypto_generic_session_t *session)
+{
+ uint32_t bytes = session->p.auth_digest_len;
+ uint8_t hash_in[EVP_MAX_MD_SIZE];
+ uint8_t hash_out[EVP_MAX_MD_SIZE];
+
+ /* Copy current value out and clear it before authentication */
+ odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+ bytes, hash_in);
+
+ if (odp_unlikely(session->p.hash_result_in_auth_range))
+ _odp_packet_set_data(pkt, param->hash_result_offset, 0, bytes);
+
+ /* Hash it */
+ packet_digest(pkt, param, session, hash_out);
+
+ /* Verify match */
+ if (0 != memcmp(hash_in, hash_out, bytes))
+ return ODP_CRYPTO_ALG_ERR_ICV_CHECK;
+
+ /* Matched */
+ return ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Feed the packet's auth_range as AAD (additional authenticated data,
+ * output pointer NULL) into an AEAD cipher context, segment by
+ * segment, then finalize. Returns the EVP_*Final_ex result (1 on
+ * success; for decrypt, 0 means tag verification failed). */
+static
+int internal_aad(EVP_CIPHER_CTX *ctx,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_bool_t encrypt)
+{
+ uint32_t offset = param->auth_range.offset;
+ uint32_t len = param->auth_range.length;
+ int dummy_len;
+ int ret;
+
+ _ODP_ASSERT(offset + len <= odp_packet_len(pkt));
+
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = odp_packet_offset(pkt, offset, &seglen, NULL);
+ uint32_t maclen = len > seglen ? seglen : len;
+
+ if (encrypt)
+ EVP_EncryptUpdate(ctx, NULL, &dummy_len, mapaddr, maclen);
+ else
+ EVP_DecryptUpdate(ctx, NULL, &dummy_len, mapaddr, maclen);
+ offset += maclen;
+ len -= maclen;
+ }
+
+ if (encrypt)
+ ret = EVP_EncryptFinal_ex(ctx, NULL, &dummy_len);
+ else
+ ret = EVP_DecryptFinal_ex(ctx, NULL, &dummy_len);
+
+ return ret;
+}
+
+typedef int (*evp_update_t)(EVP_CIPHER_CTX *, unsigned char *,
+ int *, const unsigned char *, int);
+
+typedef int (*evp_final_t)(EVP_CIPHER_CTX *, unsigned char *, int *);
+
+/* In-place encrypt/decrypt of the packet's cipher_range, one packet
+ * segment per iteration, without copying whole segments out.
+ *
+ * Returns 1 on success, 0 on failure (including unexpected buffering
+ * or output from EVP_final, which indicates the cipher range was not
+ * a multiple of the block size). Relies on the documented behavior
+ * of the EVP update functions for block ciphers with padding off:
+ * only whole blocks are emitted, sub-block input is buffered inside
+ * OpenSSL, and output never exceeds input.
+ */
+static inline int internal_crypt(EVP_CIPHER_CTX *ctx,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ evp_update_t EVP_update,
+ evp_final_t EVP_final)
+{
+ uint32_t in_pos = param->cipher_range.offset;
+ uint32_t out_pos = in_pos;
+ uint32_t in_len = param->cipher_range.length;
+ uint8_t block[EVP_MAX_BLOCK_LENGTH];
+ uint32_t buffered = 0;
+ uint32_t block_len = EVP_CIPHER_block_size(EVP_CIPHER_CTX_cipher(ctx));
+ int out_len;
+ int rc;
+
+ _ODP_ASSERT(in_pos + in_len <= odp_packet_len(pkt));
+
+ /*
+ * In the following loop we process one packet segment per iteration.
+ * We rely on the following properties of the encrypt/decrypt update
+ * function with the algorithms that we use:
+ *
+ * - The function processes (and writes to output) only whole blocks.
+ * - Input data beyond the last full block is buffered inside OpenSSL.
+ * - The amount of buffered data is always less than one block.
+ * - Total amount of output data does not exceed the total amount
+ * of input data at any point.
+ */
+ while (in_len > 0) {
+ uint32_t seglen = 0;
+ uint8_t *in_addr = odp_packet_offset(pkt, in_pos,
+ &seglen, NULL);
+ uint32_t len = in_len < seglen ? in_len : seglen;
+
+ if (odp_unlikely(buffered > 0)) {
+ /*
+ * Leftover data from the previous segment is
+ * in the buffer inside OpenSSL.
+ */
+ uint32_t remaining_len = block_len - buffered;
+
+ if (odp_likely(len >= remaining_len)) {
+ /*
+ * Let's fill the buffered input data to a
+ * full block and get the output block to
+ * a memory buffer. The buffer is then copied
+ * to the packet, crossing segment boundary.
+ */
+ rc = EVP_update(ctx, block, &out_len,
+ in_addr, remaining_len);
+ if (odp_unlikely(rc != 1))
+ goto err;
+ if (odp_unlikely(out_len != (int)block_len))
+ goto err;
+ in_addr += remaining_len;
+ in_pos += remaining_len;
+ len -= remaining_len;
+ in_len -= remaining_len;
+ buffered = 0;
+ rc = odp_packet_copy_from_mem(pkt, out_pos,
+ block_len, block);
+ if (odp_unlikely(rc))
+ goto err;
+ out_pos += block_len;
+ } else {
+ /*
+ * Not enough data in this segment to fill
+ * the buffer to a full block. Fill the buffer
+ * a bit more and go to the next segment.
+ */
+ rc = EVP_update(ctx, block, &out_len,
+ in_addr, len);
+ if (odp_unlikely(rc != 1))
+ goto err;
+ if (odp_unlikely(out_len > 0))
+ goto err;
+ in_pos += len;
+ in_len -= len;
+ buffered += len;
+ continue;
+ }
+ }
+ _ODP_ASSERT(buffered == 0);
+
+ if (in_len > 0) {
+ /*
+ * No input is buffered inside OpenSSL. We pass the
+ * whole remaining segment to OpenSSL and expect to
+ * get a multiple of block size of data processed,
+ * with the rest left in the buffer.
+ */
+ rc = EVP_update(ctx, in_addr, &out_len, in_addr, len);
+ if (odp_unlikely(rc != 1))
+ goto err;
+ _ODP_ASSERT(_ODP_CHECK_IS_POWER2(block_len));
+ buffered = len & (block_len - 1);
+ if (odp_unlikely(out_len + buffered != len))
+ goto err;
+ in_pos += len;
+ in_len -= len;
+ out_pos += len - buffered;
+ }
+ }
+ if (odp_unlikely(buffered > 0))
+ goto err;
+ /*
+ * We do not expect any more data out since the cipher range is
+ * supposed to be a multiple of the block size.
+ */
+ rc = EVP_final(ctx, block, &out_len);
+ if (odp_unlikely(out_len != 0))
+ return 0;
+ return rc;
+err:
+ _ODP_ERR("internal error\n");
+ (void)EVP_final(ctx, block, &out_len);
+ return 0;
+}
+
+/* Encrypt wrapper: internal_crypt() with the EVP encrypt callbacks. */
+static int internal_encrypt(EVP_CIPHER_CTX *ctx,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param)
+{
+ return internal_crypt(ctx, pkt, param,
+ EVP_EncryptUpdate,
+ EVP_EncryptFinal_ex);
+}
+
+/* Decrypt wrapper: internal_crypt() with the EVP decrypt callbacks. */
+static int internal_decrypt(EVP_CIPHER_CTX *ctx,
+ odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param)
+{
+ return internal_crypt(ctx, pkt, param,
+ EVP_DecryptUpdate,
+ EVP_DecryptFinal_ex);
+}
+
+/* Per-thread init: bind cipher algorithm and key to this thread's
+ * context; padding disabled (ranges are block-size multiples). */
+static void
+cipher_encrypt_init(odp_crypto_generic_session_t *session)
+{
+ EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+
+ EVP_EncryptInit_ex(ctx, session->cipher.evp_cipher, NULL,
+ session->cipher.key_data, NULL);
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* Encrypt the packet's cipher_range in place using the per-op IV. */
+static
+odp_crypto_alg_err_t cipher_encrypt(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+ int ret;
+
+ /* Set only the IV; cipher and key were bound at init time */
+ EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+
+ ret = internal_encrypt(ctx, pkt, param);
+
+ return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+ ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Per-thread init: bind cipher algorithm and key to this thread's
+ * context; padding disabled (ranges are block-size multiples). */
+static void
+cipher_decrypt_init(odp_crypto_generic_session_t *session)
+{
+ EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+
+ EVP_DecryptInit_ex(ctx, session->cipher.evp_cipher, NULL,
+ session->cipher.key_data, NULL);
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* Decrypt the packet's cipher_range in place using the per-op IV. */
+static
+odp_crypto_alg_err_t cipher_decrypt(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
+{
+ EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+ int ret;
+
+ /* Set only the IV; cipher and key were bound at init time */
+ EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+
+ ret = internal_decrypt(ctx, pkt, param);
+
+ return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+ ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Common session setup for block ciphers: validate key and IV lengths
+ * against the chosen EVP cipher, copy the key, and select the
+ * encrypt/decrypt handler pair. Returns 0 on success, -1 on mismatch. */
+static int process_cipher_param(odp_crypto_generic_session_t *session,
+ const EVP_CIPHER *cipher)
+{
+ /* Verify Key len is valid */
+ if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+ session->p.cipher_key.length)
+ return -1;
+
+ /* Verify IV len is correct */
+ if ((uint32_t)EVP_CIPHER_iv_length(cipher) !=
+ session->p.cipher_iv_len)
+ return -1;
+
+ session->cipher.evp_cipher = cipher;
+
+ memcpy(session->cipher.key_data, session->p.cipher_key.data,
+ session->p.cipher_key.length);
+
+ /* Set function */
+ if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
+ session->cipher.func = cipher_encrypt;
+ session->cipher.init = cipher_encrypt_init;
+ } else {
+ session->cipher.func = cipher_decrypt;
+ session->cipher.init = cipher_decrypt_init;
+ }
+
+ return 0;
+}
+
+/* Encrypt the cipher range via a stack bounce buffer: copy out of the
+ * packet, run a single EVP update + final, copy back at the same offset.
+ * NOTE(review): data[] is a VLA sized by the caller-supplied range length;
+ * presumably bounded by the packet/segment limits — confirm upper bound. */
+static odp_crypto_alg_err_t cipher_encrypt_bytes(odp_packet_t pkt,
+						 const odp_crypto_packet_op_param_t *param,
+						 odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	int dummy_len = 0;
+	int cipher_len;
+	uint32_t in_len = param->cipher_range.length;
+	uint32_t offset = param->cipher_range.offset;
+	uint8_t data[in_len];
+	int ret;
+
+	EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+	odp_packet_copy_to_mem(pkt, offset, in_len, data);
+	EVP_EncryptUpdate(ctx, data, &cipher_len, data, in_len);
+	/* Only the final call's status decides the result code. */
+	ret = EVP_EncryptFinal_ex(ctx, data + cipher_len, &dummy_len);
+	cipher_len += dummy_len;
+	odp_packet_copy_from_mem(pkt, offset, in_len, data);
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Decrypt counterpart of cipher_encrypt_bytes: bounce the cipher range
+ * through a stack buffer, single EVP update + final, write back in place. */
+static odp_crypto_alg_err_t cipher_decrypt_bytes(odp_packet_t pkt,
+						 const odp_crypto_packet_op_param_t *param,
+						 odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	int dummy_len = 0;
+	int cipher_len;
+	uint32_t in_len = param->cipher_range.length;
+	uint32_t offset = param->cipher_range.offset;
+	uint8_t data[in_len];
+	int ret;
+
+	EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+	odp_packet_copy_to_mem(pkt, offset, in_len, data);
+	EVP_DecryptUpdate(ctx, data, &cipher_len, data, in_len);
+	ret = EVP_DecryptFinal_ex(ctx, data + cipher_len, &dummy_len);
+	cipher_len += dummy_len;
+	odp_packet_copy_from_mem(pkt, offset, in_len, data);
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Bit-mode wrapper: convert the bit-granular cipher range to bytes
+ * (offset truncated, length rounded up) and delegate to the byte path. */
+static odp_crypto_alg_err_t cipher_encrypt_bits(odp_packet_t pkt,
+						const odp_crypto_packet_op_param_t *param,
+						odp_crypto_generic_session_t *session)
+{
+	odp_crypto_packet_op_param_t new_param = *param;
+
+	new_param.cipher_range.offset /= 8;
+	new_param.cipher_range.length = (new_param.cipher_range.length + 7) / 8;
+	return cipher_encrypt_bytes(pkt, &new_param, session);
+}
+
+/* Bit-mode wrapper: convert the bit-granular cipher range to bytes
+ * (offset truncated, length rounded up) and delegate to the byte path. */
+static odp_crypto_alg_err_t cipher_decrypt_bits(odp_packet_t pkt,
+						const odp_crypto_packet_op_param_t *param,
+						odp_crypto_generic_session_t *session)
+{
+	odp_crypto_packet_op_param_t new_param = *param;
+
+	new_param.cipher_range.offset /= 8;
+	new_param.cipher_range.length = (new_param.cipher_range.length + 7) / 8;
+	return cipher_decrypt_bytes(pkt, &new_param, session);
+}
+
+/* Like process_cipher_param, but for ciphers that may operate on bit-mode
+ * ranges (e.g. AES-EEA2): picks the _bits or _bytes handler depending on
+ * the session's cipher_range_in_bits flag. Returns 0 / -1 as above. */
+static int process_cipher_param_bits(odp_crypto_generic_session_t *session,
+				     const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.cipher_key.length)
+		return -1;
+
+	/* Verify IV len is correct */
+	if ((uint32_t)EVP_CIPHER_iv_length(cipher) !=
+	    session->p.cipher_iv_len)
+		return -1;
+
+	session->cipher.evp_cipher = cipher;
+
+	memcpy(session->cipher.key_data, session->p.cipher_key.data,
+	       session->p.cipher_key.length);
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
+		session->cipher.init = cipher_encrypt_init;
+		session->cipher.func = session->cipher_range_in_bits ? cipher_encrypt_bits
+								     : cipher_encrypt_bytes;
+
+	} else {
+		session->cipher.init = cipher_decrypt_init;
+		session->cipher.func = session->cipher_range_in_bits ? cipher_decrypt_bits
+								     : cipher_decrypt_bytes;
+	}
+
+	return 0;
+}
+
+/* Session-lifetime GCM encrypt setup: load cipher + key, program the
+ * session's IV length, and disable padding (AEAD handles its own). */
+static void
+aes_gcm_encrypt_init(odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+
+	EVP_EncryptInit_ex(ctx, session->cipher.evp_cipher, NULL,
+			   session->cipher.key_data, NULL);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN,
+			    session->p.cipher_iv_len, NULL);
+	EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* AES-GCM encrypt: set per-packet IV, feed AAD (authenticated only),
+ * encrypt the cipher range, then extract the tag and store it at
+ * hash_result_offset in the packet. */
+static
+odp_crypto_alg_err_t aes_gcm_encrypt(odp_packet_t pkt,
+				     const odp_crypto_packet_op_param_t *param,
+				     odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	const uint8_t *aad_head = param->aad_ptr;
+	uint32_t aad_len = session->p.auth_aad_len;
+	int dummy_len = 0;
+	uint8_t block[EVP_MAX_MD_SIZE];
+	int ret;
+
+	EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+
+	/* Authenticate header data (if any) without encrypting them */
+	if (aad_len > 0)
+		EVP_EncryptUpdate(ctx, NULL, &dummy_len,
+				  aad_head, aad_len);
+
+	ret = internal_encrypt(ctx, pkt, param);
+
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG,
+			    session->p.auth_digest_len, block);
+	odp_packet_copy_from_mem(pkt, param->hash_result_offset,
+				 session->p.auth_digest_len, block);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session-lifetime GCM decrypt setup: mirror of aes_gcm_encrypt_init. */
+static void
+aes_gcm_decrypt_init(odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+
+	EVP_DecryptInit_ex(ctx, session->cipher.evp_cipher, NULL,
+			   session->cipher.key_data, NULL);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN,
+			    session->p.cipher_iv_len, NULL);
+	EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* AES-GCM decrypt: set per-packet IV, load the expected tag from the
+ * packet, feed AAD, then decrypt; a failed final maps to ICV_CHECK
+ * since GCM verifies the tag there. */
+static
+odp_crypto_alg_err_t aes_gcm_decrypt(odp_packet_t pkt,
+				     const odp_crypto_packet_op_param_t *param,
+				     odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	const uint8_t *aad_head = param->aad_ptr;
+	uint32_t aad_len = session->p.auth_aad_len;
+	int dummy_len = 0;
+	uint8_t block[EVP_MAX_MD_SIZE];
+	int ret;
+
+	EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+
+	odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+			       session->p.auth_digest_len, block);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG,
+			    session->p.auth_digest_len, block);
+
+	/* Authenticate header data (if any) without encrypting them */
+	if (aad_len > 0)
+		EVP_DecryptUpdate(ctx, NULL, &dummy_len,
+				  aad_head, aad_len);
+
+	ret = internal_decrypt(ctx, pkt, param);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_ICV_CHECK :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session setup for AES-GCM (also reused for ChaCha20-Poly1305): checks
+ * key length against the cipher and requires the 12-byte AEAD nonce;
+ * installs encrypt or decrypt handlers. Returns 0 / -1. */
+static int process_aes_gcm_param(odp_crypto_generic_session_t *session,
+				 const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.cipher_key.length)
+		return -1;
+
+	/* Verify IV len is correct */
+	if (12 != session->p.cipher_iv_len)
+		return -1;
+
+	memcpy(session->cipher.key_data, session->p.cipher_key.data,
+	       session->p.cipher_key.length);
+
+	session->cipher.evp_cipher = cipher;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
+		session->cipher.func = aes_gcm_encrypt;
+		session->cipher.init = aes_gcm_encrypt_init;
+	} else {
+		session->cipher.func = aes_gcm_decrypt;
+		session->cipher.init = aes_gcm_decrypt_init;
+	}
+
+	return 0;
+}
+
+/* Session-lifetime GMAC generation setup: GCM context keyed with the
+ * AUTH key, using the dedicated mac_cipher_ctx slot. */
+static void
+aes_gmac_gen_init(odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.mac_cipher_ctx[session->idx];
+
+	EVP_EncryptInit_ex(ctx, session->auth.evp_cipher, NULL,
+			   session->auth.key, NULL);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN,
+			    session->p.auth_iv_len, NULL);
+	EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* AES-GMAC generate: authenticate the auth range as AAD only (no
+ * ciphertext), then write the resulting tag at hash_result_offset. */
+static
+odp_crypto_alg_err_t aes_gmac_gen(odp_packet_t pkt,
+				  const odp_crypto_packet_op_param_t *param,
+				  odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.mac_cipher_ctx[session->idx];
+	uint8_t block[EVP_MAX_MD_SIZE];
+	int ret;
+
+	EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->auth_iv_ptr);
+
+	ret = internal_aad(ctx, pkt, param, true);
+
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG,
+			    session->p.auth_digest_len, block);
+	odp_packet_copy_from_mem(pkt, param->hash_result_offset,
+				 session->p.auth_digest_len, block);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session-lifetime GMAC verification setup: mirror of aes_gmac_gen_init. */
+static void
+aes_gmac_check_init(odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.mac_cipher_ctx[session->idx];
+
+	EVP_DecryptInit_ex(ctx, session->auth.evp_cipher, NULL,
+			   session->auth.key, NULL);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_IVLEN,
+			    session->p.auth_iv_len, NULL);
+	EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* AES-GMAC verify: load the received tag, optionally zero the digest
+ * bytes inside the packet when the hash result lies within the
+ * authenticated range (so they don't corrupt the MAC), then run the AAD
+ * pass; failure maps to ICV_CHECK. */
+static
+odp_crypto_alg_err_t aes_gmac_check(odp_packet_t pkt,
+				    const odp_crypto_packet_op_param_t *param,
+				    odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.mac_cipher_ctx[session->idx];
+	uint8_t block[EVP_MAX_MD_SIZE];
+	int ret;
+
+	EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->auth_iv_ptr);
+
+	odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+			       session->p.auth_digest_len, block);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_SET_TAG,
+			    session->p.auth_digest_len, block);
+	if (odp_unlikely(session->p.hash_result_in_auth_range))
+		_odp_packet_set_data(pkt, param->hash_result_offset,
+				     0, session->p.auth_digest_len);
+
+	ret = internal_aad(ctx, pkt, param, false);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_ICV_CHECK :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session setup for AES-GMAC: auth-key length must match the GCM cipher,
+ * auth IV must be the 12-byte AEAD nonce; installs gen/check handlers. */
+static int process_aes_gmac_param(odp_crypto_generic_session_t *session,
+				  const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.auth_key.length)
+		return -1;
+
+	/* Verify IV len is correct */
+	if (12 != session->p.auth_iv_len)
+		return -1;
+
+	memcpy(session->auth.key, session->p.auth_key.data,
+	       session->p.auth_key.length);
+
+	session->auth.evp_cipher = cipher;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
+		session->auth.func = aes_gmac_gen;
+		session->auth.init = aes_gmac_gen_init;
+	} else {
+		session->auth.func = aes_gmac_check;
+		session->auth.init = aes_gmac_check_init;
+	}
+
+	return 0;
+}
+
+/* Session-lifetime CCM encrypt setup. The key is deliberately NOT loaded
+ * here: CCM requires tag length to be set before the key, so the
+ * per-packet path re-inits with the key after EVP_CTRL_CCM_SET_TAG. */
+static void
+aes_ccm_encrypt_init(odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+
+	EVP_EncryptInit_ex(ctx, session->cipher.evp_cipher, NULL,
+			   NULL, NULL);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN,
+			    session->p.cipher_iv_len, NULL);
+	EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* AES-CCM encrypt. CCM's fixed call order is followed: set tag length,
+ * key+nonce init, declare total plaintext length, feed AAD, single-shot
+ * encrypt through a stack bounce buffer, then emit the tag at
+ * hash_result_offset. */
+static
+odp_crypto_alg_err_t aes_ccm_encrypt(odp_packet_t pkt,
+				     const odp_crypto_packet_op_param_t *param,
+				     odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	const uint8_t *aad_head = param->aad_ptr;
+	uint32_t aad_len = session->p.auth_aad_len;
+	int dummy_len = 0;
+	int cipher_len;
+	uint32_t in_len = param->cipher_range.length;
+	uint8_t data[in_len];
+	uint8_t block[EVP_MAX_MD_SIZE];
+	int ret;
+
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG,
+			    session->p.auth_digest_len, NULL);
+	EVP_EncryptInit_ex(ctx, NULL, NULL, session->cipher.key_data, param->cipher_iv_ptr);
+
+	/* Set len */
+	EVP_EncryptUpdate(ctx, NULL, &dummy_len, NULL, in_len);
+
+	/* Authenticate header data (if any) without encrypting them */
+	if (aad_len > 0)
+		EVP_EncryptUpdate(ctx, NULL, &dummy_len,
+				  aad_head, aad_len);
+
+	odp_packet_copy_to_mem(pkt, param->cipher_range.offset, in_len,
+			       data);
+
+	EVP_EncryptUpdate(ctx, data, &cipher_len, data, in_len);
+
+	ret = EVP_EncryptFinal_ex(ctx, data + cipher_len, &dummy_len);
+	cipher_len += dummy_len;
+
+	odp_packet_copy_from_mem(pkt, param->cipher_range.offset, in_len,
+				 data);
+
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_GET_TAG,
+			    session->p.auth_digest_len, block);
+	odp_packet_copy_from_mem(pkt, param->hash_result_offset,
+				 session->p.auth_digest_len, block);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session-lifetime CCM decrypt setup: load cipher and key, program the
+ * nonce length, disable padding. The per-packet op re-inits the key
+ * after setting the expected tag, as CCM's call order requires. */
+static void
+aes_ccm_decrypt_init(odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+
+	EVP_DecryptInit_ex(ctx, session->cipher.evp_cipher, NULL,
+			   session->cipher.key_data, NULL);
+	/* Use the CCM-named control for a CCM context. It is numerically
+	 * identical to the EVP_CTRL_GCM_SET_IVLEN alias previously used
+	 * (both map to EVP_CTRL_AEAD_SET_IVLEN), so behavior is unchanged,
+	 * but the name now matches the mode. */
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_IVLEN,
+			    session->p.cipher_iv_len, NULL);
+	EVP_CIPHER_CTX_set_padding(ctx, 0);
+}
+
+/* AES-CCM decrypt. Order mandated by CCM: load expected tag first, then
+ * key+nonce init, declare length, feed AAD, then a single decrypt update
+ * whose status carries the tag verification result (hence ICV_CHECK). */
+static
+odp_crypto_alg_err_t aes_ccm_decrypt(odp_packet_t pkt,
+				     const odp_crypto_packet_op_param_t *param,
+				     odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	const uint8_t *aad_head = param->aad_ptr;
+	uint32_t aad_len = session->p.auth_aad_len;
+	int dummy_len = 0;
+	int cipher_len;
+	uint32_t in_len = param->cipher_range.length;
+	uint8_t data[in_len];
+	uint8_t block[EVP_MAX_MD_SIZE];
+	int ret;
+
+	odp_packet_copy_to_mem(pkt, param->hash_result_offset,
+			       session->p.auth_digest_len, block);
+	EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_CCM_SET_TAG,
+			    session->p.auth_digest_len, block);
+	EVP_DecryptInit_ex(ctx, NULL, NULL, session->cipher.key_data, param->cipher_iv_ptr);
+
+	/* Set len */
+	EVP_DecryptUpdate(ctx, NULL, &dummy_len, NULL, in_len);
+
+	/* Authenticate header data (if any) without encrypting them */
+	if (aad_len > 0)
+		EVP_DecryptUpdate(ctx, NULL, &dummy_len,
+				  aad_head, aad_len);
+
+	odp_packet_copy_to_mem(pkt, param->cipher_range.offset, in_len,
+			       data);
+
+	/* In CCM mode the update call performs the tag check. */
+	ret = EVP_DecryptUpdate(ctx, data, &cipher_len, data, in_len);
+
+	EVP_DecryptFinal_ex(ctx, data + cipher_len, &dummy_len);
+	cipher_len += dummy_len;
+
+	odp_packet_copy_from_mem(pkt, param->cipher_range.offset, in_len,
+				 data);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_ICV_CHECK :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session setup for AES-CCM: key must match the cipher; nonce must be
+ * 11 or 13 bytes (the CCM L parameter variants supported here). */
+static int process_aes_ccm_param(odp_crypto_generic_session_t *session,
+				 const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.cipher_key.length)
+		return -1;
+
+	/* Verify IV len is correct */
+	if (11 != session->p.cipher_iv_len &&
+	    13 != session->p.cipher_iv_len)
+		return -1;
+
+	memcpy(session->cipher.key_data, session->p.cipher_key.data,
+	       session->p.cipher_key.length);
+
+	session->cipher.evp_cipher = cipher;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
+		session->cipher.func = aes_ccm_encrypt;
+		session->cipher.init = aes_ccm_encrypt_init;
+	} else {
+		session->cipher.func = aes_ccm_decrypt;
+		session->cipher.init = aes_ccm_decrypt_init;
+	}
+
+	return 0;
+}
+
+/* AES-XTS encrypt: per-packet tweak (IV) init, single-shot encrypt via a
+ * stack bounce buffer, write back in place at the same offset. */
+static
+odp_crypto_alg_err_t xts_encrypt(odp_packet_t pkt,
+				 const odp_crypto_packet_op_param_t *param,
+				 odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	int dummy_len = 0;
+	int cipher_len;
+	uint32_t in_len = param->cipher_range.length;
+	uint8_t data[in_len];
+	int ret;
+
+	EVP_EncryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+
+	odp_packet_copy_to_mem(pkt, param->cipher_range.offset, in_len,
+			       data);
+
+	EVP_EncryptUpdate(ctx, data, &cipher_len, data, in_len);
+
+	ret = EVP_EncryptFinal_ex(ctx, data + cipher_len, &dummy_len);
+	cipher_len += dummy_len;
+
+	odp_packet_copy_from_mem(pkt, param->cipher_range.offset, in_len,
+				 data);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* AES-XTS decrypt: mirror of xts_encrypt using the decrypt direction. */
+static
+odp_crypto_alg_err_t xts_decrypt(odp_packet_t pkt,
+				 const odp_crypto_packet_op_param_t *param,
+				 odp_crypto_generic_session_t *session)
+{
+	EVP_CIPHER_CTX *ctx = local.cipher_ctx[session->idx];
+	int dummy_len = 0;
+	int cipher_len;
+	uint32_t in_len = param->cipher_range.length;
+	uint8_t data[in_len];
+	int ret;
+
+	EVP_DecryptInit_ex(ctx, NULL, NULL, NULL, param->cipher_iv_ptr);
+
+	odp_packet_copy_to_mem(pkt, param->cipher_range.offset, in_len,
+			       data);
+
+	EVP_DecryptUpdate(ctx, data, &cipher_len, data, in_len);
+
+	ret = EVP_DecryptFinal_ex(ctx, data + cipher_len, &dummy_len);
+	cipher_len += dummy_len;
+
+	odp_packet_copy_from_mem(pkt, param->cipher_range.offset, in_len,
+			       data);
+
+	return ret <= 0 ? ODP_CRYPTO_ALG_ERR_DATA_SIZE :
+			  ODP_CRYPTO_ALG_ERR_NONE;
+}
+
+/* Session setup for AES-XTS: like process_cipher_param but installs the
+ * XTS-specific per-packet handlers (init routines are the generic ones). */
+static int process_xts_param(odp_crypto_generic_session_t *session,
+			     const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.cipher_key.length)
+		return -1;
+
+	/* Verify IV len is correct */
+	if ((uint32_t)EVP_CIPHER_iv_length(cipher) !=
+	    session->p.cipher_iv_len)
+		return -1;
+
+	session->cipher.evp_cipher = cipher;
+
+	memcpy(session->cipher.key_data, session->p.cipher_key.data,
+	       session->p.cipher_key.length);
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op) {
+		session->cipher.func = xts_encrypt;
+		session->cipher.init = cipher_encrypt_init;
+	} else {
+		session->cipher.func = xts_decrypt;
+		session->cipher.init = cipher_decrypt_init;
+	}
+
+	return 0;
+}
+
+/* Session setup for HMAC auth: no IV allowed; truncated digests are
+ * accepted down to half the full MD size (RFC 2104 convention). */
+static int process_auth_hmac_param(odp_crypto_generic_session_t *session,
+				   const EVP_MD *evp_md)
+{
+	/* Verify IV len is correct */
+	if (0 != session->p.auth_iv_len)
+		return -1;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+		session->auth.func = auth_hmac_gen;
+	else
+		session->auth.func = auth_hmac_check;
+	session->auth.init = auth_hmac_init;
+
+	session->auth.evp_md = evp_md;
+
+	/* Number of valid bytes */
+	if (session->p.auth_digest_len < (unsigned)EVP_MD_size(evp_md) / 2)
+		return -1;
+
+	/* Convert keys */
+	memcpy(session->auth.key, session->p.auth_key.data,
+	       session->p.auth_key.length);
+
+	return 0;
+}
+
+/* Session setup for AES-CMAC auth: key must match the underlying CBC
+ * cipher, no IV; digest may be truncated to half the block size. */
+static int process_auth_cmac_param(odp_crypto_generic_session_t *session,
+				   const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.auth_key.length)
+		return -1;
+
+	if (0 != session->p.auth_iv_len)
+		return -1;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+		session->auth.func = auth_cmac_gen;
+	else
+		session->auth.func = auth_cmac_check;
+	session->auth.init = auth_cmac_init;
+
+	session->auth.evp_cipher = cipher;
+
+	/* Number of valid bytes */
+	if (session->p.auth_digest_len <
+	    (unsigned)EVP_CIPHER_block_size(cipher) / 2)
+		return -1;
+
+	/* Convert keys */
+	memcpy(session->auth.key, session->p.auth_key.data,
+	       session->p.auth_key.length);
+
+	return 0;
+}
+
+/* Session setup for 128-EIA2 (CMAC-based 3GPP integrity): requires the
+ * 8-byte COUNT||BEARER||DIRECTION IV and a fixed 4-byte MAC. */
+static int process_auth_cmac_eia2_param(odp_crypto_generic_session_t *session,
+					const EVP_CIPHER *cipher)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_CIPHER_key_length(cipher) !=
+	    session->p.auth_key.length)
+		return -1;
+
+	/* Verify IV len is correct */
+	if (8 != session->p.auth_iv_len)
+		return -1;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+		session->auth.func = auth_cmac_eia2_gen;
+	else
+		session->auth.func = auth_cmac_eia2_check;
+	session->auth.init = auth_cmac_init;
+
+	session->auth.evp_cipher = cipher;
+
+	/* Number of valid bytes */
+	if (session->p.auth_digest_len != 4)
+		return -1;
+
+	/* Convert keys */
+	memcpy(session->auth.key, session->p.auth_key.data,
+	       session->p.auth_key.length);
+
+	return 0;
+}
+
+/* Session setup for plain (un-keyed) digest auth: the requested digest
+ * length must equal the full MD size; no cipher-side init needed. */
+static int process_digest_param(odp_crypto_generic_session_t *session,
+				const EVP_MD *md)
+{
+	/* Verify Key len is valid */
+	if ((uint32_t)EVP_MD_size(md) !=
+	    session->p.auth_digest_len)
+		return -1;
+
+	/* Set function */
+	if (ODP_CRYPTO_OP_ENCODE == session->p.op)
+		session->auth.func = auth_digest_gen;
+	else
+		session->auth.func = auth_digest_check;
+	session->auth.init = null_crypto_init_routine;
+
+	session->auth.evp_md = md;
+
+	return 0;
+}
+
+/* Report the crypto capabilities of this OpenSSL-backed implementation.
+ * In process memory model only NULL algs are offered because libssl's
+ * allocator is not fork-safe across ODP processes. Returns 0 or -1. */
+int odp_crypto_capability(odp_crypto_capability_t *capa)
+{
+	if (odp_global_ro.disable.crypto) {
+		_ODP_ERR("Crypto is disabled\n");
+		return -1;
+	}
+
+	if (NULL == capa)
+		return -1;
+
+	/* Initialize crypto capability structure */
+	memset(capa, 0, sizeof(odp_crypto_capability_t));
+
+	capa->max_sessions = MAX_SESSIONS;
+	capa->sync_mode = ODP_SUPPORT_PREFERRED;
+	capa->async_mode = ODP_SUPPORT_YES;
+	capa->queue_type_plain = 1;
+	capa->queue_type_sched = 1;
+
+	/* Memory allocation in libssl is not compatible with process mode */
+	if (odp_global_ro.init_param.mem_model == ODP_MEM_MODEL_PROCESS) {
+		capa->ciphers.bit.null = 1;
+		capa->auths.bit.null = 1;
+		return 0;
+	}
+
+	capa->ciphers.bit.null = 1;
+	capa->ciphers.bit.trides_cbc  = 1;
+	capa->ciphers.bit.trides_ecb = 1;
+	capa->ciphers.bit.aes_cbc  = 1;
+	capa->ciphers.bit.aes_ctr  = 1;
+	capa->ciphers.bit.aes_ecb = 1;
+	capa->ciphers.bit.aes_cfb128  = 1;
+	capa->ciphers.bit.aes_xts  = 1;
+	capa->ciphers.bit.aes_gcm  = 1;
+	capa->ciphers.bit.aes_ccm = 1;
+#if _ODP_HAVE_CHACHA20_POLY1305
+	capa->ciphers.bit.chacha20_poly1305 = 1;
+#endif
+	capa->ciphers.bit.aes_eea2 = 1;
+
+	capa->auths.bit.null = 1;
+	capa->auths.bit.md5_hmac = 1;
+	capa->auths.bit.sha1_hmac = 1;
+	capa->auths.bit.sha224_hmac = 1;
+	capa->auths.bit.sha256_hmac = 1;
+	capa->auths.bit.sha384_hmac = 1;
+	capa->auths.bit.sha512_hmac = 1;
+	capa->auths.bit.aes_xcbc_mac = 1;
+	capa->auths.bit.aes_gcm  = 1;
+	capa->auths.bit.aes_ccm = 1;
+	capa->auths.bit.aes_gmac = 1;
+	capa->auths.bit.aes_cmac = 1;
+#if _ODP_HAVE_CHACHA20_POLY1305
+	capa->auths.bit.chacha20_poly1305 = 1;
+#endif
+	capa->auths.bit.aes_eia2 = 1;
+
+	capa->auths.bit.md5 = 1;
+	capa->auths.bit.sha1 = 1;
+	capa->auths.bit.sha224 = 1;
+	capa->auths.bit.sha256 = 1;
+	capa->auths.bit.sha384 = 1;
+	capa->auths.bit.sha512 = 1;
+
+	return 0;
+}
+
+/* Copy up to num_copy per-algorithm cipher capability entries into dst
+ * and return the total number available (may exceed num_copy), or -1
+ * for an unknown algorithm. Tables are file-scope constants. */
+int odp_crypto_cipher_capability(odp_cipher_alg_t cipher,
+				 odp_crypto_cipher_capability_t dst[],
+				 int num_copy)
+{
+	const odp_crypto_cipher_capability_t *src;
+	int num;
+	int size = sizeof(odp_crypto_cipher_capability_t);
+
+	switch (cipher) {
+	case ODP_CIPHER_ALG_NULL:
+		src = cipher_capa_null;
+		num = sizeof(cipher_capa_null) / size;
+		break;
+	case ODP_CIPHER_ALG_3DES_CBC:
+		src = cipher_capa_trides_cbc;
+		num = sizeof(cipher_capa_trides_cbc) / size;
+		break;
+	case ODP_CIPHER_ALG_3DES_ECB:
+		src = cipher_capa_trides_ecb;
+		num = sizeof(cipher_capa_trides_ecb) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_CBC:
+		src = cipher_capa_aes_cbc;
+		num = sizeof(cipher_capa_aes_cbc) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_CTR:
+		src = cipher_capa_aes_ctr;
+		num = sizeof(cipher_capa_aes_ctr) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_ECB:
+		src = cipher_capa_aes_ecb;
+		num = sizeof(cipher_capa_aes_ecb) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_CFB128:
+		src = cipher_capa_aes_cfb128;
+		num = sizeof(cipher_capa_aes_cfb128) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_XTS:
+		src = cipher_capa_aes_xts;
+		num = sizeof(cipher_capa_aes_xts) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_GCM:
+		src = cipher_capa_aes_gcm;
+		num = sizeof(cipher_capa_aes_gcm) / size;
+		break;
+	case ODP_CIPHER_ALG_AES_CCM:
+		src = cipher_capa_aes_ccm;
+		num = sizeof(cipher_capa_aes_ccm) / size;
+		break;
+#if _ODP_HAVE_CHACHA20_POLY1305
+	case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+		src = cipher_capa_chacha20_poly1305;
+		num = sizeof(cipher_capa_chacha20_poly1305) / size;
+		break;
+#endif
+	case ODP_CIPHER_ALG_AES_EEA2:
+		src = cipher_capa_aes_eea2;
+		num = sizeof(cipher_capa_aes_eea2) / size;
+		break;
+	default:
+		return -1;
+	}
+
+	if (num < num_copy)
+		num_copy = num;
+
+	memcpy(dst, src, num_copy * size);
+
+	return num;
+}
+
+/* Auth-side counterpart of odp_crypto_cipher_capability: copy up to
+ * num_copy entries for the given auth algorithm and return the total
+ * count, or -1 for an unknown algorithm. */
+int odp_crypto_auth_capability(odp_auth_alg_t auth,
+			       odp_crypto_auth_capability_t dst[], int num_copy)
+{
+	const odp_crypto_auth_capability_t *src;
+	int num;
+	int size = sizeof(odp_crypto_auth_capability_t);
+
+	switch (auth) {
+	case ODP_AUTH_ALG_NULL:
+		src = auth_capa_null;
+		num = sizeof(auth_capa_null) / size;
+		break;
+	case ODP_AUTH_ALG_MD5_HMAC:
+		src = auth_capa_md5_hmac;
+		num = sizeof(auth_capa_md5_hmac) / size;
+		break;
+	case ODP_AUTH_ALG_SHA1_HMAC:
+		src = auth_capa_sha1_hmac;
+		num = sizeof(auth_capa_sha1_hmac) / size;
+		break;
+	case ODP_AUTH_ALG_SHA224_HMAC:
+		src = auth_capa_sha224_hmac;
+		num = sizeof(auth_capa_sha224_hmac) / size;
+		break;
+	case ODP_AUTH_ALG_SHA256_HMAC:
+		src = auth_capa_sha256_hmac;
+		num = sizeof(auth_capa_sha256_hmac) / size;
+		break;
+	case ODP_AUTH_ALG_SHA384_HMAC:
+		src = auth_capa_sha384_hmac;
+		num = sizeof(auth_capa_sha384_hmac) / size;
+		break;
+	case ODP_AUTH_ALG_SHA512_HMAC:
+		src = auth_capa_sha512_hmac;
+		num = sizeof(auth_capa_sha512_hmac) / size;
+		break;
+	case ODP_AUTH_ALG_AES_XCBC_MAC:
+		src = auth_capa_aes_xcbc;
+		num = sizeof(auth_capa_aes_xcbc) / size;
+		break;
+	case ODP_AUTH_ALG_AES_GCM:
+		src = auth_capa_aes_gcm;
+		num = sizeof(auth_capa_aes_gcm) / size;
+		break;
+	case ODP_AUTH_ALG_AES_GMAC:
+		src = auth_capa_aes_gmac;
+		num = sizeof(auth_capa_aes_gmac) / size;
+		break;
+	case ODP_AUTH_ALG_AES_CCM:
+		src = auth_capa_aes_ccm;
+		num = sizeof(auth_capa_aes_ccm) / size;
+		break;
+	case ODP_AUTH_ALG_AES_CMAC:
+		src = auth_capa_aes_cmac;
+		num = sizeof(auth_capa_aes_cmac) / size;
+		break;
+#if _ODP_HAVE_CHACHA20_POLY1305
+	case ODP_AUTH_ALG_CHACHA20_POLY1305:
+		src = auth_capa_chacha20_poly1305;
+		num = sizeof(auth_capa_chacha20_poly1305) / size;
+		break;
+#endif
+	case ODP_AUTH_ALG_AES_EIA2:
+		src = auth_capa_aes_eia2;
+		num = sizeof(auth_capa_aes_eia2) / size;
+		break;
+	case ODP_AUTH_ALG_MD5:
+		src = auth_capa_md5;
+		num = sizeof(auth_capa_md5) / size;
+		break;
+	case ODP_AUTH_ALG_SHA1:
+		src = auth_capa_sha1;
+		num = sizeof(auth_capa_sha1) / size;
+		break;
+	case ODP_AUTH_ALG_SHA224:
+		src = auth_capa_sha224;
+		num = sizeof(auth_capa_sha224) / size;
+		break;
+	case ODP_AUTH_ALG_SHA256:
+		src = auth_capa_sha256;
+		num = sizeof(auth_capa_sha256) / size;
+		break;
+	case ODP_AUTH_ALG_SHA384:
+		src = auth_capa_sha384;
+		num = sizeof(auth_capa_sha384) / size;
+		break;
+	case ODP_AUTH_ALG_SHA512:
+		src = auth_capa_sha512;
+		num = sizeof(auth_capa_sha512) / size;
+		break;
+	default:
+		return -1;
+	}
+
+	if (num < num_copy)
+		num_copy = num;
+
+	memcpy(dst, src, num_copy * size);
+
+	return num;
+}
+
+/* Create a crypto session: validate parameters, allocate a session slot,
+ * dispatch on cipher algorithm then auth algorithm to the matching
+ * process_*_param helper, and publish the session handle.
+ * On any failure the slot is freed, *session_out is set to INVALID and
+ * *status identifies whether the cipher or auth side was rejected.
+ * Returns 0 on success, -1 on failure. */
+int
+odp_crypto_session_create(const odp_crypto_session_param_t *param,
+			  odp_crypto_session_t *session_out,
+			  odp_crypto_ses_create_err_t *status)
+{
+	int rc;
+	odp_crypto_generic_session_t *session;
+	int cipher_bit_mode_supported = 0;
+	int auth_bit_mode_supported = 0;
+
+	if (odp_global_ro.disable.crypto) {
+		_ODP_ERR("Crypto is disabled\n");
+		/* Dummy output to avoid compiler warning about uninitialized
+		 * variables */
+		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
+		*session_out = ODP_CRYPTO_SESSION_INVALID;
+		return -1;
+	}
+
+	/* Process mode is not supported with libssl based algos */
+	if (odp_global_ro.init_param.mem_model == ODP_MEM_MODEL_PROCESS &&
+	    (param->cipher_alg != ODP_CIPHER_ALG_NULL ||
+	     param->auth_alg != ODP_AUTH_ALG_NULL)) {
+		*status = param->cipher_alg != ODP_CIPHER_ALG_NULL ?
+			ODP_CRYPTO_SES_ERR_CIPHER : ODP_CRYPTO_SES_ERR_AUTH;
+		*session_out = ODP_CRYPTO_SESSION_INVALID;
+		return -1;
+	}
+
+	/* Allocate memory for this session */
+	session = alloc_session();
+	if (NULL == session) {
+		*status = ODP_CRYPTO_SES_ERR_ENOMEM;
+		goto err;
+	}
+
+	/* Copy parameters */
+	session->p = *param;
+
+	session->cipher_range_in_bits = !!param->cipher_range_in_bits;
+	session->auth_range_in_bits = !!param->auth_range_in_bits;
+	session->auth_range_used = 1;
+	session->null_crypto_enable = !!param->null_crypto_enable;
+
+	/* null_crypto_enable is only meaningful for async operation */
+	if (session->null_crypto_enable && param->op_mode == ODP_CRYPTO_SYNC) {
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	/* IVs are stored in fixed EVP_MAX_IV_LENGTH buffers */
+	if (session->p.cipher_iv_len > EVP_MAX_IV_LENGTH) {
+		_ODP_DBG("Maximum IV length exceeded\n");
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	if (session->p.auth_iv_len > EVP_MAX_IV_LENGTH) {
+		_ODP_DBG("Maximum auth IV length exceeded\n");
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	/* Derive order */
+	if (ODP_CRYPTO_OP_ENCODE == param->op)
+		session->do_cipher_first =  param->auth_cipher_text;
+	else
+		session->do_cipher_first = !param->auth_cipher_text;
+
+	/* Process based on cipher */
+	switch (param->cipher_alg) {
+	case ODP_CIPHER_ALG_NULL:
+		session->cipher.func = null_crypto_routine;
+		session->cipher.init = null_crypto_init_routine;
+		cipher_bit_mode_supported = 1;
+		rc = 0;
+		break;
+	case ODP_CIPHER_ALG_3DES_CBC:
+		rc = process_cipher_param(session, EVP_des_ede3_cbc());
+		break;
+	case ODP_CIPHER_ALG_3DES_ECB:
+		rc = process_cipher_param(session, EVP_des_ede3_ecb());
+		break;
+	case ODP_CIPHER_ALG_AES_CBC:
+		if (param->cipher_key.length == 16)
+			rc = process_cipher_param(session, EVP_aes_128_cbc());
+		else if (param->cipher_key.length == 24)
+			rc = process_cipher_param(session, EVP_aes_192_cbc());
+		else if (param->cipher_key.length == 32)
+			rc = process_cipher_param(session, EVP_aes_256_cbc());
+		else
+			rc = -1;
+		break;
+	case ODP_CIPHER_ALG_AES_CTR:
+		if (param->cipher_key.length == 16)
+			rc = process_cipher_param(session, EVP_aes_128_ctr());
+		else if (param->cipher_key.length == 24)
+			rc = process_cipher_param(session, EVP_aes_192_ctr());
+		else if (param->cipher_key.length == 32)
+			rc = process_cipher_param(session, EVP_aes_256_ctr());
+		else
+			rc = -1;
+		break;
+	case ODP_CIPHER_ALG_AES_ECB:
+		if (param->cipher_key.length == 16)
+			rc = process_cipher_param(session, EVP_aes_128_ecb());
+		else if (param->cipher_key.length == 24)
+			rc = process_cipher_param(session, EVP_aes_192_ecb());
+		else if (param->cipher_key.length == 32)
+			rc = process_cipher_param(session, EVP_aes_256_ecb());
+		else
+			rc = -1;
+		break;
+	case ODP_CIPHER_ALG_AES_CFB128:
+		if (param->cipher_key.length == 16)
+			rc = process_cipher_param(session,
+						  EVP_aes_128_cfb128());
+		else if (param->cipher_key.length == 24)
+			rc = process_cipher_param(session,
+						  EVP_aes_192_cfb128());
+		else if (param->cipher_key.length == 32)
+			rc = process_cipher_param(session,
+						  EVP_aes_256_cfb128());
+		else
+			rc = -1;
+		break;
+	case ODP_CIPHER_ALG_AES_XTS:
+		/* XTS keys are two AES keys concatenated: 32 or 64 bytes */
+		if (param->cipher_key.length == 32)
+			rc = process_xts_param(session, EVP_aes_128_xts());
+		else if (param->cipher_key.length == 64)
+			rc = process_xts_param(session, EVP_aes_256_xts());
+		else
+			rc = -1;
+		break;
+	case ODP_CIPHER_ALG_AES_GCM:
+		/* AES-GCM requires to do both auth and
+		 * cipher at the same time */
+		if (param->auth_alg != ODP_AUTH_ALG_AES_GCM)
+			rc = -1;
+		else if (param->cipher_key.length == 16)
+			rc = process_aes_gcm_param(session, EVP_aes_128_gcm());
+		else if (param->cipher_key.length == 24)
+			rc = process_aes_gcm_param(session, EVP_aes_192_gcm());
+		else if (param->cipher_key.length == 32)
+			rc = process_aes_gcm_param(session, EVP_aes_256_gcm());
+		else
+			rc = -1;
+		break;
+	case ODP_CIPHER_ALG_AES_CCM:
+		/* AES-CCM requires to do both auth and
+		 * cipher at the same time */
+		if (param->auth_alg != ODP_AUTH_ALG_AES_CCM)
+			rc = -1;
+		else if (param->cipher_key.length == 16)
+			rc = process_aes_ccm_param(session, EVP_aes_128_ccm());
+		else if (param->cipher_key.length == 24)
+			rc = process_aes_ccm_param(session, EVP_aes_192_ccm());
+		else if (param->cipher_key.length == 32)
+			rc = process_aes_ccm_param(session, EVP_aes_256_ccm());
+		else
+			rc = -1;
+		break;
+#if _ODP_HAVE_CHACHA20_POLY1305
+	case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+		/* ChaCha20_Poly1305 requires to do both auth and
+		 * cipher at the same time */
+		if (param->auth_alg != ODP_AUTH_ALG_CHACHA20_POLY1305)
+			rc = -1;
+		else
+			rc = process_aes_gcm_param(session,
+						   EVP_chacha20_poly1305());
+		break;
+#endif
+	case ODP_CIPHER_ALG_AES_EEA2:
+		if (param->cipher_key.length == 16)
+			rc = process_cipher_param_bits(session,
+						       EVP_aes_128_ctr());
+		else
+			rc = -1;
+		cipher_bit_mode_supported = 1;
+		break;
+	default:
+		rc = -1;
+	}
+
+	/* Bit-mode ranges are only valid for NULL and AES-EEA2 */
+	if (session->cipher_range_in_bits && !cipher_bit_mode_supported)
+		rc = -1;
+
+	/* Check result */
+	if (rc) {
+		*status = ODP_CRYPTO_SES_ERR_CIPHER;
+		goto err;
+	}
+
+	/* Process based on auth */
+	switch (param->auth_alg) {
+	case ODP_AUTH_ALG_NULL:
+		session->auth.func = null_crypto_routine;
+		session->auth.init = null_crypto_init_routine;
+		auth_bit_mode_supported = 1;
+		rc = 0;
+		break;
+	case ODP_AUTH_ALG_MD5_HMAC:
+		rc = process_auth_hmac_param(session, EVP_md5());
+		break;
+	case ODP_AUTH_ALG_SHA1_HMAC:
+		rc = process_auth_hmac_param(session, EVP_sha1());
+		break;
+	case ODP_AUTH_ALG_SHA224_HMAC:
+		rc = process_auth_hmac_param(session, EVP_sha224());
+		break;
+	case ODP_AUTH_ALG_SHA256_HMAC:
+		rc = process_auth_hmac_param(session, EVP_sha256());
+		break;
+	case ODP_AUTH_ALG_SHA384_HMAC:
+		rc = process_auth_hmac_param(session, EVP_sha384());
+		break;
+	case ODP_AUTH_ALG_SHA512_HMAC:
+		rc = process_auth_hmac_param(session, EVP_sha512());
+		break;
+	case ODP_AUTH_ALG_AES_XCBC_MAC:
+		rc = process_aesxcbc_param(session, EVP_aes_128_ecb());
+		break;
+	case ODP_AUTH_ALG_AES_GCM:
+		/* AES-GCM requires to do both auth and
+		 * cipher at the same time */
+		if (param->cipher_alg == ODP_CIPHER_ALG_AES_GCM) {
+			session->auth.func = null_crypto_routine;
+			session->auth.init = null_crypto_init_routine;
+			rc = 0;
+		} else {
+			rc = -1;
+		}
+		session->auth_range_used = 0;
+		break;
+	case ODP_AUTH_ALG_AES_GMAC:
+		if (param->auth_key.length == 16)
+			rc = process_aes_gmac_param(session, EVP_aes_128_gcm());
+		else if (param->auth_key.length == 24)
+			rc = process_aes_gmac_param(session, EVP_aes_192_gcm());
+		else if (param->auth_key.length == 32)
+			rc = process_aes_gmac_param(session, EVP_aes_256_gcm());
+		else
+			rc = -1;
+		break;
+	case ODP_AUTH_ALG_AES_CCM:
+		/* AES-CCM requires to do both auth and
+		 * cipher at the same time */
+		if (param->cipher_alg == ODP_CIPHER_ALG_AES_CCM) {
+			session->auth.func = null_crypto_routine;
+			session->auth.init = null_crypto_init_routine;
+			rc = 0;
+		} else {
+			rc = -1;
+		}
+		session->auth_range_used = 0;
+		break;
+	case ODP_AUTH_ALG_AES_CMAC:
+		if (param->auth_key.length == 16)
+			rc = process_auth_cmac_param(session,
+						     EVP_aes_128_cbc());
+		else if (param->auth_key.length == 24)
+			rc = process_auth_cmac_param(session,
+						     EVP_aes_192_cbc());
+		else if (param->auth_key.length == 32)
+			rc = process_auth_cmac_param(session,
+						     EVP_aes_256_cbc());
+		else
+			rc = -1;
+		break;
+#if _ODP_HAVE_CHACHA20_POLY1305
+	case ODP_AUTH_ALG_CHACHA20_POLY1305:
+		/* ChaCha20_Poly1305 requires to do both auth and
+		 * cipher at the same time */
+		if (param->cipher_alg == ODP_CIPHER_ALG_CHACHA20_POLY1305) {
+			session->auth.func = null_crypto_routine;
+			session->auth.init = null_crypto_init_routine;
+			rc = 0;
+		} else {
+			rc = -1;
+		}
+		session->auth_range_used = 0;
+		break;
+#endif
+	case ODP_AUTH_ALG_AES_EIA2:
+		if (param->auth_key.length == 16)
+			rc = process_auth_cmac_eia2_param(session,
+							  EVP_aes_128_cbc());
+		else
+			rc = -1;
+		break;
+	case ODP_AUTH_ALG_MD5:
+		rc = process_digest_param(session, EVP_md5());
+		break;
+	case ODP_AUTH_ALG_SHA1:
+		rc = process_digest_param(session, EVP_sha1());
+		break;
+	case ODP_AUTH_ALG_SHA224:
+		rc = process_digest_param(session, EVP_sha224());
+		break;
+	case ODP_AUTH_ALG_SHA256:
+		rc = process_digest_param(session, EVP_sha256());
+		break;
+	case ODP_AUTH_ALG_SHA384:
+		rc = process_digest_param(session, EVP_sha384());
+		break;
+	case ODP_AUTH_ALG_SHA512:
+		rc = process_digest_param(session, EVP_sha512());
+		break;
+	default:
+		rc = -1;
+	}
+
+	/* Bit-mode auth ranges are only valid for ODP_AUTH_ALG_NULL */
+	if (session->auth_range_in_bits && !auth_bit_mode_supported)
+		rc = -1;
+
+	/* Check result */
+	if (rc) {
+		*status = ODP_CRYPTO_SES_ERR_AUTH;
+		goto err;
+	}
+
+	/* We're happy */
+	*session_out = (intptr_t)session;
+	*status = ODP_CRYPTO_SES_ERR_NONE;
+	return 0;
+
+err:
+	/* error status should be set at this moment */
+	if (session != NULL)
+		free_session(session);
+	*session_out = ODP_CRYPTO_SESSION_INVALID;
+	return -1;
+}
+
+int odp_crypto_session_destroy(odp_crypto_session_t session)
+{
+ odp_crypto_generic_session_t *generic;
+
+ generic = (odp_crypto_generic_session_t *)(intptr_t)session;
+ memset(generic, 0, sizeof(*generic));
+ free_session(generic);
+ return 0;
+}
+
#if OPENSSL_VERSION_NUMBER < 0x10100000L
/* Thread-id callback for OpenSSL < 1.1.0: report the ODP thread id so the
 * library can tell callers apart in its locking machinery. */
static void ODP_UNUSED openssl_thread_id(CRYPTO_THREADID ODP_UNUSED *id)
{
	CRYPTO_THREADID_set_numeric(id, odp_thread_id());
}

/* Locking callback for OpenSSL < 1.1.0: take or release the n:th global
 * ticketlock depending on the CRYPTO_LOCK bit in 'mode'. */
static void ODP_UNUSED openssl_lock(int mode, int n,
				    const char *file ODP_UNUSED,
				    int line ODP_UNUSED)
{
	if (mode & CRYPTO_LOCK)
		odp_ticketlock_lock(&global->openssl_lock[n]);
	else
		odp_ticketlock_unlock(&global->openssl_lock[n]);
}
#endif
+
/* Allocate and initialize global crypto state: the session free list and,
 * when the OpenSSL version needs it, the lock array and thread callbacks.
 * Returns 0 on success (also when crypto is disabled), -1 on failure. */
int _odp_crypto_init_global(void)
{
	size_t mem_size;
	odp_shm_t shm;
	int idx;
	int nlocks = CRYPTO_num_locks();

	if (odp_global_ro.disable.crypto) {
		_ODP_PRINT("\nODP crypto is DISABLED\n");
		return 0;
	}

	/* Calculate the memory size we need: the global struct plus one
	 * ticketlock per OpenSSL lock slot. */
	mem_size = sizeof(odp_crypto_global_t);
	mem_size += nlocks * sizeof(odp_ticketlock_t);

	/* Allocate our globally shared memory */
	shm = odp_shm_reserve("_odp_crypto_ssl_global", mem_size,
			      ODP_CACHE_LINE_SIZE,
			      0);
	if (ODP_SHM_INVALID == shm) {
		_ODP_ERR("unable to allocate crypto pool\n");
		return -1;
	}

	global = odp_shm_addr(shm);

	/* Clear it out */
	memset(global, 0, mem_size);

	/* Initialize free list and lock */
	for (idx = 0; idx < MAX_SESSIONS; idx++) {
		global->sessions[idx].next = global->free;
		global->free = &global->sessions[idx];
	}
	odp_spinlock_init(&global->lock);

	/* Install locking callbacks only for OpenSSL versions that need
	 * application-provided locks (pre-1.1.0, per the #if below). */
	if (nlocks > 0) {
		for (idx = 0; idx < nlocks; idx++)
			odp_ticketlock_init(&global->openssl_lock[idx]);

#if OPENSSL_VERSION_NUMBER < 0x10100000L
		CRYPTO_THREADID_set_callback(openssl_thread_id);
		CRYPTO_set_locking_callback(openssl_lock);
#endif
	}

	return 0;
}
+
/* Tear down global crypto state. Reports an error (but still continues) if
 * sessions are still allocated, unregisters the legacy OpenSSL callbacks
 * and frees the shared memory. Returns 0 on success, -1 on any failure. */
int _odp_crypto_term_global(void)
{
	int rc = 0;
	int ret;
	int count = 0;
	odp_crypto_generic_session_t *session;

	if (odp_global_ro.disable.crypto)
		return 0;

	/* Every session must be back on the free list at termination time. */
	for (session = global->free; session != NULL; session = session->next)
		count++;
	if (count != MAX_SESSIONS) {
		_ODP_ERR("crypto sessions still active\n");
		rc = -1;
	}

#if OPENSSL_VERSION_NUMBER < 0x10100000L
	CRYPTO_set_locking_callback(NULL);
	CRYPTO_set_id_callback(NULL);
#endif

	/* NOTE(review): if the lookup fails this passes an invalid handle to
	 * odp_shm_free() — presumably that returns < 0; verify. */
	ret = odp_shm_free(odp_shm_lookup("_odp_crypto_ssl_global"));
	if (ret < 0) {
		_ODP_ERR("shm free failed for crypto_pool\n");
		rc = -1;
	}

	return rc;
}
+
/* Per-thread init: allocate one set of OpenSSL contexts per session slot so
 * concurrent threads never share a context. Returns 0 on success, -1 on
 * allocation failure (already-allocated contexts are rolled back). */
int _odp_crypto_init_local(void)
{
	unsigned i;
	int id;

	memset(&local, 0, sizeof(local));

	if (odp_global_ro.disable.crypto)
		return 0;

	for (i = 0; i < MAX_SESSIONS; i++) {
		local.hmac_ctx[i] = HMAC_CTX_new();
		local.cmac_ctx[i] = CMAC_CTX_new();
		local.cipher_ctx[i] = EVP_CIPHER_CTX_new();
		local.mac_cipher_ctx[i] = EVP_CIPHER_CTX_new();
		local.md_ctx[i] = EVP_MD_CTX_new();

		if (local.hmac_ctx[i] == NULL ||
		    local.cmac_ctx[i] == NULL ||
		    local.md_ctx[i] == NULL ||
		    local.cipher_ctx[i] == NULL ||
		    local.mac_cipher_ctx[i] == NULL) {
			/* Roll back everything allocated so far. */
			_odp_crypto_term_local();
			return -1;
		}
	}

	id = odp_thread_id();
	local.ctx_valid = global->ctx_valid[id];
	/* No need to clear flags here, alloc_session did the job for us */

	return 0;
}
+
/* Per-thread teardown: release all OpenSSL contexts allocated by
 * _odp_crypto_init_local(). Safe after a partial init, since every pointer
 * is NULL-checked before freeing. Always returns 0. */
int _odp_crypto_term_local(void)
{
	unsigned i;

	if (odp_global_ro.disable.crypto)
		return 0;

	for (i = 0; i < MAX_SESSIONS; i++) {
		if (local.cmac_ctx[i] != NULL)
			CMAC_CTX_free(local.cmac_ctx[i]);
		if (local.hmac_ctx[i] != NULL)
			HMAC_CTX_free(local.hmac_ctx[i]);
		if (local.cipher_ctx[i] != NULL)
			EVP_CIPHER_CTX_free(local.cipher_ctx[i]);
		if (local.mac_cipher_ctx[i] != NULL)
			EVP_CIPHER_CTX_free(local.mac_cipher_ctx[i]);
		if (local.md_ctx[i] != NULL)
			EVP_MD_CTX_free(local.md_ctx[i]);
	}

	return 0;
}
+
+void odp_crypto_session_param_init(odp_crypto_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_crypto_session_param_t));
+}
+
+uint64_t odp_crypto_session_to_u64(odp_crypto_session_t hdl)
+{
+ return (uint64_t)hdl;
+}
+
/* Copy packet payload and metadata from src to dst. Returns 0 on success,
 * -1 when metadata copying between the two pools is not possible or when
 * the data copy fails. */
static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
{
	int md_copy;
	int rc;

	md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
					       odp_packet_pool(src));
	if (odp_unlikely(md_copy < 0)) {
		_ODP_ERR("Unable to copy packet metadata\n");
		return -1;
	}

	rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
	if (odp_unlikely(rc < 0)) {
		_ODP_ERR("Unable to copy packet data\n");
		return -1;
	}

	_odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
	return 0;
}
+
/* Resolve the packet that the crypto operation will modify in place.
 * If the caller supplied no output packet, one is taken from the session's
 * output pool (or the input packet is reused when the pools match); if an
 * output packet was supplied, input data and metadata are copied into it.
 * The input packet is freed whenever a different packet is returned.
 * Returns ODP_PACKET_INVALID on failure. */
static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
				      odp_packet_t pkt_in,
				      odp_packet_t pkt_out)
{
	int rc;

	if (odp_likely(pkt_in == pkt_out))
		return pkt_out;

	if (pkt_out == ODP_PACKET_INVALID) {
		odp_pool_t pool = session->p.output_pool;

		_ODP_ASSERT(pool != ODP_POOL_INVALID);
		if (pool == odp_packet_pool(pkt_in)) {
			/* Same pool: the input packet can serve as output. */
			pkt_out = pkt_in;
		} else {
			pkt_out = odp_packet_copy(pkt_in, pool);
			if (odp_likely(pkt_out != ODP_PACKET_INVALID))
				odp_packet_free(pkt_in);
		}
		return pkt_out;
	}
	rc = copy_data_and_metadata(pkt_out, pkt_in);
	if (odp_unlikely(rc < 0))
		return ODP_PACKET_INVALID;

	odp_packet_free(pkt_in);
	return pkt_out;
}
+
/* Core synchronous crypto processing for one packet: resolve the packet to
 * operate on, then run the cipher and auth routines in the session-defined
 * order. Algorithm failures are reported through the packet's crypto result
 * metadata, not the return value. Returns 0 on success, -1 only when no
 * output packet could be obtained. */
static
int crypto_int(odp_packet_t pkt_in,
	       odp_packet_t *pkt_out,
	       const odp_crypto_packet_op_param_t *param)
{
	odp_crypto_alg_err_t rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
	odp_crypto_alg_err_t rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
	odp_crypto_generic_session_t *session;
	odp_packet_t out_pkt;
	odp_crypto_packet_result_t *op_result;

	session = (odp_crypto_generic_session_t *)(intptr_t)param->session;

	if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
		out_pkt = pkt_in;
	} else if (session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP &&
		   *pkt_out == ODP_PACKET_INVALID) {
		/* BASIC_AND_OOP with no output packet behaves as basic. */
		out_pkt = pkt_in;
	} else {
		out_pkt = get_output_packet(session, pkt_in, *pkt_out);
		if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
			return -1;
	}

	/* Per-packet null-crypto request: skip all processing. */
	if (odp_unlikely(session->null_crypto_enable && param->null_crypto))
		goto out;

	if (ODP_DEBUG) {
		/* Debug-only sanity check: digest must fit in the packet. */
		if (session->p.auth_alg != ODP_AUTH_ALG_NULL &&
		    param->hash_result_offset + session->p.auth_digest_len
		    > odp_packet_len(out_pkt)) {
			_ODP_ERR("Invalid hash result offset\n");
			rc_cipher = ODP_CRYPTO_ALG_ERR_DATA_SIZE;
			rc_auth = ODP_CRYPTO_ALG_ERR_DATA_SIZE;
			goto out;
		}
	}
	_ODP_ASSERT(session->p.cipher_iv_len == 0 || param->cipher_iv_ptr != NULL);
	_ODP_ASSERT(session->p.auth_iv_len == 0 || param->auth_iv_ptr != NULL);

	crypto_init(session);

	/* Invoke the functions */
	if (session->do_cipher_first) {
		rc_cipher = session->cipher.func(out_pkt, param, session);
		rc_auth = session->auth.func(out_pkt, param, session);
	} else {
		rc_auth = session->auth.func(out_pkt, param, session);
		rc_cipher = session->cipher.func(out_pkt, param, session);
	}

out:
	/* Fill in result */
	packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
	op_result = &packet_hdr(out_pkt)->crypto_op_result;
	op_result->cipher_status.alg_err = rc_cipher;
	op_result->auth_status.alg_err = rc_auth;

	/* Synchronous, simply return results */
	*pkt_out = out_pkt;

	return 0;
}
+
/*
 * Copy cipher range and auth range from src to dst, with the destination
 * offset shifted by dst_offset_shift. Bit-granularity ranges are converted
 * to bytes, rounding the length up so every touched byte is covered.
 * Copy failures are logged but not propagated to the caller.
 */
static void copy_ranges(odp_packet_t dst,
			odp_packet_t src,
			const odp_crypto_generic_session_t *session,
			const odp_crypto_packet_op_param_t *param)
{
	odp_packet_data_range_t c_range = param->cipher_range;
	odp_packet_data_range_t a_range = param->auth_range;
	int32_t shift = param->dst_offset_shift;
	int rc;

	if (session->cipher_range_in_bits) {
		c_range.offset /= 8;
		c_range.length = (c_range.length + 7) / 8;
	}
	if (session->auth_range_in_bits) {
		a_range.offset /= 8;
		a_range.length = (a_range.length + 7) / 8;
	}

	if (c_range.length > 0) {
		rc = odp_packet_copy_from_pkt(dst, c_range.offset + shift,
					      src, c_range.offset,
					      c_range.length);
		if (rc) {
			_ODP_ERR("cipher range copying failed\n");
			return;
		}
	}
	if (session->auth_range_used && a_range.length > 0) {
		rc = odp_packet_copy_from_pkt(dst, a_range.offset + shift,
					      src, a_range.offset,
					      a_range.length);
		if (rc) {
			_ODP_ERR("auth range copying failed\n");
			return;
		}
	}
}
+
/* Out-of-place encode: copy the cipher/auth ranges into the output packet,
 * shift the range offsets by dst_offset_shift (scaled by 8 for bit-mode
 * ranges), then run the operation in place on the output packet. */
static int crypto_int_oop_encode(odp_packet_t pkt_in,
				 odp_packet_t *pkt_out,
				 const odp_crypto_generic_session_t *session,
				 const odp_crypto_packet_op_param_t *param)
{
	odp_crypto_packet_op_param_t new_param = *param;
	const uint32_t c_scale = session->cipher_range_in_bits ? 8 : 1;
	const uint32_t a_scale = session->auth_range_in_bits ? 8 : 1;

	copy_ranges(*pkt_out, pkt_in, session, param);

	new_param.cipher_range.offset += param->dst_offset_shift * c_scale;
	new_param.auth_range.offset += param->dst_offset_shift * a_scale;

	return crypto_int(*pkt_out, pkt_out, &new_param);
}
+
/* Out-of-place decode: run the in-place operation on a throwaway copy of
 * the input, then copy the resulting ranges and the operation result into
 * the output packet. The input packet itself remains unmodified. */
static int crypto_int_oop_decode(odp_packet_t pkt_in,
				 odp_packet_t *pkt_out,
				 const odp_crypto_generic_session_t *session,
				 const odp_crypto_packet_op_param_t *param)
{
	odp_packet_t copy;
	int rc;

	copy = odp_packet_copy(pkt_in, odp_packet_pool(pkt_in));
	if (copy == ODP_PACKET_INVALID)
		return -1;

	rc = crypto_int(copy, &copy, param);
	if (rc < 0) {
		odp_packet_free(copy);
		return rc;
	}

	copy_ranges(*pkt_out, copy, session, param);

	/* Propagate the crypto result from the scratch copy to the real
	 * output packet before releasing the copy. */
	packet_subtype_set(*pkt_out, ODP_EVENT_PACKET_CRYPTO);
	packet_hdr(*pkt_out)->crypto_op_result = packet_hdr(copy)->crypto_op_result;
	odp_packet_free(copy);

	return 0;
}
+
/*
 * Slow out-of-place operation implemented using copying and in-place
 * operation. Dispatches to the encode or decode variant based on the
 * session's operation direction.
 */
static int crypto_int_oop(odp_packet_t pkt_in,
			  odp_packet_t *pkt_out,
			  const odp_crypto_packet_op_param_t *param)
{
	odp_crypto_generic_session_t *session;
	int rc;

	session = (odp_crypto_generic_session_t *)(intptr_t)param->session;

	if (session->p.op == ODP_CRYPTO_OP_ENCODE)
		rc = crypto_int_oop_encode(pkt_in, pkt_out, session, param);
	else
		rc = crypto_int_oop_decode(pkt_in, pkt_out, session, param);
	if (rc)
		return rc;

	/* Async completions must be able to report the original input. */
	if (session->p.op_mode == ODP_CRYPTO_ASYNC)
		packet_hdr(*pkt_out)->crypto_op_result.pkt_in = pkt_in;

	return 0;
}
+
/* Synchronous crypto operation on a burst of packets. Dispatches each
 * packet according to its session's operation type. Returns the number of
 * packets processed, which may be less than num_pkt on failure. */
int odp_crypto_op(const odp_packet_t pkt_in[],
		  odp_packet_t pkt_out[],
		  const odp_crypto_packet_op_param_t param[],
		  int num_pkt)
{
	int i, rc;
	odp_crypto_generic_session_t *session;

	for (i = 0; i < num_pkt; i++) {
		session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
		_ODP_ASSERT(ODP_CRYPTO_SYNC == session->p.op_mode);

		if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
			rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
		} else if (session->p.op_type == ODP_CRYPTO_OP_TYPE_OOP) {
			rc = crypto_int_oop(pkt_in[i], &pkt_out[i], &param[i]);
		} else if (session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
			/* Invalid output handle selects basic operation. */
			if (pkt_out[i] == ODP_PACKET_INVALID) /* basic */
				rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
			else /* oop */
				rc = crypto_int_oop(pkt_in[i], &pkt_out[i], &param[i]);
		} else {
			_ODP_ASSERT(session->p.op_type == ODP_CRYPTO_OP_TYPE_LEGACY);
			rc = crypto_int(pkt_in[i], &pkt_out[i], &param[i]);
		}
		if (rc < 0)
			break;
	}

	return i;
}
+
/* Asynchronous crypto operation on a burst of packets: process each packet
 * synchronously (this is a software implementation) and enqueue the result
 * to the session's completion queue. On enqueue failure the event is freed.
 * Returns the number of packets consumed. */
int odp_crypto_op_enq(const odp_packet_t pkt_in[],
		      const odp_packet_t pkt_out[],
		      const odp_crypto_packet_op_param_t param[],
		      int num_pkt)
{
	odp_packet_t pkt;
	odp_event_t event;
	odp_crypto_generic_session_t *session;
	int i, rc;

	for (i = 0; i < num_pkt; i++) {
		session = (odp_crypto_generic_session_t *)(intptr_t)param[i].session;
		_ODP_ASSERT(ODP_CRYPTO_ASYNC == session->p.op_mode);
		_ODP_ASSERT(ODP_QUEUE_INVALID != session->p.compl_queue);

		if (odp_likely(session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC)) {
			rc = crypto_int(pkt_in[i], &pkt, &param[i]);
		} else if (session->p.op_type == ODP_CRYPTO_OP_TYPE_OOP) {
			pkt = pkt_out[i];
			rc = crypto_int_oop(pkt_in[i], &pkt, &param[i]);
		} else if (session->p.op_type == ODP_CRYPTO_OP_TYPE_BASIC_AND_OOP) {
			pkt = pkt_out[i];
			/* Invalid output handle selects basic operation. */
			if (pkt_out[i] == ODP_PACKET_INVALID) /* basic */
				rc = crypto_int(pkt_in[i], &pkt, &param[i]);
			else /* oop */
				rc = crypto_int_oop(pkt_in[i], &pkt, &param[i]);
		} else {
			_ODP_ASSERT(session->p.op_type == ODP_CRYPTO_OP_TYPE_LEGACY);
			pkt = pkt_out[i];
			rc = crypto_int(pkt_in[i], &pkt, &param[i]);
		}
		if (rc < 0)
			break;

		event = odp_packet_to_event(pkt);
		if (odp_queue_enq(session->p.compl_queue, event)) {
			odp_event_free(event);
			break;
		}
	}

	return i;
}
diff --git a/platform/linux-generic/odp_dma.c b/platform/linux-generic/odp_dma.c
new file mode 100644
index 000000000..b3439498c
--- /dev/null
+++ b/platform/linux-generic/odp_dma.c
@@ -0,0 +1,864 @@
+/* Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/dma.h>
+#include <odp/api/event.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/align.h>
+#include <odp/api/buffer.h>
+#include <odp/api/stash.h>
+#include <odp/api/packet.h>
+#include <odp/api/pool.h>
+#include <odp/api/queue.h>
+
+#include <odp/api/plat/std_inlines.h>
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_global_data.h>
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+#include <odp_event_internal.h>
+#include <odp_pool_internal.h>
+
+#include <string.h>
+#include <inttypes.h>
+
/* Implementation limits of this software DMA driver */
#define MAX_SESSIONS CONFIG_MAX_DMA_SESSIONS
#define MAX_TRANSFERS 256
#define MAX_SEGS 16
#define MAX_SEG_LEN (128 * 1024)

/* Transfer IDs are uint32_t values stored in a stash; zero is reserved. */
ODP_STATIC_ASSERT(MAX_TRANSFERS < UINT32_MAX, "Too many inflight transfers");

/* One contiguous memory region of a transfer */
typedef struct segment_t {
	void *addr;
	uint32_t len;

} segment_t;

/* One elementary copy between two contiguous regions */
typedef struct transfer_t {
	void *dst;
	void *src;
	uint32_t len;

} transfer_t;

/* Per-transfer-ID data reported back in poll mode completion */
typedef struct result_t {
	void *user_ptr;

} result_t;

/* State of one DMA session */
typedef struct ODP_ALIGNED_CACHE dma_session_t {
	odp_ticketlock_t lock;		/* serializes create/destroy of this slot */
	odp_dma_param_t dma_param;
	uint8_t active;			/* non-zero while the slot is in use */
	char name[ODP_DMA_NAME_LEN];
	odp_stash_t stash;		/* free transfer IDs (poll mode only) */
	result_t result[MAX_TRANSFERS];

} dma_session_t;

/* Global DMA state kept in shared memory */
typedef struct dma_global_t {
	odp_shm_t shm;

	/* Buffer pool capability and default parameters */
	odp_pool_capability_t pool_capa;
	odp_pool_param_t pool_param;

	dma_session_t session[MAX_SESSIONS];

} dma_global_t;

static dma_global_t *_odp_dma_glb;

/* DMA handles are direct pointers to session table entries. */
static inline dma_session_t *dma_session_from_handle(odp_dma_t dma)
{
	return (dma_session_t *)(uintptr_t)dma;
}
+
/* Report the fixed limits of this software DMA implementation. Completion
 * event pool limits are derived from the buffer pool capability cached at
 * init time. Returns 0 on success, -1 when DMA is disabled. */
int odp_dma_capability(odp_dma_capability_t *capa)
{
	if (odp_global_ro.disable.dma) {
		_ODP_ERR("DMA is disabled\n");
		return -1;
	}

	memset(capa, 0, sizeof(odp_dma_capability_t));

	capa->max_sessions = MAX_SESSIONS;
	capa->max_transfers = MAX_TRANSFERS;
	capa->max_src_segs = MAX_SEGS;
	capa->max_dst_segs = MAX_SEGS;
	capa->max_segs = 2 * MAX_SEGS;
	capa->max_seg_len = MAX_SEG_LEN;

	/* All completion modes are supported by the software driver. */
	capa->compl_mode_mask = ODP_DMA_COMPL_SYNC | ODP_DMA_COMPL_NONE |
				ODP_DMA_COMPL_EVENT | ODP_DMA_COMPL_POLL;

	capa->queue_type_sched = 1;
	capa->queue_type_plain = 1;

	/* Completion event pools are backed by plain buffer pools. */
	capa->pool.max_pools = _odp_dma_glb->pool_capa.buf.max_pools;
	capa->pool.max_num = _odp_dma_glb->pool_capa.buf.max_num;
	capa->pool.max_uarea_size = _odp_dma_glb->pool_capa.buf.max_uarea_size;
	capa->pool.uarea_persistence = _odp_dma_glb->pool_capa.buf.uarea_persistence;
	capa->pool.min_cache_size = _odp_dma_glb->pool_capa.buf.min_cache_size;
	capa->pool.max_cache_size = _odp_dma_glb->pool_capa.buf.max_cache_size;

	return 0;
}
+
+void odp_dma_param_init(odp_dma_param_t *param)
+{
+ memset(param, 0, sizeof(odp_dma_param_t));
+
+ param->direction = ODP_DMA_MAIN_TO_MAIN;
+ param->type = ODP_DMA_TYPE_COPY;
+ param->mt_mode = ODP_DMA_MT_SAFE;
+ param->order = ODP_DMA_ORDER_NONE;
+}
+
+static odp_stash_t create_stash(void)
+{
+ odp_stash_param_t stash_param;
+ odp_stash_t stash;
+ uint32_t id, tmp, i;
+ int32_t ret;
+
+ odp_stash_param_init(&stash_param);
+ stash_param.num_obj = MAX_TRANSFERS;
+ stash_param.obj_size = sizeof(uint32_t);
+ stash_param.cache_size = 0;
+
+ stash = odp_stash_create("_odp_dma_transfer_id", &stash_param);
+
+ if (stash == ODP_STASH_INVALID) {
+ _ODP_ERR("Stash create failed\n");
+ return ODP_STASH_INVALID;
+ }
+
+ /* Zero is invalid ID */
+ for (id = 1; id < MAX_TRANSFERS + 1; id++) {
+ ret = odp_stash_put_u32(stash, &id, 1);
+ if (ret != 1) {
+ _ODP_ERR("Stash put failed: %i, %u\n", ret, id);
+ break;
+ }
+ }
+
+ if (ret != 1) {
+ for (i = 0; i < id; i++) {
+ if (odp_stash_get_u32(stash, &tmp, 1) != 1) {
+ _ODP_ERR("Stash get failed: %u\n", i);
+ break;
+ }
+ }
+
+ if (odp_stash_destroy(stash))
+ _ODP_ERR("Stash destroy failed\n");
+
+ return ODP_STASH_INVALID;
+ }
+
+ return stash;
+}
+
+static int destroy_stash(odp_stash_t stash)
+{
+ uint32_t tmp;
+ int32_t num;
+ int ret = 0;
+
+ while (1) {
+ num = odp_stash_get_u32(stash, &tmp, 1);
+
+ if (num == 1)
+ continue;
+
+ if (num == 0)
+ break;
+
+ _ODP_ERR("Stash get failed: %i\n", num);
+ ret = -1;
+ break;
+ }
+
+ if (odp_stash_destroy(stash)) {
+ _ODP_ERR("Stash destroy failed\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
/* Create a DMA session: validate the parameters, claim a free session slot,
 * and for poll-mode completion create a stash pre-filled with transfer IDs.
 * Returns ODP_DMA_INVALID on bad parameters or when all slots are in use. */
odp_dma_t odp_dma_create(const char *name, const odp_dma_param_t *param)
{
	odp_dma_capability_t dma_capa;
	int i;
	dma_session_t *session = NULL;

	if (odp_global_ro.disable.dma) {
		_ODP_ERR("DMA is disabled\n");
		return ODP_DMA_INVALID;
	}

	/* Only plain main-memory copies are implemented. */
	if ((param->direction != ODP_DMA_MAIN_TO_MAIN) ||
	    (param->type != ODP_DMA_TYPE_COPY)) {
		_ODP_ERR("Bad DMA parameter\n");
		return ODP_DMA_INVALID;
	}

	if (param->compl_mode_mask == 0) {
		_ODP_ERR("Empty compl mode mask\n");
		return ODP_DMA_INVALID;
	}

	if (odp_dma_capability(&dma_capa)) {
		_ODP_ERR("DMA capa failed\n");
		return ODP_DMA_INVALID;
	}

	if (param->compl_mode_mask & ~dma_capa.compl_mode_mask) {
		_ODP_ERR("Compl mode not supported\n");
		return ODP_DMA_INVALID;
	}

	for (i = 0; i < MAX_SESSIONS; i++) {
		/* Cheap unlocked check first; re-check under the lock to
		 * claim the slot safely against concurrent creators. */
		if (_odp_dma_glb->session[i].active)
			continue;

		odp_ticketlock_lock(&_odp_dma_glb->session[i].lock);

		if (_odp_dma_glb->session[i].active) {
			odp_ticketlock_unlock(&_odp_dma_glb->session[i].lock);
			continue;
		}

		session = &_odp_dma_glb->session[i];
		session->active = 1;
		odp_ticketlock_unlock(&_odp_dma_glb->session[i].lock);
		break;
	}

	if (session == NULL) {
		_ODP_DBG("Out of DMA sessions\n");
		return ODP_DMA_INVALID;
	}

	session->stash = ODP_STASH_INVALID;

	/* Create stash for transfer IDs */
	if (param->compl_mode_mask & ODP_DMA_COMPL_POLL) {
		session->stash = create_stash();

		if (session->stash == ODP_STASH_INVALID) {
			/* Release the claimed slot on failure. */
			session->active = 0;
			return ODP_DMA_INVALID;
		}
	}

	session->name[0] = 0;

	if (name) {
		strncpy(session->name, name, ODP_DMA_NAME_LEN - 1);
		session->name[ODP_DMA_NAME_LEN - 1] = 0;
	}

	session->dma_param = *param;

	return (odp_dma_t)session;
}
+
+int odp_dma_destroy(odp_dma_t dma)
+{
+ dma_session_t *session = dma_session_from_handle(dma);
+ int ret = 0;
+
+ if (dma == ODP_DMA_INVALID) {
+ _ODP_ERR("Bad DMA handle\n");
+ return -1;
+ }
+
+ if (session->stash != ODP_STASH_INVALID)
+ if (destroy_stash(session->stash))
+ ret = -1;
+
+ odp_ticketlock_lock(&session->lock);
+
+ if (session->active == 0) {
+ _ODP_ERR("Session not created\n");
+ odp_ticketlock_unlock(&session->lock);
+ return -1;
+ }
+
+ session->active = 0;
+ odp_ticketlock_unlock(&session->lock);
+
+ return ret;
+}
+
/* Find an active DMA session by name. Each candidate is compared under its
 * lock to avoid racing with create/destroy. Returns ODP_DMA_INVALID when no
 * match is found.
 * NOTE(review): 'name' is passed straight to strcmp() — presumably callers
 * must supply a non-NULL name; confirm against the API contract. */
odp_dma_t odp_dma_lookup(const char *name)
{
	dma_session_t *session;
	int i;

	for (i = 0; i < MAX_SESSIONS; i++) {
		session = &_odp_dma_glb->session[i];

		odp_ticketlock_lock(&session->lock);

		if (session->active == 0) {
			odp_ticketlock_unlock(&session->lock);
			continue;
		}

		if (strcmp(session->name, name) == 0) {
			/* found it */
			odp_ticketlock_unlock(&session->lock);
			return (odp_dma_t)session;
		}
		odp_ticketlock_unlock(&session->lock);
	}

	return ODP_DMA_INVALID;
}
+
+static uint32_t transfer_len(const odp_dma_transfer_param_t *trs_param)
+{
+ uint32_t i;
+ uint32_t src_len = 0;
+ uint32_t dst_len = 0;
+
+ for (i = 0; i < trs_param->num_src; i++)
+ src_len += trs_param->src_seg[i].len;
+
+ for (i = 0; i < trs_param->num_dst; i++)
+ dst_len += trs_param->dst_seg[i].len;
+
+ if (src_len != dst_len)
+ return 0;
+
+ return src_len;
+}
+
+static inline void segment_raw(segment_t seg[], int num, const odp_dma_seg_t *dma_seg)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ seg[i].addr = dma_seg[i].addr;
+ seg[i].len = dma_seg[i].len;
+ }
+}
+
+static inline int segment_pkt(segment_t seg[], int num_seg, const odp_dma_seg_t *dma_seg)
+{
+ odp_packet_t pkt;
+ uint32_t offset;
+ void *addr;
+ uint32_t seg_len, tot_len, len;
+ int i;
+ int num = 0;
+
+ for (i = 0; i < num_seg; i++) {
+ pkt = dma_seg[i].packet;
+ offset = dma_seg[i].offset;
+ tot_len = dma_seg[i].len;
+
+ if (odp_unlikely(offset + tot_len > odp_packet_len(pkt))) {
+ _ODP_ERR("Bad packet segment len/offset (%u/%u)\n", tot_len, offset);
+ return 0;
+ }
+
+ while (tot_len) {
+ addr = odp_packet_offset(pkt, offset, &seg_len, NULL);
+
+ if (odp_unlikely(addr == NULL)) {
+ _ODP_ERR("Bad packet offset %u\n", offset);
+ return 0;
+ }
+
+ seg[num].addr = addr;
+ len = tot_len;
+ if (tot_len > seg_len)
+ len = seg_len;
+
+ seg[num].len = len;
+
+ tot_len -= len;
+ offset += len;
+ num++;
+
+ if (odp_unlikely(num > MAX_SEGS)) {
+ _ODP_ERR("Too many packet segments\n");
+ return 0;
+ }
+ }
+ }
+
+ return num;
+}
+
/* Build a table of elementary copies from parallel src and dst segment
 * lists (a two-finger merge: each entry covers the overlap of the current
 * src and dst segments, then the exhausted side advances). 'tot_len' must
 * equal the byte total of both sides. Returns the number of entries used;
 * a return value larger than max_num means the table was too small. */
static int transfer_table(transfer_t *trs, const segment_t src_seg[], const segment_t dst_seg[],
			  int max_num, uint32_t tot_len)
{
	uint32_t len, src_len, dst_len;
	uint8_t *src_ptr, *dst_ptr;
	int i;
	int src = 0;
	int dst = 0;

	src_ptr = src_seg[0].addr;
	dst_ptr = dst_seg[0].addr;
	src_len = src_seg[0].len;
	dst_len = dst_seg[0].len;

	/* Each copy is bounded by the shorter of the two current segments. */
	len = src_len;
	if (dst_len < src_len)
		len = dst_len;

	for (i = 0; i < max_num; i++) {
		trs[i].src = src_ptr;
		trs[i].dst = dst_ptr;
		trs[i].len = len;
		tot_len -= len;

		if (tot_len == 0)
			break;

		if (dst_len < src_len) {
			/* dst segment exhausted: advance dst, trim src. */
			dst++;
			dst_ptr = dst_seg[dst].addr;
			dst_len = dst_seg[dst].len;
			src_ptr += len;
			src_len -= len;
		} else if (src_len < dst_len) {
			/* src segment exhausted: advance src, trim dst. */
			src++;
			src_ptr = src_seg[src].addr;
			src_len = src_seg[src].len;
			dst_ptr += len;
			dst_len -= len;
		} else { /* equal lengths */
			dst++;
			src++;
			dst_ptr = dst_seg[dst].addr;
			dst_len = dst_seg[dst].len;
			src_ptr = src_seg[src].addr;
			src_len = src_seg[src].len;
		}

		len = src_len;
		if (dst_len < src_len)
			len = dst_len;
	}

	return i + 1;
}
+
/* Perform one synchronous DMA transfer (implemented with memcpy). Validates
 * the handle, segment counts and total length, flattens packet segments
 * into address ranges, builds the elementary copy table and executes it.
 * Returns 1 on success, -1 on error. */
int odp_dma_transfer(odp_dma_t dma, const odp_dma_transfer_param_t *transfer,
		     odp_dma_result_t *result)
{
	int num, i;
	uint32_t tot_len;
	dma_session_t *session = dma_session_from_handle(dma);
	int num_src, num_dst;
	const int max_num = 2 * MAX_SEGS;
	transfer_t trs[max_num];
	segment_t src[MAX_SEGS];
	segment_t dst[MAX_SEGS];

	if (odp_unlikely(dma == ODP_DMA_INVALID)) {
		_ODP_ERR("Bad DMA handle\n");
		return -1;
	}

	if (odp_unlikely(session->active == 0)) {
		_ODP_ERR("Session not created\n");
		return -1;
	}

	if (odp_unlikely(transfer->num_src == 0 || transfer->num_src > MAX_SEGS)) {
		_ODP_ERR("Bad number of src segments\n");
		return -1;
	}

	if (odp_unlikely(transfer->num_dst == 0 || transfer->num_dst > MAX_SEGS)) {
		_ODP_ERR("Bad number of dst segments\n");
		return -1;
	}

	/* Zero means src/dst totals differ (or an empty transfer). */
	tot_len = transfer_len(transfer);

	if (odp_unlikely(tot_len == 0)) {
		_ODP_ERR("Bad transfer length\n");
		return -1;
	}

	if (transfer->src_format == ODP_DMA_FORMAT_ADDR) {
		num_src = transfer->num_src;
		segment_raw(src, num_src, transfer->src_seg);
	} else {
		num_src = segment_pkt(src, transfer->num_src, transfer->src_seg);

		if (odp_unlikely(num_src == 0))
			return -1;
	}

	if (transfer->dst_format == ODP_DMA_FORMAT_ADDR) {
		num_dst = transfer->num_dst;
		segment_raw(dst, num_dst, transfer->dst_seg);
	} else {
		num_dst = segment_pkt(dst, transfer->num_dst, transfer->dst_seg);

		if (odp_unlikely(num_dst == 0))
			return -1;
	}

	num = transfer_table(trs, src, dst, max_num, tot_len);

	if (odp_unlikely(num > max_num)) {
		_ODP_ERR("Segment table error\n");
		return -1;
	}

	/* Execute the transfer as a series of plain memory copies. */
	for (i = 0; i < num; i++)
		memcpy(trs[i].dst, trs[i].src, trs[i].len);

	if (result) {
		memset(result, 0, sizeof(odp_dma_result_t));
		result->success = 1;
	}

	return 1;
}
+
+int odp_dma_transfer_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+ odp_dma_result_t *result[], int num)
+{
+ int i;
+ odp_dma_result_t *res = NULL;
+ int ret = 0;
+
+ if (odp_unlikely(num < 1)) {
+ _ODP_ERR("Bad number of transfers\n");
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (result)
+ res = result[i];
+
+ ret = odp_dma_transfer(dma, trs_param[i], res);
+
+ if (odp_unlikely(ret != 1))
+ break;
+ }
+
+ if (odp_unlikely(i == 0))
+ return ret;
+
+ return i;
+}
+
+odp_dma_transfer_id_t odp_dma_transfer_id_alloc(odp_dma_t dma)
+{
+ int32_t num;
+ uint32_t id;
+ dma_session_t *session = dma_session_from_handle(dma);
+
+ num = odp_stash_get_u32(session->stash, &id, 1);
+
+ if (odp_unlikely(num != 1))
+ return ODP_DMA_TRANSFER_ID_INVALID;
+
+ return id;
+}
+
+void odp_dma_transfer_id_free(odp_dma_t dma, odp_dma_transfer_id_t transfer_id)
+{
+ int32_t num;
+ dma_session_t *session = dma_session_from_handle(dma);
+ uint32_t id = transfer_id;
+
+ num = odp_stash_put_u32(session->stash, &id, 1);
+
+ if (odp_unlikely(num != 1))
+ _ODP_ERR("Stash put failed\n");
+}
+
+static inline uint32_t index_from_transfer_id(odp_dma_transfer_id_t transfer_id)
+{
+ return transfer_id - 1;
+}
+
/* "Start" a transfer: since this software driver copies synchronously, the
 * transfer completes within the call and only the completion reporting
 * differs per mode — poll mode records the user pointer for a later
 * transfer_done() call, event mode fills the completion event and enqueues
 * it. Returns 1 on success, -1 (or the transfer's error) on failure. */
int odp_dma_transfer_start(odp_dma_t dma, const odp_dma_transfer_param_t *transfer,
			   const odp_dma_compl_param_t *compl)
{
	int ret;
	dma_session_t *session = dma_session_from_handle(dma);
	const uint32_t transfer_id = compl->transfer_id;

	if (odp_unlikely(dma == ODP_DMA_INVALID)) {
		_ODP_ERR("Bad DMA handle\n");
		return -1;
	}

	/* Check completion mode */
	switch (compl->compl_mode) {
	case ODP_DMA_COMPL_NONE:
		break;
	case ODP_DMA_COMPL_POLL:
		if (transfer_id == ODP_DMA_TRANSFER_ID_INVALID || transfer_id > MAX_TRANSFERS) {
			_ODP_ERR("Bad transfer ID: %u\n", transfer_id);
			return -1;
		}
		break;
	case ODP_DMA_COMPL_EVENT:
		if (compl->event == ODP_EVENT_INVALID ||
		    compl->queue == ODP_QUEUE_INVALID) {
			_ODP_ERR("Bad event or queue\n");
			return -1;
		}
		break;
	default:
		_ODP_ERR("Bad completion mode %u\n", compl->compl_mode);
		return -1;
	}

	/* The copy happens here, synchronously. */
	ret = odp_dma_transfer(dma, transfer, NULL);

	if (odp_unlikely(ret < 1))
		return ret;

	if (compl->compl_mode == ODP_DMA_COMPL_POLL) {
		uint32_t index = index_from_transfer_id(transfer_id);

		/* Saved for odp_dma_transfer_done() to report back. */
		session->result[index].user_ptr = compl->user_ptr;

	} else if (compl->compl_mode == ODP_DMA_COMPL_EVENT) {
		odp_dma_result_t *result;
		odp_buffer_t buf = (odp_buffer_t)(uintptr_t)compl->event;

		if (odp_unlikely(odp_event_type(compl->event) != ODP_EVENT_DMA_COMPL)) {
			_ODP_ERR("Bad completion event type\n");
			return -1;
		}

		/* Completion events are buffers carrying an odp_dma_result_t. */
		result = odp_buffer_addr(buf);
		result->success = 1;
		result->user_ptr = compl->user_ptr;

		if (odp_unlikely(odp_queue_enq(compl->queue, compl->event))) {
			_ODP_ERR("Completion event enqueue failed %" PRIu64 "\n",
				 odp_queue_to_u64(compl->queue));
			return -1;
		}
	}

	return 1;
}
+
+int odp_dma_transfer_start_multi(odp_dma_t dma, const odp_dma_transfer_param_t *trs_param[],
+ const odp_dma_compl_param_t *compl_param[], int num)
+{
+ int i;
+ int ret = 0;
+
+ if (odp_unlikely(num < 1)) {
+ _ODP_ERR("Bad number of transfers\n");
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = odp_dma_transfer_start(dma, trs_param[i], compl_param[i]);
+
+ if (odp_unlikely(ret != 1))
+ break;
+ }
+
+ if (odp_unlikely(i == 0))
+ return ret;
+
+ return i;
+}
+
/* Poll a transfer for completion. Transfers in this software driver finish
 * synchronously inside odp_dma_transfer_start(), so this always reports
 * completion (returns 1) for a valid handle and ID, filling in the user
 * pointer that was recorded at start time. Returns -1 on bad arguments. */
int odp_dma_transfer_done(odp_dma_t dma, odp_dma_transfer_id_t transfer_id,
			  odp_dma_result_t *result)
{
	dma_session_t *session = dma_session_from_handle(dma);
	const uint32_t id = transfer_id;

	if (odp_unlikely(dma == ODP_DMA_INVALID)) {
		_ODP_ERR("Bad DMA handle\n");
		return -1;
	}

	if (odp_unlikely(id == ODP_DMA_TRANSFER_ID_INVALID || id > MAX_TRANSFERS)) {
		_ODP_ERR("Bad transfer ID: %u\n", id);
		return -1;
	}

	if (result) {
		uint32_t index = index_from_transfer_id(id);

		result->success = 1;
		result->user_ptr = session->result[index].user_ptr;
	}

	return 1;
}
+
+void odp_dma_pool_param_init(odp_dma_pool_param_t *pool_param)
+{
+ memset(pool_param, 0, sizeof(odp_dma_pool_param_t));
+
+ pool_param->cache_size = _odp_dma_glb->pool_param.buf.cache_size;
+}
+
/* Create a pool of DMA completion events, implemented as a buffer pool
 * whose buffers each hold one odp_dma_result_t. Parameters are validated
 * against the cached buffer pool capability. Returns ODP_POOL_INVALID on
 * failure. */
odp_pool_t odp_dma_pool_create(const char *name, const odp_dma_pool_param_t *dma_pool_param)
{
	odp_pool_t pool;
	odp_pool_param_t pool_param;
	uint32_t num = dma_pool_param->num;
	uint32_t uarea_size = dma_pool_param->uarea_size;
	uint32_t cache_size = dma_pool_param->cache_size;

	if (num > _odp_dma_glb->pool_capa.buf.max_num) {
		_ODP_ERR("Too many DMA completion events: %u\n", num);
		return ODP_POOL_INVALID;
	}

	if (uarea_size > _odp_dma_glb->pool_capa.buf.max_uarea_size) {
		_ODP_ERR("Bad uarea size: %u\n", uarea_size);
		return ODP_POOL_INVALID;
	}

	if (cache_size < _odp_dma_glb->pool_capa.buf.min_cache_size ||
	    cache_size > _odp_dma_glb->pool_capa.buf.max_cache_size) {
		_ODP_ERR("Bad cache size: %u\n", cache_size);
		return ODP_POOL_INVALID;
	}

	/* Map DMA pool parameters onto a plain buffer pool. */
	odp_pool_param_init(&pool_param);
	pool_param.type = ODP_POOL_BUFFER;
	pool_param.uarea_init.init_fn = dma_pool_param->uarea_init.init_fn;
	pool_param.uarea_init.args = dma_pool_param->uarea_init.args;
	pool_param.buf.num = num;
	pool_param.buf.uarea_size = uarea_size;
	pool_param.buf.cache_size = cache_size;
	pool_param.buf.size = sizeof(odp_dma_result_t);

	pool = _odp_pool_create(name, &pool_param, ODP_POOL_DMA_COMPL);

	return pool;
}
+
+uint64_t odp_dma_to_u64(odp_dma_t dma)
+{
+ return _odp_pri(dma);
+}
+
+uint64_t odp_dma_compl_to_u64(odp_dma_compl_t dma_compl)
+{
+ return _odp_pri(dma_compl);
+}
+
/* Print basic info (handle and name) about a DMA session to the ODP log. */
void odp_dma_print(odp_dma_t dma)
{
	dma_session_t *session = dma_session_from_handle(dma);

	if (dma == ODP_DMA_INVALID) {
		_ODP_ERR("Bad DMA handle\n");
		return;
	}

	_ODP_PRINT("\nDMA info\n");
	_ODP_PRINT("--------\n");
	_ODP_PRINT(" DMA handle 0x%" PRIx64 "\n", odp_dma_to_u64(dma));
	_ODP_PRINT(" name %s\n", session->name);
	_ODP_PRINT("\n");
}
+
/* Print a DMA completion event: the handle and, when result metadata is
 * available, the transfer status and user pointer. */
void odp_dma_compl_print(odp_dma_compl_t dma_compl)
{
	odp_dma_result_t result;
	int ret;

	if (dma_compl == ODP_DMA_COMPL_INVALID) {
		_ODP_ERR("Bad DMA compl handle\n");
		return;
	}

	ret = odp_dma_compl_result(dma_compl, &result);

	_ODP_PRINT("\nDMA completion\n");
	_ODP_PRINT("--------------\n");
	_ODP_PRINT(" Compl event handle: 0x%" PRIx64 "\n", _odp_pri(dma_compl));

	if (ret == 0) {
		_ODP_PRINT(" Result: %s\n", result.success ? "success" : "fail");
		_ODP_PRINT(" User pointer: 0x%" PRIx64 "\n", _odp_pri(result.user_ptr));
	} else {
		_ODP_PRINT(" No result metadata\n");
	}

	_ODP_PRINT("\n");
}
+
+int _odp_dma_init_global(void)
+{
+ odp_shm_t shm;
+ int i;
+
+ if (odp_global_ro.disable.dma) {
+ _ODP_PRINT("DMA is DISABLED\n");
+ return 0;
+ }
+
+ shm = odp_shm_reserve("_odp_dma_global", sizeof(dma_global_t), ODP_CACHE_LINE_SIZE, 0);
+ _odp_dma_glb = odp_shm_addr(shm);
+
+ if (_odp_dma_glb == NULL) {
+ _ODP_ERR("SHM reserve failed\n");
+ return -1;
+ }
+
+ memset(_odp_dma_glb, 0, sizeof(dma_global_t));
+ _odp_dma_glb->shm = shm;
+
+ odp_pool_param_init(&_odp_dma_glb->pool_param);
+
+ if (odp_pool_capability(&_odp_dma_glb->pool_capa)) {
+ _ODP_ERR("Pool capability failed\n");
+ return -1;
+ }
+
+ for (i = 0; i < MAX_SESSIONS; i++)
+ odp_ticketlock_init(&_odp_dma_glb->session[i].lock);
+
+ return 0;
+}
+
+int _odp_dma_term_global(void)
+{
+ odp_shm_t shm;
+
+ if (odp_global_ro.disable.dma)
+ return 0;
+
+ if (_odp_dma_glb == NULL)
+ return 0;
+
+ shm = _odp_dma_glb->shm;
+
+ if (odp_shm_free(shm)) {
+ _ODP_ERR("SHM free failed\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/platform/linux-generic/odp_dma_api.c b/platform/linux-generic/odp_dma_api.c
new file mode 100644
index 000000000..1e1d5d91f
--- /dev/null
+++ b/platform/linux-generic/odp_dma_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/dma.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/dma_inlines.h>
diff --git a/platform/linux-generic/odp_errno.c b/platform/linux-generic/odp_errno.c
index 408a4b95f..bce398834 100644
--- a/platform/linux-generic/odp_errno.c
+++ b/platform/linux-generic/odp_errno.c
@@ -1,33 +1,32 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/errno.h>
-#include <odp_internal.h>
#include <string.h>
#include <stdio.h>
#include <odp_debug_internal.h>
-__thread int __odp_errno;
+__thread int _odp_errno;
int odp_errno(void)
{
- return __odp_errno;
+ return _odp_errno;
}
void odp_errno_zero(void)
{
- __odp_errno = 0;
+ _odp_errno = 0;
}
void odp_errno_print(const char *str)
{
if (str != NULL)
- printf("%s ", str);
-
- ODP_PRINT("%s\n", strerror(__odp_errno));
+ _ODP_PRINT("%s %s\n", str, strerror(_odp_errno));
+ else
+ _ODP_PRINT("%s\n", strerror(_odp_errno));
}
const char *odp_errno_str(int errnum)
diff --git a/platform/linux-generic/odp_event.c b/platform/linux-generic/odp_event.c
index d71f4464a..f3644f02b 100644
--- a/platform/linux-generic/odp_event.c
+++ b/platform/linux-generic/odp_event.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2020-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,39 +8,126 @@
#include <odp/api/event.h>
#include <odp/api/buffer.h>
#include <odp/api/crypto.h>
+#include <odp/api/dma.h>
#include <odp/api/packet.h>
#include <odp/api/timer.h>
#include <odp/api/pool.h>
+#include <odp/api/ml.h>
+
#include <odp_buffer_internal.h>
-#include <odp_buffer_inlines.h>
+#include <odp_ipsec_internal.h>
#include <odp_debug_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_event_vector_internal.h>
-odp_event_type_t odp_event_type(odp_event_t event)
-{
- return _odp_buffer_event_type(odp_buffer_from_event(event));
-}
+/* Inlined API functions */
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
+#include <odp/api/plat/timer_inlines.h>
-void odp_event_free(odp_event_t event)
+#include <odp/api/plat/event_inline_types.h>
+
+#include <odp/visibility_begin.h>
+
+/* Fill in event header field offsets for inline functions */
+const _odp_event_inline_offset_t
+_odp_event_inline_offset ODP_ALIGNED_CACHE = {
+ .event_type = offsetof(_odp_event_hdr_t, event_type),
+ .base_data = offsetof(_odp_event_hdr_t, base_data),
+ .subtype = offsetof(_odp_event_hdr_t, subtype),
+ .flow_id = offsetof(_odp_event_hdr_t, flow_id),
+ .pool = offsetof(_odp_event_hdr_t, pool),
+};
+
+#include <odp/visibility_end.h>
+
+static inline void event_free(odp_event_t event, _odp_ev_id_t id)
{
switch (odp_event_type(event)) {
case ODP_EVENT_BUFFER:
+ _odp_buffer_validate(odp_buffer_from_event(event), id);
odp_buffer_free(odp_buffer_from_event(event));
break;
case ODP_EVENT_PACKET:
+ _odp_packet_validate(odp_packet_from_event(event), id);
odp_packet_free(odp_packet_from_event(event));
break;
+ case ODP_EVENT_PACKET_VECTOR:
+ _odp_packet_vector_free_full(odp_packet_vector_from_event(event));
+ break;
case ODP_EVENT_TIMEOUT:
odp_timeout_free(odp_timeout_from_event(event));
break;
- case ODP_EVENT_CRYPTO_COMPL:
- odp_crypto_compl_free(odp_crypto_compl_from_event(event));
+ case ODP_EVENT_IPSEC_STATUS:
+ _odp_ipsec_status_free(_odp_ipsec_status_from_event(event));
+ break;
+ case ODP_EVENT_PACKET_TX_COMPL:
+ odp_packet_tx_compl_free(odp_packet_tx_compl_from_event(event));
+ break;
+ case ODP_EVENT_DMA_COMPL:
+ odp_dma_compl_free(odp_dma_compl_from_event(event));
+ break;
+ case ODP_EVENT_ML_COMPL:
+ odp_ml_compl_free(odp_ml_compl_from_event(event));
break;
default:
- ODP_ABORT("Invalid event type: %d\n", odp_event_type(event));
+ _ODP_ABORT("Invalid event type: %d\n", odp_event_type(event));
}
}
+void odp_event_free(odp_event_t event)
+{
+ event_free(event, _ODP_EV_EVENT_FREE);
+}
+
+void odp_event_free_multi(const odp_event_t event[], int num)
+{
+ for (int i = 0; i < num; i++)
+ event_free(event[i], _ODP_EV_EVENT_FREE_MULTI);
+}
+
+void odp_event_free_sp(const odp_event_t event[], int num)
+{
+ for (int i = 0; i < num; i++)
+ event_free(event[i], _ODP_EV_EVENT_FREE_SP);
+}
+
uint64_t odp_event_to_u64(odp_event_t hdl)
{
return _odp_pri(hdl);
}
+
+int odp_event_is_valid(odp_event_t event)
+{
+ if (event == ODP_EVENT_INVALID)
+ return 0;
+
+ if (_odp_event_is_valid(event) == 0)
+ return 0;
+
+ switch (odp_event_type(event)) {
+ case ODP_EVENT_BUFFER:
+ return !_odp_buffer_validate(odp_buffer_from_event(event), _ODP_EV_EVENT_IS_VALID);
+ case ODP_EVENT_PACKET:
+ return !_odp_packet_validate(odp_packet_from_event(event), _ODP_EV_EVENT_IS_VALID);
+ case ODP_EVENT_TIMEOUT:
+ /* Fall through */
+ case ODP_EVENT_IPSEC_STATUS:
+ /* Fall through */
+ case ODP_EVENT_PACKET_VECTOR:
+ /* Fall through */
+ case ODP_EVENT_DMA_COMPL:
+ /* Fall through */
+ case ODP_EVENT_ML_COMPL:
+ /* Fall through */
+ case ODP_EVENT_PACKET_TX_COMPL:
+ break;
+ default:
+ return 0;
+ }
+
+ return 1;
+}
diff --git a/platform/linux-generic/odp_event_api.c b/platform/linux-generic/odp_event_api.c
new file mode 100644
index 000000000..4fbc98ddf
--- /dev/null
+++ b/platform/linux-generic/odp_event_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/event.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/event_inlines.h>
diff --git a/platform/linux-generic/odp_event_validation.c b/platform/linux-generic/odp_event_validation.c
new file mode 100644
index 000000000..c2d430f1a
--- /dev/null
+++ b/platform/linux-generic/odp_event_validation.c
@@ -0,0 +1,260 @@
+/* Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/atomic.h>
+#include <odp/api/buffer.h>
+#include <odp/api/debug.h>
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp_buffer_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_print_internal.h>
+
+#include <inttypes.h>
+#include <string.h>
+
+#define EVENT_VALIDATION_NONE 0
+#define EVENT_VALIDATION_WARN 1
+#define EVENT_VALIDATION_ABORT 2
+
+#define EVENT_DATA_PRINT_MAX_LEN 128
+
+typedef struct {
+ odp_atomic_u64_t err_count[_ODP_EV_MAX];
+ odp_shm_t shm;
+
+} event_validation_global_t;
+
+typedef struct {
+ const char *str;
+} _odp_ev_info_t;
+
+static event_validation_global_t *_odp_ev_glb;
+
+#if _ODP_EVENT_VALIDATION
+
+/* Table for mapping function IDs to API function names */
+static const _odp_ev_info_t ev_info_tbl[] = {
+ [_ODP_EV_BUFFER_FREE] = {.str = "odp_buffer_free()"},
+ [_ODP_EV_BUFFER_FREE_MULTI] = {.str = "odp_buffer_free_multi()"},
+ [_ODP_EV_BUFFER_IS_VALID] = {.str = "odp_buffer_is_valid()"},
+ [_ODP_EV_EVENT_FREE] = {.str = "odp_event_free()"},
+ [_ODP_EV_EVENT_FREE_MULTI] = {.str = "odp_event_free_multi()"},
+	[_ODP_EV_EVENT_FREE_SP]    = {.str = "odp_event_free_sp()"},
+ [_ODP_EV_EVENT_IS_VALID] = {.str = "odp_event_is_valid()"},
+ [_ODP_EV_PACKET_FREE] = {.str = "odp_packet_free()"},
+ [_ODP_EV_PACKET_FREE_MULTI] = {.str = "odp_packet_free_multi()"},
+ [_ODP_EV_PACKET_FREE_SP] = {.str = "odp_packet_free_sp()"},
+ [_ODP_EV_PACKET_IS_VALID] = {.str = "odp_packet_is_valid()"},
+ [_ODP_EV_QUEUE_ENQ] = {.str = "odp_queue_enq()"},
+ [_ODP_EV_QUEUE_ENQ_MULTI] = {.str = "odp_queue_enq_multi()"}
+};
+
+ODP_STATIC_ASSERT(_ODP_ARRAY_SIZE(ev_info_tbl) == _ODP_EV_MAX, "ev_info_tbl missing entries");
+
+static void print_event_data(odp_event_t event, odp_event_type_t type)
+{
+ const char *type_str;
+ const uint32_t bytes_per_row = 16;
+ uint32_t byte_len;
+ int num_rows, max_len, n;
+ int len = 0;
+ uint8_t *data;
+
+ if (type == ODP_EVENT_PACKET) {
+ odp_packet_t pkt = odp_packet_from_event(event);
+
+ data = odp_packet_data(pkt);
+ byte_len = odp_packet_seg_len(pkt);
+ type_str = "Packet";
+ } else {
+ odp_buffer_t buf = odp_buffer_from_event(event);
+
+ data = odp_buffer_addr(buf);
+ byte_len = odp_buffer_size(buf);
+ type_str = "Buffer";
+ }
+
+ if (byte_len > EVENT_DATA_PRINT_MAX_LEN)
+ byte_len = EVENT_DATA_PRINT_MAX_LEN;
+
+ num_rows = (byte_len + bytes_per_row - 1) / bytes_per_row;
+ max_len = 256 + (3 * byte_len) + (3 * num_rows);
+ n = max_len - 1;
+
+ char str[max_len];
+
+ len += _odp_snprint(&str[len], n - len, "%s %p data %p:\n", type_str, event, data);
+ while (byte_len) {
+ uint32_t row_len = byte_len > bytes_per_row ? bytes_per_row : byte_len;
+
+ len += _odp_snprint(&str[len], n - len, " ");
+
+ for (uint32_t i = 0; i < row_len; i++)
+ len += _odp_snprint(&str[len], n - len, " %02x", data[i]);
+
+ len += _odp_snprint(&str[len], n - len, "\n");
+
+ byte_len -= row_len;
+ data += row_len;
+ }
+
+ _ODP_PRINT("%s\n", str);
+}
+
+static inline int validate_event_endmark(odp_event_t event, _odp_ev_id_t id, odp_event_type_t type)
+{
+ uint64_t err_count;
+ uint64_t *endmark_ptr = _odp_event_endmark_get_ptr(event);
+
+ if (odp_likely(*endmark_ptr == _ODP_EV_ENDMARK_VAL))
+ return 0;
+
+ err_count = odp_atomic_fetch_inc_u64(&_odp_ev_glb->err_count[id]) + 1;
+
+ _ODP_ERR("Event %p endmark mismatch in %s: endmark=0x%" PRIx64 " (expected 0x%" PRIx64 ") "
+ "err_count=%" PRIu64 "\n", event, ev_info_tbl[id].str, *endmark_ptr,
+ _ODP_EV_ENDMARK_VAL, err_count);
+
+ print_event_data(event, type);
+
+ if (_ODP_EVENT_VALIDATION == EVENT_VALIDATION_ABORT)
+ _ODP_ABORT("Abort due to event %p endmark mismatch\n", event);
+
+ /* Fix endmark value */
+ _odp_event_endmark_set(event);
+
+ return -1;
+}
+
+static inline int buffer_validate(odp_buffer_t buf, _odp_ev_id_t id)
+{
+ return validate_event_endmark(odp_buffer_to_event(buf), id, ODP_EVENT_BUFFER);
+}
+
+static inline int packet_validate(odp_packet_t pkt, _odp_ev_id_t id)
+{
+ return validate_event_endmark(odp_packet_to_event(pkt), id, ODP_EVENT_PACKET);
+}
+
+static inline int event_validate(odp_event_t event, int id)
+{
+ if (odp_event_type(event) == ODP_EVENT_BUFFER)
+ return buffer_validate(odp_buffer_from_event(event), id);
+ if (odp_event_type(event) == ODP_EVENT_PACKET)
+ return packet_validate(odp_packet_from_event(event), id);
+ return 0;
+}
+
+/* Enable usage from API inline files */
+#include <odp/visibility_begin.h>
+
+int _odp_buffer_validate(odp_buffer_t buf, _odp_ev_id_t id)
+{
+ return buffer_validate(buf, id);
+}
+
+int _odp_buffer_validate_multi(const odp_buffer_t buf[], int num,
+ _odp_ev_id_t id)
+{
+ for (int i = 0; i < num; i++) {
+ if (odp_unlikely(buffer_validate(buf[i], id)))
+ return -1;
+ }
+ return 0;
+}
+
+int _odp_packet_validate(odp_packet_t pkt, _odp_ev_id_t id)
+{
+ return packet_validate(pkt, id);
+}
+
+int _odp_packet_validate_multi(const odp_packet_t pkt[], int num,
+ _odp_ev_id_t id)
+{
+ for (int i = 0; i < num; i++) {
+ if (odp_unlikely(packet_validate(pkt[i], id)))
+ return -1;
+ }
+ return 0;
+}
+
+int _odp_event_validate(odp_event_t event, _odp_ev_id_t id)
+{
+ return event_validate(event, id);
+}
+
+int _odp_event_validate_multi(const odp_event_t event[], int num,
+ _odp_ev_id_t id)
+{
+ for (int i = 0; i < num; i++) {
+ if (odp_unlikely(event_validate(event[i], id)))
+ return -1;
+ }
+ return 0;
+}
+
+#include <odp/visibility_end.h>
+
+#endif /* _ODP_EVENT_VALIDATION */
+
+int _odp_event_validation_init_global(void)
+{
+ odp_shm_t shm;
+
+ _ODP_PRINT("\nEvent validation mode: %s\n\n",
+ _ODP_EVENT_VALIDATION == EVENT_VALIDATION_NONE ? "none" :
+ _ODP_EVENT_VALIDATION == EVENT_VALIDATION_WARN ? "warn" : "abort");
+
+ if (_ODP_EVENT_VALIDATION == EVENT_VALIDATION_NONE)
+ return 0;
+
+ shm = odp_shm_reserve("_odp_event_validation_global",
+ sizeof(event_validation_global_t),
+ ODP_CACHE_LINE_SIZE, ODP_SHM_EXPORT);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ _odp_ev_glb = odp_shm_addr(shm);
+ if (_odp_ev_glb == NULL)
+ return -1;
+
+ memset(_odp_ev_glb, 0, sizeof(event_validation_global_t));
+ _odp_ev_glb->shm = shm;
+
+ for (int i = 0; i < _ODP_EV_MAX; i++)
+ odp_atomic_init_u64(&_odp_ev_glb->err_count[i], 0);
+
+ return 0;
+}
+
+int _odp_event_validation_term_global(void)
+{
+ int ret;
+
+ if (_ODP_EVENT_VALIDATION == EVENT_VALIDATION_NONE)
+ return 0;
+
+ if (_odp_ev_glb == NULL)
+ return 0;
+
+ ret = odp_shm_free(_odp_ev_glb->shm);
+ if (ret) {
+ _ODP_ERR("SHM free failed: %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/platform/linux-generic/_fdserver.c b/platform/linux-generic/odp_fdserver.c
index 9aed7a9ff..e72df0669 100644
--- a/platform/linux-generic/_fdserver.c
+++ b/platform/linux-generic/odp_fdserver.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -29,7 +29,7 @@
* _odp_fdserver_deregister_fd(context, key);
* _odp_fdserver_lookup_fd(context, key);
*
- * which are used to register/deregister or querry for file descriptor based
+ * which are used to register/deregister or query for file descriptor based
* on a context and key value couple, which has to be unique.
*
* Note again that the file descriptors stored here are local to this server
@@ -37,10 +37,11 @@
*/
#include <odp_posix_extensions.h>
-#include <odp/api/spinlock.h>
-#include <odp_internal.h>
+#include <odp_config_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
#include <odp_debug_internal.h>
-#include <_fdserver_internal.h>
+#include <odp_fdserver_internal.h>
#include <sys/prctl.h>
#include <signal.h>
@@ -48,6 +49,7 @@
#include <stdlib.h>
#include <errno.h>
#include <string.h>
+#include <sys/stat.h>
#include <sys/types.h>
#include <signal.h>
#include <sys/socket.h>
@@ -57,19 +59,21 @@
#include <sys/mman.h>
#include <sys/wait.h>
-#define FDSERVER_SOCKPATH_MAXLEN 32
-#define FDSERVER_SOCKPATH_FORMAT "/tmp/odp-%d-fdserver"
+#define FDSERVER_SOCKPATH_MAXLEN 255
+#define FDSERVER_SOCK_FORMAT "%s/%s/odp-%d-fdserver"
+#define FDSERVER_SOCKDIR_FORMAT "%s/%s"
+#define FDSERVER_DEFAULT_DIR "/dev/shm"
#define FDSERVER_BACKLOG 5
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
-/* when accessing the client functions, clients should be mutexed: */
-odp_spinlock_t *client_lock;
+/* Debug level for the FD server */
+#define FD_DBG 3
/* define the tables of file descriptors handled by this server: */
-#define FDSERVER_MAX_ENTRIES 256
+#define FDSERVER_MAX_ENTRIES (CONFIG_SHM_BLOCKS + CONFIG_INTERNAL_SHM_BLOCKS)
typedef struct fdentry_s {
fd_server_context_e context;
uint64_t key;
@@ -126,7 +130,7 @@ static int send_fdserver_msg(int sock, int command,
char ancillary_data[CMSG_SPACE(sizeof(int))];
- /* prepare the register request body (single framgent): */
+ /* prepare the register request body (single fragment): */
msg.command = command;
msg.context = context;
msg.key = key;
@@ -154,8 +158,8 @@ static int send_fdserver_msg(int sock, int command,
}
res = sendmsg(sock, &socket_message, 0);
if (res < 0) {
- ODP_ERR("send_fdserver_msg: %s\n", strerror(errno));
- return(-1);
+ _ODP_ERR("sendmsg() failed: %s\n", strerror(errno));
+ return -1;
}
return 0;
@@ -200,8 +204,8 @@ static int recv_fdserver_msg(int sock, int *command,
/* receive the message */
if (recvmsg(sock, &socket_message, MSG_CMSG_CLOEXEC) < 0) {
- ODP_ERR("recv_fdserver_msg: %s\n", strerror(errno));
- return(-1);
+ _ODP_ERR("recvmsg() failed: %s\n", strerror(errno));
+ return -1;
}
*command = msg.command;
@@ -238,22 +242,30 @@ static int get_socket(void)
int len;
/* construct the named socket path: */
- snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKPATH_FORMAT,
- odp_global_data.main_pid);
+ len = snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCK_FORMAT,
+ odp_global_ro.shm_dir, odp_global_ro.uid,
+ odp_global_ro.main_pid);
+
+ if (len >= FDSERVER_SOCKPATH_MAXLEN || len >= (int)sizeof(remote.sun_path)) {
+ _ODP_ERR("path too long\n");
+ return -1;
+ }
s_sock = socket(AF_UNIX, SOCK_STREAM, 0);
if (s_sock == -1) {
- ODP_ERR("cannot connect to server: %s\n", strerror(errno));
- return(-1);
+ _ODP_ERR("cannot connect to server: %s\n", strerror(errno));
+ return -1;
}
remote.sun_family = AF_UNIX;
strcpy(remote.sun_path, sockpath);
len = strlen(remote.sun_path) + sizeof(remote.sun_family);
- if (connect(s_sock, (struct sockaddr *)&remote, len) == -1) {
- ODP_ERR("cannot connect to server: %s\n", strerror(errno));
+ while (connect(s_sock, (struct sockaddr *)&remote, len) == -1) {
+ if (errno == EINTR)
+ continue;
+ _ODP_ERR("cannot connect to server: %s\n", strerror(errno));
close(s_sock);
- return(-1);
+ return -1;
}
return s_sock;
@@ -271,38 +283,31 @@ int _odp_fdserver_register_fd(fd_server_context_e context, uint64_t key,
int command;
int fd;
- odp_spinlock_lock(client_lock);
-
- ODP_DBG("FD client register: pid=%d key=%" PRIu64 ", fd=%d\n",
- getpid(), key, fd_to_send);
+ ODP_DBG_LVL(FD_DBG, "FD client register: pid=%d key=%" PRIu64 ", fd=%d\n",
+ getpid(), key, fd_to_send);
s_sock = get_socket();
- if (s_sock < 0) {
- odp_spinlock_unlock(client_lock);
- return(-1);
- }
+ if (s_sock < 0)
+ return -1;
res = send_fdserver_msg(s_sock, FD_REGISTER_REQ, context, key,
fd_to_send);
if (res < 0) {
- ODP_ERR("fd registration failure\n");
+ _ODP_ERR("fd registration failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
res = recv_fdserver_msg(s_sock, &command, &context, &key, &fd);
if ((res < 0) || (command != FD_REGISTER_ACK)) {
- ODP_ERR("fd registration failure\n");
+ _ODP_ERR("fd registration failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
close(s_sock);
- odp_spinlock_unlock(client_lock);
return 0;
}
@@ -317,37 +322,30 @@ int _odp_fdserver_deregister_fd(fd_server_context_e context, uint64_t key)
int command;
int fd;
- odp_spinlock_lock(client_lock);
-
- ODP_DBG("FD client deregister: pid=%d key=%" PRIu64 "\n",
- getpid(), key);
+ ODP_DBG_LVL(FD_DBG, "FD client deregister: pid=%d key=%" PRIu64 "\n",
+ getpid(), key);
s_sock = get_socket();
- if (s_sock < 0) {
- odp_spinlock_unlock(client_lock);
- return(-1);
- }
+ if (s_sock < 0)
+ return -1;
res = send_fdserver_msg(s_sock, FD_DEREGISTER_REQ, context, key, -1);
if (res < 0) {
- ODP_ERR("fd de-registration failure\n");
+ _ODP_ERR("fd de-registration failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
res = recv_fdserver_msg(s_sock, &command, &context, &key, &fd);
if ((res < 0) || (command != FD_DEREGISTER_ACK)) {
- ODP_ERR("fd de-registration failure\n");
+ _ODP_ERR("fd de-registration failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
close(s_sock);
- odp_spinlock_unlock(client_lock);
return 0;
}
@@ -363,68 +361,55 @@ int _odp_fdserver_lookup_fd(fd_server_context_e context, uint64_t key)
int command;
int fd;
- odp_spinlock_lock(client_lock);
-
s_sock = get_socket();
- if (s_sock < 0) {
- odp_spinlock_unlock(client_lock);
- return(-1);
- }
+ if (s_sock < 0)
+ return -1;
res = send_fdserver_msg(s_sock, FD_LOOKUP_REQ, context, key, -1);
if (res < 0) {
- ODP_ERR("fd lookup failure\n");
+ _ODP_ERR("fd lookup failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
res = recv_fdserver_msg(s_sock, &command, &context, &key, &fd);
if ((res < 0) || (command != FD_LOOKUP_ACK)) {
- ODP_ERR("fd lookup failure\n");
+ _ODP_ERR("fd lookup failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
close(s_sock);
- ODP_DBG("FD client lookup: pid=%d, key=%" PRIu64 ", fd=%d\n",
- getpid(), key, fd);
+ _ODP_DBG("FD client lookup: pid=%d, key=%" PRIu64 ", fd=%d\n",
+ getpid(), key, fd);
- odp_spinlock_unlock(client_lock);
return fd;
}
/*
- * request server terminaison:
+ * request server termination:
*/
static int stop_server(void)
{
int s_sock; /* server socket */
int res;
- odp_spinlock_lock(client_lock);
-
- ODP_DBG("FD sending server stop request\n");
+ ODP_DBG_LVL(FD_DBG, "FD sending server stop request\n");
s_sock = get_socket();
- if (s_sock < 0) {
- odp_spinlock_unlock(client_lock);
- return(-1);
- }
+ if (s_sock < 0)
+ return -1;
res = send_fdserver_msg(s_sock, FD_SERVERSTOP_REQ, 0, 0, -1);
if (res < 0) {
- ODP_ERR("fd stop request failure\n");
+ _ODP_ERR("fd stop request failure\n");
close(s_sock);
- odp_spinlock_unlock(client_lock);
return -1;
}
close(s_sock);
- odp_spinlock_unlock(client_lock);
return 0;
}
@@ -446,7 +431,7 @@ static int handle_request(int client_sock)
switch (command) {
case FD_REGISTER_REQ:
if ((fd < 0) || (context >= FD_SRV_CTX_END)) {
- ODP_ERR("Invalid register fd or context\n");
+ _ODP_ERR("Invalid register fd or context\n");
send_fdserver_msg(client_sock, FD_REGISTER_NACK,
FD_SRV_CTX_NA, 0, -1);
return 0;
@@ -457,10 +442,10 @@ static int handle_request(int client_sock)
fd_table[fd_table_nb_entries].context = context;
fd_table[fd_table_nb_entries].key = key;
fd_table[fd_table_nb_entries++].fd = fd;
- ODP_DBG("storing {ctx=%d, key=%" PRIu64 "}->fd=%d\n",
- context, key, fd);
+ ODP_DBG_LVL(FD_DBG, "storing {ctx=%d, key=%" PRIu64 "}->fd=%d\n",
+ context, key, fd);
} else {
- ODP_ERR("FD table full\n");
+ _ODP_ERR("FD table full\n");
send_fdserver_msg(client_sock, FD_REGISTER_NACK,
FD_SRV_CTX_NA, 0, -1);
return 0;
@@ -472,7 +457,7 @@ static int handle_request(int client_sock)
case FD_LOOKUP_REQ:
if (context >= FD_SRV_CTX_END) {
- ODP_ERR("invalid lookup context\n");
+ _ODP_ERR("invalid lookup context\n");
send_fdserver_msg(client_sock, FD_LOOKUP_NACK,
FD_SRV_CTX_NA, 0, -1);
return 0;
@@ -483,7 +468,7 @@ static int handle_request(int client_sock)
if ((fd_table[i].context == context) &&
(fd_table[i].key == key)) {
fd = fd_table[i].fd;
- ODP_DBG("lookup {ctx=%d,"
+ _ODP_DBG("lookup {ctx=%d,"
" key=%" PRIu64 "}->fd=%d\n",
context, key, fd);
send_fdserver_msg(client_sock,
@@ -500,7 +485,7 @@ static int handle_request(int client_sock)
case FD_DEREGISTER_REQ:
if (context >= FD_SRV_CTX_END) {
- ODP_ERR("invalid deregister context\n");
+ _ODP_ERR("invalid deregister context\n");
send_fdserver_msg(client_sock, FD_DEREGISTER_NACK,
FD_SRV_CTX_NA, 0, -1);
return 0;
@@ -510,9 +495,9 @@ static int handle_request(int client_sock)
for (i = 0; i < fd_table_nb_entries; i++) {
if ((fd_table[i].context == context) &&
(fd_table[i].key == key)) {
- ODP_DBG("drop {ctx=%d,"
- " key=%" PRIu64 "}->fd=%d\n",
- context, key, fd_table[i].fd);
+ ODP_DBG_LVL(FD_DBG, "drop {ctx=%d,"
+ " key=%" PRIu64 "}->fd=%d\n",
+ context, key, fd_table[i].fd);
close(fd_table[i].fd);
fd_table[i] = fd_table[--fd_table_nb_entries];
send_fdserver_msg(client_sock,
@@ -528,11 +513,11 @@ static int handle_request(int client_sock)
break;
case FD_SERVERSTOP_REQ:
- ODP_DBG("Stoping FD server\n");
+ ODP_DBG_LVL(FD_DBG, "Stopping FD server\n");
return 1;
default:
- ODP_ERR("Unexpected request\n");
+ _ODP_ERR("Unexpected request\n");
break;
}
return 0;
@@ -552,8 +537,11 @@ static void wait_requests(int sock)
addr_sz = sizeof(remote);
c_socket = accept(sock, (struct sockaddr *)&remote, &addr_sz);
if (c_socket == -1) {
- ODP_ERR("wait_requests: %s\n", strerror(errno));
- return;
+ if (errno == EINTR)
+ continue;
+
+ _ODP_ERR("accept() failed: %s\n", strerror(errno));
+ return;
}
if (handle_request(c_socket))
@@ -573,23 +561,29 @@ int _odp_fdserver_init_global(void)
int sock;
struct sockaddr_un local;
pid_t server_pid;
- int res;
+ int len, res;
- /* create the client spinlock that any client can see: */
- client_lock = mmap(NULL, sizeof(odp_spinlock_t), PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKDIR_FORMAT,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid);
- odp_spinlock_init(client_lock);
+ mkdir(sockpath, 0744);
/* construct the server named socket path: */
- snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKPATH_FORMAT,
- odp_global_data.main_pid);
+ len = snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCK_FORMAT,
+ odp_global_ro.shm_dir, odp_global_ro.uid,
+ odp_global_ro.main_pid);
+
+ if (len >= FDSERVER_SOCKPATH_MAXLEN || len >= (int)sizeof(local.sun_path)) {
+ _ODP_ERR("path too long\n");
+ return -1;
+ }
/* create UNIX domain socket: */
sock = socket(AF_UNIX, SOCK_STREAM, 0);
if (sock == -1) {
- ODP_ERR("_odp_fdserver_init_global: %s\n", strerror(errno));
- return(-1);
+ _ODP_ERR("socket() failed: %s\n", strerror(errno));
+ return -1;
}
/* remove previous named socket if it already exists: */
@@ -597,30 +591,60 @@ int _odp_fdserver_init_global(void)
/* bind to new named socket: */
local.sun_family = AF_UNIX;
- strncpy(local.sun_path, sockpath, sizeof(local.sun_path));
+ memcpy(local.sun_path, sockpath, sizeof(local.sun_path));
+ local.sun_path[sizeof(local.sun_path) - 1] = '\0';
+
res = bind(sock, (struct sockaddr *)&local, sizeof(struct sockaddr_un));
if (res == -1) {
- ODP_ERR("_odp_fdserver_init_global: %s\n", strerror(errno));
+ _ODP_ERR("bind() failed: %s\n", strerror(errno));
close(sock);
- return(-1);
+ return -1;
}
- /* listen for incoming conections: */
+ /* listen for incoming connections: */
if (listen(sock, FDSERVER_BACKLOG) == -1) {
- ODP_ERR("_odp_fdserver_init_global: %s\n", strerror(errno));
+ _ODP_ERR("listen() failed: %s\n", strerror(errno));
close(sock);
- return(-1);
+ return -1;
}
/* fork a server process: */
server_pid = fork();
if (server_pid == -1) {
- ODP_ERR("Could not fork!\n");
+ _ODP_ERR("Could not fork!\n");
close(sock);
- return(-1);
+ return -1;
}
if (server_pid == 0) { /*child */
+ sigset_t sigset;
+ struct sigaction action;
+
+ sigfillset(&sigset);
+ /* undefined if these are ignored, as per POSIX */
+ sigdelset(&sigset, SIGFPE);
+ sigdelset(&sigset, SIGILL);
+ sigdelset(&sigset, SIGSEGV);
+ /* can not be masked */
+ sigdelset(&sigset, SIGKILL);
+ sigdelset(&sigset, SIGSTOP);
+ /* these we want to handle */
+ sigdelset(&sigset, SIGTERM);
+ if (sigprocmask(SIG_SETMASK, &sigset, NULL) == -1) {
+			_ODP_ERR("Could not set signal mask\n");
+ exit(1);
+ }
+
+ /* set default handlers for those signals we can handle */
+ memset(&action, 0, sizeof(action));
+ action.sa_handler = SIG_DFL;
+ sigemptyset(&action.sa_mask);
+ action.sa_flags = 0;
+ sigaction(SIGFPE, &action, NULL);
+ sigaction(SIGILL, &action, NULL);
+ sigaction(SIGSEGV, &action, NULL);
+ sigaction(SIGTERM, &action, NULL);
+
/* TODO: pin the server on appropriate service cpu mask */
/* when (if) we can agree on the usage of service mask */
@@ -628,10 +652,16 @@ int _odp_fdserver_init_global(void)
/* orphans being "adopted" by the init process... */
prctl(PR_SET_PDEATHSIG, SIGTERM);
+ res = setsid();
+ if (res == -1) {
+			_ODP_ERR("Could not setsid()\n");
+ exit(1);
+ }
+
/* allocate the space for the file descriptor<->key table: */
fd_table = malloc(FDSERVER_MAX_ENTRIES * sizeof(fdentry_t));
if (!fd_table) {
- ODP_ERR("maloc failed!\n");
+			_ODP_ERR("malloc failed!\n");
exit(1);
}
@@ -646,6 +676,7 @@ int _odp_fdserver_init_global(void)
}
/* parent */
+ odp_global_ro.fdserver_pid = server_pid;
close(sock);
return 0;
}
@@ -656,18 +687,35 @@ int _odp_fdserver_init_global(void)
int _odp_fdserver_term_global(void)
{
int status;
+ pid_t pid;
char sockpath[FDSERVER_SOCKPATH_MAXLEN];
- /* close the server and wait for child terminaison*/
- stop_server();
- wait(&status);
+ /* close fdserver and wait for it to terminate */
+ if (stop_server()) {
+ _ODP_ERR("Server stop failed\n");
+ return -1;
+ }
+
+ _ODP_DBG("Waiting for fdserver (%i) to stop\n", odp_global_ro.fdserver_pid);
+ pid = waitpid(odp_global_ro.fdserver_pid, &status, 0);
+
+ if (pid != odp_global_ro.fdserver_pid)
+ _ODP_ERR("Failed to wait for fdserver\n");
/* construct the server named socket path: */
- snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKPATH_FORMAT,
- odp_global_data.main_pid);
+ snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCK_FORMAT,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
+ odp_global_ro.main_pid);
/* delete the UNIX domain socket: */
unlink(sockpath);
+ /* delete shm files directory */
+ snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKDIR_FORMAT,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid);
+ rmdir(sockpath);
+
return 0;
}
diff --git a/platform/linux-generic/odp_hash.c b/platform/linux-generic/odp_hash.c
deleted file mode 100644
index 55876c338..000000000
--- a/platform/linux-generic/odp_hash.c
+++ /dev/null
@@ -1,489 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <odp/api/hash.h>
-#include <odp/api/std_types.h>
-
-#include <stddef.h>
-
-static const uint32_t crc32c_tables[8][256] = {{
- 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F, 0x35F1141C,
- 0x26A1E7E8, 0xD4CA64EB, 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
- 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, 0x105EC76F, 0xE235446C,
- 0xF165B798, 0x030E349B, 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
- 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC,
- 0xBC267848, 0x4E4DFB4B, 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
- 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, 0xAA64D611, 0x580F5512,
- 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
- 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD,
- 0x1642AE59, 0xE4292D5A, 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
- 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, 0x417B1DBC, 0xB3109EBF,
- 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
- 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, 0x0C38D26C, 0xFE53516F,
- 0xED03A29B, 0x1F682198, 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
- 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F,
- 0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
- 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, 0xA65C047D, 0x5437877E,
- 0x4767748A, 0xB50CF789, 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
- 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E,
- 0x90A324FA, 0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
- 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD, 0xCEB018DE,
- 0xDDE0EB2A, 0x2F8B6829, 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
- 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, 0x082F63B7, 0xFA44E0B4,
- 0xE9141340, 0x1B7F9043, 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
- 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B,
- 0xB4091BFF, 0x466298FC, 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
- 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, 0xA24BB5A6, 0x502036A5,
- 0x4370C551, 0xB11B4652, 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
- 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975,
- 0x0E330A81, 0xFC588982, 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
- 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, 0x38CC2A06, 0xCAA7A905,
- 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
- 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, 0x0417B1DB, 0xF67C32D8,
- 0xE52CC12C, 0x1747422F, 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
- 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8,
- 0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
- 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, 0x9E902E7B, 0x6CFBAD78,
- 0x7FAB5E8C, 0x8DC0DD8F, 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
- 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6,
- 0x88D28022, 0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
- 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A, 0xC69F7B69,
- 0xD5CF889D, 0x27A40B9E, 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
- 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351,
-},
-{
- 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, 0x4E8A61DC, 0x5D28F9AB,
- 0x69CF5132, 0x7A6DC945, 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21,
- 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, 0x3FC5F181, 0x2C6769F6,
- 0x1880C16F, 0x0B225918, 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
- 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, 0xEC5B53E5, 0xFFF9CB92,
- 0xCB1E630B, 0xD8BCFB7C, 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B,
- 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, 0xE29F20BA, 0xF13DB8CD,
- 0xC5DA1054, 0xD6788823, 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
- 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, 0x0EC4735F, 0x1D66EB28,
- 0x298143B1, 0x3A23DBC6, 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2,
- 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, 0xFF17C604, 0xECB55E73,
- 0xD852F6EA, 0xCBF06E9D, 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
- 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, 0x2C896460, 0x3F2BFC17,
- 0x0BCC548E, 0x186ECCF9, 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C,
- 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, 0x5DC6F43D, 0x4E646C4A,
- 0x7A83C4D3, 0x69215CA4, 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
- 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, 0xCE1644DA, 0xDDB4DCAD,
- 0xE9537434, 0xFAF1EC43, 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27,
- 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, 0xBF59D487, 0xACFB4CF0,
- 0x981CE469, 0x8BBE7C1E, 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
- 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, 0x6CC776E3, 0x7F65EE94,
- 0x4B82460D, 0x5820DE7A, 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260,
- 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, 0x66D73941, 0x7575A136,
- 0x419209AF, 0x523091D8, 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
- 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, 0x8A8C6AA4, 0x992EF2D3,
- 0xADC95A4A, 0xBE6BC23D, 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059,
- 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, 0x844819FB, 0x97EA818C,
- 0xA30D2915, 0xB0AFB162, 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
- 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, 0x57D6BB9F, 0x447423E8,
- 0x70938B71, 0x63311306, 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3,
- 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, 0x26992BC2, 0x353BB3B5,
- 0x01DC1B2C, 0x127E835B, 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
- 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, 0x4A5E5D21, 0x59FCC556,
- 0x6D1B6DCF, 0x7EB9F5B8, 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC,
- 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, 0x3B11CD7C, 0x28B3550B,
- 0x1C54FD92, 0x0FF665E5, 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
- 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, 0xE88F6F18, 0xFB2DF76F,
- 0xCFCA5FF6, 0xDC68C781, 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766,
- 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, 0xE64B1C47, 0xF5E98430,
- 0xC10E2CA9, 0xD2ACB4DE, 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
- 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, 0x0A104FA2, 0x19B2D7D5,
- 0x2D557F4C, 0x3EF7E73B, 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F,
- 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483,
-},
-{
- 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, 0x9EDEA41A, 0x3B9F3664,
- 0xD1B1F617, 0x74F06469, 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6,
- 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, 0x70A27D8A, 0xD5E3EFF4,
- 0x3FCD2F87, 0x9A8CBDF9, 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
- 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, 0xD62DE755, 0x736C752B,
- 0x9942B558, 0x3C032726, 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67,
- 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, 0xD915C5D1, 0x7C5457AF,
- 0x967A97DC, 0x333B05A2, 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
- 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, 0x0F382284, 0xAA79B0FA,
- 0x40577089, 0xE516E2F7, 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828,
- 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, 0xC76580D9, 0x622412A7,
- 0x880AD2D4, 0x2D4B40AA, 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
- 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, 0x61EA1A06, 0xC4AB8878,
- 0x2E85480B, 0x8BC4DA75, 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20,
- 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, 0x8F96C396, 0x2AD751E8,
- 0xC0F9919B, 0x65B803E5, 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
- 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, 0xB8FFDFD7, 0x1DBE4DA9,
- 0xF7908DDA, 0x52D11FA4, 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B,
- 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, 0x56830647, 0xF3C29439,
- 0x19EC544A, 0xBCADC634, 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
- 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, 0xF00C9C98, 0x554D0EE6,
- 0xBF63CE95, 0x1A225CEB, 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730,
- 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, 0xB3764986, 0x1637DBF8,
- 0xFC191B8B, 0x595889F5, 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
- 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, 0x655BAED3, 0xC01A3CAD,
- 0x2A34FCDE, 0x8F756EA0, 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F,
- 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, 0x6A638C57, 0xCF221E29,
- 0x250CDE5A, 0x804D4C24, 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
- 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, 0xCCEC1688, 0x69AD84F6,
- 0x83834485, 0x26C2D6FB, 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE,
- 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, 0x2290CF18, 0x87D15D66,
- 0x6DFF9D15, 0xC8BE0F6B, 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
- 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, 0xD29C5380, 0x77DDC1FE,
- 0x9DF3018D, 0x38B293F3, 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C,
- 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, 0x3CE08A10, 0x99A1186E,
- 0x738FD81D, 0xD6CE4A63, 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
- 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, 0x9A6F10CF, 0x3F2E82B1,
- 0xD50042C2, 0x7041D0BC, 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD,
- 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, 0x9557324B, 0x3016A035,
- 0xDA386046, 0x7F79F238, 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
- 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, 0x437AD51E, 0xE63B4760,
- 0x0C158713, 0xA954156D, 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2,
- 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8,
-},
-{
- 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, 0x7B2231F3, 0xA6679B4B,
- 0xC4451272, 0x1900B8CA, 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF,
- 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, 0xE964B13D, 0x34211B85,
- 0x560392BC, 0x8B463804, 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
- 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, 0x6402E328, 0xB9474990,
- 0xDB65C0A9, 0x06206A11, 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2,
- 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, 0x2161776D, 0xFC24DDD5,
- 0x9E0654EC, 0x4343FE54, 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
- 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, 0x45639445, 0x98263EFD,
- 0xFA04B7C4, 0x27411D7C, 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69,
- 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, 0xABA65FE7, 0x76E3F55F,
- 0x14C17C66, 0xC984D6DE, 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
- 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, 0x26C00DF2, 0xFB85A74A,
- 0x99A72E73, 0x44E284CB, 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3,
- 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, 0xB4868D3C, 0x69C32784,
- 0x0BE1AEBD, 0xD6A40405, 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
- 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, 0x07A17A9F, 0xDAE4D027,
- 0xB8C6591E, 0x6583F3A6, 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3,
- 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, 0x95E7FA51, 0x48A250E9,
- 0x2A80D9D0, 0xF7C57368, 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
- 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, 0x1881A844, 0xC5C402FC,
- 0xA7E68BC5, 0x7AA3217D, 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006,
- 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, 0xA4E4AAD9, 0x79A10061,
- 0x1B838958, 0xC6C623E0, 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
- 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, 0xC0E649F1, 0x1DA3E349,
- 0x7F816A70, 0xA2C4C0C8, 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD,
- 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, 0x8585DDB4, 0x58C0770C,
- 0x3AE2FE35, 0xE7A7548D, 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
- 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, 0x08E38FA1, 0xD5A62519,
- 0xB784AC20, 0x6AC10698, 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0,
- 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, 0x9AA50F6F, 0x47E0A5D7,
- 0x25C22CEE, 0xF8878656, 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
- 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, 0x8224A72B, 0x5F610D93,
- 0x3D4384AA, 0xE0062E12, 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07,
- 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, 0x106227E5, 0xCD278D5D,
- 0xAF050464, 0x7240AEDC, 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
- 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, 0x9D0475F0, 0x4041DF48,
- 0x22635671, 0xFF26FCC9, 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A,
- 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, 0xD867E1B5, 0x05224B0D,
- 0x6700C234, 0xBA45688C, 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
- 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, 0xBC65029D, 0x6120A825,
- 0x0302211C, 0xDE478BA4, 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1,
- 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842,
-},
-{
- 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, 0xE045BEB0, 0xD854D11C,
- 0x906761E8, 0xA8760E44, 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65,
- 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, 0x8F2261D3, 0xB7330E7F,
- 0xFF00BE8B, 0xC711D127, 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
- 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, 0xAA00D4F2, 0x9211BB5E,
- 0xDA220BAA, 0xE2336406, 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3,
- 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, 0xDECFBEC6, 0xE6DED16A,
- 0xAEED619E, 0x96FC0E32, 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
- 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, 0x74CF6A34, 0x4CDE0598,
- 0x04EDB56C, 0x3CFCDAC0, 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1,
- 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, 0x37516AAE, 0x0F400502,
- 0x4773B5F6, 0x7F62DA5A, 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
- 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, 0x1273DF8F, 0x2A62B023,
- 0x625100D7, 0x5A406F7B, 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89,
- 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, 0x7D1400EC, 0x45056F40,
- 0x0D36DFB4, 0x3527B018, 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
- 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, 0xCCBC6149, 0xF4AD0EE5,
- 0xBC9EBE11, 0x848FD1BD, 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C,
- 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, 0xA3DBBE2A, 0x9BCAD186,
- 0xD3F96172, 0xEBE80EDE, 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
- 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, 0x86F90B0B, 0xBEE864A7,
- 0xF6DBD453, 0xCECABBFF, 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8,
- 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, 0xABC5DECD, 0x93D4B161,
- 0xDBE70195, 0xE3F66E39, 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
- 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, 0x01C50A3F, 0x39D46593,
- 0x71E7D567, 0x49F6BACB, 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA,
- 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, 0x750A600B, 0x4D1B0FA7,
- 0x0528BF53, 0x3D39D0FF, 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
- 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, 0x5028D52A, 0x6839BA86,
- 0x200A0A72, 0x181B65DE, 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C,
- 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, 0x3F4F0A49, 0x075E65E5,
- 0x4F6DD511, 0x777CBABD, 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
- 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, 0xB9B60142, 0x81A76EEE,
- 0xC994DE1A, 0xF185B1B6, 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497,
- 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, 0xD6D1DE21, 0xEEC0B18D,
- 0xA6F30179, 0x9EE26ED5, 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
- 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, 0xF3F36B00, 0xCBE204AC,
- 0x83D1B458, 0xBBC0DBF4, 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51,
- 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, 0x873C0134, 0xBF2D6E98,
- 0xF71EDE6C, 0xCF0FB1C0, 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
- 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, 0x2D3CD5C6, 0x152DBA6A,
- 0x5D1E0A9E, 0x650F6532, 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013,
- 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3,
-},
-{
- 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, 0xB2F53777, 0x5DC55C6E,
- 0x697997B4, 0x8649FCAD, 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5,
- 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, 0xC00C303E, 0x2F3C5B27,
- 0x1B8090FD, 0xF4B0FBE4, 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
- 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, 0x12FF1F56, 0xFDCF744F,
- 0xC973BF95, 0x2643D48C, 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57,
- 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, 0xE5F20E92, 0x0AC2658B,
- 0x3E7EAE51, 0xD14EC548, 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
- 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, 0xF70D11C4, 0x183D7ADD,
- 0x2C81B107, 0xC3B1DA1E, 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576,
- 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, 0x0E045BEB, 0xE13430F2,
- 0xD588FB28, 0x3AB89031, 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
- 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, 0xDCF77483, 0x33C71F9A,
- 0x077BD440, 0xE84BBF59, 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F,
- 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, 0xAE0E73CA, 0x413E18D3,
- 0x7582D309, 0x9AB2B810, 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
- 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, 0x39057A11, 0xD6351108,
- 0xE289DAD2, 0x0DB9B1CB, 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3,
- 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, 0x4BFC7D58, 0xA4CC1641,
- 0x9070DD9B, 0x7F40B682, 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
- 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, 0x990F5230, 0x763F3929,
- 0x4283F2F3, 0xADB399EA, 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C,
- 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, 0x7C0EAFC9, 0x933EC4D0,
- 0xA7820F0A, 0x48B26413, 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
- 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, 0x6EF1B09F, 0x81C1DB86,
- 0xB57D105C, 0x5A4D7B45, 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D,
- 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, 0x99FCA15B, 0x76CCCA42,
- 0x42700198, 0xAD406A81, 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
- 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, 0x4B0F8E33, 0xA43FE52A,
- 0x90832EF0, 0x7FB345E9, 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF,
- 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, 0x39F6897A, 0xD6C6E263,
- 0xE27A29B9, 0x0D4A42A0, 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
- 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, 0xA0F9DB4A, 0x4FC9B053,
- 0x7B757B89, 0x94451090, 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8,
- 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, 0xD200DC03, 0x3D30B71A,
- 0x098C7CC0, 0xE6BC17D9, 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
- 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, 0x00F3F36B, 0xEFC39872,
- 0xDB7F53A8, 0x344F38B1, 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A,
- 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, 0xF7FEE2AF, 0x18CE89B6,
- 0x2C72426C, 0xC3422975, 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
- 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, 0xE501FDF9, 0x0A3196E0,
- 0x3E8D5D3A, 0xD1BD3623, 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B,
- 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C,
-},
-{
- 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, 0xA5E0C5D1, 0xCDE3E919,
- 0x75E69C41, 0x1DE5B089, 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B,
- 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, 0x9C5BFAA6, 0xF458D66E,
- 0x4C5DA336, 0x245E8FFE, 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
- 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, 0x7796C224, 0x1F95EEEC,
- 0xA7909BB4, 0xCF93B77C, 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5,
- 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, 0x73767EEE, 0x1B755226,
- 0xA370277E, 0xCB730BB6, 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
- 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, 0x04E0BCCA, 0x6CE39002,
- 0xD4E6E55A, 0xBCE5C992, 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110,
- 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, 0x7AB7077A, 0x12B42BB2,
- 0xAAB15EEA, 0xC2B27222, 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
- 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, 0x917A3FF8, 0xF9791330,
- 0x417C6668, 0x297F4AA0, 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884,
- 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, 0xA8C1008F, 0xC0C22C47,
- 0x78C7591F, 0x10C475D7, 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
- 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, 0xE20C4116, 0x8A0F6DDE,
- 0x320A1886, 0x5A09344E, 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC,
- 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, 0xDBB77E61, 0xB3B452A9,
- 0x0BB127F1, 0x63B20B39, 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
- 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, 0x307A46E3, 0x58796A2B,
- 0xE07C1F73, 0x887F33BB, 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC,
- 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, 0xBB43F3A7, 0xD340DF6F,
- 0x6B45AA37, 0x034686FF, 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
- 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, 0xCCD53183, 0xA4D61D4B,
- 0x1CD36813, 0x74D044DB, 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59,
- 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, 0xC8358D49, 0xA036A181,
- 0x1833D4D9, 0x7030F811, 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
- 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, 0x23F8B5CB, 0x4BFB9903,
- 0xF3FEEC5B, 0x9BFDC093, 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7,
- 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, 0x1A438ABC, 0x7240A674,
- 0xCA45D32C, 0xA246FFE4, 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
- 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, 0x2A39CC5F, 0x423AE097,
- 0xFA3F95CF, 0x923CB907, 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185,
- 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, 0x1382F328, 0x7B81DFE0,
- 0xC384AAB8, 0xAB878670, 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
- 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, 0xF84FCBAA, 0x904CE762,
- 0x2849923A, 0x404ABEF2, 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B,
- 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, 0xFCAF7760, 0x94AC5BA8,
- 0x2CA92EF0, 0x44AA0238, 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
- 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, 0x8B39B544, 0xE33A998C,
- 0x5B3FECD4, 0x333CC01C, 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E,
- 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F,
-},
-{
- 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, 0x211D826D, 0x6821FF4A,
- 0xB3657823, 0xFA590504, 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3,
- 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, 0x847609B4, 0xCD4A7493,
- 0x160EF3FA, 0x5F328EDD, 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
- 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, 0xE7508F03, 0xAE6CF224,
- 0x7528754D, 0x3C14086A, 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0,
- 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, 0x4F3B6143, 0x06071C64,
- 0xDD439B0D, 0x947FE62A, 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
- 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, 0xA86BEE40, 0xE1579367,
- 0x3A13140E, 0x732F6929, 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E,
- 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, 0x1A00CB32, 0x533CB615,
- 0x8878317C, 0xC1444C5B, 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
- 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, 0x79264D85, 0x301A30A2,
- 0xEB5EB7CB, 0xA262CAEC, 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF,
- 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, 0xDC4DC65C, 0x9571BB7B,
- 0x4E353C12, 0x07094135, 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
- 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, 0x361D2CC6, 0x7F2151E1,
- 0xA465D688, 0xED59ABAF, 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18,
- 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, 0x9376A71F, 0xDA4ADA38,
- 0x010E5D51, 0x48322076, 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
- 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, 0xF05021A8, 0xB96C5C8F,
- 0x6228DBE6, 0x2B14A6C1, 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D,
- 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, 0x763A92BE, 0x3F06EF99,
- 0xE44268F0, 0xAD7E15D7, 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
- 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, 0x916A1DBD, 0xD856609A,
- 0x0312E7F3, 0x4A2E9AD4, 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63,
- 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, 0x3901F3FD, 0x703D8EDA,
- 0xAB7909B3, 0xE2457494, 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
- 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, 0x5A27754A, 0x131B086D,
- 0xC85F8F04, 0x8163F223, 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20,
- 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, 0xFF4CFE93, 0xB67083B4,
- 0x6D3404DD, 0x240879FA, 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
- 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, 0x0F1CDF3B, 0x4620A21C,
- 0x9D642575, 0xD4585852, 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5,
- 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, 0xAA7754E2, 0xE34B29C5,
- 0x380FAEAC, 0x7133D38B, 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
- 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, 0xC951D255, 0x806DAF72,
- 0x5B29281B, 0x1215553C, 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6,
- 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, 0x613A3C15, 0x28064132,
- 0xF342C65B, 0xBA7EBB7C, 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
- 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, 0x866AB316, 0xCF56CE31,
- 0x14124958, 0x5D2E347F, 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8,
- 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5,
-} };
-
-#define CRC32_UPD(crc, n) \
- (crc32c_tables[(n)][(crc) & 0xff] ^ \
- crc32c_tables[(n) - 1][((crc) >> 8) & 0xff])
-
-static inline uint32_t crc32c_u32(uint32_t data, uint32_t init_val)
-{
- uint32_t crc, term1, term2;
-
- crc = init_val;
- crc ^= data;
-
- term1 = CRC32_UPD(crc, 3);
- term2 = crc >> 16;
- crc = term1 ^ CRC32_UPD(term2, 1);
-
- return crc;
-}
-
-static inline uint32_t crc32c_u64(uint64_t data, uint32_t init_val)
-{
- union {
- uint64_t u64;
- uint32_t u32[2];
- } d;
- d.u64 = data;
-
- uint32_t crc, term1, term2;
-
- crc = init_val;
- crc ^= d.u32[0];
-
- term1 = CRC32_UPD(crc, 7);
- term2 = crc >> 16;
- crc = term1 ^ CRC32_UPD(term2, 5);
- term1 = CRC32_UPD(d.u32[1], 3);
- term2 = d.u32[1] >> 16;
- crc ^= term1 ^ CRC32_UPD(term2, 1);
-
- return crc;
-}
-
-uint32_t odp_hash_crc32c(const void *data, uint32_t data_len,
- uint32_t init_val)
-{
- size_t i;
- uint64_t temp = 0;
- uintptr_t pd = (uintptr_t)data;
-
- for (i = 0; i < data_len / 8; i++) {
- init_val = crc32c_u64(*(const uint64_t *)pd, init_val);
- pd += 8;
- }
-
- switch (7 - (data_len & 0x07)) {
- case 0:
- temp |= (uint64_t)*((const uint8_t *)pd + 6) << 48;
- /* Fallthrough */
- case 1:
- temp |= (uint64_t)*((const uint8_t *)pd + 5) << 40;
- /* Fallthrough */
- case 2:
- temp |= (uint64_t)*((const uint8_t *)pd + 4) << 32;
- temp |= *(const uint32_t *)pd;
- init_val = crc32c_u64(temp, init_val);
- break;
- case 3:
- init_val = crc32c_u32(*(const uint32_t *)pd, init_val);
- break;
- case 4:
- temp |= *((const uint8_t *)pd + 2) << 16;
- /* Fallthrough */
- case 5:
- temp |= *((const uint8_t *)pd + 1) << 8;
- /* Fallthrough */
- case 6:
- temp |= *(const uint8_t *)pd;
- init_val = crc32c_u32(temp, init_val);
- /* Fallthrough */
- default:
- break;
- }
-
- return init_val;
-}
diff --git a/platform/linux-generic/odp_hash_api.c b/platform/linux-generic/odp_hash_api.c
new file mode 100644
index 000000000..893c9dc11
--- /dev/null
+++ b/platform/linux-generic/odp_hash_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/hash.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/hash_inlines.h>
diff --git a/platform/linux-generic/odp_hash_crc_gen.c b/platform/linux-generic/odp_hash_crc_gen.c
new file mode 100644
index 000000000..f831c63b8
--- /dev/null
+++ b/platform/linux-generic/odp_hash_crc_gen.c
@@ -0,0 +1,248 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <odp/api/hash.h>
+#include <odp/api/hints.h>
+#include <odp/api/rwlock.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+
+typedef struct crc_table_t {
+ uint32_t crc[256];
+ uint32_t width;
+ uint32_t poly;
+ int reflect;
+ odp_rwlock_t rwlock;
+ odp_shm_t shm;
+
+} crc_table_t;
+
+static crc_table_t *crc_table;
+
+int _odp_hash_init_global(void)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("_odp_hash_crc_gen", sizeof(crc_table_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ crc_table = odp_shm_addr(shm);
+
+ if (crc_table == NULL) {
+ _ODP_ERR("Shm reserve failed for odp_hash_crc_gen\n");
+ return -1;
+ }
+
+ memset(crc_table, 0, sizeof(crc_table_t));
+
+ crc_table->shm = shm;
+ odp_rwlock_init(&crc_table->rwlock);
+
+ return 0;
+}
+
+int _odp_hash_term_global(void)
+{
+ if (odp_shm_free(crc_table->shm)) {
+ _ODP_ERR("Shm free failed for odp_hash_crc_gen\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Reflect bits in a byte */
+static inline uint8_t reflect_u8(uint8_t byte)
+{
+ uint8_t u8[8];
+
+ u8[0] = (byte & (0x1u << 7)) >> 7;
+ u8[1] = (byte & (0x1u << 6)) >> 5;
+ u8[2] = (byte & (0x1u << 5)) >> 3;
+ u8[3] = (byte & (0x1u << 4)) >> 1;
+
+ u8[4] = (byte & (0x1u << 3)) << 1;
+ u8[5] = (byte & (0x1u << 2)) << 3;
+ u8[6] = (byte & (0x1u << 1)) << 5;
+ u8[7] = (byte & 0x1u) << 7;
+
+ return u8[0] | u8[1] | u8[2] | u8[3] | u8[4] | u8[5] | u8[6] | u8[7];
+}
+
+/* Reflect 32 bits */
+static inline uint32_t reflect_u32(uint32_t u32)
+{
+ uint8_t u8[4];
+
+ u8[0] = reflect_u8((u32 & 0xff000000u) >> 24);
+ u8[1] = reflect_u8((u32 & 0x00ff0000u) >> 16);
+ u8[2] = reflect_u8((u32 & 0x0000ff00u) >> 8);
+ u8[3] = reflect_u8(u32 & 0xffu);
+
+ return (u8[3] << 24) | (u8[2] << 16) | (u8[1] << 8) | u8[0];
+}
+
+/* Reflect 24 bits */
+static inline uint32_t reflect_u24(uint32_t u32)
+{
+ uint8_t u8[4];
+
+ u8[0] = reflect_u8((u32 & 0xff0000u) >> 16);
+ u8[1] = reflect_u8((u32 & 0x00ff00u) >> 8);
+ u8[2] = reflect_u8(u32 & 0xffu);
+
+ return (u8[2] << 16) | (u8[1] << 8) | u8[0];
+}
+
+/* Reflect 16 bits */
+static inline uint32_t reflect_u16(uint32_t u32)
+{
+ uint8_t u8[4];
+
+ u8[0] = reflect_u8((u32 & 0xff00u) >> 8);
+ u8[1] = reflect_u8(u32 & 0xffu);
+
+ return (u8[1] << 8) | u8[0];
+}
+
+/* Generate table for a 32/24/16 bit CRCs.
+ *
+ * Based on an example in RFC 1952.
+ */
+static inline void crc_table_gen(uint32_t poly, int reflect, int width)
+{
+ uint32_t i, crc, bit, shift, msb, mask;
+
+ crc_table->width = width;
+ crc_table->poly = poly;
+ crc_table->reflect = reflect;
+
+ shift = width - 8;
+ mask = 0xffffffffu >> (32 - width);
+ msb = 0x1u << (width - 1);
+
+ if (reflect) {
+ if (width == 32)
+ poly = reflect_u32(poly);
+ else if (width == 24)
+ poly = reflect_u24(poly);
+ else
+ poly = reflect_u16(poly);
+ }
+
+ for (i = 0; i < 256; i++) {
+ if (reflect) {
+ crc = i;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (crc & 0x1u)
+ crc = poly ^ (crc >> 1);
+ else
+ crc = crc >> 1;
+ }
+ } else {
+ crc = i << shift;
+
+ for (bit = 0; bit < 8; bit++) {
+ if (crc & msb)
+ crc = poly ^ (crc << 1);
+ else
+ crc = crc << 1;
+ }
+ }
+
+ crc_table->crc[i] = crc & mask;
+ }
+}
+
+static inline uint32_t crc_calc(const uint8_t *data, uint32_t data_len,
+ uint32_t init_val, int reflect, int width)
+{
+ uint32_t i, crc, shift;
+ uint8_t byte;
+ uint32_t mask;
+
+ shift = width - 8;
+ mask = 0xffffffffu >> (32 - width);
+
+ crc = init_val;
+
+ for (i = 0; i < data_len; i++) {
+ byte = data[i];
+
+ if (reflect) {
+ crc = crc_table->crc[(crc ^ byte) & 0xffu] ^ (crc >> 8);
+ } else {
+ crc = crc_table->crc[(crc >> shift) ^ byte] ^
+ (crc << 8);
+ crc = crc & mask;
+ }
+ }
+
+ return crc;
+}
+
+int odp_hash_crc_gen64(const void *data_ptr, uint32_t data_len,
+ uint64_t init_val, odp_hash_crc_param_t *crc_param,
+ uint64_t *crc_out)
+{
+ uint32_t crc;
+ int update_table;
+ uint32_t poly = crc_param->poly;
+ uint32_t width = crc_param->width;
+ int reflect = crc_param->reflect_in;
+
+ if (odp_unlikely(crc_param->reflect_in != crc_param->reflect_out)) {
+ _ODP_ERR("Odd reflection setting not supported.\n");
+ return -1;
+ }
+
+ if (odp_unlikely(width != 32 && width != 24 && width != 16)) {
+ _ODP_ERR("CRC width %" PRIu32 " bits not supported.\n", width);
+ return -1;
+ }
+
+ /* TODO: fix implementation of 24 bit CRC with reflection */
+ if (odp_unlikely(width == 24 && reflect)) {
+ _ODP_ERR("24 bit CRC with reflection not supported.\n");
+ return -1;
+ }
+
+ odp_rwlock_read_lock(&crc_table->rwlock);
+
+ update_table = (crc_table->width != width) ||
+ (crc_table->poly != poly) ||
+ (crc_table->reflect != reflect);
+
+ /* Generate CRC table if not yet generated. */
+ if (odp_unlikely(update_table)) {
+ odp_rwlock_read_unlock(&crc_table->rwlock);
+ odp_rwlock_write_lock(&crc_table->rwlock);
+
+ crc_table_gen(poly, reflect, width);
+ }
+
+ crc = crc_calc(data_ptr, data_len, init_val, reflect, width);
+
+ if (odp_unlikely(update_table))
+ odp_rwlock_write_unlock(&crc_table->rwlock);
+ else
+ odp_rwlock_read_unlock(&crc_table->rwlock);
+
+ if (crc_param->xor_out)
+ crc = crc ^ (uint32_t)crc_param->xor_out;
+
+ *crc_out = crc;
+
+ return 0;
+}
diff --git a/platform/linux-generic/odp_impl.c b/platform/linux-generic/odp_impl.c
index 3fc2d6ab3..f8af2c51f 100644
--- a/platform/linux-generic/odp_impl.c
+++ b/platform/linux-generic/odp_impl.c
@@ -1,28 +1,15 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-
-/**
- * @file
- *
- * ODP Implementation information
- */
-
-#ifndef ODP_IMPL_H_
-#define ODP_IMPL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
+#include <odp/autoheader_internal.h>
#include <odp/api/version.h>
#define ODP_VERSION_IMPL 0
#define ODP_VERSION_IMPL_STR \
- ODP_VERSION_TO_STR(IMPLEMENTATION_NAME) " " \
+ _ODP_IMPLEMENTATION_NAME " " \
ODP_VERSION_TO_STR(ODP_VERSION_API_GENERATION) "." \
ODP_VERSION_TO_STR(ODP_VERSION_API_MAJOR) "." \
ODP_VERSION_TO_STR(ODP_VERSION_API_MINOR) "-" \
@@ -30,10 +17,7 @@ extern "C" {
ODP_VERSION_TO_STR(ODP_VERSION_API_GENERATION) "." \
ODP_VERSION_TO_STR(ODP_VERSION_API_MAJOR) "." \
ODP_VERSION_TO_STR(ODP_VERSION_API_MINOR) ") " \
- ODP_VERSION_TO_STR(GIT_HASH)
-
-#define ODP_VERSION_IMPL_NAME \
- ODP_VERSION_TO_STR(IMPLEMENTATION_NAME)
+ ODP_VERSION_TO_STR(ODP_VERSION_BUILD)
const char *odp_version_impl_str(void)
{
@@ -42,11 +26,5 @@ const char *odp_version_impl_str(void)
const char *odp_version_impl_name(void)
{
- return ODP_VERSION_IMPL_NAME;
+ return _ODP_IMPLEMENTATION_NAME;
}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 06c61435e..795252df1 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -1,316 +1,617 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+
+#include <odp_posix_extensions.h>
+
#include <odp/api/init.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp/api/plat/thread_inlines.h>
+
#include <odp_debug_internal.h>
-#include <odp/api/debug.h>
-#include <unistd.h>
-#include <odp_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
#include <odp_schedule_if.h>
+#include <odp_libconfig_internal.h>
+
#include <string.h>
#include <stdio.h>
-#include <linux/limits.h>
-#include <dirent.h>
#include <unistd.h>
-#include <string.h>
-#include <stdlib.h>
-#include <errno.h>
-#define _ODP_FILES_FMT "odp-%d-"
-#define _ODP_TMPDIR "/tmp"
+enum init_stage {
+ NO_INIT = 0, /* No init stages completed */
+ LIBCONFIG_INIT,
+ CPUMASK_INIT,
+ SYSINFO_INIT,
+ CPU_CYCLES_INIT,
+ TIME_INIT,
+ ISHM_INIT,
+ FDSERVER_INIT,
+ GLOBAL_RW_DATA_INIT,
+ HASH_INIT,
+ THREAD_INIT,
+ POOL_INIT,
+ EVENT_VALIDATION_INIT,
+ STASH_INIT,
+ QUEUE_INIT,
+ SCHED_INIT,
+ PKTIO_INIT,
+ TIMER_INIT,
+ RANDOM_INIT,
+ CRYPTO_INIT,
+ COMP_INIT,
+ CLASSIFICATION_INIT,
+ TRAFFIC_MNGR_INIT,
+ NAME_TABLE_INIT,
+ IPSEC_EVENTS_INIT,
+ IPSEC_SAD_INIT,
+ IPSEC_INIT,
+ DMA_INIT,
+ ML_INIT,
+ ALL_INIT /* All init stages completed */
+};
+
+odp_global_data_ro_t odp_global_ro;
+odp_global_data_rw_t *odp_global_rw;
+
+/* Global function pointers for inline header usage. The values are written
+ * during odp_init_global() (enables process mode support). */
+#include <odp/visibility_begin.h>
+
+odp_log_func_t ODP_PRINTF_FORMAT(2, 3) _odp_log_fn;
+odp_abort_func_t _odp_abort_fn;
+
+#include <odp/visibility_end.h>
+
+/* odp_init_local() call status */
+static __thread uint8_t init_local_called;
+
+static void disable_features(odp_global_data_ro_t *global_ro,
+ const odp_init_t *init_param)
+{
+ int disable_ipsec, disable_crypto;
+ int disable_dma;
+
+ if (init_param == NULL)
+ return;
+
+ disable_ipsec = init_param->not_used.feat.ipsec;
+ global_ro->disable.ipsec = disable_ipsec;
+
+ disable_crypto = init_param->not_used.feat.crypto;
+ /* Crypto can be disabled only if IPSec is disabled */
+ if (disable_ipsec && disable_crypto)
+ global_ro->disable.crypto = 1;
-struct odp_global_data_s odp_global_data;
+ disable_dma = init_param->not_used.feat.dma;
+ global_ro->disable.dma = disable_dma;
-/* remove all files staring with "odp-<pid>" from a directory "dir" */
-static int cleanup_files(const char *dirpath, int odp_pid)
+ /* DMA uses stash. Disable stash only when both are disabled. */
+ if (disable_dma && init_param->not_used.feat.stash)
+ global_ro->disable.stash = 1;
+
+ global_ro->disable.traffic_mngr = init_param->not_used.feat.tm;
+ global_ro->disable.compress = init_param->not_used.feat.compress;
+ global_ro->disable.ml = init_param->not_used.feat.ml;
+}
+
+void odp_init_param_init(odp_init_t *param)
+{
+ memset(param, 0, sizeof(odp_init_t));
+}
+
+static int global_rw_data_init(void)
{
- struct dirent *e;
- DIR *dir;
- char prefix[PATH_MAX];
- char *fullpath;
- int d_len = strlen(dirpath);
- int p_len;
- int f_len;
-
- dir = opendir(dirpath);
- if (!dir) {
- /* ok if the dir does not exist. no much to delete then! */
- ODP_DBG("opendir failed for %s: %s\n",
- dirpath, strerror(errno));
- return 0;
- }
- snprintf(prefix, PATH_MAX, _ODP_FILES_FMT, odp_pid);
- p_len = strlen(prefix);
- while ((e = readdir(dir)) != NULL) {
- if (strncmp(e->d_name, prefix, p_len) == 0) {
- f_len = strlen(e->d_name);
- fullpath = malloc(d_len + f_len + 2);
- if (fullpath == NULL) {
- closedir(dir);
- return -1;
- }
- snprintf(fullpath, PATH_MAX, "%s/%s",
- dirpath, e->d_name);
- ODP_DBG("deleting obsolete file: %s\n", fullpath);
- if (unlink(fullpath))
- ODP_ERR("unlink failed for %s: %s\n",
- fullpath, strerror(errno));
- free(fullpath);
- }
- }
- closedir(dir);
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("_odp_global_rw_data",
+ sizeof(odp_global_data_rw_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ odp_global_rw = odp_shm_addr(shm);
+ if (odp_global_rw == NULL) {
+ _ODP_ERR("Global RW data shm reserve failed.\n");
+ return -1;
+ }
+
+ memset(odp_global_rw, 0, sizeof(odp_global_data_rw_t));
+
+ return 0;
+}
+
+static int global_rw_data_term(void)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup("_odp_global_rw_data");
+ if (shm == ODP_SHM_INVALID) {
+ _ODP_ERR("Unable to find global RW data shm.\n");
+ return -1;
+ }
+
+ if (odp_shm_free(shm)) {
+ _ODP_ERR("Global RW data shm free failed.\n");
+ return -1;
+ }
return 0;
}
+static int term_global(enum init_stage stage)
+{
+ int rc = 0;
+
+ switch (stage) {
+ case ALL_INIT:
+ case ML_INIT:
+ if (_odp_ml_term_global()) {
+ _ODP_ERR("ODP ML term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case DMA_INIT:
+ if (_odp_dma_term_global()) {
+ _ODP_ERR("ODP DMA term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case IPSEC_INIT:
+ if (_odp_ipsec_term_global()) {
+ _ODP_ERR("ODP IPsec term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case IPSEC_SAD_INIT:
+ if (_odp_ipsec_sad_term_global()) {
+ _ODP_ERR("ODP IPsec SAD term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case IPSEC_EVENTS_INIT:
+ if (_odp_ipsec_events_term_global()) {
+ _ODP_ERR("ODP IPsec events term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case NAME_TABLE_INIT:
+ if (_odp_int_name_tbl_term_global()) {
+ _ODP_ERR("Name table term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case TRAFFIC_MNGR_INIT:
+ if (_odp_tm_term_global()) {
+ _ODP_ERR("TM term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CLASSIFICATION_INIT:
+ if (_odp_classification_term_global()) {
+ _ODP_ERR("ODP classification term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case COMP_INIT:
+ if (_odp_comp_term_global()) {
+ _ODP_ERR("ODP comp term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CRYPTO_INIT:
+ if (_odp_crypto_term_global()) {
+ _ODP_ERR("ODP crypto term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case TIMER_INIT:
+ if (_odp_timer_term_global()) {
+ _ODP_ERR("ODP timer term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case PKTIO_INIT:
+ if (_odp_pktio_term_global()) {
+ _ODP_ERR("ODP pktio term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case SCHED_INIT:
+ if (_odp_schedule_term_global()) {
+ _ODP_ERR("ODP schedule term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case QUEUE_INIT:
+ if (_odp_queue_term_global()) {
+ _ODP_ERR("ODP queue term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case STASH_INIT:
+ if (_odp_stash_term_global()) {
+ _ODP_ERR("ODP stash term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case EVENT_VALIDATION_INIT:
+ if (_odp_event_validation_term_global()) {
+ _ODP_ERR("ODP event validation term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case POOL_INIT:
+ if (_odp_pool_term_global()) {
+ _ODP_ERR("ODP buffer pool term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case THREAD_INIT:
+ if (_odp_thread_term_global()) {
+ _ODP_ERR("ODP thread term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case HASH_INIT:
+ if (_odp_hash_term_global()) {
+ _ODP_ERR("ODP hash term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case GLOBAL_RW_DATA_INIT:
+ if (global_rw_data_term()) {
+ _ODP_ERR("ODP global RW data term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case FDSERVER_INIT:
+ if (_odp_fdserver_term_global()) {
+ _ODP_ERR("ODP fdserver term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case ISHM_INIT:
+ if (_odp_ishm_term_global()) {
+ _ODP_ERR("ODP ishm term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case TIME_INIT:
+ if (_odp_time_term_global()) {
+ _ODP_ERR("ODP time term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CPU_CYCLES_INIT:
+ case SYSINFO_INIT:
+ if (_odp_system_info_term()) {
+ _ODP_ERR("ODP system info term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case CPUMASK_INIT:
+ if (_odp_cpumask_term_global()) {
+ _ODP_ERR("ODP cpumask term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case LIBCONFIG_INIT:
+ if (_odp_libconfig_term_global()) {
+ _ODP_ERR("ODP runtime config term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ default:
+ break;
+ }
+
+ return rc;
+}
+
int odp_init_global(odp_instance_t *instance,
const odp_init_t *params,
const odp_platform_init_t *platform_params ODP_UNUSED)
{
- char *hpdir;
-
- memset(&odp_global_data, 0, sizeof(struct odp_global_data_s));
- odp_global_data.main_pid = getpid();
-
enum init_stage stage = NO_INIT;
- odp_global_data.log_fn = odp_override_log;
- odp_global_data.abort_fn = odp_override_abort;
+ memset(&odp_global_ro, 0, sizeof(odp_global_data_ro_t));
+ odp_global_ro.main_pid = getpid();
+ _odp_log_fn = odp_override_log;
+ _odp_abort_fn = odp_override_abort;
+
+ odp_init_param_init(&odp_global_ro.init_param);
if (params != NULL) {
+ odp_global_ro.init_param = *params;
+
if (params->log_fn != NULL)
- odp_global_data.log_fn = params->log_fn;
+ _odp_log_fn = params->log_fn;
if (params->abort_fn != NULL)
- odp_global_data.abort_fn = params->abort_fn;
+ _odp_abort_fn = params->abort_fn;
+ if (params->mem_model == ODP_MEM_MODEL_PROCESS)
+ odp_global_ro.shm_single_va = 1;
}
- cleanup_files(_ODP_TMPDIR, odp_global_data.main_pid);
+ if (_odp_libconfig_init_global()) {
+ _ODP_ERR("ODP runtime config init failed.\n");
+ goto init_failed;
+ }
+ stage = LIBCONFIG_INIT;
- if (odp_cpumask_init_global(params)) {
- ODP_ERR("ODP cpumask init failed.\n");
+ disable_features(&odp_global_ro, params);
+
+ if (_odp_cpumask_init_global(params)) {
+ _ODP_ERR("ODP cpumask init failed.\n");
goto init_failed;
}
stage = CPUMASK_INIT;
- if (odp_time_init_global()) {
- ODP_ERR("ODP time init failed.\n");
+ if (_odp_system_info_init()) {
+ _ODP_ERR("ODP system_info init failed.\n");
+ goto init_failed;
+ }
+ stage = SYSINFO_INIT;
+
+ if (_odp_cpu_cycles_init_global()) {
+ _ODP_ERR("ODP cpu cycle init failed.\n");
+ goto init_failed;
+ }
+ stage = CPU_CYCLES_INIT;
+
+ if (_odp_time_init_global()) {
+ _ODP_ERR("ODP time init failed.\n");
goto init_failed;
}
stage = TIME_INIT;
- if (odp_system_info_init()) {
- ODP_ERR("ODP system_info init failed.\n");
+ if (_odp_ishm_init_global(params)) {
+ _ODP_ERR("ODP ishm init failed.\n");
goto init_failed;
}
- hpdir = odp_global_data.hugepage_info.default_huge_page_dir;
- /* cleanup obsolete huge page files, if any */
- if (hpdir)
- cleanup_files(hpdir, odp_global_data.main_pid);
- stage = SYSINFO_INIT;
+ stage = ISHM_INIT;
if (_odp_fdserver_init_global()) {
- ODP_ERR("ODP fdserver init failed.\n");
+ _ODP_ERR("ODP fdserver init failed.\n");
goto init_failed;
}
stage = FDSERVER_INIT;
- if (_odp_ishm_init_global()) {
- ODP_ERR("ODP ishm init failed.\n");
+ if (global_rw_data_init()) {
+ _ODP_ERR("ODP global RW data init failed.\n");
goto init_failed;
}
- stage = ISHM_INIT;
+ stage = GLOBAL_RW_DATA_INIT;
- if (odp_thread_init_global()) {
- ODP_ERR("ODP thread init failed.\n");
+ if (_odp_hash_init_global()) {
+ _ODP_ERR("ODP hash init failed.\n");
+ goto init_failed;
+ }
+ stage = HASH_INIT;
+
+ if (_odp_thread_init_global()) {
+ _ODP_ERR("ODP thread init failed.\n");
goto init_failed;
}
stage = THREAD_INIT;
- if (odp_pool_init_global()) {
- ODP_ERR("ODP pool init failed.\n");
+ if (_odp_pool_init_global()) {
+ _ODP_ERR("ODP pool init failed.\n");
goto init_failed;
}
stage = POOL_INIT;
- if (odp_queue_init_global()) {
- ODP_ERR("ODP queue init failed.\n");
+ if (_odp_event_validation_init_global()) {
+ _ODP_ERR("ODP event validation init failed.\n");
+ goto init_failed;
+ }
+ stage = EVENT_VALIDATION_INIT;
+
+ if (_odp_stash_init_global()) {
+ _ODP_ERR("ODP stash init failed.\n");
+ goto init_failed;
+ }
+ stage = STASH_INIT;
+
+ if (_odp_queue_init_global()) {
+ _ODP_ERR("ODP queue init failed.\n");
goto init_failed;
}
stage = QUEUE_INIT;
- if (sched_fn->init_global()) {
- ODP_ERR("ODP schedule init failed.\n");
+ if (_odp_schedule_init_global()) {
+ _ODP_ERR("ODP schedule init failed.\n");
goto init_failed;
}
stage = SCHED_INIT;
- if (odp_pktio_init_global()) {
- ODP_ERR("ODP packet io init failed.\n");
+ if (_odp_pktio_init_global()) {
+ _ODP_ERR("ODP packet io init failed.\n");
goto init_failed;
}
stage = PKTIO_INIT;
- if (odp_timer_init_global()) {
- ODP_ERR("ODP timer init failed.\n");
+ if (_odp_timer_init_global(params)) {
+ _ODP_ERR("ODP timer init failed.\n");
goto init_failed;
}
stage = TIMER_INIT;
- if (odp_crypto_init_global()) {
- ODP_ERR("ODP crypto init failed.\n");
+ if (_odp_crypto_init_global()) {
+ _ODP_ERR("ODP crypto init failed.\n");
goto init_failed;
}
stage = CRYPTO_INIT;
- if (odp_classification_init_global()) {
- ODP_ERR("ODP classification init failed.\n");
+ if (_odp_comp_init_global()) {
+ _ODP_ERR("ODP comp init failed.\n");
+ goto init_failed;
+ }
+ stage = COMP_INIT;
+
+ if (_odp_classification_init_global()) {
+ _ODP_ERR("ODP classification init failed.\n");
goto init_failed;
}
stage = CLASSIFICATION_INIT;
- if (odp_tm_init_global()) {
- ODP_ERR("ODP traffic manager init failed\n");
+ if (_odp_tm_init_global()) {
+ _ODP_ERR("ODP traffic manager init failed\n");
goto init_failed;
}
stage = TRAFFIC_MNGR_INIT;
if (_odp_int_name_tbl_init_global()) {
- ODP_ERR("ODP name table init failed\n");
+ _ODP_ERR("ODP name table init failed\n");
+ goto init_failed;
+ }
+ stage = NAME_TABLE_INIT;
+
+ if (_odp_ipsec_events_init_global()) {
+ _ODP_ERR("ODP IPsec events init failed.\n");
goto init_failed;
}
+ stage = IPSEC_EVENTS_INIT;
- *instance = (odp_instance_t)odp_global_data.main_pid;
+ if (_odp_ipsec_sad_init_global()) {
+ _ODP_ERR("ODP IPsec SAD init failed.\n");
+ goto init_failed;
+ }
+ stage = IPSEC_SAD_INIT;
+
+ if (_odp_ipsec_init_global()) {
+ _ODP_ERR("ODP IPsec init failed.\n");
+ goto init_failed;
+ }
+ stage = IPSEC_INIT;
+
+ if (_odp_dma_init_global()) {
+ _ODP_ERR("ODP DMA init failed.\n");
+ goto init_failed;
+ }
+ stage = DMA_INIT;
+
+ if (_odp_ml_init_global()) {
+ _ODP_ERR("ODP ML init failed.\n");
+ goto init_failed;
+ }
+ stage = ML_INIT;
+
+ *instance = (odp_instance_t)odp_global_ro.main_pid;
return 0;
init_failed:
- _odp_term_global(stage);
+ term_global(stage);
return -1;
}
int odp_term_global(odp_instance_t instance)
{
- if (instance != (odp_instance_t)odp_global_data.main_pid) {
- ODP_ERR("Bad instance.\n");
+ if (instance != (odp_instance_t)odp_global_ro.main_pid) {
+ _ODP_ERR("Bad instance.\n");
return -1;
}
- return _odp_term_global(ALL_INIT);
+ return term_global(ALL_INIT);
}
-int _odp_term_global(enum init_stage stage)
+static int term_local(enum init_stage stage)
{
int rc = 0;
+ int rc_thd = 0;
switch (stage) {
case ALL_INIT:
- case NAME_TABLE_INIT:
- if (_odp_int_name_tbl_term_global()) {
- ODP_ERR("Name table term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case TRAFFIC_MNGR_INIT:
- if (odp_tm_term_global()) {
- ODP_ERR("TM term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case CLASSIFICATION_INIT:
- if (odp_classification_term_global()) {
- ODP_ERR("ODP classification term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case CRYPTO_INIT:
- if (odp_crypto_term_global()) {
- ODP_ERR("ODP crypto term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case TIMER_INIT:
- if (odp_timer_term_global()) {
- ODP_ERR("ODP timer term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case PKTIO_INIT:
- if (odp_pktio_term_global()) {
- ODP_ERR("ODP pktio term failed.\n");
- rc = -1;
- }
- /* Fall through */
case SCHED_INIT:
- if (sched_fn->term_global()) {
- ODP_ERR("ODP schedule term failed.\n");
+ if (_odp_sched_fn->term_local()) {
+ _ODP_ERR("ODP schedule local term failed.\n");
rc = -1;
}
/* Fall through */
case QUEUE_INIT:
- if (odp_queue_term_global()) {
- ODP_ERR("ODP queue term failed.\n");
+ if (_odp_queue_fn->term_local()) {
+ _ODP_ERR("ODP queue local term failed.\n");
rc = -1;
}
/* Fall through */
case POOL_INIT:
- if (odp_pool_term_global()) {
- ODP_ERR("ODP buffer pool term failed.\n");
+ if (_odp_pool_term_local()) {
+ _ODP_ERR("ODP buffer pool local term failed.\n");
rc = -1;
}
/* Fall through */
- case THREAD_INIT:
- if (odp_thread_term_global()) {
- ODP_ERR("ODP thread term failed.\n");
- rc = -1;
- }
- /* Fall through */
-
- case ISHM_INIT:
- if (_odp_ishm_term_global()) {
- ODP_ERR("ODP ishm term failed.\n");
+ case CRYPTO_INIT:
+ if (_odp_crypto_term_local()) {
+ _ODP_ERR("ODP crypto local term failed.\n");
rc = -1;
}
/* Fall through */
- case FDSERVER_INIT:
- if (_odp_fdserver_term_global()) {
- ODP_ERR("ODP fdserver term failed.\n");
+ case RANDOM_INIT:
+ if (_odp_random_term_local()) {
+ _ODP_ERR("ODP random local term failed.\n");
rc = -1;
}
/* Fall through */
- case SYSINFO_INIT:
- if (odp_system_info_term()) {
- ODP_ERR("ODP system info term failed.\n");
+ case TIMER_INIT:
+ if (_odp_timer_term_local()) {
+ _ODP_ERR("ODP timer local term failed.\n");
rc = -1;
}
/* Fall through */
- case TIME_INIT:
- if (odp_time_term_global()) {
- ODP_ERR("ODP time term failed.\n");
+ case THREAD_INIT:
+ rc_thd = _odp_thread_term_local();
+ if (rc_thd < 0) {
+ _ODP_ERR("ODP thread local term failed.\n");
rc = -1;
+ } else {
+ if (!rc)
+ rc = (rc_thd == 0) ? 0 : 1;
}
/* Fall through */
- case CPUMASK_INIT:
- if (odp_cpumask_term_global()) {
- ODP_ERR("ODP cpumask term failed.\n");
+ case ISHM_INIT:
+ if (_odp_ishm_term_local()) {
+ _ODP_ERR("ODP ishm local term failed.\n");
rc = -1;
}
/* Fall through */
- case NO_INIT:
- ;
+ default:
+ break;
}
return rc;
@@ -320,37 +621,68 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
{
enum init_stage stage = NO_INIT;
- if (instance != (odp_instance_t)odp_global_data.main_pid) {
- ODP_ERR("Bad instance.\n");
+ if (instance != (odp_instance_t)odp_global_ro.main_pid) {
+ _ODP_ERR("Bad instance.\n");
goto init_fail;
}
+ /* Detect if odp_init_local() has been already called from this thread */
+ if (getpid() == odp_global_ro.main_pid && init_local_called) {
+ _ODP_ERR("%s() called multiple times by the same thread\n", __func__);
+ goto init_fail;
+ }
+ init_local_called = 1;
+
if (_odp_ishm_init_local()) {
- ODP_ERR("ODP ishm local init failed.\n");
+ _ODP_ERR("ODP ishm local init failed.\n");
goto init_fail;
}
stage = ISHM_INIT;
- if (odp_thread_init_local(thr_type)) {
- ODP_ERR("ODP thread local init failed.\n");
+ if (_odp_thread_init_local(thr_type)) {
+ _ODP_ERR("ODP thread local init failed.\n");
goto init_fail;
}
stage = THREAD_INIT;
- if (odp_pktio_init_local()) {
- ODP_ERR("ODP packet io local init failed.\n");
+ if (_odp_pktio_init_local()) {
+ _ODP_ERR("ODP packet io local init failed.\n");
goto init_fail;
}
stage = PKTIO_INIT;
- if (odp_pool_init_local()) {
- ODP_ERR("ODP pool local init failed.\n");
+ if (_odp_timer_init_local()) {
+ _ODP_ERR("ODP timer local init failed.\n");
+ goto init_fail;
+ }
+ stage = TIMER_INIT;
+
+ if (_odp_random_init_local()) {
+ _ODP_ERR("ODP random local init failed.\n");
+ goto init_fail;
+ }
+ stage = RANDOM_INIT;
+
+ if (_odp_crypto_init_local()) {
+ _ODP_ERR("ODP crypto local init failed.\n");
+ goto init_fail;
+ }
+ stage = CRYPTO_INIT;
+
+ if (_odp_pool_init_local()) {
+ _ODP_ERR("ODP pool local init failed.\n");
goto init_fail;
}
stage = POOL_INIT;
- if (sched_fn->init_local()) {
- ODP_ERR("ODP schedule local init failed.\n");
+ if (_odp_queue_fn->init_local()) {
+ _ODP_ERR("ODP queue local init failed.\n");
+ goto init_fail;
+ }
+ stage = QUEUE_INIT;
+
+ if (_odp_sched_fn->init_local()) {
+ _ODP_ERR("ODP schedule local init failed.\n");
goto init_fail;
}
/* stage = SCHED_INIT; */
@@ -358,58 +690,50 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
return 0;
init_fail:
- _odp_term_local(stage);
+ term_local(stage);
return -1;
}
int odp_term_local(void)
{
- return _odp_term_local(ALL_INIT);
+ /* Check that odp_init_local() has been called by this thread */
+ if (!init_local_called) {
+ _ODP_ERR("%s() called by a non-initialized thread\n", __func__);
+ return -1;
+ }
+ init_local_called = 0;
+
+ return term_local(ALL_INIT);
}
-int _odp_term_local(enum init_stage stage)
+int odp_term_abnormal(odp_instance_t instance, uint64_t flags, void *data ODP_UNUSED)
{
- int rc = 0;
- int rc_thd = 0;
-
- switch (stage) {
- case ALL_INIT:
+ if (flags & ODP_TERM_FROM_SIGH)
+ /* Called from signal handler, not safe to terminate with local/global,
+ * return with failure as not able to perform all actions */
+ return -1;
- case SCHED_INIT:
- if (sched_fn->term_local()) {
- ODP_ERR("ODP schedule local term failed.\n");
- rc = -1;
- }
- /* Fall through */
+ if (odp_term_local() < 0) {
+ _ODP_ERR("ODP local terminate failed.\n");
+ return -2;
+ }
- case POOL_INIT:
- if (odp_pool_term_local()) {
- ODP_ERR("ODP buffer pool local term failed.\n");
- rc = -1;
- }
- /* Fall through */
+ if (odp_term_global(instance) < 0) {
+ _ODP_ERR("ODP global terminate failed.\n");
+ return -3;
+ }
- case THREAD_INIT:
- rc_thd = odp_thread_term_local();
- if (rc_thd < 0) {
- ODP_ERR("ODP thread local term failed.\n");
- rc = -1;
- } else {
- if (!rc)
- rc = rc_thd;
- }
- /* Fall through */
+ return 0;
+}
- case ISHM_INIT:
- if (_odp_ishm_term_local()) {
- ODP_ERR("ODP ishm local term failed.\n");
- rc = -1;
- }
- /* Fall through */
+void odp_log_thread_fn_set(odp_log_func_t func)
+{
+ _odp_this_thread->log_fn = func;
+}
- default:
- break;
- }
+int odp_instance(odp_instance_t *instance)
+{
+ *instance = (odp_instance_t)odp_global_ro.main_pid;
- return rc;
+ return 0;
}
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
new file mode 100644
index 000000000..ee402b935
--- /dev/null
+++ b/platform/linux-generic/odp_ipsec.c
@@ -0,0 +1,2725 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2018-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/byteorder.h>
+#include <odp/api/ipsec.h>
+#include <odp/api/chksum.h>
+
+#include <odp/api/plat/byteorder_inlines.h>
+#include <odp/api/plat/ipsec_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_ipsec_internal.h>
+#include <odp_classification_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_schedule_if.h>
+
+#include <protocols/eth.h>
+#include <protocols/ip.h>
+#include <protocols/ipsec.h>
+#include <protocols/udp.h>
+
+#include <errno.h>
+#include <string.h>
+
+typedef enum {
+ IPSEC_ORDERING_NONE = 0,
+ IPSEC_ORDERING_SIMPLE,
+} ordering_mode_t;
+
+typedef struct {
+ ordering_mode_t inbound_ordering_mode;
+ ordering_mode_t outbound_ordering_mode;
+ odp_ipsec_config_t ipsec_config;
+} ipsec_global_t;
+
+static ipsec_global_t *ipsec_global;
+
+static odp_ipsec_config_t *ipsec_config;
+
+/*
+ * Wait until the ordered scheduling context of this thread corresponds
+ * to the head of its input queue. Do nothing if ordering is not requested
+ * or if not holding an ordered context.
+ */
+static void wait_for_order(ordering_mode_t mode)
+{
+ if (mode == IPSEC_ORDERING_NONE)
+ return;
+ _odp_sched_fn->order_lock();
+ /*
+ * We rely on the unlock being no-op, so let's not even bother
+ * calling it. Unlock cannot really be anything but a no-op since
+ * the scheduler cannot let other threads continue until at
+ * scheduling context release time.
+ *
+ * _odp_sched_fn->order_unlock();
+ */
+}
+
+/*
+ * Set capability bits for algorithms that are defined for use with IPsec
+ * and for which the IPsec crypto or auth capability function returns
+ * at least one supported instance.
+ */
+static int set_ipsec_crypto_capa(odp_ipsec_capability_t *capa)
+{
+ odp_crypto_capability_t crypto_capa;
+
+ crypto_capa.ciphers.all_bits = 0;
+ crypto_capa.auths.all_bits = 0;
+
+ if (odp_crypto_capability(&crypto_capa))
+ return -1;
+
+#define CHECK_CIPHER(field, alg) do { \
+ if (crypto_capa.ciphers.bit.field && \
+ odp_ipsec_cipher_capability(alg, NULL, 0) > 0) \
+ capa->ciphers.bit.field = 1; \
+} while (0)
+
+ CHECK_CIPHER(null, ODP_CIPHER_ALG_NULL);
+ CHECK_CIPHER(des, ODP_CIPHER_ALG_DES);
+ CHECK_CIPHER(trides_cbc, ODP_CIPHER_ALG_3DES_CBC);
+ CHECK_CIPHER(aes_cbc, ODP_CIPHER_ALG_AES_CBC);
+ CHECK_CIPHER(aes_ctr, ODP_CIPHER_ALG_AES_CTR);
+ CHECK_CIPHER(aes_gcm, ODP_CIPHER_ALG_AES_GCM);
+ CHECK_CIPHER(aes_ccm, ODP_CIPHER_ALG_AES_CCM);
+ CHECK_CIPHER(chacha20_poly1305, ODP_CIPHER_ALG_CHACHA20_POLY1305);
+
+#define CHECK_AUTH(field, alg) do { \
+ if (crypto_capa.auths.bit.field && \
+ odp_ipsec_auth_capability(alg, NULL, 0) > 0) \
+ capa->auths.bit.field = 1; \
+} while (0)
+
+ CHECK_AUTH(null, ODP_AUTH_ALG_NULL);
+ CHECK_AUTH(md5_hmac, ODP_AUTH_ALG_MD5_HMAC);
+ CHECK_AUTH(sha1_hmac, ODP_AUTH_ALG_SHA1_HMAC);
+ CHECK_AUTH(sha256_hmac, ODP_AUTH_ALG_SHA256_HMAC);
+ CHECK_AUTH(sha384_hmac, ODP_AUTH_ALG_SHA384_HMAC);
+ CHECK_AUTH(sha512_hmac, ODP_AUTH_ALG_SHA512_HMAC);
+ CHECK_AUTH(aes_gcm, ODP_AUTH_ALG_AES_GCM);
+ CHECK_AUTH(aes_gmac, ODP_AUTH_ALG_AES_GMAC);
+ CHECK_AUTH(aes_ccm, ODP_AUTH_ALG_AES_CCM);
+ CHECK_AUTH(aes_cmac, ODP_AUTH_ALG_AES_CMAC);
+ CHECK_AUTH(aes_xcbc_mac, ODP_AUTH_ALG_AES_XCBC_MAC);
+ CHECK_AUTH(chacha20_poly1305, ODP_AUTH_ALG_CHACHA20_POLY1305);
+
+ /*
+ * Certain combined mode algorithms are configured by setting
+ * both cipher and auth to the corresponding algorithm when
+ * creating an SA. Since such algorithms cannot be combined
+ * with anything else, clear both capability fields if the
+ * cipher and auth check did not both succeed.
+ *
+ * Although AES-GMAC is a combined mode algorithm, it does
+ * not appear here because it is configured by setting cipher
+ * to null.
+ */
+#define REQUIRE_BOTH(field) do { \
+ if (!capa->ciphers.bit.field) \
+ capa->auths.bit.field = 0; \
+ if (!capa->auths.bit.field) \
+ capa->ciphers.bit.field = 0; \
+ } while (0)
+
+ REQUIRE_BOTH(aes_gcm);
+ REQUIRE_BOTH(aes_ccm);
+ REQUIRE_BOTH(chacha20_poly1305);
+
+ return 0;
+}
+
+int odp_ipsec_capability(odp_ipsec_capability_t *capa)
+{
+ int rc;
+ odp_queue_capability_t queue_capa;
+
+ if (odp_global_ro.disable.ipsec) {
+ _ODP_ERR("IPSec is disabled\n");
+ return -1;
+ }
+
+ memset(capa, 0, sizeof(odp_ipsec_capability_t));
+
+ capa->op_mode_sync = ODP_SUPPORT_PREFERRED;
+ capa->op_mode_async = ODP_SUPPORT_PREFERRED;
+ capa->op_mode_inline_in = ODP_SUPPORT_PREFERRED;
+ capa->op_mode_inline_out = ODP_SUPPORT_PREFERRED;
+
+ capa->proto_ah = ODP_SUPPORT_YES;
+
+ capa->max_num_sa = _odp_ipsec_max_num_sa();
+
+ capa->max_antireplay_ws = IPSEC_AR_WIN_SIZE_MAX;
+
+ rc = set_ipsec_crypto_capa(capa);
+ if (rc < 0)
+ return rc;
+
+ capa->queue_type_plain = true;
+ capa->queue_type_sched = true;
+
+ rc = odp_queue_capability(&queue_capa);
+ if (rc < 0)
+ return rc;
+
+ capa->max_queues = queue_capa.max_queues;
+ capa->inline_ipsec_tm = ODP_SUPPORT_NO;
+
+ capa->test.sa_operations.seq_num = 1;
+
+ capa->reassembly.ip = false;
+ capa->reassembly.ipv4 = false;
+ capa->reassembly.ipv6 = false;
+ capa->reass_async = false;
+ capa->reass_inline = false;
+
+ return 0;
+}
+
+static int cipher_requires_randomness(odp_cipher_alg_t cipher)
+{
+ int ret;
+
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ case ODP_CIPHER_ALG_AES_CTR:
+ case ODP_CIPHER_ALG_AES_GCM:
+ case ODP_CIPHER_ALG_AES_CCM:
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ ret = 0;
+ break;
+ default:
+ ret = 1;
+ break;
+ }
+ return ret;
+}
+
+int odp_ipsec_cipher_capability(odp_cipher_alg_t cipher,
+ odp_ipsec_cipher_capability_t capa[], int num)
+{
+ uint32_t req_iv_len;
+ int rc, i, out, max_capa;
+
+ if (odp_random_max_kind() < ODP_RANDOM_CRYPTO &&
+ cipher_requires_randomness(cipher))
+ return 0;
+
+ max_capa = odp_crypto_cipher_capability(cipher, NULL, 0);
+ if (max_capa <= 0)
+ return max_capa;
+
+ odp_crypto_cipher_capability_t crypto_capa[max_capa];
+
+ rc = odp_crypto_cipher_capability(cipher, crypto_capa, max_capa);
+ if (rc <= 0)
+ return rc;
+
+ req_iv_len = _odp_ipsec_cipher_iv_len(cipher);
+ for (i = 0, out = 0; i < rc; i++) {
+ if (crypto_capa[i].iv_len != req_iv_len)
+ continue;
+
+ if (out < num)
+ capa[out].key_len = crypto_capa[i].key_len;
+ out++;
+ }
+
+ return out;
+}
+
+int odp_ipsec_auth_capability(odp_auth_alg_t auth,
+ odp_ipsec_auth_capability_t capa[], int num)
+{
+ uint32_t req_digest_len;
+ int rc, i, out, max_capa;
+
+ max_capa = odp_crypto_auth_capability(auth, NULL, 0);
+ if (max_capa <= 0)
+ return max_capa;
+
+ odp_crypto_auth_capability_t crypto_capa[max_capa];
+
+ rc = odp_crypto_auth_capability(auth, crypto_capa, max_capa);
+ if (rc <= 0)
+ return rc;
+
+ req_digest_len = _odp_ipsec_auth_digest_len(auth);
+ for (i = 0, out = 0; i < rc; i++) {
+ if (crypto_capa[i].digest_len != req_digest_len)
+ continue;
+
+ if (ODP_AUTH_ALG_AES_GCM == auth ||
+ ODP_AUTH_ALG_CHACHA20_POLY1305 == auth) {
+ uint8_t aad_len = 12;
+
+ if (aad_len < crypto_capa[i].aad_len.min ||
+ aad_len > crypto_capa[i].aad_len.max ||
+ 0 != (aad_len - crypto_capa[i].aad_len.min) %
+ crypto_capa[i].aad_len.inc)
+ continue;
+ }
+
+ if (out < num) {
+ capa[out].key_len = crypto_capa[i].key_len;
+ capa[out].icv_len = crypto_capa[i].digest_len;
+ }
+ out++;
+ }
+
+ return out;
+}
+
+void odp_ipsec_config_init(odp_ipsec_config_t *config)
+{
+ memset(config, 0, sizeof(odp_ipsec_config_t));
+ config->inbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ config->outbound_mode = ODP_IPSEC_OP_MODE_SYNC;
+ config->max_num_sa = _odp_ipsec_max_num_sa();
+ config->inbound.default_queue = ODP_QUEUE_INVALID;
+ config->inbound.lookup.min_spi = 0;
+ config->inbound.lookup.max_spi = UINT32_MAX;
+ config->inbound.reassembly.max_num_frags = 2;
+ config->stats_en = false;
+}
+
+int odp_ipsec_config(const odp_ipsec_config_t *config)
+{
+ if (config->max_num_sa > _odp_ipsec_max_num_sa())
+ return -1;
+
+ *ipsec_config = *config;
+
+ return 0;
+}
+
+odp_bool_t _odp_ipsec_is_sync_mode(odp_ipsec_dir_t dir)
+{
+ return ((dir == ODP_IPSEC_DIR_INBOUND &&
+ ipsec_config->inbound_mode == ODP_IPSEC_OP_MODE_SYNC) ||
+ (dir == ODP_IPSEC_DIR_OUTBOUND &&
+ ipsec_config->outbound_mode == ODP_IPSEC_OP_MODE_SYNC));
+}
+
+static odp_ipsec_packet_result_t *ipsec_pkt_result(odp_packet_t packet)
+{
+ _ODP_ASSERT(ODP_EVENT_PACKET_IPSEC ==
+ odp_event_subtype(odp_packet_to_event(packet)));
+
+ return &packet_hdr(packet)->ipsec_ctx;
+}
+
+#define _ODP_IPV4HDR_PROTO_OFFSET ODP_OFFSETOF(_odp_ipv4hdr_t, proto)
+#define _ODP_IPV6HDR_NHDR_OFFSET ODP_OFFSETOF(_odp_ipv6hdr_t, next_hdr)
+#define _ODP_IPV6HDREXT_NHDR_OFFSET ODP_OFFSETOF(_odp_ipv6hdr_ext_t, next_hdr)
+
+#define ipv4_hdr_len(ip) (_ODP_IPV4HDR_IHL((ip)->ver_ihl) * 4)
+
+static const uint8_t ipsec_padding[255] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
+ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
+ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
+ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
+ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
+ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
+ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
+ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
+ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
+ 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
+ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
+ 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
+ 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
+ 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
+ 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
+ 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
+ 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
+ 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
+ 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
+};
+
+typedef struct {
+ void *ip;
+ unsigned stats_length;
+ uint16_t ip_offset;
+ uint16_t ip_hdr_len;
+ uint16_t ip_tot_len;
+ uint16_t ip_next_hdr_offset;
+ uint8_t ip_next_hdr;
+ unsigned is_ipv4 : 1;
+ unsigned is_ipv6 : 1;
+ union {
+ struct {
+ uint32_t ip_flabel;
+ uint16_t ip_df;
+ uint8_t ip_tos;
+ } out_tunnel;
+ struct {
+ uint16_t hdr_len;
+ uint16_t trl_len;
+ uint64_t seq_no;
+ } in;
+ odp_u32be_t ipv4_addr;
+ uint8_t ipv6_addr[_ODP_IPV6ADDR_LEN];
+ };
+ union {
+ struct {
+ uint8_t tos;
+ uint8_t ttl;
+ odp_u16be_t frag_offset;
+ } ah_ipv4;
+ struct {
+ odp_u32be_t ver_tc_flow;
+ uint8_t hop_limit;
+ } ah_ipv6;
+ struct {
+ ipsec_aad_t aad;
+ } esp;
+ };
+ uint8_t iv[IPSEC_MAX_IV_LEN];
+} ipsec_state_t;
+
+#define MAX_BURST 32
+
+typedef struct {
+ ipsec_state_t state;
+ odp_ipsec_op_status_t status;
+ ipsec_sa_t *sa;
+ odp_ipsec_sa_t sa_hdl;
+ uint32_t orig_ip_len;
+} ipsec_op_t;
+
+#define MAX_HDR_LEN 100 /* Enough for VxLAN over IPv6 */
+
+typedef struct {
+ ipsec_op_t op;
+ uint8_t hdr_buf[MAX_HDR_LEN];
+} ipsec_inline_op_t;
+
+/*
+ * Computes 64-bit seq number according to RFC4303 A2
+ */
+static inline uint64_t ipsec_compute_esn(ipsec_sa_t *ipsec_sa, uint32_t seq)
+{
+ uint32_t wintop_h, wintop_l, winbot_l, ws;
+ uint64_t seq64 = 0, wintop = 0;
+
+ wintop = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
+ wintop_l = wintop & 0xffffffff;
+ wintop_h = wintop >> 32;
+
+ ws = ipsec_sa->in.ar.win_size;
+ winbot_l = wintop_l - ws + 1;
+
+ /* case A: window is within one sequence number subspace */
+ if (wintop_l >= (ws - 1)) {
+ if (seq < winbot_l)
+ wintop_h++;
+ /* case B: window spans two sequence number subspaces */
+ } else {
+ if (seq >= winbot_l)
+ wintop_h--;
+ }
+
+ seq64 = ((uint64_t)wintop_h << 32) | seq;
+ return seq64;
+}
+
+static inline uint32_t ipsec_get_seqh_len(ipsec_sa_t *ipsec_sa)
+{
+ return ipsec_sa->insert_seq_hi * IPSEC_SEQ_HI_LEN;
+}
+
+static int ipsec_parse_ipv4(ipsec_state_t *state, odp_packet_t pkt)
+{
+ _odp_ipv4hdr_t ipv4hdr;
+
+ odp_packet_copy_to_mem(pkt, state->ip_offset,
+ _ODP_IPV4HDR_LEN, &ipv4hdr);
+
+ if (_ODP_IPV4HDR_IS_FRAGMENT(odp_be_to_cpu_16(ipv4hdr.frag_offset)))
+ return -1;
+
+ state->ip_hdr_len = ipv4_hdr_len(&ipv4hdr);
+ state->ip_tot_len = odp_be_to_cpu_16(ipv4hdr.tot_len);
+ state->ip_next_hdr = ipv4hdr.proto;
+ state->ip_next_hdr_offset = state->ip_offset +
+ _ODP_IPV4HDR_PROTO_OFFSET;
+ state->ipv4_addr = ipv4hdr.dst_addr;
+
+ return 0;
+}
+
+static int ipsec_parse_ipv6(ipsec_state_t *state, odp_packet_t pkt)
+{
+ _odp_ipv6hdr_t ipv6hdr;
+ _odp_ipv6hdr_ext_t ipv6hdrext;
+
+ odp_packet_copy_to_mem(pkt, state->ip_offset,
+ _ODP_IPV6HDR_LEN, &ipv6hdr);
+
+ state->ip_hdr_len = _ODP_IPV6HDR_LEN;
+ state->ip_next_hdr = ipv6hdr.next_hdr;
+ state->ip_next_hdr_offset = state->ip_offset + _ODP_IPV6HDR_NHDR_OFFSET;
+ /* FIXME: Jumbo frames */
+ state->ip_tot_len = odp_be_to_cpu_16(ipv6hdr.payload_len) +
+ _ODP_IPV6HDR_LEN;
+ memcpy(state->ipv6_addr, &ipv6hdr.dst_addr, _ODP_IPV6ADDR_LEN);
+
+ while (state->ip_next_hdr == _ODP_IPPROTO_HOPOPTS ||
+ state->ip_next_hdr == _ODP_IPPROTO_DEST ||
+ state->ip_next_hdr == _ODP_IPPROTO_ROUTE) {
+ odp_packet_copy_to_mem(pkt,
+ state->ip_offset + state->ip_hdr_len,
+ sizeof(ipv6hdrext),
+ &ipv6hdrext);
+ state->ip_next_hdr = ipv6hdrext.next_hdr;
+ state->ip_next_hdr_offset = state->ip_offset +
+ state->ip_hdr_len +
+ _ODP_IPV6HDREXT_NHDR_OFFSET;
+ state->ip_hdr_len += (ipv6hdrext.ext_len + 1) * 8;
+ }
+
+ if (_ODP_IPPROTO_FRAG == state->ip_next_hdr)
+ return -1;
+
+ return 0;
+}
+
+static inline ipsec_sa_t *ipsec_get_sa(odp_ipsec_sa_t sa,
+ odp_ipsec_protocol_t proto,
+ uint32_t spi,
+ odp_ipsec_ip_version_t ver,
+ void *dst_addr,
+ odp_ipsec_op_status_t *status)
+{
+ ipsec_sa_t *ipsec_sa;
+
+ if (ODP_IPSEC_SA_INVALID == sa) {
+ ipsec_sa_lookup_t lookup;
+
+ lookup.proto = proto;
+ lookup.spi = spi;
+ lookup.ver = ver;
+ lookup.dst_addr = dst_addr;
+
+ ipsec_sa = _odp_ipsec_sa_lookup(&lookup);
+ if (NULL == ipsec_sa) {
+ status->error.sa_lookup = 1;
+ return NULL;
+ }
+ } else {
+ ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+ _ODP_ASSERT(NULL != ipsec_sa);
+ if (ipsec_sa->proto != proto ||
+ ipsec_sa->spi != spi) {
+ status->error.proto = 1;
+ return ipsec_sa;
+ }
+ }
+
+ return ipsec_sa;
+}
+
+static int ipsec_in_iv(odp_packet_t pkt,
+ ipsec_state_t *state,
+ ipsec_sa_t *ipsec_sa,
+ uint16_t iv_offset)
+{
+ if (ipsec_sa->salt_length > 0) {
+ /* It is faster to just copy MAX_SALT_LEN bytes than the exact length */
+ ODP_STATIC_ASSERT(IPSEC_MAX_SALT_LEN <= IPSEC_MAX_IV_LEN,
+ "IPSEC_MAX_SALT_LEN too large");
+ memcpy(state->iv, ipsec_sa->salt, IPSEC_MAX_SALT_LEN);
+ }
+ _ODP_ASSERT(ipsec_sa->salt_length + ipsec_sa->esp_iv_len <= IPSEC_MAX_IV_LEN);
+ if (odp_packet_copy_to_mem(pkt,
+ iv_offset,
+ ipsec_sa->esp_iv_len,
+ state->iv + ipsec_sa->salt_length) < 0)
+ return -1;
+
+ if (ipsec_sa->aes_ctr_iv) {
+ ODP_STATIC_ASSERT(IPSEC_MAX_IV_LEN >= 16, "IPSEC_MAX_IV_LEN too small");
+ state->iv[12] = 0;
+ state->iv[13] = 0;
+ state->iv[14] = 0;
+ state->iv[15] = 1;
+ }
+
+ return 0;
+}
+
+static int ipsec_in_esp(odp_packet_t *pkt,
+ ipsec_state_t *state,
+ ipsec_sa_t **_ipsec_sa,
+ odp_ipsec_sa_t sa,
+ odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status)
+{
+ _odp_esphdr_t esp;
+ uint16_t ipsec_offset;
+ ipsec_sa_t *ipsec_sa;
+ odp_bool_t udp_encap = false;
+
+ ipsec_offset = state->ip_offset + state->ip_hdr_len;
+
+ if (_ODP_IPPROTO_UDP == state->ip_next_hdr) {
+ _odp_udphdr_t udp;
+ uint16_t ip_data_len = state->ip_tot_len -
+ state->ip_hdr_len;
+
+ odp_packet_copy_to_mem(*pkt, ipsec_offset,
+ _ODP_UDPHDR_LEN, &udp);
+
+ if (udp.dst_port != odp_cpu_to_be_16(_ODP_UDP_IPSEC_PORT) ||
+ udp.length != odp_cpu_to_be_16(ip_data_len)) {
+ status->error.proto = 1;
+ return -1;
+ }
+
+ ipsec_offset += _ODP_UDPHDR_LEN;
+ state->ip_hdr_len += _ODP_UDPHDR_LEN;
+ udp_encap = true;
+ }
+
+ if (odp_packet_copy_to_mem(*pkt, ipsec_offset,
+ sizeof(esp), &esp) < 0) {
+ status->error.alg = 1;
+ return -1;
+ }
+
+ ipsec_sa = ipsec_get_sa(sa, ODP_IPSEC_ESP,
+ odp_be_to_cpu_32(esp.spi),
+ state->is_ipv4 ? ODP_IPSEC_IPV4 :
+ ODP_IPSEC_IPV6,
+ &state->ipv4_addr, status);
+ *_ipsec_sa = ipsec_sa;
+ if (status->error.all)
+ return -1;
+
+ if (!!ipsec_sa->udp_encap != udp_encap) {
+ status->error.proto = 1;
+ return -1;
+ }
+
+ if (ipsec_in_iv(*pkt, state, ipsec_sa,
+ ipsec_offset + _ODP_ESPHDR_LEN) < 0) {
+ status->error.alg = 1;
+ return -1;
+ }
+
+ state->in.hdr_len = _ODP_ESPHDR_LEN + ipsec_sa->esp_iv_len;
+ state->in.trl_len = _ODP_ESPTRL_LEN + ipsec_sa->icv_len;
+
+ if (odp_unlikely(state->ip_tot_len <
+ state->ip_hdr_len + state->in.hdr_len + ipsec_sa->icv_len)) {
+ status->error.proto = 1;
+ return -1;
+ }
+
+ param->cipher_range.offset = ipsec_offset + state->in.hdr_len;
+ param->cipher_range.length = state->ip_tot_len -
+ state->ip_hdr_len -
+ state->in.hdr_len -
+ ipsec_sa->icv_len;
+ param->cipher_iv_ptr = state->iv;
+ param->auth_iv_ptr = state->iv;
+
+ state->esp.aad.spi = esp.spi;
+ state->in.seq_no = odp_be_to_cpu_32(esp.seq_no);
+
+ if (ipsec_sa->esn) {
+ state->in.seq_no = ipsec_compute_esn(ipsec_sa, state->in.seq_no);
+ state->esp.aad.seq_no64 = odp_cpu_to_be_64(state->in.seq_no);
+ } else {
+ state->esp.aad.seq_no = esp.seq_no;
+ }
+ param->aad_ptr = (uint8_t *)&state->esp.aad;
+
+ /* Insert high-order bits of ESN before the ICV for ICV check
+ * with non-combined mode algorithms.
+ */
+ if (ipsec_sa->insert_seq_hi) {
+ uint32_t inb_seqh = odp_cpu_to_be_32(state->in.seq_no >> 32);
+ uint32_t icv_offset = odp_packet_len(*pkt) - ipsec_sa->icv_len;
+
+ if (odp_packet_extend_tail(pkt, IPSEC_SEQ_HI_LEN, NULL, NULL) < 0) {
+ status->error.alg = 1;
+ _ODP_ERR("odp_packet_extend_tail failed\n");
+ return -1;
+ }
+ odp_packet_move_data(*pkt, icv_offset + IPSEC_SEQ_HI_LEN, icv_offset,
+ ipsec_sa->icv_len);
+ odp_packet_copy_from_mem(*pkt, icv_offset, IPSEC_SEQ_HI_LEN, &inb_seqh);
+ }
+
+ param->auth_range.offset = ipsec_offset;
+ param->auth_range.length = state->ip_tot_len -
+ state->ip_hdr_len +
+ ipsec_get_seqh_len(ipsec_sa) -
+ ipsec_sa->icv_len;
+ param->hash_result_offset = state->ip_offset +
+ state->ip_tot_len +
+ ipsec_get_seqh_len(ipsec_sa) -
+ ipsec_sa->icv_len;
+
+ state->stats_length = param->cipher_range.length;
+ param->session = ipsec_sa->session;
+
+ return 0;
+}
+
+static int ipsec_in_esp_post(odp_packet_t pkt,
+ ipsec_state_t *state)
+{
+ _odp_esptrl_t esptrl;
+ uint32_t esptrl_offset = state->ip_offset +
+ state->ip_tot_len -
+ state->in.trl_len;
+
+ if (odp_packet_copy_to_mem(pkt, esptrl_offset,
+ sizeof(esptrl), &esptrl) < 0 ||
+ state->ip_offset + esptrl.pad_len > esptrl_offset ||
+ _odp_packet_cmp_data(pkt, esptrl_offset - esptrl.pad_len,
+ ipsec_padding, esptrl.pad_len) != 0)
+ return -1;
+
+ if (_ODP_IPPROTO_UDP == state->ip_next_hdr) {
+ state->ip_hdr_len -= _ODP_UDPHDR_LEN;
+ state->in.hdr_len += _ODP_UDPHDR_LEN;
+ }
+
+ odp_packet_copy_from_mem(pkt, state->ip_next_hdr_offset,
+ 1, &esptrl.next_header);
+ state->in.trl_len += esptrl.pad_len;
+ state->ip_next_hdr = esptrl.next_header;
+
+ return 0;
+}
+
+static int ipsec_in_ah(odp_packet_t *pkt,
+ ipsec_state_t *state,
+ ipsec_sa_t **_ipsec_sa,
+ odp_ipsec_sa_t sa,
+ odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status)
+{
+ _odp_ahhdr_t ah;
+ uint16_t ipsec_offset;
+ ipsec_sa_t *ipsec_sa;
+
+ ipsec_offset = state->ip_offset + state->ip_hdr_len;
+
+ if (odp_packet_copy_to_mem(*pkt, ipsec_offset,
+ sizeof(ah), &ah) < 0) {
+ status->error.alg = 1;
+ return -1;
+ }
+
+ ipsec_sa = ipsec_get_sa(sa, ODP_IPSEC_AH,
+ odp_be_to_cpu_32(ah.spi),
+ state->is_ipv4 ? ODP_IPSEC_IPV4 :
+ ODP_IPSEC_IPV6,
+ &state->ipv4_addr, status);
+ *_ipsec_sa = ipsec_sa;
+ if (status->error.all)
+ return -1;
+
+ if (ipsec_in_iv(*pkt, state, ipsec_sa,
+ ipsec_offset + _ODP_AHHDR_LEN) < 0) {
+ status->error.alg = 1;
+ return -1;
+ }
+
+ param->auth_iv_ptr = state->iv;
+
+ state->in.hdr_len = (ah.ah_len + 2) * 4;
+ state->in.trl_len = 0;
+
+ if (state->is_ipv4) {
+ _odp_ipv4hdr_t *ipv4hdr = state->ip;
+
+ /* Save everything to context */
+ state->ah_ipv4.tos = ipv4hdr->tos;
+ state->ah_ipv4.frag_offset = ipv4hdr->frag_offset;
+ state->ah_ipv4.ttl = ipv4hdr->ttl;
+
+ /* FIXME: zero copy of header, passing it to crypto! */
+ /*
+	 * If authenticating, zero the mutable fields and build the request
+ */
+ ipv4hdr->chksum = 0;
+ ipv4hdr->tos = 0;
+ ipv4hdr->frag_offset = 0;
+ ipv4hdr->ttl = 0;
+ } else {
+ _odp_ipv6hdr_t *ipv6hdr = state->ip;
+
+ state->ah_ipv6.ver_tc_flow = ipv6hdr->ver_tc_flow;
+ state->ah_ipv6.hop_limit = ipv6hdr->hop_limit;
+ ipv6hdr->ver_tc_flow =
+ odp_cpu_to_be_32(6 << _ODP_IPV6HDR_VERSION_SHIFT);
+ ipv6hdr->hop_limit = 0;
+ }
+
+ state->in.seq_no = odp_be_to_cpu_32(ah.seq_no);
+ if (ipsec_sa->esn)
+ state->in.seq_no = ipsec_compute_esn(ipsec_sa, state->in.seq_no);
+
+ /* ESN higher 32 bits are included at the end of the packet data
+ * for inbound ICV computation.
+ */
+ if (ipsec_sa->insert_seq_hi) {
+ uint32_t inb_seqh = odp_cpu_to_be_32(state->in.seq_no >> 32);
+ uint32_t seqh_offset = odp_packet_len(*pkt);
+
+ if (odp_packet_extend_tail(pkt, IPSEC_SEQ_HI_LEN, NULL, NULL) < 0) {
+ status->error.alg = 1;
+ _ODP_ERR("odp_packet_extend_tail failed\n");
+ return -1;
+ }
+ odp_packet_copy_from_mem(*pkt, seqh_offset, IPSEC_SEQ_HI_LEN, &inb_seqh);
+ }
+
+ param->auth_range.offset = state->ip_offset;
+ param->auth_range.length = state->ip_tot_len;
+ param->hash_result_offset = ipsec_offset + _ODP_AHHDR_LEN +
+ ipsec_sa->esp_iv_len;
+
+ state->stats_length = param->auth_range.length;
+ param->auth_range.length += ipsec_get_seqh_len(ipsec_sa);
+ param->session = ipsec_sa->session;
+
+ return 0;
+}
+
+static int ipsec_in_ah_post(odp_packet_t pkt,
+ ipsec_state_t *state)
+{
+ _odp_ahhdr_t ah;
+ uint16_t ipsec_offset;
+
+ ipsec_offset = state->ip_offset + state->ip_hdr_len;
+
+ if (odp_packet_copy_to_mem(pkt, ipsec_offset,
+ sizeof(ah), &ah) < 0)
+ return -1;
+
+ odp_packet_copy_from_mem(pkt, state->ip_next_hdr_offset,
+ 1, &ah.next_header);
+
+ /* Restore mutable fields */
+ if (state->is_ipv4) {
+ _odp_ipv4hdr_t *ipv4hdr = state->ip;
+
+ ipv4hdr->ttl = state->ah_ipv4.ttl;
+ ipv4hdr->tos = state->ah_ipv4.tos;
+ ipv4hdr->frag_offset = state->ah_ipv4.frag_offset;
+ } else {
+ _odp_ipv6hdr_t *ipv6hdr = odp_packet_l3_ptr(pkt, NULL);
+
+ ipv6hdr->ver_tc_flow = state->ah_ipv6.ver_tc_flow;
+ ipv6hdr->hop_limit = state->ah_ipv6.hop_limit;
+ }
+ state->ip_next_hdr = ah.next_header;
+
+ return 0;
+}
+
+static void
+ipsec_sa_err_stats_update(ipsec_sa_t *sa, odp_ipsec_op_status_t *status)
+{
+ odp_ipsec_op_status_t err_status;
+
+ if (odp_likely(ODP_IPSEC_OK == status->error.all))
+ return;
+
+ if (NULL == sa)
+ return;
+
+ err_status = *status;
+
+ if (err_status.error.proto)
+ odp_atomic_inc_u64(&sa->stats.proto_err);
+
+ if (err_status.error.auth)
+ odp_atomic_inc_u64(&sa->stats.auth_err);
+
+ if (err_status.error.antireplay)
+ odp_atomic_inc_u64(&sa->stats.antireplay_err);
+
+ if (err_status.error.alg)
+ odp_atomic_inc_u64(&sa->stats.alg_err);
+
+ if (err_status.error.mtu)
+ odp_atomic_inc_u64(&sa->stats.mtu_err);
+
+ if (err_status.error.hard_exp_bytes)
+ odp_atomic_inc_u64(&sa->stats.hard_exp_bytes_err);
+
+ if (err_status.error.hard_exp_packets)
+ odp_atomic_inc_u64(&sa->stats.hard_exp_pkts_err);
+}
+
+static int ipsec_in_parse_encap_packet(odp_packet_t pkt, ipsec_state_t *state,
+ odp_ipsec_op_status_t *status, uint32_t *orig_ip_len)
+{
+ int (*op)(ipsec_state_t *state, odp_packet_t pkt);
+
+ state->ip_offset = odp_packet_l3_offset(pkt);
+ _ODP_ASSERT(ODP_PACKET_OFFSET_INVALID != state->ip_offset);
+ state->ip = odp_packet_l3_ptr(pkt, NULL);
+ _ODP_ASSERT(NULL != state->ip);
+ state->is_ipv4 = (((uint8_t *)state->ip)[0] >> 4) == 0x4;
+ state->is_ipv6 = (((uint8_t *)state->ip)[0] >> 4) == 0x6;
+
+ if (odp_unlikely(!(state->is_ipv4 || state->is_ipv6)))
+ goto err;
+
+ op = state->is_ipv4 ? ipsec_parse_ipv4 : ipsec_parse_ipv6;
+
+ if (odp_unlikely(op(state, pkt) ||
+ state->ip_tot_len + state->ip_offset > odp_packet_len(pkt)))
+ goto err;
+
+ *orig_ip_len = state->ip_tot_len;
+
+ return 0;
+
+err:
+ status->error.alg = 1;
+
+ return -1;
+}
+
+static int ipsec_in_prepare_op(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t **ipsec_sa,
+ odp_ipsec_sa_t sa, odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status)
+{
+ int (*op)(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t **ipsec_sa,
+ odp_ipsec_sa_t sa, odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status);
+
+ memset(param, 0, sizeof(*param));
+
+ if (odp_unlikely(!(_ODP_IPPROTO_ESP == state->ip_next_hdr ||
+ _ODP_IPPROTO_UDP == state->ip_next_hdr ||
+ _ODP_IPPROTO_AH == state->ip_next_hdr))) {
+ status->error.proto = 1;
+
+ return -1;
+ }
+
+ op = _ODP_IPPROTO_ESP == state->ip_next_hdr || _ODP_IPPROTO_UDP == state->ip_next_hdr ?
+ ipsec_in_esp : ipsec_in_ah;
+
+ return op(pkt, state, ipsec_sa, sa, param, status);
+}
+
+static int ipsec_in_prepare_packet(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t **ipsec_sa,
+ odp_ipsec_sa_t sa, odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status, uint32_t *orig_ip_len)
+{
+ return ipsec_in_parse_encap_packet(*pkt, state, status, orig_ip_len) ||
+ ipsec_in_prepare_op(pkt, state, ipsec_sa, sa, param, status) ||
+ _odp_ipsec_sa_replay_precheck(*ipsec_sa, state->in.seq_no, status) < 0 ||
+ _odp_ipsec_sa_stats_precheck(*ipsec_sa, status) < 0;
+}
+
+static int ipsec_in_do_crypto(odp_packet_t *pkt, odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status)
+{
+ odp_crypto_packet_result_t result;
+ int rc;
+
+ if (odp_unlikely(odp_crypto_op(pkt, pkt, param, 1) < 0)) {
+ _ODP_DBG("Crypto failed\n");
+ goto alg_err;
+ }
+
+ rc = odp_crypto_result(&result, *pkt);
+
+ if (odp_likely(rc == 0))
+ return 0;
+
+ if (odp_unlikely(rc < -1)) {
+ _ODP_DBG("Crypto failed\n");
+ goto alg_err;
+ }
+
+ if (result.cipher_status.alg_err == ODP_CRYPTO_ALG_ERR_ICV_CHECK ||
+ result.auth_status.alg_err == ODP_CRYPTO_ALG_ERR_ICV_CHECK)
+ goto auth_err;
+
+alg_err:
+ status->error.alg = 1;
+
+ return -1;
+
+auth_err:
+ status->error.auth = 1;
+
+ return -1;
+}
+
+static int ipsec_in_finalize_op(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ odp_ipsec_op_status_t *status)
+{
+ int (*op)(odp_packet_t pkt, ipsec_state_t *state);
+
+ state->ip = odp_packet_l3_ptr(*pkt, NULL);
+
+ if (odp_unlikely(!(ODP_IPSEC_ESP == ipsec_sa->proto || ODP_IPSEC_AH == ipsec_sa->proto)))
+ goto proto_err;
+
+ op = ODP_IPSEC_ESP == ipsec_sa->proto ? ipsec_in_esp_post : ipsec_in_ah_post;
+
+ if (odp_unlikely(op(*pkt, state)))
+ goto proto_err;
+
+ if (odp_unlikely(odp_packet_trunc_tail(pkt,
+ state->in.trl_len + ipsec_get_seqh_len(ipsec_sa),
+ NULL, NULL) < 0))
+ goto alg_err;
+
+ state->ip_tot_len -= state->in.trl_len;
+
+ return 0;
+
+proto_err:
+ status->error.proto = 1;
+
+ return -1;
+
+alg_err:
+ status->error.alg = 1;
+
+ return -1;
+}
+
+static int ipsec_in_strip_tunnel(odp_packet_t *pkt, ipsec_state_t *state,
+ odp_ipsec_op_status_t *status)
+{
+ odp_packet_move_data(*pkt, state->ip_hdr_len + state->in.hdr_len, 0, state->ip_offset);
+
+ if (odp_unlikely(odp_packet_trunc_head(pkt, state->ip_hdr_len + state->in.hdr_len, NULL,
+ NULL) < 0)) {
+ status->error.alg = 1;
+
+ return -1;
+ }
+
+ state->ip_tot_len -= state->ip_hdr_len + state->in.hdr_len;
+
+ if (odp_unlikely(!(_ODP_IPPROTO_IPIP == state->ip_next_hdr ||
+ _ODP_IPPROTO_IPV6 == state->ip_next_hdr ||
+ _ODP_IPPROTO_NO_NEXT == state->ip_next_hdr))) {
+ status->error.proto = 1;
+
+ return -1;
+ }
+
+ state->is_ipv4 = _ODP_IPPROTO_IPIP == state->ip_next_hdr;
+ state->is_ipv6 = _ODP_IPPROTO_IPV6 == state->ip_next_hdr;
+
+ return 0;
+}
+
+static int ipsec_in_strip_tp(odp_packet_t *pkt, ipsec_state_t *state,
+ odp_ipsec_op_status_t *status)
+{
+ odp_packet_move_data(*pkt, state->in.hdr_len, 0, state->ip_offset + state->ip_hdr_len);
+
+ if (odp_unlikely(odp_packet_trunc_head(pkt, state->in.hdr_len, NULL, NULL) < 0)) {
+ status->error.alg = 1;
+
+ return -1;
+ }
+
+ state->ip_tot_len -= state->in.hdr_len;
+
+ return 0;
+}
+
+static int ipsec_in_strip_headers(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ odp_ipsec_op_status_t *status)
+{
+ int (*op)(odp_packet_t *pkt, ipsec_state_t *state, odp_ipsec_op_status_t *status);
+
+ op = ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode ? ipsec_in_strip_tunnel : ipsec_in_strip_tp;
+
+ return op(pkt, state, status);
+}
+
+static int ipsec_in_finalize_decap_header(odp_packet_t pkt, ipsec_state_t *state,
+ ipsec_sa_t *ipsec_sa, odp_ipsec_op_status_t *status)
+{
+ _odp_ipv4hdr_t *ipv4hdr;
+ _odp_ipv6hdr_t *ipv6hdr;
+
+ if (state->is_ipv4 && odp_packet_len(pkt) > _ODP_IPV4HDR_LEN) {
+ ipv4hdr = odp_packet_l3_ptr(pkt, NULL);
+
+ if (ODP_IPSEC_MODE_TRANSPORT == ipsec_sa->mode)
+ ipv4hdr->tot_len = odp_cpu_to_be_16(state->ip_tot_len);
+ else
+ ipv4hdr->ttl -= ipsec_sa->dec_ttl;
+
+ _odp_packet_ipv4_chksum_insert(pkt);
+ } else if (state->is_ipv6 && odp_packet_len(pkt) > _ODP_IPV6HDR_LEN) {
+ ipv6hdr = odp_packet_l3_ptr(pkt, NULL);
+
+ if (ODP_IPSEC_MODE_TRANSPORT == ipsec_sa->mode)
+ ipv6hdr->payload_len = odp_cpu_to_be_16(state->ip_tot_len -
+ _ODP_IPV6HDR_LEN);
+ else
+ ipv6hdr->hop_limit -= ipsec_sa->dec_ttl;
+ } else if (state->ip_next_hdr != _ODP_IPPROTO_NO_NEXT) {
+ status->error.proto = 1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ipsec_in_finalize_packet(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ odp_ipsec_op_status_t *status)
+{
+ return _odp_ipsec_sa_lifetime_update(ipsec_sa, state->stats_length, status) < 0 ||
+ ipsec_in_finalize_op(pkt, state, ipsec_sa, status) ||
+ ipsec_in_strip_headers(pkt, state, ipsec_sa, status) ||
+ ipsec_in_finalize_decap_header(*pkt, state, ipsec_sa, status);
+}
+
+static void ipsec_in_reset_parse_data(odp_packet_t pkt, ipsec_state_t *state)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ packet_parse_reset(pkt_hdr, 0);
+ pkt_hdr->p.l3_offset = state->ip_offset;
+}
+
+static void ipsec_in_parse_packet(odp_packet_t pkt, ipsec_state_t *state)
+{
+ odp_packet_parse_param_t parse_param;
+
+ parse_param.proto = state->is_ipv4 ? ODP_PROTO_IPV4 :
+ state->is_ipv6 ? ODP_PROTO_IPV6 :
+ ODP_PROTO_NONE;
+ parse_param.last_layer = ipsec_config->inbound.parse_level;
+ parse_param.chksums = ipsec_config->inbound.chksums;
+ /* We do not care about return code here. Parsing error should not result in IPsec
+ * error. */
+ odp_packet_parse(pkt, state->ip_offset, &parse_param);
+}
+
+static void ipsec_in_parse_decap_packet(odp_packet_t pkt, ipsec_state_t *state,
+ ipsec_sa_t *ipsec_sa)
+{
+ void (*op)(odp_packet_t pkt, ipsec_state_t *state);
+
+ op = _ODP_IPPROTO_NO_NEXT == state->ip_next_hdr &&
+ ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode ? ipsec_in_reset_parse_data :
+ ipsec_in_parse_packet;
+
+ op(pkt, state);
+}
+
/*
 * Synchronous inbound processing of a single packet: prepare (SA resolution,
 * header parsing, crypto parameter setup), run crypto, update the SA's
 * anti-replay window, finalize (lifetime update, header stripping) and
 * re-parse the decapsulated packet.
 *
 * Returns the SA used (NULL if it could not be determined); failures are
 * reported via 'status'. *pkt_out always receives the (possibly modified)
 * packet handle.
 */
static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
				   odp_ipsec_sa_t sa,
				   odp_packet_t *pkt_out,
				   odp_bool_t enqueue_op,
				   odp_ipsec_op_status_t *status,
				   uint32_t *orig_ip_len)
{
	ipsec_state_t state;
	ipsec_sa_t *ipsec_sa = NULL;
	odp_crypto_packet_op_param_t param;

	if (odp_unlikely(ipsec_in_prepare_packet(&pkt, &state, &ipsec_sa, sa, &param, status,
						 orig_ip_len)))
		goto exit;

	if (ipsec_in_do_crypto(&pkt, &param, status))
		goto exit;

	if (ipsec_sa->antireplay) {
		/* In enqueue mode, preserve packet order before touching the
		 * shared replay window. */
		if (enqueue_op)
			wait_for_order(ipsec_global->inbound_ordering_mode);

		if (_odp_ipsec_sa_replay_update(ipsec_sa, state.in.seq_no, status) < 0)
			goto exit;
	}

	if (odp_unlikely(ipsec_in_finalize_packet(&pkt, &state, ipsec_sa, status)))
		goto post_lifetime_err_cnt_update;

	ipsec_in_parse_decap_packet(pkt, &state, ipsec_sa);

	goto exit;

post_lifetime_err_cnt_update:
	/* Lifetime has already been charged for this packet; count the
	 * post-lifetime failure separately. */
	if (ipsec_config->stats_en) {
		odp_atomic_inc_u64(&ipsec_sa->stats.post_lifetime_err_pkts);
		odp_atomic_add_u64(&ipsec_sa->stats.post_lifetime_err_bytes, state.stats_length);
	}

exit:
	*pkt_out = pkt;

	if (ipsec_config->stats_en)
		ipsec_sa_err_stats_update(ipsec_sa, status);

	return ipsec_sa;
}
+
/* Generate sequence number */
static inline
uint64_t ipsec_seq_no(ipsec_sa_t *ipsec_sa)
{
	/* Atomically allocate the next 64-bit outbound sequence number. */
	return odp_atomic_fetch_add_u64(&ipsec_sa->hot.out.seq, 1);
}
+
/* Helper for calculating encode length using data length and block size */
/* Rounds x up to the next multiple of b; b need not be a power of two. */
#define IPSEC_PAD_LEN(x, b) ((((x) + ((b) - 1)) / (b)) * (b))
+
/*
 * Round len up to the next multiple of pad_mask + 1.
 * pad_mask + 1 must be a power of 2.
 */
static inline uint32_t ipsec_padded_len(uint32_t len, uint32_t pad_mask)
{
	uint32_t padded = (len + pad_mask) & ~pad_mask;

	_ODP_ASSERT(_ODP_CHECK_IS_POWER2(pad_mask + 1));

	return padded;
}
+
+static int ipsec_out_tunnel_parse_ipv4(ipsec_state_t *state,
+ ipsec_sa_t *ipsec_sa)
+{
+ _odp_ipv4hdr_t *ipv4hdr = state->ip;
+ uint16_t flags = odp_be_to_cpu_16(ipv4hdr->frag_offset);
+
+ ipv4hdr->ttl -= ipsec_sa->dec_ttl;
+ state->out_tunnel.ip_tos = ipv4hdr->tos;
+ state->out_tunnel.ip_df = _ODP_IPV4HDR_FLAGS_DONT_FRAG(flags);
+ state->out_tunnel.ip_flabel = 0;
+ state->ip_next_hdr = ipv4hdr->proto;
+
+ return 0;
+}
+
+static int ipsec_out_tunnel_parse_ipv6(ipsec_state_t *state,
+ ipsec_sa_t *ipsec_sa)
+{
+ _odp_ipv6hdr_t *ipv6hdr = state->ip;
+ uint32_t ver_tc_flow = odp_be_to_cpu_32(ipv6hdr->ver_tc_flow);
+
+ ipv6hdr->hop_limit -= ipsec_sa->dec_ttl;
+ state->out_tunnel.ip_tos = (ver_tc_flow &
+ _ODP_IPV6HDR_TC_MASK) >>
+ _ODP_IPV6HDR_TC_SHIFT;
+ state->out_tunnel.ip_df = 0;
+ state->out_tunnel.ip_flabel = (ver_tc_flow &
+ _ODP_IPV6HDR_FLOW_LABEL_MASK) >>
+ _ODP_IPV6HDR_FLOW_LABEL_SHIFT;
+ state->ip_next_hdr = ipv6hdr->next_hdr;
+
+ return 0;
+}
+
/*
 * Prepend a new outer IPv4 header for tunnel-mode encapsulation. Outer
 * TOS/DF are copied from the inner header or taken from ipv4_param depending
 * on the SA's copy_dscp/copy_df settings. Updates 'state' to describe the
 * new outer header. Returns 0 on success, -1 on allocation failure.
 */
static int ipsec_out_tunnel_ipv4(odp_packet_t *pkt,
				 ipsec_state_t *state,
				 ipsec_sa_t *ipsec_sa,
				 const odp_ipsec_ipv4_param_t *ipv4_param)
{
	_odp_ipv4hdr_t out_ip;
	uint16_t flags;

	/* Version 4, header length 5 words (no options) */
	out_ip.ver_ihl = 0x45;
	if (ipsec_sa->copy_dscp)
		out_ip.tos = state->out_tunnel.ip_tos;
	else
		out_ip.tos = (state->out_tunnel.ip_tos &
			      ~_ODP_IP_TOS_DSCP_MASK) |
			     (ipv4_param->dscp <<
			      _ODP_IP_TOS_DSCP_SHIFT);
	state->ip_tot_len = odp_packet_len(*pkt) - state->ip_offset;
	state->ip_tot_len += _ODP_IPV4HDR_LEN;

	out_ip.tot_len = odp_cpu_to_be_16(state->ip_tot_len);
	if (ipsec_sa->copy_df)
		flags = state->out_tunnel.ip_df;
	else
		flags = ((uint16_t)ipv4_param->df) << 14;
	out_ip.frag_offset = odp_cpu_to_be_16(flags);

	/* Allocate unique IP ID only for non-atomic datagrams */
	if (out_ip.frag_offset == 0)
		out_ip.id = _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa);
	else
		out_ip.id = 0;

	out_ip.ttl = ipv4_param->ttl;
	/* Will be filled later by packet checksum update */
	out_ip.chksum = 0;
	memcpy(&out_ip.src_addr, ipv4_param->src_addr,
	       _ODP_IPV4ADDR_LEN);
	memcpy(&out_ip.dst_addr, ipv4_param->dst_addr,
	       _ODP_IPV4ADDR_LEN);

	if (odp_packet_extend_head(pkt, _ODP_IPV4HDR_LEN,
				   NULL, NULL) < 0)
		return -1;

	/* Shift the bytes preceding L3 to the new packet head, opening a gap
	 * at the original L3 offset, then write the outer header there. */
	odp_packet_move_data(*pkt, 0, _ODP_IPV4HDR_LEN, state->ip_offset);

	odp_packet_copy_from_mem(*pkt, state->ip_offset,
				 _ODP_IPV4HDR_LEN, &out_ip);

	state->ip = odp_packet_l3_ptr(*pkt, NULL);
	state->ip_hdr_len = _ODP_IPV4HDR_LEN;
	if (state->is_ipv4)
		state->ip_next_hdr = _ODP_IPPROTO_IPIP;
	else if (state->is_ipv6)
		state->ip_next_hdr = _ODP_IPPROTO_IPV6;
	else
		state->ip_next_hdr = _ODP_IPPROTO_NO_NEXT;
	state->ip_next_hdr_offset = state->ip_offset +
		_ODP_IPV4HDR_PROTO_OFFSET;

	state->is_ipv4 = 1;
	state->is_ipv6 = 0;

	return 0;
}
+
/*
 * Prepend a new outer IPv6 header for tunnel-mode encapsulation. Outer
 * traffic class and flow label are copied from the inner header or taken
 * from ipv6_param depending on the SA's copy_dscp/copy_flabel settings.
 * Updates 'state' to describe the new outer header. Returns 0 on success,
 * -1 on allocation failure.
 */
static int ipsec_out_tunnel_ipv6(odp_packet_t *pkt,
				 ipsec_state_t *state,
				 ipsec_sa_t *ipsec_sa,
				 const odp_ipsec_ipv6_param_t *ipv6_param)
{
	_odp_ipv6hdr_t out_ip;
	uint32_t ver;

	ver = 6 << _ODP_IPV6HDR_VERSION_SHIFT;
	if (ipsec_sa->copy_dscp)
		ver |= state->out_tunnel.ip_tos << _ODP_IPV6HDR_TC_SHIFT;
	else
		ver |= ((state->out_tunnel.ip_tos &
			 ~_ODP_IP_TOS_DSCP_MASK) |
			(ipv6_param->dscp <<
			 _ODP_IP_TOS_DSCP_SHIFT)) <<
			_ODP_IPV6HDR_TC_SHIFT;
	if (ipsec_sa->copy_flabel)
		ver |= state->out_tunnel.ip_flabel;
	else
		ver |= ipv6_param->flabel;
	out_ip.ver_tc_flow = odp_cpu_to_be_32(ver);

	/* The current length past L3 becomes the payload of the outer header;
	 * payload_len is written before _ODP_IPV6HDR_LEN is added. */
	state->ip_tot_len = odp_packet_len(*pkt) - state->ip_offset;
	out_ip.payload_len = odp_cpu_to_be_16(state->ip_tot_len);
	state->ip_tot_len += _ODP_IPV6HDR_LEN;

	out_ip.hop_limit = ipv6_param->hlimit;
	memcpy(&out_ip.src_addr, ipv6_param->src_addr,
	       _ODP_IPV6ADDR_LEN);
	memcpy(&out_ip.dst_addr, ipv6_param->dst_addr,
	       _ODP_IPV6ADDR_LEN);

	if (odp_packet_extend_head(pkt, _ODP_IPV6HDR_LEN,
				   NULL, NULL) < 0)
		return -1;

	/* Shift the bytes preceding L3 to the new packet head, opening a gap
	 * at the original L3 offset, then write the outer header there. */
	odp_packet_move_data(*pkt, 0, _ODP_IPV6HDR_LEN, state->ip_offset);

	odp_packet_copy_from_mem(*pkt, state->ip_offset,
				 sizeof(out_ip), &out_ip);

	state->ip = odp_packet_l3_ptr(*pkt, NULL);
	state->ip_hdr_len = _ODP_IPV6HDR_LEN;
	if (state->is_ipv4)
		state->ip_next_hdr = _ODP_IPPROTO_IPIP;
	else if (state->is_ipv6)
		state->ip_next_hdr = _ODP_IPPROTO_IPV6;
	else
		state->ip_next_hdr = _ODP_IPPROTO_NO_NEXT;
	state->ip_next_hdr_offset = state->ip_offset + _ODP_IPV6HDR_NHDR_OFFSET;

	state->is_ipv4 = 0;
	state->is_ipv6 = 1;

	return 0;
}
+
+#define IPSEC_RANDOM_BUF_SIZE 256
+
+static int ipsec_random_data(uint8_t *data, uint32_t len)
+{
+ static __thread uint8_t buffer[IPSEC_RANDOM_BUF_SIZE];
+ static __thread uint32_t buffer_used = IPSEC_RANDOM_BUF_SIZE;
+
+ if (odp_likely(buffer_used + len <= IPSEC_RANDOM_BUF_SIZE)) {
+ memcpy(data, &buffer[buffer_used], len);
+ buffer_used += len;
+ } else if (odp_likely(len <= IPSEC_RANDOM_BUF_SIZE)) {
+ uint32_t rnd_len;
+
+ rnd_len = odp_random_data(buffer, IPSEC_RANDOM_BUF_SIZE,
+ ODP_RANDOM_CRYPTO);
+ if (odp_unlikely(rnd_len != IPSEC_RANDOM_BUF_SIZE))
+ return -1;
+ memcpy(data, &buffer[0], len);
+ buffer_used = len;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
/*
 * Generate cipher IV for outbound processing.
 *
 * Counter-based ciphers (GCM/CTR) use salt || 64-bit sequence number; CTR
 * additionally sets the trailing 32-bit block counter to 1. CBC mode derives
 * an unpredictable IV from a per-session random salt and the sequence
 * number (see the in-code comment). Any other cipher with a nonzero IV
 * length gets random IV bytes. Returns 0 on success, -1 if random data
 * could not be obtained.
 */
static int ipsec_out_iv(ipsec_state_t *state,
			ipsec_sa_t *ipsec_sa,
			uint64_t seq_no)
{
	if (ipsec_sa->use_counter_iv) {
		/* Both GCM and CTR use 8-bit counters */
		_ODP_ASSERT(sizeof(seq_no) == ipsec_sa->esp_iv_len);

		/* It is faster to just copy MAX_SALT_LEN bytes than the exact length */
		ODP_STATIC_ASSERT(IPSEC_MAX_SALT_LEN <= IPSEC_MAX_IV_LEN,
				  "IPSEC_MAX_SALT_LEN too large");
		memcpy(state->iv, ipsec_sa->salt, IPSEC_MAX_SALT_LEN);

		_ODP_ASSERT(ipsec_sa->salt_length + sizeof(seq_no) <= IPSEC_MAX_IV_LEN);
		memcpy(state->iv + ipsec_sa->salt_length, &seq_no, sizeof(seq_no));

		if (ipsec_sa->aes_ctr_iv) {
			/* CTR mode: initialize the trailing block counter to 1 */
			ODP_STATIC_ASSERT(IPSEC_MAX_IV_LEN >= 16, "IPSEC_MAX_IV_LEN too small");
			state->iv[12] = 0;
			state->iv[13] = 0;
			state->iv[14] = 0;
			state->iv[15] = 1;
		}
	} else if (ipsec_sa->use_cbc_iv) {
		/*
		 * For CBC mode ciphers with 16 byte IV we generate the cipher
		 * IV by concatenating a per-session random salt value and
		 * 64-bit sequence number. The ESP IV will be generated at
		 * ciphering time by CBC-encrypting a zero block using the
		 * cipher IV.
		 *
		 * This way each packet of an SA will have an unpredictable
		 * IV and different SAs (e.g. manually keyed SAs across
		 * restarts) will have different IV sequences (so one cannot
		 * predict IVs of an SA by observing the IVs of another SA
		 * with the same key).
		 */
		_ODP_ASSERT(CBC_SALT_LEN + sizeof(seq_no) == ipsec_sa->esp_iv_len);
		ODP_STATIC_ASSERT(CBC_SALT_LEN + sizeof(seq_no) <= IPSEC_MAX_IV_LEN,
				  "IPSEC_MAX_IV_LEN too small for CBC IV construction");
		memcpy(state->iv, ipsec_sa->cbc_salt, CBC_SALT_LEN);
		memcpy(state->iv + CBC_SALT_LEN, &seq_no, sizeof(seq_no));
	} else if (odp_unlikely(ipsec_sa->esp_iv_len)) {
		/* Fallback: fully random IV of the configured length */
		_ODP_ASSERT(ipsec_sa->esp_iv_len <= IPSEC_MAX_IV_LEN);
		if (ipsec_random_data(state->iv, ipsec_sa->esp_iv_len))
			return -1;
	}

	return 0;
}
+
/*
 * Build an outbound ESP packet in place: insert the ESP header (optionally
 * preceded by a UDP encapsulation header), IV, TFC padding, ESP padding,
 * trailer and room for the ICV, then fill the crypto operation parameters
 * for the subsequent crypto call.
 *
 * Returns 0 on success, -1 on failure with the corresponding bit set in
 * 'status' (mtu, alg or proto).
 */
static int ipsec_out_esp(odp_packet_t *pkt,
			 ipsec_state_t *state,
			 ipsec_sa_t *ipsec_sa,
			 odp_crypto_packet_op_param_t *param,
			 odp_ipsec_op_status_t *status,
			 uint32_t mtu,
			 odp_bool_t enqueue_op,
			 const odp_ipsec_out_opt_t *opt)
{
	_odp_esphdr_t esp;
	_odp_esptrl_t esptrl;
	_odp_udphdr_t udphdr;
	uint32_t encrypt_len;
	uint16_t ip_data_len = state->ip_tot_len -
			       state->ip_hdr_len;
	/* Traffic-flow confidentiality padding requested by the caller */
	uint16_t tfc_len = (opt->flag.tfc_pad || opt->flag.tfc_dummy) ?
		opt->tfc_pad_len : 0;
	uint16_t ipsec_offset = state->ip_offset + state->ip_hdr_len;
	unsigned hdr_len;
	unsigned trl_len;
	unsigned pkt_len, new_len;
	uint8_t proto = _ODP_IPPROTO_ESP;
	uint64_t seq_no;

	if (odp_unlikely(opt->flag.tfc_dummy)) {
		/* Dummy packet: no payload, only TFC padding. */
		ip_data_len = 0;
		state->ip_tot_len = state->ip_offset + state->ip_hdr_len;
	}

	/* Payload + TFC pad + trailer, rounded up to the cipher block size */
	encrypt_len = ipsec_padded_len(ip_data_len + tfc_len + _ODP_ESPTRL_LEN,
				       ipsec_sa->esp_pad_mask);

	hdr_len = _ODP_ESPHDR_LEN + ipsec_sa->esp_iv_len;
	trl_len = encrypt_len -
		  ip_data_len +
		  ipsec_sa->icv_len;

	if (ipsec_sa->udp_encap) {
		/* UDP encapsulation of ESP (NAT traversal) */
		hdr_len += _ODP_UDPHDR_LEN;
		proto = _ODP_IPPROTO_UDP;
		udphdr.src_port = odp_cpu_to_be_16(_ODP_UDP_IPSEC_PORT);
		udphdr.dst_port = odp_cpu_to_be_16(_ODP_UDP_IPSEC_PORT);
		udphdr.length = odp_cpu_to_be_16(ip_data_len +
						 hdr_len + trl_len);
		udphdr.chksum = 0; /* should be 0 by RFC */
	}

	if (state->ip_tot_len + hdr_len + trl_len > mtu) {
		status->error.mtu = 1;
		return -1;
	}

	/* In enqueue mode, keep sequence number allocation in packet order */
	if (enqueue_op)
		wait_for_order(ipsec_global->outbound_ordering_mode);
	seq_no = ipsec_seq_no(ipsec_sa);

	if (ipsec_out_iv(state, ipsec_sa, seq_no) < 0) {
		status->error.alg = 1;
		return -1;
	}

	param->cipher_iv_ptr = state->iv;
	param->auth_iv_ptr = state->iv;

	memset(&esp, 0, sizeof(esp));
	esp.spi = odp_cpu_to_be_32(ipsec_sa->spi);
	state->esp.aad.spi = esp.spi;
	esp.seq_no = odp_cpu_to_be_32(seq_no & 0xffffffff);

	/* With ESN the AAD carries the full 64-bit sequence number */
	if (ipsec_sa->esn)
		state->esp.aad.seq_no64 = odp_cpu_to_be_64(seq_no);
	else
		state->esp.aad.seq_no = esp.seq_no;

	param->aad_ptr = (uint8_t *)&state->esp.aad;

	memset(&esptrl, 0, sizeof(esptrl));
	esptrl.pad_len = encrypt_len - ip_data_len - tfc_len - _ODP_ESPTRL_LEN;
	esptrl.next_header = state->ip_next_hdr;

	/* Rewrite the IP next-header field and total length to account for
	 * the headers and trailer about to be inserted */
	odp_packet_copy_from_mem(*pkt, state->ip_next_hdr_offset, 1, &proto);
	state->ip_tot_len += hdr_len + trl_len;
	if (state->is_ipv4) {
		_odp_ipv4hdr_t *ipv4hdr = state->ip;

		ipv4hdr->tot_len = odp_cpu_to_be_16(state->ip_tot_len);
	} else if (state->is_ipv6) {
		_odp_ipv6hdr_t *ipv6hdr = state->ip;

		ipv6hdr->payload_len = odp_cpu_to_be_16(state->ip_tot_len -
							_ODP_IPV6HDR_LEN);
	}

	if (odp_packet_extend_head(pkt, hdr_len, NULL, NULL) < 0) {
		status->error.alg = 1;
		return -1;
	}

	/* Resize the packet tail to exactly the new total length */
	pkt_len = odp_packet_len(*pkt);
	new_len = state->ip_offset + state->ip_tot_len;
	if (pkt_len >= new_len) {
		if (odp_packet_trunc_tail(pkt, pkt_len - new_len,
					  NULL, NULL) < 0) {
			status->error.alg = 1;
			return -1;
		}
	} else {
		if (odp_packet_extend_tail(pkt, new_len - pkt_len,
					   NULL, NULL) < 0) {
			status->error.alg = 1;
			return -1;
		}
	}

	/* Move the bytes preceding the payload back to the packet head,
	 * opening a gap for the (UDP +) ESP header at ipsec_offset */
	odp_packet_move_data(*pkt, 0, hdr_len, ipsec_offset);

	uint32_t esptrl_offset = state->ip_offset +
				 state->ip_hdr_len +
				 hdr_len +
				 encrypt_len -
				 _ODP_ESPTRL_LEN;

	if (ipsec_sa->udp_encap) {
		odp_packet_copy_from_mem(*pkt, ipsec_offset, _ODP_UDPHDR_LEN,
					 &udphdr);
		ipsec_offset += _ODP_UDPHDR_LEN;
		hdr_len -= _ODP_UDPHDR_LEN;
		state->ip_hdr_len += _ODP_UDPHDR_LEN;
	}

	odp_packet_copy_from_mem(*pkt,
				 ipsec_offset, _ODP_ESPHDR_LEN,
				 &esp);
	if (!ipsec_sa->use_cbc_iv) {
		/* copy the relevant part of cipher IV to ESP IV */
		odp_packet_copy_from_mem(*pkt,
					 ipsec_offset + _ODP_ESPHDR_LEN,
					 ipsec_sa->esp_iv_len,
					 state->iv + ipsec_sa->salt_length);
	}
	/* 0xa5 is a good value to fill data instead of generating random data
	 * to create TFC padding */
	_odp_packet_set_data(*pkt, esptrl_offset - esptrl.pad_len - tfc_len,
			     0xa5, tfc_len);
	odp_packet_copy_from_mem(*pkt,
				 esptrl_offset - esptrl.pad_len,
				 esptrl.pad_len, ipsec_padding);
	odp_packet_copy_from_mem(*pkt,
				 esptrl_offset, _ODP_ESPTRL_LEN,
				 &esptrl);

	/* Outbound ICV computation includes ESN higher 32 bits as part of ESP
	 * implicit trailer for individual algo's.
	 */
	if (ipsec_sa->insert_seq_hi) {
		uint32_t outb_seqh = odp_cpu_to_be_32(seq_no >> 32);

		if (odp_packet_extend_tail(pkt, IPSEC_SEQ_HI_LEN, NULL, NULL) < 0) {
			status->error.alg = 1;
			_ODP_ERR("odp_packet_extend_tail failed\n");
			return -1;
		}
		odp_packet_copy_from_mem(*pkt,
					 esptrl_offset + _ODP_ESPTRL_LEN,
					 IPSEC_SEQ_HI_LEN, &outb_seqh);
	}

	/* Sanity check before computing the crypto ranges below */
	if (odp_unlikely(state->ip_tot_len <
			 state->ip_hdr_len + hdr_len + ipsec_sa->icv_len)) {
		status->error.proto = 1;
		return -1;
	}

	param->cipher_range.offset = ipsec_offset + hdr_len;
	param->cipher_range.length = state->ip_tot_len -
				     state->ip_hdr_len -
				     hdr_len -
				     ipsec_sa->icv_len;

	param->auth_range.offset = ipsec_offset;
	param->auth_range.length = state->ip_tot_len -
				   state->ip_hdr_len +
				   ipsec_get_seqh_len(ipsec_sa) -
				   ipsec_sa->icv_len;
	param->hash_result_offset = state->ip_offset +
				    state->ip_tot_len +
				    ipsec_get_seqh_len(ipsec_sa) -
				    ipsec_sa->icv_len;

	state->stats_length = param->cipher_range.length;

	if (ipsec_sa->use_cbc_iv) {
		/*
		 * Encrypt zeroed ESP IV field using the special cipher IV
		 * to create the final unpredictable ESP IV
		 */
		_ODP_ASSERT(ipsec_sa->esp_iv_len == CBC_IV_LEN);
		param->cipher_range.offset -= CBC_IV_LEN;
		param->cipher_range.length += CBC_IV_LEN;
		_odp_packet_set_data(*pkt,
				     ipsec_offset + _ODP_ESPHDR_LEN,
				     0,
				     CBC_IV_LEN);
	}

	param->session = ipsec_sa->session;

	return 0;
}
+
+static int ipsec_out_esp_post(ipsec_state_t *state, odp_packet_t *pkt,
+ ipsec_sa_t *ipsec_sa)
+{
+ if (state->is_ipv4)
+ _odp_packet_ipv4_chksum_insert(*pkt);
+
+ /* Remove the high order ESN bits that were added in the packet for ICV
+ * computation.
+ */
+ if (ipsec_sa->insert_seq_hi) {
+ uint32_t icv_offset = odp_packet_len(*pkt) - ipsec_sa->icv_len;
+
+ odp_packet_move_data(*pkt, icv_offset - IPSEC_SEQ_HI_LEN, icv_offset,
+ ipsec_sa->icv_len);
+ if (odp_packet_trunc_tail(pkt, IPSEC_SEQ_HI_LEN, NULL, NULL) < 0) {
+ _ODP_ERR("odp_packet_trunc_tail failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/*
 * Build an outbound AH packet in place: insert the AH header, (for GMAC) IV
 * and a zeroed ICV placeholder, zero the mutable IP header fields for ICV
 * computation (originals are saved in 'state' and restored later in
 * ipsec_out_ah_post()) and fill the crypto operation parameters.
 *
 * Returns 0 on success, -1 on failure with a bit set in 'status'.
 */
static int ipsec_out_ah(odp_packet_t *pkt,
			ipsec_state_t *state,
			ipsec_sa_t *ipsec_sa,
			odp_crypto_packet_op_param_t *param,
			odp_ipsec_op_status_t *status,
			uint32_t mtu,
			odp_bool_t enqueue_op)
{
	_odp_ahhdr_t ah;
	unsigned hdr_len = _ODP_AHHDR_LEN + ipsec_sa->esp_iv_len +
		ipsec_sa->icv_len;
	uint16_t ipsec_offset = state->ip_offset + state->ip_hdr_len;
	uint8_t proto = _ODP_IPPROTO_AH;
	uint64_t seq_no;

	if (state->ip_tot_len + hdr_len > mtu) {
		status->error.mtu = 1;
		return -1;
	}

	/* In enqueue mode, keep sequence number allocation in packet order */
	if (enqueue_op)
		wait_for_order(ipsec_global->outbound_ordering_mode);
	seq_no = ipsec_seq_no(ipsec_sa);

	memset(&ah, 0, sizeof(ah));
	ah.spi = odp_cpu_to_be_32(ipsec_sa->spi);
	ah.seq_no = odp_cpu_to_be_32(seq_no & 0xffffffff);
	ah.next_header = state->ip_next_hdr;

	odp_packet_copy_from_mem(*pkt, state->ip_next_hdr_offset, 1, &proto);
	/* Save IP stuff */
	if (state->is_ipv4) {
		_odp_ipv4hdr_t *ipv4hdr = state->ip;

		/* Save, then zero, the mutable fields for ICV computation */
		state->ah_ipv4.tos = ipv4hdr->tos;
		state->ah_ipv4.frag_offset = ipv4hdr->frag_offset;
		state->ah_ipv4.ttl = ipv4hdr->ttl;
		ipv4hdr->chksum = 0;
		ipv4hdr->tos = 0;
		ipv4hdr->frag_offset = 0;
		ipv4hdr->ttl = 0;
		/* Pad the AH header to a 32-bit multiple for IPv4 */
		hdr_len = IPSEC_PAD_LEN(hdr_len, 4);
		state->ip_tot_len += hdr_len;
		ipv4hdr->tot_len = odp_cpu_to_be_16(state->ip_tot_len);
	} else {
		_odp_ipv6hdr_t *ipv6hdr = state->ip;

		state->ah_ipv6.ver_tc_flow = ipv6hdr->ver_tc_flow;
		state->ah_ipv6.hop_limit = ipv6hdr->hop_limit;
		ipv6hdr->ver_tc_flow =
			odp_cpu_to_be_32(6 << _ODP_IPV6HDR_VERSION_SHIFT);
		ipv6hdr->hop_limit = 0;

		/* Pad the AH header to a 64-bit multiple for IPv6 */
		hdr_len = IPSEC_PAD_LEN(hdr_len, 8);
		state->ip_tot_len += hdr_len;
		ipv6hdr->payload_len = odp_cpu_to_be_16(state->ip_tot_len -
							_ODP_IPV6HDR_LEN);
	}

	/* AH length field: header length in 32-bit words minus 2 */
	ah.ah_len = hdr_len / 4 - 2;

	/* For GMAC */
	if (ipsec_out_iv(state, ipsec_sa, seq_no) < 0) {
		status->error.alg = 1;
		return -1;
	}

	param->auth_iv_ptr = state->iv;

	if (odp_packet_extend_head(pkt, hdr_len, NULL, NULL) < 0) {
		status->error.alg = 1;
		return -1;
	}

	/* Open a gap for the AH header right after the IP header */
	odp_packet_move_data(*pkt, 0, hdr_len, ipsec_offset);

	odp_packet_copy_from_mem(*pkt,
				 ipsec_offset, _ODP_AHHDR_LEN,
				 &ah);
	odp_packet_copy_from_mem(*pkt,
				 ipsec_offset + _ODP_AHHDR_LEN,
				 ipsec_sa->esp_iv_len,
				 state->iv + ipsec_sa->salt_length);
	/* Zero the ICV field (and alignment padding) for ICV computation */
	_odp_packet_set_data(*pkt,
			     ipsec_offset + _ODP_AHHDR_LEN +
			     ipsec_sa->esp_iv_len,
			     0,
			     hdr_len - _ODP_AHHDR_LEN - ipsec_sa->esp_iv_len);

	/* ESN higher 32 bits are included at the end of the packet data
	 * for outbound ICV computation.
	 */
	if (ipsec_sa->insert_seq_hi) {
		uint32_t outb_seqh = odp_cpu_to_be_32(seq_no >> 32);
		uint32_t seqh_offset = odp_packet_len(*pkt);

		if (odp_packet_extend_tail(pkt, IPSEC_SEQ_HI_LEN, NULL, NULL) < 0) {
			status->error.alg = 1;
			_ODP_ERR("odp_packet_extend_tail failed\n");
			return -1;
		}
		odp_packet_copy_from_mem(*pkt,
					 seqh_offset, IPSEC_SEQ_HI_LEN, &outb_seqh);
	}

	param->auth_range.offset = state->ip_offset;
	param->auth_range.length = state->ip_tot_len;
	param->hash_result_offset = ipsec_offset + _ODP_AHHDR_LEN +
		ipsec_sa->esp_iv_len;

	state->stats_length = param->auth_range.length;
	param->auth_range.length += ipsec_get_seqh_len(ipsec_sa);
	param->session = ipsec_sa->session;

	return 0;
}
+
+static int ipsec_out_ah_post(ipsec_state_t *state, odp_packet_t *pkt,
+ ipsec_sa_t *ipsec_sa)
+{
+ if (state->is_ipv4) {
+ _odp_ipv4hdr_t *ipv4hdr = odp_packet_l3_ptr(*pkt, NULL);
+
+ ipv4hdr->ttl = state->ah_ipv4.ttl;
+ ipv4hdr->tos = state->ah_ipv4.tos;
+ ipv4hdr->frag_offset = state->ah_ipv4.frag_offset;
+
+ _odp_packet_ipv4_chksum_insert(*pkt);
+ } else {
+ _odp_ipv6hdr_t *ipv6hdr = odp_packet_l3_ptr(*pkt, NULL);
+
+ ipv6hdr->ver_tc_flow = state->ah_ipv6.ver_tc_flow;
+ ipv6hdr->hop_limit = state->ah_ipv6.hop_limit;
+ }
+
+ /* Remove the high order ESN bits that were added in the packet for ICV
+ * computation.
+ */
+ if (ipsec_sa->insert_seq_hi) {
+ if (odp_packet_trunc_tail(pkt, IPSEC_SEQ_HI_LEN, NULL, NULL) < 0) {
+ _ODP_ERR("odp_packet_trunc_tail failed\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/* True when a checksum should be inserted for a protocol: _proto selects the
 * protocol match; the per-packet override _ovr (valid when _ovr_set) takes
 * precedence over the global configuration bit _cfg. */
#define OL_TX_CHKSUM_PKT(_cfg, _proto, _ovr_set, _ovr) \
	(_proto && (_ovr_set ? _ovr : _cfg))
+
+static void ipsec_out_checksums(odp_packet_t pkt,
+ ipsec_state_t *state)
+{
+ odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt,
+ sctp_chksum_pkt;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ odp_ipsec_outbound_config_t outbound = ipsec_config->outbound;
+
+ ipv4_chksum_pkt = OL_TX_CHKSUM_PKT(outbound.chksum.inner_ipv4,
+ state->is_ipv4,
+ pkt_hdr->p.flags.l3_chksum_set,
+ pkt_hdr->p.flags.l3_chksum);
+ udp_chksum_pkt = OL_TX_CHKSUM_PKT(outbound.chksum.inner_udp,
+ state->ip_next_hdr ==
+ _ODP_IPPROTO_UDP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+ tcp_chksum_pkt = OL_TX_CHKSUM_PKT(outbound.chksum.inner_tcp,
+ state->ip_next_hdr ==
+ _ODP_IPPROTO_TCP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+
+ sctp_chksum_pkt = OL_TX_CHKSUM_PKT(outbound.chksum.inner_sctp,
+ state->ip_next_hdr ==
+ _ODP_IPPROTO_SCTP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+
+ if (ipv4_chksum_pkt)
+ _odp_packet_ipv4_chksum_insert(pkt);
+
+ if (tcp_chksum_pkt)
+ _odp_packet_tcp_chksum_insert(pkt);
+
+ if (udp_chksum_pkt)
+ _odp_packet_udp_chksum_insert(pkt);
+
+ if (sctp_chksum_pkt)
+ _odp_packet_sctp_chksum_insert(pkt);
+}
+
+static int ipsec_out_tp_encap(odp_packet_t pkt, ipsec_state_t *state)
+{
+ int (*op)(ipsec_state_t *state, odp_packet_t pkt);
+
+ if (odp_unlikely(!(state->is_ipv4 || state->is_ipv6)))
+ return -1;
+
+ op = state->is_ipv4 ? ipsec_parse_ipv4 : ipsec_parse_ipv6;
+
+ if (odp_unlikely(op(state, pkt) ||
+ state->ip_tot_len + state->ip_offset != odp_packet_len(pkt)))
+ return -1;
+
+ ipsec_out_checksums(pkt, state);
+
+ return 0;
+}
+
+static int ipsec_out_tunnel_encap(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ const odp_ipsec_out_opt_t *opt)
+{
+ int ret;
+
+ if (odp_unlikely(!(state->is_ipv4 || state->is_ipv6 || opt->flag.tfc_dummy)))
+ return -1;
+
+ if (state->is_ipv4) {
+ if (odp_unlikely(ipsec_out_tunnel_parse_ipv4(state, ipsec_sa)))
+ return -1;
+ } else if (state->is_ipv6) {
+ if (odp_unlikely(ipsec_out_tunnel_parse_ipv6(state, ipsec_sa)))
+ return -1;
+ } else {
+ state->out_tunnel.ip_tos = 0;
+ state->out_tunnel.ip_df = 0;
+ state->out_tunnel.ip_flabel = 0;
+ state->ip_next_hdr = _ODP_IPPROTO_NO_NEXT;
+ }
+
+ ipsec_out_checksums(*pkt, state);
+
+ if (ipsec_sa->tun_ipv4)
+ ret = ipsec_out_tunnel_ipv4(pkt, state, ipsec_sa,
+ opt->flag.ip_param ? &opt->ipv4 :
+ &ipsec_sa->out.tun_ipv4.param);
+ else
+ ret = ipsec_out_tunnel_ipv6(pkt, state, ipsec_sa,
+ opt->flag.ip_param ? &opt->ipv6 :
+ &ipsec_sa->out.tun_ipv6.param);
+
+ return ret;
+}
+
+static int ipsec_out_parse_encap_packet(odp_packet_t *pkt, ipsec_state_t *state,
+ ipsec_sa_t *ipsec_sa, const odp_ipsec_out_opt_t *opt,
+ odp_ipsec_op_status_t *status)
+{
+ odp_packet_hdr_t *pkt_hdr;
+ int ret;
+
+ if (opt->flag.tfc_dummy) {
+ pkt_hdr = packet_hdr(*pkt);
+ _ODP_ASSERT(ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode);
+ pkt_hdr->p.l2_offset = ODP_PACKET_OFFSET_INVALID;
+ pkt_hdr->p.l3_offset = 0;
+ state->ip_offset = 0;
+ state->ip = NULL;
+ state->is_ipv4 = 0;
+ state->is_ipv6 = 0;
+ } else {
+ state->ip_offset = odp_packet_l3_offset(*pkt);
+ _ODP_ASSERT(ODP_PACKET_OFFSET_INVALID != state->ip_offset);
+ state->ip = odp_packet_l3_ptr(*pkt, NULL);
+ _ODP_ASSERT(NULL != state->ip);
+ state->is_ipv4 = (((uint8_t *)state->ip)[0] >> 4) == 0x4;
+ state->is_ipv6 = (((uint8_t *)state->ip)[0] >> 4) == 0x6;
+ }
+
+ if (ODP_IPSEC_MODE_TRANSPORT == ipsec_sa->mode)
+ ret = ipsec_out_tp_encap(*pkt, state);
+ else
+ ret = ipsec_out_tunnel_encap(pkt, state, ipsec_sa, opt);
+
+ if (odp_unlikely(ret))
+ status->error.alg = 1;
+
+ return ret;
+}
+
+static int ipsec_out_prepare_op(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ const odp_ipsec_out_opt_t *opt, odp_bool_t is_enqueue_op,
+ odp_crypto_packet_op_param_t *param, odp_ipsec_op_status_t *status)
+{
+ odp_ipsec_frag_mode_t frag_mode;
+ uint32_t mtu;
+ int ret;
+
+ memset(param, 0, sizeof(*param));
+
+ frag_mode = opt->flag.frag_mode ? opt->frag_mode : ipsec_sa->out.frag_mode;
+ mtu = frag_mode == ODP_IPSEC_FRAG_CHECK ? odp_atomic_load_u32(&ipsec_sa->out.mtu) :
+ UINT32_MAX;
+
+ if (odp_unlikely(!(ODP_IPSEC_ESP == ipsec_sa->proto || ODP_IPSEC_AH == ipsec_sa->proto))) {
+ status->error.alg = 1;
+
+ return -1;
+ }
+
+ if (ODP_IPSEC_ESP == ipsec_sa->proto)
+ ret = ipsec_out_esp(pkt, state, ipsec_sa, param, status, mtu, is_enqueue_op, opt);
+ else
+ ret = ipsec_out_ah(pkt, state, ipsec_sa, param, status, mtu, is_enqueue_op);
+
+ return ret;
+}
+
+static int ipsec_out_prepare_packet(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ const odp_ipsec_out_opt_t *opt, odp_bool_t is_enqueue_op,
+ odp_crypto_packet_op_param_t *param,
+ odp_ipsec_op_status_t *status)
+{
+ return ipsec_out_parse_encap_packet(pkt, state, ipsec_sa, opt, status) ||
+ ipsec_out_prepare_op(pkt, state, ipsec_sa, opt, is_enqueue_op, param, status);
+}
+
+static int ipsec_out_finalize_packet(odp_packet_t *pkt, ipsec_state_t *state, ipsec_sa_t *ipsec_sa,
+ odp_ipsec_op_status_t *status)
+{
+ int (*op)(ipsec_state_t *state, odp_packet_t *pkt, ipsec_sa_t *ipsec_sa);
+
+ op = ODP_IPSEC_ESP == ipsec_sa->proto ? ipsec_out_esp_post :
+ ODP_IPSEC_AH == ipsec_sa->proto ? ipsec_out_ah_post : NULL;
+
+ if (odp_unlikely(op && op(state, pkt, ipsec_sa))) {
+ status->error.alg = 1;
+
+ return -1;
+ }
+
+ return 0;
+}
+
/*
 * Prepare a burst of packets for inbound processing. Copies pkt_in[] to
 * pkt_out[], initializes one ipsec_op_t per packet and gathers the packets
 * that prepared successfully (with their crypto parameters) into the
 * crypto_* arrays for the subsequent crypto burst (*num_crypto entries).
 * With num_sa == 0 the SA is found by lookup; with num_sa == 1 the same SA
 * is used for every packet; otherwise SAs are consumed one per packet.
 */
static void ipsec_in_prepare(const odp_packet_t pkt_in[], odp_packet_t pkt_out[], int num_in,
			     const odp_ipsec_in_param_t *param, ipsec_op_t ops[],
			     odp_packet_t crypto_pkts[],
			     odp_crypto_packet_op_param_t crypto_param[], ipsec_op_t *crypto_ops[],
			     int *num_crypto)
{
	unsigned int sa_idx = 0, sa_inc = (param->num_sa > 1) ? 1 : 0;

	*num_crypto = 0;

	for (int i = 0; i < num_in; i++) {
		pkt_out[i] = pkt_in[i];
		ipsec_op_t *op = &ops[i];
		odp_packet_t *pkt = &pkt_out[i];
		odp_crypto_packet_op_param_t c_p;

		memset(op, 0, sizeof(*op));

		if (0 == param->num_sa) {
			/* SA lookup is done inside ipsec_in_prepare_packet() */
			op->sa_hdl = ODP_IPSEC_SA_INVALID;
		} else {
			op->sa_hdl = param->sa[sa_idx];
			_ODP_ASSERT(ODP_IPSEC_SA_INVALID != op->sa_hdl);
		}

		sa_idx += sa_inc;

		if (odp_likely(ipsec_in_prepare_packet(pkt, &op->state, &op->sa, op->sa_hdl, &c_p,
						       &op->status, &op->orig_ip_len) == 0)) {
			crypto_pkts[*num_crypto] = *pkt;
			crypto_param[*num_crypto] = c_p;
			crypto_ops[*num_crypto] = op;
			(*num_crypto)++;
		}
	}
}
+
+static void ipsec_do_crypto_burst(odp_packet_t pkts[], odp_crypto_packet_op_param_t param[],
+ ipsec_op_t *ops[], int num)
+{
+ int num_procd = 0;
+
+ while (num_procd < num) {
+ int ret = odp_crypto_op(&pkts[num_procd], &pkts[num_procd], &param[num_procd],
+ num - num_procd);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ num_procd += ret;
+ }
+
+ for (int i = num_procd; i < num; i++)
+ ops[i]->status.error.alg = 1;
+}
+
/*
 * Translate the crypto result of an inbound packet into IPsec status bits.
 * Returns 0 when crypto succeeded, -1 otherwise. An ICV mismatch is reported
 * as an authentication error, anything else as an algorithm error.
 * NOTE(review): assumes odp_crypto_result() returns 0 for success, -1 for a
 * failed operation (result struct still filled in) and < -1 when the result
 * could not be retrieved at all — confirm against the ODP crypto API.
 */
static int ipsec_in_check_crypto_result(odp_packet_t pkt, odp_ipsec_op_status_t *status)
{
	odp_crypto_packet_result_t result;
	int rc = odp_crypto_result(&result, pkt);

	if (odp_likely(rc == 0))
		return 0;

	if (odp_unlikely(rc < -1)) {
		/* Result unavailable; 'result' contents are not valid here */
		_ODP_DBG("Crypto failed\n");
		status->error.alg = 1;
		return -1;
	}

	if (result.cipher_status.alg_err == ODP_CRYPTO_ALG_ERR_ICV_CHECK ||
	    result.auth_status.alg_err == ODP_CRYPTO_ALG_ERR_ICV_CHECK)
		status->error.auth = 1;
	else
		status->error.alg = 1;

	return -1;
}
+
+static inline void update_post_lifetime_stats(ipsec_sa_t *sa, ipsec_state_t *state)
+{
+ if (ipsec_config->stats_en) {
+ odp_atomic_inc_u64(&sa->stats.post_lifetime_err_pkts);
+ odp_atomic_add_u64(&sa->stats.post_lifetime_err_bytes, state->stats_length);
+ }
+}
+
/*
 * Common tail of per-packet processing: update error statistics, attach the
 * IPsec result metadata to the packet, release the SA reference taken by
 * lookup, and in enqueue mode (queue != ODP_QUEUE_INVALID) enqueue the
 * packet as an event on the given queue.
 */
static inline void finish_packet_proc(odp_packet_t pkt, ipsec_op_t *op, odp_queue_t queue)
{
	odp_ipsec_packet_result_t *res;

	if (ipsec_config->stats_en)
		ipsec_sa_err_stats_update(op->sa, &op->status);

	packet_subtype_set(pkt, ODP_EVENT_PACKET_IPSEC);
	res = ipsec_pkt_result(pkt);
	memset(res, 0, sizeof(*res));
	res->status = op->status;
	res->sa = NULL != op->sa ? op->sa->ipsec_sa_hdl : ODP_IPSEC_SA_INVALID;
	/* We need to decrease SA use count only if the SA was not provided to us by the caller but
	 * was found through our own SA lookup that increased the use count. */
	if (op->sa_hdl == ODP_IPSEC_SA_INVALID && op->sa)
		_odp_ipsec_sa_unuse(op->sa);

	if (queue != ODP_QUEUE_INVALID) {
		res->orig_ip_len = op->orig_ip_len;
		/* What should be done if enqueue fails? */
		if (odp_unlikely(odp_queue_enq(queue, odp_ipsec_packet_to_event(pkt)) < 0))
			odp_packet_free(pkt);
	}
}
+
/*
 * Finalize a burst of inbound packets after the crypto burst: check the
 * crypto results, update the anti-replay window, finish decapsulation and
 * re-parse, then attach results (and enqueue in enqueue mode). Packets that
 * already failed at prepare/crypto submission skip straight to result
 * delivery.
 */
static void ipsec_in_finalize(odp_packet_t pkt_in[], ipsec_op_t ops[], int num, odp_bool_t is_enq)
{
	for (int i = 0; i < num; i++) {
		ipsec_op_t *op = &ops[i];
		odp_packet_t *pkt = &pkt_in[i];
		odp_queue_t q = ODP_QUEUE_INVALID;

		if (odp_unlikely(op->status.error.all))
			goto finish;

		if (odp_unlikely(ipsec_in_check_crypto_result(*pkt, &op->status)))
			goto finish;

		if (op->sa->antireplay) {
			/* In enqueue mode, preserve packet order before
			 * touching the shared replay window. */
			if (is_enq)
				wait_for_order(ipsec_global->inbound_ordering_mode);

			if (odp_unlikely(_odp_ipsec_sa_replay_update(op->sa, op->state.in.seq_no,
								     &op->status) < 0))
				goto finish;
		}

		if (odp_unlikely(ipsec_in_finalize_packet(pkt, &op->state, op->sa,
							  &op->status))) {
			update_post_lifetime_stats(op->sa, &op->state);
			goto finish;
		}

		ipsec_in_parse_decap_packet(*pkt, &op->state, op->sa);

finish:
		/* Without an SA (lookup failure) fall back to the configured
		 * inbound default queue. */
		if (is_enq)
			q = NULL != op->sa ? op->sa->queue : ipsec_config->inbound.default_queue;

		finish_packet_proc(*pkt, op, q);
	}
}
+
+int odp_ipsec_in(const odp_packet_t pkt_in[], int num_in, odp_packet_t pkt_out[], int *num_out,
+ const odp_ipsec_in_param_t *param)
+{
+ int max_out = _ODP_MIN3(num_in, *num_out, MAX_BURST), num_crypto;
+ odp_packet_t crypto_pkts[MAX_BURST];
+ odp_crypto_packet_op_param_t crypto_param[MAX_BURST];
+ ipsec_op_t ops[MAX_BURST], *crypto_ops[MAX_BURST];
+
+ ipsec_in_prepare(pkt_in, pkt_out, max_out, param, ops, crypto_pkts, crypto_param,
+ crypto_ops, &num_crypto);
+ ipsec_do_crypto_burst(crypto_pkts, crypto_param, crypto_ops, num_crypto);
+ ipsec_in_finalize(pkt_out, ops, max_out, false);
+ *num_out = max_out;
+
+ return max_out;
+}
+
/* Zero-initialized options used when the caller supplies no per-operation
 * options (param->num_opt == 0). */
static odp_ipsec_out_opt_t default_out_opt;
+
/*
 * Prepare a burst of packets for outbound processing. Copies pkt_in[] to
 * pkt_out[], initializes one ipsec_op_t per packet, runs encapsulation and
 * protocol header construction, updates SA lifetimes, and gathers the
 * successfully prepared packets (with their crypto parameters) into the
 * crypto_* arrays (*num_crypto entries). SAs and per-operation options are
 * fanned out one-per-packet when more than one is given, otherwise shared.
 */
static void ipsec_out_prepare(const odp_packet_t pkt_in[], odp_packet_t pkt_out[], int num_in,
			      const odp_ipsec_out_param_t *param, ipsec_op_t ops[],
			      odp_packet_t crypto_pkts[],
			      odp_crypto_packet_op_param_t crypto_param[],
			      ipsec_op_t *crypto_ops[], int *num_crypto, odp_bool_t is_enq)
{
	unsigned int sa_idx = 0, opt_idx = 0, sa_inc = (param->num_sa > 1) ? 1 : 0,
	opt_inc = (param->num_opt > 1) ? 1 : 0;
	/* No need to do _odp_ipsec_sa_use() here since an ODP application is not allowed to
	 * call IPsec output before SA creation has completed nor call odp_ipsec_sa_disable()
	 * before IPsec output has completed. IOW, the needed synchronization between threads is
	 * done by the application. */
	*num_crypto = 0;

	for (int i = 0; i < num_in; i++) {
		pkt_out[i] = pkt_in[i];
		ipsec_op_t *op = &ops[i];
		const odp_ipsec_out_opt_t *opt;
		odp_packet_t *pkt = &pkt_out[i];
		odp_crypto_packet_op_param_t c_p;

		memset(op, 0, sizeof(*op));
		op->sa_hdl = param->sa[sa_idx];
		_ODP_ASSERT(ODP_IPSEC_SA_INVALID != op->sa_hdl);
		op->sa = _odp_ipsec_sa_entry_from_hdl(op->sa_hdl);
		_ODP_ASSERT(NULL != op->sa);

		if (0 == param->num_opt)
			opt = &default_out_opt;
		else
			opt = &param->opt[opt_idx];

		sa_idx += sa_inc;
		opt_idx += opt_inc;

		/* On prepare failure the packet still gets a result in
		 * ipsec_out_finalize(); it just skips the crypto burst. */
		if (odp_unlikely(ipsec_out_prepare_packet(pkt, &op->state, op->sa, opt, is_enq,
							  &c_p, &op->status)))
			continue;

		if (odp_unlikely(_odp_ipsec_sa_lifetime_update(op->sa, op->state.stats_length,
							       &op->status))) {
			update_post_lifetime_stats(op->sa, &op->state);
			continue;
		}

		crypto_pkts[*num_crypto] = *pkt;
		crypto_param[*num_crypto] = c_p;
		crypto_ops[*num_crypto] = op;
		(*num_crypto)++;
	}
}
+
+static int ipsec_out_check_crypto_result(odp_packet_t pkt, odp_ipsec_op_status_t *status)
+{
+ if (odp_unlikely(odp_crypto_result(NULL, pkt) != 0)) {
+ _ODP_DBG("Crypto failed\n");
+ status->error.alg = 1;
+ return -1;
+ }
+
+ return 0;
+}
+
/*
 * Finalize a burst of outbound packets after the crypto burst: check the
 * crypto results, run the protocol-specific post-processing and deliver the
 * per-packet results (enqueuing them as events in enqueue mode).
 */
static void ipsec_out_finalize(odp_packet_t pkt_in[], ipsec_op_t ops[], int num, odp_bool_t is_enq)
{
	for (int i = 0; i < num; i++) {
		ipsec_op_t *op = &ops[i];
		odp_packet_t *pkt = &pkt_in[i];
		odp_queue_t q = ODP_QUEUE_INVALID;

		if (odp_unlikely(op->status.error.all))
			goto finish;

		if (odp_unlikely(ipsec_out_check_crypto_result(*pkt, &op->status))) {
			update_post_lifetime_stats(op->sa, &op->state);
			goto finish;
		}

		if (odp_unlikely(ipsec_out_finalize_packet(pkt, &op->state, op->sa, &op->status)))
			update_post_lifetime_stats(op->sa, &op->state);

finish:
		/* NOTE(review): ipsec_out_prepare() asserts a non-NULL op->sa
		 * for outbound, so the fallback to the inbound default queue
		 * below appears to be dead code kept for symmetry with
		 * ipsec_in_finalize() — confirm before relying on it. */
		if (is_enq)
			q = NULL != op->sa ? op->sa->queue : ipsec_config->inbound.default_queue;

		finish_packet_proc(*pkt, op, q);
	}
}
+
+/* Synchronous outbound IPsec processing of a burst of packets. */
+int odp_ipsec_out(const odp_packet_t pkt_in[], int num_in, odp_packet_t pkt_out[], int *num_out,
+		  const odp_ipsec_out_param_t *param)
+{
+	odp_packet_t crypto_pkts[MAX_BURST];
+	odp_crypto_packet_op_param_t crypto_param[MAX_BURST];
+	ipsec_op_t ops[MAX_BURST], *crypto_ops[MAX_BURST];
+	int num_crypto;
+	const int num_proc = _ODP_MIN3(num_in, *num_out, MAX_BURST);
+
+	ipsec_out_prepare(pkt_in, pkt_out, num_proc, param, ops, crypto_pkts, crypto_param,
+			  crypto_ops, &num_crypto, false);
+	ipsec_do_crypto_burst(crypto_pkts, crypto_param, crypto_ops, num_crypto);
+	ipsec_out_finalize(pkt_out, ops, num_proc, false);
+	*num_out = num_proc;
+
+	return num_proc;
+}
+
+/* Do not change to an asynchronous design without thinking concurrency and what changes are
+ * required to guarantee that used SAs are not destroyed when asynchronous operations are in
+ * progress.
+ *
+ * The containing code does not hold a reference to the SA but completes processing synchronously
+ * and makes use of the fact that the application may not disable (and then destroy) the SA before
+ * these routines return (and all side effects are visible to the disabling thread). */
+/* Asynchronous inbound IPsec processing: results are delivered as events. */
+int odp_ipsec_in_enq(const odp_packet_t pkt_in[], int num_in, const odp_ipsec_in_param_t *param)
+{
+	odp_packet_t pkt_out[MAX_BURST], crypto_pkts[MAX_BURST];
+	odp_crypto_packet_op_param_t crypto_param[MAX_BURST];
+	ipsec_op_t ops[MAX_BURST], *crypto_ops[MAX_BURST];
+	int num_crypto;
+	const int num_proc = _ODP_MIN(num_in, MAX_BURST);
+
+	ipsec_in_prepare(pkt_in, pkt_out, num_proc, param, ops, crypto_pkts, crypto_param,
+			 crypto_ops, &num_crypto);
+	ipsec_do_crypto_burst(crypto_pkts, crypto_param, crypto_ops, num_crypto);
+	ipsec_in_finalize(pkt_out, ops, num_proc, true);
+
+	return num_proc;
+}
+
+/* Asynchronous outbound IPsec processing: results are delivered as events. */
+int odp_ipsec_out_enq(const odp_packet_t pkt_in[], int num_in, const odp_ipsec_out_param_t *param)
+{
+	odp_packet_t pkt_out[MAX_BURST], crypto_pkts[MAX_BURST];
+	odp_crypto_packet_op_param_t crypto_param[MAX_BURST];
+	ipsec_op_t ops[MAX_BURST], *crypto_ops[MAX_BURST];
+	int num_crypto;
+	const int num_proc = _ODP_MIN(num_in, MAX_BURST);
+
+	ipsec_out_prepare(pkt_in, pkt_out, num_proc, param, ops, crypto_pkts, crypto_param,
+			  crypto_ops, &num_crypto, true);
+	ipsec_do_crypto_burst(crypto_pkts, crypto_param, crypto_ops, num_crypto);
+	ipsec_out_finalize(pkt_out, ops, num_proc, true);
+
+	return num_proc;
+}
+
+/* Attempt inbound inline IPsec processing of a received packet.
+ *
+ * Returns 0 when the packet was consumed by IPsec (result filled in and the
+ * destination queue set in the packet header), -1 when IPsec is disabled or
+ * SA lookup/early processing failed so that the packet should continue on
+ * the normal receive path.
+ */
+int _odp_ipsec_try_inline(odp_packet_t *pkt)
+{
+	odp_ipsec_op_status_t status;
+	ipsec_sa_t *ipsec_sa;
+	uint32_t orig_ip_len = 0;
+	odp_ipsec_packet_result_t *result;
+	odp_packet_hdr_t *pkt_hdr;
+
+	if (odp_global_ro.disable.ipsec)
+		return -1;
+
+	memset(&status, 0, sizeof(status));
+
+	/* On success, returns the SA with a use-count reference taken */
+	ipsec_sa = ipsec_in_single(*pkt, ODP_IPSEC_SA_INVALID, pkt, false,
+				   &status, &orig_ip_len);
+	/*
+	 * Route packet back in case of lookup failure or early error before
+	 * lookup
+	 */
+	if (NULL == ipsec_sa)
+		return -1;
+
+	packet_subtype_set(*pkt, ODP_EVENT_PACKET_IPSEC);
+	result = ipsec_pkt_result(*pkt);
+	memset(result, 0, sizeof(*result));
+	result->status = status;
+	result->orig_ip_len = orig_ip_len;
+	result->sa = ipsec_sa->ipsec_sa_hdl;
+	result->flag.inline_mode = 1;
+
+	/* Deliver the packet to the SA destination queue */
+	pkt_hdr = packet_hdr(*pkt);
+	pkt_hdr->p.input_flags.dst_queue = 1;
+	pkt_hdr->dst_queue = ipsec_sa->queue;
+	/* Distinguish inline IPsec packets from classifier packets */
+	pkt_hdr->cos = CLS_COS_IDX_NONE;
+
+	/* Last thing: drop the SA reference taken by the lookup above */
+	_odp_ipsec_sa_unuse(ipsec_sa);
+
+	return 0;
+}
+
+/* Validate and capture the outer (L2) header of an outbound inline packet.
+ *
+ * When the application did not supply an explicit outer header
+ * (param->outer_hdr.ptr is NULL), the packet's current L2 header is copied
+ * to op->hdr_buf so it can be restored after IPsec processing. Sets a
+ * protocol error and returns -1 if the header does not fit in the buffer or
+ * cannot be copied; returns 0 otherwise.
+ */
+static inline int ipsec_out_inline_check_out_hdrs(odp_packet_t pkt,
+						  const odp_ipsec_out_inline_param_t *param,
+						  ipsec_inline_op_t *op)
+{
+	uint32_t l2_offset, hdr_len = param->outer_hdr.len;
+
+	if (!param->outer_hdr.ptr) {
+		l2_offset = odp_packet_l2_offset(pkt);
+		/* The given length must match the actual L2 header length */
+		_ODP_ASSERT(hdr_len == odp_packet_l3_offset(pkt) - l2_offset);
+
+		if (odp_unlikely(hdr_len > MAX_HDR_LEN ||
+				 odp_packet_copy_to_mem(pkt, l2_offset, hdr_len, op->hdr_buf)
+				 < 0)) {
+			op->op.status.error.proto = 1;
+
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Prepare a burst of packets for outbound inline IPsec processing.
+ *
+ * Mirrors the plain outbound prepare step, but additionally captures each
+ * packet's outer L2 header (ipsec_out_inline_check_out_hdrs()). Packets
+ * that pass preparation are appended to the crypto arrays; packets that
+ * fail carry the error in their op status and are skipped here.
+ */
+static void ipsec_out_inline_prepare(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+				     int num_in, const odp_ipsec_out_param_t *param,
+				     const odp_ipsec_out_inline_param_t *inline_param,
+				     ipsec_inline_op_t ops[], odp_packet_t crypto_pkts[],
+				     odp_crypto_packet_op_param_t crypto_param[],
+				     ipsec_op_t *crypto_ops[], int *num_crypto)
+{
+	/* With a single SA/option, the same entry is used for every packet */
+	unsigned int sa_idx = 0, opt_idx = 0, sa_inc = (param->num_sa > 1) ? 1 : 0,
+	opt_inc = (param->num_opt > 1) ? 1 : 0;
+
+	*num_crypto = 0;
+
+	for (int i = 0; i < num_in; i++) {
+		pkt_out[i] = pkt_in[i];
+		ipsec_inline_op_t *op = &ops[i];
+		const odp_ipsec_out_opt_t *opt;
+		odp_packet_t *pkt = &pkt_out[i];
+		odp_crypto_packet_op_param_t c_p;
+
+		memset(op, 0, sizeof(*op));
+		/* Outbound inline processing always requires a valid SA */
+		op->op.sa_hdl = param->sa[sa_idx];
+		_ODP_ASSERT(ODP_IPSEC_SA_INVALID != op->op.sa_hdl);
+		op->op.sa = _odp_ipsec_sa_entry_from_hdl(op->op.sa_hdl);
+		_ODP_ASSERT(NULL != op->op.sa);
+
+		if (0 == param->num_opt)
+			opt = &default_out_opt;
+		else
+			opt = &param->opt[opt_idx];
+
+		sa_idx += sa_inc;
+		opt_idx += opt_inc;
+
+		if (odp_unlikely(ipsec_out_inline_check_out_hdrs(*pkt, &inline_param[i], op) ||
+				 ipsec_out_prepare_packet(pkt, &op->op.state, op->op.sa, opt,
+							  true, &c_p, &op->op.status)))
+			continue;
+
+		/* Skip crypto when a lifetime limit has already been hit */
+		if (odp_unlikely(_odp_ipsec_sa_lifetime_update(op->op.sa,
+							       op->op.state.stats_length,
+							       &op->op.status))) {
+			update_post_lifetime_stats(op->op.sa, &op->op.state);
+			continue;
+		}
+
+		crypto_pkts[*num_crypto] = *pkt;
+		crypto_param[*num_crypto] = c_p;
+		crypto_ops[*num_crypto] = &op->op;
+		(*num_crypto)++;
+	}
+}
+
+/* Restore the outer (L2) header on a processed packet and transmit it via
+ * the pktio given in the inline parameters.
+ *
+ * The header is taken from param->outer_hdr.ptr or, when that is NULL, from
+ * the copy saved in op->hdr_buf during preparation. Failures are recorded
+ * as an algorithm error in op->op.status; the caller reports errored
+ * packets to the application.
+ */
+static void ipsec_out_inline_finish_packet_proc(odp_packet_t *pkt,
+						const odp_ipsec_out_inline_param_t *param,
+						ipsec_inline_op_t *op)
+{
+	uint32_t offset = odp_packet_l3_offset(*pkt), hdr_len = param->outer_hdr.len;
+	odp_pktout_queue_t pkqueue;
+
+	_ODP_ASSERT(NULL != op->op.sa);
+
+	if (odp_unlikely(offset == ODP_PACKET_OFFSET_INVALID))
+		offset = 0;
+
+	/* Adjust packet head so that exactly hdr_len bytes precede L3 */
+	if (offset >= hdr_len) {
+		if (odp_packet_trunc_head(pkt, offset - hdr_len, NULL, NULL) < 0)
+			op->op.status.error.alg = 1;
+	} else {
+		if (odp_packet_extend_head(pkt, hdr_len - offset, NULL, NULL) < 0)
+			op->op.status.error.alg = 1;
+	}
+
+	odp_packet_l3_offset_set(*pkt, hdr_len);
+
+	if (odp_packet_copy_from_mem(*pkt, 0, hdr_len,
+				     param->outer_hdr.ptr ? param->outer_hdr.ptr : op->hdr_buf)
+	    < 0)
+		op->op.status.error.alg = 1;
+
+	if (!op->op.status.error.all) {
+		/* Fix: do not call odp_pktout_send() with an uninitialized
+		 * queue handle when the pktout queue query fails. */
+		if (odp_pktout_queue(param->pktio, &pkqueue, 1) <= 0)
+			op->op.status.error.alg = 1;
+		else if (odp_pktout_send(pkqueue, pkt, 1) < 0)
+			op->op.status.error.alg = 1;
+	}
+}
+
+/* Report an errored inline-out packet to the application.
+ *
+ * No-op when the packet has no errors. Otherwise updates SA error stats
+ * (when enabled), fills in the IPsec packet result and enqueues the packet
+ * to the SA destination queue; the packet is freed if the enqueue fails.
+ */
+static void ipsec_out_inline_handle_err(odp_packet_t pkt, ipsec_inline_op_t *op)
+{
+	odp_ipsec_packet_result_t *res;
+
+	if (odp_likely(!op->op.status.error.all))
+		return;
+
+	if (ipsec_config->stats_en)
+		ipsec_sa_err_stats_update(op->op.sa, &op->op.status);
+
+	packet_subtype_set(pkt, ODP_EVENT_PACKET_IPSEC);
+	res = ipsec_pkt_result(pkt);
+	memset(res, 0, sizeof(*res));
+	res->sa = op->op.sa_hdl;
+	res->status = op->op.status;
+
+	/* Free the packet rather than leak it when the queue is full */
+	if (odp_unlikely(odp_queue_enq(op->op.sa->queue, odp_ipsec_packet_to_event(pkt)) < 0))
+		odp_packet_free(pkt);
+}
+
+/* Complete outbound inline processing of a burst: send soft-expiry
+ * notifications, check crypto results, restore outer headers, transmit
+ * successful packets and report errored ones.
+ */
+static void ipsec_out_inline_finalize(odp_packet_t pkt_in[],
+				      const odp_ipsec_out_inline_param_t *inline_param,
+				      ipsec_inline_op_t ops[], int num)
+{
+	for (int i = 0; i < num; i++) {
+		ipsec_inline_op_t *op = &ops[i];
+		odp_packet_t *pkt = &pkt_in[i];
+
+		/* Send the soft lifetime expiry notification at most once per
+		 * SA (best effort, duplicates are tolerated) */
+		if (op->op.status.warn.soft_exp_packets || op->op.status.warn.soft_exp_bytes) {
+			if (!odp_atomic_load_u32(&op->op.sa->soft_expiry_notified)) {
+				int rc;
+
+				/*
+				 * Another thread may have sent the notification by now but we do
+				 * not care since sending duplicate expiry notifications is allowed.
+				 */
+				rc = _odp_ipsec_status_send(op->op.sa->queue,
+							    ODP_IPSEC_STATUS_WARN,
+							    op->op.sa->ipsec_sa_hdl,
+							    0, op->op.status.warn);
+				if (rc == 0)
+					odp_atomic_store_u32(&op->op.sa->soft_expiry_notified, 1);
+				else
+					_ODP_DBG("IPsec status event submission failed\n");
+			}
+		}
+
+		/* Errors recorded during preparation: skip transmit entirely */
+		if (odp_unlikely(op->op.status.error.all))
+			goto handle_err;
+
+		if (odp_unlikely(ipsec_out_check_crypto_result(*pkt, &op->op.status))) {
+			update_post_lifetime_stats(op->op.sa, &op->op.state);
+			goto finish;
+		}
+
+		if (odp_unlikely(ipsec_out_finalize_packet(pkt, &op->op.state, op->op.sa,
+							   &op->op.status)))
+			update_post_lifetime_stats(op->op.sa, &op->op.state);
+
+finish:
+		/* Sends only when no error has been recorded by this point */
+		ipsec_out_inline_finish_packet_proc(pkt, &inline_param[i], op);
+
+handle_err:
+		ipsec_out_inline_handle_err(*pkt, op);
+	}
+}
+
+/* Outbound inline IPsec processing: process the packets, restore their
+ * outer headers and transmit them directly via the given pktio. */
+int odp_ipsec_out_inline(const odp_packet_t pkt_in[], int num_in,
+			 const odp_ipsec_out_param_t *param,
+			 const odp_ipsec_out_inline_param_t *inline_param)
+{
+	odp_packet_t pkt_out[MAX_BURST], crypto_pkts[MAX_BURST];
+	odp_crypto_packet_op_param_t crypto_param[MAX_BURST];
+	ipsec_inline_op_t ops[MAX_BURST];
+	ipsec_op_t *crypto_ops[MAX_BURST];
+	int num_crypto;
+	const int num_proc = _ODP_MIN(num_in, MAX_BURST);
+
+	ipsec_out_inline_prepare(pkt_in, pkt_out, num_proc, param, inline_param, ops, crypto_pkts,
+				 crypto_param, crypto_ops, &num_crypto);
+	ipsec_do_crypto_burst(crypto_pkts, crypto_param, crypto_ops, num_crypto);
+	ipsec_out_inline_finalize(pkt_out, inline_param, ops, num_proc);
+
+	return num_proc;
+}
+
+/* Test API: update internal SA state. Only sequence number update is
+ * supported; returns -1 for any other operation. */
+int odp_ipsec_test_sa_update(odp_ipsec_sa_t sa,
+			     odp_ipsec_test_sa_operation_t sa_op,
+			     const odp_ipsec_test_sa_param_t *sa_param)
+{
+	ipsec_sa_t *ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+
+	_ODP_ASSERT(NULL != ipsec_sa);
+
+	if (sa_op != ODP_IPSEC_TEST_SA_UPDATE_SEQ_NUM)
+		return -1;
+
+	odp_atomic_store_u64(&ipsec_sa->hot.out.seq, sa_param->seq_num);
+
+	return 0;
+}
+
+/* Read the statistics of an SA.
+ *
+ * Returns 0 on success, -EINVAL for an invalid SA handle and -ENOTSUP when
+ * statistics collection is not enabled in the global IPsec configuration.
+ */
+int odp_ipsec_stats(odp_ipsec_sa_t sa, odp_ipsec_stats_t *stats)
+{
+	ipsec_sa_t *ipsec_sa;
+
+	if (ODP_IPSEC_SA_INVALID == sa)
+		return -EINVAL;
+
+	if (!ipsec_config->stats_en)
+		return -ENOTSUP;
+
+	_ODP_ASSERT(NULL != stats);
+
+	ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+	_ODP_ASSERT(NULL != ipsec_sa);
+
+	/* Packet/byte counters (helper defined elsewhere), then error counters */
+	_odp_ipsec_sa_stats_pkts(ipsec_sa, stats);
+	stats->proto_err = odp_atomic_load_u64(&ipsec_sa->stats.proto_err);
+	stats->auth_err = odp_atomic_load_u64(&ipsec_sa->stats.auth_err);
+	stats->antireplay_err = odp_atomic_load_u64(&ipsec_sa->stats.antireplay_err);
+	stats->alg_err = odp_atomic_load_u64(&ipsec_sa->stats.alg_err);
+	stats->mtu_err = odp_atomic_load_u64(&ipsec_sa->stats.mtu_err);
+	stats->hard_exp_bytes_err = odp_atomic_load_u64(&ipsec_sa->stats.hard_exp_bytes_err);
+	stats->hard_exp_pkts_err = odp_atomic_load_u64(&ipsec_sa->stats.hard_exp_pkts_err);
+
+	return 0;
+}
+
+int odp_ipsec_stats_multi(odp_ipsec_sa_t sa[], odp_ipsec_stats_t stats[], int num)
+{
+ int ret, i;
+
+ _ODP_ASSERT(NULL != stats);
+
+ for (i = 0; i < num; i++) {
+ ret = odp_ipsec_stats(sa[i], &stats[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Read IPsec ordering modes from the libconfig configuration file.
+ * Returns 0 on success, -1 when a required option is missing. */
+static int read_config_file(ipsec_global_t *global)
+{
+	const char *opt_inbound = "ipsec.ordering.async_inbound";
+	const char *opt_outbound = "ipsec.ordering.async_outbound";
+	int value;
+
+	if (!_odp_libconfig_lookup_int(opt_inbound, &value)) {
+		_ODP_ERR("Config option '%s' not found.\n", opt_inbound);
+		return -1;
+	}
+	global->inbound_ordering_mode = value;
+
+	if (!_odp_libconfig_lookup_int(opt_outbound, &value)) {
+		_ODP_ERR("Config option '%s' not found.\n", opt_outbound);
+		return -1;
+	}
+	global->outbound_ordering_mode = value;
+
+	return 0;
+}
+
+/* Reserve and initialize the global IPsec state in shared memory.
+ * Returns 0 on success (or when IPsec is disabled), -1 on failure. */
+int _odp_ipsec_init_global(void)
+{
+	odp_shm_t shm;
+
+	if (odp_global_ro.disable.ipsec)
+		return 0;
+
+	shm = odp_shm_reserve("_odp_ipsec_global", sizeof(*ipsec_global),
+			      ODP_CACHE_LINE_SIZE, 0);
+	if (ODP_SHM_INVALID == shm) {
+		_ODP_ERR("Shm reserve failed for odp_ipsec\n");
+		return -1;
+	}
+
+	ipsec_global = odp_shm_addr(shm);
+	if (NULL == ipsec_global) {
+		_ODP_ERR("ipsec: odp_shm_addr() failed\n");
+		goto err;
+	}
+
+	memset(ipsec_global, 0, sizeof(*ipsec_global));
+	ipsec_config = &ipsec_global->ipsec_config;
+
+	if (read_config_file(ipsec_global))
+		goto err;
+
+	memset(&default_out_opt, 0, sizeof(default_out_opt));
+
+	return 0;
+
+err:
+	odp_shm_free(shm);
+	return -1;
+}
+
+/* Free the global IPsec state. Returns 0 on success (or when IPsec is
+ * disabled), -1 when the shm reservation cannot be found or freed. */
+int _odp_ipsec_term_global(void)
+{
+	odp_shm_t shm;
+
+	if (odp_global_ro.disable.ipsec)
+		return 0;
+
+	shm = odp_shm_lookup("_odp_ipsec_global");
+
+	if (shm == ODP_SHM_INVALID || odp_shm_free(shm)) {
+		/* Fix: terminate the log message with a newline like the other
+		 * _ODP_ERR messages in this file. */
+		_ODP_ERR("Shm free failed for odp_ipsec\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Print global IPsec information to the ODP log. */
+void odp_ipsec_print(void)
+{
+	_ODP_PRINT("\nIPSEC print\n");
+	_ODP_PRINT("-----------\n");
+	_ODP_PRINT(" max number of SA %u\n\n", ipsec_config->max_num_sa);
+}
+
+/* Print basic information of an SA to the ODP log.
+ *
+ * NOTE(review): only a debug-build assert guards against an invalid handle;
+ * passing ODP_IPSEC_SA_INVALID in a release build would dereference an
+ * out-of-table entry.
+ */
+void odp_ipsec_sa_print(odp_ipsec_sa_t sa)
+{
+	ipsec_sa_t *ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+
+	_ODP_PRINT("\nIPSEC SA print\n");
+	_ODP_PRINT("--------------\n");
+	_ODP_PRINT(" SPI %u\n\n", ipsec_sa->spi);
+}
diff --git a/platform/linux-generic/odp_ipsec_api.c b/platform/linux-generic/odp_ipsec_api.c
new file mode 100644
index 000000000..1d1abe84a
--- /dev/null
+++ b/platform/linux-generic/odp_ipsec_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/ipsec.h>
+
+/* Non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/ipsec_inlines.h>
diff --git a/platform/linux-generic/odp_ipsec_events.c b/platform/linux-generic/odp_ipsec_events.c
new file mode 100644
index 000000000..769cab78b
--- /dev/null
+++ b/platform/linux-generic/odp_ipsec_events.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/ipsec.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp_init_internal.h>
+#include <odp_buffer_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_ipsec_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_global_data.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+/* IPsec status event payload, overlaid on a plain buffer header. */
+typedef struct {
+	/* common buffer header */
+	odp_buffer_hdr_t buf_hdr;
+
+	/* status data returned by odp_ipsec_status() */
+	odp_ipsec_status_t status;
+} ipsec_status_hdr_t;
+#pragma GCC diagnostic pop
+
+static odp_pool_t ipsec_status_pool = ODP_POOL_INVALID;
+
+#define IPSEC_EVENTS_POOL_BUF_COUNT 1024
+
+/* Create the buffer pool used for IPsec status events.
+ * Returns 0 on success (or when IPsec is disabled), -1 on failure. */
+int _odp_ipsec_events_init_global(void)
+{
+	odp_pool_param_t param;
+
+	if (odp_global_ro.disable.ipsec) {
+		_ODP_PRINT("\nODP IPSec is DISABLED\n");
+		return 0;
+	}
+
+	odp_pool_param_init(&param);
+	param.buf.size = sizeof(ipsec_status_hdr_t);
+	param.buf.align = 0;
+	param.buf.num = IPSEC_EVENTS_POOL_BUF_COUNT;
+	param.type = ODP_POOL_BUFFER;
+
+	ipsec_status_pool = odp_pool_create("_odp_ipsec_status_pool", &param);
+	if (ODP_POOL_INVALID == ipsec_status_pool) {
+		_ODP_ERR("Error: status pool create failed.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Destroy the IPsec status event pool. Returns 0 on success (or when IPsec
+ * is disabled), -1 on failure. */
+int _odp_ipsec_events_term_global(void)
+{
+	int ret;
+
+	if (odp_global_ro.disable.ipsec)
+		return 0;
+
+	ret = odp_pool_destroy(ipsec_status_pool);
+	if (ret < 0) {
+		/* Fix: terminate the log message with a newline like the other
+		 * _ODP_ERR messages in this file. */
+		_ODP_ERR("status pool destroy failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Convert an event to an IPsec status event handle. The event must be a
+ * valid event of type ODP_EVENT_IPSEC_STATUS. */
+ipsec_status_t _odp_ipsec_status_from_event(odp_event_t ev)
+{
+	_ODP_ASSERT(ODP_EVENT_INVALID != ev);
+	_ODP_ASSERT(ODP_EVENT_IPSEC_STATUS == odp_event_type(ev));
+
+	return (ipsec_status_t)ev;
+}
+
+/* Convert a valid IPsec status event handle back to a generic event. */
+static odp_event_t ipsec_status_to_event(ipsec_status_t status)
+{
+	_ODP_ASSERT(ODP_IPSEC_STATUS_INVALID != status);
+
+	return (odp_event_t)status;
+}
+
+/* Status event header overlaid on the buffer header of the event buffer. */
+static ipsec_status_hdr_t *ipsec_status_hdr_from_buf(odp_buffer_t buf)
+{
+	void *hdr = _odp_buf_hdr(buf);
+
+	return (ipsec_status_hdr_t *)hdr;
+}
+
+/* Reinterpret an event handle as the underlying buffer handle. */
+static inline odp_buffer_t buffer_from_event(odp_event_t ev)
+{
+	return (odp_buffer_t)ev;
+}
+
+/* Status event header of an IPsec status event. */
+static ipsec_status_hdr_t *ipsec_status_hdr(ipsec_status_t status)
+{
+	return ipsec_status_hdr_from_buf(buffer_from_event(ipsec_status_to_event(status)));
+}
+
+/* Allocate an IPsec status event from the status pool.
+ * Returns ODP_IPSEC_STATUS_INVALID when the pool is exhausted. */
+static ipsec_status_t odp_ipsec_status_alloc(void)
+{
+	odp_buffer_t buf = odp_buffer_alloc(ipsec_status_pool);
+	odp_event_t ev;
+
+	if (odp_unlikely(ODP_BUFFER_INVALID == buf))
+		return ODP_IPSEC_STATUS_INVALID;
+
+	ev = odp_buffer_to_event(buf);
+	_odp_event_type_set(ev, ODP_EVENT_IPSEC_STATUS);
+
+	return _odp_ipsec_status_from_event(ev);
+}
+
+/* Free an IPsec status event back to its pool. */
+void _odp_ipsec_status_free(ipsec_status_t status)
+{
+	odp_event_t ev = ipsec_status_to_event(status);
+
+	/* Restore the plain buffer event type before returning it to the pool */
+	_odp_event_type_set(ev, ODP_EVENT_BUFFER);
+	odp_buffer_free(buffer_from_event(ev));
+}
+
+/* Allocate, fill and enqueue an IPsec status event.
+ *
+ * Returns 0 on success; -1 when allocation or enqueue fails (the event is
+ * freed on enqueue failure).
+ */
+int _odp_ipsec_status_send(odp_queue_t queue,
+			   odp_ipsec_status_id_t id,
+			   odp_ipsec_sa_t sa,
+			   int result,
+			   odp_ipsec_warn_t warn)
+{
+	ipsec_status_t ev = odp_ipsec_status_alloc();
+	ipsec_status_hdr_t *hdr;
+
+	if (ODP_IPSEC_STATUS_INVALID == ev)
+		return -1;
+
+	hdr = ipsec_status_hdr(ev);
+	hdr->status.id = id;
+	hdr->status.sa = sa;
+	hdr->status.result = result;
+	hdr->status.warn = warn;
+
+	if (odp_queue_enq(queue, ipsec_status_to_event(ev))) {
+		_odp_ipsec_status_free(ev);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Copy the status data out of an IPsec status event.
+ * Returns 0 on success, -1 for an invalid event. */
+int odp_ipsec_status(odp_ipsec_status_t *status, odp_event_t event)
+{
+	ipsec_status_t ipsec_ev;
+
+	if (odp_unlikely(ODP_EVENT_INVALID == event))
+		return -1;
+
+	ipsec_ev = _odp_ipsec_status_from_event(event);
+	if (odp_unlikely(ODP_IPSEC_STATUS_INVALID == ipsec_ev))
+		return -1;
+
+	*status = ipsec_status_hdr(ipsec_ev)->status;
+
+	return 0;
+}
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
new file mode 100644
index 000000000..89cc8aef7
--- /dev/null
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -0,0 +1,1307 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2018-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/atomic.h>
+#include <odp/api/crypto.h>
+#include <odp/api/ipsec.h>
+#include <odp/api/random.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/cpu_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_ipsec_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ring_mpmc_u32_internal.h>
+#include <odp_global_data.h>
+
+#include <string.h>
+#include <inttypes.h>
+
+/*
+ * SA state consists of state value in the high order bits of ipsec_sa_t::state
+ * and use counter in the low order bits.
+ *
+ * An SA cannot be destroyed if its use count is higher than one. Use counter
+ * is needed for the case SA lookup is done by us and not the application.
+ * In the latter case we rely on the fact that the application may not pass
+ * the SA as a parameter to an IPsec operation concurrently with a call
+ * to odp_ipsec_sa_disable().
+ *
+ * SAs that are free or being disabled cannot be found in SA lookup by ODP.
+ */
+#define IPSEC_SA_STATE_ACTIVE 0x00000000 /* SA is in use */
+#define IPSEC_SA_STATE_DISABLE 0x40000000 /* SA is being disabled */
+#define IPSEC_SA_STATE_FREE 0xc0000000 /* SA is unused and free */
+#define IPSEC_SA_STATE_MASK 0xc0000000 /* mask of state bits */
+
+#define SA_IDX_NONE UINT32_MAX
+
+/*
+ * We do not have global IPv4 ID counter that is accessed for every outbound
+ * packet. Instead, we split IPv4 ID space to fixed size blocks that we
+ * allocate to threads on demand. When a thread has used its block of IDs,
+ * it frees it and allocates a new block. Free blocks are kept in a ring so
+ * that the block last freed is the one to be allocated last to maximize
+ * the time before IPv4 ID reuse.
+ */
+#define IPV4_ID_BLOCK_SIZE 64 /* must be power of 2 */
+#define IPV4_ID_RING_SIZE (UINT16_MAX / IPV4_ID_BLOCK_SIZE)
+#define IPV4_ID_RING_MASK (IPV4_ID_RING_SIZE - 1)
+
+#if IPV4_ID_RING_SIZE <= ODP_THREAD_COUNT_MAX
+#warning IPV4_ID_RING_SIZE is too small for the maximum number of threads.
+#endif
+
+/*
+ * To avoid checking and updating the packet and byte counters in the
+ * SA for every packet, we increment the global counters once for several
+ * packets. We decrement a preallocated thread-local quota for every
+ * packet. When the quota runs out, we get a new quota by incrementing the
+ * global counter.
+ *
+ * This improves performance but the looser synchronization between
+ * threads makes life time warnings and errors somewhat inaccurate.
+ * The warnings and errors may get triggered a bit too early since
+ * some threads may still have unused quota when the first thread
+ * hits the limit.
+ */
+#define SA_LIFE_PACKETS_PREALLOC 64
+#define SA_LIFE_BYTES_PREALLOC 4000
+
+/* Per-SA, per-thread lifetime accounting state */
+typedef struct sa_thread_local_s {
+	/*
+	 * Packets that can be processed in this thread before looking at
+	 * the SA-global packet counter and checking hard and soft limits.
+	 */
+	odp_atomic_u32_t packet_quota;
+	/*
+	 * Bytes that can be processed in this thread before looking at
+	 * the SA-global byte counter and checking hard and soft limits.
+	 */
+	odp_atomic_u32_t byte_quota;
+	/*
+	 * Life time status when this thread last checked the global
+	 * counter(s).
+	 */
+	odp_ipsec_op_status_t lifetime_status;
+} sa_thread_local_t;
+
+/* Per-thread IPsec state: SA quotas and the thread's current IPv4 ID block */
+typedef struct ODP_ALIGNED_CACHE ipsec_thread_local_s {
+	sa_thread_local_t sa[CONFIG_IPSEC_MAX_NUM_SA];
+	uint16_t first_ipv4_id; /* first ID of current block of IDs */
+	uint16_t next_ipv4_id;  /* next ID to be used */
+} ipsec_thread_local_t;
+
+/* Global SA table shared by all threads */
+typedef struct ipsec_sa_table_t {
+	ipsec_sa_t ipsec_sa[CONFIG_IPSEC_MAX_NUM_SA];
+	/* IPv4 ID allocation ring, cache line aligned */
+	struct ODP_ALIGNED_CACHE {
+		ring_mpmc_u32_t ipv4_id_ring;
+		uint32_t ipv4_id_data[IPV4_ID_RING_SIZE] ODP_ALIGNED_CACHE;
+	} hot;
+	/* Singly linked list of free SA entries, protected by lock */
+	struct {
+		uint32_t head;
+		odp_spinlock_t lock;
+	} sa_freelist;
+	uint32_t max_num_sa;	/* actual SA limit (config and crypto capability) */
+	odp_shm_t shm;		/* backing shared memory reservation */
+	ipsec_thread_local_t per_thread[];
+} ipsec_sa_table_t;
+
+static ipsec_sa_table_t *ipsec_sa_tbl;
+
+/* SA table entry at the given index. */
+static inline ipsec_sa_t *ipsec_sa_entry(uint32_t ipsec_sa_idx)
+{
+	ipsec_sa_t *table = ipsec_sa_tbl->ipsec_sa;
+
+	return table + ipsec_sa_idx;
+}
+
+/* SA table entry for a handle. Handle values are 1-based table indexes. */
+static inline ipsec_sa_t *ipsec_sa_entry_from_hdl(odp_ipsec_sa_t ipsec_sa_hdl)
+{
+	uint32_t idx = _odp_typeval(ipsec_sa_hdl) - 1;
+
+	return ipsec_sa_entry(idx);
+}
+
+/* Handle for an SA table index. Index 0 maps to handle value 1 so that
+ * handle value 0 stays free for ODP_IPSEC_SA_INVALID. */
+static inline odp_ipsec_sa_t ipsec_sa_index_to_handle(uint32_t ipsec_sa_idx)
+{
+	uint32_t val = ipsec_sa_idx + 1;
+
+	return _odp_cast_scalar(odp_ipsec_sa_t, val);
+}
+
+/* Internal API: SA table entry for a valid (asserted) handle. */
+ipsec_sa_t *_odp_ipsec_sa_entry_from_hdl(odp_ipsec_sa_t sa)
+{
+	_ODP_ASSERT(ODP_IPSEC_SA_INVALID != sa);
+
+	return ipsec_sa_entry_from_hdl(sa);
+}
+
+/* Calling thread's local state for the given SA. */
+static inline sa_thread_local_t *ipsec_sa_thread_local(ipsec_sa_t *sa)
+{
+	ipsec_thread_local_t *tl = &ipsec_sa_tbl->per_thread[odp_thread_id()];
+
+	return &tl->sa[sa->ipsec_sa_idx];
+}
+
+/* Reset the per-thread quota and lifetime state of an SA for all threads. */
+static void init_sa_thread_local(ipsec_sa_t *sa)
+{
+	const int num_threads = odp_thread_count_max();
+
+	for (int n = 0; n < num_threads; n++) {
+		sa_thread_local_t *tl = &ipsec_sa_tbl->per_thread[n].sa[sa->ipsec_sa_idx];
+
+		odp_atomic_init_u32(&tl->packet_quota, 0);
+		odp_atomic_init_u32(&tl->byte_quota, 0);
+		tl->lifetime_status.all = 0;
+	}
+}
+
+/* Allocate and initialize the global SA table, the per-thread state and the
+ * IPv4 ID allocation ring.
+ *
+ * The SA count is limited by both CONFIG_IPSEC_MAX_NUM_SA and the number of
+ * sessions the crypto implementation supports. Returns 0 on success (or
+ * when IPsec is disabled), -1 on failure.
+ */
+int _odp_ipsec_sad_init_global(void)
+{
+	odp_crypto_capability_t crypto_capa;
+	uint32_t max_num_sa = CONFIG_IPSEC_MAX_NUM_SA;
+	uint64_t shm_size;
+	unsigned int thread_count_max = odp_thread_count_max();
+	odp_shm_t shm;
+	unsigned i;
+
+	if (odp_global_ro.disable.ipsec)
+		return 0;
+
+	crypto_capa.max_sessions = 0;
+
+	if (odp_crypto_capability(&crypto_capa)) {
+		_ODP_ERR("odp_crypto_capability() failed\n");
+		return -1;
+	}
+	if (max_num_sa > crypto_capa.max_sessions)
+		max_num_sa = crypto_capa.max_sessions;
+
+	shm_size = sizeof(ipsec_sa_table_t) +
+		   sizeof(ipsec_thread_local_t) * thread_count_max;
+
+	shm = odp_shm_reserve("_odp_ipsec_sa_table",
+			      shm_size,
+			      ODP_CACHE_LINE_SIZE,
+			      0);
+	if (shm == ODP_SHM_INVALID)
+		return -1;
+
+	ipsec_sa_tbl = odp_shm_addr(shm);
+	/* Fix: check the shm address before dereferencing it, like
+	 * _odp_ipsec_init_global() does. */
+	if (ipsec_sa_tbl == NULL) {
+		_ODP_ERR("ipsec: odp_shm_addr() failed\n");
+		odp_shm_free(shm);
+		return -1;
+	}
+	memset(ipsec_sa_tbl, 0, sizeof(ipsec_sa_table_t));
+	ipsec_sa_tbl->shm = shm;
+	ipsec_sa_tbl->max_num_sa = max_num_sa;
+
+	ring_mpmc_u32_init(&ipsec_sa_tbl->hot.ipv4_id_ring);
+	for (i = 0; i < thread_count_max; i++) {
+		/*
+		 * Make the current ID block fully used, forcing allocation
+		 * of a fresh block at first use.
+		 */
+		ipsec_sa_tbl->per_thread[i].first_ipv4_id = 0;
+		ipsec_sa_tbl->per_thread[i].next_ipv4_id = IPV4_ID_BLOCK_SIZE;
+	}
+	/*
+	 * Initialize IPv4 ID ring with ID blocks.
+	 *
+	 * The last ID block is left unused since the ring can hold
+	 * only IPV4_ID_RING_SIZE - 1 entries.
+	 */
+	for (i = 0; i < IPV4_ID_RING_SIZE - 1; i++) {
+		uint32_t data = i * IPV4_ID_BLOCK_SIZE;
+
+		ring_mpmc_u32_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+					ipsec_sa_tbl->hot.ipv4_id_data,
+					IPV4_ID_RING_MASK,
+					&data,
+					1);
+	}
+
+	/* Chain all SA entries into the freelist */
+	for (i = 0; i < ipsec_sa_tbl->max_num_sa; i++) {
+		ipsec_sa_t *ipsec_sa = ipsec_sa_entry(i);
+
+		ipsec_sa->ipsec_sa_hdl = ipsec_sa_index_to_handle(i);
+		ipsec_sa->ipsec_sa_idx = i;
+		ipsec_sa->next_sa = i + 1;
+		if (i == ipsec_sa_tbl->max_num_sa - 1)
+			ipsec_sa->next_sa = SA_IDX_NONE;
+		odp_atomic_init_u32(&ipsec_sa->state, IPSEC_SA_STATE_FREE);
+		odp_atomic_init_u64(&ipsec_sa->hot.bytes, 0);
+		odp_atomic_init_u64(&ipsec_sa->hot.packets, 0);
+	}
+	/* Fix: with zero SAs the freelist must be empty, not point to the
+	 * (uninitialized) entry 0. */
+	ipsec_sa_tbl->sa_freelist.head = max_num_sa ? 0 : SA_IDX_NONE;
+	odp_spinlock_init(&ipsec_sa_tbl->sa_freelist.lock);
+
+	return 0;
+}
+
+/* Tear down the global SA table.
+ *
+ * Complains about (but still clears) SAs that were not destroyed by the
+ * application. Returns 0 on success (or when IPsec is disabled), -1 when
+ * undestroyed SAs were found or the shm free failed.
+ */
+int _odp_ipsec_sad_term_global(void)
+{
+	uint32_t i;
+	ipsec_sa_t *ipsec_sa;
+	int rc = 0;
+
+	if (odp_global_ro.disable.ipsec)
+		return 0;
+
+	for (i = 0; i < ipsec_sa_tbl->max_num_sa; i++) {
+		ipsec_sa = ipsec_sa_entry(i);
+
+		if (odp_atomic_load_u32(&ipsec_sa->state) !=
+		    IPSEC_SA_STATE_FREE) {
+			_ODP_ERR("Not destroyed ipsec_sa: %u\n", ipsec_sa->ipsec_sa_idx);
+			rc = -1;
+		}
+		odp_atomic_store_u32(&ipsec_sa->state, IPSEC_SA_STATE_FREE);
+	}
+
+	if (odp_shm_free(ipsec_sa_tbl->shm) < 0) {
+		/* Fix: terminate the log message with a newline like the other
+		 * _ODP_ERR messages in this file. */
+		_ODP_ERR("shm free failed\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/* Maximum number of SAs supported (CONFIG_IPSEC_MAX_NUM_SA, possibly
+ * reduced by the crypto session capability at init time). */
+uint32_t _odp_ipsec_max_num_sa(void)
+{
+	return ipsec_sa_tbl->max_num_sa;
+}
+
+/* Take an SA entry off the freelist. Returns NULL when no entry is free. */
+static ipsec_sa_t *ipsec_sa_reserve(void)
+{
+	ipsec_sa_t *sa = NULL;
+	uint32_t head;
+
+	odp_spinlock_lock(&ipsec_sa_tbl->sa_freelist.lock);
+	head = ipsec_sa_tbl->sa_freelist.head;
+	if (SA_IDX_NONE != head) {
+		sa = ipsec_sa_entry(head);
+		ipsec_sa_tbl->sa_freelist.head = sa->next_sa;
+	}
+	odp_spinlock_unlock(&ipsec_sa_tbl->sa_freelist.lock);
+
+	return sa;
+}
+
+/* Return an SA entry to the freelist and mark it FREE.
+ *
+ * The FREE state is stored while the freelist lock is held, together with
+ * the freelist linkage update.
+ */
+static void ipsec_sa_release(ipsec_sa_t *ipsec_sa)
+{
+	odp_spinlock_lock(&ipsec_sa_tbl->sa_freelist.lock);
+	ipsec_sa->next_sa = ipsec_sa_tbl->sa_freelist.head;
+	ipsec_sa_tbl->sa_freelist.head = ipsec_sa->ipsec_sa_idx;
+	odp_atomic_store_u32(&ipsec_sa->state, IPSEC_SA_STATE_FREE);
+	odp_spinlock_unlock(&ipsec_sa_tbl->sa_freelist.lock);
+}
+
+/* Mark reserved SA as available now */
+static void ipsec_sa_publish(ipsec_sa_t *ipsec_sa)
+{
+	/* Release store: pairs with the acquire CAS in ipsec_sa_lock() so that
+	 * all SA initialization is visible once the ACTIVE state is observed */
+	odp_atomic_store_rel_u32(&ipsec_sa->state, IPSEC_SA_STATE_ACTIVE);
+}
+
+/* Take a use-count reference on an SA (low bits of the state word) if it is
+ * in the ACTIVE state.
+ *
+ * Returns 0 on success, -1 when the SA is free or being disabled.
+ */
+static int ipsec_sa_lock(ipsec_sa_t *ipsec_sa)
+{
+	int cas = 0;
+	uint32_t state = odp_atomic_load_u32(&ipsec_sa->state);
+
+	while (0 == cas) {
+		/*
+		 * This can be called from lookup path, so we really need this
+		 * check.
+		 */
+		if ((state & IPSEC_SA_STATE_MASK) != IPSEC_SA_STATE_ACTIVE)
+			return -1;
+
+		/* Acquire pairs with the release store in ipsec_sa_publish() */
+		cas = odp_atomic_cas_acq_u32(&ipsec_sa->state, &state,
+					     state + 1);
+	}
+
+	return 0;
+}
+
+/* Do not call directly, use _odp_ipsec_sa_unuse */
+/*
+ * Decrement the SA use count (low bits of the state word).
+ *
+ * Returns true when the observed pre-decrement state was exactly
+ * IPSEC_SA_STATE_DISABLE, signalling the caller to complete the pending
+ * disable. NOTE(review): the disable-side state transition is implemented
+ * elsewhere (odp_ipsec_sa_disable(), not visible here) — confirm the use
+ * count encoding matches this completion condition.
+ */
+static odp_bool_t ipsec_sa_unlock(ipsec_sa_t *ipsec_sa)
+{
+	int cas = 0;
+	uint32_t state = odp_atomic_load_u32(&ipsec_sa->state);
+
+	while (0 == cas)
+		cas = odp_atomic_cas_rel_u32(&ipsec_sa->state, &state,
+					     state - 1);
+
+	return state == IPSEC_SA_STATE_DISABLE;
+}
+
+/* Resolve an SA handle and take a use-count reference on it.
+ * Returns NULL when the SA is not in the active state. */
+ipsec_sa_t *_odp_ipsec_sa_use(odp_ipsec_sa_t sa)
+{
+	ipsec_sa_t *entry;
+
+	_ODP_ASSERT(ODP_IPSEC_SA_INVALID != sa);
+
+	entry = ipsec_sa_entry_from_hdl(sa);
+
+	return ipsec_sa_lock(entry) < 0 ? NULL : entry;
+}
+
+/* Release an SA reference taken with _odp_ipsec_sa_use() or SA lookup.
+ *
+ * When this was the last reference of an SA pending disable, an
+ * SA_DISABLE status event is sent to the SA destination queue. Sync mode
+ * SAs have an invalid queue and get no event.
+ */
+void _odp_ipsec_sa_unuse(ipsec_sa_t *ipsec_sa)
+{
+	odp_queue_t queue;
+	odp_ipsec_sa_t sa;
+	odp_ipsec_warn_t warn = { .all = 0 };
+
+	_ODP_ASSERT(NULL != ipsec_sa);
+
+	/* Read before the unlock — presumably the entry may be reused once
+	 * the last reference is dropped; confirm. */
+	queue = ipsec_sa->queue;
+	sa = ipsec_sa->ipsec_sa_hdl;
+
+	if (ipsec_sa_unlock(ipsec_sa) && ODP_QUEUE_INVALID != queue)
+		_odp_ipsec_status_send(queue,
+				       ODP_IPSEC_STATUS_SA_DISABLE,
+				       sa, 0, warn);
+}
+
+/* Initialize SA parameters to their defaults. */
+void odp_ipsec_sa_param_init(odp_ipsec_sa_param_t *param)
+{
+	memset(param, 0, sizeof(*param));
+	param->dest_queue = ODP_QUEUE_INVALID;
+	/* Default TTL / hop limit for tunnel mode outer headers */
+	param->outbound.tunnel.ipv4.ttl = 255;
+	param->outbound.tunnel.ipv6.hlimit = 255;
+}
+
+/* Return IV length required for the cipher for IPsec use.
+ * Returns (uint32_t)-1 for an unknown algorithm. */
+uint32_t _odp_ipsec_cipher_iv_len(odp_cipher_alg_t cipher)
+{
+	/* Cases grouped by IV length */
+	switch (cipher) {
+	case ODP_CIPHER_ALG_NULL:
+		return 0;
+	case ODP_CIPHER_ALG_DES:
+	case ODP_CIPHER_ALG_3DES_CBC:
+		return 8;
+	case ODP_CIPHER_ALG_AES_CCM:
+		return 11;
+	case ODP_CIPHER_ALG_AES_GCM:
+	case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+		return 12;
+	case ODP_CIPHER_ALG_AES_CBC:
+	case ODP_CIPHER_ALG_AES_CTR:
+		return 16;
+	default:
+		return (uint32_t)-1;
+	}
+}
+
+/* Return digest (ICV) length required for the auth algorithm for IPsec use.
+ * Returns (uint32_t)-1 for an unknown algorithm. */
+uint32_t _odp_ipsec_auth_digest_len(odp_auth_alg_t auth)
+{
+	/* Cases grouped by digest length */
+	switch (auth) {
+	case ODP_AUTH_ALG_NULL:
+		return 0;
+	case ODP_AUTH_ALG_MD5_HMAC:
+	case ODP_AUTH_ALG_SHA1_HMAC:
+	case ODP_AUTH_ALG_AES_XCBC_MAC:
+	case ODP_AUTH_ALG_AES_CMAC:
+		return 12;
+	case ODP_AUTH_ALG_SHA256_HMAC:
+	case ODP_AUTH_ALG_AES_GCM:
+	case ODP_AUTH_ALG_AES_GMAC:
+	case ODP_AUTH_ALG_AES_CCM:
+	case ODP_AUTH_ALG_CHACHA20_POLY1305:
+		return 16;
+	case ODP_AUTH_ALG_SHA384_HMAC:
+		return 24;
+	case ODP_AUTH_ALG_SHA512_HMAC:
+		return 32;
+	default:
+		return (uint32_t)-1;
+	}
+}
+
+/* Convert an ESP cipher block length to an alignment mask. The effective
+ * block length is at least 4 so that the ESP trailer stays 32-bit aligned. */
+static uint32_t esp_block_len_to_mask(uint32_t block_len)
+{
+	const uint32_t len = block_len < 4 ? 4 : block_len;
+
+	_ODP_ASSERT(_ODP_CHECK_IS_POWER2(len));
+
+	return len - 1;
+}
+
+/* AR window management initialization */
+static int ipsec_antireplay_init(ipsec_sa_t *ipsec_sa,
+				 const odp_ipsec_sa_param_t *param)
+{
+	uint16_t num_bkts = 0;
+
+	/* Reject window sizes beyond the supported maximum */
+	if (param->inbound.antireplay_ws > IPSEC_AR_WIN_SIZE_MAX) {
+		_ODP_ERR("Anti-replay window size %" PRIu32 " is not supported.\n",
+			 param->inbound.antireplay_ws);
+		return -1;
+	}
+
+	/* Window size 0 disables anti-replay checking */
+	ipsec_sa->antireplay = (param->inbound.antireplay_ws != 0);
+	if (!ipsec_sa->antireplay)
+		return 0;
+
+	ipsec_sa->in.ar.win_size = param->inbound.antireplay_ws;
+	/* Window size should be at least IPSEC_AR_WIN_SIZE_MIN */
+	if (ipsec_sa->in.ar.win_size < IPSEC_AR_WIN_SIZE_MIN)
+		ipsec_sa->in.ar.win_size = IPSEC_AR_WIN_SIZE_MIN;
+
+	num_bkts = IPSEC_AR_WIN_NUM_BUCKETS(ipsec_sa->in.ar.win_size);
+	ipsec_sa->in.ar.num_buckets = num_bkts;
+	odp_atomic_init_u64(&ipsec_sa->hot.in.wintop_seq, 0);
+	/* Clear the replay window buckets */
+	memset(ipsec_sa->hot.in.bucket_arr, 0, sizeof(uint64_t) * num_bkts);
+
+	odp_spinlock_init(&ipsec_sa->hot.in.lock);
+
+	return 0;
+}
+
+/* Snapshot creation-time parameters into the SA (sa_info). */
+static void store_sa_info(ipsec_sa_t *ipsec_sa, const odp_ipsec_sa_param_t *p)
+{
+	ipsec_sa->sa_info.cipher_alg = p->crypto.cipher_alg;
+	ipsec_sa->sa_info.cipher_key_len = p->crypto.cipher_key.length;
+	/* Fix: the extra key length must come from cipher_key_extra, not
+	 * cipher_key (compare the auth side below). */
+	ipsec_sa->sa_info.cipher_key_extra_len = p->crypto.cipher_key_extra.length;
+	ipsec_sa->sa_info.auth_alg = p->crypto.auth_alg;
+	ipsec_sa->sa_info.auth_key_len = p->crypto.auth_key.length;
+	ipsec_sa->sa_info.auth_key_extra_len = p->crypto.auth_key_extra.length;
+
+	ipsec_sa->sa_info.icv_len = p->crypto.icv_len;
+	ipsec_sa->sa_info.context_len = p->context_len;
+
+	/* Direction-specific parameters */
+	if (p->dir == ODP_IPSEC_DIR_INBOUND)
+		ipsec_sa->sa_info.in.antireplay_ws = p->inbound.antireplay_ws;
+	else
+		ipsec_sa->sa_info.out.mtu = p->outbound.mtu;
+}
+
+/* Fill the SA's CBC IV salt with cryptographic-quality random data.
+ * No-op for SAs that do not use CBC IVs. Returns 0 on success, -1 when the
+ * random generator fails. */
+static int init_cbc_salt(ipsec_sa_t *ipsec_sa)
+{
+	int filled = 0;
+
+	if (!ipsec_sa->use_cbc_iv)
+		return 0;
+
+	/* odp_random_data() may return fewer bytes than requested; retry
+	 * until the salt buffer is full */
+	while (filled < CBC_SALT_LEN) {
+		int rc = odp_random_data(&ipsec_sa->cbc_salt[filled],
+					 CBC_SALT_LEN - filled,
+					 ODP_RANDOM_CRYPTO);
+
+		if (rc < 0)
+			return -1;
+
+		filled += rc;
+	}
+
+	return 0;
+}
+
+/* Create an IPsec SA from the given parameters.
+ *
+ * Reserves a free SA table entry, stores lookup/tunnel/lifetime state,
+ * derives per-algorithm ESP parameters (IV length, padding mask, salt
+ * length and source), creates the underlying crypto session and finally
+ * publishes the SA. On any failure the entry is released and
+ * ODP_IPSEC_SA_INVALID is returned. */
+odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
+{
+ ipsec_sa_t *ipsec_sa;
+ odp_crypto_session_param_t crypto_param;
+ odp_crypto_ses_create_err_t ses_create_rc;
+ const odp_crypto_key_t *salt_param = NULL;
+
+ if (!odp_ipsec_cipher_capability(param->crypto.cipher_alg, NULL, 0) ||
+ !odp_ipsec_auth_capability(param->crypto.auth_alg, NULL, 0))
+ return ODP_IPSEC_SA_INVALID;
+
+ ipsec_sa = ipsec_sa_reserve();
+ if (NULL == ipsec_sa) {
+ _ODP_ERR("No more free SA\n");
+ return ODP_IPSEC_SA_INVALID;
+ }
+
+ store_sa_info(ipsec_sa, param);
+
+ ipsec_sa->proto = param->proto;
+ ipsec_sa->spi = param->spi;
+ ipsec_sa->context = param->context;
+ ipsec_sa->queue = param->dest_queue;
+ if (_odp_ipsec_is_sync_mode(param->dir)) {
+ /* Invalid queue indicates sync mode */
+ ipsec_sa->queue = ODP_QUEUE_INVALID;
+ }
+ ipsec_sa->mode = param->mode;
+ ipsec_sa->flags = 0;
+ ipsec_sa->esn = param->opt.esn;
+
+ if (ODP_IPSEC_DIR_INBOUND == param->dir) {
+ ipsec_sa->inbound = 1;
+ ipsec_sa->lookup_mode = param->inbound.lookup_mode;
+ if (ODP_IPSEC_LOOKUP_DSTADDR_SPI == ipsec_sa->lookup_mode) {
+ ipsec_sa->in.lookup_ver =
+ param->inbound.lookup_param.ip_version;
+ if (ODP_IPSEC_IPV4 == ipsec_sa->in.lookup_ver)
+ memcpy(&ipsec_sa->in.lookup_dst_ipv4,
+ param->inbound.lookup_param.dst_addr,
+ sizeof(ipsec_sa->in.lookup_dst_ipv4));
+ else
+ memcpy(&ipsec_sa->in.lookup_dst_ipv6,
+ param->inbound.lookup_param.dst_addr,
+ sizeof(ipsec_sa->in.lookup_dst_ipv6));
+ }
+
+ if (ipsec_antireplay_init(ipsec_sa, param))
+ goto error;
+ } else {
+ ipsec_sa->lookup_mode = ODP_IPSEC_LOOKUP_DISABLED;
+ /* ESP sequence numbers start from 1; the stored value is the
+ * next sequence number to use. */
+ odp_atomic_init_u64(&ipsec_sa->hot.out.seq, 1);
+ ipsec_sa->out.frag_mode = param->outbound.frag_mode;
+ odp_atomic_init_u32(&ipsec_sa->out.mtu, param->outbound.mtu);
+ }
+ ipsec_sa->dec_ttl = param->opt.dec_ttl;
+ ipsec_sa->copy_dscp = param->opt.copy_dscp;
+ ipsec_sa->copy_df = param->opt.copy_df;
+ ipsec_sa->copy_flabel = param->opt.copy_flabel;
+ ipsec_sa->udp_encap = param->opt.udp_encap;
+
+ odp_atomic_init_u64(&ipsec_sa->hot.bytes, 0);
+ odp_atomic_init_u64(&ipsec_sa->hot.packets, 0);
+ ipsec_sa->soft_limit_bytes = param->lifetime.soft_limit.bytes;
+ ipsec_sa->soft_limit_packets = param->lifetime.soft_limit.packets;
+ ipsec_sa->hard_limit_bytes = param->lifetime.hard_limit.bytes;
+ ipsec_sa->hard_limit_packets = param->lifetime.hard_limit.packets;
+
+ odp_atomic_init_u64(&ipsec_sa->stats.proto_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.auth_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.antireplay_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.alg_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.mtu_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.hard_exp_bytes_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.hard_exp_pkts_err, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_pkts, 0);
+ odp_atomic_init_u64(&ipsec_sa->stats.post_lifetime_err_bytes, 0);
+ odp_atomic_init_u32(&ipsec_sa->soft_expiry_notified, 0);
+
+ if (ODP_IPSEC_MODE_TUNNEL == ipsec_sa->mode &&
+ ODP_IPSEC_DIR_OUTBOUND == param->dir) {
+ if (ODP_IPSEC_TUNNEL_IPV4 == param->outbound.tunnel.type) {
+ ipsec_sa->tun_ipv4 = 1;
+ memcpy(&ipsec_sa->out.tun_ipv4.src_ip,
+ param->outbound.tunnel.ipv4.src_addr,
+ sizeof(ipsec_sa->out.tun_ipv4.src_ip));
+ memcpy(&ipsec_sa->out.tun_ipv4.dst_ip,
+ param->outbound.tunnel.ipv4.dst_addr,
+ sizeof(ipsec_sa->out.tun_ipv4.dst_ip));
+ ipsec_sa->out.tun_ipv4.param.src_addr =
+ &ipsec_sa->out.tun_ipv4.src_ip;
+ ipsec_sa->out.tun_ipv4.param.dst_addr =
+ &ipsec_sa->out.tun_ipv4.dst_ip;
+ ipsec_sa->out.tun_ipv4.param.ttl =
+ param->outbound.tunnel.ipv4.ttl;
+ ipsec_sa->out.tun_ipv4.param.dscp =
+ param->outbound.tunnel.ipv4.dscp;
+ ipsec_sa->out.tun_ipv4.param.df =
+ param->outbound.tunnel.ipv4.df;
+ } else {
+ ipsec_sa->tun_ipv4 = 0;
+ memcpy(&ipsec_sa->out.tun_ipv6.src_ip,
+ param->outbound.tunnel.ipv6.src_addr,
+ sizeof(ipsec_sa->out.tun_ipv6.src_ip));
+ memcpy(&ipsec_sa->out.tun_ipv6.dst_ip,
+ param->outbound.tunnel.ipv6.dst_addr,
+ sizeof(ipsec_sa->out.tun_ipv6.dst_ip));
+ /* Fix: was out.tun_ipv4.param.*; use the IPv6 view of
+ * the tunnel union like the rest of this branch, so
+ * correctness does not depend on the union members
+ * overlaying the address pointers identically. */
+ ipsec_sa->out.tun_ipv6.param.src_addr =
+ &ipsec_sa->out.tun_ipv6.src_ip;
+ ipsec_sa->out.tun_ipv6.param.dst_addr =
+ &ipsec_sa->out.tun_ipv6.dst_ip;
+ ipsec_sa->out.tun_ipv6.param.hlimit =
+ param->outbound.tunnel.ipv6.hlimit;
+ ipsec_sa->out.tun_ipv6.param.dscp =
+ param->outbound.tunnel.ipv6.dscp;
+ ipsec_sa->out.tun_ipv6.param.flabel =
+ param->outbound.tunnel.ipv6.flabel;
+ }
+ }
+
+ odp_crypto_session_param_init(&crypto_param);
+
+ /* Setup parameters and call crypto library to create session */
+ crypto_param.op = (ODP_IPSEC_DIR_INBOUND == param->dir) ?
+ ODP_CRYPTO_OP_DECODE :
+ ODP_CRYPTO_OP_ENCODE;
+ crypto_param.op_type = ODP_CRYPTO_OP_TYPE_BASIC;
+ crypto_param.auth_cipher_text = 1;
+ if (param->proto == ODP_IPSEC_AH)
+ crypto_param.hash_result_in_auth_range = 1;
+
+ crypto_param.op_mode = ODP_CRYPTO_SYNC;
+ crypto_param.compl_queue = ODP_QUEUE_INVALID;
+ crypto_param.output_pool = ODP_POOL_INVALID;
+
+ crypto_param.cipher_alg = param->crypto.cipher_alg;
+ crypto_param.cipher_key = param->crypto.cipher_key;
+ crypto_param.auth_alg = param->crypto.auth_alg;
+ crypto_param.auth_key = param->crypto.auth_key;
+
+ crypto_param.cipher_iv_len =
+ _odp_ipsec_cipher_iv_len(crypto_param.cipher_alg);
+
+ crypto_param.auth_digest_len =
+ _odp_ipsec_auth_digest_len(crypto_param.auth_alg);
+
+ /* Nonzero requested ICV length must match the algorithm digest */
+ if (param->crypto.icv_len != 0 &&
+ param->crypto.icv_len != crypto_param.auth_digest_len)
+ goto error;
+
+ /* (uint32_t)-1 marks an unsupported algorithm */
+ if ((uint32_t)-1 == crypto_param.cipher_iv_len ||
+ (uint32_t)-1 == crypto_param.auth_digest_len)
+ goto error;
+
+ ipsec_sa->salt_length = 0;
+ /* ESN higher 32 bits flag.
+ * This flag is set for individual algo's.
+ * This flag is reset for combined mode algo's and ODP_AUTH_ALG_NULL.
+ */
+ ipsec_sa->insert_seq_hi = (ipsec_sa->esn) ? 1 : 0;
+
+ switch (crypto_param.cipher_alg) {
+ case ODP_CIPHER_ALG_NULL:
+ ipsec_sa->esp_iv_len = 0;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
+ break;
+ case ODP_CIPHER_ALG_DES:
+ case ODP_CIPHER_ALG_3DES_CBC:
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(8);
+ break;
+ case ODP_CIPHER_ALG_AES_CBC:
+ ipsec_sa->use_cbc_iv = 1;
+ ipsec_sa->esp_iv_len = 16;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(16);
+ break;
+ case ODP_CIPHER_ALG_AES_CTR:
+ ipsec_sa->use_counter_iv = 1;
+ ipsec_sa->aes_ctr_iv = 1;
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
+ /* 4 byte nonse */
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.cipher_key_extra;
+ break;
+ case ODP_CIPHER_ALG_AES_GCM:
+ ipsec_sa->use_counter_iv = 1;
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.cipher_key_extra;
+ break;
+ case ODP_CIPHER_ALG_AES_CCM:
+ ipsec_sa->use_counter_iv = 1;
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(16);
+ ipsec_sa->salt_length = 3;
+ salt_param = &param->crypto.cipher_key_extra;
+ break;
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ ipsec_sa->use_counter_iv = 1;
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.cipher_key_extra;
+ break;
+ default:
+ goto error;
+ }
+
+ switch (crypto_param.auth_alg) {
+ case ODP_AUTH_ALG_AES_GCM:
+ case ODP_AUTH_ALG_AES_CCM:
+ /* Combined mode: ESN high bits go into the AAD, not into the
+ * packet */
+ if (ipsec_sa->esn) {
+ crypto_param.auth_aad_len = 12;
+ ipsec_sa->insert_seq_hi = 0;
+ } else {
+ crypto_param.auth_aad_len = 8;
+ }
+ break;
+ case ODP_AUTH_ALG_AES_GMAC:
+ if ((ODP_CIPHER_ALG_NULL != crypto_param.cipher_alg) ||
+ ipsec_sa->esn)
+ goto error;
+ ipsec_sa->use_counter_iv = 1;
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_pad_mask = esp_block_len_to_mask(1);
+ crypto_param.auth_iv_len = 12;
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.auth_key_extra;
+ break;
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ if (ipsec_sa->esn) {
+ crypto_param.auth_aad_len = 12;
+ ipsec_sa->insert_seq_hi = 0;
+ } else {
+ crypto_param.auth_aad_len = 8;
+ }
+ break;
+ default:
+ break;
+ }
+
+ ipsec_sa->icv_len = crypto_param.auth_digest_len;
+
+ /* For ODP_AUTH_ALG_NULL */
+ if (!ipsec_sa->icv_len)
+ ipsec_sa->insert_seq_hi = 0;
+
+ if (ipsec_sa->salt_length) {
+ if (ipsec_sa->salt_length > IPSEC_MAX_SALT_LEN) {
+ _ODP_ERR("IPSEC_MAX_SALT_LEN too small\n");
+ goto error;
+ }
+
+ if (ipsec_sa->salt_length != salt_param->length) {
+ _ODP_ERR("Bad extra keying material length: %i\n", salt_param->length);
+ goto error;
+ }
+
+ memcpy(ipsec_sa->salt, salt_param->data, ipsec_sa->salt_length);
+ }
+
+ if (init_cbc_salt(ipsec_sa))
+ goto error;
+
+ if (odp_crypto_session_create(&crypto_param, &ipsec_sa->session,
+ &ses_create_rc))
+ goto error;
+
+ init_sa_thread_local(ipsec_sa);
+
+ ipsec_sa_publish(ipsec_sa);
+
+ return ipsec_sa->ipsec_sa_hdl;
+
+error:
+ ipsec_sa_release(ipsec_sa);
+
+ return ODP_IPSEC_SA_INVALID;
+}
+
+/* Begin SA teardown: atomically set the DISABLE bit in the SA state word.
+ * For an async SA (valid destination queue) a disable-complete status
+ * event is sent; for a sync SA the call spins until all concurrent users
+ * have dropped their references. Returns -1 if already disabled. */
+int odp_ipsec_sa_disable(odp_ipsec_sa_t sa)
+{
+ ipsec_sa_t *ipsec_sa = ipsec_sa_entry_from_hdl(sa);
+ uint32_t state;
+ int cas = 0;
+
+ /* This is a custom rwlock implementation. It is not possible to use
+ * original rwlock, because there is no way to test if current code is
+ * the last reader when disable operation is pending. */
+ state = odp_atomic_load_u32(&ipsec_sa->state);
+
+ while (0 == cas) {
+ if (state & IPSEC_SA_STATE_DISABLE)
+ return -1;
+
+ /* On CAS failure 'state' is reloaded, so the DISABLE check
+ * above is re-evaluated on the fresh value. */
+ cas = odp_atomic_cas_acq_u32(&ipsec_sa->state, &state,
+ state | IPSEC_SA_STATE_DISABLE);
+ }
+
+ if (ODP_QUEUE_INVALID != ipsec_sa->queue) {
+ odp_ipsec_warn_t warn = { .all = 0 };
+
+ /*
+ * If there were not active state when we disabled SA,
+ * send the event.
+ */
+ if (0 == state)
+ _odp_ipsec_status_send(ipsec_sa->queue,
+ ODP_IPSEC_STATUS_SA_DISABLE,
+ ipsec_sa->ipsec_sa_hdl,
+ 0, warn);
+
+ return 0;
+ }
+
+ /* Sync mode: busy-wait until only the DISABLE bit remains, i.e. all
+ * in-flight users of this SA are gone. */
+ while (IPSEC_SA_STATE_DISABLE != state) {
+ odp_cpu_pause();
+ state = odp_atomic_load_u32(&ipsec_sa->state);
+ }
+
+ return 0;
+}
+
+/* Destroy a disabled SA: tear down its crypto session and return the
+ * table entry to the free pool. The SA must have been fully disabled
+ * (state equal to the DISABLE marker) before this call. */
+int odp_ipsec_sa_destroy(odp_ipsec_sa_t sa)
+{
+ ipsec_sa_t *entry = ipsec_sa_entry_from_hdl(sa);
+ int ret = 0;
+
+ if (odp_atomic_load_u32(&entry->state) != IPSEC_SA_STATE_DISABLE) {
+ _ODP_ERR("Destroying not disabled ipsec_sa: %u\n", entry->ipsec_sa_idx);
+ return -1;
+ }
+
+ if (odp_crypto_session_destroy(entry->session) < 0) {
+ _ODP_ERR("Error destroying crypto session for ipsec_sa: %u\n",
+ entry->ipsec_sa_idx);
+ ret = -1;
+ }
+
+ ipsec_sa_release(entry);
+
+ return ret;
+}
+
+/* Return the user context pointer stored at SA creation time. */
+void *odp_ipsec_sa_context(odp_ipsec_sa_t sa)
+{
+ return ipsec_sa_entry_from_hdl(sa)->context;
+}
+
+/* Convert an SA handle to a printable 64-bit value. */
+uint64_t odp_ipsec_sa_to_u64(odp_ipsec_sa_t sa)
+{
+ uint64_t val = _odp_pri(sa);
+
+ return val;
+}
+
+/* Atomically update the outbound path MTU of an SA. Always succeeds. */
+int odp_ipsec_sa_mtu_update(odp_ipsec_sa_t sa, uint32_t mtu)
+{
+ ipsec_sa_t *entry = ipsec_sa_entry_from_hdl(sa);
+
+ _ODP_ASSERT(NULL != entry);
+
+ odp_atomic_store_u32(&entry->out.mtu, mtu);
+
+ return 0;
+}
+
+/* Find the inbound SA matching the given lookup key.
+ *
+ * A full DSTADDR+SPI match wins immediately; otherwise the first
+ * SPI-only match found is remembered as 'best' and returned after the
+ * whole table has been scanned. Returns NULL if nothing matches.
+ *
+ * NOTE(review): ipsec_sa_lock() appears to take a use reference on the
+ * entry (skipped when it fails) — the returned SA is handed to the
+ * caller with that reference held; non-matching entries are released
+ * via _odp_ipsec_sa_unuse(). Confirm against ipsec_sa_lock()'s
+ * definition. */
+ipsec_sa_t *_odp_ipsec_sa_lookup(const ipsec_sa_lookup_t *lookup)
+{
+ uint32_t i;
+ ipsec_sa_t *best = NULL;
+
+ for (i = 0; i < ipsec_sa_tbl->max_num_sa; i++) {
+ ipsec_sa_t *ipsec_sa = ipsec_sa_entry(i);
+
+ if (ipsec_sa_lock(ipsec_sa) < 0)
+ continue;
+
+ if (ODP_IPSEC_LOOKUP_DSTADDR_SPI == ipsec_sa->lookup_mode &&
+ lookup->proto == ipsec_sa->proto &&
+ lookup->spi == ipsec_sa->spi &&
+ lookup->ver == ipsec_sa->in.lookup_ver &&
+ !memcmp(lookup->dst_addr,
+ lookup->ver == ODP_IPSEC_IPV4 ?
+ (void *)&ipsec_sa->in.lookup_dst_ipv4 :
+ (void *)&ipsec_sa->in.lookup_dst_ipv6,
+ lookup->ver == ODP_IPSEC_IPV4 ?
+ _ODP_IPV4ADDR_LEN :
+ _ODP_IPV6ADDR_LEN)) {
+ /* Exact match: drop any provisional SPI-only match */
+ if (NULL != best)
+ _odp_ipsec_sa_unuse(best);
+ return ipsec_sa;
+ } else if (NULL == best &&
+ ODP_IPSEC_LOOKUP_SPI == ipsec_sa->lookup_mode &&
+ lookup->proto == ipsec_sa->proto &&
+ lookup->spi == ipsec_sa->spi) {
+ best = ipsec_sa;
+ } else {
+ _odp_ipsec_sa_unuse(ipsec_sa);
+ }
+ }
+
+ return best;
+}
+
+/* Quick per-thread check whether a hard lifetime limit has already been
+ * hit for this SA. On expiry the sticky lifetime status bits are merged
+ * into *status and -1 is returned; otherwise 0. */
+int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa,
+ odp_ipsec_op_status_t *status)
+{
+ sa_thread_local_t *tl = ipsec_sa_thread_local(ipsec_sa);
+
+ if (!tl->lifetime_status.error.hard_exp_packets &&
+ !tl->lifetime_status.error.hard_exp_bytes)
+ return 0;
+
+ status->all |= tl->lifetime_status.all;
+
+ return -1;
+}
+
+/* Charge one packet of 'len' bytes against the SA lifetime.
+ *
+ * To avoid touching the shared hot counters on every packet, each thread
+ * holds a preallocated quota of packets/bytes. When the local quota runs
+ * out, a new chunk (SA_LIFE_*_PREALLOC) is claimed from the shared
+ * counters and the soft/hard limits are re-evaluated; the resulting
+ * warning/error bits are sticky in the thread-local lifetime status.
+ *
+ * Merges the lifetime status into *status and returns -1 when a hard
+ * limit has been exceeded, 0 otherwise. */
+int _odp_ipsec_sa_lifetime_update(ipsec_sa_t *ipsec_sa, uint32_t len,
+ odp_ipsec_op_status_t *status)
+{
+ sa_thread_local_t *sa_tl = ipsec_sa_thread_local(ipsec_sa);
+ uint64_t packets, bytes;
+ uint32_t tl_byte_quota;
+ uint32_t tl_pkt_quota;
+
+ tl_pkt_quota = odp_atomic_load_u32(&sa_tl->packet_quota);
+ if (odp_unlikely(tl_pkt_quota == 0)) {
+ /* Local quota exhausted: claim a new block from the shared
+ * counter and check limits against the post-claim total. */
+ packets = odp_atomic_fetch_add_u64(&ipsec_sa->hot.packets,
+ SA_LIFE_PACKETS_PREALLOC);
+ packets += SA_LIFE_PACKETS_PREALLOC;
+ tl_pkt_quota += SA_LIFE_PACKETS_PREALLOC;
+
+ if (ipsec_sa->soft_limit_packets > 0 &&
+ packets >= ipsec_sa->soft_limit_packets)
+ sa_tl->lifetime_status.warn.soft_exp_packets = 1;
+
+ if (ipsec_sa->hard_limit_packets > 0 &&
+ packets >= ipsec_sa->hard_limit_packets)
+ sa_tl->lifetime_status.error.hard_exp_packets = 1;
+ }
+ tl_pkt_quota--;
+ odp_atomic_store_u32(&sa_tl->packet_quota, tl_pkt_quota);
+
+ tl_byte_quota = odp_atomic_load_u32(&sa_tl->byte_quota);
+ if (odp_unlikely(tl_byte_quota < len)) {
+ /* Refill includes 'len', so the decrement below cannot
+ * underflow the quota. */
+ bytes = odp_atomic_fetch_add_u64(&ipsec_sa->hot.bytes,
+ len + SA_LIFE_BYTES_PREALLOC);
+ bytes += len + SA_LIFE_BYTES_PREALLOC;
+ tl_byte_quota += len + SA_LIFE_BYTES_PREALLOC;
+
+ if (ipsec_sa->soft_limit_bytes > 0 &&
+ bytes >= ipsec_sa->soft_limit_bytes)
+ sa_tl->lifetime_status.warn.soft_exp_bytes = 1;
+
+ if (ipsec_sa->hard_limit_bytes > 0 &&
+ bytes >= ipsec_sa->hard_limit_bytes)
+ sa_tl->lifetime_status.error.hard_exp_bytes = 1;
+ }
+ tl_byte_quota -= len;
+ odp_atomic_store_u32(&sa_tl->byte_quota, tl_byte_quota);
+
+
+ status->all |= sa_tl->lifetime_status.all;
+
+ if (sa_tl->lifetime_status.error.hard_exp_packets ||
+ sa_tl->lifetime_status.error.hard_exp_bytes)
+ return -1;
+ return 0;
+}
+
+/* Highest sequence number recorded in the antireplay window. Without ESN
+ * only the low 32 bits of the stored window-top value are significant. */
+static uint64_t ipsec_sa_antireplay_max_seq(ipsec_sa_t *ipsec_sa)
+{
+ uint64_t top = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
+
+ return ipsec_sa->esn ? top : (top & 0xffffffff);
+}
+
+/* Cheap, unlocked early drop of packets that are clearly below the
+ * antireplay window; the authoritative check happens later in
+ * _odp_ipsec_sa_replay_update(). Returns -1 (antireplay error set) for
+ * obviously stale sequence numbers, 0 otherwise. */
+int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint64_t seq,
+ odp_ipsec_op_status_t *status)
+{
+ uint64_t top;
+
+ if (!ipsec_sa->antireplay)
+ return 0;
+
+ top = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
+ if (!ipsec_sa->esn)
+ top &= 0xffffffff;
+
+ if (seq + ipsec_sa->in.ar.win_size > top)
+ return 0;
+
+ status->error.antireplay = 1;
+ return -1;
+}
+
+/* Antireplay check-and-update for large (or ESN) windows.
+ *
+ * The window is a bitmap split into 64-bit buckets (bucket_arr), indexed
+ * circularly by seq >> IPSEC_AR_WIN_BUCKET_BITS, all under the per-SA
+ * spinlock. Returns 0 if the packet is new (and marks it seen), -1 with
+ * status->error.antireplay set for stale or duplicate sequence numbers. */
+static inline int ipsec_wslarge_replay_update(ipsec_sa_t *ipsec_sa, uint64_t seq,
+ odp_ipsec_op_status_t *status)
+{
+ uint64_t bucket, wintop_bucket, new_bucket;
+ uint64_t bkt_diff, bkt_cnt;
+ uint64_t bit = 0, top_seq;
+
+ odp_spinlock_lock(&ipsec_sa->hot.in.lock);
+
+ /* Re-check under the lock: below the window means stale */
+ top_seq = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
+ if ((seq + ipsec_sa->in.ar.win_size) <= top_seq)
+ goto ar_err;
+
+ bucket = (seq >> IPSEC_AR_WIN_BUCKET_BITS);
+
+ /* Check if the seq is within the range */
+ if (seq > top_seq) {
+ wintop_bucket = top_seq >> IPSEC_AR_WIN_BUCKET_BITS;
+ bkt_diff = bucket - wintop_bucket;
+
+ /* Seq is way after the range of AR window size */
+ if (bkt_diff > ipsec_sa->in.ar.num_buckets)
+ bkt_diff = ipsec_sa->in.ar.num_buckets;
+
+ /* Clear the buckets the window slides over */
+ for (bkt_cnt = 0; bkt_cnt < bkt_diff; bkt_cnt++) {
+ new_bucket = (bkt_cnt + wintop_bucket + 1) %
+ ipsec_sa->in.ar.num_buckets;
+ ipsec_sa->hot.in.bucket_arr[new_bucket] = 0;
+ }
+
+ /* AR window top sequence number */
+ odp_atomic_store_u64(&ipsec_sa->hot.in.wintop_seq, seq);
+ }
+
+ bucket %= ipsec_sa->in.ar.num_buckets;
+ bit = (uint64_t)1 << (seq & IPSEC_AR_WIN_BITLOC_MASK);
+
+ /* Already seen the packet, discard it */
+ if (ipsec_sa->hot.in.bucket_arr[bucket] & bit)
+ goto ar_err;
+
+ /* Packet is new, mark it as seen */
+ ipsec_sa->hot.in.bucket_arr[bucket] |= bit;
+ odp_spinlock_unlock(&ipsec_sa->hot.in.lock);
+
+ return 0;
+ar_err:
+ status->error.antireplay = 1;
+ odp_spinlock_unlock(&ipsec_sa->hot.in.lock);
+ return -1;
+}
+
+/* Lock-free antireplay check-and-update for the minimum 32-bit window.
+ *
+ * The whole window state is packed into one 64-bit atomic:
+ * high 32 bits = window bitmask, low 32 bits = highest seen sequence
+ * number. The state is updated with an acquire-release CAS loop; on CAS
+ * failure 'state' is reloaded and the whole decision is recomputed.
+ * Returns 0 if the packet is new, -1 (antireplay error) otherwise. */
+static inline int ipsec_ws32_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
+ odp_ipsec_op_status_t *status)
+{
+ uint64_t state, new_state;
+ int cas = 0;
+
+ state = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
+ while (0 == cas) {
+ uint32_t max_seq = state & 0xffffffff;
+ uint32_t mask = state >> 32;
+
+ if (seq + IPSEC_AR_WIN_SIZE_MIN <= max_seq) {
+ /* Below the window: stale */
+ status->error.antireplay = 1;
+ return -1;
+ } else if (seq >= max_seq + IPSEC_AR_WIN_SIZE_MIN) {
+ /* Entirely beyond the window: restart the mask.
+ * Guarantees the shifts below are always < 32. */
+ mask = 1;
+ max_seq = seq;
+ } else if (seq > max_seq) {
+ /* Advance the window top, shifting the mask */
+ mask <<= seq - max_seq;
+ mask |= 1;
+ max_seq = seq;
+ } else if (mask & (1U << (max_seq - seq))) {
+ /* Inside the window and already seen: duplicate */
+ status->error.antireplay = 1;
+ return -1;
+ } else {
+ /* Inside the window and new: mark as seen */
+ mask |= (1U << (max_seq - seq));
+ }
+
+ new_state = (((uint64_t)mask) << 32) | max_seq;
+ cas = odp_atomic_cas_acq_rel_u64(&ipsec_sa->hot.in.wintop_seq,
+ &state, new_state);
+ }
+ return 0;
+}
+
+/* Antireplay window update dispatcher: non-ESN SAs with the minimum
+ * window size use the compact lock-free 32-bit variant, everything else
+ * the bucketed, spinlock-protected one. */
+int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint64_t seq,
+ odp_ipsec_op_status_t *status)
+{
+ if (!ipsec_sa->esn &&
+ ipsec_sa->in.ar.win_size == IPSEC_AR_WIN_SIZE_MIN)
+ return ipsec_ws32_replay_update(ipsec_sa, seq & 0xffffffff,
+ status);
+
+ return ipsec_wslarge_replay_update(ipsec_sa, seq, status);
+}
+
+/* Allocate a unique-enough IPv4 header ID for tunnel-mode encapsulation.
+ *
+ * IDs are handed out from per-thread blocks of IPV4_ID_BLOCK_SIZE; when
+ * a thread's block is exhausted it is returned to the global MPMC ring
+ * and a fresh block is dequeued.
+ *
+ * NOTE(review): the dequeue result is not checked — presumably the ring
+ * can never be empty because every block is eventually returned; confirm
+ * against the ring initialization. */
+uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa)
+{
+ (void)ipsec_sa; /* parameter currently unused; IDs are global */
+ ipsec_thread_local_t *tl = &ipsec_sa_tbl->per_thread[odp_thread_id()];
+ uint32_t data;
+
+ if (odp_unlikely(tl->next_ipv4_id ==
+ tl->first_ipv4_id + IPV4_ID_BLOCK_SIZE)) {
+ /* Return used ID block to the ring */
+ data = tl->first_ipv4_id;
+ ring_mpmc_u32_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
+ /* Get new ID block */
+ ring_mpmc_u32_deq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
+ tl->first_ipv4_id = data;
+ tl->next_ipv4_id = data;
+ }
+
+ /* No need to convert to BE: ID just should not be duplicated */
+ return tl->next_ipv4_id++;
+}
+
+/* Fill success packet/byte counters for an SA.
+ *
+ * Field 'hot.packets' tracks SA lifetime. The same field is being used
+ * to track the number of success packets.
+ *
+ * SA lifetime tracking implements a per thread packet quota to allow
+ * less frequent updates to the hot field. The per thread quota need
+ * to be decremented. In addition, SA lifetime gets consumed for any
+ * errors occurring after lifetime check is done. Those packets also
+ * need to be accounted for. */
+void _odp_ipsec_sa_stats_pkts(ipsec_sa_t *sa, odp_ipsec_stats_t *stats)
+{
+ const int num_thr = odp_thread_count_max();
+ uint64_t unused_pkts = 0;
+ uint64_t unused_bytes = 0;
+ int thr;
+
+ for (thr = 0; thr < num_thr; thr++) {
+ sa_thread_local_t *tl =
+ &ipsec_sa_tbl->per_thread[thr].sa[sa->ipsec_sa_idx];
+
+ unused_pkts += odp_atomic_load_u32(&tl->packet_quota);
+ unused_bytes += odp_atomic_load_u32(&tl->byte_quota);
+ }
+
+ stats->success = odp_atomic_load_u64(&sa->hot.packets)
+ - odp_atomic_load_u64(&sa->stats.post_lifetime_err_pkts)
+ - unused_pkts;
+
+ stats->success_bytes = odp_atomic_load_u64(&sa->hot.bytes)
+ - odp_atomic_load_u64(&sa->stats.post_lifetime_err_bytes)
+ - unused_bytes;
+}
+
+/* Fill the outbound-specific part of odp_ipsec_sa_info_t.
+ *
+ * NOTE(review): both the ipv4 and ipv6 tunnel parameter views are read
+ * unconditionally below — this assumes out.tun_ipv4/out.tun_ipv6 overlay
+ * each other (a union) so reading the inactive view is harmless; confirm
+ * against the ipsec_sa_t definition. */
+static void ipsec_out_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
+{
+ odp_ipsec_tunnel_param_t *tun_param = &sa_info->param.outbound.tunnel;
+
+ tun_param->type = ipsec_sa->tun_ipv4 ? ODP_IPSEC_TUNNEL_IPV4 :
+ ODP_IPSEC_TUNNEL_IPV6;
+ tun_param->ipv4.dscp = ipsec_sa->out.tun_ipv4.param.dscp;
+ tun_param->ipv4.df = ipsec_sa->out.tun_ipv4.param.df;
+ tun_param->ipv4.ttl = ipsec_sa->out.tun_ipv4.param.ttl;
+ tun_param->ipv6.flabel = ipsec_sa->out.tun_ipv6.param.flabel;
+ tun_param->ipv6.dscp = ipsec_sa->out.tun_ipv6.param.dscp;
+ tun_param->ipv6.hlimit = ipsec_sa->out.tun_ipv6.param.hlimit;
+
+ sa_info->param.outbound.frag_mode = ipsec_sa->out.frag_mode;
+ sa_info->param.outbound.mtu = ipsec_sa->sa_info.out.mtu;
+
+ /* hot.out.seq holds the next sequence number to use, so the last
+ * used one is seq - 1. */
+ sa_info->outbound.seq_num =
+ (uint64_t)odp_atomic_load_u64(&ipsec_sa->hot.out.seq) - 1;
+
+ if (ipsec_sa->mode == ODP_IPSEC_MODE_TUNNEL) {
+ uint8_t *src, *dst;
+
+ /* Copy tunnel endpoints into the caller-visible buffers and
+ * point the returned parameters at them. */
+ if (ipsec_sa->tun_ipv4) {
+ src = sa_info->outbound.tunnel.ipv4.src_addr;
+ dst = sa_info->outbound.tunnel.ipv4.dst_addr;
+ memcpy(src, &ipsec_sa->out.tun_ipv4.src_ip,
+ ODP_IPV4_ADDR_SIZE);
+ memcpy(dst, &ipsec_sa->out.tun_ipv4.dst_ip,
+ ODP_IPV4_ADDR_SIZE);
+ tun_param->ipv4.src_addr = src;
+ tun_param->ipv4.dst_addr = dst;
+ } else {
+ src = sa_info->outbound.tunnel.ipv6.src_addr;
+ dst = sa_info->outbound.tunnel.ipv6.dst_addr;
+ memcpy(src, &ipsec_sa->out.tun_ipv6.src_ip,
+ ODP_IPV6_ADDR_SIZE);
+ memcpy(dst, &ipsec_sa->out.tun_ipv6.dst_ip,
+ ODP_IPV6_ADDR_SIZE);
+ tun_param->ipv6.src_addr = src;
+ tun_param->ipv6.dst_addr = dst;
+ }
+ }
+}
+
+/* Fill the inbound-specific part of odp_ipsec_sa_info_t: lookup mode and
+ * address (copied into the caller-visible buffer) plus the current
+ * antireplay window state. */
+static void ipsec_in_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
+{
+ uint8_t *dst = sa_info->inbound.lookup_param.dst_addr;
+
+ sa_info->param.inbound.lookup_mode = ipsec_sa->lookup_mode;
+ sa_info->param.inbound.lookup_param.ip_version = ipsec_sa->in.lookup_ver;
+ /* Point the returned lookup address at the buffer filled below.
+ * (The original assigned this twice; the duplicate is removed.) */
+ sa_info->param.inbound.lookup_param.dst_addr = dst;
+ sa_info->param.inbound.antireplay_ws = ipsec_sa->sa_info.in.antireplay_ws;
+ sa_info->param.inbound.pipeline = ODP_IPSEC_PIPELINE_NONE;
+ sa_info->param.inbound.dest_cos = ODP_COS_INVALID;
+ sa_info->param.inbound.reassembly_en = false;
+
+ if (ipsec_sa->lookup_mode == ODP_IPSEC_LOOKUP_DSTADDR_SPI) {
+ if (ipsec_sa->in.lookup_ver == ODP_IPSEC_IPV4)
+ memcpy(dst, &ipsec_sa->in.lookup_dst_ipv4,
+ ODP_IPV4_ADDR_SIZE);
+ else
+ memcpy(dst, &ipsec_sa->in.lookup_dst_ipv6,
+ ODP_IPV6_ADDR_SIZE);
+ }
+
+ if (ipsec_sa->antireplay) {
+ sa_info->inbound.antireplay_ws = ipsec_sa->in.ar.win_size;
+ sa_info->inbound.antireplay_window_top =
+ ipsec_sa_antireplay_max_seq(ipsec_sa);
+ }
+}
+
+/* Retrieve the creation parameters and runtime state of an SA.
+ *
+ * The returned parameters are reconstructed from the sa_info snapshot
+ * taken at creation time; key material pointers are deliberately NULL
+ * (only lengths are reported). Direction-specific fields are filled by
+ * ipsec_in_sa_info()/ipsec_out_sa_info(). Always returns 0. */
+int odp_ipsec_sa_info(odp_ipsec_sa_t sa, odp_ipsec_sa_info_t *sa_info)
+{
+ ipsec_sa_t *ipsec_sa;
+ odp_ipsec_sa_param_t *param;
+
+ ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
+
+ _ODP_ASSERT(ipsec_sa != NULL);
+ _ODP_ASSERT(sa_info != NULL);
+
+ memset(sa_info, 0, sizeof(*sa_info));
+ param = &sa_info->param;
+
+ param->dir = ipsec_sa->inbound ? ODP_IPSEC_DIR_INBOUND :
+ ODP_IPSEC_DIR_OUTBOUND;
+ param->proto = ipsec_sa->proto;
+ param->mode = ipsec_sa->mode;
+
+ /* Algorithms and key lengths only; the key data itself is not kept */
+ param->crypto.cipher_alg = ipsec_sa->sa_info.cipher_alg;
+ param->crypto.cipher_key.data = NULL;
+ param->crypto.cipher_key.length = ipsec_sa->sa_info.cipher_key_len;
+ param->crypto.cipher_key_extra.data = NULL;
+ param->crypto.cipher_key_extra.length = ipsec_sa->sa_info.cipher_key_extra_len;
+ param->crypto.auth_alg = ipsec_sa->sa_info.auth_alg;
+ param->crypto.auth_key.data = NULL;
+ param->crypto.auth_key.length = ipsec_sa->sa_info.auth_key_len;
+ param->crypto.auth_key_extra.data = NULL;
+ param->crypto.auth_key_extra.length = ipsec_sa->sa_info.auth_key_extra_len;
+ param->crypto.icv_len = ipsec_sa->sa_info.icv_len;
+
+ param->opt.esn = ipsec_sa->esn;
+ param->opt.udp_encap = ipsec_sa->udp_encap;
+ param->opt.copy_dscp = ipsec_sa->copy_dscp;
+ param->opt.copy_flabel = ipsec_sa->copy_flabel;
+ param->opt.copy_df = ipsec_sa->copy_df;
+ param->opt.dec_ttl = ipsec_sa->dec_ttl;
+
+ param->lifetime.soft_limit.bytes = ipsec_sa->soft_limit_bytes;
+ param->lifetime.soft_limit.packets = ipsec_sa->soft_limit_packets;
+ param->lifetime.hard_limit.bytes = ipsec_sa->hard_limit_bytes;
+ param->lifetime.hard_limit.packets = ipsec_sa->hard_limit_packets;
+
+ param->spi = ipsec_sa->spi;
+ param->dest_queue = ipsec_sa->queue;
+ param->context = ipsec_sa->context;
+ param->context_len = ipsec_sa->sa_info.context_len;
+
+ if (ipsec_sa->inbound)
+ ipsec_in_sa_info(ipsec_sa, sa_info);
+ else
+ ipsec_out_sa_info(ipsec_sa, sa_info);
+
+ return 0;
+}
diff --git a/platform/linux-generic/_ishm.c b/platform/linux-generic/odp_ishm.c
index c1efd7d23..3ef1894bc 100644
--- a/platform/linux-generic/_ishm.c
+++ b/platform/linux-generic/odp_ishm.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2019, Nokia
+ * Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -12,32 +13,20 @@
* internal shared memory is guaranteed to always be located at the same virtual
* address, i.e. pointers to internal shared memory are fully shareable
* between odp threads (regardless of thread type or fork time) in that case.
- * Internal shared memory is mainly meant to be used internaly within ODP
+ * Internal shared memory is mainly meant to be used internally within ODP
* (hence its name), but may also be allocated by odp applications and drivers,
* in the future (through these interfaces).
- * To guarrentee this full pointer shareability (when reserved with the
- * _ODP_ISHM_SINGLE_VA flag) internal shared memory is handled as follows:
- * At global_init time, a huge virtual address space reservation is performed.
- * Note that this is just reserving virtual space, not physical memory.
+ * To guarantee this full pointer shareability (when reserved with the
+ * _ODP_ISHM_SINGLE_VA flag) the whole internal shared memory area is reserved
+ * at global_init time.
* Because all ODP threads (pthreads or processes) are descendants of the ODP
- * instantiation process, this VA space is inherited by all ODP threads.
- * When internal shmem reservation actually occurs, and
- * when reserved with the _ODP_ISHM_SINGLE_VA flag, physical memory is
- * allocated, and mapped (MAP_FIXED) to some part in the huge preallocated
- * address space area:
- * because this virtual address space is common to all ODP threads, we
- * know this mapping will succeed, and not clash with anything else.
- * Hence, an ODP threads which perform a lookup for the same ishm block
- * can map it at the same VA address.
- * When internal shared memory is released, the physical memory is released
- * and the corresponding virtual space returned to its "pool" of preallocated
- * virtual space (assuming it was allocated from there).
- * Note, though, that, if 2 linux processes share the same ishm block,
- * the virtual space is marked as released as soon as one of the processes
- * releases the ishm block, but the physical memory space is actually released
- * by the kernel once all processes have done a ishm operation (i,e. a sync).
- * This is due to the fact that linux does not contain any syscall to unmap
- * memory from a different process.
+ * instantiation process, this address space is inherited by all ODP threads.
+ * When internal shmem reservation actually occurs, and when reserved with the
+ * _ODP_ISHM_SINGLE_VA flag, memory is allocated from the pre-reserved single
+ * VA memory.
+ * When an internal shared memory block is released, the memory is returned to
+ * its "pool" of pre-reserved memory (assuming it was allocated from there). The
+ * memory is not returned back to kernel until odp_term_global().
*
* This file contains functions to handle the VA area (handling fragmentation
* and defragmentation resulting from different allocs/release) and also
@@ -47,17 +36,19 @@
*/
#include <odp_posix_extensions.h>
#include <odp_config_internal.h>
-#include <odp_internal.h>
+#include <odp_global_data.h>
#include <odp/api/spinlock.h>
#include <odp/api/align.h>
#include <odp/api/system_info.h>
#include <odp/api/debug.h>
+#include <odp_init_internal.h>
#include <odp_shm_internal.h>
#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
-#include <_fdserver_internal.h>
-#include <_ishm_internal.h>
-#include <_ishmphy_internal.h>
+#include <odp_fdserver_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_ishmphy_internal.h>
+#include <odp_ishmpool_internal.h>
+#include <odp_libconfig_internal.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
@@ -71,33 +62,37 @@
#include <inttypes.h>
#include <sys/wait.h>
#include <libgen.h>
+#include <sys/types.h>
+#include <dirent.h>
/*
* Maximum number of internal shared memory blocks.
*
* This is the number of separate ISHM areas that can be reserved concurrently
* (Note that freeing such blocks may take time, or possibly never happen
- * if some of the block ownwers never procsync() after free). This number
+ * if some of the block owners never procsync() after free). This number
* should take that into account)
*/
-#define ISHM_MAX_NB_BLOCKS 128
+#define ISHM_MAX_NB_BLOCKS (CONFIG_INTERNAL_SHM_BLOCKS + CONFIG_SHM_BLOCKS)
/*
* Maximum internal shared memory block name length in chars
* probably taking the same number as SHM name size make sense at this stage
*/
-#define ISHM_NAME_MAXLEN 32
+#define ISHM_NAME_MAXLEN 128
/*
* Linux underlying file name: <directory>/odp-<odp_pid>-ishm-<name>
* The <name> part may be replaced by a sequence number if no specific
* name is given at reserve time
- * <directory> is either /tmp or the hugepagefs mount point for default size.
+ * <directory> is either /dev/shm or the hugepagefs mount point for default
+ * size.
* (searched at init time)
*/
#define ISHM_FILENAME_MAXLEN (ISHM_NAME_MAXLEN + 64)
#define ISHM_FILENAME_FORMAT "%s/odp-%d-ishm-%s"
-#define ISHM_FILENAME_NORMAL_PAGE_DIR "/tmp"
+#define ISHM_FILENAME_NORMAL_PAGE_DIR "/dev/shm"
+#define _ODP_FILES_FMT "odp-%d-"
/*
* when the memory is to be shared with an external entity (such as another
@@ -105,7 +100,7 @@
* export file is created describing the exported memory: this defines the
* location and the filename format of this description file
*/
-#define ISHM_EXPTNAME_FORMAT "/tmp/odp-%d-shm-%s"
+#define ISHM_EXPTNAME_FORMAT "%s/%s/odp-%d-shm-%s"
/*
* At worse case the virtual space gets so fragmented that there is
@@ -117,7 +112,7 @@
/*
* when a memory block is to be exported outside its ODP instance,
- * an block 'attribute file' is created in /tmp/odp-<pid>-shm-<name>.
+ * an block 'attribute file' is created in /dev/shm/odp-<pid>-shm-<name>.
* The information given in this file is according to the following:
*/
#define EXPORT_FILE_LINE1_FMT "ODP exported shm block info:"
@@ -128,6 +123,8 @@
#define EXPORT_FILE_LINE6_FMT "user_length: %" PRIu64
#define EXPORT_FILE_LINE7_FMT "user_flags: %" PRIu32
#define EXPORT_FILE_LINE8_FMT "align: %" PRIu32
+#define EXPORT_FILE_LINE9_FMT "offset: %" PRIu64
+
/*
* A fragment describes a piece of the shared virtual address space,
* and is allocated only when allocation is done with the _ODP_ISHM_SINGLE_VA
@@ -155,7 +152,7 @@ typedef struct ishm_fragment {
* will allocate both a block and a fragment.
* Blocks contain only global data common to all processes.
*/
-typedef enum {UNKNOWN, HUGE, NORMAL, EXTERNAL} huge_flag_t;
+typedef enum {UNKNOWN, HUGE, NORMAL, EXTERNAL, CACHED} huge_flag_t;
typedef struct ishm_block {
char name[ISHM_NAME_MAXLEN]; /* name for the ishm block (if any) */
char filename[ISHM_FILENAME_MAXLEN]; /* name of the .../odp-* file */
@@ -164,6 +161,7 @@ typedef struct ishm_block {
uint32_t flags; /* block creation flags. */
uint32_t external_fd:1; /* block FD was externally provided */
uint64_t user_len; /* length, as requested at reserve time. */
+ uint64_t offset; /* offset from beginning of the fd */
void *start; /* only valid if _ODP_ISHM_SINGLE_VA is set*/
uint64_t len; /* length. multiple of page size. 0 if free*/
ishm_fragment_t *fragment; /* used when _ODP_ISHM_SINGLE_VA is used */
@@ -182,8 +180,14 @@ typedef struct ishm_block {
typedef struct {
odp_spinlock_t lock;
uint64_t dev_seq; /* used when creating device names */
+ /* limit for reserving memory using huge pages */
+ uint64_t huge_page_limit;
uint32_t odpthread_cnt; /* number of running ODP threads */
ishm_block_t block[ISHM_MAX_NB_BLOCKS];
+ void *single_va_start; /* start of single VA memory */
+ int single_va_fd; /* single VA memory file descriptor */
+ odp_bool_t single_va_huge; /* single VA memory from huge pages */
+ char single_va_filename[ISHM_FILENAME_MAXLEN];
} ishm_table_t;
static ishm_table_t *ishm_tbl;
@@ -229,6 +233,16 @@ typedef struct {
} ishm_ftable_t;
static ishm_ftable_t *ishm_ftbl;
+struct huge_page_cache {
+ uint64_t len;
+ int max_fds; /* maximum amount requested of pre-allocated huge pages */
+ int total; /* amount of actually pre-allocated huge pages */
+ int idx; /* retrieve fd[idx] to get a free file descriptor */
+ int fd[]; /* list of file descriptors */
+};
+
+static struct huge_page_cache *hpc;
+
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
@@ -236,6 +250,177 @@ static ishm_ftable_t *ishm_ftbl;
/* prototypes: */
static void procsync(void);
+static int hp_create_file(uint64_t len, const char *filename)
+{
+ int fd;
+ int ret;
+ void *addr;
+
+ if (len <= 0) {
+ _ODP_ERR("Length is wrong\n");
+ return -1;
+ }
+
+ fd = open(filename, O_RDWR | O_CREAT | O_TRUNC,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0) {
+ _ODP_ERR("Could not create cache file %s\n", filename);
+ return -1;
+ }
+
+ /* remove file from file system */
+ unlink(filename);
+
+ ret = fallocate(fd, 0, 0, len);
+ if (ret == -1) {
+ if (errno == ENOTSUP) {
+ _ODP_DBG("fallocate() not supported\n");
+ ret = ftruncate(fd, len);
+ }
+
+ if (ret == -1) {
+ _ODP_ERR("memory allocation failed: fd=%d, err=%s.\n",
+ fd, strerror(errno));
+ close(fd);
+ return -1;
+ }
+ }
+
+ /* commit huge page */
+ addr = _odp_ishmphy_map(fd, len, 0, 0);
+ if (addr == NULL) {
+ /* no more pages available */
+ close(fd);
+ return -1;
+ }
+ _odp_ishmphy_unmap(addr, len, 0);
+
+ _ODP_DBG("Created HP cache file %s, fd: %d\n", filename, fd);
+
+ return fd;
+}
+
+static void hp_init(void)
+{
+ char filename[ISHM_FILENAME_MAXLEN];
+ char dir[ISHM_FILENAME_MAXLEN];
+ int count;
+ void *addr;
+
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "num_cached_hp",
+ &count)) {
+ return;
+ }
+
+ if (count <= 0)
+ return;
+
+ _ODP_DBG("Init HP cache with up to %d pages\n", count);
+
+ if (!odp_global_ro.hugepage_info.default_huge_page_dir) {
+ _ODP_ERR("No huge page dir\n");
+ return;
+ }
+
+ snprintf(dir, ISHM_FILENAME_MAXLEN, "%s/%s",
+ odp_global_ro.hugepage_info.default_huge_page_dir,
+ odp_global_ro.uid);
+
+ if (mkdir(dir, 0744) != 0) {
+ if (errno != EEXIST) {
+ _ODP_ERR("Failed to create dir: %s\n", strerror(errno));
+ return;
+ }
+ }
+
+ snprintf(filename, ISHM_FILENAME_MAXLEN,
+ "%s/odp-%d-ishm_cached",
+ dir,
+ odp_global_ro.main_pid);
+
+ addr = mmap(NULL,
+ sizeof(struct huge_page_cache) + sizeof(int) * count,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ _ODP_ERR("Unable to mmap memory for huge page cache\n.");
+ return;
+ }
+
+ hpc = addr;
+
+ hpc->max_fds = count;
+ hpc->total = 0;
+ hpc->idx = -1;
+ hpc->len = odp_sys_huge_page_size();
+
+ for (int i = 0; i < count; ++i) {
+ int fd;
+
+ fd = hp_create_file(hpc->len, filename);
+ if (fd == -1) {
+ do {
+ hpc->fd[i++] = -1;
+ } while (i < count);
+ break;
+ }
+ hpc->total++;
+ hpc->fd[i] = fd;
+ }
+ hpc->idx = hpc->total - 1;
+
+ _ODP_DBG("HP cache has %d huge pages of size 0x%08" PRIx64 "\n",
+ hpc->total, hpc->len);
+}
+
+static void hp_term(void)
+{
+ if (NULL == hpc)
+ return;
+
+ for (int i = 0; i < hpc->total; i++) {
+ if (hpc->fd[i] != -1)
+ close(hpc->fd[i]);
+ }
+
+ hpc->total = 0;
+ hpc->idx = -1;
+ hpc->len = 0;
+}
+
+static int hp_get_cached(uint64_t len)
+{
+ int fd;
+
+ if (hpc == NULL)
+ return -1;
+
+ if (hpc->idx < 0 || len != hpc->len)
+ return -1;
+
+ fd = hpc->fd[hpc->idx];
+ hpc->fd[hpc->idx--] = -1;
+
+ return fd;
+}
+
+static int hp_put_cached(int fd)
+{
+ if (hpc == NULL) {
+ _ODP_ERR("Bad hpc state\n");
+ return -1;
+ }
+
+ if (odp_unlikely((hpc->idx + 1) >= hpc->total)) {
+ _ODP_ERR("Trying to put more FD than allowed: %d\n", fd);
+ return -1;
+ }
+
+ hpc->idx++;
+ hpc->fd[hpc->idx] = fd;
+
+ return 0;
+}
+
/*
* Take a piece of the preallocated virtual space to fit "size" bytes.
* (best fit). Size must be rounded up to an integer number of pages size.
@@ -249,9 +434,9 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
ishm_fragment_t *fragmnt;
*best_fragmnt = NULL;
ishm_fragment_t *rem_fragmnt;
- uintptr_t border;/* possible start of new fragment (next alignement) */
+ uintptr_t border;/* possible start of new fragment (next alignment) */
intptr_t left; /* room remaining after, if the segment is allocated */
- uintptr_t remainder = ODP_CONFIG_ISHM_VA_PREALLOC_SZ;
+ uintptr_t remainder = odp_global_ro.shm_max_memory;
/*
* search for the best bit, i.e. search for the unallocated fragment
@@ -277,7 +462,8 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
}
if (!(*best_fragmnt)) {
- ODP_ERR("unable to get virtual address for shmem block!\n.");
+ _ODP_ERR("Out of single VA memory. Try increasing "
+ "'shm.single_va_size_kb' in ODP config.\n");
return NULL;
}
@@ -293,7 +479,7 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
/* fragment space, i.e. take a new fragment descriptor... */
rem_fragmnt = ishm_ftbl->unused_fragmnts;
if (!rem_fragmnt) {
- ODP_ERR("unable to get shmem fragment descriptor!\n.");
+ _ODP_ERR("unable to get shmem fragment descriptor!\n.");
return NULL;
}
ishm_ftbl->unused_fragmnts = rem_fragmnt->next;
@@ -323,7 +509,7 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
/* otherwise, fragment space, i.e. take a new fragment descriptor... */
rem_fragmnt = ishm_ftbl->unused_fragmnts;
if (!rem_fragmnt) {
- ODP_ERR("unable to get shmem fragment descriptor!\n.");
+ _ODP_ERR("unable to get shmem fragment descriptor!\n.");
return (*best_fragmnt)->start;
}
ishm_ftbl->unused_fragmnts = rem_fragmnt->next;
@@ -375,7 +561,7 @@ static void free_fragment(ishm_fragment_t *fragmnt)
if (ishm_ftbl->used_fragmnts == prev_f)
ishm_ftbl->used_fragmnts = fragmnt;
else
- ODP_ERR("corrupted fragment list!.\n");
+ _ODP_ERR("corrupted fragment list!.\n");
}
fragmnt->prev = prev_f->prev;
@@ -399,98 +585,144 @@ static void free_fragment(ishm_fragment_t *fragmnt)
}
}
+static char *create_seq_string(char *output, size_t size)
+{
+ snprintf(output, size, "%08" PRIu64, ishm_tbl->dev_seq++);
+
+ return output;
+}
+
+static int create_export_file(ishm_block_t *new_block, const char *name,
+ uint64_t len, uint32_t flags, uint32_t align,
+ odp_bool_t single_va, uint64_t offset)
+{
+ FILE *export_file;
+
+ snprintf(new_block->exptname, ISHM_FILENAME_MAXLEN,
+ ISHM_EXPTNAME_FORMAT,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
+ odp_global_ro.main_pid,
+ name);
+ export_file = fopen(new_block->exptname, "w");
+ if (export_file == NULL) {
+ _ODP_ERR("open failed: err=%s.\n", strerror(errno));
+ new_block->exptname[0] = 0;
+ return -1;
+ }
+
+ fprintf(export_file, EXPORT_FILE_LINE1_FMT "\n");
+ fprintf(export_file, EXPORT_FILE_LINE2_FMT "\n", new_block->name);
+ if (single_va)
+ fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
+ ishm_tbl->single_va_filename);
+ else
+ fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
+ new_block->filename);
+
+ fprintf(export_file, EXPORT_FILE_LINE4_FMT "\n", len);
+ fprintf(export_file, EXPORT_FILE_LINE5_FMT "\n", flags);
+ fprintf(export_file, EXPORT_FILE_LINE6_FMT "\n",
+ new_block->user_len);
+ fprintf(export_file, EXPORT_FILE_LINE7_FMT "\n",
+ new_block->user_flags);
+ fprintf(export_file, EXPORT_FILE_LINE8_FMT "\n", align);
+ fprintf(export_file, EXPORT_FILE_LINE9_FMT "\n", offset);
+
+ fclose(export_file);
+
+ return 0;
+}
+
/*
* Create file with size len. returns -1 on error
- * Creates a file to /tmp/odp-<pid>-<sequence_or_name> (for normal pages)
- * or /mnt/huge/odp-<pid>-<sequence_or_name> (for huge pages)
+ * Creates a file to /dev/shm/odp-<pid>-<sequence_or_name> (for normal pages)
+ * or /mnt/huge/odp-<pid>-<sequence_or_name> (for huge pages).
* Return the new file descriptor, or -1 on error.
*/
static int create_file(int block_index, huge_flag_t huge, uint64_t len,
- uint32_t flags, uint32_t align)
+ uint32_t flags, uint32_t align, odp_bool_t single_va)
{
char *name;
int fd;
- ishm_block_t *new_block; /* entry in the main block table */
+ ishm_block_t *new_block = NULL; /* entry in the main block table */
char seq_string[ISHM_FILENAME_MAXLEN]; /* used to construct filename*/
- char filename[ISHM_FILENAME_MAXLEN];/* filename in /tmp/ or /mnt/huge */
+ char filename[ISHM_FILENAME_MAXLEN]; /* filename in /dev/shm or
+ * /mnt/huge */
int oflag = O_RDWR | O_CREAT | O_TRUNC; /* flags for open */
- FILE *export_file;
-
- new_block = &ishm_tbl->block[block_index];
- name = new_block->name;
+ char dir[ISHM_FILENAME_MAXLEN];
+ int ret;
- /* create the filename: */
- snprintf(seq_string, ISHM_FILENAME_MAXLEN, "%08" PRIu64,
- ishm_tbl->dev_seq++);
+ /* No ishm_block_t for the master single VA memory file */
+ if (single_va) {
+ name = (char *)(uintptr_t)"single_va";
+ } else {
+ new_block = &ishm_tbl->block[block_index];
+ name = new_block->name;
+ if (!name || !name[0])
+ name = create_seq_string(seq_string,
+ ISHM_FILENAME_MAXLEN);
+ }
/* huge dir must be known to create files there!: */
if ((huge == HUGE) &&
- (!odp_global_data.hugepage_info.default_huge_page_dir))
+ (!odp_global_ro.hugepage_info.default_huge_page_dir))
return -1;
if (huge == HUGE)
- snprintf(filename, ISHM_FILENAME_MAXLEN,
- ISHM_FILENAME_FORMAT,
- odp_global_data.hugepage_info.default_huge_page_dir,
- odp_global_data.main_pid,
- (name && name[0]) ? name : seq_string);
+ snprintf(dir, ISHM_FILENAME_MAXLEN, "%s/%s",
+ odp_global_ro.hugepage_info.default_huge_page_dir,
+ odp_global_ro.uid);
else
- snprintf(filename, ISHM_FILENAME_MAXLEN,
- ISHM_FILENAME_FORMAT,
- ISHM_FILENAME_NORMAL_PAGE_DIR,
- odp_global_data.main_pid,
- (name && name[0]) ? name : seq_string);
+ snprintf(dir, ISHM_FILENAME_MAXLEN, "%s/%s",
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid);
+
+ snprintf(filename, ISHM_FILENAME_MAXLEN, ISHM_FILENAME_FORMAT, dir,
+ odp_global_ro.main_pid, name);
+
+ mkdir(dir, 0744);
fd = open(filename, oflag, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd < 0) {
- if (huge == HUGE)
- ODP_DBG("open failed for %s: %s.\n",
- filename, strerror(errno));
- else
- ODP_ERR("open failed for %s: %s.\n",
- filename, strerror(errno));
+ if (huge != HUGE)
+ _ODP_ERR("Normal page open failed: file=%s, "
+ "err=\"%s\"\n", filename, strerror(errno));
return -1;
}
- if (ftruncate(fd, len) == -1) {
- ODP_ERR("ftruncate failed: fd=%d, err=%s.\n",
- fd, strerror(errno));
- close(fd);
- unlink(filename);
- return -1;
+ ret = fallocate(fd, 0, 0, len);
+ if (ret == -1) {
+ if (errno == ENOTSUP) {
+ _ODP_DBG("fallocate() not supported\n");
+ ret = ftruncate(fd, len);
+ }
+
+ if (ret == -1) {
+ _ODP_ERR("%s memory allocation failed: fd=%d, file=%s, "
+ "err=\"%s\"\n", (huge == HUGE) ? "Huge page" :
+ "Normal page", fd, filename, strerror(errno));
+ close(fd);
+ unlink(filename);
+ return -1;
+ }
}
+ /* No export file is created since this is only for internal use.*/
+ if (single_va) {
+ snprintf(ishm_tbl->single_va_filename, ISHM_FILENAME_MAXLEN,
+ "%s", filename);
+ return fd;
+ }
/* if _ODP_ISHM_EXPORT is set, create a description file for
* external ref:
*/
if (flags & _ODP_ISHM_EXPORT) {
- strncpy(new_block->filename, filename,
- ISHM_FILENAME_MAXLEN - 1);
- snprintf(new_block->exptname, ISHM_FILENAME_MAXLEN,
- ISHM_EXPTNAME_FORMAT,
- odp_global_data.main_pid,
- (name && name[0]) ? name : seq_string);
- export_file = fopen(new_block->exptname, "w");
- if (export_file == NULL) {
- ODP_ERR("open failed: err=%s.\n",
- strerror(errno));
- new_block->exptname[0] = 0;
- } else {
- fprintf(export_file, EXPORT_FILE_LINE1_FMT "\n");
- fprintf(export_file, EXPORT_FILE_LINE2_FMT "\n", name);
- fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
- new_block->filename);
- fprintf(export_file, EXPORT_FILE_LINE4_FMT "\n", len);
- fprintf(export_file, EXPORT_FILE_LINE5_FMT "\n", flags);
- fprintf(export_file, EXPORT_FILE_LINE6_FMT "\n",
- new_block->user_len);
- fprintf(export_file, EXPORT_FILE_LINE7_FMT "\n",
- new_block->user_flags);
- fprintf(export_file, EXPORT_FILE_LINE8_FMT "\n", align);
-
- fclose(export_file);
- }
+ memcpy(new_block->filename, filename, ISHM_FILENAME_MAXLEN);
+
+ create_export_file(new_block, name, len, flags, align, false,
+ 0);
} else {
new_block->exptname[0] = 0;
/* remove the file from the filesystem, keeping its fd open */
@@ -503,8 +735,9 @@ static int create_file(int block_index, huge_flag_t huge, uint64_t len,
/* delete the files related to a given ishm block: */
static void delete_file(ishm_block_t *block)
{
- /* remove the .../odp-* file, unless fd was external: */
- if (block->filename[0] != 0)
+ /* remove the .../odp-* file, unless fd was external or single va */
+ if (block->filename[0] != 0 &&
+ strcmp(block->filename, ishm_tbl->single_va_filename))
unlink(block->filename);
/* also remove possible description file (if block was exported): */
if (block->exptname[0] != 0)
@@ -512,56 +745,37 @@ static void delete_file(ishm_block_t *block)
}
/*
- * performs the mapping, possibly allocating a fragment of the pre-reserved
- * VA space if the _ODP_ISHM_SINGLE_VA flag was given.
- * Sets fd, and returns the mapping address.
- * This function will also set the _ODP_ISHM_SINGLE_VA flag if the alignment
- * requires it
+ * Performs the mapping.
+ * Sets fd, and returns the mapping address. Not to be used with
+ * _ODP_ISHM_SINGLE_VA blocks.
* Mutex must be assured by the caller.
*/
static void *do_map(int block_index, uint64_t len, uint32_t align,
- uint32_t flags, huge_flag_t huge, int *fd)
+ uint64_t offset, uint32_t flags, huge_flag_t huge, int *fd)
{
ishm_block_t *new_block; /* entry in the main block table */
- void *addr = NULL;
void *mapped_addr;
- ishm_fragment_t *fragment = NULL;
+
+ _ODP_ASSERT(!(flags & _ODP_ISHM_SINGLE_VA));
new_block = &ishm_tbl->block[block_index];
/*
- * Creates a file to /tmp/odp-<pid>-<sequence> (for normal pages)
+ * Creates a file to /dev/shm/odp-<pid>-<sequence> (for normal pages)
* or /mnt/huge/odp-<pid>-<sequence> (for huge pages)
* unless a fd was already given
*/
if (*fd < 0) {
- *fd = create_file(block_index, huge, len, flags, align);
+ *fd = create_file(block_index, huge, len, flags, align, false);
if (*fd < 0)
return NULL;
} else {
new_block->filename[0] = 0;
}
- /* allocate an address range in the prebooked VA area if needed */
- if (flags & _ODP_ISHM_SINGLE_VA) {
- addr = alloc_fragment(len, block_index, align, &fragment);
- if (!addr) {
- ODP_ERR("alloc_fragment failed.\n");
- if (!new_block->external_fd) {
- close(*fd);
- *fd = -1;
- delete_file(new_block);
- }
- return NULL;
- }
- ishm_tbl->block[block_index].fragment = fragment;
- }
-
/* try to mmap: */
- mapped_addr = _odp_ishmphy_map(*fd, addr, len, flags);
+ mapped_addr = _odp_ishmphy_map(*fd, len, offset, flags);
if (mapped_addr == NULL) {
- if (flags & _ODP_ISHM_SINGLE_VA)
- free_fragment(fragment);
if (!new_block->external_fd) {
close(*fd);
*fd = -1;
@@ -574,36 +788,86 @@ static void *do_map(int block_index, uint64_t len, uint32_t align,
}
/*
+ * Allocate block from pre-reserved single VA memory
+ */
+static void *alloc_single_va(const char *name, int new_index, uint64_t size,
+ uint32_t align, uint32_t flags, int *fd,
+ uint64_t *len_out)
+{
+ uint64_t len;
+ uint64_t page_sz;
+ char *file_name = (char *)(uintptr_t)name;
+ void *addr;
+ ishm_block_t *new_block = &ishm_tbl->block[new_index];
+ ishm_fragment_t *fragment = NULL;
+ char seq_string[ISHM_FILENAME_MAXLEN];
+
+ if (!file_name || !file_name[0])
+ file_name = create_seq_string(seq_string, ISHM_FILENAME_MAXLEN);
+
+ /* Common fd for all single VA blocks */
+ *fd = ishm_tbl->single_va_fd;
+
+ if (ishm_tbl->single_va_huge) {
+ page_sz = odp_sys_huge_page_size();
+ new_block->huge = HUGE;
+ } else {
+ page_sz = odp_sys_page_size();
+ new_block->huge = NORMAL;
+ }
+ new_block->filename[0] = 0;
+
+ len = (size + (page_sz - 1)) & (-page_sz);
+
+ if (align < page_sz)
+ align = page_sz;
+
+ /* Allocate memory from the pre-reserved single VA space */
+ addr = alloc_fragment(len, new_index, align, &fragment);
+ if (!addr) {
+ _ODP_ERR("alloc_fragment failed.\n");
+ return NULL;
+ }
+ new_block->fragment = fragment;
+
+ /* Create export info file */
+ if (flags & _ODP_ISHM_EXPORT) {
+ uint64_t offset = (uintptr_t)addr -
+ (uintptr_t)ishm_tbl->single_va_start;
+ memcpy(new_block->filename, ishm_tbl->single_va_filename,
+ ISHM_FILENAME_MAXLEN);
+
+ create_export_file(new_block, file_name, len, flags, align,
+ true, offset);
+ } else {
+ new_block->exptname[0] = 0;
+ }
+
+ *len_out = len;
+ return addr;
+}
+
+/*
* Performs an extra mapping (for a process trying to see an existing block
- * i.e. performing a lookup).
+ * i.e. performing a lookup). Not to be used with _ODP_ISHM_SINGLE_VA blocks.
* Mutex must be assured by the caller.
*/
static void *do_remap(int block_index, int fd)
{
void *mapped_addr;
- ishm_fragment_t *fragment;
uint64_t len;
+ uint64_t offset;
uint32_t flags;
len = ishm_tbl->block[block_index].len;
+ offset = ishm_tbl->block[block_index].offset;
flags = ishm_tbl->block[block_index].flags;
- if (flags & _ODP_ISHM_SINGLE_VA) {
- fragment = ishm_tbl->block[block_index].fragment;
- if (!fragment) {
- ODP_ERR("invalid fragment failure.\n");
- return NULL;
- }
-
- /* try to mmap: */
- mapped_addr = _odp_ishmphy_map(fd, fragment->start, len, flags);
- if (mapped_addr == NULL)
- return NULL;
- return mapped_addr;
- }
+ _ODP_ASSERT(!(flags & _ODP_ISHM_SINGLE_VA));
/* try to mmap: */
- mapped_addr = _odp_ishmphy_map(fd, NULL, len, flags);
+ mapped_addr = _odp_ishmphy_map(fd, len, offset, flags);
+
if (mapped_addr == NULL)
return NULL;
@@ -611,8 +875,8 @@ static void *do_remap(int block_index, int fd)
}
/*
- * Performs unmapping, possibly freeing a prereserved VA space fragment,
- * if the _ODP_ISHM_SINGLE_VA flag was set at alloc time
+ * Performs unmapping, possibly freeing a pre-reserved single VA memory
+ * fragment, if the _ODP_ISHM_SINGLE_VA flag was set at alloc time.
* Mutex must be assured by the caller.
*/
static int do_unmap(void *start, uint64_t size, uint32_t flags,
@@ -656,56 +920,6 @@ static int find_block_by_name(const char *name)
}
/*
- * Search for a block by address (only works when flag _ODP_ISHM_SINGLE_VA
- * was set at reserve() time, or if the block is already known by this
- * process).
- * Search is performed in the process table and in the global ishm table.
- * The provided address does not have to be at start: any address
- * within the fragment is OK.
- * Returns the index to the found block (if any) or -1 if none.
- * Mutex must be assured by the caller.
- */
-static int find_block_by_address(void *addr)
-{
- int block_index;
- int i;
- ishm_fragment_t *fragmnt;
-
- /*
- * first check if there is already a process known block for this
- * address
- */
- for (i = 0; i < ishm_proctable->nb_entries; i++) {
- block_index = ishm_proctable->entry[i].block_index;
- if ((addr > ishm_proctable->entry[i].start) &&
- ((char *)addr < ((char *)ishm_proctable->entry[i].start +
- ishm_tbl->block[block_index].len)))
- return block_index;
- }
-
- /*
- * then check if there is a existing single VA block known by some other
- * process and containing the given address
- */
- for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
- if ((!ishm_tbl->block[i].len) ||
- (!(ishm_tbl->block[i].flags & _ODP_ISHM_SINGLE_VA)))
- continue;
- fragmnt = ishm_tbl->block[i].fragment;
- if (!fragmnt) {
- ODP_ERR("find_fragment: invalid NULL fragment\n");
- return -1;
- }
- if ((addr >= fragmnt->start) &&
- ((char *)addr < ((char *)fragmnt->start + fragmnt->len)))
- return i;
- }
-
- /* address does not belong to any accessible block: */
- return -1;
-}
-
-/*
* Search a given ishm block in the process local table. Return its index
* in the process table or -1 if not found (meaning that the ishm table
* block index was not referenced in the process local table, i.e. the
@@ -742,7 +956,9 @@ static void procsync(void)
block = &ishm_tbl->block[ishm_proctable->entry[i].block_index];
if (ishm_proctable->entry[i].seq != block->seq) {
/* obsolete entry: free memory and remove proc entry */
- close(ishm_proctable->entry[i].fd);
+ if (ishm_proctable->entry[i].fd !=
+ ishm_tbl->single_va_fd)
+ close(ishm_proctable->entry[i].fd);
_odp_ishmphy_unmap(ishm_proctable->entry[i].start,
ishm_proctable->entry[i].len,
ishm_proctable->entry[i].flags);
@@ -756,28 +972,93 @@ static void procsync(void)
}
/*
+ * Free a block as described in block_free(), but
+ * considering whether to close the file descriptor or not, and
+ * whether to deregister from the fdserver.
+ */
+static int block_free_internal(int block_index, int close_fd, int deregister)
+{
+ int proc_index;
+ ishm_block_t *block; /* entry in the main block table*/
+ int last;
+ int ret = 0;
+
+ if ((block_index < 0) ||
+ (block_index >= ISHM_MAX_NB_BLOCKS) ||
+ (ishm_tbl->block[block_index].len == 0)) {
+ _ODP_ERR("Request to free an invalid block\n");
+ return -1;
+ }
+
+ block = &ishm_tbl->block[block_index];
+
+ proc_index = procfind_block(block_index);
+ if (proc_index >= 0) {
+ int fd = ishm_proctable->entry[proc_index].fd;
+
+ /* remove the mapping and possible fragment */
+ do_unmap(ishm_proctable->entry[proc_index].start,
+ block->len,
+ ishm_proctable->entry[proc_index].flags,
+ block_index);
+
+ /* close the related fd */
+ if (close_fd && (fd != ishm_tbl->single_va_fd)) {
+ if (block->huge == CACHED)
+ hp_put_cached(fd);
+ else
+ close(fd);
+ }
+
+ /* remove entry from process local table: */
+ last = ishm_proctable->nb_entries - 1;
+ ishm_proctable->entry[proc_index] = ishm_proctable->entry[last];
+ ishm_proctable->nb_entries = last;
+ } else {
+ /* just possibly free the fragment as no mapping exist here: */
+ do_unmap(NULL, 0, block->flags, block_index);
+ }
+
+ /* remove all files related to this block: */
+ if (close_fd)
+ delete_file(block);
+
+ /* deregister the file descriptor from the file descriptor server. */
+ if (deregister)
+ ret = _odp_fdserver_deregister_fd(FD_SRV_CTX_ISHM, block_index);
+
+ /* mark the block as free in the main block table: */
+ block->len = 0;
+
+ /* mark the change so other processes see this entry as obsolete: */
+ block->seq++;
+
+ return ret;
+}
+
+/*
* Allocate and map internal shared memory, or other objects:
* If a name is given, check that this name is not already in use.
* If ok, allocate a new shared memory block and map the
* provided fd in it (if fd >=0 was given).
* If no fd is provided, a shared memory file desc named
- * /tmp/odp-<pid>-ishm-<name_or_sequence> is created and mapped.
+ * /dev/shm/odp-<pid>-ishm-<name_or_sequence> is created and mapped.
* (the name is different for huge page file as they must be on hugepagefs)
* The function returns the index of the newly created block in the
* main block table (>=0) or -1 on error.
*/
int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
- uint32_t align, uint32_t flags, uint32_t user_flags)
+ uint32_t align, uint64_t offset, uint32_t flags,
+ uint32_t user_flags)
{
int new_index; /* index in the main block table*/
ishm_block_t *new_block; /* entry in the main block table*/
uint64_t page_sz; /* normal page size. usually 4K*/
uint64_t page_hp_size; /* huge page size */
uint32_t hp_align;
- uint64_t len; /* mapped length */
+ uint64_t len = 0; /* mapped length */
void *addr = NULL; /* mapping address */
int new_proc_entry;
- struct stat statbuf;
static int huge_error_printed; /* to avoid millions of error...*/
odp_spinlock_lock(&ishm_tbl->lock);
@@ -787,7 +1068,8 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
/* Get system page sizes: page_hp_size is 0 if no huge page available*/
page_sz = odp_sys_page_size();
- page_hp_size = odp_sys_huge_page_size();
+ /* Use normal pages if ODP_SHM_NO_HP was used */
+ page_hp_size = (user_flags & ODP_SHM_NO_HP) ? 0 : odp_sys_huge_page_size();
/* grab a new entry: */
for (new_index = 0; new_index < ISHM_MAX_NB_BLOCKS; new_index++) {
@@ -800,7 +1082,7 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
/* check if we have reached the maximum number of allocation: */
if (new_index >= ISHM_MAX_NB_BLOCKS) {
odp_spinlock_unlock(&ishm_tbl->lock);
- ODP_ERR("ISHM_MAX_NB_BLOCKS limit reached!\n");
+ _ODP_ERR("ISHM_MAX_NB_BLOCKS limit reached!\n");
return -1;
}
@@ -812,90 +1094,125 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
else
new_block->name[0] = 0;
+ new_block->offset = 0;
+
/* save user data: */
new_block->user_flags = user_flags;
new_block->user_len = size;
/* If a file descriptor is provided, get the real size and map: */
if (fd >= 0) {
- if (fstat(fd, &statbuf) < 0) {
- close(fd);
- odp_spinlock_unlock(&ishm_tbl->lock);
- ODP_ERR("_ishm_reserve failed (fstat failed: %s).\n",
- strerror(errno));
- __odp_errno = errno;
- return -1;
- }
- len = statbuf.st_size;
- /* note that the huge page flag is meningless here as huge
+ new_block->external_fd = 1;
+ len = size;
+ /* note that the huge page flag is meaningless here as huge
* page is determined by the provided file descriptor: */
- addr = do_map(new_index, len, align, flags, EXTERNAL, &fd);
+ addr = do_map(new_index, len, align, offset, flags, EXTERNAL,
+ &fd);
if (addr == NULL) {
- close(fd);
odp_spinlock_unlock(&ishm_tbl->lock);
- ODP_ERR("_ishm_reserve failed.\n");
+ _ODP_ERR("_ishm_reserve failed.\n");
return -1;
}
new_block->huge = EXTERNAL;
- new_block->external_fd = 1;
} else {
new_block->external_fd = 0;
+ new_block->huge = UNKNOWN;
}
/* Otherwise, Try first huge pages when possible and needed: */
- if ((fd < 0) && page_hp_size && (size > page_sz)) {
+ if ((fd < 0) && page_hp_size && ((user_flags & ODP_SHM_HP) ||
+ size > ishm_tbl->huge_page_limit)) {
/* at least, alignment in VA should match page size, but user
* can request more: If the user requirement exceeds the page
* size then we have to make sure the block will be mapped at
* the same address every where, otherwise alignment may be
- * be wrong for some process */
+ * wrong for some process */
hp_align = align;
- if (hp_align <= odp_sys_huge_page_size())
- hp_align = odp_sys_huge_page_size();
+ if (hp_align <= page_hp_size)
+ hp_align = page_hp_size;
else
flags |= _ODP_ISHM_SINGLE_VA;
+ if (flags & _ODP_ISHM_SINGLE_VA)
+ goto use_single_va;
+
/* roundup to page size */
len = (size + (page_hp_size - 1)) & (-page_hp_size);
- addr = do_map(new_index, len, hp_align, flags, HUGE, &fd);
- if (addr == NULL) {
- if (!huge_error_printed) {
- ODP_ERR("No huge pages, fall back to normal "
- "pages. "
- "check: /proc/sys/vm/nr_hugepages.\n");
- huge_error_printed = 1;
+ /* try pre-allocated pages */
+ fd = hp_get_cached(len);
+ if (fd != -1) {
+ /* do as if user provided a fd */
+ new_block->external_fd = 1;
+ addr = do_map(new_index, len, hp_align, 0, flags,
+ CACHED, &fd);
+ if (addr == NULL) {
+ _ODP_ERR("Could not use cached hp %d\n", fd);
+ hp_put_cached(fd);
+ fd = -1;
+ } else {
+ new_block->huge = CACHED;
+ }
+ }
+ if (fd == -1) {
+ addr = do_map(new_index, len, hp_align, 0, flags, HUGE,
+ &fd);
+
+ if (addr == NULL) {
+ if (!huge_error_printed) {
+ _ODP_WARN("No huge pages, fall back to normal pages. "
+ "Check: /proc/sys/vm/nr_hugepages.\n");
+ huge_error_printed = 1;
+ }
+ } else {
+ new_block->huge = HUGE;
}
- } else {
- new_block->huge = HUGE;
}
}
/* Try normal pages if huge pages failed */
if (fd < 0) {
+ if (user_flags & ODP_SHM_HP) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ _ODP_ERR("Unable to allocate memory from huge pages\n");
+ return -1;
+ }
/* at least, alignment in VA should match page size, but user
* can request more: If the user requirement exceeds the page
* size then we have to make sure the block will be mapped at
* the same address every where, otherwise alignment may be
- * be wrong for some process */
+ * wrong for some process */
if (align <= odp_sys_page_size())
align = odp_sys_page_size();
else
flags |= _ODP_ISHM_SINGLE_VA;
+ if (flags & _ODP_ISHM_SINGLE_VA)
+ goto use_single_va;
+
/* roundup to page size */
len = (size + (page_sz - 1)) & (-page_sz);
- addr = do_map(new_index, len, align, flags, NORMAL, &fd);
+ addr = do_map(new_index, len, align, 0, flags, NORMAL, &fd);
new_block->huge = NORMAL;
}
+use_single_va:
+ /* Reserve memory from single VA space */
+ if (fd < 0 && (flags & _ODP_ISHM_SINGLE_VA))
+ addr = alloc_single_va(name, new_index, size, align, flags, &fd,
+ &len);
+
/* if neither huge pages or normal pages works, we cannot proceed: */
if ((fd < 0) || (addr == NULL) || (len == 0)) {
- if ((!new_block->external_fd) && (fd >= 0))
+ if (new_block->external_fd) {
+ if (new_block->huge == CACHED)
+ hp_put_cached(fd);
+ } else if (fd >= 0 && (fd != ishm_tbl->single_va_fd)) {
close(fd);
+ }
delete_file(new_block);
odp_spinlock_unlock(&ishm_tbl->lock);
- ODP_ERR("_ishm_reserve failed.\n");
+ _ODP_ERR("_ishm_reserve failed.\n");
return -1;
}
@@ -918,13 +1235,75 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
ishm_proctable->entry[new_proc_entry].fd = fd;
/* register the file descriptor to the file descriptor server. */
- _odp_fdserver_register_fd(FD_SRV_CTX_ISHM, new_index, fd);
+ if (_odp_fdserver_register_fd(FD_SRV_CTX_ISHM, new_index, fd) == -1) {
+ block_free_internal(new_index, !new_block->external_fd, 0);
+ new_index = -1;
+ }
odp_spinlock_unlock(&ishm_tbl->lock);
return new_index;
}
/*
+ * Pre-reserve all single VA memory. Called only in global init.
+ */
+static void *reserve_single_va(uint64_t size, int *fd_out)
+{
+ uint64_t page_sz; /* normal page size. usually 4K*/
+ uint64_t page_hp_size; /* huge page size */
+ uint64_t len; /* mapped length */
+ int fd = -1;
+ void *addr = NULL;
+
+ /* Get system page sizes: page_hp_size is 0 if no huge page available*/
+ page_sz = odp_sys_page_size();
+ page_hp_size = odp_sys_huge_page_size();
+
+ /* Try first huge pages when possible and needed: */
+ if (page_hp_size && (size > page_sz)) {
+ /* roundup to page size */
+ len = (size + (page_hp_size - 1)) & (-page_hp_size);
+ fd = create_file(-1, HUGE, len, 0, 0, true);
+ if (fd >= 0) {
+ addr = _odp_ishmphy_reserve_single_va(len, fd);
+ if (!addr) {
+ close(fd);
+ unlink(ishm_tbl->single_va_filename);
+ fd = -1;
+ }
+ }
+ if (fd < 0)
+ _ODP_WARN("No huge pages, fall back to normal pages. "
+ "Check: /proc/sys/vm/nr_hugepages.\n");
+ ishm_tbl->single_va_huge = true;
+ }
+
+ /* Fall back to normal pages if necessary */
+ if (fd < 0) {
+ /* roundup to page size */
+ len = (size + (page_sz - 1)) & (-page_sz);
+
+ fd = create_file(-1, NORMAL, len, 0, 0, true);
+ if (fd >= 0)
+ addr = _odp_ishmphy_reserve_single_va(len, fd);
+ ishm_tbl->single_va_huge = false;
+ }
+
+ /* If neither huge pages or normal pages works, we cannot proceed: */
+ if ((fd < 0) || (len == 0) || !addr) {
+ if (fd >= 0) {
+ close(fd);
+ unlink(ishm_tbl->single_va_filename);
+ }
+ _ODP_ERR("Reserving single VA memory failed.\n");
+ return NULL;
+ }
+
+ *fd_out = fd;
+ return addr;
+}
+
+/*
* Try to map an memory block mapped by another ODP instance into the
* current ODP instance.
* returns 0 on success.
@@ -939,6 +1318,7 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
uint64_t len;
uint32_t flags;
uint64_t user_len;
+ uint64_t offset;
uint32_t user_flags;
uint32_t align;
int fd;
@@ -947,13 +1327,15 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
/* try to read the block description file: */
snprintf(export_filename, ISHM_FILENAME_MAXLEN,
ISHM_EXPTNAME_FORMAT,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
external_odp_pid,
remote_name);
export_file = fopen(export_filename, "r");
if (export_file == NULL) {
- ODP_ERR("Error opening %s.\n", export_filename);
+ _ODP_ERR("Error opening %s.\n", export_filename);
return -1;
}
@@ -981,26 +1363,34 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
if (fscanf(export_file, EXPORT_FILE_LINE8_FMT " ", &align) != 1)
goto error_exp_file;
+ if (fscanf(export_file, EXPORT_FILE_LINE9_FMT " ", &offset) != 1)
+ goto error_exp_file;
+
fclose(export_file);
/* now open the filename given in the description file: */
fd = open(filename, O_RDWR, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
if (fd == -1) {
- ODP_ERR("open failed for %s: %s.\n",
- filename, strerror(errno));
+ _ODP_ERR("open failed for %s: %s.\n", filename, strerror(errno));
return -1;
}
- /* clear the _ODP_ISHM_EXPORT flag so we don't export that again*/
+ /* Clear the _ODP_ISHM_EXPORT flag so we don't export again. Single
+ * VA doesn't hold up after export. */
flags &= ~(uint32_t)_ODP_ISHM_EXPORT;
+ flags &= ~(uint32_t)_ODP_ISHM_SINGLE_VA;
/* reserve the memory, providing the opened file descriptor: */
- block_index = _odp_ishm_reserve(local_name, 0, fd, align, flags, 0);
+ block_index = _odp_ishm_reserve(local_name, len, fd, align, offset,
+ flags, 0);
if (block_index < 0) {
close(fd);
return block_index;
}
+ /* Offset is required to remap the block to other processes */
+ ishm_tbl->block[block_index].offset = offset;
+
/* set inherited info: */
ishm_tbl->block[block_index].user_flags = user_flags;
ishm_tbl->block[block_index].user_len = user_len;
@@ -1009,7 +1399,7 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
error_exp_file:
fclose(export_file);
- ODP_ERR("Error reading %s.\n", export_filename);
+ _ODP_ERR("Error reading %s.\n", export_filename);
return -1;
}
@@ -1022,53 +1412,7 @@ error_exp_file:
*/
static int block_free(int block_index)
{
- int proc_index;
- ishm_block_t *block; /* entry in the main block table*/
- int last;
-
- if ((block_index < 0) ||
- (block_index >= ISHM_MAX_NB_BLOCKS) ||
- (ishm_tbl->block[block_index].len == 0)) {
- ODP_ERR("Request to free an invalid block\n");
- return -1;
- }
-
- block = &ishm_tbl->block[block_index];
-
- proc_index = procfind_block(block_index);
- if (proc_index >= 0) {
- /* close the related fd */
- close(ishm_proctable->entry[proc_index].fd);
-
- /* remove the mapping and possible fragment */
- do_unmap(ishm_proctable->entry[proc_index].start,
- block->len,
- ishm_proctable->entry[proc_index].flags,
- block_index);
-
- /* remove entry from process local table: */
- last = ishm_proctable->nb_entries - 1;
- ishm_proctable->entry[proc_index] =
- ishm_proctable->entry[last];
- ishm_proctable->nb_entries = last;
- } else {
- /* just possibly free the fragment as no mapping exist here: */
- do_unmap(NULL, 0, block->flags, block_index);
- }
-
- /* remove all files related to this block: */
- delete_file(block);
-
- /* deregister the file descriptor from the file descriptor server. */
- _odp_fdserver_deregister_fd(FD_SRV_CTX_ISHM, block_index);
-
- /* mark the block as free in the main block table: */
- block->len = 0;
-
- /* mark the change so other processes see this entry as obsolete: */
- block->seq++;
-
- return 0;
+ return block_free_internal(block_index, 1, 1);
}
/*
@@ -1088,59 +1432,6 @@ int _odp_ishm_free_by_index(int block_index)
}
/*
- * free and unmap internal shared memory, identified by its block name:
- * return -1 on error. 0 if OK.
- */
-int _odp_ishm_free_by_name(const char *name)
-{
- int block_index;
- int ret;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- /* search the block in main ishm table */
- block_index = find_block_by_name(name);
- if (block_index < 0) {
- ODP_ERR("Request to free an non existing block..."
- " (double free?)\n");
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
-
- ret = block_free(block_index);
- odp_spinlock_unlock(&ishm_tbl->lock);
- return ret;
-}
-
-/*
- * Free and unmap internal shared memory identified by address:
- * return -1 on error. 0 if OK.
- */
-int _odp_ishm_free_by_address(void *addr)
-{
- int block_index;
- int ret;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- /* search the block in main ishm table */
- block_index = find_block_by_address(addr);
- if (block_index < 0) {
- ODP_ERR("Request to free an non existing block..."
- " (double free?)\n");
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
-
- ret = block_free(block_index);
-
- odp_spinlock_unlock(&ishm_tbl->lock);
- return ret;
-}
-
-/*
* Lookup for an ishm shared memory, identified by its block index
* in the main ishm block table.
* Map this ishm area in the process VA (if not already present).
@@ -1158,7 +1449,7 @@ static void *block_lookup(int block_index)
if ((block_index < 0) ||
(block_index >= ISHM_MAX_NB_BLOCKS) ||
(ishm_tbl->block[block_index].len == 0)) {
- ODP_ERR("Request to lookup an invalid block\n");
+ _ODP_ERR("Request to lookup an invalid block\n");
return NULL;
}
@@ -1170,16 +1461,21 @@ static void *block_lookup(int block_index)
/* this ishm is not known by this process, yet: we create the mapping.*/
fd = _odp_fdserver_lookup_fd(FD_SRV_CTX_ISHM, block_index);
if (fd < 0) {
- ODP_ERR("Could not find ishm file descriptor (BUG!)\n");
+ _ODP_ERR("Could not find ishm file descriptor (BUG!)\n");
return NULL;
}
/* perform the mapping */
block = &ishm_tbl->block[block_index];
- mapped_addr = do_remap(block_index, fd);
+ /* No need to remap single VA */
+ if (block->flags & _ODP_ISHM_SINGLE_VA)
+ mapped_addr = block->start;
+ else
+ mapped_addr = do_remap(block_index, fd);
+
if (mapped_addr == NULL) {
- ODP_ERR(" lookup: Could not map existing shared memory!\n");
+ _ODP_ERR(" lookup: Could not map existing shared memory!\n");
return NULL;
}
@@ -1197,28 +1493,9 @@ static void *block_lookup(int block_index)
}
/*
- * Lookup for an ishm shared memory, identified by its block_index.
- * Maps this ishmem area in the process VA (if not already present).
- * Returns the block user address, or NULL if the index
- * does not match any known ishm blocks.
- */
-void *_odp_ishm_lookup_by_index(int block_index)
-{
- void *ret;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- ret = block_lookup(block_index);
- odp_spinlock_unlock(&ishm_tbl->lock);
- return ret;
-}
-
-/*
* Lookup for an ishm shared memory, identified by its block name.
- * Map this ishm area in the process VA (if not already present).
- * Return the block index, or -1 if the index
- * does not match any known ishm blocks.
+ * Return the block index, or -1 if the index does not match any known ishm
+ * blocks.
*/
int _odp_ishm_lookup_by_name(const char *name)
{
@@ -1229,68 +1506,25 @@ int _odp_ishm_lookup_by_name(const char *name)
/* search the block in main ishm table: return -1 if not found: */
block_index = find_block_by_name(name);
- if ((block_index < 0) || (!block_lookup(block_index))) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
odp_spinlock_unlock(&ishm_tbl->lock);
return block_index;
}
/*
- * Lookup for an ishm shared memory block, identified by its VA address.
- * This works only if the block has already been looked-up (mapped) by the
- * current process or it it was created with the _ODP_ISHM_SINGLE_VA flag.
- * Map this ishm area in the process VA (if not already present).
- * Return the block index, or -1 if the address
- * does not match any known ishm blocks.
- */
-int _odp_ishm_lookup_by_address(void *addr)
-{
- int block_index;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- /* search the block in main ishm table: return -1 if not found: */
- block_index = find_block_by_address(addr);
- if ((block_index < 0) || (!block_lookup(block_index))) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
-
- odp_spinlock_unlock(&ishm_tbl->lock);
- return block_index;
-}
-
-/*
- * Returns the VA address of a given block (which has to be known in the current
- * process). Returns NULL if the block is unknown.
+ * Returns the VA address of a given block. Maps this ishm area in the process
+ * VA (if not already present).
+ * Returns NULL if the block is unknown.
*/
void *_odp_ishm_address(int block_index)
{
- int proc_index;
void *addr;
odp_spinlock_lock(&ishm_tbl->lock);
procsync();
- if ((block_index < 0) ||
- (block_index >= ISHM_MAX_NB_BLOCKS) ||
- (ishm_tbl->block[block_index].len == 0)) {
- ODP_ERR("Request for address on an invalid block\n");
- odp_spinlock_unlock(&ishm_tbl->lock);
- return NULL;
- }
+ addr = block_lookup(block_index);
- proc_index = procfind_block(block_index);
- if (proc_index < 0) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- return NULL;
- }
-
- addr = ishm_proctable->entry[proc_index].start;
odp_spinlock_unlock(&ishm_tbl->lock);
return addr;
}
@@ -1306,7 +1540,7 @@ int _odp_ishm_info(int block_index, _odp_ishm_info_t *info)
(block_index >= ISHM_MAX_NB_BLOCKS) ||
(ishm_tbl->block[block_index].len == 0)) {
odp_spinlock_unlock(&ishm_tbl->lock);
- ODP_ERR("Request for info on an invalid block\n");
+ _ODP_ERR("Request for info on an invalid block\n");
return -1;
}
@@ -1374,61 +1608,161 @@ static int do_odp_ishm_init_local(void)
return 0;
}
-int _odp_ishm_init_global(void)
+/* remove all files staring with "odp-<pid>" from a directory "dir" */
+int _odp_ishm_cleanup_files(const char *dirpath)
+{
+ struct dirent *e;
+ DIR *dir;
+ char userdir[PATH_MAX];
+ char prefix[PATH_MAX];
+ char *fullpath;
+ int d_len;
+ int p_len;
+ int f_len;
+
+ snprintf(userdir, PATH_MAX, "%s/%s", dirpath, odp_global_ro.uid);
+ d_len = strlen(userdir);
+
+ dir = opendir(userdir);
+ if (!dir) {
+ /* ok if the dir does not exist. no much to delete then! */
+ _ODP_DBG("opendir failed for %s: %s\n", userdir, strerror(errno));
+ return 0;
+ }
+ snprintf(prefix, PATH_MAX, _ODP_FILES_FMT, odp_global_ro.main_pid);
+ p_len = strlen(prefix);
+ while ((e = readdir(dir)) != NULL) {
+ if (strncmp(e->d_name, prefix, p_len) == 0) {
+ f_len = strlen(e->d_name);
+ fullpath = malloc(d_len + f_len + 2);
+ if (fullpath == NULL) {
+ closedir(dir);
+ return -1;
+ }
+ snprintf(fullpath, PATH_MAX, "%s/%s",
+ userdir, e->d_name);
+ _ODP_DBG("deleting obsolete file: %s\n", fullpath);
+ if (unlink(fullpath))
+ _ODP_ERR("unlink failed for %s: %s\n", fullpath, strerror(errno));
+ free(fullpath);
+ }
+ }
+ closedir(dir);
+
+ return 0;
+}
+
+int _odp_ishm_init_global(const odp_init_t *init)
{
void *addr;
- void *spce_addr;
+ void *spce_addr = NULL;
int i;
+ int val_kb;
+ uid_t uid;
+ char *hp_dir = odp_global_ro.hugepage_info.default_huge_page_dir;
+ uint64_t max_memory;
+ uint64_t internal;
+ uint64_t huge_page_limit;
+
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "single_va_size_kb",
+ &val_kb)) {
+ _ODP_ERR("Unable to read single VA size from config\n");
+ return -1;
+ }
- if ((getpid() != odp_global_data.main_pid) ||
- (syscall(SYS_gettid) != getpid()))
- ODP_ERR("odp_init_global() must be performed by the main "
+ _ODP_DBG("Shm single VA size: %dkB\n", val_kb);
+
+ max_memory = (uint64_t)val_kb * 1024;
+ internal = max_memory / 8;
+
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "huge_page_limit_kb",
+ &val_kb)) {
+ _ODP_ERR("Unable to read huge page usage limit from config\n");
+ return -1;
+ }
+ huge_page_limit = (uint64_t)val_kb * 1024;
+
+ _ODP_DBG("Shm huge page usage limit: %dkB\n", val_kb);
+
+ /* user requested memory size + some extra for internal use */
+ if (init && init->shm.max_memory)
+ max_memory = init->shm.max_memory + internal;
+
+ odp_global_ro.shm_max_memory = max_memory;
+ odp_global_ro.shm_max_size = max_memory - internal;
+ odp_global_ro.main_pid = getpid();
+ odp_global_ro.shm_dir = getenv("ODP_SHM_DIR");
+ if (odp_global_ro.shm_dir) {
+ odp_global_ro.shm_dir_from_env = 1;
+ } else {
+ odp_global_ro.shm_dir =
+ calloc(1, sizeof(ISHM_FILENAME_NORMAL_PAGE_DIR));
+ sprintf(odp_global_ro.shm_dir, "%s",
+ ISHM_FILENAME_NORMAL_PAGE_DIR);
+ odp_global_ro.shm_dir_from_env = 0;
+ }
+
+ _ODP_DBG("ishm: using dir %s\n", odp_global_ro.shm_dir);
+
+ uid = getuid();
+ snprintf(odp_global_ro.uid, UID_MAXLEN, "%d",
+ uid);
+
+ if ((syscall(SYS_gettid)) != odp_global_ro.main_pid) {
+ _ODP_ERR("ishm init must be performed by the main "
"ODP process!\n.");
+ return -1;
+ }
- if (!odp_global_data.hugepage_info.default_huge_page_dir)
- ODP_DBG("NOTE: No support for huge pages\n");
- else
- ODP_DBG("Huge pages mount point is: %s\n",
- odp_global_data.hugepage_info.default_huge_page_dir);
+ if (!hp_dir) {
+ _ODP_DBG("NOTE: No support for huge pages\n");
+ } else {
+ _ODP_DBG("Huge pages mount point is: %s\n", hp_dir);
+ _odp_ishm_cleanup_files(hp_dir);
+ }
+
+ _odp_ishm_cleanup_files(odp_global_ro.shm_dir);
/* allocate space for the internal shared mem block table: */
addr = mmap(NULL, sizeof(ishm_table_t),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ODP_ERR("unable to mmap the main block table\n.");
+ _ODP_ERR("unable to mmap the main block table\n.");
goto init_glob_err1;
}
ishm_tbl = addr;
memset(ishm_tbl, 0, sizeof(ishm_table_t));
ishm_tbl->dev_seq = 0;
ishm_tbl->odpthread_cnt = 0;
+ ishm_tbl->huge_page_limit = huge_page_limit;
odp_spinlock_init(&ishm_tbl->lock);
/* allocate space for the internal shared mem fragment table: */
addr = mmap(NULL, sizeof(ishm_ftable_t),
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
if (addr == MAP_FAILED) {
- ODP_ERR("unable to mmap the main fragment table\n.");
+ _ODP_ERR("unable to mmap the main fragment table\n.");
goto init_glob_err2;
}
ishm_ftbl = addr;
memset(ishm_ftbl, 0, sizeof(ishm_ftable_t));
- /*
- *reserve the address space for _ODP_ISHM_SINGLE_VA reserved blocks,
- * only address space!
- */
- spce_addr = _odp_ishmphy_book_va(ODP_CONFIG_ISHM_VA_PREALLOC_SZ,
- odp_sys_huge_page_size());
- if (!spce_addr) {
- ODP_ERR("unable to reserve virtual space\n.");
- goto init_glob_err3;
+ /* Reserve memory for _ODP_ISHM_SINGLE_VA reserved blocks */
+ ishm_tbl->single_va_fd = -1;
+ if (max_memory) {
+ spce_addr = reserve_single_va(max_memory,
+ &ishm_tbl->single_va_fd);
+ if (!spce_addr) {
+ _ODP_ERR("unable to reserve single VA memory\n.");
+ goto init_glob_err3;
+ }
+ ishm_tbl->single_va_start = spce_addr;
}
/* use the first fragment descriptor to describe to whole VA space: */
ishm_ftbl->fragment[0].block_index = -1;
ishm_ftbl->fragment[0].start = spce_addr;
- ishm_ftbl->fragment[0].len = ODP_CONFIG_ISHM_VA_PREALLOC_SZ;
+ ishm_ftbl->fragment[0].len = max_memory;
ishm_ftbl->fragment[0].prev = NULL;
ishm_ftbl->fragment[0].next = NULL;
ishm_ftbl->used_fragmnts = &ishm_ftbl->fragment[0];
@@ -1443,19 +1777,33 @@ int _odp_ishm_init_global(void)
ishm_ftbl->unused_fragmnts = &ishm_ftbl->fragment[1];
/*
- * We run _odp_ishm_init_local() directely here to give the
+ * We run _odp_ishm_init_local() directly here to give the
* possibility to run shm_reserve() before the odp_init_local()
* is performed for the main thread... Many init_global() functions
* indeed assume the availability of odp_shm_reserve()...:
*/
- return do_odp_ishm_init_local();
+ if (do_odp_ishm_init_local()) {
+ _ODP_ERR("unable to init the main thread\n.");
+ goto init_glob_err4;
+ }
+ /* get ready to create pools: */
+ _odp_ishm_pool_init();
+
+ /* init cache files */
+ hp_init();
+
+ return 0;
+
+init_glob_err4:
+ if (_odp_ishmphy_free_single_va())
+ _ODP_ERR("unable to free single VA memory\n.");
init_glob_err3:
if (munmap(ishm_ftbl, sizeof(ishm_ftable_t)) < 0)
- ODP_ERR("unable to munmap main fragment table\n.");
+ _ODP_ERR("unable to munmap main fragment table\n.");
init_glob_err2:
if (munmap(ishm_tbl, sizeof(ishm_table_t)) < 0)
- ODP_ERR("unable to munmap main block table\n.");
+ _ODP_ERR("unable to munmap main block table\n.");
init_glob_err1:
return -1;
}
@@ -1466,7 +1814,7 @@ int _odp_ishm_init_local(void)
* Do not re-run this for the main ODP process, as it has already
* been done in advance at _odp_ishm_init_global() time:
*/
- if ((getpid() == odp_global_data.main_pid) &&
+ if ((getpid() == odp_global_ro.main_pid) &&
(syscall(SYS_gettid) == getpid()))
return 0;
@@ -1511,7 +1859,7 @@ static int do_odp_ishm_term_local(void)
block = &ishm_tbl->block[block_index];
if ((--block->refcnt) <= 0) {
block->refcnt = 0;
- ODP_DBG("Warning: block %d: name:%s "
+ _ODP_DBG("Warning: block %d: name:%s "
"no longer referenced\n",
i,
ishm_tbl->block[i].name[0] ?
@@ -1547,18 +1895,19 @@ int _odp_ishm_term_global(void)
{
int ret = 0;
int index;
+ int fd = ishm_tbl->single_va_fd;
ishm_block_t *block;
- if ((getpid() != odp_global_data.main_pid) ||
+ if ((getpid() != odp_global_ro.main_pid) ||
(syscall(SYS_gettid) != getpid()))
- ODP_ERR("odp_term_global() must be performed by the main "
+ _ODP_ERR("odp_term_global() must be performed by the main "
"ODP process!\n.");
/* cleanup possibly non freed memory (and complain a bit): */
for (index = 0; index < ISHM_MAX_NB_BLOCKS; index++) {
block = &ishm_tbl->block[index];
if (block->len != 0) {
- ODP_ERR("block '%s' (file %s) was never freed "
+ _ODP_ERR("block '%s' (file %s) was never freed "
"(cleaning up...).\n",
block->name, block->filename);
delete_file(block);
@@ -1568,20 +1917,32 @@ int _odp_ishm_term_global(void)
/* perform the last thread terminate which was postponed: */
ret = do_odp_ishm_term_local();
+ /* remove the file from the filesystem, keeping its fd open */
+ unlink(ishm_tbl->single_va_filename);
+
/* free the fragment table */
if (munmap(ishm_ftbl, sizeof(ishm_ftable_t)) < 0) {
ret |= -1;
- ODP_ERR("unable to munmap fragment table\n.");
+ _ODP_ERR("unable to munmap fragment table\n.");
}
/* free the block table */
if (munmap(ishm_tbl, sizeof(ishm_table_t)) < 0) {
ret |= -1;
- ODP_ERR("unable to munmap main table\n.");
+ _ODP_ERR("unable to munmap main table\n.");
}
- /* free the reserved VA space */
- if (_odp_ishmphy_unbook_va())
+ /* free the reserved single VA memory */
+ if (_odp_ishmphy_free_single_va())
ret |= -1;
+ if ((fd >= 0) && close(fd)) {
+ ret |= -1;
+ _ODP_ERR("unable to close single VA\n.");
+ }
+
+ if (!odp_global_ro.shm_dir_from_env)
+ free(odp_global_ro.shm_dir);
+
+ hp_term();
return ret;
}
@@ -1607,14 +1968,37 @@ int _odp_ishm_status(const char *title)
int nb_allocated_frgments = 0; /* nb frag describing an allocated VA */
int nb_blocks = 0;
int single_va_blocks = 0;
+ int max_name_len = 0;
+ uint64_t lost_total = 0; /* statistics for total unused memory */
+ uint64_t len_total = 0; /* statistics for total allocated memory */
odp_spinlock_lock(&ishm_tbl->lock);
procsync();
- ODP_DBG("ishm blocks allocated at: %s\n", title);
+ /* find longest block name */
+ for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
+ int str_len;
+
+ if (ishm_tbl->block[i].len <= 0)
+ continue;
+
+ str_len = strlen(ishm_tbl->block[i].name);
+
+ if (max_name_len < str_len)
+ max_name_len = str_len;
+ }
+
+ _ODP_PRINT("%s\n", title);
+ _ODP_PRINT(" %-*s flag %-29s %-8s %-8s %-3s %-3s %-3s file\n",
+ max_name_len, "name", "range", "user_len", "unused",
+ "seq", "ref", "fd");
/* display block table: 1 line per entry +1 extra line if mapped here */
for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
+ void *start_addr = NULL;
+ void *end_addr = NULL;
+ int entry_fd = -1;
+
if (ishm_tbl->block[i].len <= 0)
continue; /* unused block */
@@ -1637,56 +2021,71 @@ int _odp_ishm_status(const char *title)
case EXTERNAL:
huge = 'E';
break;
+ case CACHED:
+ huge = 'C';
+ break;
default:
huge = '?';
}
proc_index = procfind_block(i);
- ODP_DBG("%-3d: name:%-.24s file:%-.24s"
- " flags:%s,%c len:0x%-08lx"
- " user_len:%-8ld seq:%-3ld refcnt:%-4d\n",
- i,
- ishm_tbl->block[i].name,
- ishm_tbl->block[i].filename,
- flags, huge,
- ishm_tbl->block[i].len,
- ishm_tbl->block[i].user_len,
- ishm_tbl->block[i].seq,
- ishm_tbl->block[i].refcnt);
-
- if (proc_index < 0)
- continue;
+ lost_total += ishm_tbl->block[i].len -
+ ishm_tbl->block[i].user_len;
+ len_total += ishm_tbl->block[i].len;
+
+ if (proc_index >= 0) {
+ start_addr = ishm_proctable->entry[proc_index].start;
+ end_addr = (void *)(uintptr_t)((uintptr_t)start_addr +
+ ishm_tbl->block[i].len);
+ entry_fd = ishm_proctable->entry[proc_index].fd;
+ }
- ODP_DBG(" start:%-08lx fd:%-3d\n",
- ishm_proctable->entry[proc_index].start,
- ishm_proctable->entry[proc_index].fd);
+ _ODP_PRINT("%2i %-*s %s%c %p-%p %-8" PRIu64 " "
+ "%-8" PRIu64 " %-3" PRIu64 " %-3" PRIu64 " "
+ "%-3d %s\n",
+ i, max_name_len, ishm_tbl->block[i].name,
+ flags, huge, start_addr, end_addr,
+ ishm_tbl->block[i].user_len,
+ ishm_tbl->block[i].len - ishm_tbl->block[i].user_len,
+ ishm_tbl->block[i].seq,
+ ishm_tbl->block[i].refcnt,
+ entry_fd,
+ ishm_tbl->block[i].filename[0] ?
+ ishm_tbl->block[i].filename :
+ "(none)");
}
+ _ODP_PRINT("TOTAL: %58s%-8" PRIu64 " %2s%-8" PRIu64 "\n",
+ "", len_total,
+ "", lost_total);
+ _ODP_PRINT("%65s(%" PRIu64 "MB) %4s(%" PRIu64 "MB)\n",
+ "", len_total / 1024 / 1024,
+ "", lost_total / 1024 / 1024);
/* display the virtual space allocations... : */
- ODP_DBG("ishm virtual space:\n");
+ _ODP_PRINT("\nishm virtual space:\n");
for (fragmnt = ishm_ftbl->used_fragmnts;
fragmnt; fragmnt = fragmnt->next) {
if (fragmnt->block_index >= 0) {
nb_allocated_frgments++;
- ODP_DBG(" %08p - %08p: ALLOCATED by block:%d\n",
- (uintptr_t)fragmnt->start,
- (uintptr_t)fragmnt->start + fragmnt->len - 1,
- fragmnt->block_index);
+ _ODP_PRINT(" %8p - %8p: ALLOCATED by block:%d\n",
+ fragmnt->start,
+ (void *)((uintptr_t)fragmnt->start + fragmnt->len - 1),
+ fragmnt->block_index);
consecutive_unallocated = 0;
} else {
- ODP_DBG(" %08p - %08p: NOT ALLOCATED\n",
- (uintptr_t)fragmnt->start,
- (uintptr_t)fragmnt->start + fragmnt->len - 1);
+ _ODP_PRINT(" %8p - %8p: NOT ALLOCATED\n",
+ fragmnt->start,
+ (void *)((uintptr_t)fragmnt->start + fragmnt->len - 1));
if (consecutive_unallocated++)
- ODP_ERR("defragmentation error\n");
+ _ODP_ERR("defragmentation error\n");
}
/* some other sanity checks: */
if (fragmnt->prev != previous)
- ODP_ERR("chaining error\n");
+ _ODP_ERR("chaining error\n");
if (fragmnt != ishm_ftbl->used_fragmnts) {
if ((uintptr_t)fragmnt->start != last_address + 1)
- ODP_ERR("lost space error\n");
+ _ODP_ERR("lost space error\n");
}
last_address = (uintptr_t)fragmnt->start + fragmnt->len - 1;
@@ -1699,25 +2098,145 @@ int _odp_ishm_status(const char *title)
* the number of used fragments:
*/
if (single_va_blocks != nb_allocated_frgments)
- ODP_ERR("single_va_blocks != nb_allocated_fragments!\n");
+ _ODP_ERR("single_va_blocks != nb_allocated_fragments!\n");
/* compute the number of unused fragments*/
for (fragmnt = ishm_ftbl->unused_fragmnts;
fragmnt; fragmnt = fragmnt->next)
nb_unused_frgments++;
- ODP_DBG("ishm: %d fragment used. %d fragments unused. (total=%d)\n",
- nb_used_frgments, nb_unused_frgments,
- nb_used_frgments + nb_unused_frgments);
+ _ODP_PRINT("ishm: %d fragment used. %d fragments unused. (total=%d)\n",
+ nb_used_frgments, nb_unused_frgments,
+ nb_used_frgments + nb_unused_frgments);
if ((nb_used_frgments + nb_unused_frgments) != ISHM_NB_FRAGMNTS)
- ODP_ERR("lost fragments!\n");
+ _ODP_ERR("lost fragments!\n");
if (nb_blocks < ishm_proctable->nb_entries)
- ODP_ERR("process known block cannot exceed main total sum!\n");
+ _ODP_ERR("process known block cannot exceed main total sum!\n");
- ODP_DBG("\n");
+ _ODP_PRINT("\n");
odp_spinlock_unlock(&ishm_tbl->lock);
return nb_blocks;
}
+
+void _odp_ishm_print(int block_index)
+{
+ ishm_block_t *block;
+ const char *str;
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+
+ if ((block_index < 0) ||
+ (block_index >= ISHM_MAX_NB_BLOCKS) ||
+ (ishm_tbl->block[block_index].len == 0)) {
+ odp_spinlock_unlock(&ishm_tbl->lock);
+ _ODP_ERR("Request for info on an invalid block\n");
+ return;
+ }
+
+ block = &ishm_tbl->block[block_index];
+
+ _ODP_PRINT("\nSHM block info\n--------------\n");
+ _ODP_PRINT(" name: %s\n", block->name);
+ _ODP_PRINT(" file: %s\n", block->filename);
+ _ODP_PRINT(" expt: %s\n", block->exptname);
+ _ODP_PRINT(" user_flags: 0x%x\n", block->user_flags);
+ _ODP_PRINT(" flags: 0x%x\n", block->flags);
+ _ODP_PRINT(" user_len: %" PRIu64 "\n", block->user_len);
+ _ODP_PRINT(" start: %p\n", block->start);
+ _ODP_PRINT(" len: %" PRIu64 "\n", block->len);
+
+ switch (block->huge) {
+ case HUGE:
+ str = "huge";
+ break;
+ case NORMAL:
+ str = "normal";
+ break;
+ case EXTERNAL:
+ str = "external";
+ break;
+ case CACHED:
+ str = "cached";
+ break;
+ default:
+ str = "??";
+ }
+
+ _ODP_PRINT(" page type: %s\n", str);
+ _ODP_PRINT(" seq: %" PRIu64 "\n", block->seq);
+ _ODP_PRINT(" refcnt: %" PRIu64 "\n", block->refcnt);
+ _ODP_PRINT("\n");
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+}
+
+int32_t odp_system_meminfo(odp_system_meminfo_t *info, odp_system_memblock_t memblock[],
+ int32_t max_num)
+{
+ ishm_block_t *block;
+ int name_len, proc_index;
+ int32_t i;
+ uintptr_t addr;
+ uint64_t len, lost, page_size;
+ uint64_t lost_total = 0;
+ uint64_t len_total = 0;
+ int32_t num = 0;
+ const uint64_t huge_sz = odp_sys_huge_page_size();
+ const uint64_t normal_sz = odp_sys_page_size();
+
+ odp_spinlock_lock(&ishm_tbl->lock);
+ procsync();
+
+ for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
+ block = &ishm_tbl->block[i];
+
+ len = block->len;
+ if (len == 0)
+ continue;
+
+ lost = len - block->user_len;
+
+ if (num < max_num) {
+ odp_system_memblock_t *mb = &memblock[num];
+
+ name_len = strlen(block->name);
+ if (name_len >= ODP_SYSTEM_MEMBLOCK_NAME_LEN)
+ name_len = ODP_SYSTEM_MEMBLOCK_NAME_LEN - 1;
+
+ memcpy(mb->name, block->name, name_len);
+ mb->name[name_len] = 0;
+
+ addr = 0;
+ proc_index = procfind_block(i);
+ if (proc_index >= 0)
+ addr = (uintptr_t)ishm_proctable->entry[proc_index].start;
+
+ page_size = 0;
+ if (block->huge == HUGE)
+ page_size = huge_sz;
+ else if (block->huge == NORMAL)
+ page_size = normal_sz;
+
+ mb->addr = addr;
+ mb->used = len;
+ mb->overhead = lost;
+ mb->page_size = page_size;
+ }
+
+ len_total += len;
+ lost_total += lost;
+
+ num++;
+ }
+
+ odp_spinlock_unlock(&ishm_tbl->lock);
+
+ info->total_mapped = len_total;
+ info->total_used = len_total;
+ info->total_overhead = lost_total;
+
+ return num;
+}
diff --git a/platform/linux-generic/odp_ishmphy.c b/platform/linux-generic/odp_ishmphy.c
new file mode 100644
index 000000000..796482b98
--- /dev/null
+++ b/platform/linux-generic/odp_ishmphy.c
@@ -0,0 +1,145 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * This file handles the lower end of the ishm memory allocator:
+ * It performs the physical mappings.
+ */
+#include <odp_posix_extensions.h>
+#include <odp_config_internal.h>
+#include <odp/api/align.h>
+#include <odp/api/system_info.h>
+#include <odp/api/debug.h>
+#include <odp_debug_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_ishmphy_internal.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <inttypes.h>
+#include <odp_ishmphy_internal.h>
+
+static void *common_va_address;
+static uint64_t common_va_len;
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/* Reserve single VA memory
+ * This function is called at odp_init_global() time to pre-reserve some memory
+ * which is inherited by all odpthreads (i.e. descendant processes and threads).
+ * This memory block is later used when memory is reserved with
+ * _ODP_ISHM_SINGLE_VA flag.
+ * returns the address of the mapping or NULL on error.
+ */
+void *_odp_ishmphy_reserve_single_va(uint64_t len, int fd)
+{
+ void *addr;
+
+ addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, 0);
+ if (addr == MAP_FAILED) {
+ _ODP_ERR("mmap failed: %s\n", strerror(errno));
+ return NULL;
+ }
+
+ if (mprotect(addr, len, PROT_READ | PROT_WRITE))
+ _ODP_ERR("mprotect failed: %s\n", strerror(errno));
+
+ _ODP_DBG("VA Reserved: %p, len=%" PRIu64 "\n", addr, len);
+
+ common_va_address = addr;
+ common_va_len = len;
+
+ return addr;
+}
+
+/* Free single VA memory
+ * This function is called at odp_term_global() time to free the memory reserved
+ * by _odp_ishmphy_reserve_single_va()
+ */
+int _odp_ishmphy_free_single_va(void)
+{
+ int ret;
+
+ if (!common_va_address)
+ return 0;
+
+ ret = munmap(common_va_address, common_va_len);
+ if (ret)
+ _ODP_ERR("munmap failed: %s\n", strerror(errno));
+ return ret;
+}
+
+/*
+ * do a mapping:
+ * Performs a mapping of the provided file descriptor to the process VA
+ * space. Not to be used with _ODP_ISHM_SINGLE_VA blocks.
+ * returns the address of the mapping or NULL on error.
+ */
+void *_odp_ishmphy_map(int fd, uint64_t size, uint64_t offset, int flags)
+{
+ void *mapped_addr;
+ int mmap_flags = MAP_POPULATE;
+
+ _ODP_ASSERT(!(flags & _ODP_ISHM_SINGLE_VA));
+
+ /* do a new mapping in the VA space: */
+ mapped_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | mmap_flags, fd, offset);
+ if ((mapped_addr >= common_va_address) &&
+ ((char *)mapped_addr <
+ (char *)common_va_address + common_va_len)) {
+ _ODP_ERR("VA SPACE OVERLAP!\n");
+ }
+
+ if (mapped_addr == MAP_FAILED)
+ return NULL;
+
+ /* if locking is requested, lock it...*/
+ if (flags & _ODP_ISHM_LOCK) {
+ if (mlock(mapped_addr, size)) {
+ _ODP_ERR("mlock failed: %s\n", strerror(errno));
+ if (munmap(mapped_addr, size))
+ _ODP_ERR("munmap failed: %s\n", strerror(errno));
+ return NULL;
+ }
+ }
+ return mapped_addr;
+}
+
+/* free a mapping:
+ * _ODP_ISHM_SINGLE_VA memory is not returned back to linux until global
+ * terminate. If the _ODP_ISHM_SINGLE_VA flag was not given, both physical
+ * memory and virtual address space are released by calling the normal munmap.
+ * return 0 on success or -1 on error.
+ */
+int _odp_ishmphy_unmap(void *start, uint64_t len, int flags)
+{
+ int ret;
+
+ /* if locking was requested, unlock...*/
+ if (flags & _ODP_ISHM_LOCK)
+ munlock(start, len);
+
+ if (flags & _ODP_ISHM_SINGLE_VA)
+ return 0;
+
+ /* just release the mapping */
+ ret = munmap(start, len);
+ if (ret)
+ _ODP_ERR("munmap failed: %s\n", strerror(errno));
+ return ret;
+}
diff --git a/platform/linux-generic/odp_ishmpool.c b/platform/linux-generic/odp_ishmpool.c
new file mode 100644
index 000000000..89ec10695
--- /dev/null
+++ b/platform/linux-generic/odp_ishmpool.c
@@ -0,0 +1,659 @@
+/* Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* This file gathers the buddy and slab allocation functionality provided
+ * by _ishm.
+ * _odp_ishmpool_create() can be used to create a pool for buddy/slab
+ * allocation. _odp_ishmpool_create() will allocate a memory area using
+ * ishm_reserve() for both the control part (needed for tracking
+ * allocation/free...) and the user memory itself (part of which will be given
+ * at each ishmpool_alloc()).
+ * The element size provided at pool creation time determines whether
+ * the pool will be of type buddy or slab.
+ * For buddy, all allocations are rounded to the nearest power of 2.
+ *
+ * The implementation of the buddy allocator is very traditional: it
+ * maintains N lists of free buffers.
+ * The control part actually contains these N queue heads, (N-M are actually
+ * used), the free buffers themselves being used for chaining (the chaining info
+ * is in the buffers: as they are "free" they should not be touched by the
+ * user). The control part also contains an array of bytes for remembering
+ * the size (actually the order) of the allocated buffers:
+ * There are 2^(N-M) such bytes, this number being the maximum number of
+ * allocated buffers (when all allocation are <= 2^M bytes)
+ * Buddy allocators handle fragmentation by splitting or merging blocks by 2.
+ * They guarantee a minimum efficiency of 50%, at worst case fragmentation.
+ *
+ * Slab implementation is even simpler, all free elements being queued in
+ * one single queue at init, taken from this queue when allocated and
+ * returned to this same queue when freed.
+ *
+ * The reason for not using malloc() is that malloc does not guarantee
+ * memory sharability between ODP threads (regardless of their implementation)
+ * which ishm_reserve() can do. see the comments around
+ * _odp_ishmbud_pool_create() and ishm_reserve() for more details.
+ *
+ * This file is divided in 3 sections: the first one regroups functions
+ * needed by the buddy allocation.
+ * The second one regroups the functions needed by the slab allocator.
+ * The third section regroups the common functions exported externally.
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/spinlock.h>
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+
+#include <odp_shm_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_ishmpool_internal.h>
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <inttypes.h>
+
+#define BUDDY_MIN_SIZE 32 /* minimal buddy allocation size */
+
+typedef _odp_ishm_pool_t pool_t; /* for shorter writing */
+
+/* array of ishm block index used for pools. only used for pool
+ * lookup by name */
+#define MAX_NB_POOL 100
+static int pool_blk_idx[MAX_NB_POOL];
+
+/* section 1: functions for buddy allocation: */
+
+/* free buddy blocks contains the following structure, used to link the
+ * free blocks together.
+ */
+typedef struct bblock_t {
+ struct bblock_t *next;
+ uint32_t order;
+} bblock_t;
+
+/* value set in the 'order' table when the block is not allocated: */
+#define BBLOCK_FREE 0
+
+/* compute ceil(log2(size)) */
+static uint8_t clog2(uint64_t size)
+{
+	uint8_t floor_log = 0;
+	uint64_t remain = size;
+	uint32_t shift;
+
+	/* binary search for the highest set bit: floor(log2(size)) */
+	for (shift = 32; shift != 0; shift >>= 1) {
+		if (remain >= ((uint64_t)1 << shift)) {
+			remain >>= shift;
+			floor_log += shift;
+		}
+	}
+
+	/* round up when size is not an exact power of two */
+	if (((uint64_t)1 << floor_log) < size)
+		floor_log++;
+
+	return floor_log;
+}
+
+/*
+ * given a bblock address, and an order value, returns the address
+ * of the buddy bblock (the other "half")
+ */
+static inline bblock_t *get_bblock_buddy(pool_t *bpool, bblock_t *addr,
+					 uint8_t order)
+{
+	uintptr_t b;
+
+	b = ((uintptr_t)addr - (uintptr_t)bpool->ctrl.user_addr);
+	/* shift a uintptr_t constant: "1 << order" would be an int shift,
+	 * which is undefined behaviour for order >= 31 (pool order may be
+	 * up to 63, only order >= 64 is rejected at pool creation) */
+	b ^= (uintptr_t)1 << order;
+	return (void *)(b + (uintptr_t)bpool->ctrl.user_addr);
+}
+
+/*
+ * given a buddy block address, return its number (used for busy flags):
+ */
+static inline uintptr_t get_bblock_nr(pool_t *bpool, void *addr)
+{
+	uintptr_t offset = (uintptr_t)addr - (uintptr_t)bpool->ctrl.user_addr;
+
+	/* block numbers are counted in units of the smallest block size */
+	return offset >> bpool->ctrl.min_order;
+}
+
+/* remove bblock from the list for bblocks of rank order. The bblock to be
+ * removed is really expected to be on the list: not finding it is an error */
+static inline void remove_from_list(pool_t *bpool, uint8_t order,
+				    bblock_t *bblock)
+{
+	bblock_t **link = &bpool->ctrl.free_heads[order];
+
+	/* walk the link fields themselves, so removing the head needs no
+	 * special case */
+	while (*link) {
+		if (*link == bblock) {
+			*link = bblock->next;
+			return;
+		}
+		link = &(*link)->next;
+	}
+
+	_ODP_ERR("List corrupted\n");
+}
+
+/*
+ * create a buddy memory pool of given size (actually nearest power of 2),
+ * where allocation will never be smaller than min_alloc.
+ * returns a pointer to the created buddy_pool
+ * The allocated area contains:
+ * - The _odp_ishm_pool_ctrl_t structure
+ * - The array of ((order - min_order) of free list heads
+ * - The array of 'order' values, remembering sizes of allocated bblocks
+ * - alignment to cache line
+ * - The user memory
+ */
+static pool_t *_odp_ishmbud_pool_create(const char *pool_name, int store_idx,
+					uint64_t size,
+					uint64_t min_alloc, int flags)
+{
+	uint8_t order; /* pool order = ceil(log2(size)) */
+	uint8_t min_order; /* pool min_order = ceil(log2(min_alloc))*/
+	uint32_t max_nb_bblock; /* max number of bblock, when smallest */
+	uint32_t control_sz; /* size of control area */
+	uint32_t free_head_sz; /* mem area needed for list heads */
+	uint32_t saved_order_sz; /* mem area to remember given sizes */
+	uint64_t user_sz; /* 2^order bytes */
+	uint64_t total_sz; /* total size to request */
+	int blk_idx; /* as returned by _odp_ishm_reserve() */
+	pool_t *bpool;
+	int i;
+	bblock_t *first_block;
+
+	/* a bblock_t must fit in the buffers for linked chain! */
+	if (min_alloc < sizeof(bblock_t))
+		min_alloc = sizeof(bblock_t);
+
+	/* pool order is such that 2^order = size. same for min_order */
+	order = clog2(size);
+	min_order = clog2(min_alloc);
+
+	/* check parameters obvious wishes: */
+	if (order >= 64)
+		return NULL;
+	if (order < min_order)
+		return NULL;
+
+	/* at worst case, all bblocks have smallest (2^min_order) size */
+	max_nb_bblock = (1 << (order - min_order));
+
+	/* space needed for the control area (padded to cache line size)*/
+	control_sz = _ODP_ROUNDUP_CACHE_LINE(sizeof(_odp_ishm_pool_ctrl_t));
+
+	/* space needed for 'order' free bblock list heads: */
+	/* Note that only lists from min_order to order are really used.*/
+	free_head_sz = _ODP_ROUNDUP_CACHE_LINE(sizeof(void *) * (order + 1));
+
+	/* space needed for order -i.e. size- storage of alloc'd bblock:*/
+	saved_order_sz = _ODP_ROUNDUP_CACHE_LINE(max_nb_bblock * sizeof(uint8_t));
+
+	/* space needed for user area is 2^order bytes: */
+	user_sz = 1ULL << order;
+
+	total_sz = control_sz +
+		   free_head_sz +
+		   saved_order_sz +
+		   user_sz;
+
+	/* allocate required memory: */
+	blk_idx = _odp_ishm_reserve(pool_name, total_sz, -1,
+				    ODP_CACHE_LINE_SIZE, 0, flags, 0);
+	if (blk_idx < 0) {
+		_ODP_ERR("_odp_ishm_reserve failed.");
+		return NULL;
+	}
+
+	bpool = _odp_ishm_address(blk_idx);
+	if (bpool == NULL) {
+		_ODP_ERR("_odp_ishm_address failed.");
+		/* release the just-reserved block instead of leaking it: */
+		_odp_ishm_free_by_index(blk_idx);
+		return NULL;
+	}
+
+	/* store in pool array (needed for look up): */
+	pool_blk_idx[store_idx] = blk_idx;
+
+	/* remember block index, needed when pool is destroyed */
+	bpool->ctrl.ishm_blk_idx = blk_idx;
+
+	/* remember element size: 0 means unknown size, i.e. buddy allocation*/
+	bpool->ctrl.element_sz = 0;
+
+	/* prepare mutex: */
+	odp_spinlock_init(&bpool->ctrl.lock);
+
+	/* initialise pointers and things... */
+	bpool->ctrl.order = order;
+	bpool->ctrl.min_order = min_order;
+	bpool->ctrl.free_heads =
+		(void *)((uintptr_t)bpool + control_sz);
+	bpool->ctrl.alloced_order =
+		(uint8_t *)((uintptr_t)bpool->ctrl.free_heads + free_head_sz);
+	bpool->ctrl.user_addr =
+		(void *)((uintptr_t)bpool->ctrl.alloced_order + saved_order_sz);
+
+	/* initialize all free list heads to NULL, except the top biggest
+	 * element. Loop up to 'order' (not 'order - min_order'): the heads
+	 * in range [order - min_order, order) are used by alloc/free and
+	 * must not be left to whatever the reserved memory contained. */
+	for (i = 0; i < order; i++)
+		bpool->ctrl.free_heads[i] = NULL;
+	bpool->ctrl.free_heads[order] = bpool->ctrl.user_addr;
+	first_block = (bblock_t *)bpool->ctrl.user_addr;
+	first_block->next = NULL;
+	first_block->order = order;
+
+	/* set all 'order' of allocated bblocks to free: */
+	memset(bpool->ctrl.alloced_order, BBLOCK_FREE, saved_order_sz);
+
+	return bpool;
+}
+
+/* allocated memory from the given buddy pool */
+static void *_odp_ishmbud_alloc(pool_t *bpool, uint64_t size)
+{
+	uint32_t rq_order; /* requested order */
+	uint32_t try_order;
+	bblock_t *bblock;
+	bblock_t *buddy;
+	uintptr_t nr;
+
+	/* if size is zero or too big reject: ("||", not "&&": a size cannot
+	 * be both zero and larger than the pool, so the original conjunction
+	 * made this check unsatisfiable dead code) */
+	if ((!size) || (size > (1ULL << bpool->ctrl.order))) {
+		_ODP_ERR("Invalid alloc size (0 or larger than whole pool)\n");
+		return NULL;
+	}
+
+	/* compute ceil(log2(size)), to get the requested block order: */
+	rq_order = clog2(size);
+
+	/* make sure the requested order is bigger (or same) as minimum! */
+	if (rq_order < bpool->ctrl.min_order)
+		rq_order = bpool->ctrl.min_order;
+
+	/* mutex from here: */
+	odp_spinlock_lock(&bpool->ctrl.lock);
+
+	/* now, start trying to allocate a bblock of rq_order. If that
+	 * fails keep trying larger orders until pool order is reached */
+	bblock = NULL;
+	for (try_order = rq_order; try_order <= bpool->ctrl.order;
+	     try_order++) {
+		if (bpool->ctrl.free_heads[try_order]) {
+			/* remove from list: */
+			bblock =
+				(bblock_t *)(bpool->ctrl.free_heads[try_order]);
+			bpool->ctrl.free_heads[try_order] = bblock->next;
+			break;
+		}
+	}
+
+	if (!bblock) {
+		odp_spinlock_unlock(&bpool->ctrl.lock);
+		_ODP_ERR("Out of memory. (Buddy pool full)\n");
+		return NULL;
+	}
+
+	/* OK: we got a block, but possibly too large (if try_order>rq_order)
+	 * return the extra halves to the pool hence splitting the bblock at
+	 * each 'extra' order: */
+	while (try_order-- > rq_order) {
+		/* split: shift a uintptr_t constant; "1 << try_order" would
+		 * be an int shift and undefined for try_order >= 31 */
+		buddy = (bblock_t *)((uintptr_t)bblock +
+				     ((uintptr_t)1 << try_order));
+		buddy->order = try_order;
+		/* add to list: */
+		buddy->next = bpool->ctrl.free_heads[try_order];
+		bpool->ctrl.free_heads[try_order] = buddy;
+		/* mark as free (non allocated block get size 0): */
+		nr = get_bblock_nr(bpool, buddy);
+		bpool->ctrl.alloced_order[nr] = BBLOCK_FREE;
+	}
+
+	/* remember the size of the allocated block: */
+	nr = get_bblock_nr(bpool, bblock);
+	bpool->ctrl.alloced_order[nr] = rq_order;
+
+	/* and return the allocated block! */
+	odp_spinlock_unlock(&bpool->ctrl.lock);
+	return (void *)bblock;
+}
+
+/* free a previously allocated buffer from a given buddy pool */
+static int _odp_ishmbud_free(pool_t *bpool, void *addr)
+{
+	uintptr_t user_start; /* start of user area */
+	uintptr_t user_stop; /* stop of user area */
+	uintptr_t mask; /* 2^min_order - 1 */
+	bblock_t *bblock; /* bblock being freed */
+	bblock_t *buddy; /* buddy bblock of bblock being freed */
+	uint8_t order; /* order of block being freed */
+	uintptr_t nr; /* block number */
+
+	/* freeing NULL is regarded as OK, though without any effect: */
+	if (!addr)
+		return 0;
+
+	user_start = (uintptr_t)bpool->ctrl.user_addr;
+	user_stop = user_start + ((uintptr_t)1 << bpool->ctrl.order);
+	mask = ((uintptr_t)1 << bpool->ctrl.min_order) - 1;
+
+	/* some sanity checks: check that given address is within pool and
+	 * that relative address has 2^min_order granularity. Note ">=":
+	 * user_stop is one past the pool end and its block number would
+	 * index one past the end of the alloced_order[] array below. */
+	if (((uintptr_t)addr < user_start) ||
+	    ((uintptr_t)addr >= user_stop) ||
+	    (((uintptr_t)addr - user_start) & mask)) {
+		_ODP_ERR("Invalid address to be freed\n");
+		return -1;
+	}
+
+	/* mutex from here: */
+	odp_spinlock_lock(&bpool->ctrl.lock);
+
+	/* collect saved block order and make sure bblock was allocated */
+	bblock = (bblock_t *)addr;
+	nr = get_bblock_nr(bpool, bblock);
+	order = bpool->ctrl.alloced_order[nr];
+	if (order == BBLOCK_FREE) {
+		_ODP_ERR("Double free error\n");
+		odp_spinlock_unlock(&bpool->ctrl.lock);
+		return -1;
+	}
+
+	/* this looks like a valid free, mark at least this as free: */
+	bpool->ctrl.alloced_order[nr] = BBLOCK_FREE;
+
+	/* go up in orders, trying to merge buddies... */
+	while (order < bpool->ctrl.order) {
+		buddy = get_bblock_buddy(bpool, bblock, order);
+		/*if buddy is not free: no further merge possible */
+		nr = get_bblock_nr(bpool, buddy);
+		if (bpool->ctrl.alloced_order[nr] != BBLOCK_FREE)
+			break;
+		/*merge only bblock of same order:*/
+		if (buddy->order != order)
+			break;
+		/*merge: remove buddy from free list: */
+		remove_from_list(bpool, order, buddy);
+		/*merge: make sure we point at start of block: */
+		if (bblock > buddy)
+			bblock = buddy;
+		/*merge: size of block has doubled: increase order: */
+		order++;
+	}
+
+	/* insert the bblock into its correct free block list: */
+	bblock->next = bpool->ctrl.free_heads[order];
+	bpool->ctrl.free_heads[order] = bblock;
+
+	/* remember the (possibly now merged) block order: */
+	bblock->order = order;
+
+	odp_spinlock_unlock(&bpool->ctrl.lock);
+	return 0;
+}
+
+/* section 2: functions for slab allocation: */
+
+/* free slab blocks contains the following structure, used to link the
+ * free blocks together.
+ */
+typedef struct sblock_t {
+ struct sblock_t *next;
+} sblock_t;
+
+/*
+ * create a slab memory pool of given size (rounded up to the nearest integer
+ * number of element, where each element has size 'elt_size').
+ * returns a pointer to the created slab pool.
+ * The allocated area contains:
+ * - The _odp_ishm_pool_ctrl_t structure
+ * - alignment to cache line
+ * - The user memory
+ */
+static pool_t *_odp_ishmslab_pool_create(const char *pool_name, int store_idx,
+					 uint64_t size,
+					 uint64_t elt_size, int flags)
+{
+	uint32_t nb_sblock; /* number of elements in the pool */
+	uint32_t control_sz; /* size of control area */
+	uint64_t total_sz; /* total size to request */
+	uint64_t user_sz; /* nb_sblock * elt_size bytes */
+	int blk_idx; /* as returned by _odp_ishm_reserve() */
+	pool_t *spool;
+	unsigned int i;
+	sblock_t *block;
+
+	/* reject degenerate parameters (avoid division by zero and an
+	 * empty free list below): */
+	if (elt_size == 0 || size == 0)
+		return NULL;
+
+	/* a sblock_t must fit in the buffers for linked chain!
+	 * (sizeof(sblock_t), not sizeof(bblock_t): this is the slab pool.)
+	 * Scale the requested size *before* rounding elt_size up, otherwise
+	 * the scaling factor is computed against the new elt_size and is
+	 * always 1: */
+	if (elt_size < sizeof(sblock_t)) {
+		size = size * (sizeof(sblock_t) / elt_size +
+			       ((sizeof(sblock_t) % elt_size) ? 1 : 0));
+		elt_size = sizeof(sblock_t);
+	}
+
+	/* nb of element fitting in the pool is just ceil(size/elt_size)*/
+	nb_sblock = (size / elt_size) + ((size % elt_size) ? 1 : 0);
+
+	/* space needed for the control area (padded to cache line size)*/
+	control_sz = _ODP_ROUNDUP_CACHE_LINE(sizeof(_odp_ishm_pool_ctrl_t));
+
+	/* space needed for user area is : */
+	user_sz = nb_sblock * elt_size;
+
+	total_sz = control_sz +
+		   user_sz;
+
+	/* allocate required memory: */
+	blk_idx = _odp_ishm_reserve(pool_name, total_sz, -1,
+				    ODP_CACHE_LINE_SIZE, 0, flags, 0);
+	if (blk_idx < 0) {
+		_ODP_ERR("_odp_ishm_reserve failed.");
+		return NULL;
+	}
+
+	spool = _odp_ishm_address(blk_idx);
+	if (spool == NULL) {
+		_ODP_ERR("_odp_ishm_address failed.");
+		/* release the just-reserved block instead of leaking it: */
+		_odp_ishm_free_by_index(blk_idx);
+		return NULL;
+	}
+
+	/* store in pool array (needed for look up): */
+	pool_blk_idx[store_idx] = blk_idx;
+
+	/* remember block index, needed when pool is destroyed */
+	spool->ctrl.ishm_blk_idx = blk_idx;
+
+	/* remember element (sblock) size and their number: */
+	spool->ctrl.element_sz = elt_size;
+	spool->ctrl.nb_elem = nb_sblock;
+
+	/* prepare mutex: */
+	odp_spinlock_init(&spool->ctrl.lock);
+
+	/* initialise pointers and things... */
+	spool->ctrl.user_addr =
+		(void *)((uintptr_t)spool + control_sz);
+
+	/* initialise the free list with the list of all elements:*/
+	spool->ctrl.free_head = spool->ctrl.user_addr;
+	for (i = 0; i < nb_sblock - 1; i++) {
+		block = (sblock_t *)((uintptr_t)spool->ctrl.user_addr +
+				     i * (uintptr_t)elt_size);
+		block->next = (sblock_t *)((uintptr_t)block +
+					   (uintptr_t)elt_size);
+	}
+	block = (sblock_t *)((uintptr_t)spool->ctrl.user_addr +
+			     (nb_sblock - 1) * (uintptr_t)elt_size);
+	block->next = NULL;
+
+	return spool;
+}
+
+/* allocated memory from the given slab pool */
+static void *_odp_ishmslab_alloc(pool_t *spool, uint64_t size)
+{
+	sblock_t *head;
+
+	/* a fixed-size pool cannot serve requests larger than its element */
+	if (size > spool->ctrl.element_sz)
+		return NULL;
+
+	odp_spinlock_lock(&spool->ctrl.lock);
+	head = spool->ctrl.free_head;
+	if (head != NULL)
+		spool->ctrl.free_head = head->next;
+	odp_spinlock_unlock(&spool->ctrl.lock);
+
+	if (head == NULL) {
+		_ODP_ERR("Out of memory. (Slab pool full)\n");
+		return NULL;
+	}
+
+	return head;
+}
+
+/* free a previously allocated buffer from a given slab pool */
+static int _odp_ishmslab_free(pool_t *spool, void *addr)
+{
+	uintptr_t user_start; /* start of user area */
+	uintptr_t user_stop; /* stop of user area */
+	sblock_t *block;
+
+	/* freeing NULL is regarded as OK, though without any effect: */
+	if (!addr)
+		return 0;
+
+	user_start = (uintptr_t)spool->ctrl.user_addr;
+	user_stop = user_start + spool->ctrl.element_sz * spool->ctrl.nb_elem;
+
+	/* some sanity checks: check that given address is within pool and
+	 * that relative address has element_sz granularity. Note ">=":
+	 * user_stop is one past the last element; accepting it would write
+	 * the free-list link outside the pool memory. */
+	if (((uintptr_t)addr < user_start) ||
+	    ((uintptr_t)addr >= user_stop) ||
+	    (((uintptr_t)addr - user_start) % spool->ctrl.element_sz)) {
+		_ODP_ERR("Invalid address to be freed\n");
+		return -1;
+	}
+
+	odp_spinlock_lock(&spool->ctrl.lock);
+	block = (sblock_t *)addr;
+	block->next = (sblock_t *)spool->ctrl.free_head;
+	spool->ctrl.free_head = addr;
+	odp_spinlock_unlock(&spool->ctrl.lock);
+
+	return 0;
+}
+
+/* section 3: common, external functions: */
+
+/* create a pool: either with fixed alloc size (if max_alloc/min_alloc<2) or
+ * of variable block size (if max_alloc == 0) */
+pool_t *_odp_ishm_pool_create(const char *pool_name, uint64_t size,
+			      uint64_t min_alloc, uint64_t max_alloc, int flags)
+{
+	int idx;
+	uint64_t pool_sz;
+
+	if (min_alloc > max_alloc) {
+		_ODP_ERR("invalid parameter: min_alloc > max_alloc");
+		return NULL;
+	}
+
+	/* find a free slot in the pool lookup table: */
+	idx = 0;
+	while (idx < MAX_NB_POOL && pool_blk_idx[idx] >= 0)
+		idx++;
+	if (idx == MAX_NB_POOL) {
+		_ODP_ERR("Max number of pool reached (MAX_NB_POOL)");
+		return NULL;
+	}
+
+	if ((min_alloc == 0) || ((max_alloc / min_alloc) > 2)) {
+		/* allocation sizes vary too much: use a buddy allocator.
+		 * Its worst-case efficiency is 50%, so double the requested
+		 * size to make sure the user demand can be satisfied. */
+		pool_sz = 2 * size;
+		return _odp_ishmbud_pool_create(pool_name, idx, pool_sz,
+						BUDDY_MIN_SIZE, flags);
+	}
+
+	/* min and max are close: use a fixed-element (slab) allocator,
+	 * sized so that 'size' bytes fit even when every allocation is only
+	 * min_alloc bytes: */
+	pool_sz = ((size / min_alloc) + ((size % min_alloc) ? 1 : 0))
+		  * max_alloc;
+	return _odp_ishmslab_pool_create(pool_name, idx, pool_sz,
+					 max_alloc, flags);
+}
+
+/* destroy a pool. everything goes away. no operation on the pool should
+ * follow. */
+int _odp_ishm_pool_destroy(pool_t *pool)
+{
+	int blk_idx = pool->ctrl.ishm_blk_idx;
+	int i;
+
+	/* release the lookup-table slot referring to this pool, if any: */
+	for (i = 0; i < MAX_NB_POOL; i++) {
+		if (pool_blk_idx[i] == blk_idx) {
+			pool_blk_idx[i] = -1;
+			break;
+		}
+	}
+
+	return _odp_ishm_free_by_index(blk_idx);
+}
+
+/* allocated a buffer from a pool */
+void *_odp_ishm_pool_alloc(_odp_ishm_pool_t *pool, uint64_t size)
+{
+	/* element_sz == 0 marks a buddy pool, anything else a slab pool */
+	if (pool->ctrl.element_sz)
+		return _odp_ishmslab_alloc(pool, size);
+
+	return _odp_ishmbud_alloc(pool, size);
+}
+
+/* free a previously allocated buffer from a pool */
+int _odp_ishm_pool_free(_odp_ishm_pool_t *pool, void *addr)
+{
+	/* element_sz == 0 marks a buddy pool, anything else a slab pool */
+	if (pool->ctrl.element_sz)
+		return _odp_ishmslab_free(pool, addr);
+
+	return _odp_ishmbud_free(pool, addr);
+}
+
+void _odp_ishm_pool_init(void)
+{
+	int idx;
+
+	/* mark every pool lookup slot as unused: */
+	for (idx = 0; idx < MAX_NB_POOL; idx++)
+		pool_blk_idx[idx] = -1;
+}
diff --git a/platform/linux-generic/odp_libconfig.c b/platform/linux-generic/odp_libconfig.c
new file mode 100644
index 000000000..d5e159e21
--- /dev/null
+++ b/platform/linux-generic/odp_libconfig.c
@@ -0,0 +1,344 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
+ * Copyright (c) 2021, Marvell
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <libconfig.h>
+
+#include <odp/api/version.h>
+#include <odp_global_data.h>
+#include <odp_debug_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_libconfig_config.h>
+
+int _odp_libconfig_init_global(void)
+{
+	const char *filename;
+	const char *vers;
+	const char *vers_rt;
+	const char *impl;
+	const char *impl_rt;
+	config_t *config = &odp_global_ro.libconfig_default;
+	config_t *config_rt = &odp_global_ro.libconfig_runtime;
+	const char *impl_field = "odp_implementation";
+	const char *vers_field = "config_file_version";
+
+	/* Initialise both config objects before any possible goto fail:
+	 * the failure path destroys them unconditionally. */
+	config_init(config);
+	config_init(config_rt);
+	odp_global_ro.has_config_rt = 0;
+
+	/* Parse the built-in default configuration string: */
+	if (!config_read_string(config, config_builtin)) {
+		_ODP_ERR("Failed to read default config: %s(%d): %s\n",
+			 config_error_file(config), config_error_line(config),
+			 config_error_text(config));
+		goto fail;
+	}
+
+	/* A runtime configuration file is optional: without the environment
+	 * variable only the built-in defaults are used. */
+	filename = getenv("ODP_CONFIG_FILE");
+	if (filename == NULL)
+		return 0;
+
+	_ODP_PRINT("ODP CONFIG FILE: %s\n", filename);
+
+	if (!config_read_file(config_rt, filename)) {
+		_ODP_PRINT(" ERROR: failed to read config file: %s(%d): %s\n\n",
+			   config_error_file(config_rt),
+			   config_error_line(config_rt),
+			   config_error_text(config_rt));
+		goto fail;
+	}
+
+	/* Check runtime configuration's implementation name and version */
+	if (!config_lookup_string(config, impl_field, &impl) ||
+	    !config_lookup_string(config_rt, impl_field, &impl_rt)) {
+		_ODP_PRINT(" ERROR: missing mandatory field: %s\n\n", impl_field);
+		goto fail;
+	}
+	if (!config_lookup_string(config, vers_field, &vers) ||
+	    !config_lookup_string(config_rt, vers_field, &vers_rt)) {
+		_ODP_PRINT(" ERROR: missing mandatory field: %s\n\n", vers_field);
+		goto fail;
+	}
+	/* The runtime file must target this very implementation... */
+	if (strcmp(impl, impl_rt)) {
+		_ODP_PRINT(" ERROR: ODP implementation name mismatch:\n"
+			   " Expected: \"%s\"\n"
+			   " Found: \"%s\"\n\n", impl, impl_rt);
+		goto fail;
+	}
+	/* ...and use the same config file format version as the default: */
+	if (strcmp(vers, vers_rt)) {
+		_ODP_PRINT(" ERROR: config file version number mismatch:\n"
+			   " Expected: \"%s\"\n"
+			   " Found: \"%s\"\n\n", vers, vers_rt);
+		goto fail;
+	}
+
+	/* only now is the runtime config declared valid for lookups: */
+	odp_global_ro.has_config_rt = 1;
+	return 0;
+fail:
+	/* On any failure both configs are destroyed and has_config_rt
+	 * remains 0; the caller must treat init as failed. */
+	_ODP_ERR("Config file failure\n");
+	config_destroy(config);
+	config_destroy(config_rt);
+	return -1;
+}
+
+/* release both libconfig objects created at init; always succeeds */
+int _odp_libconfig_term_global(void)
+{
+	config_destroy(&odp_global_ro.libconfig_runtime);
+	config_destroy(&odp_global_ro.libconfig_default);
+
+	return 0;
+}
+
+/**
+ * String setting value
+ *
+ * Returns a string from setting. A valid runtime setting overrides
+ * default even if the string is empty.
+ *
+ * NOTE(review): strcpy() below writes strlen + 1 bytes, so 'value' must
+ * hold str_size + 1 bytes — confirm callers' buffer sizes.
+ *
+ * @param path Path of the setting
+ * @param[out] value String to be copied from the setting
+ * @param str_size Maximum string length to be copied
+ *
+ * @return Size of the string copied
+ * @retval <0 on failure
+*/
+int _odp_libconfig_lookup_str(const char *path, char *value,
+			      unsigned int str_size)
+{
+	const config_t *config;
+	unsigned int length, i;
+	const char *str;
+
+	/* pass 0 queries the runtime config, pass 1 the built-in default,
+	 * so a runtime setting always wins: */
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			config = &odp_global_ro.libconfig_runtime;
+		else
+			config = &odp_global_ro.libconfig_default;
+
+		if (config_lookup_string(config, path, &str) == CONFIG_FALSE)
+			continue;
+
+		length = strlen(str);
+
+		/* Runtime config overrides even if it's empty string */
+		if (value == NULL || str_size == 0 || length == 0)
+			return length;
+
+		if (length > str_size) {
+			_ODP_ERR("libconfig: length of %d bigger than size %u\n", length, str_size);
+			return -1;
+		}
+
+		strcpy(value, str);
+		return length;
+	}
+
+	_ODP_ERR("libconfig: %s is not defined in config files\n", path);
+	return -1;
+}
+
+/* integer setting lookup; the runtime config, when it defines the path,
+ * overrides the built-in default. Returns 1 when found, 0 otherwise. */
+int _odp_libconfig_lookup_int(const char *path, int *value)
+{
+	int found_def, found_rt;
+
+	found_def = config_lookup_int(&odp_global_ro.libconfig_default, path,
+				      value);
+
+	/* query order matters: a successful runtime lookup overwrites the
+	 * default value written just above */
+	found_rt = config_lookup_int(&odp_global_ro.libconfig_runtime, path,
+				     value);
+
+	return (found_def == CONFIG_TRUE || found_rt == CONFIG_TRUE) ? 1 : 0;
+}
+
+/**
+ * String array setting values
+ *
+ * Returns the number of strings in a string array setting. A valid runtime
+ * setting overrides default even if the array is empty. Outputs upto
+ * 'count' strings when the 'value' array pointer is not NULL. If return
+ * value is larger than 'count', there are more strings than the function was
+ * allowed to output. If return value (N) is less than 'count', only
+ * strings[0 ... N-1] have been written.
+ *
+ * NOTE(review): strcpy writes strlen + 1 bytes, so every value[] buffer
+ * must hold str_size + 1 bytes — confirm against callers.
+ *
+ * @param path Path of the setting
+ * @param[out] value Array of strings to be copied from the setting
+ * @param count Number of strings to be copied
+ * @param str_size Maximum string length to be copied
+ *
+ * @return Number of strings in the setting
+ * @retval <0 on failure
+*/
+int _odp_libconfig_lookup_array_str(const char *path, char **value,
+				    int count, unsigned int str_size)
+{
+	config_setting_t *setting, *elem;
+	const config_t *config;
+	int num, i, j;
+
+	/* pass 0 queries the runtime config, pass 1 the built-in default */
+	for (i = 0; i < 2; i++) {
+		if (i == 0)
+			config = &odp_global_ro.libconfig_runtime;
+		else
+			config = &odp_global_ro.libconfig_default;
+
+		setting = config_lookup(config, path);
+
+		if (setting == NULL)
+			continue;
+
+		/* invalid config if element is not an array */
+		if (config_setting_is_array(setting) == CONFIG_FALSE) {
+			_ODP_ERR("libconfig: %s is not an array\n", path);
+			return -1;
+		}
+		num = config_setting_length(setting);
+
+		/* nothing to copy: just report the number of strings */
+		if (num == 0 || count == 0 || value == NULL)
+			return num;
+
+		/* libconfig arrays are homogeneous, so checking the first
+		 * element's type covers them all */
+		elem = config_setting_get_elem(setting, 0);
+		if (config_setting_type(elem) != CONFIG_TYPE_STRING) {
+			_ODP_ERR("libconfig: %s array is not of type string\n", path);
+			return -1;
+		}
+
+		/* copy at most 'count' strings, as documented above: the
+		 * original loop ran to 'num' and could write past the
+		 * caller's value[] array when num > count */
+		for (j = 0; j < num && j < count; j++) {
+			elem = config_setting_get_elem(setting, j);
+			/* NOTE(review): accesses libconfig internals
+			 * (elem->value.sval) instead of the public
+			 * config_setting_get_string(); fragile across
+			 * libconfig versions. */
+			if (strlen(elem->value.sval) > str_size) {
+				_ODP_ERR("libconfig: length of %s bigger than size %u\n",
+					 elem->value.sval, str_size);
+				return -1;
+			}
+			strcpy(value[j], elem->value.sval);
+		}
+
+		return num;
+	}
+
+	_ODP_ERR("libconfig: %s is not defined in config files\n", path);
+	return -1;
+}
+
+/* integer array lookup: defaults are read first, then overridden by the
+ * runtime config when it defines the same array */
+int _odp_libconfig_lookup_array(const char *path, int value[], int max_num)
+{
+	const config_t *config;
+	config_setting_t *array;
+	int len, k, pass;
+	int copied = 0;
+
+	for (pass = 0; pass < 2; pass++) {
+		config = (pass == 0) ? &odp_global_ro.libconfig_default :
+				       &odp_global_ro.libconfig_runtime;
+
+		array = config_lookup(config, path);
+
+		/* Runtime config may not define the array, whereas
+		 * the default config has it always defined. When the array
+		 * is defined, it must be correctly formatted. */
+		if (array == NULL)
+			continue;
+
+		if (config_setting_is_array(array) == CONFIG_FALSE)
+			return 0;
+
+		len = config_setting_length(array);
+
+		if (len <= 0 || len > max_num)
+			return 0;
+
+		for (k = 0; k < len; k++)
+			value[k] = config_setting_get_int_elem(array, k);
+
+		copied = len;
+	}
+
+	/* Number of elements copied */
+	return copied;
+}
+
+/* look up "<base>.<local>.<name>" first (most specific), then fall back to
+ * "<base>.<name>". Returns 1 when a value was found, 0 otherwise. */
+static int lookup_int(config_t *cfg,
+		      const char *base_path,
+		      const char *local_path,
+		      const char *name,
+		      int *value)
+{
+	char full_path[256];
+
+	if (local_path) {
+		snprintf(full_path, sizeof(full_path), "%s.%s.%s", base_path,
+			 local_path, name);
+		if (config_lookup_int(cfg, full_path, value) == CONFIG_TRUE)
+			return 1;
+	}
+
+	snprintf(full_path, sizeof(full_path), "%s.%s", base_path, name);
+
+	return (config_lookup_int(cfg, full_path, value) == CONFIG_TRUE) ?
+	       1 : 0;
+}
+
+/* extended integer lookup: the runtime config is consulted first and, only
+ * when it has no match, the built-in default. Returns 1 when found. */
+int _odp_libconfig_lookup_ext_int(const char *base_path,
+				  const char *local_path,
+				  const char *name,
+				  int *value)
+{
+	/* short-circuit: the default is only queried when the runtime
+	 * config did not provide the value */
+	if (lookup_int(&odp_global_ro.libconfig_runtime,
+		       base_path, local_path, name, value) ||
+	    lookup_int(&odp_global_ro.libconfig_default,
+		       base_path, local_path, name, value))
+		return 1;
+
+	return 0;
+}
+
+/* Print the default and (when present) the runtime configuration to the
+ * ODP log. config_write() requires a FILE *, so the text is staged in a
+ * temporary file and then copied to the log character by character.
+ * Returns 0 on success, -1 on tmpfile()/fprintf() failure. */
+int _odp_libconfig_print(void)
+{
+	int c;
+	/* Temp file for config_write() output. Suppress Coverity warning about tmpfile() usage. */
+	/* coverity[secure_temp] */
+	FILE *file = tmpfile();
+
+	if (file == NULL)
+		return -1;
+
+	if (fprintf(file,
+		    "\nODP_CONFIG_FILE default values:\n"
+		    "-------------------------------\n\n") < 0)
+		goto fail;
+
+	config_write(&odp_global_ro.libconfig_default, file);
+
+	/* the runtime section is printed only when a valid override file
+	 * was loaded at init: */
+	if (odp_global_ro.has_config_rt) {
+		if (fprintf(file,
+			    "\nODP_CONFIG_FILE override values:\n"
+			    "--------------------------------\n\n") < 0)
+			goto fail;
+
+		config_write(&odp_global_ro.libconfig_runtime, file);
+	}
+
+	/* Print temp file to the log */
+	rewind(file);
+	while ((c = fgetc(file)) != EOF)
+		_ODP_PRINT("%c", (char)c);
+
+	fclose(file);
+	return 0;
+
+fail:
+	/* tmpfile() files are deleted automatically on fclose() */
+	fclose(file);
+	return -1;
+}
diff --git a/platform/linux-generic/odp_ml.c b/platform/linux-generic/odp_ml.c
new file mode 100644
index 000000000..6ab9e7177
--- /dev/null
+++ b/platform/linux-generic/odp_ml.c
@@ -0,0 +1,2646 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/autoheader_external.h>
+
+#include <odp/api/atomic.h>
+#include <odp/api/buffer.h>
+#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/ml.h>
+#include <odp/api/pool.h>
+#include <odp/api/queue.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/std_types.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/event_inline_types.h>
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_buffer_internal.h>
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_pool_internal.h>
+
+#include <onnxruntime_c_api.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#define ML_MAX_IO_SEGS UINT32_MAX
+#define ML_MAX_COMPL_ID 32
+#define ML_MAX_CONFIG_STR_LEN 65
+#define ML_MAX_MODEL_SIZE (1024 * 1024 * 1024)
+#define ML_MAX_MODELS_CREATED CONFIG_ML_MAX_MODELS
+#define ML_MAX_MODELS_LOADED CONFIG_ML_MAX_MODELS
+
+/* Internal error codes (non-zero, so 0 can mean success) */
+enum {
+	/* Feature not supported */
+	ML_FEATURE_NOT_SUPPORTED = 1,
+
+	/* Model is not created */
+	ML_NOT_CREATED,
+
+	/* Model was not loaded */
+	ML_NOT_LOADED,
+
+	/* Model has already been loaded */
+	ML_LOADED,
+
+	/* Bad input */
+	ML_BAD_INPUT,
+
+	/* Failure reported by the underlying onnxruntime library */
+	ML_LIB_FAILED,
+
+	/* Bad output */
+	ML_BAD_OUTPUT,
+
+	/* Bad handle */
+	ML_BAD_HDL
+};
+
+/* Onnxruntime session options read from the ODP config file and applied
+ * to every session in set_ort_run_opts() */
+typedef struct ort_run_opts_t {
+	/* Non-zero: enable onnxruntime profiling output */
+	int enable_profiling;
+
+	/* Sequential or parallel graph execution */
+	ExecutionMode execution_mode;
+
+	/* Thread counts for onnxruntime; 0 leaves the library default */
+	int inter_op_num_threads;
+
+	int intra_op_num_threads;
+
+	GraphOptimizationLevel graph_opt_level;
+
+	/* Where to dump the optimized model; empty string disables dumping */
+	char opt_model_filepath[ML_MAX_CONFIG_STR_LEN];
+} ort_run_opts_t;
+
+/* A contiguous input data region passed to inference */
+typedef struct ml_input_t {
+	/* Combined input start address */
+	void *addr;
+	/* Data size in bytes */
+	uint64_t size;
+} ml_input_t;
+
+/* Onnxruntime model info */
+typedef struct ml_model_t {
+	/* Guards state, which must be accessed atomically */
+	odp_ticketlock_t lock;
+
+	enum {
+		ML_STATE_FREE = 0, /* Not allocated */
+		ML_STATE_CREATED, /* Model is created */
+		ML_STATE_LOADED, /* Model is loaded */
+		ML_STATE_INFERENCING, /* Model is inferencing */
+	} state;
+
+	OrtSession *session;
+	OrtSessionOptions *session_opts;
+	/* Highest completion id accepted for this model (from create param) */
+	uint32_t max_compl_id;
+	/* Per-completion-id status.
+	 * NOTE(review): sized ML_MAX_COMPL_ID while odp_ml_model_create()
+	 * accepts max_compl_id == ML_MAX_COMPL_ID; confirm completion ids
+	 * are strictly less than ML_MAX_COMPL_ID, or size these arrays
+	 * ML_MAX_COMPL_ID + 1. */
+	odp_atomic_u32_t compl_status[ML_MAX_COMPL_ID];
+
+	odp_ml_model_info_t info;
+	/* Per-tensor info and byte sizes (0 for dynamic-batch tensors) */
+	odp_ml_input_info_t input_info[CONFIG_ML_MAX_INPUTS];
+	uint64_t input_sizes[CONFIG_ML_MAX_INPUTS];
+	odp_ml_output_info_t output_info[CONFIG_ML_MAX_OUTPUTS];
+	uint64_t output_sizes[CONFIG_ML_MAX_OUTPUTS];
+
+	/* Per-completion-id user context pointer */
+	struct {
+		void *user_ptr;
+	} result[ML_MAX_COMPL_ID];
+} ml_model_t;
+
+/* ML module global state, allocated from shared memory at init */
+typedef struct ml_global_t {
+	/* Backing shared memory block */
+	odp_shm_t shm;
+
+	odp_ml_capability_t capa;
+	odp_ml_config_t ml_config;
+
+	odp_pool_param_t pool_param;
+
+	/* Onnxruntime API table and environment, shared by all models */
+	const OrtApi *ort_api;
+	OrtEnv *env;
+	ort_run_opts_t ort_run_opts;
+
+	ml_model_t models[ML_MAX_MODELS_CREATED];
+
+} ml_global_t;
+
+/* Pointer into the shared memory region holding the global state */
+static ml_global_t *_odp_ml_glb;
+
+/* Convert an ODP model handle back to the internal model pointer
+ * (handles are the model table entry addresses, see odp_ml_model_create()) */
+static inline ml_model_t *ml_model_from_handle(odp_ml_model_t model)
+{
+	return (ml_model_t *)(uintptr_t)model;
+}
+
+/* Fill in ML capabilities of this implementation.
+ *
+ * Reports zeroed capabilities when ML is disabled. Pool limits are
+ * derived from the buffer pool capability. Returns 0 on success, -1 if
+ * pool capability query fails. */
+int odp_ml_capability(odp_ml_capability_t *capa)
+{
+	odp_pool_capability_t pool_capa;
+
+	memset(capa, 0, sizeof(odp_ml_capability_t));
+
+	if (odp_global_ro.disable.ml) {
+		_ODP_PRINT("ML is disabled\n");
+		return 0;
+	}
+
+	capa->max_model_size = ML_MAX_MODEL_SIZE;
+	capa->max_models = ML_MAX_MODELS_CREATED;
+	capa->max_models_loaded = ML_MAX_MODELS_LOADED;
+	capa->max_compl_id = ML_MAX_COMPL_ID;
+	capa->max_inputs = CONFIG_ML_MAX_INPUTS;
+	capa->max_outputs = CONFIG_ML_MAX_OUTPUTS;
+	capa->max_segs_per_input = ML_MAX_IO_SEGS;
+	capa->max_segs_per_output = ML_MAX_IO_SEGS;
+	/* No alignment restrictions on input/output data */
+	capa->min_input_align = 1;
+	capa->min_output_align = 1;
+
+	/* All completion modes and queue types supported for both load and run */
+	capa->load.compl_mode_mask = ODP_ML_COMPL_MODE_SYNC |
+				     ODP_ML_COMPL_MODE_POLL |
+				     ODP_ML_COMPL_MODE_EVENT;
+	capa->load.compl_queue_plain = 1;
+	capa->load.compl_queue_sched = 1;
+
+	capa->run.compl_mode_mask = ODP_ML_COMPL_MODE_SYNC |
+				    ODP_ML_COMPL_MODE_POLL |
+				    ODP_ML_COMPL_MODE_EVENT;
+	capa->run.compl_queue_plain = 1;
+	capa->run.compl_queue_sched = 1;
+
+	if (odp_pool_capability(&pool_capa)) {
+		_ODP_ERR("Pool capability failed\n");
+		return -1;
+	}
+
+	/* Completion event pools are backed by buffer pools */
+	capa->pool.max_pools = pool_capa.buf.max_pools;
+	capa->pool.max_num = pool_capa.buf.max_num;
+	capa->pool.max_uarea_size = pool_capa.buf.max_uarea_size;
+	capa->pool.uarea_persistence = pool_capa.buf.uarea_persistence;
+	capa->pool.max_cache_size = pool_capa.buf.max_cache_size;
+	capa->pool.min_cache_size = pool_capa.buf.min_cache_size;
+
+	return 0;
+}
+
+/* Initialize ML configuration with defaults: a single model created and
+ * loaded. max_model_size is left 0 and must be set by the caller. */
+void odp_ml_config_init(odp_ml_config_t *config)
+{
+	memset(config, 0, sizeof(odp_ml_config_t));
+	config->max_models_created = 1;
+	config->max_models_loaded = 1;
+}
+
+/* Validate and store the global ML configuration.
+ *
+ * All limits must be non-zero, loaded <= created, and within this
+ * implementation's maximums. Returns 0 on success, -1 on invalid config. */
+int odp_ml_config(const odp_ml_config_t *config)
+{
+	if (!config) {
+		_ODP_ERR("Error: config must not be NULL\n");
+		return -1;
+	}
+
+	if (config->max_model_size == 0 || config->max_models_created == 0 ||
+	    config->max_models_loaded == 0) {
+		_ODP_ERR("Error: max_model_size, max_models_created and max_models_loaded"
+			 " must be bigger than 0\n");
+		return -1;
+	}
+
+	if (config->max_models_loaded > config->max_models_created) {
+		_ODP_ERR("Error: max_models_loaded %d exceeds max_models_created %d\n",
+			 config->max_models_loaded, config->max_models_created);
+		return -1;
+	}
+
+	if (config->max_models_created > ML_MAX_MODELS_CREATED) {
+		_ODP_ERR("Error: max_models_created %d exceeds maximum number"
+			 " of models that can be created in this driver %d\n",
+			 config->max_models_created, ML_MAX_MODELS_CREATED);
+		return -1;
+	}
+
+	if (config->max_models_loaded > ML_MAX_MODELS_LOADED) {
+		_ODP_ERR("Error: max_models_loaded %d exceeds maximum number"
+			 " of models that can be loaded in this driver %d\n",
+			 config->max_models_loaded, ML_MAX_MODELS_LOADED);
+		return -1;
+	}
+
+	if (config->max_model_size > ML_MAX_MODEL_SIZE) {
+		_ODP_ERR("max_model_size %" PRIu64 " exceeds supported maximum model size %d\n",
+			 config->max_model_size, ML_MAX_MODEL_SIZE);
+		return -1;
+	}
+
+	/* Accepted: becomes the limit checked by odp_ml_model_create() */
+	_odp_ml_glb->ml_config = *config;
+	return 0;
+}
+
+/* Initialize model creation parameters to all-zero defaults */
+void odp_ml_model_param_init(odp_ml_model_param_t *param)
+{
+	memset(param, 0, sizeof(odp_ml_model_param_t));
+}
+
+/* Check an onnxruntime status object.
+ *
+ * A NULL status means success. Otherwise log the error message, release
+ * the status object and return -1. */
+static int check_ortstatus(OrtStatus * const status)
+{
+	if (status != NULL) {
+		const char *msg = _odp_ml_glb->ort_api->GetErrorMessage(status);
+
+		_ODP_ERR("%s\n", msg);
+		_odp_ml_glb->ort_api->ReleaseStatus(status);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Get model input and output count.
+ *
+ * Queries the onnxruntime session for the number of input and output
+ * tensors. Returns 0 on success, -1 on failure. */
+static int get_model_io_count(OrtSession *model, uint32_t *num_inputs, uint32_t *num_outputs)
+{
+	size_t num = 0;
+	OrtStatus *status = NULL;
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+
+	status = ort_api->SessionGetInputCount(model, &num);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("Get model input count failed\n");
+		return -1;
+	}
+
+	*num_inputs = num;
+	_ODP_DBG("num_inputs: %u\n", *num_inputs);
+
+	status = ort_api->SessionGetOutputCount(model, &num);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("Get model output count failed\n");
+		return -1;
+	}
+
+	*num_outputs = num;
+	_ODP_DBG("num_outputs: %u\n", *num_outputs);
+
+	return 0;
+}
+
+/* Map an ONNX tensor element type to the corresponding ODP ML data type.
+ * Returns ODP_ML_DATA_TYPE_NONE for types ODP does not support
+ * (bool, complex, string, etc.). */
+static odp_ml_data_type_t onnx_dtype_to_odp_dtype(ONNXTensorElementDataType onnx_dtype)
+{
+	switch (onnx_dtype) {
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT:
+		return ODP_ML_DATA_TYPE_FP32;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8:
+		return ODP_ML_DATA_TYPE_UINT8;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8:
+		return ODP_ML_DATA_TYPE_INT8;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16:
+		return ODP_ML_DATA_TYPE_UINT16;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16:
+		return ODP_ML_DATA_TYPE_INT16;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32:
+		return ODP_ML_DATA_TYPE_INT32;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32:
+		return ODP_ML_DATA_TYPE_UINT32;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64:
+		return ODP_ML_DATA_TYPE_INT64;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64:
+		return ODP_ML_DATA_TYPE_UINT64;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16:
+		return ODP_ML_DATA_TYPE_FP16;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16:
+		return ODP_ML_DATA_TYPE_BFP16;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE:
+		return ODP_ML_DATA_TYPE_FP64;
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_BOOL:
+		/* Fall through */
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX64:
+		/* Fall through */
+	case ONNX_TENSOR_ELEMENT_DATA_TYPE_COMPLEX128:
+		/* Fall through */
+	default:
+		_ODP_ERR("onnx_dtype %d not supported by odp_ml\n", onnx_dtype);
+		return ODP_ML_DATA_TYPE_NONE;
+	}
+}
+
+/* Get the size of given odp_ml_data_type_t in bytes.
+ * Returns 0 for ODP_ML_DATA_TYPE_NONE and for unrecognized types. */
+static uint32_t size_of_odp_ml_data_type(odp_ml_data_type_t data_type)
+{
+	switch (data_type) {
+	case ODP_ML_DATA_TYPE_NONE:
+		return 0;
+
+	case ODP_ML_DATA_TYPE_INT8:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_UINT8:
+		return 1;
+
+	case ODP_ML_DATA_TYPE_INT16:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_UINT16:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_FP16:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_BFP16:
+		return 2;
+
+	case ODP_ML_DATA_TYPE_INT24:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_UINT24:
+		return 3;
+
+	case ODP_ML_DATA_TYPE_INT32:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_UINT32:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_FP32:
+		return 4;
+
+	case ODP_ML_DATA_TYPE_INT64:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_UINT64:
+		/* Fall through */
+	case ODP_ML_DATA_TYPE_FP64:
+		return 8;
+
+	default:
+		return 0;
+	}
+}
+
+/* Convert ONNX dimension values into an ODP shape description.
+ *
+ * shape->num_dim must be set by the caller. ONNX reports a symbolic
+ * (dynamic) dimension as -1; at most one such dimension is supported and
+ * makes the shape ODP_ML_SHAPE_BATCH, otherwise it is ODP_ML_SHAPE_STATIC.
+ * Returns 0 on success, -1 on zero/negative/too-large dimensions or more
+ * than one dynamic dimension. */
+static int get_shape(int64_t dims[], odp_ml_shape_info_t *shape)
+{
+	uint32_t dyn_cnt = 0;
+
+	for (uint32_t i = 0; i < shape->num_dim; i++) {
+		if (dims[i] == 0) {
+			_ODP_ERR("Dimension value: %" PRId64 " must be at least 1\n", dims[i]);
+			return -1;
+		} else if (dims[i] == -1) { /* Symbolic dimension */
+			dyn_cnt++;
+			shape->dim[i] = ODP_ML_DIM_DYNAMIC;
+			shape->dim_min[i] = 0; /*unknown*/
+			shape->dim_max[i] = 0; /*unknown*/
+		} else if (dims[i] > 0 && dims[i] < UINT32_MAX) {
+			/* Fixed dimension: min == max == dim */
+			shape->dim[i] = dims[i];
+			shape->dim_min[i] = dims[i];
+			shape->dim_max[i] = dims[i];
+		} else {
+			_ODP_ERR("Dimension value: %" PRId64 " invalid\n", dims[i]);
+			return -1;
+		}
+	}
+
+	if (dyn_cnt == 0) {
+		shape->type = ODP_ML_SHAPE_STATIC;
+	} else if (dyn_cnt == 1) {
+		shape->type = ODP_ML_SHAPE_BATCH;
+	} else {
+		_ODP_ERR("Data shape type not supported by ODP\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Multiply *size by every dimension of the shape.
+ *
+ * Caller must pre-seed *size with the element (data type) size in bytes,
+ * so the result is the total tensor size. Tensors containing a dynamic
+ * dimension get size 0. */
+static inline void calculate_model_io_size(const odp_ml_shape_info_t *shape, uint64_t *size)
+{
+	/* Calculate the data size in bytes of this tensor, 0 for tensors with
+	 * dynamic batch sizes */
+	for (size_t i = 0; i < shape->num_dim; i++) {
+		/* Skip dynamic dimension size */
+		if (shape->dim[i] == ODP_ML_DIM_DYNAMIC) {
+			*size = 0;
+			break;
+		}
+		(*size) *= shape->dim[i];
+	}
+}
+
+/* Extract shape, data type, element size and total byte size of one
+ * input/output tensor from onnxruntime type info.
+ *
+ * *size is 0 for tensors with a dynamic dimension. Returns 0 on success,
+ * -1 on onnxruntime failure, unsupported data type or too many dims. */
+static int get_model_io_type_shape_size(OrtTypeInfo *type_info, odp_ml_shape_info_t *shape,
+					odp_ml_data_type_t *data_type, uint32_t *data_type_size,
+					uint64_t *size)
+{
+	ONNXTensorElementDataType tensor_type;
+	const OrtTensorTypeAndShapeInfo *tensor_info;
+	size_t num_dim = 0;
+	OrtStatus *status = NULL;
+	int64_t dims[ODP_ML_MAX_DIMS] = {0};
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+
+	/* tensor_info is owned by type_info, no separate release needed */
+	status = ort_api->CastTypeInfoToTensorInfo(type_info, &tensor_info);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("CastTypeInfoToTensorInfo failed\n");
+		return -1;
+	}
+
+	status = ort_api->GetTensorElementType(tensor_info, &tensor_type);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetTensorElementType failed\n");
+		return -1;
+	}
+
+	*data_type = onnx_dtype_to_odp_dtype(tensor_type);
+	if (*data_type == ODP_ML_DATA_TYPE_NONE) /* Type not supported by odp */
+		return -1;
+
+	status = ort_api->GetDimensionsCount(tensor_info, &num_dim);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetDimensionsCount failed\n");
+		return -1;
+	}
+
+	if (num_dim > ODP_ML_MAX_DIMS) {
+		_ODP_ERR("Number of dimensions: %zu exceeds supported maximum number"
+			 " of dimensions: %d\n", num_dim, ODP_ML_MAX_DIMS);
+		return -1;
+	}
+	shape->num_dim = num_dim;
+
+	status = ort_api->GetDimensions(tensor_info, dims, num_dim);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetDimensions failed\n");
+		return -1;
+	}
+
+	if (get_shape(dims, shape))
+		return -1;
+
+	*data_type_size = size_of_odp_ml_data_type(*data_type);
+
+	/* Total size = element size times all dimensions (0 if dynamic) */
+	*size = *data_type_size;
+	calculate_model_io_size(shape, size);
+
+	return 0;
+}
+
+/* Get model input and output info.
+ *
+ * Fills mdl->input_info/output_info and the per-tensor byte sizes.
+ * Tensor names always come from the model metadata; shape/type come from
+ * param->extra_info when the caller provided it (e.g. to pin dynamic
+ * dimensions), otherwise from the model metadata via
+ * get_model_io_type_shape_size(). Returns 0 on success, -1 on failure. */
+static int get_model_io_info(OrtSession *session, ml_model_t *mdl,
+			     const odp_ml_model_param_t *param)
+{
+	char *name;
+	OrtTypeInfo *type_info;
+	const odp_ml_data_format_t *data_format;
+	OrtStatus *status = NULL;
+	OrtAllocator *allocator = NULL;
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	odp_ml_input_info_t *input_info = mdl->input_info;
+	odp_ml_output_info_t *output_info = mdl->output_info;
+
+	/* Default allocator is owned by onnxruntime, not released here */
+	status = ort_api->GetAllocatorWithDefaultOptions(&allocator);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetAllocatorWithDefaultOptions failed\n");
+		return -1;
+	}
+
+	/* Retrieve info about input array. */
+	memset(input_info, 0, sizeof(mdl->input_info));
+	for (uint32_t i = 0; i < mdl->info.num_inputs; i++) {
+		name = NULL;
+		status = ort_api->SessionGetInputName(session, i, allocator, &name);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("Get %uth input name failed\n", i);
+			return -1;
+		}
+
+		strncpy(input_info[i].name, name, ODP_ML_MODEL_IO_NAME_LEN - 1);
+		input_info[i].name[ODP_ML_MODEL_IO_NAME_LEN - 1] = 0;
+
+		/* Free memory allocated by SessionGetInputName */
+		status = ort_api->AllocatorFree(allocator, name);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("AllocatorFree %uth input_name failed\n", i);
+			return -1;
+		}
+
+		/* Caller-supplied format overrides model metadata */
+		if (param->extra_info.num_inputs) {
+			data_format = &param->extra_info.input_format[i];
+
+			input_info[i].shape = data_format->shape;
+			input_info[i].data_type = data_format->data_type;
+			input_info[i].data_type_size = data_format->data_type_size;
+
+			mdl->input_sizes[i] = input_info[i].data_type_size;
+			calculate_model_io_size(&data_format->shape, &mdl->input_sizes[i]);
+			continue;
+		}
+
+		type_info = NULL;
+		status = ort_api->SessionGetInputTypeInfo(session, i, &type_info);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("SessionGetInputTypeInfo failed\n");
+			return -1;
+		}
+
+		if (get_model_io_type_shape_size(type_info, &input_info[i].shape,
+						 &input_info[i].data_type,
+						 &input_info[i].data_type_size,
+						 &mdl->input_sizes[i])) {
+			_ODP_ERR("get_model_io_type_shape_size() for input failed\n");
+			ort_api->ReleaseTypeInfo(type_info);
+			return -1;
+		}
+
+		ort_api->ReleaseTypeInfo(type_info);
+	}
+
+	/* Retrieve info about output array. */
+	memset(output_info, 0, sizeof(mdl->output_info));
+	for (uint32_t i = 0; i < mdl->info.num_outputs; i++) {
+		name = NULL;
+		status = ort_api->SessionGetOutputName(session, i, allocator, &name);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("Get %uth output name failed\n", i);
+			return -1;
+		}
+
+		strncpy(output_info[i].name, name, ODP_ML_MODEL_IO_NAME_LEN - 1);
+		output_info[i].name[ODP_ML_MODEL_IO_NAME_LEN - 1] = 0;
+
+		/* Free memory allocated by SessionGetOutputName */
+		status = ort_api->AllocatorFree(allocator, name);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("AllocatorFree %uth output_name failed\n", i);
+			return -1;
+		}
+
+		/* Caller-supplied format overrides model metadata */
+		if (param->extra_info.num_outputs) {
+			data_format = &param->extra_info.output_format[i];
+
+			output_info[i].shape = data_format->shape;
+			output_info[i].data_type = data_format->data_type;
+			output_info[i].data_type_size = data_format->data_type_size;
+
+			mdl->output_sizes[i] = output_info[i].data_type_size;
+			calculate_model_io_size(&data_format->shape, &mdl->output_sizes[i]);
+			continue;
+		}
+
+		type_info = NULL;
+		status = ort_api->SessionGetOutputTypeInfo(session, i, &type_info);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("SessionGetOutputTypeInfo failed\n");
+			return -1;
+		}
+
+		if (get_model_io_type_shape_size(type_info, &output_info[i].shape,
+						 &output_info[i].data_type,
+						 &output_info[i].data_type_size,
+						 &mdl->output_sizes[i])) {
+			_ODP_ERR("get_model_io_type_shape_size() for output failed\n");
+			ort_api->ReleaseTypeInfo(type_info);
+			return -1;
+		}
+
+		ort_api->ReleaseTypeInfo(type_info);
+	}
+
+	return 0;
+}
+
+/* Validate input/output counts of a model against implementation limits
+ * and against the counts in param->extra_info (which, when non-zero, must
+ * match the model metadata and come with format arrays).
+ * Returns 0 when consistent, -1 otherwise. */
+static inline int check_model_io_num(const odp_ml_model_param_t *param,
+				     uint32_t num_inputs, uint32_t num_outputs)
+{
+	/* Make sure the number of inputs/outputs not exceeding the supported
+	 * model max inputs/outputs */
+	if (num_inputs > CONFIG_ML_MAX_INPUTS) {
+		_ODP_ERR("The model's number of inputs %u exceeds the maximum "
+			 "number of inputs supported in a model %u\n",
+			 num_inputs, CONFIG_ML_MAX_INPUTS);
+		return -1;
+	}
+
+	if (num_outputs > CONFIG_ML_MAX_OUTPUTS) {
+		_ODP_ERR("The model's number of outputs %u exceeds the maximum "
+			 "number of outputs supported in a model %u\n",
+			 num_outputs, CONFIG_ML_MAX_OUTPUTS);
+
+		return -1;
+	}
+
+	/* Make sure the numbers of inputs/outputs provided in the extra_info of
+	 * param match the numbers defined in model metadata. */
+	if (param->extra_info.num_inputs &&
+	    param->extra_info.num_inputs != num_inputs) {
+		_ODP_ERR("Provided param->extra_info.num_inputs %u does not match the"
+			 " number of inputs defined in model metadata: %u\n",
+			 param->extra_info.num_inputs, num_inputs);
+		return -1;
+	}
+
+	if (param->extra_info.num_outputs && param->extra_info.num_outputs != num_outputs) {
+		_ODP_ERR("Provided param->extra_info.num_outputs %u does not match the"
+			 " number of outputs defined in model metadata: %u\n",
+			 param->extra_info.num_outputs, num_outputs);
+		return -1;
+	}
+
+	/* A non-zero count requires the matching format array */
+	if (param->extra_info.num_inputs && !param->extra_info.input_format) {
+		_ODP_ERR("num_inputs is provided but not input_format in param->extra_info\n");
+		return -1;
+	}
+
+	if (param->extra_info.num_outputs && !param->extra_info.output_format) {
+		_ODP_ERR("num_outputs is provided but not output_format in param->extra_info\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Create an onnxruntime session from the in-memory model blob and fill
+ * mdl with I/O counts, version and per-tensor info.
+ *
+ * On success *session holds the new session (caller owns it). On any
+ * failure the session (and metadata, where applicable) is released and -1
+ * is returned. */
+static int create_ort_model(const odp_ml_model_param_t *param, OrtSession **session,
+			    ml_model_t *mdl, OrtSessionOptions *session_opts)
+{
+	OrtStatus *status;
+	int64_t model_version;
+	uint32_t num_inputs = 0;
+	uint32_t num_outputs = 0;
+	OrtModelMetadata *metadata = {0};
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+
+	status = ort_api->CreateSessionFromArray(_odp_ml_glb->env,
+						 param->model,
+						 param->size,
+						 session_opts,
+						 session);
+	if (check_ortstatus(status) || !(*session)) {
+		_ODP_ERR("CreateSessionFromArray failed\n");
+		return -1;
+	}
+
+	if (get_model_io_count(*session, &num_inputs, &num_outputs)) {
+		_ODP_ERR("get_model_io_count() failed\n");
+		ort_api->ReleaseSession(*session);
+		return -1;
+	}
+
+	if (check_model_io_num(param, num_inputs, num_outputs)) {
+		ort_api->ReleaseSession(*session);
+		return -1;
+	}
+
+	mdl->max_compl_id = param->max_compl_id;
+	mdl->info.num_inputs = num_inputs;
+	mdl->info.num_outputs = num_outputs;
+
+	/* Get metadata */
+	status = ort_api->SessionGetModelMetadata(*session, &metadata);
+	if (check_ortstatus(status) || !metadata) {
+		_ODP_ERR("SessionGetModelMetadata failed\n");
+		ort_api->ReleaseSession(*session);
+		return -1;
+	}
+
+	/* Get model version */
+	status = ort_api->ModelMetadataGetVersion(metadata, &model_version);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("ModelMetadataGetVersion failed\n");
+		ort_api->ReleaseModelMetadata(metadata);
+		ort_api->ReleaseSession(*session);
+		return -1;
+	}
+	mdl->info.model_version = model_version;
+	/* Interface versioning is not provided by onnxruntime */
+	mdl->info.interface_version = 0;
+
+	if (get_model_io_info(*session, mdl, param)) {
+		_ODP_ERR("get_model_io_info() failed\n");
+		ort_api->ReleaseModelMetadata(metadata);
+		ort_api->ReleaseSession(*session);
+		return -1;
+	}
+
+	ort_api->ReleaseModelMetadata(metadata);
+	return 0;
+}
+
+/* Apply the globally configured onnxruntime options (profiling, execution
+ * mode, thread counts, graph optimization level, optimized model dump
+ * path) to the given session options. 'name' is used as the profiling
+ * file prefix. Returns 0 on success, -1 on failure. */
+static int set_ort_run_opts(const char *name, OrtSessionOptions *se_opts)
+{
+	OrtStatus *status;
+	ort_run_opts_t *opts = &_odp_ml_glb->ort_run_opts;
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+
+	if (opts->enable_profiling) {
+		status = ort_api->EnableProfiling(se_opts, name);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("Enable profiling failed\n");
+			return -1;
+		}
+	}
+
+	status = ort_api->SetSessionExecutionMode(se_opts, opts->execution_mode);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("SetSessionExecutionMode failed\n");
+		return -1;
+	}
+
+	/* Thread count 0 keeps the onnxruntime default */
+	if (opts->intra_op_num_threads) {
+		status = ort_api->SetIntraOpNumThreads(se_opts, opts->intra_op_num_threads);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("SetIntraOpNumThreads failed\n");
+			return -1;
+		}
+	}
+
+	if (opts->inter_op_num_threads) {
+		status = ort_api->SetInterOpNumThreads(se_opts, opts->inter_op_num_threads);
+		if (check_ortstatus(status)) {
+			_ODP_ERR("SetInterOpNumThreads failed\n");
+			return -1;
+		}
+	}
+
+	status = ort_api->SetSessionGraphOptimizationLevel(se_opts, opts->graph_opt_level);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("SetSessionGraphOptimizationLevel failed\n");
+		return -1;
+	}
+
+	/* Optimized model file path is not provided */
+	if (opts->opt_model_filepath[0] == '\0')
+		return 0;
+
+	status = ort_api->SetOptimizedModelFilePath(se_opts, opts->opt_model_filepath);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("SetOptimizedModelFilePath failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Zero all model info and per-tensor size arrays; used to undo partially
+ * filled fields when model creation fails midway */
+static inline void reset_mdl_info_sizes(ml_model_t *mdl)
+{
+	memset(&mdl->info, 0, sizeof(odp_ml_model_info_t));
+	memset(mdl->input_info, 0, sizeof(mdl->input_info));
+	memset(mdl->output_info, 0, sizeof(mdl->output_info));
+	memset(mdl->input_sizes, 0, sizeof(mdl->input_sizes));
+	memset(mdl->output_sizes, 0, sizeof(mdl->output_sizes));
+}
+
+/* Verify that every input/output has a defined shape type and that each
+ * dynamic input dimension has a dim_max (required for sizing buffers;
+ * must be supplied via the model param extra_info when the model itself
+ * leaves it open). Returns 0 when OK, -1 otherwise. */
+static int check_io_shape(ml_model_t *mdl)
+{
+	odp_ml_shape_info_t *shape;
+
+	for (uint32_t i = 0; i < mdl->info.num_inputs; i++) {
+		shape = &mdl->input_info[i].shape;
+
+		if (shape->type == ODP_ML_SHAPE_NONE) {
+			_ODP_ERR("Undefined shape type for model input[%u]\n", i);
+			return -1;
+		}
+
+		if (shape->type == ODP_ML_SHAPE_STATIC)
+			continue;
+
+		/* shape->type == ODP_ML_SHAPE_BATCH */
+		for (uint32_t j = 0; j < shape->num_dim; j++) {
+			if (shape->dim[j] == ODP_ML_DIM_DYNAMIC && !shape->dim_max[j]) {
+				_ODP_ERR("Missing dim_max[%u] for dynamic sized input[%u], please"
+					 " provide via the extra_info of model param\n", j, i);
+				return -1;
+			}
+		}
+	}
+
+	/* Outputs only need a defined shape type */
+	for (uint32_t i = 0; i < mdl->info.num_outputs; i++) {
+		if (mdl->output_info[i].shape.type == ODP_ML_SHAPE_NONE) {
+			_ODP_ERR("Undefined shape type for model output[%u]\n", i);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+/* Create an ML model from the in-memory blob in param->model.
+ *
+ * Validates the param against the configured limits, claims a free slot
+ * in the global model table (slot address doubles as the returned
+ * handle), builds an onnxruntime session and fills in model/tensor info.
+ * Returns ODP_ML_MODEL_INVALID on any failure. */
+odp_ml_model_t odp_ml_model_create(const char *name, const odp_ml_model_param_t *param)
+{
+	OrtStatus *status;
+	odp_ml_model_info_t *info;
+	OrtSessionOptions *session_opts;
+	uint32_t i = 0;
+	ml_model_t *mdl = NULL;
+	OrtSession *session = NULL;
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+
+	if (odp_unlikely(odp_global_ro.disable.ml)) {
+		_ODP_ERR("ML is disabled\n");
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	if (odp_unlikely(param->size > _odp_ml_glb->ml_config.max_model_size)) {
+		_ODP_ERR("Model size %" PRIu64 " exceeds maximum model size configured %" PRIu64 "\n",
+			 param->size, _odp_ml_glb->ml_config.max_model_size);
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	if (odp_unlikely(!param->size || !param->model)) {
+		_ODP_ERR("Invalid model param: param->model: %p, param->size: %" PRIu64 "\n",
+			 param->model, param->size);
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	if (odp_unlikely(param->max_compl_id > ML_MAX_COMPL_ID)) {
+		_ODP_ERR("param->max_compl_id: %u exceeds maximum completion id supported: %d\n",
+			 param->max_compl_id, ML_MAX_COMPL_ID);
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	/* Find an empty slot to store the new model. State is checked once
+	 * without the lock (cheap skip of busy slots), then re-checked under
+	 * the lock to avoid racing with a concurrent create. */
+	for (i = 0; i < ML_MAX_MODELS_CREATED; i++) {
+		if (_odp_ml_glb->models[i].state)
+			continue;
+
+		odp_ticketlock_lock(&_odp_ml_glb->models[i].lock);
+
+		if (_odp_ml_glb->models[i].state) {
+			odp_ticketlock_unlock(&_odp_ml_glb->models[i].lock);
+			continue;
+		}
+
+		mdl = &_odp_ml_glb->models[i];
+		break;
+	}
+
+	if (i == ML_MAX_MODELS_CREATED) {
+		_ODP_ERR("Maximum number of models has already been created!\n");
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	/* Free model entry was found and is now locked */
+	mdl->state = ML_STATE_CREATED;
+
+	status = ort_api->CreateSessionOptions(&session_opts);
+	if (check_ortstatus(status) || !session_opts) {
+		_ODP_ERR("Error: CreateSessionOptions failed.\n");
+		mdl->state = ML_STATE_FREE;
+		odp_ticketlock_unlock(&mdl->lock);
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	if (set_ort_run_opts(name, session_opts)) {
+		_odp_ml_glb->ort_api->ReleaseSessionOptions(session_opts);
+		mdl->state = ML_STATE_FREE;
+		odp_ticketlock_unlock(&mdl->lock);
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	/* Store model info */
+	info = &mdl->info;
+	memset(info, 0, sizeof(odp_ml_model_info_t));
+
+	if (create_ort_model(param, &session, mdl, session_opts)) {
+		mdl->state = ML_STATE_FREE;
+
+		/* Initialize info back to 0 when some fields have been filled
+		 * while later failed */
+		reset_mdl_info_sizes(mdl);
+		odp_ticketlock_unlock(&mdl->lock);
+
+		_odp_ml_glb->ort_api->ReleaseSessionOptions(session_opts);
+		_ODP_ERR("create_ort_model() failed\n");
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	if (check_io_shape(mdl)) {
+		mdl->state = ML_STATE_FREE;
+		reset_mdl_info_sizes(mdl);
+		odp_ticketlock_unlock(&mdl->lock);
+
+		ort_api->ReleaseSession(session);
+		_odp_ml_glb->ort_api->ReleaseSessionOptions(session_opts);
+		return ODP_ML_MODEL_INVALID;
+	}
+
+	mdl->session = session;
+	mdl->session_opts = session_opts;
+	info->index = i;
+
+	if (name) {
+		strncpy(info->name, name, ODP_ML_MODEL_NAME_LEN - 1);
+		info->name[ODP_ML_MODEL_NAME_LEN - 1] = 0;
+	}
+
+	/* Mark all completion ids as available */
+	mdl->max_compl_id = param->max_compl_id;
+	for (uint32_t j = 0; j < ML_MAX_COMPL_ID; j++)
+		odp_atomic_init_u32(&mdl->compl_status[j], 1);
+
+	odp_ticketlock_unlock(&mdl->lock);
+	return (odp_ml_model_t)mdl;
+}
+
+/* Destroy a created (not loaded) model: release its onnxruntime session
+ * and options, and return its table slot to the free pool.
+ * Returns 0 on success, -1 on a bad handle or wrong state. */
+int odp_ml_model_destroy(odp_ml_model_t model)
+{
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (model == ODP_ML_MODEL_INVALID) {
+		_ODP_ERR("Bad ML model handle\n");
+		return -1;
+	}
+
+	odp_ticketlock_lock(&mdl->lock);
+
+	/* Only an unloaded, created model may be destroyed */
+	if (mdl->state != ML_STATE_CREATED) {
+		_ODP_ERR("Model not created\n");
+		odp_ticketlock_unlock(&mdl->lock);
+		return -1;
+	}
+
+	_odp_ml_glb->ort_api->ReleaseSessionOptions(mdl->session_opts);
+	_odp_ml_glb->ort_api->ReleaseSession(mdl->session);
+	mdl->state = ML_STATE_FREE;
+	mdl->session = NULL;
+	odp_ticketlock_unlock(&mdl->lock);
+
+	return 0;
+}
+
+/* Copy model info out under the model lock.
+ * Returns 0 on success, -1 on bad handle, NULL info or free slot. */
+int odp_ml_model_info(odp_ml_model_t model, odp_ml_model_info_t *info)
+{
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad ML model handle\n");
+		return -1;
+	}
+
+	if (odp_unlikely(!info)) {
+		_ODP_ERR("info must not be NULL\n");
+		return -1;
+	}
+
+	odp_ticketlock_lock(&mdl->lock);
+	if (odp_unlikely(mdl->state == ML_STATE_FREE)) {
+		_ODP_ERR("Model not created\n");
+		odp_ticketlock_unlock(&mdl->lock);
+		return -1;
+	}
+
+	*info = mdl->info;
+
+	odp_ticketlock_unlock(&mdl->lock);
+	return 0;
+}
+
+/* Copy up to 'num' input tensor infos into info[].
+ *
+ * Always returns the model's total input count (so callers can size the
+ * array with num == 0); returns 0 on a bad handle. */
+uint32_t odp_ml_model_input_info(odp_ml_model_t model, odp_ml_input_info_t info[], uint32_t num)
+{
+	uint32_t num_model_inputs;
+	uint32_t num_written;
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad ML model handle\n");
+		return 0;
+	}
+
+	odp_ticketlock_lock(&mdl->lock);
+	num_model_inputs = mdl->info.num_inputs;
+	/* Copy at most as many entries as the caller provided room for */
+	num_written = num_model_inputs >= num ? num : num_model_inputs;
+
+	if (num == 0) {
+		odp_ticketlock_unlock(&mdl->lock);
+		return num_model_inputs;
+	}
+
+	for (uint32_t i = 0; i < num_written; i++)
+		info[i] = mdl->input_info[i];
+
+	odp_ticketlock_unlock(&mdl->lock);
+	return num_model_inputs;
+}
+
+/* Copy up to 'num' output tensor infos into info[].
+ *
+ * Always returns the model's total output count (so callers can size the
+ * array with num == 0); returns 0 on a bad handle. */
+uint32_t odp_ml_model_output_info(odp_ml_model_t model, odp_ml_output_info_t info[], uint32_t num)
+{
+	uint32_t num_model_outputs;
+	uint32_t num_written;
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad ML model handle\n");
+		return 0;
+	}
+
+	odp_ticketlock_lock(&mdl->lock);
+	num_model_outputs = mdl->info.num_outputs;
+	/* Copy at most as many entries as the caller provided room for */
+	num_written = num_model_outputs >= num ? num : num_model_outputs;
+
+	if (num == 0) {
+		odp_ticketlock_unlock(&mdl->lock);
+		return num_model_outputs;
+	}
+
+	for (uint32_t i = 0; i < num_written; i++)
+		info[i] = mdl->output_info[i];
+
+	odp_ticketlock_unlock(&mdl->lock);
+	return num_model_outputs;
+}
+
+/* Find a created model by name. Scans the whole model table, locking
+ * each slot while comparing. Returns the first match, or
+ * ODP_ML_MODEL_INVALID when no model with that name exists.
+ * NOTE(review): 'name' is passed straight to strcmp() — confirm callers
+ * never pass NULL. */
+odp_ml_model_t odp_ml_model_lookup(const char *name)
+{
+	uint32_t i;
+	ml_model_t *mdl;
+
+	for (i = 0; i < ML_MAX_MODELS_CREATED; i++) {
+		mdl = &_odp_ml_glb->models[i];
+
+		odp_ticketlock_lock(&mdl->lock);
+
+		if (mdl->state == ML_STATE_FREE) {
+			odp_ticketlock_unlock(&mdl->lock);
+			continue;
+		}
+
+		if (!strcmp(mdl->info.name, name)) {
+			/* found it */
+			odp_ticketlock_unlock(&mdl->lock);
+			return (odp_ml_model_t)mdl;
+		}
+		odp_ticketlock_unlock(&mdl->lock);
+	}
+
+	return ODP_ML_MODEL_INVALID;
+}
+
+/* Convert a model handle to a printable u64 value */
+uint64_t odp_ml_model_to_u64(odp_ml_model_t model)
+{
+	return _odp_pri(model);
+}
+
+/* Return a short printable name for a data type, "unknown" for
+ * unrecognized values (used by odp_ml_model_print()) */
+static const char *data_type_str(odp_ml_data_type_t data_type)
+{
+	switch (data_type) {
+	case ODP_ML_DATA_TYPE_INT8:
+		return "int8";
+	case ODP_ML_DATA_TYPE_UINT8:
+		return "uint8";
+	case ODP_ML_DATA_TYPE_UINT16:
+		return "uint16";
+	case ODP_ML_DATA_TYPE_INT16:
+		return "int16";
+	case ODP_ML_DATA_TYPE_INT32:
+		return "int32";
+	case ODP_ML_DATA_TYPE_UINT32:
+		return "uint32";
+	case ODP_ML_DATA_TYPE_INT64:
+		return "int64";
+	case ODP_ML_DATA_TYPE_UINT64:
+		return "uint64";
+	case ODP_ML_DATA_TYPE_FP16:
+		return "fp16";
+	case ODP_ML_DATA_TYPE_FP32:
+		return "fp32";
+	case ODP_ML_DATA_TYPE_BFP16:
+		return "bfp16";
+	default:
+		return "unknown";
+	}
+}
+
+/* Return a short printable name for a shape type.
+ * NOTE(review): default returns capitalized "Unknown" while
+ * data_type_str() uses lowercase "unknown" — possibly unintentional. */
+static const char *shape_type_str(odp_ml_shape_type_t shape_type)
+{
+	switch (shape_type) {
+	case ODP_ML_SHAPE_NONE:
+		return "none";
+	case ODP_ML_SHAPE_STATIC:
+		return "static";
+	case ODP_ML_SHAPE_BATCH:
+		return "batch";
+	default:
+		return "Unknown";
+	}
+}
+
+/* Print one tensor shape as e.g. "Shape: batch [Dyn, 224, 224, 3]",
+ * with dynamic dimensions shown as "Dyn" and scalars as "[]" */
+static void print_shape(const odp_ml_shape_info_t *shape)
+{
+	/* Print shape */
+	_ODP_PRINT("Shape: %s [", shape_type_str(shape->type));
+
+	for (uint32_t i = 0; i < shape->num_dim; i++) {
+		if (shape->dim[i] == ODP_ML_DIM_DYNAMIC)
+			_ODP_PRINT("Dyn");
+		else
+			_ODP_PRINT("%" PRIu32, shape->dim[i]);
+
+		if (i == (shape->num_dim - 1))
+			_ODP_PRINT("]\n");
+		else
+			_ODP_PRINT(", ");
+	}
+
+	/* The number of dimensions for a scalar input is 0, in which case did not
+	 * go into above for loop */
+	if (shape->num_dim == 0)
+		_ODP_PRINT("]\n");
+}
+
+/* Print model info (handle, name, versions, index and per-tensor
+ * name/type/shape for all inputs and outputs) to the ODP log.
+ * Prints an error and returns for an invalid handle or a free slot. */
+void odp_ml_model_print(odp_ml_model_t model)
+{
+	ml_model_t *mdl = ml_model_from_handle(model);
+	const odp_ml_model_info_t *info;
+	const odp_ml_input_info_t *input_info;
+	const odp_ml_output_info_t *output_info;
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad ML model handle\n");
+		return;
+	}
+
+	/* Derive member pointers only after the handle has been validated:
+	 * pointer arithmetic on an invalid base would be undefined behavior */
+	info = &mdl->info;
+	input_info = mdl->input_info;
+	output_info = mdl->output_info;
+
+	odp_ticketlock_lock(&mdl->lock);
+	if (odp_unlikely(mdl->state == ML_STATE_FREE)) {
+		odp_ticketlock_unlock(&mdl->lock);
+		_ODP_ERR("Model not created\n");
+		return;
+	}
+
+	_ODP_PRINT("\nModel info\n");
+	_ODP_PRINT("----------\n");
+	_ODP_PRINT("  Model handle: 0x%" PRIx64 "\n", odp_ml_model_to_u64(model));
+	_ODP_PRINT("  Name: %s\n", info->name);
+	_ODP_PRINT("  Model version: %" PRIu64 "\n", info->model_version);
+	_ODP_PRINT("  Model interface version: %" PRIu64 "\n", info->interface_version);
+	_ODP_PRINT("  Index: %u\n", info->index);
+	_ODP_PRINT("  Number of inputs: %u\n", info->num_inputs);
+
+	for (uint32_t i = 0; i < info->num_inputs; i++) {
+		_ODP_PRINT("    Input[%u]: ", i);
+		_ODP_PRINT("Name: %s, ", input_info[i].name);
+		_ODP_PRINT("Data_type: %s, ", data_type_str(input_info[i].data_type));
+		print_shape(&input_info[i].shape);
+	}
+
+	_ODP_PRINT("  Number of outputs: %u\n", info->num_outputs);
+	for (uint32_t i = 0; i < info->num_outputs; i++) {
+		_ODP_PRINT("    Output[%u]: ", i);
+		_ODP_PRINT("Name: %s, ", output_info[i].name);
+		_ODP_PRINT("Data_type: %s, ", data_type_str(output_info[i].data_type));
+		print_shape(&output_info[i].shape);
+	}
+
+	odp_ticketlock_unlock(&mdl->lock);
+
+	_ODP_PRINT("\n");
+}
+
+/* Print the completion modes set in 'compl_mode_mask'.
+ * Fix: " syn" -> " sync" (typo; "poll" and "event" are spelled out). */
+static inline void mode_print(odp_ml_compl_mode_t compl_mode_mask)
+{
+	if (compl_mode_mask & ODP_ML_COMPL_MODE_SYNC)
+		_ODP_PRINT(" sync");
+
+	if (compl_mode_mask & ODP_ML_COMPL_MODE_POLL)
+		_ODP_PRINT(" poll");
+
+	if (compl_mode_mask & ODP_ML_COMPL_MODE_EVENT)
+		_ODP_PRINT(" event");
+}
+
+/* Print ML implementation limits and configured capabilities.
+ * Fix: "completion mode: " (load) had a trailing space while the run branch
+ * did not; mode_print() already emits a leading space per mode. */
+void odp_ml_print(void)
+{
+	_ODP_PRINT("\nML info\n");
+	_ODP_PRINT("-----------\n");
+	_ODP_PRINT("  max_model_size: %u\n", ML_MAX_MODEL_SIZE);
+	_ODP_PRINT("  max_compl_id: %u\n", ML_MAX_COMPL_ID);
+	_ODP_PRINT("  max_models_created: %u\n", ML_MAX_MODELS_CREATED);
+	_ODP_PRINT("  max_models_loaded: %u\n", ML_MAX_MODELS_LOADED);
+	_ODP_PRINT("  model_max_inputs: %u\n", CONFIG_ML_MAX_INPUTS);
+	_ODP_PRINT("  model_max_outputs: %u\n", CONFIG_ML_MAX_OUTPUTS);
+
+	_ODP_PRINT("  load:\n");
+	_ODP_PRINT("    completion mode:");
+	mode_print(_odp_ml_glb->capa.load.compl_mode_mask);
+	_ODP_PRINT(", plain queue: %c, schedule queue: %c\n",
+		   _odp_ml_glb->capa.load.compl_queue_plain ? 'Y' : 'N',
+		   _odp_ml_glb->capa.load.compl_queue_sched ? 'Y' : 'N');
+
+	_ODP_PRINT("  run:\n");
+	_ODP_PRINT("    completion mode:");
+	mode_print(_odp_ml_glb->capa.run.compl_mode_mask);
+	_ODP_PRINT(", plain queue: %c, schedule queue: %c\n",
+		   _odp_ml_glb->capa.run.compl_queue_plain ? 'Y' : 'N',
+		   _odp_ml_glb->capa.run.compl_queue_sched ? 'Y' : 'N');
+	_ODP_PRINT("\n");
+}
+
+/* No extra statistics are implemented; only validate the model handle.
+ * Returns 0 (no stats) on a valid handle, -1 on a bad one. */
+int odp_ml_model_extra_stat_info(odp_ml_model_t model,
+				 odp_ml_extra_stat_info_t info[] ODP_UNUSED,
+				 int num ODP_UNUSED)
+{
+	if (odp_likely(model != ODP_ML_MODEL_INVALID))
+		return 0;
+
+	_ODP_ERR("Bad ML model handle\n");
+	return -1;
+}
+
+/* No extra statistics are implemented; only validate the model handle.
+ * Returns 0 (no stats) on a valid handle, -1 on a bad one. */
+int odp_ml_model_extra_stats(odp_ml_model_t model, uint64_t stats[] ODP_UNUSED, int num ODP_UNUSED)
+{
+	if (odp_likely(model != ODP_ML_MODEL_INVALID))
+		return 0;
+
+	_ODP_ERR("Bad ML model handle\n");
+	return -1;
+}
+
+/* Initialize ML completion event pool parameters to implementation defaults.
+ * Fix: error message grammar ("must not NULL" -> "must not be NULL"). */
+void odp_ml_compl_pool_param_init(odp_ml_compl_pool_param_t *pool_param)
+{
+	if (odp_unlikely(!pool_param)) {
+		_ODP_ERR("Param 'pool_param' must not be NULL\n");
+		return;
+	}
+
+	memset(pool_param, 0, sizeof(odp_ml_compl_pool_param_t));
+
+	/* Default cache size comes from the global buffer pool defaults */
+	pool_param->cache_size = _odp_ml_glb->pool_param.buf.cache_size;
+}
+
+/* Create a buffer pool backing ML completion events. Each buffer is sized to
+ * hold either a run result or a load result, whichever is larger. Returns the
+ * pool handle, or ODP_POOL_INVALID when a parameter exceeds capability. */
+odp_pool_t odp_ml_compl_pool_create(const char *name, const odp_ml_compl_pool_param_t *pool_param)
+{
+	odp_pool_param_t params;
+	const uint32_t num = pool_param->num;
+	const uint32_t uarea_size = pool_param->uarea_size;
+	const uint32_t cache_size = pool_param->cache_size;
+	const uint32_t result_size = _ODP_MAX(sizeof(odp_ml_run_result_t),
+					      sizeof(odp_ml_load_result_t));
+
+	/* Validate requested values against ML pool capability */
+	if (num > _odp_ml_glb->capa.pool.max_num) {
+		_ODP_ERR("Too many ML completion events: %u\n", num);
+		return ODP_POOL_INVALID;
+	}
+
+	if (uarea_size > _odp_ml_glb->capa.pool.max_uarea_size) {
+		_ODP_ERR("Bad uarea size: %u\n", uarea_size);
+		return ODP_POOL_INVALID;
+	}
+
+	if (cache_size < _odp_ml_glb->capa.pool.min_cache_size ||
+	    cache_size > _odp_ml_glb->capa.pool.max_cache_size) {
+		_ODP_ERR("Bad cache size: %u\n", cache_size);
+		return ODP_POOL_INVALID;
+	}
+
+	odp_pool_param_init(&params);
+	params.type = ODP_POOL_BUFFER;
+	params.uarea_init.init_fn = pool_param->uarea_init.init_fn;
+	params.uarea_init.args = pool_param->uarea_init.args;
+	params.buf.num = num;
+	params.buf.cache_size = cache_size;
+	params.buf.size = result_size;
+	params.buf.uarea_size = uarea_size;
+
+	return _odp_pool_create(name, &params, ODP_POOL_ML_COMPL);
+}
+
+/* Allocate an ML completion event from 'pool'. The backing buffer payload is
+ * zeroed (it holds either result type) and the event is re-branded as an ML
+ * completion event. Returns ODP_ML_COMPL_INVALID when allocation fails. */
+odp_ml_compl_t odp_ml_compl_alloc(odp_pool_t pool)
+{
+	const uint32_t result_size = _ODP_MAX(sizeof(odp_ml_run_result_t),
+					      sizeof(odp_ml_load_result_t));
+	odp_buffer_t buf = odp_buffer_alloc(pool);
+
+	if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+		return ODP_ML_COMPL_INVALID;
+
+	memset(odp_buffer_addr(buf), 0, result_size);
+
+	/* Buffers from an ML completion pool carry the ML completion type */
+	_odp_event_type_set(odp_buffer_to_event(buf), ODP_EVENT_ML_COMPL);
+
+	return (odp_ml_compl_t)(uintptr_t)buf;
+}
+
+/* Free an ML completion event, restoring the plain buffer event type before
+ * returning the buffer to its pool. */
+void odp_ml_compl_free(odp_ml_compl_t ml_compl)
+{
+	odp_buffer_t buf;
+
+	if (odp_unlikely(ml_compl == ODP_ML_COMPL_INVALID)) {
+		_ODP_ERR("Bad ML job completion handle\n");
+		return;
+	}
+
+	buf = (odp_buffer_t)(uintptr_t)ml_compl;
+	_odp_event_type_set(odp_buffer_to_event(buf), ODP_EVENT_BUFFER);
+	odp_buffer_free(buf);
+}
+
+/* Read the run result carried by a completion event. Returns 0 on success,
+ * -1 when the job failed, -2 on a bad handle or event type.
+ * Fix: the handle was converted to an event before it was validated. */
+int odp_ml_compl_run_result(odp_ml_compl_t ml_compl, odp_ml_run_result_t *result)
+{
+	odp_event_subtype_t subtype;
+	odp_ml_run_result_t *run_result;
+	odp_event_t ev;
+	odp_buffer_t buf = (odp_buffer_t)(uintptr_t)ml_compl;
+
+	if (odp_unlikely(ml_compl == ODP_ML_COMPL_INVALID)) {
+		_ODP_ERR("Given ML completion event is invalid\n");
+		return -2;
+	}
+
+	/* Convert only after the handle has been checked */
+	ev = odp_buffer_to_event(buf);
+
+	if (odp_event_types(ev, &subtype) != ODP_EVENT_ML_COMPL ||
+	    subtype != ODP_EVENT_ML_COMPL_RUN) {
+		_ODP_ERR("Given completion event has wrong event type or subtype\n");
+		return -2;
+	}
+
+	run_result = odp_buffer_addr(buf);
+	if (result)
+		*result = *run_result;
+
+	return run_result->error_code ? -1 : 0;
+}
+
+/* Read the load/unload result carried by a completion event. Returns 0 on
+ * success, -1 when the job failed, -2 on a bad handle or event type.
+ * Fix: the handle was converted to an event before it was validated. */
+int odp_ml_compl_load_result(odp_ml_compl_t ml_compl, odp_ml_load_result_t *result)
+{
+	odp_event_subtype_t subtype;
+	odp_ml_load_result_t *load_result;
+	odp_event_t ev;
+	odp_buffer_t buf = (odp_buffer_t)(uintptr_t)ml_compl;
+
+	if (odp_unlikely(ml_compl == ODP_ML_COMPL_INVALID)) {
+		_ODP_ERR("Given ML completion event is invalid\n");
+		return -2;
+	}
+
+	/* Convert only after the handle has been checked */
+	ev = odp_buffer_to_event(buf);
+
+	if (odp_event_types(ev, &subtype) != ODP_EVENT_ML_COMPL ||
+	    subtype != ODP_EVENT_ML_COMPL_LOAD) {
+		_ODP_ERR("Given completion event has wrong event type or subtype\n");
+		return -2;
+	}
+
+	load_result = odp_buffer_addr(buf);
+	if (result)
+		*result = *load_result;
+
+	return load_result->error_code ? -1 : 0;
+}
+
+/* Return the user area of the buffer backing a completion event */
+void *odp_ml_compl_user_area(odp_ml_compl_t ml_compl)
+{
+	return odp_buffer_user_area((odp_buffer_t)(uintptr_t)ml_compl);
+}
+
+/* Convert an event to an ML completion handle. The event must be of ML
+ * completion type (debug-asserted). */
+odp_ml_compl_t odp_ml_compl_from_event(odp_event_t event)
+{
+	_ODP_ASSERT(_odp_event_hdr_field(event, int8_t, event_type) == ODP_EVENT_ML_COMPL);
+
+	return (odp_ml_compl_t)(uintptr_t)event;
+}
+
+/* Convert an ML completion handle back to a generic event */
+odp_event_t odp_ml_compl_to_event(odp_ml_compl_t ml_compl)
+{
+	return (odp_event_t)(uintptr_t)ml_compl;
+}
+
+/* Convert an ML completion handle to a printable 64 bit value */
+uint64_t odp_ml_compl_to_u64(odp_ml_compl_t ml_compl)
+{
+	return (uint64_t)(uintptr_t)ml_compl;
+}
+
+/* Initialize completion parameters: all zero except queue and event handles,
+ * which get their invalid values. */
+void odp_ml_compl_param_init(odp_ml_compl_param_t *compl_param)
+{
+	memset(compl_param, 0, sizeof(odp_ml_compl_param_t));
+
+	compl_param->queue = ODP_QUEUE_INVALID;
+	compl_param->event = ODP_EVENT_INVALID;
+}
+
+/* Synchronously load a model: transition CREATED -> LOADED under the model
+ * lock. Returns 0 on success, -1 on failure; 'result' (optional) receives
+ * the error code. */
+int odp_ml_model_load(odp_ml_model_t model, odp_ml_load_result_t *result)
+{
+	ml_model_t *mdl = ml_model_from_handle(model);
+	odp_ml_load_result_t res = {0};
+	int ret = -1;
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad ML model handle\n");
+		res.error_code = ML_BAD_HDL;
+		goto done;
+	}
+
+	odp_ticketlock_lock(&mdl->lock);
+	if (odp_likely(mdl->state == ML_STATE_CREATED)) {
+		mdl->state = ML_STATE_LOADED;
+		odp_ticketlock_unlock(&mdl->lock);
+		ret = 0;
+		goto done;
+	}
+	odp_ticketlock_unlock(&mdl->lock);
+
+	_ODP_ERR("Model has not been created yet or is already loaded\n");
+	res.error_code = ML_NOT_CREATED;
+
+done:
+	if (result)
+		*result = res;
+
+	return ret;
+}
+
+/* Validate completion parameters for an asynchronous operation against the
+ * configured completion mode masks ('is_load' selects load/unload vs run).
+ * Returns 0 when valid, -1 otherwise. */
+static inline int check_compl_param(const odp_ml_compl_param_t *compl_param,
+				    uint32_t max_compl_id, odp_bool_t is_load)
+{
+	odp_ml_config_t *config = &_odp_ml_glb->ml_config;
+
+	switch (compl_param->mode) {
+	case ODP_ML_COMPL_MODE_POLL:
+		if (is_load && !(config->load_mode_mask & ODP_ML_COMPL_MODE_POLL)) {
+			_ODP_ERR("Poll mode loading/unloading is not configured\n");
+			return -1;
+		}
+
+		if (!is_load && !(config->run_mode_mask & ODP_ML_COMPL_MODE_POLL)) {
+			_ODP_ERR("Poll mode run is not configured\n");
+			return -1;
+		}
+
+		/* compl_id indexes the per-model completion status array */
+		if (compl_param->compl_id > max_compl_id) {
+			_ODP_ERR("Bad compl_id: %u, exceeding model max completion id %u\n",
+				 compl_param->compl_id, max_compl_id);
+			return -1;
+		}
+		break;
+	case ODP_ML_COMPL_MODE_EVENT:
+		if (is_load && !(config->load_mode_mask & ODP_ML_COMPL_MODE_EVENT)) {
+			_ODP_ERR("Event mode loading/unloading is not configured\n");
+			return -1;
+		}
+
+		if (!is_load && !(config->run_mode_mask & ODP_ML_COMPL_MODE_EVENT)) {
+			_ODP_ERR("Event mode run is not configured\n");
+			return -1;
+		}
+
+		/* Event mode needs a pre-allocated event and a target queue */
+		if (compl_param->event == ODP_EVENT_INVALID ||
+		    compl_param->queue == ODP_QUEUE_INVALID) {
+			_ODP_ERR("Bad event or queue\n");
+			return -1;
+		}
+
+		if (odp_event_type(compl_param->event) != ODP_EVENT_ML_COMPL) {
+			_ODP_ERR("Bad completion event type\n");
+			return -1;
+		}
+		break;
+	default:
+		/* Including ODP_ML_COMPL_MODE_SYNC, which is not supported by
+		 * asynchronous functions (e.g. *_start()) either.
+		 */
+		_ODP_ERR("Invalid completion mode %u\n", compl_param->mode);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Start an asynchronous model load. The load itself is synchronous in this
+ * implementation; completion is signalled per 'compl_param' (poll flag or
+ * completion event). Returns 0 on success, -1 on failure. */
+int odp_ml_model_load_start(odp_ml_model_t model, const odp_ml_compl_param_t *compl_param)
+{
+	int ret;
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad model handle\n");
+		return -1;
+	}
+
+	if (odp_unlikely(check_compl_param(compl_param, mdl->max_compl_id, true)))
+		return -1;
+
+	/* Poll mode: clear the status flag first so load_status() reports
+	 * "in progress" until the release store below */
+	if (compl_param->mode == ODP_ML_COMPL_MODE_POLL)
+		odp_atomic_store_rel_u32(&mdl->compl_status[compl_param->compl_id], 0);
+
+	ret = odp_ml_model_load(model, NULL);
+
+	if (odp_unlikely(ret))
+		return -1;
+
+	/* Send a completion event to the given queue */
+	if (compl_param->mode == ODP_ML_COMPL_MODE_EVENT) {
+		odp_ml_load_result_t *result;
+		odp_buffer_t buf = (odp_buffer_t)(uintptr_t)compl_param->event;
+
+		_odp_buffer_subtype_set(buf, ODP_EVENT_ML_COMPL_LOAD);
+
+		result = odp_buffer_addr(buf);
+		result->error_code = 0;
+		result->user_ptr = compl_param->user_ptr;
+
+		if (odp_unlikely(odp_queue_enq(compl_param->queue, compl_param->event))) {
+			_ODP_ERR("Completion event enqueue failed %" PRIu64 "\n",
+				 odp_queue_to_u64(compl_param->queue));
+			/* Roll back the load since completion cannot be delivered */
+			if (odp_ml_model_unload(model, NULL))
+				_ODP_ERR("Failed to unload model\n");
+			return -1;
+		}
+
+		return 0;
+	}
+
+	/* Poll mode: publish user_ptr, then mark complete (release pairs with
+	 * the acquire load in odp_ml_model_load_status()) */
+	mdl->result[compl_param->compl_id].user_ptr = compl_param->user_ptr;
+	odp_atomic_store_rel_u32(&mdl->compl_status[compl_param->compl_id], 1);
+	return 0;
+}
+
+/* Poll the status of an asynchronous load: 1 when complete (filling 'result'
+ * if given), 0 when still in progress, -2 on a bad handle or compl_id. */
+int odp_ml_model_load_status(odp_ml_model_t model, uint32_t compl_id, odp_ml_load_result_t *result)
+{
+	ml_model_t *mdl = ml_model_from_handle(model);
+	int status;
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID || compl_id > mdl->max_compl_id)) {
+		_ODP_ERR("Invalid model or compl_id: %u\n", compl_id);
+		return -2;
+	}
+
+	/* Acquire pairs with the release store made when the job completed */
+	status = odp_atomic_load_acq_u32(&mdl->compl_status[compl_id]);
+
+	if (status && result) {
+		result->error_code = 0;
+		result->user_ptr = mdl->result[compl_id].user_ptr;
+	}
+
+	return status;
+}
+
+/* Synchronously unload a model: transition LOADED -> CREATED under the model
+ * lock. Fails when the model is free, only created, or still inferencing.
+ * Returns 0 on success, -1 on failure; 'result' (optional) gets the error. */
+int odp_ml_model_unload(odp_ml_model_t model, odp_ml_load_result_t *result)
+{
+	ml_model_t *mdl = ml_model_from_handle(model);
+	odp_ml_load_result_t res = {0};
+	int ret = -1;
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		res.error_code = ML_BAD_HDL;
+		_ODP_ERR("Bad ML model handle\n");
+		goto done;
+	}
+
+	odp_ticketlock_lock(&mdl->lock);
+	if (odp_likely(mdl->state == ML_STATE_LOADED)) {
+		mdl->state = ML_STATE_CREATED;
+		odp_ticketlock_unlock(&mdl->lock);
+		ret = 0;
+		goto done;
+	}
+	/* State was FREE, CREATED or INFERENCING */
+	odp_ticketlock_unlock(&mdl->lock);
+
+	_ODP_ERR("Model has not been created/loaded or inferencing has not finished yet\n");
+	res.error_code = ML_NOT_LOADED;
+
+done:
+	if (result)
+		*result = res;
+
+	return ret;
+}
+
+/* Start an asynchronous model unload. The unload itself is synchronous in
+ * this implementation; completion is signalled per 'compl_param'. Returns 0
+ * on success, -1 on failure. */
+int odp_ml_model_unload_start(odp_ml_model_t model, const odp_ml_compl_param_t *compl_param)
+{
+	int ret;
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad model handle\n");
+		return -1;
+	}
+
+	/* is_load=true: unloading uses the load completion mode mask */
+	if (odp_unlikely(check_compl_param(compl_param, mdl->max_compl_id, true)))
+		return -1;
+
+	/* Poll mode: clear the status flag first so unload_status() reports
+	 * "in progress" until the release store below */
+	if (compl_param->mode == ODP_ML_COMPL_MODE_POLL)
+		odp_atomic_store_rel_u32(&mdl->compl_status[compl_param->compl_id], 0);
+
+	ret = odp_ml_model_unload(model, NULL);
+
+	if (odp_unlikely(ret))
+		return -1;
+
+	/* Upon successful unloading, send a completion event to the given queue */
+	if (compl_param->mode == ODP_ML_COMPL_MODE_EVENT) {
+		odp_ml_load_result_t *result;
+		odp_buffer_t buf = (odp_buffer_t)(uintptr_t)compl_param->event;
+
+		_odp_buffer_subtype_set(buf, ODP_EVENT_ML_COMPL_LOAD);
+
+		result = odp_buffer_addr(buf);
+		result->error_code = 0;
+		result->user_ptr = compl_param->user_ptr;
+
+		if (odp_unlikely(odp_queue_enq(compl_param->queue, compl_param->event))) {
+			_ODP_ERR("Completion event enqueue failed %" PRIu64 "\n",
+				 odp_queue_to_u64(compl_param->queue));
+			return -1;
+		}
+
+		return 0;
+	}
+
+	/* Poll mode: publish user_ptr, then mark complete (release pairs with
+	 * the acquire load in odp_ml_model_unload_status()) */
+	mdl->result[compl_param->compl_id].user_ptr = compl_param->user_ptr;
+	odp_atomic_store_rel_u32(&mdl->compl_status[compl_param->compl_id], 1);
+	return 0;
+}
+
+/* Load and unload completions share the same per-model status tracking */
+int odp_ml_model_unload_status(odp_ml_model_t model, uint32_t compl_id,
+			       odp_ml_load_result_t *result)
+{
+	return odp_ml_model_load_status(model, compl_id, result);
+}
+
+/* Initialize run parameters to all zero.
+ * Consistency: guard against NULL like odp_ml_compl_pool_param_init() does. */
+void odp_ml_run_param_init(odp_ml_run_param_t *param)
+{
+	if (odp_unlikely(!param)) {
+		_ODP_ERR("Param 'param' must not be NULL\n");
+		return;
+	}
+
+	memset(param, 0, sizeof(odp_ml_run_param_t));
+}
+
+/* Expand a shape into an int64 dimension array, substituting 'batch_size'
+ * for every dynamic dimension. 'array' must hold shape->num_dim entries. */
+static void ml_shape_to_int64(const odp_ml_shape_info_t *shape, uint32_t batch_size, int64_t *array)
+{
+	uint32_t i;
+
+	for (i = 0; i < shape->num_dim; i++)
+		array[i] = (shape->dim[i] == ODP_ML_DIM_DYNAMIC) ?
+				(int64_t)batch_size : (int64_t)shape->dim[i];
+}
+
+/* Get the number of elements in given shape */
+static inline uint64_t get_num_elem(uint32_t batch_size, const odp_ml_shape_info_t *shape)
+{
+ uint64_t num_elements = 1;
+ int64_t dim[ODP_ML_MAX_DIMS] = {0};
+
+ ml_shape_to_int64(shape, batch_size, dim);
+
+ for (uint32_t i = 0; i < shape->num_dim; i++)
+ num_elements *= (uint64_t)dim[i];
+
+ return num_elements;
+}
+
+/* Byte size of a dynamically shaped input/output, resolved with the batch
+ * size from run parameters. Returns 0 on error.
+ * Fix: error message grammar ("a input" -> "an input"). */
+static inline uint32_t dyn_io_size(const odp_ml_shape_info_t *shape, uint32_t data_type_size,
+				   const odp_ml_run_param_t *param)
+{
+	uint32_t size;
+
+	if (!param || !param->batch_size) {
+		_ODP_ERR("Parameter 'param' must not be NULL and batch_size must be "
+			 "provided when an input/output has dynamic dimension size\n");
+		return 0;
+	}
+
+	/* NOTE(review): get_num_elem() returns uint64_t; assumes the resolved
+	 * size fits in 32 bits - TODO confirm against supported model sizes */
+	size = get_num_elem(param->batch_size, shape);
+	size *= data_type_size;
+
+	return size;
+}
+
+/* Validate model handle, segment descriptors and run parameters before
+ * inference. Consecutive segments are mapped onto each model input/output;
+ * their sizes must sum exactly to that input's/output's size.
+ * Returns 0 when consistent, -1 otherwise.
+ * Fix: missing "\n" on the segmented-output error log. */
+static int verify_run_params(odp_ml_model_t model, const odp_ml_data_t *data,
+			     const odp_ml_run_param_t *param)
+{
+	const ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad ML model handle\n");
+		return -1;
+	}
+
+	if (odp_unlikely(!data)) {
+		_ODP_ERR("Parameter 'data' must not be NULL\n");
+		return -1;
+	}
+
+	/* Make sure that the number of input data segments equals or bigger than
+	 * the number of model inputs. */
+	if (mdl->info.num_inputs > data->num_input_seg) {
+		_ODP_ERR("The num of input data segments %u must not less than "
+			 "the number of model inputs %u\n", data->num_input_seg,
+			 mdl->info.num_inputs);
+		return -1;
+	}
+
+	if (mdl->info.num_outputs > data->num_output_seg) {
+		_ODP_ERR("The num of output data segments %u must not less than "
+			 "the number of model outputs %u\n", data->num_output_seg,
+			 mdl->info.num_outputs);
+		return -1;
+	}
+
+	if (data->num_input_seg > mdl->info.num_inputs &&
+	    (_odp_ml_glb->capa.max_segs_per_input == 1)) {
+		_ODP_ERR("Segmented input data is not supported\n");
+		return -1;
+	}
+
+	if (data->num_output_seg > mdl->info.num_outputs &&
+	    (_odp_ml_glb->capa.max_segs_per_output == 1)) {
+		_ODP_ERR("Segmented output data is not supported\n");
+		return -1;
+	}
+
+	uint32_t size = 0;
+	uint32_t input_index = 0;
+	uint32_t seg_size_sum = 0;
+	odp_bool_t index_new = true;
+	uint32_t segs_per_input = 1;
+
+	/* Walk input segments, accumulating sizes until each model input is
+	 * exactly covered */
+	for (uint32_t i = 0; i < data->num_input_seg; i++) {
+		if (data->input_seg[i].addr == NULL) {
+			_ODP_ERR("data->input_seg[%u].addr must not NULL\n", i);
+			return -1;
+		}
+
+		if (index_new) {
+			if (input_index > mdl->info.num_inputs - 1) {
+				_ODP_ERR("Too much number of input segments given\n");
+				return -1;
+			}
+
+			/* Input with dynamic batch size */
+			if (mdl->input_info[input_index].shape.type == ODP_ML_SHAPE_BATCH)
+				size = dyn_io_size(&mdl->input_info[input_index].shape,
+						   mdl->input_info[input_index].data_type_size,
+						   param);
+			else
+				size = mdl->input_sizes[input_index];
+
+			if (!size) {
+				_ODP_ERR("Size for %uth input is 0\n", input_index);
+				return -1;
+			}
+		}
+
+		seg_size_sum += data->input_seg[i].size;
+
+		if (seg_size_sum > size) {
+			_ODP_ERR("Sum of segment sizes %u exceeds %uth input data size %u\n",
+				 seg_size_sum, input_index, size);
+			return -1;
+		}
+
+		if (seg_size_sum == size) {
+			if (segs_per_input > _odp_ml_glb->capa.max_segs_per_input) {
+				_ODP_ERR("Number of segments %u for input[%u] exceeds maximum"
+					 " number of data segments per model input %u\n",
+					 segs_per_input, input_index,
+					 _odp_ml_glb->capa.max_segs_per_input);
+				return -1;
+			}
+			/* Input fully covered: move to the next one */
+			input_index++;
+			index_new = true;
+			seg_size_sum = 0;
+			segs_per_input = 1;
+		} else {
+			segs_per_input++;
+			index_new = false;
+		}
+	}
+
+	if (input_index != mdl->info.num_inputs) {
+		_ODP_ERR("Data is not provided for all model inputs\n");
+		return -1;
+	}
+
+	seg_size_sum = 0;
+	index_new = true;
+	uint32_t output_index = 0;
+	uint32_t segs_per_output = 1;
+
+	/* Same walk for output segments */
+	for (uint32_t i = 0; i < data->num_output_seg; i++) {
+		if (data->output_seg[i].addr == NULL) {
+			_ODP_ERR("data->output_seg[%u].addr must not NULL\n", i);
+			return -1;
+		}
+
+		if (index_new) {
+			if (output_index > mdl->info.num_outputs - 1) {
+				_ODP_ERR("Too much number of output segments given\n");
+				return -1;
+			}
+
+			/* Output with dynamic batch size */
+			if (mdl->output_info[output_index].shape.type == ODP_ML_SHAPE_BATCH)
+				size = dyn_io_size(&mdl->output_info[output_index].shape,
+						   mdl->output_info[output_index].data_type_size,
+						   param);
+			else
+				size = mdl->output_sizes[output_index];
+
+			if (!size) {
+				_ODP_ERR("Size for %uth output is 0\n", output_index);
+				return -1;
+			}
+		}
+
+		seg_size_sum += data->output_seg[i].size;
+
+		if (seg_size_sum > size) {
+			_ODP_ERR("Sum of segment sizes %u exceeds %uth output data size %u\n",
+				 seg_size_sum, output_index, size);
+			return -1;
+		}
+
+		if (seg_size_sum >= size) {
+			if (segs_per_output > _odp_ml_glb->capa.max_segs_per_output) {
+				_ODP_ERR("Number of segments %u for output[%u] exceeds maximum"
+					 " number of data segments per model output %u\n",
+					 segs_per_output, output_index,
+					 _odp_ml_glb->capa.max_segs_per_output);
+				return -1;
+			}
+			output_index++;
+			index_new = true;
+			seg_size_sum = 0;
+			segs_per_output = 1;
+		} else {
+			segs_per_output++;
+			index_new = false;
+		}
+	}
+
+	if (output_index != mdl->info.num_outputs) {
+		_ODP_ERR("Not enough output_segs to hold all output data\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Map an ODP ML data type to the corresponding ONNX tensor element type.
+ * Types without an ONNX counterpart map to UNDEFINED. */
+static ONNXTensorElementDataType onnx_dtype_from_odp_dtype(odp_ml_data_type_t data_type)
+{
+	switch (data_type) {
+	case ODP_ML_DATA_TYPE_INT8:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8;
+	case ODP_ML_DATA_TYPE_UINT8:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8;
+	case ODP_ML_DATA_TYPE_INT16:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT16;
+	case ODP_ML_DATA_TYPE_UINT16:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT16;
+	case ODP_ML_DATA_TYPE_INT32:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT32;
+	case ODP_ML_DATA_TYPE_UINT32:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT32;
+	case ODP_ML_DATA_TYPE_INT64:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64;
+	case ODP_ML_DATA_TYPE_UINT64:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT64;
+	case ODP_ML_DATA_TYPE_FP16:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT16;
+	case ODP_ML_DATA_TYPE_BFP16:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_BFLOAT16;
+	case ODP_ML_DATA_TYPE_FP32:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT;
+	case ODP_ML_DATA_TYPE_FP64:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_DOUBLE;
+	case ODP_ML_DATA_TYPE_NONE:
+		/* Fall through: no ONNX equivalent */
+	case ODP_ML_DATA_TYPE_INT24:
+	case ODP_ML_DATA_TYPE_UINT24:
+	default:
+		return ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED;
+	}
+}
+
+/* Check that 'tensor' matches the expected element type and batch-resolved
+ * shape. Returns 0 on match, -1 otherwise.
+ * Fixes: single goto cleanup replaces five duplicated Release calls; int64_t
+ * dimensions are printed with PRId64 (they were printed with PRIu64). */
+static int verify_tensor(const OrtValue *tensor, odp_ml_data_type_t expected_type,
+			 const odp_ml_shape_info_t *expected_shape, uint32_t batch_size)
+{
+	OrtTensorTypeAndShapeInfo *tensor_info;
+	ONNXTensorElementDataType tensor_type;
+	size_t dim_count;
+	OrtStatus *status = NULL;
+	int64_t dims[ODP_ML_MAX_DIMS] = {0};
+	int64_t shape_arr[ODP_ML_MAX_DIMS] = {0};
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	int ret = -1;
+
+	status = ort_api->GetTensorTypeAndShape(tensor, &tensor_info);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetTensorTypeAndShape() failed\n");
+		return -1;
+	}
+
+	status = ort_api->GetTensorElementType(tensor_info, &tensor_type);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetTensorElementType() failed\n");
+		goto out;
+	}
+
+	if (onnx_dtype_to_odp_dtype(tensor_type) != expected_type) {
+		_ODP_ERR("Tensor type does not match model type\n");
+		goto out;
+	}
+
+	status = ort_api->GetDimensionsCount(tensor_info, &dim_count);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetDimensionsCount() failed\n");
+		goto out;
+	}
+
+	if (dim_count != expected_shape->num_dim) {
+		_ODP_ERR("Tensor dimension does not match shape_dim\n");
+		goto out;
+	}
+
+	status = ort_api->GetDimensions(tensor_info, dims, dim_count);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetDimensions() failed\n");
+		goto out;
+	}
+
+	ml_shape_to_int64(expected_shape, batch_size, shape_arr);
+
+	for (uint32_t i = 0; i < dim_count; i++) {
+		if (dims[i] != shape_arr[i]) {
+			_ODP_ERR("Shape[%u]: %" PRId64 " does not match expected: %" PRId64 "\n",
+				 i, dims[i], shape_arr[i]);
+			goto out;
+		}
+	}
+
+	ret = 0;
+
+out:
+	/* Single release point for the type and shape info */
+	ort_api->ReleaseTensorTypeAndShapeInfo(tensor_info);
+	return ret;
+}
+
+/* Create an ONNX tensor for one model input and copy the user's segmented
+ * data into it. '*seg_idx' advances past the segments consumed. The created
+ * tensor is stored in '*input_tensor' and is released by the caller on all
+ * paths. Returns 0 on success, -1 on failure.
+ * Fix: error message named CreateTensorWithDataAsOrtValue() but the call is
+ * CreateTensorAsOrtValue(). */
+static int input_data_to_tensor(const odp_ml_input_info_t *input_info, uint32_t num_seg,
+				const odp_ml_data_seg_t *input_seg, uint32_t *seg_idx,
+				uint32_t batch_size, OrtValue **input_tensor)
+{
+	int is_tensor;
+	uint64_t input_size;
+	OrtAllocator *allocator;
+	void *data = NULL;
+	OrtStatus *status = NULL;
+	int64_t shape[ODP_ML_MAX_DIMS] = {0};
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	ONNXTensorElementDataType onnx_dtype = ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED;
+
+	ml_shape_to_int64(&input_info->shape, batch_size, shape);
+
+	onnx_dtype = onnx_dtype_from_odp_dtype(input_info->data_type);
+	_ODP_ASSERT(onnx_dtype != ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED);
+
+	status = ort_api->GetAllocatorWithDefaultOptions(&allocator);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetAllocatorWithDefaultOptions() failed\n");
+		return -1;
+	}
+
+	status = ort_api->CreateTensorAsOrtValue(allocator,
+						 shape,
+						 input_info->shape.num_dim,
+						 onnx_dtype,
+						 input_tensor);
+	if (check_ortstatus(status) || !input_tensor[0]) {
+		_ODP_ERR("CreateTensorAsOrtValue() failed\n");
+		return -1;
+	}
+
+	input_size = input_info->data_type_size * get_num_elem(batch_size, &input_info->shape);
+
+	status = ort_api->GetTensorMutableData(input_tensor[0], &data);
+	if (check_ortstatus(status) || !data) {
+		_ODP_ERR("GetTensorMutableData() failed\n");
+		return -1;
+	}
+
+	/* Gather segments into the tensor until 'input_size' bytes are copied */
+	for (uint64_t i = 0; i < input_size; ) {
+		if (*seg_idx >= num_seg) {
+			_ODP_ERR("Insufficient input data\n");
+			return -1;
+		}
+
+		uint64_t seg_size = input_seg[*seg_idx].size;
+
+		if (i + seg_size > input_size) {
+			_ODP_ERR("Excess input data in segment %" PRIu32 "\n", *seg_idx);
+			return -1;
+		}
+
+		memcpy((uint8_t *)data + i, input_seg[(*seg_idx)++].addr, seg_size);
+		i += seg_size;
+	}
+
+	/* Extra tensor validation in debug builds only */
+	if (!ODP_DEBUG)
+		return 0;
+
+	status = ort_api->IsTensor(input_tensor[0], &is_tensor);
+	if (check_ortstatus(status) || !is_tensor) {
+		_ODP_ERR("input_tensor IsTensor failed\n");
+		return -1;
+	}
+
+	/* Make sure tensor shape matches input_shape */
+	if (verify_tensor(input_tensor[0], input_info->data_type,
+			  &input_info->shape, batch_size)) {
+		_ODP_ERR("Verify input_tensor failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Validate an inference output: it must be a tensor and match the model's
+ * declared type and batch-resolved shape. Returns 0 when valid, -1 otherwise. */
+static int verify_output_tensor(OrtValue *output_tensor, odp_ml_data_type_t expected_type,
+				const odp_ml_shape_info_t *expected_shape, uint32_t batch_size)
+{
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	OrtStatus *status;
+	int is_tensor = 0;
+
+	status = ort_api->IsTensor(output_tensor, &is_tensor);
+	if (check_ortstatus(status) || !is_tensor) {
+		_ODP_ERR("output_tensor IsTensor failed\n");
+		return -1;
+	}
+
+	/* Make sure tensor shape matches output_shape */
+	if (verify_tensor(output_tensor, expected_type, expected_shape, batch_size)) {
+		_ODP_ERR("Verify output_tensor failed\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Compute a tensor's payload size in bytes: element count times element size.
+ * Returns 0 on success (with '*size' filled in), -1 on ONNX API failure. */
+static int get_tensor_data_size(OrtValue *tensor, uint32_t *size, uint32_t data_type_size)
+{
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	OrtTensorTypeAndShapeInfo *tensor_info;
+	size_t num_elem;
+	OrtStatus *status;
+	int ret = -1;
+
+	status = ort_api->GetTensorTypeAndShape(tensor, &tensor_info);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetTensorTypeAndShape() failed\n");
+		return -1;
+	}
+
+	status = ort_api->GetTensorShapeElementCount(tensor_info, &num_elem);
+	if (check_ortstatus(status)) {
+		_ODP_ERR("GetTensorShapeElementCount() failed\n");
+	} else {
+		*size = data_type_size * num_elem;
+		ret = 0;
+	}
+
+	ort_api->ReleaseTensorTypeAndShapeInfo(tensor_info);
+	return ret;
+}
+
+/* Check that the output segment(s) starting at 'seg_idx' can hold a tensor of
+ * 'out_tensor_data_size' bytes. Returns 0 when they fit, -1 otherwise.
+ * Fix: "%dth" used for uint32_t 'output_idx' -> "%uth" (matches the other
+ * logs and the argument type). */
+static int check_output_size(odp_bool_t is_segmented, uint32_t output_idx, uint32_t seg_idx,
+			     uint64_t out_tensor_data_size, const odp_ml_data_t data[])
+{
+	uint64_t output_size = 0;
+
+	/* Output is not segmented */
+	if (!is_segmented) {
+		/* Make sure tensor data size does not exceed size allocated for
+		 * data->output_seg[seg_idx].addr */
+		if (out_tensor_data_size > data->output_seg[seg_idx].size) {
+			_ODP_ERR("Malloc at least %" PRIu64 " bytes for %uth output tensor\n",
+				 out_tensor_data_size, output_idx);
+			return -1;
+		}
+
+		return 0;
+	}
+
+	/* Output is segmented, first calculate total size for one tensor */
+	for (; seg_idx < data->num_output_seg; seg_idx++) {
+		output_size += data->output_seg[seg_idx].size;
+		if (output_size >= out_tensor_data_size)
+			break;
+	}
+
+	if (0 == output_size) {
+		_ODP_ERR("No output data segments for %uth output tensor\n", output_idx);
+		return -1;
+	}
+
+	if (out_tensor_data_size > output_size) {
+		_ODP_ERR("Output segments (%" PRIu64 " bytes in total) for %uth output"
+			 " is expected to be at least %" PRIu64 " bytes\n",
+			 output_size, output_idx, out_tensor_data_size);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Copy inference results from ONNX output tensors into the user's output
+ * segments. When data->num_output_seg equals the model's output count, one
+ * segment maps to one tensor; otherwise a tensor may span several consecutive
+ * segments. Returns 0 on success, -1 on failure with result_local->error_code
+ * set. */
+static int output_tensors_to_data(OrtValue **output_tensors,
+				  uint32_t model_num_outputs,
+				  const odp_ml_run_param_t *param,
+				  const odp_ml_output_info_t *output_info,
+				  const odp_ml_data_t *data,
+				  odp_ml_run_result_t *result_local)
+{
+	uint32_t seg_idx;
+	uint64_t seg_size;
+	uint64_t cpy_size;
+	uint64_t left_size;
+	uint64_t output_val_offset;
+	uint32_t out_tensor_data_size;
+	void *output_val = NULL; /* Pointer to store one raw output value */
+	OrtStatus *status = NULL;
+	uint32_t batch_size = (param && param->batch_size) ? param->batch_size : 0;
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	odp_bool_t is_segmented = (data->num_output_seg != model_num_outputs);
+
+	seg_idx = 0;
+	for (uint32_t i = 0; i < model_num_outputs; i++) {
+		/* Tensor type/shape verification only in debug builds */
+		if (ODP_DEBUG &&
+		    verify_output_tensor(output_tensors[i], output_info[i].data_type,
+					 &output_info[i].shape, batch_size)){
+			result_local->error_code = ML_BAD_OUTPUT;
+			return -1;
+		}
+
+		/* Get tensor data size */
+		if (get_tensor_data_size(output_tensors[i], &out_tensor_data_size,
+					 output_info[i].data_type_size)) {
+			result_local->error_code = ML_LIB_FAILED;
+			return -1;
+		}
+
+		/* When output_tensor is an empty tensor [], skip getting data */
+		if (out_tensor_data_size == 0)
+			continue;
+
+		if (ODP_DEBUG && check_output_size(is_segmented, i, seg_idx,
+						   out_tensor_data_size, data)) {
+			result_local->error_code = ML_BAD_OUTPUT;
+			return -1;
+		}
+
+		/* Following assumes param and data->output_seg are valid */
+		/* Get tensor data */
+		output_val = NULL;
+		status = ort_api->GetTensorMutableData(output_tensors[i], &output_val);
+		if (check_ortstatus(status) || !output_val) {
+			result_local->error_code = ML_LIB_FAILED;
+			return -1;
+		}
+
+		/* Output is not segmented */
+		if (!is_segmented) {
+			/* Store output data to data->output_seg[i].addr */
+			memcpy(data->output_seg[i].addr, output_val, out_tensor_data_size);
+			seg_idx++;
+			continue;
+		}
+
+		/* Output is segmented: scatter the tensor data across as many
+		 * consecutive segments as needed */
+		output_val_offset = 0;
+		left_size = out_tensor_data_size;
+		for (; seg_idx < data->num_output_seg; seg_idx++) {
+			seg_size = data->output_seg[seg_idx].size;
+			cpy_size = left_size > seg_size ? seg_size : left_size;
+			memcpy(data->output_seg[seg_idx].addr,
+			       ((char *)output_val) + output_val_offset, cpy_size);
+
+			output_val_offset += cpy_size;
+			left_size = out_tensor_data_size - output_val_offset;
+
+			if (!left_size) {
+				seg_idx++;
+				break;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Run inference synchronously. Returns 1 when a run completed, 0 when the
+ * model is busy with another run, -1 on error.
+ * Fix: the "Excess input segments" check only set ret = -1 and fell through,
+ * so inference still ran and the function could return success; it must bail
+ * out to cleanup with ML_BAD_INPUT instead. */
+int odp_ml_run(odp_ml_model_t model, const odp_ml_data_t *data, const odp_ml_run_param_t *param)
+{
+	odp_ml_run_result_t result_local;
+
+	int retval = -1; /* Return value of this function */
+	int ret = 0;
+	OrtStatus *status = NULL;
+	uint32_t batch_size = 0;
+
+	OrtValue *input_tensor[CONFIG_ML_MAX_INPUTS] = {0};
+	OrtValue *output_tensors[CONFIG_ML_MAX_OUTPUTS] = {0};
+	const char *input_names[CONFIG_ML_MAX_INPUTS] = {0};
+	const char *output_names[CONFIG_ML_MAX_OUTPUTS] = {0};
+
+	const OrtApi *ort_api = _odp_ml_glb->ort_api;
+	ml_model_t *mdl = ml_model_from_handle(model);
+	const odp_ml_model_info_t *ml_info = &mdl->info;
+	const odp_ml_input_info_t *input_info = mdl->input_info;
+	const odp_ml_output_info_t *output_info = mdl->output_info;
+	OrtSession *session = mdl->session;
+
+	/* Claim the model for this run: only one inference at a time */
+	odp_ticketlock_lock(&mdl->lock);
+	if (odp_unlikely(mdl->state == ML_STATE_INFERENCING)) {
+		odp_ticketlock_unlock(&mdl->lock);
+		return 0;
+	}
+	if (odp_unlikely(mdl->state != ML_STATE_LOADED)) {
+		_ODP_ERR("Wrong model state: not created or not loaded\n");
+		odp_ticketlock_unlock(&mdl->lock);
+		return -1;
+	}
+	mdl->state = ML_STATE_INFERENCING;
+	odp_ticketlock_unlock(&mdl->lock);
+
+	memset(&result_local, 0, sizeof(result_local));
+
+	if (ODP_DEBUG && verify_run_params(model, data, param)) {
+		result_local.error_code = ML_BAD_INPUT;
+		goto init_fail;
+	}
+
+	if (param && param->batch_size)
+		batch_size = param->batch_size;
+
+	uint32_t seg_idx = 0;
+
+	/* Transfer input data to tensor */
+	for (uint32_t i = 0; i < ml_info->num_inputs; i++) {
+		ret = input_data_to_tensor(&input_info[i],
+					   data->num_input_seg,
+					   data->input_seg,
+					   &seg_idx,
+					   batch_size,
+					   &input_tensor[i]);
+		if (ret) {
+			_ODP_ERR("%uth input data to tensor failed\n", i);
+			result_local.error_code = ML_LIB_FAILED;
+			goto release_input_tensors;
+		}
+
+		_ODP_DBG("input_tensor[%u]: %p\n", i, input_tensor[i]);
+
+		/* Model input names */
+		input_names[i] = input_info[i].name;
+	}
+
+	/* All inputs are filled; any remaining segments are an error */
+	if (seg_idx < data->num_input_seg) {
+		_ODP_ERR("Excess input segments\n");
+		result_local.error_code = ML_BAD_INPUT;
+		goto release_input_tensors;
+	}
+
+	for (uint32_t i = 0; i < ml_info->num_outputs; i++)
+		output_names[i] = output_info[i].name;
+
+	/* Run inference */
+	status = ort_api->Run(session,
+			      NULL,
+			      (const char * const *)input_names,
+			      (const OrtValue * const*)input_tensor,
+			      ml_info->num_inputs,
+			      (const char * const *)output_names,
+			      ml_info->num_outputs,
+			      output_tensors);
+
+	if (check_ortstatus(status)) {
+		_ODP_ERR("Run inference failed\n");
+		result_local.error_code = ML_LIB_FAILED;
+		goto release_all_tensors;
+	}
+
+	/* Verify output tensors and store them to output */
+	if (output_tensors_to_data(output_tensors, ml_info->num_outputs, param,
+				   output_info, data, &result_local)) {
+		_ODP_ERR("Output tensors to data failed\n");
+		goto release_all_tensors;
+	}
+
+	retval = 1;
+
+release_all_tensors:
+	for (uint32_t i = 0; i < ml_info->num_outputs; i++)
+		ort_api->ReleaseValue(output_tensors[i]);
+
+release_input_tensors:
+	for (uint32_t i = 0; i < ml_info->num_inputs; i++)
+		ort_api->ReleaseValue(input_tensor[i]);
+
+init_fail:
+	if (param && param->result)
+		*param->result = result_local;
+
+	/* Release the model back to the loaded state */
+	odp_ticketlock_lock(&mdl->lock);
+	mdl->state = ML_STATE_LOADED;
+	odp_ticketlock_unlock(&mdl->lock);
+
+	return retval;
+}
+
+/* Run inference synchronously for multiple data items.
+ *
+ * Calls odp_ml_run() once per item and stops at the first call that does
+ * not succeed (return value != 1). Returns the number of completed runs
+ * (1..num). If the very first run does not succeed, returns that run's
+ * return value instead (0 = retry later, <0 = error).
+ */
+int odp_ml_run_multi(odp_ml_model_t model, const odp_ml_data_t data[],
+		     const odp_ml_run_param_t param[], int num)
+{
+	int i;
+	int ret;
+
+	if (odp_unlikely(num < 1)) {
+		_ODP_ERR("Bad number of runs\n");
+		return -1;
+	}
+
+	for (i = 0; i < num; i++) {
+		/* param is optional: either one entry per run, or NULL for all */
+		if (param)
+			ret = odp_ml_run(model, &data[i], &param[i]);
+		else
+			ret = odp_ml_run(model, &data[i], NULL);
+
+		if (odp_unlikely(ret != 1))
+			break;
+	}
+
+	/* Nothing completed: propagate the first run's status code */
+	if (odp_unlikely(i == 0))
+		return ret;
+
+	return i;
+}
+
+/* Start an inference run with asynchronous-style completion.
+ *
+ * In this implementation the inference itself executes synchronously via
+ * odp_ml_run(); only the completion signaling differs by mode: EVENT mode
+ * enqueues a completion buffer to the given queue, POLL mode sets a per-id
+ * status word that odp_ml_run_status() reads.
+ *
+ * Returns 1 on success, 0 when the model is busy (odp_ml_run() returned 0),
+ * and <0 on error.
+ */
+int odp_ml_run_start(odp_ml_model_t model, const odp_ml_data_t *data,
+		     const odp_ml_compl_param_t *compl_param,
+		     const odp_ml_run_param_t *run_param)
+{
+	int ret;
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID)) {
+		_ODP_ERR("Bad model handle\n");
+		return -1;
+	}
+
+	if (odp_unlikely(!compl_param)) {
+		_ODP_ERR("Completion parameter is NULL\n");
+		return -1;
+	}
+
+	/* Check completion mode */
+	if (odp_unlikely(check_compl_param(compl_param, mdl->max_compl_id, false))) {
+		_ODP_ERR("Bad ML job completion parameter\n");
+		return -1;
+	}
+
+	/* Mark the poll status "not ready" before the run starts, so a
+	 * concurrent odp_ml_run_status() cannot see a stale ready flag */
+	if (compl_param->mode == ODP_ML_COMPL_MODE_POLL)
+		odp_atomic_store_rel_u32(&mdl->compl_status[compl_param->compl_id], 0);
+
+	ret = odp_ml_run(model, data, run_param);
+
+	/* Run failed or model busy: no completion is signaled */
+	if (odp_unlikely(ret < 1))
+		return ret;
+
+	/* Send a completion event to the given queue */
+	if (compl_param->mode == ODP_ML_COMPL_MODE_EVENT) {
+		odp_ml_run_result_t *result;
+		odp_buffer_t buf = (odp_buffer_t)(uintptr_t)compl_param->event;
+
+		_odp_buffer_subtype_set(buf, ODP_EVENT_ML_COMPL_RUN);
+
+		/* Write the run result into the completion buffer payload */
+		result = odp_buffer_addr(buf);
+		result->error_code = 0;
+		result->user_ptr = compl_param->user_ptr;
+
+		if (odp_unlikely(odp_queue_enq(compl_param->queue, compl_param->event))) {
+			_ODP_ERR("Completion event enqueue failed %" PRIu64 "\n",
+				 odp_queue_to_u64(compl_param->queue));
+			return -1;
+		}
+
+		return 1;
+	}
+
+	/* compl_param->mode == ODP_ML_COMPL_MODE_POLL */
+	mdl->result[compl_param->compl_id].user_ptr = compl_param->user_ptr;
+	/* Release store pairs with the acquire load in odp_ml_run_status() */
+	odp_atomic_store_rel_u32(&mdl->compl_status[compl_param->compl_id], 1);
+
+	return 1;
+}
+
+/* Start multiple inference runs, one completion parameter per run.
+ *
+ * Stops at the first odp_ml_run_start() call that does not return 1.
+ * Returns the number of started runs (1..num), or the first call's
+ * status code when none started.
+ */
+int odp_ml_run_start_multi(odp_ml_model_t model, const odp_ml_data_t data[],
+			   const odp_ml_compl_param_t compl_param[],
+			   const odp_ml_run_param_t run_param[], int num)
+{
+	int i;
+	int ret = 0;
+
+	if (odp_unlikely(num < 1)) {
+		_ODP_ERR("Bad number of runs\n");
+		return -1;
+	}
+
+	for (i = 0; i < num; i++) {
+		/* run_param is optional: one entry per run, or NULL for all */
+		if (run_param)
+			ret = odp_ml_run_start(model, &data[i], &compl_param[i], &run_param[i]);
+		else
+			ret = odp_ml_run_start(model, &data[i], &compl_param[i], NULL);
+
+		if (odp_unlikely(ret != 1))
+			break;
+	}
+
+	/* Nothing started: propagate the first call's status code */
+	if (odp_unlikely(i == 0))
+		return ret;
+
+	return i;
+}
+
+/* Poll the completion status of a run started in ODP_ML_COMPL_MODE_POLL.
+ *
+ * Returns the status word set by odp_ml_run_start() (0 = not complete,
+ * 1 = complete), or -2 on a bad handle/completion id. When a result
+ * pointer is given, it is filled in regardless of completion state;
+ * error_code is always 0 here since a failed start never reaches POLL
+ * signaling.
+ */
+int odp_ml_run_status(odp_ml_model_t model, uint32_t compl_id, odp_ml_run_result_t *result)
+{
+	int ret;
+	ml_model_t *mdl = ml_model_from_handle(model);
+
+	/* Short-circuit keeps mdl from being dereferenced on an invalid handle */
+	if (odp_unlikely(model == ODP_ML_MODEL_INVALID ||
+			 compl_id > mdl->max_compl_id)) {
+		_ODP_ERR("Invalid model handle or completion id: %u\n", compl_id);
+		return -2;
+	}
+
+	/* Acquire load pairs with the release store in odp_ml_run_start() */
+	ret = odp_atomic_load_acq_u32(&mdl->compl_status[compl_id]);
+
+	if (result) {
+		result->error_code = 0;
+		result->user_ptr = mdl->result[compl_id].user_ptr;
+	}
+
+	return ret;
+}
+
+/* Map a config-file string to an ONNX Runtime graph optimization level.
+ * Returns 0 on success, -1 for an unrecognized string (*level untouched). */
+static int opt_level_from_str(const char *level_str, GraphOptimizationLevel *level)
+{
+	if (strcmp(level_str, "DISABLE_ALL") == 0)
+		*level = ORT_DISABLE_ALL;
+	else if (strcmp(level_str, "ENABLE_BASIC") == 0)
+		*level = ORT_ENABLE_BASIC;
+	else if (strcmp(level_str, "ENABLE_EXTENDED") == 0)
+		*level = ORT_ENABLE_EXTENDED;
+	else if (strcmp(level_str, "ENABLE_ALL") == 0)
+		*level = ORT_ENABLE_ALL;
+	else
+		return -1;
+
+	return 0;
+}
+
+/* Map a config-file string to an ONNX Runtime execution mode.
+ * Returns 0 on success, -1 for an unrecognized string (*mode untouched). */
+static int execution_mode_from_str(const char *mode_str, ExecutionMode *mode)
+{
+	if (strcmp(mode_str, "SEQUENTIAL") == 0)
+		*mode = ORT_SEQUENTIAL;
+	else if (strcmp(mode_str, "PARALLEL") == 0)
+		*mode = ORT_PARALLEL;
+	else
+		return -1;
+
+	return 0;
+}
+
+/* Read ONNX Runtime session options from the ODP libconfig file into opts.
+ *
+ * All "ml.*" options are mandatory: a missing or unparsable option is an
+ * error (-1). Each accepted value is echoed to the log. Returns 0 on
+ * success.
+ */
+static int read_config_file(ort_run_opts_t *opts)
+{
+	const char *conf_str;
+	char mode_str[ML_MAX_CONFIG_STR_LEN];
+	char opt_level_str[ML_MAX_CONFIG_STR_LEN];
+
+	_ODP_PRINT("ML config:\n");
+
+	conf_str = "ml.enable_profiling";
+	if (!_odp_libconfig_lookup_int(conf_str, &opts->enable_profiling)) {
+		_ODP_ERR("Config option '%s' not found.\n", conf_str);
+		return -1;
+	}
+	_ODP_PRINT("  %s: %i\n", conf_str, opts->enable_profiling);
+
+	conf_str = "ml.execution_mode";
+	if (_odp_libconfig_lookup_str(conf_str, mode_str, ML_MAX_CONFIG_STR_LEN) < 0) {
+		_ODP_ERR("Config option '%s' not found.\n", conf_str);
+		return -1;
+	}
+
+	/* String is translated to the ORT enum before use */
+	if (execution_mode_from_str(mode_str, &opts->execution_mode)) {
+		_ODP_ERR("Unsupported execution mode: %s\n", mode_str);
+		return -1;
+	}
+	_ODP_PRINT("  %s: %s\n", conf_str, mode_str);
+
+	conf_str = "ml.inter_op_num_threads";
+	if (!_odp_libconfig_lookup_int(conf_str, &opts->inter_op_num_threads)) {
+		_ODP_ERR("Config option '%s' not found.\n", conf_str);
+		return -1;
+	}
+	_ODP_PRINT("  %s: %i\n", conf_str, opts->inter_op_num_threads);
+
+	conf_str = "ml.intra_op_num_threads";
+	if (!_odp_libconfig_lookup_int(conf_str, &opts->intra_op_num_threads)) {
+		_ODP_ERR("Config option '%s' not found.\n", conf_str);
+		return -1;
+	}
+	_ODP_PRINT("  %s: %i\n", conf_str, opts->intra_op_num_threads);
+
+	conf_str = "ml.graph_optimization_level";
+	if (_odp_libconfig_lookup_str(conf_str, opt_level_str,
+				      ML_MAX_CONFIG_STR_LEN) < 0) {
+		_ODP_ERR("Config option '%s' not found.\n", conf_str);
+		return -1;
+	}
+
+	if (opt_level_from_str(opt_level_str, &opts->graph_opt_level)) {
+		_ODP_ERR("Graph optimize level %s not supported\n", opt_level_str);
+		return -1;
+	}
+	_ODP_PRINT("  %s: %s\n", conf_str, opt_level_str);
+
+	/* May be an empty string, in which case no optimized model is dumped */
+	conf_str = "ml.optimized_model_filepath";
+	if (_odp_libconfig_lookup_str(conf_str, opts->opt_model_filepath,
+				      ML_MAX_CONFIG_STR_LEN) < 0) {
+		_ODP_ERR("Config option '%s' not found.\n", conf_str);
+		return -1;
+	}
+	_ODP_PRINT("  %s: %s\n", conf_str, opts->opt_model_filepath);
+
+	return 0;
+}
+
+/* Global init of the ONNX Runtime backed ML implementation.
+ *
+ * Reserves shared memory for global state, reads configuration and creates
+ * the ORT environment. Returns 0 on success (including when ML is disabled),
+ * -1 on failure.
+ *
+ * NOTE(review): when ML is disabled this logs at ERR level yet returns
+ * success — confirm the log level is intentional. Also, the reserved shm
+ * is not freed on the later error paths; _odp_ml_term_global() is
+ * presumably expected to clean up — verify against the init/term caller.
+ */
+int _odp_ml_init_global(void)
+{
+	int i;
+	OrtEnv *env;
+	odp_shm_t shm;
+	OrtStatus *status;
+	const OrtApi *ort_api;
+
+	if (odp_global_ro.disable.ml) {
+		_ODP_ERR("ML is disabled\n");
+		return 0;
+	}
+
+	shm = odp_shm_reserve("_odp_ml_global", sizeof(ml_global_t), ODP_CACHE_LINE_SIZE, 0);
+	_odp_ml_glb = odp_shm_addr(shm);
+
+	/* odp_shm_addr() returns NULL also when the reserve itself failed */
+	if (_odp_ml_glb == NULL) {
+		_ODP_ERR("SHM reserve failed for odp_ml\n");
+		return -1;
+	}
+
+	memset(_odp_ml_glb, 0, sizeof(ml_global_t));
+	_odp_ml_glb->shm = shm;
+
+	if (odp_ml_capability(&_odp_ml_glb->capa)) {
+		_ODP_ERR("ML capability failed\n");
+		return -1;
+	}
+
+	odp_pool_param_init(&_odp_ml_glb->pool_param);
+
+	if (read_config_file(&_odp_ml_glb->ort_run_opts))
+		return -1;
+
+	ort_api = OrtGetApiBase()->GetApi(ORT_API_VERSION);
+	if (!ort_api) {
+		_ODP_ERR("Failed to init ONNX Runtime engine.\n");
+		return -1;
+	}
+	_odp_ml_glb->ort_api = ort_api;
+
+	/* One process-wide ORT environment, released in _odp_ml_term_global() */
+	status = ort_api->CreateEnv(ORT_LOGGING_LEVEL_WARNING, "Default", &env);
+	if (check_ortstatus(status) || !env) {
+		_ODP_ERR("ort_api->CreateEnv() failed.\n");
+		return -1;
+	}
+	_odp_ml_glb->env = env;
+
+	for (i = 0; i < ML_MAX_MODELS_CREATED; i++)
+		odp_ticketlock_init(&_odp_ml_glb->models[i].lock);
+
+	return 0;
+}
+
+/* Global termination: release the ORT environment and the global shm.
+ * Safe to call when ML is disabled or init never completed. Returns 0
+ * on success, -1 if the shm free fails.
+ */
+int _odp_ml_term_global(void)
+{
+	if (odp_global_ro.disable.ml)
+		return 0;
+
+	if (_odp_ml_glb == NULL)
+		return 0;
+
+	/* env is only set after a fully successful init */
+	if (_odp_ml_glb->env)
+		_odp_ml_glb->ort_api->ReleaseEnv(_odp_ml_glb->env);
+
+	if (odp_shm_free(_odp_ml_glb->shm)) {
+		_ODP_ERR("Shm free failed for odp_ml\n");
+		return -1;
+	}
+
+	return 0;
+}
diff --git a/platform/linux-generic/odp_ml_fp16.c b/platform/linux-generic/odp_ml_fp16.c
new file mode 100644
index 000000000..47b10f841
--- /dev/null
+++ b/platform/linux-generic/odp_ml_fp16.c
@@ -0,0 +1,425 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022-2023 Marvell.
+ * Copyright (c) 2023 Nokia
+ *
+ * Based on
+ * - dpdk/lib/mldev/mldev_utils_scalar.h
+ * - dpdk/lib/mldev/mldev_utils_scalar.c
+ * - dpdk/lib/mldev/mldev_utils_scalar_bfloat16.c
+ */
+
+#include <odp_ml_fp16.h>
+
+#include <errno.h>
+#include <stdint.h>
+
+#ifndef BIT
+#define BIT(nr) (1UL << (nr))
+#endif
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
+#endif
+
+#ifndef GENMASK_U32
+#define GENMASK_U32(h, l) (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#endif
+
+/* float32: bit index of MSB & LSB of sign, exponent and mantissa */
+#define FP32_LSB_M 0
+#define FP32_MSB_M 22
+#define FP32_LSB_E 23
+#define FP32_MSB_E 30
+#define FP32_LSB_S 31
+#define FP32_MSB_S 31
+
+/* float32: bitmask for sign, exponent and mantissa */
+#define FP32_MASK_S GENMASK_U32(FP32_MSB_S, FP32_LSB_S)
+#define FP32_MASK_E GENMASK_U32(FP32_MSB_E, FP32_LSB_E)
+#define FP32_MASK_M GENMASK_U32(FP32_MSB_M, FP32_LSB_M)
+
+/* float16: bit index of MSB & LSB of sign, exponent and mantissa */
+#define FP16_LSB_M 0
+#define FP16_MSB_M 9
+#define FP16_LSB_E 10
+#define FP16_MSB_E 14
+#define FP16_LSB_S 15
+#define FP16_MSB_S 15
+
+/* float16: bitmask for sign, exponent and mantissa */
+#define FP16_MASK_S GENMASK_U32(FP16_MSB_S, FP16_LSB_S)
+#define FP16_MASK_E GENMASK_U32(FP16_MSB_E, FP16_LSB_E)
+#define FP16_MASK_M GENMASK_U32(FP16_MSB_M, FP16_LSB_M)
+
+/* bfloat16: bit index of MSB & LSB of sign, exponent and mantissa */
+#define BF16_LSB_M 0
+#define BF16_MSB_M 6
+#define BF16_LSB_E 7
+#define BF16_MSB_E 14
+#define BF16_LSB_S 15
+#define BF16_MSB_S 15
+
+/* bfloat16: bitmask for sign, exponent and mantissa */
+#define BF16_MASK_S GENMASK_U32(BF16_MSB_S, BF16_LSB_S)
+#define BF16_MASK_E GENMASK_U32(BF16_MSB_E, BF16_LSB_E)
+#define BF16_MASK_M GENMASK_U32(BF16_MSB_M, BF16_LSB_M)
+
+/* Exponent bias */
+#define FP32_BIAS_E 127
+#define FP16_BIAS_E 15
+#define BF16_BIAS_E 127
+
+#define FP32_PACK(sign, exponent, mantissa) \
+ (((sign) << FP32_LSB_S) | ((exponent) << FP32_LSB_E) | (mantissa))
+
+#define FP16_PACK(sign, exponent, mantissa) \
+ (((sign) << FP16_LSB_S) | ((exponent) << FP16_LSB_E) | (mantissa))
+
+#define BF16_PACK(sign, exponent, mantissa) \
+ (((sign) << BF16_LSB_S) | ((exponent) << BF16_LSB_E) | (mantissa))
+
+/* Represent float32 as float and uint32_t */
+union float32 {
+ float f;
+ uint32_t u;
+};
+
+/* Convert a single precision floating point number (float32) into a half precision
+ * floating point number (float16) using round to nearest rounding mode.
+ */
+static uint16_t
+__float32_to_float16_scalar_rtn(float x)
+{
+	union float32 f32; /* float32 input */
+	uint32_t f32_s;    /* float32 sign */
+	uint32_t f32_e;    /* float32 exponent */
+	uint32_t f32_m;    /* float32 mantissa */
+	uint16_t f16_s;    /* float16 sign */
+	uint16_t f16_e;    /* float16 exponent */
+	uint16_t f16_m;    /* float16 mantissa */
+	uint32_t tbits;    /* number of truncated bits */
+	uint32_t tmsb;     /* MSB position of truncated bits */
+	uint32_t m_32;     /* temporary float32 mantissa */
+	uint16_t m_16;     /* temporary float16 mantissa */
+	uint16_t u16;      /* float16 output */
+	int be_16;         /* float16 biased exponent, signed */
+
+	/* Decompose the input through the union (no strict-aliasing issue) */
+	f32.f = x;
+	f32_s = (f32.u & FP32_MASK_S) >> FP32_LSB_S;
+	f32_e = (f32.u & FP32_MASK_E) >> FP32_LSB_E;
+	f32_m = (f32.u & FP32_MASK_M) >> FP32_LSB_M;
+
+	f16_s = f32_s;
+	f16_e = 0;
+	f16_m = 0;
+
+	switch (f32_e) {
+	case (0): /* float32: zero or subnormal number */
+		/* float32 subnormals are below float16 subnormal range */
+		f16_e = 0;
+		f16_m = 0; /* convert to zero */
+		break;
+	case (FP32_MASK_E >> FP32_LSB_E): /* float32: infinity or nan */
+		f16_e = FP16_MASK_E >> FP16_LSB_E;
+		if (f32_m == 0) { /* infinity */
+			f16_m = 0;
+		} else { /* nan, propagate mantissa and set MSB of mantissa to 1 */
+			f16_m = f32_m >> (FP32_MSB_M - FP16_MSB_M);
+			f16_m |= BIT(FP16_MSB_M);
+		}
+		break;
+	default: /* float32: normal number */
+		/* compute biased exponent for float16 */
+		be_16 = (int)f32_e - FP32_BIAS_E + FP16_BIAS_E;
+
+		/* overflow, be_16 = [31-INF], set to infinity */
+		if (be_16 >= (int)(FP16_MASK_E >> FP16_LSB_E)) {
+			f16_e = FP16_MASK_E >> FP16_LSB_E;
+			f16_m = 0;
+		} else if ((be_16 >= 1) && (be_16 < (int)(FP16_MASK_E >> FP16_LSB_E))) {
+			/* normal float16, be_16 = [1:30]*/
+			f16_e = be_16;
+			m_16 = f32_m >> (FP32_LSB_E - FP16_LSB_E);
+			tmsb = FP32_MSB_M - FP16_MSB_M - 1;
+			if ((f32_m & GENMASK_U32(tmsb, 0)) > BIT(tmsb)) {
+				/* round: non-zero truncated bits except MSB */
+				m_16++;
+
+				/* overflow into exponent */
+				if (((m_16 & FP16_MASK_E) >> FP16_LSB_E) == 0x1)
+					f16_e++;
+			} else if ((f32_m & GENMASK_U32(tmsb, 0)) == BIT(tmsb)) {
+				/* round: MSB of truncated bits and LSB of m_16 is set
+				 * (exact halfway case -> round to even) */
+				if ((m_16 & 0x1) == 0x1) {
+					m_16++;
+
+					/* overflow into exponent */
+					if (((m_16 & FP16_MASK_E) >> FP16_LSB_E) == 0x1)
+						f16_e++;
+				}
+			}
+			f16_m = m_16 & FP16_MASK_M;
+		} else if ((be_16 >= -(int)(FP16_MSB_M)) && (be_16 < 1)) {
+			/* underflow: zero / subnormal, be_16 = [-9:0] */
+			f16_e = 0;
+
+			/* add implicit leading zero */
+			m_32 = f32_m | BIT(FP32_LSB_E);
+			tbits = FP32_LSB_E - FP16_LSB_E - be_16 + 1;
+			m_16 = m_32 >> tbits;
+
+			/* if non-leading truncated bits are set */
+			if ((f32_m & GENMASK_U32(tbits - 1, 0)) > BIT(tbits - 1)) {
+				m_16++;
+
+				/* overflow into exponent */
+				if (((m_16 & FP16_MASK_E) >> FP16_LSB_E) == 0x1)
+					f16_e++;
+			} else if ((f32_m & GENMASK_U32(tbits - 1, 0)) == BIT(tbits - 1)) {
+				/* if leading truncated bit is set */
+				if ((m_16 & 0x1) == 0x1) {
+					m_16++;
+
+					/* overflow into exponent */
+					if (((m_16 & FP16_MASK_E) >> FP16_LSB_E) == 0x1)
+						f16_e++;
+				}
+			}
+			f16_m = m_16 & FP16_MASK_M;
+		} else if (be_16 == -(int)(FP16_MSB_M + 1)) {
+			/* underflow: zero, be_16 = [-10] */
+			f16_e = 0;
+			/* nonzero mantissa keeps the result distinguishable from zero */
+			if (f32_m != 0)
+				f16_m = 1;
+			else
+				f16_m = 0;
+		} else {
+			/* underflow: zero, be_16 = [-INF:-11] */
+			f16_e = 0;
+			f16_m = 0;
+		}
+
+		break;
+	}
+
+	u16 = FP16_PACK(f16_s, f16_e, f16_m);
+
+	return u16;
+}
+
+/* Convert a half precision floating point number (float16) into a single precision
+ * floating point number (float32).
+ */
+static float
+__float16_to_float32_scalar_rtx(uint16_t f16)
+{
+	union float32 f32; /* float32 output */
+	uint16_t f16_s;    /* float16 sign */
+	uint16_t f16_e;    /* float16 exponent */
+	uint16_t f16_m;    /* float16 mantissa */
+	uint32_t f32_s;    /* float32 sign */
+	uint32_t f32_e;    /* float32 exponent */
+	uint32_t f32_m;    /* float32 mantissa*/
+	uint8_t shift;     /* number of bits to be shifted */
+	uint32_t clz;      /* count of leading zeroes */
+	int e_16;          /* float16 exponent unbiased */
+
+	/* Decompose the 16-bit input into sign/exponent/mantissa fields */
+	f16_s = (f16 & FP16_MASK_S) >> FP16_LSB_S;
+	f16_e = (f16 & FP16_MASK_E) >> FP16_LSB_E;
+	f16_m = (f16 & FP16_MASK_M) >> FP16_LSB_M;
+
+	f32_s = f16_s;
+	switch (f16_e) {
+	case (FP16_MASK_E >> FP16_LSB_E): /* float16: infinity or nan */
+		f32_e = FP32_MASK_E >> FP32_LSB_E;
+		if (f16_m == 0x0) { /* infinity */
+			f32_m = f16_m;
+		} else { /* nan, propagate mantissa, set MSB of mantissa to 1 */
+			f32_m = f16_m;
+			shift = FP32_MSB_M - FP16_MSB_M;
+			f32_m = (f32_m << shift) & FP32_MASK_M;
+			f32_m |= BIT(FP32_MSB_M);
+		}
+		break;
+	case 0: /* float16: zero or sub-normal */
+		f32_m = f16_m;
+		if (f16_m == 0) { /* zero signed */
+			f32_e = 0;
+		} else { /* subnormal numbers */
+			/* every float16 subnormal is a normal float32:
+			 * normalize the mantissa and adjust the exponent */
+			clz = __builtin_clz((uint32_t)f16_m) - sizeof(uint32_t) * 8 + FP16_LSB_E;
+			e_16 = (int)f16_e - clz;
+			f32_e = FP32_BIAS_E + e_16 - FP16_BIAS_E;
+
+			shift = clz + (FP32_MSB_M - FP16_MSB_M) + 1;
+			f32_m = (f32_m << shift) & FP32_MASK_M;
+		}
+		break;
+	default: /* normal numbers */
+		f32_m = f16_m;
+		e_16 = (int)f16_e;
+		/* re-bias the exponent and widen the mantissa; conversion is exact */
+		f32_e = FP32_BIAS_E + e_16 - FP16_BIAS_E;
+
+		shift = (FP32_MSB_M - FP16_MSB_M);
+		f32_m = (f32_m << shift) & FP32_MASK_M;
+	}
+
+	f32.u = FP32_PACK(f32_s, f32_e, f32_m);
+
+	return f32.f;
+}
+
+/* Convert a single precision floating point number (float32) into a
+ * brain float number (bfloat16) using round to nearest rounding mode.
+ */
+static uint16_t
+__float32_to_bfloat16_scalar_rtn(float x)
+{
+	union float32 f32; /* float32 input */
+	uint32_t f32_s;    /* float32 sign */
+	uint32_t f32_e;    /* float32 exponent */
+	uint32_t f32_m;    /* float32 mantissa */
+	uint16_t b16_s;    /* float16 sign */
+	uint16_t b16_e;    /* float16 exponent */
+	uint16_t b16_m;    /* float16 mantissa */
+	uint32_t tbits;    /* number of truncated bits */
+	uint16_t u16;      /* float16 output */
+
+	/* Decompose the input through the union (no strict-aliasing issue) */
+	f32.f = x;
+	f32_s = (f32.u & FP32_MASK_S) >> FP32_LSB_S;
+	f32_e = (f32.u & FP32_MASK_E) >> FP32_LSB_E;
+	f32_m = (f32.u & FP32_MASK_M) >> FP32_LSB_M;
+
+	b16_s = f32_s;
+	b16_e = 0;
+	b16_m = 0;
+
+	/* bfloat16 shares the float32 exponent width, so only the mantissa
+	 * needs truncation/rounding in the normal cases */
+	switch (f32_e) {
+	case (0): /* float32: zero or subnormal number */
+		b16_e = 0;
+		if (f32_m == 0) /* zero */
+			b16_m = 0;
+		else /* subnormal float32 number, normal bfloat16 */
+			goto bf16_normal;
+		break;
+	case (FP32_MASK_E >> FP32_LSB_E): /* float32: infinity or nan */
+		b16_e = BF16_MASK_E >> BF16_LSB_E;
+		if (f32_m == 0) { /* infinity */
+			b16_m = 0;
+		} else { /* nan, propagate mantissa and set MSB of mantissa to 1 */
+			b16_m = f32_m >> (FP32_MSB_M - BF16_MSB_M);
+			b16_m |= BIT(BF16_MSB_M);
+		}
+		break;
+	default: /* float32: normal number, normal bfloat16 */
+		goto bf16_normal;
+	}
+
+	goto bf16_pack;
+
+bf16_normal:
+	b16_e = f32_e;
+	tbits = FP32_MSB_M - BF16_MSB_M;
+	b16_m = f32_m >> tbits;
+
+	/* if non-leading truncated bits are set */
+	if ((f32_m & GENMASK_U32(tbits - 1, 0)) > BIT(tbits - 1)) {
+		b16_m++;
+
+		/* if overflow into exponent */
+		if (((b16_m & BF16_MASK_E) >> BF16_LSB_E) == 0x1)
+			b16_e++;
+	} else if ((f32_m & GENMASK_U32(tbits - 1, 0)) == BIT(tbits - 1)) {
+		/* if only leading truncated bit is set
+		 * (exact halfway case -> round to even) */
+		if ((b16_m & 0x1) == 0x1) {
+			b16_m++;
+
+			/* if overflow into exponent */
+			if (((b16_m & BF16_MASK_E) >> BF16_LSB_E) == 0x1)
+				b16_e++;
+		}
+	}
+	b16_m = b16_m & BF16_MASK_M;
+
+bf16_pack:
+	u16 = BF16_PACK(b16_s, b16_e, b16_m);
+
+	return u16;
+}
+
+/* Convert a brain float number (bfloat16) into a
+ * single precision floating point number (float32).
+ */
+static float
+__bfloat16_to_float32_scalar_rtx(uint16_t f16)
+{
+	union float32 f32; /* float32 output */
+	uint16_t b16_s;    /* float16 sign */
+	uint16_t b16_e;    /* float16 exponent */
+	uint16_t b16_m;    /* float16 mantissa */
+	uint32_t f32_s;    /* float32 sign */
+	uint32_t f32_e;    /* float32 exponent */
+	uint32_t f32_m;    /* float32 mantissa*/
+	uint8_t shift;     /* number of bits to be shifted */
+
+	/* Decompose the 16-bit input into sign/exponent/mantissa fields */
+	b16_s = (f16 & BF16_MASK_S) >> BF16_LSB_S;
+	b16_e = (f16 & BF16_MASK_E) >> BF16_LSB_E;
+	b16_m = (f16 & BF16_MASK_M) >> BF16_LSB_M;
+
+	f32_s = b16_s;
+	switch (b16_e) {
+	case (BF16_MASK_E >> BF16_LSB_E): /* bfloat16: infinity or nan */
+		f32_e = FP32_MASK_E >> FP32_LSB_E;
+		if (b16_m == 0x0) { /* infinity */
+			f32_m = 0;
+		} else { /* nan, propagate mantissa, set MSB of mantissa to 1 */
+			f32_m = b16_m;
+			shift = FP32_MSB_M - BF16_MSB_M;
+			f32_m = (f32_m << shift) & FP32_MASK_M;
+			f32_m |= BIT(FP32_MSB_M);
+		}
+		break;
+	case 0: /* bfloat16: zero or subnormal */
+		f32_m = b16_m;
+		if (b16_m == 0) { /* zero signed */
+			f32_e = 0;
+		} else { /* subnormal numbers */
+			/* NOTE(review): bf16 subnormals take the normal path
+			 * with b16_e == 0 (no mantissa normalization, unlike
+			 * the fp16 variant) — matches the DPDK original, but
+			 * verify subnormal inputs are expected here */
+			goto fp32_normal;
+		}
+		break;
+	default: /* bfloat16: normal number */
+		goto fp32_normal;
+	}
+
+	goto fp32_pack;
+
+fp32_normal:
+	f32_m = b16_m;
+	/* re-bias exponent and widen mantissa; both biases are 127 */
+	f32_e = FP32_BIAS_E + b16_e - BF16_BIAS_E;
+
+	shift = (FP32_MSB_M - BF16_MSB_M);
+	f32_m = (f32_m << shift) & FP32_MASK_M;
+
+fp32_pack:
+	f32.u = FP32_PACK(f32_s, f32_e, f32_m);
+
+	return f32.f;
+}
+
+/* Public wrappers: expose the scalar conversion routines to the rest of
+ * the implementation (declared in odp_ml_fp16.h). */
+
+/* float32 -> IEEE float16 bit pattern, round to nearest */
+uint16_t _odp_float32_to_float16(float x)
+{
+	return __float32_to_float16_scalar_rtn(x);
+}
+
+/* IEEE float16 bit pattern -> float32 (exact) */
+float _odp_float16_to_float32(uint16_t f16)
+{
+	return __float16_to_float32_scalar_rtx(f16);
+}
+
+/* float32 -> bfloat16 bit pattern, round to nearest */
+uint16_t _odp_float32_to_bfloat16(float x)
+{
+	return __float32_to_bfloat16_scalar_rtn(x);
+}
+
+/* bfloat16 bit pattern -> float32 */
+float _odp_bfloat16_to_float32(uint16_t f16)
+{
+	return __bfloat16_to_float32_scalar_rtx(f16);
+}
diff --git a/platform/linux-generic/odp_ml_null.c b/platform/linux-generic/odp_ml_null.c
new file mode 100644
index 000000000..718e80d76
--- /dev/null
+++ b/platform/linux-generic/odp_ml_null.c
@@ -0,0 +1,232 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/hints.h>
+#include <odp/api/ml.h>
+
+#include <odp_init_internal.h>
+
+#include <stdint.h>
+#include <string.h>
+
+/* Dummy ML API implementation, no capability and just return error for
+ * other functions.
+ */
+/* Init/term: nothing to set up or tear down in the null implementation */
+int _odp_ml_init_global(void)
+{
+	return 0;
+}
+
+int _odp_ml_term_global(void)
+{
+	return 0;
+}
+
+/* Zeroed capability advertises that no ML features are supported */
+int odp_ml_capability(odp_ml_capability_t *capa)
+{
+	memset(capa, 0, sizeof(odp_ml_capability_t));
+	return 0;
+}
+
+/* Config and model stubs: creation always fails, lookups find nothing,
+ * info queries report zero items, print functions are no-ops. */
+void odp_ml_config_init(odp_ml_config_t *config ODP_UNUSED)
+{
+}
+
+int odp_ml_config(const odp_ml_config_t *config ODP_UNUSED)
+{
+	return -1;
+}
+
+void odp_ml_model_param_init(odp_ml_model_param_t *param ODP_UNUSED)
+{
+}
+
+odp_ml_model_t odp_ml_model_create(const char *name ODP_UNUSED,
+				   const odp_ml_model_param_t *param ODP_UNUSED)
+{
+	return ODP_ML_MODEL_INVALID;
+}
+
+int odp_ml_model_destroy(odp_ml_model_t model ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_info(odp_ml_model_t model ODP_UNUSED, odp_ml_model_info_t *info ODP_UNUSED)
+{
+	return -1;
+}
+
+uint32_t odp_ml_model_input_info(odp_ml_model_t model ODP_UNUSED,
+				 odp_ml_input_info_t info[] ODP_UNUSED,
+				 uint32_t num ODP_UNUSED)
+{
+	return 0;
+}
+
+uint32_t odp_ml_model_output_info(odp_ml_model_t model ODP_UNUSED,
+				  odp_ml_output_info_t info[] ODP_UNUSED,
+				  uint32_t num ODP_UNUSED)
+{
+	return 0;
+}
+
+odp_ml_model_t odp_ml_model_lookup(const char *name ODP_UNUSED)
+{
+	return ODP_ML_MODEL_INVALID;
+}
+
+uint64_t odp_ml_model_to_u64(odp_ml_model_t model ODP_UNUSED)
+{
+	return 0;
+}
+
+void odp_ml_model_print(odp_ml_model_t model ODP_UNUSED)
+{
+}
+
+void odp_ml_print(void)
+{
+}
+
+/* Completion object stubs: pool creation and allocation always fail,
+ * result queries return error, conversions yield invalid handles. */
+void odp_ml_compl_pool_param_init(odp_ml_compl_pool_param_t *pool_param)
+{
+	memset(pool_param, 0, sizeof(odp_ml_compl_pool_param_t));
+}
+
+odp_pool_t odp_ml_compl_pool_create(const char *name ODP_UNUSED,
+				    const odp_ml_compl_pool_param_t *pool_param ODP_UNUSED)
+{
+	return ODP_POOL_INVALID;
+}
+
+odp_ml_compl_t odp_ml_compl_alloc(odp_pool_t pool ODP_UNUSED)
+{
+	return ODP_ML_COMPL_INVALID;
+}
+
+void odp_ml_compl_free(odp_ml_compl_t ml_compl ODP_UNUSED)
+{
+}
+
+int odp_ml_compl_run_result(odp_ml_compl_t ml_compl ODP_UNUSED,
+			    odp_ml_run_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_compl_load_result(odp_ml_compl_t ml_compl ODP_UNUSED,
+			     odp_ml_load_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+void *odp_ml_compl_user_area(odp_ml_compl_t ml_compl ODP_UNUSED)
+{
+	return NULL;
+}
+
+odp_ml_compl_t odp_ml_compl_from_event(odp_event_t event ODP_UNUSED)
+{
+	return ODP_ML_COMPL_INVALID;
+}
+
+odp_event_t odp_ml_compl_to_event(odp_ml_compl_t ml_compl ODP_UNUSED)
+{
+	return ODP_EVENT_INVALID;
+}
+
+uint64_t odp_ml_compl_to_u64(odp_ml_compl_t ml_compl ODP_UNUSED)
+{
+	return 0;
+}
+
+void odp_ml_compl_param_init(odp_ml_compl_param_t *compl_param ODP_UNUSED)
+{
+}
+
+/* Model load/unload stubs: every operation fails (-1) since no model
+ * can exist in the null implementation. */
+int odp_ml_model_load(odp_ml_model_t model ODP_UNUSED, odp_ml_load_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_load_start(odp_ml_model_t model ODP_UNUSED,
+			    const odp_ml_compl_param_t *compl_param ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_load_status(odp_ml_model_t model ODP_UNUSED, uint32_t compl_id ODP_UNUSED,
+			     odp_ml_load_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_unload(odp_ml_model_t model ODP_UNUSED, odp_ml_load_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_unload_start(odp_ml_model_t model ODP_UNUSED,
+			      const odp_ml_compl_param_t *compl_param ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_unload_status(odp_ml_model_t model ODP_UNUSED, uint32_t compl_id ODP_UNUSED,
+			       odp_ml_load_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+void odp_ml_run_param_init(odp_ml_run_param_t *param ODP_UNUSED)
+{
+}
+
+/* Inference and statistics stubs: every run and stats query fails (-1). */
+int odp_ml_run(odp_ml_model_t model ODP_UNUSED, const odp_ml_data_t *data ODP_UNUSED,
+	       const odp_ml_run_param_t *param ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_run_multi(odp_ml_model_t model ODP_UNUSED, const odp_ml_data_t data[] ODP_UNUSED,
+		     const odp_ml_run_param_t param[] ODP_UNUSED, int num ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_run_start(odp_ml_model_t model ODP_UNUSED, const odp_ml_data_t *data ODP_UNUSED,
+		     const odp_ml_compl_param_t *compl_param ODP_UNUSED,
+		     const odp_ml_run_param_t *run_param ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_run_start_multi(odp_ml_model_t model ODP_UNUSED,
+			   const odp_ml_data_t data[] ODP_UNUSED,
+			   const odp_ml_compl_param_t compl_param[] ODP_UNUSED,
+			   const odp_ml_run_param_t run_param[] ODP_UNUSED,
+			   int num ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_run_status(odp_ml_model_t model ODP_UNUSED, uint32_t compl_id ODP_UNUSED,
+		      odp_ml_run_result_t *result ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_extra_stat_info(odp_ml_model_t model ODP_UNUSED,
+				 odp_ml_extra_stat_info_t info[] ODP_UNUSED,
+				 int num ODP_UNUSED)
+{
+	return -1;
+}
+
+int odp_ml_model_extra_stats(odp_ml_model_t model ODP_UNUSED,
+			     uint64_t stats[] ODP_UNUSED, int num ODP_UNUSED)
+{
+	return -1;
+}
diff --git a/platform/linux-generic/odp_ml_quantize.c b/platform/linux-generic/odp_ml_quantize.c
new file mode 100644
index 000000000..d3f3601e3
--- /dev/null
+++ b/platform/linux-generic/odp_ml_quantize.c
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/ml_quantize.h>
+
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ml_fp16.h>
+
+#include <math.h>
+#include <stdint.h>
+
+/* Quantize float32 values to uint8: q = clip(round(x / scale) + zerop, 0, 255).
+ * nearbyintf() rounds in the current FP rounding mode (default: to nearest). */
+void odp_ml_fp32_to_uint8(uint8_t *u8, const float *fp32, uint32_t num, float scale,
+			  uint8_t zerop)
+{
+	float fval;
+
+	_ODP_ASSERT(scale != 0);
+
+	for (uint32_t i = 0; i < num; i++) {
+		/* Range mapping: map real values to signed integer */
+		fval = nearbyintf(fp32[i] / scale) + (float)zerop;
+
+		/* clip */
+		fval = _ODP_MAX(fval, 0.f);
+		fval = _ODP_MIN(fval, 255.f);
+		u8[i] = (uint8_t)(int32_t)fval;
+	}
+}
+
+/* Dequantize uint8 values to float32: x = (q - zerop) * scale */
+void odp_ml_fp32_from_uint8(float *fp32, const uint8_t *u8, uint32_t num, float scale,
+			    uint8_t zerop)
+{
+	for (uint32_t i = 0; i < num; i++)
+		fp32[i] = (float)(u8[i] - zerop) * scale;
+}
+
+/* Quantize float32 values to int8: q = clip(round(x / scale) + zerop, -127, 127) */
+void odp_ml_fp32_to_int8(int8_t *i8, const float *fp32, uint32_t num, float scale, int8_t zerop)
+{
+	float fval;
+
+	_ODP_ASSERT(scale != 0);
+
+	for (uint32_t i = 0; i < num; i++) {
+		/* Range mapping: map real values to signed integer */
+		fval = nearbyintf(fp32[i] / scale) + (float)zerop;
+
+		/* NOTE: Clamps signed quantization values to [-127,127] instead of [-128,127].
+		 * This is to ensure that symmetric quantization results in a zero
+		 * point of exactly 0 for signed 8 bit ints.
+		 */
+		fval = _ODP_MAX(fval, -127.f);
+		fval = _ODP_MIN(fval, 127.f);
+		i8[i] = (int8_t)(int32_t)fval;
+	}
+}
+
+/* Dequantize int8 values to float32: x = (q - zerop) * scale */
+void odp_ml_fp32_from_int8(float *fp32, const int8_t *i8, uint32_t num, float scale, int8_t zerop)
+{
+	for (uint32_t i = 0; i < num; i++)
+		fp32[i] = (float)(i8[i] - zerop) * scale;
+}
+
+/* Convert a float32 array to IEEE float16 bit patterns (round to nearest) */
+void odp_ml_fp32_to_fp16(uint16_t *fp16, const float *fp32, uint32_t num)
+{
+	uint32_t i;
+
+	for (i = 0; i < num; i++)
+		fp16[i] = _odp_float32_to_float16(fp32[i]);
+}
+
+/* Convert IEEE float16 bit patterns back to float32 (exact) */
+void odp_ml_fp32_from_fp16(float *fp32, const uint16_t *fp16, uint32_t num)
+{
+	uint32_t i;
+
+	for (i = 0; i < num; i++)
+		fp32[i] = _odp_float16_to_float32(fp16[i]);
+}
diff --git a/platform/linux-generic/odp_name_table.c b/platform/linux-generic/odp_name_table.c
index b116904da..7c93d594b 100644
--- a/platform/linux-generic/odp_name_table.c
+++ b/platform/linux-generic/odp_name_table.c
@@ -1,20 +1,23 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/hash.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_name_table_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+
#include <stdint.h>
#include <string.h>
#include <malloc.h>
#include <stdlib.h>
-#include <odp_name_table_internal.h>
-#include <odp_debug_internal.h>
-
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
+#include <inttypes.h>
/* The following constants define some tunable parameters of this module.
* They are set to fairly reasonable values (perhaps somewhat biased toward
@@ -54,7 +57,7 @@ typedef struct name_tbl_entry_s name_tbl_entry_t;
/* It is important for most platforms that the following struct fit within
* one cacheline.
*/
-struct name_tbl_entry_s {
+struct ODP_ALIGNED_CACHE name_tbl_entry_s {
name_tbl_entry_t *next_entry;
uint64_t user_data;
_odp_int_name_t name_tbl_id;
@@ -62,17 +65,17 @@ struct name_tbl_entry_s {
uint8_t name_kind;
uint8_t name_len;
char name[_ODP_INT_NAME_LEN + 1];
-} ODP_ALIGNED_CACHE;
+};
-typedef struct {
+typedef struct ODP_ALIGNED_CACHE {
uint32_t num_allocd;
uint32_t num_used;
uint32_t num_added_to_free_list;
uint32_t num_avail_to_add;
uint32_t base_id;
name_tbl_entry_t *free_list_head;
- name_tbl_entry_t entries[0];
-} ODP_ALIGNED_CACHE name_tbl_t;
+ name_tbl_entry_t entries[];
+} name_tbl_t;
typedef struct {
name_tbl_t *tbls[NUM_NAME_TBLS];
@@ -220,20 +223,19 @@ static void secondary_hash_dump(secondary_hash_tbl_t *secondary_hash_tbl)
entry_cnt = hash_tbl_entry & 0x3F;
list_cnt = linked_list_len(name_tbl_entry);
if (entry_cnt != list_cnt)
- ODP_DBG("%s idx=%u entry_cnt=%u "
+ _ODP_DBG("%s idx=%u entry_cnt=%u "
"list_cnt=%u\n",
__func__,
idx, entry_cnt, list_cnt);
count += entry_cnt;
} else {
- ODP_DBG("%s inner secondary tbl\n",
- __func__);
+ _ODP_DBG("%s inner secondary tbl\n", __func__);
}
}
}
- ODP_DBG("%s count=%u\n", __func__, count);
+ _ODP_DBG("%s count=%u\n", __func__, count);
}
static uint32_t name_tbl_free_list_add(name_tbl_t *name_tbl,
@@ -245,7 +247,7 @@ static uint32_t name_tbl_free_list_add(name_tbl_t *name_tbl,
name_tbl_id = name_tbl->base_id | first_idx;
entry_idx = first_idx;
- num_added = MIN(num_to_add, name_tbl->num_avail_to_add);
+ num_added = _ODP_MIN(num_to_add, name_tbl->num_avail_to_add);
if (num_added == 0)
return 0;
@@ -296,7 +298,7 @@ static int new_name_tbl_add(void)
name_tbls_idx = name_tbls.num_name_tbls;
num_entries = INITIAL_NAME_TBL_SIZE << name_tbls_idx;
new_name_tbl = name_tbl_alloc(name_tbls_idx, num_entries);
- name_tbl_free_list_add(new_name_tbl, MIN(num_entries, 256));
+ name_tbl_free_list_add(new_name_tbl, _ODP_MIN(num_entries, UINT32_C(256)));
name_tbls.tbls[name_tbls_idx] = new_name_tbl;
name_tbls.avail_space_bit_mask |= 1 << name_tbls_idx;
@@ -377,6 +379,7 @@ static void name_tbl_entry_free(name_tbl_entry_t *name_tbl_entry)
memset(name_tbl_entry, 0, sizeof(name_tbl_entry_t));
name_tbl_entry->next_entry = name_tbl->free_list_head;
name_tbl->free_list_head = name_tbl_entry;
+ name_tbl_entry->name_tbl_id = name_tbl_id;
}
static hash_tbl_entry_t make_hash_tbl_entry(name_tbl_entry_t *name_tbl_entry,
@@ -385,7 +388,7 @@ static hash_tbl_entry_t make_hash_tbl_entry(name_tbl_entry_t *name_tbl_entry,
hash_tbl_entry_t hash_tbl_entry;
uint32_t new_entry_cnt;
- new_entry_cnt = MIN(entry_cnt + 1, 0x3F);
+ new_entry_cnt = _ODP_MIN(entry_cnt + 1, UINT32_C(0x3F));
hash_tbl_entry = (hash_tbl_entry_t)(uintptr_t)name_tbl_entry;
hash_tbl_entry &= ~0x3F;
hash_tbl_entry |= new_entry_cnt;
@@ -1004,7 +1007,7 @@ static uint32_t level2_hash_histo(secondary_hash_tbl_t *hash_tbl,
collisions = linked_list_len(name_tbl_entry);
}
- level2_histo[MIN(collisions, 256)]++;
+ level2_histo[_ODP_MIN(collisions, UINT32_C(256))]++;
total_collisions += collisions;
}
@@ -1036,7 +1039,7 @@ static uint32_t level1_hash_histo(secondary_hash_tbl_t *hash_tbl,
level2_histo);
}
- level1_histo[MIN(collisions, 256)]++;
+ level1_histo[_ODP_MIN(collisions, UINT32_C(256))]++;
total_collisions += collisions;
}
@@ -1069,7 +1072,7 @@ static void secondary_hash_histo_print(void)
if (name_hash_tbl.num_secondary_tbls[0] == 0)
return;
- ODP_DBG(" level1 secondary hash histogram:\n");
+ _ODP_DBG(" level1 secondary hash histogram:\n");
total_count = 0;
for (idx = 0; idx < 256; idx++) {
count = level1_histo[idx];
@@ -1077,24 +1080,23 @@ static void secondary_hash_histo_print(void)
total_count += count * idx;
if (count != 0)
- ODP_DBG(" num collisions=%02u count=%u\n",
- idx, count);
+ _ODP_DBG(" num collisions=%02u count=%u\n", idx, count);
}
count = level1_histo[256];
total_count += count;
if (count != 0)
- ODP_DBG(" num collisions >=256 count=%u\n", count);
+ _ODP_DBG(" num collisions >=256 count=%u\n", count);
avg = (100 * total_count) / name_hash_tbl.num_secondary_tbls[0];
avg = avg / SECONDARY_HASH_TBL_SIZE;
- ODP_DBG(" avg collisions=%02u.%02u total=%u\n\n",
- avg / 100, avg % 100, total_count);
+ _ODP_DBG(" avg collisions=%02u.%02u total=%u\n\n",
+ avg / 100, avg % 100, total_count);
if (name_hash_tbl.num_secondary_tbls[1] == 0)
return;
- ODP_DBG(" level2 secondary hash histogram:\n");
+ _ODP_DBG(" level2 secondary hash histogram:\n");
total_count = 0;
for (idx = 0; idx < 256; idx++) {
count = level2_histo[idx];
@@ -1102,19 +1104,18 @@ static void secondary_hash_histo_print(void)
total_count += count * idx;
if (count != 0)
- ODP_DBG(" num collisions=%02u count=%u\n",
- idx, count);
+ _ODP_DBG(" num collisions=%02u count=%u\n", idx, count);
}
count = level2_histo[256];
total_count += count;
if (count != 0)
- ODP_DBG(" num collisions >=256 count=%u\n", count);
+ _ODP_DBG(" num collisions >=256 count=%u\n", count);
avg = (100 * total_count) / name_hash_tbl.num_secondary_tbls[1];
avg = avg / SECONDARY_HASH_TBL_SIZE;
- ODP_DBG(" avg collisions=%02u.%02u total=%u\n\n",
- avg / 100, avg % 100, total_count);
+ _ODP_DBG(" avg collisions=%02u.%02u total=%u\n\n",
+ avg / 100, avg % 100, total_count);
}
#endif
@@ -1126,30 +1127,31 @@ void _odp_int_name_tbl_stats_print(void)
count, total_count;
uint32_t avg;
- ODP_DBG("\nname table stats:\n");
- ODP_DBG(" num_names=%u num_adds=%lu "
- "num_deletes=%lu num_name_tbls=%u\n",
- name_tbls.current_num_names, name_tbls.num_adds,
- name_tbls.num_deletes, name_tbls.num_name_tbls);
+ _ODP_DBG("\nname table stats:\n");
+ _ODP_DBG(" num_names=%" PRIu32 " num_adds=%" PRIu64 " "
+ "num_deletes=%" PRIu64 " num_name_tbls=%" PRIu8 "\n",
+ name_tbls.current_num_names, name_tbls.num_adds,
+ name_tbls.num_deletes, name_tbls.num_name_tbls);
for (idx = 0; idx < NUM_NAME_TBLS; idx++) {
name_tbl = name_tbls.tbls[idx];
if ((name_tbl) && (name_tbl->num_used != 0))
- ODP_DBG(" name_tbl %u num_allocd=%7u "
- "num_added_to_free_list=%7u "
- "num_used=%7u num_avail_to_add=%7u\n", idx,
- name_tbl->num_allocd,
- name_tbl->num_added_to_free_list,
- name_tbl->num_used,
- name_tbl->num_avail_to_add);
+ _ODP_DBG(" name_tbl %u num_allocd=%7u "
+ "num_added_to_free_list=%7u "
+ "num_used=%7u num_avail_to_add=%7u\n", idx,
+ name_tbl->num_allocd,
+ name_tbl->num_added_to_free_list,
+ name_tbl->num_used,
+ name_tbl->num_avail_to_add);
}
memset(primary_hash_histo, 0, sizeof(primary_hash_histo));
for (idx = 0; idx < PRIMARY_HASH_TBL_SIZE; idx++) {
- collisions = MIN(name_hash_tbl.hash_collisions[idx], 256);
+ collisions =
+ _ODP_MIN(name_hash_tbl.hash_collisions[idx], UINT32_C(256));
primary_hash_histo[collisions]++;
}
- ODP_DBG(" name_tbl primary hash histogram:\n");
+ _ODP_DBG(" name_tbl primary hash histogram:\n");
total_count = 0;
for (idx = 0; idx < 256; idx++) {
count = primary_hash_histo[idx];
@@ -1157,23 +1159,20 @@ void _odp_int_name_tbl_stats_print(void)
total_count += count * idx;
if (count != 0)
- ODP_DBG(" num collisions=%02u count=%u\n",
- idx, count);
+ _ODP_DBG(" num collisions=%02u count=%u\n", idx, count);
}
count = primary_hash_histo[256];
total_count += count;
if (count != 0)
- ODP_DBG(" num collisions >=256 count=%u\n", count);
+ _ODP_DBG(" num collisions >=256 count=%u\n", count);
avg = (100 * total_count) / PRIMARY_HASH_TBL_SIZE;
- ODP_DBG(" avg collisions=%02u.%02u total=%u\n\n",
- avg / 100, avg % 100, total_count);
+ _ODP_DBG(" avg collisions=%02u.%02u total=%u\n\n",
+ avg / 100, avg % 100, total_count);
- ODP_DBG(" num of first level secondary hash tbls=%u "
- "second level tbls=%u\n",
- name_hash_tbl.num_secondary_tbls[0],
- name_hash_tbl.num_secondary_tbls[1]);
+ _ODP_DBG(" num of first level secondary hash tbls=%u second level tbls=%u\n",
+ name_hash_tbl.num_secondary_tbls[0], name_hash_tbl.num_secondary_tbls[1]);
#ifdef SECONDARY_HASH_HISTO_PRINT
if (name_hash_tbl.num_secondary_tbls[0] != 0)
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index 17a51b069..17a4a9298 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -1,18 +1,41 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/plat/packet_inlines.h>
+#include <odp/autoheader_external.h>
+
+#include <odp/api/byteorder.h>
+#include <odp/api/hash.h>
+#include <odp/api/hints.h>
#include <odp/api/packet.h>
-#include <odp_packet_internal.h>
+#include <odp/api/packet_flags.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/proto_stats.h>
+#include <odp/api/timer.h>
+
+#include <odp_parse_internal.h>
+#include <odp_chksum_internal.h>
#include <odp_debug_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/byteorder.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_print_internal.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/byteorder_inlines.h>
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_io_inlines.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
+#include <protocols/sctp.h>
#include <protocols/tcp.h>
#include <protocols/udp.h>
@@ -21,180 +44,152 @@
#include <stdio.h>
#include <inttypes.h>
-/* Initial packet segment data length */
-#define BASE_LEN CONFIG_PACKET_MAX_SEG_LEN
+#include <odp/visibility_begin.h>
/* Fill in packet header field offsets for inline functions */
const _odp_packet_inline_offset_t _odp_packet_inline ODP_ALIGNED_CACHE = {
- .data = offsetof(odp_packet_hdr_t, buf_hdr.seg[0].data),
- .seg_len = offsetof(odp_packet_hdr_t, buf_hdr.seg[0].len),
+ .seg_data = offsetof(odp_packet_hdr_t, seg_data),
+ .seg_len = offsetof(odp_packet_hdr_t, seg_len),
+ .seg_next = offsetof(odp_packet_hdr_t, seg_next),
.frame_len = offsetof(odp_packet_hdr_t, frame_len),
.headroom = offsetof(odp_packet_hdr_t, headroom),
.tailroom = offsetof(odp_packet_hdr_t, tailroom),
- .pool = offsetof(odp_packet_hdr_t, buf_hdr.pool_hdl),
+ .pool = offsetof(odp_packet_hdr_t, event_hdr.pool),
.input = offsetof(odp_packet_hdr_t, input),
- .segcount = offsetof(odp_packet_hdr_t, buf_hdr.segcount),
- .user_ptr = offsetof(odp_packet_hdr_t, buf_hdr.buf_ctx),
- .user_area = offsetof(odp_packet_hdr_t, buf_hdr.uarea_addr),
- .user_area_size = offsetof(odp_packet_hdr_t, buf_hdr.uarea_size),
+ .seg_count = offsetof(odp_packet_hdr_t, seg_count),
+ .user_ptr = offsetof(odp_packet_hdr_t, user_ptr),
+ .user_area = offsetof(odp_packet_hdr_t, uarea_addr),
+ .l2_offset = offsetof(odp_packet_hdr_t, p.l2_offset),
+ .l3_offset = offsetof(odp_packet_hdr_t, p.l3_offset),
+ .l4_offset = offsetof(odp_packet_hdr_t, p.l4_offset),
.flow_hash = offsetof(odp_packet_hdr_t, flow_hash),
.timestamp = offsetof(odp_packet_hdr_t, timestamp),
- .input_flags = offsetof(odp_packet_hdr_t, p.input_flags)
-
+ .input_flags = offsetof(odp_packet_hdr_t, p.input_flags),
+ .flags = offsetof(odp_packet_hdr_t, p.flags),
+ .cls_mark = offsetof(odp_packet_hdr_t, cls_mark),
+ .ipsec_ctx = offsetof(odp_packet_hdr_t, ipsec_ctx),
+ .crypto_op = offsetof(odp_packet_hdr_t, crypto_op_result),
};
-static inline odp_packet_hdr_t *packet_hdr(odp_packet_t pkt)
-{
- return (odp_packet_hdr_t *)(uintptr_t)pkt;
-}
+#include <odp/visibility_end.h>
-static inline odp_buffer_t buffer_handle(odp_packet_hdr_t *pkt_hdr)
-{
- return pkt_hdr->buf_hdr.handle.handle;
-}
+/* Check that invalid values are the same. Some versions of Clang and pedantic
+ * build have trouble with the strong type casting, and complain that these
+ * invalid values are not integral constants.
+ *
+ * Invalid values are required to be equal for _odp_buffer_is_valid() to work
+ * properly. */
+#ifndef __clang__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+ODP_STATIC_ASSERT(ODP_PACKET_INVALID == 0, "Packet invalid not 0");
+ODP_STATIC_ASSERT(ODP_BUFFER_INVALID == 0, "Buffer invalid not 0");
+ODP_STATIC_ASSERT(ODP_EVENT_INVALID == 0, "Event invalid not 0");
+ODP_STATIC_ASSERT(ODP_PACKET_VECTOR_INVALID == 0, "Packet vector invalid not 0");
+ODP_STATIC_ASSERT(ODP_PACKET_TX_COMPL_INVALID == 0, "Packet TX completion invalid not 0");
+ODP_STATIC_ASSERT(ODP_TIMEOUT_INVALID == 0, "Timeout invalid not 0");
+#pragma GCC diagnostic pop
+#endif
-static inline odp_packet_hdr_t *buf_to_packet_hdr(odp_buffer_t buf)
+static inline odp_packet_hdr_t *packet_seg_to_hdr(odp_packet_seg_t seg)
{
- return (odp_packet_hdr_t *)buf_hdl_to_hdr(buf);
+ return (odp_packet_hdr_t *)(uintptr_t)seg;
}
-static inline uint32_t packet_seg_len(odp_packet_hdr_t *pkt_hdr,
- uint32_t seg_idx)
+static inline odp_packet_seg_t packet_hdr_to_seg(odp_packet_hdr_t *pkt_hdr)
{
- return pkt_hdr->buf_hdr.seg[seg_idx].len;
+ return (odp_packet_seg_t)pkt_hdr;
}
-static inline void *packet_seg_data(odp_packet_hdr_t *pkt_hdr, uint32_t seg_idx)
+/*
+ * Return pointer to the current segment and step cur_hdr forward.
+ */
+static inline odp_packet_hdr_t *packet_seg_step(odp_packet_hdr_t **cur_hdr)
{
- return pkt_hdr->buf_hdr.seg[seg_idx].data;
-}
+ odp_packet_hdr_t *hdr = *cur_hdr;
-static inline int packet_last_seg(odp_packet_hdr_t *pkt_hdr)
-{
- if (CONFIG_PACKET_MAX_SEGS == 1)
- return 0;
- else
- return pkt_hdr->buf_hdr.segcount - 1;
-}
+ *cur_hdr = hdr->seg_next;
-static inline uint32_t packet_first_seg_len(odp_packet_hdr_t *pkt_hdr)
-{
- return packet_seg_len(pkt_hdr, 0);
+ return hdr;
}
-static inline uint32_t packet_last_seg_len(odp_packet_hdr_t *pkt_hdr)
+static inline void packet_seg_find_idx(odp_packet_hdr_t **pkt_hdr,
+ uint32_t find_idx)
{
- int last = packet_last_seg(pkt_hdr);
+ odp_packet_hdr_t *hdr = *pkt_hdr;
+ uint32_t idx = 0;
- return packet_seg_len(pkt_hdr, last);
+ while (odp_unlikely(idx < find_idx)) {
+ idx++;
+ hdr = hdr->seg_next;
+ }
+
+ *pkt_hdr = hdr;
}
-static inline void *packet_data(odp_packet_hdr_t *pkt_hdr)
+static inline uint32_t packet_seg_len(odp_packet_hdr_t *pkt_hdr,
+ uint32_t seg_idx)
{
- return pkt_hdr->buf_hdr.seg[0].data;
+ packet_seg_find_idx(&pkt_hdr, seg_idx);
+
+ return pkt_hdr->seg_len;
}
static inline void *packet_tail(odp_packet_hdr_t *pkt_hdr)
{
- int last = packet_last_seg(pkt_hdr);
- uint32_t seg_len = pkt_hdr->buf_hdr.seg[last].len;
+ odp_packet_hdr_t *last_seg = packet_last_seg(pkt_hdr);
- return pkt_hdr->buf_hdr.seg[last].data + seg_len;
+ return last_seg->seg_data + last_seg->seg_len;
}
-static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_hdr, int seg)
+static inline uint32_t seg_headroom(odp_packet_hdr_t *pkt_seg)
{
- odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
+ _odp_event_hdr_t *hdr = &pkt_seg->event_hdr;
+ pool_t *pool = _odp_pool_entry(hdr->pool);
uint8_t *base = hdr->base_data;
- uint8_t *head = pkt_hdr->buf_hdr.seg[seg].data;
+ uint8_t *head = pkt_seg->seg_data;
- return CONFIG_PACKET_HEADROOM + (head - base);
+ return pool->headroom + (head - base);
}
-static inline uint32_t seg_tailroom(odp_packet_hdr_t *pkt_hdr, int seg)
+static inline uint32_t seg_tailroom(odp_packet_hdr_t *pkt_seg)
{
- uint32_t seg_len = pkt_hdr->buf_hdr.seg[seg].len;
- odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
- uint8_t *tail = pkt_hdr->buf_hdr.seg[seg].data + seg_len;
+ _odp_event_hdr_t *hdr = &pkt_seg->event_hdr;
+ uint8_t *tail = pkt_seg->seg_data + pkt_seg->seg_len;
return hdr->buf_end - tail;
}
-static inline void push_head(odp_packet_hdr_t *pkt_hdr, uint32_t len)
-{
- pkt_hdr->headroom -= len;
- pkt_hdr->frame_len += len;
- pkt_hdr->buf_hdr.seg[0].data -= len;
- pkt_hdr->buf_hdr.seg[0].len += len;
-}
-
-static inline void pull_head(odp_packet_hdr_t *pkt_hdr, uint32_t len)
-{
- pkt_hdr->headroom += len;
- pkt_hdr->frame_len -= len;
- pkt_hdr->buf_hdr.seg[0].data += len;
- pkt_hdr->buf_hdr.seg[0].len -= len;
-}
-
static inline void push_tail(odp_packet_hdr_t *pkt_hdr, uint32_t len)
{
- int last = packet_last_seg(pkt_hdr);
+ odp_packet_hdr_t *last_seg = packet_last_seg(pkt_hdr);
pkt_hdr->tailroom -= len;
pkt_hdr->frame_len += len;
- pkt_hdr->buf_hdr.seg[last].len += len;
+ last_seg->seg_len += len;
}
-/* Copy all metadata for segmentation modification. Segment data and lengths
- * are not copied. */
-static inline void packet_seg_copy_md(odp_packet_hdr_t *dst,
- odp_packet_hdr_t *src)
-{
- dst->p = src->p;
-
- /* lengths are not copied:
- * .frame_len
- * .headroom
- * .tailroom
- */
-
- dst->input = src->input;
- dst->dst_queue = src->dst_queue;
- dst->flow_hash = src->flow_hash;
- dst->timestamp = src->timestamp;
- dst->op_result = src->op_result;
-
- /* buffer header side packet metadata */
- dst->buf_hdr.buf_u64 = src->buf_hdr.buf_u64;
- dst->buf_hdr.uarea_addr = src->buf_hdr.uarea_addr;
- dst->buf_hdr.uarea_size = src->buf_hdr.uarea_size;
-
- /* segmentation data is not copied:
- * buf_hdr.seg[]
- * buf_hdr.segcount
- */
-}
-
-static inline void *packet_map(odp_packet_hdr_t *pkt_hdr,
- uint32_t offset, uint32_t *seg_len, int *seg_idx)
+static inline void *packet_map(void *pkt_ptr, uint32_t offset,
+ uint32_t *seg_len, odp_packet_seg_t *seg)
{
void *addr;
uint32_t len;
- int seg = 0;
- int seg_count = pkt_hdr->buf_hdr.segcount;
+ odp_packet_hdr_t *pkt_hdr = pkt_ptr;
+ int seg_count = pkt_hdr->seg_count;
if (odp_unlikely(offset >= pkt_hdr->frame_len))
return NULL;
- if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || seg_count == 1)) {
- addr = pkt_hdr->buf_hdr.seg[0].data + offset;
- len = pkt_hdr->buf_hdr.seg[0].len - offset;
+ if (odp_likely(seg_count == 1)) {
+ addr = pkt_hdr->seg_data + offset;
+ len = pkt_hdr->seg_len - offset;
} else {
- int i;
+ odp_packet_hdr_t *next_hdr = pkt_hdr;
uint32_t seg_start = 0, seg_end = 0;
- for (i = 0; i < seg_count; i++) {
- seg_end += pkt_hdr->buf_hdr.seg[i].len;
+ while (next_hdr != NULL) {
+ pkt_hdr = packet_seg_step(&next_hdr);
+ seg_end += pkt_hdr->seg_len;
if (odp_likely(offset < seg_end))
break;
@@ -202,172 +197,192 @@ static inline void *packet_map(odp_packet_hdr_t *pkt_hdr,
seg_start = seg_end;
}
- addr = pkt_hdr->buf_hdr.seg[i].data + (offset - seg_start);
- len = pkt_hdr->buf_hdr.seg[i].len - (offset - seg_start);
- seg = i;
+ addr = pkt_hdr->seg_data + (offset - seg_start);
+ len = pkt_hdr->seg_len - (offset - seg_start);
}
if (seg_len)
*seg_len = len;
- if (seg_idx)
- *seg_idx = seg;
+ if (seg)
+ *seg = packet_hdr_to_seg(pkt_hdr);
return addr;
}
-static inline void packet_parse_disable(odp_packet_hdr_t *pkt_hdr)
+#include <odp/visibility_begin.h>
+
+/* This file uses the inlined version directly. Inlined API calls use this when
+ * offset does not point to the first segment. */
+void *_odp_packet_map(void *pkt_ptr, uint32_t offset, uint32_t *seg_len,
+ odp_packet_seg_t *seg)
{
- pkt_hdr->p.input_flags.parsed_l2 = 1;
- pkt_hdr->p.parsed_layers = LAYER_ALL;
+ return packet_map(pkt_ptr, offset, seg_len, seg);
}
-void packet_parse_reset(odp_packet_hdr_t *pkt_hdr)
+int _odp_packet_copy_from_mem_seg(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, const void *src)
{
- /* Reset parser metadata before new parse */
- pkt_hdr->p.parsed_layers = LAYER_NONE;
- pkt_hdr->p.error_flags.all = 0;
- pkt_hdr->p.input_flags.all = 0;
- pkt_hdr->p.output_flags.all = 0;
- pkt_hdr->p.l2_offset = 0;
- pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
- pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t cpylen;
+ const uint8_t *srcaddr = (const uint8_t *)src;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (offset + len > pkt_hdr->frame_len)
+ return -1;
+
+ while (len > 0) {
+ mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
+ cpylen = len > seglen ? seglen : len;
+ memcpy(mapaddr, srcaddr, cpylen);
+ offset += cpylen;
+ srcaddr += cpylen;
+ len -= cpylen;
+ }
+
+ return 0;
}
-/**
- * Initialize packet
- */
-static inline void packet_init(odp_packet_hdr_t *pkt_hdr, uint32_t len,
- int parse)
+int _odp_packet_copy_to_mem_seg(odp_packet_t pkt, uint32_t offset,
+ uint32_t len, void *dst)
{
- uint32_t seg_len;
- int num = pkt_hdr->buf_hdr.segcount;
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t cpylen;
+ uint8_t *dstaddr = (uint8_t *)dst;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || num == 1)) {
- seg_len = len;
- pkt_hdr->buf_hdr.seg[0].len = len;
- } else {
- seg_len = len - ((num - 1) * CONFIG_PACKET_MAX_SEG_LEN);
+ if (offset + len > pkt_hdr->frame_len)
+ return -1;
- /* Last segment data length */
- pkt_hdr->buf_hdr.seg[num - 1].len = seg_len;
+ while (len > 0) {
+ mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
+ cpylen = len > seglen ? seglen : len;
+ memcpy(dstaddr, mapaddr, cpylen);
+ offset += cpylen;
+ dstaddr += cpylen;
+ len -= cpylen;
}
- pkt_hdr->p.parsed_layers = LAYER_NONE;
- pkt_hdr->p.input_flags.all = 0;
- pkt_hdr->p.output_flags.all = 0;
- pkt_hdr->p.error_flags.all = 0;
+ return 0;
+}
+
+#include <odp/visibility_end.h>
- pkt_hdr->p.l2_offset = 0;
- pkt_hdr->p.l3_offset = ODP_PACKET_OFFSET_INVALID;
- pkt_hdr->p.l4_offset = ODP_PACKET_OFFSET_INVALID;
+static inline void link_segments(odp_packet_hdr_t *pkt_hdr[], int num)
+{
+ int cur = 0;
+ odp_packet_hdr_t *hdr;
+ odp_packet_hdr_t *head = pkt_hdr[0];
+ uint32_t seg_len = _odp_pool_entry(head->event_hdr.pool)->seg_len;
+
+ while (1) {
+ _odp_event_hdr_t *event_hdr = &pkt_hdr[cur]->event_hdr;
+
+ hdr = pkt_hdr[cur];
+ hdr->seg_data = event_hdr->base_data;
+ hdr->seg_len = seg_len;
- /* Disable lazy parsing on user allocated packets */
- if (!parse)
- packet_parse_disable(pkt_hdr);
+ /* init_segments() handles first seg ref_cnt init */
+ if (ODP_DEBUG == 1 && cur > 0) {
+ uint32_t prev_ref;
+ odp_atomic_u32_t *ref_cnt;
+
+ ref_cnt = &pkt_hdr[cur]->ref_cnt;
+ prev_ref = odp_atomic_fetch_inc_u32(ref_cnt);
+
+ _ODP_ASSERT(prev_ref == 0);
+ }
- /*
- * Packet headroom is set from the pool's headroom
- * Packet tailroom is rounded up to fill the last
- * segment occupied by the allocated length.
- */
- pkt_hdr->frame_len = len;
- pkt_hdr->headroom = CONFIG_PACKET_HEADROOM;
- pkt_hdr->tailroom = CONFIG_PACKET_MAX_SEG_LEN - seg_len +
- CONFIG_PACKET_TAILROOM;
+ cur++;
- pkt_hdr->input = ODP_PKTIO_INVALID;
+ if (cur == num) {
+ /* Last segment */
+ hdr->seg_next = NULL;
+ return;
+ }
+
+ hdr->seg_next = pkt_hdr[cur];
+ }
}
static inline void init_segments(odp_packet_hdr_t *pkt_hdr[], int num)
{
odp_packet_hdr_t *hdr;
- int i;
+ uint32_t seg_len;
/* First segment is the packet descriptor */
hdr = pkt_hdr[0];
+ seg_len = _odp_pool_entry(hdr->event_hdr.pool)->seg_len;
- hdr->buf_hdr.seg[0].data = hdr->buf_hdr.base_data;
- hdr->buf_hdr.seg[0].len = BASE_LEN;
+ /* Defaults for single segment packet */
+ hdr->seg_data = hdr->event_hdr.base_data;
+ hdr->seg_len = seg_len;
+ hdr->seg_next = NULL;
- /* Link segments */
- if (CONFIG_PACKET_MAX_SEGS != 1) {
- hdr->buf_hdr.segcount = num;
+ hdr->seg_count = num;
- if (odp_unlikely(num > 1)) {
- for (i = 1; i < num; i++) {
- odp_buffer_hdr_t *buf_hdr;
+ if (ODP_DEBUG == 1) {
+ uint32_t prev_ref =
+ odp_atomic_fetch_inc_u32(&hdr->ref_cnt);
- buf_hdr = &pkt_hdr[i]->buf_hdr;
- hdr->buf_hdr.seg[i].hdr = buf_hdr;
- hdr->buf_hdr.seg[i].data = buf_hdr->base_data;
- hdr->buf_hdr.seg[i].len = BASE_LEN;
- }
- }
+ _ODP_ASSERT(prev_ref == 0);
}
+
+ /* Link segments */
+ if (odp_unlikely(num > 1))
+ link_segments(pkt_hdr, num);
}
-/* Calculate the number of segments */
-static inline int num_segments(uint32_t len)
+static inline void reset_segments(odp_packet_hdr_t *pkt_hdr)
{
- uint32_t max_seg_len;
- int num;
-
- if (CONFIG_PACKET_MAX_SEGS == 1)
- return 1;
+ void *base;
+ uint32_t seg_len = _odp_pool_entry(pkt_hdr->event_hdr.pool)->seg_len;
- num = 1;
- max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+ while (pkt_hdr != NULL) {
+ base = pkt_hdr->event_hdr.base_data;
- if (odp_unlikely(len > max_seg_len)) {
- num = len / max_seg_len;
+ pkt_hdr->seg_len = seg_len;
+ pkt_hdr->seg_data = base;
- if (odp_likely((num * max_seg_len) != len))
- num += 1;
+ pkt_hdr = pkt_hdr->seg_next;
}
-
- return num;
}
-static inline void add_all_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from)
+/* Calculate the number of segments */
+static inline int num_segments(uint32_t len, uint32_t seg_len)
{
- int i;
- int n = to->buf_hdr.segcount;
- int num = from->buf_hdr.segcount;
+ int num = 1;
- for (i = 0; i < num; i++) {
- to->buf_hdr.seg[n + i].hdr = from->buf_hdr.seg[i].hdr;
- to->buf_hdr.seg[n + i].data = from->buf_hdr.seg[i].data;
- to->buf_hdr.seg[n + i].len = from->buf_hdr.seg[i].len;
+ if (odp_unlikely(len > seg_len)) {
+ num = len / seg_len;
+
+ if (odp_likely((num * seg_len) != len))
+ num += 1;
}
- to->buf_hdr.segcount = n + num;
+ return num;
}
-static inline void copy_num_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from,
- int first, int num)
+static inline void add_all_segs(odp_packet_hdr_t *to, odp_packet_hdr_t *from)
{
- int i;
+ odp_packet_hdr_t *last = packet_last_seg(to);
- for (i = 0; i < num; i++) {
- to->buf_hdr.seg[i].hdr = from->buf_hdr.seg[first + i].hdr;
- to->buf_hdr.seg[i].data = from->buf_hdr.seg[first + i].data;
- to->buf_hdr.seg[i].len = from->buf_hdr.seg[first + i].len;
- }
-
- to->buf_hdr.segcount = num;
+ last->seg_next = from;
+ to->seg_count += from->seg_count;
}
static inline odp_packet_hdr_t *alloc_segments(pool_t *pool, int num)
{
- odp_buffer_t buf[num];
odp_packet_hdr_t *pkt_hdr[num];
int ret;
- ret = buffer_alloc_multi(pool, buf, (odp_buffer_hdr_t **)pkt_hdr, num);
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)pkt_hdr, num);
+
if (odp_unlikely(ret != num)) {
if (ret > 0)
- buffer_free_multi(buf, ret);
+ _odp_event_free_multi((_odp_event_hdr_t **)pkt_hdr, ret);
return NULL;
}
@@ -389,32 +404,32 @@ static inline odp_packet_hdr_t *add_segments(odp_packet_hdr_t *pkt_hdr,
if (new_hdr == NULL)
return NULL;
- seg_len = len - ((num - 1) * pool->max_seg_len);
- offset = pool->max_seg_len - seg_len;
+ seg_len = len - ((num - 1) * pool->seg_len);
+ offset = pool->seg_len - seg_len;
if (head) {
/* add into the head*/
add_all_segs(new_hdr, pkt_hdr);
/* adjust first segment length */
- new_hdr->buf_hdr.seg[0].data += offset;
- new_hdr->buf_hdr.seg[0].len = seg_len;
+ new_hdr->seg_data += offset;
+ new_hdr->seg_len = seg_len;
- packet_seg_copy_md(new_hdr, pkt_hdr);
+ _odp_packet_copy_md(new_hdr, pkt_hdr, 0);
new_hdr->frame_len = pkt_hdr->frame_len + len;
new_hdr->headroom = pool->headroom + offset;
new_hdr->tailroom = pkt_hdr->tailroom;
pkt_hdr = new_hdr;
} else {
- int last;
+ odp_packet_hdr_t *last_seg;
/* add into the tail */
add_all_segs(pkt_hdr, new_hdr);
/* adjust last segment length */
- last = packet_last_seg(pkt_hdr);
- pkt_hdr->buf_hdr.seg[last].len = seg_len;
+ last_seg = packet_last_seg(pkt_hdr);
+ last_seg->seg_len = seg_len;
pkt_hdr->frame_len += len;
pkt_hdr->tailroom = pool->tailroom + offset;
@@ -423,56 +438,139 @@ static inline odp_packet_hdr_t *add_segments(odp_packet_hdr_t *pkt_hdr,
return pkt_hdr;
}
-static inline void free_bufs(odp_packet_hdr_t *pkt_hdr, int first, int num)
+static inline void segment_ref_inc(odp_packet_hdr_t *seg_hdr)
+{
+ uint32_t ref_cnt = odp_atomic_load_u32(&seg_hdr->ref_cnt);
+
+ /* First count increment after alloc */
+ if (odp_likely(ref_cnt == 0))
+ odp_atomic_store_u32(&seg_hdr->ref_cnt, 2);
+ else
+ odp_atomic_inc_u32(&seg_hdr->ref_cnt);
+}
+
+static inline uint32_t segment_ref_dec(odp_packet_hdr_t *seg_hdr)
+{
+ return odp_atomic_fetch_dec_u32(&seg_hdr->ref_cnt);
+}
+
+static inline uint32_t segment_ref(odp_packet_hdr_t *seg_hdr)
+{
+ return odp_atomic_load_u32(&seg_hdr->ref_cnt);
+}
+
+static inline int is_multi_ref(uint32_t ref_cnt)
+{
+ return (ref_cnt > 1);
+}
+
+static inline void packet_free_multi(odp_packet_hdr_t *hdr[], int num)
{
int i;
- odp_buffer_t buf[num];
+ uint32_t ref_cnt;
+ int num_ref = 0;
- for (i = 0; i < num; i++)
- buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[first + i].hdr);
+ for (i = 0; i < num; i++) {
+ /* Zero when reference API has not been used */
+ ref_cnt = segment_ref(hdr[i]);
- buffer_free_multi(buf, num);
+ if (odp_unlikely(ref_cnt)) {
+ ref_cnt = segment_ref_dec(hdr[i]);
+
+ if (is_multi_ref(ref_cnt)) {
+ num_ref++;
+ continue;
+ }
+ }
+
+ /* Skip references and pack to be freed headers to array head */
+ if (odp_unlikely(num_ref))
+ hdr[i - num_ref] = hdr[i];
+ }
+
+ num -= num_ref;
+
+ if (odp_likely(num))
+ _odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)hdr, num);
+}
+
+static inline void free_all_segments(odp_packet_hdr_t *pkt_hdr, int num)
+{
+ int i;
+ odp_packet_hdr_t *pkt_hdrs[num];
+ odp_packet_hdr_t *seg_hdr = pkt_hdr;
+
+ for (i = 0; i < num; i++) {
+ pkt_hdrs[i] = seg_hdr;
+ seg_hdr = seg_hdr->seg_next;
+ }
+
+ packet_free_multi(pkt_hdrs, num);
}
static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
int num, uint32_t free_len,
uint32_t pull_len, int head)
{
- int num_remain = pkt_hdr->buf_hdr.segcount - num;
+ odp_packet_hdr_t *seg_hdr;
+ int i;
+ int num_remain = pkt_hdr->seg_count - num;
+ odp_packet_hdr_t *hdr = pkt_hdr;
+ odp_packet_hdr_t *last_hdr = packet_last_seg(pkt_hdr);
+ odp_packet_hdr_t *pkt_hdrs[num];
if (head) {
odp_packet_hdr_t *new_hdr;
- int i;
- odp_buffer_t buf[num];
- for (i = 0; i < num; i++)
- buf[i] = buffer_handle(pkt_hdr->buf_hdr.seg[i].hdr);
+ for (i = 0; i < num; i++) {
+ seg_hdr = packet_seg_step(&hdr);
+ pkt_hdrs[i] = seg_hdr;
+ }
+
+ /* The first remaining header is the new packet descriptor.
+ * Copy remaining segments from the last to-be-removed header
+ * to the new header. */
+ new_hdr = hdr;
- /* First remaining segment is the new packet descriptor */
- new_hdr = pkt_hdr->buf_hdr.seg[num].hdr;
+ new_hdr->seg_next = hdr->seg_next;
+ new_hdr->seg_count = num_remain;
- copy_num_segs(new_hdr, pkt_hdr, num, num_remain);
- packet_seg_copy_md(new_hdr, pkt_hdr);
+ _odp_packet_copy_md(new_hdr, pkt_hdr, 0);
/* Tailroom not changed */
new_hdr->tailroom = pkt_hdr->tailroom;
- new_hdr->headroom = seg_headroom(new_hdr, 0);
- new_hdr->frame_len = pkt_hdr->frame_len - free_len;
+
+ new_hdr->headroom = seg_headroom(new_hdr);
+
+ new_hdr->frame_len = pkt_hdr->frame_len - free_len;
pull_head(new_hdr, pull_len);
pkt_hdr = new_hdr;
- buffer_free_multi(buf, num);
+ packet_free_multi(pkt_hdrs, num);
} else {
- /* Free last 'num' bufs */
- free_bufs(pkt_hdr, num_remain, num);
+ /* Free last 'num' bufs.
+ * First, find the last remaining header. */
+ packet_seg_find_idx(&hdr, num_remain - 1);
+ last_hdr = hdr;
+
+ packet_seg_step(&hdr);
+
+ for (i = 0; i < num; i++) {
+ seg_hdr = packet_seg_step(&hdr);
+ pkt_hdrs[i] = seg_hdr;
+ }
+
+ packet_free_multi(pkt_hdrs, num);
/* Head segment remains, no need to copy or update majority
* of the metadata. */
- pkt_hdr->buf_hdr.segcount = num_remain;
+ last_hdr->seg_next = NULL;
+
+ pkt_hdr->seg_count = num_remain;
pkt_hdr->frame_len -= free_len;
- pkt_hdr->tailroom = seg_tailroom(pkt_hdr, num_remain - 1);
+ pkt_hdr->tailroom = seg_tailroom(pkt_hdr);
pull_tail(pkt_hdr, pull_len);
}
@@ -481,16 +579,17 @@ static inline odp_packet_hdr_t *free_segments(odp_packet_hdr_t *pkt_hdr,
}
static inline int packet_alloc(pool_t *pool, uint32_t len, int max_pkt,
- int num_seg, odp_packet_t *pkt, int parse)
+ int num_seg, odp_packet_t *pkt)
{
int num_buf, i;
int num = max_pkt;
int max_buf = max_pkt * num_seg;
- odp_buffer_t buf[max_buf];
odp_packet_hdr_t *pkt_hdr[max_buf];
+ odp_packet_hdr_t *hdr_next;
+ odp_packet_hdr_t *hdr;
- num_buf = buffer_alloc_multi(pool, buf, (odp_buffer_hdr_t **)pkt_hdr,
- max_buf);
+ num_buf = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)pkt_hdr,
+ max_buf);
/* Failed to allocate all segments */
if (odp_unlikely(num_buf != max_buf)) {
@@ -499,55 +598,68 @@ static inline int packet_alloc(pool_t *pool, uint32_t len, int max_pkt,
num = num_buf / num_seg;
num_free = num_buf - (num * num_seg);
- if (num_free > 0)
- buffer_free_multi(&buf[num_buf - num_free], num_free);
+ if (num_free > 0) {
+ _odp_event_hdr_t **p;
+
+ p = (_odp_event_hdr_t **)&pkt_hdr[num_buf - num_free];
+ _odp_event_free_multi(p, num_free);
+ }
if (num == 0)
return 0;
}
- for (i = 0; i < num; i++) {
- odp_packet_hdr_t *hdr;
+ hdr_next = pkt_hdr[0];
+ odp_prefetch(hdr_next);
+ odp_prefetch((uint8_t *)hdr_next + ODP_CACHE_LINE_SIZE);
+
+ for (i = 0; i < num - 1; i++) {
+ hdr = hdr_next;
+ hdr_next = pkt_hdr[(i + 1) * num_seg];
+
+ odp_prefetch(hdr_next);
+ odp_prefetch((uint8_t *)hdr_next + ODP_CACHE_LINE_SIZE);
/* First buffer is the packet descriptor */
- hdr = pkt_hdr[i * num_seg];
pkt[i] = packet_handle(hdr);
init_segments(&pkt_hdr[i * num_seg], num_seg);
- packet_init(hdr, len, parse);
+ packet_init(hdr, len);
}
+ /* Last packet */
+ pkt[i] = packet_handle(hdr_next);
+ init_segments(&pkt_hdr[i * num_seg], num_seg);
+ packet_init(hdr_next, len);
+
return num;
}
-int packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
- odp_packet_t pkt[], int max_num)
+int _odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
+ odp_packet_t pkt[], int max_num)
{
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
int num, num_seg;
- num_seg = num_segments(len);
- num = packet_alloc(pool, len, max_num, num_seg, pkt, 1);
+ num_seg = num_segments(len, pool->seg_len);
+ num = packet_alloc(pool, len, max_num, num_seg, pkt);
return num;
}
odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
{
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
odp_packet_t pkt;
int num, num_seg;
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
- __odp_errno = EINVAL;
- return ODP_PACKET_INVALID;
- }
+ _ODP_ASSERT(pool->type == ODP_POOL_PACKET);
- if (odp_unlikely(len > pool->max_len))
+ if (odp_unlikely(len > pool->max_len || len == 0))
return ODP_PACKET_INVALID;
- num_seg = num_segments(len);
- num = packet_alloc(pool, len, 1, num_seg, &pkt, 0);
+ num_seg = num_segments(len, pool->seg_len);
+ num = packet_alloc(pool, len, 1, num_seg, &pkt);
if (odp_unlikely(num == 0))
return ODP_PACKET_INVALID;
@@ -558,19 +670,16 @@ odp_packet_t odp_packet_alloc(odp_pool_t pool_hdl, uint32_t len)
int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
odp_packet_t pkt[], int max_num)
{
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
int num, num_seg;
- if (odp_unlikely(pool->params.type != ODP_POOL_PACKET)) {
- __odp_errno = EINVAL;
- return -1;
- }
+ _ODP_ASSERT(pool->type == ODP_POOL_PACKET);
- if (odp_unlikely(len > pool->max_len))
+ if (odp_unlikely(len > pool->max_len || len == 0))
return -1;
- num_seg = num_segments(len);
- num = packet_alloc(pool, len, max_num, num_seg, pkt, 0);
+ num_seg = num_segments(len, pool->seg_len);
+ num = packet_alloc(pool, len, max_num, num_seg, pkt);
return num;
}
@@ -578,89 +687,95 @@ int odp_packet_alloc_multi(odp_pool_t pool_hdl, uint32_t len,
void odp_packet_free(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- odp_buffer_t hdl = buffer_handle(pkt_hdr);
+ int num_seg = pkt_hdr->seg_count;
- int num_seg = pkt_hdr->buf_hdr.segcount;
+ _odp_packet_validate(pkt, _ODP_EV_PACKET_FREE);
- if (odp_likely(CONFIG_PACKET_MAX_SEGS == 1 || num_seg == 1))
- buffer_free_multi(&hdl, 1);
+ _ODP_ASSERT(segment_ref(pkt_hdr) > 0);
+
+ if (odp_likely(num_seg == 1))
+ packet_free_multi(&pkt_hdr, 1);
else
- free_bufs(pkt_hdr, 0, num_seg);
+ free_all_segments(pkt_hdr, num_seg);
}
-void odp_packet_free_multi(const odp_packet_t pkt[], int num)
+static inline void packet_free_multi_ev(const odp_packet_t pkt[], int num, _odp_ev_id_t id)
{
- if (CONFIG_PACKET_MAX_SEGS == 1) {
- buffer_free_multi((const odp_buffer_t * const)pkt, num);
- } else {
- odp_buffer_t buf[num * CONFIG_PACKET_MAX_SEGS];
- int i, j;
- int bufs = 0;
+ odp_packet_hdr_t *pkt_hdrs[num];
+ int i;
+ int num_freed = 0;
- for (i = 0; i < num; i++) {
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt[i]);
- int num_seg = pkt_hdr->buf_hdr.segcount;
- odp_buffer_hdr_t *buf_hdr = &pkt_hdr->buf_hdr;
+ _odp_packet_validate_multi(pkt, num, id);
- buf[bufs] = buffer_handle(pkt_hdr);
- bufs++;
+ for (i = 0; i < num; i++) {
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt[i]);
+ int num_seg = pkt_hdr->seg_count;
- if (odp_likely(num_seg == 1))
- continue;
+ _ODP_ASSERT(segment_ref(pkt_hdr) > 0);
- for (j = 1; j < num_seg; j++) {
- buf[bufs] = buffer_handle(buf_hdr->seg[j].hdr);
- bufs++;
- }
+ if (odp_unlikely(num_seg > 1)) {
+ free_all_segments(pkt_hdr, num_seg);
+ num_freed++;
+ continue;
}
- buffer_free_multi(buf, bufs);
+ pkt_hdrs[i - num_freed] = pkt_hdr;
}
+
+ if (odp_likely(num - num_freed))
+ packet_free_multi(pkt_hdrs, num - num_freed);
}
-int odp_packet_reset(odp_packet_t pkt, uint32_t len)
+void odp_packet_free_multi(const odp_packet_t pkt[], int num)
{
- odp_packet_hdr_t *const pkt_hdr = packet_hdr(pkt);
- pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
-
- if (len > pool->headroom + pool->data_size + pool->tailroom)
- return -1;
-
- packet_init(pkt_hdr, len, 0);
-
- return 0;
+ packet_free_multi_ev(pkt, num, _ODP_EV_PACKET_FREE_MULTI);
}
-odp_packet_t _odp_packet_from_buffer(odp_buffer_t buf)
+void odp_packet_free_sp(const odp_packet_t pkt[], int num)
{
- if (odp_unlikely(buf == ODP_BUFFER_INVALID))
- return ODP_PACKET_INVALID;
-
- return (odp_packet_t)buf_to_packet_hdr(buf);
+ packet_free_multi_ev(pkt, num, _ODP_EV_PACKET_FREE_SP);
}
-odp_buffer_t _odp_packet_to_buffer(odp_packet_t pkt)
+int odp_packet_reset(odp_packet_t pkt, uint32_t len)
{
- if (odp_unlikely(pkt == ODP_PACKET_INVALID))
- return ODP_BUFFER_INVALID;
+ odp_packet_hdr_t *const pkt_hdr = packet_hdr(pkt);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
+ int num = pkt_hdr->seg_count;
+ int num_req;
- return buffer_handle(packet_hdr(pkt));
-}
+ if (odp_unlikely(len > (pool->seg_len * num)) || len == 0)
+ return -1;
-odp_packet_t odp_packet_from_event(odp_event_t ev)
-{
- if (odp_unlikely(ev == ODP_EVENT_INVALID))
- return ODP_PACKET_INVALID;
+ /* Free possible extra segments */
+ num_req = num_segments(len, pool->seg_len);
+ if (odp_unlikely(num_req < num))
+ free_segments(pkt_hdr, num - num_req, 0, 0, 0);
+ reset_segments(pkt_hdr);
+
+ packet_init(pkt_hdr, len);
- return (odp_packet_t)buf_to_packet_hdr((odp_buffer_t)ev);
+ return 0;
}
-odp_event_t odp_packet_to_event(odp_packet_t pkt)
+int odp_event_filter_packet(const odp_event_t event[],
+ odp_packet_t packet[],
+ odp_event_t remain[], int num)
{
- if (odp_unlikely(pkt == ODP_PACKET_INVALID))
- return ODP_EVENT_INVALID;
+ int i;
+ int num_pkt = 0;
+ int num_rem = 0;
- return (odp_event_t)buffer_handle(packet_hdr(pkt));
+ for (i = 0; i < num; i++) {
+ if (odp_event_type(event[i]) == ODP_EVENT_PACKET) {
+ packet[num_pkt] = odp_packet_from_event(event[i]);
+ num_pkt++;
+ } else {
+ remain[num_rem] = event[i];
+ num_rem++;
+ }
+ }
+
+ return num_pkt;
}
/*
@@ -673,8 +788,9 @@ odp_event_t odp_packet_to_event(odp_packet_t pkt)
uint32_t odp_packet_buf_len(odp_packet_t pkt)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
- return pkt_hdr->buf_hdr.size * pkt_hdr->buf_hdr.segcount;
+ return pool->max_seg_len * pkt_hdr->seg_count;
}
void *odp_packet_tail(odp_packet_t pkt)
@@ -695,179 +811,6 @@ void *odp_packet_push_head(odp_packet_t pkt, uint32_t len)
return packet_data(pkt_hdr);
}
-static inline uint32_t pack_seg_head(odp_packet_hdr_t *pkt_hdr, int seg)
-{
- odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
- uint32_t len = pkt_hdr->buf_hdr.seg[seg].len;
- uint8_t *src = pkt_hdr->buf_hdr.seg[seg].data;
- uint8_t *dst = hdr->base_data;
-
- if (dst != src) {
- memmove(dst, src, len);
- pkt_hdr->buf_hdr.seg[seg].data = dst;
- }
-
- return len;
-}
-
-static inline uint32_t pack_seg_tail(odp_packet_hdr_t *pkt_hdr, int seg)
-{
- odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[seg].hdr;
- uint32_t len = pkt_hdr->buf_hdr.seg[seg].len;
- uint8_t *src = pkt_hdr->buf_hdr.seg[seg].data;
- uint8_t *dst = hdr->base_data + BASE_LEN - len;
-
- if (dst != src) {
- memmove(dst, src, len);
- pkt_hdr->buf_hdr.seg[seg].data = dst;
- }
-
- return len;
-}
-
-static inline uint32_t fill_seg_head(odp_packet_hdr_t *pkt_hdr, int dst_seg,
- int src_seg, uint32_t max_len)
-{
- uint32_t len = pkt_hdr->buf_hdr.seg[src_seg].len;
- uint8_t *src = pkt_hdr->buf_hdr.seg[src_seg].data;
- uint32_t offset = pkt_hdr->buf_hdr.seg[dst_seg].len;
- uint8_t *dst = pkt_hdr->buf_hdr.seg[dst_seg].data + offset;
-
- if (len > max_len)
- len = max_len;
-
- memmove(dst, src, len);
-
- pkt_hdr->buf_hdr.seg[dst_seg].len += len;
- pkt_hdr->buf_hdr.seg[src_seg].len -= len;
- pkt_hdr->buf_hdr.seg[src_seg].data += len;
-
- if (pkt_hdr->buf_hdr.seg[src_seg].len == 0) {
- odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[src_seg].hdr;
-
- pkt_hdr->buf_hdr.seg[src_seg].data = hdr->base_data;
- }
-
- return len;
-}
-
-static inline uint32_t fill_seg_tail(odp_packet_hdr_t *pkt_hdr, int dst_seg,
- int src_seg, uint32_t max_len)
-{
- uint32_t src_len = pkt_hdr->buf_hdr.seg[src_seg].len;
- uint8_t *src = pkt_hdr->buf_hdr.seg[src_seg].data;
- uint8_t *dst = pkt_hdr->buf_hdr.seg[dst_seg].data;
- uint32_t len = src_len;
-
- if (len > max_len)
- len = max_len;
-
- src += src_len - len;
- dst -= len;
-
- memmove(dst, src, len);
-
- pkt_hdr->buf_hdr.seg[dst_seg].data -= len;
- pkt_hdr->buf_hdr.seg[dst_seg].len += len;
- pkt_hdr->buf_hdr.seg[src_seg].len -= len;
-
- if (pkt_hdr->buf_hdr.seg[src_seg].len == 0) {
- odp_buffer_hdr_t *hdr = pkt_hdr->buf_hdr.seg[src_seg].hdr;
-
- pkt_hdr->buf_hdr.seg[src_seg].data = hdr->base_data;
- }
-
- return len;
-}
-
-static inline int move_data_to_head(odp_packet_hdr_t *pkt_hdr, int segs)
-{
- int dst_seg, src_seg;
- uint32_t len, free_len;
- uint32_t moved = 0;
-
- for (dst_seg = 0; dst_seg < segs; dst_seg++) {
- len = pack_seg_head(pkt_hdr, dst_seg);
- moved += len;
-
- if (len == BASE_LEN)
- continue;
-
- free_len = BASE_LEN - len;
-
- for (src_seg = dst_seg + 1; src_seg < segs; src_seg++) {
- len = fill_seg_head(pkt_hdr, dst_seg, src_seg,
- free_len);
- moved += len;
-
- if (len == free_len) {
- /* dst seg is full */
- break;
- }
-
- /* src seg is empty */
- free_len -= len;
- }
-
- if (moved == pkt_hdr->frame_len)
- break;
- }
-
- /* last segment which have data */
- return dst_seg;
-}
-
-static inline int move_data_to_tail(odp_packet_hdr_t *pkt_hdr, int segs)
-{
- int dst_seg, src_seg;
- uint32_t len, free_len;
- uint32_t moved = 0;
-
- for (dst_seg = segs - 1; dst_seg >= 0; dst_seg--) {
- len = pack_seg_tail(pkt_hdr, dst_seg);
- moved += len;
-
- if (len == BASE_LEN)
- continue;
-
- free_len = BASE_LEN - len;
-
- for (src_seg = dst_seg - 1; src_seg >= 0; src_seg--) {
- len = fill_seg_tail(pkt_hdr, dst_seg, src_seg,
- free_len);
- moved += len;
-
- if (len == free_len) {
- /* dst seg is full */
- break;
- }
-
- /* src seg is empty */
- free_len -= len;
- }
-
- if (moved == pkt_hdr->frame_len)
- break;
- }
-
- /* first segment which have data */
- return dst_seg;
-}
-
-static inline void reset_seg(odp_packet_hdr_t *pkt_hdr, int first, int num)
-{
- odp_buffer_hdr_t *hdr;
- void *base;
- int i;
-
- for (i = first; i < first + num; i++) {
- hdr = pkt_hdr->buf_hdr.seg[i].hdr;
- base = hdr->base_data;
- pkt_hdr->buf_hdr.seg[i].len = BASE_LEN;
- pkt_hdr->buf_hdr.seg[i].data = base;
- }
-}
-
int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len,
void **data_ptr, uint32_t *seg_len)
{
@@ -877,89 +820,28 @@ int odp_packet_extend_head(odp_packet_t *pkt, uint32_t len,
int ret = 0;
if (len > headroom) {
- pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
int num;
- int segs;
+ void *ptr;
if (odp_unlikely((frame_len + len) > pool->max_len))
return -1;
- num = num_segments(len - headroom);
- segs = pkt_hdr->buf_hdr.segcount;
-
- if (odp_unlikely((segs + num) > CONFIG_PACKET_MAX_SEGS)) {
- /* Cannot directly add new segments */
- odp_packet_hdr_t *new_hdr;
- int new_segs = 0;
- int free_segs = 0;
- uint32_t offset;
-
- num = num_segments(frame_len + len);
-
- if (num > segs) {
- /* Allocate additional segments */
- new_segs = num - segs;
- new_hdr = alloc_segments(pool, new_segs);
-
- if (new_hdr == NULL)
- return -1;
-
- } else if (num < segs) {
- free_segs = segs - num;
- }
-
- /* Pack all data to packet tail */
- move_data_to_tail(pkt_hdr, segs);
- reset_seg(pkt_hdr, 0, segs);
-
- if (new_segs) {
- add_all_segs(new_hdr, pkt_hdr);
- packet_seg_copy_md(new_hdr, pkt_hdr);
- segs += new_segs;
-
- pkt_hdr = new_hdr;
- *pkt = packet_handle(pkt_hdr);
- } else if (free_segs) {
- new_hdr = pkt_hdr->buf_hdr.seg[free_segs].hdr;
- packet_seg_copy_md(new_hdr, pkt_hdr);
-
- /* Free extra segs */
- free_bufs(pkt_hdr, 0, free_segs);
-
- segs -= free_segs;
- pkt_hdr = new_hdr;
- *pkt = packet_handle(pkt_hdr);
- }
-
- frame_len += len;
- offset = (segs * BASE_LEN) - frame_len;
-
- pkt_hdr->buf_hdr.seg[0].data += offset;
- pkt_hdr->buf_hdr.seg[0].len -= offset;
-
- pkt_hdr->buf_hdr.segcount = segs;
- pkt_hdr->frame_len = frame_len;
- pkt_hdr->headroom = offset + pool->headroom;
- pkt_hdr->tailroom = pool->tailroom;
-
- /* Data was moved */
- ret = 1;
- } else {
- void *ptr;
+ num = num_segments(len - headroom, pool->seg_len);
+ if (odp_unlikely(pkt_hdr->seg_count + num > PKT_MAX_SEGS))
+ return -1;
- push_head(pkt_hdr, headroom);
- ptr = add_segments(pkt_hdr, pool, len - headroom,
- num, 1);
+ push_head(pkt_hdr, headroom);
+ ptr = add_segments(pkt_hdr, pool, len - headroom, num, 1);
- if (ptr == NULL) {
- /* segment alloc failed, rollback changes */
- pull_head(pkt_hdr, headroom);
- return -1;
- }
-
- *pkt = packet_handle(ptr);
- pkt_hdr = ptr;
+ if (ptr == NULL) {
+ /* segment alloc failed, rollback changes */
+ pull_head(pkt_hdr, headroom);
+ return -1;
}
+
+ *pkt = packet_handle(ptr);
+ pkt_hdr = ptr;
} else {
push_head(pkt_hdr, len);
}
@@ -977,7 +859,7 @@ void *odp_packet_pull_head(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- if (len > pkt_hdr->frame_len)
+ if (len >= pkt_hdr->seg_len)
return NULL;
pull_head(pkt_hdr, len);
@@ -990,12 +872,12 @@ int odp_packet_trunc_head(odp_packet_t *pkt, uint32_t len,
odp_packet_hdr_t *pkt_hdr = packet_hdr(*pkt);
uint32_t seg_len = packet_first_seg_len(pkt_hdr);
- if (len > pkt_hdr->frame_len)
+ if (len >= pkt_hdr->frame_len)
return -1;
if (len < seg_len) {
pull_head(pkt_hdr, len);
- } else if (CONFIG_PACKET_MAX_SEGS != 1) {
+ } else {
int num = 0;
uint32_t pull_len = 0;
@@ -1027,6 +909,8 @@ void *odp_packet_push_tail(odp_packet_t pkt, uint32_t len)
if (len > pkt_hdr->tailroom)
return NULL;
+ _ODP_ASSERT(odp_packet_has_ref(pkt) == 0);
+
old_tail = packet_tail(pkt_hdr);
push_tail(pkt_hdr, len);
@@ -1042,78 +926,27 @@ int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len,
uint32_t tail_off = frame_len;
int ret = 0;
+ _ODP_ASSERT(odp_packet_has_ref(*pkt) == 0);
+
if (len > tailroom) {
- pool_t *pool = pool_entry_from_hdl(pkt_hdr->buf_hdr.pool_hdl);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
int num;
- int segs;
+ void *ptr;
if (odp_unlikely((frame_len + len) > pool->max_len))
return -1;
- num = num_segments(len - tailroom);
- segs = pkt_hdr->buf_hdr.segcount;
-
- if (odp_unlikely((segs + num) > CONFIG_PACKET_MAX_SEGS)) {
- /* Cannot directly add new segments */
- odp_packet_hdr_t *new_hdr;
- int new_segs = 0;
- int free_segs = 0;
- uint32_t offset;
-
- num = num_segments(frame_len + len);
-
- if (num > segs) {
- /* Allocate additional segments */
- new_segs = num - segs;
- new_hdr = alloc_segments(pool, new_segs);
-
- if (new_hdr == NULL)
- return -1;
-
- } else if (num < segs) {
- free_segs = segs - num;
- }
-
- /* Pack all data to packet head */
- move_data_to_head(pkt_hdr, segs);
- reset_seg(pkt_hdr, 0, segs);
-
- if (new_segs) {
- /* Add new segs */
- add_all_segs(pkt_hdr, new_hdr);
- segs += new_segs;
- } else if (free_segs) {
- /* Free extra segs */
- free_bufs(pkt_hdr, segs - free_segs, free_segs);
-
- segs -= free_segs;
- }
-
- frame_len += len;
- offset = (segs * BASE_LEN) - frame_len;
-
- pkt_hdr->buf_hdr.seg[segs - 1].len -= offset;
-
- pkt_hdr->buf_hdr.segcount = segs;
- pkt_hdr->frame_len = frame_len;
- pkt_hdr->headroom = pool->headroom;
- pkt_hdr->tailroom = offset + pool->tailroom;
-
- /* Data was moved */
- ret = 1;
- } else {
- void *ptr;
-
- push_tail(pkt_hdr, tailroom);
+ num = num_segments(len - tailroom, pool->seg_len);
+ if (odp_unlikely(pkt_hdr->seg_count + num > PKT_MAX_SEGS))
+ return -1;
- ptr = add_segments(pkt_hdr, pool, len - tailroom,
- num, 0);
+ push_tail(pkt_hdr, tailroom);
+ ptr = add_segments(pkt_hdr, pool, len - tailroom, num, 0);
- if (ptr == NULL) {
- /* segment alloc failed, rollback changes */
- pull_tail(pkt_hdr, tailroom);
- return -1;
- }
+ if (ptr == NULL) {
+ /* segment alloc failed, rollback changes */
+ pull_tail(pkt_hdr, tailroom);
+ return -1;
}
} else {
push_tail(pkt_hdr, len);
@@ -1128,8 +961,11 @@ int odp_packet_extend_tail(odp_packet_t *pkt, uint32_t len,
void *odp_packet_pull_tail(odp_packet_t pkt, uint32_t len)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ odp_packet_hdr_t *last_seg = packet_last_seg(pkt_hdr);
- if (len > packet_last_seg_len(pkt_hdr))
+ _ODP_ASSERT(odp_packet_has_ref(pkt) == 0);
+
+ if (len >= last_seg->seg_len)
return NULL;
pull_tail(pkt_hdr, len);
@@ -1142,20 +978,25 @@ int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len,
{
int last;
uint32_t seg_len;
+ odp_packet_hdr_t *last_seg;
odp_packet_hdr_t *pkt_hdr = packet_hdr(*pkt);
- if (len > pkt_hdr->frame_len)
+ if (len >= pkt_hdr->frame_len)
return -1;
- last = packet_last_seg(pkt_hdr);
- seg_len = packet_seg_len(pkt_hdr, last);
+ _ODP_ASSERT(odp_packet_has_ref(*pkt) == 0);
+
+ last = pkt_hdr->seg_count - 1;
+ last_seg = packet_last_seg(pkt_hdr);
+ seg_len = last_seg->seg_len;
if (len < seg_len) {
pull_tail(pkt_hdr, len);
- } else if (CONFIG_PACKET_MAX_SEGS != 1) {
+ } else {
int num = 0;
uint32_t pull_len = 0;
+ /* Reverse order */
while (seg_len <= len) {
pull_len = len - seg_len;
num++;
@@ -1176,12 +1017,9 @@ int odp_packet_trunc_tail(odp_packet_t *pkt, uint32_t len,
void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len,
odp_packet_seg_t *seg)
{
- int seg_idx;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- void *addr = packet_map(pkt_hdr, offset, len, &seg_idx);
- if (addr != NULL && seg != NULL)
- *seg = _odp_packet_seg_from_ndx(seg_idx);
+ void *addr = packet_map(pkt_hdr, offset, len, seg);
return addr;
}
@@ -1193,105 +1031,11 @@ void *odp_packet_offset(odp_packet_t pkt, uint32_t offset, uint32_t *len,
*
*/
-int odp_packet_input_index(odp_packet_t pkt)
-{
- return odp_pktio_index(packet_hdr(pkt)->input);
-}
-
-void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ctx)
-{
- packet_hdr(pkt)->buf_hdr.buf_cctx = ctx;
-}
-
-void *odp_packet_l2_ptr(odp_packet_t pkt, uint32_t *len)
+uint16_t odp_packet_ones_comp(odp_packet_t pkt, odp_packet_data_range_t *range)
{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (!packet_hdr_has_l2(pkt_hdr))
- return NULL;
- return packet_map(pkt_hdr, pkt_hdr->p.l2_offset, len, NULL);
-}
-
-uint32_t odp_packet_l2_offset(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (!packet_hdr_has_l2(pkt_hdr))
- return ODP_PACKET_OFFSET_INVALID;
- return pkt_hdr->p.l2_offset;
-}
-
-int odp_packet_l2_offset_set(odp_packet_t pkt, uint32_t offset)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (offset >= pkt_hdr->frame_len)
- return -1;
-
- packet_hdr_has_l2_set(pkt_hdr, 1);
- pkt_hdr->p.l2_offset = offset;
- return 0;
-}
-
-void *odp_packet_l3_ptr(odp_packet_t pkt, uint32_t *len)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
- return packet_map(pkt_hdr, pkt_hdr->p.l3_offset, len, NULL);
-}
-
-uint32_t odp_packet_l3_offset(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
- return pkt_hdr->p.l3_offset;
-}
-
-int odp_packet_l3_offset_set(odp_packet_t pkt, uint32_t offset)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (offset >= pkt_hdr->frame_len)
- return -1;
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
- pkt_hdr->p.l3_offset = offset;
- return 0;
-}
-
-void *odp_packet_l4_ptr(odp_packet_t pkt, uint32_t *len)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
- return packet_map(pkt_hdr, pkt_hdr->p.l4_offset, len, NULL);
-}
-
-uint32_t odp_packet_l4_offset(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
- return pkt_hdr->p.l4_offset;
-}
-
-int odp_packet_l4_offset_set(odp_packet_t pkt, uint32_t offset)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (offset >= pkt_hdr->frame_len)
- return -1;
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
- pkt_hdr->p.l4_offset = offset;
+ (void)pkt;
+ range->length = 0;
+ range->offset = 0;
return 0;
}
@@ -1299,16 +1043,7 @@ void odp_packet_flow_hash_set(odp_packet_t pkt, uint32_t flow_hash)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- pkt_hdr->flow_hash = flow_hash;
- pkt_hdr->p.input_flags.flow_hash = 1;
-}
-
-void odp_packet_ts_set(odp_packet_t pkt, odp_time_t timestamp)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- pkt_hdr->timestamp = timestamp;
- pkt_hdr->p.input_flags.timestamp = 1;
+ packet_set_flow_hash(pkt_hdr, flow_hash);
}
/*
@@ -1318,26 +1053,9 @@ void odp_packet_ts_set(odp_packet_t pkt, odp_time_t timestamp)
*
*/
-void *odp_packet_seg_data(odp_packet_t pkt, odp_packet_seg_t seg)
+odp_packet_seg_t odp_packet_last_seg(odp_packet_t pkt)
{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (odp_unlikely(_odp_packet_seg_to_ndx(seg) >=
- pkt_hdr->buf_hdr.segcount))
- return NULL;
-
- return packet_seg_data(pkt_hdr, _odp_packet_seg_to_ndx(seg));
-}
-
-uint32_t odp_packet_seg_data_len(odp_packet_t pkt, odp_packet_seg_t seg)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (odp_unlikely(_odp_packet_seg_to_ndx(seg) >=
- pkt_hdr->buf_hdr.segcount))
- return 0;
-
- return packet_seg_len(pkt_hdr, _odp_packet_seg_to_ndx(seg));
+ return (odp_packet_seg_t)packet_last_seg(packet_hdr(pkt));
}
/*
@@ -1352,12 +1070,13 @@ int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
odp_packet_t pkt = *pkt_ptr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = pkt_hdr->frame_len;
+ odp_pool_t pool = pkt_hdr->event_hdr.pool;
odp_packet_t newpkt;
if (offset > pktlen)
return -1;
- newpkt = odp_packet_alloc(pkt_hdr->buf_hdr.pool_hdl, pktlen + len);
+ newpkt = odp_packet_alloc(pool, pktlen + len);
if (newpkt == ODP_PACKET_INVALID)
return -1;
@@ -1369,7 +1088,7 @@ int odp_packet_add_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
return -1;
}
- _odp_packet_copy_md_to_packet(pkt, newpkt);
+ _odp_packet_copy_md(packet_hdr(newpkt), pkt_hdr, 0);
odp_packet_free(pkt);
*pkt_ptr = newpkt;
@@ -1381,12 +1100,13 @@ int odp_packet_rem_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
odp_packet_t pkt = *pkt_ptr;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
uint32_t pktlen = pkt_hdr->frame_len;
+ odp_pool_t pool = pkt_hdr->event_hdr.pool;
odp_packet_t newpkt;
- if (offset > pktlen || offset + len > pktlen)
+ if (offset + len >= pktlen)
return -1;
- newpkt = odp_packet_alloc(pkt_hdr->buf_hdr.pool_hdl, pktlen - len);
+ newpkt = odp_packet_alloc(pool, pktlen - len);
if (newpkt == ODP_PACKET_INVALID)
return -1;
@@ -1398,7 +1118,7 @@ int odp_packet_rem_data(odp_packet_t *pkt_ptr, uint32_t offset, uint32_t len)
return -1;
}
- _odp_packet_copy_md_to_packet(pkt, newpkt);
+ _odp_packet_copy_md(packet_hdr(newpkt), pkt_hdr, 0);
odp_packet_free(pkt);
*pkt_ptr = newpkt;
@@ -1412,6 +1132,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
uint32_t shift;
uint32_t seglen = 0; /* GCC */
odp_packet_hdr_t *pkt_hdr = packet_hdr(*pkt);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
void *addr = packet_map(pkt_hdr, offset, &seglen, NULL);
uint64_t uaddr = (uint64_t)(uintptr_t)addr;
uint64_t misalign;
@@ -1419,19 +1140,21 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
if (align > ODP_CACHE_LINE_SIZE)
return -1;
+ _ODP_ASSERT(odp_packet_has_ref(*pkt) == 0);
+
if (seglen >= len) {
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign == 0)
return 0;
shift = align - misalign;
} else {
- if (len > pkt_hdr->buf_hdr.size)
+ if (len > pool->max_seg_len)
return -1;
shift = len - seglen;
uaddr -= shift;
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign)
shift += align - misalign;
}
@@ -1441,7 +1164,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
return rc;
(void)odp_packet_move_data(*pkt, 0, shift,
- _odp_packet_len(*pkt) - shift);
+ odp_packet_len(*pkt) - shift);
(void)odp_packet_trunc_tail(pkt, shift, NULL, NULL);
return 1;
@@ -1451,17 +1174,20 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
{
odp_packet_hdr_t *dst_hdr = packet_hdr(*dst);
odp_packet_hdr_t *src_hdr = packet_hdr(src);
- int dst_segs = dst_hdr->buf_hdr.segcount;
- int src_segs = src_hdr->buf_hdr.segcount;
- odp_pool_t dst_pool = dst_hdr->buf_hdr.pool_hdl;
- odp_pool_t src_pool = src_hdr->buf_hdr.pool_hdl;
- uint32_t dst_len = dst_hdr->frame_len;
- uint32_t src_len = src_hdr->frame_len;
-
- /* Do a copy if resulting packet would be out of segments or packets
- * are from different pools. */
- if (odp_unlikely((dst_segs + src_segs) > CONFIG_PACKET_MAX_SEGS) ||
- odp_unlikely(dst_pool != src_pool)) {
+ pool_t *dst_pool = _odp_pool_entry(dst_hdr->event_hdr.pool);
+ pool_t *src_pool = _odp_pool_entry(src_hdr->event_hdr.pool);
+ uint32_t dst_len = dst_hdr->frame_len;
+ uint32_t src_len = src_hdr->frame_len;
+
+ _ODP_ASSERT(odp_packet_has_ref(*dst) == 0);
+
+ if (odp_unlikely(dst_len + src_len > dst_pool->max_len)) {
+ _ODP_ERR("concat would result oversized packet\n");
+ return -1;
+ }
+
+ /* Do a copy if packets are from different pools. */
+ if (odp_unlikely(dst_pool != src_pool)) {
if (odp_packet_extend_tail(dst, src_len, NULL, NULL) >= 0) {
(void)odp_packet_copy_from_pkt(*dst, dst_len,
src, 0, src_len);
@@ -1474,6 +1200,10 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
return -1;
}
+ if (odp_unlikely(dst_hdr->seg_count + src_hdr->seg_count >
+ PKT_MAX_SEGS))
+ return -1;
+
add_all_segs(dst_hdr, src_hdr);
dst_hdr->frame_len = dst_len + src_len;
@@ -1485,11 +1215,13 @@ int odp_packet_concat(odp_packet_t *dst, odp_packet_t src)
int odp_packet_split(odp_packet_t *pkt, uint32_t len, odp_packet_t *tail)
{
- uint32_t pktlen = _odp_packet_len(*pkt);
+ uint32_t pktlen = odp_packet_len(*pkt);
if (len >= pktlen || tail == NULL)
return -1;
+ _ODP_ASSERT(odp_packet_has_ref(*pkt) == 0);
+
*tail = odp_packet_copy_part(*pkt, len, pktlen - len,
odp_packet_pool(*pkt));
@@ -1510,23 +1242,33 @@ odp_packet_t odp_packet_copy(odp_packet_t pkt, odp_pool_t pool)
{
odp_packet_hdr_t *srchdr = packet_hdr(pkt);
uint32_t pktlen = srchdr->frame_len;
- odp_packet_t newpkt = odp_packet_alloc(pool, pktlen);
+ odp_packet_t newpkt;
+ int md_copy;
- if (newpkt != ODP_PACKET_INVALID) {
- if (_odp_packet_copy_md_to_packet(pkt, newpkt) ||
- odp_packet_copy_from_pkt(newpkt, 0, pkt, 0, pktlen)) {
- odp_packet_free(newpkt);
- newpkt = ODP_PACKET_INVALID;
- }
+ md_copy = _odp_packet_copy_md_possible(pool, odp_packet_pool(pkt));
+ if (odp_unlikely(md_copy < 0)) {
+ _ODP_ERR("Unable to copy packet metadata\n");
+ return ODP_PACKET_INVALID;
}
+ newpkt = odp_packet_alloc(pool, pktlen);
+ if (odp_unlikely(newpkt == ODP_PACKET_INVALID))
+ return ODP_PACKET_INVALID;
+
+ if (odp_unlikely(odp_packet_copy_from_pkt(newpkt, 0, pkt, 0, pktlen))) {
+ odp_packet_free(newpkt);
+ return ODP_PACKET_INVALID;
+ }
+
+ _odp_packet_copy_md(packet_hdr(newpkt), srchdr, 1);
+
return newpkt;
}
odp_packet_t odp_packet_copy_part(odp_packet_t pkt, uint32_t offset,
uint32_t len, odp_pool_t pool)
{
- uint32_t pktlen = _odp_packet_len(pkt);
+ uint32_t pktlen = odp_packet_len(pkt);
odp_packet_t newpkt;
if (offset >= pktlen || offset + len > pktlen)
@@ -1539,54 +1281,6 @@ odp_packet_t odp_packet_copy_part(odp_packet_t pkt, uint32_t offset,
return newpkt;
}
-int odp_packet_copy_to_mem(odp_packet_t pkt, uint32_t offset,
- uint32_t len, void *dst)
-{
- void *mapaddr;
- uint32_t seglen = 0; /* GCC */
- uint32_t cpylen;
- uint8_t *dstaddr = (uint8_t *)dst;
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (offset + len > pkt_hdr->frame_len)
- return -1;
-
- while (len > 0) {
- mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
- cpylen = len > seglen ? seglen : len;
- memcpy(dstaddr, mapaddr, cpylen);
- offset += cpylen;
- dstaddr += cpylen;
- len -= cpylen;
- }
-
- return 0;
-}
-
-int odp_packet_copy_from_mem(odp_packet_t pkt, uint32_t offset,
- uint32_t len, const void *src)
-{
- void *mapaddr;
- uint32_t seglen = 0; /* GCC */
- uint32_t cpylen;
- const uint8_t *srcaddr = (const uint8_t *)src;
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- if (offset + len > pkt_hdr->frame_len)
- return -1;
-
- while (len > 0) {
- mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
- cpylen = len > seglen ? seglen : len;
- memcpy(mapaddr, srcaddr, cpylen);
- offset += cpylen;
- srcaddr += cpylen;
- len -= cpylen;
- }
-
- return 0;
-}
-
int odp_packet_copy_from_pkt(odp_packet_t dst, uint32_t dst_offset,
odp_packet_t src, uint32_t src_offset,
uint32_t len)
@@ -1655,75 +1349,264 @@ int odp_packet_move_data(odp_packet_t pkt, uint32_t dst_offset,
pkt, src_offset, len);
}
+int _odp_packet_set_data(odp_packet_t pkt, uint32_t offset,
+ uint8_t c, uint32_t len)
+{
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t setlen;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (offset + len > pkt_hdr->frame_len)
+ return -1;
+
+ while (len > 0) {
+ mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
+ setlen = len > seglen ? seglen : len;
+ memset(mapaddr, c, setlen);
+ offset += setlen;
+ len -= setlen;
+ }
+
+ return 0;
+}
+
+int _odp_packet_cmp_data(odp_packet_t pkt, uint32_t offset,
+ const void *s, uint32_t len)
+{
+ const uint8_t *ptr = s;
+ void *mapaddr;
+ uint32_t seglen = 0; /* GCC */
+ uint32_t cmplen;
+ int ret;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ _ODP_ASSERT(offset + len <= pkt_hdr->frame_len);
+
+ while (len > 0) {
+ mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
+ cmplen = len > seglen ? seglen : len;
+ ret = memcmp(mapaddr, ptr, cmplen);
+ if (ret != 0)
+ return ret;
+ offset += cmplen;
+ len -= cmplen;
+ ptr += cmplen;
+ }
+
+ return 0;
+}
+
/*
*
* Debugging
* ********************************************************
*
*/
+static int packet_print_input_flags(odp_packet_hdr_t *hdr, char *str, int max)
+{
+ int len = 0;
+
+ if (hdr->p.input_flags.l2)
+ len += _odp_snprint(&str[len], max - len, "l2 ");
+ if (hdr->p.input_flags.l3)
+ len += _odp_snprint(&str[len], max - len, "l3 ");
+ if (hdr->p.input_flags.l4)
+ len += _odp_snprint(&str[len], max - len, "l4 ");
+ if (hdr->p.input_flags.eth)
+ len += _odp_snprint(&str[len], max - len, "eth ");
+ if (hdr->p.input_flags.vlan)
+ len += _odp_snprint(&str[len], max - len, "vlan ");
+ if (hdr->p.input_flags.arp)
+ len += _odp_snprint(&str[len], max - len, "arp ");
+ if (hdr->p.input_flags.ipv4)
+ len += _odp_snprint(&str[len], max - len, "ipv4 ");
+ if (hdr->p.input_flags.ipv6)
+ len += _odp_snprint(&str[len], max - len, "ipv6 ");
+ if (hdr->p.input_flags.ipsec)
+ len += _odp_snprint(&str[len], max - len, "ipsec ");
+ if (hdr->p.input_flags.udp)
+ len += _odp_snprint(&str[len], max - len, "udp ");
+ if (hdr->p.input_flags.tcp)
+ len += _odp_snprint(&str[len], max - len, "tcp ");
+ if (hdr->p.input_flags.sctp)
+ len += _odp_snprint(&str[len], max - len, "sctp ");
+ if (hdr->p.input_flags.icmp)
+ len += _odp_snprint(&str[len], max - len, "icmp ");
+
+ return len;
+}
void odp_packet_print(odp_packet_t pkt)
{
odp_packet_seg_t seg;
- int max_len = 512;
+ int max_len = 4096;
char str[max_len];
int len = 0;
int n = max_len - 1;
odp_packet_hdr_t *hdr = packet_hdr(pkt);
- odp_buffer_t buf = _odp_packet_to_buffer(pkt);
-
- len += snprintf(&str[len], n - len, "Packet ");
- len += odp_buffer_snprint(&str[len], n - len, buf);
- len += snprintf(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
- hdr->p.input_flags.all);
- len += snprintf(&str[len], n - len, " error_flags 0x%" PRIx32 "\n",
- hdr->p.error_flags.all);
- len += snprintf(&str[len], n - len,
- " output_flags 0x%" PRIx32 "\n",
- hdr->p.output_flags.all);
- len += snprintf(&str[len], n - len,
- " l2_offset %" PRIu32 "\n", hdr->p.l2_offset);
- len += snprintf(&str[len], n - len,
- " l3_offset %" PRIu32 "\n", hdr->p.l3_offset);
- len += snprintf(&str[len], n - len,
- " l4_offset %" PRIu32 "\n", hdr->p.l4_offset);
- len += snprintf(&str[len], n - len,
- " frame_len %" PRIu32 "\n", hdr->frame_len);
- len += snprintf(&str[len], n - len,
- " input %" PRIu64 "\n",
- odp_pktio_to_u64(hdr->input));
- len += snprintf(&str[len], n - len,
- " headroom %" PRIu32 "\n",
- odp_packet_headroom(pkt));
- len += snprintf(&str[len], n - len,
- " tailroom %" PRIu32 "\n",
- odp_packet_tailroom(pkt));
- len += snprintf(&str[len], n - len,
- " num_segs %i\n", odp_packet_num_segs(pkt));
+
+ len += _odp_snprint(&str[len], n - len, "Packet info\n");
+ len += _odp_snprint(&str[len], n - len, "-----------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_packet_to_u64(pkt));
+ len += _odp_snprint(&str[len], n - len, " pool index %u\n", hdr->event_hdr.index.pool);
+ len += _odp_snprint(&str[len], n - len, " buf index %u\n",
+ hdr->event_hdr.index.event);
+ len += _odp_snprint(&str[len], n - len, " ev subtype %i\n", hdr->event_hdr.subtype);
+ len += _odp_snprint(&str[len], n - len, " input_flags 0x%" PRIx64 "\n",
+ hdr->p.input_flags.all);
+ if (hdr->p.input_flags.all) {
+ len += _odp_snprint(&str[len], n - len, " ");
+ len += packet_print_input_flags(hdr, &str[len], n - len);
+ len += _odp_snprint(&str[len], n - len, "\n");
+ }
+ len += _odp_snprint(&str[len], n - len,
+ " flags 0x%" PRIx32 "\n", hdr->p.flags.all_flags);
+ len += _odp_snprint(&str[len], n - len,
+ " cls_mark %" PRIu64 "\n", odp_packet_cls_mark(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " user ptr %p\n", hdr->user_ptr);
+ len += _odp_snprint(&str[len], n - len,
+ " user area %p\n", hdr->uarea_addr);
+ len += _odp_snprint(&str[len], n - len,
+ " l2_offset %" PRIu32 "\n", hdr->p.l2_offset);
+ len += _odp_snprint(&str[len], n - len,
+ " l3_offset %" PRIu32 "\n", hdr->p.l3_offset);
+ len += _odp_snprint(&str[len], n - len,
+ " l4_offset %" PRIu32 "\n", hdr->p.l4_offset);
+ len += _odp_snprint(&str[len], n - len,
+ " frame_len %" PRIu32 "\n", hdr->frame_len);
+ len += _odp_snprint(&str[len], n - len,
+ " input %" PRIu64 "\n", odp_pktio_to_u64(hdr->input));
+ len += _odp_snprint(&str[len], n - len,
+ " headroom %" PRIu32 "\n", odp_packet_headroom(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " tailroom %" PRIu32 "\n", odp_packet_tailroom(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " num_segs %i\n", odp_packet_num_segs(pkt));
seg = odp_packet_first_seg(pkt);
- while (seg != ODP_PACKET_SEG_INVALID) {
- len += snprintf(&str[len], n - len,
- " seg_len %" PRIu32 "\n",
- odp_packet_seg_data_len(pkt, seg));
+ for (int seg_idx = 0; seg != ODP_PACKET_SEG_INVALID; seg_idx++) {
+ odp_packet_hdr_t *seg_hdr = packet_seg_to_hdr(seg);
+ char seg_str[max_len];
+ int str_len;
+
+ str_len = _odp_snprint(&seg_str[0], max_len,
+ " [%d] seg_len %-4" PRIu32 " seg_data %p ref_cnt %u\n",
+ seg_idx, odp_packet_seg_data_len(pkt, seg),
+ odp_packet_seg_data(pkt, seg), segment_ref(seg_hdr));
+
+ /* Prevent print buffer overflow */
+ if (n - len - str_len < 10) {
+ len += _odp_snprint(&str[len], n - len, " ...\n");
+ break;
+ }
+ len += _odp_snprint(&str[len], n - len, "%s", seg_str);
seg = odp_packet_next_seg(pkt, seg);
}
- str[len] = '\0';
+ _ODP_PRINT("%s\n", str);
+}
+
+void odp_packet_print_data(odp_packet_t pkt, uint32_t offset,
+ uint32_t byte_len)
+{
+ odp_packet_hdr_t *hdr = packet_hdr(pkt);
+ uint32_t bytes_per_row = 16;
+ int num_rows = (byte_len + bytes_per_row - 1) / bytes_per_row;
+ int max_len = 256 + (3 * byte_len) + (3 * num_rows);
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+ uint32_t data_len = odp_packet_len(pkt);
+ pool_t *pool = _odp_pool_entry(hdr->event_hdr.pool);
+
+ len += _odp_snprint(&str[len], n - len, "Packet data\n");
+ len += _odp_snprint(&str[len], n - len, "-----------\n");
+ len += _odp_snprint(&str[len], n - len,
+ " handle 0x%" PRIx64 "\n", odp_packet_to_u64(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " pool index %" PRIu32 "\n", pool->pool_idx);
+ len += _odp_snprint(&str[len], n - len,
+ " buf index %" PRIu32 "\n", hdr->event_hdr.index.event);
+ len += _odp_snprint(&str[len], n - len,
+ " seg_count %" PRIu16 "\n", hdr->seg_count);
+ len += _odp_snprint(&str[len], n - len,
+ " data len %" PRIu32 "\n", data_len);
+ len += _odp_snprint(&str[len], n - len,
+ " data ptr %p\n", odp_packet_data(pkt));
+ len += _odp_snprint(&str[len], n - len,
+ " print offset %" PRIu32 "\n", offset);
+ len += _odp_snprint(&str[len], n - len,
+ " print length %" PRIu32 "\n", byte_len);
+
+ if (offset + byte_len > data_len) {
+ len += _odp_snprint(&str[len], n - len, " BAD OFFSET OR LEN\n");
+ _ODP_PRINT("%s\n", str);
+ return;
+ }
+
+ while (byte_len) {
+ uint32_t copy_len;
+ uint8_t data[bytes_per_row];
+ uint32_t i;
+
+ if (byte_len > bytes_per_row)
+ copy_len = bytes_per_row;
+ else
+ copy_len = byte_len;
+
+ odp_packet_copy_to_mem(pkt, offset, copy_len, data);
+
+ len += _odp_snprint(&str[len], n - len, " ");
+
+ for (i = 0; i < copy_len; i++)
+ len += _odp_snprint(&str[len], n - len, " %02x", data[i]);
+
+ len += _odp_snprint(&str[len], n - len, "\n");
- ODP_PRINT("\n%s\n", str);
+ byte_len -= copy_len;
+ offset += copy_len;
+ }
+
+ _ODP_PRINT("%s\n", str);
}
int odp_packet_is_valid(odp_packet_t pkt)
{
- if (odp_buffer_is_valid(_odp_packet_to_buffer(pkt)) == 0)
+ odp_event_t ev;
+
+ if (pkt == ODP_PACKET_INVALID)
+ return 0;
+
+ ev = odp_packet_to_event(pkt);
+
+ if (_odp_event_is_valid(ev) == 0)
return 0;
- if (odp_event_type(odp_packet_to_event(pkt)) != ODP_EVENT_PACKET)
+ if (odp_event_type(ev) != ODP_EVENT_PACKET)
return 0;
+ if (odp_unlikely(_odp_packet_validate(pkt, _ODP_EV_PACKET_IS_VALID)))
+ return 0;
+
+ switch (odp_event_subtype(ev)) {
+ case ODP_EVENT_PACKET_BASIC:
+ /* Fall through */
+ case ODP_EVENT_PACKET_COMP:
+ /* Fall through */
+ case ODP_EVENT_PACKET_CRYPTO:
+ /* Fall through */
+ case ODP_EVENT_PACKET_IPSEC:
+ /* Fall through */
+ break;
+ default:
+ return 0;
+ }
+
return 1;
}
@@ -1734,406 +1617,465 @@ int odp_packet_is_valid(odp_packet_t pkt)
*
*/
-int _odp_packet_copy_md_to_packet(odp_packet_t srcpkt, odp_packet_t dstpkt)
+static uint64_t packet_sum_partial(odp_packet_hdr_t *pkt_hdr,
+ uint32_t l3_offset,
+ uint32_t offset,
+ uint32_t len)
{
- odp_packet_hdr_t *srchdr = packet_hdr(srcpkt);
- odp_packet_hdr_t *dsthdr = packet_hdr(dstpkt);
+ uint64_t sum = 0;
- dsthdr->input = srchdr->input;
- dsthdr->dst_queue = srchdr->dst_queue;
- dsthdr->buf_hdr.buf_u64 = srchdr->buf_hdr.buf_u64;
- if (dsthdr->buf_hdr.uarea_addr != NULL &&
- srchdr->buf_hdr.uarea_addr != NULL)
- memcpy(dsthdr->buf_hdr.uarea_addr,
- srchdr->buf_hdr.uarea_addr,
- dsthdr->buf_hdr.uarea_size <=
- srchdr->buf_hdr.uarea_size ?
- dsthdr->buf_hdr.uarea_size :
- srchdr->buf_hdr.uarea_size);
+ if (offset + len > pkt_hdr->frame_len)
+ return 0;
+
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
- copy_packet_parser_metadata(srchdr, dsthdr);
+ if (seglen > len)
+ seglen = len;
- /* Metadata copied, but return indication of whether the packet
- * user area was truncated in the process. Note this can only
- * happen when copying between different pools.
- */
- return dsthdr->buf_hdr.uarea_size < srchdr->buf_hdr.uarea_size;
+ sum += chksum_partial(mapaddr, seglen, offset - l3_offset);
+ len -= seglen;
+ offset += seglen;
+ }
+
+ return sum;
}
-/**
- * Parser helper function for IPv4
- */
-static inline uint8_t parse_ipv4(packet_parser_t *prs, const uint8_t **parseptr,
- uint32_t *offset, uint32_t frame_len)
+static inline uint16_t packet_sum(odp_packet_hdr_t *pkt_hdr,
+ uint32_t l3_offset,
+ uint32_t offset,
+ uint32_t len,
+ uint64_t sum)
{
- const _odp_ipv4hdr_t *ipv4 = (const _odp_ipv4hdr_t *)*parseptr;
- uint8_t ver = _ODP_IPV4HDR_VER(ipv4->ver_ihl);
- uint8_t ihl = _ODP_IPV4HDR_IHL(ipv4->ver_ihl);
- uint16_t frag_offset;
- uint32_t dstaddr = odp_be_to_cpu_32(ipv4->dst_addr);
+ sum += packet_sum_partial(pkt_hdr, l3_offset, offset, len);
+ return chksum_finalize(sum);
+}
- prs->l3_len = odp_be_to_cpu_16(ipv4->tot_len);
+static uint32_t packet_sum_crc32c(odp_packet_hdr_t *pkt_hdr,
+ uint32_t offset,
+ uint32_t len,
+ uint32_t init_val)
+{
+ uint32_t sum = init_val;
- if (odp_unlikely(ihl < _ODP_IPV4HDR_IHL_MIN) ||
- odp_unlikely(ver != 4) ||
- (prs->l3_len > frame_len - *offset)) {
- prs->error_flags.ip_err = 1;
- return 0;
+ if (offset + len > pkt_hdr->frame_len)
+ return sum;
+
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC */
+ void *mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
+
+ if (seglen > len)
+ seglen = len;
+
+ sum = odp_hash_crc32c(mapaddr, seglen, sum);
+ len -= seglen;
+ offset += seglen;
}
- *offset += ihl * 4;
- *parseptr += ihl * 4;
+ return sum;
+}
- if (odp_unlikely(ihl > _ODP_IPV4HDR_IHL_MIN))
- prs->input_flags.ipopt = 1;
+static inline int packet_ipv4_chksum(odp_packet_t pkt,
+ uint32_t offset,
+ _odp_ipv4hdr_t *ip,
+ odp_u16sum_t *chksum)
+{
+ unsigned int nleft = _ODP_IPV4HDR_IHL(ip->ver_ihl) * 4;
+ uint16_t buf[nleft / 2];
+ int res;
- /* A packet is a fragment if:
- * "more fragments" flag is set (all fragments except the last)
- * OR
- * "fragment offset" field is nonzero (all fragments except the first)
- */
- frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
- if (odp_unlikely(_ODP_IPV4HDR_IS_FRAGMENT(frag_offset)))
- prs->input_flags.ipfrag = 1;
+ if (odp_unlikely(nleft < sizeof(*ip)))
+ return -1;
+ ip->chksum = 0;
+ memcpy(buf, ip, sizeof(*ip));
+ res = odp_packet_copy_to_mem(pkt, offset + sizeof(*ip),
+ nleft - sizeof(*ip),
+ buf + sizeof(*ip) / 2);
+ if (odp_unlikely(res < 0))
+ return res;
- /* Handle IPv4 broadcast / multicast */
- prs->input_flags.ip_bcast = (dstaddr == 0xffffffff);
- prs->input_flags.ip_mcast = (dstaddr >> 28) == 0xd;
+ *chksum = ~chksum_finalize(chksum_partial(buf, nleft, 0));
- return ipv4->proto;
+ return 0;
}
+#define _ODP_IPV4HDR_CSUM_OFFSET ODP_OFFSETOF(_odp_ipv4hdr_t, chksum)
+#define _ODP_IPV4ADDR_OFFSSET ODP_OFFSETOF(_odp_ipv4hdr_t, src_addr)
+#define _ODP_IPV6ADDR_OFFSSET ODP_OFFSETOF(_odp_ipv6hdr_t, src_addr)
+#define _ODP_UDP_LEN_OFFSET ODP_OFFSETOF(_odp_udphdr_t, length)
+#define _ODP_UDP_CSUM_OFFSET ODP_OFFSETOF(_odp_udphdr_t, chksum)
+
+
/**
- * Parser helper function for IPv6
+ * Calculate and fill in IPv4 checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-static inline uint8_t parse_ipv6(packet_parser_t *prs, const uint8_t **parseptr,
- uint32_t *offset, uint32_t frame_len,
- uint32_t seg_len)
+int _odp_packet_ipv4_chksum_insert(odp_packet_t pkt)
{
- const _odp_ipv6hdr_t *ipv6 = (const _odp_ipv6hdr_t *)*parseptr;
- const _odp_ipv6hdr_ext_t *ipv6ext;
- uint32_t dstaddr0 = odp_be_to_cpu_32(ipv6->dst_addr.u8[0]);
-
- prs->l3_len = odp_be_to_cpu_16(ipv6->payload_len) +
- _ODP_IPV6HDR_LEN;
-
- /* Basic sanity checks on IPv6 header */
- if ((odp_be_to_cpu_32(ipv6->ver_tc_flow) >> 28) != 6 ||
- prs->l3_len > frame_len - *offset) {
- prs->error_flags.ip_err = 1;
- return 0;
- }
+ uint32_t offset;
+ _odp_ipv4hdr_t ip;
+ odp_u16sum_t chksum;
+ int res;
- /* IPv6 broadcast / multicast flags */
- prs->input_flags.ip_mcast = (dstaddr0 & 0xff000000) == 0xff000000;
- prs->input_flags.ip_bcast = 0;
+ offset = odp_packet_l3_offset(pkt);
+ if (offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
- /* Skip past IPv6 header */
- *offset += sizeof(_odp_ipv6hdr_t);
- *parseptr += sizeof(_odp_ipv6hdr_t);
+ res = odp_packet_copy_to_mem(pkt, offset, sizeof(ip), &ip);
+ if (odp_unlikely(res < 0))
+ return res;
- /* Skip past any IPv6 extension headers */
- if (ipv6->next_hdr == _ODP_IPPROTO_HOPOPTS ||
- ipv6->next_hdr == _ODP_IPPROTO_ROUTE) {
- prs->input_flags.ipopt = 1;
+ res = packet_ipv4_chksum(pkt, offset, &ip, &chksum);
+ if (odp_unlikely(res < 0))
+ return res;
- do {
- ipv6ext = (const _odp_ipv6hdr_ext_t *)*parseptr;
- uint16_t extlen = 8 + ipv6ext->ext_len * 8;
+ return odp_packet_copy_from_mem(pkt,
+ offset + _ODP_IPV4HDR_CSUM_OFFSET,
+ 2, &chksum);
+}
- *offset += extlen;
- *parseptr += extlen;
- } while ((ipv6ext->next_hdr == _ODP_IPPROTO_HOPOPTS ||
- ipv6ext->next_hdr == _ODP_IPPROTO_ROUTE) &&
- *offset < seg_len);
+static int _odp_packet_tcp_udp_chksum_insert(odp_packet_t pkt, uint16_t proto)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t zero = 0;
+ uint64_t sum;
+ uint16_t l3_ver;
+ uint16_t chksum;
+ uint32_t chksum_offset;
- if (*offset >= prs->l3_offset +
- odp_be_to_cpu_16(ipv6->payload_len)) {
- prs->error_flags.ip_err = 1;
- return 0;
- }
+ if (pkt_hdr->p.l3_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
+ if (pkt_hdr->p.l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
- if (ipv6ext->next_hdr == _ODP_IPPROTO_FRAG)
- prs->input_flags.ipfrag = 1;
+ odp_packet_copy_to_mem(pkt, pkt_hdr->p.l3_offset, 2, &l3_ver);
- return ipv6ext->next_hdr;
- }
+ if (_ODP_IPV4HDR_VER(l3_ver) == _ODP_IPV4)
+ sum = packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l3_offset +
+ _ODP_IPV4ADDR_OFFSSET,
+ 2 * _ODP_IPV4ADDR_LEN);
+ else
+ sum = packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l3_offset +
+ _ODP_IPV6ADDR_OFFSSET,
+ 2 * _ODP_IPV6ADDR_LEN);
+#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
+ sum += proto;
+#else
+ sum += proto << 8;
+#endif
- if (odp_unlikely(ipv6->next_hdr == _ODP_IPPROTO_FRAG)) {
- prs->input_flags.ipopt = 1;
- prs->input_flags.ipfrag = 1;
+ if (proto == _ODP_IPPROTO_TCP) {
+ sum += odp_cpu_to_be_16(pkt_hdr->frame_len -
+ pkt_hdr->p.l4_offset);
+ chksum_offset = pkt_hdr->p.l4_offset + ODP_OFFSETOF(_odp_tcphdr_t, cksm);
+ } else {
+ sum += packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset +
+ _ODP_UDP_LEN_OFFSET,
+ 2);
+ chksum_offset = pkt_hdr->p.l4_offset + _ODP_UDP_CSUM_OFFSET;
}
+ odp_packet_copy_from_mem(pkt, chksum_offset, 2, &zero);
- return ipv6->next_hdr;
-}
+ sum += packet_sum_partial(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset,
+ pkt_hdr->frame_len -
+ pkt_hdr->p.l4_offset);
-/**
- * Parser helper function for TCP
- */
-static inline void parse_tcp(packet_parser_t *prs,
- const uint8_t **parseptr, uint32_t *offset)
-{
- const _odp_tcphdr_t *tcp = (const _odp_tcphdr_t *)*parseptr;
+ chksum = ~chksum_finalize(sum);
- if (tcp->hl < sizeof(_odp_tcphdr_t) / sizeof(uint32_t))
- prs->error_flags.tcp_err = 1;
- else if ((uint32_t)tcp->hl * 4 > sizeof(_odp_tcphdr_t))
- prs->input_flags.tcpopt = 1;
+ if (proto == _ODP_IPPROTO_UDP && chksum == 0)
+ chksum = 0xffff;
- prs->l4_len = prs->l3_len +
- prs->l3_offset - prs->l4_offset;
-
- if (offset)
- *offset += (uint32_t)tcp->hl * 4;
- *parseptr += (uint32_t)tcp->hl * 4;
+ return odp_packet_copy_from_mem(pkt,
+ chksum_offset,
+ 2, &chksum);
}
/**
- * Parser helper function for UDP
+ * Calculate and fill in TCP checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-static inline void parse_udp(packet_parser_t *prs,
- const uint8_t **parseptr, uint32_t *offset)
+int _odp_packet_tcp_chksum_insert(odp_packet_t pkt)
{
- const _odp_udphdr_t *udp = (const _odp_udphdr_t *)*parseptr;
- uint32_t udplen = odp_be_to_cpu_16(udp->length);
-
- if (udplen < sizeof(_odp_udphdr_t) ||
- udplen > (prs->l3_len +
- prs->l4_offset - prs->l3_offset)) {
- prs->error_flags.udp_err = 1;
- }
-
- prs->l4_len = udplen;
-
- if (offset)
- *offset += sizeof(_odp_udphdr_t);
- *parseptr += sizeof(_odp_udphdr_t);
+ return _odp_packet_tcp_udp_chksum_insert(pkt, _ODP_IPPROTO_TCP);
}
/**
- * Initialize L2 related parser flags and metadata
+ * Calculate and fill in UDP checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-void packet_parse_l2(packet_parser_t *prs, uint32_t frame_len)
+int _odp_packet_udp_chksum_insert(odp_packet_t pkt)
{
- /* Packet alloc or reset have already init other offsets and flags */
-
- /* We only support Ethernet for now */
- prs->input_flags.eth = 1;
-
- /* Detect jumbo frames */
- if (frame_len > _ODP_ETH_LEN_MAX)
- prs->input_flags.jumbo = 1;
-
- /* Assume valid L2 header, no CRC/FCS check in SW */
- prs->input_flags.l2 = 1;
-
- prs->input_flags.parsed_l2 = 1;
+ return _odp_packet_tcp_udp_chksum_insert(pkt, _ODP_IPPROTO_UDP);
}
/**
- * Parse common packet headers up to given layer
+ * Calculate and fill in SCTP checksum
*
- * The function expects at least PACKET_PARSE_SEG_LEN bytes of data to be
- * available from the ptr.
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-int packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
- uint32_t frame_len, uint32_t seg_len, layer_t layer)
+int _odp_packet_sctp_chksum_insert(odp_packet_t pkt)
{
- uint32_t offset;
- const uint8_t *parseptr;
-
- switch (prs->parsed_layers) {
- case LAYER_NONE:
- /* Fall through */
-
- case LAYER_L2:
- {
- const _odp_ethhdr_t *eth;
- uint16_t macaddr0, macaddr2, macaddr4;
- const _odp_vlanhdr_t *vlan;
-
- offset = sizeof(_odp_ethhdr_t);
- if (packet_parse_l2_not_done(prs))
- packet_parse_l2(prs, frame_len);
-
- eth = (const _odp_ethhdr_t *)ptr;
-
- /* Handle Ethernet broadcast/multicast addresses */
- macaddr0 = odp_be_to_cpu_16(*((const uint16_t *)
- (const void *)eth));
- prs->input_flags.eth_mcast = (macaddr0 & 0x0100) == 0x0100;
-
- if (macaddr0 == 0xffff) {
- macaddr2 =
- odp_be_to_cpu_16(*((const uint16_t *)
- (const void *)eth + 1));
- macaddr4 =
- odp_be_to_cpu_16(*((const uint16_t *)
- (const void *)eth + 2));
- prs->input_flags.eth_bcast =
- (macaddr2 == 0xffff) && (macaddr4 == 0xffff);
- } else {
- prs->input_flags.eth_bcast = 0;
- }
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t sum;
- /* Get Ethertype */
- prs->ethtype = odp_be_to_cpu_16(eth->type);
- parseptr = (const uint8_t *)(eth + 1);
+ if (pkt_hdr->p.l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
- /* Check for SNAP vs. DIX */
- if (prs->ethtype < _ODP_ETH_LEN_MAX) {
- prs->input_flags.snap = 1;
- if (prs->ethtype > frame_len - offset) {
- prs->error_flags.snap_len = 1;
- goto parse_exit;
- }
- prs->ethtype = odp_be_to_cpu_16(*((const uint16_t *)
- (uintptr_t)
- (parseptr + 6)));
- offset += 8;
- parseptr += 8;
+ sum = 0;
+ odp_packet_copy_from_mem(pkt, pkt_hdr->p.l4_offset + 8, 4, &sum);
+ sum = ~packet_sum_crc32c(pkt_hdr, pkt_hdr->p.l4_offset,
+ pkt_hdr->frame_len - pkt_hdr->p.l4_offset,
+ ~0);
+ return odp_packet_copy_from_mem(pkt, pkt_hdr->p.l4_offset + 8, 4, &sum);
+}
+
+int _odp_packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
+ odp_pktin_config_opt_t opt, uint64_t l4_part_sum)
+{
+ /* UDP chksum == 0 case is covered in parse_udp() */
+ if (opt.bit.udp_chksum &&
+ pkt_hdr->p.input_flags.udp &&
+ !pkt_hdr->p.input_flags.ipfrag &&
+ !pkt_hdr->p.input_flags.udp_chksum_zero) {
+ uint16_t sum = ~packet_sum(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset,
+ pkt_hdr->frame_len -
+ pkt_hdr->p.l4_offset,
+ l4_part_sum);
+
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != 0) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.udp_err = 1;
+ _ODP_DBG("UDP chksum fail (%x)!\n", sum);
+ if (opt.bit.drop_udp_err)
+ return -1;
}
+ }
- /* Parse the VLAN header(s), if present */
- if (prs->ethtype == _ODP_ETHTYPE_VLAN_OUTER) {
- prs->input_flags.vlan_qinq = 1;
- prs->input_flags.vlan = 1;
-
- vlan = (const _odp_vlanhdr_t *)parseptr;
- prs->ethtype = odp_be_to_cpu_16(vlan->type);
- offset += sizeof(_odp_vlanhdr_t);
- parseptr += sizeof(_odp_vlanhdr_t);
+ if (opt.bit.tcp_chksum &&
+ pkt_hdr->p.input_flags.tcp &&
+ !pkt_hdr->p.input_flags.ipfrag) {
+ uint16_t sum = ~packet_sum(pkt_hdr,
+ pkt_hdr->p.l3_offset,
+ pkt_hdr->p.l4_offset,
+ pkt_hdr->frame_len -
+ pkt_hdr->p.l4_offset,
+ l4_part_sum);
+
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != 0) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.tcp_err = 1;
+ _ODP_DBG("TCP chksum fail (%x)!\n", sum);
+ if (opt.bit.drop_tcp_err)
+ return -1;
}
+ }
- if (prs->ethtype == _ODP_ETHTYPE_VLAN) {
- prs->input_flags.vlan = 1;
- vlan = (const _odp_vlanhdr_t *)parseptr;
- prs->ethtype = odp_be_to_cpu_16(vlan->type);
- offset += sizeof(_odp_vlanhdr_t);
- parseptr += sizeof(_odp_vlanhdr_t);
+ if (opt.bit.sctp_chksum &&
+ pkt_hdr->p.input_flags.sctp &&
+ !pkt_hdr->p.input_flags.ipfrag) {
+ uint32_t seg_len = 0;
+ _odp_sctphdr_t hdr_copy;
+ uint32_t sum = ~packet_sum_crc32c(pkt_hdr,
+ pkt_hdr->p.l4_offset +
+ _ODP_SCTPHDR_LEN,
+ pkt_hdr->frame_len -
+ pkt_hdr->p.l4_offset -
+ _ODP_SCTPHDR_LEN,
+ l4_part_sum);
+ _odp_sctphdr_t *sctp = packet_map(pkt_hdr,
+ pkt_hdr->p.l4_offset,
+ &seg_len, NULL);
+ if (odp_unlikely(seg_len < sizeof(*sctp))) {
+ odp_packet_t pkt = packet_handle(pkt_hdr);
+
+ sctp = &hdr_copy;
+ odp_packet_copy_to_mem(pkt, pkt_hdr->p.l4_offset,
+ sizeof(*sctp), sctp);
+ }
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != sctp->chksum) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.sctp_err = 1;
+ _ODP_DBG("SCTP chksum fail (%x/%x)!\n", sum, sctp->chksum);
+ if (opt.bit.drop_sctp_err)
+ return -1;
}
-
- prs->l3_offset = offset;
- prs->parsed_layers = LAYER_L2;
- if (layer == LAYER_L2)
- return prs->error_flags.all != 0;
}
- /* Fall through */
-
- case LAYER_L3:
- {
- offset = prs->l3_offset;
- parseptr = (const uint8_t *)(ptr + offset);
- /* Set l3_offset+flag only for known ethtypes */
- prs->input_flags.l3 = 1;
-
- /* Parse Layer 3 headers */
- switch (prs->ethtype) {
- case _ODP_ETHTYPE_IPV4:
- prs->input_flags.ipv4 = 1;
- prs->ip_proto = parse_ipv4(prs, &parseptr, &offset,
- frame_len);
- break;
- case _ODP_ETHTYPE_IPV6:
- prs->input_flags.ipv6 = 1;
- prs->ip_proto = parse_ipv6(prs, &parseptr, &offset,
- frame_len, seg_len);
- break;
-
- case _ODP_ETHTYPE_ARP:
- prs->input_flags.arp = 1;
- prs->ip_proto = 255; /* Reserved invalid by IANA */
- break;
+ return pkt_hdr->p.flags.all.error != 0;
+}
- default:
- prs->input_flags.l3 = 0;
- prs->l3_offset = ODP_PACKET_OFFSET_INVALID;
- prs->ip_proto = 255; /* Reserved invalid by IANA */
- }
+int odp_packet_parse(odp_packet_t pkt, uint32_t offset,
+ const odp_packet_parse_param_t *param)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ const uint8_t *data;
+ uint32_t seg_len;
+ uint32_t seg_end;
+ uint32_t packet_len = pkt_hdr->frame_len;
+ odp_proto_t proto = param->proto;
+ odp_proto_layer_t layer = param->last_layer;
+ int ret;
+ uint16_t ethtype;
+ uint64_t l4_part_sum = 0;
+ const uint32_t min_seglen = PARSE_ETH_BYTES + PARSE_L3_L4_BYTES;
+ uint8_t buf[min_seglen];
+ odp_pktin_config_opt_t opt;
- /* Set l4_offset+flag only for known ip_proto */
- prs->l4_offset = offset;
- prs->parsed_layers = LAYER_L3;
- if (layer == LAYER_L3)
- return prs->error_flags.all != 0;
- }
- /* Fall through */
-
- case LAYER_L4:
- {
- offset = prs->l4_offset;
- parseptr = (const uint8_t *)(ptr + offset);
- prs->input_flags.l4 = 1;
-
- /* Parse Layer 4 headers */
- switch (prs->ip_proto) {
- case _ODP_IPPROTO_ICMP:
- prs->input_flags.icmp = 1;
- break;
+ if (proto == ODP_PROTO_NONE || layer == ODP_PROTO_LAYER_NONE)
+ return -1;
- case _ODP_IPPROTO_TCP:
- if (odp_unlikely(offset + _ODP_TCPHDR_LEN > seg_len))
- return -1;
- prs->input_flags.tcp = 1;
- parse_tcp(prs, &parseptr, NULL);
- break;
+ data = packet_map(pkt_hdr, offset, &seg_len, NULL);
- case _ODP_IPPROTO_UDP:
- if (odp_unlikely(offset + _ODP_UDPHDR_LEN > seg_len))
- return -1;
- prs->input_flags.udp = 1;
- parse_udp(prs, &parseptr, NULL);
- break;
+ if (data == NULL)
+ return -1;
- case _ODP_IPPROTO_AH:
- prs->input_flags.ipsec = 1;
- prs->input_flags.ipsec_ah = 1;
- break;
+ /*
+ * We must not have a packet segment boundary within the parsed
+ * packet data range. Copy enough data to a temporary buffer for
+ * parsing if necessary.
+ */
+ if (odp_unlikely(pkt_hdr->seg_count > 1) &&
+ odp_unlikely(seg_len < min_seglen)) {
+ seg_len = min_seglen;
+ if (seg_len > packet_len - offset)
+ seg_len = packet_len - offset;
+ odp_packet_copy_to_mem(pkt, offset, seg_len, buf);
+ data = buf;
+ }
- case _ODP_IPPROTO_ESP:
- prs->input_flags.ipsec = 1;
- prs->input_flags.ipsec_esp = 1;
- break;
+ seg_end = offset + seg_len; /* one past the maximum offset */
- case _ODP_IPPROTO_SCTP:
- prs->input_flags.sctp = 1;
- break;
+ /* Reset parser flags, keep other flags */
+ packet_parse_reset(pkt_hdr, 0);
- default:
- prs->input_flags.l4 = 0;
- prs->l4_offset = ODP_PACKET_OFFSET_INVALID;
- break;
- }
+ if (proto == ODP_PROTO_ETH) {
+ /* Assume valid L2 header, no CRC/FCS check in SW */
+ pkt_hdr->p.l2_offset = offset;
- prs->parsed_layers = LAYER_L4;
- break;
+ ethtype = _odp_parse_eth(&pkt_hdr->p, &data, &offset, packet_len);
+ } else if (proto == ODP_PROTO_IPV4) {
+ ethtype = _ODP_ETHTYPE_IPV4;
+ } else if (proto == ODP_PROTO_IPV6) {
+ ethtype = _ODP_ETHTYPE_IPV6;
+ } else {
+ ethtype = 0; /* Invalid */
}
- case LAYER_ALL:
- break;
+ opt.all_bits = 0;
+ opt.bit.ipv4_chksum = param->chksums.chksum.ipv4;
+ opt.bit.udp_chksum = param->chksums.chksum.udp;
+ opt.bit.tcp_chksum = param->chksums.chksum.tcp;
+ opt.bit.sctp_chksum = param->chksums.chksum.sctp;
- default:
- ODP_ERR("Invalid parse layer: %d\n", (int)layer);
+ ret = _odp_packet_parse_common_l3_l4(&pkt_hdr->p, data, offset,
+ packet_len, seg_end, layer,
+ ethtype, &l4_part_sum, opt);
+
+ if (ret)
return -1;
+
+ if (layer >= ODP_PROTO_LAYER_L4) {
+ ret = _odp_packet_l4_chksum(pkt_hdr, opt, l4_part_sum);
+ if (ret)
+ return -1;
}
- prs->parsed_layers = LAYER_ALL;
+ return 0;
+}
+
+int odp_packet_parse_multi(const odp_packet_t pkt[], const uint32_t offset[],
+ int num, const odp_packet_parse_param_t *param)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ if (odp_packet_parse(pkt[i], offset[i], param))
+ return i;
-parse_exit:
- return prs->error_flags.all != 0;
+ return num;
}
-/**
- * Simple packet parser
- */
-int packet_parse_layer(odp_packet_hdr_t *pkt_hdr, layer_t layer)
+void odp_packet_parse_result(odp_packet_t pkt,
+ odp_packet_parse_result_t *result)
+{
+ /* TODO: optimize to single word copy when packet header stores bits
+ * directly into odp_packet_parse_result_flag_t */
+ result->flag.all = 0;
+ result->flag.has_error = odp_packet_has_error(pkt);
+ result->flag.has_l2_error = odp_packet_has_l2_error(pkt);
+ result->flag.has_l3_error = odp_packet_has_l3_error(pkt);
+ result->flag.has_l4_error = odp_packet_has_l4_error(pkt);
+ result->flag.has_l2 = odp_packet_has_l2(pkt);
+ result->flag.has_l3 = odp_packet_has_l3(pkt);
+ result->flag.has_l4 = odp_packet_has_l4(pkt);
+ result->flag.has_eth = odp_packet_has_eth(pkt);
+ result->flag.has_eth_bcast = odp_packet_has_eth_bcast(pkt);
+ result->flag.has_eth_mcast = odp_packet_has_eth_mcast(pkt);
+ result->flag.has_jumbo = odp_packet_has_jumbo(pkt);
+ result->flag.has_vlan = odp_packet_has_vlan(pkt);
+ result->flag.has_vlan_qinq = odp_packet_has_vlan_qinq(pkt);
+ result->flag.has_arp = odp_packet_has_arp(pkt);
+ result->flag.has_ipv4 = odp_packet_has_ipv4(pkt);
+ result->flag.has_ipv6 = odp_packet_has_ipv6(pkt);
+ result->flag.has_ip_bcast = odp_packet_has_ip_bcast(pkt);
+ result->flag.has_ip_mcast = odp_packet_has_ip_mcast(pkt);
+ result->flag.has_ipfrag = odp_packet_has_ipfrag(pkt);
+ result->flag.has_ipopt = odp_packet_has_ipopt(pkt);
+ result->flag.has_ipsec = odp_packet_has_ipsec(pkt);
+ result->flag.has_udp = odp_packet_has_udp(pkt);
+ result->flag.has_tcp = odp_packet_has_tcp(pkt);
+ result->flag.has_sctp = odp_packet_has_sctp(pkt);
+ result->flag.has_icmp = odp_packet_has_icmp(pkt);
+
+ result->packet_len = odp_packet_len(pkt);
+ result->l2_offset = odp_packet_l2_offset(pkt);
+ result->l3_offset = odp_packet_l3_offset(pkt);
+ result->l4_offset = odp_packet_l4_offset(pkt);
+ result->l3_chksum_status = odp_packet_l3_chksum_status(pkt);
+ result->l4_chksum_status = odp_packet_l4_chksum_status(pkt);
+ result->l2_type = odp_packet_l2_type(pkt);
+ result->l3_type = odp_packet_l3_type(pkt);
+ result->l4_type = odp_packet_l4_type(pkt);
+}
+
+void odp_packet_parse_result_multi(const odp_packet_t pkt[],
+ odp_packet_parse_result_t *result[],
+ int num)
{
- uint32_t seg_len = packet_first_seg_len(pkt_hdr);
- void *base = packet_data(pkt_hdr);
+ int i;
- return packet_parse_common(&pkt_hdr->p, base, pkt_hdr->frame_len,
- seg_len, layer);
+ for (i = 0; i < num; i++)
+ odp_packet_parse_result(pkt[i], result[i]);
}
uint64_t odp_packet_to_u64(odp_packet_t hdl)
@@ -2148,7 +2090,14 @@ uint64_t odp_packet_seg_to_u64(odp_packet_seg_t hdl)
odp_packet_t odp_packet_ref_static(odp_packet_t pkt)
{
- return odp_packet_copy(pkt, odp_packet_pool(pkt));
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ while (pkt_hdr != NULL) {
+ segment_ref_inc(pkt_hdr);
+ pkt_hdr = pkt_hdr->seg_next;
+ }
+
+ return pkt;
}
odp_packet_t odp_packet_ref(odp_packet_t pkt, uint32_t offset)
@@ -2159,14 +2108,14 @@ odp_packet_t odp_packet_ref(odp_packet_t pkt, uint32_t offset)
new = odp_packet_copy(pkt, odp_packet_pool(pkt));
if (new == ODP_PACKET_INVALID) {
- ODP_ERR("copy failed\n");
+ _ODP_ERR("copy failed\n");
return ODP_PACKET_INVALID;
}
ret = odp_packet_trunc_head(&new, offset, NULL, NULL);
if (ret < 0) {
- ODP_ERR("trunk_head failed\n");
+ _ODP_ERR("trunc_head failed\n");
odp_packet_free(new);
return ODP_PACKET_INVALID;
}
@@ -2177,31 +2126,21 @@ odp_packet_t odp_packet_ref(odp_packet_t pkt, uint32_t offset)
odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
odp_packet_t hdr)
{
- odp_packet_t new;
+ odp_packet_t ref;
int ret;
- new = odp_packet_copy(pkt, odp_packet_pool(pkt));
+ ref = odp_packet_ref(pkt, offset);
- if (new == ODP_PACKET_INVALID) {
- ODP_ERR("copy failed\n");
+ if (ref == ODP_PACKET_INVALID) {
+ _ODP_DBG("reference create failed\n");
return ODP_PACKET_INVALID;
}
- if (offset) {
- ret = odp_packet_trunc_head(&new, offset, NULL, NULL);
-
- if (ret < 0) {
- ODP_ERR("trunk_head failed\n");
- odp_packet_free(new);
- return ODP_PACKET_INVALID;
- }
- }
-
- ret = odp_packet_concat(&hdr, new);
+ ret = odp_packet_concat(&hdr, ref);
if (ret < 0) {
- ODP_ERR("concat failed\n");
- odp_packet_free(new);
+ _ODP_DBG("concat failed\n");
+ odp_packet_free(ref);
return ODP_PACKET_INVALID;
}
@@ -2210,17 +2149,273 @@ odp_packet_t odp_packet_ref_pkt(odp_packet_t pkt, uint32_t offset,
int odp_packet_has_ref(odp_packet_t pkt)
{
- (void)pkt;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t ref_cnt;
+
+ while (pkt_hdr != NULL) {
+ ref_cnt = segment_ref(pkt_hdr);
+
+ if (is_multi_ref(ref_cnt))
+ return 1;
+
+ pkt_hdr = pkt_hdr->seg_next;
+ }
return 0;
}
-uint32_t odp_packet_unshared_len(odp_packet_t pkt)
+void odp_packet_lso_request_clr(odp_packet_t pkt)
{
- return odp_packet_len(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->p.flags.lso = 0;
}
-/* Include non-inlined versions of API functions */
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/packet_inlines_api.h>
-#endif
+int odp_packet_has_lso_request(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ return pkt_hdr->p.flags.lso;
+}
+
+uint32_t odp_packet_payload_offset(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (pkt_hdr->p.flags.payload_off)
+ return pkt_hdr->payload_offset;
+
+ return ODP_PACKET_OFFSET_INVALID;
+}
+
+int odp_packet_payload_offset_set(odp_packet_t pkt, uint32_t offset)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->p.flags.payload_off = 1;
+ pkt_hdr->payload_offset = offset;
+
+ return 0;
+}
+
+void odp_packet_aging_tmo_set(odp_packet_t pkt, uint64_t tmo_ns)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ pkt_hdr->p.flags.tx_aging = tmo_ns ? 1 : 0;
+ pkt_hdr->tx_aging_ns = tmo_ns;
+}
+
+uint64_t odp_packet_aging_tmo(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ return pkt_hdr->p.flags.tx_aging ? pkt_hdr->tx_aging_ns : 0;
+}
+
+int odp_packet_tx_compl_request(odp_packet_t pkt, const odp_packet_tx_compl_opt_t *opt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ switch (opt->mode) {
+ case ODP_PACKET_TX_COMPL_DISABLED:
+ pkt_hdr->p.flags.tx_compl_ev = 0;
+ pkt_hdr->p.flags.tx_compl_poll = 0;
+ break;
+ case ODP_PACKET_TX_COMPL_EVENT:
+ _ODP_ASSERT(opt->queue != ODP_QUEUE_INVALID);
+ pkt_hdr->p.flags.tx_compl_ev = 1;
+ pkt_hdr->p.flags.tx_compl_poll = 0;
+ pkt_hdr->dst_queue = opt->queue;
+ break;
+ case ODP_PACKET_TX_COMPL_POLL:
+ pkt_hdr->p.flags.tx_compl_ev = 0;
+ pkt_hdr->p.flags.tx_compl_poll = 1;
+ pkt_hdr->tx_compl_id = opt->compl_id;
+ break;
+ default:
+ _ODP_ERR("Bad TX completion mode: %i\n", opt->mode);
+ return -1;
+ }
+
+ return 0;
+}
+
+int odp_packet_has_tx_compl_request(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ return pkt_hdr->p.flags.tx_compl_ev || pkt_hdr->p.flags.tx_compl_poll;
+}
+
+void odp_packet_tx_compl_free(odp_packet_tx_compl_t tx_compl)
+{
+ if (odp_unlikely(tx_compl == ODP_PACKET_TX_COMPL_INVALID)) {
+ _ODP_ERR("Bad TX completion event handle\n");
+ return;
+ }
+
+ odp_buffer_free((odp_buffer_t)tx_compl);
+}
+
+void *odp_packet_tx_compl_user_ptr(odp_packet_tx_compl_t tx_compl)
+{
+ if (odp_unlikely(tx_compl == ODP_PACKET_TX_COMPL_INVALID)) {
+ _ODP_ERR("Bad TX completion event handle\n");
+ return NULL;
+ }
+
+ _odp_pktio_tx_compl_t *data = odp_buffer_addr((odp_buffer_t)tx_compl);
+
+ return (void *)(uintptr_t)data->user_ptr;
+}
+
+int odp_packet_tx_compl_done(odp_pktio_t pktio, uint32_t compl_id)
+{
+ return odp_atomic_load_acq_u32(&get_pktio_entry(pktio)->tx_compl_status[compl_id]);
+}
+
+void odp_packet_free_ctrl_set(odp_packet_t pkt, odp_packet_free_ctrl_t ctrl)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (ctrl == ODP_PACKET_FREE_CTRL_DONT_FREE)
+ pkt_hdr->p.flags.free_ctrl = 1;
+ else
+ pkt_hdr->p.flags.free_ctrl = 0;
+}
+
+odp_packet_free_ctrl_t odp_packet_free_ctrl(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (pkt_hdr->p.flags.free_ctrl)
+ return ODP_PACKET_FREE_CTRL_DONT_FREE;
+
+ return ODP_PACKET_FREE_CTRL_DISABLED;
+}
+
+odp_packet_reass_status_t
+odp_packet_reass_status(odp_packet_t pkt)
+{
+ (void)pkt;
+ return ODP_PACKET_REASS_NONE;
+}
+
+int odp_packet_reass_info(odp_packet_t pkt, odp_packet_reass_info_t *info)
+{
+ (void)pkt;
+ (void)info;
+ return -1;
+}
+
+int
+odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
+ odp_packet_reass_partial_state_t *res)
+{
+ (void)pkt;
+ (void)frags;
+ (void)res;
+ return -ENOTSUP;
+}
+
+uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i;
+ odp_packet_seg_t seg;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
+ uint32_t num_segs = odp_packet_num_segs(pkt);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ _ODP_ERR("Not a packet pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ _ODP_ERR("Not an external memory pool\n");
+ return 0;
+ }
+
+ if (odp_unlikely(num < num_segs)) {
+ _ODP_ERR("Not enough buffer handles %u. Packet has %u segments.\n", num, num_segs);
+ return 0;
+ }
+
+ seg = odp_packet_first_seg(pkt);
+
+ for (i = 0; i < num_segs; i++) {
+ pkt_buf[i] = (odp_packet_buf_t)(uintptr_t)packet_seg_to_hdr(seg);
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+
+ return num_segs;
+}
+
+odp_packet_t odp_packet_reassemble(odp_pool_t pool_hdl, odp_packet_buf_t pkt_buf[], uint32_t num)
+{
+ uint32_t i, data_len, tailroom;
+ odp_packet_hdr_t *cur_seg, *next_seg;
+ odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[0];
+ uint32_t headroom = odp_packet_buf_data_offset(pkt_buf[0]);
+
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+
+ if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
+ _ODP_ERR("Not a packet pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(pool->pool_ext == 0)) {
+ _ODP_ERR("Not an external memory pool\n");
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(num == 0)) {
+ _ODP_ERR("Bad number of buffers: %u\n", num);
+ return ODP_PACKET_INVALID;
+ }
+
+ cur_seg = pkt_hdr;
+ data_len = 0;
+
+ for (i = 0; i < num; i++) {
+ next_seg = NULL;
+ if (i < num - 1)
+ next_seg = (odp_packet_hdr_t *)(uintptr_t)pkt_buf[i + 1];
+
+ data_len += cur_seg->seg_len;
+ cur_seg->seg_next = next_seg;
+ cur_seg = next_seg;
+ }
+
+ tailroom = pool->ext_param.pkt.buf_size - sizeof(odp_packet_hdr_t);
+ tailroom -= pool->ext_param.pkt.app_header_size;
+ tailroom -= odp_packet_buf_data_len(pkt_buf[num - 1]);
+ tailroom -= pool->trailer_size;
+
+ pkt_hdr->seg_count = num;
+ pkt_hdr->frame_len = data_len;
+ pkt_hdr->headroom = headroom;
+ pkt_hdr->tailroom = tailroom;
+
+ /* Reset metadata */
+ pkt_hdr->event_hdr.subtype = ODP_EVENT_PACKET_BASIC;
+ pkt_hdr->input = ODP_PKTIO_INVALID;
+ packet_parse_reset(pkt_hdr, 1);
+
+ return packet_handle(pkt_hdr);
+}
+
+void odp_packet_proto_stats_request(odp_packet_t pkt, odp_packet_proto_stats_opt_t *opt)
+{
+ (void)pkt;
+ (void)opt;
+}
+
+odp_proto_stats_t odp_packet_proto_stats(odp_packet_t pkt)
+{
+ (void)pkt;
+
+ return ODP_PROTO_STATS_INVALID;
+}
diff --git a/platform/linux-generic/odp_packet_api.c b/platform/linux-generic/odp_packet_api.c
new file mode 100644
index 000000000..61d03d0c8
--- /dev/null
+++ b/platform/linux-generic/odp_packet_api.c
@@ -0,0 +1,15 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/packet.h>
+
+/* Prevent this header from being included again later */
+#include <odp/api/plat/packet_io_inlines.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_vector_inlines.h>
diff --git a/platform/linux-generic/odp_packet_flags.c b/platform/linux-generic/odp_packet_flags.c
index ea9a22710..777da12ae 100644
--- a/platform/linux-generic/odp_packet_flags.c
+++ b/platform/linux-generic/odp_packet_flags.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,318 +8,149 @@
#include <odp/api/packet_flags.h>
#include <odp_packet_internal.h>
-#define retflag(pkt, x, layer) do { \
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); \
- if (pkt_hdr->p.parsed_layers < layer) \
- packet_parse_layer(pkt_hdr, layer); \
- return pkt_hdr->p.x; \
+#define setflag(pkt, x, v) do { \
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt); \
+ pkt_hdr->p.x = (v) & 1; \
} while (0)
-#define setflag(pkt, x, v, layer) do { \
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); \
- if (pkt_hdr->p.parsed_layers < layer) \
- packet_parse_layer(pkt_hdr, layer); \
- pkt_hdr->p.x = v & 1; \
- } while (0)
-
-int odp_packet_has_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
- return odp_packet_hdr(pkt)->p.error_flags.all != 0;
-}
-
-/* Get Input Flags */
-
-int odp_packet_has_l2_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
- /* L2 parsing is always done by default and hence
- no additional check is required */
- return pkt_hdr->p.error_flags.frame_len
- | pkt_hdr->p.error_flags.snap_len
- | pkt_hdr->p.error_flags.l2_chksum;
-}
-
-int odp_packet_has_l3(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.l3, LAYER_L3);
-}
-
-int odp_packet_has_l3_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L3)
- packet_parse_layer(pkt_hdr, LAYER_L3);
-
- return pkt_hdr->p.error_flags.ip_err;
-}
-
-int odp_packet_has_l4(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.l4, LAYER_L4);
-}
-
-int odp_packet_has_l4_error(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (pkt_hdr->p.parsed_layers < LAYER_L4)
- packet_parse_layer(pkt_hdr, LAYER_L4);
-
- return pkt_hdr->p.error_flags.tcp_err | pkt_hdr->p.error_flags.udp_err;
-}
-
-int odp_packet_has_eth_bcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.eth_bcast, LAYER_L2);
-}
-
-int odp_packet_has_eth_mcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.eth_mcast, LAYER_L2);
-}
-
-int odp_packet_has_vlan(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.vlan, LAYER_L2);
-}
-
-int odp_packet_has_vlan_qinq(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.vlan_qinq, LAYER_L2);
-}
-
-int odp_packet_has_arp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.arp, LAYER_L3);
-}
-
-int odp_packet_has_ipv4(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipv4, LAYER_L3);
-}
-
-int odp_packet_has_ipv6(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipv6, LAYER_L3);
-}
-
-int odp_packet_has_ip_bcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ip_bcast, LAYER_L3);
-}
-
-int odp_packet_has_ip_mcast(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ip_mcast, LAYER_L3);
-}
-
-int odp_packet_has_ipfrag(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipfrag, LAYER_L3);
-}
-
-int odp_packet_has_ipopt(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipopt, LAYER_L3);
-}
-
-int odp_packet_has_ipsec(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.ipsec, LAYER_L4);
-}
-
-int odp_packet_has_udp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.udp, LAYER_L4);
-}
-
-int odp_packet_has_tcp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.tcp, LAYER_L4);
-}
-
-int odp_packet_has_sctp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.sctp, LAYER_L4);
-}
-
-int odp_packet_has_icmp(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.icmp, LAYER_L4);
-}
-
-odp_packet_color_t odp_packet_color(odp_packet_t pkt)
-{
- retflag(pkt, input_flags.color, LAYER_ALL);
-}
-
void odp_packet_color_set(odp_packet_t pkt, odp_packet_color_t color)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
pkt_hdr->p.input_flags.color = color;
}
-odp_bool_t odp_packet_drop_eligible(odp_packet_t pkt)
-{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
-
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
-
- return !pkt_hdr->p.input_flags.nodrop;
-}
-
void odp_packet_drop_eligible_set(odp_packet_t pkt, odp_bool_t drop)
{
- setflag(pkt, input_flags.nodrop, !drop, LAYER_ALL);
-}
-
-int8_t odp_packet_shaper_len_adjust(odp_packet_t pkt)
-{
- retflag(pkt, output_flags.shaper_len_adj, LAYER_ALL);
+ setflag(pkt, input_flags.nodrop, !drop);
}
void odp_packet_shaper_len_adjust_set(odp_packet_t pkt, int8_t adj)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- if (packet_parse_not_complete(pkt_hdr))
- packet_parse_layer(pkt_hdr, LAYER_ALL);
-
- pkt_hdr->p.output_flags.shaper_len_adj = adj;
+ pkt_hdr->p.flags.shaper_len_adj = adj;
}
/* Set Input Flags */
void odp_packet_has_l2_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.l2, val, LAYER_L2);
+ setflag(pkt, input_flags.l2, val);
}
void odp_packet_has_l3_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.l3, val, LAYER_L3);
+ setflag(pkt, input_flags.l3, val);
}
void odp_packet_has_l4_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.l4, val, LAYER_L4);
+ setflag(pkt, input_flags.l4, val);
}
void odp_packet_has_eth_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.eth, val, LAYER_L2);
+ setflag(pkt, input_flags.eth, val);
}
void odp_packet_has_eth_bcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.eth_bcast, val, LAYER_L2);
+ setflag(pkt, input_flags.eth_bcast, val);
}
void odp_packet_has_eth_mcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.eth_mcast, val, LAYER_L2);
+ setflag(pkt, input_flags.eth_mcast, val);
}
void odp_packet_has_jumbo_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.jumbo, val, LAYER_L2);
+ setflag(pkt, input_flags.jumbo, val);
}
void odp_packet_has_vlan_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.vlan, val, LAYER_L2);
+ setflag(pkt, input_flags.vlan, val);
+ setflag(pkt, input_flags.vlan_qinq, 0);
}
void odp_packet_has_vlan_qinq_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.vlan_qinq, val, LAYER_L2);
+ setflag(pkt, input_flags.vlan, val);
+ setflag(pkt, input_flags.vlan_qinq, val);
}
void odp_packet_has_arp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.arp, val, LAYER_L3);
+ setflag(pkt, input_flags.arp, val);
}
void odp_packet_has_ipv4_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipv4, val, LAYER_L3);
+ setflag(pkt, input_flags.ipv4, val);
}
void odp_packet_has_ipv6_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipv6, val, LAYER_L3);
+ setflag(pkt, input_flags.ipv6, val);
}
void odp_packet_has_ip_bcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ip_bcast, val, LAYER_L3);
+ setflag(pkt, input_flags.ip_bcast, val);
}
void odp_packet_has_ip_mcast_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ip_mcast, val, LAYER_L3);
+ setflag(pkt, input_flags.ip_mcast, val);
}
void odp_packet_has_ipfrag_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipfrag, val, LAYER_L3);
+ setflag(pkt, input_flags.ipfrag, val);
}
void odp_packet_has_ipopt_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipopt, val, LAYER_L3);
+ setflag(pkt, input_flags.ipopt, val);
}
void odp_packet_has_ipsec_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.ipsec, val, LAYER_L4);
+ setflag(pkt, input_flags.ipsec, val);
}
void odp_packet_has_udp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.udp, val, LAYER_L4);
+ setflag(pkt, input_flags.udp, val);
}
void odp_packet_has_tcp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.tcp, val, LAYER_L4);
+ setflag(pkt, input_flags.tcp, val);
}
void odp_packet_has_sctp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.sctp, val, LAYER_L4);
+ setflag(pkt, input_flags.sctp, val);
}
void odp_packet_has_icmp_set(odp_packet_t pkt, int val)
{
- setflag(pkt, input_flags.icmp, val, LAYER_L4);
+ setflag(pkt, input_flags.icmp, val);
}
void odp_packet_has_flow_hash_clr(odp_packet_t pkt)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
pkt_hdr->p.input_flags.flow_hash = 0;
}
void odp_packet_has_ts_clr(odp_packet_t pkt)
{
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
pkt_hdr->p.input_flags.timestamp = 0;
}
-
-/* Include non-inlined versions of API functions */
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/packet_flag_inlines_api.h>
-#endif
diff --git a/platform/linux-generic/odp_packet_flags_api.c b/platform/linux-generic/odp_packet_flags_api.c
new file mode 100644
index 000000000..e77f22c3e
--- /dev/null
+++ b/platform/linux-generic/odp_packet_flags_api.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/packet_flags.h>
+#include <odp_packet_internal.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/packet_flag_inlines.h>
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 98460a566..8283c41e6 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -1,103 +1,186 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+
#include <odp_posix_extensions.h>
-#include <odp/api/packet_io.h>
-#include <odp_packet_io_internal.h>
-#include <odp_packet_io_queue.h>
+#include <odp/api/buffer.h>
+#include <odp/api/debug.h>
#include <odp/api/packet.h>
-#include <odp_packet_internal.h>
-#include <odp_internal.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/proto_stats.h>
+#include <odp/api/shared_memory.h>
#include <odp/api/spinlock.h>
#include <odp/api/ticketlock.h>
-#include <odp/api/shared_memory.h>
-#include <odp_packet_socket.h>
-#include <odp_config_internal.h>
-#include <odp_queue_internal.h>
-#include <odp_schedule_if.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_io_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/time_inlines.h>
+
+#include <odp/autoheader_internal.h>
#include <odp_classification_internal.h>
+#include <odp_config_internal.h>
#include <odp_debug_internal.h>
-#include <odp_packet_io_ipc_internal.h>
-#include <odp/api/time.h>
+#include <odp_event_vector_internal.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pcapng.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
-#include <string.h>
+#include <ifaddrs.h>
#include <inttypes.h>
+#include <string.h>
#include <sys/ioctl.h>
-#include <ifaddrs.h>
-#include <errno.h>
#include <time.h>
-/* Sleep this many nanoseconds between pktin receive calls */
-#define SLEEP_NSEC 1000
+/* Sleep this many microseconds between pktin receive calls. Must be smaller
+ * than 1000000 (a million), i.e. smaller than a second. */
+#define SLEEP_USEC 1
-/* Check total sleep time about every SLEEP_CHECK * SLEEP_NSEC nanoseconds.
+/* Check total sleep time about every SLEEP_CHECK * SLEEP_USEC microseconds.
* Must be power of two. */
#define SLEEP_CHECK 32
-pktio_table_t *pktio_tbl;
+/* Max wait time supported to avoid potential overflow */
+#define MAX_WAIT_TIME (UINT64_MAX / 1024)
+
+/* One hour maximum aging timeout, no real limitations imposed by the implementation other than
+ * integer width, so just use some value. */
+#define MAX_TX_AGING_TMO_NS 3600000000000ULL
+
+typedef struct {
+ union {
+ struct {
+ odp_buffer_t buf;
+ const void *user_ptr;
+ odp_queue_t queue;
+ };
+
+ odp_atomic_u32_t *status;
+ };
+ uint16_t idx;
+ uint8_t mode;
+} tx_compl_info_t;
+
+/* Global variables */
+static pktio_global_t *pktio_global;
/* pktio pointer entries ( for inlines) */
-void *pktio_entry_ptr[ODP_CONFIG_PKTIO_ENTRIES];
+void *_odp_pktio_entry_ptr[CONFIG_PKTIO_ENTRIES];
static inline pktio_entry_t *pktio_entry_by_index(int index)
{
- return pktio_entry_ptr[index];
+ return _odp_pktio_entry_ptr[index];
}
-int odp_pktio_init_global(void)
+static int read_config_file(pktio_global_t *pktio_glb)
+{
+ const char *str;
+ int val = 0;
+
+ _ODP_PRINT("Packet IO config:\n");
+
+ str = "pktio.pktin_frame_offset";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val < 0 || val > UINT16_MAX) {
+ _ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+
+ pktio_glb->config.pktin_frame_offset = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "pktio.tx_compl_pool_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val < 0) {
+ _ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+
+ pktio_glb->config.tx_compl_pool_size = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ _ODP_PRINT("\n");
+
+ return 0;
+}
+
+int _odp_pktio_init_global(void)
{
pktio_entry_t *pktio_entry;
int i;
odp_shm_t shm;
int pktio_if;
- shm = odp_shm_reserve("odp_pktio_entries",
- sizeof(pktio_table_t),
- sizeof(pktio_entry_t), 0);
- pktio_tbl = odp_shm_addr(shm);
-
- if (pktio_tbl == NULL)
+ shm = odp_shm_reserve("_odp_pktio_global", sizeof(pktio_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
return -1;
- memset(pktio_tbl, 0, sizeof(pktio_table_t));
+ pktio_global = odp_shm_addr(shm);
+ memset(pktio_global, 0, sizeof(pktio_global_t));
+ pktio_global->shm = shm;
- odp_spinlock_init(&pktio_tbl->lock);
+ odp_spinlock_init(&pktio_global->lock);
+
+ if (read_config_file(pktio_global)) {
+ odp_shm_free(shm);
+ pktio_global = NULL;
+ return -1;
+ }
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
- pktio_entry = &pktio_tbl->entries[i];
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
+ pktio_entry = &pktio_global->entries[i];
- odp_ticketlock_init(&pktio_entry->s.rxl);
- odp_ticketlock_init(&pktio_entry->s.txl);
- odp_spinlock_init(&pktio_entry->s.cls.l2_cos_table.lock);
- odp_spinlock_init(&pktio_entry->s.cls.l3_cos_table.lock);
+ pktio_entry->handle = _odp_cast_scalar(odp_pktio_t, i + 1);
+ odp_ticketlock_init(&pktio_entry->rxl);
+ odp_ticketlock_init(&pktio_entry->txl);
- pktio_entry_ptr[i] = pktio_entry;
+ _odp_pktio_entry_ptr[i] = pktio_entry;
}
- for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if) {
- if (pktio_if_ops[pktio_if]->init_global)
- if (pktio_if_ops[pktio_if]->init_global()) {
- ODP_ERR("failed to initialized pktio type %d",
- pktio_if);
+ for (pktio_if = 0; _odp_pktio_if_ops[pktio_if]; ++pktio_if) {
+ if (_odp_pktio_if_ops[pktio_if]->init_global)
+ if (_odp_pktio_if_ops[pktio_if]->init_global()) {
+ _ODP_ERR("failed to initialized pktio type %d", pktio_if);
return -1;
}
}
+ if (_ODP_PCAPNG) {
+ if (_odp_pcapng_init_global()) {
+ _ODP_ERR("Failed to initialize pcapng\n");
+ return -1;
+ }
+ }
+
return 0;
}
-int odp_pktio_init_local(void)
+int _odp_pktio_init_local(void)
{
int pktio_if;
- for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if) {
- if (pktio_if_ops[pktio_if]->init_local)
- if (pktio_if_ops[pktio_if]->init_local()) {
- ODP_ERR("failed to initialized pktio type %d",
- pktio_if);
+ for (pktio_if = 0; _odp_pktio_if_ops[pktio_if]; ++pktio_if) {
+ if (_odp_pktio_if_ops[pktio_if]->init_local)
+ if (_odp_pktio_if_ops[pktio_if]->init_local()) {
+ _ODP_ERR("failed to initialized pktio type %d", pktio_if);
return -1;
}
}
@@ -107,73 +190,104 @@ int odp_pktio_init_local(void)
static inline int is_free(pktio_entry_t *entry)
{
- return (entry->s.state == PKTIO_STATE_FREE);
+ return (entry->state == PKTIO_STATE_FREE);
}
static void lock_entry(pktio_entry_t *entry)
{
- odp_ticketlock_lock(&entry->s.rxl);
- odp_ticketlock_lock(&entry->s.txl);
+ odp_ticketlock_lock(&entry->rxl);
+ odp_ticketlock_lock(&entry->txl);
}
static void unlock_entry(pktio_entry_t *entry)
{
- odp_ticketlock_unlock(&entry->s.txl);
- odp_ticketlock_unlock(&entry->s.rxl);
+ odp_ticketlock_unlock(&entry->txl);
+ odp_ticketlock_unlock(&entry->rxl);
}
-static void init_in_queues(pktio_entry_t *entry)
+/**
+ * Strip optional pktio type from device name by moving start pointer
+ *
+ * @param name Packet IO device name
+ * @param[out] type_out Optional char array (len = PKTIO_NAME_LEN) for storing
+ * pktio type. Ignored when NULL.
+ *
+ * @return Pointer to the beginning of device name
+ */
+static const char *strip_pktio_type(const char *name, char *type_out)
{
- int i;
+ const char *if_name;
+
+ if (type_out)
+ type_out[0] = '\0';
+
+ /* Strip pktio type prefix <pktio_type>:<if_name> */
+ if_name = strchr(name, ':');
- for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
- entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
- entry->s.in_queue[i].pktin = PKTIN_INVALID;
+ if (if_name) {
+ int pktio_if;
+ int type_len = if_name - name;
+ char pktio_type[type_len + 1];
+
+ strncpy(pktio_type, name, type_len);
+ pktio_type[type_len] = '\0';
+
+ /* Remove colon */
+ if_name++;
+
+ /* Match if_type to enabled pktio devices */
+ for (pktio_if = 0; _odp_pktio_if_ops[pktio_if]; pktio_if++) {
+ if (!strcmp(pktio_type, _odp_pktio_if_ops[pktio_if]->name)) {
+ if (type_out)
+ strcpy(type_out, pktio_type);
+ /* Some pktio devices expect device names to
+ * begin with pktio type */
+ if (!strcmp(pktio_type, "ipc") ||
+ !strcmp(pktio_type, "null") ||
+ !strcmp(pktio_type, "pcap") ||
+ !strcmp(pktio_type, "tap"))
+ return name;
+
+ return if_name;
+ }
+ }
}
+ return name;
}
static void init_out_queues(pktio_entry_t *entry)
{
int i;
- for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
- entry->s.out_queue[i].queue = ODP_QUEUE_INVALID;
- entry->s.out_queue[i].pktout = PKTOUT_INVALID;
+ for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++) {
+ entry->out_queue[i].queue = ODP_QUEUE_INVALID;
+ entry->out_queue[i].pktout = PKTOUT_INVALID;
}
}
static void init_pktio_entry(pktio_entry_t *entry)
{
- pktio_cls_enabled_set(entry, 0);
+ int i;
- init_in_queues(entry);
- init_out_queues(entry);
+ /* Clear all flags */
+ entry->enabled.all_flags = 0;
- pktio_classifier_init(entry);
-}
+ entry->tx_compl_pool = ODP_POOL_INVALID;
+ entry->tx_compl_status_shm = ODP_SHM_INVALID;
-static odp_pktio_t alloc_lock_pktio_entry(void)
-{
- pktio_entry_t *entry;
- int i;
+ odp_atomic_init_u64(&entry->stats_extra.in_discards, 0);
+ odp_atomic_init_u64(&entry->stats_extra.in_errors, 0);
+ odp_atomic_init_u64(&entry->stats_extra.out_discards, 0);
+ odp_atomic_init_u64(&entry->tx_ts, 0);
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
- entry = &pktio_tbl->entries[i];
- if (is_free(entry)) {
- lock_entry(entry);
- if (is_free(entry)) {
- odp_pktio_t hdl;
-
- entry->s.state = PKTIO_STATE_ACTIVE;
- init_pktio_entry(entry);
- hdl = _odp_cast_scalar(odp_pktio_t, i + 1);
- return hdl; /* return with entry locked! */
- }
- unlock_entry(entry);
- }
+ for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) {
+ entry->in_queue[i].queue = ODP_QUEUE_INVALID;
+ entry->in_queue[i].pktin = PKTIN_INVALID;
}
- return ODP_PKTIO_INVALID;
+ init_out_queues(entry);
+
+ _odp_pktio_classifier_init(entry);
}
static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
@@ -181,53 +295,71 @@ static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
{
odp_pktio_t hdl;
pktio_entry_t *pktio_entry;
+ int i, pktio_if;
+ char pktio_type[PKTIO_NAME_LEN];
+ const char *if_name;
+ uint16_t pktin_frame_offset = pktio_global->config.pktin_frame_offset;
int ret = -1;
- int pktio_if;
if (strlen(name) >= PKTIO_NAME_LEN - 1) {
/* ioctl names limitation */
- ODP_ERR("pktio name %s is too big, limit is %d bytes\n",
- name, PKTIO_NAME_LEN - 1);
+ _ODP_ERR("pktio name %s is too long (max: %d chars)\n", name, PKTIO_NAME_LEN - 1);
return ODP_PKTIO_INVALID;
}
- hdl = alloc_lock_pktio_entry();
- if (hdl == ODP_PKTIO_INVALID) {
- ODP_ERR("No resources available.\n");
- return ODP_PKTIO_INVALID;
+ if_name = strip_pktio_type(name, pktio_type);
+
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
+ pktio_entry = &pktio_global->entries[i];
+ if (is_free(pktio_entry)) {
+ lock_entry(pktio_entry);
+ if (is_free(pktio_entry))
+ break;
+
+ unlock_entry(pktio_entry);
+ }
}
- /* if successful, alloc_pktio_entry() returns with the entry locked */
- pktio_entry = get_pktio_entry(hdl);
- if (!pktio_entry)
+ if (i == CONFIG_PKTIO_ENTRIES) {
+ _ODP_ERR("All pktios used already\n");
return ODP_PKTIO_INVALID;
+ }
+
+ /* Entry was found and is now locked */
+ pktio_entry->state = PKTIO_STATE_ACTIVE;
+ hdl = pktio_entry->handle;
- pktio_entry->s.pool = pool;
- memcpy(&pktio_entry->s.param, param, sizeof(odp_pktio_param_t));
- pktio_entry->s.handle = hdl;
+ init_pktio_entry(pktio_entry);
- for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if) {
- ret = pktio_if_ops[pktio_if]->open(hdl, pktio_entry, name,
- pool);
+ snprintf(pktio_entry->name, sizeof(pktio_entry->name), "%s", if_name);
+ snprintf(pktio_entry->full_name, sizeof(pktio_entry->full_name), "%s", name);
+ pktio_entry->pool = pool;
+ memcpy(&pktio_entry->param, param, sizeof(odp_pktio_param_t));
+ pktio_entry->pktin_frame_offset = pktin_frame_offset;
- if (!ret) {
- pktio_entry->s.ops = pktio_if_ops[pktio_if];
- ODP_DBG("%s uses %s\n",
- name, pktio_if_ops[pktio_if]->name);
+ odp_pktio_config_init(&pktio_entry->config);
+
+ for (pktio_if = 0; _odp_pktio_if_ops[pktio_if]; ++pktio_if) {
+ /* Only use explicitly defined pktio type */
+ if (strlen(pktio_type) &&
+ strcmp(_odp_pktio_if_ops[pktio_if]->name, pktio_type))
+ continue;
+
+ ret = _odp_pktio_if_ops[pktio_if]->open(hdl, pktio_entry, if_name, pool);
+
+ if (!ret)
break;
- }
}
if (ret != 0) {
- pktio_entry->s.state = PKTIO_STATE_FREE;
- hdl = ODP_PKTIO_INVALID;
- ODP_ERR("Unable to init any I/O type.\n");
- } else {
- snprintf(pktio_entry->s.name,
- sizeof(pktio_entry->s.name), "%s", name);
- pktio_entry->s.state = PKTIO_STATE_OPENED;
+ pktio_entry->state = PKTIO_STATE_FREE;
+ unlock_entry(pktio_entry);
+ _ODP_ERR("Unable to init any I/O type.\n");
+ return ODP_PKTIO_INVALID;
}
+ pktio_entry->state = PKTIO_STATE_OPENED;
+ pktio_entry->ops = _odp_pktio_if_ops[pktio_if];
unlock_entry(pktio_entry);
return hdl;
@@ -246,6 +378,19 @@ static int pool_type_is_packet(odp_pool_t pool)
return pool_info.params.type == ODP_POOL_PACKET;
}
+static const char *driver_name(odp_pktio_t hdl)
+{
+ pktio_entry_t *entry;
+
+ entry = get_pktio_entry(hdl);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
+ return "bad handle";
+ }
+
+ return entry->ops->name;
+}
+
odp_pktio_t odp_pktio_open(const char *name, odp_pool_t pool,
const odp_pktio_param_t *param)
{
@@ -257,18 +402,19 @@ odp_pktio_t odp_pktio_open(const char *name, odp_pool_t pool,
param = &default_param;
}
- ODP_ASSERT(pool_type_is_packet(pool));
+ _ODP_ASSERT(pool_type_is_packet(pool));
hdl = odp_pktio_lookup(name);
if (hdl != ODP_PKTIO_INVALID) {
- /* interface is already open */
- __odp_errno = EEXIST;
+ _ODP_ERR("pktio device %s already opened\n", name);
return ODP_PKTIO_INVALID;
}
- odp_spinlock_lock(&pktio_tbl->lock);
+ odp_spinlock_lock(&pktio_global->lock);
hdl = setup_pktio_entry(name, pool, param);
- odp_spinlock_unlock(&pktio_tbl->lock);
+ odp_spinlock_unlock(&pktio_global->lock);
+
+ _ODP_DBG("interface: %s, driver: %s\n", name, driver_name(hdl));
return hdl;
}
@@ -276,21 +422,21 @@ odp_pktio_t odp_pktio_open(const char *name, odp_pool_t pool,
static int _pktio_close(pktio_entry_t *entry)
{
int ret;
- int state = entry->s.state;
+ int state = entry->state;
if (state != PKTIO_STATE_OPENED &&
state != PKTIO_STATE_STOPPED &&
state != PKTIO_STATE_STOP_PENDING)
return -1;
- ret = entry->s.ops->close(entry);
+ ret = entry->ops->close(entry);
if (ret)
return -1;
if (state == PKTIO_STATE_STOP_PENDING)
- entry->s.state = PKTIO_STATE_CLOSE_PENDING;
+ entry->state = PKTIO_STATE_CLOSE_PENDING;
else
- entry->s.state = PKTIO_STATE_FREE;
+ entry->state = PKTIO_STATE_FREE;
return 0;
}
@@ -300,9 +446,9 @@ static void destroy_in_queues(pktio_entry_t *entry, int num)
int i;
for (i = 0; i < num; i++) {
- if (entry->s.in_queue[i].queue != ODP_QUEUE_INVALID) {
- odp_queue_destroy(entry->s.in_queue[i].queue);
- entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
+ if (entry->in_queue[i].queue != ODP_QUEUE_INVALID) {
+ odp_queue_destroy(entry->in_queue[i].queue);
+ entry->in_queue[i].queue = ODP_QUEUE_INVALID;
}
}
}
@@ -312,10 +458,10 @@ static void destroy_out_queues(pktio_entry_t *entry, int num)
int i, rc;
for (i = 0; i < num; i++) {
- if (entry->s.out_queue[i].queue != ODP_QUEUE_INVALID) {
- rc = odp_queue_destroy(entry->s.out_queue[i].queue);
- ODP_ASSERT(rc == 0);
- entry->s.out_queue[i].queue = ODP_QUEUE_INVALID;
+ if (entry->out_queue[i].queue != ODP_QUEUE_INVALID) {
+ rc = odp_queue_destroy(entry->out_queue[i].queue);
+ _ODP_ASSERT(rc == 0);
+ entry->out_queue[i].queue = ODP_QUEUE_INVALID;
}
}
}
@@ -327,18 +473,18 @@ static void flush_in_queues(pktio_entry_t *entry)
int max_pkts = 16;
odp_packet_t packets[max_pkts];
- mode = entry->s.param.in_mode;
- num = entry->s.num_in_queue;
+ mode = entry->param.in_mode;
+ num = entry->num_in_queue;
if (mode == ODP_PKTIN_MODE_DIRECT) {
for (i = 0; i < num; i++) {
int ret;
- odp_pktin_queue_t pktin = entry->s.in_queue[i].pktin;
+ odp_pktin_queue_t pktin = entry->in_queue[i].pktin;
while ((ret = odp_pktin_recv(pktin, packets,
max_pkts))) {
if (ret < 0) {
- ODP_ERR("Queue flush failed\n");
+ _ODP_ERR("Queue flush failed\n");
return;
}
@@ -354,31 +500,103 @@ int odp_pktio_close(odp_pktio_t hdl)
int res;
entry = get_pktio_entry(hdl);
- if (entry == NULL)
+ if (entry == NULL) {
+ _ODP_ERR("Bad handle\n");
return -1;
+ }
- if (entry->s.state == PKTIO_STATE_STARTED) {
- ODP_DBG("Missing odp_pktio_stop() before close.\n");
+ if (entry->state == PKTIO_STATE_STARTED) {
+ _ODP_DBG("Missing odp_pktio_stop() before close.\n");
return -1;
}
- if (entry->s.state == PKTIO_STATE_STOPPED)
+ if (entry->state == PKTIO_STATE_STOPPED)
flush_in_queues(entry);
lock_entry(entry);
- destroy_in_queues(entry, entry->s.num_in_queue);
- destroy_out_queues(entry, entry->s.num_out_queue);
+ destroy_in_queues(entry, entry->num_in_queue);
+ destroy_out_queues(entry, entry->num_out_queue);
+
+ entry->num_in_queue = 0;
+ entry->num_out_queue = 0;
+
+ if (entry->tx_compl_pool != ODP_POOL_INVALID) {
+ if (odp_pool_destroy(entry->tx_compl_pool) == -1) {
+ unlock_entry(entry);
+ _ODP_ERR("Unable to destroy Tx event completion pool\n");
+ return -1;
+ }
+ }
- entry->s.num_in_queue = 0;
- entry->s.num_out_queue = 0;
+ if (entry->tx_compl_status_shm != ODP_SHM_INVALID) {
+ if (odp_shm_free(entry->tx_compl_status_shm) < 0) {
+ unlock_entry(entry);
+ _ODP_ERR("Unable to destroy Tx poll completion SHM\n");
+ return -1;
+ }
+ }
+ odp_spinlock_lock(&pktio_global->lock);
res = _pktio_close(entry);
+ odp_spinlock_unlock(&pktio_global->lock);
if (res)
- ODP_ABORT("unable to close pktio\n");
+ _ODP_ABORT("unable to close pktio\n");
unlock_entry(entry);
+ _ODP_DBG("interface: %s\n", entry->name);
+
+ return 0;
+}
+
+static int configure_tx_event_compl(pktio_entry_t *entry)
+{
+ odp_pool_param_t params;
+ const char *name_base = "_odp_pktio_tx_compl_";
+ char pool_name[ODP_POOL_NAME_LEN];
+
+ if (entry->tx_compl_pool != ODP_POOL_INVALID)
+ return 0;
+
+ snprintf(pool_name, sizeof(pool_name), "%s%d", name_base, odp_pktio_index(entry->handle));
+ odp_pool_param_init(&params);
+
+ params.type = ODP_POOL_BUFFER;
+ params.buf.num = pktio_global->config.tx_compl_pool_size;
+ params.buf.size = sizeof(_odp_pktio_tx_compl_t);
+ entry->tx_compl_pool = odp_pool_create(pool_name, &params);
+
+ if (entry->tx_compl_pool == ODP_POOL_INVALID)
+ return -1;
+
+ return 0;
+}
+
+static int configure_tx_poll_compl(pktio_entry_t *entry, uint32_t count)
+{
+ odp_shm_t shm;
+ const char *name_base = "_odp_pktio_tx_compl_";
+ char shm_name[ODP_SHM_NAME_LEN];
+
+ if (entry->tx_compl_status_shm != ODP_SHM_INVALID)
+ return 0;
+
+ snprintf(shm_name, sizeof(shm_name), "%s%d", name_base, odp_pktio_index(entry->handle));
+ shm = odp_shm_reserve(shm_name, sizeof(odp_atomic_u32_t) * count, ODP_CACHE_LINE_SIZE, 0);
+
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ entry->tx_compl_status_shm = shm;
+ entry->tx_compl_status = odp_shm_addr(shm);
+
+ if (entry->tx_compl_status == NULL)
+ return -1;
+
+ for (uint32_t i = 0; i < count; i++)
+ odp_atomic_init_u32(&entry->tx_compl_status[i], 0);
+
return 0;
}
@@ -390,8 +608,10 @@ int odp_pktio_config(odp_pktio_t hdl, const odp_pktio_config_t *config)
int res = 0;
entry = get_pktio_entry(hdl);
- if (!entry)
+ if (!entry) {
+ _ODP_ERR("Bad handle\n");
return -1;
+ }
if (config == NULL) {
odp_pktio_config_init(&default_config);
@@ -403,30 +623,59 @@ int odp_pktio_config(odp_pktio_t hdl, const odp_pktio_config_t *config)
/* Check config for invalid values */
if (config->pktin.all_bits & ~capa.config.pktin.all_bits) {
- ODP_ERR("Unsupported input configuration option\n");
+ _ODP_ERR("Unsupported input configuration option\n");
return -1;
}
if (config->pktout.all_bits & ~capa.config.pktout.all_bits) {
- ODP_ERR("Unsupported output configuration option\n");
+ _ODP_ERR("Unsupported output configuration option\n");
+ return -1;
+ }
+
+ if (config->enable_loop && !capa.config.enable_loop) {
+ _ODP_ERR("Loopback mode not supported\n");
return -1;
}
- if (config->enable_loop && !capa.loop_supported) {
- ODP_ERR("Loopback mode not supported\n");
+ if (config->flow_control.pause_rx != ODP_PKTIO_LINK_PAUSE_OFF ||
+ config->flow_control.pause_tx != ODP_PKTIO_LINK_PAUSE_OFF) {
+ _ODP_ERR("Link flow control is not supported\n");
return -1;
}
lock_entry(entry);
- if (entry->s.state == PKTIO_STATE_STARTED) {
+ if (entry->state == PKTIO_STATE_STARTED) {
unlock_entry(entry);
- ODP_DBG("pktio %s: not stopped\n", entry->s.name);
+ _ODP_DBG("pktio %s: not stopped\n", entry->name);
return -1;
}
- entry->s.config = *config;
+ entry->config = *config;
- if (entry->s.ops->config)
- res = entry->s.ops->config(entry, config);
+ entry->enabled.tx_ts = config->pktout.bit.ts_ena;
+ entry->enabled.tx_compl = (config->pktout.bit.tx_compl_ena ||
+ config->tx_compl.mode_event ||
+ config->tx_compl.mode_poll);
+
+ if (entry->enabled.tx_compl) {
+ if ((config->pktout.bit.tx_compl_ena || config->tx_compl.mode_event) &&
+ configure_tx_event_compl(entry)) {
+ unlock_entry(entry);
+ _ODP_ERR("Unable to configure Tx event completion\n");
+ return -1;
+ }
+
+ if (config->tx_compl.mode_poll &&
+ configure_tx_poll_compl(entry, config->tx_compl.max_compl_id + 1)) {
+ unlock_entry(entry);
+ _ODP_ERR("Unable to configure Tx poll completion\n");
+ return -1;
+ }
+ }
+
+ entry->enabled.tx_aging = config->pktout.bit.aging_ena;
+
+ if (entry->ops->config)
+ res = entry->ops->config(entry, config);
unlock_entry(entry);
@@ -440,38 +689,61 @@ int odp_pktio_start(odp_pktio_t hdl)
int res = 0;
entry = get_pktio_entry(hdl);
- if (!entry)
+ if (!entry) {
+ _ODP_ERR("Bad handle\n");
return -1;
+ }
lock_entry(entry);
- if (entry->s.state == PKTIO_STATE_STARTED) {
+ if (entry->state == PKTIO_STATE_STARTED) {
unlock_entry(entry);
+ _ODP_ERR("Already started\n");
return -1;
}
- if (entry->s.ops->start)
- res = entry->s.ops->start(entry);
+
+ if (entry->state == PKTIO_STATE_STOP_PENDING) {
+ unlock_entry(entry);
+ _ODP_ERR("Scheduled pktio stop pending\n");
+ return -1;
+ }
+
+ entry->parse_layer = pktio_cls_enabled(entry) ?
+ ODP_PROTO_LAYER_ALL :
+ entry->config.parser.layer;
+ if (entry->ops->start)
+ res = entry->ops->start(entry);
if (!res)
- entry->s.state = PKTIO_STATE_STARTED;
+ entry->state = PKTIO_STATE_STARTED;
unlock_entry(entry);
- mode = entry->s.param.in_mode;
+ mode = entry->param.in_mode;
if (mode == ODP_PKTIN_MODE_SCHED) {
- unsigned i;
- unsigned num = entry->s.num_in_queue;
+ uint32_t i;
+ uint32_t num = entry->num_in_queue;
int index[num];
+ odp_queue_t odpq[num];
for (i = 0; i < num; i++) {
index[i] = i;
+ odpq[i] = entry->in_queue[i].queue;
- if (entry->s.in_queue[i].queue == ODP_QUEUE_INVALID) {
- ODP_ERR("No input queue\n");
+ if (entry->in_queue[i].queue == ODP_QUEUE_INVALID) {
+ _ODP_ERR("No input queue\n");
return -1;
}
}
- sched_fn->pktio_start(pktio_to_id(hdl), num, index);
+ _odp_sched_fn->pktio_start(odp_pktio_index(hdl), num, index, odpq);
+ }
+
+ _ODP_DBG("interface: %s, input queues: %u, output queues: %u\n",
+ entry->name, entry->num_in_queue, entry->num_out_queue);
+
+ if (_ODP_PCAPNG) {
+ if (_odp_pcapng_start(entry))
+ _ODP_ERR("pcapng start failed, won't capture\n");
}
return res;
@@ -480,21 +752,26 @@ int odp_pktio_start(odp_pktio_t hdl)
static int _pktio_stop(pktio_entry_t *entry)
{
int res = 0;
- odp_pktin_mode_t mode = entry->s.param.in_mode;
+ odp_pktin_mode_t mode = entry->param.in_mode;
- if (entry->s.state != PKTIO_STATE_STARTED)
+ if (entry->state != PKTIO_STATE_STARTED) {
+ _ODP_ERR("Not started\n");
return -1;
+ }
- if (entry->s.ops->stop)
- res = entry->s.ops->stop(entry);
+ if (entry->ops->stop)
+ res = entry->ops->stop(entry);
if (res)
return -1;
if (mode == ODP_PKTIN_MODE_SCHED)
- entry->s.state = PKTIO_STATE_STOP_PENDING;
+ entry->state = PKTIO_STATE_STOP_PENDING;
else
- entry->s.state = PKTIO_STATE_STOPPED;
+ entry->state = PKTIO_STATE_STOPPED;
+
+ if (_ODP_PCAPNG)
+ _odp_pcapng_stop(entry);
return res;
}
@@ -505,13 +782,17 @@ int odp_pktio_stop(odp_pktio_t hdl)
int res;
entry = get_pktio_entry(hdl);
- if (!entry)
+ if (!entry) {
+ _ODP_ERR("Bad handle\n");
return -1;
+ }
lock_entry(entry);
res = _pktio_stop(entry);
unlock_entry(entry);
+ _ODP_DBG("interface: %s\n", entry->name);
+
return res;
}
@@ -519,19 +800,22 @@ odp_pktio_t odp_pktio_lookup(const char *name)
{
odp_pktio_t hdl = ODP_PKTIO_INVALID;
pktio_entry_t *entry;
+ const char *ifname;
int i;
- odp_spinlock_lock(&pktio_tbl->lock);
+ ifname = strip_pktio_type(name, NULL);
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ odp_spinlock_lock(&pktio_global->lock);
+
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
entry = pktio_entry_by_index(i);
if (!entry || is_free(entry))
continue;
lock_entry(entry);
- if (entry->s.state >= PKTIO_STATE_ACTIVE &&
- strncmp(entry->s.name, name, sizeof(entry->s.name)) == 0)
+ if (entry->state >= PKTIO_STATE_ACTIVE &&
+ strncmp(entry->name, ifname, sizeof(entry->name)) == 0)
hdl = _odp_cast_scalar(odp_pktio_t, i + 1);
unlock_entry(entry);
@@ -540,228 +824,237 @@ odp_pktio_t odp_pktio_lookup(const char *name)
break;
}
- odp_spinlock_unlock(&pktio_tbl->lock);
+ odp_spinlock_unlock(&pktio_global->lock);
return hdl;
}
-static inline int pktin_recv_buf(odp_pktin_queue_t queue,
- odp_buffer_hdr_t *buffer_hdrs[], int num)
+static inline odp_packet_vector_t packet_vector_create(odp_packet_t packets[], uint32_t num,
+ odp_pool_t pool)
{
- odp_packet_t pkt;
- odp_packet_t packets[num];
- odp_packet_hdr_t *pkt_hdr;
- odp_buffer_hdr_t *buf_hdr;
- odp_buffer_t buf;
- int i;
- int pkts;
- int num_rx = 0;
-
- pkts = odp_pktin_recv(queue, packets, num);
+ odp_packet_vector_t pktv;
+ odp_packet_t *pkt_tbl;
+ uint32_t i;
- for (i = 0; i < pkts; i++) {
- pkt = packets[i];
- pkt_hdr = odp_packet_hdr(pkt);
- buf = _odp_packet_to_buffer(pkt);
- buf_hdr = buf_hdl_to_hdr(buf);
+ pktv = odp_packet_vector_alloc(pool);
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID)) {
+ odp_packet_free_multi(packets, num);
+ return ODP_PACKET_VECTOR_INVALID;
+ }
- if (pkt_hdr->p.input_flags.dst_queue) {
- queue_entry_t *dst_queue;
- int ret;
+ odp_packet_vector_tbl(pktv, &pkt_tbl);
+ for (i = 0; i < num; i++)
+ pkt_tbl[i] = packets[i];
+ odp_packet_vector_size_set(pktv, num);
- dst_queue = queue_to_qentry(pkt_hdr->dst_queue);
- ret = queue_enq(dst_queue, buf_hdr);
- if (ret < 0)
- odp_packet_free(pkt);
- continue;
- }
- buffer_hdrs[num_rx++] = buf_hdr;
- }
- return num_rx;
+ return pktv;
}
-int pktout_enqueue(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr)
+static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
+ _odp_event_hdr_t *event_hdrs[], int num)
{
- odp_packet_t pkt = _odp_packet_from_buffer(buf_hdr->handle.handle);
- int len = 1;
- int nbr;
+ const odp_bool_t vector_enabled = entry->in_queue[pktin_index].vector.enable;
+ odp_pool_t pool = ODP_POOL_INVALID;
+ int num_rx;
- nbr = odp_pktout_send(qentry->s.pktout, &pkt, len);
- return (nbr == len ? 0 : -1);
-}
+ if (vector_enabled) {
+ /* Make sure all packets will fit into a single packet vector */
+ if ((int)entry->in_queue[pktin_index].vector.max_size < num)
+ num = entry->in_queue[pktin_index].vector.max_size;
+ pool = entry->in_queue[pktin_index].vector.pool;
+ }
-odp_buffer_hdr_t *pktout_dequeue(queue_entry_t *qentry ODP_UNUSED)
-{
- ODP_ABORT("attempted dequeue from a pktout queue");
- return NULL;
+ num_rx = entry->ops->recv(entry, pktin_index, (odp_packet_t *)event_hdrs, num);
+
+ if (!vector_enabled || num_rx < 2)
+ return num_rx;
+
+ odp_packet_vector_t pktv = packet_vector_create((odp_packet_t *)event_hdrs, num_rx, pool);
+
+ if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+ return 0;
+
+ event_hdrs[0] = _odp_packet_vector_to_event_hdr(pktv);
+ return 1;
}
-int pktout_enq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[],
- int num)
+static int pktout_enqueue(odp_queue_t queue, _odp_event_hdr_t *event_hdr)
{
- odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
+ odp_packet_t pkt = packet_from_event_hdr(event_hdr);
int nbr;
- int i;
- for (i = 0; i < num; ++i)
- pkt_tbl[i] = _odp_packet_from_buffer(buf_hdr[i]->handle.handle);
+ _ODP_ASSERT(odp_event_type(_odp_event_from_hdr(event_hdr)) == ODP_EVENT_PACKET);
- nbr = odp_pktout_send(qentry->s.pktout, pkt_tbl, num);
- return nbr;
-}
+ if (_odp_sched_fn->ord_enq_multi(queue, (void **)event_hdr, 1, &nbr))
+ return (nbr == 1 ? 0 : -1);
-int pktout_deq_multi(queue_entry_t *qentry ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr[] ODP_UNUSED,
- int num ODP_UNUSED)
-{
- ODP_ABORT("attempted dequeue from a pktout queue");
- return 0;
+ nbr = odp_pktout_send(_odp_queue_fn->get_pktout(queue), &pkt, 1);
+ return (nbr == 1 ? 0 : -1);
}
-int pktin_enqueue(queue_entry_t *qentry ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr ODP_UNUSED)
+static int pktout_enq_multi(odp_queue_t queue, _odp_event_hdr_t *event_hdr[],
+ int num)
{
- ODP_ABORT("attempted enqueue to a pktin queue");
- return -1;
+ int nbr;
+
+ if (ODP_DEBUG) {
+ for (int i = 0; i < num; i++)
+ _ODP_ASSERT(odp_event_type(_odp_event_from_hdr(event_hdr[i])) ==
+ ODP_EVENT_PACKET);
+ }
+
+ if (_odp_sched_fn->ord_enq_multi(queue, (void **)event_hdr, num, &nbr))
+ return nbr;
+
+ return odp_pktout_send(_odp_queue_fn->get_pktout(queue), (odp_packet_t *)event_hdr, num);
}
-odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *qentry)
+static _odp_event_hdr_t *pktin_dequeue(odp_queue_t queue)
{
- odp_buffer_hdr_t *buf_hdr;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *event_hdr;
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts;
+ odp_pktin_queue_t pktin_queue = _odp_queue_fn->get_pktin(queue);
+ odp_pktio_t pktio = pktin_queue.pktio;
+ int pktin_index = pktin_queue.index;
+ pktio_entry_t *entry = get_pktio_entry(pktio);
- buf_hdr = queue_deq(qentry);
- if (buf_hdr != NULL)
- return buf_hdr;
+ _ODP_ASSERT(entry != NULL);
- pkts = pktin_recv_buf(qentry->s.pktin, hdr_tbl, QUEUE_MULTI_MAX);
+ if (_odp_queue_fn->orig_deq_multi(queue, &event_hdr, 1) == 1)
+ return event_hdr;
+
+ if (odp_unlikely(entry->state != PKTIO_STATE_STARTED))
+ return 0;
+
+ pkts = pktin_recv_buf(entry, pktin_index, hdr_tbl, QUEUE_MULTI_MAX);
if (pkts <= 0)
return NULL;
- if (pkts > 1)
- queue_enq_multi(qentry, &hdr_tbl[1], pkts - 1);
- buf_hdr = hdr_tbl[0];
- return buf_hdr;
-}
+ if (pkts > 1) {
+ int num_enq;
+ int num = pkts - 1;
-int pktin_enq_multi(queue_entry_t *qentry ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr[] ODP_UNUSED, int num ODP_UNUSED)
-{
- ODP_ABORT("attempted enqueue to a pktin queue");
- return 0;
+ num_enq = odp_queue_enq_multi(queue,
+ (odp_event_t *)&hdr_tbl[1], num);
+
+ if (odp_unlikely(num_enq < num)) {
+ if (odp_unlikely(num_enq < 0))
+ num_enq = 0;
+
+ _ODP_DBG("Interface %s dropped %i packets\n", entry->name, num - num_enq);
+ _odp_event_free_multi(&hdr_tbl[num_enq + 1], num - num_enq);
+ }
+ }
+
+ event_hdr = hdr_tbl[0];
+ return event_hdr;
}
-int pktin_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
+static int pktin_deq_multi(odp_queue_t queue, _odp_event_hdr_t *event_hdr[],
+ int num)
{
int nbr;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts, i, j;
+ odp_pktin_queue_t pktin_queue = _odp_queue_fn->get_pktin(queue);
+ odp_pktio_t pktio = pktin_queue.pktio;
+ int pktin_index = pktin_queue.index;
+ pktio_entry_t *entry = get_pktio_entry(pktio);
+
+ _ODP_ASSERT(entry != NULL);
- nbr = queue_deq_multi(qentry, buf_hdr, num);
+ nbr = _odp_queue_fn->orig_deq_multi(queue, event_hdr, num);
if (odp_unlikely(nbr > num))
- ODP_ABORT("queue_deq_multi req: %d, returned %d\n", num, nbr);
+ _ODP_ABORT("queue_deq_multi req: %d, returned %d\n", num, nbr);
- /** queue already has number of requsted buffers,
+ /** queue already has number of requested buffers,
* do not do receive in that case.
*/
- if (nbr == num)
+ if (nbr == num || odp_unlikely(entry->state != PKTIO_STATE_STARTED))
return nbr;
- pkts = pktin_recv_buf(qentry->s.pktin, hdr_tbl, QUEUE_MULTI_MAX);
+ pkts = pktin_recv_buf(entry, pktin_index, hdr_tbl, QUEUE_MULTI_MAX);
+
if (pkts <= 0)
return nbr;
for (i = 0; i < pkts && nbr < num; i++, nbr++)
- buf_hdr[nbr] = hdr_tbl[i];
+ event_hdr[nbr] = hdr_tbl[i];
/* Queue the rest for later */
for (j = 0; i < pkts; i++, j++)
hdr_tbl[j] = hdr_tbl[i];
- if (j)
- queue_enq_multi(qentry, hdr_tbl, j);
+ if (j) {
+ int num_enq;
+
+ num_enq = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl, j);
+
+ if (odp_unlikely(num_enq < j)) {
+ if (odp_unlikely(num_enq < 0))
+ num_enq = 0;
+
+ _ODP_DBG("Interface %s dropped %i packets\n",
+ entry->name, j - num_enq);
+ _odp_event_free_multi(&event_hdr[num_enq], j - num_enq);
+ }
+ }
+
return nbr;
}
-int sched_cb_pktin_poll(int pktio_index, int num_queue, int index[])
+int _odp_sched_cb_pktin_poll(int pktio_index, int pktin_index,
+ _odp_event_hdr_t *hdr_tbl[], int num)
{
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- int num, idx;
- pktio_entry_t *entry;
- entry = pktio_entry_by_index(pktio_index);
- int state = entry->s.state;
+ pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
+ int state = entry->state;
if (odp_unlikely(state != PKTIO_STATE_STARTED)) {
if (state < PKTIO_STATE_ACTIVE ||
state == PKTIO_STATE_STOP_PENDING)
return -1;
- ODP_DBG("interface not started\n");
+ _ODP_DBG("Interface %s not started\n", entry->name);
return 0;
}
- for (idx = 0; idx < num_queue; idx++) {
- queue_entry_t *qentry;
- odp_queue_t queue;
- odp_pktin_queue_t pktin = entry->s.in_queue[index[idx]].pktin;
-
- num = pktin_recv_buf(pktin, hdr_tbl, QUEUE_MULTI_MAX);
-
- if (num == 0)
- continue;
-
- if (num < 0) {
- ODP_ERR("Packet recv error\n");
- return -1;
- }
-
- queue = entry->s.in_queue[index[idx]].queue;
- qentry = queue_to_qentry(queue);
- queue_enq_multi(qentry, hdr_tbl, num);
- }
-
- return 0;
+ return pktin_recv_buf(entry, pktin_index, hdr_tbl, num);
}
-void sched_cb_pktio_stop_finalize(int pktio_index)
+void _odp_sched_cb_pktio_stop_finalize(int pktio_index)
{
int state;
pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
lock_entry(entry);
- state = entry->s.state;
+ state = entry->state;
if (state != PKTIO_STATE_STOP_PENDING &&
state != PKTIO_STATE_CLOSE_PENDING) {
unlock_entry(entry);
- ODP_ERR("Not in a pending state %i\n", state);
+ _ODP_ERR("Not in a pending state %i\n", state);
return;
}
if (state == PKTIO_STATE_STOP_PENDING)
- entry->s.state = PKTIO_STATE_STOPPED;
+ entry->state = PKTIO_STATE_STOPPED;
else
- entry->s.state = PKTIO_STATE_FREE;
+ entry->state = PKTIO_STATE_FREE;
unlock_entry(entry);
}
-int sched_cb_num_pktio(void)
-{
- return ODP_CONFIG_PKTIO_ENTRIES;
-}
-
-uint32_t odp_pktio_mtu(odp_pktio_t hdl)
+static inline uint32_t pktio_maxlen(odp_pktio_t hdl)
{
pktio_entry_t *entry;
uint32_t ret = 0;
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return 0;
}
@@ -769,13 +1062,89 @@ uint32_t odp_pktio_mtu(odp_pktio_t hdl)
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_DBG("already freed pktio\n");
return 0;
}
- if (entry->s.ops->mtu_get)
- ret = entry->s.ops->mtu_get(entry);
+ if (entry->ops->maxlen_get)
+ ret = entry->ops->maxlen_get(entry);
+
+ unlock_entry(entry);
+ return ret;
+}
+
+uint32_t odp_pktin_maxlen(odp_pktio_t pktio)
+{
+ return pktio_maxlen(pktio);
+}
+
+uint32_t odp_pktout_maxlen(odp_pktio_t pktio)
+{
+ return pktio_maxlen(pktio);
+}
+
+int odp_pktio_maxlen_set(odp_pktio_t hdl, uint32_t maxlen_input,
+ uint32_t maxlen_output)
+{
+ odp_pktio_capability_t capa;
+ pktio_entry_t *entry;
+ int ret = 0;
+
+ entry = get_pktio_entry(hdl);
+ if (entry == NULL) {
+ _ODP_ERR("Pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
+ return -1;
+ }
+
+ ret = odp_pktio_capability(hdl, &capa);
+ if (ret) {
+ _ODP_ERR("Reading pktio capability failed\n");
+ goto fail;
+ }
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ _ODP_ERR("Pktio already freed\n");
+ ret = -1;
+ goto fail;
+ }
+ if (entry->state == PKTIO_STATE_STARTED) {
+ _ODP_ERR("Pktio not stopped\n");
+ ret = -1;
+ goto fail;
+ }
+
+ if (capa.set_op.op.maxlen == 0) {
+ _ODP_ERR("Setting maximum frame length not supported\n");
+ ret = -1;
+ goto fail;
+ }
+
+ if (capa.maxlen.equal && (maxlen_input != maxlen_output)) {
+ _ODP_ERR("Max input and output lengths don't match\n");
+ ret = -1;
+ goto fail;
+ }
+
+ if (maxlen_input < capa.maxlen.min_input ||
+ maxlen_input > capa.maxlen.max_input) {
+ _ODP_ERR("Invalid max input length value: %" PRIu32 "\n", maxlen_input);
+ ret = -1;
+ goto fail;
+ }
+
+ if (maxlen_output < capa.maxlen.min_output ||
+ maxlen_output > capa.maxlen.max_output) {
+ _ODP_ERR("Invalid max output length value: %" PRIu32 "\n", maxlen_output);
+ ret = -1;
+ goto fail;
+ }
+
+ if (entry->ops->maxlen_set)
+ ret = entry->ops->maxlen_set(entry, maxlen_input, maxlen_output);
+
+fail:
unlock_entry(entry);
return ret;
}
@@ -787,7 +1156,7 @@ int odp_pktio_promisc_mode_set(odp_pktio_t hdl, odp_bool_t enable)
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return -1;
}
@@ -795,16 +1164,16 @@ int odp_pktio_promisc_mode_set(odp_pktio_t hdl, odp_bool_t enable)
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_DBG("already freed pktio\n");
return -1;
}
- if (entry->s.state == PKTIO_STATE_STARTED) {
+ if (entry->state == PKTIO_STATE_STARTED) {
unlock_entry(entry);
return -1;
}
- if (entry->s.ops->promisc_mode_set)
- ret = entry->s.ops->promisc_mode_set(entry, enable);
+ if (entry->ops->promisc_mode_set)
+ ret = entry->ops->promisc_mode_set(entry, enable);
unlock_entry(entry);
return ret;
@@ -817,7 +1186,7 @@ int odp_pktio_promisc_mode(odp_pktio_t hdl)
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return -1;
}
@@ -825,12 +1194,12 @@ int odp_pktio_promisc_mode(odp_pktio_t hdl)
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_DBG("already freed pktio\n");
return -1;
}
- if (entry->s.ops->promisc_mode_get)
- ret = entry->s.ops->promisc_mode_get(entry);
+ if (entry->ops->promisc_mode_get)
+ ret = entry->ops->promisc_mode_get(entry);
unlock_entry(entry);
return ret;
@@ -848,7 +1217,7 @@ int odp_pktio_mac_addr(odp_pktio_t hdl, void *mac_addr, int addr_size)
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return -1;
}
@@ -856,14 +1225,14 @@ int odp_pktio_mac_addr(odp_pktio_t hdl, void *mac_addr, int addr_size)
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_DBG("already freed pktio\n");
return -1;
}
- if (entry->s.ops->mac_get) {
- ret = entry->s.ops->mac_get(entry, mac_addr);
+ if (entry->ops->mac_get) {
+ ret = entry->ops->mac_get(entry, mac_addr);
} else {
- ODP_DBG("pktio does not support mac addr get\n");
+ _ODP_DBG("pktio does not support mac addr get\n");
ret = -1;
}
unlock_entry(entry);
@@ -871,14 +1240,19 @@ int odp_pktio_mac_addr(odp_pktio_t hdl, void *mac_addr, int addr_size)
return ret;
}
-int odp_pktio_link_status(odp_pktio_t hdl)
+int odp_pktio_mac_addr_set(odp_pktio_t hdl, const void *mac_addr, int addr_size)
{
pktio_entry_t *entry;
int ret = -1;
+ if (addr_size < ETH_ALEN) {
+ /* Input buffer too small */
+ return -1;
+ }
+
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return -1;
}
@@ -886,12 +1260,43 @@ int odp_pktio_link_status(odp_pktio_t hdl)
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_DBG("already freed pktio\n");
return -1;
}
- if (entry->s.ops->link_status)
- ret = entry->s.ops->link_status(entry);
+ if (entry->state == PKTIO_STATE_STARTED) {
+ unlock_entry(entry);
+ return -1;
+ }
+
+ if (entry->ops->mac_set)
+ ret = entry->ops->mac_set(entry, mac_addr);
+
+ unlock_entry(entry);
+ return ret;
+}
+
+odp_pktio_link_status_t odp_pktio_link_status(odp_pktio_t hdl)
+{
+ pktio_entry_t *entry;
+ int ret = ODP_PKTIO_LINK_STATUS_UNKNOWN;
+
+ entry = get_pktio_entry(hdl);
+ if (entry == NULL) {
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
+ return ODP_PKTIO_LINK_STATUS_UNKNOWN;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_DBG("already freed pktio\n");
+ return ODP_PKTIO_LINK_STATUS_UNKNOWN;
+ }
+
+ if (entry->ops->link_status)
+ ret = entry->ops->link_status(entry);
unlock_entry(entry);
return ret;
@@ -923,6 +1328,11 @@ void odp_pktout_queue_param_init(odp_pktout_queue_param_t *param)
void odp_pktio_config_init(odp_pktio_config_t *config)
{
memset(config, 0, sizeof(odp_pktio_config_t));
+
+ config->parser.layer = ODP_PROTO_LAYER_ALL;
+ config->reassembly.max_num_frags = 2;
+ config->flow_control.pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ config->flow_control.pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
}
int odp_pktio_info(odp_pktio_t hdl, odp_pktio_info_t *info)
@@ -932,63 +1342,93 @@ int odp_pktio_info(odp_pktio_t hdl, odp_pktio_info_t *info)
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return -1;
}
memset(info, 0, sizeof(odp_pktio_info_t));
- info->name = entry->s.name;
- info->drv_name = entry->s.ops->name;
- info->pool = entry->s.pool;
- memcpy(&info->param, &entry->s.param, sizeof(odp_pktio_param_t));
+ info->name = entry->full_name;
+ info->drv_name = entry->ops->name;
+ info->pool = entry->pool;
+ memcpy(&info->param, &entry->param, sizeof(odp_pktio_param_t));
return 0;
}
-int odp_pktio_index(odp_pktio_t pktio)
+int odp_pktio_link_info(odp_pktio_t hdl, odp_pktio_link_info_t *info)
{
- pktio_entry_t *entry = get_pktio_entry(pktio);
+ pktio_entry_t *entry;
+
+ entry = get_pktio_entry(hdl);
- if (!entry || is_free(entry))
+ if (entry == NULL) {
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return -1;
+ }
+
+ if (entry->ops->link_info)
+ return entry->ops->link_info(entry, info);
- return pktio_to_id(pktio);
+ return -1;
}
-uint64_t odp_pktin_ts_res(odp_pktio_t hdl)
+uint64_t odp_pktio_ts_res(odp_pktio_t hdl)
{
pktio_entry_t *entry;
entry = get_pktio_entry(hdl);
-
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return 0;
}
- if (entry->s.ops->pktin_ts_res)
- return entry->s.ops->pktin_ts_res(entry);
+ if (entry->ops->pktio_ts_res)
+ return entry->ops->pktio_ts_res(entry);
return odp_time_global_res();
}
-odp_time_t odp_pktin_ts_from_ns(odp_pktio_t hdl, uint64_t ns)
+odp_time_t odp_pktio_ts_from_ns(odp_pktio_t hdl, uint64_t ns)
{
pktio_entry_t *entry;
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return ODP_TIME_NULL;
}
- if (entry->s.ops->pktin_ts_from_ns)
- return entry->s.ops->pktin_ts_from_ns(entry, ns);
+ if (entry->ops->pktio_ts_from_ns)
+ return entry->ops->pktio_ts_from_ns(entry, ns);
return odp_time_global_from_ns(ns);
}
+odp_time_t odp_pktio_time(odp_pktio_t hdl, odp_time_t *global_ts)
+{
+ pktio_entry_t *entry;
+ odp_time_t ts;
+
+ entry = get_pktio_entry(hdl);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
+ return ODP_TIME_NULL;
+ }
+
+ /* Callback if present */
+ if (entry->ops->pktio_time)
+ return entry->ops->pktio_time(entry, global_ts);
+
+ /* By default both Packet IO time source and
+ * global time source are same.
+ */
+ ts = odp_time_global();
+ if (global_ts)
+ *global_ts = ts;
+ return ts;
+}
+
void odp_pktio_print(odp_pktio_t hdl)
{
pktio_entry_t *entry;
@@ -1001,35 +1441,42 @@ void odp_pktio_print(odp_pktio_t hdl)
entry = get_pktio_entry(hdl);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", hdl);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
return;
}
+ len += snprintf(&str[len], n - len, "Pktio info\n----------\n");
len += snprintf(&str[len], n - len,
- "pktio\n");
+ " name %s\n", entry->name);
len += snprintf(&str[len], n - len,
- " handle %" PRIu64 "\n",
- odp_pktio_to_u64(hdl));
+ " type %s\n", entry->ops->name);
+ len += snprintf(&str[len], n - len,
+ " index %i\n", odp_pktio_index(hdl));
len += snprintf(&str[len], n - len,
- " name %s\n", entry->s.name);
+ " handle 0x%" PRIx64 "\n",
+ odp_pktio_to_u64(hdl));
len += snprintf(&str[len], n - len,
- " type %s\n", entry->s.ops->name);
+ " pool handle 0x%" PRIx64 "\n",
+ odp_pool_to_u64(entry->pool));
len += snprintf(&str[len], n - len,
" state %s\n",
- entry->s.state == PKTIO_STATE_STARTED ? "start" :
- (entry->s.state == PKTIO_STATE_STOPPED ? "stop" :
- (entry->s.state == PKTIO_STATE_STOP_PENDING ?
+ entry->state == PKTIO_STATE_STARTED ? "start" :
+ (entry->state == PKTIO_STATE_STOPPED ? "stop" :
+ (entry->state == PKTIO_STATE_STOP_PENDING ?
"stop pending" :
- (entry->s.state == PKTIO_STATE_OPENED ? "opened" :
- "unknown"))));
+ (entry->state == PKTIO_STATE_OPENED ? "opened" :
+ "unknown"))));
memset(addr, 0, sizeof(addr));
odp_pktio_mac_addr(hdl, addr, ETH_ALEN);
len += snprintf(&str[len], n - len,
" mac %02x:%02x:%02x:%02x:%02x:%02x\n",
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
len += snprintf(&str[len], n - len,
- " mtu %" PRIu32 "\n",
- odp_pktio_mtu(hdl));
+ " pktin maxlen %" PRIu32 "\n",
+ odp_pktin_maxlen(hdl));
+ len += snprintf(&str[len], n - len,
+ " pktout maxlen %" PRIu32 "\n",
+ odp_pktout_maxlen(hdl));
len += snprintf(&str[len], n - len,
" promisc %s\n",
odp_pktio_promisc_mode(hdl) ? "yes" : "no");
@@ -1043,54 +1490,61 @@ void odp_pktio_print(odp_pktio_t hdl)
str[len] = '\0';
- ODP_PRINT("\n%s", str);
+ _ODP_PRINT("\n%s", str);
- if (entry->s.ops->print)
- entry->s.ops->print(entry);
+ if (entry->ops->print)
+ entry->ops->print(entry);
- ODP_PRINT("\n");
+ _ODP_PRINT("\n");
}
-int odp_pktio_term_global(void)
+int _odp_pktio_term_global(void)
{
+ odp_shm_t shm;
+ int i, pktio_if;
int ret = 0;
- int i;
- int pktio_if;
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ if (pktio_global == NULL)
+ return 0;
+
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
pktio_entry_t *pktio_entry;
- pktio_entry = &pktio_tbl->entries[i];
+ pktio_entry = &pktio_global->entries[i];
if (is_free(pktio_entry))
continue;
lock_entry(pktio_entry);
- if (pktio_entry->s.state == PKTIO_STATE_STARTED) {
+ if (pktio_entry->state == PKTIO_STATE_STARTED) {
ret = _pktio_stop(pktio_entry);
if (ret)
- ODP_ABORT("unable to stop pktio %s\n",
- pktio_entry->s.name);
+ _ODP_ABORT("unable to stop pktio %s\n", pktio_entry->name);
}
- if (pktio_entry->s.state != PKTIO_STATE_CLOSE_PENDING)
+ if (pktio_entry->state != PKTIO_STATE_CLOSE_PENDING)
ret = _pktio_close(pktio_entry);
if (ret)
- ODP_ABORT("unable to close pktio %s\n",
- pktio_entry->s.name);
+ _ODP_ABORT("unable to close pktio %s\n", pktio_entry->name);
unlock_entry(pktio_entry);
}
- for (pktio_if = 0; pktio_if_ops[pktio_if]; ++pktio_if) {
- if (pktio_if_ops[pktio_if]->term)
- if (pktio_if_ops[pktio_if]->term())
- ODP_ABORT("failed to terminate pktio type %d",
- pktio_if);
+ for (pktio_if = 0; _odp_pktio_if_ops[pktio_if]; ++pktio_if) {
+ if (_odp_pktio_if_ops[pktio_if]->term)
+ if (_odp_pktio_if_ops[pktio_if]->term())
+ _ODP_ABORT("failed to terminate pktio type %d", pktio_if);
+ }
+
+ if (_ODP_PCAPNG) {
+ ret = _odp_pcapng_term_global();
+ if (ret)
+ _ODP_ERR("Failed to terminate pcapng\n");
}
- ret = odp_shm_free(odp_shm_lookup("odp_pktio_entries"));
+ shm = pktio_global->shm;
+ ret = odp_shm_free(shm);
if (ret != 0)
- ODP_ERR("shm free failed for odp_pktio_entries");
+ _ODP_ERR("shm free failed\n");
return ret;
}
@@ -1098,22 +1552,84 @@ int odp_pktio_term_global(void)
int odp_pktio_capability(odp_pktio_t pktio, odp_pktio_capability_t *capa)
{
pktio_entry_t *entry;
+ int ret;
+ uint32_t mtu;
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- if (entry->s.ops->capability)
- return entry->s.ops->capability(entry, capa);
+ ret = entry->ops->capability(entry, capa);
+
+ if (ret) {
+ _ODP_ERR("Driver specific capa query failed: %s\n", entry->name);
+ return -1;
+ }
+
+ mtu = pktio_maxlen(pktio);
+
+ if (mtu == 0) {
+ _ODP_ERR("MTU query failed: %s\n", entry->name);
+ return -1;
+ }
+
+ /* The same parser is used for all pktios */
+ capa->config.parser.layer = ODP_PROTO_LAYER_ALL;
+ /* Header skip is not supported */
+ capa->set_op.op.skip_offset = 0;
+ /* Irrespective of whether we optimize the fast path or not,
+ * we can report that it is supported.
+ */
+ capa->config.pktout.bit.no_packet_refs = 1;
+
+ /* LSO implementation is common to all pktios */
+ capa->lso.max_profiles = PKTIO_LSO_PROFILES;
+ capa->lso.max_profiles_per_pktio = PKTIO_LSO_PROFILES;
+ capa->lso.max_packet_segments = PKT_MAX_SEGS;
+ capa->lso.max_segments = PKTIO_LSO_MAX_SEGMENTS;
+ capa->lso.max_payload_len = mtu - PKTIO_LSO_MIN_PAYLOAD_OFFSET;
+ capa->lso.max_payload_offset = PKTIO_LSO_MAX_PAYLOAD_OFFSET;
+ capa->lso.max_num_custom = ODP_LSO_MAX_CUSTOM;
+ capa->lso.proto.ipv4 = 1;
+ capa->lso.proto.custom = 1;
+ capa->lso.mod_op.add_segment_num = 1;
+
+ capa->tx_compl.queue_type_sched = 1;
+ capa->tx_compl.queue_type_plain = 1;
+ capa->tx_compl.max_compl_id = UINT32_MAX - 1;
+
+ capa->config.pktout.bit.aging_ena = 1;
+ capa->max_tx_aging_tmo_ns = MAX_TX_AGING_TMO_NS;
+
+ /* Packet vector generation is common for all pktio types */
+ if (entry->param.in_mode == ODP_PKTIN_MODE_QUEUE ||
+ entry->param.in_mode == ODP_PKTIN_MODE_SCHED) {
+ capa->vector.supported = ODP_SUPPORT_YES;
+ capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.min_size = 1;
+ capa->vector.max_tmo_ns = 0;
+ capa->vector.min_tmo_ns = 0;
+ }
+
+ capa->reassembly.ip = false;
+ capa->reassembly.ipv4 = false;
+ capa->reassembly.ipv6 = false;
+ capa->flow_control.pause_rx = 0;
+ capa->flow_control.pfc_rx = 0;
+ capa->flow_control.pause_tx = 0;
+ capa->flow_control.pfc_tx = 0;
- return single_capability(capa);
+ return 0;
}
-unsigned odp_pktio_max_index(void)
+ODP_STATIC_ASSERT(CONFIG_PKTIO_ENTRIES - 1 <= ODP_PKTIO_MAX_INDEX,
+ "CONFIG_PKTIO_ENTRIES larger than ODP_PKTIO_MAX_INDEX");
+
+unsigned int odp_pktio_max_index(void)
{
- return ODP_CONFIG_PKTIO_ENTRIES - 1;
+ return CONFIG_PKTIO_ENTRIES - 1;
}
int odp_pktio_stats(odp_pktio_t pktio,
@@ -1124,7 +1640,7 @@ int odp_pktio_stats(odp_pktio_t pktio,
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
@@ -1132,12 +1648,17 @@ int odp_pktio_stats(odp_pktio_t pktio,
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_DBG("already freed pktio\n");
return -1;
}
- if (entry->s.ops->stats)
- ret = entry->s.ops->stats(entry, stats);
+ if (entry->ops->stats)
+ ret = entry->ops->stats(entry, stats);
+ if (odp_likely(ret == 0)) {
+ stats->in_discards += odp_atomic_load_u64(&entry->stats_extra.in_discards);
+ stats->in_errors += odp_atomic_load_u64(&entry->stats_extra.in_errors);
+ stats->out_discards += odp_atomic_load_u64(&entry->stats_extra.out_discards);
+ }
unlock_entry(entry);
return ret;
@@ -1150,7 +1671,241 @@ int odp_pktio_stats_reset(odp_pktio_t pktio)
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_DBG("already freed pktio\n");
+ return -1;
+ }
+
+ odp_atomic_store_u64(&entry->stats_extra.in_discards, 0);
+ odp_atomic_store_u64(&entry->stats_extra.in_errors, 0);
+ odp_atomic_store_u64(&entry->stats_extra.out_discards, 0);
+ if (entry->ops->stats)
+ ret = entry->ops->stats_reset(entry);
+ unlock_entry(entry);
+
+ return ret;
+}
+
+/* Read per-queue input statistics for a direct-mode pktin queue.
+ *
+ * queue: packet input queue handle (carries both the pktio handle and the
+ *        queue index).
+ * stats: caller-provided output buffer, filled by the driver callback.
+ *
+ * Returns 0 on success, -1 on failure. Fails when the pktio entry does not
+ * exist, has already been freed, the interface is not in
+ * ODP_PKTIN_MODE_DIRECT, or the driver provides no pktin_queue_stats
+ * callback (ret keeps its initial -1 in that case). The entry lock is held
+ * around the free-check and the driver call. */
+int odp_pktin_queue_stats(odp_pktin_queue_t queue,
+ odp_pktin_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+ int ret = -1;
+
+ entry = get_pktio_entry(queue.pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)queue.pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ /* Entry may have been closed between lookup and lock */
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ /* This API variant is only valid for direct (poll) input mode */
+ mode = entry->param.in_mode;
+ if (odp_unlikely(mode != ODP_PKTIN_MODE_DIRECT)) {
+ unlock_entry(entry);
+ _ODP_ERR("invalid packet input mode: %d\n", mode);
+ return -1;
+ }
+
+ /* Statistics are driver specific; optional callback */
+ if (entry->ops->pktin_queue_stats)
+ ret = entry->ops->pktin_queue_stats(entry, queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+/* Read per-queue input statistics for an event-mode (sched/queue) pktin
+ * queue, identified by its event queue handle.
+ *
+ * pktio: packet IO interface the queue belongs to.
+ * queue: event queue handle; mapped back to the underlying pktin queue via
+ *        _odp_queue_fn->get_pktin().
+ * stats: caller-provided output buffer, filled by the driver callback.
+ *
+ * Returns 0 on success, -1 on failure (missing/freed entry, input mode other
+ * than SCHED/QUEUE, or no driver pktin_queue_stats callback).
+ * NOTE(review): the code does not verify that 'queue' actually belongs to
+ * 'pktio' before using the mapped index — presumably the caller guarantees
+ * this; confirm against the API contract. */
+int odp_pktin_event_queue_stats(odp_pktio_t pktio, odp_queue_t queue,
+ odp_pktin_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+ odp_pktin_queue_t pktin_queue;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ /* Entry may have been closed between lookup and lock */
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ /* This API variant is only valid for event based input modes */
+ mode = entry->param.in_mode;
+ if (odp_unlikely(mode != ODP_PKTIN_MODE_SCHED && mode != ODP_PKTIN_MODE_QUEUE)) {
+ unlock_entry(entry);
+ _ODP_ERR("invalid packet input mode: %d\n", mode);
+ return -1;
+ }
+
+ /* Resolve the pktin queue (pktio handle + index) behind the event queue */
+ pktin_queue = _odp_queue_fn->get_pktin(queue);
+
+ if (entry->ops->pktin_queue_stats)
+ ret = entry->ops->pktin_queue_stats(entry, pktin_queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+/* Read per-queue output statistics for a direct-mode pktout queue.
+ *
+ * queue: packet output queue handle (carries the pktio handle and the
+ *        queue index).
+ * stats: caller-provided output buffer, filled by the driver callback.
+ *
+ * Returns 0 on success, -1 on failure (missing/freed entry, output mode
+ * other than ODP_PKTOUT_MODE_DIRECT, or no driver pktout_queue_stats
+ * callback). Mirrors odp_pktin_queue_stats() for the output direction. */
+int odp_pktout_queue_stats(odp_pktout_queue_t queue,
+ odp_pktout_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktout_mode_t mode;
+ int ret = -1;
+
+ entry = get_pktio_entry(queue.pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)queue.pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ /* Entry may have been closed between lookup and lock */
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ /* This API variant is only valid for direct output mode */
+ mode = entry->param.out_mode;
+ if (odp_unlikely(mode != ODP_PKTOUT_MODE_DIRECT)) {
+ unlock_entry(entry);
+ _ODP_ERR("invalid packet output mode: %d\n", mode);
+ return -1;
+ }
+
+ if (entry->ops->pktout_queue_stats)
+ ret = entry->ops->pktout_queue_stats(entry, queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+/* Read per-queue output statistics for a queue-mode pktout queue,
+ * identified by its event queue handle.
+ *
+ * pktio: packet IO interface the queue belongs to.
+ * queue: event queue handle; mapped back to the underlying pktout queue via
+ *        _odp_queue_fn->get_pktout().
+ * stats: caller-provided output buffer, filled by the driver callback.
+ *
+ * Returns 0 on success, -1 on failure (missing/freed entry, output mode
+ * other than ODP_PKTOUT_MODE_QUEUE, or no driver pktout_queue_stats
+ * callback).
+ * NOTE(review): as with the pktin variant, 'queue' is not checked to belong
+ * to 'pktio' — confirm the API contract places that burden on the caller. */
+int odp_pktout_event_queue_stats(odp_pktio_t pktio, odp_queue_t queue,
+ odp_pktout_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktout_mode_t mode;
+ odp_pktout_queue_t pktout_queue;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ /* Entry may have been closed between lookup and lock */
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ /* This API variant is only valid for queued output mode */
+ mode = entry->param.out_mode;
+ if (odp_unlikely(mode != ODP_PKTOUT_MODE_QUEUE)) {
+ unlock_entry(entry);
+ _ODP_ERR("invalid packet output mode: %d\n", mode);
+ return -1;
+ }
+
+ /* Resolve the pktout queue (pktio handle + index) behind the event queue */
+ pktout_queue = _odp_queue_fn->get_pktout(queue);
+
+ if (entry->ops->pktout_queue_stats)
+ ret = entry->ops->pktout_queue_stats(entry, pktout_queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+/* Query driver specific extra statistics counter descriptions.
+ *
+ * pktio: packet IO interface handle.
+ * info:  output array of counter descriptions (may be NULL to only query the
+ *        count — see odp_pktio_extra_stats_print() which calls it that way).
+ * num:   capacity of 'info'.
+ *
+ * Returns the driver callback's result (by convention the number of
+ * counters), 0 when the driver has no extra_stat_info callback, or -1 when
+ * the entry is missing or already freed. */
+int odp_pktio_extra_stat_info(odp_pktio_t pktio,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ pktio_entry_t *entry;
+ int ret = 0;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ /* Entry may have been closed between lookup and lock */
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_ERR("already freed pktio\n");
+ return -1;
+ }
+
+ /* Extra statistics are optional; default to 0 (none available) */
+ if (entry->ops->extra_stat_info)
+ ret = entry->ops->extra_stat_info(entry, info, num);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+/* Read driver specific extra statistics counter values.
+ *
+ * pktio: packet IO interface handle.
+ * stats: output array of counter values (may be NULL to only query the
+ *        count, matching odp_pktio_extra_stat_info()).
+ * num:   capacity of 'stats'.
+ *
+ * Returns the driver callback's result (by convention the number of
+ * counters), 0 when the driver has no extra_stats callback, or -1 when the
+ * entry is missing or already freed. Companion of
+ * odp_pktio_extra_stat_info(); both arrays are indexed identically. */
+int odp_pktio_extra_stats(odp_pktio_t pktio, uint64_t stats[], int num)
+{
+ pktio_entry_t *entry;
+ int ret = 0;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ /* Entry may have been closed between lookup and lock */
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ _ODP_ERR("already freed pktio\n");
+ return -1;
+ }
+
+ /* Extra statistics are optional; default to 0 (none available) */
+ if (entry->ops->extra_stats)
+ ret = entry->ops->extra_stats(entry, stats, num);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktio_extra_stat_counter(odp_pktio_t pktio, uint32_t id, uint64_t *stat)
+{
+ pktio_entry_t *entry;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
@@ -1158,28 +1913,66 @@ int odp_pktio_stats_reset(odp_pktio_t pktio)
if (odp_unlikely(is_free(entry))) {
unlock_entry(entry);
- ODP_DBG("already freed pktio\n");
+ _ODP_ERR("already freed pktio\n");
return -1;
}
- if (entry->s.ops->stats)
- ret = entry->s.ops->stats_reset(entry);
+ if (entry->ops->extra_stat_counter)
+ ret = entry->ops->extra_stat_counter(entry, id, stat);
+
unlock_entry(entry);
return ret;
}
-int odp_pktin_queue_config(odp_pktio_t pktio,
- const odp_pktin_queue_param_t *param)
+/* Print all extra statistics counters of an interface as "name=value" lines.
+ *
+ * Queries counter counts first (NULL array, num 0), allocates matching VLAs,
+ * then re-queries names and values. Silently returns when the interface has
+ * no extra statistics or a query fails; logs an error when the info and
+ * value counts disagree. Counts are re-checked after the second query pair
+ * because the driver is called twice and could report different sizes in
+ * between. */
+void odp_pktio_extra_stats_print(odp_pktio_t pktio)
+{
+ int num_info, num_stats, i;
+
+ /* First pass: only discover how many counters exist */
+ num_info = odp_pktio_extra_stat_info(pktio, NULL, 0);
+ if (num_info <= 0)
+ return;
+
+ num_stats = odp_pktio_extra_stats(pktio, NULL, 0);
+ if (num_stats <= 0)
+ return;
+
+ if (num_info != num_stats) {
+ _ODP_ERR("extra statistics info counts not matching\n");
+ return;
+ }
+
+ /* VLAs sized from the driver reported count */
+ odp_pktio_extra_stat_info_t stats_info[num_stats];
+ uint64_t extra_stats[num_stats];
+
+ /* Second pass: fetch names and values */
+ num_info = odp_pktio_extra_stat_info(pktio, stats_info, num_stats);
+ if (num_info <= 0)
+ return;
+
+ num_stats = odp_pktio_extra_stats(pktio, extra_stats, num_stats);
+ if (num_stats <= 0)
+ return;
+
+ if (num_info != num_stats) {
+ _ODP_ERR("extra statistics info counts not matching\n");
+ return;
+ }
+
+ _ODP_PRINT("Pktio extra statistics\n----------------------\n");
+ for (i = 0; i < num_stats; i++)
+ _ODP_PRINT("  %s=%" PRIu64 "\n", stats_info[i].name, extra_stats[i]);
+ _ODP_PRINT("\n");
+}
+
+int odp_pktin_queue_config(odp_pktio_t pktio, const odp_pktin_queue_param_t *param)
{
pktio_entry_t *entry;
odp_pktin_mode_t mode;
odp_pktio_capability_t capa;
- unsigned num_queues;
- unsigned i;
+ uint32_t num_queues, i;
int rc;
odp_queue_t queue;
- odp_pktin_queue_param_t default_param;
+ odp_pktin_queue_param_t default_param, local_param;
if (param == NULL) {
odp_pktin_queue_param_init(&default_param);
@@ -1188,56 +1981,141 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- if (entry->s.state == PKTIO_STATE_STARTED) {
- ODP_DBG("pktio %s: not stopped\n", entry->s.name);
+ if (entry->state == PKTIO_STATE_STARTED) {
+ _ODP_ERR("pktio %s: not stopped\n", entry->name);
return -1;
}
- mode = entry->s.param.in_mode;
+ mode = entry->param.in_mode;
/* Ignore the call when packet input is disabled. */
if (mode == ODP_PKTIN_MODE_DISABLED)
return 0;
- num_queues = param->num_queues;
-
- if (num_queues == 0) {
- ODP_DBG("pktio %s: zero input queues\n", entry->s.name);
+ if (!param->classifier_enable && param->num_queues == 0) {
+ _ODP_ERR("invalid num_queues for operation mode\n");
return -1;
}
+ if (param->classifier_enable) {
+ num_queues = 1;
+
+ if (param->num_queues != num_queues) {
+ /* When classifier is enabled, ensure that only one input queue will be
+ * configured by driver. */
+ memcpy(&local_param, param, sizeof(odp_pktin_queue_param_t));
+ local_param.num_queues = num_queues;
+ param = &local_param;
+ }
+ } else {
+ num_queues = param->num_queues;
+ }
+
rc = odp_pktio_capability(pktio, &capa);
if (rc) {
- ODP_DBG("pktio %s: unable to read capabilities\n",
- entry->s.name);
+ _ODP_ERR("pktio %s: unable to read capabilities\n", entry->name);
return -1;
}
+ entry->enabled.cls = !!param->classifier_enable;
+
if (num_queues > capa.max_input_queues) {
- ODP_DBG("pktio %s: too many input queues\n", entry->s.name);
+ _ODP_ERR("pktio %s: too many input queues\n", entry->name);
return -1;
}
+ /* Check input queue sizes in direct mode */
+ for (i = 0; i < num_queues && mode == ODP_PKTIN_MODE_DIRECT; i++) {
+ uint32_t queue_size = param->queue_size[i];
+
+ if (queue_size == 0)
+ continue;
+
+ if (capa.max_input_queue_size == 0) {
+ _ODP_ERR("pktio %s: configuring input queue size not supported\n",
+ entry->name);
+ return -1;
+ }
+ if (queue_size < capa.min_input_queue_size) {
+ _ODP_ERR("pktio %s: input queue size too small\n", entry->name);
+ return -1;
+ }
+ if (queue_size > capa.max_input_queue_size) {
+ _ODP_ERR("pktio %s: input queue size too large\n", entry->name);
+ return -1;
+ }
+ }
+
+ /* Validate packet vector parameters */
+ if (param->vector.enable) {
+ odp_pool_t pool = param->vector.pool;
+ odp_pool_info_t pool_info;
+
+ if (mode == ODP_PKTIN_MODE_DIRECT) {
+ _ODP_ERR("packet vectors not supported with ODP_PKTIN_MODE_DIRECT\n");
+ return -1;
+ }
+ if (param->vector.max_size < capa.vector.min_size) {
+ _ODP_ERR("vector.max_size too small %" PRIu32 "\n",
+ param->vector.max_size);
+ return -1;
+ }
+ if (param->vector.max_size > capa.vector.max_size) {
+ _ODP_ERR("vector.max_size too large %" PRIu32 "\n",
+ param->vector.max_size);
+ return -1;
+ }
+ if (param->vector.max_tmo_ns > capa.vector.max_tmo_ns) {
+ _ODP_ERR("vector.max_tmo_ns too large %" PRIu64 "\n",
+ param->vector.max_tmo_ns);
+ return -1;
+ }
+
+ if (pool == ODP_POOL_INVALID || odp_pool_info(pool, &pool_info)) {
+ _ODP_ERR("invalid packet vector pool\n");
+ return -1;
+ }
+ if (pool_info.params.type != ODP_POOL_VECTOR) {
+ _ODP_ERR("wrong pool type\n");
+ return -1;
+ }
+ if (param->vector.max_size > pool_info.params.vector.max_size) {
+ _ODP_ERR("vector.max_size larger than pool max vector size\n");
+ return -1;
+ }
+ }
+
/* If re-configuring, destroy old queues */
- if (entry->s.num_in_queue)
- destroy_in_queues(entry, entry->s.num_in_queue);
+ if (entry->num_in_queue)
+ destroy_in_queues(entry, entry->num_in_queue);
for (i = 0; i < num_queues; i++) {
if (mode == ODP_PKTIN_MODE_QUEUE ||
mode == ODP_PKTIN_MODE_SCHED) {
odp_queue_param_t queue_param;
char name[ODP_QUEUE_NAME_LEN];
- int pktio_id = pktio_to_id(pktio);
+ int pktio_id = odp_pktio_index(pktio);
+ odp_pktin_queue_param_ovr_t *queue_param_ovr = NULL;
+
+ if (param->queue_param_ovr)
+ queue_param_ovr = param->queue_param_ovr + i;
snprintf(name, sizeof(name), "odp-pktin-%i-%i",
pktio_id, i);
- memcpy(&queue_param, &param->queue_param,
- sizeof(odp_queue_param_t));
+ if (param->classifier_enable) {
+ odp_queue_param_init(&queue_param);
+ } else {
+ memcpy(&queue_param, &param->queue_param,
+ sizeof(odp_queue_param_t));
+ if (queue_param_ovr)
+ queue_param.sched.group =
+ queue_param_ovr->group;
+ }
queue_param.type = ODP_QUEUE_TYPE_PLAIN;
@@ -1247,50 +2125,115 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
queue = odp_queue_create(name, &queue_param);
if (queue == ODP_QUEUE_INVALID) {
- ODP_DBG("pktio %s: event queue create failed\n",
- entry->s.name);
+ _ODP_ERR("pktio %s: event queue create failed\n", entry->name);
destroy_in_queues(entry, i + 1);
return -1;
}
- if (mode == ODP_PKTIN_MODE_QUEUE) {
- queue_entry_t *qentry;
+ _odp_queue_fn->set_pktin(queue, pktio, i);
+ if (mode == ODP_PKTIN_MODE_QUEUE)
+ _odp_queue_fn->set_enq_deq_fn(queue,
+ NULL,
+ NULL,
+ pktin_dequeue,
+ pktin_deq_multi);
- qentry = queue_to_qentry(queue);
- qentry->s.pktin.index = i;
- qentry->s.pktin.pktio = pktio;
+ entry->in_queue[i].queue = queue;
- qentry->s.enqueue = pktin_enqueue;
- qentry->s.dequeue = pktin_dequeue;
- qentry->s.enqueue_multi = pktin_enq_multi;
- qentry->s.dequeue_multi = pktin_deq_multi;
- }
-
- entry->s.in_queue[i].queue = queue;
} else {
- entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
+ entry->in_queue[i].queue = ODP_QUEUE_INVALID;
}
- entry->s.in_queue[i].pktin.index = i;
- entry->s.in_queue[i].pktin.pktio = entry->s.handle;
+ entry->in_queue[i].pktin.index = i;
+ entry->in_queue[i].pktin.pktio = entry->handle;
+ entry->in_queue[i].vector = param->vector;
}
- entry->s.num_in_queue = num_queues;
+ entry->num_in_queue = num_queues;
- if (entry->s.ops->input_queues_config)
- return entry->s.ops->input_queues_config(entry, param);
+ if (entry->ops->input_queues_config)
+ return entry->ops->input_queues_config(entry, param);
return 0;
}
+/* Internal helper: configure a single pktout queue for a TM-mode interface
+ * and return its handle.
+ *
+ * pktio_hdl: interface handle; must be in ODP_PKTOUT_MODE_TM.
+ * queue:     output, receives the pktout queue handle on success.
+ * reconf:    when false and output queues already exist, reuse queue 0
+ *            without reconfiguring; when true, tear down and rebuild.
+ *
+ * If the interface is started it is stopped for the reconfiguration and
+ * restarted afterwards (start failure is OR-ed into the return code).
+ * Returns 0 on success, non-zero on failure. */
+int _odp_pktio_pktout_tm_config(odp_pktio_t pktio_hdl,
+ odp_pktout_queue_t *queue, bool reconf)
+{
+ odp_pktout_queue_param_t param;
+ bool pktio_started = false;
+ odp_pktout_mode_t mode;
+ pktio_entry_t *entry;
+ uint32_t i;
+ int rc;
+
+ /* TM always uses exactly one driver-level output queue */
+ odp_pktout_queue_param_init(&param);
+ param.num_queues = 1;
+
+ entry = get_pktio_entry(pktio_hdl);
+ if (entry == NULL) {
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio_hdl);
+ return -1;
+ }
+
+ mode = entry->param.out_mode;
+ /* Don't proceed further if mode is not TM */
+ if (mode != ODP_PKTOUT_MODE_TM)
+ return -1;
+
+ /* Don't reconfigure unless requested */
+ if (entry->num_out_queue && !reconf) {
+ *queue = entry->out_queue[0].pktout;
+ return 0;
+ }
+
+ /* Queue reconfiguration requires a stopped interface */
+ if (entry->state == PKTIO_STATE_STARTED) {
+ pktio_started = true;
+ rc = odp_pktio_stop(pktio_hdl);
+ if (rc) {
+ _ODP_ERR("Unable to stop pktio, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ /* If re-configuring, destroy old queues */
+ if (entry->num_out_queue) {
+ destroy_out_queues(entry, entry->num_out_queue);
+ entry->num_out_queue = 0;
+ }
+
+ init_out_queues(entry);
+ for (i = 0; i < param.num_queues; i++) {
+ entry->out_queue[i].pktout.index = i;
+ entry->out_queue[i].pktout.pktio = pktio_hdl;
+ }
+
+ entry->num_out_queue = param.num_queues;
+
+ rc = 0;
+ if (entry->ops->output_queues_config) {
+ rc = entry->ops->output_queues_config(entry, &param);
+ if (rc)
+ _ODP_ERR("Unable to setup output queues, rc=%d\n", rc);
+ }
+
+ /* Return pktout queue on success */
+ if (!rc)
+ *queue = entry->out_queue[0].pktout;
+
+ /* Take pktio back to its previous state */
+ if (pktio_started)
+ rc |= odp_pktio_start(pktio_hdl);
+ return rc;
+}
+
int odp_pktout_queue_config(odp_pktio_t pktio,
const odp_pktout_queue_param_t *param)
{
pktio_entry_t *entry;
odp_pktout_mode_t mode;
odp_pktio_capability_t capa;
- unsigned num_queues;
- unsigned i;
+ uint32_t num_queues, i;
int rc;
odp_pktout_queue_param_t default_param;
@@ -1301,16 +2244,16 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- if (entry->s.state == PKTIO_STATE_STARTED) {
- ODP_DBG("pktio %s: not stopped\n", entry->s.name);
+ if (entry->state == PKTIO_STATE_STARTED) {
+ _ODP_ERR("pktio %s: not stopped\n", entry->name);
return -1;
}
- mode = entry->s.param.out_mode;
+ mode = entry->param.out_mode;
/* Ignore the call when packet output is disabled, or routed through
* traffic manager. */
@@ -1318,51 +2261,71 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
return 0;
if (mode != ODP_PKTOUT_MODE_DIRECT && mode != ODP_PKTOUT_MODE_QUEUE) {
- ODP_DBG("pktio %s: bad packet output mode\n", entry->s.name);
+ _ODP_ERR("pktio %s: bad packet output mode\n", entry->name);
return -1;
}
num_queues = param->num_queues;
if (num_queues == 0) {
- ODP_DBG("pktio %s: zero output queues\n", entry->s.name);
+ _ODP_ERR("pktio %s: zero output queues\n", entry->name);
return -1;
}
rc = odp_pktio_capability(pktio, &capa);
if (rc) {
- ODP_DBG("pktio %s: unable to read capabilities\n",
- entry->s.name);
+ _ODP_ERR("pktio %s: unable to read capabilities\n", entry->name);
return -1;
}
if (num_queues > capa.max_output_queues) {
- ODP_DBG("pktio %s: too many output queues\n", entry->s.name);
+ _ODP_ERR("pktio %s: too many output queues\n", entry->name);
return -1;
}
+ /* Check output queue sizes */
+ for (i = 0; i < num_queues; i++) {
+ uint32_t queue_size = param->queue_size[i];
+
+ if (queue_size == 0)
+ continue;
+
+ if (capa.max_output_queue_size == 0) {
+ _ODP_ERR("pktio %s: configuring output queue size not supported\n",
+ entry->name);
+ return -1;
+ }
+ if (queue_size < capa.min_output_queue_size) {
+ _ODP_ERR("pktio %s: output queue size too small\n", entry->name);
+ return -1;
+ }
+ if (queue_size > capa.max_output_queue_size) {
+ _ODP_ERR("pktio %s: output queue size too large\n", entry->name);
+ return -1;
+ }
+ }
+
/* If re-configuring, destroy old queues */
- if (entry->s.num_out_queue) {
- destroy_out_queues(entry, entry->s.num_out_queue);
- entry->s.num_out_queue = 0;
+ if (entry->num_out_queue) {
+ destroy_out_queues(entry, entry->num_out_queue);
+ entry->num_out_queue = 0;
}
init_out_queues(entry);
for (i = 0; i < num_queues; i++) {
- entry->s.out_queue[i].pktout.index = i;
- entry->s.out_queue[i].pktout.pktio = pktio;
+ entry->out_queue[i].pktout.index = i;
+ entry->out_queue[i].pktout.pktio = pktio;
}
- entry->s.num_out_queue = num_queues;
+ entry->num_out_queue = num_queues;
if (mode == ODP_PKTOUT_MODE_QUEUE) {
for (i = 0; i < num_queues; i++) {
odp_queue_t queue;
odp_queue_param_t queue_param;
- queue_entry_t *qentry;
char name[ODP_QUEUE_NAME_LEN];
- int pktio_id = pktio_to_id(pktio);
+ int pktio_id = odp_pktio_index(pktio);
snprintf(name, sizeof(name), "odp-pktout-%i-%i",
pktio_id, i);
@@ -1374,28 +2337,26 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
queue = odp_queue_create(name, &queue_param);
if (queue == ODP_QUEUE_INVALID) {
- ODP_DBG("pktout %s: event queue create failed\n",
- entry->s.name);
+ _ODP_ERR("pktout %s: event queue create failed\n", entry->name);
destroy_out_queues(entry, i + 1);
return -1;
}
- qentry = queue_to_qentry(queue);
- qentry->s.pktout.index = i;
- qentry->s.pktout.pktio = pktio;
+ _odp_queue_fn->set_pktout(queue, pktio, i);
/* Override default enqueue / dequeue functions */
- qentry->s.enqueue = pktout_enqueue;
- qentry->s.dequeue = pktout_dequeue;
- qentry->s.enqueue_multi = pktout_enq_multi;
- qentry->s.dequeue_multi = pktout_deq_multi;
+ _odp_queue_fn->set_enq_deq_fn(queue,
+ pktout_enqueue,
+ pktout_enq_multi,
+ NULL,
+ NULL);
- entry->s.out_queue[i].queue = queue;
+ entry->out_queue[i].queue = queue;
}
}
- if (entry->s.ops->output_queues_config)
- return entry->s.ops->output_queues_config(entry, param);
+ if (entry->ops->output_queues_config)
+ return entry->ops->output_queues_config(entry, param);
return 0;
}
@@ -1409,11 +2370,16 @@ int odp_pktin_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ if (num < 0) {
+ _ODP_DBG("Bad param: num %i\n", num);
return -1;
}
- mode = entry->s.param.in_mode;
+ mode = entry->param.in_mode;
if (mode == ODP_PKTIN_MODE_DISABLED)
return 0;
@@ -1422,11 +2388,14 @@ int odp_pktin_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
mode != ODP_PKTIN_MODE_SCHED)
return -1;
- num_queues = entry->s.num_in_queue;
+ num_queues = entry->num_in_queue;
- if (queues && num > 0) {
- for (i = 0; i < num && i < num_queues; i++)
- queues[i] = entry->s.in_queue[i].queue;
+ if (queues) {
+ if (num_queues < num)
+ num = num_queues;
+
+ for (i = 0; i < num; i++)
+ queues[i] = entry->in_queue[i].queue;
}
return num_queues;
@@ -1441,11 +2410,16 @@ int odp_pktin_queue(odp_pktio_t pktio, odp_pktin_queue_t queues[], int num)
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- mode = entry->s.param.in_mode;
+ if (num < 0) {
+ _ODP_DBG("Bad param: num %i\n", num);
+ return -1;
+ }
+
+ mode = entry->param.in_mode;
if (mode == ODP_PKTIN_MODE_DISABLED)
return 0;
@@ -1453,11 +2427,14 @@ int odp_pktin_queue(odp_pktio_t pktio, odp_pktin_queue_t queues[], int num)
if (mode != ODP_PKTIN_MODE_DIRECT)
return -1;
- num_queues = entry->s.num_in_queue;
+ num_queues = entry->num_in_queue;
- if (queues && num > 0) {
- for (i = 0; i < num && i < num_queues; i++)
- queues[i] = entry->s.in_queue[i].pktin;
+ if (queues) {
+ if (num_queues < num)
+ num = num_queues;
+
+ for (i = 0; i < num; i++)
+ queues[i] = entry->in_queue[i].pktin;
}
return num_queues;
@@ -1472,11 +2449,11 @@ int odp_pktout_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- mode = entry->s.param.out_mode;
+ mode = entry->param.out_mode;
if (mode == ODP_PKTOUT_MODE_DISABLED)
return 0;
@@ -1484,11 +2461,11 @@ int odp_pktout_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
if (mode != ODP_PKTOUT_MODE_QUEUE)
return -1;
- num_queues = entry->s.num_out_queue;
+ num_queues = entry->num_out_queue;
if (queues && num > 0) {
for (i = 0; i < num && i < num_queues; i++)
- queues[i] = entry->s.out_queue[i].queue;
+ queues[i] = entry->out_queue[i].queue;
}
return num_queues;
@@ -1503,11 +2480,11 @@ int odp_pktout_queue(odp_pktio_t pktio, odp_pktout_queue_t queues[], int num)
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- mode = entry->s.param.out_mode;
+ mode = entry->param.out_mode;
if (mode == ODP_PKTOUT_MODE_DISABLED)
return 0;
@@ -1515,11 +2492,11 @@ int odp_pktout_queue(odp_pktio_t pktio, odp_pktout_queue_t queues[], int num)
if (mode != ODP_PKTOUT_MODE_DIRECT)
return -1;
- num_queues = entry->s.num_out_queue;
+ num_queues = entry->num_out_queue;
if (queues && num > 0) {
for (i = 0; i < num && i < num_queues; i++)
- queues[i] = entry->s.out_queue[i].pktout;
+ queues[i] = entry->out_queue[i].pktout;
}
return num_queues;
@@ -1529,14 +2506,22 @@ int odp_pktin_recv(odp_pktin_queue_t queue, odp_packet_t packets[], int num)
{
pktio_entry_t *entry;
odp_pktio_t pktio = queue.pktio;
+ int ret;
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
return -1;
}
- return entry->s.ops->recv(entry, queue.index, packets, num);
+ if (odp_unlikely(entry->state != PKTIO_STATE_STARTED))
+ return 0;
+
+ ret = entry->ops->recv(entry, queue.index, packets, num);
+ if (_ODP_PCAPNG)
+ _odp_pcapng_dump_pkts(entry, queue.index, packets, ret);
+
+ return ret;
}
int odp_pktin_recv_tmo(odp_pktin_queue_t queue, odp_packet_t packets[], int num,
@@ -1546,58 +2531,107 @@ int odp_pktin_recv_tmo(odp_pktin_queue_t queue, odp_packet_t packets[], int num,
odp_time_t t1, t2;
struct timespec ts;
int started = 0;
+ uint64_t sleep_round = 0;
+ pktio_entry_t *entry;
ts.tv_sec = 0;
- ts.tv_nsec = SLEEP_NSEC;
+ ts.tv_nsec = 1000 * SLEEP_USEC;
- while (1) {
- ret = odp_pktin_recv(queue, packets, num);
+ entry = get_pktio_entry(queue.pktio);
+ if (entry == NULL) {
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)queue.pktio);
+ return -1;
+ }
- if (ret != 0)
- return ret;
+ if (odp_unlikely(entry->state != PKTIO_STATE_STARTED))
+ return 0;
- if (wait == 0)
- return 0;
+ if (entry->ops->recv_tmo && wait != ODP_PKTIN_NO_WAIT) {
+ ret = entry->ops->recv_tmo(entry, queue.index, packets, num,
+ wait);
+ if (_ODP_PCAPNG)
+ _odp_pcapng_dump_pkts(entry, queue.index, packets, ret);
- if (wait != ODP_PKTIN_WAIT) {
- /* Avoid unnecessary system calls. Record the start time
- * only when needed and after the first call to recv. */
- if (odp_unlikely(!started)) {
- odp_time_t t;
+ return ret;
+ }
- t = odp_time_local_from_ns(wait * SLEEP_NSEC);
- started = 1;
- t1 = odp_time_sum(odp_time_local(), t);
- }
+ while (1) {
+ ret = entry->ops->recv(entry, queue.index, packets, num);
+ if (_ODP_PCAPNG)
+ _odp_pcapng_dump_pkts(entry, queue.index, packets, ret);
- /* Check every SLEEP_CHECK rounds if total wait time
- * has been exceeded. */
- if ((wait & (SLEEP_CHECK - 1)) == 0) {
- t2 = odp_time_local();
+ if (ret != 0 || wait == 0)
+ return ret;
- if (odp_time_cmp(t2, t1) > 0)
- return 0;
- }
+ /* Avoid unnecessary system calls. Record the start time
+ * only when needed and after the first call to recv. */
+ if (odp_unlikely(!started)) {
+ /* Avoid overflow issues for large wait times */
+ if (wait > MAX_WAIT_TIME)
+ wait = MAX_WAIT_TIME;
- wait--;
+ started = 1;
+ t1 = odp_time_add_ns(odp_time_local(), wait * 1000);
}
+ /* Check every SLEEP_CHECK rounds if total wait time
+ * has been exceeded. */
+ if ((++sleep_round & (SLEEP_CHECK - 1)) == 0) {
+ t2 = odp_time_local();
+
+ if (odp_time_cmp(t2, t1) > 0)
+ return 0;
+ }
+ wait = wait > SLEEP_USEC ? wait - SLEEP_USEC : 0;
+
nanosleep(&ts, NULL);
}
}
-int odp_pktin_recv_mq_tmo(const odp_pktin_queue_t queues[], unsigned num_q,
- unsigned *from, odp_packet_t packets[], int num,
- uint64_t wait)
+int odp_pktin_recv_mq_tmo(const odp_pktin_queue_t queues[], uint32_t num_q, uint32_t *from,
+ odp_packet_t packets[], int num, uint64_t wait)
{
- unsigned i;
+ uint32_t i;
int ret;
odp_time_t t1, t2;
struct timespec ts;
int started = 0;
+ uint64_t sleep_round = 0;
+ int trial_successful = 0;
+ uint32_t lfrom = 0;
+
+ for (i = 0; i < num_q; i++) {
+ ret = odp_pktin_recv(queues[i], packets, num);
+
+ if (ret > 0 && from)
+ *from = i;
+
+ if (ret != 0)
+ return ret;
+ }
+
+ if (wait == 0)
+ return 0;
+
+ ret = _odp_sock_recv_mq_tmo_try_int_driven(queues, num_q, &lfrom,
+ packets, num, wait,
+ &trial_successful);
+ if (ret > 0 && from)
+ *from = lfrom;
+ if (trial_successful) {
+ if (_ODP_PCAPNG) {
+ pktio_entry_t *entry;
+
+ entry = get_pktio_entry(queues[lfrom].pktio);
+ if (entry)
+ _odp_pcapng_dump_pkts(entry, lfrom, packets, ret);
+ }
+
+ return ret;
+ }
ts.tv_sec = 0;
- ts.tv_nsec = SLEEP_NSEC;
+ ts.tv_nsec = 1000 * SLEEP_USEC;
while (1) {
for (i = 0; i < num_q; i++) {
@@ -1613,24 +2647,24 @@ int odp_pktin_recv_mq_tmo(const odp_pktin_queue_t queues[], unsigned num_q,
if (wait == 0)
return 0;
- if (wait != ODP_PKTIN_WAIT) {
- if (odp_unlikely(!started)) {
- odp_time_t t;
-
- t = odp_time_local_from_ns(wait * SLEEP_NSEC);
- started = 1;
- t1 = odp_time_sum(odp_time_local(), t);
- }
+ if (odp_unlikely(!started)) {
+ /* Avoid overflow issues for large wait times */
+ if (wait > MAX_WAIT_TIME)
+ wait = MAX_WAIT_TIME;
- if ((wait & (SLEEP_CHECK - 1)) == 0) {
- t2 = odp_time_local();
+ started = 1;
+ t1 = odp_time_add_ns(odp_time_local(), wait * 1000);
+ }
- if (odp_time_cmp(t2, t1) > 0)
- return 0;
- }
+ /* Check every SLEEP_CHECK rounds if total wait time
+ * has been exceeded. */
+ if ((++sleep_round & (SLEEP_CHECK - 1)) == 0) {
+ t2 = odp_time_local();
- wait--;
+ if (odp_time_cmp(t2, t1) > 0)
+ return 0;
}
+ wait = wait > SLEEP_USEC ? wait - SLEEP_USEC : 0;
nanosleep(&ts, NULL);
}
@@ -1641,9 +2675,90 @@ uint64_t odp_pktin_wait_time(uint64_t nsec)
if (nsec == 0)
return 0;
- /* number of nanosleep calls rounded up by one, so that
+ /* number of microseconds rounded up by one, so that
* recv_mq_tmo call waits at least 'nsec' nanoseconds. */
- return (nsec / SLEEP_NSEC) + 1;
+ return (nsec / (1000)) + 1;
+}
+
+/* Record Tx completion state for one packet before it is sent.
+ *
+ * Packets with neither tx_compl_ev nor tx_compl_poll set are skipped.
+ * For event mode a completion buffer is allocated from 'pool'; for poll
+ * mode the per-id status word in 'status_map' is cleared to 0. On success
+ * the entry is appended to 'info' and *num is incremented.
+ *
+ * Returns false only when the event-mode buffer allocation fails, so the
+ * caller can truncate the send at this packet index. */
+static inline odp_bool_t check_tx_compl(const odp_packet_hdr_t *hdr, int pkt_idx,
+					 tx_compl_info_t *info, odp_pool_t pool,
+					 odp_atomic_u32_t *status_map, uint16_t *num)
+{
+	tx_compl_info_t *i;
+
+	/* Fast path: no Tx completion requested for this packet */
+	if (odp_likely(hdr->p.flags.tx_compl_ev == 0 && hdr->p.flags.tx_compl_poll == 0))
+		return true;
+
+	i = &info[*num];
+	i->idx = pkt_idx;
+
+	if (hdr->p.flags.tx_compl_ev) {
+		i->buf = odp_buffer_alloc(pool);
+
+		if (i->buf == ODP_BUFFER_INVALID)
+			return false;
+
+		i->user_ptr = hdr->user_ptr;
+		i->queue = hdr->dst_queue;
+		i->mode = ODP_PACKET_TX_COMPL_EVENT;
+	} else {
+		/* Poll mode: clear status now, finish_tx_compl() sets it to 1 */
+		i->status = &status_map[hdr->tx_compl_id];
+		odp_atomic_store_rel_u32(i->status, 0);
+		i->mode = ODP_PACKET_TX_COMPL_POLL;
+	}
+
+	(*num)++;
+
+	return true;
+}
+
+/* Scan 'packets' and set up Tx completion entries via check_tx_compl().
+ *
+ * Returns the number of packets that may be sent: 'num' normally, or the
+ * index of the first packet whose completion buffer allocation failed
+ * (that packet and the rest are not sent). *num_tx_c is updated with the
+ * number of completion entries recorded in 'info'. */
+static inline int prepare_tx_compl(const odp_packet_t packets[], int num, tx_compl_info_t *info,
+				   odp_pool_t pool, odp_atomic_u32_t *status_map,
+				   uint16_t *num_tx_c)
+{
+	int num_to_send = num;
+
+	for (int i = 0; i < num; i++)
+		if (!check_tx_compl(packet_hdr(packets[i]), i, info, pool, status_map, num_tx_c)) {
+			/* Truncate the send at the first allocation failure */
+			num_to_send = info[*num_tx_c].idx;
+			break;
+		}
+
+	return num_to_send;
+}
+
+/* Fill a Tx completion event with 'user_ptr' and enqueue it to 'queue'.
+ * The event is freed (and an error logged) if the enqueue fails. */
+static inline void send_tx_compl_event(odp_buffer_t buf, const void *user_ptr, odp_queue_t queue)
+{
+	_odp_pktio_tx_compl_t *data;
+	odp_event_t ev;
+
+	data = odp_buffer_addr(buf);
+	data->user_ptr = user_ptr;
+	ev = odp_buffer_to_event(buf);
+	_odp_event_type_set(ev, ODP_EVENT_PACKET_TX_COMPL);
+
+	if (odp_unlikely(odp_queue_enq(queue, ev))) {
+		_ODP_ERR("Failed to enqueue Tx completion event\n");
+		odp_event_free(ev);
+	}
+}
+
+/* Complete or roll back the Tx completion entries recorded by
+ * prepare_tx_compl(). Entries for packets actually sent (idx < num_sent)
+ * deliver their completion: an event enqueue, or poll status set to 1.
+ * Event-mode buffers of unsent packets are released. */
+static inline void finish_tx_compl(tx_compl_info_t *info, uint16_t num, int num_sent)
+{
+	tx_compl_info_t *i;
+
+	for (int j = 0; j < num; j++) {
+		i = &info[j];
+
+		if (i->idx < num_sent) {
+			if (i->mode == ODP_PACKET_TX_COMPL_EVENT)
+				send_tx_compl_event(i->buf, i->user_ptr, i->queue);
+			else
+				odp_atomic_store_rel_u32(i->status, 1);
+		} else if (i->mode == ODP_PACKET_TX_COMPL_EVENT) {
+			/* Packet was not sent: free the unused event buffer */
+			odp_buffer_free(i->buf);
+		}
+	}
}
int odp_pktout_send(odp_pktout_queue_t queue, const odp_packet_t packets[],
@@ -1651,22 +2766,635 @@ int odp_pktout_send(odp_pktout_queue_t queue, const odp_packet_t packets[],
{
pktio_entry_t *entry;
odp_pktio_t pktio = queue.pktio;
+ tx_compl_info_t tx_compl_info[num];
+ uint16_t num_tx_c = 0;
+ int num_to_send = num, num_sent;
entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", pktio);
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ if (odp_unlikely(entry->state != PKTIO_STATE_STARTED))
+ return 0;
+
+ if (_ODP_PCAPNG)
+ _odp_pcapng_dump_pkts(entry, queue.index, packets, num);
+
+ if (odp_unlikely(_odp_pktio_tx_compl_enabled(entry))) {
+ odp_pool_t tx_compl_pool = entry->tx_compl_pool;
+ odp_atomic_u32_t *tx_compl_status = entry->tx_compl_status;
+
+ num_to_send = prepare_tx_compl(packets, num, tx_compl_info, tx_compl_pool,
+ tx_compl_status, &num_tx_c);
+ }
+
+ num_sent = entry->ops->send(entry, queue.index, packets, num_to_send);
+
+ if (odp_unlikely(num_tx_c))
+ finish_tx_compl(tx_compl_info, num_tx_c, num_sent);
+
+ return num_sent;
+}
+
+/** Get printable format of odp_pktio_t */
+uint64_t odp_pktio_to_u64(odp_pktio_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+/* Read and clear the pktio Tx timestamp.
+ *
+ * Returns 0 and stores the timestamp in *ts when one is available,
+ * 1 when no new timestamp has been captured (tx_ts is zero), or -1 on a
+ * bad pktio handle. The atomic exchange guarantees that only one caller
+ * consumes a given timestamp value. */
+int odp_pktout_ts_read(odp_pktio_t hdl, odp_time_t *ts)
+{
+	pktio_entry_t *entry;
+	uint64_t ts_val;
+
+	entry = get_pktio_entry(hdl);
+	if (odp_unlikely(entry == NULL)) {
+		_ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)hdl);
+		return -1;
+	}
+
+	/* Cheap load first; exchange only when a timestamp looks present */
+	if (odp_atomic_load_u64(&entry->tx_ts) == 0)
+		return 1;
+
+	ts_val = odp_atomic_xchg_u64(&entry->tx_ts, 0);
+	/* Another thread may have consumed it between the load and the xchg */
+	if (odp_unlikely(ts_val == 0))
+		return 1;
+
+	ts->u64 = ts_val;
+	return 0;
+}
+
+void odp_lso_profile_param_init(odp_lso_profile_param_t *param)
+{
+ memset(param, 0, sizeof(odp_lso_profile_param_t));
+
+ param->lso_proto = ODP_LSO_PROTO_NONE;
+}
+
+odp_lso_profile_t odp_lso_profile_create(odp_pktio_t pktio, const odp_lso_profile_param_t *param)
+{
+ uint32_t i, num_custom, mod_op, offset, size;
+ lso_profile_t *lso_prof = NULL;
+ (void)pktio;
+
+ /* Currently only IPv4 and custom implemented */
+ if (param->lso_proto != ODP_LSO_PROTO_IPV4 &&
+ param->lso_proto != ODP_LSO_PROTO_CUSTOM) {
+ _ODP_ERR("Protocol not supported\n");
+ return ODP_LSO_PROFILE_INVALID;
+ }
+
+ if (param->lso_proto == ODP_LSO_PROTO_CUSTOM) {
+ num_custom = param->custom.num_custom;
+ if (num_custom > ODP_LSO_MAX_CUSTOM) {
+ _ODP_ERR("Too many custom fields\n");
+ return ODP_LSO_PROFILE_INVALID;
+ }
+
+ for (i = 0; i < num_custom; i++) {
+ mod_op = param->custom.field[i].mod_op;
+ offset = param->custom.field[i].offset;
+ size = param->custom.field[i].size;
+
+ if (offset > PKTIO_LSO_MAX_PAYLOAD_OFFSET) {
+ _ODP_ERR("Too large custom field offset %u\n", offset);
+ return ODP_LSO_PROFILE_INVALID;
+ }
+
+ /* Currently only segment number supported */
+ if (mod_op != ODP_LSO_ADD_SEGMENT_NUM) {
+ _ODP_ERR("Custom modify operation %u not supported\n", mod_op);
+ return ODP_LSO_PROFILE_INVALID;
+ }
+
+ if (size != 1 && size != 2 && size != 4 && size != 8) {
+ _ODP_ERR("Bad custom field size %u\n", size);
+ return ODP_LSO_PROFILE_INVALID;
+ }
+ }
+ }
+
+ odp_spinlock_lock(&pktio_global->lock);
+
+ if (pktio_global->num_lso_profiles >= PKTIO_LSO_PROFILES) {
+ odp_spinlock_unlock(&pktio_global->lock);
+ _ODP_ERR("All LSO profiles used already: %u\n", PKTIO_LSO_PROFILES);
+ return ODP_LSO_PROFILE_INVALID;
+ }
+
+ for (i = 0; i < PKTIO_LSO_PROFILES; i++) {
+ if (pktio_global->lso_profile[i].used == 0) {
+ lso_prof = &pktio_global->lso_profile[i];
+ lso_prof->used = 1;
+ pktio_global->num_lso_profiles++;
+ break;
+ }
+ }
+
+ odp_spinlock_unlock(&pktio_global->lock);
+
+ if (lso_prof == NULL) {
+ _ODP_ERR("Did not find free LSO profile\n");
+ return ODP_LSO_PROFILE_INVALID;
+ }
+
+ lso_prof->param = *param;
+ lso_prof->index = i;
+
+ return (odp_lso_profile_t)(uintptr_t)lso_prof;
+}
+
+odp_lso_profile_t _odp_lso_prof_from_idx(uint8_t idx)
+{
+ return (odp_lso_profile_t)(uintptr_t)&pktio_global->lso_profile[idx];
+}
+
+/* Convert an LSO profile handle back to its table entry pointer.
+ * Handles are direct pointers into pktio_global->lso_profile[]
+ * (see odp_lso_profile_create() / _odp_lso_prof_from_idx()). */
+static inline lso_profile_t *lso_profile_ptr(odp_lso_profile_t handle)
+{
+	return (lso_profile_t *)(uintptr_t)handle;
+}
+
+/* Release an LSO profile slot. Returns -1 on an invalid or unused
+ * handle, 0 on success. The global spinlock protects the 'used' flag
+ * and the profile counter. */
+int odp_lso_profile_destroy(odp_lso_profile_t lso_profile)
+{
+	lso_profile_t *lso_prof = lso_profile_ptr(lso_profile);
+
+	/* NOTE(review): 'used' is read before the lock is taken — presumably
+	 * benign for handle validation, but verify against concurrent
+	 * create/destroy of the same slot. */
+	if (lso_profile == ODP_LSO_PROFILE_INVALID || lso_prof->used == 0) {
+		_ODP_ERR("Bad handle\n");
+		return -1;
+	}
+
+	odp_spinlock_lock(&pktio_global->lock);
+	lso_prof->used = 0;
+	pktio_global->num_lso_profiles--;
+	odp_spinlock_unlock(&pktio_global->lock);
+
+	return 0;
+}
+
+int odp_packet_lso_request(odp_packet_t pkt, const odp_packet_lso_opt_t *lso_opt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ lso_profile_t *lso_prof = lso_profile_ptr(lso_opt->lso_profile);
+ uint32_t payload_offset = lso_opt->payload_offset;
+
+ if (odp_unlikely(lso_opt->lso_profile == ODP_LSO_PROFILE_INVALID || lso_prof->used == 0)) {
+ _ODP_ERR("Bad LSO profile handle\n");
+ return -1;
+ }
+
+ if (odp_unlikely(payload_offset > PKTIO_LSO_MAX_PAYLOAD_OFFSET)) {
+ _ODP_ERR("Too large LSO payload offset\n");
+ return -1;
+ }
+
+ if (odp_unlikely(payload_offset > packet_len(pkt_hdr))) {
+ _ODP_ERR("LSO payload offset larger than packet data length\n");
+ return -1;
+ }
+
+ if (odp_packet_payload_offset_set(pkt, payload_offset)) {
+ _ODP_ERR("Payload offset set failed\n");
+ return -1;
+ }
+
+ pkt_hdr->p.flags.lso = 1;
+ pkt_hdr->lso_max_payload = lso_opt->max_payload_len;
+ pkt_hdr->lso_profile_idx = lso_prof->index;
+
+ return 0;
+}
+
+/* Rewrite the IPv4 header of LSO segment 'index' (of 'num_pkt' total):
+ * sets tot_len, the fragment offset (in 8-byte units), the more-fragments
+ * flag on all but the last segment, and re-inserts the header checksum.
+ * Returns the checksum insert result (0 on success). */
+static int lso_update_ipv4(odp_packet_t pkt, int index, int num_pkt,
+			   uint32_t l3_offset, uint32_t payload_len)
+{
+	_odp_ipv4hdr_t *ipv4;
+	uint32_t pkt_len = odp_packet_len(pkt);
+	uint16_t tot_len = pkt_len - l3_offset;
+	int ret = 0;
+	uint16_t frag_offset;
+
+	odp_packet_l3_offset_set(pkt, l3_offset);
+	ipv4 = odp_packet_l3_ptr(pkt, NULL);
+	ipv4->tot_len = odp_cpu_to_be_16(tot_len);
+
+	/* IP payload offset in 8 byte blocks; payload_len is already a
+	 * multiple of 8 here (rounded down in _odp_lso_num_packets()). */
+	frag_offset = ((uint32_t)index * payload_len) / 8;
+
+	/* More fragments flag */
+	if (index < (num_pkt - 1))
+		frag_offset |= _ODP_IPV4HDR_FRAG_OFFSET_MORE_FRAGS;
+
+	ipv4->frag_offset = odp_cpu_to_be_16(frag_offset);
+	ret = _odp_packet_ipv4_chksum_insert(pkt);
+
+	return ret;
+}
+
+static int lso_update_custom(lso_profile_t *lso_prof, odp_packet_t pkt, int segnum)
+{
+ void *ptr;
+ int i, mod_op;
+ uint32_t offset;
+ uint8_t size;
+ int num_custom = lso_prof->param.custom.num_custom;
+ uint64_t u64 = 0;
+ uint32_t u32 = 0;
+ uint16_t u16 = 0;
+ uint8_t u8 = 0;
+
+ for (i = 0; i < num_custom; i++) {
+ mod_op = lso_prof->param.custom.field[i].mod_op;
+ offset = lso_prof->param.custom.field[i].offset;
+ size = lso_prof->param.custom.field[i].size;
+
+ if (size == 8)
+ ptr = &u64;
+ else if (size == 4)
+ ptr = &u32;
+ else if (size == 2)
+ ptr = &u16;
+ else {
+ /*
+ * odp_lso_profile_create() ensures that size is one of the allowed values.
+ * But compiler doesn't know that, so set it here to avoid possibility of
+ * out of bounds warnings.
+ */
+ size = 1;
+ ptr = &u8;
+ }
+
+ if (odp_packet_copy_to_mem(pkt, offset, size, ptr)) {
+ _ODP_ERR("Read from packet failed at offset %u\n", offset);
+ return -1;
+ }
+
+ if (mod_op == ODP_LSO_ADD_SEGMENT_NUM) {
+ if (size == 8)
+ u64 = odp_cpu_to_be_64(segnum + odp_be_to_cpu_64(u64));
+ else if (size == 4)
+ u32 = odp_cpu_to_be_32(segnum + odp_be_to_cpu_32(u32));
+ else if (size == 2)
+ u16 = odp_cpu_to_be_16(segnum + odp_be_to_cpu_16(u16));
+ else
+ u8 += segnum;
+ }
+
+ if (odp_packet_copy_from_mem(pkt, offset, size, ptr)) {
+ _ODP_ERR("Write to packet failed at offset %u\n", offset);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/* Compute the number of LSO segments needed for 'packet'.
+ *
+ * On success returns the segment count (1 means segmentation is not
+ * needed), stores the per-segment payload length in *len_out and the
+ * last segment's shorter remainder in *left_over_out. Returns -1 on bad
+ * offsets or when the count exceeds PKTIO_LSO_MAX_SEGMENTS.
+ *
+ * NOTE(review): for IPv4, payload_len is rounded down to a multiple of
+ * 8; a max_payload_len < 8 would make it 0 and the division below would
+ * fault — confirm a minimum payload length is enforced by callers. */
+int _odp_lso_num_packets(odp_packet_t packet, const odp_packet_lso_opt_t *lso_opt,
+			 uint32_t *len_out, uint32_t *left_over_out)
+{
+	uint32_t num_pkt, left_over, l3_offset, iphdr_len;
+	odp_lso_profile_t lso_profile = lso_opt->lso_profile;
+	lso_profile_t *lso_prof = lso_profile_ptr(lso_profile);
+	uint32_t payload_len = lso_opt->max_payload_len;
+	uint32_t hdr_len = lso_opt->payload_offset;
+	uint32_t pkt_len = odp_packet_len(packet);
+	uint32_t pkt_payload = pkt_len - hdr_len;
+
+	if (odp_unlikely(hdr_len > PKTIO_LSO_MAX_PAYLOAD_OFFSET)) {
+		_ODP_ERR("Too large LSO payload offset\n");
+		return -1;
+	}
+
+	if (odp_unlikely(hdr_len > pkt_len)) {
+		_ODP_ERR("LSO payload offset larger than packet data length\n");
+		return -1;
+	}
+
+	if (odp_unlikely(hdr_len + payload_len > odp_packet_len(packet))) {
+		/* Packet does not need segmentation */
+		*len_out = payload_len;
+		*left_over_out = 0;
+
+		return 1;
+	}
+
+	if (lso_prof->param.lso_proto == ODP_LSO_PROTO_IPV4) {
+		l3_offset = odp_packet_l3_offset(packet);
+		/* May wrap when l3_offset is invalid; checked just below */
+		iphdr_len = hdr_len - l3_offset;
+
+		if (l3_offset == ODP_PACKET_OFFSET_INVALID) {
+			_ODP_ERR("Invalid L3 offset\n");
+			return -1;
+		}
+
+		if (hdr_len < l3_offset || iphdr_len < _ODP_IPV4HDR_LEN) {
+			_ODP_ERR("Bad payload or L3 offset\n");
+			return -1;
+		}
+
+		/* Round down payload len to a multiple of 8 (on other than the last fragment). */
+		payload_len = (payload_len / 8) * 8;
+	}
+
+	num_pkt = pkt_payload / payload_len;
+
+	left_over = pkt_payload - (num_pkt * payload_len);
+	if (left_over)
+		num_pkt++;
+
+	if (num_pkt > PKTIO_LSO_MAX_SEGMENTS) {
+		_ODP_ERR("Too many LSO segments %i. Maximum is %i\n", num_pkt,
+			 PKTIO_LSO_MAX_SEGMENTS);
+		return -1;
+	}
+
+	*len_out = payload_len;
+	*left_over_out = left_over;
+
+	return num_pkt;
+}
+
+int _odp_lso_create_packets(odp_packet_t packet, const odp_packet_lso_opt_t *lso_opt,
+ uint32_t payload_len, uint32_t left_over_len,
+ odp_packet_t pkt_out[], int num_pkt)
+{
+ int i, num;
+ uint32_t offset;
+ odp_packet_t pkt;
+ odp_lso_profile_t lso_profile = lso_opt->lso_profile;
+ lso_profile_t *lso_prof = lso_profile_ptr(lso_profile);
+ const uint32_t hdr_len = lso_opt->payload_offset;
+ const uint32_t pkt_len = hdr_len + payload_len;
+ odp_pool_t pool = odp_packet_pool(packet);
+ int num_free = 0;
+ int num_full = num_pkt;
+
+ if (left_over_len)
+ num_full = num_pkt - 1;
+
+ num = odp_packet_alloc_multi(pool, pkt_len, pkt_out, num_full);
+ if (odp_unlikely(num < num_full)) {
+ _ODP_DBG("Alloc failed %i\n", num);
+ if (num > 0) {
+ num_free = num;
+ goto error;
+ }
+ }
+
+ if (left_over_len) {
+ pkt = odp_packet_alloc(pool, hdr_len + left_over_len);
+ if (pkt == ODP_PACKET_INVALID) {
+ _ODP_DBG("Alloc failed\n");
+ num_free = num_full;
+ goto error;
+ }
+
+ pkt_out[num_pkt - 1] = pkt;
+ }
+
+ num_free = num_pkt;
+
+ /* Copy headers */
+ for (i = 0; i < num_pkt; i++) {
+ if (odp_packet_copy_from_pkt(pkt_out[i], 0, packet, 0, hdr_len)) {
+ _ODP_ERR("Header copy failed\n");
+ goto error;
+ }
+ }
+
+ /* Copy payload */
+ for (i = 0; i < num_full; i++) {
+ offset = hdr_len + (i * payload_len);
+ if (odp_packet_copy_from_pkt(pkt_out[i], hdr_len, packet, offset, payload_len)) {
+ _ODP_ERR("Payload copy failed\n");
+ goto error;
+ }
+ }
+
+ /* Copy left over payload */
+ if (left_over_len) {
+ offset = hdr_len + (num_full * payload_len);
+ if (odp_packet_copy_from_pkt(pkt_out[num_pkt - 1], hdr_len, packet, offset,
+ left_over_len)){
+ _ODP_ERR("Payload copy failed\n");
+ goto error;
+ }
+ }
+
+ if (lso_prof->param.lso_proto == ODP_LSO_PROTO_IPV4) {
+ offset = odp_packet_l3_offset(packet);
+
+ if (offset == ODP_PACKET_OFFSET_INVALID) {
+ _ODP_ERR("Invalid L3 offset\n");
+ goto error;
+ }
+
+ for (i = 0; i < num_pkt; i++) {
+ if (lso_update_ipv4(pkt_out[i], i, num_pkt, offset, payload_len)) {
+ _ODP_ERR("IPv4 header update failed. Packet %i.\n", i);
+ goto error;
+ }
+ }
+ } else {
+ /* Update custom fields */
+ int num_custom = lso_prof->param.custom.num_custom;
+
+ for (i = 0; num_custom && i < num_pkt; i++) {
+ if (lso_update_custom(lso_prof, pkt_out[i], i)) {
+ _ODP_ERR("Custom field update failed. Segment %i\n", i);
+ goto error;
+ }
+ }
+ }
+
+ return 0;
+
+error:
+ odp_packet_free_multi(pkt_out, num_free);
+ return -1;
+}
+
+static int pktout_send_lso(odp_pktout_queue_t queue, odp_packet_t packet,
+ const odp_packet_lso_opt_t *lso_opt)
+{
+ int ret, num_pkt;
+ uint32_t payload_len, left_over_len;
+
+ /* Calculate number of packets */
+ num_pkt = _odp_lso_num_packets(packet, lso_opt, &payload_len, &left_over_len);
+ if (odp_unlikely(num_pkt <= 0))
+ return -1;
+
+ if (odp_unlikely(num_pkt == 1)) {
+ /* Segmentation not needed */
+ if (odp_pktout_send(queue, &packet, 1) != 1)
+ return -1;
+
+ return 0;
+ }
+
+ /* Create packets */
+ odp_packet_t pkt_out[num_pkt];
+
+ ret = _odp_lso_create_packets(packet, lso_opt, payload_len, left_over_len, pkt_out,
+ num_pkt);
+
+ if (odp_unlikely(ret))
+ return -1;
+
+ /* Send LSO packets */
+ ret = odp_pktout_send(queue, pkt_out, num_pkt);
+
+ if (ret < num_pkt) {
+ int first_free = 0;
+ int num_free = num_pkt;
+
+ _ODP_DBG("Packet send failed %i\n", ret);
+
+ if (ret > 0) {
+ first_free = ret;
+ num_free = num_pkt - ret;
+ }
+
+ odp_packet_free_multi(&pkt_out[first_free], num_free);
+ return -1;
+ }
+
+ /* Free original packet */
+ odp_packet_free(packet);
+
+ return 0;
+}
+
+int odp_pktout_send_lso(odp_pktout_queue_t queue, const odp_packet_t packet[], int num,
+ const odp_packet_lso_opt_t *opt)
+{
+ int i;
+ odp_packet_t pkt;
+ odp_packet_lso_opt_t lso_opt;
+ const odp_packet_lso_opt_t *opt_ptr = &lso_opt;
+
+ if (odp_unlikely(num <= 0)) {
+ _ODP_ERR("No packets\n");
return -1;
}
- return entry->s.ops->send(entry, queue.index, packets, num);
+ memset(&lso_opt, 0, sizeof(odp_packet_lso_opt_t));
+ if (opt)
+ opt_ptr = opt;
+
+ for (i = 0; i < num; i++) {
+ pkt = packet[i];
+
+ if (opt == NULL) {
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (pkt_hdr->p.flags.lso == 0) {
+ _ODP_ERR("No LSO options on packet %i\n", i);
+ if (i == 0)
+ return -1;
+
+ return i;
+ }
+
+ lso_opt.lso_profile = _odp_lso_prof_from_idx(pkt_hdr->lso_profile_idx);
+ lso_opt.payload_offset = odp_packet_payload_offset(pkt);
+ lso_opt.max_payload_len = pkt_hdr->lso_max_payload;
+ }
+
+ if (odp_unlikely(pktout_send_lso(queue, pkt, opt_ptr))) {
+ _ODP_DBG("LSO output failed on packet %i\n", i);
+ return i;
+ }
+ }
+
+ return i;
}
-int single_capability(odp_pktio_capability_t *capa)
+void _odp_pktio_process_tx_compl(const pktio_entry_t *entry, const odp_packet_t packets[], int num)
{
- memset(capa, 0, sizeof(odp_pktio_capability_t));
- capa->max_input_queues = 1;
- capa->max_output_queues = 1;
- capa->set_op.op.promisc_mode = 1;
+ odp_packet_hdr_t *hdr;
+ odp_pool_t pool = entry->tx_compl_pool;
+ odp_buffer_t buf;
+ odp_atomic_u32_t *status_map = entry->tx_compl_status;
+
+ for (int i = 0; i < num; i++) {
+ hdr = packet_hdr(packets[i]);
+
+ if (odp_likely(hdr->p.flags.tx_compl_ev == 0 && hdr->p.flags.tx_compl_poll == 0))
+ continue;
+
+ if (hdr->p.flags.tx_compl_ev) {
+ buf = odp_buffer_alloc(pool);
+
+ if (odp_unlikely(buf == ODP_BUFFER_INVALID))
+ continue;
+
+ send_tx_compl_event(buf, hdr->user_ptr, hdr->dst_queue);
+ } else {
+ odp_atomic_store_rel_u32(&status_map[hdr->tx_compl_id], 1);
+ }
+ }
+}
+
+void
+odp_proto_stats_param_init(odp_proto_stats_param_t *param)
+{
+ if (param)
+ memset(param, 0, sizeof(*param));
+}
+
+int
+odp_proto_stats_capability(odp_pktio_t pktio, odp_proto_stats_capability_t *capa)
+{
+ (void)pktio;
+
+ if (capa == NULL)
+ return -1;
+
+ memset(capa, 0, sizeof(*capa));
+
+ return 0;
+}
+
+odp_proto_stats_t
+odp_proto_stats_lookup(const char *name)
+{
+ (void)name;
+
+ return ODP_PROTO_STATS_INVALID;
+}
+
+odp_proto_stats_t
+odp_proto_stats_create(const char *name, const odp_proto_stats_param_t *param)
+{
+ (void)name;
+ (void)param;
+
+ return ODP_PROTO_STATS_INVALID;
+}
+
+int
+odp_proto_stats_destroy(odp_proto_stats_t stat)
+{
+ (void)stat;
return 0;
}
+
+/* Stub implementation: proto stats are not collected; the output
+ * structure is simply zeroed. Always returns 0. */
+int
+odp_proto_stats(odp_proto_stats_t stat, odp_proto_stats_data_t *data)
+{
+	(void)stat;
+
+	/* NOTE(review): 'data' is dereferenced without a NULL check, unlike
+	 * odp_proto_stats_param_init() above — confirm callers guarantee it. */
+	memset(data, 0, sizeof(odp_proto_stats_data_t));
+
+	return 0;
+}
+
+void
+odp_proto_stats_print(odp_proto_stats_t stat)
+{
+ (void)stat;
+}
diff --git a/platform/linux-generic/odp_packet_io_api.c b/platform/linux-generic/odp_packet_io_api.c
new file mode 100644
index 000000000..d0b66a67f
--- /dev/null
+++ b/platform/linux-generic/odp_packet_io_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/packet_io.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/packet_io_inlines.h>
diff --git a/platform/linux-generic/odp_packet_vector.c b/platform/linux-generic/odp_packet_vector.c
new file mode 100644
index 000000000..698445181
--- /dev/null
+++ b/platform/linux-generic/odp_packet_vector.c
@@ -0,0 +1,142 @@
+/* Copyright (c) 2020-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/pool.h>
+#include <odp/api/plat/packet_vector_inlines.h>
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_debug_internal.h>
+#include <odp_event_vector_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_print_internal.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+
+#include <odp/visibility_begin.h>
+
+/* Packet vector header field offsets for inline functions */
+const _odp_event_vector_inline_offset_t _odp_event_vector_inline ODP_ALIGNED_CACHE = {
+ .packet = offsetof(odp_event_vector_hdr_t, packet),
+ .pool = offsetof(odp_event_vector_hdr_t, event_hdr.pool),
+ .size = offsetof(odp_event_vector_hdr_t, size),
+ .uarea_addr = offsetof(odp_event_vector_hdr_t, uarea_addr),
+ .flags = offsetof(odp_event_vector_hdr_t, flags)
+};
+
+#include <odp/visibility_end.h>
+
+static inline odp_event_vector_hdr_t *event_vector_hdr_from_event(odp_event_t event)
+{
+ return (odp_event_vector_hdr_t *)(uintptr_t)event;
+}
+
+/* Allocate an empty packet vector from a vector-type pool.
+ *
+ * Debug-build assertions check that the pool handle is valid, that the
+ * pool type is ODP_POOL_VECTOR, and that a recycled vector comes back
+ * with size 0 (odp_packet_vector_free() resets it). Returns
+ * ODP_PACKET_VECTOR_INVALID when the pool has no free events. */
+odp_packet_vector_t odp_packet_vector_alloc(odp_pool_t pool_hdl)
+{
+	odp_event_t event;
+	pool_t *pool;
+
+	_ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+
+	pool = _odp_pool_entry(pool_hdl);
+
+	_ODP_ASSERT(pool->type == ODP_POOL_VECTOR);
+
+	event = _odp_event_alloc(pool);
+	if (odp_unlikely(event == ODP_EVENT_INVALID))
+		return ODP_PACKET_VECTOR_INVALID;
+
+	_ODP_ASSERT(event_vector_hdr_from_event(event)->size == 0);
+
+	return odp_packet_vector_from_event(event);
+}
+
+void odp_packet_vector_free(odp_packet_vector_t pktv)
+{
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ pktv_hdr->size = 0;
+ pktv_hdr->flags.all_flags = 0;
+
+ _odp_event_free(odp_packet_vector_to_event(pktv));
+}
+
+/* Validate a packet vector handle.
+ *
+ * Returns 1 when the handle refers to a valid event of type
+ * ODP_EVENT_PACKET_VECTOR whose size is within the pool's max_size and
+ * whose entries are all non-invalid packet handles; 0 otherwise. */
+int odp_packet_vector_valid(odp_packet_vector_t pktv)
+{
+	odp_event_vector_hdr_t *pktv_hdr;
+	odp_event_t ev;
+	pool_t *pool;
+	uint32_t i;
+
+	if (odp_unlikely(pktv == ODP_PACKET_VECTOR_INVALID))
+		return 0;
+
+	ev = odp_packet_vector_to_event(pktv);
+
+	if (_odp_event_is_valid(ev) == 0)
+		return 0;
+
+	if (odp_event_type(ev) != ODP_EVENT_PACKET_VECTOR)
+		return 0;
+
+	pktv_hdr = _odp_packet_vector_hdr(pktv);
+	pool = _odp_pool_entry(pktv_hdr->event_hdr.pool);
+
+	if (odp_unlikely(pktv_hdr->size > pool->params.vector.max_size))
+		return 0;
+
+	/* Every stored entry must be a usable packet handle */
+	for (i = 0; i < pktv_hdr->size; i++) {
+		if (pktv_hdr->packet[i] == ODP_PACKET_INVALID)
+			return 0;
+	}
+
+	return 1;
+}
+
+void odp_packet_vector_print(odp_packet_vector_t pktv)
+{
+ int max_len = 4096;
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+ uint32_t i;
+ odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
+
+ len += _odp_snprint(&str[len], n - len, "Packet vector info\n");
+ len += _odp_snprint(&str[len], n - len, "------------------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_packet_vector_to_u64(pktv));
+ len += _odp_snprint(&str[len], n - len, " size %" PRIu32 "\n", pktv_hdr->size);
+ len += _odp_snprint(&str[len], n - len, " flags 0x%" PRIx32 "\n",
+ pktv_hdr->flags.all_flags);
+ len += _odp_snprint(&str[len], n - len, " user area %p\n", pktv_hdr->uarea_addr);
+
+ for (i = 0; i < pktv_hdr->size; i++) {
+ odp_packet_t pkt = pktv_hdr->packet[i];
+ char seg_str[max_len];
+ int str_len;
+
+ str_len = _odp_snprint(seg_str, max_len, " packet %p len %" PRIu32 "\n",
+ (void *)pkt, odp_packet_len(pkt));
+
+ /* Prevent print buffer overflow */
+ if (n - len - str_len < 10) {
+ len += _odp_snprint(&str[len], n - len, " ...\n");
+ break;
+ }
+ len += _odp_snprint(&str[len], n - len, "%s", seg_str);
+ }
+
+ _ODP_PRINT("%s\n", str);
+}
+
+uint64_t odp_packet_vector_to_u64(odp_packet_vector_t pktv)
+{
+ return _odp_pri(pktv);
+}
diff --git a/platform/linux-generic/odp_parse.c b/platform/linux-generic/odp_parse.c
new file mode 100644
index 000000000..d7fdb1439
--- /dev/null
+++ b/platform/linux-generic/odp_parse.c
@@ -0,0 +1,482 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_parse_internal.h>
+#include <odp_chksum_internal.h>
+#include <protocols/eth.h>
+#include <protocols/ip.h>
+#include <protocols/sctp.h>
+#include <protocols/tcp.h>
+#include <protocols/udp.h>
+#include <odp/api/hash.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/packet_types.h>
+#include <stdint.h>
+#include <string.h>
+
+/** Parser helper function for Ethernet packets
+ *
+ * Requires up to PARSE_ETH_BYTES bytes of contiguous packet data.
+ */
+uint16_t _odp_parse_eth(packet_parser_t *prs, const uint8_t **parseptr,
+ uint32_t *offset, uint32_t frame_len)
+{
+ uint16_t ethtype;
+ const _odp_ethhdr_t *eth;
+ uint16_t macaddr0, macaddr2, macaddr4;
+ const _odp_vlanhdr_t *vlan;
+ _odp_packet_input_flags_t input_flags;
+
+ input_flags.all = 0;
+ input_flags.l2 = 1;
+ input_flags.eth = 1;
+
+ eth = (const _odp_ethhdr_t *)*parseptr;
+
+ /* Detect jumbo frames */
+ if (odp_unlikely(frame_len - *offset > _ODP_ETH_LEN_MAX))
+ input_flags.jumbo = 1;
+
+ /* Handle Ethernet broadcast/multicast addresses */
+ macaddr0 = odp_be_to_cpu_16(*((const uint16_t *)(const void *)eth));
+ if (odp_unlikely((macaddr0 & 0x0100) == 0x0100))
+ input_flags.eth_mcast = 1;
+
+ if (odp_unlikely(macaddr0 == 0xffff)) {
+ macaddr2 =
+ odp_be_to_cpu_16(*((const uint16_t *)
+ (const void *)eth + 1));
+ macaddr4 =
+ odp_be_to_cpu_16(*((const uint16_t *)
+ (const void *)eth + 2));
+
+ if ((macaddr2 == 0xffff) && (macaddr4 == 0xffff))
+ input_flags.eth_bcast = 1;
+ }
+
+ /* Get Ethertype */
+ ethtype = odp_be_to_cpu_16(eth->type);
+ *offset += sizeof(*eth);
+ *parseptr += sizeof(*eth);
+
+ /* Check for SNAP vs. DIX */
+ if (odp_unlikely(ethtype < _ODP_ETH_LEN_MAX)) {
+ input_flags.snap = 1;
+ if (ethtype > frame_len - *offset) {
+ prs->flags.snap_len_err = 1;
+ ethtype = 0;
+ goto error;
+ }
+ ethtype = odp_be_to_cpu_16(*((const uint16_t *)(uintptr_t)
+ (*parseptr + 6)));
+ *offset += 8;
+ *parseptr += 8;
+ }
+
+ /* Parse the VLAN header(s), if present */
+ if (odp_unlikely(ethtype == _ODP_ETHTYPE_VLAN_OUTER)) {
+ input_flags.vlan_qinq = 1;
+ input_flags.vlan = 1;
+
+ vlan = (const _odp_vlanhdr_t *)*parseptr;
+ ethtype = odp_be_to_cpu_16(vlan->type);
+ *offset += sizeof(_odp_vlanhdr_t);
+ *parseptr += sizeof(_odp_vlanhdr_t);
+ }
+
+ if (ethtype == _ODP_ETHTYPE_VLAN) {
+ input_flags.vlan = 1;
+ vlan = (const _odp_vlanhdr_t *)*parseptr;
+ ethtype = odp_be_to_cpu_16(vlan->type);
+ *offset += sizeof(_odp_vlanhdr_t);
+ *parseptr += sizeof(_odp_vlanhdr_t);
+ }
+
+ /*
+ * The packet was too short for what we parsed. We just give up
+ * entirely without trying to parse what fits in the packet.
+ */
+ if (odp_unlikely(*offset > frame_len)) {
+ input_flags.all = 0;
+ input_flags.l2 = 1;
+ ethtype = 0;
+ }
+
+error:
+ prs->input_flags.all |= input_flags.all;
+
+ return ethtype;
+}
+
+/**
+ * Parser helper function for IPv4
+ *
+ * Requires up to PARSE_IPV4_BYTES bytes of contiguous packet data.
+ */
+static inline uint8_t parse_ipv4(packet_parser_t *prs, const uint8_t **parseptr,
+ uint32_t *offset, uint32_t frame_len,
+ odp_pktin_config_opt_t opt,
+ uint64_t *l4_part_sum)
+{
+ const _odp_ipv4hdr_t *ipv4 = (const _odp_ipv4hdr_t *)*parseptr;
+ uint32_t dstaddr = odp_be_to_cpu_32(ipv4->dst_addr);
+ uint32_t l3_len = odp_be_to_cpu_16(ipv4->tot_len);
+ uint16_t frag_offset = odp_be_to_cpu_16(ipv4->frag_offset);
+ uint8_t ver = _ODP_IPV4HDR_VER(ipv4->ver_ihl);
+ uint8_t ihl = _ODP_IPV4HDR_IHL(ipv4->ver_ihl);
+
+ if (odp_unlikely(prs->flags.l3_chksum_err ||
+ ihl < _ODP_IPV4HDR_IHL_MIN ||
+ ver != 4 ||
+ sizeof(*ipv4) > frame_len - *offset ||
+ (l3_len > frame_len - *offset))) {
+ prs->flags.ip_err = 1;
+ return 0;
+ }
+
+ if (opt.bit.ipv4_chksum) {
+ prs->input_flags.l3_chksum_done = 1;
+ if (chksum_finalize(chksum_partial(ipv4, ihl * 4, 0)) != 0xffff) {
+ prs->flags.ip_err = 1;
+ prs->flags.l3_chksum_err = 1;
+ return 0;
+ }
+ }
+
+ *offset += ihl * 4;
+ *parseptr += ihl * 4;
+
+ if (opt.bit.udp_chksum || opt.bit.tcp_chksum)
+ *l4_part_sum = chksum_partial((const uint8_t *)&ipv4->src_addr,
+ 2 * _ODP_IPV4ADDR_LEN, 0);
+
+ if (odp_unlikely(ihl > _ODP_IPV4HDR_IHL_MIN))
+ prs->input_flags.ipopt = 1;
+
+ /* A packet is a fragment if:
+ * "more fragments" flag is set (all fragments except the last)
+ * OR
+ * "fragment offset" field is nonzero (all fragments except the first)
+ */
+ if (odp_unlikely(_ODP_IPV4HDR_IS_FRAGMENT(frag_offset)))
+ prs->input_flags.ipfrag = 1;
+
+ /* Handle IPv4 broadcast / multicast */
+ if (odp_unlikely(dstaddr == 0xffffffff))
+ prs->input_flags.ip_bcast = 1;
+
+ if (odp_unlikely((dstaddr >> 28) == 0xe))
+ prs->input_flags.ip_mcast = 1;
+
+ return ipv4->proto;
+}
+
+/**
+ * Parser helper function for IPv6
+ *
+ * Requires at least PARSE_IPV6_BYTES bytes of contiguous packet data.
+ *
+ * - offset is the offset of the first byte of the data pointed to by parseptr
+ * - seg_end is the maximum offset that can be accessed plus one
+ */
+static inline uint8_t parse_ipv6(packet_parser_t *prs, const uint8_t **parseptr,
+ uint32_t *offset, uint32_t frame_len,
+ uint32_t seg_end,
+ odp_pktin_config_opt_t opt,
+ uint64_t *l4_part_sum)
+{
+ const _odp_ipv6hdr_t *ipv6 = (const _odp_ipv6hdr_t *)*parseptr;
+ const _odp_ipv6hdr_ext_t *ipv6ext;
+ uint32_t dstaddr0 = odp_be_to_cpu_32(ipv6->dst_addr.u8[0]);
+ uint32_t l3_len = odp_be_to_cpu_16(ipv6->payload_len) +
+ _ODP_IPV6HDR_LEN;
+
+ /* Basic sanity checks on IPv6 header */
+ if (odp_unlikely(prs->flags.l3_chksum_err ||
+ (odp_be_to_cpu_32(ipv6->ver_tc_flow) >> 28) != 6 ||
+ sizeof(*ipv6) > frame_len - *offset ||
+ l3_len > frame_len - *offset)) {
+ prs->flags.ip_err = 1;
+ return 0;
+ }
+
+ /* IPv6 broadcast / multicast flags */
+ prs->input_flags.ip_mcast = (dstaddr0 & 0xff000000) == 0xff000000;
+ prs->input_flags.ip_bcast = 0;
+
+ /* Skip past IPv6 header */
+ *offset += sizeof(_odp_ipv6hdr_t);
+ *parseptr += sizeof(_odp_ipv6hdr_t);
+
+ if (opt.bit.udp_chksum || opt.bit.tcp_chksum)
+ *l4_part_sum = chksum_partial((const uint8_t *)&ipv6->src_addr,
+ 2 * _ODP_IPV6ADDR_LEN, 0);
+
+ /* Skip past any IPv6 extension headers */
+ if (ipv6->next_hdr == _ODP_IPPROTO_HOPOPTS ||
+ ipv6->next_hdr == _ODP_IPPROTO_ROUTE) {
+ prs->input_flags.ipopt = 1;
+
+ do {
+ ipv6ext = (const _odp_ipv6hdr_ext_t *)*parseptr;
+ uint16_t extlen = 8 + ipv6ext->ext_len * 8;
+
+ *offset += extlen;
+ *parseptr += extlen;
+ } while ((ipv6ext->next_hdr == _ODP_IPPROTO_HOPOPTS ||
+ ipv6ext->next_hdr == _ODP_IPPROTO_ROUTE) &&
+ *offset < seg_end);
+
+ if (*offset >= prs->l3_offset +
+ odp_be_to_cpu_16(ipv6->payload_len)) {
+ prs->flags.ip_err = 1;
+ return 0;
+ }
+
+ if (ipv6ext->next_hdr == _ODP_IPPROTO_FRAG)
+ prs->input_flags.ipfrag = 1;
+
+ return ipv6ext->next_hdr;
+ }
+
+ if (odp_unlikely(ipv6->next_hdr == _ODP_IPPROTO_FRAG)) {
+ prs->input_flags.ipopt = 1;
+ prs->input_flags.ipfrag = 1;
+ }
+
+ return ipv6->next_hdr;
+}
+
+/**
+ * Parser helper function for TCP
+ *
+ * Requires PARSE_TCP_BYTES bytes of contiguous packet data.
+ */
+static inline void parse_tcp(packet_parser_t *prs, const uint8_t **parseptr,
+ uint16_t tcp_len,
+ odp_pktin_config_opt_t opt,
+ uint64_t *l4_part_sum)
+{
+ const _odp_tcphdr_t *tcp = (const _odp_tcphdr_t *)*parseptr;
+ uint32_t len = tcp->hl * 4;
+
+ if (odp_unlikely(tcp->hl < sizeof(_odp_tcphdr_t) / sizeof(uint32_t)))
+ prs->flags.tcp_err = 1;
+
+ if (opt.bit.tcp_chksum &&
+ !prs->input_flags.ipfrag) {
+ *l4_part_sum += odp_cpu_to_be_16(tcp_len);
+#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
+ *l4_part_sum += _ODP_IPPROTO_TCP;
+#else
+ *l4_part_sum += _ODP_IPPROTO_TCP << 8;
+#endif
+ }
+
+ *parseptr += len;
+}
+
+/**
+ * Parser helper function for UDP
+ *
+ * Requires PARSE_UDP_BYTES bytes of contiguous packet data. Advances
+ * *parseptr past the UDP header, sets udp_err on a bad length field,
+ * accumulates the pseudo-header part of the checksum into *l4_part_sum
+ * when requested, and flags UDP-encapsulated IPsec (non-ESP-marker
+ * payload on the IPsec UDP port).
+ */
+static inline void parse_udp(packet_parser_t *prs, const uint8_t **parseptr,
+			     odp_pktin_config_opt_t opt,
+			     uint64_t *l4_part_sum)
+{
+	const _odp_udphdr_t *udp = (const _odp_udphdr_t *)*parseptr;
+	uint32_t udplen = odp_be_to_cpu_16(udp->length);
+	uint16_t ipsec_port = odp_cpu_to_be_16(_ODP_UDP_IPSEC_PORT);
+
+	if (odp_unlikely(udplen < sizeof(_odp_udphdr_t))) {
+		prs->flags.udp_err = 1;
+		return;
+	}
+
+	if (opt.bit.udp_chksum &&
+	    !prs->input_flags.ipfrag) {
+		if (udp->chksum == 0) {
+			/* Zero checksum: legal for IPv4 only; an error for IPv6 */
+			prs->input_flags.l4_chksum_done = 1;
+			prs->flags.l4_chksum_err =
+				(prs->input_flags.ipv4 != 1);
+		} else {
+			/* udp->length is already big-endian, added as-is */
+			*l4_part_sum += udp->length;
+#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN
+			*l4_part_sum += _ODP_IPPROTO_UDP;
+#else
+			*l4_part_sum += _ODP_IPPROTO_UDP << 8;
+#endif
+		}
+		prs->input_flags.udp_chksum_zero = (udp->chksum == 0);
+	}
+
+	/* UDP-encapsulated IPsec: a non-zero first word after the UDP
+	 * header distinguishes ESP from a NAT-T keepalive/IKE marker */
+	if (odp_unlikely(ipsec_port == udp->dst_port && udplen > 4)) {
+		uint32_t val;
+
+		memcpy(&val, udp + 1, 4);
+		if (val != 0) {
+			prs->input_flags.ipsec = 1;
+			prs->input_flags.ipsec_udp = 1;
+		}
+	}
+
+	*parseptr += sizeof(_odp_udphdr_t);
+}
+
+/**
+ * Parser helper function for SCTP
+ *
+ * Requires PARSE_SCTP_BYTES bytes of contiguous packet data.
+ */
+static inline void parse_sctp(packet_parser_t *prs, const uint8_t **parseptr,
+ uint16_t sctp_len,
+ odp_pktin_config_opt_t opt,
+ uint64_t *l4_part_sum)
+{
+ /* remaining L4 bytes must at least hold the SCTP common header */
+ if (odp_unlikely(sctp_len < sizeof(_odp_sctphdr_t))) {
+ prs->flags.sctp_err = 1;
+ return;
+ }
+
+ /* SCTP uses CRC32c, not the Internet checksum: compute the CRC over
+  * the common header with the checksum field substituted by zeros and
+  * stash it in *l4_part_sum for the caller to continue over the
+  * payload. Skipped for fragments. */
+ if (opt.bit.sctp_chksum &&
+ !prs->input_flags.ipfrag) {
+ const _odp_sctphdr_t *sctp =
+ (const _odp_sctphdr_t *)*parseptr;
+ uint32_t crc = ~0;
+ uint32_t zero = 0;
+
+ crc = odp_hash_crc32c(sctp, sizeof(*sctp) - 4, crc);
+ crc = odp_hash_crc32c(&zero, 4, crc);
+ *l4_part_sum = crc;
+ }
+
+ *parseptr += sizeof(_odp_sctphdr_t);
+}
+
+/*
+ * Requires up to PARSE_L3_L4_BYTES bytes of contiguous packet data.
+ *
+ * - offset is the offset of the first byte of the data pointed to by parseptr
+ * - seg_end is the maximum offset that can be accessed plus one
+ */
+int _odp_packet_parse_common_l3_l4(packet_parser_t *prs,
+ const uint8_t *parseptr, uint32_t offset,
+ uint32_t frame_len, uint32_t seg_end,
+ int layer, uint16_t ethtype,
+ uint64_t *l4_part_sum,
+ odp_pktin_config_opt_t opt)
+{
+ uint8_t ip_proto;
+
+ prs->l3_offset = offset;
+
+ /* nothing above L2 requested: report current error state only */
+ if (odp_unlikely(layer <= ODP_PROTO_LAYER_L2))
+ return prs->flags.all.error != 0;
+
+ /* Set l3 flag only for known ethtypes */
+ prs->input_flags.l3 = 1;
+
+ /* Parse Layer 3 headers */
+ switch (ethtype) {
+ case _ODP_ETHTYPE_IPV4:
+ prs->input_flags.ipv4 = 1;
+ ip_proto = parse_ipv4(prs, &parseptr, &offset, frame_len,
+ opt, l4_part_sum);
+ if (odp_likely(!prs->flags.ip_err))
+ prs->l4_offset = offset;
+ else if (opt.bit.drop_ipv4_err)
+ return -1; /* drop */
+ break;
+
+ case _ODP_ETHTYPE_IPV6:
+ prs->input_flags.ipv6 = 1;
+ ip_proto = parse_ipv6(prs, &parseptr, &offset, frame_len,
+ seg_end, opt, l4_part_sum);
+ if (odp_likely(!prs->flags.ip_err))
+ prs->l4_offset = offset;
+ else if (opt.bit.drop_ipv6_err)
+ return -1; /* drop */
+ break;
+
+ case _ODP_ETHTYPE_ARP:
+ prs->input_flags.arp = 1;
+ ip_proto = 255; /* Reserved invalid by IANA */
+ break;
+
+ default:
+ /* unknown ethtype: clear the l3 flag set above */
+ prs->input_flags.l3 = 0;
+ ip_proto = 255; /* Reserved invalid by IANA */
+ }
+
+ if (layer == ODP_PROTO_LAYER_L3)
+ return prs->flags.all.error != 0;
+
+ /* Set l4 flag only for known ip_proto */
+ prs->input_flags.l4 = 1;
+
+ /* Parse Layer 4 headers */
+ switch (ip_proto) {
+ case _ODP_IPPROTO_ICMPV4:
+ /* Fall through */
+
+ case _ODP_IPPROTO_ICMPV6:
+ prs->input_flags.icmp = 1;
+ break;
+
+ case _ODP_IPPROTO_IPIP:
+ /* Do nothing */
+ break;
+
+ case _ODP_IPPROTO_TCP:
+ /* full TCP header must be contiguous for parse_tcp() */
+ if (odp_unlikely(offset + _ODP_TCPHDR_LEN > seg_end))
+ return -1;
+ prs->input_flags.tcp = 1;
+ parse_tcp(prs, &parseptr, frame_len - prs->l4_offset, opt,
+ l4_part_sum);
+ if (prs->flags.tcp_err && opt.bit.drop_tcp_err)
+ return -1; /* drop */
+ break;
+
+ case _ODP_IPPROTO_UDP:
+ /* full UDP header must be contiguous for parse_udp() */
+ if (odp_unlikely(offset + _ODP_UDPHDR_LEN > seg_end))
+ return -1;
+ prs->input_flags.udp = 1;
+ parse_udp(prs, &parseptr, opt, l4_part_sum);
+ if (prs->flags.udp_err && opt.bit.drop_udp_err)
+ return -1; /* drop */
+ break;
+
+ case _ODP_IPPROTO_AH:
+ prs->input_flags.ipsec = 1;
+ prs->input_flags.ipsec_ah = 1;
+ break;
+
+ case _ODP_IPPROTO_ESP:
+ prs->input_flags.ipsec = 1;
+ prs->input_flags.ipsec_esp = 1;
+ break;
+
+ case _ODP_IPPROTO_SCTP:
+ /* NOTE(review): unlike TCP/UDP there is no seg_end check here;
+  * presumably PARSE_L3_L4_BYTES guarantees availability (see the
+  * function's requirements comment) -- verify */
+ prs->input_flags.sctp = 1;
+ parse_sctp(prs, &parseptr, frame_len - prs->l4_offset, opt,
+ l4_part_sum);
+ if (prs->flags.sctp_err && opt.bit.drop_sctp_err)
+ return -1; /* drop */
+ break;
+
+ case _ODP_IPPROTO_NO_NEXT:
+ prs->input_flags.no_next_hdr = 1;
+ break;
+
+ default:
+ /* unknown protocol: clear the l4 flag set above */
+ prs->input_flags.l4 = 0;
+ break;
+ }
+
+ return prs->flags.all.error != 0;
+}
diff --git a/platform/linux-generic/odp_pcapng.c b/platform/linux-generic/odp_pcapng.c
new file mode 100644
index 000000000..7f11f4340
--- /dev/null
+++ b/platform/linux-generic/odp_pcapng.c
@@ -0,0 +1,606 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/autoheader_internal.h>
+
+#if defined(_ODP_PCAPNG) && _ODP_PCAPNG == 1
+
+#include <odp/api/hints.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/spinlock.h>
+
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/packet_io_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pcapng.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/uio.h>
+#include <sys/inotify.h>
+#include <sys/select.h>
+
+#define PCAPNG_BLOCK_TYPE_EPB 0x00000006UL
+#define PCAPNG_BLOCK_TYPE_SHB 0x0A0D0D0AUL
+#define PCAPNG_BLOCK_TYPE_IDB 0x00000001UL
+#define PCAPNG_ENDIAN_MAGIC 0x1A2B3C4DUL
+#define PCAPNG_DATA_ALIGN 4
+#define PCAPNG_LINKTYPE_ETHERNET 0x1
+
+/* inotify */
+#define INOTIFY_BUF_LEN (16 * (sizeof(struct inotify_event)))
+#define PCAPNG_WATCH_DIR "/var/run/odp/"
+
+#define PKTIO_MAX_QUEUES (ODP_PKTIN_MAX_QUEUES > ODP_PKTOUT_MAX_QUEUES ? \
+ ODP_PKTIN_MAX_QUEUES : ODP_PKTOUT_MAX_QUEUES)
+
+/* pcapng: enhanced packet block file encoding */
+typedef struct ODP_PACKED pcapng_section_hdr_block_s {
+ uint32_t block_type;
+ uint32_t block_total_length;
+ uint32_t magic; /* byte-order magic: lets readers detect endianness */
+ uint16_t version_major;
+ uint16_t version_minor;
+ int64_t section_len; /* -1 = unspecified section length */
+ uint32_t block_total_length2; /* trailing copy of the length */
+} pcapng_section_hdr_block_t;
+
+/* pcapng interface description block (IDB) */
+typedef struct pcapng_interface_description_block {
+ uint32_t block_type;
+ uint32_t block_total_length;
+ uint16_t linktype;
+ uint16_t reserved;
+ uint32_t snaplen;
+ uint32_t block_total_length2;
+} pcapng_interface_description_block_t;
+
+/* pcapng enhanced packet block (EPB) header; the packet data and a
+ * trailing copy of block_total_length follow it in the stream */
+typedef struct pcapng_enhanced_packet_block_s {
+ uint32_t block_type;
+ uint32_t block_total_length;
+ uint32_t interface_idx;
+ uint32_t timestamp_high;
+ uint32_t timestamp_low;
+ uint32_t captured_len;
+ uint32_t packet_len;
+} pcapng_enhanced_packet_block_t;
+
+/** Pktio entry specific data */
+typedef struct {
+ pktio_entry_t *pktio_entry;
+
+ /* inotify instances for pcapng fifos */
+ enum {
+ PCAPNG_WR_STOP = 0, /* no reader: capture disabled */
+ PCAPNG_WR_PKT, /* reader attached: write packet blocks */
+ } state[PKTIO_MAX_QUEUES];
+ int fd[PKTIO_MAX_QUEUES]; /* per-queue fifo fd, -1 when unused */
+} pcapng_entry_t;
+
+/* Global pcapng state, kept in shared memory; 'lock' guards the entry
+ * table and the inotify thread lifecycle */
+typedef struct ODP_ALIGNED_CACHE {
+ odp_shm_t shm;
+ int num_entries;
+ pthread_t inotify_thread;
+ int inotify_fd;
+ int inotify_watch_fd;
+ int inotify_is_running;
+ odp_spinlock_t lock;
+ pcapng_entry_t entry[CONFIG_PKTIO_ENTRIES];
+} pcapng_global_t;
+
+static pcapng_global_t *pcapng_gbl;
+
+/* Return the pcapng state slot of a pktio entry (indexed by pktio index) */
+static inline pcapng_entry_t *pcapng_entry(pktio_entry_t *pktio_entry)
+{
+ return &pcapng_gbl->entry[odp_pktio_index(pktio_entry->handle)];
+}
+
+int write_pcapng_hdr(pktio_entry_t *entry, int qidx);
+
+/* Reserve and zero the shared-memory block holding the global pcapng
+ * state. Returns 0 on success, -1 if the shm reservation fails. */
+int _odp_pcapng_init_global(void)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("_odp_pcapng_global", sizeof(pcapng_global_t),
+ ODP_PAGE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ pcapng_gbl = odp_shm_addr(shm);
+
+ memset(pcapng_gbl, 0, sizeof(pcapng_global_t));
+ /* remember the handle so _odp_pcapng_term_global() can free it */
+ pcapng_gbl->shm = shm;
+
+ odp_spinlock_init(&pcapng_gbl->lock);
+
+ return 0;
+}
+
+/* Release the shared memory holding the global pcapng state.
+ * Returns 0 on success, -1 if the shm free fails. */
+int _odp_pcapng_term_global(void)
+{
+	if (odp_shm_free(pcapng_gbl->shm) == 0)
+		return 0;
+
+	_ODP_ERR("shm free failed");
+	return -1;
+}
+
+/* Read and discard everything currently buffered in the fifo. */
+static void pcapng_drain_fifo(int fd)
+{
+	char scratch[4096];
+
+	while (read(fd, scratch, sizeof(scratch)) > 0)
+		;
+}
+
+/* React to an inotify event on a queue's capture fifo: on OPEN, write the
+ * pcapng file header and enable packet capture for the queue; on CLOSE,
+ * drain the fifo and disable capture. */
+static void inotify_event_handle(pktio_entry_t *entry, int qidx,
+ struct inotify_event *event)
+{
+ pcapng_entry_t *pcapng = pcapng_entry(entry);
+ int mtu = _ODP_MAX(odp_pktin_maxlen(entry->handle), odp_pktout_maxlen(entry->handle));
+
+ if (event->mask & IN_OPEN) {
+ int ret;
+
+ /* a full EPB (header + MTU-sized payload + trailing length
+  * word) must fit in PIPE_BUF so fifo writes stay atomic
+  * (see write_fifo()); otherwise capture cannot work */
+ if (PIPE_BUF < mtu + sizeof(pcapng_enhanced_packet_block_t) +
+ sizeof(uint32_t)) {
+ _ODP_ERR("PIPE_BUF:%d too small. Disabling pcap\n", PIPE_BUF);
+ pcapng->state[qidx] = PCAPNG_WR_STOP;
+
+ return;
+ }
+
+ ret = write_pcapng_hdr(entry, qidx);
+ if (ret) {
+ pcapng->state[qidx] = PCAPNG_WR_STOP;
+ } else {
+ pcapng->state[qidx] = PCAPNG_WR_PKT;
+ _ODP_DBG("Open %s for pcap tracing\n", event->name);
+ }
+ } else if (event->mask & IN_CLOSE) {
+ int fd = pcapng->fd[qidx];
+
+ /* discard anything the departed reader left unread */
+ pcapng_drain_fifo(fd);
+ pcapng->state[qidx] = PCAPNG_WR_STOP;
+ _ODP_DBG("Close %s for pcap tracing\n", event->name);
+ } else {
+ _ODP_ERR("Unknown inotify event 0x%08x\n", event->mask);
+ }
+}
+
+/* Compose the per-queue fifo name: "<pid>-<pktio name>-flow-<queue>".
+ * The result is always NUL terminated. (Parameter renamed so it no
+ * longer shadows the pcapng_entry() helper.) */
+static void get_pcapng_fifo_name(char *fifo_name, size_t len,
+				 char *pktio_name, int qidx)
+{
+	snprintf(fifo_name, len, "%d-%s-flow-%d",
+		 odp_global_ro.main_pid, pktio_name, qidx);
+	fifo_name[len - 1] = 0;
+}
+
+/* Map an inotify event file name back to a queue index of 'entry'.
+ * Returns the queue index, or -1 when the name matches no queue or the
+ * path is no longer a fifo. */
+static int get_qidx_from_fifo(pktio_entry_t *entry, char *name)
+{
+	unsigned int max_queue = _ODP_MAX(entry->num_in_queue, entry->num_out_queue);
+	unsigned int i;
+
+	for (i = 0; i < max_queue; i++) {
+		char pcapng_entry[256];
+
+		get_pcapng_fifo_name(pcapng_entry, sizeof(pcapng_entry),
+				     entry->name, i);
+		/*
+		 * verify we still talk to a fifo before returning a valid
+		 * queue number
+		 */
+		if (strcmp(name, pcapng_entry) == 0) {
+			struct stat st;
+			char pcapng_path[256];
+
+			snprintf(pcapng_path, sizeof(pcapng_path), "%s/%s",
+				 PCAPNG_WATCH_DIR, name);
+			/* Fix: stat() return value was ignored; on failure
+			 * (e.g. the fifo was just removed) st_mode was read
+			 * uninitialized. Treat a failed stat() as
+			 * "not a fifo". */
+			if (stat(pcapng_path, &st) != 0)
+				return -1;
+
+			return S_ISFIFO(st.st_mode) ? (int)i : -1;
+		}
+	}
+
+	return -1;
+}
+
+/* Find the pktio entry owning the fifo named by an inotify event, or NULL
+ * if no active entry matches. Scans the global entry table under the
+ * pcapng lock. */
+static pktio_entry_t *pktio_from_event(struct inotify_event *event)
+{
+ int i;
+
+ odp_spinlock_lock(&pcapng_gbl->lock);
+
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; i++) {
+ pktio_entry_t *entry = pcapng_gbl->entry[i].pktio_entry;
+
+ /* slot not in use (no _odp_pcapng_start() on this entry) */
+ if (entry == NULL)
+ continue;
+
+ if (get_qidx_from_fifo(entry, event->name) != -1) {
+ odp_spinlock_unlock(&pcapng_gbl->lock);
+ return entry;
+ }
+ }
+
+ odp_spinlock_unlock(&pcapng_gbl->lock);
+
+ return NULL;
+}
+
+/* Thread entry point: wait (2 s select() timeout) for inotify events on
+ * the watch directory and dispatch each OPEN/CLOSE event to the matching
+ * pktio entry and queue. Loops until cancelled from _odp_pcapng_stop();
+ * select() is a pthread cancellation point per POSIX. */
+static void *inotify_update(void *arg)
+{
+ struct timeval time;
+ ssize_t rdlen;
+ int offset;
+ char buffer[INOTIFY_BUF_LEN];
+ fd_set rfds;
+ int inotify_fd = *(int *)arg;
+
+ while (1) {
+ offset = 0;
+ FD_ZERO(&rfds);
+ FD_SET(inotify_fd, &rfds);
+ time.tv_sec = 2;
+ time.tv_usec = 0;
+ select(inotify_fd + 1, &rfds, NULL, NULL, &time);
+ if (FD_ISSET(inotify_fd, &rfds)) {
+ /* if read() fails (rdlen == -1) the while condition
+  * below is false and the buffer is simply skipped */
+ rdlen = read(inotify_fd, buffer, INOTIFY_BUF_LEN);
+ while (offset < rdlen) {
+ int qidx;
+ struct inotify_event *event =
+ (struct inotify_event *)(void *)
+ &buffer[offset];
+ pktio_entry_t *entry;
+
+ /* events are variable length: header plus
+  * a 'len'-byte name */
+ offset += sizeof(struct inotify_event) +
+ event->len;
+
+ entry = pktio_from_event(event);
+ if (entry == NULL)
+ continue;
+
+ qidx = get_qidx_from_fifo(entry, event->name);
+ if (qidx == -1)
+ continue;
+
+ inotify_event_handle(entry, qidx, event);
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* Read the system pipe size limit from /proc/sys/fs/pipe-max-size.
+ * Returns the limit in bytes, or -1 when it cannot be read. */
+static int get_fifo_max_size(void)
+{
+	char line[128];
+	int max_size = -1;
+	FILE *proc_file = fopen("/proc/sys/fs/pipe-max-size", "r");
+
+	if (proc_file == NULL)
+		return -1;
+
+	if (fgets(line, sizeof(line), proc_file) != NULL)
+		max_size = atoi(line);
+
+	fclose(proc_file);
+
+	return max_size;
+}
+
+/*
+ * Create the per-queue capture fifos for a pktio entry and, on first use,
+ * start the global inotify thread that watches PCAPNG_WATCH_DIR for
+ * readers opening/closing those fifos.
+ *
+ * Returns 0 on success, nonzero on failure (pcapng is then disabled).
+ */
+int _odp_pcapng_start(pktio_entry_t *entry)
+{
+	pcapng_entry_t *pcapng = pcapng_entry(entry);
+	int ret = -1, fd;
+	pthread_attr_t attr;
+	unsigned int i;
+	unsigned int max_queue = _ODP_MAX(entry->num_in_queue, entry->num_out_queue);
+	int fifo_sz;
+
+	fifo_sz = get_fifo_max_size();
+	if (fifo_sz < 0)
+		_ODP_DBG("failed to read max fifo size\n");
+
+	for (i = 0; i < max_queue; i++) {
+		char pcapng_name[128];
+		char pcapng_path[256];
+
+		pcapng->fd[i] = -1;
+		pcapng->state[i] = PCAPNG_WR_STOP;
+
+		get_pcapng_fifo_name(pcapng_name, sizeof(pcapng_name),
+				     entry->name, i);
+		snprintf(pcapng_path, sizeof(pcapng_path), "%s/%s",
+			 PCAPNG_WATCH_DIR, pcapng_name);
+		/* Fix: mkfifo() takes permission bits (mode_t), not open()
+		 * flags. Passing O_RDWR (== 02) created the fifo briefly
+		 * world-writable before the chmod() below. Create it owner
+		 * rw instead; chmod() still sets the final permissions. */
+		if (mkfifo(pcapng_path, S_IRUSR | S_IWUSR)) {
+			_ODP_ERR("pcap not available for %s %s\n", pcapng_path, strerror(errno));
+			continue;
+		}
+
+		if (chmod(pcapng_path, S_IRUSR | S_IRGRP))
+			_ODP_ERR("Failed to change file permission for %s %s\n",
+				 pcapng_path, strerror(errno));
+
+		fd = open(pcapng_path, O_RDWR | O_NONBLOCK);
+		if (fd == -1) {
+			_ODP_ERR("Fail to open fifo\n");
+			pcapng->state[i] = PCAPNG_WR_STOP;
+			if (remove(pcapng_path) == -1)
+				_ODP_ERR("Can't remove fifo %s\n", pcapng_path);
+			continue;
+		}
+
+		/* enlarge the fifo to the system maximum, best effort */
+		if (fifo_sz > 0) {
+			if (fcntl(fd, F_SETPIPE_SZ, fifo_sz) != fifo_sz)
+				_ODP_DBG("Failed to set max fifo size\n");
+			else
+				_ODP_DBG("set pcap fifo size %i\n", fifo_sz);
+		}
+
+		pcapng->fd[i] = fd;
+	}
+
+	odp_spinlock_lock(&pcapng_gbl->lock);
+
+	/* already running from a previous pktio */
+	if (pcapng_gbl->inotify_is_running == 1) {
+		pcapng->pktio_entry = entry;
+		pcapng_gbl->num_entries++;
+		odp_spinlock_unlock(&pcapng_gbl->lock);
+		return 0;
+	}
+
+	pcapng_gbl->inotify_fd = -1;
+	pcapng_gbl->inotify_watch_fd = -1;
+
+	pcapng_gbl->inotify_fd = inotify_init();
+	if (pcapng_gbl->inotify_fd == -1) {
+		_ODP_ERR("can't init inotify. pcap disabled\n");
+		goto out_destroy;
+	}
+
+	pcapng_gbl->inotify_watch_fd = inotify_add_watch(pcapng_gbl->inotify_fd,
+							 PCAPNG_WATCH_DIR,
+							 IN_CLOSE | IN_OPEN);
+
+	if (pcapng_gbl->inotify_watch_fd == -1) {
+		_ODP_ERR("can't register inotify for %s. pcap disabled\n", strerror(errno));
+		goto out_destroy;
+	}
+
+	/* create a thread to poll inotify triggers */
+	pthread_attr_init(&attr);
+	ret = pthread_create(&pcapng_gbl->inotify_thread, &attr, inotify_update,
+			     &pcapng_gbl->inotify_fd);
+	if (ret) {
+		_ODP_ERR("Can't start inotify thread (ret=%d). pcapng disabled.\n", ret);
+	} else {
+		pcapng->pktio_entry = entry;
+		pcapng_gbl->num_entries++;
+		pcapng_gbl->inotify_is_running = 1;
+	}
+
+	odp_spinlock_unlock(&pcapng_gbl->lock);
+
+	return ret;
+
+out_destroy:
+	odp_spinlock_unlock(&pcapng_gbl->lock);
+
+	_odp_pcapng_stop(entry);
+
+	return ret;
+}
+
+/*
+ * Stop capture for a pktio entry: tear down the inotify thread and watch
+ * when the last entry goes away, then close and delete the per-queue
+ * fifos created by _odp_pcapng_start().
+ */
+void _odp_pcapng_stop(pktio_entry_t *entry)
+{
+	pcapng_entry_t *pcapng = pcapng_entry(entry);
+	int ret;
+	unsigned int i;
+	unsigned int max_queue = _ODP_MAX(entry->num_in_queue, entry->num_out_queue);
+
+	odp_spinlock_lock(&pcapng_gbl->lock);
+
+	pcapng->pktio_entry = NULL;
+	pcapng_gbl->num_entries--;
+
+	if (pcapng_gbl->inotify_is_running == 1 &&
+	    pcapng_gbl->num_entries == 0) {
+		ret = pthread_cancel(pcapng_gbl->inotify_thread);
+		if (ret)
+			_ODP_ERR("can't cancel inotify thread %s\n", strerror(errno));
+		pcapng_gbl->inotify_is_running = 0;
+	}
+
+	if (pcapng_gbl->num_entries == 0) {
+		/* fd's will be -1 in case of any failure */
+		ret = inotify_rm_watch(pcapng_gbl->inotify_fd,
+				       pcapng_gbl->inotify_watch_fd);
+		if (ret)
+			_ODP_ERR("can't deregister inotify %s\n", strerror(errno));
+
+		if (pcapng_gbl->inotify_fd != -1)
+			close(pcapng_gbl->inotify_fd);
+
+		if (pcapng_gbl->inotify_watch_fd != -1)
+			close(pcapng_gbl->inotify_watch_fd);
+	}
+
+	odp_spinlock_unlock(&pcapng_gbl->lock);
+
+	for (i = 0; i < max_queue; i++) {
+		char pcapng_name[128];
+		char pcapng_path[256];
+
+		pcapng->state[i] = PCAPNG_WR_STOP;
+
+		/* Fix: queues whose fifo setup failed in _odp_pcapng_start()
+		 * have fd == -1 and no fifo left on disk (start() already
+		 * removed it or never created it). Skip them instead of
+		 * calling close(-1) and logging a bogus remove() error. */
+		if (pcapng->fd[i] < 0)
+			continue;
+
+		close(pcapng->fd[i]);
+		pcapng->fd[i] = -1;
+
+		get_pcapng_fifo_name(pcapng_name, sizeof(pcapng_name),
+				     entry->name, i);
+		snprintf(pcapng_path, sizeof(pcapng_path), "%s/%s",
+			 PCAPNG_WATCH_DIR, pcapng_name);
+
+		if (remove(pcapng_path))
+			_ODP_ERR("can't delete fifo %s\n", pcapng_path);
+	}
+}
+
+/* Write the pcapng section header block (SHB) and interface description
+ * block (IDB) into the fifo of queue 'qidx'. These must lead the stream
+ * or the capture is unreadable. Returns 0 on success, -1 on a short or
+ * failed write. */
+int write_pcapng_hdr(pktio_entry_t *entry, int qidx)
+{
+ pcapng_entry_t *pcapng = pcapng_entry(entry);
+ size_t len;
+ pcapng_section_hdr_block_t shb;
+ pcapng_interface_description_block_t idb;
+ int fd = pcapng->fd[qidx];
+
+ memset(&shb, 0, sizeof(shb));
+ memset(&idb, 0, sizeof(idb));
+
+ shb.block_type = PCAPNG_BLOCK_TYPE_SHB;
+ shb.block_total_length = sizeof(shb);
+ shb.block_total_length2 = sizeof(shb);
+ /* byte-order magic: reader infers our endianness from it */
+ shb.magic = PCAPNG_ENDIAN_MAGIC;
+ shb.version_major = 0x1;
+ shb.version_minor = 0x0;
+ shb.section_len = -1; /* section length unspecified */
+
+ /* note: write() returning -1 also fails the comparison below, since
+  * len is unsigned */
+ len = write(fd, &shb, sizeof(shb));
+ /* fail to write shb/idb means the pcapng is unreadable */
+ if (len != sizeof(shb)) {
+ _ODP_ERR("Failed to write pcapng section hdr\n");
+ return -1;
+ }
+ fsync(fd);
+
+ idb.block_type = PCAPNG_BLOCK_TYPE_IDB;
+ idb.block_total_length = sizeof(idb);
+ idb.block_total_length2 = sizeof(idb);
+ idb.linktype = PCAPNG_LINKTYPE_ETHERNET;
+ idb.snaplen = 0x0; /* unlimited */
+ len = write(fd, &idb, sizeof(idb));
+ if (len != sizeof(idb)) {
+ _ODP_ERR("Failed to write pcapng interface description\n");
+ return -1;
+ }
+ fsync(fd);
+
+ return 0;
+}
+
+/*
+ * make sure that each fifo write is less than PIPE_BUF
+ * this will make sure writes are atomic (on non blocking mode).
+ * writev() transfers all the data and returns the number of bytes requested or
+ * -EAGAIN
+ */
+/* Write one iovec batch to the fifo. The caller keeps each batch under
+ * PIPE_BUF so the non-blocking writev() is atomic: it either transfers
+ * everything or fails (e.g. EAGAIN). Returns writev()'s result. */
+static ssize_t write_fifo(int fd, struct iovec *iov, int iovcnt)
+{
+	ssize_t written = writev(fd, iov, iovcnt);
+
+	/*
+	 * A failed writev() is tolerated: the fifo is read asynchronously,
+	 * so a later batch may succeed. The error only means some packets
+	 * were not appended to the pcap stream.
+	 */
+	if (written > 0)
+		fsync(fd);
+
+	return written;
+}
+
+/* Encode up to 'num' packets as pcapng enhanced packet blocks and write
+ * them into the fifo of queue 'qidx'. Returns the total number of bytes
+ * written (0 when no reader has the fifo open, which is the likely case).
+ * NOTE(review): only the data reachable from offset 0 of the first
+ * segment is captured (odp_packet_offset() with seg_len) -- multi-segment
+ * packets appear truncated; confirm this is intended. */
+int _odp_pcapng_dump_pkts(pktio_entry_t *entry, int qidx,
+ const odp_packet_t packets[], int num)
+{
+ pcapng_entry_t *pcapng = pcapng_entry(entry);
+ int i = 0;
+ struct iovec packet_iov[3 * num];
+ pcapng_enhanced_packet_block_t epb[num];
+ int iovcnt = 0;
+ ssize_t block_len = 0;
+ int fd = pcapng->fd[qidx];
+ ssize_t len = 0, wlen;
+
+ if (odp_likely(pcapng->state[qidx] != PCAPNG_WR_PKT))
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(packets[i]);
+ uint32_t seg_len;
+ char *buf = (char *)odp_packet_offset(packets[i], 0, &seg_len,
+ NULL);
+
+ /* flush the pending batch before this packet would push it
+  * past PIPE_BUF, keeping each writev() atomic (write_fifo()).
+  * NOTE(review): on a failed flush the pending iovecs are kept
+  * and the next batch can exceed PIPE_BUF -- best effort,
+  * verify acceptable. */
+ if (block_len + sizeof(epb[i]) +
+ _ODP_ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN) +
+ sizeof(uint32_t) > PIPE_BUF) {
+ wlen = write_fifo(fd, packet_iov, iovcnt);
+ if (wlen > 0) {
+ len += wlen;
+ block_len = 0;
+ iovcnt = 0;
+ }
+ }
+ /* total length = EPB header + padded data + trailing 4-byte
+  * duplicate of the length (PCAPNG_DATA_ALIGN == 4) */
+ epb[i].block_type = PCAPNG_BLOCK_TYPE_EPB;
+ epb[i].block_total_length = sizeof(epb[i]) +
+ _ODP_ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN) +
+ PCAPNG_DATA_ALIGN;
+ epb[i].interface_idx = 0;
+ epb[i].timestamp_high =
+ (uint32_t)(pkt_hdr->timestamp.u64 >> 32);
+ epb[i].timestamp_low = (uint32_t)(pkt_hdr->timestamp.u64);
+ epb[i].captured_len = seg_len;
+ epb[i].packet_len = seg_len;
+
+ /* epb */
+ packet_iov[iovcnt].iov_base = &epb[i];
+ packet_iov[iovcnt].iov_len = sizeof(epb[i]);
+ block_len += packet_iov[iovcnt].iov_len;
+ iovcnt++;
+
+ /* data */
+ packet_iov[iovcnt].iov_base = buf;
+ packet_iov[iovcnt].iov_len = _ODP_ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN);
+ block_len += packet_iov[iovcnt].iov_len;
+ iovcnt++;
+
+ /* trailing: pcapng blocks end with a copy of the length */
+ packet_iov[iovcnt].iov_base = &epb[i].block_total_length;
+ packet_iov[iovcnt].iov_len = sizeof(uint32_t);
+ block_len += packet_iov[iovcnt].iov_len;
+ iovcnt++;
+ }
+
+ /* flush whatever is still queued */
+ if (iovcnt) {
+ wlen = write_fifo(fd, packet_iov, iovcnt);
+ if (wlen > 0)
+ len += wlen;
+ }
+
+ return len;
+}
+
+#else /* _ODP_PCAPNG */
+/* Avoid warning about empty translation unit */
+typedef int _odp_dummy;
+#endif
diff --git a/platform/linux-generic/odp_pkt_queue.c b/platform/linux-generic/odp_pkt_queue.c
index 7c6cd87e1..d3dd6639f 100644
--- a/platform/linux-generic/odp_pkt_queue.c
+++ b/platform/linux-generic/odp_pkt_queue.c
@@ -1,44 +1,39 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/packet.h>
+
+#include <odp_pkt_queue_internal.h>
+#include <odp_traffic_mngr_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+
#include <stdint.h>
#include <string.h>
#include <malloc.h>
#include <stdio.h>
#include <inttypes.h>
-#include <odp_api.h>
-#include <odp_pkt_queue_internal.h>
-#include <odp_debug_internal.h>
-
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define NUM_PKTS 7
-typedef struct /* Must be exactly 64 bytes long AND cacheline aligned! */ {
+/* Must be exactly 64 bytes long AND cacheline aligned! */
+typedef struct ODP_ALIGNED_CACHE {
uint32_t next_queue_blk_idx;
uint32_t tail_queue_blk_idx;
odp_packet_t pkts[NUM_PKTS];
-} ODP_ALIGNED_CACHE queue_blk_t;
+} queue_blk_t;
-typedef struct {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+typedef struct ODP_ALIGNED_CACHE {
queue_blk_t blks[0];
-} ODP_ALIGNED_CACHE queue_blks_t;
-
-/* The queue_num_tbl is used to map from a queue_num to a queue_num_desc.
- * The reason is based on the assumption that usually only a small fraction
- * of the max_num_queues will have more than 1 pkt associated with it. This
- * way the active queue_desc's can be dynamically allocated and freed according
- * to the actual usage pattern.
- */
-typedef struct {
- uint32_t queue_num_to_blk_idx[0];
-} queue_num_tbl_t;
+} queue_blks_t;
+#pragma GCC diagnostic pop
typedef struct {
uint32_t num_blks;
@@ -47,6 +42,7 @@ typedef struct {
} queue_region_desc_t;
typedef struct {
+ uint8_t queue_status[ODP_TM_MAX_TM_QUEUES];
uint64_t total_pkt_appends;
uint64_t total_pkt_removes;
uint64_t total_bad_removes;
@@ -56,7 +52,7 @@ typedef struct {
uint32_t free_list_head_idx;
uint32_t max_queue_num;
uint32_t max_queued_pkts;
- uint32_t next_queue_num;
+ uint32_t num_queues;
queue_region_desc_t queue_region_descs[16];
uint32_t *queue_num_tbl;
uint8_t current_region;
@@ -85,17 +81,27 @@ static queue_blk_t *blk_idx_to_queue_blk(queue_pool_t *queue_pool,
return &queue_region_desc->queue_blks->blks[blk_tbl_idx];
}
+/* Roll back a partial allocation: free the first 'end' entries of
+ * 'blk_array'. */
+static void free_alloced_queue_blks(uint32_t end, queue_blks_t *blk_array[])
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < end; idx++)
+		free(blk_array[idx]);
+}
+
static int pkt_queue_free_list_add(queue_pool_t *pool,
uint32_t num_queue_blks)
{
queue_region_desc_t *region_desc;
queue_blks_t *queue_blks;
+ queue_blks_t *alloced_queue_blks[num_queue_blks];
queue_blk_t *queue_blk;
uint32_t which_region, blks_added, num_blks, start_idx;
- uint32_t malloc_len, blks_to_add, cnt, i;
+ uint32_t malloc_len, blks_to_add, cnt, i, alloc_cnt;
which_region = pool->current_region;
blks_added = 0;
+ alloc_cnt = 0;
while ((blks_added < num_queue_blks) && (pool->all_regions_used == 0)) {
region_desc = &pool->queue_region_descs[which_region];
start_idx = region_desc->next_blk_idx;
@@ -104,6 +110,13 @@ static int pkt_queue_free_list_add(queue_pool_t *pool,
if (!queue_blks) {
malloc_len = num_blks * sizeof(queue_blk_t);
queue_blks = malloc(malloc_len);
+ if (!queue_blks) {
+ free_alloced_queue_blks(alloc_cnt,
+ alloced_queue_blks);
+ return -1;
+ }
+ alloced_queue_blks[alloc_cnt] = queue_blks;
+ alloc_cnt++;
for (i = 0; i < num_blks; i++)
init_queue_blk(&queue_blks->blks[i]);
@@ -112,7 +125,7 @@ static int pkt_queue_free_list_add(queue_pool_t *pool,
}
/* Now add as many queue_blks to the free list as... */
- blks_to_add = MIN(num_blks - start_idx, num_queue_blks);
+ blks_to_add = _ODP_MIN(num_blks - start_idx, num_queue_blks);
queue_blk = &queue_blks->blks[start_idx];
for (cnt = 1; cnt <= blks_to_add; cnt++) {
queue_blk->next_queue_blk_idx = start_idx + cnt;
@@ -194,12 +207,23 @@ _odp_int_queue_pool_t _odp_queue_pool_create(uint32_t max_num_queues,
int rc;
pool = malloc(sizeof(queue_pool_t));
+ if (!pool)
+ return _ODP_INT_QUEUE_POOL_INVALID;
+
memset(pool, 0, sizeof(queue_pool_t));
+ malloc_len = max_num_queues * sizeof(uint32_t);
+ pool->queue_num_tbl = malloc(malloc_len);
+ if (!pool->queue_num_tbl) {
+ free(pool);
+ return _ODP_INT_QUEUE_POOL_INVALID;
+ }
+ memset(pool->queue_num_tbl, 0, malloc_len);
+
/* Initialize the queue_blk_tbl_sizes array based upon the
* max_queued_pkts.
*/
- max_queued_pkts = MAX(max_queued_pkts, 64 * 1024);
+ max_queued_pkts = _ODP_MAX(max_queued_pkts, 64 * UINT32_C(1024));
queue_region_desc_init(pool, 0, max_queued_pkts / 4);
queue_region_desc_init(pool, 1, max_queued_pkts / 64);
queue_region_desc_init(pool, 2, max_queued_pkts / 64);
@@ -211,21 +235,20 @@ _odp_int_queue_pool_t _odp_queue_pool_create(uint32_t max_num_queues,
/* Now allocate the first queue_blk_tbl and add its blks to the free
* list. Replenish the queue_blk_t free list.
*/
- initial_free_list_size = MIN(64 * 1024, max_queued_pkts / 4);
+ initial_free_list_size = _ODP_MIN(64 * UINT32_C(1024), max_queued_pkts / 4);
rc = pkt_queue_free_list_add(pool, initial_free_list_size);
- if (rc < 0)
+ if (rc < 0) {
+ free(pool->queue_num_tbl);
+ free(pool);
return _ODP_INT_QUEUE_POOL_INVALID;
+ }
/* Discard the first queue blk with idx 0 */
queue_blk_alloc(pool, &first_queue_blk_idx);
pool->max_queue_num = max_num_queues;
pool->max_queued_pkts = max_queued_pkts;
- pool->next_queue_num = 1;
-
- malloc_len = max_num_queues * sizeof(uint32_t);
- pool->queue_num_tbl = malloc(malloc_len);
- memset(pool->queue_num_tbl, 0, malloc_len);
+ pool->num_queues = 0;
pool->min_free_list_size = pool->free_list_size;
pool->peak_free_list_size = pool->free_list_size;
@@ -234,15 +257,35 @@ _odp_int_queue_pool_t _odp_queue_pool_create(uint32_t max_num_queues,
_odp_int_pkt_queue_t _odp_pkt_queue_create(_odp_int_queue_pool_t queue_pool)
{
- queue_pool_t *pool;
- uint32_t queue_num;
+ queue_pool_t *pool = (queue_pool_t *)(uintptr_t)queue_pool;
+ uint32_t i;
- pool = (queue_pool_t *)(uintptr_t)queue_pool;
- queue_num = pool->next_queue_num++;
- if (pool->max_queue_num < queue_num)
+ if (pool->num_queues >= pool->max_queue_num)
return _ODP_INT_PKT_QUEUE_INVALID;
- return (_odp_int_pkt_queue_t)queue_num;
+ for (i = 0; i < pool->max_queue_num; i++) {
+ if (pool->queue_status[i] == TM_STATUS_FREE) {
+ pool->queue_status[i] = TM_STATUS_RESERVED;
+ pool->num_queues++;
+ return (_odp_int_pkt_queue_t)(i + 1);
+ }
+ }
+ return _ODP_INT_PKT_QUEUE_INVALID;
+}
+
+/* Release a packet queue number previously handed out by
+ * _odp_pkt_queue_create(). Queue IDs are 1-based (0 is invalid), so the
+ * status slot is queue_num - 1. */
+void _odp_pkt_queue_destroy(_odp_int_queue_pool_t queue_pool,
+ _odp_int_pkt_queue_t pkt_queue)
+{
+ queue_pool_t *pool = (queue_pool_t *)(uintptr_t)queue_pool;
+ uint32_t queue_num = (uint32_t)pkt_queue;
+
+ if ((queue_num == 0) || (queue_num > pool->max_queue_num)) {
+ _ODP_ERR("Invalid TM packet queue ID\n");
+ return;
+ }
+
+ pool->queue_status[queue_num - 1] = TM_STATUS_FREE;
+ pool->num_queues--;
+}
int _odp_pkt_queue_append(_odp_int_queue_pool_t queue_pool,
@@ -363,17 +406,17 @@ void _odp_pkt_queue_stats_print(_odp_int_queue_pool_t queue_pool)
queue_pool_t *pool;
pool = (queue_pool_t *)(uintptr_t)queue_pool;
- ODP_DBG("pkt_queue_stats - queue_pool=0x%" PRIX64 "\n", queue_pool);
- ODP_DBG(" max_queue_num=%u max_queued_pkts=%u next_queue_num=%u\n",
- pool->max_queue_num, pool->max_queued_pkts,
- pool->next_queue_num);
- ODP_DBG(" total pkt appends=%" PRIu64 " total pkt removes=%" PRIu64
- " bad removes=%" PRIu64 "\n",
- pool->total_pkt_appends, pool->total_pkt_removes,
- pool->total_bad_removes);
- ODP_DBG(" free_list size=%u min size=%u peak size=%u\n",
- pool->free_list_size, pool->min_free_list_size,
- pool->peak_free_list_size);
+ _ODP_PRINT(" pkt_queue_stats - queue_pool=0x%" PRIX64 "\n", queue_pool);
+ _ODP_PRINT(" max_queue_num=%" PRIu32 " max_queued_pkts=%" PRIu32 " "
+ "num_queues=%" PRIu32 "\n", pool->max_queue_num,
+ pool->max_queued_pkts, pool->num_queues);
+ _ODP_PRINT(" total pkt appends=%" PRIu64 " total pkt removes=%" PRIu64
+ " bad removes=%" PRIu64 "\n",
+ pool->total_pkt_appends, pool->total_pkt_removes,
+ pool->total_bad_removes);
+ _ODP_PRINT(" free_list size=%u min size=%u peak size=%u\n",
+ pool->free_list_size, pool->min_free_list_size,
+ pool->peak_free_list_size);
}
void _odp_queue_pool_destroy(_odp_int_queue_pool_t queue_pool)
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index cf7c2c415..d3fde70f6 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -1,125 +1,398 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/align.h>
+#include <odp/api/atomic.h>
+#include <odp/api/hints.h>
#include <odp/api/pool.h>
#include <odp/api/shared_memory.h>
-#include <odp/api/align.h>
+#include <odp/api/system_info.h>
#include <odp/api/ticketlock.h>
+#include <odp/api/plat/pool_inline_types.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp/api/plat/ticketlock_inlines.h>
+
#include <odp_pool_internal.h>
-#include <odp_internal.h>
-#include <odp_buffer_inlines.h>
+#include <odp_init_internal.h>
#include <odp_packet_internal.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
-#include <odp_ring_internal.h>
+#include <odp_event_internal.h>
+#include <odp_event_validation_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ring_ptr_internal.h>
+#include <odp_global_data.h>
+#include <odp_libconfig_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_timer_internal.h>
+#include <odp_event_vector_internal.h>
+#include <odp_buffer_internal.h>
#include <string.h>
#include <stdio.h>
+#include <stddef.h>
#include <inttypes.h>
-#include <odp/api/plat/ticketlock_inlines.h>
-#define LOCK(a) _odp_ticketlock_lock(a)
-#define UNLOCK(a) _odp_ticketlock_unlock(a)
+#define LOCK(a) odp_ticketlock_lock(a)
+#define UNLOCK(a) odp_ticketlock_unlock(a)
#define LOCK_INIT(a) odp_ticketlock_init(a)
-#define CACHE_BURST 32
-#define RING_SIZE_MIN (2 * CACHE_BURST)
+#define RING_SIZE_MIN 64
+#define POOL_MAX_NUM_MIN RING_SIZE_MIN
+
+/* Make sure packet buffers don't cross huge page boundaries starting from this
+ * page size. 2MB is typically the smallest used huge page size. */
+#define FIRST_HP_SIZE (2 * 1024 * 1024)
/* Define a practical limit for contiguous memory allocations */
#define MAX_SIZE (10 * 1024 * 1024)
-ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST),
- "cache_burst_size_too_large_compared_to_cache_size");
+/* Maximum packet user area size */
+#define MAX_UAREA_SIZE 2048
ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256,
"ODP Segment size must be a minimum of 256 bytes");
+ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_SIZE < 0xffff,
+ "Segment size must be less than 64k (16 bit offsets)");
+
+ODP_STATIC_ASSERT(CONFIG_INTERNAL_POOLS < CONFIG_POOLS,
+ "Internal pool count needs to be less than total configured pool count");
+
/* Thread local variables */
typedef struct pool_local_t {
- pool_cache_t *cache[ODP_CONFIG_POOLS];
+ pool_cache_t *cache[CONFIG_POOLS];
int thr_id;
+
} pool_local_t;
-pool_table_t *pool_tbl;
+extern const _odp_pool_mem_src_ops_t * const _odp_pool_mem_src_ops[];
+
+pool_global_t *_odp_pool_glb;
static __thread pool_local_t local;
-static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx)
+#include <odp/visibility_begin.h>
+
+/* Fill in pool header field offsets for inline functions */
+const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
+ .index = offsetof(pool_t, pool_idx),
+ .seg_len = offsetof(pool_t, seg_len),
+ .uarea_size = offsetof(pool_t, param_uarea_size),
+ .trailer_size = offsetof(pool_t, trailer_size),
+ .ext_head_offset = offsetof(pool_t, ext_head_offset),
+ .ext_pkt_buf_size = offsetof(pool_t, ext_param.pkt.buf_size)
+};
+
+#include <odp/visibility_end.h>
+
+static inline void cache_init(pool_cache_t *cache)
+{
+ memset(cache, 0, sizeof(pool_cache_t));
+ odp_atomic_init_u32(&cache->cache_num, 0);
+}
+
+static inline uint32_t cache_pop(pool_cache_t *cache,
+ _odp_event_hdr_t *event_hdr[], int max_num)
+{
+ uint32_t cache_num = odp_atomic_load_u32(&cache->cache_num);
+ uint32_t num_ch = max_num;
+ uint32_t cache_begin;
+ uint32_t i;
+
+ /* Cache does not have enough buffers */
+ if (odp_unlikely(cache_num < (uint32_t)max_num))
+ num_ch = cache_num;
+
+ /* Get buffers from the cache */
+ cache_begin = cache_num - num_ch;
+ for (i = 0; i < num_ch; i++)
+ event_hdr[i] = cache->event_hdr[cache_begin + i];
+
+ odp_atomic_store_u32(&cache->cache_num, cache_num - num_ch);
+
+ return num_ch;
+}
+
+static inline void cache_push(pool_cache_t *cache, _odp_event_hdr_t *event_hdr[],
+ uint32_t num)
+{
+ uint32_t cache_num = odp_atomic_load_u32(&cache->cache_num);
+ uint32_t i;
+
+ for (i = 0; i < num; i++)
+ cache->event_hdr[cache_num + i] = event_hdr[i];
+
+ odp_atomic_store_u32(&cache->cache_num, cache_num + num);
+}
+
+static void cache_flush(pool_cache_t *cache, pool_t *pool)
+{
+ _odp_event_hdr_t *event_hdr;
+ ring_ptr_t *ring;
+ uint32_t mask;
+
+ ring = &pool->ring->hdr;
+ mask = pool->ring_mask;
+
+ while (cache_pop(cache, &event_hdr, 1))
+ ring_ptr_enq(ring, mask, event_hdr);
+}
+
+static inline int cache_available(pool_t *pool, odp_pool_stats_t *stats)
{
- return _odp_cast_scalar(odp_pool_t, pool_idx);
+ uint64_t cached = 0;
+ const uint16_t first = stats->thread.first;
+ const uint16_t last = stats->thread.last;
+ const odp_bool_t cache_available = pool->params.stats.bit.cache_available;
+ const odp_bool_t per_thread = pool->params.stats.bit.thread_cache_available;
+ const int max_threads = odp_thread_count_max();
+ uint16_t out_idx = 0;
+ int i, idx_limit;
+
+ if (per_thread) {
+ if (first > last || last >= max_threads) {
+ _ODP_ERR("Bad thread ids: first=%" PRIu16 " last=%" PRIu16 "\n",
+ first, last);
+ return -1;
+ }
+
+ if (last - first + 1 > ODP_POOL_MAX_THREAD_STATS) {
+ _ODP_ERR("Too many thread ids: max=%d\n", ODP_POOL_MAX_THREAD_STATS);
+ return -1;
+ }
+ }
+
+ if (cache_available) {
+ i = 0;
+ idx_limit = max_threads;
+ } else {
+ i = first;
+ idx_limit = last + 1;
+ }
+
+ for (; i < idx_limit; i++) {
+ uint32_t cur = odp_atomic_load_u32(&pool->local_cache[i].cache_num);
+
+ if (per_thread && i >= first && i <= last)
+ stats->thread.cache_available[out_idx++] = cur;
+
+ cached += cur;
+ }
+
+ if (cache_available)
+ stats->cache_available = cached;
+
+ return 0;
}
-static inline uint32_t pool_id_from_buf(odp_buffer_t buf)
+static inline uint64_t cache_total_available(pool_t *pool)
{
- odp_buffer_bits_t handle;
+ uint64_t cached = 0;
+ const int max_threads = odp_thread_count_max();
+
+ for (int i = 0; i < max_threads; i++)
+ cached += odp_atomic_load_u32(&pool->local_cache[i].cache_num);
+
+ return cached;
+}
+
+static int read_config_file(pool_global_t *pool_glb)
+{
+ uint32_t local_cache_size, burst_size, align;
+ const char *str;
+ int val = 0;
+
+ _ODP_PRINT("Pool config:\n");
+
+ str = "pool.local_cache_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > CONFIG_POOL_CACHE_MAX_SIZE || val < 0) {
+ _ODP_ERR("Bad value %s = %i, max %i\n", str, val, CONFIG_POOL_CACHE_MAX_SIZE);
+ return -1;
+ }
+
+ local_cache_size = val;
+ pool_glb->config.local_cache_size = local_cache_size;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "pool.burst_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val <= 0) {
+ _ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+
+ burst_size = val;
+ pool_glb->config.burst_size = burst_size;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ /* Check local cache size and burst size relation */
+ if (local_cache_size % burst_size) {
+ _ODP_ERR("Pool cache size not multiple of burst size\n");
+ return -1;
+ }
+
+ if (local_cache_size && (local_cache_size / burst_size < 2)) {
+ _ODP_ERR("Cache burst size too large compared to cache size\n");
+ return -1;
+ }
+
+ str = "pool.pkt.max_num";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > CONFIG_POOL_MAX_NUM || val < POOL_MAX_NUM_MIN) {
+ _ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
- handle.handle = buf;
- return handle.pool_id;
+ pool_glb->config.pkt_max_num = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "pool.pkt.max_len";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val <= 0) {
+ _ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+
+ pool_glb->config.pkt_max_len = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "pool.pkt.base_align";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ align = val;
+ if (val == 0)
+ align = ODP_CACHE_LINE_SIZE;
+
+ if (!_ODP_CHECK_IS_POWER2(align)) {
+ _ODP_ERR("Not a power of two: %s = %i\n", str, val);
+ return -1;
+ }
+
+ pool_glb->config.pkt_base_align = align;
+ _ODP_PRINT(" %s: %u\n", str, align);
+
+ str = "pool.buf.min_align";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ align = val;
+ if (val == 0)
+ align = ODP_CACHE_LINE_SIZE;
+
+ if (!_ODP_CHECK_IS_POWER2(align)) {
+ _ODP_ERR("Not a power of two: %s = %i\n", str, val);
+ return -1;
+ }
+
+ pool_glb->config.buf_min_align = align;
+ _ODP_PRINT(" %s: %u\n", str, align);
+
+ _ODP_PRINT("\n");
+
+ return 0;
}
-int odp_pool_init_global(void)
+int _odp_pool_init_global(void)
{
uint32_t i;
odp_shm_t shm;
- shm = odp_shm_reserve("_odp_pool_table",
- sizeof(pool_table_t),
- ODP_CACHE_LINE_SIZE, 0);
+ shm = odp_shm_reserve("_odp_pool_global",
+ sizeof(pool_global_t),
+ ODP_CACHE_LINE_SIZE,
+ 0);
- pool_tbl = odp_shm_addr(shm);
+ _odp_pool_glb = odp_shm_addr(shm);
- if (pool_tbl == NULL)
+ if (_odp_pool_glb == NULL)
return -1;
- memset(pool_tbl, 0, sizeof(pool_table_t));
- pool_tbl->shm = shm;
+ memset(_odp_pool_glb, 0, sizeof(pool_global_t));
+ _odp_pool_glb->shm = shm;
+
+ if (read_config_file(_odp_pool_glb)) {
+ odp_shm_free(shm);
+ _odp_pool_glb = NULL;
+ return -1;
+ }
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool_t *pool = pool_entry(i);
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
LOCK_INIT(&pool->lock);
- pool->pool_hdl = pool_index_to_handle(i);
pool->pool_idx = i;
}
- ODP_DBG("\nPool init global\n");
- ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
- ODP_DBG(" odp_packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t));
- ODP_DBG("\n");
+ _ODP_DBG("\nPool init global\n");
+ _ODP_DBG(" event_hdr_t size %zu\n", sizeof(_odp_event_hdr_t));
+ _ODP_DBG(" buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
+ _ODP_DBG(" packet_hdr_t size %zu\n", sizeof(odp_packet_hdr_t));
+ _ODP_DBG(" timeout_hdr_t size %zu\n", sizeof(odp_timeout_hdr_t));
+ _ODP_DBG(" event_vector_hdr_t size %zu\n", sizeof(odp_event_vector_hdr_t));
+ _ODP_DBG(" packet_hdr_t::seg_data offset %zu\n", offsetof(odp_packet_hdr_t, seg_data));
+ _ODP_DBG(" packet_hdr_t::timestamp offset %zu\n", offsetof(odp_packet_hdr_t, timestamp));
+ _ODP_DBG("\n");
return 0;
}
-int odp_pool_term_global(void)
+int _odp_pool_term_global(void)
{
int i;
pool_t *pool;
int ret = 0;
int rc = 0;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = pool_entry(i);
+ if (_odp_pool_glb == NULL)
+ return 0;
+
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
if (pool->reserved) {
- ODP_ERR("Not destroyed pool: %s\n", pool->name);
+ _ODP_ERR("Not destroyed pool: %s\n", pool->name);
rc = -1;
}
UNLOCK(&pool->lock);
}
- ret = odp_shm_free(pool_tbl->shm);
+ ret = odp_shm_free(_odp_pool_glb->shm);
if (ret < 0) {
- ODP_ERR("shm free failed");
+ _ODP_ERR("SHM free failed\n");
rc = -1;
}
return rc;
}
-int odp_pool_init_local(void)
+int _odp_pool_init_local(void)
{
pool_t *pool;
int i;
@@ -127,73 +400,68 @@ int odp_pool_init_local(void)
memset(&local, 0, sizeof(pool_local_t));
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = pool_entry(i);
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool = _odp_pool_entry_from_idx(i);
local.cache[i] = &pool->local_cache[thr_id];
- local.cache[i]->num = 0;
+ cache_init(local.cache[i]);
}
local.thr_id = thr_id;
return 0;
}
-static void flush_cache(pool_cache_t *cache, pool_t *pool)
-{
- ring_t *ring;
- uint32_t mask;
- uint32_t cache_num, i, data;
-
- ring = &pool->ring->hdr;
- mask = pool->ring_mask;
- cache_num = cache->num;
-
- for (i = 0; i < cache_num; i++) {
- data = (uint32_t)(uintptr_t)cache->buf[i];
- ring_enq(ring, mask, data);
- }
-
- cache->num = 0;
-}
-
-int odp_pool_term_local(void)
+int _odp_pool_term_local(void)
{
int i;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool_t *pool = pool_entry(i);
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
- flush_cache(local.cache[i], pool);
+ cache_flush(local.cache[i], pool);
}
return 0;
}
-static pool_t *reserve_pool(void)
+static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
{
int i;
+ odp_shm_t shm;
+ uint32_t mem_size;
pool_t *pool;
char ring_name[ODP_POOL_NAME_LEN];
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = pool_entry(i);
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
if (pool->reserved == 0) {
pool->reserved = 1;
UNLOCK(&pool->lock);
- sprintf(ring_name, "pool_ring_%d", i);
- pool->ring_shm =
- odp_shm_reserve(ring_name,
- sizeof(pool_ring_t),
- ODP_CACHE_LINE_SIZE, 0);
- if (odp_unlikely(pool->ring_shm == ODP_SHM_INVALID)) {
- ODP_ERR("Unable to alloc pool ring %d\n", i);
+
+ memset(&pool->memset_mark, 0,
+ sizeof(pool_t) - offsetof(pool_t, memset_mark));
+ sprintf(ring_name, "_odp_pool_ring_%d", i);
+
+ /* Reserve memory for the ring, and for lookup table in case of pool ext */
+ mem_size = sizeof(pool_ring_t);
+ if (pool_ext)
+ mem_size += num * sizeof(_odp_event_hdr_t *);
+
+ shm = odp_shm_reserve(ring_name, mem_size, ODP_CACHE_LINE_SIZE, shmflags);
+
+ if (odp_unlikely(shm == ODP_SHM_INVALID)) {
+ _ODP_ERR("Unable to alloc pool ring %d\n", i);
LOCK(&pool->lock);
pool->reserved = 0;
UNLOCK(&pool->lock);
break;
}
- pool->ring = odp_shm_addr(pool->ring_shm);
+
+ pool->ring_shm = shm;
+ pool->ring = odp_shm_addr(shm);
+ pool->pool_ext = pool_ext;
+
return pool;
}
UNLOCK(&pool->lock);
@@ -202,231 +470,523 @@ static pool_t *reserve_pool(void)
return NULL;
}
-static odp_buffer_t form_buffer_handle(uint32_t pool_idx, uint32_t buffer_idx)
+static void init_event_hdr(pool_t *pool, _odp_event_hdr_t *event_hdr, uint32_t event_index,
+ uint8_t *data_ptr, void *uarea)
{
- odp_buffer_bits_t bits;
+ uint32_t hdr_len;
+ odp_pool_type_t type = pool->type;
+
+ if (type == ODP_POOL_BUFFER)
+ hdr_len = sizeof(odp_buffer_hdr_t);
+ else if (type == ODP_POOL_PACKET)
+ hdr_len = sizeof(odp_packet_hdr_t);
+ else if (type == ODP_POOL_VECTOR)
+ hdr_len = sizeof(odp_event_vector_hdr_t);
+ else if (type == ODP_POOL_TIMEOUT)
+ hdr_len = sizeof(odp_timeout_hdr_t);
+ else
+ hdr_len = sizeof(_odp_event_hdr_t);
+
+ /* Zero all event and type specific header fields */
+ memset(event_hdr, 0, hdr_len);
+
+ /* Initialize common event metadata */
+ event_hdr->index.pool = pool->pool_idx;
+ event_hdr->index.event = event_index;
+ event_hdr->type = type;
+ event_hdr->event_type = type;
+ event_hdr->subtype = ODP_EVENT_NO_SUBTYPE;
+ event_hdr->pool = _odp_pool_handle(pool);
+
+ /* Store base values for fast init */
+ if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET) {
+ event_hdr->base_data = data_ptr;
+ event_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
+ _odp_event_endmark_set(_odp_event_from_hdr(event_hdr));
+ }
+
+ if (type == ODP_POOL_BUFFER) {
+ odp_buffer_hdr_t *buf_hdr = (void *)event_hdr;
+
+ buf_hdr->uarea_addr = uarea;
+ }
- bits.handle = 0;
- bits.pool_id = pool_idx;
- bits.index = buffer_idx;
+ /* Initialize segmentation metadata */
+ if (type == ODP_POOL_PACKET) {
+ odp_packet_hdr_t *pkt_hdr = (void *)event_hdr;
- return bits.handle;
+ pkt_hdr->user_ptr = NULL;
+ pkt_hdr->uarea_addr = uarea;
+ pkt_hdr->seg_data = data_ptr;
+ pkt_hdr->seg_len = pool->seg_len;
+ pkt_hdr->seg_count = 1;
+ pkt_hdr->seg_next = NULL;
+
+ odp_atomic_init_u32(&pkt_hdr->ref_cnt, 0);
+ }
+
+ /* Initialize event vector metadata */
+ if (type == ODP_POOL_VECTOR) {
+ odp_event_vector_hdr_t *vect_hdr = (void *)event_hdr;
+
+ event_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
+ vect_hdr->uarea_addr = uarea;
+ }
+
+ /* Initialize timeout metadata */
+ if (type == ODP_POOL_TIMEOUT) {
+ odp_timeout_hdr_t *tmo_hdr = (void *)event_hdr;
+
+ tmo_hdr->uarea_addr = uarea;
+ }
}
static void init_buffers(pool_t *pool)
{
- uint32_t i;
+ _odp_event_hdr_t *event_hdr;
odp_buffer_hdr_t *buf_hdr;
odp_packet_hdr_t *pkt_hdr;
- odp_buffer_t buf_hdl;
+ odp_shm_info_t shm_info;
void *addr;
void *uarea = NULL;
- uint8_t *data;
+ uint8_t *data = NULL;
+ uint8_t *data_ptr = NULL;
uint32_t offset;
- ring_t *ring;
+ ring_ptr_t *ring;
uint32_t mask;
- int type;
- uint32_t seg_size;
+ odp_pool_type_t type;
+ uint64_t page_size;
+ int skipped_blocks = 0;
+
+ if (odp_shm_info(pool->shm, &shm_info))
+ _ODP_ABORT("Shm info failed\n");
+ page_size = shm_info.page_size;
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- type = pool->params.type;
+ type = pool->type;
+
+ for (uint64_t i = 0; i < pool->num + skipped_blocks ; i++) {
+ int skip = 0;
+ addr = &pool->base_addr[i * pool->block_size];
+
+ /* Skip packet buffers which cross huge page boundaries. Some
+ * NICs cannot handle buffers which cross page boundaries. */
+ if (type == ODP_POOL_PACKET && page_size >= FIRST_HP_SIZE) {
+ uint64_t first_page;
+ uint64_t last_page;
+
+ first_page = ((uint64_t)(uintptr_t)addr &
+ ~(page_size - 1));
+ last_page = (((uint64_t)(uintptr_t)addr +
+ pool->block_size - 1) &
+ ~(page_size - 1));
+ if (last_page != first_page) {
+ skipped_blocks++;
+ skip = 1;
+ }
+ }
- for (i = 0; i < pool->num; i++) {
- addr = &pool->base_addr[i * pool->block_size];
+ addr = (uint8_t *)addr + pool->block_offset;
+ event_hdr = addr;
buf_hdr = addr;
pkt_hdr = addr;
if (pool->uarea_size)
+ uarea = &pool->uarea_base_addr[(i - skipped_blocks) *
+ pool->uarea_size];
+
+ /* Only buffers and packets have data pointer */
+ if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET) {
+ if (type == ODP_POOL_BUFFER)
+ data = buf_hdr->data;
+ else
+ data = pkt_hdr->data;
+
+ offset = pool->headroom;
+
+ /* Move to correct align */
+ while (((uintptr_t)&data[offset]) % pool->align != 0)
+ offset++;
+
+ data_ptr = &data[offset];
+ }
+
+ init_event_hdr(pool, event_hdr, i, data_ptr, uarea);
+
+ /* Store buffer into the global pool */
+ if (!skip)
+ ring_ptr_enq(ring, mask, event_hdr);
+ }
+ pool->skipped_blocks = skipped_blocks;
+
+ if (pool->uarea_size && pool->params.uarea_init.init_fn) {
+ for (uint32_t i = 0; i < pool->num; i++) {
uarea = &pool->uarea_base_addr[i * pool->uarea_size];
+ pool->params.uarea_init.init_fn(uarea, pool->param_uarea_size,
+ pool->params.uarea_init.args, i);
+ }
+ }
+}
- data = buf_hdr->data;
+static bool shm_is_from_huge_pages(odp_shm_t shm)
+{
+ odp_shm_info_t info;
+ uint64_t huge_page_size = odp_sys_huge_page_size();
- if (type == ODP_POOL_PACKET)
- data = pkt_hdr->data;
+ if (huge_page_size == 0)
+ return 0;
- offset = pool->headroom;
+ if (odp_shm_info(shm, &info)) {
+ _ODP_ERR("Failed to fetch shm info\n");
+ return 0;
+ }
- /* move to correct align */
- while (((uintptr_t)&data[offset]) % pool->align != 0)
- offset++;
+ return (info.page_size >= huge_page_size);
+}
- memset(buf_hdr, 0, (uintptr_t)data - (uintptr_t)buf_hdr);
+static void set_pool_name(pool_t *pool, const char *name)
+{
+ if (name == NULL) {
+ pool->name[0] = 0;
+ } else {
+ strncpy(pool->name, name, ODP_POOL_NAME_LEN - 1);
+ pool->name[ODP_POOL_NAME_LEN - 1] = 0;
+ }
+}
- seg_size = pool->headroom + pool->data_size + pool->tailroom;
+static void set_pool_cache_size(pool_t *pool, uint32_t cache_size)
+{
+ uint32_t burst_size;
- /* Initialize buffer metadata */
- buf_hdr->size = seg_size;
- buf_hdr->type = type;
- buf_hdr->event_type = type;
- buf_hdr->pool_hdl = pool->pool_hdl;
- buf_hdr->uarea_addr = uarea;
- /* Show user requested size through API */
- buf_hdr->uarea_size = pool->params.pkt.uarea_size;
- buf_hdr->segcount = 1;
+ pool->cache_size = 0;
+ pool->burst_size = 1;
- /* Pointer to data start (of the first segment) */
- buf_hdr->seg[0].hdr = buf_hdr;
- buf_hdr->seg[0].data = &data[offset];
- buf_hdr->seg[0].len = pool->data_size;
+ if (cache_size > 1) {
+ cache_size = (cache_size / 2) * 2;
+ burst_size = _odp_pool_glb->config.burst_size;
- /* Store base values for fast init */
- buf_hdr->base_data = buf_hdr->seg[0].data;
- buf_hdr->buf_end = &data[offset + pool->data_size +
- pool->tailroom];
+ if ((cache_size / burst_size) < 2)
+ burst_size = cache_size / 2;
- buf_hdl = form_buffer_handle(pool->pool_idx, i);
- buf_hdr->handle.handle = buf_hdl;
+ pool->cache_size = cache_size;
+ pool->burst_size = burst_size;
+ }
+}
- /* Store buffer into the global pool */
- ring_enq(ring, mask, (uint32_t)(uintptr_t)buf_hdl);
+static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt, uint32_t shmflags)
+{
+ odp_shm_t shm;
+ const char *max_prefix = "pool_000_uarea_";
+ int max_prefix_len = strlen(max_prefix);
+ char uarea_name[ODP_POOL_NAME_LEN + max_prefix_len];
+
+ pool->uarea_shm = ODP_SHM_INVALID;
+
+ if (uarea_size == 0) {
+ pool->param_uarea_size = 0;
+ pool->uarea_size = 0;
+ pool->uarea_shm_size = 0;
+ return 0;
+ }
+
+ sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
+
+ pool->param_uarea_size = uarea_size;
+ pool->uarea_size = _ODP_ROUNDUP_CACHE_LINE(uarea_size);
+ pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
+
+ shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, shmflags);
+
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ pool->uarea_shm = shm;
+ pool->uarea_base_addr = odp_shm_addr(shm);
+ return 0;
+}
+
+static void set_mem_src_ops(pool_t *pool)
+{
+ odp_bool_t is_active_found = false;
+
+ pool->mem_src_ops = NULL;
+
+ for (int i = 0; _odp_pool_mem_src_ops[i]; i++) {
+ if (!is_active_found) {
+ if (_odp_pool_mem_src_ops[i]->is_active()) {
+ is_active_found = true;
+ pool->mem_src_ops = _odp_pool_mem_src_ops[i];
+ _ODP_DBG("Packet pool as a memory source for: %s\n",
+ pool->mem_src_ops->name);
+ }
+ } else if (_odp_pool_mem_src_ops[i]->is_active()) {
+ _odp_pool_mem_src_ops[i]->force_disable();
+ }
}
}
-static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
- uint32_t shmflags)
+/* Create pool according to params. Actual type of the pool is type_2, which is recorded for pool
+ * info calls. */
+odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
+ odp_pool_type_t type_2)
{
pool_t *pool;
uint32_t uarea_size, headroom, tailroom;
odp_shm_t shm;
- uint32_t data_size, align, num, hdr_size, block_size;
- uint32_t max_len, max_seg_len;
+ uint32_t seg_len, align, num, hdr_size, block_size;
+ uint32_t max_len, cache_size, trailer_size;
uint32_t ring_size;
- int name_len;
- const char *postfix = "_uarea";
- char uarea_name[ODP_POOL_NAME_LEN + sizeof(postfix)];
-
- if (params == NULL) {
- ODP_ERR("No params");
- return ODP_POOL_INVALID;
- }
+ odp_pool_type_t type = params->type;
+ uint32_t shmflags = 0;
+ uint32_t num_extra = 0;
+ const char *max_prefix = "pool_000_";
+ int max_prefix_len = strlen(max_prefix);
+ char shm_name[ODP_POOL_NAME_LEN + max_prefix_len];
+
+ if (type == ODP_POOL_PACKET)
+ shmflags = ODP_SHM_PROC;
+ if (odp_global_ro.shm_single_va)
+ shmflags |= ODP_SHM_SINGLE_VA;
align = 0;
- if (params->type == ODP_POOL_BUFFER)
- align = params->buf.align;
+ if (type == ODP_POOL_PACKET) {
+ uint32_t align_req = params->pkt.align;
+
+ if (align_req &&
+ (!_ODP_CHECK_IS_POWER2(align_req) ||
+ align_req > _odp_pool_glb->config.pkt_base_align)) {
+ _ODP_ERR("Bad align requirement\n");
+ return ODP_POOL_INVALID;
+ }
+
+ align = _odp_pool_glb->config.pkt_base_align;
+ } else {
+ if (type == ODP_POOL_BUFFER)
+ align = params->buf.align;
- if (align < ODP_CONFIG_BUFFER_ALIGN_MIN)
- align = ODP_CONFIG_BUFFER_ALIGN_MIN;
+ if (align < _odp_pool_glb->config.buf_min_align)
+ align = _odp_pool_glb->config.buf_min_align;
+ }
/* Validate requested buffer alignment */
- if (align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
- align != ROUNDDOWN_POWER2(align, align)) {
- ODP_ERR("Bad align requirement");
+ if (align > CONFIG_BUFFER_ALIGN_MAX ||
+ align != _ODP_ROUNDDOWN_POWER2(align, align)) {
+ _ODP_ERR("Bad align requirement\n");
return ODP_POOL_INVALID;
}
headroom = 0;
tailroom = 0;
- data_size = 0;
+ seg_len = 0;
max_len = 0;
- max_seg_len = 0;
+ trailer_size = 0;
uarea_size = 0;
+ cache_size = 0;
- switch (params->type) {
+ switch (type) {
case ODP_POOL_BUFFER:
num = params->buf.num;
- data_size = params->buf.size;
+ seg_len = params->buf.size;
+ uarea_size = params->buf.uarea_size;
+ cache_size = params->buf.cache_size;
+ trailer_size = _ODP_EV_ENDMARK_SIZE;
break;
case ODP_POOL_PACKET:
+ if (params->pkt.headroom > CONFIG_PACKET_HEADROOM) {
+ _ODP_ERR("Packet headroom size not supported\n");
+ return ODP_POOL_INVALID;
+ }
+
+ num = params->pkt.num;
+ seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+ max_len = _odp_pool_glb->config.pkt_max_len;
+ trailer_size = _ODP_EV_ENDMARK_SIZE;
+
+ if (params->pkt.len &&
+ params->pkt.len < CONFIG_PACKET_MAX_SEG_LEN)
+ seg_len = params->pkt.len;
+ if (params->pkt.seg_len && params->pkt.seg_len > seg_len)
+ seg_len = params->pkt.seg_len;
+ if (seg_len < CONFIG_PACKET_SEG_LEN_MIN)
+ seg_len = CONFIG_PACKET_SEG_LEN_MIN;
+
+ /* Make sure that at least one 'max_len' packet can fit in the
+ * pool. */
+ if (params->pkt.max_len != 0)
+ max_len = params->pkt.max_len;
+ if ((max_len + seg_len - 1) / seg_len > PKT_MAX_SEGS)
+ seg_len = (max_len + PKT_MAX_SEGS - 1) / PKT_MAX_SEGS;
+ if (seg_len > CONFIG_PACKET_MAX_SEG_LEN) {
+ _ODP_ERR("Pool unable to store 'max_len' packet\n");
+ return ODP_POOL_INVALID;
+ }
+
+ /* Multiple segments required per 'params->pkt.len' packet */
+ if (params->pkt.len > seg_len)
+ num *= (params->pkt.len + seg_len - 1) / seg_len;
+
+ /* Make sure 'params->pkt.max_num' limitation holds */
+ if (params->pkt.max_num && num > params->pkt.max_num) {
+ _ODP_ERR("Pool 'max_num' parameter too small (%u/%u)\n",
+ params->pkt.max_num, num);
+ return ODP_POOL_INVALID;
+ }
+
headroom = CONFIG_PACKET_HEADROOM;
tailroom = CONFIG_PACKET_TAILROOM;
- num = params->pkt.num;
uarea_size = params->pkt.uarea_size;
- data_size = CONFIG_PACKET_MAX_SEG_LEN;
- max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
- max_len = CONFIG_PACKET_MAX_SEGS * max_seg_len;
+ cache_size = params->pkt.cache_size;
break;
case ODP_POOL_TIMEOUT:
num = params->tmo.num;
+ uarea_size = params->tmo.uarea_size;
+ cache_size = params->tmo.cache_size;
+ break;
+
+ case ODP_POOL_VECTOR:
+ num = params->vector.num;
+ uarea_size = params->vector.uarea_size;
+ cache_size = params->vector.cache_size;
+ seg_len = params->vector.max_size * sizeof(odp_packet_t);
break;
default:
- ODP_ERR("Bad pool type");
+ _ODP_ERR("Bad pool type\n");
return ODP_POOL_INVALID;
}
- if (uarea_size)
- uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
-
- pool = reserve_pool();
+ pool = reserve_pool(shmflags, 0, num);
if (pool == NULL) {
- ODP_ERR("No more free pools");
+ _ODP_ERR("No more free pools\n");
return ODP_POOL_INVALID;
}
- if (name == NULL) {
- pool->name[0] = 0;
- } else {
- strncpy(pool->name, name,
- ODP_POOL_NAME_LEN - 1);
- pool->name[ODP_POOL_NAME_LEN - 1] = 0;
- }
+ set_pool_name(pool, name);
- name_len = strlen(pool->name);
- memcpy(uarea_name, pool->name, name_len);
- strcpy(&uarea_name[name_len], postfix);
+ /* Format SHM names from prefix, pool index and pool name. */
+ sprintf(shm_name, "pool_%03i_%s", pool->pool_idx, pool->name);
+ pool->type = type;
+ pool->type_2 = type_2;
pool->params = *params;
+ pool->block_offset = 0;
+ set_mem_src_ops(pool);
+
+ if (type == ODP_POOL_PACKET) {
+ uint32_t adj_size;
- hdr_size = sizeof(odp_packet_hdr_t);
- hdr_size = ROUNDUP_CACHE_LINE(hdr_size);
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
+ block_size = hdr_size + align + headroom + seg_len + tailroom + trailer_size;
+ adj_size = block_size;
- block_size = ROUNDUP_CACHE_LINE(hdr_size + align + headroom +
- data_size + tailroom);
+ if (pool->mem_src_ops && pool->mem_src_ops->adjust_size) {
+ pool->mem_src_ops->adjust_size(pool->mem_src_data, &adj_size,
+ &pool->block_offset, &shmflags);
- if (num <= RING_SIZE_MIN)
+ if (!adj_size) {
+ _ODP_ERR("Calculating adjusted block size failed\n");
+ return ODP_POOL_INVALID;
+ }
+ }
+
+ if (adj_size != block_size)
+ block_size = adj_size;
+ else
+ block_size = _ODP_ROUNDUP_CACHE_LINE(block_size);
+ } else {
+ /* Header size is rounded up to cache line size, so the
+ * following data can be cache line aligned without extra
+ * padding. */
+ uint32_t align_pad = (align > ODP_CACHE_LINE_SIZE) ?
+ align - ODP_CACHE_LINE_SIZE : 0;
+
+ if (type == ODP_POOL_BUFFER)
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t));
+ else if (type == ODP_POOL_TIMEOUT)
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
+ else
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
+
+ block_size = _ODP_ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len + trailer_size);
+ }
+
+ /* Allocate extra memory for skipping packet buffers which cross huge
+ * page boundaries. */
+ if (type == ODP_POOL_PACKET) {
+ num_extra = ((((uint64_t)num * block_size) +
+ FIRST_HP_SIZE - 1) / FIRST_HP_SIZE);
+ num_extra += ((((uint64_t)num_extra * block_size) +
+ FIRST_HP_SIZE - 1) / FIRST_HP_SIZE);
+ }
+
+ /* Ring size must be larger than the number of items stored */
+ if (num + 1 <= RING_SIZE_MIN)
ring_size = RING_SIZE_MIN;
else
- ring_size = ROUNDUP_POWER2_U32(num);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(num + 1);
pool->ring_mask = ring_size - 1;
pool->num = num;
pool->align = align;
pool->headroom = headroom;
- pool->data_size = data_size;
+ pool->seg_len = seg_len;
+ pool->trailer_size = trailer_size;
+ pool->max_seg_len = headroom + seg_len + tailroom;
pool->max_len = max_len;
- pool->max_seg_len = max_seg_len;
pool->tailroom = tailroom;
pool->block_size = block_size;
- pool->uarea_size = uarea_size;
- pool->shm_size = num * block_size;
- pool->uarea_shm_size = num * uarea_size;
+ pool->shm_size = (num + num_extra) * (uint64_t)block_size;
+
+ set_pool_cache_size(pool, cache_size);
- shm = odp_shm_reserve(pool->name, pool->shm_size,
- ODP_PAGE_SIZE, shmflags);
+ shm = odp_shm_reserve(shm_name, pool->shm_size, ODP_PAGE_SIZE,
+ shmflags);
pool->shm = shm;
if (shm == ODP_SHM_INVALID) {
- ODP_ERR("Shm reserve failed");
+ _ODP_ERR("SHM reserve failed\n");
goto error;
}
+ pool->mem_from_huge_pages = shm_is_from_huge_pages(pool->shm);
+
pool->base_addr = odp_shm_addr(pool->shm);
+ pool->max_addr = pool->base_addr + pool->shm_size - 1;
- pool->uarea_shm = ODP_SHM_INVALID;
- if (uarea_size) {
- shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size,
- ODP_PAGE_SIZE, shmflags);
+ if (reserve_uarea(pool, uarea_size, num, shmflags)) {
+ _ODP_ERR("User area SHM reserve failed\n");
+ goto error;
+ }
- pool->uarea_shm = shm;
+ ring_ptr_init(&pool->ring->hdr);
+ init_buffers(pool);
- if (shm == ODP_SHM_INVALID) {
- ODP_ERR("Shm reserve failed (uarea)");
- goto error;
- }
+ if (type == ODP_POOL_PACKET && pool->mem_src_ops && pool->mem_src_ops->bind &&
+ pool->mem_src_ops->bind(pool->mem_src_data, pool)) {
+ _ODP_ERR("Binding pool as memory source failed\n");
+ goto error;
+ }
- pool->uarea_base_addr = odp_shm_addr(pool->uarea_shm);
+ /* Total ops utilizes alloc_ops and free_ops counters */
+ if (pool->params.stats.bit.total_ops) {
+ pool->params.stats.bit.alloc_ops = 1;
+ pool->params.stats.bit.free_ops = 1;
}
- ring_init(&pool->ring->hdr);
- init_buffers(pool);
+ /* Reset pool stats */
+ odp_atomic_init_u64(&pool->stats.alloc_ops, 0);
+ odp_atomic_init_u64(&pool->stats.alloc_fails, 0);
+ odp_atomic_init_u64(&pool->stats.free_ops, 0);
+ odp_atomic_init_u64(&pool->stats.cache_alloc_ops, 0);
+ odp_atomic_init_u64(&pool->stats.cache_free_ops, 0);
- return pool->pool_hdl;
+ return _odp_pool_handle(pool);
error:
if (pool->shm != ODP_SHM_INVALID)
@@ -441,90 +1001,187 @@ error:
return ODP_POOL_INVALID;
}
-static int check_params(odp_pool_param_t *params)
+static int check_params(const odp_pool_param_t *params)
{
odp_pool_capability_t capa;
+ uint32_t cache_size, num;
+ int num_threads = odp_global_ro.init_param.num_control +
+ odp_global_ro.init_param.num_worker;
+ int cur_threads = odp_thread_count();
+
+ if (!params || odp_pool_capability(&capa) < 0)
+ return -1;
- odp_pool_capability(&capa);
+ num = 0;
+ cache_size = 0;
+ if (num_threads < cur_threads)
+ num_threads = cur_threads;
switch (params->type) {
case ODP_POOL_BUFFER:
+ num = params->buf.num;
+ cache_size = params->buf.cache_size;
+
if (params->buf.num > capa.buf.max_num) {
- printf("buf.num too large %u\n", params->buf.num);
+ _ODP_ERR("buf.num too large %u\n", params->buf.num);
return -1;
}
if (params->buf.size > capa.buf.max_size) {
- printf("buf.size too large %u\n", params->buf.size);
+ _ODP_ERR("buf.size too large %u\n", params->buf.size);
return -1;
}
if (params->buf.align > capa.buf.max_align) {
- printf("buf.align too large %u\n", params->buf.align);
+ _ODP_ERR("buf.align too large %u\n", params->buf.align);
+ return -1;
+ }
+
+ if (params->buf.uarea_size > capa.buf.max_uarea_size) {
+ _ODP_ERR("buf.uarea_size too large %u\n", params->buf.uarea_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.buf.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
return -1;
}
break;
case ODP_POOL_PACKET:
+ num = params->pkt.num;
+ cache_size = params->pkt.cache_size;
+
+ if (params->pkt.num > capa.pkt.max_num) {
+ _ODP_ERR("pkt.num too large %u\n", params->pkt.num);
+ return -1;
+ }
+
+ if (params->pkt.max_num > capa.pkt.max_num) {
+ _ODP_ERR("pkt.max_num too large %u\n", params->pkt.max_num);
+ return -1;
+ }
+
if (params->pkt.len > capa.pkt.max_len) {
- printf("pkt.len too large %u\n", params->pkt.len);
+ _ODP_ERR("pkt.len too large %u\n", params->pkt.len);
return -1;
}
if (params->pkt.max_len > capa.pkt.max_len) {
- printf("pkt.max_len too large %u\n",
- params->pkt.max_len);
+ _ODP_ERR("pkt.max_len too large %u\n", params->pkt.max_len);
return -1;
}
if (params->pkt.seg_len > capa.pkt.max_seg_len) {
- printf("pkt.seg_len too large %u\n",
- params->pkt.seg_len);
+ _ODP_ERR("pkt.seg_len too large %u\n", params->pkt.seg_len);
return -1;
}
if (params->pkt.uarea_size > capa.pkt.max_uarea_size) {
- printf("pkt.uarea_size too large %u\n",
- params->pkt.uarea_size);
+ _ODP_ERR("pkt.uarea_size too large %u\n", params->pkt.uarea_size);
+ return -1;
+ }
+
+ if (params->pkt.headroom > capa.pkt.max_headroom) {
+ _ODP_ERR("pkt.headroom too large %u\n", params->pkt.headroom);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.pkt.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
return -1;
}
break;
case ODP_POOL_TIMEOUT:
+ num = params->tmo.num;
+ cache_size = params->tmo.cache_size;
+
if (params->tmo.num > capa.tmo.max_num) {
- printf("tmo.num too large %u\n", params->tmo.num);
+ _ODP_ERR("tmo.num too large %u\n", params->tmo.num);
return -1;
}
+
+ if (params->tmo.uarea_size > capa.tmo.max_uarea_size) {
+ _ODP_ERR("tmo.uarea_size too large %u\n", params->tmo.uarea_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.tmo.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
+ break;
+
+ case ODP_POOL_VECTOR:
+ num = params->vector.num;
+ cache_size = params->vector.cache_size;
+
+ if (params->vector.num == 0) {
+ _ODP_ERR("vector.num zero\n");
+ return -1;
+ }
+
+ if (params->vector.num > capa.vector.max_num) {
+ _ODP_ERR("vector.num too large %u\n", params->vector.num);
+ return -1;
+ }
+
+ if (params->vector.max_size == 0) {
+ _ODP_ERR("vector.max_size zero\n");
+ return -1;
+ }
+
+ if (params->vector.max_size > capa.vector.max_size) {
+ _ODP_ERR("vector.max_size too large %u\n", params->vector.max_size);
+ return -1;
+ }
+
+ if (params->vector.uarea_size > capa.vector.max_uarea_size) {
+ _ODP_ERR("vector.uarea_size too large %u\n", params->vector.uarea_size);
+ return -1;
+ }
+
+ if (params->stats.all & ~capa.vector.stats.all) {
+ _ODP_ERR("Unsupported pool statistics counter\n");
+ return -1;
+ }
+
break;
default:
- printf("bad pool type %i\n", params->type);
+ _ODP_ERR("bad pool type %i\n", params->type);
return -1;
}
+ if (cache_size > CONFIG_POOL_CACHE_MAX_SIZE) {
+ _ODP_ERR("Too large cache size %u\n", cache_size);
+ return -1;
+ }
+
+ if (num <= (num_threads * cache_size))
+ _ODP_DBG("Entire pool fits into thread local caches. Pool "
+ "starvation may occur if the pool is used by multiple "
+ "threads.\n");
+
return 0;
}
-odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
+odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
{
- uint32_t shm_flags = 0;
-
if (check_params(params))
return ODP_POOL_INVALID;
-#ifdef _ODP_PKTIO_IPC
- if (params && (params->type == ODP_POOL_PACKET))
- shm_flags = ODP_SHM_PROC;
-#endif
-
- return pool_create(name, params, shm_flags);
+ return _odp_pool_create(name, params, params->type);
}
int odp_pool_destroy(odp_pool_t pool_hdl)
{
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+ const int max_threads = odp_thread_count_max();
int i;
if (pool == NULL)
@@ -534,15 +1191,19 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
if (pool->reserved == 0) {
UNLOCK(&pool->lock);
- ODP_ERR("Pool not created\n");
+ _ODP_ERR("Pool not created\n");
return -1;
}
+ if (pool->type == ODP_POOL_PACKET && pool->mem_src_ops && pool->mem_src_ops->unbind)
+ pool->mem_src_ops->unbind(pool->mem_src_data);
+
/* Make sure local caches are empty */
- for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
- flush_cache(&pool->local_cache[i], pool);
+ for (i = 0; i < max_threads; i++)
+ cache_flush(&pool->local_cache[i], pool);
- odp_shm_free(pool->shm);
+ if (pool->pool_ext == 0)
+ odp_shm_free(pool->shm);
if (pool->uarea_shm != ODP_SHM_INVALID)
odp_shm_free(pool->uarea_shm);
@@ -555,29 +1216,19 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
return 0;
}
-odp_event_type_t _odp_buffer_event_type(odp_buffer_t buf)
-{
- return buf_hdl_to_hdr(buf)->event_type;
-}
-
-void _odp_buffer_event_type_set(odp_buffer_t buf, int ev)
-{
- buf_hdl_to_hdr(buf)->event_type = ev;
-}
-
odp_pool_t odp_pool_lookup(const char *name)
{
uint32_t i;
pool_t *pool;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = pool_entry(i);
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
if (strcmp(name, pool->name) == 0) {
/* found it */
UNLOCK(&pool->lock);
- return pool->pool_hdl;
+ return _odp_pool_handle(pool);
}
UNLOCK(&pool->lock);
}
@@ -587,61 +1238,82 @@ odp_pool_t odp_pool_lookup(const char *name)
int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
{
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
if (pool == NULL || info == NULL)
return -1;
+ memset(info, 0, sizeof(odp_pool_info_t));
+
+ info->type = pool->type_2;
info->name = pool->name;
- info->params = pool->params;
+
+ if (pool->pool_ext) {
+ info->pool_ext = 1;
+ info->pool_ext_param = pool->ext_param;
+
+ } else if (pool->type_2 == ODP_POOL_DMA_COMPL) {
+ info->dma_pool_param.num = pool->params.buf.num;
+ info->dma_pool_param.uarea_size = pool->params.buf.uarea_size;
+ info->dma_pool_param.cache_size = pool->params.buf.cache_size;
+
+ } else if (pool->type_2 == ODP_POOL_ML_COMPL) {
+ info->ml_pool_param.num = pool->params.buf.num;
+ info->ml_pool_param.uarea_size = pool->params.buf.uarea_size;
+ info->ml_pool_param.cache_size = pool->params.buf.cache_size;
+ } else {
+ info->params = pool->params;
+ }
+
+ if (pool->type == ODP_POOL_PACKET)
+ info->pkt.max_num = pool->num;
+
+ info->min_data_addr = (uintptr_t)pool->base_addr;
+ info->max_data_addr = (uintptr_t)pool->max_addr;
return 0;
}
-int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
- odp_buffer_hdr_t *buf_hdr[], int max_num)
+int _odp_event_alloc_multi(pool_t *pool, _odp_event_hdr_t *event_hdr[], int max_num)
{
- ring_t *ring;
- uint32_t mask, i;
- pool_cache_t *cache;
- uint32_t cache_num, num_ch, num_deq, burst;
- odp_buffer_hdr_t *hdr;
+ uint32_t pool_idx = pool->pool_idx;
+ pool_cache_t *cache = local.cache[pool_idx];
+ ring_ptr_t *ring;
+ _odp_event_hdr_t *hdr;
+ uint32_t mask, num_ch, num_alloc, i;
+ uint32_t num_deq = 0;
+ uint32_t burst_size = pool->burst_size;
- cache = local.cache[pool->pool_idx];
+ /* First pull packets from local cache */
+ num_ch = cache_pop(cache, event_hdr, max_num);
- cache_num = cache->num;
- num_ch = max_num;
- num_deq = 0;
- burst = CACHE_BURST;
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.cache_alloc_ops && num_ch)
+ odp_atomic_inc_u64(&pool->stats.cache_alloc_ops);
- if (odp_unlikely(cache_num < (uint32_t)max_num)) {
- /* Cache does not have enough buffers */
- num_ch = cache_num;
- num_deq = max_num - cache_num;
+ /* If needed, get more from the global pool */
+ if (odp_unlikely(num_ch != (uint32_t)max_num)) {
+ uint32_t burst = burst_size;
+ uint32_t cache_num;
- if (odp_unlikely(num_deq > CACHE_BURST))
+ num_deq = max_num - num_ch;
+ if (odp_unlikely(num_deq > burst_size))
burst = num_deq;
- }
-
- /* Get buffers from the cache */
- for (i = 0; i < num_ch; i++) {
- buf[i] = cache->buf[cache_num - num_ch + i];
- if (odp_likely(buf_hdr != NULL))
- buf_hdr[i] = pool_buf_hdl_to_hdr(pool, buf[i]);
- }
-
- /* If needed, get more from the global pool */
- if (odp_unlikely(num_deq)) {
- /* Temporary copy needed since odp_buffer_t is uintptr_t
- * and not uint32_t. */
- uint32_t data[burst];
+ _odp_event_hdr_t *hdr_tmp[burst];
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- burst = ring_deq_multi(ring, mask, data, burst);
+ burst = ring_ptr_deq_multi(ring, mask, (void **)hdr_tmp,
+ burst);
cache_num = burst - num_deq;
+ if (CONFIG_POOL_STATISTICS) {
+ if (pool->params.stats.bit.alloc_ops)
+ odp_atomic_inc_u64(&pool->stats.alloc_ops);
+ if (odp_unlikely(pool->params.stats.bit.alloc_fails && burst == 0))
+ odp_atomic_inc_u64(&pool->stats.alloc_fails);
+ }
+
if (odp_unlikely(burst < num_deq)) {
num_deq = burst;
cache_num = 0;
@@ -650,110 +1322,95 @@ int buffer_alloc_multi(pool_t *pool, odp_buffer_t buf[],
for (i = 0; i < num_deq; i++) {
uint32_t idx = num_ch + i;
- buf[idx] = (odp_buffer_t)(uintptr_t)data[i];
- hdr = pool_buf_hdl_to_hdr(pool, buf[idx]);
+ hdr = hdr_tmp[i];
odp_prefetch(hdr);
-
- if (odp_likely(buf_hdr != NULL))
- buf_hdr[idx] = hdr;
+ event_hdr[idx] = hdr;
}
- /* Cache extra buffers. Cache is currently empty. */
- for (i = 0; i < cache_num; i++)
- cache->buf[i] = (odp_buffer_t)
- (uintptr_t)data[num_deq + i];
-
- cache->num = cache_num;
- } else {
- cache->num = cache_num - num_ch;
+ /* Cache possible extra buffers. Cache is currently empty. */
+ if (cache_num)
+ cache_push(cache, &hdr_tmp[num_deq], cache_num);
}
- return num_ch + num_deq;
+ num_alloc = num_ch + num_deq;
+
+ return num_alloc;
}
-static inline void buffer_free_to_pool(uint32_t pool_id,
- const odp_buffer_t buf[], int num)
+static inline void event_free_to_pool(pool_t *pool,
+ _odp_event_hdr_t *event_hdr[], int num)
{
- pool_t *pool;
- int i;
- ring_t *ring;
- uint32_t mask;
- pool_cache_t *cache;
- uint32_t cache_num;
-
- cache = local.cache[pool_id];
- pool = pool_entry(pool_id);
+ uint32_t pool_idx = pool->pool_idx;
+ pool_cache_t *cache = local.cache[pool_idx];
+ ring_ptr_t *ring;
+ uint32_t cache_num, mask;
+ uint32_t cache_size = pool->cache_size;
/* Special case of a very large free. Move directly to
* the global pool. */
- if (odp_unlikely(num > CONFIG_POOL_CACHE_SIZE)) {
+ if (odp_unlikely(num > (int)cache_size)) {
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- for (i = 0; i < num; i++)
- ring_enq(ring, mask, (uint32_t)(uintptr_t)buf[i]);
+
+ ring_ptr_enq_multi(ring, mask, (void **)event_hdr, num);
+
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.free_ops)
+ odp_atomic_inc_u64(&pool->stats.free_ops);
return;
}
/* Make room into local cache if needed. Do at least burst size
* transfer. */
- cache_num = cache->num;
+ cache_num = odp_atomic_load_u32(&cache->cache_num);
- if (odp_unlikely((int)(CONFIG_POOL_CACHE_SIZE - cache_num) < num)) {
- uint32_t index;
- int burst = CACHE_BURST;
+ if (odp_unlikely((int)(cache_size - cache_num) < num)) {
+ int burst = pool->burst_size;
ring = &pool->ring->hdr;
mask = pool->ring_mask;
- if (odp_unlikely(num > CACHE_BURST))
+ if (odp_unlikely(num > burst))
burst = num;
+ if (odp_unlikely((uint32_t)num > cache_num))
+ burst = cache_num;
- {
- /* Temporary copy needed since odp_buffer_t is
- * uintptr_t and not uint32_t. */
- uint32_t data[burst];
-
- index = cache_num - burst;
+ _odp_event_hdr_t *ev_hdr[burst];
- for (i = 0; i < burst; i++)
- data[i] = (uint32_t)
- (uintptr_t)cache->buf[index + i];
+ cache_pop(cache, ev_hdr, burst);
- ring_enq_multi(ring, mask, data, burst);
- }
-
- cache_num -= burst;
+ ring_ptr_enq_multi(ring, mask, (void **)ev_hdr, burst);
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.free_ops)
+ odp_atomic_inc_u64(&pool->stats.free_ops);
}
- for (i = 0; i < num; i++)
- cache->buf[cache_num + i] = buf[i];
-
- cache->num = cache_num + num;
+ cache_push(cache, event_hdr, num);
+ if (CONFIG_POOL_STATISTICS && pool->params.stats.bit.cache_free_ops)
+ odp_atomic_inc_u64(&pool->stats.cache_free_ops);
}
-void buffer_free_multi(const odp_buffer_t buf[], int num_total)
+void _odp_event_free_multi(_odp_event_hdr_t *event_hdr[], int num_total)
{
- uint32_t pool_id;
+ pool_t *pool;
int num;
int i;
int first = 0;
while (1) {
- num = 1;
- i = 1;
- pool_id = pool_id_from_buf(buf[first]);
+ num = 1;
+ i = 1;
+ pool = _odp_pool_entry(event_hdr[first]->pool);
/* 'num' buffers are from the same pool */
if (num_total > 1) {
for (i = first; i < num_total; i++)
- if (pool_id != pool_id_from_buf(buf[i]))
+ if (pool != _odp_pool_entry(event_hdr[i]->pool))
break;
num = i - first;
}
- buffer_free_to_pool(pool_id, &buf[first], num);
+ event_free_to_pool(pool, &event_hdr[first], num);
if (i == num_total)
return;
@@ -768,8 +1425,13 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
pool_t *pool;
int ret;
- pool = pool_entry_from_hdl(pool_hdl);
- ret = buffer_alloc_multi(pool, &buf, NULL, 1);
+ _ODP_ASSERT(ODP_POOL_INVALID != pool_hdl);
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
+
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)&buf, 1);
if (odp_likely(ret == 1))
return buf;
@@ -777,103 +1439,248 @@ odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
return ODP_BUFFER_INVALID;
}
+odp_event_t _odp_event_alloc(pool_t *pool)
+{
+ odp_event_t event;
+ int ret;
+
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)&event, 1);
+
+ if (odp_likely(ret == 1))
+ return event;
+
+ return ODP_EVENT_INVALID;
+}
+
int odp_buffer_alloc_multi(odp_pool_t pool_hdl, odp_buffer_t buf[], int num)
{
pool_t *pool;
- pool = pool_entry_from_hdl(pool_hdl);
+ _ODP_ASSERT(ODP_POOL_INVALID != pool_hdl);
+
+ pool = _odp_pool_entry(pool_hdl);
- return buffer_alloc_multi(pool, buf, NULL, num);
+ _ODP_ASSERT(pool->type == ODP_POOL_BUFFER);
+
+ return _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)buf, num);
}
void odp_buffer_free(odp_buffer_t buf)
{
- buffer_free_multi(&buf, 1);
+ _odp_buffer_validate(buf, _ODP_EV_BUFFER_FREE);
+
+ _odp_event_free_multi((_odp_event_hdr_t **)&buf, 1);
}
void odp_buffer_free_multi(const odp_buffer_t buf[], int num)
{
- buffer_free_multi(buf, num);
+ _odp_buffer_validate_multi(buf, num, _ODP_EV_BUFFER_FREE_MULTI);
+
+ _odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)buf, num);
}
int odp_pool_capability(odp_pool_capability_t *capa)
{
+ odp_pool_stats_opt_t supported_stats;
uint32_t max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
+ /* Reserve pools for internal usage */
+ unsigned int max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
memset(capa, 0, sizeof(odp_pool_capability_t));
- capa->max_pools = ODP_CONFIG_POOLS;
+ capa->max_pools = max_pools;
+
+ supported_stats.all = 0;
+ supported_stats.bit.available = 1;
+ supported_stats.bit.alloc_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.alloc_fails = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.free_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.total_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.cache_available = 1;
+ supported_stats.bit.cache_alloc_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.cache_free_ops = CONFIG_POOL_STATISTICS;
+ supported_stats.bit.thread_cache_available = 1;
/* Buffer pools */
- capa->buf.max_pools = ODP_CONFIG_POOLS;
- capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
+ capa->buf.max_pools = max_pools;
+ capa->buf.max_align = CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
+ capa->buf.max_uarea_size = MAX_UAREA_SIZE;
+ capa->buf.uarea_persistence = true;
+ capa->buf.min_cache_size = 0;
+ capa->buf.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->buf.stats.all = supported_stats.all;
/* Packet pools */
- capa->pkt.max_pools = ODP_CONFIG_POOLS;
- capa->pkt.max_len = CONFIG_PACKET_MAX_SEGS * max_seg_len;
- capa->pkt.max_num = CONFIG_POOL_MAX_NUM;
+ capa->pkt.max_pools = max_pools;
+ capa->pkt.max_len = _odp_pool_glb->config.pkt_max_len;
+ capa->pkt.max_num = _odp_pool_glb->config.pkt_max_num;
+ capa->pkt.max_align = _odp_pool_glb->config.pkt_base_align;
capa->pkt.min_headroom = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_headroom = CONFIG_PACKET_HEADROOM;
capa->pkt.min_tailroom = CONFIG_PACKET_TAILROOM;
- capa->pkt.max_segs_per_pkt = CONFIG_PACKET_MAX_SEGS;
- capa->pkt.min_seg_len = max_seg_len;
+ capa->pkt.max_segs_per_pkt = PKT_MAX_SEGS;
+ capa->pkt.min_seg_len = CONFIG_PACKET_SEG_LEN_MIN;
capa->pkt.max_seg_len = max_seg_len;
- capa->pkt.max_uarea_size = MAX_SIZE;
+ capa->pkt.max_uarea_size = MAX_UAREA_SIZE;
+ capa->pkt.uarea_persistence = true;
+ capa->pkt.min_cache_size = 0;
+ capa->pkt.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->pkt.stats.all = supported_stats.all;
/* Timeout pools */
- capa->tmo.max_pools = ODP_CONFIG_POOLS;
+ capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
-
+ capa->tmo.max_uarea_size = MAX_UAREA_SIZE;
+ capa->tmo.uarea_persistence = true;
+ capa->tmo.min_cache_size = 0;
+ capa->tmo.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->tmo.stats.all = supported_stats.all;
+
+ /* Vector pools */
+ capa->vector.max_pools = max_pools;
+ capa->vector.max_num = CONFIG_POOL_MAX_NUM;
+ capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.max_uarea_size = MAX_UAREA_SIZE;
+ capa->vector.uarea_persistence = true;
+ capa->vector.min_cache_size = 0;
+ capa->vector.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->vector.stats.all = supported_stats.all;
return 0;
}
+static const char *get_long_type_str(odp_pool_type_t type)
+{
+ switch (type) {
+ case ODP_POOL_BUFFER:
+ return "buffer";
+ case ODP_POOL_PACKET:
+ return "packet";
+ case ODP_POOL_TIMEOUT:
+ return "timeout";
+ case ODP_POOL_VECTOR:
+ return "vector";
+ case ODP_POOL_DMA_COMPL:
+ return "dma completion";
+ case ODP_POOL_ML_COMPL:
+ return "ml completion";
+ default:
+ return "unknown";
+ }
+}
+
+static const char *get_short_type_str(odp_pool_type_t type)
+{
+ switch (type) {
+ case ODP_POOL_BUFFER:
+ return "B";
+ case ODP_POOL_PACKET:
+ return "P";
+ case ODP_POOL_TIMEOUT:
+ return "T";
+ case ODP_POOL_VECTOR:
+ return "V";
+ case ODP_POOL_DMA_COMPL:
+ return "D";
+ case ODP_POOL_ML_COMPL:
+ return "M";
+ default:
+ return "-";
+ }
+}
+
void odp_pool_print(odp_pool_t pool_hdl)
{
pool_t *pool;
- pool = pool_entry_from_hdl(pool_hdl);
-
- printf("\nPool info\n");
- printf("---------\n");
- printf(" pool %" PRIu64 "\n",
- odp_pool_to_u64(pool->pool_hdl));
- printf(" name %s\n", pool->name);
- printf(" pool type %s\n",
- pool->params.type == ODP_POOL_BUFFER ? "buffer" :
- (pool->params.type == ODP_POOL_PACKET ? "packet" :
- (pool->params.type == ODP_POOL_TIMEOUT ? "timeout" :
- "unknown")));
- printf(" pool shm %" PRIu64 "\n",
- odp_shm_to_u64(pool->shm));
- printf(" user area shm %" PRIu64 "\n",
- odp_shm_to_u64(pool->uarea_shm));
- printf(" num %u\n", pool->num);
- printf(" align %u\n", pool->align);
- printf(" headroom %u\n", pool->headroom);
- printf(" data size %u\n", pool->data_size);
- printf(" max data len %u\n", pool->max_len);
- printf(" max seg len %u\n", pool->max_seg_len);
- printf(" tailroom %u\n", pool->tailroom);
- printf(" block size %u\n", pool->block_size);
- printf(" uarea size %u\n", pool->uarea_size);
- printf(" shm size %u\n", pool->shm_size);
- printf(" base addr %p\n", pool->base_addr);
- printf(" uarea shm size %u\n", pool->uarea_shm_size);
- printf(" uarea base addr %p\n", pool->uarea_base_addr);
- printf("\n");
-}
-
-odp_pool_t odp_buffer_pool(odp_buffer_t buf)
-{
- uint32_t pool_id = pool_id_from_buf(buf);
-
- return pool_index_to_handle(pool_id);
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_PRINT("\nPool info\n");
+ _ODP_PRINT("---------\n");
+ _ODP_PRINT(" pool %" PRIu64 "\n",
+ odp_pool_to_u64(_odp_pool_handle(pool)));
+ _ODP_PRINT(" name %s\n", pool->name);
+ _ODP_PRINT(" pool type %s\n", get_long_type_str(pool->type_2));
+ _ODP_PRINT(" pool shm %" PRIu64 "\n", odp_shm_to_u64(pool->shm));
+ _ODP_PRINT(" user area shm %" PRIu64 "\n", odp_shm_to_u64(pool->uarea_shm));
+ _ODP_PRINT(" num %u\n", pool->num);
+ _ODP_PRINT(" align %u\n", pool->align);
+ _ODP_PRINT(" headroom %u\n", pool->headroom);
+ _ODP_PRINT(" seg len %u\n", pool->seg_len);
+ _ODP_PRINT(" max data len %u\n", pool->max_len);
+ _ODP_PRINT(" tailroom %u\n", pool->tailroom);
+ _ODP_PRINT(" block size %u\n", pool->block_size);
+ _ODP_PRINT(" uarea size %u\n", pool->uarea_size);
+ _ODP_PRINT(" shm size %" PRIu64 "\n", pool->shm_size);
+ _ODP_PRINT(" base addr %p\n", (void *)pool->base_addr);
+ _ODP_PRINT(" max addr %p\n", (void *)pool->max_addr);
+ _ODP_PRINT(" uarea shm size %" PRIu64 "\n", pool->uarea_shm_size);
+ _ODP_PRINT(" uarea base addr %p\n", (void *)pool->uarea_base_addr);
+ _ODP_PRINT(" cache size %u\n", pool->cache_size);
+ _ODP_PRINT(" burst size %u\n", pool->burst_size);
+ _ODP_PRINT(" mem src %s\n",
+ pool->mem_src_ops ? pool->mem_src_ops->name : "(none)");
+ _ODP_PRINT(" event valid. %d\n", _ODP_EVENT_VALIDATION);
+ _ODP_PRINT("\n");
+}
+
+void odp_pool_print_all(void)
+{
+ uint64_t available;
+ uint32_t i, index, tot, cache_size, seg_len;
+ uint32_t buf_len = 0;
+ uint8_t type, ext;
+ const int col_width = 24;
+ const char *name, *type_c;
+
+ _ODP_PRINT("\nList of all pools\n");
+ _ODP_PRINT("-----------------\n");
+ _ODP_PRINT(" idx %-*s type free tot cache buf_len ext\n", col_width, "name");
+
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
+
+ LOCK(&pool->lock);
+
+ if (!pool->reserved) {
+ UNLOCK(&pool->lock);
+ continue;
+ }
+
+ available = ring_ptr_len(&pool->ring->hdr);
+ cache_size = pool->cache_size;
+ ext = pool->pool_ext;
+ index = pool->pool_idx;
+ name = pool->name;
+ tot = pool->num;
+ type = pool->type;
+ seg_len = pool->seg_len;
+
+ UNLOCK(&pool->lock);
+
+ if (type == ODP_POOL_BUFFER || type == ODP_POOL_PACKET)
+ buf_len = seg_len;
+
+ type_c = get_short_type_str(pool->type_2);
+
+ _ODP_PRINT("%4u %-*s %s %6" PRIu64 " %6" PRIu32 " %6" PRIu32 " %8" PRIu32 " "
+ "%" PRIu8 "\n", index, col_width, name, type_c, available, tot,
+ cache_size, buf_len, ext);
+ }
+ _ODP_PRINT("\n");
}
void odp_pool_param_init(odp_pool_param_t *params)
{
+ uint32_t default_cache_size = _odp_pool_glb->config.local_cache_size;
+
memset(params, 0, sizeof(odp_pool_param_t));
+ params->pkt.headroom = CONFIG_PACKET_HEADROOM;
+ params->buf.cache_size = default_cache_size;
+ params->pkt.cache_size = default_cache_size;
+ params->tmo.cache_size = default_cache_size;
+ params->vector.cache_size = default_cache_size;
}
uint64_t odp_pool_to_u64(odp_pool_t hdl)
@@ -881,33 +1688,471 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl)
return _odp_pri(hdl);
}
-int seg_alloc_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+unsigned int odp_pool_max_index(void)
{
- (void)buf_hdr;
- (void)segcount;
+ return CONFIG_POOLS - 1;
+}
+
+int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ _ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+ if (odp_unlikely(stats == NULL)) {
+ _ODP_ERR("Output buffer NULL\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ /* Zero everything else but per thread statistics */
+ memset(stats, 0, offsetof(odp_pool_stats_t, thread));
+
+ if (pool->params.stats.bit.available)
+ stats->available = ring_ptr_len(&pool->ring->hdr);
+
+ if (pool->params.stats.bit.alloc_ops)
+ stats->alloc_ops = odp_atomic_load_u64(&pool->stats.alloc_ops);
+
+ if (pool->params.stats.bit.alloc_fails)
+ stats->alloc_fails = odp_atomic_load_u64(&pool->stats.alloc_fails);
+
+ if (pool->params.stats.bit.free_ops)
+ stats->free_ops = odp_atomic_load_u64(&pool->stats.free_ops);
+
+ if (pool->params.stats.bit.total_ops)
+ stats->total_ops = stats->alloc_ops + stats->free_ops;
+
+ if (pool->params.stats.bit.cache_available ||
+ pool->params.stats.bit.thread_cache_available) {
+ if (cache_available(pool, stats))
+ return -1;
+ }
+
+ if (pool->params.stats.bit.cache_alloc_ops)
+ stats->cache_alloc_ops = odp_atomic_load_u64(&pool->stats.cache_alloc_ops);
+
+ if (pool->params.stats.bit.cache_free_ops)
+ stats->cache_free_ops = odp_atomic_load_u64(&pool->stats.cache_free_ops);
+
return 0;
}
-void seg_free_tail(odp_buffer_hdr_t *buf_hdr, int segcount)
+int odp_pool_stats_selected(odp_pool_t pool_hdl, odp_pool_stats_selected_t *stats,
+ const odp_pool_stats_opt_t *opt)
{
- (void)buf_hdr;
- (void)segcount;
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ _ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+ if (odp_unlikely(stats == NULL)) {
+ _ODP_ERR("Output buffer NULL\n");
+ return -1;
+ }
+ if (odp_unlikely(opt == NULL)) {
+ _ODP_ERR("Pool counters NULL\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ if (odp_unlikely(opt->all & ~pool->params.stats.all)) {
+ _ODP_ERR("Trying to read disabled counter\n");
+ return -1;
+ }
+
+ if (opt->bit.available)
+ stats->available = ring_ptr_len(&pool->ring->hdr);
+
+ if (opt->bit.alloc_ops || opt->bit.total_ops)
+ stats->alloc_ops = odp_atomic_load_u64(&pool->stats.alloc_ops);
+
+ if (opt->bit.alloc_fails)
+ stats->alloc_fails = odp_atomic_load_u64(&pool->stats.alloc_fails);
+
+ if (opt->bit.free_ops || opt->bit.total_ops)
+ stats->free_ops = odp_atomic_load_u64(&pool->stats.free_ops);
+
+ if (opt->bit.total_ops)
+ stats->total_ops = stats->alloc_ops + stats->free_ops;
+
+ if (opt->bit.cache_available)
+ stats->cache_available = cache_total_available(pool);
+
+ if (opt->bit.cache_alloc_ops)
+ stats->cache_alloc_ops = odp_atomic_load_u64(&pool->stats.cache_alloc_ops);
+
+ if (opt->bit.cache_free_ops)
+ stats->cache_free_ops = odp_atomic_load_u64(&pool->stats.cache_free_ops);
+
+ return 0;
}
-int odp_buffer_is_valid(odp_buffer_t buf)
+int odp_pool_stats_reset(odp_pool_t pool_hdl)
+{
+ pool_t *pool;
+
+ if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
+ _ODP_ERR("Invalid pool handle\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ odp_atomic_store_u64(&pool->stats.alloc_ops, 0);
+ odp_atomic_store_u64(&pool->stats.alloc_fails, 0);
+ odp_atomic_store_u64(&pool->stats.free_ops, 0);
+ odp_atomic_store_u64(&pool->stats.cache_alloc_ops, 0);
+ odp_atomic_store_u64(&pool->stats.cache_free_ops, 0);
+
+ return 0;
+}
+
+static pool_t *find_pool(_odp_event_hdr_t *event_hdr)
+{
+ int i;
+ uint8_t *ptr = (uint8_t *)event_hdr;
+
+ for (i = 0; i < CONFIG_POOLS; i++) {
+ pool_t *pool = _odp_pool_entry_from_idx(i);
+
+ if (pool->reserved == 0)
+ continue;
+
+ if (ptr >= pool->base_addr && ptr < pool->max_addr)
+ return pool;
+ }
+
+ return NULL;
+}
+
+int _odp_event_is_valid(odp_event_t event)
{
- odp_buffer_bits_t handle;
pool_t *pool;
+ _odp_event_hdr_t *event_hdr = _odp_event_hdr(event);
- handle.handle = buf;
+ if (event == ODP_EVENT_INVALID)
+ return 0;
+
+ /* Check that buffer header is from a known pool */
+ pool = find_pool(event_hdr);
+ if (pool == NULL)
+ return 0;
+
+ if (pool != _odp_pool_entry(event_hdr->pool))
+ return 0;
+
+ if (event_hdr->index.event >= (pool->num + pool->skipped_blocks))
+ return 0;
+
+ return 1;
+}
- if (handle.pool_id >= ODP_CONFIG_POOLS)
+int odp_buffer_is_valid(odp_buffer_t buf)
+{
+ if (_odp_event_is_valid(odp_buffer_to_event(buf)) == 0)
return 0;
- pool = pool_entry(handle.pool_id);
+ if (odp_event_type(odp_buffer_to_event(buf)) != ODP_EVENT_BUFFER)
+ return 0;
- if (pool->reserved == 0)
+ if (odp_unlikely(_odp_buffer_validate(buf, _ODP_EV_BUFFER_IS_VALID)))
return 0;
return 1;
}
+
+/* No actual head pointer alignment requirement. Anyway, require even byte address. */
+#define MIN_HEAD_ALIGN 2
+
+int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *capa)
+{
+ odp_pool_stats_opt_t supported_stats;
+
+ _ODP_ASSERT(capa != NULL);
+
+ switch (type) {
+ case ODP_POOL_PACKET:
+ break;
+ case ODP_POOL_BUFFER:
+ case ODP_POOL_TIMEOUT:
+ case ODP_POOL_VECTOR:
+ case ODP_POOL_DMA_COMPL:
+ case ODP_POOL_ML_COMPL:
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+ return 0;
+ default:
+ _ODP_ERR("Invalid pool type: %d\n", type);
+ return -1;
+ }
+
+ supported_stats.all = 0;
+
+ memset(capa, 0, sizeof(odp_pool_ext_capability_t));
+
+ capa->type = type;
+ capa->max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
+ capa->min_cache_size = 0;
+ capa->max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
+ capa->stats.all = supported_stats.all;
+
+ capa->pkt.max_num_buf = _odp_pool_glb->config.pkt_max_num;
+ capa->pkt.max_buf_size = MAX_SIZE;
+ capa->pkt.odp_header_size = sizeof(odp_packet_hdr_t);
+ capa->pkt.odp_trailer_size = _ODP_EV_ENDMARK_SIZE;
+ capa->pkt.min_mem_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_buf_align = ODP_CACHE_LINE_SIZE;
+ capa->pkt.min_head_align = MIN_HEAD_ALIGN;
+ capa->pkt.buf_size_aligned = 0;
+ capa->pkt.max_headroom = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_headroom_size = CONFIG_PACKET_HEADROOM;
+ capa->pkt.max_segs_per_pkt = PKT_MAX_SEGS;
+ capa->pkt.max_uarea_size = MAX_UAREA_SIZE;
+ capa->pkt.uarea_persistence = true;
+
+ return 0;
+}
+
+void odp_pool_ext_param_init(odp_pool_type_t type, odp_pool_ext_param_t *param)
+{
+ uint32_t default_cache_size = _odp_pool_glb->config.local_cache_size;
+
+ memset(param, 0, sizeof(odp_pool_ext_param_t));
+
+ if (type != ODP_POOL_PACKET)
+ return;
+
+ param->type = ODP_POOL_PACKET;
+ param->cache_size = default_cache_size;
+ param->pkt.headroom = CONFIG_PACKET_HEADROOM;
+}
+
+static int check_pool_ext_param(const odp_pool_ext_param_t *param)
+{
+ odp_pool_ext_capability_t capa;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + param->pkt.app_header_size;
+
+ if (param->type != ODP_POOL_PACKET) {
+ _ODP_ERR("Pool type not supported\n");
+ return -1;
+ }
+
+ if (odp_pool_ext_capability(param->type, &capa)) {
+ _ODP_ERR("Capa failed\n");
+ return -1;
+ }
+
+ if (param->cache_size > capa.max_cache_size) {
+ _ODP_ERR("Too large cache size %u\n", param->cache_size);
+ return -1;
+ }
+
+ if (param->stats.all != capa.stats.all) {
+ _ODP_ERR("Pool statistics not supported\n");
+ return -1;
+ }
+
+ if (param->pkt.num_buf > capa.pkt.max_num_buf) {
+ _ODP_ERR("Too many packet buffers\n");
+ return -1;
+ }
+
+ if (param->pkt.buf_size > capa.pkt.max_buf_size) {
+ _ODP_ERR("Too large packet buffer size %u\n", param->pkt.buf_size);
+ return -1;
+ }
+
+ if (param->pkt.uarea_size > capa.pkt.max_uarea_size) {
+ _ODP_ERR("Too large user area size %u\n", param->pkt.uarea_size);
+ return -1;
+ }
+
+ if (param->pkt.headroom > capa.pkt.max_headroom) {
+ _ODP_ERR("Too large headroom size\n");
+ return -1;
+ }
+
+ if (head_offset % capa.pkt.min_head_align) {
+ _ODP_ERR("Head pointer not %u byte aligned\n", capa.pkt.min_head_align);
+ return -1;
+ }
+
+ return 0;
+}
+
+odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *param)
+{
+ pool_t *pool;
+ uint32_t ring_size;
+ uint32_t num_buf = param->pkt.num_buf;
+ uint32_t buf_size = param->pkt.buf_size;
+ uint32_t head_offset = sizeof(odp_packet_hdr_t) + param->pkt.app_header_size;
+ uint32_t headroom = param->pkt.headroom;
+ uint32_t shm_flags = 0;
+
+ if (check_pool_ext_param(param)) {
+ _ODP_ERR("Bad pool ext param\n");
+ return ODP_POOL_INVALID;
+ }
+
+ if (odp_global_ro.shm_single_va)
+ shm_flags |= ODP_SHM_SINGLE_VA;
+
+ pool = reserve_pool(shm_flags, 1, num_buf);
+
+ if (pool == NULL) {
+ _ODP_ERR("No more free pools\n");
+ return ODP_POOL_INVALID;
+ }
+
+ pool->ext_param = *param;
+ set_pool_name(pool, name);
+ set_pool_cache_size(pool, param->cache_size);
+
+ if (reserve_uarea(pool, param->pkt.uarea_size, num_buf, shm_flags)) {
+ _ODP_ERR("User area SHM reserve failed\n");
+ goto error;
+ }
+
+ /* Ring size must be larger than the number of items stored */
+ if (num_buf + 1 <= RING_SIZE_MIN)
+ ring_size = RING_SIZE_MIN;
+ else
+ ring_size = _ODP_ROUNDUP_POWER2_U32(num_buf + 1);
+
+ pool->ring_mask = ring_size - 1;
+ pool->type = param->type;
+ pool->num = num_buf;
+ pool->headroom = headroom;
+ pool->tailroom = 0;
+ pool->trailer_size = _ODP_EV_ENDMARK_SIZE;
+ pool->seg_len = buf_size - head_offset - headroom - pool->tailroom -
+ pool->trailer_size;
+ pool->max_seg_len = headroom + pool->seg_len + pool->tailroom;
+ pool->max_len = PKT_MAX_SEGS * pool->seg_len;
+ pool->ext_head_offset = head_offset;
+ pool->base_addr = (uint8_t *)(uintptr_t)UINT64_MAX;
+ pool->max_addr = 0;
+
+ ring_ptr_init(&pool->ring->hdr);
+
+ return _odp_pool_handle(pool);
+
+error:
+ if (pool->ring_shm != ODP_SHM_INVALID)
+ odp_shm_free(pool->ring_shm);
+
+ LOCK(&pool->lock);
+ pool->reserved = 0;
+ UNLOCK(&pool->lock);
+
+ return ODP_POOL_INVALID;
+}
+
+int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, uint32_t num,
+ uint32_t flags)
+{
+ pool_t *pool;
+ _odp_event_hdr_t *event_hdr;
+ ring_ptr_t *ring;
+ uint32_t i, ring_mask, buf_index, head_offset;
+ uint32_t num_populated;
+ uint8_t *data_ptr, *min_addr, *max_addr;
+ void *uarea = NULL;
+
+ if (pool_hdl == ODP_POOL_INVALID) {
+ _ODP_ERR("Bad pool handle\n");
+ return -1;
+ }
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ if (pool->type != ODP_POOL_PACKET || pool->pool_ext == 0) {
+ _ODP_ERR("Bad pool type\n");
+ return -1;
+ }
+
+ min_addr = pool->base_addr;
+ max_addr = pool->max_addr;
+
+ if (buf_size != pool->ext_param.pkt.buf_size) {
+ _ODP_ERR("Bad buffer size\n");
+ return -1;
+ }
+
+ num_populated = pool->num_populated;
+
+ if (num_populated + num > pool->num) {
+ _ODP_ERR("Trying to over populate the pool\n");
+ return -1;
+ }
+
+ if ((num_populated + num == pool->num) && !(flags & ODP_POOL_POPULATE_DONE)) {
+ _ODP_ERR("Missing ODP_POOL_POPULATE_DONE flag\n");
+ return -1;
+ }
+
+ if ((num_populated + num < pool->num) && flags) {
+ _ODP_ERR("Unexpected flags: 0x%x\n", flags);
+ return -1;
+ }
+
+ ring = &pool->ring->hdr;
+ ring_mask = pool->ring_mask;
+ buf_index = pool->num_populated;
+ head_offset = pool->ext_head_offset;
+
+ for (i = 0; i < num; i++) {
+ event_hdr = buf[i];
+
+ if ((uint8_t *)event_hdr < min_addr)
+ min_addr = (uint8_t *)event_hdr;
+
+ if ((uint8_t *)event_hdr > max_addr)
+ max_addr = (uint8_t *)event_hdr;
+
+ if ((uintptr_t)event_hdr & (ODP_CACHE_LINE_SIZE - 1)) {
+ _ODP_ERR("Bad packet buffer align: buf[%u]\n", i);
+ return -1;
+ }
+
+ if (((uintptr_t)event_hdr + head_offset) & (MIN_HEAD_ALIGN - 1)) {
+ _ODP_ERR("Bad head pointer align: buf[%u]\n", i);
+ return -1;
+ }
+
+ if (pool->uarea_size)
+ uarea = &pool->uarea_base_addr[buf_index * pool->uarea_size];
+
+ data_ptr = (uint8_t *)event_hdr + head_offset + pool->headroom;
+ init_event_hdr(pool, event_hdr, buf_index, data_ptr, uarea);
+ pool->ring->event_hdr_by_index[buf_index] = event_hdr;
+ buf_index++;
+
+ ring_ptr_enq(ring, ring_mask, event_hdr);
+ }
+
+ pool->num_populated += num;
+ pool->base_addr = min_addr;
+ pool->max_addr = max_addr;
+
+ if (flags & ODP_POOL_POPULATE_DONE) {
+ pool->max_addr = max_addr + buf_size - 1;
+
+ if (pool->uarea_size && pool->ext_param.uarea_init.init_fn) {
+ for (i = 0; i < pool->num_populated; i++) {
+ uarea = &pool->uarea_base_addr[i * pool->uarea_size];
+ pool->ext_param.uarea_init.init_fn(uarea, pool->param_uarea_size,
+ pool->ext_param.uarea_init.args,
+ i);
+ }
+ }
+ }
+
+ return 0;
+}
diff --git a/platform/linux-generic/odp_pool_api.c b/platform/linux-generic/odp_pool_api.c
new file mode 100644
index 000000000..d9e0aabad
--- /dev/null
+++ b/platform/linux-generic/odp_pool_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/pool.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/pool_inlines.h>
diff --git a/platform/linux-generic/odp_pool_mem_src_ops.c b/platform/linux-generic/odp_pool_mem_src_ops.c
new file mode 100644
index 000000000..2f8dc2078
--- /dev/null
+++ b/platform/linux-generic/odp_pool_mem_src_ops.c
@@ -0,0 +1,22 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+#include <odp_pool_internal.h>
+
+extern const _odp_pool_mem_src_ops_t _odp_pool_dpdk_mem_src_ops;
+extern const _odp_pool_mem_src_ops_t _odp_pool_sock_xdp_mem_src_ops;
+
+/* List of available ODP packet pool memory source operations. Array must be NULL terminated */
+const _odp_pool_mem_src_ops_t * const _odp_pool_mem_src_ops[] = {
+#ifdef _ODP_PKTIO_DPDK
+ &_odp_pool_dpdk_mem_src_ops,
+#endif
+#ifdef _ODP_PKTIO_XDP
+ &_odp_pool_sock_xdp_mem_src_ops,
+#endif
+ NULL
+};
diff --git a/platform/linux-generic/odp_print.c b/platform/linux-generic/odp_print.c
new file mode 100644
index 000000000..30a06c2f4
--- /dev/null
+++ b/platform/linux-generic/odp_print.c
@@ -0,0 +1,47 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/hints.h>
+#include <odp_print_internal.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Helps with snprintf() return value checking
+ *
+ * Otherwise like snprintf(), but always returns the number of characters
+ * printed (without the end mark) or zero on error. Terminates the string
+ * always with the end mark. */
+ODP_PRINTF_FORMAT(3, 4)
+int _odp_snprint(char *str, size_t size, const char *format, ...)
+{
+ va_list args;
+ int len;
+
+ /* No space to print new characters */
+ if (size < 1)
+ return 0;
+
+ if (size < 2) {
+ str[0] = 0;
+ return 0;
+ }
+
+ va_start(args, format);
+ len = vsnprintf(str, size, format, args);
+ va_end(args);
+
+ /* Error. Ensure that string has the end mark */
+ if (len < 0) {
+ str[0] = 0;
+ return 0;
+ }
+
+ /* Output was truncated. Return the number of characters actually written. */
+ if (len >= (int)size)
+ return (int)size - 1;
+
+ return len;
+}
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
deleted file mode 100644
index d4267c72f..000000000
--- a/platform/linux-generic/odp_queue.c
+++ /dev/null
@@ -1,772 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/queue.h>
-#include <odp_queue_internal.h>
-#include <odp/api/std_types.h>
-#include <odp/api/align.h>
-#include <odp/api/buffer.h>
-#include <odp_buffer_internal.h>
-#include <odp_pool_internal.h>
-#include <odp_buffer_inlines.h>
-#include <odp_internal.h>
-#include <odp/api/shared_memory.h>
-#include <odp/api/schedule.h>
-#include <odp_schedule_if.h>
-#include <odp_config_internal.h>
-#include <odp_packet_io_internal.h>
-#include <odp_packet_io_queue.h>
-#include <odp_debug_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/sync.h>
-#include <odp/api/traffic_mngr.h>
-
-#define NUM_INTERNAL_QUEUES 64
-
-#include <odp/api/plat/ticketlock_inlines.h>
-#define LOCK(a) _odp_ticketlock_lock(a)
-#define UNLOCK(a) _odp_ticketlock_unlock(a)
-#define LOCK_INIT(a) odp_ticketlock_init(a)
-
-#include <string.h>
-#include <inttypes.h>
-
-typedef struct queue_table_t {
- queue_entry_t queue[ODP_CONFIG_QUEUES];
-} queue_table_t;
-
-static queue_table_t *queue_tbl;
-
-static inline odp_queue_t queue_from_id(uint32_t queue_id)
-{
- return _odp_cast_scalar(odp_queue_t, queue_id + 1);
-}
-
-static inline int queue_is_atomic(queue_entry_t *qe)
-{
- return qe->s.param.sched.sync == ODP_SCHED_SYNC_ATOMIC;
-}
-
-static inline int queue_is_ordered(queue_entry_t *qe)
-{
- return qe->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED;
-}
-
-queue_entry_t *get_qentry(uint32_t queue_id)
-{
- return &queue_tbl->queue[queue_id];
-}
-
-static int queue_init(queue_entry_t *queue, const char *name,
- const odp_queue_param_t *param)
-{
- if (name == NULL) {
- queue->s.name[0] = 0;
- } else {
- strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
- queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
- }
- memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
- return -1;
-
- if (param->type == ODP_QUEUE_TYPE_SCHED) {
- queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;
-
- if (param->sched.sync == ODP_SCHED_SYNC_ORDERED) {
- unsigned i;
-
- odp_atomic_init_u64(&queue->s.ordered.ctx, 0);
- odp_atomic_init_u64(&queue->s.ordered.next_ctx, 0);
-
- for (i = 0; i < queue->s.param.sched.lock_count; i++)
- odp_atomic_init_u64(&queue->s.ordered.lock[i],
- 0);
- }
- }
- queue->s.type = queue->s.param.type;
-
- queue->s.enqueue = queue_enq;
- queue->s.dequeue = queue_deq;
- queue->s.enqueue_multi = queue_enq_multi;
- queue->s.dequeue_multi = queue_deq_multi;
-
- queue->s.pktin = PKTIN_INVALID;
-
- queue->s.head = NULL;
- queue->s.tail = NULL;
-
- return 0;
-}
-
-
-int odp_queue_init_global(void)
-{
- uint32_t i;
- odp_shm_t shm;
-
- ODP_DBG("Queue init ... ");
-
- shm = odp_shm_reserve("odp_queues",
- sizeof(queue_table_t),
- sizeof(queue_entry_t), 0);
-
- queue_tbl = odp_shm_addr(shm);
-
- if (queue_tbl == NULL)
- return -1;
-
- memset(queue_tbl, 0, sizeof(queue_table_t));
-
- for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
- /* init locks */
- queue_entry_t *queue = get_qentry(i);
- LOCK_INIT(&queue->s.lock);
- queue->s.index = i;
- queue->s.handle = queue_from_id(i);
- }
-
- ODP_DBG("done\n");
- ODP_DBG("Queue init global\n");
- ODP_DBG(" struct queue_entry_s size %zu\n",
- sizeof(struct queue_entry_s));
- ODP_DBG(" queue_entry_t size %zu\n",
- sizeof(queue_entry_t));
- ODP_DBG("\n");
-
- return 0;
-}
-
-int odp_queue_term_global(void)
-{
- int ret = 0;
- int rc = 0;
- queue_entry_t *queue;
- int i;
-
- for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
- queue = &queue_tbl->queue[i];
- LOCK(&queue->s.lock);
- if (queue->s.status != QUEUE_STATUS_FREE) {
- ODP_ERR("Not destroyed queue: %s\n", queue->s.name);
- rc = -1;
- }
- UNLOCK(&queue->s.lock);
- }
-
- ret = odp_shm_free(odp_shm_lookup("odp_queues"));
- if (ret < 0) {
- ODP_ERR("shm free failed for odp_queues");
- rc = -1;
- }
-
- return rc;
-}
-
-int odp_queue_capability(odp_queue_capability_t *capa)
-{
- memset(capa, 0, sizeof(odp_queue_capability_t));
-
- /* Reserve some queues for internal use */
- capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
- capa->max_ordered_locks = sched_fn->max_ordered_locks();
- capa->max_sched_groups = sched_fn->num_grps();
- capa->sched_prios = odp_schedule_num_prio();
-
- return 0;
-}
-
-odp_queue_type_t odp_queue_type(odp_queue_t handle)
-{
- queue_entry_t *queue;
-
- queue = queue_to_qentry(handle);
-
- return queue->s.type;
-}
-
-odp_schedule_sync_t odp_queue_sched_type(odp_queue_t handle)
-{
- queue_entry_t *queue;
-
- queue = queue_to_qentry(handle);
-
- return queue->s.param.sched.sync;
-}
-
-odp_schedule_prio_t odp_queue_sched_prio(odp_queue_t handle)
-{
- queue_entry_t *queue;
-
- queue = queue_to_qentry(handle);
-
- return queue->s.param.sched.prio;
-}
-
-odp_schedule_group_t odp_queue_sched_group(odp_queue_t handle)
-{
- queue_entry_t *queue;
-
- queue = queue_to_qentry(handle);
-
- return queue->s.param.sched.group;
-}
-
-int odp_queue_lock_count(odp_queue_t handle)
-{
- queue_entry_t *queue = queue_to_qentry(handle);
-
- return queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
- (int)queue->s.param.sched.lock_count : -1;
-}
-
-odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
-{
- uint32_t i;
- queue_entry_t *queue;
- odp_queue_t handle = ODP_QUEUE_INVALID;
- odp_queue_type_t type = ODP_QUEUE_TYPE_PLAIN;
- odp_queue_param_t default_param;
-
- if (param == NULL) {
- odp_queue_param_init(&default_param);
- param = &default_param;
- }
-
- for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
- queue = &queue_tbl->queue[i];
-
- if (queue->s.status != QUEUE_STATUS_FREE)
- continue;
-
- LOCK(&queue->s.lock);
- if (queue->s.status == QUEUE_STATUS_FREE) {
- if (queue_init(queue, name, param)) {
- UNLOCK(&queue->s.lock);
- return handle;
- }
-
- type = queue->s.type;
-
- if (type == ODP_QUEUE_TYPE_SCHED)
- queue->s.status = QUEUE_STATUS_NOTSCHED;
- else
- queue->s.status = QUEUE_STATUS_READY;
-
- handle = queue->s.handle;
- UNLOCK(&queue->s.lock);
- break;
- }
- UNLOCK(&queue->s.lock);
- }
-
- if (handle != ODP_QUEUE_INVALID && type == ODP_QUEUE_TYPE_SCHED) {
- if (sched_fn->init_queue(queue->s.index,
- &queue->s.param.sched)) {
- queue->s.status = QUEUE_STATUS_FREE;
- ODP_ERR("schedule queue init failed\n");
- return ODP_QUEUE_INVALID;
- }
- }
-
- return handle;
-}
-
-void sched_cb_queue_destroy_finalize(uint32_t queue_index)
-{
- queue_entry_t *queue = get_qentry(queue_index);
-
- LOCK(&queue->s.lock);
-
- if (queue->s.status == QUEUE_STATUS_DESTROYED) {
- queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue_index);
- }
- UNLOCK(&queue->s.lock);
-}
-
-int odp_queue_destroy(odp_queue_t handle)
-{
- queue_entry_t *queue;
-
- if (handle == ODP_QUEUE_INVALID)
- return -1;
-
- queue = queue_to_qentry(handle);
-
- if (handle == ODP_QUEUE_INVALID)
- return -1;
-
- LOCK(&queue->s.lock);
- if (queue->s.status == QUEUE_STATUS_FREE) {
- UNLOCK(&queue->s.lock);
- ODP_ERR("queue \"%s\" already free\n", queue->s.name);
- return -1;
- }
- if (queue->s.status == QUEUE_STATUS_DESTROYED) {
- UNLOCK(&queue->s.lock);
- ODP_ERR("queue \"%s\" already destroyed\n", queue->s.name);
- return -1;
- }
- if (queue->s.head != NULL) {
- UNLOCK(&queue->s.lock);
- ODP_ERR("queue \"%s\" not empty\n", queue->s.name);
- return -1;
- }
- if (queue_is_ordered(queue) &&
- odp_atomic_load_u64(&queue->s.ordered.ctx) !=
- odp_atomic_load_u64(&queue->s.ordered.next_ctx)) {
- UNLOCK(&queue->s.lock);
- ODP_ERR("queue \"%s\" reorder incomplete\n", queue->s.name);
- return -1;
- }
-
- switch (queue->s.status) {
- case QUEUE_STATUS_READY:
- queue->s.status = QUEUE_STATUS_FREE;
- break;
- case QUEUE_STATUS_NOTSCHED:
- queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue->s.index);
- break;
- case QUEUE_STATUS_SCHED:
- /* Queue is still in scheduling */
- queue->s.status = QUEUE_STATUS_DESTROYED;
- break;
- default:
- ODP_ABORT("Unexpected queue status\n");
- }
- UNLOCK(&queue->s.lock);
-
- return 0;
-}
-
-int odp_queue_context_set(odp_queue_t handle, void *context,
- uint32_t len ODP_UNUSED)
-{
- queue_entry_t *queue;
- queue = queue_to_qentry(handle);
- odp_mb_full();
- queue->s.param.context = context;
- odp_mb_full();
- return 0;
-}
-
-void *odp_queue_context(odp_queue_t handle)
-{
- queue_entry_t *queue;
- queue = queue_to_qentry(handle);
- return queue->s.param.context;
-}
-
-odp_queue_t odp_queue_lookup(const char *name)
-{
- uint32_t i;
-
- for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
- queue_entry_t *queue = &queue_tbl->queue[i];
-
- if (queue->s.status == QUEUE_STATUS_FREE ||
- queue->s.status == QUEUE_STATUS_DESTROYED)
- continue;
-
- LOCK(&queue->s.lock);
- if (strcmp(name, queue->s.name) == 0) {
- /* found it */
- UNLOCK(&queue->s.lock);
- return queue->s.handle;
- }
- UNLOCK(&queue->s.lock);
- }
-
- return ODP_QUEUE_INVALID;
-}
-
-static inline int enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num)
-{
- int sched = 0;
- int i, ret;
- odp_buffer_hdr_t *hdr, *tail, *next_hdr;
-
- if (sched_fn->ord_enq_multi(queue->s.index, (void **)buf_hdr, num,
- &ret))
- return ret;
-
- /* Optimize the common case of single enqueue */
- if (num == 1) {
- tail = buf_hdr[0];
- hdr = tail;
- hdr->burst_num = 0;
- hdr->next = NULL;
- } else {
- int next;
-
- /* Start from the last buffer header */
- tail = buf_hdr[num - 1];
- hdr = tail;
- hdr->next = NULL;
- next = num - 2;
-
- while (1) {
- /* Build a burst. The buffer header carrying
- * a burst is the last buffer of the burst. */
- for (i = 0; next >= 0 && i < BUFFER_BURST_SIZE;
- i++, next--)
- hdr->burst[BUFFER_BURST_SIZE - 1 - i] =
- buf_hdr[next];
-
- hdr->burst_num = i;
- hdr->burst_first = BUFFER_BURST_SIZE - i;
-
- if (odp_likely(next < 0))
- break;
-
- /* Get another header and link it */
- next_hdr = hdr;
- hdr = buf_hdr[next];
- hdr->next = next_hdr;
- next--;
- }
- }
-
- LOCK(&queue->s.lock);
- if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
- UNLOCK(&queue->s.lock);
- ODP_ERR("Bad queue status\n");
- return -1;
- }
-
- /* Empty queue */
- if (queue->s.head == NULL)
- queue->s.head = hdr;
- else
- queue->s.tail->next = hdr;
-
- queue->s.tail = tail;
-
- if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
- queue->s.status = QUEUE_STATUS_SCHED;
- sched = 1; /* retval: schedule queue */
- }
- UNLOCK(&queue->s.lock);
-
- /* Add queue to scheduling */
- if (sched && sched_fn->sched_queue(queue->s.index))
- ODP_ABORT("schedule_queue failed\n");
-
- return num; /* All events enqueued */
-}
-
-int queue_enq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
-{
- return enq_multi(queue, buf_hdr, num);
-}
-
-int queue_enq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
-{
- int ret;
-
- ret = enq_multi(queue, &buf_hdr, 1);
-
- if (ret == 1)
- return 0;
- else
- return -1;
-}
-
-int odp_queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
-{
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- queue_entry_t *queue;
- int i;
-
- if (num > QUEUE_MULTI_MAX)
- num = QUEUE_MULTI_MAX;
-
- queue = queue_to_qentry(handle);
-
- for (i = 0; i < num; i++)
- buf_hdr[i] = buf_hdl_to_hdr(odp_buffer_from_event(ev[i]));
-
- return num == 0 ? 0 : queue->s.enqueue_multi(queue, buf_hdr,
- num);
-}
-
-int odp_queue_enq(odp_queue_t handle, odp_event_t ev)
-{
- odp_buffer_hdr_t *buf_hdr;
- queue_entry_t *queue;
-
- queue = queue_to_qentry(handle);
- buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
-
- return queue->s.enqueue(queue, buf_hdr);
-}
-
-static inline int deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[],
- int num)
-{
- odp_buffer_hdr_t *hdr, *next;
- int i, j;
- int updated = 0;
-
- LOCK(&queue->s.lock);
- if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
- /* Bad queue, or queue has been destroyed.
- * Scheduler finalizes queue destroy after this. */
- UNLOCK(&queue->s.lock);
- return -1;
- }
-
- hdr = queue->s.head;
-
- if (hdr == NULL) {
- /* Already empty queue */
- if (queue->s.status == QUEUE_STATUS_SCHED) {
- queue->s.status = QUEUE_STATUS_NOTSCHED;
- sched_fn->unsched_queue(queue->s.index);
- }
-
- UNLOCK(&queue->s.lock);
- return 0;
- }
-
- for (i = 0; i < num && hdr; ) {
- int burst_num = hdr->burst_num;
- int first = hdr->burst_first;
-
- /* First, get bursted buffers */
- for (j = 0; j < burst_num && i < num; j++, i++) {
- buf_hdr[i] = hdr->burst[first + j];
- odp_prefetch(buf_hdr[i]);
- }
-
- if (burst_num) {
- hdr->burst_num = burst_num - j;
- hdr->burst_first = first + j;
- }
-
- if (i == num)
- break;
-
- /* When burst is empty, consume the current buffer header and
- * move to the next header */
- buf_hdr[i] = hdr;
- next = hdr->next;
- hdr->next = NULL;
- hdr = next;
- updated++;
- i++;
- }
-
- /* Write head only if updated */
- if (updated)
- queue->s.head = hdr;
-
- /* Queue is empty */
- if (hdr == NULL)
- queue->s.tail = NULL;
-
- if (queue->s.type == ODP_QUEUE_TYPE_SCHED)
- sched_fn->save_context(queue);
-
- UNLOCK(&queue->s.lock);
-
- return i;
-}
-
-int queue_deq_multi(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr[], int num)
-{
- return deq_multi(queue, buf_hdr, num);
-}
-
-odp_buffer_hdr_t *queue_deq(queue_entry_t *queue)
-{
- odp_buffer_hdr_t *buf_hdr = NULL;
- int ret;
-
- ret = deq_multi(queue, &buf_hdr, 1);
-
- if (ret == 1)
- return buf_hdr;
- else
- return NULL;
-}
-
-int odp_queue_deq_multi(odp_queue_t handle, odp_event_t events[], int num)
-{
- queue_entry_t *queue;
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- int i, ret;
-
- if (num > QUEUE_MULTI_MAX)
- num = QUEUE_MULTI_MAX;
-
- queue = queue_to_qentry(handle);
-
- ret = queue->s.dequeue_multi(queue, buf_hdr, num);
-
- for (i = 0; i < ret; i++)
- events[i] = odp_buffer_to_event(buf_hdr[i]->handle.handle);
-
- return ret;
-}
-
-
-odp_event_t odp_queue_deq(odp_queue_t handle)
-{
- queue_entry_t *queue;
- odp_buffer_hdr_t *buf_hdr;
-
- queue = queue_to_qentry(handle);
- buf_hdr = queue->s.dequeue(queue);
-
- if (buf_hdr)
- return odp_buffer_to_event(buf_hdr->handle.handle);
-
- return ODP_EVENT_INVALID;
-}
-
-void queue_lock(queue_entry_t *queue)
-{
- LOCK(&queue->s.lock);
-}
-
-void queue_unlock(queue_entry_t *queue)
-{
- UNLOCK(&queue->s.lock);
-}
-
-void odp_queue_param_init(odp_queue_param_t *params)
-{
- memset(params, 0, sizeof(odp_queue_param_t));
- params->type = ODP_QUEUE_TYPE_PLAIN;
- params->enq_mode = ODP_QUEUE_OP_MT;
- params->deq_mode = ODP_QUEUE_OP_MT;
- params->sched.prio = ODP_SCHED_PRIO_DEFAULT;
- params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
- params->sched.group = ODP_SCHED_GROUP_ALL;
-}
-
-int odp_queue_info(odp_queue_t handle, odp_queue_info_t *info)
-{
- uint32_t queue_id;
- queue_entry_t *queue;
- int status;
-
- if (odp_unlikely(info == NULL)) {
- ODP_ERR("Unable to store info, NULL ptr given\n");
- return -1;
- }
-
- queue_id = queue_to_id(handle);
-
- if (odp_unlikely(queue_id >= ODP_CONFIG_QUEUES)) {
- ODP_ERR("Invalid queue handle:%" PRIu64 "\n",
- odp_queue_to_u64(handle));
- return -1;
- }
-
- queue = get_qentry(queue_id);
-
- LOCK(&queue->s.lock);
- status = queue->s.status;
-
- if (odp_unlikely(status == QUEUE_STATUS_FREE ||
- status == QUEUE_STATUS_DESTROYED)) {
- UNLOCK(&queue->s.lock);
- ODP_ERR("Invalid queue status:%d\n", status);
- return -1;
- }
-
- info->name = queue->s.name;
- info->param = queue->s.param;
-
- UNLOCK(&queue->s.lock);
-
- return 0;
-}
-
-int sched_cb_num_queues(void)
-{
- return ODP_CONFIG_QUEUES;
-}
-
-int sched_cb_queue_prio(uint32_t queue_index)
-{
- queue_entry_t *qe = get_qentry(queue_index);
-
- return qe->s.param.sched.prio;
-}
-
-int sched_cb_queue_grp(uint32_t queue_index)
-{
- queue_entry_t *qe = get_qentry(queue_index);
-
- return qe->s.param.sched.group;
-}
-
-int sched_cb_queue_is_ordered(uint32_t queue_index)
-{
- return queue_is_ordered(get_qentry(queue_index));
-}
-
-int sched_cb_queue_is_atomic(uint32_t queue_index)
-{
- return queue_is_atomic(get_qentry(queue_index));
-}
-
-odp_queue_t sched_cb_queue_handle(uint32_t queue_index)
-{
- return queue_from_id(queue_index);
-}
-
-int sched_cb_queue_deq_multi(uint32_t queue_index, odp_event_t ev[], int num)
-{
- int i, ret;
- queue_entry_t *qe = get_qentry(queue_index);
- odp_buffer_hdr_t *buf_hdr[num];
-
- ret = deq_multi(qe, buf_hdr, num);
-
- if (ret > 0)
- for (i = 0; i < ret; i++)
- ev[i] = odp_buffer_to_event(buf_hdr[i]->handle.handle);
-
- return ret;
-}
-
-int sched_cb_queue_empty(uint32_t queue_index)
-{
- queue_entry_t *queue = get_qentry(queue_index);
- int ret = 0;
-
- LOCK(&queue->s.lock);
-
- if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
- /* Bad queue, or queue has been destroyed. */
- UNLOCK(&queue->s.lock);
- return -1;
- }
-
- if (queue->s.head == NULL) {
- /* Already empty queue. Update status. */
- if (queue->s.status == QUEUE_STATUS_SCHED)
- queue->s.status = QUEUE_STATUS_NOTSCHED;
-
- ret = 1;
- }
-
- UNLOCK(&queue->s.lock);
-
- return ret;
-}
-
-uint64_t odp_queue_to_u64(odp_queue_t hdl)
-{
- return _odp_pri(hdl);
-}
diff --git a/platform/linux-generic/odp_queue_api.c b/platform/linux-generic/odp_queue_api.c
new file mode 100644
index 000000000..495cf8746
--- /dev/null
+++ b/platform/linux-generic/odp_queue_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/queue.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/queue_inlines.h>
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
new file mode 100644
index 000000000..f9700742c
--- /dev/null
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -0,0 +1,1301 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/queue.h>
+#include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/std_types.h>
+#include <odp/api/sync.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/traffic_mngr.h>
+
+#include <odp/api/plat/queue_inline_types.h>
+#include <odp/api/plat/sync_inlines.h>
+#include <odp/api/plat/ticketlock_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_queue_if.h>
+#include <odp_schedule_if.h>
+#include <odp_timer_internal.h>
+
+#include <inttypes.h>
+#include <string.h>
+
+#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->lock))
+#define UNLOCK(queue_ptr) odp_ticketlock_unlock(&((queue_ptr)->lock))
+#define LOCK_INIT(queue_ptr) odp_ticketlock_init(&((queue_ptr)->lock))
+
+#define MIN_QUEUE_SIZE 32
+#define MAX_QUEUE_SIZE (1 * 1024 * 1024)
+
+static int queue_init(queue_entry_t *queue, const char *name,
+ const odp_queue_param_t *param);
+
+queue_global_t *_odp_queue_glb;
+extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
+
+static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
+{
+ memset(capa, 0, sizeof(odp_queue_capability_t));
+
+ /* Reserve some queues for internal use */
+ capa->max_queues = CONFIG_MAX_QUEUES - CONFIG_INTERNAL_QUEUES;
+ capa->plain.max_num = CONFIG_MAX_PLAIN_QUEUES;
+ capa->plain.max_size = _odp_queue_glb->config.max_queue_size;
+ capa->plain.lockfree.max_num = _odp_queue_glb->queue_lf_num;
+ capa->plain.lockfree.max_size = _odp_queue_glb->queue_lf_size;
+
+ return 0;
+}
+
+static int read_config_file(queue_global_t *_odp_queue_glb)
+{
+ const char *str;
+ uint32_t val_u32;
+ int val = 0;
+
+ _ODP_PRINT("Queue config:\n");
+
+ str = "queue_basic.max_queue_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ val_u32 = val;
+
+ if (val_u32 > MAX_QUEUE_SIZE || val_u32 < MIN_QUEUE_SIZE ||
+ !_ODP_CHECK_IS_POWER2(val_u32)) {
+ _ODP_ERR("Bad value %s = %u\n", str, val_u32);
+ return -1;
+ }
+
+ _odp_queue_glb->config.max_queue_size = val_u32;
+ _ODP_PRINT(" %s: %u\n", str, val_u32);
+
+ str = "queue_basic.default_queue_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ val_u32 = val;
+
+ if (val_u32 > _odp_queue_glb->config.max_queue_size ||
+ val_u32 < MIN_QUEUE_SIZE ||
+ !_ODP_CHECK_IS_POWER2(val_u32)) {
+ _ODP_ERR("Bad value %s = %u\n", str, val_u32);
+ return -1;
+ }
+
+ _odp_queue_glb->config.default_queue_size = val_u32;
+ _ODP_PRINT(" %s: %u\n\n", str, val_u32);
+
+ return 0;
+}
+
+static int queue_init_global(void)
+{
+ uint32_t i;
+ odp_shm_t shm;
+ uint32_t lf_size = 0;
+ queue_lf_func_t *lf_func;
+ odp_queue_capability_t capa;
+ uint64_t mem_size;
+
+ _ODP_DBG("Starts...\n");
+
+ /* Fill in queue entry field offsets for inline functions */
+ memset(&_odp_queue_inline_offset, 0,
+ sizeof(_odp_queue_inline_offset_t));
+ _odp_queue_inline_offset.context = offsetof(queue_entry_t,
+ param.context);
+
+ shm = odp_shm_reserve("_odp_queue_basic_global",
+ sizeof(queue_global_t),
+ sizeof(queue_entry_t),
+ 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ _odp_queue_glb = odp_shm_addr(shm);
+
+ memset(_odp_queue_glb, 0, sizeof(queue_global_t));
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ /* init locks */
+ queue_entry_t *queue = qentry_from_index(i);
+
+ LOCK_INIT(queue);
+ queue->index = i;
+ queue->handle = (odp_queue_t)queue;
+ }
+
+ if (read_config_file(_odp_queue_glb)) {
+ odp_shm_free(shm);
+ return -1;
+ }
+
+ _odp_queue_glb->queue_gbl_shm = shm;
+ mem_size = sizeof(uint32_t) * CONFIG_MAX_QUEUES *
+ (uint64_t)_odp_queue_glb->config.max_queue_size;
+
+ shm = odp_shm_reserve("_odp_queue_basic_rings", mem_size,
+ ODP_CACHE_LINE_SIZE,
+ 0);
+
+ if (shm == ODP_SHM_INVALID) {
+ odp_shm_free(_odp_queue_glb->queue_gbl_shm);
+ return -1;
+ }
+
+ _odp_queue_glb->queue_ring_shm = shm;
+ _odp_queue_glb->ring_data = odp_shm_addr(shm);
+
+ lf_func = &_odp_queue_glb->queue_lf_func;
+ _odp_queue_glb->queue_lf_num = _odp_queue_lf_init_global(&lf_size, lf_func);
+ _odp_queue_glb->queue_lf_size = lf_size;
+
+ queue_capa(&capa, 0);
+
+ _ODP_DBG("... done.\n");
+ _ODP_DBG(" queue_entry_t size %zu\n", sizeof(queue_entry_t));
+ _ODP_DBG(" max num queues %u\n", capa.max_queues);
+ _ODP_DBG(" max queue size %u\n", capa.plain.max_size);
+ _ODP_DBG(" max num lockfree %u\n", capa.plain.lockfree.max_num);
+ _ODP_DBG(" max lockfree size %u\n\n", capa.plain.lockfree.max_size);
+
+ return 0;
+}
+
+static int queue_init_local(void)
+{
+ return 0;
+}
+
+static int queue_term_local(void)
+{
+ return 0;
+}
+
+static int queue_term_global(void)
+{
+ int ret = 0;
+ queue_entry_t *queue;
+ int i;
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ queue = qentry_from_index(i);
+ LOCK(queue);
+ if (queue->status != QUEUE_STATUS_FREE) {
+ _ODP_ERR("Not destroyed queue: %s\n", queue->name);
+ ret = -1;
+ }
+ UNLOCK(queue);
+ }
+
+ _odp_queue_lf_term_global();
+
+ if (odp_shm_free(_odp_queue_glb->queue_ring_shm)) {
+ _ODP_ERR("shm free failed");
+ ret = -1;
+ }
+
+ if (odp_shm_free(_odp_queue_glb->queue_gbl_shm)) {
+ _ODP_ERR("shm free failed");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int queue_capability(odp_queue_capability_t *capa)
+{
+ return queue_capa(capa, 1);
+}
+
+static odp_queue_type_t queue_type(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->type;
+}
+
+static odp_schedule_sync_t queue_sched_type(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.sync;
+}
+
+static odp_schedule_prio_t queue_sched_prio(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.prio;
+}
+
+static odp_schedule_group_t queue_sched_group(odp_queue_t handle)
+{
+ return qentry_from_handle(handle)->param.sched.group;
+}
+
+static uint32_t queue_lock_count(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+ queue->param.sched.lock_count : 0;
+}
+
+static odp_queue_t queue_create(const char *name,
+ const odp_queue_param_t *param)
+{
+ uint32_t i;
+ uint32_t max_idx;
+ queue_entry_t *queue;
+ void *queue_lf;
+ odp_queue_type_t type;
+ odp_queue_param_t default_param;
+ odp_queue_t handle = ODP_QUEUE_INVALID;
+
+ if (param == NULL) {
+ odp_queue_param_init(&default_param);
+ param = &default_param;
+ }
+
+ type = param->type;
+
+ if (type == ODP_QUEUE_TYPE_SCHED) {
+ if (param->sched.prio < odp_schedule_min_prio() ||
+ param->sched.prio > odp_schedule_max_prio()) {
+ _ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
+ return ODP_QUEUE_INVALID;
+ }
+ }
+
+ if (param->nonblocking == ODP_BLOCKING) {
+ if (param->size > _odp_queue_glb->config.max_queue_size)
+ return ODP_QUEUE_INVALID;
+ } else if (param->nonblocking == ODP_NONBLOCKING_LF) {
+ /* Only plain type lock-free queues supported */
+ if (type != ODP_QUEUE_TYPE_PLAIN)
+ return ODP_QUEUE_INVALID;
+ if (param->size > _odp_queue_glb->queue_lf_size)
+ return ODP_QUEUE_INVALID;
+ } else {
+ /* Wait-free queues not supported */
+ return ODP_QUEUE_INVALID;
+ }
+
+ if (type == ODP_QUEUE_TYPE_SCHED) {
+ /* Start scheduled queue indices from zero to enable direct
+ * mapping to scheduler implementation indices. */
+ i = 0;
+ max_idx = CONFIG_MAX_SCHED_QUEUES;
+ } else {
+ i = CONFIG_MAX_SCHED_QUEUES;
+ /* All internal queues are of type plain */
+ max_idx = CONFIG_MAX_QUEUES;
+ }
+
+ for (; i < max_idx; i++) {
+ queue = qentry_from_index(i);
+
+ if (queue->status != QUEUE_STATUS_FREE)
+ continue;
+
+ LOCK(queue);
+ if (queue->status == QUEUE_STATUS_FREE) {
+ if (queue_init(queue, name, param)) {
+ UNLOCK(queue);
+ return ODP_QUEUE_INVALID;
+ }
+
+ if (!queue->spsc &&
+ param->nonblocking == ODP_NONBLOCKING_LF) {
+ queue_lf_func_t *lf_fn;
+
+ lf_fn = &_odp_queue_glb->queue_lf_func;
+
+ queue_lf = _odp_queue_lf_create(queue);
+
+ if (queue_lf == NULL) {
+ UNLOCK(queue);
+ return ODP_QUEUE_INVALID;
+ }
+ queue->queue_lf = queue_lf;
+
+ queue->enqueue = lf_fn->enq;
+ queue->enqueue_multi = lf_fn->enq_multi;
+ queue->dequeue = lf_fn->deq;
+ queue->dequeue_multi = lf_fn->deq_multi;
+ queue->orig_dequeue_multi = lf_fn->deq_multi;
+ }
+
+ if (type == ODP_QUEUE_TYPE_SCHED)
+ queue->status = QUEUE_STATUS_NOTSCHED;
+ else
+ queue->status = QUEUE_STATUS_READY;
+
+ handle = queue->handle;
+ UNLOCK(queue);
+ break;
+ }
+ UNLOCK(queue);
+ }
+
+ if (handle == ODP_QUEUE_INVALID)
+ return ODP_QUEUE_INVALID;
+
+ if (type == ODP_QUEUE_TYPE_SCHED) {
+ if (_odp_sched_fn->create_queue(queue->index,
+ &queue->param.sched)) {
+ queue->status = QUEUE_STATUS_FREE;
+ _ODP_ERR("schedule queue init failed\n");
+ return ODP_QUEUE_INVALID;
+ }
+ }
+
+ return handle;
+}
+
+static int queue_create_multi(const char *name[], const odp_queue_param_t param[],
+ odp_bool_t share_param, odp_queue_t queue[], int num)
+{
+ int i;
+
+ _ODP_ASSERT(param != NULL);
+ _ODP_ASSERT(queue != NULL);
+ _ODP_ASSERT(num > 0);
+
+ for (i = 0; i < num; i++) {
+ odp_queue_t cur_queue;
+ const char *cur_name = name != NULL ? name[i] : NULL;
+ const odp_queue_param_t *cur_param = share_param ? &param[0] : &param[i];
+
+ cur_queue = queue_create(cur_name, cur_param);
+ if (cur_queue == ODP_QUEUE_INVALID)
+ return (i == 0) ? -1 : i;
+
+ queue[i] = cur_queue;
+ }
+ return i;
+}
+
+void _odp_sched_queue_set_status(uint32_t queue_index, int status)
+{
+ queue_entry_t *queue = qentry_from_index(queue_index);
+
+ LOCK(queue);
+
+ queue->status = status;
+
+ UNLOCK(queue);
+}
+
+static int queue_destroy(odp_queue_t handle)
+{
+ int empty;
+ queue_entry_t *queue;
+
+ queue = qentry_from_handle(handle);
+
+ if (handle == ODP_QUEUE_INVALID)
+ return -1;
+
+ LOCK(queue);
+ if (queue->status == QUEUE_STATUS_FREE) {
+ UNLOCK(queue);
+ _ODP_ERR("queue \"%s\" already free\n", queue->name);
+ return -1;
+ }
+ if (queue->status == QUEUE_STATUS_DESTROYED) {
+ UNLOCK(queue);
+ _ODP_ERR("queue \"%s\" already destroyed\n", queue->name);
+ return -1;
+ }
+
+ if (queue->spsc)
+ empty = ring_spsc_is_empty(&queue->ring_spsc);
+ else if (queue->type == ODP_QUEUE_TYPE_SCHED)
+ empty = ring_st_is_empty(&queue->ring_st);
+ else
+ empty = ring_mpmc_u32_is_empty(&queue->ring_mpmc);
+
+ if (!empty) {
+ UNLOCK(queue);
+ _ODP_ERR("queue \"%s\" not empty\n", queue->name);
+ return -1;
+ }
+
+ switch (queue->status) {
+ case QUEUE_STATUS_READY:
+ queue->status = QUEUE_STATUS_FREE;
+ break;
+ case QUEUE_STATUS_NOTSCHED:
+ queue->status = QUEUE_STATUS_FREE;
+ _odp_sched_fn->destroy_queue(queue->index);
+ break;
+ case QUEUE_STATUS_SCHED:
+ /* Queue is still in scheduling */
+ queue->status = QUEUE_STATUS_DESTROYED;
+ break;
+ default:
+ _ODP_ABORT("Unexpected queue status\n");
+ }
+
+ if (queue->queue_lf)
+ _odp_queue_lf_destroy(queue->queue_lf);
+
+ UNLOCK(queue);
+
+ return 0;
+}
+
+static int queue_destroy_multi(odp_queue_t handle[], int num)
+{
+ int i;
+
+ _ODP_ASSERT(handle != NULL);
+ _ODP_ASSERT(num > 0);
+
+ for (i = 0; i < num; i++) {
+ int ret = queue_destroy(handle[i]);
+
+ if (ret)
+ return (i == 0) ? ret : i;
+ }
+
+ return i;
+}
+
+static int queue_context_set(odp_queue_t handle, void *context,
+ uint32_t len ODP_UNUSED)
+{
+ odp_mb_full();
+ qentry_from_handle(handle)->param.context = context;
+ odp_mb_full();
+ return 0;
+}
+
+static odp_queue_t queue_lookup(const char *name)
+{
+ uint32_t i;
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ queue_entry_t *queue = qentry_from_index(i);
+
+ if (queue->status == QUEUE_STATUS_FREE ||
+ queue->status == QUEUE_STATUS_DESTROYED)
+ continue;
+
+ LOCK(queue);
+ if (strcmp(name, queue->name) == 0) {
+ /* found it */
+ UNLOCK(queue);
+ return queue->handle;
+ }
+ UNLOCK(queue);
+ }
+
+ return ODP_QUEUE_INVALID;
+}
+
+/* Convert event header pointers into the 32 bit event indexes that the
+ * queue rings actually store. */
+static inline void event_index_from_hdr(uint32_t event_index[],
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ event_index[i] = event_hdr[i]->index.u32;
+}
+
+/* Inverse conversion: indexes back to header pointers. Each header is
+ * prefetched since the caller is about to touch it. */
+static inline void event_index_to_hdr(_odp_event_hdr_t *event_hdr[],
+ uint32_t event_index[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ event_hdr[i] = _odp_event_hdr_from_index_u32(event_index[i]);
+ odp_prefetch(event_hdr[i]);
+ }
+}
+
+/* Enqueue up to 'num' events to a plain (MPMC ring) queue. If the
+ * caller holds an ordered scheduling context, the scheduler takes over
+ * the enqueue (ord_enq_multi) and its result is returned directly.
+ * Returns the number of events enqueued. */
+static inline int _plain_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ queue_entry_t *queue;
+ int ret, num_enq;
+ ring_mpmc_u32_t *ring_mpmc;
+ uint32_t event_idx[num];
+
+ queue = qentry_from_handle(handle);
+ ring_mpmc = &queue->ring_mpmc;
+
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
+ return ret;
+
+ event_index_from_hdr(event_idx, event_hdr, num);
+
+ num_enq = ring_mpmc_u32_enq_multi(ring_mpmc, queue->ring_data,
+ queue->ring_mask, event_idx, num);
+
+ return num_enq;
+}
+
+/* Dequeue up to 'num' events from a plain (MPMC ring) queue. Returns
+ * the number of events dequeued (0 when the queue is empty). */
+static inline int _plain_queue_deq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ int num_deq;
+ queue_entry_t *queue;
+ ring_mpmc_u32_t *ring_mpmc;
+ uint32_t event_idx[num];
+
+ queue = qentry_from_handle(handle);
+ ring_mpmc = &queue->ring_mpmc;
+
+ num_deq = ring_mpmc_u32_deq_multi(ring_mpmc, queue->ring_data,
+ queue->ring_mask, event_idx, num);
+
+ if (num_deq == 0)
+ return 0;
+
+ event_index_to_hdr(event_hdr, event_idx, num_deq);
+
+ return num_deq;
+}
+
+/* Function-pointer entry points for plain queues. The single-event
+ * variants wrap the multi variants: enq returns 0/-1, deq returns the
+ * event header or NULL. */
+static int plain_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ return _plain_queue_enq_multi(handle, event_hdr, num);
+}
+
+static int plain_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ int ret;
+
+ ret = _plain_queue_enq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+static int plain_queue_deq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ return _plain_queue_deq_multi(handle, event_hdr, num);
+}
+
+static _odp_event_hdr_t *plain_queue_deq(odp_queue_t handle)
+{
+ _odp_event_hdr_t *event_hdr = NULL;
+ int ret;
+
+ ret = _plain_queue_deq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return event_hdr;
+ else
+ return NULL;
+}
+
+/* Error stubs installed as default enq/deq function pointers for
+ * operations a queue does not support (e.g. direct dequeue from a
+ * scheduled queue). They log an error and fail. */
+static int error_enqueue(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ (void)event_hdr;
+
+ _ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return -1;
+}
+
+static int error_enqueue_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ (void)event_hdr;
+ (void)num;
+
+ _ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return -1;
+}
+
+static _odp_event_hdr_t *error_dequeue(odp_queue_t handle)
+{
+ _ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return NULL;
+}
+
+static int error_dequeue_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ (void)event_hdr;
+ (void)num;
+
+ _ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n", odp_queue_to_u64(handle));
+
+ return -1;
+}
+
+/* Initialize queue creation parameters to API defaults: plain blocking
+ * MT queue, order kept, parallel sync, default priority, group ALL. */
+static void queue_param_init(odp_queue_param_t *params)
+{
+ memset(params, 0, sizeof(odp_queue_param_t));
+ params->type = ODP_QUEUE_TYPE_PLAIN;
+ params->enq_mode = ODP_QUEUE_OP_MT;
+ params->deq_mode = ODP_QUEUE_OP_MT;
+ params->nonblocking = ODP_BLOCKING;
+ params->order = ODP_QUEUE_ORDER_KEEP;
+ params->sched.prio = odp_schedule_default_prio();
+ params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ params->sched.group = ODP_SCHED_GROUP_ALL;
+}
+
+/* Fill 'info' with the queue's name and creation parameters. Validates
+ * the handle and the queue status (must not be free/destroyed); the
+ * copy is taken under the queue lock. Returns 0 on success, -1 on any
+ * validation failure. */
+static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
+{
+ uint32_t queue_id;
+ queue_entry_t *queue;
+ int status;
+
+ if (odp_unlikely(info == NULL)) {
+ _ODP_ERR("Unable to store info, NULL ptr given\n");
+ return -1;
+ }
+
+ queue_id = queue_to_index(handle);
+
+ if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+ _ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
+ return -1;
+ }
+
+ queue = qentry_from_index(queue_id);
+
+ LOCK(queue);
+ status = queue->status;
+
+ if (odp_unlikely(status == QUEUE_STATUS_FREE ||
+ status == QUEUE_STATUS_DESTROYED)) {
+ UNLOCK(queue);
+ _ODP_ERR("Invalid queue status:%d\n", status);
+ return -1;
+ }
+
+ info->name = queue->name;
+ info->param = queue->param;
+
+ UNLOCK(queue);
+
+ return 0;
+}
+
+/* Debug print of a single queue: handle, modes, type, scheduling
+ * parameters, attached pktio queues, timer count, status and the
+ * underlying ring implementation with its fill level. All fields are
+ * read and printed while holding the queue lock. */
+static void queue_print(odp_queue_t handle)
+{
+ odp_pktio_info_t pktio_info;
+ queue_entry_t *queue;
+ uint32_t queue_id;
+ int status, prio;
+ int max_prio = odp_schedule_max_prio();
+
+ queue_id = queue_to_index(handle);
+
+ if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+ _ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
+ return;
+ }
+
+ queue = qentry_from_index(queue_id);
+
+ LOCK(queue);
+ status = queue->status;
+
+ if (odp_unlikely(status == QUEUE_STATUS_FREE ||
+ status == QUEUE_STATUS_DESTROYED)) {
+ UNLOCK(queue);
+ _ODP_ERR("Invalid queue status:%d\n", status);
+ return;
+ }
+ _ODP_PRINT("\nQueue info\n");
+ _ODP_PRINT("----------\n");
+ _ODP_PRINT(" handle %p\n", (void *)queue->handle);
+ _ODP_PRINT(" index %" PRIu32 "\n", queue_id);
+ _ODP_PRINT(" name %s\n", queue->name);
+ _ODP_PRINT(" enq mode %s\n",
+ queue->param.enq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+ (queue->param.enq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+ (queue->param.enq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+ "unknown")));
+ _ODP_PRINT(" deq mode %s\n",
+ queue->param.deq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+ (queue->param.deq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+ (queue->param.deq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+ "unknown")));
+ _ODP_PRINT(" non-blocking %s\n",
+ queue->param.nonblocking == ODP_BLOCKING ? "ODP_BLOCKING" :
+ (queue->param.nonblocking == ODP_NONBLOCKING_LF ? "ODP_NONBLOCKING_LF" :
+ (queue->param.nonblocking == ODP_NONBLOCKING_WF ? "ODP_NONBLOCKING_WF" :
+ "unknown")));
+ _ODP_PRINT(" type %s\n",
+ queue->type == ODP_QUEUE_TYPE_PLAIN ? "ODP_QUEUE_TYPE_PLAIN" :
+ (queue->type == ODP_QUEUE_TYPE_SCHED ? "ODP_QUEUE_TYPE_SCHED" : "unknown"));
+ if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+ _ODP_PRINT(" sync %s\n",
+ queue->param.sched.sync == ODP_SCHED_SYNC_PARALLEL ?
+ "ODP_SCHED_SYNC_PARALLEL" :
+ (queue->param.sched.sync == ODP_SCHED_SYNC_ATOMIC ?
+ "ODP_SCHED_SYNC_ATOMIC" :
+ (queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+ "ODP_SCHED_SYNC_ORDERED" : "unknown")));
+ prio = queue->param.sched.prio;
+ /* Internal prio is inverted relative to the API value */
+ _ODP_PRINT(" priority %i (%i in API)\n", max_prio - prio, prio);
+ _ODP_PRINT(" group %i\n", queue->param.sched.group);
+ if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+ _ODP_PRINT(" spread %i\n", _odp_sched_basic_get_spread(queue_id));
+ }
+ if (queue->pktin.pktio != ODP_PKTIO_INVALID) {
+ if (!odp_pktio_info(queue->pktin.pktio, &pktio_info))
+ _ODP_PRINT(" pktin %s\n", pktio_info.name);
+ }
+ if (queue->pktout.pktio != ODP_PKTIO_INVALID) {
+ if (!odp_pktio_info(queue->pktout.pktio, &pktio_info))
+ _ODP_PRINT(" pktout %s\n", pktio_info.name);
+ }
+ _ODP_PRINT(" timers %" PRIu64 "\n", odp_atomic_load_u64(&queue->num_timers));
+ _ODP_PRINT(" status %s\n",
+ queue->status == QUEUE_STATUS_READY ? "ready" :
+ (queue->status == QUEUE_STATUS_NOTSCHED ? "not scheduled" :
+ (queue->status == QUEUE_STATUS_SCHED ? "scheduled" : "unknown")));
+ _ODP_PRINT(" param.size %" PRIu32 "\n", queue->param.size);
+ if (queue->queue_lf) {
+ _ODP_PRINT(" implementation queue_lf\n");
+ _ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+ _odp_queue_lf_length(queue->queue_lf), _odp_queue_lf_max_length());
+ } else if (queue->spsc) {
+ _ODP_PRINT(" implementation ring_spsc\n");
+ _ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+ ring_spsc_length(&queue->ring_spsc), queue->ring_mask + 1);
+ } else if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+ _ODP_PRINT(" implementation ring_st\n");
+ _ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+ ring_st_length(&queue->ring_st), queue->ring_mask + 1);
+ } else {
+ _ODP_PRINT(" implementation ring_mpmc\n");
+ _ODP_PRINT(" length %" PRIu32 "/%" PRIu32 "\n",
+ ring_mpmc_u32_len(&queue->ring_mpmc), queue->ring_mask + 1);
+ }
+ _ODP_PRINT("\n");
+
+ UNLOCK(queue);
+}
+
+/* Debug print of a one-line summary for every in-use queue. Each
+ * queue's fields are snapshotted under its lock, then printed after
+ * unlocking to keep the critical section short. */
+static void queue_print_all(void)
+{
+ uint32_t i, index, len, max_len;
+ const char *name;
+ int status;
+ odp_queue_type_t type;
+ odp_nonblocking_t blocking;
+ odp_queue_op_mode_t enq_mode;
+ odp_queue_op_mode_t deq_mode;
+ odp_queue_order_t order;
+ const char *status_str;
+ const char *bl_str;
+ char type_c, enq_c, deq_c, order_c, sync_c;
+ const int col_width = 24;
+ int prio = 0;
+ int spr = 0;
+ odp_schedule_sync_t sync = ODP_SCHED_SYNC_PARALLEL;
+ odp_schedule_group_t grp = ODP_SCHED_GROUP_INVALID;
+
+ _ODP_PRINT("\nList of all queues\n");
+ _ODP_PRINT("------------------\n");
+ _ODP_PRINT(" idx %-*s type stat blk enq deq ord len max_len sync prio grp", col_width, "name");
+ if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+ _ODP_PRINT(" spr\n");
+ else
+ _ODP_PRINT("\n");
+
+ for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+ queue_entry_t *queue = qentry_from_index(i);
+
+ /* Unlocked pre-filter: skip free/destroyed entries */
+ if (queue->status < QUEUE_STATUS_READY)
+ continue;
+
+ LOCK(queue);
+
+ status = queue->status;
+ index = queue->index;
+ name = queue->name;
+ type = queue->type;
+ blocking = queue->param.nonblocking;
+ enq_mode = queue->param.enq_mode;
+ deq_mode = queue->param.deq_mode;
+ order = queue->param.order;
+
+ if (queue->queue_lf) {
+ len = _odp_queue_lf_length(queue->queue_lf);
+ max_len = _odp_queue_lf_max_length();
+ } else if (queue->spsc) {
+ len = ring_spsc_length(&queue->ring_spsc);
+ max_len = queue->ring_mask + 1;
+ } else if (type == ODP_QUEUE_TYPE_SCHED) {
+ len = ring_st_length(&queue->ring_st);
+ max_len = queue->ring_mask + 1;
+ prio = queue->param.sched.prio;
+ grp = queue->param.sched.group;
+ sync = queue->param.sched.sync;
+ if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+ spr = _odp_sched_basic_get_spread(index);
+ } else {
+ len = ring_mpmc_u32_len(&queue->ring_mpmc);
+ max_len = queue->ring_mask + 1;
+ }
+
+ UNLOCK(queue);
+
+ /* Re-check the locked snapshot; queue may have been freed
+ * between the pre-filter and LOCK */
+ if (status < QUEUE_STATUS_READY)
+ continue;
+
+ status_str = (status == QUEUE_STATUS_READY) ? "R" :
+ ((status == QUEUE_STATUS_SCHED) ? "S" : "NS");
+
+ type_c = (type == ODP_QUEUE_TYPE_PLAIN) ? 'P' : 'S';
+
+ bl_str = (blocking == ODP_BLOCKING) ? "B" :
+ ((blocking == ODP_NONBLOCKING_LF) ? "LF" : "WF");
+
+ enq_c = (enq_mode == ODP_QUEUE_OP_MT) ? 'S' :
+ ((enq_mode == ODP_QUEUE_OP_MT_UNSAFE) ? 'U' : 'D');
+
+ deq_c = (deq_mode == ODP_QUEUE_OP_MT) ? 'S' :
+ ((deq_mode == ODP_QUEUE_OP_MT_UNSAFE) ? 'U' : 'D');
+
+ order_c = (order == ODP_QUEUE_ORDER_KEEP) ? 'K' : 'I';
+
+ _ODP_PRINT("%4u %-*s %c %2s %2s", index, col_width, name, type_c,
+ status_str, bl_str);
+ _ODP_PRINT(" %c %c %c %6u %6u", enq_c, deq_c, order_c, len, max_len);
+
+ if (type == ODP_QUEUE_TYPE_SCHED) {
+ sync_c = (sync == ODP_SCHED_SYNC_PARALLEL) ? 'P' :
+ ((sync == ODP_SCHED_SYNC_ATOMIC) ? 'A' : 'O');
+ /* Print prio level matching odp_schedule_print() output */
+ prio = odp_schedule_max_prio() - prio;
+
+ _ODP_PRINT(" %c %4i %3i", sync_c, prio, grp);
+
+ if (_odp_sched_id == _ODP_SCHED_ID_BASIC)
+ _ODP_PRINT(" %3i", spr);
+ }
+
+ _ODP_PRINT("\n");
+ }
+
+ _ODP_PRINT("\n");
+}
+
+/* Enqueue up to 'num' events to a scheduled (ring_st) queue. An
+ * ordered context is handled by the scheduler first. The ring is
+ * filled under the queue lock; if the queue was not in scheduling
+ * (NOTSCHED), its status flips to SCHED and it is handed to the
+ * scheduler after unlocking. Returns the number enqueued. */
+static inline int _sched_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ int sched = 0;
+ int ret;
+ queue_entry_t *queue;
+ int num_enq;
+ ring_st_t *ring_st;
+ uint32_t event_idx[num];
+
+ queue = qentry_from_handle(handle);
+ ring_st = &queue->ring_st;
+
+ if (_odp_sched_fn->ord_enq_multi(handle, (void **)event_hdr, num, &ret))
+ return ret;
+
+ event_index_from_hdr(event_idx, event_hdr, num);
+
+ LOCK(queue);
+
+ num_enq = ring_st_enq_multi(ring_st, queue->ring_data,
+ queue->ring_mask, event_idx, num);
+
+ if (odp_unlikely(num_enq == 0)) {
+ UNLOCK(queue);
+ return 0;
+ }
+
+ if (queue->status == QUEUE_STATUS_NOTSCHED) {
+ queue->status = QUEUE_STATUS_SCHED;
+ sched = 1;
+ }
+
+ UNLOCK(queue);
+
+ /* Add queue to scheduling */
+ if (sched && _odp_sched_fn->sched_queue(queue->index))
+ _ODP_ABORT("schedule_queue failed\n");
+
+ return num_enq;
+}
+
+/* Scheduler-side dequeue from a scheduled queue. Returns the number of
+ * events stored into 'ev', 0 when empty, or -1 for a bad/destroyed
+ * queue. A destroyed queue is finalized here (freed and reported to
+ * the scheduler). When 'update_status' is set and the queue drains
+ * empty, its status drops back to NOTSCHED so the scheduler stops
+ * polling it until the next enqueue. */
+int _odp_sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
+ int update_status)
+{
+ int num_deq, status;
+ ring_st_t *ring_st;
+ queue_entry_t *queue = qentry_from_index(queue_index);
+ uint32_t event_idx[max_num];
+
+ ring_st = &queue->ring_st;
+
+ LOCK(queue);
+
+ status = queue->status;
+
+ if (odp_unlikely(status < QUEUE_STATUS_READY)) {
+ /* Bad queue, or queue has been destroyed.
+ * Inform scheduler about a destroyed queue. */
+ if (queue->status == QUEUE_STATUS_DESTROYED) {
+ queue->status = QUEUE_STATUS_FREE;
+ _odp_sched_fn->destroy_queue(queue_index);
+ }
+
+ UNLOCK(queue);
+ return -1;
+ }
+
+ num_deq = ring_st_deq_multi(ring_st, queue->ring_data,
+ queue->ring_mask, event_idx, max_num);
+
+ if (num_deq == 0) {
+ /* Already empty queue */
+ if (update_status && status == QUEUE_STATUS_SCHED)
+ queue->status = QUEUE_STATUS_NOTSCHED;
+
+ UNLOCK(queue);
+
+ return 0;
+ }
+
+ UNLOCK(queue);
+
+ /* Index-to-header conversion can run outside the lock */
+ event_index_to_hdr((_odp_event_hdr_t **)ev, event_idx, num_deq);
+
+ return num_deq;
+}
+
+/* Function-pointer entry points for scheduled queue enqueue; the
+ * single-event variant returns 0/-1. */
+static int sched_queue_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ return _sched_queue_enq_multi(handle, event_hdr, num);
+}
+
+static int sched_queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ int ret;
+
+ ret = _sched_queue_enq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+/* Scheduler-side emptiness check. Returns 1 if the queue is empty
+ * (also dropping a SCHED queue back to NOTSCHED), 0 if it has events,
+ * -1 for a bad or destroyed queue. */
+int _odp_sched_queue_empty(uint32_t queue_index)
+{
+ queue_entry_t *queue = qentry_from_index(queue_index);
+ int ret = 0;
+
+ LOCK(queue);
+
+ if (odp_unlikely(queue->status < QUEUE_STATUS_READY)) {
+ /* Bad queue, or queue has been destroyed. */
+ UNLOCK(queue);
+ return -1;
+ }
+
+ if (ring_st_is_empty(&queue->ring_st)) {
+ /* Already empty queue. Update status. */
+ if (queue->status == QUEUE_STATUS_SCHED)
+ queue->status = QUEUE_STATUS_NOTSCHED;
+
+ ret = 1;
+ }
+
+ UNLOCK(queue);
+
+ return ret;
+}
+
+/* Initialize a queue entry from creation parameters: copies name and
+ * params, validates ordered lock count, sizes the ring (default,
+ * minimum, rounded up to a power of two, capped by config), and
+ * selects the implementation: spsc ring for MT_UNSAFE plain queues,
+ * mpmc ring for other plain queues, ring_st for scheduled queues.
+ * Enq/deq function pointers default to the error stubs and are
+ * overridden per implementation. Returns 0 on success, -1 on bad
+ * lock count or oversized ring. */
+static int queue_init(queue_entry_t *queue, const char *name,
+ const odp_queue_param_t *param)
+{
+ uint64_t offset;
+ uint32_t queue_size;
+ odp_queue_type_t queue_type;
+ int spsc;
+
+ queue_type = param->type;
+
+ if (name == NULL) {
+ queue->name[0] = 0;
+ } else {
+ strncpy(queue->name, name, ODP_QUEUE_NAME_LEN - 1);
+ queue->name[ODP_QUEUE_NAME_LEN - 1] = 0;
+ }
+ memcpy(&queue->param, param, sizeof(odp_queue_param_t));
+ if (queue->param.sched.lock_count > _odp_sched_fn->max_ordered_locks())
+ return -1;
+
+ /* Application may not dequeue directly from a scheduled queue */
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ queue->param.deq_mode = ODP_QUEUE_OP_DISABLED;
+
+ queue->type = queue_type;
+ odp_atomic_init_u64(&queue->num_timers, 0);
+
+ queue->pktin = PKTIN_INVALID;
+ queue->pktout = PKTOUT_INVALID;
+
+ queue_size = param->size;
+ if (queue_size == 0)
+ queue_size = _odp_queue_glb->config.default_queue_size;
+
+ if (queue_size < MIN_QUEUE_SIZE)
+ queue_size = MIN_QUEUE_SIZE;
+
+ /* Round up if not already a power of two */
+ queue_size = _ODP_ROUNDUP_POWER2_U32(queue_size);
+
+ if (queue_size > _odp_queue_glb->config.max_queue_size) {
+ _ODP_ERR("Too large queue size %u\n", queue_size);
+ return -1;
+ }
+
+ /* Each queue owns a fixed max-size slice of the global ring area */
+ offset = queue->index * (uint64_t)_odp_queue_glb->config.max_queue_size;
+
+ /* Single-producer / single-consumer plain queue has simple and
+ * lock-free implementation */
+ spsc = (queue_type == ODP_QUEUE_TYPE_PLAIN) &&
+ (param->enq_mode == ODP_QUEUE_OP_MT_UNSAFE) &&
+ (param->deq_mode == ODP_QUEUE_OP_MT_UNSAFE);
+
+ queue->spsc = spsc;
+ queue->queue_lf = NULL;
+
+ /* Default to error functions */
+ queue->enqueue = error_enqueue;
+ queue->enqueue_multi = error_enqueue_multi;
+ queue->dequeue = error_dequeue;
+ queue->dequeue_multi = error_dequeue_multi;
+ queue->orig_dequeue_multi = error_dequeue_multi;
+
+ if (spsc) {
+ _odp_queue_spsc_init(queue, queue_size);
+ } else {
+ if (queue_type == ODP_QUEUE_TYPE_PLAIN) {
+ queue->enqueue = plain_queue_enq;
+ queue->enqueue_multi = plain_queue_enq_multi;
+ queue->dequeue = plain_queue_deq;
+ queue->dequeue_multi = plain_queue_deq_multi;
+ queue->orig_dequeue_multi = plain_queue_deq_multi;
+
+ queue->ring_data = &_odp_queue_glb->ring_data[offset];
+ queue->ring_mask = queue_size - 1;
+ ring_mpmc_u32_init(&queue->ring_mpmc);
+
+ } else {
+ queue->enqueue = sched_queue_enq;
+ queue->enqueue_multi = sched_queue_enq_multi;
+
+ queue->ring_data = &_odp_queue_glb->ring_data[offset];
+ queue->ring_mask = queue_size - 1;
+ ring_st_init(&queue->ring_st);
+ }
+ }
+
+ return 0;
+}
+
+/* Handle-to-u64 conversion and pktin/pktout queue accessors. The
+ * setters are called by the packet I/O layer when binding a queue to a
+ * pktio interface; no locking is visible here. */
+static uint64_t queue_to_u64(odp_queue_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+static odp_pktout_queue_t queue_get_pktout(odp_queue_t handle)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ return qentry->pktout;
+}
+
+static void queue_set_pktout(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ qentry->pktout.pktio = pktio;
+ qentry->pktout.index = index;
+}
+
+static odp_pktin_queue_t queue_get_pktin(odp_queue_t handle)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ return qentry->pktin;
+}
+
+static void queue_set_pktin(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ qentry->pktin.pktio = pktio;
+ qentry->pktin.index = index;
+}
+
+/* Override individual enq/deq function pointers of a queue (NULL keeps
+ * the current one). Used e.g. by packet I/O to hook its own handlers.
+ * queue_orig_multi dispatches to the pre-override dequeue-multi. */
+static void queue_set_enq_deq_func(odp_queue_t handle,
+ queue_enq_fn_t enq,
+ queue_enq_multi_fn_t enq_multi,
+ queue_deq_fn_t deq,
+ queue_deq_multi_fn_t deq_multi)
+{
+ queue_entry_t *qentry = qentry_from_handle(handle);
+
+ if (enq)
+ qentry->enqueue = enq;
+
+ if (enq_multi)
+ qentry->enqueue_multi = enq_multi;
+
+ if (deq)
+ qentry->dequeue = deq;
+
+ if (deq_multi)
+ qentry->dequeue_multi = deq_multi;
+}
+
+static int queue_orig_multi(odp_queue_t handle,
+ _odp_event_hdr_t **event_hdr, int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->orig_dequeue_multi(handle, event_hdr, num);
+}
+
+/* Public API enqueue entry points: dispatch through the queue's
+ * function pointers. Burst size is clamped to QUEUE_MULTI_MAX. The
+ * timer add/rem helpers maintain the per-queue timer counter that
+ * gates inline timer processing on the dequeue path. */
+static int queue_api_enq_multi(odp_queue_t handle,
+ const odp_event_t ev[], int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ if (odp_unlikely(num == 0))
+ return 0;
+
+ if (num > QUEUE_MULTI_MAX)
+ num = QUEUE_MULTI_MAX;
+
+ return queue->enqueue_multi(handle,
+ (_odp_event_hdr_t **)(uintptr_t)ev, num);
+}
+
+static void queue_timer_add(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ odp_atomic_inc_u64(&queue->num_timers);
+}
+
+static void queue_timer_rem(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ odp_atomic_dec_u64(&queue->num_timers);
+}
+
+static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->enqueue(handle,
+ (_odp_event_hdr_t *)(uintptr_t)ev);
+}
+
+/* Public API dequeue entry points. After the dequeue, inline timers
+ * are processed when enabled and the queue has timers; the argument to
+ * timer_run() differs depending on whether events were returned. */
+static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+ int ret;
+
+ if (num > QUEUE_MULTI_MAX)
+ num = QUEUE_MULTI_MAX;
+
+ ret = queue->dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->num_timers))
+ timer_run(ret ? 2 : 1);
+
+ return ret;
+}
+
+static odp_event_t queue_api_deq(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+ odp_event_t ev = (odp_event_t)queue->dequeue(handle);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->num_timers))
+ timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
+
+ return ev;
+}
+
+/* API functions */
+/* Dispatch table for the public odp_queue_* API of the basic queue
+ * implementation; selected at init via ODP_SCHEDULER. */
+_odp_queue_api_fn_t _odp_queue_basic_api = {
+ .queue_create = queue_create,
+ .queue_create_multi = queue_create_multi,
+ .queue_destroy = queue_destroy,
+ .queue_destroy_multi = queue_destroy_multi,
+ .queue_lookup = queue_lookup,
+ .queue_capability = queue_capability,
+ .queue_context_set = queue_context_set,
+ .queue_enq = queue_api_enq,
+ .queue_enq_multi = queue_api_enq_multi,
+ .queue_deq = queue_api_deq,
+ .queue_deq_multi = queue_api_deq_multi,
+ .queue_type = queue_type,
+ .queue_sched_type = queue_sched_type,
+ .queue_sched_prio = queue_sched_prio,
+ .queue_sched_group = queue_sched_group,
+ .queue_lock_count = queue_lock_count,
+ .queue_to_u64 = queue_to_u64,
+ .queue_param_init = queue_param_init,
+ .queue_info = queue_info,
+ .queue_print = queue_print,
+ .queue_print_all = queue_print_all
+
+};
+
+/* Functions towards internal components */
+queue_fn_t _odp_queue_basic_fn = {
+ .init_global = queue_init_global,
+ .term_global = queue_term_global,
+ .init_local = queue_init_local,
+ .term_local = queue_term_local,
+ .get_pktout = queue_get_pktout,
+ .set_pktout = queue_set_pktout,
+ .get_pktin = queue_get_pktin,
+ .set_pktin = queue_set_pktin,
+ .set_enq_deq_fn = queue_set_enq_deq_func,
+ .orig_deq_multi = queue_orig_multi,
+ .timer_add = queue_timer_add,
+ .timer_rem = queue_timer_rem
+};
diff --git a/platform/linux-generic/odp_queue_if.c b/platform/linux-generic/odp_queue_if.c
new file mode 100644
index 000000000..cc6594afd
--- /dev/null
+++ b/platform/linux-generic/odp_queue_if.c
@@ -0,0 +1,146 @@
+/* Copyright (c) 2017, ARM Limited
+ * Copyright (c) 2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+
+#include <odp_queue_if.h>
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <odp/api/align.h>
+#include <odp/api/plat/queue_inline_types.h>
+
+#include <odp/visibility_begin.h>
+
+/* Implementation-selection pointers: _odp_queue_api serves the public
+ * odp_queue_* wrappers below, _odp_queue_fn the internal interface.
+ * Both are set in _odp_queue_init_global(). */
+_odp_queue_inline_offset_t _odp_queue_inline_offset ODP_ALIGNED_CACHE;
+const _odp_queue_api_fn_t *_odp_queue_api;
+
+#include <odp/visibility_end.h>
+
+extern const _odp_queue_api_fn_t _odp_queue_scalable_api;
+extern const queue_fn_t _odp_queue_scalable_fn;
+
+extern const _odp_queue_api_fn_t _odp_queue_basic_api;
+extern const queue_fn_t _odp_queue_basic_fn;
+
+const queue_fn_t *_odp_queue_fn;
+
+/* Public odp_queue_* API: thin wrappers dispatching to the selected
+ * implementation's function table (_odp_queue_api). */
+odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
+{
+ return _odp_queue_api->queue_create(name, param);
+}
+
+int odp_queue_create_multi(const char *name[], const odp_queue_param_t param[],
+ odp_bool_t share_param, odp_queue_t queue[], int num)
+{
+ return _odp_queue_api->queue_create_multi(name, param, share_param,
+ queue, num);
+}
+
+int odp_queue_destroy(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_destroy(queue);
+}
+
+int odp_queue_destroy_multi(odp_queue_t queue[], int num)
+{
+ return _odp_queue_api->queue_destroy_multi(queue, num);
+}
+
+odp_queue_t odp_queue_lookup(const char *name)
+{
+ return _odp_queue_api->queue_lookup(name);
+}
+
+int odp_queue_capability(odp_queue_capability_t *capa)
+{
+ return _odp_queue_api->queue_capability(capa);
+}
+
+int odp_queue_context_set(odp_queue_t queue, void *context, uint32_t len)
+{
+ return _odp_queue_api->queue_context_set(queue, context, len);
+}
+
+odp_queue_type_t odp_queue_type(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_type(queue);
+}
+
+odp_schedule_sync_t odp_queue_sched_type(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_sched_type(queue);
+}
+
+odp_schedule_prio_t odp_queue_sched_prio(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_sched_prio(queue);
+}
+
+odp_schedule_group_t odp_queue_sched_group(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_sched_group(queue);
+}
+
+uint32_t odp_queue_lock_count(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_lock_count(queue);
+}
+
+uint64_t odp_queue_to_u64(odp_queue_t hdl)
+{
+ return _odp_queue_api->queue_to_u64(hdl);
+}
+
+void odp_queue_param_init(odp_queue_param_t *param)
+{
+ _odp_queue_api->queue_param_init(param);
+}
+
+int odp_queue_info(odp_queue_t queue, odp_queue_info_t *info)
+{
+ return _odp_queue_api->queue_info(queue, info);
+}
+
+void odp_queue_print(odp_queue_t queue)
+{
+ _odp_queue_api->queue_print(queue);
+}
+
+void odp_queue_print_all(void)
+{
+ _odp_queue_api->queue_print_all();
+}
+
+/* Select the queue implementation based on the ODP_SCHEDULER
+ * environment variable ("basic"/"sp" -> basic, "scalable" -> scalable;
+ * unset or "default" uses the build-time default), then run the
+ * selected implementation's global init. Aborts on an unknown name. */
+int _odp_queue_init_global(void)
+{
+ const char *sched = getenv("ODP_SCHEDULER");
+
+ if (sched == NULL || !strcmp(sched, "default"))
+ sched = _ODP_SCHEDULE_DEFAULT;
+
+ if (!strcmp(sched, "basic") || !strcmp(sched, "sp")) {
+ _odp_queue_fn = &_odp_queue_basic_fn;
+ _odp_queue_api = &_odp_queue_basic_api;
+ } else if (!strcmp(sched, "scalable")) {
+ _odp_queue_fn = &_odp_queue_scalable_fn;
+ _odp_queue_api = &_odp_queue_scalable_api;
+ } else {
+ _ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
+ return -1;
+ }
+
+ return _odp_queue_fn->init_global();
+}
+
+int _odp_queue_term_global(void)
+{
+ return _odp_queue_fn->term_global();
+}
diff --git a/platform/linux-generic/odp_queue_lf.c b/platform/linux-generic/odp_queue_lf.c
new file mode 100644
index 000000000..7c9ba4013
--- /dev/null
+++ b/platform/linux-generic/odp_queue_lf.c
@@ -0,0 +1,370 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/queue.h>
+#include <odp/api/atomic.h>
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/shared_memory.h>
+
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_types_internal.h>
+
+#include <string.h>
+#include <stdio.h>
+
+#define RING_LF_SIZE 32
+#define QUEUE_LF_NUM 128
+#define ENQ_RETRIES (RING_LF_SIZE / 4)
+#define DEQ_RETRIES (RING_LF_SIZE / 8)
+
+#ifdef __SIZEOF_INT128__
+
+/* 128 bit atomics available: relaxed store of zero; the remaining u128
+ * helpers come from odp_cpu.h. */
+static inline void lockfree_zero_u128(_odp_u128_t *atomic)
+{
+ __atomic_store_n(atomic, 0, __ATOMIC_RELAXED);
+}
+
+#include <odp_cpu.h>
+
+#else
+
+/* These definitions enable build in non 128 bit compatible systems.
+ * Implementation is active only when 128 bit lockfree atomics are available.
+ * So, these are never actually used. */
+typedef struct ODP_ALIGNED(16) {
+ uint64_t u64[2];
+} _odp_u128_t;
+
+static inline _odp_u128_t lockfree_load_u128(_odp_u128_t *atomic)
+{
+ return *atomic;
+}
+
+static inline void lockfree_zero_u128(_odp_u128_t *atomic)
+{
+ atomic->u64[0] = 0;
+ atomic->u64[1] = 0;
+}
+
+static inline int lockfree_cas_acq_rel_u128(_odp_u128_t *atomic, _odp_u128_t old_val,
+ _odp_u128_t new_val)
+{
+ if (atomic->u64[0] == old_val.u64[0] &&
+ atomic->u64[1] == old_val.u64[1]) {
+ atomic->u64[0] = new_val.u64[0];
+ atomic->u64[1] = new_val.u64[1];
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Reports "no 128 bit lockfree atomics", which disables the whole
+ * lock-free queue feature at init. */
+static inline int lockfree_check_u128(void)
+{
+ return 0;
+}
+
+#endif
+
+/* Node in lock-free ring */
+typedef union {
+ _odp_u128_t u128;
+
+ struct {
+ /* Data with lowest counter value is the head. Empty node has
+ * counter value 0. */
+ uint64_t counter;
+
+ /* Data pointer */
+ uint64_t ptr;
+ } s;
+
+} ring_lf_node_t;
+
+/* Lock-free ring */
+typedef struct ODP_ALIGNED_CACHE {
+ ring_lf_node_t node[RING_LF_SIZE];
+ int used;
+ odp_atomic_u64_t enq_counter;
+
+} queue_lf_t;
+
+/* Lock-free queue globals */
+typedef struct ODP_ALIGNED_CACHE {
+ queue_lf_t queue_lf[QUEUE_LF_NUM];
+ odp_shm_t shm;
+
+} queue_lf_global_t;
+
+/* Points into the shm reserved in _odp_queue_lf_init_global(); NULL
+ * when lock-free queues are unavailable. */
+static queue_lf_global_t *queue_lf_glb;
+
+/* Next ring slot index, wrapping from RING_LF_SIZE - 1 back to 0. */
+static inline int next_idx(int idx)
+{
+ int next = idx + 1;
+
+ if (next == RING_LF_SIZE)
+ next = 0;
+
+ return next;
+}
+
+/* Lock-free enqueue: tag the event with a globally increasing counter,
+ * then find an empty node (counter == 0) and install the new value
+ * with a 128 bit CAS. Retries up to ENQ_RETRIES times on CAS failure.
+ * Returns 0 on success, -1 when the ring is full or retries are
+ * exhausted. */
+static int queue_lf_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ queue_entry_t *queue;
+ queue_lf_t *queue_lf;
+ int i, j, idx;
+ int found;
+ ring_lf_node_t node_val;
+ ring_lf_node_t new_val;
+ ring_lf_node_t *node;
+
+ queue = qentry_from_handle(handle);
+ queue_lf = queue->queue_lf;
+
+ new_val.s.ptr = (uintptr_t)event_hdr;
+ new_val.s.counter = odp_atomic_fetch_inc_u64(&queue_lf->enq_counter);
+
+ idx = 0;
+
+ for (j = 0; j < ENQ_RETRIES; j++) {
+ found = 0;
+
+ /* Find empty node */
+ for (i = 0; i < RING_LF_SIZE; i++) {
+ node = &queue_lf->node[idx];
+ idx = next_idx(idx);
+
+ node_val.u128 = lockfree_load_u128(&node->u128);
+
+ if (node_val.s.counter == 0) {
+ found = 1;
+ break;
+ }
+ }
+
+ /* Queue is full */
+ if (found == 0)
+ return -1;
+
+ /* Try to insert data */
+ if (lockfree_cas_acq_rel_u128(&node->u128, node_val.u128,
+ new_val.u128))
+ return 0;
+ }
+
+ return -1;
+}
+
+/* Multi-enqueue for the lock-free queue handles one event at a time:
+ * only the first event is attempted. Returns 1 or 0. */
+static int queue_lf_enq_multi(odp_queue_t handle, _odp_event_hdr_t **event_hdr,
+ int num)
+{
+ (void)num;
+
+ if (queue_lf_enq(handle, event_hdr[0]) == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Lock-free dequeue: scan for the head node (non-zero counter with the
+ * lowest value, i.e. oldest enqueue), re-scan the slots before it for
+ * concurrently inserted lower counters, then clear the node with a
+ * 128 bit CAS. Retries up to DEQ_RETRIES times on CAS failure.
+ * Returns the event header, or NULL when empty / retries exhausted. */
+static _odp_event_hdr_t *queue_lf_deq(odp_queue_t handle)
+{
+ queue_entry_t *queue;
+ queue_lf_t *queue_lf;
+ int i, j, i_lowest = 0;
+ int found;
+ ring_lf_node_t node_val, old_val, new_val;
+ ring_lf_node_t *node, *old;
+ uint64_t lowest, counter;
+ _odp_event_hdr_t *event_hdr;
+
+ queue = qentry_from_handle(handle);
+ queue_lf = queue->queue_lf;
+ new_val.s.counter = 0;
+ new_val.s.ptr = 0;
+ old = NULL;
+
+ for (j = 0; j < DEQ_RETRIES; j++) {
+ found = 0;
+ /* -1 wraps to UINT64_MAX: any valid counter is lower */
+ lowest = -1;
+
+ /* Find the head node. The one with data and
+ * the lowest counter. */
+ for (i = 0; i < RING_LF_SIZE; i++) {
+ node = &queue_lf->node[i];
+ node_val.u128 = lockfree_load_u128(&node->u128);
+ counter = node_val.s.counter;
+
+ if (counter && counter < lowest) {
+ old = node;
+ old_val.u128 = node_val.u128;
+ lowest = counter;
+ i_lowest = i;
+ found = 1;
+ }
+ }
+
+ /* Queue is empty */
+ if (found == 0)
+ return NULL;
+
+ /* New data may have been written to the area we searched before
+ * finding the current lowest. Check that there are no lower
+ * values. */
+ for (i = 0; i < i_lowest; i++) {
+ node = &queue_lf->node[i];
+ node_val.u128 = lockfree_load_u128(&node->u128);
+ counter = node_val.s.counter;
+
+ if (counter && counter < lowest) {
+ old = node;
+ old_val.u128 = node_val.u128;
+ lowest = counter;
+ }
+ }
+
+ event_hdr = (void *)(uintptr_t)old_val.s.ptr;
+
+ /* Try to remove data */
+ if (lockfree_cas_acq_rel_u128(&old->u128, old_val.u128,
+ new_val.u128))
+ return event_hdr;
+ }
+
+ return NULL;
+}
+
+/* Multi-dequeue for the lock-free queue handles one event at a time:
+ * at most one event is stored into event_hdr[0]. Returns 1 or 0. */
+static int queue_lf_deq_multi(odp_queue_t handle, _odp_event_hdr_t **event_hdr,
+ int num)
+{
+ _odp_event_hdr_t *buf;
+
+ (void)num;
+
+ buf = queue_lf_deq(handle);
+
+ if (buf == NULL)
+ return 0;
+
+ event_hdr[0] = buf;
+ return 1;
+}
+
+/* Global init of the lock-free queue feature. Requires 16 byte
+ * lock-free CAS support; otherwise (or on shm failure) returns 0 and
+ * the feature stays disabled. On success fills 'lf_func' with the
+ * enq/deq handlers, sets *queue_lf_size to the per-queue ring size and
+ * returns the number of available lock-free queues. */
+uint32_t _odp_queue_lf_init_global(uint32_t *queue_lf_size,
+ queue_lf_func_t *lf_func)
+{
+ odp_shm_t shm;
+ int lockfree;
+
+ /* 16 byte lockfree CAS operation is needed. */
+ lockfree = lockfree_check_u128();
+
+ _ODP_DBG("\nLock-free queue init\n");
+ _ODP_DBG(" u128 lock-free: %i\n\n", lockfree);
+
+ if (!lockfree)
+ return 0;
+
+ shm = odp_shm_reserve("_odp_queues_lf_global", sizeof(queue_lf_global_t),
+ ODP_CACHE_LINE_SIZE,
+ 0);
+ if (shm == ODP_SHM_INVALID)
+ return 0;
+
+ queue_lf_glb = odp_shm_addr(shm);
+ memset(queue_lf_glb, 0, sizeof(queue_lf_global_t));
+
+ queue_lf_glb->shm = shm;
+
+ memset(lf_func, 0, sizeof(queue_lf_func_t));
+ lf_func->enq = queue_lf_enq;
+ lf_func->enq_multi = queue_lf_enq_multi;
+ lf_func->deq = queue_lf_deq;
+ lf_func->deq_multi = queue_lf_deq_multi;
+
+ *queue_lf_size = RING_LF_SIZE;
+
+ return QUEUE_LF_NUM;
+}
+
+/* Global termination of the lock-free queue feature: releases the shm
+ * reserved in _odp_queue_lf_init_global(). No-op when the feature was
+ * never enabled. */
+void _odp_queue_lf_term_global(void)
+{
+ odp_shm_t shm;
+
+ if (queue_lf_glb == NULL)
+ return;
+
+ shm = queue_lf_glb->shm;
+
+ /* Clear the global pointer before freeing the shm so that later
+ * calls (e.g. _odp_queue_lf_create()) see the feature as
+ * unavailable instead of dereferencing freed memory. */
+ queue_lf_glb = NULL;
+
+ if (odp_shm_free(shm) < 0)
+ _ODP_ERR("shm free failed\n");
+}
+
+/* Reset a lock-free ring: enqueue counter starts at 1 (0 marks an
+ * empty node) and all nodes are zeroed. */
+static void init_queue(queue_lf_t *queue_lf)
+{
+ int i;
+
+ odp_atomic_init_u64(&queue_lf->enq_counter, 1);
+
+ for (i = 0; i < RING_LF_SIZE; i++)
+ lockfree_zero_u128(&queue_lf->node[i].u128);
+}
+
+/* Allocate a lock-free ring for a plain queue. Returns the ring
+ * pointer, or NULL when the feature is disabled, the queue type is not
+ * plain, or all QUEUE_LF_NUM rings are in use. NOTE(review): the scan
+ * and the 'used' flag update are not visibly synchronized here —
+ * presumably callers serialize creation; confirm at the call site. */
+void *_odp_queue_lf_create(queue_entry_t *queue)
+{
+ int i;
+ queue_lf_t *queue_lf = NULL;
+
+ if (queue_lf_glb == NULL) {
+ _ODP_ERR("No lock-free queues available\n");
+ return NULL;
+ }
+
+ if (queue->type != ODP_QUEUE_TYPE_PLAIN)
+ return NULL;
+
+ for (i = 0; i < QUEUE_LF_NUM; i++) {
+ if (queue_lf_glb->queue_lf[i].used == 0) {
+ queue_lf = &queue_lf_glb->queue_lf[i];
+ memset(queue_lf, 0, sizeof(queue_lf_t));
+ init_queue(queue_lf);
+ queue_lf->used = 1;
+ break;
+ }
+ }
+
+ return queue_lf;
+}
+
+/* Return a lock-free ring to the free pool. */
+void _odp_queue_lf_destroy(void *queue_lf_ptr)
+{
+ queue_lf_t *queue_lf = queue_lf_ptr;
+
+ queue_lf->used = 0;
+}
+
+/* Current number of occupied nodes (non-zero counter). The count is a
+ * snapshot; concurrent enq/deq may change it during the scan. */
+uint32_t _odp_queue_lf_length(void *queue_lf_ptr)
+{
+ queue_lf_t *queue_lf = queue_lf_ptr;
+ ring_lf_node_t node_val;
+ uint32_t i;
+ uint32_t num = 0;
+
+ for (i = 0; i < RING_LF_SIZE; i++) {
+ node_val.u128 = lockfree_load_u128(&queue_lf->node[i].u128);
+ if (node_val.s.counter)
+ num++;
+ }
+ return num;
+}
+
+/* Capacity of a lock-free ring. */
+uint32_t _odp_queue_lf_max_length(void)
+{
+ return RING_LF_SIZE;
+}
+
diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c
new file mode 100644
index 000000000..bddaa532d
--- /dev/null
+++ b/platform/linux-generic/odp_queue_scalable.c
@@ -0,0 +1,1201 @@
+/* Copyright (c) 2017, ARM Limited
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/hints.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/plat/ticketlock_inlines.h>
+#include <odp/api/queue.h>
+#include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/sync.h>
+#include <odp/api/plat/sync_inlines.h>
+#include <odp/api/traffic_mngr.h>
+#include <odp/api/cpu.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_scalable_internal.h>
+#include <odp_schedule_if.h>
+#include <odp_timer_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_ishmpool_internal.h>
+#include <odp/api/plat/queue_inline_types.h>
+#include <odp_global_data.h>
+#include <odp_macros_internal.h>
+
+#include <string.h>
+#include <inttypes.h>
+
+#define LOCK(a) odp_ticketlock_lock(a)
+#define UNLOCK(a) odp_ticketlock_unlock(a)
+#define LOCK_INIT(a) odp_ticketlock_init(a)
+
+extern __thread sched_scalable_thread_state_t *_odp_sched_ts;
+extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
+
+typedef struct queue_table_t {
+ queue_entry_t queue[CONFIG_MAX_QUEUES];
+} queue_table_t;
+
+static queue_table_t *queue_tbl;
+static _odp_ishm_pool_t *queue_shm_pool;
+
+static int _queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr);
+static _odp_event_hdr_t *_queue_deq(odp_queue_t handle);
+static int _queue_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+ int num);
+static int _queue_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+ int num);
+
+/* Index into the global queue table; no bounds check — callers must
+ * pass queue_id < CONFIG_MAX_QUEUES. */
+static queue_entry_t *get_qentry(uint32_t queue_id)
+{
+	return &queue_tbl->queue[queue_id];
+}
+
+/* External queue handles are direct pointers to the queue entry, so
+ * conversion is just a cast (see queue_init_global's handle setup). */
+queue_entry_t *_odp_qentry_from_ext(odp_queue_t handle)
+{
+	return (queue_entry_t *)(uintptr_t)handle;
+}
+
+/* Permanently disable enqueue on a producer ring by atomically
+ * claiming the ring's entire capacity: prod_write is advanced by the
+ * ring size so any later producer sees the ring as full. Succeeds
+ * (returns 0) only when the producer side is empty
+ * (prod_write == prod_read); returns -1 otherwise. */
+static int _odp_queue_disable_enq(sched_elem_t *q)
+{
+	ringidx_t old_read, old_write, new_write;
+	uint32_t size;
+
+	old_write = q->prod_write;
+	size = q->prod_mask + 1;
+	do {
+		/* Need __atomic_load to avoid compiler reordering */
+		old_read = __atomic_load_n(&q->prod_read, __ATOMIC_ACQUIRE);
+		if (old_write != old_read) {
+			/* Queue is not empty, cannot claim all elements
+			* Cannot disable enqueue.
+			*/
+			return -1;
+		}
+		/* Claim all elements in ring */
+		new_write = old_write + size;
+	} while (!__atomic_compare_exchange_n(&q->prod_write,
+				&old_write, /* Updated on failure */
+				new_write,
+				true,
+				__ATOMIC_RELAXED,
+				__ATOMIC_RELAXED));
+	/* All remaining elements claimed, no one else can enqueue */
+	return 0;
+}
+
+/* Initialize one scalable queue entry: allocate and zero the event
+ * ring (size rounded up to a power of two so index masking works),
+ * copy name/params, wire the enq/deq function pointers, and set up
+ * the scheduler element. Scheduled queues additionally get a reorder
+ * window (ordered sync only) and are added to their sched group.
+ * Returns 0 on success, -1 on allocation failure (ring freed on the
+ * rwin failure path). Caller is expected to hold queue->lock. */
+static int queue_init(queue_entry_t *queue, const char *name,
+		const odp_queue_param_t *param)
+{
+	ringidx_t ring_idx;
+	sched_elem_t *sched_elem;
+	uint32_t ring_size;
+	_odp_event_hdr_t **ring;
+	uint32_t size;
+
+	sched_elem = &queue->sched_elem;
+	ring_size = param->size > 0 ?
+		_ODP_ROUNDUP_POWER2_U32(param->size) : CONFIG_SCAL_QUEUE_SIZE;
+	/* strncpy may leave dst unterminated; explicit NUL below covers it */
+	strncpy(queue->name, name ? name : "", ODP_QUEUE_NAME_LEN - 1);
+	queue->name[ODP_QUEUE_NAME_LEN - 1] = 0;
+	memcpy(&queue->param, param, sizeof(odp_queue_param_t));
+
+	size = ring_size * sizeof(_odp_event_hdr_t *);
+	ring = (_odp_event_hdr_t **)shm_pool_alloc_align(queue_shm_pool, size);
+	if (NULL == ring)
+		return -1;
+
+	for (ring_idx = 0; ring_idx < ring_size; ring_idx++)
+		ring[ring_idx] = NULL;
+
+	queue->type = queue->param.type;
+
+	/* Scheduled queues are dequeued only via the scheduler */
+	if (queue->type == ODP_QUEUE_TYPE_SCHED)
+		queue->param.deq_mode = ODP_QUEUE_OP_DISABLED;
+
+	odp_atomic_init_u64(&queue->num_timers, 0);
+
+	queue->enqueue = _queue_enq;
+	queue->dequeue = _queue_deq;
+	queue->enqueue_multi = _queue_enq_multi;
+	queue->dequeue_multi = _queue_deq_multi;
+	queue->orig_dequeue_multi = _queue_deq_multi;
+	queue->pktin = PKTIN_INVALID;
+
+	sched_elem->node.next = NULL;
+#ifdef CONFIG_QSCHST_LOCK
+	LOCK_INIT(&sched_elem->qschlock);
+#endif
+	sched_elem->qschst.numevts = 0;
+	sched_elem->qschst.wrr_budget = CONFIG_WRR_WEIGHT;
+	sched_elem->qschst.cur_ticket = 0;
+	sched_elem->qschst.nxt_ticket = 0;
+	sched_elem->pop_deficit = 0;
+	if (queue->type == ODP_QUEUE_TYPE_SCHED)
+		sched_elem->qschst_type = queue->param.sched.sync;
+	else
+		sched_elem->qschst_type = ODP_NO_SCHED_QUEUE;
+	/* 2nd cache line - enqueue */
+	sched_elem->prod_read = 0;
+	sched_elem->prod_write = 0;
+	sched_elem->prod_ring = ring;
+	sched_elem->prod_mask = ring_size - 1;
+	/* 3rd cache line - dequeue */
+	sched_elem->cons_read = 0;
+	sched_elem->cons_write = 0;
+	sched_elem->rwin = NULL;
+	sched_elem->schedq = NULL;
+	sched_elem->user_ctx = queue->param.context;
+#ifdef CONFIG_SPLIT_PRODCONS
+	sched_elem->cons_ring = ring;
+	sched_elem->cons_mask = ring_size - 1;
+	sched_elem->cons_type = sched_elem->qschst_type;
+#endif
+
+	/* Queue initialized successfully, add it to the sched group */
+	if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+		/* ODP priority is inverted vs. internal priority index */
+		int prio = odp_schedule_max_prio() - param->sched.prio;
+
+		if (queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
+			sched_elem->rwin =
+				_odp_rwin_alloc(queue_shm_pool,
+						queue->param.sched.lock_count);
+			if (sched_elem->rwin == NULL) {
+				_ODP_ERR("Reorder window not created\n");
+				goto rwin_create_failed;
+			}
+		}
+		sched_elem->sched_grp = param->sched.group;
+		sched_elem->sched_prio = prio;
+		sched_elem->schedq =
+			_odp_sched_queue_add(param->sched.group, prio);
+		_ODP_ASSERT(sched_elem->schedq != NULL);
+	}
+
+	return 0;
+
+rwin_create_failed:
+	_odp_ishm_pool_free(queue_shm_pool, ring);
+
+	return -1;
+}
+
+/* Global init for the scalable queue implementation: publish queue
+ * entry field offsets for inline accessors, create the ishm pool
+ * sized for the queue table + all rings + reorder windows, allocate
+ * the queue table from it, and initialize each entry's lock, index
+ * and handle (handle == entry pointer). Returns 0 / -1.
+ *
+ * NOTE(review): on pool-create failure, control falls from the
+ * queue_shm_pool_create_failed label into the destroy call with
+ * queue_shm_pool == NULL — confirm _odp_ishm_pool_destroy() tolerates
+ * NULL, otherwise the error path needs a direct return. */
+static int queue_init_global(void)
+{
+	uint32_t i;
+	uint64_t pool_size;
+	uint64_t min_alloc;
+	uint64_t max_alloc;
+
+	_ODP_DBG("Queue init ... ");
+
+	/* Fill in queue entry field offsets for inline functions */
+	memset(&_odp_queue_inline_offset, 0,
+	       sizeof(_odp_queue_inline_offset_t));
+	_odp_queue_inline_offset.context = offsetof(queue_entry_t,
+						    param.context);
+
+	/* Create shared memory pool to allocate shared memory for the
+	 * queues. Use the default queue size.
+	 */
+	/* Add size of the array holding the queues */
+	pool_size = sizeof(queue_table_t);
+	/* Add storage required for queues */
+	pool_size += (CONFIG_SCAL_QUEUE_SIZE *
+		      sizeof(_odp_event_hdr_t *)) * CONFIG_MAX_QUEUES;
+
+	/* Add the reorder window size */
+	pool_size += sizeof(reorder_window_t) * CONFIG_MAX_QUEUES;
+	/* Choose min_alloc and max_alloc such that buddy allocator is selected. */
+	min_alloc = 0;
+	max_alloc = CONFIG_SCAL_QUEUE_SIZE * sizeof(_odp_event_hdr_t *);
+	queue_shm_pool = _odp_ishm_pool_create("queue_shm_pool",
+					       pool_size,
+					       min_alloc, max_alloc, 0);
+	if (queue_shm_pool == NULL) {
+		_ODP_ERR("Failed to allocate shared memory pool for"
+			 " queues\n");
+		goto queue_shm_pool_create_failed;
+	}
+
+	queue_tbl = (queue_table_t *)
+		shm_pool_alloc_align(queue_shm_pool,
+				     sizeof(queue_table_t));
+	if (queue_tbl == NULL) {
+		_ODP_ERR("Failed to reserve shared memory for queue table\n");
+		goto queue_tbl_ishm_alloc_failed;
+	}
+
+	memset(queue_tbl, 0, sizeof(queue_table_t));
+
+	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+		/* init locks */
+		queue_entry_t *queue;
+
+		queue = get_qentry(i);
+		LOCK_INIT(&queue->lock);
+		queue->index = i;
+		/* Handle is simply the entry's address */
+		queue->handle = (odp_queue_t)queue;
+	}
+
+	_ODP_DBG("done\n");
+	_ODP_DBG("Queue init global\n");
+	_ODP_DBG("  struct queue_entry_s size %zu\n", sizeof(struct queue_entry_s));
+	_ODP_DBG("  queue_entry_t size        %zu\n", sizeof(queue_entry_t));
+	_ODP_DBG("\n");
+
+	return 0;
+
+queue_shm_pool_create_failed:
+
+queue_tbl_ishm_alloc_failed:
+	_odp_ishm_pool_destroy(queue_shm_pool);
+
+	return -1;
+}
+
+/* Global teardown: report any queue still in use (non-FREE status),
+ * free the queue table allocation and destroy the ishm pool. Returns
+ * -1 if a queue was leaked or pool destroy failed, else 0. */
+static int queue_term_global(void)
+{
+	int ret = 0;
+	int rc = 0;
+	queue_entry_t *queue;
+	int i;
+
+	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+		queue = &queue_tbl->queue[i];
+		/* Relaxed load is enough; term runs single-threaded */
+		if (__atomic_load_n(&queue->status,
+				    __ATOMIC_RELAXED) != QUEUE_STATUS_FREE) {
+			_ODP_ERR("Not destroyed queue: %s\n", queue->name);
+			rc = -1;
+		}
+	}
+
+	_odp_ishm_pool_free(queue_shm_pool, queue_tbl);
+
+	ret = _odp_ishm_pool_destroy(queue_shm_pool);
+	if (ret < 0) {
+		_ODP_ERR("Failed to destroy shared memory pool for queues\n");
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/* Per-thread init: nothing to do for the scalable queue impl. */
+static int queue_init_local(void)
+{
+	return 0;
+}
+
+/* Per-thread teardown: nothing to do for the scalable queue impl. */
+static int queue_term_local(void)
+{
+	return 0;
+}
+
+/* Report queue capabilities. Internal queues are subtracted from the
+ * application-visible maximum. plain.max_size of 0 presumably means
+ * "no fixed limit" per the ODP API — confirm against the spec. */
+static int queue_capability(odp_queue_capability_t *capa)
+{
+	memset(capa, 0, sizeof(odp_queue_capability_t));
+
+	/* Reserve some queues for internal use */
+	capa->max_queues        = CONFIG_MAX_QUEUES - CONFIG_INTERNAL_QUEUES;
+
+	capa->plain.max_num     = CONFIG_MAX_PLAIN_QUEUES;
+	capa->plain.max_size    = 0;
+
+	return 0;
+}
+
+/* Queue type (plain/sched) straight from the entry. */
+static odp_queue_type_t queue_type(odp_queue_t handle)
+{
+	return _odp_qentry_from_ext(handle)->type;
+}
+
+/* Scheduler synchronization mode stored in the queue parameters. */
+static odp_schedule_sync_t queue_sched_type(odp_queue_t handle)
+{
+	return _odp_qentry_from_ext(handle)->param.sched.sync;
+}
+
+/* Scheduling priority stored in the queue parameters. */
+static odp_schedule_prio_t queue_sched_prio(odp_queue_t handle)
+{
+	return _odp_qentry_from_ext(handle)->param.sched.prio;
+}
+
+/* Scheduling group stored in the queue parameters. */
+static odp_schedule_group_t queue_sched_group(odp_queue_t handle)
+{
+	return _odp_qentry_from_ext(handle)->param.sched.group;
+}
+
+/* Ordered-lock count; meaningful only for ORDERED sync queues,
+ * zero for everything else. */
+static uint32_t queue_lock_count(odp_queue_t handle)
+{
+	queue_entry_t *queue = _odp_qentry_from_ext(handle);
+
+	return queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+		queue->param.sched.lock_count : 0;
+}
+
+/* Create a queue. NULL param selects defaults. Scheduled queues take
+ * indices [0, CONFIG_MAX_SCHED_QUEUES) so queue index maps 1:1 to the
+ * scheduler's own indices; plain queues use the remaining range.
+ * The unlocked status pre-check is an optimization only — the status
+ * is re-checked under the entry lock before claiming the slot.
+ * Returns ODP_QUEUE_INVALID on bad priority, init failure or table
+ * exhaustion. */
+static odp_queue_t queue_create(const char *name,
+				const odp_queue_param_t *param)
+{
+	int queue_idx;
+	int max_idx;
+	queue_entry_t *queue;
+	odp_queue_type_t type;
+	odp_queue_param_t default_param;
+	odp_queue_t handle = ODP_QUEUE_INVALID;
+
+	if (param == NULL) {
+		odp_queue_param_init(&default_param);
+		param = &default_param;
+	}
+
+	type = param->type;
+
+	if (type == ODP_QUEUE_TYPE_SCHED) {
+		if (param->sched.prio < odp_schedule_min_prio() ||
+		    param->sched.prio > odp_schedule_max_prio()) {
+			_ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
+			return ODP_QUEUE_INVALID;
+		}
+	}
+
+	if (type == ODP_QUEUE_TYPE_SCHED) {
+		/* Start scheduled queue indices from zero to enable direct
+		 * mapping to scheduler implementation indices. */
+		queue_idx = 0;
+		max_idx = CONFIG_MAX_SCHED_QUEUES;
+	} else {
+		queue_idx = CONFIG_MAX_SCHED_QUEUES;
+		/* All internal queues are of type plain */
+		max_idx = CONFIG_MAX_QUEUES;
+	}
+
+	for (; queue_idx < max_idx; queue_idx++) {
+		queue = &queue_tbl->queue[queue_idx];
+
+		/* Racy fast-path check; verified again under the lock */
+		if (queue->status != QUEUE_STATUS_FREE)
+			continue;
+
+		LOCK(&queue->lock);
+		if (queue->status == QUEUE_STATUS_FREE) {
+			if (queue_init(queue, name, param)) {
+				UNLOCK(&queue->lock);
+				return handle;
+			}
+			queue->status = QUEUE_STATUS_READY;
+			handle = queue->handle;
+			UNLOCK(&queue->lock);
+			break;
+		}
+		UNLOCK(&queue->lock);
+	}
+	return handle;
+}
+
+/* Create 'num' queues in one call. With share_param set, param[0] is
+ * used for every queue, otherwise param[i]. Returns the number of
+ * queues created; -1 only when the very first creation fails. */
+static int queue_create_multi(const char *name[], const odp_queue_param_t param[],
+			      odp_bool_t share_param, odp_queue_t queue[], int num)
+{
+	int i;
+
+	_ODP_ASSERT(param != NULL);
+	_ODP_ASSERT(queue != NULL);
+	_ODP_ASSERT(num > 0);
+
+	for (i = 0; i < num; i++) {
+		odp_queue_t cur_queue;
+		const char *cur_name = name != NULL ? name[i] : NULL;
+		const odp_queue_param_t *cur_param = share_param ? &param[0] : &param[i];
+
+		cur_queue = queue_create(cur_name, cur_param);
+		if (cur_queue == ODP_QUEUE_INVALID)
+			return (i == 0) ? -1 : i;
+
+		queue[i] = cur_queue;
+	}
+	return i;
+}
+
+/* Destroy a queue. The queue must be empty: enqueue is first disabled
+ * by claiming the whole producer ring; if the consumer side turns out
+ * non-empty the claim is rolled back and -1 returned. Afterwards the
+ * function spins until the scheduler state drains (no events, no
+ * outstanding tickets), removes the queue from its sched group, frees
+ * the ring and any reorder window, and marks the entry FREE. */
+static int queue_destroy(odp_queue_t handle)
+{
+	queue_entry_t *queue;
+	sched_elem_t *q;
+
+	if (handle == ODP_QUEUE_INVALID)
+		return -1;
+
+	queue = _odp_qentry_from_ext(handle);
+	LOCK(&queue->lock);
+	if (queue->status != QUEUE_STATUS_READY) {
+		UNLOCK(&queue->lock);
+		return -1;
+	}
+	q = &queue->sched_elem;
+
+#ifdef CONFIG_QSCHST_LOCK
+	LOCK(&q->qschlock);
+#endif
+	if (_odp_queue_disable_enq(q)) {
+		/* Producer side not empty */
+#ifdef CONFIG_QSCHST_LOCK
+		UNLOCK(&q->qschlock);
+#endif
+		UNLOCK(&queue->lock);
+		return -1;
+	}
+	/* Enqueue is now disabled */
+	if (q->cons_read != q->cons_write) {
+		/* Consumer side is not empty
+		 * Roll back previous change, enable enqueue again.
+		 */
+		uint32_t size;
+
+		size = q->prod_mask + 1;
+		__atomic_fetch_sub(&q->prod_write, size, __ATOMIC_RELAXED);
+#ifdef CONFIG_QSCHST_LOCK
+		UNLOCK(&q->qschlock);
+#endif
+		UNLOCK(&queue->lock);
+		return -1;
+	}
+#ifdef CONFIG_QSCHST_LOCK
+	UNLOCK(&q->qschlock);
+#endif
+	/* Producer and consumer sides empty, enqueue disabled
+	 * Now wait until schedq state is empty and no outstanding tickets
+	 */
+	while (__atomic_load_n(&q->qschst.numevts, __ATOMIC_RELAXED) != 0 ||
+	       __atomic_load_n(&q->qschst.cur_ticket, __ATOMIC_RELAXED) !=
+	       __atomic_load_n(&q->qschst.nxt_ticket, __ATOMIC_RELAXED))
+		_odp_wait_until_eq_u32((uint32_t *)&q->qschst.numevts, 0);
+
+	if (q->schedq != NULL) {
+		_odp_sched_queue_rem(q->sched_grp, q->sched_prio);
+		q->schedq = NULL;
+	}
+
+	_odp_ishm_pool_free(queue_shm_pool, q->prod_ring);
+
+	if (q->rwin != NULL) {
+		if (_odp_rwin_free(queue_shm_pool, q->rwin) < 0) {
+			_ODP_ERR("Failed to free reorder window\n");
+			UNLOCK(&queue->lock);
+			return -1;
+		}
+		q->rwin = NULL;
+	}
+	queue->status = QUEUE_STATUS_FREE;
+	UNLOCK(&queue->lock);
+	return 0;
+}
+
+/* Destroy 'num' queues; stops at the first failure. Returns the count
+ * destroyed, or the first call's error code when nothing succeeded. */
+static int queue_destroy_multi(odp_queue_t handle[], int num)
+{
+	int i;
+
+	_ODP_ASSERT(handle != NULL);
+	_ODP_ASSERT(num > 0);
+
+	for (i = 0; i < num; i++) {
+		int ret = queue_destroy(handle[i]);
+
+		if (ret)
+			return (i == 0) ? ret : i;
+	}
+
+	return i;
+}
+
+/* Store the user context pointer. Full barriers on both sides make
+ * the new context visible to other threads relative to surrounding
+ * memory operations. 'len' is unused by this implementation. */
+static int queue_context_set(odp_queue_t handle, void *context,
+			     uint32_t len ODP_UNUSED)
+{
+	odp_mb_full();
+	_odp_qentry_from_ext(handle)->param.context = context;
+	odp_mb_full();
+	return 0;
+}
+
+/* Linear name lookup over the whole queue table. FREE/DESTROYED
+ * entries are skipped without the lock; the name comparison itself is
+ * done under the entry lock. Returns ODP_QUEUE_INVALID on no match. */
+static odp_queue_t queue_lookup(const char *name)
+{
+	uint32_t i;
+
+	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+		queue_entry_t *queue = &queue_tbl->queue[i];
+
+		if (queue->status == QUEUE_STATUS_FREE ||
+		    queue->status == QUEUE_STATUS_DESTROYED)
+			continue;
+
+		LOCK(&queue->lock);
+		if (strcmp(name, queue->name) == 0) {
+			/* found it */
+			UNLOCK(&queue->lock);
+			return queue->handle;
+		}
+		UNLOCK(&queue->lock);
+	}
+
+	return ODP_QUEUE_INVALID;
+}
+
+#ifndef CONFIG_QSCHST_LOCK
+/* Multi-producer lock-free enqueue (used when CONFIG_QSCHST_LOCK is
+ * not defined). A CAS on prod_write reserves slots, events are then
+ * written, and producers publish in reservation order: each waits for
+ * cons_write to reach its start index before the store-release that
+ * hands the slots to consumers. Returns the number actually enqueued
+ * (0 when the ring is full); may be fewer than 'num'. */
+static inline int _odp_queue_enq(sched_elem_t *q,
+				 _odp_event_hdr_t *event_hdr[],
+				 int num)
+{
+	ringidx_t old_read;
+	ringidx_t old_write;
+	ringidx_t new_write;
+	int actual;
+	uint32_t mask;
+	_odp_event_hdr_t **ring;
+
+	mask = q->prod_mask;
+	ring = q->prod_ring;
+
+	/* Load producer ring state (read & write index) */
+	old_write = __atomic_load_n(&q->prod_write, __ATOMIC_RELAXED);
+	do {
+		/* Consumer does store-release prod_read, we need
+		 * load-acquire.
+		 */
+		old_read = __atomic_load_n(&q->prod_read, __ATOMIC_ACQUIRE);
+
+		/* Free space = capacity minus in-flight elements */
+		actual = _ODP_MIN(num, (int)((mask + 1) - (old_write - old_read)));
+		if (odp_unlikely(actual <= 0))
+			return 0;
+
+		new_write = old_write + actual;
+	} while (!__atomic_compare_exchange_n(&q->prod_write,
+					&old_write, /* Updated on failure */
+					new_write,
+					true,
+					__ATOMIC_RELAXED,
+					__ATOMIC_RELAXED));
+
+#ifdef CONFIG_SPLIT_PRODCONS
+	__builtin_prefetch(&q->cons_write, 0, 0);
+#endif
+	/* Store our event(s) in the ring */
+	do {
+		ring[old_write & mask] = *event_hdr++;
+	} while (++old_write != new_write);
+	old_write -= actual;
+
+#ifdef CONFIG_SPLIT_PRODCONS
+	__builtin_prefetch(&q->node, 1, 0);
+#endif
+	/* Wait for our turn to signal consumers */
+	if (odp_unlikely(__atomic_load_n(&q->cons_write, __ATOMIC_RELAXED) != old_write))
+		_odp_wait_until_eq_u32(&q->cons_write, old_write);
+
+	/* Signal consumers that events are available (release events)
+	 * Enable other producers to continue
+	 */
+	/* Wait for writes (to ring slots) to complete */
+	atomic_store_release(&q->cons_write, new_write, /*readonly=*/false);
+
+	return actual;
+}
+
+#endif
+
+/* Single-producer enqueue: prod_write is updated with plain stores
+ * since producers are serialized externally (qschlock under
+ * CONFIG_QSCHST_LOCK). Consumers are still released with a
+ * store-release on cons_write unless the lock config makes a plain
+ * store sufficient. Returns the number enqueued (0 when full). */
+int _odp_queue_enq_sp(sched_elem_t *q,
+		      _odp_event_hdr_t *event_hdr[],
+		      int num)
+{
+	ringidx_t old_read;
+	ringidx_t old_write;
+	ringidx_t new_write;
+	int actual;
+	uint32_t mask;
+	_odp_event_hdr_t **ring;
+
+	mask = q->prod_mask;
+	ring = q->prod_ring;
+
+	/* Load producer ring state (read & write index) */
+	old_write = q->prod_write;
+	/* Consumer does store-release prod_read, we need load-acquire */
+	old_read = __atomic_load_n(&q->prod_read, __ATOMIC_ACQUIRE);
+	actual = _ODP_MIN(num, (int)((mask + 1) - (old_write - old_read)));
+	if (odp_unlikely(actual <= 0))
+		return 0;
+
+	new_write = old_write + actual;
+	q->prod_write = new_write;
+
+	/* Store our event(s) in the ring */
+	do {
+		ring[old_write & mask] = *event_hdr++;
+	} while (++old_write != new_write);
+	old_write -= actual;
+
+#ifdef CONFIG_SPLIT_PRODCONS
+	__builtin_prefetch(&q->node, 1, 0);
+#endif
+
+	/* Signal consumers that events are available (release events)
+	 * Enable other producers to continue
+	 */
+#ifdef CONFIG_QSCHST_LOCK
+	q->cons_write = new_write;
+#else
+	atomic_store_release(&q->cons_write, new_write, /*readonly=*/false);
+#endif
+
+	return actual;
+}
+
+/* Internal multi-enqueue entry point. If the calling scheduler thread
+ * is out-of-order and the queue keeps ordering, the events are stashed
+ * in a reorder context instead of being enqueued directly. Otherwise
+ * enqueue via the SP path under qschlock (CONFIG_QSCHST_LOCK) or the
+ * lock-free MP path, then update scheduler state for sched queues. */
+static int _queue_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+			    int num)
+{
+	int actual;
+	queue_entry_t *queue;
+	sched_scalable_thread_state_t *ts;
+
+	queue = qentry_from_int(handle);
+	ts = _odp_sched_ts;
+	if (ts && odp_unlikely(ts->out_of_order) &&
+	    (queue->param.order == ODP_QUEUE_ORDER_KEEP)) {
+		actual = _odp_rctx_save(queue, event_hdr, num);
+		return actual;
+	}
+
+#ifdef CONFIG_QSCHST_LOCK
+	LOCK(&queue->sched_elem.qschlock);
+	actual = _odp_queue_enq_sp(&queue->sched_elem, event_hdr, num);
+#else
+	actual = _odp_queue_enq(&queue->sched_elem, event_hdr, num);
+#endif
+
+	if (odp_likely(queue->sched_elem.schedq != NULL && actual != 0)) {
+		/* Perform scheduler related updates. */
+#ifdef CONFIG_QSCHST_LOCK
+		_odp_sched_update_enq_sp(&queue->sched_elem, actual);
+#else
+		_odp_sched_update_enq(&queue->sched_elem, actual);
+#endif
+	}
+
+#ifdef CONFIG_QSCHST_LOCK
+	UNLOCK(&queue->sched_elem.qschlock);
+#endif
+	return actual;
+}
+
+/* Single-event enqueue: thin wrapper over the multi path.
+ * Returns 0 on success, -1 when the event was not enqueued. */
+static int _queue_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+	return odp_likely(_queue_enq_multi(handle, &event_hdr, 1) == 1) ? 0 : -1;
+}
+
+/* Public multi-enqueue: convert event handles to headers (capped at
+ * QUEUE_MULTI_MAX) and dispatch through the queue's function pointer. */
+static int queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
+{
+	_odp_event_hdr_t *event_hdr[QUEUE_MULTI_MAX];
+	queue_entry_t *queue;
+	int i;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	queue = _odp_qentry_from_ext(handle);
+
+	for (i = 0; i < num; i++)
+		event_hdr[i] = _odp_event_hdr(ev[i]);
+
+	return queue->enqueue_multi(handle, event_hdr, num);
+}
+
+/* Public single-event enqueue via the queue's function pointer. */
+static int queue_enq(odp_queue_t handle, odp_event_t ev)
+{
+	_odp_event_hdr_t *event_hdr;
+	queue_entry_t *queue;
+
+	queue = _odp_qentry_from_ext(handle);
+	event_hdr = _odp_event_hdr(ev);
+
+	return queue->enqueue(handle, event_hdr);
+}
+
+/* Single-consumer dequeue. */
+int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num)
+{
+ int actual;
+ ringidx_t old_read;
+ ringidx_t old_write;
+ ringidx_t new_read;
+ uint32_t mask;
+ _odp_event_hdr_t **ring;
+
+ /* Load consumer ring state (read & write index). */
+ old_read = q->cons_read;
+ /* Producer does store-release cons_write, we need load-acquire */
+ old_write = __atomic_load_n(&q->cons_write, __ATOMIC_ACQUIRE);
+ actual = _ODP_MIN(num, (int)(old_write - old_read));
+
+ if (odp_unlikely(actual <= 0))
+ return 0;
+
+#ifdef CONFIG_SPLIT_PRODCONS
+ __builtin_prefetch(&q->node, 1, 0);
+#endif
+ new_read = old_read + actual;
+ q->cons_read = new_read;
+
+ mask = q->cons_mask;
+ ring = q->cons_ring;
+ do {
+ *evp++ = _odp_event_from_hdr(ring[old_read & mask]);
+ } while (++old_read != new_read);
+
+ /* Signal producers that empty slots are available
+ * (release ring slots). Enable other consumers to continue.
+ */
+#ifdef CONFIG_QSCHST_LOCK
+ q->prod_read = new_read;
+#else
+ /* Wait for loads (from ring slots) to complete. */
+ atomic_store_release(&q->prod_read, new_read, /*readonly=*/true);
+#endif
+ return actual;
+}
+
+/* Multi-consumer lock-free dequeue, mirror of _odp_queue_enq: a CAS
+ * on cons_read claims slots, events are copied out, then consumers
+ * release the slots back to producers in claim order — each waits for
+ * prod_read to reach its start index before the store-release.
+ * Returns the number dequeued (0 when empty). */
+int _odp_queue_deq(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num)
+{
+	int actual;
+	ringidx_t old_read;
+	ringidx_t old_write;
+	ringidx_t new_read;
+	uint32_t mask;
+	_odp_event_hdr_t **ring;
+	_odp_event_hdr_t **p_event_hdr;
+
+	mask = q->cons_mask;
+	ring = q->cons_ring;
+
+	/* Load consumer ring state (read & write index) */
+	old_read = __atomic_load_n(&q->cons_read, __ATOMIC_RELAXED);
+	do {
+		/* Need __atomic_load to avoid compiler reordering
+		 * Producer does store-release cons_write, we need
+		 * load-acquire.
+		 */
+		old_write = __atomic_load_n(&q->cons_write, __ATOMIC_ACQUIRE);
+		/* Prefetch ring buffer array */
+		__builtin_prefetch(&q->cons_ring[old_read & mask], 0, 0);
+
+		actual = _ODP_MIN(num, (int)(old_write - old_read));
+		if (odp_unlikely(actual <= 0))
+			return 0;
+
+		/* Attempt to free ring slot(s) */
+		new_read = old_read + actual;
+	} while (!__atomic_compare_exchange_n(&q->cons_read,
+					&old_read, /* Updated on failure */
+					new_read,
+					true,
+					__ATOMIC_RELAXED,
+					__ATOMIC_RELAXED));
+#ifdef CONFIG_SPLIT_PRODCONS
+	__builtin_prefetch(&q->prod_read, 0, 0);
+#endif
+	p_event_hdr = event_hdr;
+	do {
+		*p_event_hdr++ = ring[old_read & mask];
+	} while (++old_read != new_read);
+	old_read -= actual;
+
+#ifdef CONFIG_SPLIT_PRODCONS
+	__builtin_prefetch(&q->node, 1, 0);
+#endif
+	/* Wait for our turn to signal producers */
+	if (odp_unlikely(__atomic_load_n(&q->prod_read, __ATOMIC_RELAXED) != old_read))
+		_odp_wait_until_eq_u32(&q->prod_read, old_read);
+
+	/* Signal producers that empty slots are available
+	 * (release ring slots)
+	 * Enable other consumers to continue
+	 */
+	/* Wait for loads (from ring slots) to complete */
+	atomic_store_release(&q->prod_read, new_read, /*readonly=*/true);
+
+	return actual;
+}
+
+/* Multi-consumer dequeue returning odp_event_t values instead of
+ * headers. NOTE(review): the conversion loop runs to 'num' even when
+ * fewer than 'num' events were dequeued, reading uninitialized
+ * hdr_tbl entries past 'ret' — presumably callers only consume the
+ * first 'ret' outputs; confirm, or bound the loop by 'ret'. */
+int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num)
+{
+	int ret, evt_idx;
+	_odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	ret = _odp_queue_deq(q, hdr_tbl, num);
+	if (odp_likely(ret != 0)) {
+		for (evt_idx = 0; evt_idx < num; evt_idx++)
+			evp[evt_idx] = _odp_event_from_hdr(hdr_tbl[evt_idx]);
+	}
+
+	return ret;
+}
+
+/* Internal multi-dequeue: delegate to the MC lock-free ring dequeue. */
+static int _queue_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+			    int num)
+{
+	sched_elem_t *q;
+	queue_entry_t *queue;
+
+	queue = qentry_from_int(handle);
+	q = &queue->sched_elem;
+	return _odp_queue_deq(q, event_hdr, num);
+}
+
+/* Internal single-event dequeue; NULL when the queue is empty. */
+static _odp_event_hdr_t *_queue_deq(odp_queue_t handle)
+{
+	sched_elem_t *q;
+	_odp_event_hdr_t *event_hdr;
+	queue_entry_t *queue;
+
+	queue = qentry_from_int(handle);
+	q = &queue->sched_elem;
+	if (_odp_queue_deq(q, &event_hdr, 1) == 1)
+		return event_hdr;
+	else
+		return NULL;
+}
+
+/* Public multi-dequeue. The ev[] array is passed to the internal path
+ * as a header-pointer array — this relies on event handles and header
+ * pointers having identical representation. Also drives inline timers
+ * when the queue has any armed. */
+static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
+{
+	queue_entry_t *queue;
+	int ret;
+
+	if (num > QUEUE_MULTI_MAX)
+		num = QUEUE_MULTI_MAX;
+
+	queue = _odp_qentry_from_ext(handle);
+
+	ret = queue->dequeue_multi(handle, (_odp_event_hdr_t **)ev, num);
+
+	if (odp_global_rw->inline_timers &&
+	    odp_atomic_load_u64(&queue->num_timers))
+		timer_run(ret ? 2 : 1);
+
+	return ret;
+}
+
+/* Public single-event dequeue; also drives inline timers when the
+ * queue has any armed. Returns ODP_EVENT_INVALID when empty. */
+static odp_event_t queue_deq(odp_queue_t handle)
+{
+	queue_entry_t *queue = _odp_qentry_from_ext(handle);
+	odp_event_t ev = (odp_event_t)queue->dequeue(handle);
+
+	if (odp_global_rw->inline_timers &&
+	    odp_atomic_load_u64(&queue->num_timers))
+		timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
+
+	return ev;
+}
+
+/* Fill in the implementation's default queue parameters: plain,
+ * blocking, MT-safe both directions, default priority, parallel sync,
+ * group ALL, order kept. */
+static void queue_param_init(odp_queue_param_t *params)
+{
+	memset(params, 0, sizeof(odp_queue_param_t));
+	params->type = ODP_QUEUE_TYPE_PLAIN;
+	params->enq_mode = ODP_QUEUE_OP_MT;
+	params->deq_mode = ODP_QUEUE_OP_MT;
+	params->nonblocking = ODP_BLOCKING;
+	params->sched.prio = odp_schedule_default_prio();
+	params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
+	params->sched.group = ODP_SCHED_GROUP_ALL;
+	params->order = ODP_QUEUE_ORDER_KEEP;
+}
+
+/* Copy a queue's name pointer and parameters into 'info' under the
+ * entry lock. Rejects NULL info, out-of-range handles, and FREE or
+ * DESTROYED queues. Returns 0 / -1. */
+static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
+{
+	uint32_t queue_id;
+	queue_entry_t *queue;
+	int status;
+
+	if (odp_unlikely(info == NULL)) {
+		_ODP_ERR("Unable to store info, NULL ptr given\n");
+		return -1;
+	}
+
+	queue_id = queue_to_id(handle);
+
+	if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+		_ODP_ERR("Invalid queue handle:%" PRIu64 "\n", odp_queue_to_u64(handle));
+		return -1;
+	}
+
+	queue = get_qentry(queue_id);
+
+	LOCK(&queue->lock);
+	status = queue->status;
+
+	if (odp_unlikely(status == QUEUE_STATUS_FREE ||
+			 status == QUEUE_STATUS_DESTROYED)) {
+		UNLOCK(&queue->lock);
+		_ODP_ERR("Invalid queue status:%d\n", status);
+		return -1;
+	}
+
+	info->name = queue->name;
+	info->param = queue->param;
+
+	UNLOCK(&queue->lock);
+
+	return 0;
+}
+
+/* Debug print of one queue's configuration (modes, type, sched
+ * attributes, attached pktio, timer count) under the entry lock.
+ * Invalid handles and FREE/DESTROYED queues are reported and skipped. */
+static void queue_print(odp_queue_t handle)
+{
+	odp_pktio_info_t pktio_info;
+	queue_entry_t *queue;
+	uint32_t queue_id;
+	int status;
+
+	queue_id = queue_to_id(handle);
+
+	if (odp_unlikely(queue_id >= CONFIG_MAX_QUEUES)) {
+		_ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n", odp_queue_to_u64(handle));
+		return;
+	}
+
+	queue = get_qentry(queue_id);
+
+	LOCK(&queue->lock);
+	status = queue->status;
+
+	if (odp_unlikely(status == QUEUE_STATUS_FREE ||
+			 status == QUEUE_STATUS_DESTROYED)) {
+		UNLOCK(&queue->lock);
+		_ODP_ERR("Invalid queue status:%d\n", status);
+		return;
+	}
+	_ODP_PRINT("\nQueue info\n");
+	_ODP_PRINT("----------\n");
+	_ODP_PRINT("  handle          %p\n", (void *)queue->handle);
+	_ODP_PRINT("  index           %" PRIu32 "\n", queue->index);
+	_ODP_PRINT("  name            %s\n", queue->name);
+	_ODP_PRINT("  enq mode        %s\n",
+		   queue->param.enq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+		   (queue->param.enq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+		    (queue->param.enq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+		     "unknown")));
+	_ODP_PRINT("  deq mode        %s\n",
+		   queue->param.deq_mode == ODP_QUEUE_OP_MT ? "ODP_QUEUE_OP_MT" :
+		   (queue->param.deq_mode == ODP_QUEUE_OP_MT_UNSAFE ? "ODP_QUEUE_OP_MT_UNSAFE" :
+		    (queue->param.deq_mode == ODP_QUEUE_OP_DISABLED ? "ODP_QUEUE_OP_DISABLED" :
+		     "unknown")));
+	_ODP_PRINT("  type            %s\n",
+		   queue->type == ODP_QUEUE_TYPE_PLAIN ? "ODP_QUEUE_TYPE_PLAIN" :
+		   (queue->type == ODP_QUEUE_TYPE_SCHED ? "ODP_QUEUE_TYPE_SCHED" : "unknown"));
+	if (queue->type == ODP_QUEUE_TYPE_SCHED) {
+		_ODP_PRINT("    sync          %s\n",
+			   queue->param.sched.sync == ODP_SCHED_SYNC_PARALLEL ?
+			   "ODP_SCHED_SYNC_PARALLEL" :
+			   (queue->param.sched.sync == ODP_SCHED_SYNC_ATOMIC ?
+			    "ODP_SCHED_SYNC_ATOMIC" :
+			    (queue->param.sched.sync == ODP_SCHED_SYNC_ORDERED ?
+			     "ODP_SCHED_SYNC_ORDERED" : "unknown")));
+		_ODP_PRINT("    priority      %d\n", queue->param.sched.prio);
+		_ODP_PRINT("    group         %d\n", queue->param.sched.group);
+	}
+	if (queue->pktin.pktio != ODP_PKTIO_INVALID) {
+		if (!odp_pktio_info(queue->pktin.pktio, &pktio_info))
+			_ODP_PRINT("  pktin           %s\n", pktio_info.name);
+	}
+	if (queue->pktout.pktio != ODP_PKTIO_INVALID) {
+		if (!odp_pktio_info(queue->pktout.pktio, &pktio_info))
+			_ODP_PRINT("  pktout          %s\n", pktio_info.name);
+	}
+	_ODP_PRINT("  timers          %" PRIu64 "\n", odp_atomic_load_u64(&queue->num_timers));
+	_ODP_PRINT("  param.size      %" PRIu32 "\n", queue->param.size);
+	_ODP_PRINT("\n");
+
+	UNLOCK(&queue->lock);
+}
+
+/* Debug print of a one-line summary per READY queue. Fields are
+ * snapshotted under the entry lock, then printed outside it; the
+ * status is re-checked after unlock so queues destroyed mid-scan are
+ * skipped. */
+static void queue_print_all(void)
+{
+	uint32_t i, index;
+	const char *name;
+	int status;
+	odp_queue_type_t type;
+	odp_nonblocking_t blocking;
+	odp_queue_op_mode_t enq_mode;
+	odp_queue_op_mode_t deq_mode;
+	odp_queue_order_t order;
+	odp_schedule_sync_t sync;
+	int prio;
+	const char *bl_str;
+	char type_c, enq_c, deq_c, order_c, sync_c;
+	const int col_width = 24;
+
+	_ODP_PRINT("\nList of all queues\n");
+	_ODP_PRINT("------------------\n");
+	_ODP_PRINT("  idx %-*s type blk enq deq ord    sync prio\n", col_width, "name");
+
+	for (i = 0; i < CONFIG_MAX_QUEUES; i++) {
+		queue_entry_t *queue = &queue_tbl->queue[i];
+
+		if (queue->status != QUEUE_STATUS_READY)
+			continue;
+
+		LOCK(&queue->lock);
+
+		status   = queue->status;
+		index    = queue->index;
+		name     = queue->name;
+		type     = queue->type;
+		blocking = queue->param.nonblocking;
+		enq_mode = queue->param.enq_mode;
+		deq_mode = queue->param.deq_mode;
+		order    = queue->param.order;
+		prio     = queue->param.sched.prio;
+		sync     = queue->param.sched.sync;
+
+		UNLOCK(&queue->lock);
+
+		/* Re-check: entry may have been destroyed under the lock */
+		if (status != QUEUE_STATUS_READY)
+			continue;
+
+		type_c = (type == ODP_QUEUE_TYPE_PLAIN) ? 'P' : 'S';
+
+		bl_str = (blocking == ODP_BLOCKING) ? "B" :
+			((blocking == ODP_NONBLOCKING_LF) ? "LF" : "WF");
+
+		enq_c = (enq_mode == ODP_QUEUE_OP_MT) ? 'S' :
+			((enq_mode == ODP_QUEUE_OP_MT_UNSAFE) ? 'U' : 'D');
+
+		deq_c = (deq_mode == ODP_QUEUE_OP_MT) ? 'S' :
+			((deq_mode == ODP_QUEUE_OP_MT_UNSAFE) ? 'U' : 'D');
+
+		order_c = (order == ODP_QUEUE_ORDER_KEEP) ? 'K' : 'I';
+
+		_ODP_PRINT("%4u %-*s    %c  %2s", index, col_width, name, type_c, bl_str);
+		_ODP_PRINT("   %c   %c   %c", enq_c, deq_c, order_c);
+
+		if (type == ODP_QUEUE_TYPE_SCHED) {
+			sync_c = (sync == ODP_SCHED_SYNC_PARALLEL) ? 'P' :
+				((sync == ODP_SCHED_SYNC_ATOMIC) ? 'A' : 'O');
+			_ODP_PRINT("       %c %4i", sync_c, prio);
+		}
+
+		_ODP_PRINT("\n");
+	}
+
+	_ODP_PRINT("\n");
+}
+
+/* Handle-to-u64 conversion for printing/debug. */
+static uint64_t queue_to_u64(odp_queue_t hdl)
+{
+	return _odp_pri(hdl);
+}
+
+/* Packet-output binding stored in the queue entry. */
+static odp_pktout_queue_t queue_get_pktout(odp_queue_t handle)
+{
+	return qentry_from_int(handle)->pktout;
+}
+
+/* Bind a packet-output interface/index to the queue. */
+static void queue_set_pktout(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+	qentry_from_int(handle)->pktout.pktio = pktio;
+	qentry_from_int(handle)->pktout.index = index;
+}
+
+/* Packet-input binding stored in the queue entry. */
+static odp_pktin_queue_t queue_get_pktin(odp_queue_t handle)
+{
+	return qentry_from_int(handle)->pktin;
+}
+
+/* Bind a packet-input interface/index to the queue. */
+static void queue_set_pktin(odp_queue_t handle, odp_pktio_t pktio, int index)
+{
+	qentry_from_int(handle)->pktin.pktio = pktio;
+	qentry_from_int(handle)->pktin.index = index;
+}
+
+/* Override the queue's enq/deq function pointers; NULL arguments
+ * leave the corresponding pointer untouched. Used by pktio to splice
+ * its own handlers into a queue. */
+static void queue_set_enq_deq_func(odp_queue_t handle,
+				   queue_enq_fn_t enq,
+				   queue_enq_multi_fn_t enq_multi,
+				   queue_deq_fn_t deq,
+				   queue_deq_multi_fn_t deq_multi)
+{
+	if (enq)
+		qentry_from_int(handle)->enqueue = enq;
+
+	if (enq_multi)
+		qentry_from_int(handle)->enqueue_multi = enq_multi;
+
+	if (deq)
+		qentry_from_int(handle)->dequeue = deq;
+
+	if (deq_multi)
+		qentry_from_int(handle)->dequeue_multi = deq_multi;
+}
+
+/* Dequeue via the original (pre-override) multi-dequeue function. */
+static int queue_orig_multi(odp_queue_t handle,
+			    _odp_event_hdr_t **event_hdr, int num)
+{
+	return qentry_from_int(handle)->orig_dequeue_multi(handle,
+							   event_hdr, num);
+}
+
+/* Account one more timer targeting this queue (drives inline timer
+ * processing in the dequeue paths). */
+static void queue_timer_add(odp_queue_t handle)
+{
+	queue_entry_t *queue = _odp_qentry_from_ext(handle);
+
+	odp_atomic_inc_u64(&queue->num_timers);
+}
+
+/* Account one fewer timer targeting this queue. */
+static void queue_timer_rem(odp_queue_t handle)
+{
+	queue_entry_t *queue = _odp_qentry_from_ext(handle);
+
+	odp_atomic_dec_u64(&queue->num_timers);
+}
+
+/* API functions */
+/* Function table exposing the scalable implementation to the public
+ * ODP queue API dispatch layer. */
+_odp_queue_api_fn_t _odp_queue_scalable_api = {
+	.queue_create = queue_create,
+	.queue_create_multi = queue_create_multi,
+	.queue_destroy = queue_destroy,
+	.queue_destroy_multi = queue_destroy_multi,
+	.queue_lookup = queue_lookup,
+	.queue_capability = queue_capability,
+	.queue_context_set = queue_context_set,
+	.queue_enq = queue_enq,
+	.queue_enq_multi = queue_enq_multi,
+	.queue_deq = queue_deq,
+	.queue_deq_multi = queue_deq_multi,
+	.queue_type = queue_type,
+	.queue_sched_type = queue_sched_type,
+	.queue_sched_prio = queue_sched_prio,
+	.queue_sched_group = queue_sched_group,
+	.queue_lock_count = queue_lock_count,
+	.queue_to_u64 = queue_to_u64,
+	.queue_param_init = queue_param_init,
+	.queue_info = queue_info,
+	.queue_print = queue_print,
+	.queue_print_all = queue_print_all
+};
+
+/* Functions towards internal components */
+/* Function table exposing the scalable implementation to other
+ * internal ODP components (init, pktio wiring, timer accounting). */
+queue_fn_t _odp_queue_scalable_fn = {
+	.init_global = queue_init_global,
+	.term_global = queue_term_global,
+	.init_local = queue_init_local,
+	.term_local = queue_term_local,
+	.get_pktout = queue_get_pktout,
+	.set_pktout = queue_set_pktout,
+	.get_pktin = queue_get_pktin,
+	.set_pktin = queue_set_pktin,
+	.set_enq_deq_fn = queue_set_enq_deq_func,
+	.orig_deq_multi = queue_orig_multi,
+	.timer_add = queue_timer_add,
+	.timer_rem = queue_timer_rem
+};
diff --git a/platform/linux-generic/odp_queue_spsc.c b/platform/linux-generic/odp_queue_spsc.c
new file mode 100644
index 000000000..74cc740e0
--- /dev/null
+++ b/platform/linux-generic/odp_queue_spsc.c
@@ -0,0 +1,136 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_queue_basic_internal.h>
+
+#include <string.h>
+#include <stdio.h>
+
+static inline void event_index_from_hdr(uint32_t event_index[],
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ event_index[i] = event_hdr[i]->index.u32;
+}
+
+static inline void event_index_to_hdr(_odp_event_hdr_t *event_hdr[],
+ uint32_t event_index[], int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ event_hdr[i] = _odp_event_hdr_from_index_u32(event_index[i]);
+ odp_prefetch(event_hdr[i]);
+ }
+}
+
+static inline int spsc_enq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ queue_entry_t *queue;
+ ring_spsc_t *ring_spsc;
+ uint32_t buf_idx[num];
+
+ queue = qentry_from_handle(handle);
+ ring_spsc = &queue->ring_spsc;
+
+ event_index_from_hdr(buf_idx, event_hdr, num);
+
+ if (odp_unlikely(queue->status < QUEUE_STATUS_READY)) {
+ _ODP_ERR("Bad queue status\n");
+ return -1;
+ }
+
+ return ring_spsc_enq_multi(ring_spsc, queue->ring_data,
+ queue->ring_mask, buf_idx, num);
+}
+
+static inline int spsc_deq_multi(odp_queue_t handle,
+ _odp_event_hdr_t *event_hdr[], int num)
+{
+ queue_entry_t *queue;
+ int num_deq;
+ ring_spsc_t *ring_spsc;
+ uint32_t buf_idx[num];
+
+ queue = qentry_from_handle(handle);
+ ring_spsc = &queue->ring_spsc;
+
+ if (odp_unlikely(queue->status < QUEUE_STATUS_READY)) {
+ /* Bad queue, or queue has been destroyed. */
+ return -1;
+ }
+
+ num_deq = ring_spsc_deq_multi(ring_spsc, queue->ring_data,
+ queue->ring_mask, buf_idx, num);
+
+ if (num_deq == 0)
+ return 0;
+
+ event_index_to_hdr(event_hdr, buf_idx, num_deq);
+
+ return num_deq;
+}
+
+static int queue_spsc_enq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+ int num)
+{
+ return spsc_enq_multi(handle, event_hdr, num);
+}
+
+static int queue_spsc_enq(odp_queue_t handle, _odp_event_hdr_t *event_hdr)
+{
+ int ret;
+
+ ret = spsc_enq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
+static int queue_spsc_deq_multi(odp_queue_t handle, _odp_event_hdr_t *event_hdr[],
+ int num)
+{
+ return spsc_deq_multi(handle, event_hdr, num);
+}
+
+static _odp_event_hdr_t *queue_spsc_deq(odp_queue_t handle)
+{
+ _odp_event_hdr_t *event_hdr = NULL;
+ int ret;
+
+ ret = spsc_deq_multi(handle, &event_hdr, 1);
+
+ if (ret == 1)
+ return event_hdr;
+ else
+ return NULL;
+}
+
+void _odp_queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
+{
+ uint64_t offset;
+
+ queue->enqueue = queue_spsc_enq;
+ queue->dequeue = queue_spsc_deq;
+ queue->enqueue_multi = queue_spsc_enq_multi;
+ queue->dequeue_multi = queue_spsc_deq_multi;
+ queue->orig_dequeue_multi = queue_spsc_deq_multi;
+
+ offset = queue->index * (uint64_t)_odp_queue_glb->config.max_queue_size;
+
+ queue->ring_data = &_odp_queue_glb->ring_data[offset];
+ queue->ring_mask = queue_size - 1;
+ ring_spsc_init(&queue->ring_spsc);
+}
diff --git a/platform/linux-generic/odp_random.c b/platform/linux-generic/odp_random.c
new file mode 100644
index 000000000..3060e8ed9
--- /dev/null
+++ b/platform/linux-generic/odp_random.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdint.h>
+
+#include <odp/api/random.h>
+
+#include <odp/autoheader_internal.h>
+#include <odp_init_internal.h>
+#include <odp_random_std_internal.h>
+#include <odp_random_openssl_internal.h>
+#include <odp_random.h>
+
+odp_random_kind_t odp_random_max_kind(void)
+{
+ odp_random_kind_t kind, max_kind = ODP_RANDOM_BASIC;
+
+ if (_ODP_OPENSSL_RAND)
+ max_kind = ODP_RANDOM_CRYPTO;
+
+ kind = _odp_random_max_kind();
+ if (kind > max_kind)
+ max_kind = kind;
+
+ return max_kind;
+}
+
+int32_t odp_random_data(uint8_t *buf, uint32_t len, odp_random_kind_t kind)
+{
+ switch (kind) {
+ case ODP_RANDOM_BASIC:
+ if (_ODP_OPENSSL_RAND)
+ return _odp_random_openssl_data(buf, len);
+ return _odp_random_std_data(buf, len);
+ case ODP_RANDOM_CRYPTO:
+ if (_ODP_OPENSSL_RAND)
+ return _odp_random_openssl_data(buf, len);
+ return _odp_random_crypto_data(buf, len);
+ case ODP_RANDOM_TRUE:
+ return _odp_random_true_data(buf, len);
+ }
+
+ return -1;
+}
+
+int32_t odp_random_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
+{
+ return _odp_random_std_test_data(buf, len, seed);
+}
+
+int _odp_random_init_local(void)
+{
+ if (_ODP_OPENSSL_RAND)
+ return _odp_random_openssl_init_local();
+ return _odp_random_std_init_local();
+}
+
+int _odp_random_term_local(void)
+{
+ if (_ODP_OPENSSL_RAND)
+ return _odp_random_openssl_term_local();
+ return _odp_random_std_term_local();
+}
diff --git a/platform/linux-generic/odp_random_openssl.c b/platform/linux-generic/odp_random_openssl.c
new file mode 100644
index 000000000..fdc40871b
--- /dev/null
+++ b/platform/linux-generic/odp_random_openssl.c
@@ -0,0 +1,41 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+#include <stdint.h>
+#include <odp/autoheader_internal.h>
+#include <odp_init_internal.h>
+#include <odp_random_openssl_internal.h>
+
+#if _ODP_OPENSSL_RAND
+#include <openssl/rand.h>
+
+int32_t _odp_random_openssl_data(uint8_t *buf, uint32_t len)
+{
+ int rc;
+
+ rc = RAND_bytes(buf, len);
+ return (1 == rc) ? (int)len /*success*/: -1 /*failure*/;
+}
+#else
+/* Dummy functions for building without OpenSSL support */
+int32_t _odp_random_openssl_data(uint8_t *buf ODP_UNUSED,
+ uint32_t len ODP_UNUSED)
+{
+ return -1;
+}
+#endif /* _ODP_OPENSSL_RAND */
+
+int _odp_random_openssl_init_local(void)
+{
+ return 0;
+}
+
+int _odp_random_openssl_term_local(void)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/odp_random_std.c b/platform/linux-generic/odp_random_std.c
new file mode 100644
index 000000000..50cd773f2
--- /dev/null
+++ b/platform/linux-generic/odp_random_std.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/byteorder.h>
+#include <odp/api/cpu.h>
+#include <odp/api/debug.h>
+#include <odp_init_internal.h>
+#include <odp_random_std_internal.h>
+#include <odp_cpu.h>
+
+#include <stdint.h>
+#include <time.h>
+
+/*
+ * Xorshift64*, adapted from [1], and modified to return only the high 32 bits.
+ *
+ * [1] An experimental exploration of Marsaglia's xorshift generators, scrambled
+ * Sebastiano Vigna, July 2016.
+ * http://vigna.di.unimi.it/ftp/papers/xorshift.pdf
+ */
+static inline uint32_t xorshift64s32(uint64_t *x)
+{
+ /* The variable x should be initialized to a nonzero seed. [1] */
+ if (!*x)
+ /*
+ * 2^64 / phi. As far away as possible from any small integer
+ * fractions, which the caller might be likely to use for the
+ * next seed after 0.
+ */
+ *x = 11400714819323198485ull;
+
+ *x ^= *x >> 12; /* a */
+ *x ^= *x << 25; /* b */
+ *x ^= *x >> 27; /* c */
+ return (*x * 2685821657736338717ull) >> 32;
+}
+
+static int32_t _random_data(uint8_t *buf, uint32_t len, uint64_t *seed)
+{
+ const uint32_t ret = len;
+
+ if (!_ODP_UNALIGNED && ((uintptr_t)buf & 3) && len) {
+ uint32_t r = xorshift64s32(seed);
+
+ if ((uintptr_t)buf & 1) {
+ *(uint8_t *)(uintptr_t)buf = r & 0xff;
+ r >>= 8;
+ buf += 1;
+ len -= 1;
+ }
+
+ if (((uintptr_t)buf & 2) && len >= 2) {
+ *(uint16_t *)(uintptr_t)buf = r & 0xffff;
+ buf += 2;
+ len -= 2;
+ }
+ }
+
+ for (uint32_t i = 0; i < len / 4; i++) {
+ *(uint32_t *)(uintptr_t)buf = xorshift64s32(seed);
+ buf += 4;
+ }
+
+ if (len & 3) {
+ uint32_t r = xorshift64s32(seed);
+
+ if (len & 2) {
+ *(uint16_t *)(uintptr_t)buf = r & 0xffff;
+ r >>= 16;
+ buf += 2;
+ }
+
+ if (len & 1)
+ *(uint8_t *)(uintptr_t)buf = r & 0xff;
+ }
+
+ return ret;
+}
+
+int32_t _odp_random_std_test_data(uint8_t *buf, uint32_t len, uint64_t *seed)
+{
+ return _random_data(buf, len, seed);
+}
+
+static __thread uint64_t this_seed;
+
+int32_t _odp_random_std_data(uint8_t *buf, uint32_t len)
+{
+ return _random_data(buf, len, &this_seed);
+}
+
+int _odp_random_std_init_local(void)
+{
+ this_seed = time(NULL);
+ this_seed ^= (uint64_t)odp_cpu_id() << 32;
+
+ return 0;
+}
+
+int _odp_random_std_term_local(void)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/odp_rwlock.c b/platform/linux-generic/odp_rwlock.c
deleted file mode 100644
index 13c17a2c7..000000000
--- a/platform/linux-generic/odp_rwlock.c
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <stdbool.h>
-#include <odp/api/atomic.h>
-#include <odp/api/rwlock.h>
-#include <odp/api/cpu.h>
-
-void odp_rwlock_init(odp_rwlock_t *rwlock)
-{
- odp_atomic_init_u32(&rwlock->cnt, 0);
-}
-
-void odp_rwlock_read_lock(odp_rwlock_t *rwlock)
-{
- uint32_t cnt;
- int is_locked = 0;
-
- while (is_locked == 0) {
- cnt = odp_atomic_load_u32(&rwlock->cnt);
- /* waiting for read lock */
- if ((int32_t)cnt < 0) {
- odp_cpu_pause();
- continue;
- }
- is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt,
- &cnt, cnt + 1);
- }
-}
-
-int odp_rwlock_read_trylock(odp_rwlock_t *rwlock)
-{
- uint32_t zero = 0;
-
- return odp_atomic_cas_acq_u32(&rwlock->cnt, &zero, (uint32_t)1);
-}
-
-void odp_rwlock_read_unlock(odp_rwlock_t *rwlock)
-{
- odp_atomic_sub_rel_u32(&rwlock->cnt, 1);
-}
-
-void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
-{
- uint32_t cnt;
- int is_locked = 0;
-
- while (is_locked == 0) {
- uint32_t zero = 0;
- cnt = odp_atomic_load_u32(&rwlock->cnt);
- /* lock acquired, wait */
- if (cnt != 0) {
- odp_cpu_pause();
- continue;
- }
- is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt,
- &zero, (uint32_t)-1);
- }
-}
-
-int odp_rwlock_write_trylock(odp_rwlock_t *rwlock)
-{
- uint32_t zero = 0;
-
- return odp_atomic_cas_acq_u32(&rwlock->cnt, &zero, (uint32_t)-1);
-}
-
-void odp_rwlock_write_unlock(odp_rwlock_t *rwlock)
-{
- odp_atomic_store_rel_u32(&rwlock->cnt, 0);
-}
diff --git a/platform/linux-generic/odp_rwlock_api.c b/platform/linux-generic/odp_rwlock_api.c
new file mode 100644
index 000000000..217479598
--- /dev/null
+++ b/platform/linux-generic/odp_rwlock_api.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/rwlock.h>
+
+#define _ODP_NO_INLINE
+#include <odp/api/plat/rwlock_inlines.h>
diff --git a/platform/linux-generic/odp_rwlock_recursive.c b/platform/linux-generic/odp_rwlock_recursive.c
deleted file mode 100644
index 6b0228143..000000000
--- a/platform/linux-generic/odp_rwlock_recursive.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/rwlock_recursive.h>
-#include <odp/api/thread.h>
-#include <string.h>
-
-#define NO_OWNER (-1)
-
-void odp_rwlock_recursive_init(odp_rwlock_recursive_t *rlock)
-{
- memset(rlock, 0, sizeof(odp_rwlock_recursive_t));
- odp_rwlock_init(&rlock->lock);
- rlock->wr_owner = NO_OWNER;
-}
-
-/* Multiple readers can recurse the lock concurrently */
-void odp_rwlock_recursive_read_lock(odp_rwlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->rd_cnt[thr]) {
- rlock->rd_cnt[thr]++;
- return;
- }
-
- odp_rwlock_read_lock(&rlock->lock);
- rlock->rd_cnt[thr] = 1;
-}
-
-/* Multiple readers can recurse the lock concurrently */
-int odp_rwlock_recursive_read_trylock(odp_rwlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->rd_cnt[thr]) {
- rlock->rd_cnt[thr]++;
- return 1;
- }
-
- if (odp_rwlock_read_trylock(&rlock->lock)) {
- rlock->rd_cnt[thr] = 1;
- return 1;
- }
-
- return 0;
-}
-
-void odp_rwlock_recursive_read_unlock(odp_rwlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- rlock->rd_cnt[thr]--;
-
- if (rlock->rd_cnt[thr] > 0)
- return;
-
- odp_rwlock_read_unlock(&rlock->lock);
-}
-
-/* Only one writer can recurse the lock */
-void odp_rwlock_recursive_write_lock(odp_rwlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->wr_owner == thr) {
- rlock->wr_cnt++;
- return;
- }
-
- odp_rwlock_write_lock(&rlock->lock);
- rlock->wr_owner = thr;
- rlock->wr_cnt = 1;
-}
-
-/* Only one writer can recurse the lock */
-int odp_rwlock_recursive_write_trylock(odp_rwlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->wr_owner == thr) {
- rlock->wr_cnt++;
- return 1;
- }
-
- if (odp_rwlock_write_trylock(&rlock->lock)) {
- rlock->wr_owner = thr;
- rlock->wr_cnt = 1;
- return 1;
- }
-
- return 0;
-}
-
-void odp_rwlock_recursive_write_unlock(odp_rwlock_recursive_t *rlock)
-{
- rlock->wr_cnt--;
-
- if (rlock->wr_cnt > 0)
- return;
-
- rlock->wr_owner = NO_OWNER;
- odp_rwlock_write_unlock(&rlock->lock);
-}
diff --git a/platform/linux-generic/odp_rwlock_recursive_api.c b/platform/linux-generic/odp_rwlock_recursive_api.c
new file mode 100644
index 000000000..b2580ae42
--- /dev/null
+++ b/platform/linux-generic/odp_rwlock_recursive_api.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/rwlock_recursive.h>
+
+#define _ODP_NO_INLINE
+#include <odp/api/plat/rwlock_recursive_inlines.h>
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
deleted file mode 100644
index cd5bf21bd..000000000
--- a/platform/linux-generic/odp_schedule.c
+++ /dev/null
@@ -1,1258 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <string.h>
-#include <odp/api/schedule.h>
-#include <odp_schedule_if.h>
-#include <odp/api/align.h>
-#include <odp/api/shared_memory.h>
-#include <odp_internal.h>
-#include <odp_debug_internal.h>
-#include <odp/api/thread.h>
-#include <odp/api/time.h>
-#include <odp/api/spinlock.h>
-#include <odp/api/hints.h>
-#include <odp/api/cpu.h>
-#include <odp/api/thrmask.h>
-#include <odp_config_internal.h>
-#include <odp_align_internal.h>
-#include <odp/api/sync.h>
-#include <odp_ring_internal.h>
-#include <odp_queue_internal.h>
-
-/* Number of priority levels */
-#define NUM_PRIO 8
-
-ODP_STATIC_ASSERT(ODP_SCHED_PRIO_LOWEST == (NUM_PRIO - 1),
- "lowest_prio_does_not_match_with_num_prios");
-
-ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
- (ODP_SCHED_PRIO_NORMAL < (NUM_PRIO - 1)),
- "normal_prio_is_not_between_highest_and_lowest");
-
-/* Number of scheduling groups */
-#define NUM_SCHED_GRPS 256
-
-/* Priority queues per priority */
-#define QUEUES_PER_PRIO 4
-
-/* Packet input poll cmd queues */
-#define PKTIO_CMD_QUEUES 4
-
-/* Mask for wrapping command queues */
-#define PKTIO_CMD_QUEUE_MASK (PKTIO_CMD_QUEUES - 1)
-
-/* Maximum number of packet input queues per command */
-#define MAX_PKTIN 16
-
-/* Maximum number of packet IO interfaces */
-#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
-
-/* Maximum number of pktio poll commands */
-#define NUM_PKTIO_CMD (MAX_PKTIN * NUM_PKTIO)
-
-/* Not a valid poll command */
-#define PKTIO_CMD_INVALID ((uint32_t)-1)
-
-/* Pktio command is free */
-#define PKTIO_CMD_FREE PKTIO_CMD_INVALID
-
-/* Packet IO poll queue ring size. In worst case, all pktios have all pktins
- * enabled and one poll command is created per pktin queue. The ring size must
- * be larger than or equal to NUM_PKTIO_CMD / PKTIO_CMD_QUEUES, so that it can
- * hold all poll commands in the worst case. */
-#define PKTIO_RING_SIZE (NUM_PKTIO_CMD / PKTIO_CMD_QUEUES)
-
-/* Mask for wrapping around pktio poll command index */
-#define PKTIO_RING_MASK (PKTIO_RING_SIZE - 1)
-
-/* Priority queue ring size. In worst case, all event queues are scheduled
- * queues and have the same priority. The ring size must be larger than or
- * equal to ODP_CONFIG_QUEUES / QUEUES_PER_PRIO, so that it can hold all
- * queues in the worst case. */
-#define PRIO_QUEUE_RING_SIZE (ODP_CONFIG_QUEUES / QUEUES_PER_PRIO)
-
-/* Mask for wrapping around priority queue index */
-#define PRIO_QUEUE_MASK (PRIO_QUEUE_RING_SIZE - 1)
-
-/* Priority queue empty, not a valid queue index. */
-#define PRIO_QUEUE_EMPTY ((uint32_t)-1)
-
-/* For best performance, the number of queues should be a power of two. */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES),
- "Number_of_queues_is_not_power_of_two");
-
-/* Ring size must be power of two, so that MAX_QUEUE_IDX_MASK can be used. */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(PRIO_QUEUE_RING_SIZE),
- "Ring_size_is_not_power_of_two");
-
-/* Ring size must be power of two, so that PKTIO_RING_MASK can be used. */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(PKTIO_RING_SIZE),
- "pktio_ring_size_is_not_power_of_two");
-
-/* Number of commands queues must be power of two, so that PKTIO_CMD_QUEUE_MASK
- * can be used. */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(PKTIO_CMD_QUEUES),
- "pktio_cmd_queues_is_not_power_of_two");
-
-/* Mask of queues per priority */
-typedef uint8_t pri_mask_t;
-
-ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= QUEUES_PER_PRIO,
- "pri_mask_t_is_too_small");
-
-/* Start of named groups in group mask arrays */
-#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
-
-/* Maximum number of dequeues */
-#define MAX_DEQ CONFIG_BURST_SIZE
-
-/* Maximum number of ordered locks per queue */
-#define MAX_ORDERED_LOCKS_PER_QUEUE 2
-
-ODP_STATIC_ASSERT(MAX_ORDERED_LOCKS_PER_QUEUE <= CONFIG_QUEUE_MAX_ORD_LOCKS,
- "Too_many_ordered_locks");
-
-/* Ordered stash size */
-#define MAX_ORDERED_STASH 512
-
-/* Storage for stashed enqueue operation arguments */
-typedef struct {
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- queue_entry_t *queue;
- int num;
-} ordered_stash_t;
-
-/* Ordered lock states */
-typedef union {
- uint8_t u8[CONFIG_QUEUE_MAX_ORD_LOCKS];
- uint32_t all;
-} lock_called_t;
-
-ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
- "Lock_called_values_do_not_fit_in_uint32");
-
-/* Scheduler local data */
-typedef struct {
- int thr;
- int num;
- int index;
- int pause;
- uint16_t round;
- uint16_t prefer_offset;
- uint16_t pktin_polls;
- uint32_t queue_index;
- odp_queue_t queue;
- odp_event_t ev_stash[MAX_DEQ];
- struct {
- queue_entry_t *src_queue; /**< Source queue entry */
- uint64_t ctx; /**< Ordered context id */
- int stash_num; /**< Number of stashed enqueue operations */
- uint8_t in_order; /**< Order status */
- lock_called_t lock_called; /**< States of ordered locks */
- /** Storage for stashed enqueue operations */
- ordered_stash_t stash[MAX_ORDERED_STASH];
- } ordered;
-
-} sched_local_t;
-
-/* Priority queue */
-typedef struct {
- /* Ring header */
- ring_t ring;
-
- /* Ring data: queue indexes */
- uint32_t queue_index[PRIO_QUEUE_RING_SIZE];
-
-} prio_queue_t ODP_ALIGNED_CACHE;
-
-/* Packet IO queue */
-typedef struct {
- /* Ring header */
- ring_t ring;
-
- /* Ring data: pktio poll command indexes */
- uint32_t cmd_index[PKTIO_RING_SIZE];
-
-} pktio_queue_t ODP_ALIGNED_CACHE;
-
-/* Packet IO poll command */
-typedef struct {
- int pktio_index;
- int num_pktin;
- int pktin[MAX_PKTIN];
- uint32_t cmd_index;
-} pktio_cmd_t;
-
-typedef struct {
- pri_mask_t pri_mask[NUM_PRIO];
- odp_spinlock_t mask_lock;
-
- prio_queue_t prio_q[NUM_PRIO][QUEUES_PER_PRIO];
-
- odp_spinlock_t poll_cmd_lock;
- /* Number of commands in a command queue */
- uint16_t num_pktio_cmd[PKTIO_CMD_QUEUES];
-
- /* Packet IO command queues */
- pktio_queue_t pktio_q[PKTIO_CMD_QUEUES];
-
- /* Packet IO poll commands */
- pktio_cmd_t pktio_cmd[NUM_PKTIO_CMD];
-
- odp_shm_t shm;
- uint32_t pri_count[NUM_PRIO][QUEUES_PER_PRIO];
-
- odp_spinlock_t grp_lock;
- odp_thrmask_t mask_all;
- struct {
- char name[ODP_SCHED_GROUP_NAME_LEN];
- odp_thrmask_t mask;
- int allocated;
- } sched_grp[NUM_SCHED_GRPS];
-
- struct {
- int prio;
- int queue_per_prio;
- } queue[ODP_CONFIG_QUEUES];
-
- struct {
- /* Number of active commands for a pktio interface */
- int num_cmd;
- } pktio[NUM_PKTIO];
-
-} sched_global_t;
-
-/* Global scheduler context */
-static sched_global_t *sched;
-
-/* Thread local scheduler context */
-__thread sched_local_t sched_local;
-
-/* Function prototypes */
-static inline void schedule_release_context(void);
-
-static void sched_local_init(void)
-{
- memset(&sched_local, 0, sizeof(sched_local_t));
-
- sched_local.thr = odp_thread_id();
- sched_local.queue = ODP_QUEUE_INVALID;
- sched_local.queue_index = PRIO_QUEUE_EMPTY;
-}
-
-static int schedule_init_global(void)
-{
- odp_shm_t shm;
- int i, j;
-
- ODP_DBG("Schedule init ... ");
-
- shm = odp_shm_reserve("odp_scheduler",
- sizeof(sched_global_t),
- ODP_CACHE_LINE_SIZE, 0);
-
- sched = odp_shm_addr(shm);
-
- if (sched == NULL) {
- ODP_ERR("Schedule init: Shm reserve failed.\n");
- return -1;
- }
-
- memset(sched, 0, sizeof(sched_global_t));
-
- sched->shm = shm;
- odp_spinlock_init(&sched->mask_lock);
-
- for (i = 0; i < NUM_PRIO; i++) {
- for (j = 0; j < QUEUES_PER_PRIO; j++) {
- int k;
-
- ring_init(&sched->prio_q[i][j].ring);
-
- for (k = 0; k < PRIO_QUEUE_RING_SIZE; k++)
- sched->prio_q[i][j].queue_index[k] =
- PRIO_QUEUE_EMPTY;
- }
- }
-
- odp_spinlock_init(&sched->poll_cmd_lock);
- for (i = 0; i < PKTIO_CMD_QUEUES; i++) {
- ring_init(&sched->pktio_q[i].ring);
-
- for (j = 0; j < PKTIO_RING_SIZE; j++)
- sched->pktio_q[i].cmd_index[j] = PKTIO_CMD_INVALID;
- }
-
- for (i = 0; i < NUM_PKTIO_CMD; i++)
- sched->pktio_cmd[i].cmd_index = PKTIO_CMD_FREE;
-
- odp_spinlock_init(&sched->grp_lock);
-
- for (i = 0; i < NUM_SCHED_GRPS; i++) {
- memset(sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
- odp_thrmask_zero(&sched->sched_grp[i].mask);
- }
-
- odp_thrmask_setall(&sched->mask_all);
-
- ODP_DBG("done\n");
-
- return 0;
-}
-
-static int schedule_term_global(void)
-{
- int ret = 0;
- int rc = 0;
- int i, j;
-
- for (i = 0; i < NUM_PRIO; i++) {
- for (j = 0; j < QUEUES_PER_PRIO; j++) {
- ring_t *ring = &sched->prio_q[i][j].ring;
- uint32_t qi;
-
- while ((qi = ring_deq(ring, PRIO_QUEUE_MASK)) !=
- RING_EMPTY) {
- odp_event_t events[1];
- int num;
-
- num = sched_cb_queue_deq_multi(qi, events, 1);
-
- if (num < 0)
- sched_cb_queue_destroy_finalize(qi);
-
- if (num > 0)
- ODP_ERR("Queue not empty\n");
- }
- }
- }
-
- ret = odp_shm_free(sched->shm);
- if (ret < 0) {
- ODP_ERR("Shm free failed for odp_scheduler");
- rc = -1;
- }
-
- return rc;
-}
-
-static int schedule_init_local(void)
-{
- sched_local_init();
- return 0;
-}
-
-static int schedule_term_local(void)
-{
- if (sched_local.num) {
- ODP_ERR("Locally pre-scheduled events exist.\n");
- return -1;
- }
-
- schedule_release_context();
- return 0;
-}
-
-static unsigned schedule_max_ordered_locks(void)
-{
- return MAX_ORDERED_LOCKS_PER_QUEUE;
-}
-
-static inline int queue_per_prio(uint32_t queue_index)
-{
- return ((QUEUES_PER_PRIO - 1) & queue_index);
-}
-
-static void pri_set(int id, int prio)
-{
- odp_spinlock_lock(&sched->mask_lock);
- sched->pri_mask[prio] |= 1 << id;
- sched->pri_count[prio][id]++;
- odp_spinlock_unlock(&sched->mask_lock);
-}
-
-static void pri_clr(int id, int prio)
-{
- odp_spinlock_lock(&sched->mask_lock);
-
- /* Clear mask bit when last queue is removed*/
- sched->pri_count[prio][id]--;
-
- if (sched->pri_count[prio][id] == 0)
- sched->pri_mask[prio] &= (uint8_t)(~(1 << id));
-
- odp_spinlock_unlock(&sched->mask_lock);
-}
-
-static void pri_set_queue(uint32_t queue_index, int prio)
-{
- int id = queue_per_prio(queue_index);
-
- return pri_set(id, prio);
-}
-
-static void pri_clr_queue(uint32_t queue_index, int prio)
-{
- int id = queue_per_prio(queue_index);
- pri_clr(id, prio);
-}
-
-static int schedule_init_queue(uint32_t queue_index,
- const odp_schedule_param_t *sched_param)
-{
- int prio = sched_param->prio;
-
- pri_set_queue(queue_index, prio);
- sched->queue[queue_index].prio = prio;
- sched->queue[queue_index].queue_per_prio = queue_per_prio(queue_index);
-
- return 0;
-}
-
-static void schedule_destroy_queue(uint32_t queue_index)
-{
- int prio = sched->queue[queue_index].prio;
-
- pri_clr_queue(queue_index, prio);
- sched->queue[queue_index].prio = 0;
- sched->queue[queue_index].queue_per_prio = 0;
-}
-
-static int poll_cmd_queue_idx(int pktio_index, int pktin_idx)
-{
- return PKTIO_CMD_QUEUE_MASK & (pktio_index ^ pktin_idx);
-}
-
-static inline pktio_cmd_t *alloc_pktio_cmd(void)
-{
- int i;
- pktio_cmd_t *cmd = NULL;
-
- odp_spinlock_lock(&sched->poll_cmd_lock);
-
- /* Find next free command */
- for (i = 0; i < NUM_PKTIO_CMD; i++) {
- if (sched->pktio_cmd[i].cmd_index == PKTIO_CMD_FREE) {
- cmd = &sched->pktio_cmd[i];
- cmd->cmd_index = i;
- break;
- }
- }
-
- odp_spinlock_unlock(&sched->poll_cmd_lock);
-
- return cmd;
-}
-
-static inline void free_pktio_cmd(pktio_cmd_t *cmd)
-{
- odp_spinlock_lock(&sched->poll_cmd_lock);
-
- cmd->cmd_index = PKTIO_CMD_FREE;
-
- odp_spinlock_unlock(&sched->poll_cmd_lock);
-}
-
-static void schedule_pktio_start(int pktio_index, int num_pktin,
- int pktin_idx[])
-{
- int i, idx;
- pktio_cmd_t *cmd;
-
- if (num_pktin > MAX_PKTIN)
- ODP_ABORT("Too many input queues for scheduler\n");
-
- sched->pktio[pktio_index].num_cmd = num_pktin;
-
- /* Create a pktio poll command per queue */
- for (i = 0; i < num_pktin; i++) {
-
- cmd = alloc_pktio_cmd();
-
- if (cmd == NULL)
- ODP_ABORT("Scheduler out of pktio commands\n");
-
- idx = poll_cmd_queue_idx(pktio_index, pktin_idx[i]);
-
- odp_spinlock_lock(&sched->poll_cmd_lock);
- sched->num_pktio_cmd[idx]++;
- odp_spinlock_unlock(&sched->poll_cmd_lock);
-
- cmd->pktio_index = pktio_index;
- cmd->num_pktin = 1;
- cmd->pktin[0] = pktin_idx[i];
- ring_enq(&sched->pktio_q[idx].ring, PKTIO_RING_MASK,
- cmd->cmd_index);
- }
-}
-
-static int schedule_pktio_stop(int pktio_index, int first_pktin)
-{
- int num;
- int idx = poll_cmd_queue_idx(pktio_index, first_pktin);
-
- odp_spinlock_lock(&sched->poll_cmd_lock);
- sched->num_pktio_cmd[idx]--;
- sched->pktio[pktio_index].num_cmd--;
- num = sched->pktio[pktio_index].num_cmd;
- odp_spinlock_unlock(&sched->poll_cmd_lock);
-
- return num;
-}
-
-static void schedule_release_atomic(void)
-{
- uint32_t qi = sched_local.queue_index;
-
- if (qi != PRIO_QUEUE_EMPTY && sched_local.num == 0) {
- int prio = sched->queue[qi].prio;
- int queue_per_prio = sched->queue[qi].queue_per_prio;
- ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
-
- /* Release current atomic queue */
- ring_enq(ring, PRIO_QUEUE_MASK, qi);
- sched_local.queue_index = PRIO_QUEUE_EMPTY;
- }
-}
-
-static inline int ordered_own_turn(queue_entry_t *queue)
-{
- uint64_t ctx;
-
- ctx = odp_atomic_load_acq_u64(&queue->s.ordered.ctx);
-
- return ctx == sched_local.ordered.ctx;
-}
-
-static inline void wait_for_order(queue_entry_t *queue)
-{
- /* Busy loop to synchronize ordered processing */
- while (1) {
- if (ordered_own_turn(queue))
- break;
- odp_cpu_pause();
- }
-}
-
-/**
- * Perform stashed enqueue operations
- *
- * Should be called only when already in order.
- */
-static inline void ordered_stash_release(void)
-{
- int i;
-
- for (i = 0; i < sched_local.ordered.stash_num; i++) {
- queue_entry_t *queue;
- odp_buffer_hdr_t **buf_hdr;
- int num;
-
- queue = sched_local.ordered.stash[i].queue;
- buf_hdr = sched_local.ordered.stash[i].buf_hdr;
- num = sched_local.ordered.stash[i].num;
-
- queue_enq_multi(queue, buf_hdr, num);
- }
- sched_local.ordered.stash_num = 0;
-}
-
-static inline void release_ordered(void)
-{
- unsigned i;
- queue_entry_t *queue;
-
- queue = sched_local.ordered.src_queue;
-
- wait_for_order(queue);
-
- /* Release all ordered locks */
- for (i = 0; i < queue->s.param.sched.lock_count; i++) {
- if (!sched_local.ordered.lock_called.u8[i])
- odp_atomic_store_rel_u64(&queue->s.ordered.lock[i],
- sched_local.ordered.ctx + 1);
- }
-
- sched_local.ordered.lock_called.all = 0;
- sched_local.ordered.src_queue = NULL;
- sched_local.ordered.in_order = 0;
-
- ordered_stash_release();
-
- /* Next thread can continue processing */
- odp_atomic_add_rel_u64(&queue->s.ordered.ctx, 1);
-}
-
-static void schedule_release_ordered(void)
-{
- queue_entry_t *queue;
-
- queue = sched_local.ordered.src_queue;
-
- if (odp_unlikely(!queue || sched_local.num))
- return;
-
- release_ordered();
-}
-
-static inline void schedule_release_context(void)
-{
- if (sched_local.ordered.src_queue != NULL)
- release_ordered();
- else
- schedule_release_atomic();
-}
-
-static inline int copy_events(odp_event_t out_ev[], unsigned int max)
-{
- int i = 0;
-
- while (sched_local.num && max) {
- out_ev[i] = sched_local.ev_stash[sched_local.index];
- sched_local.index++;
- sched_local.num--;
- max--;
- i++;
- }
-
- return i;
-}
-
-static int schedule_ord_enq_multi(uint32_t queue_index, void *buf_hdr[],
- int num, int *ret)
-{
- int i;
- uint32_t stash_num = sched_local.ordered.stash_num;
- queue_entry_t *dst_queue = get_qentry(queue_index);
- queue_entry_t *src_queue = sched_local.ordered.src_queue;
-
- if (!sched_local.ordered.src_queue || sched_local.ordered.in_order)
- return 0;
-
- if (ordered_own_turn(src_queue)) {
- /* Own turn, so can do enqueue directly. */
- sched_local.ordered.in_order = 1;
- ordered_stash_release();
- return 0;
- }
-
- if (odp_unlikely(stash_num >= MAX_ORDERED_STASH)) {
- /* If the local stash is full, wait until it is our turn and
- * then release the stash and do enqueue directly. */
- wait_for_order(src_queue);
-
- sched_local.ordered.in_order = 1;
-
- ordered_stash_release();
- return 0;
- }
-
- sched_local.ordered.stash[stash_num].queue = dst_queue;
- sched_local.ordered.stash[stash_num].num = num;
- for (i = 0; i < num; i++)
- sched_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
-
- sched_local.ordered.stash_num++;
-
- *ret = num;
- return 1;
-}
-
-/*
- * Schedule queues
- */
-static int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
- unsigned int max_num)
-{
- int prio, i;
- int ret;
- int id;
- int offset = 0;
- unsigned int max_deq = MAX_DEQ;
- uint32_t qi;
-
- if (sched_local.num) {
- ret = copy_events(out_ev, max_num);
-
- if (out_queue)
- *out_queue = sched_local.queue;
-
- return ret;
- }
-
- schedule_release_context();
-
- if (odp_unlikely(sched_local.pause))
- return 0;
-
- /* Each thread prefers a priority queue. This offset avoids starvation
- * of other priority queues on low thread counts. */
- if (odp_unlikely((sched_local.round & 0x3f) == 0)) {
- offset = sched_local.prefer_offset;
- sched_local.prefer_offset = (offset + 1) &
- (QUEUES_PER_PRIO - 1);
- }
-
- sched_local.round++;
-
- /* Schedule events */
- for (prio = 0; prio < NUM_PRIO; prio++) {
-
- if (sched->pri_mask[prio] == 0)
- continue;
-
- id = (sched_local.thr + offset) & (QUEUES_PER_PRIO - 1);
-
- for (i = 0; i < QUEUES_PER_PRIO;) {
- int num;
- int grp;
- int ordered;
- odp_queue_t handle;
- ring_t *ring;
-
- if (id >= QUEUES_PER_PRIO)
- id = 0;
-
- /* No queues created for this priority queue */
- if (odp_unlikely((sched->pri_mask[prio] & (1 << id))
- == 0)) {
- i++;
- id++;
- continue;
- }
-
- /* Get queue index from the priority queue */
- ring = &sched->prio_q[prio][id].ring;
- qi = ring_deq(ring, PRIO_QUEUE_MASK);
-
- /* Priority queue empty */
- if (qi == RING_EMPTY) {
- i++;
- id++;
- continue;
- }
-
- grp = sched_cb_queue_grp(qi);
-
- if (grp > ODP_SCHED_GROUP_ALL &&
- !odp_thrmask_isset(&sched->sched_grp[grp].mask,
- sched_local.thr)) {
- /* This thread is not eligible for work from
- * this queue, so continue scheduling it.
- */
- ring_enq(ring, PRIO_QUEUE_MASK, qi);
-
- i++;
- id++;
- continue;
- }
-
- /* Low priorities have smaller batch size to limit
- * head of line blocking latency. */
- if (odp_unlikely(prio > ODP_SCHED_PRIO_DEFAULT))
- max_deq = MAX_DEQ / 2;
-
- ordered = sched_cb_queue_is_ordered(qi);
-
- /* Do not cache ordered events locally to improve
- * parallelism. Ordered context can only be released
- * when the local cache is empty. */
- if (ordered && max_num < MAX_DEQ)
- max_deq = max_num;
-
- num = sched_cb_queue_deq_multi(qi, sched_local.ev_stash,
- max_deq);
-
- if (num < 0) {
- /* Destroyed queue. Continue scheduling the same
- * priority queue. */
- sched_cb_queue_destroy_finalize(qi);
- continue;
- }
-
- if (num == 0) {
- /* Remove empty queue from scheduling. Continue
- * scheduling the same priority queue. */
- continue;
- }
-
- handle = sched_cb_queue_handle(qi);
- sched_local.num = num;
- sched_local.index = 0;
- sched_local.queue = handle;
- ret = copy_events(out_ev, max_num);
-
- if (ordered) {
- uint64_t ctx;
- queue_entry_t *queue;
- odp_atomic_u64_t *next_ctx;
-
- queue = get_qentry(qi);
- next_ctx = &queue->s.ordered.next_ctx;
-
- ctx = odp_atomic_fetch_inc_u64(next_ctx);
-
- sched_local.ordered.ctx = ctx;
- sched_local.ordered.src_queue = queue;
-
- /* Continue scheduling ordered queues */
- ring_enq(ring, PRIO_QUEUE_MASK, qi);
-
- } else if (sched_cb_queue_is_atomic(qi)) {
- /* Hold queue during atomic access */
- sched_local.queue_index = qi;
- } else {
- /* Continue scheduling the queue */
- ring_enq(ring, PRIO_QUEUE_MASK, qi);
- }
-
- /* Output the source queue handle */
- if (out_queue)
- *out_queue = handle;
-
- return ret;
- }
- }
-
- /*
- * Poll packet input when there are no events
- * * Each thread starts the search for a poll command from its
- * preferred command queue. If the queue is empty, it moves to other
- * queues.
- * * Most of the times, the search stops on the first command found to
- * optimize multi-threaded performance. A small portion of polls
- * have to do full iteration to avoid packet input starvation when
- * there are less threads than command queues.
- */
- id = sched_local.thr & PKTIO_CMD_QUEUE_MASK;
-
- for (i = 0; i < PKTIO_CMD_QUEUES; i++, id = ((id + 1) &
- PKTIO_CMD_QUEUE_MASK)) {
- ring_t *ring;
- uint32_t cmd_index;
- pktio_cmd_t *cmd;
-
- if (odp_unlikely(sched->num_pktio_cmd[id] == 0))
- continue;
-
- ring = &sched->pktio_q[id].ring;
- cmd_index = ring_deq(ring, PKTIO_RING_MASK);
-
- if (odp_unlikely(cmd_index == RING_EMPTY))
- continue;
-
- cmd = &sched->pktio_cmd[cmd_index];
-
- /* Poll packet input */
- if (odp_unlikely(sched_cb_pktin_poll(cmd->pktio_index,
- cmd->num_pktin,
- cmd->pktin))){
- /* Pktio stopped or closed. Remove poll command and call
- * stop_finalize when all commands of the pktio has
- * been removed. */
- if (schedule_pktio_stop(cmd->pktio_index,
- cmd->pktin[0]) == 0)
- sched_cb_pktio_stop_finalize(cmd->pktio_index);
-
- free_pktio_cmd(cmd);
- } else {
- /* Continue scheduling the pktio */
- ring_enq(ring, PKTIO_RING_MASK, cmd_index);
-
- /* Do not iterate through all pktin poll command queues
- * every time. */
- if (odp_likely(sched_local.pktin_polls & 0xf))
- break;
- }
- }
-
- sched_local.pktin_polls++;
- return 0;
-}
-
-
-static int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
- odp_event_t out_ev[],
- unsigned int max_num)
-{
- odp_time_t next, wtime;
- int first = 1;
- int ret;
-
- while (1) {
- ret = do_schedule(out_queue, out_ev, max_num);
-
- if (ret)
- break;
-
- if (wait == ODP_SCHED_WAIT)
- continue;
-
- if (wait == ODP_SCHED_NO_WAIT)
- break;
-
- if (first) {
- wtime = odp_time_local_from_ns(wait);
- next = odp_time_sum(odp_time_local(), wtime);
- first = 0;
- continue;
- }
-
- if (odp_time_cmp(next, odp_time_local()) < 0)
- break;
- }
-
- return ret;
-}
-
-static odp_event_t schedule(odp_queue_t *out_queue, uint64_t wait)
-{
- odp_event_t ev;
-
- ev = ODP_EVENT_INVALID;
-
- schedule_loop(out_queue, wait, &ev, 1);
-
- return ev;
-}
-
-static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
- odp_event_t events[], int num)
-{
- return schedule_loop(out_queue, wait, events, num);
-}
-
-static inline void order_lock(void)
-{
- queue_entry_t *queue;
-
- queue = sched_local.ordered.src_queue;
-
- if (!queue)
- return;
-
- wait_for_order(queue);
-}
-
-static void order_unlock(void)
-{
-}
-
-static void schedule_order_lock(unsigned lock_index)
-{
- odp_atomic_u64_t *ord_lock;
- queue_entry_t *queue;
-
- queue = sched_local.ordered.src_queue;
-
- ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count &&
- !sched_local.ordered.lock_called.u8[lock_index]);
-
- ord_lock = &queue->s.ordered.lock[lock_index];
-
- /* Busy loop to synchronize ordered processing */
- while (1) {
- uint64_t lock_seq;
-
- lock_seq = odp_atomic_load_acq_u64(ord_lock);
-
- if (lock_seq == sched_local.ordered.ctx) {
- sched_local.ordered.lock_called.u8[lock_index] = 1;
- return;
- }
- odp_cpu_pause();
- }
-}
-
-static void schedule_order_unlock(unsigned lock_index)
-{
- odp_atomic_u64_t *ord_lock;
- queue_entry_t *queue;
-
- queue = sched_local.ordered.src_queue;
-
- ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count);
-
- ord_lock = &queue->s.ordered.lock[lock_index];
-
- ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
-
- odp_atomic_store_rel_u64(ord_lock, sched_local.ordered.ctx + 1);
-}
-
-static void schedule_pause(void)
-{
- sched_local.pause = 1;
-}
-
-static void schedule_resume(void)
-{
- sched_local.pause = 0;
-}
-
-static uint64_t schedule_wait_time(uint64_t ns)
-{
- return ns;
-}
-
-static int schedule_num_prio(void)
-{
- return NUM_PRIO;
-}
-
-static odp_schedule_group_t schedule_group_create(const char *name,
- const odp_thrmask_t *mask)
-{
- odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
- int i;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
- if (!sched->sched_grp[i].allocated) {
- char *grp_name = sched->sched_grp[i].name;
-
- if (name == NULL) {
- grp_name[0] = 0;
- } else {
- strncpy(grp_name, name,
- ODP_SCHED_GROUP_NAME_LEN - 1);
- grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
- }
- odp_thrmask_copy(&sched->sched_grp[i].mask, mask);
- group = (odp_schedule_group_t)i;
- sched->sched_grp[i].allocated = 1;
- break;
- }
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return group;
-}
-
-static int schedule_group_destroy(odp_schedule_group_t group)
-{
- int ret;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- odp_thrmask_zero(&sched->sched_grp[group].mask);
- memset(sched->sched_grp[group].name, 0,
- ODP_SCHED_GROUP_NAME_LEN);
- sched->sched_grp[group].allocated = 0;
- ret = 0;
- } else {
- ret = -1;
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return ret;
-}
-
-static odp_schedule_group_t schedule_group_lookup(const char *name)
-{
- odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
- int i;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
- if (strcmp(name, sched->sched_grp[i].name) == 0) {
- group = (odp_schedule_group_t)i;
- break;
- }
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return group;
-}
-
-static int schedule_group_join(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
-{
- int ret;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- odp_thrmask_or(&sched->sched_grp[group].mask,
- &sched->sched_grp[group].mask,
- mask);
- ret = 0;
- } else {
- ret = -1;
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return ret;
-}
-
-static int schedule_group_leave(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
-{
- int ret;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- odp_thrmask_t leavemask;
-
- odp_thrmask_xor(&leavemask, mask, &sched->mask_all);
- odp_thrmask_and(&sched->sched_grp[group].mask,
- &sched->sched_grp[group].mask,
- &leavemask);
- ret = 0;
- } else {
- ret = -1;
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return ret;
-}
-
-static int schedule_group_thrmask(odp_schedule_group_t group,
- odp_thrmask_t *thrmask)
-{
- int ret;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- *thrmask = sched->sched_grp[group].mask;
- ret = 0;
- } else {
- ret = -1;
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return ret;
-}
-
-static int schedule_group_info(odp_schedule_group_t group,
- odp_schedule_group_info_t *info)
-{
- int ret;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- info->name = sched->sched_grp[group].name;
- info->thrmask = sched->sched_grp[group].mask;
- ret = 0;
- } else {
- ret = -1;
- }
-
- odp_spinlock_unlock(&sched->grp_lock);
- return ret;
-}
-
-static int schedule_thr_add(odp_schedule_group_t group, int thr)
-{
- if (group < 0 || group >= SCHED_GROUP_NAMED)
- return -1;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- odp_thrmask_set(&sched->sched_grp[group].mask, thr);
-
- odp_spinlock_unlock(&sched->grp_lock);
-
- return 0;
-}
-
-static int schedule_thr_rem(odp_schedule_group_t group, int thr)
-{
- if (group < 0 || group >= SCHED_GROUP_NAMED)
- return -1;
-
- odp_spinlock_lock(&sched->grp_lock);
-
- odp_thrmask_clr(&sched->sched_grp[group].mask, thr);
-
- odp_spinlock_unlock(&sched->grp_lock);
-
- return 0;
-}
-
-/* This function is a no-op */
-static void schedule_prefetch(int num ODP_UNUSED)
-{
-}
-
-static int schedule_sched_queue(uint32_t queue_index)
-{
- int prio = sched->queue[queue_index].prio;
- int queue_per_prio = sched->queue[queue_index].queue_per_prio;
- ring_t *ring = &sched->prio_q[prio][queue_per_prio].ring;
-
- ring_enq(ring, PRIO_QUEUE_MASK, queue_index);
- return 0;
-}
-
-static int schedule_unsched_queue(uint32_t queue_index ODP_UNUSED)
-{
- return 0;
-}
-
-static int schedule_num_grps(void)
-{
- return NUM_SCHED_GRPS;
-}
-
-static void schedule_save_context(queue_entry_t *queue ODP_UNUSED)
-{
-}
-
-/* Fill in scheduler interface */
-const schedule_fn_t schedule_default_fn = {
- .pktio_start = schedule_pktio_start,
- .thr_add = schedule_thr_add,
- .thr_rem = schedule_thr_rem,
- .num_grps = schedule_num_grps,
- .init_queue = schedule_init_queue,
- .destroy_queue = schedule_destroy_queue,
- .sched_queue = schedule_sched_queue,
- .unsched_queue = schedule_unsched_queue,
- .ord_enq_multi = schedule_ord_enq_multi,
- .init_global = schedule_init_global,
- .term_global = schedule_term_global,
- .init_local = schedule_init_local,
- .term_local = schedule_term_local,
- .order_lock = order_lock,
- .order_unlock = order_unlock,
- .max_ordered_locks = schedule_max_ordered_locks,
- .save_context = schedule_save_context
-};
-
-/* Fill in scheduler API calls */
-const schedule_api_t schedule_default_api = {
- .schedule_wait_time = schedule_wait_time,
- .schedule = schedule,
- .schedule_multi = schedule_multi,
- .schedule_pause = schedule_pause,
- .schedule_resume = schedule_resume,
- .schedule_release_atomic = schedule_release_atomic,
- .schedule_release_ordered = schedule_release_ordered,
- .schedule_prefetch = schedule_prefetch,
- .schedule_num_prio = schedule_num_prio,
- .schedule_group_create = schedule_group_create,
- .schedule_group_destroy = schedule_group_destroy,
- .schedule_group_lookup = schedule_group_lookup,
- .schedule_group_join = schedule_group_join,
- .schedule_group_leave = schedule_group_leave,
- .schedule_group_thrmask = schedule_group_thrmask,
- .schedule_group_info = schedule_group_info,
- .schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
-};
diff --git a/platform/linux-generic/odp_schedule_api.c b/platform/linux-generic/odp_schedule_api.c
new file mode 100644
index 000000000..4be1ba6f7
--- /dev/null
+++ b/platform/linux-generic/odp_schedule_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/schedule.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/schedule_inlines.h>
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
new file mode 100644
index 000000000..379f1f828
--- /dev/null
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -0,0 +1,2412 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/*
+ * Suppress bounds warnings about interior zero length arrays. Such an array
+ * is used intentionally in prio_queue_t.
+ */
+#if __GNUC__ >= 10
+#pragma GCC diagnostic ignored "-Wzero-length-bounds"
+#endif
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/schedule.h>
+#include <odp_schedule_if.h>
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp_debug_internal.h>
+#include <odp/api/thread.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp/api/time.h>
+#include <odp/api/plat/time_inlines.h>
+#include <odp/api/ticketlock.h>
+#include <odp/api/hints.h>
+#include <odp/api/cpu.h>
+#include <odp/api/thrmask.h>
+#include <odp_config_internal.h>
+#include <odp/api/sync.h>
+#include <odp/api/packet_io.h>
+#include <odp_ring_u32_internal.h>
+#include <odp_timer_internal.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/schedule_inline_types.h>
+#include <odp_global_data.h>
+#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_print_internal.h>
+
+#include <string.h>
+#include <time.h>
+#include <inttypes.h>
+
+/* No synchronization context */
+#define NO_SYNC_CONTEXT ODP_SCHED_SYNC_PARALLEL
+
+/* Number of priority levels */
+#define NUM_PRIO 8
+
+/* Group mask (prio_grp_mask) size in bits */
+#define GRP_MASK_BITS 64
+
+/* Number of scheduling groups. Maximum value is GRP_MASK_BITS. */
+#define NUM_SCHED_GRPS GRP_MASK_BITS
+
+/* Spread balancing frequency. Balance every BALANCE_ROUNDS_M1 + 1 scheduling rounds. */
+#define BALANCE_ROUNDS_M1 0xfffff
+
+/* Number of scheduled queue synchronization types */
+#define NUM_SCHED_SYNC 3
+
+/* Queue types used as array indices */
+ODP_STATIC_ASSERT(ODP_SCHED_SYNC_PARALLEL == 0, "ODP_SCHED_SYNC_PARALLEL_value_changed");
+ODP_STATIC_ASSERT(ODP_SCHED_SYNC_ATOMIC == 1, "ODP_SCHED_SYNC_ATOMIC_value_changed");
+ODP_STATIC_ASSERT(ODP_SCHED_SYNC_ORDERED == 2, "ODP_SCHED_SYNC_ORDERED_value_changed");
+
+/* Load of a queue */
+#define QUEUE_LOAD 256
+
+/* Margin for load balance hysteresis */
+#define QUEUE_LOAD_MARGIN 8
+
+/* Ensure that load calculation does not wrap around */
+ODP_STATIC_ASSERT((QUEUE_LOAD * CONFIG_MAX_SCHED_QUEUES) < UINT32_MAX, "Load_value_too_large");
+
+/* Maximum priority queue spread */
+#define MAX_SPREAD 8
+
+/* Minimum priority queue spread */
+#define MIN_SPREAD 1
+
+/* A thread polls a non preferred sched queue every this many polls
+ * of the prefer queue. */
+#define MAX_PREFER_WEIGHT 127
+#define MIN_PREFER_WEIGHT 1
+#define MAX_PREFER_RATIO (MAX_PREFER_WEIGHT + 1)
+
+/* Spread weight table */
+#define SPREAD_TBL_SIZE ((MAX_SPREAD - 1) * MAX_PREFER_RATIO)
+
+/* Random data table size */
+#define RANDOM_TBL_SIZE 128
+
+/* Maximum number of packet IO interfaces */
+#define NUM_PKTIO CONFIG_PKTIO_ENTRIES
+
+/* Maximum pktin index. Needs to fit into 8 bits. */
+#define MAX_PKTIN_INDEX 255
+
+/* Maximum priority queue ring size. A ring must be large enough to store all
+ * queues in the worst case (all queues are scheduled, have the same priority
+ * and no spreading). */
+#define MAX_RING_SIZE CONFIG_MAX_SCHED_QUEUES
+
+/* For best performance, the number of queues should be a power of two. */
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
+ "Number_of_queues_is_not_power_of_two");
+
+/* Ring size must be power of two, so that mask can be used. */
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(MAX_RING_SIZE),
+ "Ring_size_is_not_power_of_two");
+
+/* Thread ID is saved into uint16_t variable */
+ODP_STATIC_ASSERT(ODP_THREAD_COUNT_MAX < (64 * 1024),
+ "Max_64k_threads_supported");
+
+/* Mask of queues per priority */
+typedef uint8_t prio_q_mask_t;
+
+ODP_STATIC_ASSERT((8 * sizeof(prio_q_mask_t)) >= MAX_SPREAD,
+ "prio_q_mask_t_is_too_small");
+
+/* Start of named groups in group mask arrays */
+#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
+
+/* Limits for burst size configuration */
+#define BURST_MAX 255
+#define STASH_SIZE CONFIG_BURST_SIZE
+
+/* Ordered stash size */
+#define MAX_ORDERED_STASH 512
+
+/* Storage for stashed enqueue operation arguments */
+typedef struct {
+ _odp_event_hdr_t *event_hdr[QUEUE_MULTI_MAX];
+ odp_queue_t queue;
+ int num;
+} ordered_stash_t;
+
+/* Ordered lock states */
+typedef union {
+ uint8_t u8[CONFIG_QUEUE_MAX_ORD_LOCKS];
+ uint32_t all;
+} lock_called_t;
+
+ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
+ "Lock_called_values_do_not_fit_in_uint32");
+
+/* Shuffled values from 0 to 127 */
+static uint8_t sched_random_u8[] = {
+ 0x5B, 0x56, 0x21, 0x28, 0x77, 0x2C, 0x7E, 0x10,
+ 0x29, 0x73, 0x39, 0x74, 0x60, 0x2B, 0x2D, 0x3E,
+ 0x6C, 0x4C, 0x1B, 0x79, 0x14, 0x76, 0x7B, 0x5A,
+ 0x4F, 0x3B, 0x0B, 0x16, 0x66, 0x0D, 0x05, 0x27,
+ 0x3F, 0x7F, 0x67, 0x3C, 0x41, 0x6F, 0x4E, 0x7A,
+ 0x04, 0x26, 0x11, 0x7C, 0x43, 0x38, 0x30, 0x2A,
+ 0x03, 0x22, 0x17, 0x75, 0x08, 0x71, 0x6D, 0x6B,
+ 0x0A, 0x4B, 0x52, 0x1D, 0x63, 0x59, 0x1C, 0x50,
+ 0x15, 0x1A, 0x64, 0x42, 0x47, 0x62, 0x1F, 0x37,
+ 0x46, 0x5D, 0x19, 0x35, 0x78, 0x68, 0x57, 0x7D,
+ 0x3A, 0x31, 0x4A, 0x45, 0x09, 0x49, 0x00, 0x01,
+ 0x65, 0x13, 0x48, 0x70, 0x5E, 0x69, 0x36, 0x58,
+ 0x1E, 0x5C, 0x23, 0x12, 0x18, 0x25, 0x55, 0x32,
+ 0x33, 0x61, 0x2F, 0x02, 0x06, 0x53, 0x24, 0x6E,
+ 0x2E, 0x5F, 0x54, 0x6A, 0x20, 0x07, 0x0F, 0x51,
+ 0x3D, 0x34, 0x44, 0x0C, 0x4D, 0x40, 0x72, 0x0E
+};
+
+ODP_STATIC_ASSERT(sizeof(sched_random_u8) == RANDOM_TBL_SIZE, "Bad_random_table_size");
+
+/* Scheduler local data */
+typedef struct ODP_ALIGNED_CACHE {
+ uint32_t sched_round;
+ uint16_t thr;
+ uint8_t pause;
+ uint8_t sync_ctx;
+ uint8_t balance_on;
+ uint16_t balance_start;
+ uint16_t spread_round;
+
+ struct {
+ uint16_t num_ev;
+ uint16_t ev_index;
+ uint32_t qi;
+ odp_queue_t queue;
+ ring_u32_t *ring;
+ odp_event_t ev[STASH_SIZE];
+ } stash;
+
+ uint64_t grp_mask;
+ uint32_t grp_epoch;
+ uint16_t num_grp;
+ uint8_t grp_idx;
+ uint8_t grp[NUM_SCHED_GRPS];
+ uint8_t spread_tbl[SPREAD_TBL_SIZE];
+
+ struct {
+ /* Source queue index */
+ uint32_t src_queue;
+ uint64_t ctx; /**< Ordered context id */
+ int stash_num; /**< Number of stashed enqueue operations */
+ uint8_t in_order; /**< Order status */
+ lock_called_t lock_called; /**< States of ordered locks */
+ /** Storage for stashed enqueue operations */
+ ordered_stash_t stash[MAX_ORDERED_STASH];
+ } ordered;
+
+} sched_local_t;
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+/* Priority queue */
+typedef struct ODP_ALIGNED_CACHE {
+ /* Ring header */
+ ring_u32_t ring;
+
+ /* Ring data: queue indexes */
+ uint32_t queue_index[MAX_RING_SIZE]; /* overlaps with ring.data[] */
+
+} prio_queue_t;
+#pragma GCC diagnostic pop
+
+/* Order context of a queue */
+typedef struct ODP_ALIGNED_CACHE {
+ /* Current ordered context id */
+ odp_atomic_u64_t ctx ODP_ALIGNED_CACHE;
+
+ /* Next unallocated context id */
+ odp_atomic_u64_t next_ctx;
+
+ /* Array of ordered locks */
+ odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
+
+} order_context_t;
+
+typedef struct {
+ struct {
+ uint8_t burst_default[NUM_SCHED_SYNC][NUM_PRIO];
+ uint8_t burst_max[NUM_SCHED_SYNC][NUM_PRIO];
+ uint16_t order_stash_size;
+ uint8_t num_spread;
+ uint8_t prefer_ratio;
+ } config;
+ uint32_t ring_mask;
+ uint16_t max_spread;
+ uint8_t load_balance;
+ odp_atomic_u32_t grp_epoch;
+ odp_shm_t shm;
+ odp_ticketlock_t mask_lock[NUM_SCHED_GRPS];
+ prio_q_mask_t prio_q_mask[NUM_SCHED_GRPS][NUM_PRIO];
+
+ /* Groups on a priority level that have queues created */
+ odp_atomic_u64_t prio_grp_mask[NUM_PRIO];
+
+ struct {
+ uint8_t grp;
+ /* Inverted prio value (max = 0) vs API (min = 0)*/
+ uint8_t prio;
+ uint8_t spread;
+ uint8_t sync;
+ uint8_t order_lock_count;
+ uint8_t poll_pktin;
+ uint8_t pktio_index;
+ uint8_t pktin_index;
+ } queue[CONFIG_MAX_SCHED_QUEUES];
+
+ /* Scheduler priority queues */
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+ prio_queue_t prio_q[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
+#pragma GCC diagnostic pop
+ uint32_t prio_q_count[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
+
+ /* Number of queues per group and priority */
+ uint32_t prio_grp_count[NUM_PRIO][NUM_SCHED_GRPS];
+
+ odp_thrmask_t mask_all;
+ odp_ticketlock_t grp_lock;
+
+ struct {
+ char name[ODP_SCHED_GROUP_NAME_LEN];
+ odp_thrmask_t mask;
+ uint16_t spread_thrs[MAX_SPREAD];
+ uint8_t allocated;
+ } sched_grp[NUM_SCHED_GRPS];
+
+ struct {
+ int num_pktin;
+ } pktio[NUM_PKTIO];
+ odp_ticketlock_t pktio_lock;
+
+ order_context_t order[CONFIG_MAX_SCHED_QUEUES];
+
+ struct {
+ uint32_t poll_time;
+ uint64_t sleep_time;
+ } powersave;
+
+ /* Scheduler interface config options (not used in fast path) */
+ schedule_config_t config_if;
+ uint32_t max_queues;
+ odp_atomic_u32_t next_rand;
+
+} sched_global_t;
+
+/* Check that queue[] variables are large enough */
+ODP_STATIC_ASSERT(NUM_SCHED_GRPS <= GRP_MASK_BITS, "Groups do not fit into group mask");
+ODP_STATIC_ASSERT(NUM_PRIO <= 256, "Prio_does_not_fit_8_bits");
+ODP_STATIC_ASSERT(MAX_SPREAD <= 256, "Spread_does_not_fit_8_bits");
+ODP_STATIC_ASSERT(CONFIG_QUEUE_MAX_ORD_LOCKS <= 256,
+ "Ordered_lock_count_does_not_fit_8_bits");
+ODP_STATIC_ASSERT(NUM_PKTIO <= 256, "Pktio_index_does_not_fit_8_bits");
+
+/* Global scheduler context */
+static sched_global_t *sched;
+
+/* Thread local scheduler context */
+static __thread sched_local_t sched_local;
+
+static void prio_grp_mask_init(void)
+{
+ int i;
+
+ for (i = 0; i < NUM_PRIO; i++)
+ odp_atomic_init_u64(&sched->prio_grp_mask[i], 0);
+}
+
+static inline void prio_grp_mask_set(int prio, int grp)
+{
+ uint64_t grp_mask = (uint64_t)1 << grp;
+ uint64_t mask = odp_atomic_load_u64(&sched->prio_grp_mask[prio]);
+
+ odp_atomic_store_u64(&sched->prio_grp_mask[prio], mask | grp_mask);
+
+ sched->prio_grp_count[prio][grp]++;
+}
+
+static inline void prio_grp_mask_clear(int prio, int grp)
+{
+ uint64_t grp_mask = (uint64_t)1 << grp;
+ uint64_t mask = odp_atomic_load_u64(&sched->prio_grp_mask[prio]);
+
+ sched->prio_grp_count[prio][grp]--;
+
+ if (sched->prio_grp_count[prio][grp] == 0)
+ odp_atomic_store_u64(&sched->prio_grp_mask[prio], mask &= (~grp_mask));
+}
+
+static inline uint64_t prio_grp_mask_check(int prio, uint64_t grp_mask)
+{
+ return odp_atomic_load_u64(&sched->prio_grp_mask[prio]) & grp_mask;
+}
+
+static int read_burst_size_conf(uint8_t out_tbl[], const char *conf_str,
+ int min_val, int max_val, int print)
+{
+ int burst_val[NUM_PRIO];
+ const int max_len = 256;
+ const int n = max_len - 1;
+ char line[max_len];
+ int len = 0;
+
+ if (_odp_libconfig_lookup_array(conf_str, burst_val, NUM_PRIO) !=
+ NUM_PRIO) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ return -1;
+ }
+
+ char str[strlen(conf_str) + 4];
+
+ snprintf(str, sizeof(str), "%s[]:", conf_str);
+ len += snprintf(&line[len], n - len, " %-38s", str);
+
+ for (int i = 0; i < NUM_PRIO; i++) {
+ int val = burst_val[i];
+
+ if (val > max_val || val < min_val) {
+ _ODP_ERR("Bad value for %s: %i\n", conf_str, val);
+ return -1;
+ }
+ len += snprintf(&line[len], n - len, " %3i", val);
+ if (val > 0)
+ out_tbl[i] = val;
+ }
+ if (print)
+ _ODP_PRINT("%s\n", line);
+
+ return 0;
+}
+
+static int read_config_file(sched_global_t *sched)
+{
+ const char *str;
+ int val = 0;
+
+ _ODP_PRINT("Scheduler config:\n");
+
+ str = "sched_basic.prio_spread";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > MAX_SPREAD || val < MIN_SPREAD) {
+ _ODP_ERR("Bad value %s = %u [min: %u, max: %u]\n", str, val,
+ MIN_SPREAD, MAX_SPREAD);
+ return -1;
+ }
+
+ sched->config.num_spread = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.prio_spread_weight";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > MAX_PREFER_WEIGHT || val < MIN_PREFER_WEIGHT) {
+ _ODP_ERR("Bad value %s = %u [min: %u, max: %u]\n", str, val,
+ MIN_PREFER_WEIGHT, MAX_PREFER_WEIGHT);
+ return -1;
+ }
+
+ sched->config.prefer_ratio = val + 1;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.load_balance";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > 1 || val < 0) {
+ _ODP_ERR("Bad value %s = %i\n", str, val);
+ return -1;
+ }
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ sched->load_balance = 1;
+ if (val == 0 || sched->config.num_spread == 1)
+ sched->load_balance = 0;
+
+ str = "sched_basic.order_stash_size";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > MAX_ORDERED_STASH || val < 0) {
+ _ODP_ERR("Bad value %s = %i [min: 0, max: %u]\n", str, val, MAX_ORDERED_STASH);
+ return -1;
+ }
+
+ sched->config.order_stash_size = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ /* Initialize default values for all queue types */
+ str = "sched_basic.burst_size_default";
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC], str, 1,
+ STASH_SIZE, 1) ||
+ read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_PARALLEL], str, 1,
+ STASH_SIZE, 0) ||
+ read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ORDERED], str, 1,
+ STASH_SIZE, 0))
+ return -1;
+
+ str = "sched_basic.burst_size_max";
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ATOMIC], str, 1,
+ BURST_MAX, 1) ||
+ read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_PARALLEL], str, 1,
+ BURST_MAX, 0) ||
+ read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ORDERED], str, 1,
+ BURST_MAX, 0))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC],
+ "sched_basic.burst_size_atomic", 0, STASH_SIZE, 1))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ATOMIC],
+ "sched_basic.burst_size_max_atomic", 0, BURST_MAX, 1))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_PARALLEL],
+ "sched_basic.burst_size_parallel", 0, STASH_SIZE, 1))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_PARALLEL],
+ "sched_basic.burst_size_max_parallel", 0, BURST_MAX, 1))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ORDERED],
+ "sched_basic.burst_size_ordered", 0, STASH_SIZE, 1))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ORDERED],
+ "sched_basic.burst_size_max_ordered", 0, BURST_MAX, 1))
+ return -1;
+
+ str = "sched_basic.group_enable.all";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ sched->config_if.group_enable.all = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.group_enable.worker";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ sched->config_if.group_enable.worker = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.group_enable.control";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ sched->config_if.group_enable.control = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.powersave.poll_time_nsec";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ sched->powersave.poll_time = _ODP_MAX(0, val);
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.powersave.sleep_time_nsec";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ val = _ODP_MAX(0, val);
+ val = _ODP_MIN((int)ODP_TIME_SEC_IN_NS - 1, val);
+ sched->powersave.sleep_time = val;
+ _ODP_PRINT(" %s: %i\n", str, val);
+
+ _ODP_PRINT(" dynamic load balance: %s\n", sched->load_balance ? "ON" : "OFF");
+
+ _ODP_PRINT("\n");
+
+ return 0;
+}
+
+/* Spread from thread or other index */
+static inline uint8_t spread_from_index(uint32_t index)
+{
+ return index % sched->config.num_spread;
+}
+
+static void sched_local_init(void)
+{
+ int i;
+ uint8_t spread, prefer_ratio;
+ uint8_t num_spread = sched->config.num_spread;
+ uint8_t offset = 1;
+
+ memset(&sched_local, 0, sizeof(sched_local_t));
+
+ sched_local.thr = odp_thread_id();
+ sched_local.sync_ctx = NO_SYNC_CONTEXT;
+ sched_local.stash.queue = ODP_QUEUE_INVALID;
+
+ spread = spread_from_index(sched_local.thr);
+ prefer_ratio = sched->config.prefer_ratio;
+
+ for (i = 0; i < SPREAD_TBL_SIZE; i++) {
+ sched_local.spread_tbl[i] = spread;
+
+ if (num_spread > 1 && (i % prefer_ratio) == 0) {
+ sched_local.spread_tbl[i] = spread_from_index(spread + offset);
+ offset++;
+ if (offset == num_spread)
+ offset = 1;
+ }
+ }
+}
+
+static int schedule_init_global(void)
+{
+ odp_shm_t shm;
+ int i, j, grp;
+ int prefer_ratio;
+ uint32_t ring_size, num_rings;
+
+ _ODP_DBG("Schedule init ... ");
+
+ shm = odp_shm_reserve("_odp_sched_basic_global",
+ sizeof(sched_global_t),
+ ODP_CACHE_LINE_SIZE,
+ 0);
+ if (shm == ODP_SHM_INVALID) {
+ _ODP_ERR("Schedule init: Shm reserve failed.\n");
+ return -1;
+ }
+
+ sched = odp_shm_addr(shm);
+ memset(sched, 0, sizeof(sched_global_t));
+
+ if (read_config_file(sched)) {
+ odp_shm_free(shm);
+ return -1;
+ }
+
+ sched->shm = shm;
+ prefer_ratio = sched->config.prefer_ratio;
+
+ /* When num_spread == 1, only spread_tbl[0] is used. */
+ sched->max_spread = (sched->config.num_spread - 1) * prefer_ratio;
+
+ /* Dynamic load balance may move all queues into a single ring.
+ * Ring size can be smaller with fixed spreading. */
+ if (sched->load_balance) {
+ ring_size = MAX_RING_SIZE;
+ num_rings = 1;
+ } else {
+ ring_size = MAX_RING_SIZE / sched->config.num_spread;
+ num_rings = sched->config.num_spread;
+ }
+
+ ring_size = _ODP_ROUNDUP_POWER2_U32(ring_size);
+ _ODP_ASSERT(ring_size <= MAX_RING_SIZE);
+ sched->ring_mask = ring_size - 1;
+
+ /* Each ring can hold in maximum ring_size-1 queues. Due to ring size round up,
+ * total capacity of rings may be larger than CONFIG_MAX_SCHED_QUEUES. */
+ sched->max_queues = sched->ring_mask * num_rings;
+ if (sched->max_queues > CONFIG_MAX_SCHED_QUEUES)
+ sched->max_queues = CONFIG_MAX_SCHED_QUEUES;
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ odp_ticketlock_init(&sched->mask_lock[grp]);
+
+ for (i = 0; i < NUM_PRIO; i++) {
+ for (j = 0; j < MAX_SPREAD; j++) {
+ prio_queue_t *prio_q;
+
+ prio_q = &sched->prio_q[grp][i][j];
+ ring_u32_init(&prio_q->ring);
+ }
+ }
+ }
+
+ odp_ticketlock_init(&sched->pktio_lock);
+ for (i = 0; i < NUM_PKTIO; i++)
+ sched->pktio[i].num_pktin = 0;
+
+ odp_ticketlock_init(&sched->grp_lock);
+ odp_atomic_init_u32(&sched->grp_epoch, 0);
+ odp_atomic_init_u32(&sched->next_rand, 0);
+
+ prio_grp_mask_init();
+
+ for (i = 0; i < NUM_SCHED_GRPS; i++) {
+ memset(sched->sched_grp[i].name, 0, ODP_SCHED_GROUP_NAME_LEN);
+ odp_thrmask_zero(&sched->sched_grp[i].mask);
+ }
+
+ sched->sched_grp[ODP_SCHED_GROUP_ALL].allocated = 1;
+ sched->sched_grp[ODP_SCHED_GROUP_WORKER].allocated = 1;
+ sched->sched_grp[ODP_SCHED_GROUP_CONTROL].allocated = 1;
+ strncpy(sched->sched_grp[ODP_SCHED_GROUP_ALL].name, "__SCHED_GROUP_ALL",
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ strncpy(sched->sched_grp[ODP_SCHED_GROUP_WORKER].name, "__SCHED_GROUP_WORKER",
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ strncpy(sched->sched_grp[ODP_SCHED_GROUP_CONTROL].name, "__SCHED_GROUP_CONTROL",
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+
+
+ odp_thrmask_setall(&sched->mask_all);
+
+ _ODP_DBG("done\n");
+
+ return 0;
+}
+
+static int schedule_term_global(void)
+{
+ int ret = 0;
+ int rc = 0;
+ int i, j, grp;
+ uint32_t ring_mask = sched->ring_mask;
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ for (i = 0; i < NUM_PRIO; i++) {
+ for (j = 0; j < MAX_SPREAD; j++) {
+ ring_u32_t *ring;
+ uint32_t qi;
+
+ ring = &sched->prio_q[grp][i][j].ring;
+
+ while (ring_u32_deq(ring, ring_mask, &qi)) {
+ odp_event_t events[1];
+ int num;
+
+ num = _odp_sched_queue_deq(qi, events, 1, 1);
+
+ if (num > 0)
+ _ODP_ERR("Queue not empty\n");
+ }
+ }
+ }
+ }
+
+ ret = odp_shm_free(sched->shm);
+ if (ret < 0) {
+ _ODP_ERR("Shm free failed for odp_scheduler");
+ rc = -1;
+ }
+
+ return rc;
+}
+
/* Per-thread scheduler initialization: reset this thread's local state. */
static int schedule_init_local(void)
{
	sched_local_init();

	return 0;
}
+
/* Update a group's thread mask and bump the global group epoch.
 * The release ordering publishes the new mask before other threads can
 * observe the incremented epoch, which makes them refresh their local
 * group tables (see grp_update_tbl()). Caller holds grp_lock. */
static inline void grp_update_mask(int grp, const odp_thrmask_t *new_mask)
{
	odp_thrmask_copy(&sched->sched_grp[grp].mask, new_mask);
	odp_atomic_add_rel_u32(&sched->grp_epoch, 1);
}
+
/* Rebuild the calling thread's local table of scheduling groups it belongs
 * to. Called when the global group epoch has changed. Returns the number of
 * groups the thread is a member of. */
static inline int grp_update_tbl(void)
{
	int i;
	int num = 0;
	int thr = sched_local.thr;
	uint64_t mask = 0;

	odp_ticketlock_lock(&sched->grp_lock);

	for (i = 0; i < NUM_SCHED_GRPS; i++) {
		if (sched->sched_grp[i].allocated == 0)
			continue;

		if (odp_thrmask_isset(&sched->sched_grp[i].mask, thr)) {
			sched_local.grp[num] = i;
			num++;
			mask |= (uint64_t)1 << i;
		}
	}

	odp_ticketlock_unlock(&sched->grp_lock);

	/* Cache membership also as a bitmask for fast priority/group checks */
	sched_local.grp_mask = mask;
	sched_local.grp_idx = 0;
	sched_local.num_grp = num;

	return num;
}
+
+static uint32_t schedule_max_ordered_locks(void)
+{
+ return CONFIG_QUEUE_MAX_ORD_LOCKS;
+}
+
/* Lowest priority value of the API priority range (always zero). */
static int schedule_min_prio(void)
{
	return 0;
}
+
+static int schedule_max_prio(void)
+{
+ return NUM_PRIO - 1;
+}
+
/* Default priority: the midpoint of the API priority range. */
static int schedule_default_prio(void)
{
	return schedule_max_prio() / 2;
}
+
+static int schedule_num_prio(void)
+{
+ return NUM_PRIO;
+}
+
/* Convert an API priority (higher value = higher priority) into an internal
 * priority level (0 = highest). */
static inline int prio_level_from_api(int api_prio)
{
	return schedule_max_prio() - api_prio;
}
+
/* Decrement the queue count of a (group, prio, spread) bucket under the
 * group's mask lock. */
static inline void dec_queue_count(int grp, int prio, int spr)
{
	odp_ticketlock_lock(&sched->mask_lock[grp]);

	sched->prio_q_count[grp][prio][spr]--;

	/* Clear mask bit only when the last queue is removed */
	if (sched->prio_q_count[grp][prio][spr] == 0)
		sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << spr));

	odp_ticketlock_unlock(&sched->mask_lock[grp]);
}
+
/* Move one queue from spread 'old_spr' to 'new_spr' in the bucket counters
 * and spread masks, under the group's mask lock. */
static inline void update_queue_count(int grp, int prio, int old_spr, int new_spr)
{
	odp_ticketlock_lock(&sched->mask_lock[grp]);

	sched->prio_q_mask[grp][prio] |= 1 << new_spr;
	sched->prio_q_count[grp][prio][new_spr]++;

	sched->prio_q_count[grp][prio][old_spr]--;

	/* Clear mask bit only when the last queue leaves the old spread */
	if (sched->prio_q_count[grp][prio][old_spr] == 0)
		sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << old_spr));

	odp_ticketlock_unlock(&sched->mask_lock[grp]);
}
+
/* Pick a spread for a new queue in (grp, prio): the spread with the fewest
 * queues, choosing randomly between ties. Updates the bucket counters and
 * spread mask under the group's mask lock. Returns the selected spread. */
static uint8_t allocate_spread(int grp, int prio)
{
	uint8_t i, num_min, spr;
	uint32_t num;
	uint32_t min = UINT32_MAX;
	uint8_t num_spread = sched->config.num_spread;
	uint8_t min_spr[num_spread];

	num_min = 1;
	min_spr[0] = 0;

	odp_ticketlock_lock(&sched->mask_lock[grp]);

	/* Find spread(s) with the minimum number of queues */
	for (i = 0; i < num_spread; i++) {
		num = sched->prio_q_count[grp][prio][i];
		if (num < min) {
			min = num;
			min_spr[0] = i;
			num_min = 1;
		} else if (num == min) {
			min_spr[num_min] = i;
			num_min++;
		}
	}

	spr = min_spr[0];

	/* When there are multiple minimum spreads, select one randomly */
	if (num_min > 1) {
		uint32_t next_rand = odp_atomic_fetch_inc_u32(&sched->next_rand);
		uint8_t rand = sched_random_u8[next_rand % RANDOM_TBL_SIZE];

		spr = min_spr[rand % num_min];
	}

	sched->prio_q_mask[grp][prio] |= 1 << spr;
	sched->prio_q_count[grp][prio][spr]++;

	odp_ticketlock_unlock(&sched->mask_lock[grp]);

	return spr;
}
+
/* Register a new scheduled queue: validate the group, record it in the
 * priority/group mask, allocate a spread, and initialize per-queue state
 * including ordered context counters. Returns 0 on success, -1 on error. */
static int schedule_create_queue(uint32_t queue_index,
				 const odp_schedule_param_t *sched_param)
{
	int i;
	uint8_t spread;
	int grp = sched_param->group;
	int prio = prio_level_from_api(sched_param->prio);

	if (odp_global_rw->schedule_configured == 0) {
		_ODP_ERR("Scheduler has not been configured\n");
		return -1;
	}

	if (grp < 0 || grp >= NUM_SCHED_GRPS) {
		_ODP_ERR("Bad schedule group %i\n", grp);
		return -1;
	}
	/* Predefined groups may be disabled via odp_schedule_config() */
	if (grp == ODP_SCHED_GROUP_ALL && !sched->config_if.group_enable.all) {
		_ODP_ERR("Trying to use disabled ODP_SCHED_GROUP_ALL\n");
		return -1;
	}
	if (grp == ODP_SCHED_GROUP_CONTROL && !sched->config_if.group_enable.control) {
		_ODP_ERR("Trying to use disabled ODP_SCHED_GROUP_CONTROL\n");
		return -1;
	}
	if (grp == ODP_SCHED_GROUP_WORKER && !sched->config_if.group_enable.worker) {
		_ODP_ERR("Trying to use disabled ODP_SCHED_GROUP_WORKER\n");
		return -1;
	}

	odp_ticketlock_lock(&sched->grp_lock);

	if (sched->sched_grp[grp].allocated == 0) {
		odp_ticketlock_unlock(&sched->grp_lock);
		_ODP_ERR("Group not created: %i\n", grp);
		return -1;
	}

	prio_grp_mask_set(prio, grp);

	odp_ticketlock_unlock(&sched->grp_lock);

	spread = allocate_spread(grp, prio);

	sched->queue[queue_index].grp = grp;
	sched->queue[queue_index].prio = prio;
	sched->queue[queue_index].spread = spread;
	sched->queue[queue_index].sync = sched_param->sync;
	sched->queue[queue_index].order_lock_count = sched_param->lock_count;
	sched->queue[queue_index].poll_pktin = 0;
	sched->queue[queue_index].pktio_index = 0;
	sched->queue[queue_index].pktin_index = 0;

	/* Reset ordered context sequencing for this queue */
	odp_atomic_init_u64(&sched->order[queue_index].ctx, 0);
	odp_atomic_init_u64(&sched->order[queue_index].next_ctx, 0);

	for (i = 0; i < CONFIG_QUEUE_MAX_ORD_LOCKS; i++)
		odp_atomic_init_u64(&sched->order[queue_index].lock[i], 0);

	return 0;
}
+
+static inline uint8_t sched_sync_type(uint32_t queue_index)
+{
+ return sched->queue[queue_index].sync;
+}
+
/* Unregister a scheduled queue: release its spread slot, clear the
 * priority/group mask, and reset per-queue fields. Reports an error when an
 * ordered queue is destroyed with its reorder window still incomplete. */
static void schedule_destroy_queue(uint32_t queue_index)
{
	int grp = sched->queue[queue_index].grp;
	int prio = sched->queue[queue_index].prio;
	int spread = sched->queue[queue_index].spread;

	dec_queue_count(grp, prio, spread);

	odp_ticketlock_lock(&sched->grp_lock);
	prio_grp_mask_clear(prio, grp);
	odp_ticketlock_unlock(&sched->grp_lock);

	sched->queue[queue_index].grp = 0;
	sched->queue[queue_index].prio = 0;
	sched->queue[queue_index].spread = 0;

	/* Ordered contexts handed out (next_ctx) must all have completed (ctx) */
	if ((sched_sync_type(queue_index) == ODP_SCHED_SYNC_ORDERED) &&
	    odp_atomic_load_u64(&sched->order[queue_index].ctx) !=
	    odp_atomic_load_u64(&sched->order[queue_index].next_ctx))
		_ODP_ERR("queue reorder incomplete\n");
}
+
/* Make a queue eligible for scheduling by pushing its index onto the ring of
 * its (group, prio, spread) bucket. Always returns 0. */
static int schedule_sched_queue(uint32_t queue_index)
{
	int grp = sched->queue[queue_index].grp;
	int prio = sched->queue[queue_index].prio;
	int spread = sched->queue[queue_index].spread;
	ring_u32_t *ring = &sched->prio_q[grp][prio][spread].ring;

	ring_u32_enq(ring, sched->ring_mask, queue_index);
	return 0;
}
+
/* Start scheduler-driven polling of a pktio's input queues. Marks each given
 * scheduled queue as a pktin poller and inserts it into scheduling. */
static void schedule_pktio_start(int pktio_index, int num_pktin,
				 int pktin_idx[], odp_queue_t queue[])
{
	int i;
	uint32_t qi;

	sched->pktio[pktio_index].num_pktin = num_pktin;

	for (i = 0; i < num_pktin; i++) {
		qi = queue_to_index(queue[i]);
		sched->queue[qi].poll_pktin = 1;
		sched->queue[qi].pktio_index = pktio_index;
		sched->queue[qi].pktin_index = pktin_idx[i];

		_ODP_ASSERT(pktin_idx[i] <= MAX_PKTIN_INDEX);

		/* Start polling */
		_odp_sched_queue_set_status(qi, QUEUE_STATUS_SCHED);
		schedule_sched_queue(qi);
	}
}
+
/* Release the atomic context held by this thread: re-enqueue the stashed
 * queue so other threads may schedule it, and clear the local sync context. */
static inline void release_atomic(void)
{
	uint32_t qi = sched_local.stash.qi;
	ring_u32_t *ring = sched_local.stash.ring;

	/* Release current atomic queue */
	ring_u32_enq(ring, sched->ring_mask, qi);

	/* We don't hold sync context anymore */
	sched_local.sync_ctx = NO_SYNC_CONTEXT;
}
+
+static void schedule_release_atomic(void)
+{
+ if (sched_local.sync_ctx == ODP_SCHED_SYNC_ATOMIC &&
+ sched_local.stash.num_ev == 0)
+ release_atomic();
+}
+
/* Return non-zero when the queue's current ordered context equals this
 * thread's context, i.e. it is this thread's turn to act in order. The
 * acquire load pairs with the release increment in release_ordered(). */
static inline int ordered_own_turn(uint32_t queue_index)
{
	uint64_t ctx;

	ctx = odp_atomic_load_acq_u64(&sched->order[queue_index].ctx);

	return ctx == sched_local.ordered.ctx;
}
+
+static inline void wait_for_order(uint32_t queue_index)
+{
+ /* Busy loop to synchronize ordered processing */
+ while (1) {
+ if (ordered_own_turn(queue_index))
+ break;
+ odp_cpu_pause();
+ }
+}
+
/**
 * Perform stashed enqueue operations
 *
 * Should be called only when already in order. Flushes all locally stashed
 * enqueues to their destination queues; packets that do not fit into a
 * destination queue are dropped and freed.
 */
static inline void ordered_stash_release(void)
{
	int i;

	for (i = 0; i < sched_local.ordered.stash_num; i++) {
		odp_queue_t queue;
		_odp_event_hdr_t **event_hdr;
		int num, num_enq;

		queue = sched_local.ordered.stash[i].queue;
		event_hdr = sched_local.ordered.stash[i].event_hdr;
		num = sched_local.ordered.stash[i].num;

		num_enq = odp_queue_enq_multi(queue,
					      (odp_event_t *)event_hdr, num);

		/* Drop packets that were not enqueued */
		if (odp_unlikely(num_enq < num)) {
			if (odp_unlikely(num_enq < 0))
				num_enq = 0;

			_ODP_DBG("Dropped %i packets\n", num - num_enq);
			_odp_event_free_multi(&event_hdr[num_enq], num - num_enq);
		}
	}
	sched_local.ordered.stash_num = 0;
}
+
/* Release the ordered context held by this thread. Waits for its turn,
 * releases unused ordered locks, flushes the enqueue stash, and finally
 * advances the queue's context so the next thread can proceed. */
static inline void release_ordered(void)
{
	uint32_t qi;
	uint32_t i;

	qi = sched_local.ordered.src_queue;

	wait_for_order(qi);

	/* Release all ordered locks */
	for (i = 0; i < sched->queue[qi].order_lock_count; i++) {
		if (!sched_local.ordered.lock_called.u8[i])
			odp_atomic_store_rel_u64(&sched->order[qi].lock[i],
						 sched_local.ordered.ctx + 1);
	}

	sched_local.ordered.lock_called.all = 0;
	sched_local.ordered.in_order = 0;

	/* We don't hold sync context anymore */
	sched_local.sync_ctx = NO_SYNC_CONTEXT;

	ordered_stash_release();

	/* Next thread can continue processing */
	odp_atomic_add_rel_u64(&sched->order[qi].ctx, 1);
}
+
+static void schedule_release_ordered(void)
+{
+ if (odp_unlikely((sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED) ||
+ sched_local.stash.num_ev))
+ return;
+
+ release_ordered();
+}
+
/* Per-thread scheduler termination. Fails if the thread still has locally
 * pre-scheduled events; otherwise releases any held sync context. */
static int schedule_term_local(void)
{
	if (sched_local.stash.num_ev) {
		_ODP_ERR("Locally pre-scheduled events exist.\n");
		return -1;
	}

	if (sched_local.sync_ctx == ODP_SCHED_SYNC_ATOMIC)
		schedule_release_atomic();
	else if (sched_local.sync_ctx == ODP_SCHED_SYNC_ORDERED)
		schedule_release_ordered();

	return 0;
}
+
/* Fill in the default schedule configuration from current capabilities. */
static void schedule_config_init(odp_schedule_config_t *config)
{
	config->num_queues = sched->max_queues;
	config->queue_size = _odp_queue_glb->config.max_queue_size;
	config->sched_group.all = sched->config_if.group_enable.all;
	config->sched_group.control = sched->config_if.group_enable.control;
	config->sched_group.worker = sched->config_if.group_enable.worker;
}
+
/* Empty a predefined group's thread mask and mark it unallocated. Only valid
 * for the predefined groups (ALL/WORKER/CONTROL); aborts otherwise.
 * Caller holds grp_lock. */
static void schedule_group_clear(odp_schedule_group_t group)
{
	odp_thrmask_t zero;

	odp_thrmask_zero(&zero);

	if (group < 0 || group > ODP_SCHED_GROUP_CONTROL)
		_ODP_ABORT("Invalid scheduling group\n");

	grp_update_mask(group, &zero);
	sched->sched_grp[group].allocated = 0;
}
+
/* Apply the application's schedule configuration: record which predefined
 * groups are enabled and clear the disabled ones. Always returns 0. */
static int schedule_config(const odp_schedule_config_t *config)
{
	odp_ticketlock_lock(&sched->grp_lock);

	sched->config_if.group_enable.all = config->sched_group.all;
	sched->config_if.group_enable.control = config->sched_group.control;
	sched->config_if.group_enable.worker = config->sched_group.worker;

	/* Remove existing threads from predefined scheduling groups. */
	if (!config->sched_group.all)
		schedule_group_clear(ODP_SCHED_GROUP_ALL);

	if (!config->sched_group.worker)
		schedule_group_clear(ODP_SCHED_GROUP_WORKER);

	if (!config->sched_group.control)
		schedule_group_clear(ODP_SCHED_GROUP_CONTROL);

	odp_ticketlock_unlock(&sched->grp_lock);

	return 0;
}
+
/* Spread load after adding 'num' queues: queues per serving thread, scaled
 * by QUEUE_LOAD. A spread with no threads reports an infinite load so it is
 * never selected. Caller holds the group's mask lock. */
static inline uint32_t spread_load(int grp, int prio, int spr, int num)
{
	uint32_t num_q, num_thr;

	num_q = sched->prio_q_count[grp][prio][spr];
	num_thr = sched->sched_grp[grp].spread_thrs[spr];

	if (num_thr == 0)
		return UINT32_MAX;

	return ((num_q + num) * QUEUE_LOAD) / num_thr;
}
+
+static inline int balance_spread(int grp, int prio, int cur_spr)
+{
+ int spr;
+ uint64_t cur_load, min_load, load;
+ int num_spread = sched->config.num_spread;
+ int new_spr = cur_spr;
+
+ cur_load = spread_load(grp, prio, cur_spr, 0);
+ min_load = cur_load;
+
+ for (spr = 0; spr < num_spread; spr++) {
+ if (spr == cur_spr)
+ continue;
+
+ load = spread_load(grp, prio, spr, 1);
+
+ /* Move queue if improvement is larger than marginal */
+ if ((load + QUEUE_LOAD_MARGIN) < min_load) {
+ new_spr = spr;
+ min_load = load;
+ }
+ }
+
+ return new_spr;
+}
+
+static inline int copy_from_stash(odp_event_t out_ev[], uint32_t max)
+{
+ int i = 0;
+
+ while (sched_local.stash.num_ev && max) {
+ out_ev[i] = sched_local.stash.ev[sched_local.stash.ev_index];
+ sched_local.stash.ev_index++;
+ sched_local.stash.num_ev--;
+ max--;
+ i++;
+ }
+
+ return i;
+}
+
/* Intercept enqueues made while holding an ordered context. When it is not
 * yet this thread's turn, the enqueue is stashed locally (unless the stash
 * is full or the destination may drop packets, in which case the thread
 * waits for its turn). Returns 1 when the operation was handled here (with
 * *ret set to the enqueued count), 0 when the caller should enqueue
 * normally. */
static int schedule_ord_enq_multi(odp_queue_t dst_queue, void *event_hdr[],
				  int num, int *ret)
{
	int i;
	uint32_t stash_num;
	queue_entry_t *dst_qentry;
	uint32_t src_queue;

	/* This check is done for every queue enqueue operation, also for plain
	 * queues. Return fast when not holding a scheduling context. */
	if (odp_likely(sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED))
		return 0;

	if (sched_local.ordered.in_order)
		return 0;

	dst_qentry = qentry_from_handle(dst_queue);

	if (dst_qentry->param.order == ODP_QUEUE_ORDER_IGNORE)
		return 0;

	src_queue = sched_local.ordered.src_queue;
	stash_num = sched_local.ordered.stash_num;

	if (ordered_own_turn(src_queue)) {
		/* Own turn, so can do enqueue directly. */
		sched_local.ordered.in_order = 1;
		ordered_stash_release();
		return 0;
	}

	/* Pktout may drop packets, so the operation cannot be stashed. */
	if (dst_qentry->pktout.pktio != ODP_PKTIO_INVALID ||
	    odp_unlikely(stash_num >= sched->config.order_stash_size)) {
		/* If the local stash is full, wait until it is our turn and
		 * then release the stash and do enqueue directly. */
		wait_for_order(src_queue);

		sched_local.ordered.in_order = 1;

		ordered_stash_release();
		return 0;
	}

	/* Record the enqueue into the local stash for later release */
	sched_local.ordered.stash[stash_num].queue = dst_queue;
	sched_local.ordered.stash[stash_num].num = num;
	for (i = 0; i < num; i++)
		sched_local.ordered.stash[stash_num].event_hdr[i] = event_hdr[i];

	sched_local.ordered.stash_num++;

	*ret = num;
	return 1;
}
+
+static inline int queue_is_pktin(uint32_t queue_index)
+{
+ return sched->queue[queue_index].poll_pktin;
+}
+
/* Poll a packet input queue. With 'direct_recv' the received events are
 * written straight into ev_tbl[]; otherwise they are enqueued onto the
 * scheduled queue (dropping what does not fit). Returns the number of
 * events received/enqueued, 0 when empty, or negative when the pktio has
 * been stopped or closed. */
static inline int poll_pktin(uint32_t qi, int direct_recv,
			     odp_event_t ev_tbl[], int max_num)
{
	int pktio_index, pktin_index, num, num_pktin;
	_odp_event_hdr_t **hdr_tbl;
	int ret;
	void *q_int;
	_odp_event_hdr_t *b_hdr[CONFIG_BURST_SIZE];

	hdr_tbl = (_odp_event_hdr_t **)ev_tbl;

	if (!direct_recv) {
		hdr_tbl = b_hdr;

		/* Limit burst to max queue enqueue size */
		if (max_num > CONFIG_BURST_SIZE)
			max_num = CONFIG_BURST_SIZE;
	}

	pktio_index = sched->queue[qi].pktio_index;
	pktin_index = sched->queue[qi].pktin_index;

	num = _odp_sched_cb_pktin_poll(pktio_index, pktin_index, hdr_tbl, max_num);

	if (num == 0)
		return 0;

	/* Pktio stopped or closed. Call stop_finalize when we have stopped
	 * polling all pktin queues of the pktio. */
	if (odp_unlikely(num < 0)) {
		odp_ticketlock_lock(&sched->pktio_lock);
		sched->pktio[pktio_index].num_pktin--;
		num_pktin = sched->pktio[pktio_index].num_pktin;
		odp_ticketlock_unlock(&sched->pktio_lock);

		_odp_sched_queue_set_status(qi, QUEUE_STATUS_NOTSCHED);

		if (num_pktin == 0)
			_odp_sched_cb_pktio_stop_finalize(pktio_index);

		return num;
	}

	if (direct_recv)
		return num;

	q_int = qentry_from_index(qi);

	ret = odp_queue_enq_multi(q_int, (odp_event_t *)b_hdr, num);

	/* Drop packets that were not enqueued */
	if (odp_unlikely(ret < num)) {
		int num_enq = ret;

		if (odp_unlikely(ret < 0))
			num_enq = 0;

		_ODP_DBG("Dropped %i packets\n", num - num_enq);
		_odp_event_free_multi(&b_hdr[num_enq], num - num_enq);
	}

	return ret;
}
+
/* Try to schedule events from one (group, priority) pair. Iterates the
 * spreads starting from 'first_spr', dequeues a queue index from a spread
 * ring, pulls events from that queue (or polls its pktin), acquires the
 * queue's sync context when needed, and optionally rebalances the queue to a
 * less loaded spread. Returns the number of events written to out_ev[], or 0
 * when nothing was found. */
static inline int schedule_grp_prio(odp_queue_t *out_queue, odp_event_t out_ev[], uint32_t max_num,
				    int grp, int prio, int first_spr, int balance)
{
	int spr, new_spr, i, ret;
	uint32_t qi;
	int num_spread = sched->config.num_spread;
	uint32_t ring_mask = sched->ring_mask;
	/* Per-sync-type burst limits for this priority level */
	const uint32_t burst_def_sync[NUM_SCHED_SYNC] = {
		sched->config.burst_default[ODP_SCHED_SYNC_PARALLEL][prio],
		sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC][prio],
		sched->config.burst_default[ODP_SCHED_SYNC_ORDERED][prio]};
	const uint32_t burst_max_sync[NUM_SCHED_SYNC] = {
		sched->config.burst_max[ODP_SCHED_SYNC_PARALLEL][prio],
		sched->config.burst_max[ODP_SCHED_SYNC_ATOMIC][prio],
		sched->config.burst_max[ODP_SCHED_SYNC_ORDERED][prio]};

	/* Select the first spread based on weights */
	spr = first_spr;

	for (i = 0; i < num_spread;) {
		int num;
		uint8_t sync_ctx, ordered;
		odp_queue_t handle;
		ring_u32_t *ring;
		int pktin;
		uint32_t max_deq;
		int stashed = 1;
		odp_event_t *ev_tbl = sched_local.stash.ev;

		if (spr >= num_spread)
			spr = 0;

		/* No queues allocated to this spread */
		if (odp_unlikely((sched->prio_q_mask[grp][prio] & (1 << spr)) == 0)) {
			i++;
			spr++;
			continue;
		}

		ring = &sched->prio_q[grp][prio][spr].ring;

		/* Get queue index from the spread queue */
		if (ring_u32_deq(ring, ring_mask, &qi) == 0) {
			/* Spread queue is empty */
			i++;
			spr++;
			continue;
		}

		sync_ctx = sched_sync_type(qi);
		ordered = (sync_ctx == ODP_SCHED_SYNC_ORDERED);
		max_deq = burst_def_sync[sync_ctx];

		/* When application's array is larger than default burst
		 * size, output all events directly there. Also, ordered
		 * queues are not stashed locally to improve
		 * parallelism. Ordered context can only be released
		 * when the local cache is empty. */
		if (max_num > max_deq || ordered) {
			const uint32_t burst_max = burst_max_sync[sync_ctx];

			stashed = 0;
			ev_tbl = out_ev;
			max_deq = max_num;
			if (max_num > burst_max)
				max_deq = burst_max;
		}

		pktin = queue_is_pktin(qi);

		/* Update queue spread before dequeue. Dequeue changes status of an empty
		 * queue, which enables a following enqueue operation to insert the queue
		 * back into scheduling (with new spread). */
		if (odp_unlikely(balance)) {
			new_spr = balance_spread(grp, prio, spr);

			if (new_spr != spr) {
				sched->queue[qi].spread = new_spr;
				ring = &sched->prio_q[grp][prio][new_spr].ring;
				update_queue_count(grp, prio, spr, new_spr);
			}
		}

		num = _odp_sched_queue_deq(qi, ev_tbl, max_deq, !pktin);

		if (odp_unlikely(num < 0)) {
			/* Remove destroyed queue from scheduling. Continue scheduling
			 * the same group/prio/spread. */
			continue;
		}

		if (num == 0) {
			int direct_recv = !ordered;
			int num_pkt;

			if (!pktin) {
				/* Remove empty queue from scheduling */
				continue;
			}

			/* Poll packet input queue */
			num_pkt = poll_pktin(qi, direct_recv, ev_tbl, max_deq);

			if (odp_unlikely(num_pkt < 0)) {
				/* Pktio has been stopped or closed. Stop polling
				 * the packet input queue. */
				continue;
			}

			if (num_pkt == 0 || !direct_recv) {
				/* No packets to be returned. Continue scheduling
				 * packet input queue even when it is empty. */
				ring_u32_enq(ring, ring_mask, qi);

				/* Continue scheduling from the next spread */
				i++;
				spr++;
				continue;
			}

			/* Process packets from an atomic or parallel queue right away. */
			num = num_pkt;
		}

		if (ordered) {
			uint64_t ctx;
			odp_atomic_u64_t *next_ctx;

			/* Reserve the next ordered context of the queue */
			next_ctx = &sched->order[qi].next_ctx;
			ctx = odp_atomic_fetch_inc_u64(next_ctx);

			sched_local.ordered.ctx = ctx;
			sched_local.ordered.src_queue = qi;

			/* Continue scheduling ordered queues */
			ring_u32_enq(ring, ring_mask, qi);
			sched_local.sync_ctx = sync_ctx;

		} else if (sync_ctx == ODP_SCHED_SYNC_ATOMIC) {
			/* Hold queue during atomic access */
			sched_local.stash.qi = qi;
			sched_local.stash.ring = ring;
			sched_local.sync_ctx = sync_ctx;
		} else {
			/* Continue scheduling parallel queues */
			ring_u32_enq(ring, ring_mask, qi);
		}

		handle = queue_from_index(qi);

		if (stashed) {
			sched_local.stash.num_ev = num;
			sched_local.stash.ev_index = 0;
			sched_local.stash.queue = handle;
			ret = copy_from_stash(out_ev, max_num);
		} else {
			sched_local.stash.num_ev = 0;
			ret = num;
		}

		/* Output the source queue handle */
		if (out_queue)
			*out_queue = handle;

		return ret;
	}

	return 0;
}
+
/*
 * Schedule queues
 *
 * One scheduling attempt: serve stashed events first, release any held sync
 * context, refresh group membership when the epoch changed, then walk
 * priority levels (highest first) and this thread's groups looking for work.
 */
static inline int do_schedule(odp_queue_t *out_q, odp_event_t out_ev[], uint32_t max_num)
{
	int i, num_grp, ret, spr, first_id, grp_id, grp, prio;
	uint32_t sched_round;
	uint16_t spread_round;
	uint32_t epoch;
	uint64_t my_groups;
	int balance = 0;

	/* Serve previously stashed events before scheduling new work */
	if (sched_local.stash.num_ev) {
		ret = copy_from_stash(out_ev, max_num);

		if (out_q)
			*out_q = sched_local.stash.queue;

		return ret;
	}

	/* Release schedule context */
	if (sched_local.sync_ctx == ODP_SCHED_SYNC_ATOMIC)
		release_atomic();
	else if (sched_local.sync_ctx == ODP_SCHED_SYNC_ORDERED)
		release_ordered();

	if (odp_unlikely(sched_local.pause))
		return 0;

	sched_round = sched_local.sched_round++;

	/* Each thread prefers a priority queue. Spread weight table avoids
	 * starvation of other priority queues on low thread counts. */
	spread_round = sched_local.spread_round;

	if (odp_likely(sched->load_balance)) {
		/* Spread balance is checked max_spread times in every BALANCE_ROUNDS_M1 + 1
		 * scheduling rounds. */
		if (odp_unlikely(sched_local.balance_on)) {
			balance = 1;

			if (sched_local.balance_start == spread_round)
				sched_local.balance_on = 0;
		}

		if (odp_unlikely((sched_round & BALANCE_ROUNDS_M1) == 0)) {
			sched_local.balance_start = spread_round;
			sched_local.balance_on = 1;
		}
	}

	/* Advance the spread weight table index (wraps at max_spread) */
	if (odp_unlikely(spread_round + 1 >= sched->max_spread))
		sched_local.spread_round = 0;
	else
		sched_local.spread_round = spread_round + 1;

	spr = sched_local.spread_tbl[spread_round];

	/* Refresh local group table when group membership has changed */
	epoch = odp_atomic_load_acq_u32(&sched->grp_epoch);
	num_grp = sched_local.num_grp;

	if (odp_unlikely(sched_local.grp_epoch != epoch)) {
		num_grp = grp_update_tbl();
		sched_local.grp_epoch = epoch;
	}

	if (odp_unlikely(num_grp == 0))
		return 0;

	/* Rotate the starting group between calls for fairness */
	my_groups = sched_local.grp_mask;
	first_id = sched_local.grp_idx;
	sched_local.grp_idx = (first_id + 1) % num_grp;

	for (prio = 0; prio < NUM_PRIO; prio++) {
		grp_id = first_id;

		if (prio_grp_mask_check(prio, my_groups) == 0) {
			/* My groups do not have queues at this priority level, continue to
			 * the next level.
			 *
			 * As a performance optimization, prio_grp_mask[] is checked without
			 * taking the lock. Masks change infrequently and usage of an old mask
			 * just leads into searching events from old priority levels,
			 * new levels are likely used on the next schedule call. */
			continue;
		}

		for (i = 0; i < num_grp; i++) {
			grp = sched_local.grp[grp_id];

			grp_id++;
			if (odp_unlikely(grp_id >= num_grp))
				grp_id = 0;

			if (sched->prio_q_mask[grp][prio] == 0) {
				/* Group does not have queues at this priority level */
				continue;
			}

			/* Schedule events from the selected group and priority level */
			ret = schedule_grp_prio(out_q, out_ev, max_num, grp, prio, spr, balance);

			if (odp_likely(ret))
				return ret;
		}
	}

	return 0;
}
+
+static inline int schedule_run(odp_queue_t *out_queue, odp_event_t out_ev[], uint32_t max_num)
+{
+ timer_run(1);
+
+ return do_schedule(out_queue, out_ev, max_num);
+}
+
/* Scheduling loop honoring the wait parameter: ODP_SCHED_NO_WAIT tries once,
 * ODP_SCHED_WAIT loops forever, otherwise loops until 'wait' nanoseconds
 * have elapsed. Timers are run on every iteration. */
static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
				odp_event_t out_ev[], uint32_t max_num)
{
	odp_time_t next;
	int first = 1;
	int ret;

	while (1) {
		ret = do_schedule(out_queue, out_ev, max_num);
		if (ret) {
			timer_run(2);
			break;
		}
		timer_run(1);

		if (wait == ODP_SCHED_WAIT)
			continue;

		if (wait == ODP_SCHED_NO_WAIT)
			break;

		if (first) {
			/* Deadline is computed lazily on the first empty round */
			next = odp_time_add_ns(odp_time_local(), wait);
			first = 0;
			continue;
		}

		if (odp_time_cmp(next, odp_time_local()) < 0)
			break;
	}

	return ret;
}
+
/* Power-saving variant of schedule_loop(). After polling actively for
 * powersave.poll_time nanoseconds without events, the thread starts
 * sleeping between attempts (bounded by powersave.sleep_time, the next
 * timer expiration, and the remaining wait time). */
static inline int schedule_loop_sleep(odp_queue_t *out_queue, uint64_t wait,
				      odp_event_t out_ev[], uint32_t max_num)
{
	int ret;
	odp_time_t start, end, current, start_sleep;
	int first = 1, sleep = 0;

	while (1) {
		ret = do_schedule(out_queue, out_ev, max_num);
		if (ret) {
			timer_run(2);
			break;
		}
		/* Force a timer scan while sleeping so expirations are not missed */
		uint64_t next = timer_run(sleep ? TIMER_SCAN_FORCE : 1);

		if (first) {
			start = odp_time_local();
			start_sleep = odp_time_add_ns(start, sched->powersave.poll_time);
			/* 'end' is needed (and initialized) only for timed waits */
			if (wait != ODP_SCHED_WAIT)
				end = odp_time_add_ns(start, wait);
			first = 0;
			continue;
		}

		if (sleep && next) {
			uint64_t sleep_nsec = _ODP_MIN(sched->powersave.sleep_time, next);

			if (wait != ODP_SCHED_WAIT) {
				uint64_t nsec_to_end = odp_time_diff_ns(end, current);

				sleep_nsec = _ODP_MIN(sleep_nsec, nsec_to_end);
			}

			struct timespec ts = { 0, sleep_nsec };

			nanosleep(&ts, NULL);
		}

		/* Time is read only when needed: always while polling, and
		 * while sleeping only when a wait deadline must be checked */
		if (!sleep || wait != ODP_SCHED_WAIT)
			current = odp_time_local();

		if (!sleep && odp_time_cmp(start_sleep, current) < 0)
			sleep = 1;

		if (wait != ODP_SCHED_WAIT && odp_time_cmp(end, current) < 0)
			break;
	}

	return ret;
}
+
+static odp_event_t schedule(odp_queue_t *out_queue, uint64_t wait)
+{
+ odp_event_t ev;
+
+ ev = ODP_EVENT_INVALID;
+
+ schedule_loop(out_queue, wait, &ev, 1);
+
+ return ev;
+}
+
+static odp_event_t schedule_sleep(odp_queue_t *out_queue, uint64_t wait)
+{
+ odp_event_t ev;
+
+ ev = ODP_EVENT_INVALID;
+
+ if (wait == ODP_SCHED_NO_WAIT)
+ schedule_loop(out_queue, wait, &ev, 1);
+ else
+ schedule_loop_sleep(out_queue, wait, &ev, 1);
+
+ return ev;
+}
+
+static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
+ odp_event_t events[], int num)
+{
+ return schedule_loop(out_queue, wait, events, num);
+}
+
+static int schedule_multi_sleep(odp_queue_t *out_queue, uint64_t wait,
+ odp_event_t events[], int num)
+{
+ if (wait == ODP_SCHED_NO_WAIT)
+ return schedule_loop(out_queue, wait, events, num);
+
+ return schedule_loop_sleep(out_queue, wait, events, num);
+}
+
+static int schedule_multi_no_wait(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
+{
+ return schedule_run(out_queue, events, num);
+}
+
+static int schedule_multi_wait(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
+{
+ int ret;
+
+ do {
+ ret = schedule_run(out_queue, events, num);
+ } while (ret == 0);
+
+ return ret;
+}
+
+static int schedule_multi_wait_sleep(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
+{
+ return schedule_loop_sleep(out_queue, ODP_SCHED_WAIT, events, num);
+}
+
+static inline void order_lock(void)
+{
+ if (sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED)
+ return;
+
+ wait_for_order(sched_local.ordered.src_queue);
+}
+
/* Counterpart of order_lock(); the ordered context is advanced only when it
 * is released, so there is nothing to do here. */
static void order_unlock(void)
{
	/* Nothing to do */
}
+
+static void schedule_order_lock(uint32_t lock_index)
+{
+ odp_atomic_u64_t *ord_lock;
+ uint32_t queue_index;
+
+ if (sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED)
+ return;
+
+ queue_index = sched_local.ordered.src_queue;
+
+ _ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count &&
+ !sched_local.ordered.lock_called.u8[lock_index]);
+
+ ord_lock = &sched->order[queue_index].lock[lock_index];
+
+ /* Busy loop to synchronize ordered processing */
+ while (1) {
+ uint64_t lock_seq;
+
+ lock_seq = odp_atomic_load_acq_u64(ord_lock);
+
+ if (lock_seq == sched_local.ordered.ctx) {
+ sched_local.ordered.lock_called.u8[lock_index] = 1;
+ return;
+ }
+ odp_cpu_pause();
+ }
+}
+
+static void schedule_order_unlock(uint32_t lock_index)
+{
+ odp_atomic_u64_t *ord_lock;
+ uint32_t queue_index;
+
+ if (sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED)
+ return;
+
+ queue_index = sched_local.ordered.src_queue;
+
+ _ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count);
+
+ ord_lock = &sched->order[queue_index].lock[lock_index];
+
+ _ODP_ASSERT(sched_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
+
+ odp_atomic_store_rel_u64(ord_lock, sched_local.ordered.ctx + 1);
+}
+
/* Atomically-in-order transition between two ordered locks: release one,
 * then acquire the other. */
static void schedule_order_unlock_lock(uint32_t unlock_index,
				       uint32_t lock_index)
{
	schedule_order_unlock(unlock_index);
	schedule_order_lock(lock_index);
}
+
/* Asynchronous ordered lock start: nothing to prepare in this scheduler;
 * the actual wait happens in schedule_order_lock_wait(). */
static void schedule_order_lock_start(uint32_t lock_index)
{
	(void)lock_index;
}
+
/* Asynchronous ordered lock wait: equivalent to a blocking lock acquire. */
static void schedule_order_lock_wait(uint32_t lock_index)
{
	schedule_order_lock(lock_index);
}
+
+static void schedule_pause(void)
+{
+ sched_local.pause = 1;
+}
+
+static void schedule_resume(void)
+{
+ sched_local.pause = 0;
+}
+
/* Convert a wait time in nanoseconds into the scheduler's wait parameter.
 * This implementation uses nanoseconds directly. */
static uint64_t schedule_wait_time(uint64_t ns)
{
	return ns;
}
+
/* Increment a group's per-spread thread counters for each thread in
 * thr_tbl[]. Caller holds grp_lock. */
static inline void spread_thrs_inc(odp_schedule_group_t group, int thr_tbl[], int count)
{
	int thr, i;
	uint8_t spread;

	for (i = 0; i < count; i++) {
		thr = thr_tbl[i];
		spread = spread_from_index(thr);
		sched->sched_grp[group].spread_thrs[spread]++;
	}
}
+
/* Decrement a group's per-spread thread counters for each thread in
 * thr_tbl[]. Caller holds grp_lock. */
static inline void spread_thrs_dec(odp_schedule_group_t group, int thr_tbl[], int count)
{
	int thr, i;
	uint8_t spread;

	for (i = 0; i < count; i++) {
		thr = thr_tbl[i];
		spread = spread_from_index(thr);
		sched->sched_grp[group].spread_thrs[spread]--;
	}
}
+
/* Collect the first 'count' thread ids of 'mask' into thr_tbl[].
 * Returns 0 on success, -1 when the mask holds fewer than 'count' threads. */
static inline int threads_from_mask(int thr_tbl[], int count, const odp_thrmask_t *mask)
{
	int i;
	int thr = odp_thrmask_first(mask);

	for (i = 0; i < count; i++) {
		if (thr < 0) {
			_ODP_ERR("No more threads in the mask\n");
			return -1;
		}

		thr_tbl[i] = thr;
		thr = odp_thrmask_next(mask, thr);
	}

	return 0;
}
+
/* Create a named scheduling group with the given thread mask. Allocates the
 * first free named group slot. Returns the group handle, or
 * ODP_SCHED_GROUP_INVALID when the mask is bad or all slots are in use. */
static odp_schedule_group_t schedule_group_create(const char *name,
						  const odp_thrmask_t *mask)
{
	odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
	int count, i;

	count = odp_thrmask_count(mask);
	if (count < 0) {
		_ODP_ERR("Bad thread count\n");
		return ODP_SCHED_GROUP_INVALID;
	}

	int thr_tbl[count];

	if (count && threads_from_mask(thr_tbl, count, mask))
		return ODP_SCHED_GROUP_INVALID;

	odp_ticketlock_lock(&sched->grp_lock);

	for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
		if (!sched->sched_grp[i].allocated) {
			char *grp_name = sched->sched_grp[i].name;

			if (name == NULL) {
				grp_name[0] = 0;
			} else {
				/* Explicit termination: strncpy() does not
				 * guarantee it on truncation */
				strncpy(grp_name, name,
					ODP_SCHED_GROUP_NAME_LEN - 1);
				grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
			}

			grp_update_mask(i, mask);
			group = (odp_schedule_group_t)i;
			spread_thrs_inc(group, thr_tbl, count);
			sched->sched_grp[i].allocated = 1;
			break;
		}
	}

	odp_ticketlock_unlock(&sched->grp_lock);
	return group;
}
+
/* Destroy a named scheduling group: clear its mask, spread thread counters
 * and name, and mark the slot free. Returns 0 on success, -1 on bad or
 * unallocated group. */
static int schedule_group_destroy(odp_schedule_group_t group)
{
	odp_thrmask_t zero;
	int i;

	if (group >= NUM_SCHED_GRPS || group < SCHED_GROUP_NAMED) {
		_ODP_ERR("Bad group %i\n", group);
		return -1;
	}

	odp_thrmask_zero(&zero);

	odp_ticketlock_lock(&sched->grp_lock);

	if (sched->sched_grp[group].allocated == 0) {
		odp_ticketlock_unlock(&sched->grp_lock);
		_ODP_ERR("Group not created: %i\n", group);
		return -1;
	}

	grp_update_mask(group, &zero);

	for (i = 0; i < MAX_SPREAD; i++)
		sched->sched_grp[group].spread_thrs[i] = 0;

	memset(sched->sched_grp[group].name, 0, ODP_SCHED_GROUP_NAME_LEN);
	sched->sched_grp[group].allocated = 0;

	odp_ticketlock_unlock(&sched->grp_lock);
	return 0;
}
+
+static odp_schedule_group_t schedule_group_lookup(const char *name)
+{
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
+ int i;
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ for (i = SCHED_GROUP_NAMED; i < NUM_SCHED_GRPS; i++) {
+ if (strcmp(name, sched->sched_grp[i].name) == 0) {
+ group = (odp_schedule_group_t)i;
+ break;
+ }
+ }
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return group;
+}
+
+/* Add the threads in 'mask' to a named scheduler group.
+ *
+ * Validates the group id and collects the thread ids from the mask into a
+ * VLA (thr_tbl, sized by the mask's thread count) before taking the lock.
+ * Under grp_lock: bumps the per-spread thread counters for the joining
+ * threads and ORs the mask into the group's existing thread mask.
+ * Returns 0 on success, -1 on a bad group id, an empty mask, a mask/count
+ * mismatch, or an unallocated group.
+ */
+static int schedule_group_join(odp_schedule_group_t group, const odp_thrmask_t *mask)
+{
+ int i, count, thr;
+ odp_thrmask_t new_mask;
+
+ if (group >= NUM_SCHED_GRPS || group < SCHED_GROUP_NAMED) {
+ _ODP_ERR("Bad group %i\n", group);
+ return -1;
+ }
+
+ count = odp_thrmask_count(mask);
+ if (count <= 0) {
+ _ODP_ERR("No threads in the mask\n");
+ return -1;
+ }
+
+ int thr_tbl[count];
+
+ thr = odp_thrmask_first(mask);
+ for (i = 0; i < count; i++) {
+ if (thr < 0) {
+ _ODP_ERR("No more threads in the mask\n");
+ return -1;
+ }
+
+ thr_tbl[i] = thr;
+ thr = odp_thrmask_next(mask, thr);
+ }
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ if (sched->sched_grp[group].allocated == 0) {
+ odp_ticketlock_unlock(&sched->grp_lock);
+ _ODP_ERR("Bad group status\n");
+ return -1;
+ }
+
+ spread_thrs_inc(group, thr_tbl, count);
+
+ odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, mask);
+ grp_update_mask(group, &new_mask);
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return 0;
+}
+
+/* Remove the threads in 'mask' from a named scheduler group.
+ *
+ * Mirror of schedule_group_join(): collects thread ids into a VLA, then
+ * under grp_lock decrements the per-spread counters and masks the leaving
+ * threads out of the group. The removal mask is built as
+ * (mask XOR mask_all) — i.e. all known threads except the leavers — and
+ * ANDed with the group's current mask.
+ * Returns 0 on success, -1 on bad group id, empty/short mask, or an
+ * unallocated group.
+ */
+static int schedule_group_leave(odp_schedule_group_t group, const odp_thrmask_t *mask)
+{
+ int i, count, thr;
+ odp_thrmask_t new_mask;
+
+ if (group >= NUM_SCHED_GRPS || group < SCHED_GROUP_NAMED) {
+ _ODP_ERR("Bad group %i\n", group);
+ return -1;
+ }
+
+ count = odp_thrmask_count(mask);
+ if (count <= 0) {
+ _ODP_ERR("No threads in the mask\n");
+ return -1;
+ }
+
+ int thr_tbl[count];
+
+ thr = odp_thrmask_first(mask);
+ for (i = 0; i < count; i++) {
+ if (thr < 0) {
+ _ODP_ERR("No more threads in the mask\n");
+ return -1;
+ }
+
+ thr_tbl[i] = thr;
+ thr = odp_thrmask_next(mask, thr);
+ }
+
+ odp_thrmask_xor(&new_mask, mask, &sched->mask_all);
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ if (sched->sched_grp[group].allocated == 0) {
+ odp_ticketlock_unlock(&sched->grp_lock);
+ _ODP_ERR("Bad group status\n");
+ return -1;
+ }
+
+ spread_thrs_dec(group, thr_tbl, count);
+
+ odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask, &new_mask);
+ grp_update_mask(group, &new_mask);
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return 0;
+}
+
+/* Copy a group's current thread mask to 'thrmask'.
+ *
+ * Returns 0 and fills *thrmask when the group id is in range and the
+ * group is allocated; -1 otherwise.
+ * NOTE(review): only the upper bound is checked — a negative group id
+ * would index before the array. Presumably callers always pass a handle
+ * previously returned by the API; confirm.
+ */
+static int schedule_group_thrmask(odp_schedule_group_t group,
+ odp_thrmask_t *thrmask)
+{
+ int ret;
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
+ *thrmask = sched->sched_grp[group].mask;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return ret;
+}
+
+/* Fill 'info' with a group's name pointer and thread mask.
+ *
+ * info->name aliases the group's name storage inside the shared scheduler
+ * state (not a copy). Returns 0 on success, -1 for an out-of-range or
+ * unallocated group.
+ * NOTE(review): same missing negative-id guard as schedule_group_thrmask().
+ */
+static int schedule_group_info(odp_schedule_group_t group,
+ odp_schedule_group_info_t *info)
+{
+ int ret;
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
+ info->name = sched->sched_grp[group].name;
+ info->thrmask = sched->sched_grp[group].mask;
+ ret = 0;
+ } else {
+ ret = -1;
+ }
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return ret;
+}
+
+/* Internal hook: add one thread to a predefined (non-named) group.
+ *
+ * Only accepts group ids below SCHED_GROUP_NAMED (the built-in groups);
+ * returns -1 otherwise. If the group is not allocated the call is a
+ * silent no-op returning 0. Otherwise ORs the thread into the group mask
+ * and bumps its spread counter under grp_lock.
+ */
+static int schedule_thr_add(odp_schedule_group_t group, int thr)
+{
+ odp_thrmask_t mask;
+ odp_thrmask_t new_mask;
+
+ if (group < 0 || group >= SCHED_GROUP_NAMED)
+ return -1;
+
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ if (!sched->sched_grp[group].allocated) {
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return 0;
+ }
+
+ odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, &mask);
+ spread_thrs_inc(group, &thr, 1);
+ grp_update_mask(group, &new_mask);
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+
+ return 0;
+}
+
+/* Internal hook: remove one thread from a predefined (non-named) group.
+ *
+ * Counterpart of schedule_thr_add(): builds the keep-mask as
+ * (thread XOR mask_all), ANDs it with the group mask, and decrements the
+ * thread's spread counter under grp_lock. No-op (returns 0) when the
+ * group is not allocated; -1 for ids outside the predefined range.
+ */
+static int schedule_thr_rem(odp_schedule_group_t group, int thr)
+{
+ odp_thrmask_t mask;
+ odp_thrmask_t new_mask;
+
+ if (group < 0 || group >= SCHED_GROUP_NAMED)
+ return -1;
+
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+ odp_thrmask_xor(&new_mask, &mask, &sched->mask_all);
+
+ odp_ticketlock_lock(&sched->grp_lock);
+
+ if (!sched->sched_grp[group].allocated) {
+ odp_ticketlock_unlock(&sched->grp_lock);
+ return 0;
+ }
+
+ odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask, &new_mask);
+ spread_thrs_dec(group, &thr, 1);
+ grp_update_mask(group, &new_mask);
+
+ odp_ticketlock_unlock(&sched->grp_lock);
+
+ return 0;
+}
+
+/* Event prefetch hint: intentionally a no-op in the basic scheduler. */
+static void schedule_prefetch(int num)
+{
+ (void)num;
+}
+
+/* Number of application-creatable (named) schedule groups. */
+static int schedule_num_grps(void)
+{
+ return NUM_SCHED_GRPS - SCHED_GROUP_NAMED;
+}
+
+/* Copy the interface-facing scheduler configuration snapshot to 'config'. */
+static void schedule_get_config(schedule_config_t *config)
+{
+ *config = sched->config_if;
+}
+
+/* Report the basic scheduler's capabilities.
+ *
+ * Zeroes *capa, then fills the limits from compile-time constants and the
+ * global scheduler/queue state. Always returns 0.
+ */
+static int schedule_capability(odp_schedule_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_schedule_capability_t));
+
+ capa->max_ordered_locks = schedule_max_ordered_locks();
+ capa->max_groups = schedule_num_grps();
+ capa->max_prios = schedule_num_prio();
+ capa->max_queues = sched->max_queues;
+ capa->max_queue_size = _odp_queue_glb->config.max_queue_size;
+ capa->max_flow_id = BUF_HDR_MAX_FLOW_ID;
+ capa->order_wait = ODP_SUPPORT_YES;
+
+ return 0;
+}
+
+/* Print scheduler debug state: configuration summary, active event queue
+ * counts per (priority, group, spread), and thread counts per group/spread.
+ *
+ * Each table row is assembled into a fixed 512-byte line buffer with
+ * _odp_snprint() before printing; rows are truncated if they exceed it.
+ * Reads shared state without taking grp_lock — debug output may be
+ * slightly inconsistent under concurrent group changes.
+ */
+static void schedule_print(void)
+{
+ int spr, prio, grp, pos;
+ uint32_t num_queues, num_active;
+ ring_u32_t *ring;
+ odp_schedule_capability_t capa;
+ int num_spread = sched->config.num_spread;
+ const int col_width = 24;
+ const int size = 512;
+ char str[size];
+
+ (void)schedule_capability(&capa);
+
+ _ODP_PRINT("\nScheduler debug info\n");
+ _ODP_PRINT("--------------------\n");
+ _ODP_PRINT("  scheduler: basic\n");
+ _ODP_PRINT("  max groups: %u\n", capa.max_groups);
+ _ODP_PRINT("  max priorities: %u\n", capa.max_prios);
+ _ODP_PRINT("  num spread: %i\n", num_spread);
+ _ODP_PRINT("  prefer ratio: %u\n", sched->config.prefer_ratio);
+ _ODP_PRINT("\n");
+
+ pos = 0;
+ pos += _odp_snprint(&str[pos], size - pos, "  Number of active event queues:\n");
+ pos += _odp_snprint(&str[pos], size - pos, "              spread\n");
+ pos += _odp_snprint(&str[pos], size - pos, "            ");
+
+ for (spr = 0; spr < num_spread; spr++)
+ pos += _odp_snprint(&str[pos], size - pos, " %7i", spr);
+
+ _ODP_PRINT("%s\n", str);
+
+ for (prio = 0; prio < NUM_PRIO; prio++) {
+ /* Skip priority levels with no queued work in any group */
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++)
+ if (sched->prio_q_mask[grp][prio])
+ break;
+
+ if (grp == NUM_SCHED_GRPS)
+ continue;
+
+ _ODP_PRINT("    prio: %i\n", prio);
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ if (sched->sched_grp[grp].allocated == 0)
+ continue;
+
+ pos = 0;
+ pos += _odp_snprint(&str[pos], size - pos, "      group %i:", grp);
+
+ for (spr = 0; spr < num_spread; spr++) {
+ /* active/total queues per spread bin */
+ num_queues = sched->prio_q_count[grp][prio][spr];
+ ring = &sched->prio_q[grp][prio][spr].ring;
+ num_active = ring_u32_len(ring);
+ pos += _odp_snprint(&str[pos], size - pos, " %3u/%3u",
+ num_active, num_queues);
+ }
+
+ _ODP_PRINT("%s\n", str);
+ }
+ }
+
+ _ODP_PRINT("\n  Number of threads per schedule group:\n");
+ _ODP_PRINT("            name                     spread\n");
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ if (sched->sched_grp[grp].allocated == 0)
+ continue;
+
+ pos = 0;
+ pos += _odp_snprint(&str[pos], size - pos, "  group %i: %-*s", grp, col_width,
+ sched->sched_grp[grp].name);
+
+ for (spr = 0; spr < num_spread; spr++)
+ pos += _odp_snprint(&str[pos], size - pos, " %u",
+ sched->sched_grp[grp].spread_thrs[spr]);
+
+ _ODP_PRINT("%s\n", str);
+ }
+
+ _ODP_PRINT("\n");
+}
+
+/* Return the spread index assigned to a queue; exported for queue debug
+ * prints only, not part of the scheduler fast path. */
+int _odp_sched_basic_get_spread(uint32_t queue_index)
+{
+ return sched->queue[queue_index].spread;
+}
+
+/* Forward declarations of the two API tables defined below */
+const _odp_schedule_api_fn_t _odp_schedule_basic_api;
+const _odp_schedule_api_fn_t _odp_schedule_basic_sleep_api;
+
+/* Select the API table: the sleep (powersave) variant is used when a
+ * positive poll time has been configured, otherwise the default table. */
+static const _odp_schedule_api_fn_t *sched_api(void)
+{
+ if (sched->powersave.poll_time > 0)
+ return &_odp_schedule_basic_sleep_api;
+
+ return &_odp_schedule_basic_api;
+}
+
+/* Fill in scheduler interface */
+const schedule_fn_t _odp_schedule_basic_fn = {
+ .pktio_start = schedule_pktio_start,
+ .thr_add = schedule_thr_add,
+ .thr_rem = schedule_thr_rem,
+ .num_grps = schedule_num_grps,
+ .create_queue = schedule_create_queue,
+ .destroy_queue = schedule_destroy_queue,
+ .sched_queue = schedule_sched_queue,
+ .ord_enq_multi = schedule_ord_enq_multi,
+ .init_global = schedule_init_global,
+ .term_global = schedule_term_global,
+ .init_local = schedule_init_local,
+ .term_local = schedule_term_local,
+ .order_lock = order_lock,
+ .order_unlock = order_unlock,
+ .max_ordered_locks = schedule_max_ordered_locks,
+ .get_config = schedule_get_config,
+ .sched_api = sched_api,
+};
+
+/* Fill in scheduler API calls */
+const _odp_schedule_api_fn_t _odp_schedule_basic_api = {
+ .schedule_wait_time = schedule_wait_time,
+ .schedule_capability = schedule_capability,
+ .schedule_config_init = schedule_config_init,
+ .schedule_config = schedule_config,
+ .schedule = schedule,
+ .schedule_multi = schedule_multi,
+ .schedule_multi_wait = schedule_multi_wait,
+ .schedule_multi_no_wait = schedule_multi_no_wait,
+ .schedule_pause = schedule_pause,
+ .schedule_resume = schedule_resume,
+ .schedule_release_atomic = schedule_release_atomic,
+ .schedule_release_ordered = schedule_release_ordered,
+ .schedule_prefetch = schedule_prefetch,
+ .schedule_min_prio = schedule_min_prio,
+ .schedule_max_prio = schedule_max_prio,
+ .schedule_default_prio = schedule_default_prio,
+ .schedule_num_prio = schedule_num_prio,
+ .schedule_group_create = schedule_group_create,
+ .schedule_group_destroy = schedule_group_destroy,
+ .schedule_group_lookup = schedule_group_lookup,
+ .schedule_group_join = schedule_group_join,
+ .schedule_group_leave = schedule_group_leave,
+ .schedule_group_thrmask = schedule_group_thrmask,
+ .schedule_group_info = schedule_group_info,
+ .schedule_order_lock = schedule_order_lock,
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
+ .schedule_order_lock_start = schedule_order_lock_start,
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
+ .schedule_print = schedule_print
+};
+
+/* API functions used when powersave is enabled in the config file. */
+const _odp_schedule_api_fn_t _odp_schedule_basic_sleep_api = {
+ .schedule_wait_time = schedule_wait_time,
+ .schedule_capability = schedule_capability,
+ .schedule_config_init = schedule_config_init,
+ .schedule_config = schedule_config,
+ /* Only the following *_sleep functions differ from _odp_schedule_basic_api */
+ .schedule = schedule_sleep,
+ .schedule_multi = schedule_multi_sleep,
+ .schedule_multi_wait = schedule_multi_wait_sleep,
+ /* End of powersave specific functions */
+ .schedule_multi_no_wait = schedule_multi_no_wait,
+ .schedule_pause = schedule_pause,
+ .schedule_resume = schedule_resume,
+ .schedule_release_atomic = schedule_release_atomic,
+ .schedule_release_ordered = schedule_release_ordered,
+ .schedule_prefetch = schedule_prefetch,
+ .schedule_min_prio = schedule_min_prio,
+ .schedule_max_prio = schedule_max_prio,
+ .schedule_default_prio = schedule_default_prio,
+ .schedule_num_prio = schedule_num_prio,
+ .schedule_group_create = schedule_group_create,
+ .schedule_group_destroy = schedule_group_destroy,
+ .schedule_group_lookup = schedule_group_lookup,
+ .schedule_group_join = schedule_group_join,
+ .schedule_group_leave = schedule_group_leave,
+ .schedule_group_thrmask = schedule_group_thrmask,
+ .schedule_group_info = schedule_group_info,
+ .schedule_order_lock = schedule_order_lock,
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
+ .schedule_order_lock_start = schedule_order_lock_start,
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
+ .schedule_print = schedule_print
+};
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index a9ede98d3..f4d50f84e 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -1,123 +1,172 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_schedule_if.h>
+#include <odp/autoheader_internal.h>
-extern const schedule_fn_t schedule_sp_fn;
-extern const schedule_api_t schedule_sp_api;
+#include <odp/api/plat/schedule_inline_types.h>
-extern const schedule_fn_t schedule_default_fn;
-extern const schedule_api_t schedule_default_api;
+#include <odp_schedule_if.h>
+#include <odp_init_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
-extern const schedule_fn_t schedule_iquery_fn;
-extern const schedule_api_t schedule_iquery_api;
+#include <stdlib.h>
+#include <string.h>
-#ifdef ODP_SCHEDULE_SP
-const schedule_fn_t *sched_fn = &schedule_sp_fn;
-const schedule_api_t *sched_api = &schedule_sp_api;
-#elif defined(ODP_SCHEDULE_IQUERY)
-const schedule_fn_t *sched_fn = &schedule_iquery_fn;
-const schedule_api_t *sched_api = &schedule_iquery_api;
-#else
-const schedule_fn_t *sched_fn = &schedule_default_fn;
-const schedule_api_t *sched_api = &schedule_default_api;
-#endif
+/* Enable visibility to inline headers */
+#include <odp/visibility_begin.h>
-uint64_t odp_schedule_wait_time(uint64_t ns)
-{
- return sched_api->schedule_wait_time(ns);
-}
+const _odp_schedule_api_fn_t *_odp_sched_api;
-odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
+int _odp_schedule_configured(void)
{
- return sched_api->schedule(from, wait);
+ return odp_global_rw->schedule_configured;
}
-int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
- int num)
+#include <odp/visibility_end.h>
+
+extern const schedule_fn_t _odp_schedule_sp_fn;
+extern const schedule_fn_t _odp_schedule_basic_fn;
+extern const schedule_fn_t _odp_schedule_scalable_fn;
+const schedule_fn_t *_odp_sched_fn;
+int _odp_sched_id;
+
+int odp_schedule_capability(odp_schedule_capability_t *capa)
{
- return sched_api->schedule_multi(from, wait, events, num);
+ return _odp_sched_api->schedule_capability(capa);
}
-void odp_schedule_pause(void)
+void odp_schedule_config_init(odp_schedule_config_t *config)
{
- return sched_api->schedule_pause();
+ memset(config, 0, sizeof(*config));
+
+ _odp_sched_api->schedule_config_init(config);
}
-void odp_schedule_resume(void)
+int odp_schedule_config(const odp_schedule_config_t *config)
{
- return sched_api->schedule_resume();
+ int ret;
+ odp_schedule_config_t defconfig;
+
+ if (odp_global_rw->schedule_configured) {
+ _ODP_ERR("Scheduler has been configured already\n");
+ return -1;
+ }
+
+ if (!config) {
+ odp_schedule_config_init(&defconfig);
+ config = &defconfig;
+ }
+
+ ret = _odp_sched_api->schedule_config(config);
+
+ if (ret >= 0)
+ odp_global_rw->schedule_configured = 1;
+
+ return ret;
}
-void odp_schedule_release_atomic(void)
+int odp_schedule_min_prio(void)
{
- return sched_api->schedule_release_atomic();
+ return _odp_sched_api->schedule_min_prio();
}
-void odp_schedule_release_ordered(void)
+int odp_schedule_max_prio(void)
{
- return sched_api->schedule_release_ordered();
+ return _odp_sched_api->schedule_max_prio();
}
-void odp_schedule_prefetch(int num)
+int odp_schedule_default_prio(void)
{
- return sched_api->schedule_prefetch(num);
+ return _odp_sched_api->schedule_default_prio();
}
int odp_schedule_num_prio(void)
{
- return sched_api->schedule_num_prio();
+ return _odp_sched_api->schedule_num_prio();
}
odp_schedule_group_t odp_schedule_group_create(const char *name,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_create(name, mask);
+ return _odp_sched_api->schedule_group_create(name, mask);
}
int odp_schedule_group_destroy(odp_schedule_group_t group)
{
- return sched_api->schedule_group_destroy(group);
+ return _odp_sched_api->schedule_group_destroy(group);
}
odp_schedule_group_t odp_schedule_group_lookup(const char *name)
{
- return sched_api->schedule_group_lookup(name);
+ return _odp_sched_api->schedule_group_lookup(name);
}
int odp_schedule_group_join(odp_schedule_group_t group,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_join(group, mask);
+ return _odp_sched_api->schedule_group_join(group, mask);
}
int odp_schedule_group_leave(odp_schedule_group_t group,
const odp_thrmask_t *mask)
{
- return sched_api->schedule_group_leave(group, mask);
+ return _odp_sched_api->schedule_group_leave(group, mask);
}
int odp_schedule_group_thrmask(odp_schedule_group_t group,
odp_thrmask_t *thrmask)
{
- return sched_api->schedule_group_thrmask(group, thrmask);
+ return _odp_sched_api->schedule_group_thrmask(group, thrmask);
}
int odp_schedule_group_info(odp_schedule_group_t group,
odp_schedule_group_info_t *info)
{
- return sched_api->schedule_group_info(group, info);
+ return _odp_sched_api->schedule_group_info(group, info);
}
-void odp_schedule_order_lock(unsigned lock_index)
+void odp_schedule_print(void)
{
- return sched_api->schedule_order_lock(lock_index);
+ _odp_sched_api->schedule_print();
+}
+
+/* Select and initialize the scheduler implementation.
+ *
+ * The implementation is chosen by the ODP_SCHEDULER environment variable
+ * ("basic", "sp" or "scalable"); unset or "default" falls back to the
+ * compile-time _ODP_SCHEDULE_DEFAULT. An unknown name aborts via
+ * _ODP_ABORT (the return -1 after it is defensive). On success the
+ * global _odp_sched_id/_odp_sched_fn/_odp_sched_api are set and the
+ * implementation's init_global() has run. Returns 0 on success, -1 if
+ * init_global() fails.
+ */
+int _odp_schedule_init_global(void)
+{
+ const char *sched = getenv("ODP_SCHEDULER");
+
+ if (sched == NULL || !strcmp(sched, "default"))
+ sched = _ODP_SCHEDULE_DEFAULT;
+
+ _ODP_PRINT("Using scheduler '%s'\n", sched);
+
+ if (!strcmp(sched, "basic")) {
+ _odp_sched_id = _ODP_SCHED_ID_BASIC;
+ _odp_sched_fn = &_odp_schedule_basic_fn;
+ } else if (!strcmp(sched, "sp")) {
+ _odp_sched_id = _ODP_SCHED_ID_SP;
+ _odp_sched_fn = &_odp_schedule_sp_fn;
+ } else if (!strcmp(sched, "scalable")) {
+ _odp_sched_id = _ODP_SCHED_ID_SCALABLE;
+ _odp_sched_fn = &_odp_schedule_scalable_fn;
+ } else {
+ _ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
+ return -1;
+ }
+
+ if (_odp_sched_fn->init_global())
+ return -1;
+
+ _odp_sched_api = _odp_sched_fn->sched_api();
+
+ return 0;
+}
-void odp_schedule_order_unlock(unsigned lock_index)
+int _odp_schedule_term_global(void)
{
- return sched_api->schedule_order_unlock(lock_index);
+ return _odp_sched_fn->term_global();
}
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
deleted file mode 100644
index b69245788..000000000
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ /dev/null
@@ -1,1521 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/schedule.h>
-#include <odp_schedule_if.h>
-#include <odp/api/align.h>
-#include <odp/api/queue.h>
-#include <odp/api/shared_memory.h>
-#include <odp_internal.h>
-#include <odp_debug_internal.h>
-#include <odp_ring_internal.h>
-#include <odp_queue_internal.h>
-#include <odp_buffer_internal.h>
-#include <odp_bitmap_internal.h>
-#include <odp/api/thread.h>
-#include <odp/api/time.h>
-#include <odp/api/rwlock.h>
-#include <odp/api/hints.h>
-#include <odp/api/cpu.h>
-#include <odp/api/thrmask.h>
-#include <odp_config_internal.h>
-
-/* Number of priority levels */
-#define NUM_SCHED_PRIO 8
-
-ODP_STATIC_ASSERT(ODP_SCHED_PRIO_LOWEST == (NUM_SCHED_PRIO - 1),
- "lowest_prio_does_not_match_with_num_prios");
-
-ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
- (ODP_SCHED_PRIO_NORMAL < (NUM_SCHED_PRIO - 1)),
- "normal_prio_is_not_between_highest_and_lowest");
-
-/* Number of scheduling groups */
-#define NUM_SCHED_GRPS 256
-
-/* Start of named groups in group mask arrays */
-#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
-
-/* Instantiate a WAPL bitmap to be used as queue index bitmap */
-typedef WAPL_BITMAP(ODP_CONFIG_QUEUES) queue_index_bitmap_t;
-
-typedef struct {
- odp_rwlock_t lock;
- queue_index_bitmap_t queues; /* queues in this priority level */
-} sched_prio_t;
-
-typedef struct {
- odp_rwlock_t lock;
- bool allocated;
- odp_thrmask_t threads; /* threads subscribe to this group */
- queue_index_bitmap_t queues; /* queues in this group */
- char name[ODP_SCHED_GROUP_NAME_LEN];
-} sched_group_t;
-
-/* Packet input poll command queues */
-#define PKTIO_CMD_QUEUES 4
-
-/* Maximum number of packet input queues per command */
-#define MAX_PKTIN 16
-
-/* Maximum number of packet IO interfaces */
-#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
-
-/* Maximum number of pktio poll commands */
-#define NUM_PKTIO_CMD (MAX_PKTIN * NUM_PKTIO)
-
-/* Pktio command is free */
-#define PKTIO_CMD_FREE ((uint32_t)-1)
-
-/* Packet IO poll queue ring size. In worst case, all pktios
- * have all pktins enabled and one poll command is created per
- * pktin queue. The ring size must be larger than or equal to
- * NUM_PKTIO_CMD / PKTIO_CMD_QUEUES, so that it can hold all
- * poll commands in the worst case.
- */
-#define PKTIO_RING_SIZE (NUM_PKTIO_CMD / PKTIO_CMD_QUEUES)
-
-/* Mask for wrapping around pktio poll command index */
-#define PKTIO_RING_MASK (PKTIO_RING_SIZE - 1)
-
-/* Maximum number of dequeues */
-#define MAX_DEQ CONFIG_BURST_SIZE
-
-/* Instantiate a RING data structure as pktio command queue */
-typedef struct {
- /* Ring header */
- ring_t ring;
-
- /* Ring data: pktio poll command indexes */
- uint32_t cmd_index[PKTIO_RING_SIZE];
-} pktio_cmd_queue_t ODP_ALIGNED_CACHE;
-
-/* Packet IO poll command */
-typedef struct {
- int pktio;
- int count;
- int pktin[MAX_PKTIN];
- uint32_t index;
-} pktio_cmd_t;
-
-/* Collect the pktio poll resources */
-typedef struct {
- odp_rwlock_t lock;
- /* count active commands per pktio interface */
- int actives[NUM_PKTIO];
- pktio_cmd_t commands[NUM_PKTIO_CMD];
- pktio_cmd_queue_t queues[PKTIO_CMD_QUEUES];
-} pktio_poll_t;
-
-/* Forward declaration */
-typedef struct sched_thread_local sched_thread_local_t;
-
-typedef struct {
- odp_shm_t selfie;
-
- /* Schedule priorities */
- sched_prio_t prios[NUM_SCHED_PRIO];
-
- /* Schedule groups */
- sched_group_t groups[NUM_SCHED_GRPS];
-
- /* Cache queue parameters for easy reference */
- odp_schedule_param_t queues[ODP_CONFIG_QUEUES];
-
- /* Poll pktio inputs in spare time */
- pktio_poll_t pktio_poll;
-
- /* Queues send or unwind their availability indications
- * for scheduling, the bool value also serves as a focal
- * point for atomic competition. */
- bool availables[ODP_CONFIG_QUEUES];
-
- /* Quick reference to per thread context */
- sched_thread_local_t *threads[ODP_THREAD_COUNT_MAX];
-} sched_global_t;
-
-/* Per thread events cache */
-typedef struct {
- int count;
- odp_queue_t queue;
- odp_event_t stash[MAX_DEQ], *top;
-} event_cache_t;
-
-/* Maximum number of ordered locks per queue */
-#define MAX_ORDERED_LOCKS_PER_QUEUE 2
-
-ODP_STATIC_ASSERT(MAX_ORDERED_LOCKS_PER_QUEUE <= CONFIG_QUEUE_MAX_ORD_LOCKS,
- "Too_many_ordered_locks");
-
-/* Ordered stash size */
-#define MAX_ORDERED_STASH 512
-
-/* Storage for stashed enqueue operation arguments */
-typedef struct {
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- queue_entry_t *queue;
- int num;
-} ordered_stash_t;
-
-/* Ordered lock states */
-typedef union {
- uint8_t u8[CONFIG_QUEUE_MAX_ORD_LOCKS];
- uint32_t all;
-} lock_called_t;
-
-ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
- "Lock_called_values_do_not_fit_in_uint32");
-
-/* Instantiate a sparse bitmap to store thread's interested
- * queue indexes per priority.
- */
-typedef SPARSE_BITMAP(ODP_CONFIG_QUEUES) queue_index_sparse_t;
-
-struct sched_thread_local {
- int thread;
- bool pause;
-
- /* Cache events only for atomic queue */
- event_cache_t cache;
-
- /* Saved atomic context */
- bool *atomic;
-
- /* Record the pktio polls have done */
- uint16_t pktin_polls;
-
- /* Interested queue indexes to be checked by thread
- * at each priority level for scheduling, and a round
- * robin iterator to improve fairness between queues
- * in the same priority level.
- */
- odp_rwlock_t lock;
- queue_index_sparse_t indexes[NUM_SCHED_PRIO];
- sparse_bitmap_iterator_t iterators[NUM_SCHED_PRIO];
-
- struct {
- queue_entry_t *src_queue; /**< Source queue entry */
- uint64_t ctx; /**< Ordered context id */
- int stash_num; /**< Number of stashed enqueue operations */
- uint8_t in_order; /**< Order status */
- lock_called_t lock_called; /**< States of ordered locks */
- /** Storage for stashed enqueue operations */
- ordered_stash_t stash[MAX_ORDERED_STASH];
- } ordered;
-};
-
-/* Global scheduler context */
-static sched_global_t *sched;
-
-/* Thread local scheduler context */
-__thread sched_thread_local_t thread_local;
-
-static int schedule_init_global(void)
-{
- odp_shm_t shm;
- int i, k, prio, group;
-
- ODP_DBG("Schedule[iquery] init ... ");
-
- shm = odp_shm_reserve("odp_scheduler_iquery",
- sizeof(sched_global_t),
- ODP_CACHE_LINE_SIZE, 0);
-
- sched = odp_shm_addr(shm);
-
- if (sched == NULL) {
- ODP_ERR("Schedule[iquery] "
- "init: shm reserve.\n");
- return -1;
- }
-
- memset(sched, 0, sizeof(sched_global_t));
-
- sched->selfie = shm;
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++)
- odp_rwlock_init(&sched->prios[prio].lock);
-
- for (group = 0; group < NUM_SCHED_GRPS; group++) {
- sched->groups[group].allocated = false;
- odp_rwlock_init(&sched->groups[group].lock);
- }
-
- odp_rwlock_init(&sched->pktio_poll.lock);
-
- for (i = 0; i < PKTIO_CMD_QUEUES; i++) {
- pktio_cmd_queue_t *queue =
- &sched->pktio_poll.queues[i];
-
- ring_init(&queue->ring);
-
- for (k = 0; k < PKTIO_RING_SIZE; k++)
- queue->cmd_index[k] = RING_EMPTY;
- }
-
- for (i = 0; i < NUM_PKTIO_CMD; i++)
- sched->pktio_poll.commands[i].index = PKTIO_CMD_FREE;
-
- ODP_DBG("done\n");
- return 0;
-}
-
-static int schedule_term_global(void)
-{
- uint32_t i;
- odp_shm_t shm = sched->selfie;
-
- for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
- int count = 0;
- odp_event_t events[1];
-
- if (sched->availables[i])
- count = sched_cb_queue_deq_multi(i, events, 1);
-
- if (count < 0)
- sched_cb_queue_destroy_finalize(i);
- else if (count > 0)
- ODP_ERR("Queue (%d) not empty\n", i);
- }
-
- memset(sched, 0, sizeof(sched_global_t));
-
- if (odp_shm_free(shm) < 0) {
- ODP_ERR("Schedule[iquery] "
- "term: shm release.\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * These APIs are used to manipulate thread's interests.
- */
-static void thread_set_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio);
-
-static void thread_clear_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio);
-
-static void thread_set_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *set);
-
-static void thread_clear_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *clear);
-
-static void sched_thread_local_reset(void)
-{
- int prio;
- queue_index_sparse_t *index;
- sparse_bitmap_iterator_t *iterator;
-
- memset(&thread_local, 0, sizeof(sched_thread_local_t));
-
- thread_local.thread = odp_thread_id();
- thread_local.cache.queue = ODP_QUEUE_INVALID;
-
- odp_rwlock_init(&thread_local.lock);
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- index = &thread_local.indexes[prio];
- iterator = &thread_local.iterators[prio];
-
- sparse_bitmap_zero(index);
- sparse_bitmap_iterator(iterator, index);
- }
-}
-
-static int schedule_init_local(void)
-{
- int group;
- sched_group_t *G;
- queue_index_bitmap_t collect;
-
- wapl_bitmap_zero(&collect);
- sched_thread_local_reset();
-
- /* Collect all queue indexes of the schedule groups
- * which this thread has subscribed
- */
- for (group = 0; group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
- odp_rwlock_read_lock(&G->lock);
-
- if ((group < SCHED_GROUP_NAMED || G->allocated) &&
- odp_thrmask_isset(&G->threads, thread_local.thread))
- wapl_bitmap_or(&collect, &collect, &G->queues);
-
- odp_rwlock_read_unlock(&G->lock);
- }
-
- /* Distribute the above collected queue indexes into
- * thread local interests per priority level.
- */
- thread_set_interests(&thread_local, &collect);
-
- /* "Night gathers, and now my watch begins..." */
- sched->threads[thread_local.thread] = &thread_local;
- return 0;
-}
-
-static inline void schedule_release_context(void);
-
-static int schedule_term_local(void)
-{
- int group;
- sched_group_t *G;
-
- if (thread_local.cache.count) {
- ODP_ERR("Locally pre-scheduled events exist.\n");
- return -1;
- }
-
- schedule_release_context();
-
- /* Unsubscribe all named schedule groups */
- for (group = SCHED_GROUP_NAMED;
- group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated && odp_thrmask_isset(
- &G->threads, thread_local.thread))
- odp_thrmask_clr(&G->threads, thread_local.thread);
-
- odp_rwlock_write_unlock(&G->lock);
- }
-
- /* "...for this night and all the nights to come." */
- sched->threads[thread_local.thread] = NULL;
- sched_thread_local_reset();
- return 0;
-}
-
-static int init_sched_queue(uint32_t queue_index,
- const odp_schedule_param_t *sched_param)
-{
- int prio, group, thread;
- sched_prio_t *P;
- sched_group_t *G;
- sched_thread_local_t *local;
-
- prio = sched_param->prio;
- group = sched_param->group;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- /* Named schedule group must be created prior
- * to queue creation to this group.
- */
- if (group >= SCHED_GROUP_NAMED && !G->allocated) {
- odp_rwlock_write_unlock(&G->lock);
- return -1;
- }
-
- /* Record the queue in its priority level globally */
- P = &sched->prios[prio];
-
- odp_rwlock_write_lock(&P->lock);
- wapl_bitmap_set(&P->queues, queue_index);
- odp_rwlock_write_unlock(&P->lock);
-
- /* Record the queue in its schedule group */
- wapl_bitmap_set(&G->queues, queue_index);
-
- /* Cache queue parameters for easy reference */
- memcpy(&sched->queues[queue_index],
- sched_param, sizeof(odp_schedule_param_t));
-
- /* Update all threads in this schedule group to
- * start check this queue index upon scheduling.
- */
- thread = odp_thrmask_first(&G->threads);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_set_interest(local, queue_index, prio);
- thread = odp_thrmask_next(&G->threads, thread);
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return 0;
-}
-
-/*
- * Must be called with schedule group's rwlock held.
- * This is also being used in destroy_schedule_group()
- * to destroy all orphan queues while destroying a whole
- * schedule group.
- */
-static void __destroy_sched_queue(
- sched_group_t *G, uint32_t queue_index)
-{
- int prio, thread;
- sched_prio_t *P;
- sched_thread_local_t *local;
-
- prio = sched->queues[queue_index].prio;
-
- /* Forget the queue in its schedule group */
- wapl_bitmap_clear(&G->queues, queue_index);
-
- /* Forget queue schedule parameters */
- memset(&sched->queues[queue_index],
- 0, sizeof(odp_schedule_param_t));
-
- /* Update all threads in this schedule group to
- * stop check this queue index upon scheduling.
- */
- thread = odp_thrmask_first(&G->threads);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_clear_interest(local, queue_index, prio);
- thread = odp_thrmask_next(&G->threads, thread);
- }
-
- /* Forget the queue in its priority level globally */
- P = &sched->prios[prio];
-
- odp_rwlock_write_lock(&P->lock);
- wapl_bitmap_clear(&P->queues, queue_index);
- odp_rwlock_write_unlock(&P->lock);
-}
-
-static void destroy_sched_queue(uint32_t queue_index)
-{
- int group;
- sched_group_t *G;
-
- group = sched->queues[queue_index].group;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- /* Named schedule group could have been destroyed
- * earlier and left these orphan queues.
- */
- if (group >= SCHED_GROUP_NAMED && !G->allocated) {
- odp_rwlock_write_unlock(&G->lock);
- return;
- }
-
- __destroy_sched_queue(G, queue_index);
- odp_rwlock_write_unlock(&G->lock);
-}
-
-static int pktio_cmd_queue_hash(int pktio, int pktin)
-{
- return (pktio ^ pktin) % PKTIO_CMD_QUEUES;
-}
-
-static inline pktio_cmd_t *alloc_pktio_cmd(void)
-{
- int i;
- pktio_cmd_t *cmd = NULL;
-
- odp_rwlock_write_lock(&sched->pktio_poll.lock);
-
- /* Find next free command */
- for (i = 0; i < NUM_PKTIO_CMD; i++) {
- if (sched->pktio_poll.commands[i].index
- == PKTIO_CMD_FREE) {
- cmd = &sched->pktio_poll.commands[i];
- cmd->index = i;
- break;
- }
- }
-
- odp_rwlock_write_unlock(&sched->pktio_poll.lock);
- return cmd;
-}
-
-static inline void free_pktio_cmd(pktio_cmd_t *cmd)
-{
- odp_rwlock_write_lock(&sched->pktio_poll.lock);
-
- cmd->index = PKTIO_CMD_FREE;
-
- odp_rwlock_write_unlock(&sched->pktio_poll.lock);
-}
-
-static void schedule_pktio_start(int pktio, int count, int pktin[])
-{
- int i, index;
- pktio_cmd_t *cmd;
-
- if (count > MAX_PKTIN)
- ODP_ABORT("Too many input queues for scheduler\n");
-
- /* Record the active commands count per pktio interface */
- sched->pktio_poll.actives[pktio] = count;
-
- /* Create a pktio poll command per pktin */
- for (i = 0; i < count; i++) {
- cmd = alloc_pktio_cmd();
-
- if (cmd == NULL)
- ODP_ABORT("Scheduler out of pktio commands\n");
-
- index = pktio_cmd_queue_hash(pktio, pktin[i]);
-
- cmd->pktio = pktio;
- cmd->count = 1;
- cmd->pktin[0] = pktin[i];
- ring_enq(&sched->pktio_poll.queues[index].ring,
- PKTIO_RING_MASK, cmd->index);
- }
-}
-
-static int schedule_pktio_stop(int pktio, int pktin ODP_UNUSED)
-{
- int remains;
-
- odp_rwlock_write_lock(&sched->pktio_poll.lock);
-
- sched->pktio_poll.actives[pktio]--;
- remains = sched->pktio_poll.actives[pktio];
-
- odp_rwlock_write_unlock(&sched->pktio_poll.lock);
- return remains;
-}
-
-#define DO_SCHED_LOCK() odp_rwlock_read_lock(&thread_local.lock)
-#define DO_SCHED_UNLOCK() odp_rwlock_read_unlock(&thread_local.lock)
-
-static inline bool do_schedule_prio(int prio);
-
-static inline int pop_cache_events(odp_event_t ev[], unsigned int max)
-{
- int k = 0;
- event_cache_t *cache;
-
- cache = &thread_local.cache;
- while (cache->count && max) {
- ev[k] = *cache->top++;
- k++;
- max--;
- cache->count--;
- }
-
- return k;
-}
-
-static inline void assign_queue_handle(odp_queue_t *handle)
-{
- if (handle)
- *handle = thread_local.cache.queue;
-}
-
-static inline void pktio_poll_input(void)
-{
- int i, hash;
- uint32_t index;
-
- ring_t *ring;
- pktio_cmd_t *cmd;
-
- /*
- * Each thread starts the search for a poll command
- * from the hash(threadID) queue to mitigate contentions.
- * If the queue is empty, it moves to other queues.
- *
- * Most of the times, the search stops on the first
- * command found to optimize multi-threaded performance.
- * A small portion of polls have to do full iteration to
- * avoid packet input starvation when there are less
- * threads than command queues.
- */
- hash = thread_local.thread % PKTIO_CMD_QUEUES;
-
- for (i = 0; i < PKTIO_CMD_QUEUES; i++,
- hash = (hash + 1) % PKTIO_CMD_QUEUES) {
- ring = &sched->pktio_poll.queues[hash].ring;
- index = ring_deq(ring, PKTIO_RING_MASK);
-
- if (odp_unlikely(index == RING_EMPTY))
- continue;
-
- cmd = &sched->pktio_poll.commands[index];
-
- /* Poll packet input */
- if (odp_unlikely(sched_cb_pktin_poll(cmd->pktio,
- cmd->count,
- cmd->pktin))) {
- /* Pktio stopped or closed. Remove poll
- * command and call stop_finalize when all
- * commands of the pktio has been removed.
- */
- if (schedule_pktio_stop(cmd->pktio,
- cmd->pktin[0]) == 0)
- sched_cb_pktio_stop_finalize(cmd->pktio);
-
- free_pktio_cmd(cmd);
- } else {
- /* Continue scheduling the pktio */
- ring_enq(ring, PKTIO_RING_MASK, index);
-
- /* Do not iterate through all pktin poll
- * command queues every time.
- */
- if (odp_likely(thread_local.pktin_polls & 0xF))
- break;
- }
- }
-
- thread_local.pktin_polls++;
-}
-
-/*
- * Schedule queues
- */
-static int do_schedule(odp_queue_t *out_queue,
- odp_event_t out_ev[], unsigned int max_num)
-{
- int prio, count;
-
- /* Consume locally cached events */
- count = pop_cache_events(out_ev, max_num);
- if (count > 0) {
- assign_queue_handle(out_queue);
- return count;
- }
-
- schedule_release_context();
-
- if (odp_unlikely(thread_local.pause))
- return count;
-
- DO_SCHED_LOCK();
- /* Schedule events */
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- /* Round robin iterate the interested queue
- * indexes in this priority level to compete
- * and consume available queues
- */
- if (!do_schedule_prio(prio))
- continue;
-
- count = pop_cache_events(out_ev, max_num);
- assign_queue_handle(out_queue);
- DO_SCHED_UNLOCK();
- return count;
- }
-
- DO_SCHED_UNLOCK();
-
- /* Poll packet input when there are no events */
- pktio_poll_input();
- return 0;
-}
-
-static int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
- odp_event_t out_ev[], unsigned int max_num)
-{
- int count, first = 1;
- odp_time_t next, wtime;
-
- while (1) {
- count = do_schedule(out_queue, out_ev, max_num);
-
- if (count)
- break;
-
- if (wait == ODP_SCHED_WAIT)
- continue;
-
- if (wait == ODP_SCHED_NO_WAIT)
- break;
-
- if (first) {
- wtime = odp_time_local_from_ns(wait);
- next = odp_time_sum(odp_time_local(), wtime);
- first = 0;
- continue;
- }
-
- if (odp_time_cmp(next, odp_time_local()) < 0)
- break;
- }
-
- return count;
-}
-
-static odp_event_t schedule(odp_queue_t *out_queue, uint64_t wait)
-{
- odp_event_t ev;
-
- ev = ODP_EVENT_INVALID;
-
- schedule_loop(out_queue, wait, &ev, 1);
-
- return ev;
-}
-
-static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
- odp_event_t events[], int num)
-{
- return schedule_loop(out_queue, wait, events, num);
-}
-
-static void schedule_pause(void)
-{
- thread_local.pause = 1;
-}
-
-static void schedule_resume(void)
-{
- thread_local.pause = 0;
-}
-
-static uint64_t schedule_wait_time(uint64_t ns)
-{
- return ns;
-}
-
-static int number_of_priorites(void)
-{
- return NUM_SCHED_PRIO;
-}
-
-/*
- * Create a named schedule group with pre-defined
- * set of subscription threads.
- *
- * Sched queues belonging to this group must be
- * created after the group creation. Upon creation
- * the group holds 0 sched queues.
- */
-static odp_schedule_group_t schedule_group_create(
- const char *name, const odp_thrmask_t *mask)
-{
- int group;
- sched_group_t *G;
-
- for (group = SCHED_GROUP_NAMED;
- group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
-
- odp_rwlock_write_lock(&G->lock);
- if (!G->allocated) {
- strncpy(G->name, name ? name : "",
- ODP_SCHED_GROUP_NAME_LEN - 1);
- odp_thrmask_copy(&G->threads, mask);
- wapl_bitmap_zero(&G->queues);
-
- G->allocated = true;
- odp_rwlock_write_unlock(&G->lock);
- return (odp_schedule_group_t)group;
- }
- odp_rwlock_write_unlock(&G->lock);
- }
-
- return ODP_SCHED_GROUP_INVALID;
-}
-
-static inline void __destroy_group_queues(sched_group_t *group)
-{
- unsigned int index;
- queue_index_bitmap_t queues;
- wapl_bitmap_iterator_t it;
-
- /* Constructor */
- wapl_bitmap_zero(&queues);
- wapl_bitmap_copy(&queues, &group->queues);
- wapl_bitmap_iterator(&it, &queues);
-
- /* Walk through the queue index bitmap */
- for (it.start(&it); it.has_next(&it);) {
- index = it.next(&it);
- __destroy_sched_queue(group, index);
- }
-}
-
-/*
- * Destroy a named schedule group.
- */
-static int schedule_group_destroy(odp_schedule_group_t group)
-{
- int done = -1;
- sched_group_t *G;
-
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return -1;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated) {
- /* Destroy all queues in this schedule group
- * and leave no orphan queues.
- */
- __destroy_group_queues(G);
-
- done = 0;
- G->allocated = false;
- wapl_bitmap_zero(&G->queues);
- odp_thrmask_zero(&G->threads);
- memset(G->name, 0, ODP_SCHED_GROUP_NAME_LEN);
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return done;
-}
-
-static odp_schedule_group_t schedule_group_lookup(const char *name)
-{
- int group;
- sched_group_t *G;
-
- for (group = SCHED_GROUP_NAMED;
- group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
-
- odp_rwlock_read_lock(&G->lock);
- if (strcmp(name, G->name) == 0) {
- odp_rwlock_read_unlock(&G->lock);
- return (odp_schedule_group_t)group;
- }
- odp_rwlock_read_unlock(&G->lock);
- }
-
- return ODP_SCHED_GROUP_INVALID;
-}
-
-static int schedule_group_join(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
-{
- int done = -1, thread;
- sched_group_t *G;
- sched_thread_local_t *local;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated) {
- /* Make new joined threads to start check
- * queue indexes in this schedule group
- */
- thread = odp_thrmask_first(mask);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_set_interests(local, &G->queues);
-
- odp_thrmask_set(&G->threads, thread);
- thread = odp_thrmask_next(mask, thread);
- }
- done = 0;
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return done;
-}
-
-static int schedule_group_leave(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
-{
- int done = -1, thread;
- sched_group_t *G;
- sched_thread_local_t *local;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated) {
- /* Make leaving threads to stop check
- * queue indexes in this schedule group
- */
- thread = odp_thrmask_first(mask);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_clear_interests(local, &G->queues);
-
- odp_thrmask_clr(&G->threads, thread);
- thread = odp_thrmask_next(mask, thread);
- }
- done = 0;
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return done;
-}
-
-static int schedule_group_thrmask(odp_schedule_group_t group,
- odp_thrmask_t *thrmask)
-{
- int done = -1;
- sched_group_t *G;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_read_lock(&G->lock);
-
- if (G->allocated && thrmask != NULL) {
- done = 0;
- odp_thrmask_copy(thrmask, &G->threads);
- }
-
- odp_rwlock_read_unlock(&G->lock);
- return done;
-}
-
-static int schedule_group_info(odp_schedule_group_t group,
- odp_schedule_group_info_t *info)
-{
- int done = -1;
- sched_group_t *G;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_read_lock(&G->lock);
-
- if (G->allocated && info != NULL) {
- done = 0;
- info->name = G->name;
- odp_thrmask_copy(&info->thrmask, &G->threads);
- }
-
- odp_rwlock_read_unlock(&G->lock);
- return done;
-}
-
-/* This function is a no-op */
-static void schedule_prefetch(int num ODP_UNUSED)
-{
-}
-
-/*
- * Limited to join and leave pre-defined schedule groups
- * before and after thread local initialization or termination.
- */
-static int group_add_thread(odp_schedule_group_t group, int thread)
-{
- sched_group_t *G;
-
- if (group < 0 || group >= SCHED_GROUP_NAMED)
- return -1;
-
- G = &sched->groups[group];
-
- odp_rwlock_write_lock(&G->lock);
- odp_thrmask_set(&G->threads, thread);
- odp_rwlock_write_unlock(&G->lock);
- return 0;
-}
-
-static int group_remove_thread(odp_schedule_group_t group, int thread)
-{
- sched_group_t *G;
-
- if (group < 0 || group >= SCHED_GROUP_NAMED)
- return -1;
-
- G = &sched->groups[group];
-
- odp_rwlock_write_lock(&G->lock);
- odp_thrmask_clr(&G->threads, thread);
- odp_rwlock_write_unlock(&G->lock);
- return 0;
-}
-
-static int number_of_groups(void)
-{
- return NUM_SCHED_GRPS;
-}
-
-static int schedule_sched_queue(uint32_t queue_index)
-{
- /* Set available indications globally */
- sched->availables[queue_index] = true;
- return 0;
-}
-
-static int schedule_unsched_queue(uint32_t queue_index)
-{
- /* Clear available indications globally */
- sched->availables[queue_index] = false;
- return 0;
-}
-
-static void schedule_release_atomic(void)
-{
- unsigned int queue_index;
-
- if ((thread_local.atomic != NULL) &&
- (thread_local.cache.count == 0)) {
- queue_index = thread_local.atomic - sched->availables;
- thread_local.atomic = NULL;
- sched->availables[queue_index] = true;
- }
-}
-
-static inline int ordered_own_turn(queue_entry_t *queue)
-{
- uint64_t ctx;
-
- ctx = odp_atomic_load_acq_u64(&queue->s.ordered.ctx);
-
- return ctx == thread_local.ordered.ctx;
-}
-
-static inline void wait_for_order(queue_entry_t *queue)
-{
- /* Busy loop to synchronize ordered processing */
- while (1) {
- if (ordered_own_turn(queue))
- break;
- odp_cpu_pause();
- }
-}
-
-/**
- * Perform stashed enqueue operations
- *
- * Should be called only when already in order.
- */
-static inline void ordered_stash_release(void)
-{
- int i;
-
- for (i = 0; i < thread_local.ordered.stash_num; i++) {
- queue_entry_t *queue;
- odp_buffer_hdr_t **buf_hdr;
- int num;
-
- queue = thread_local.ordered.stash[i].queue;
- buf_hdr = thread_local.ordered.stash[i].buf_hdr;
- num = thread_local.ordered.stash[i].num;
-
- queue_enq_multi(queue, buf_hdr, num);
- }
- thread_local.ordered.stash_num = 0;
-}
-
-static inline void release_ordered(void)
-{
- unsigned i;
- queue_entry_t *queue;
-
- queue = thread_local.ordered.src_queue;
-
- wait_for_order(queue);
-
- /* Release all ordered locks */
- for (i = 0; i < queue->s.param.sched.lock_count; i++) {
- if (!thread_local.ordered.lock_called.u8[i])
- odp_atomic_store_rel_u64(&queue->s.ordered.lock[i],
- thread_local.ordered.ctx + 1);
- }
-
- thread_local.ordered.lock_called.all = 0;
- thread_local.ordered.src_queue = NULL;
- thread_local.ordered.in_order = 0;
-
- ordered_stash_release();
-
- /* Next thread can continue processing */
- odp_atomic_add_rel_u64(&queue->s.ordered.ctx, 1);
-}
-
-static void schedule_release_ordered(void)
-{
- queue_entry_t *queue;
-
- queue = thread_local.ordered.src_queue;
-
- if (odp_unlikely(!queue || thread_local.cache.count))
- return;
-
- release_ordered();
-}
-
-static inline void schedule_release_context(void)
-{
- if (thread_local.ordered.src_queue != NULL)
- release_ordered();
- else
- schedule_release_atomic();
-}
-
-static int schedule_ord_enq_multi(uint32_t queue_index, void *buf_hdr[],
- int num, int *ret)
-{
- int i;
- uint32_t stash_num = thread_local.ordered.stash_num;
- queue_entry_t *dst_queue = get_qentry(queue_index);
- queue_entry_t *src_queue = thread_local.ordered.src_queue;
-
- if (!thread_local.ordered.src_queue || thread_local.ordered.in_order)
- return 0;
-
- if (ordered_own_turn(src_queue)) {
- /* Own turn, so can do enqueue directly. */
- thread_local.ordered.in_order = 1;
- ordered_stash_release();
- return 0;
- }
-
- if (odp_unlikely(stash_num >= MAX_ORDERED_STASH)) {
- /* If the local stash is full, wait until it is our turn and
- * then release the stash and do enqueue directly. */
- wait_for_order(src_queue);
-
- thread_local.ordered.in_order = 1;
-
- ordered_stash_release();
- return 0;
- }
-
- thread_local.ordered.stash[stash_num].queue = dst_queue;
- thread_local.ordered.stash[stash_num].num = num;
- for (i = 0; i < num; i++)
- thread_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
-
- thread_local.ordered.stash_num++;
-
- *ret = num;
- return 1;
-}
-
-static void order_lock(void)
-{
- queue_entry_t *queue;
-
- queue = thread_local.ordered.src_queue;
-
- if (!queue)
- return;
-
- wait_for_order(queue);
-}
-
-static void order_unlock(void)
-{
-}
-
-static void schedule_order_lock(unsigned lock_index)
-{
- odp_atomic_u64_t *ord_lock;
- queue_entry_t *queue;
-
- queue = thread_local.ordered.src_queue;
-
- ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count &&
- !thread_local.ordered.lock_called.u8[lock_index]);
-
- ord_lock = &queue->s.ordered.lock[lock_index];
-
- /* Busy loop to synchronize ordered processing */
- while (1) {
- uint64_t lock_seq;
-
- lock_seq = odp_atomic_load_acq_u64(ord_lock);
-
- if (lock_seq == thread_local.ordered.ctx) {
- thread_local.ordered.lock_called.u8[lock_index] = 1;
- return;
- }
- odp_cpu_pause();
- }
-}
-
-static void schedule_order_unlock(unsigned lock_index)
-{
- odp_atomic_u64_t *ord_lock;
- queue_entry_t *queue;
-
- queue = thread_local.ordered.src_queue;
-
- ODP_ASSERT(queue && lock_index <= queue->s.param.sched.lock_count);
-
- ord_lock = &queue->s.ordered.lock[lock_index];
-
- ODP_ASSERT(thread_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
-
- odp_atomic_store_rel_u64(ord_lock, thread_local.ordered.ctx + 1);
-}
-
-static unsigned schedule_max_ordered_locks(void)
-{
- return MAX_ORDERED_LOCKS_PER_QUEUE;
-}
-
-static void schedule_save_context(queue_entry_t *queue)
-{
- if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ATOMIC) {
- thread_local.atomic = &sched->availables[queue->s.index];
- } else if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
- uint64_t ctx;
- odp_atomic_u64_t *next_ctx;
-
- next_ctx = &queue->s.ordered.next_ctx;
- ctx = odp_atomic_fetch_inc_u64(next_ctx);
-
- thread_local.ordered.ctx = ctx;
- thread_local.ordered.src_queue = queue;
- }
-}
-
-/* Fill in scheduler interface */
-const schedule_fn_t schedule_iquery_fn = {
- .pktio_start = schedule_pktio_start,
- .thr_add = group_add_thread,
- .thr_rem = group_remove_thread,
- .num_grps = number_of_groups,
- .init_queue = init_sched_queue,
- .destroy_queue = destroy_sched_queue,
- .sched_queue = schedule_sched_queue,
- .unsched_queue = schedule_unsched_queue,
- .ord_enq_multi = schedule_ord_enq_multi,
- .init_global = schedule_init_global,
- .term_global = schedule_term_global,
- .init_local = schedule_init_local,
- .term_local = schedule_term_local,
- .order_lock = order_lock,
- .order_unlock = order_unlock,
- .max_ordered_locks = schedule_max_ordered_locks,
- .save_context = schedule_save_context,
-};
-
-/* Fill in scheduler API calls */
-const schedule_api_t schedule_iquery_api = {
- .schedule_wait_time = schedule_wait_time,
- .schedule = schedule,
- .schedule_multi = schedule_multi,
- .schedule_pause = schedule_pause,
- .schedule_resume = schedule_resume,
- .schedule_release_atomic = schedule_release_atomic,
- .schedule_release_ordered = schedule_release_ordered,
- .schedule_prefetch = schedule_prefetch,
- .schedule_num_prio = number_of_priorites,
- .schedule_group_create = schedule_group_create,
- .schedule_group_destroy = schedule_group_destroy,
- .schedule_group_lookup = schedule_group_lookup,
- .schedule_group_join = schedule_group_join,
- .schedule_group_leave = schedule_group_leave,
- .schedule_group_thrmask = schedule_group_thrmask,
- .schedule_group_info = schedule_group_info,
- .schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
-};
-
-static void thread_set_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio)
-{
- queue_index_sparse_t *index;
-
- if (thread == NULL)
- return;
-
- if (prio >= NUM_SCHED_PRIO)
- return;
-
- index = &thread->indexes[prio];
-
- odp_rwlock_write_lock(&thread->lock);
- sparse_bitmap_set(index, queue_index);
- odp_rwlock_write_unlock(&thread->lock);
-}
-
-static void thread_clear_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio)
-{
- queue_index_sparse_t *index;
-
- if (thread == NULL)
- return;
-
- if (prio >= NUM_SCHED_PRIO)
- return;
-
- index = &thread->indexes[prio];
-
- odp_rwlock_write_lock(&thread->lock);
- sparse_bitmap_clear(index, queue_index);
- odp_rwlock_write_unlock(&thread->lock);
-}
-
-static void thread_set_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *set)
-{
- int prio;
- sched_prio_t *P;
- unsigned int queue_index;
- queue_index_bitmap_t subset;
- wapl_bitmap_iterator_t it;
-
- if (thread == NULL || set == NULL)
- return;
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- P = &sched->prios[prio];
- odp_rwlock_read_lock(&P->lock);
-
- /* The collection of queue indexes in 'set'
- * may belong to several priority levels.
- */
- wapl_bitmap_zero(&subset);
- wapl_bitmap_and(&subset, &P->queues, set);
-
- odp_rwlock_read_unlock(&P->lock);
-
- /* Add the subset to local indexes */
- wapl_bitmap_iterator(&it, &subset);
- for (it.start(&it); it.has_next(&it);) {
- queue_index = it.next(&it);
- thread_set_interest(thread, queue_index, prio);
- }
- }
-}
-
-static void thread_clear_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *clear)
-{
- int prio;
- sched_prio_t *P;
- unsigned int queue_index;
- queue_index_bitmap_t subset;
- wapl_bitmap_iterator_t it;
-
- if (thread == NULL || clear == NULL)
- return;
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- P = &sched->prios[prio];
- odp_rwlock_read_lock(&P->lock);
-
- /* The collection of queue indexes in 'clear'
- * may belong to several priority levels.
- */
- wapl_bitmap_zero(&subset);
- wapl_bitmap_and(&subset, &P->queues, clear);
-
- odp_rwlock_read_unlock(&P->lock);
-
- /* Remove the subset from local indexes */
- wapl_bitmap_iterator(&it, &subset);
- for (it.start(&it); it.has_next(&it);) {
- queue_index = it.next(&it);
- thread_clear_interest(thread, queue_index, prio);
- }
- }
-}
-
-static inline bool is_atomic_queue(unsigned int queue_index)
-{
- return (sched->queues[queue_index].sync
- == ODP_SCHED_SYNC_ATOMIC);
-}
-
-static inline bool is_ordered_queue(unsigned int queue_index)
-{
- return (sched->queues[queue_index].sync
- == ODP_SCHED_SYNC_ORDERED);
-}
-
-static inline bool compete_atomic_queue(unsigned int queue_index)
-{
- bool expected = sched->availables[queue_index];
-
- if (expected && is_atomic_queue(queue_index)) {
- expected = __atomic_compare_exchange_n(
- &sched->availables[queue_index],
- &expected, false, 0,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- }
-
- return expected;
-}
-
-static inline int consume_queue(int prio, unsigned int queue_index)
-{
- int count;
- unsigned int max = MAX_DEQ;
- event_cache_t *cache = &thread_local.cache;
-
- /* Low priorities have smaller batch size to limit
- * head of line blocking latency.
- */
- if (odp_unlikely(prio > ODP_SCHED_PRIO_DEFAULT))
- max = MAX_DEQ / 2;
-
- /* For ordered queues we want consecutive events to
- * be dispatched to separate threads, so do not cache
- * them locally.
- */
- if (is_ordered_queue(queue_index))
- max = 1;
-
- count = sched_cb_queue_deq_multi(
- queue_index, cache->stash, max);
-
- if (count < 0) {
- DO_SCHED_UNLOCK();
- sched_cb_queue_destroy_finalize(queue_index);
- DO_SCHED_LOCK();
- return 0;
- }
-
- if (count == 0)
- return 0;
-
- cache->top = &cache->stash[0];
- cache->count = count;
- cache->queue = sched_cb_queue_handle(queue_index);
- return count;
-}
-
-static inline bool do_schedule_prio(int prio)
-{
- int nbits, next, end;
- unsigned int queue_index;
- sparse_bitmap_iterator_t *it;
-
- it = &thread_local.iterators[prio];
- nbits = (int)*it->_base.last;
-
- /* No interests at all! */
- if (nbits <= 0)
- return false;
-
- /* In critical path, cannot afford iterator calls,
- * do it manually with internal knowledge
- */
- it->_start = (it->_start + 1) % nbits;
- end = it->_start + nbits;
-
- for (next = it->_start; next < end; next++) {
- queue_index = it->_base.il[next % nbits];
-
- if (!compete_atomic_queue(queue_index))
- continue;
-
- if (!consume_queue(prio, queue_index))
- continue;
-
- return true;
- }
-
- return false;
-}
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
new file mode 100644
index 000000000..5166fb6d0
--- /dev/null
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -0,0 +1,2209 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/atomic.h>
+#include <odp/api/cpu.h>
+#include <odp/api/hints.h>
+#include <odp/api/schedule.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/sync.h>
+#include <odp/api/thread.h>
+#include <odp/api/thrmask.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/schedule_inline_types.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp/api/plat/time_inlines.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_ishmpool_internal.h>
+
+#include <odp/api/plat/cpu_inlines.h>
+#include <odp_llqueue.h>
+#include <odp_queue_scalable_internal.h>
+#include <odp_schedule_if.h>
+#include <odp_bitset.h>
+#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_timer_internal.h>
+
+#include <limits.h>
+#include <stdbool.h>
+#include <string.h>
+
+#include <odp/api/plat/ticketlock_inlines.h>
+#define LOCK(a) odp_ticketlock_lock((a))
+#define UNLOCK(a) odp_ticketlock_unlock((a))
+
+#define MAXTHREADS ATOM_BITSET_SIZE
+
+#define FLAG_PKTIN 0x80
+
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
+ "Number_of_queues_is_not_power_of_two");
+
+#define SCHED_GROUP_JOIN 0
+#define SCHED_GROUP_LEAVE 1
+#define NUM_AUTO_GROUPS (ODP_SCHED_GROUP_CONTROL + 1)
+
+typedef struct {
+ odp_shm_t shm;
+ _odp_ishm_pool_t *sched_shm_pool;
+ /** Currently used scheduler groups */
+ sched_group_mask_t sg_free;
+ sched_group_t *sg_vec[MAX_SCHED_GROUP];
+ /** Group lock for MT-safe APIs */
+ odp_spinlock_t sched_grp_lock;
+ /** Initialization lock */
+ odp_spinlock_t init_lock;
+ /** Per thread state */
+ sched_scalable_thread_state_t thread_state[MAXTHREADS];
+ uint16_t poll_count[CONFIG_PKTIO_ENTRIES];
+ /* Scheduler interface config options (not used in fast path) */
+ schedule_config_t config_if;
+} sched_global_t;
+
+static sched_global_t *global;
+
+__thread sched_scalable_thread_state_t *_odp_sched_ts;
+
+static int thread_state_init(int tidx)
+{
+ sched_scalable_thread_state_t *ts;
+ uint32_t i;
+
+ _ODP_ASSERT(tidx < MAXTHREADS);
+ ts = &global->thread_state[tidx];
+ ts->atomq = NULL;
+ ts->src_schedq = NULL;
+ ts->rctx = NULL;
+ ts->pause = false;
+ ts->out_of_order = false;
+ ts->tidx = tidx;
+ ts->dequeued = 0;
+ ts->ticket = TICKET_INVALID;
+ ts->priv_rvec_free = 0;
+ ts->rvec_free = (1ULL << TS_RVEC_SIZE) - 1;
+ ts->num_schedq = 0;
+ ts->sg_sem = 1; /* Start with sched group semaphore changed */
+ ts->loop_cnt = 0;
+ memset(ts->sg_actual, 0, sizeof(ts->sg_actual));
+ for (i = 0; i < TS_RVEC_SIZE; i++) {
+ ts->rvec[i].rvec_free = &ts->rvec_free;
+ ts->rvec[i].idx = i;
+ }
+ _odp_sched_ts = ts;
+
+ return 0;
+}
+
+static void insert_schedq_in_list(sched_scalable_thread_state_t *ts,
+ sched_queue_t *schedq)
+{
+ /* Find slot for schedq */
+ for (uint32_t i = 0; i < ts->num_schedq; i++) {
+ /* Lower value is higher priority and closer to start of list */
+ if (schedq->prio <= ts->schedq_list[i]->prio) {
+ /* This is the slot! */
+ sched_queue_t *tmp;
+
+ tmp = ts->schedq_list[i];
+ ts->schedq_list[i] = schedq;
+ schedq = tmp;
+ /* Continue the insertion procedure with the
+ * new schedq.
+ */
+ }
+ }
+ if (ts->num_schedq == SCHEDQ_PER_THREAD)
+ _ODP_ABORT("Too many schedqs\n");
+ ts->schedq_list[ts->num_schedq++] = schedq;
+}
+
+static void remove_schedq_from_list(sched_scalable_thread_state_t *ts,
+ sched_queue_t *schedq)
+{
+ /* Find schedq */
+ for (uint32_t i = 0; i < ts->num_schedq; i++)
+ if (ts->schedq_list[i] == schedq) {
+ /* Move remaining schedqs */
+ for (uint32_t j = i + 1; j < ts->num_schedq; j++)
+ ts->schedq_list[j - 1] = ts->schedq_list[j];
+ ts->num_schedq--;
+ return;
+ }
+ _ODP_ABORT("Cannot find schedq\n");
+}
+
+/*******************************************************************************
+ * Scheduler queues
+ ******************************************************************************/
+#ifndef odp_container_of
+#define odp_container_of(pointer, type, member) \
+ ((type *)(void *)(((char *)pointer) - offsetof(type, member)))
+#endif
+
+static inline void schedq_init(sched_queue_t *schedq, uint32_t prio)
+{
+ llqueue_init(&schedq->llq);
+ schedq->prio = prio;
+}
+
+static inline sched_elem_t *schedq_peek(sched_queue_t *schedq)
+{
+ struct llnode *ptr;
+
+ ptr = llq_head(&schedq->llq);
+ return odp_container_of(ptr, sched_elem_t, node);
+}
+
+static inline odp_bool_t schedq_cond_pop(sched_queue_t *schedq,
+ sched_elem_t *elem)
+{
+ return llq_dequeue_cond(&schedq->llq, &elem->node);
+}
+
+static inline void schedq_push(sched_queue_t *schedq, sched_elem_t *elem)
+{
+ llq_enqueue(&schedq->llq, &elem->node);
+}
+
+static inline odp_bool_t schedq_cond_rotate(sched_queue_t *schedq,
+ sched_elem_t *elem)
+{
+ return llq_cond_rotate(&schedq->llq, &elem->node);
+}
+
+static inline bool schedq_elem_on_queue(sched_elem_t *elem)
+{
+ return llq_on_queue(&elem->node);
+}
+
+/*******************************************************************************
+ * Shared metadata between scheduler and queue
+ ******************************************************************************/
+
+void _odp_sched_update_enq(sched_elem_t *q, uint32_t actual)
+{
+ qschedstate_t oss, nss;
+ uint32_t ticket;
+
+ oss = q->qschst;
+ /* Update event counter, optionally taking a ticket. */
+ do {
+ ticket = TICKET_INVALID;
+ nss = oss;
+ nss.numevts += actual;
+ if (odp_unlikely(oss.numevts <= 0 && nss.numevts > 0))
+ /* E -> NE transition */
+ if (q->qschst_type != ODP_SCHED_SYNC_ATOMIC ||
+ oss.cur_ticket == oss.nxt_ticket)
+ /* Parallel or ordered queues: always take
+ * ticket.
+ * Atomic queue: only take ticket if one is
+ * immediately available.
+ * Otherwise ticket already taken => queue
+ * processed by some thread.
+ */
+ ticket = nss.nxt_ticket++;
+ /* Else queue already was non-empty. */
+ /* Attempt to update numevts counter and optionally take ticket. */
+ } while (!__atomic_compare_exchange(&q->qschst, &oss, &nss,
+ true, __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+
+ if (odp_unlikely(ticket != TICKET_INVALID)) {
+ /* Wait for our turn to update schedq. */
+ if (odp_unlikely(__atomic_load_n(&q->qschst.cur_ticket,
+ __ATOMIC_ACQUIRE) != ticket))
+ _odp_wait_until_eq_acq_u8(&q->qschst.cur_ticket, ticket);
+
+ /* Enqueue at end of scheduler queue */
+ /* We are here because of empty-to-non-empty transition
+ * This means queue must be pushed to schedq if possible
+ * but we can't do that if it already is on the schedq
+ */
+ if (odp_likely(!schedq_elem_on_queue(q) &&
+ q->pop_deficit == 0)) {
+ /* Queue not already on schedq and no pop deficit means
+ * we can push queue to schedq */
+ schedq_push(q->schedq, q);
+ } else {
+ /* Missed push => cancels one missed pop */
+ q->pop_deficit--;
+ }
+ atomic_store_release(&q->qschst.cur_ticket, ticket + 1,
+ /*readonly=*/false);
+ }
+ /* Else queue was not empty or atomic queue already busy. */
+}
+
+void _odp_sched_update_enq_sp(sched_elem_t *q, uint32_t actual)
+{
+ qschedstate_t oss, nss;
+ uint32_t ticket;
+
+ oss = q->qschst;
+ /* Update event counter, optionally taking a ticket. */
+ ticket = TICKET_INVALID;
+ nss = oss;
+ nss.numevts += actual;
+ if (odp_unlikely(oss.numevts <= 0 && nss.numevts > 0)) {
+ /* E -> NE transition */
+ if (q->qschst_type != ODP_SCHED_SYNC_ATOMIC ||
+ oss.cur_ticket == oss.nxt_ticket) {
+ /* Parallel or ordered queues: always take
+ * ticket.
+ * Atomic queue: only take ticket if one is
+ * immediately available. Otherwise ticket already
+ * taken => queue owned/processed by some thread
+ */
+ ticket = nss.nxt_ticket++;
+ }
+ }
+ /* Else queue already was non-empty. */
+ /* Attempt to update numevts counter and optionally take ticket. */
+ q->qschst = nss;
+
+ if (odp_unlikely(ticket != TICKET_INVALID)) {
+ /* Enqueue at end of scheduler queue */
+ /* We are here because of empty-to-non-empty transition
+ * This means queue must be pushed to schedq if possible
+ * but we can't do that if it already is on the schedq
+ */
+ if (odp_likely(!schedq_elem_on_queue(q) &&
+ q->pop_deficit == 0)) {
+ /* Queue not already on schedq and no pop deficit means
+ * we can push queue to schedq */
+ schedq_push(q->schedq, q);
+ } else {
+ /* Missed push => cancels one missed pop */
+ q->pop_deficit--;
+ }
+ q->qschst.cur_ticket = ticket + 1;
+ }
+ /* Else queue was not empty or atomic queue already busy. */
+}
+
+#ifndef CONFIG_QSCHST_LOCK
+/* The scheduler is the only entity that performs the dequeue from a queue. */
/* Update a queue's scheduling state (qschst) after this thread dequeued
 * 'actual' events from it. Lock-free variant (CONFIG_QSCHST_LOCK not set).
 * For atomic queues the caller must hold the queue's ticket; for
 * parallel/ordered queues the state is updated with a CAS loop and a
 * ticket is taken only when the queue must later be popped from or
 * rotated on its schedq.
 */
static void
sched_update_deq(sched_elem_t *q,
		 uint32_t actual,
		 bool atomic) __attribute__((always_inline));
static inline void
sched_update_deq(sched_elem_t *q,
		 uint32_t actual, bool atomic)
{
	qschedstate_t oss, nss;
	uint32_t ticket;

	if (atomic) {
		bool pushed = false;

		/* We own this atomic queue, only we can dequeue from it and
		 * thus decrease numevts. Other threads may enqueue and thus
		 * increase numevts.
		 * This means that numevts can't unexpectedly become 0 and
		 * invalidate a push operation already performed
		 */
		oss = q->qschst;
		do {
			_ODP_ASSERT(oss.cur_ticket == _odp_sched_ts->ticket);
			nss = oss;
			nss.numevts -= actual;
			/* Queue still non-empty: make it schedulable again.
			 * Push at most once even if the CAS below retries. */
			if (nss.numevts > 0 && !pushed) {
				schedq_push(q->schedq, q);
				pushed = true;
			}
			/* Attempt to release ticket expecting our view of
			 * numevts to be correct
			 * Unfortunately nxt_ticket will also be included in
			 * the CAS operation
			 */
			nss.cur_ticket = _odp_sched_ts->ticket + 1;
		} while (odp_unlikely(!__atomic_compare_exchange(&q->qschst, &oss, &nss, true,
								 __ATOMIC_RELEASE,
								 __ATOMIC_RELAXED)));
		return;
	}

	/* Parallel/ordered queue: multiple threads may race on qschst. */
	oss = q->qschst;
	do {
		ticket = TICKET_INVALID;
		nss = oss;
		nss.numevts -= actual;
		nss.wrr_budget -= actual;
		if ((oss.numevts > 0 && nss.numevts <= 0) ||
		    oss.wrr_budget <= actual) {
			/* If we have emptied parallel/ordered queue or
			 * exhausted its WRR budget, we need a ticket
			 * for a later pop.
			 */
			ticket = nss.nxt_ticket++;
			/* Reset wrr_budget as we might also push the
			 * queue to the schedq.
			 */
			nss.wrr_budget = CONFIG_WRR_WEIGHT;
		}
		/* Attempt to update numevts and optionally take ticket. */
	} while (!__atomic_compare_exchange(&q->qschst, &oss, &nss,
					    true, __ATOMIC_RELAXED, __ATOMIC_RELAXED));

	if (odp_unlikely(ticket != TICKET_INVALID)) {
		_ODP_ASSERT(q->qschst_type != ODP_SCHED_SYNC_ATOMIC);
		/* Wait for our turn to update schedq. */
		if (odp_unlikely(__atomic_load_n(&q->qschst.cur_ticket,
						 __ATOMIC_ACQUIRE) != ticket))
			_odp_wait_until_eq_acq_u8(&q->qschst.cur_ticket, ticket);

		/* We are here because of non-empty-to-empty transition or
		 * WRR budget exhausted
		 * This means the queue must be popped from the schedq, now or
		 * later
		 * If there was no NE->E transition but instead the WRR budget
		 * was exhausted, the queue needs to be moved (popped and
		 * pushed) to the tail of the schedq
		 */
		if (oss.numevts > 0 && nss.numevts <= 0) {
			/* NE->E transition, need to pop */
			if (!schedq_elem_on_queue(q) ||
			    !schedq_cond_pop(q->schedq, q)) {
				/* Queue not at head, failed to dequeue
				 * Missed a pop.
				 */
				q->pop_deficit++;
			}
		} else {
			/* WRR budget exhausted
			 * Need to move queue to tail of schedq if possible
			 */
			if (odp_likely(schedq_elem_on_queue(q))) {
				/* Queue is on schedq, try to move it to
				 * the tail
				 */
				(void)schedq_cond_rotate(q->schedq, q);
			}
			/* Else queue not on schedq or not at head of schedq
			 * No pop => no push
			 */
		}
		/* Release the ticket so the next state updater may proceed. */
		atomic_store_release(&q->qschst.cur_ticket, ticket + 1,
				     /*readonly=*/false);
	}
}
+#endif
+
+#ifdef CONFIG_QSCHST_LOCK
/* Update a queue's scheduling state (qschst) after this thread dequeued
 * 'actual' events from it. Serialized variant used when CONFIG_QSCHST_LOCK
 * is defined; the caller must hold the queue's qschlock so plain
 * (non-atomic) accesses to qschst are safe here.
 */
static void
sched_update_deq_sc(sched_elem_t *q,
		    uint32_t actual,
		    bool atomic) __attribute__((always_inline));
static inline void
sched_update_deq_sc(sched_elem_t *q,
		    uint32_t actual, bool atomic)
{
	qschedstate_t oss, nss;
	uint32_t ticket;

	if (atomic) {
		/* Atomic queue: this thread holds the current ticket. */
		_ODP_ASSERT(q->qschst.cur_ticket == _odp_sched_ts->ticket);
		_ODP_ASSERT(q->qschst.cur_ticket != q->qschst.nxt_ticket);
		q->qschst.numevts -= actual;
		q->qschst.cur_ticket = _odp_sched_ts->ticket + 1;
		/* Still non-empty: make queue schedulable again. */
		if (q->qschst.numevts > 0)
			schedq_push(q->schedq, q);
		return;
	}

	oss = q->qschst;
	ticket = TICKET_INVALID;
	nss = oss;
	nss.numevts -= actual;
	nss.wrr_budget -= actual;
	if ((oss.numevts > 0 && nss.numevts <= 0) || oss.wrr_budget <= actual) {
		/* If we emptied the queue or
		 * if we have served the maximum number of events
		 * then we need a ticket for a later pop.
		 */
		ticket = nss.nxt_ticket++;
		/* Also reset wrr_budget as we might also push the
		 * queue to the schedq.
		 */
		nss.wrr_budget = CONFIG_WRR_WEIGHT;
	}
	q->qschst = nss;

	if (ticket != TICKET_INVALID) {
		if (oss.numevts > 0 && nss.numevts <= 0) {
			/* NE->E transition, need to pop */
			if (!schedq_elem_on_queue(q) ||
			    !schedq_cond_pop(q->schedq, q)) {
				/* Queue not at head, failed to dequeue.
				 * Missed a pop.
				 */
				q->pop_deficit++;
			}
		} else {
			/* WRR budget exhausted
			 * Need to move queue to tail of schedq if possible
			 */
			if (odp_likely(schedq_elem_on_queue(q))) {
				/* Queue is on schedq, try to move it to
				 * the tail
				 */
				(void)schedq_cond_rotate(q->schedq, q);
			}
			/* Else queue not on schedq or not at head of schedq
			 * No pop => no push
			 */
		}
		q->qschst.cur_ticket = ticket + 1;
	}
}
+#endif
+
+static inline void sched_update_popd_sc(sched_elem_t *elem)
+{
+ if (elem->pop_deficit != 0 &&
+ schedq_elem_on_queue(elem) &&
+ schedq_cond_pop(elem->schedq, elem))
+ elem->pop_deficit--;
+}
+
+#ifndef CONFIG_QSCHST_LOCK
/* Lock-free wrapper around sched_update_popd_sc(): acquire a ticket to
 * serialise qschst updates for this element, wait for our turn, serve a
 * missed pop and then release the ticket.
 */
static inline void sched_update_popd(sched_elem_t *elem)
{
	/* Take the next ticket; producers/consumers are ordered by it. */
	uint32_t ticket = __atomic_fetch_add(&elem->qschst.nxt_ticket,
					     1,
					     __ATOMIC_RELAXED);
	/* Spin (with acquire semantics) until it is our turn. */
	if (odp_unlikely(__atomic_load_n(&elem->qschst.cur_ticket,
					 __ATOMIC_ACQUIRE) != ticket))
		_odp_wait_until_eq_acq_u8(&elem->qschst.cur_ticket, ticket);

	sched_update_popd_sc(elem);
	/* Pass the ticket to the next waiter. */
	atomic_store_release(&elem->qschst.cur_ticket, ticket + 1,
			     /*readonly=*/false);
}
+#endif
+
+static void signal_threads_add(sched_group_t *sg, uint32_t sgi, uint32_t prio)
+{
+ sched_group_mask_t thrds = sg->thr_wanted;
+ uint32_t thr;
+
+ while (!bitset_is_null(thrds)) {
+ thr = bitset_ffs(thrds) - 1;
+ thrds = bitset_clr(thrds, thr);
+ /* Notify the thread about membership in this
+ * group/priority.
+ */
+ atom_bitset_set(&global->thread_state[thr].sg_wanted[prio],
+ sgi, __ATOMIC_RELEASE);
+ __atomic_store_n(&global->thread_state[thr].sg_sem, 1,
+ __ATOMIC_RELEASE);
+ }
+}
+
/* Register a new ODP queue with scheduler group 'grp' at priority 'prio'.
 * Returns the schedq the queue should use. Queues are spread over the
 * group's xfactor schedq's per priority; the first queue on a priority
 * level triggers join notifications to all wanted threads.
 */
sched_queue_t *_odp_sched_queue_add(odp_schedule_group_t grp, uint32_t prio)
{
	uint32_t sgi;
	sched_group_t *sg;
	uint32_t x;

	_ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP);
	/* Group must be allocated (not in the free set). */
	_ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0);
	_ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);

	sgi = grp;
	sg = global->sg_vec[sgi];

	/* Use xcount to spread queues over the xfactor schedq's
	 * per priority.
	 */
	x = __atomic_fetch_add(&sg->xcount[prio], 1, __ATOMIC_RELAXED);
	if (x == 0) {
		/* First ODP queue for this priority
		 * Notify all threads in sg->thr_wanted that they
		 * should join.
		 */
		signal_threads_add(sg, sgi, prio);
	}
	return &sg->schedq[prio * sg->xfactor + x % sg->xfactor];
}
+
/* Register a pktin queue: adds a queue on the dedicated pktin priority
 * level and returns the schedq offset from the queue's designated schedq
 * to its alternate (pktin priority) schedq.
 */
static uint32_t sched_pktin_add(odp_schedule_group_t grp, uint32_t prio)
{
	uint32_t sgi;
	sched_group_t *sg;

	_ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP);
	_ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0);
	_ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);

	sgi = grp;
	sg = global->sg_vec[sgi];

	/* Count the queue on the pktin priority level of the group. */
	(void)_odp_sched_queue_add(grp, ODP_SCHED_PRIO_PKTIN);
	/* Offset (in schedq slots) between 'prio' and the pktin level. */
	return (ODP_SCHED_PRIO_PKTIN - prio) * sg->xfactor;
}
+
+static void signal_threads_rem(sched_group_t *sg, uint32_t sgi, uint32_t prio)
+{
+ sched_group_mask_t thrds = sg->thr_wanted;
+ uint32_t thr;
+
+ while (!bitset_is_null(thrds)) {
+ thr = bitset_ffs(thrds) - 1;
+ thrds = bitset_clr(thrds, thr);
+ /* Notify the thread about membership in this
+ * group/priority.
+ */
+ atom_bitset_clr(&global->thread_state[thr].sg_wanted[prio],
+ sgi, __ATOMIC_RELEASE);
+ __atomic_store_n(&global->thread_state[thr].sg_sem, 1,
+ __ATOMIC_RELEASE);
+ }
+}
+
/* Unregister an ODP queue from scheduler group 'grp' at priority 'prio'.
 * The last queue removed from a priority level triggers leave
 * notifications to all wanted threads.
 */
void _odp_sched_queue_rem(odp_schedule_group_t grp, uint32_t prio)
{
	uint32_t sgi;
	sched_group_t *sg;
	uint32_t x;

	_ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP);
	/* Group must be allocated (not in the free set). */
	_ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0);
	_ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);

	sgi = grp;
	sg = global->sg_vec[sgi];

	x = __atomic_sub_fetch(&sg->xcount[prio], 1, __ATOMIC_RELAXED);
	if (x == 0) {
		/* Last ODP queue for this priority
		 * Notify all threads in sg->thr_wanted that they
		 * should leave.
		 */
		signal_threads_rem(sg, sgi, prio);
	}
}
+
/* Unregister a pktin queue: removes the queue counted on the dedicated
 * pktin priority level of the group (counterpart of sched_pktin_add()).
 */
static void sched_pktin_rem(odp_schedule_group_t grp)
{
	_odp_sched_queue_rem(grp, ODP_SCHED_PRIO_PKTIN);
}
+
/* Add this thread to the scheduler groups in 'sg_wanted' that it is not
 * yet a member of, for priority level 'p': insert the groups' schedq's
 * into the thread's schedq list and mark actual membership.
 */
static void update_sg_add(sched_scalable_thread_state_t *ts,
			  uint32_t p,
			  sched_group_mask_t sg_wanted)
{
	sched_group_mask_t added;
	uint32_t sgi;
	sched_group_t *sg;
	uint32_t x;

	/* Groups wanted but not yet actual. */
	added = bitset_andn(sg_wanted, ts->sg_actual[p]);
	while (!bitset_is_null(added)) {
		sgi = bitset_ffs(added) - 1;
		sg = global->sg_vec[sgi];
		for (x = 0; x < sg->xfactor; x++) {
			/* Include our thread index to shift
			 * (rotate) the order of schedq's
			 */
			insert_schedq_in_list(ts,
					      &sg->schedq[p * sg->xfactor +
					      (x + ts->tidx) % sg->xfactor]);
		}
		atom_bitset_set(&sg->thr_actual[p], ts->tidx, __ATOMIC_RELAXED);
		added = bitset_clr(added, sgi);
	}
}
+
/* Remove this thread from the scheduler groups it is a member of but that
 * are no longer in 'sg_wanted', for priority level 'p': remove the groups'
 * schedq's from the thread's schedq list and clear actual membership.
 */
static void update_sg_rem(sched_scalable_thread_state_t *ts,
			  uint32_t p,
			  sched_group_mask_t sg_wanted)
{
	sched_group_mask_t removed;
	uint32_t sgi;
	sched_group_t *sg;
	uint32_t x;

	/* Groups actual but no longer wanted. */
	removed = bitset_andn(ts->sg_actual[p], sg_wanted);
	while (!bitset_is_null(removed)) {
		sgi = bitset_ffs(removed) - 1;
		sg = global->sg_vec[sgi];
		for (x = 0; x < sg->xfactor; x++) {
			remove_schedq_from_list(ts,
						&sg->schedq[p *
						sg->xfactor + x]);
		}
		atom_bitset_clr(&sg->thr_actual[p], ts->tidx, __ATOMIC_RELAXED);
		removed = bitset_clr(removed, sgi);
	}
}
+
+static void update_sg_membership(sched_scalable_thread_state_t *ts)
+{
+ uint32_t p;
+ sched_group_mask_t sg_wanted;
+
+ for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) {
+ sg_wanted = atom_bitset_load(&ts->sg_wanted[p],
+ __ATOMIC_ACQUIRE);
+ if (!bitset_is_eql(ts->sg_actual[p], sg_wanted)) {
+ /* Our sched_group membership has changed */
+ update_sg_add(ts, p, sg_wanted);
+ update_sg_rem(ts, p, sg_wanted);
+ ts->sg_actual[p] = sg_wanted;
+ }
+ }
+}
+
+/*******************************************************************************
+ * Scheduler
+ ******************************************************************************/
+
/* Release the atomic queue currently owned by this thread: update its
 * scheduling state with the number of events dequeued and drop ownership.
 * With CONFIG_QSCHST_LOCK the caller must hold the queue's qschlock.
 */
static inline void _schedule_release_atomic(sched_scalable_thread_state_t *ts)
{
#ifdef CONFIG_QSCHST_LOCK
	sched_update_deq_sc(ts->atomq, ts->dequeued, true);
	/* Our ticket must have been consumed and no new one taken. */
	_ODP_ASSERT(ts->atomq->qschst.cur_ticket != ts->ticket);
	_ODP_ASSERT(ts->atomq->qschst.cur_ticket ==
			ts->atomq->qschst.nxt_ticket);
#else
	sched_update_deq(ts->atomq, ts->dequeued, true);
#endif
	ts->atomq = NULL;
	ts->ticket = TICKET_INVALID;
}
+
/* Release this thread's current reorder context, retiring it into the
 * reorder window, and clear the out-of-order flag.
 */
static inline void _schedule_release_ordered(sched_scalable_thread_state_t *ts)
{
	ts->out_of_order = false;
	_odp_rctx_release(ts->rctx);
	ts->rctx = NULL;
}
+
/* Start scheduling the given pktin queues of a pktio interface: mark each
 * queue as a pktin consumer, record its pktio/RX-queue identity, register
 * it on the pktin priority level and push it onto its schedq.
 */
static void pktio_start(int pktio_idx,
			int num_in_queue,
			int in_queue_idx[],
			odp_queue_t odpq[])
{
	int i, rxq;
	queue_entry_t *qentry;
	sched_elem_t *elem;

	_ODP_ASSERT(pktio_idx < CONFIG_PKTIO_ENTRIES);
	for (i = 0; i < num_in_queue; i++) {
		rxq = in_queue_idx[i];
		_ODP_ASSERT(rxq < ODP_PKTIN_MAX_QUEUES);
		/* One poll_count reference per started RX queue; dropped
		 * again in pktio_stop(). */
		__atomic_fetch_add(&global->poll_count[pktio_idx], 1,
				   __ATOMIC_RELAXED);
		qentry = _odp_qentry_from_ext(odpq[i]);
		elem = &qentry->sched_elem;
		elem->cons_type |= FLAG_PKTIN; /* Set pktin queue flag */
		elem->pktio_idx = pktio_idx;
		elem->rx_queue = rxq;
		elem->xoffset = sched_pktin_add(elem->sched_grp, elem->sched_prio);
		_ODP_ASSERT(elem->schedq != NULL);
		schedq_push(elem->schedq, elem);
	}
}
+
/* Stop scheduling one pktin queue; when the last queue of the pktio
 * drops its poll_count reference, finalize the pktio stop.
 */
static void pktio_stop(sched_elem_t *elem)
{
	sched_pktin_rem(elem->sched_grp);
	if (__atomic_sub_fetch(&global->poll_count[elem->pktio_idx],
			       1, __ATOMIC_RELAXED) == 0) {
		/* Call stop_finalize when all queues
		 * of the pktio have been removed */
		_odp_sched_cb_pktio_stop_finalize(elem->pktio_idx);
	}
}
+
+static bool have_reorder_ctx(sched_scalable_thread_state_t *ts)
+{
+ if (odp_unlikely(bitset_is_null(ts->priv_rvec_free))) {
+ ts->priv_rvec_free = atom_bitset_xchg(&ts->rvec_free, 0,
+ __ATOMIC_RELAXED);
+ if (odp_unlikely(bitset_is_null(ts->priv_rvec_free))) {
+ /* No free reorder contexts for this thread */
+ return false;
+ }
+ }
+ return true;
+}
+
/* True if the queue is a pktio ingress (pktin) queue. */
static inline bool is_pktin(sched_elem_t *elem)
{
	return (elem->cons_type & FLAG_PKTIN) != 0;
}
+
/* True if the queue is an atomic pktin queue (exact flag combination). */
static inline bool is_atomic(sched_elem_t *elem)
{
	return elem->cons_type == (ODP_SCHED_SYNC_ATOMIC | FLAG_PKTIN);
}
+
/* True if the queue is an ordered pktin queue (exact flag combination). */
static inline bool is_ordered(sched_elem_t *elem)
{
	return elem->cons_type == (ODP_SCHED_SYNC_ORDERED | FLAG_PKTIN);
}
+
/* Poll a pktin queue: first drain events already staged on the ingress
 * queue, otherwise poll the pktio RX queue directly. Returns the number
 * of events written to ev[] (0 if none). Handles atomic/ordered queue
 * bookkeeping (queue ownership, reorder window reservation) and pushes
 * the queue back onto an appropriate schedq as required.
 */
static int poll_pktin(sched_elem_t *elem, odp_event_t ev[], int num_evts)
{
	sched_scalable_thread_state_t *ts = _odp_sched_ts;
	int num, i;
	/* For ordered queues only */
	reorder_context_t *rctx;
	reorder_window_t *rwin = NULL;
	uint32_t sn = 0;
	uint32_t idx;

	if (is_ordered(elem)) {
		/* Need reorder context and slot in reorder window */
		rwin = queue_get_rwin((queue_entry_t *)elem);
		_ODP_ASSERT(rwin != NULL);
		if (odp_unlikely(!have_reorder_ctx(ts) ||
				 !_odp_rwin_reserve_sc(rwin, &sn))) {
			/* Put back queue on source schedq */
			schedq_push(ts->src_schedq, elem);
			return 0;
		}
		/* Slot in reorder window reserved! */
	}

	/* Try to dequeue events from the ingress queue itself */
	num = _odp_queue_deq_sc(elem, ev, num_evts);
	if (odp_likely(num > 0)) {
events_dequeued:
		if (is_atomic(elem)) {
			ts->atomq = elem; /* Remember */
			ts->dequeued += num;
			/* Don't push atomic queue on schedq */
		} else /* Parallel or ordered */ {
			if (is_ordered(elem)) {
				/* Find and initialise an unused reorder
				 * context. */
				idx = bitset_ffs(ts->priv_rvec_free) - 1;
				ts->priv_rvec_free =
					bitset_clr(ts->priv_rvec_free, idx);
				rctx = &ts->rvec[idx];
				_odp_rctx_init(rctx, idx, rwin, sn);
				/* Are we in-order or out-of-order? */
				ts->out_of_order = sn != rwin->hc.head;
				ts->rctx = rctx;
			}
			schedq_push(elem->schedq, elem);
		}
		return num;
	}

	/* Ingress queue empty => poll pktio RX queue */
	_odp_event_hdr_t *rx_evts[QUEUE_MULTI_MAX];
	int num_rx = _odp_sched_cb_pktin_poll(elem->pktio_idx, elem->rx_queue,
					      rx_evts, QUEUE_MULTI_MAX);

	if (odp_likely(num_rx > 0)) {
		/* Return up to num_evts events directly, stage the rest. */
		num = num_rx < num_evts ? num_rx : num_evts;
		for (i = 0; i < num; i++) {
			/* Return events directly to caller */
			ev[i] = _odp_event_from_hdr(rx_evts[i]);
		}
		if (num_rx > num) {
			/* Events remain, enqueue them */
			i = _odp_queue_enq_sp(elem, &rx_evts[num], num_rx - num);
			/* Enqueue must succeed as the queue was empty */
			_ODP_ASSERT(i == num_rx - num);
		}
		goto events_dequeued;
	}
	/* No packets received, reset state and undo side effects */
	if (is_atomic(elem))
		ts->atomq = NULL;
	else if (is_ordered(elem))
		_odp_rwin_unreserve_sc(rwin, sn);

	if (odp_likely(num_rx == 0)) {
		/* RX queue empty, push it to pktin priority schedq */
		sched_queue_t *schedq = ts->src_schedq;
		/* Check if queue came from the designated schedq */
		if (schedq == elem->schedq) {
			/* Yes, add offset to the pktin priority level
			 * in order to get alternate schedq */
			schedq += elem->xoffset;
		}
		/* Else no, queue must have come from alternate schedq */
		schedq_push(schedq, elem);
	} else /* num_rx < 0 => pktio stopped or closed */ {
		/* Remove queue */
		pktio_stop(elem);
		/* Don't push queue to schedq */
	}

	/* Thread state must be clean when returning empty-handed. */
	_ODP_ASSERT(ts->atomq == NULL);
	_ODP_ASSERT(!ts->out_of_order);
	_ODP_ASSERT(ts->rctx == NULL);
	return 0;
}
+
/* Core scheduling function: dequeue up to num_evts events for the calling
 * thread. Order of service: (1) the atomic queue the thread already owns,
 * (2) its schedq list (pktin, atomic, parallel and ordered queues).
 * Returns the number of events written to ev[] and, if 'from' is non-NULL,
 * the source queue handle; returns 0 when nothing was available.
 */
static int _schedule(odp_queue_t *from, odp_event_t ev[], int num_evts)
{
	sched_scalable_thread_state_t *ts;
	sched_elem_t *atomq;
	int num;
	int cpu_id;
	uint32_t i;

	ts = _odp_sched_ts;
	atomq = ts->atomq;

	timer_run(1);

	/* Once an atomic queue has been scheduled to a thread, it will stay
	 * on that thread until empty or 'rotated' by WRR
	 */
	if (atomq != NULL && is_pktin(atomq)) {
		/* Atomic pktin queue */
		if (ts->dequeued < atomq->qschst.wrr_budget) {
			_ODP_ASSERT(ts->src_schedq != NULL);
			num = poll_pktin(atomq, ev, num_evts);
			if (odp_likely(num != 0)) {
				if (from)
					*from = queue_get_handle((queue_entry_t *)atomq);
				return num;
			}
		} else {
			/* WRR budget exhausted, move queue to end of schedq */
			schedq_push(atomq->schedq, atomq);
		}
		ts->atomq = NULL;
	} else if (atomq != NULL) {
		_ODP_ASSERT(ts->ticket != TICKET_INVALID);
#ifdef CONFIG_QSCHST_LOCK
		LOCK(&atomq->qschlock);
#endif
dequeue_atomic:
		_ODP_ASSERT(ts->ticket == atomq->qschst.cur_ticket);
		_ODP_ASSERT(ts->ticket != atomq->qschst.nxt_ticket);
		/* Atomic queues can be dequeued without lock since this thread
		 * has the only reference to the atomic queue being processed.
		 */
		if (ts->dequeued < atomq->qschst.wrr_budget) {
			num = _odp_queue_deq_sc(atomq, ev, num_evts);
			if (odp_likely(num != 0)) {
#ifdef CONFIG_QSCHST_LOCK
				UNLOCK(&atomq->qschlock);
#endif
				ts->dequeued += num;
				/* Allow this thread to continue to 'own' this
				 * atomic queue until all events have been
				 * processed and the thread re-invokes the
				 * scheduler.
				 */
				if (from)
					*from = queue_get_handle((queue_entry_t *)atomq);
				return num;
			}
		}
		/* Atomic queue was empty or interrupted by WRR, release it. */
		_schedule_release_atomic(ts);
#ifdef CONFIG_QSCHST_LOCK
		UNLOCK(&atomq->qschlock);
#endif
	}

	/* Check for and perform any scheduler group updates. */
	if (odp_unlikely(__atomic_load_n(&ts->sg_sem, __ATOMIC_RELAXED) != 0)) {
		/* Acquire-load pairs with the release-store in
		 * signal_threads_add/rem before we read sg_wanted. */
		(void)__atomic_load_n(&ts->sg_sem, __ATOMIC_ACQUIRE);
		ts->sg_sem = 0;
		update_sg_membership(ts);
	}

	cpu_id = odp_cpu_id();
	/* Scan our schedq list from beginning to end */
	for (i = 0; i < ts->num_schedq; i++) {
		sched_queue_t *schedq = ts->schedq_list[i];
		sched_elem_t *elem;

		ts->loop_cnt++;
restart_same:
		elem = schedq_peek(schedq);
		if (odp_unlikely(elem == NULL)) {
			/* Schedq empty, look at next one. */
			continue;
		}
		if (is_pktin(elem)) {
			/* Pktio ingress queue */
			if (elem->schedq != schedq) { /* Low priority schedq*/
				if (elem->loop_check[cpu_id] != ts->loop_cnt)
					elem->loop_check[cpu_id] = ts->loop_cnt;
				else /* Wrapped around */
					continue; /* Go to next schedq */
			}

			if (odp_unlikely(!schedq_cond_pop(schedq, elem)))
				goto restart_same;

			ts->src_schedq = schedq; /* Remember source schedq */
			num = poll_pktin(elem, ev, num_evts);
			if (odp_unlikely(num <= 0))
				goto restart_same;
			if (from)
				*from = queue_get_handle((queue_entry_t *)elem);
			return num;
		} else if (elem->cons_type == ODP_SCHED_SYNC_ATOMIC) {
			/* Dequeue element only if it is still at head
			 * of schedq.
			 */
			if (odp_unlikely(!schedq_cond_pop(schedq, elem))) {
				/* Queue not at head of schedq anymore, some
				 * other thread popped it.
				 */
				goto restart_same;
			}
			ts->atomq = elem;
			atomq = elem;
			ts->dequeued = 0;
#ifdef CONFIG_QSCHST_LOCK
			LOCK(&atomq->qschlock);
			ts->ticket = atomq->qschst.nxt_ticket++;
			_ODP_ASSERT(atomq->qschst.cur_ticket == ts->ticket);
#else
			/* Dequeued atomic queue from the schedq, only we
			 * can process it and any qschst updates are our
			 * responsibility.
			 */
			/* The ticket taken below will signal producers */
			ts->ticket = __atomic_fetch_add(&atomq->qschst.nxt_ticket, 1,
							__ATOMIC_RELAXED);
			while (__atomic_load_n(&atomq->qschst.cur_ticket,
					       __ATOMIC_ACQUIRE) != ts->ticket) {
				/* No need to use WFE, spinning here seems
				 * very infrequent.
				 */
				odp_cpu_pause();
			}
#endif
			goto dequeue_atomic;
		} else if (elem->cons_type == ODP_SCHED_SYNC_PARALLEL) {
#ifdef CONFIG_QSCHST_LOCK
			LOCK(&elem->qschlock);
			num = _odp_queue_deq_sc(elem, ev, num_evts);
			if (odp_likely(num != 0)) {
				sched_update_deq_sc(elem, num, false);
				UNLOCK(&elem->qschlock);
				if (from)
					*from =
						queue_get_handle((queue_entry_t *)elem);
				return num;
			}
			UNLOCK(&elem->qschlock);
#else
			num = _odp_queue_deq_mc(elem, ev, num_evts);
			if (odp_likely(num != 0)) {
				sched_update_deq(elem, num, false);
				if (from)
					*from =
						queue_get_handle((queue_entry_t *)elem);
				return num;
			}
#endif
		} else if (elem->cons_type == ODP_SCHED_SYNC_ORDERED) {
			reorder_window_t *rwin;
			reorder_context_t *rctx;
			uint32_t sn;
			uint32_t idx;

			/* The ordered queue has a reorder window so requires
			 * order restoration. We must use a reorder context to
			 * collect all outgoing events. Ensure there is at least
			 * one available reorder context.
			 */
			if (odp_unlikely(!have_reorder_ctx(ts)))
				continue;

			/* _odp_rwin_reserve and odp_queue_deq must be atomic or
			 * there will be a potential race condition.
			 * Allocate a slot in the reorder window.
			 */
			rwin = queue_get_rwin((queue_entry_t *)elem);
			_ODP_ASSERT(rwin != NULL);
			if (odp_unlikely(!_odp_rwin_reserve(rwin, &sn))) {
				/* Reorder window full */
				/* Look at next schedq, find other queue */
				continue;
			}
			/* Wait for our turn to dequeue */
			if (odp_unlikely(__atomic_load_n(&rwin->turn, __ATOMIC_ACQUIRE) != sn))
				_odp_wait_until_eq_acq_u32(&rwin->turn, sn);
#ifdef CONFIG_QSCHST_LOCK
			LOCK(&elem->qschlock);
#endif
			num = _odp_queue_deq_sc(elem, ev, num_evts);
			/* Wait for prod_read write in _odp_queue_dequeue_sc()
			 * to complete before we signal the next consumer
			 */
			atomic_store_release(&rwin->turn, sn + 1,
					     /*readonly=*/false);
			/* Find and initialise an unused reorder context. */
			idx = bitset_ffs(ts->priv_rvec_free) - 1;
			ts->priv_rvec_free =
				bitset_clr(ts->priv_rvec_free, idx);
			rctx = &ts->rvec[idx];
			/* Need to initialise reorder context or we can't
			 * release it later.
			 */
			_odp_rctx_init(rctx, idx, rwin, sn);

			/* Was dequeue successful? */
			if (odp_likely(num != 0)) {
				/* Perform scheduler related updates */
#ifdef CONFIG_QSCHST_LOCK
				sched_update_deq_sc(elem, num,
						    /*atomic=*/false);
				UNLOCK(&elem->qschlock);
#else
				sched_update_deq(elem, num, /*atomic=*/false);
#endif

				/* Are we in-order or out-of-order? */
				ts->out_of_order = sn != rwin->hc.head;

				ts->rctx = rctx;
				if (from)
					*from = queue_get_handle((queue_entry_t *)elem);
				return num;
			}
#ifdef CONFIG_QSCHST_LOCK
			UNLOCK(&elem->qschlock);
#endif
			/* Since a slot was reserved in the reorder window,
			 * the reorder context needs to be released and
			 * inserted into the reorder window.
			 */
			_odp_rctx_release(rctx);
			_ODP_ASSERT(ts->rctx == NULL);
		}
		/* Dequeue from parallel/ordered queue failed
		 * Check if we have a queue at the head of the schedq that needs
		 * to be popped
		 */
		if (odp_unlikely(__atomic_load_n(&elem->pop_deficit,
						 __ATOMIC_RELAXED) != 0)) {
#ifdef CONFIG_QSCHST_LOCK
			LOCK(&elem->qschlock);
			sched_update_popd_sc(elem);
			UNLOCK(&elem->qschlock);
#else
			sched_update_popd(elem);
#endif
		}
	}

	return 0;
}
+
+/******************************************************************************/
+
/* Acquire an ordered lock: block until this context's sequence number
 * equals the lock's current value, i.e. all earlier contexts have passed.
 * Logs an error and returns on invalid use (no reorder context or bad
 * lock index).
 */
static void schedule_order_lock(uint32_t lock_index)
{
	struct reorder_context *rctx = _odp_sched_ts->rctx;

	if (odp_unlikely(rctx == NULL ||
			 rctx->rwin == NULL ||
			 lock_index >= rctx->rwin->lock_count)) {
		_ODP_ERR("Invalid call to odp_schedule_order_lock\n");
		return;
	}
	/* Wait (acquire) until it is our turn for this ordered lock. */
	if (odp_unlikely(__atomic_load_n(&rctx->rwin->olock[lock_index],
					 __ATOMIC_ACQUIRE) != rctx->sn))
		_odp_wait_until_eq_acq_u32(&rctx->rwin->olock[lock_index], rctx->sn);
}
+
/* Release an ordered lock: advance the lock past this context's sequence
 * number so the next context may enter, and remember the release in
 * olock_flags. Logs an error and returns on invalid use (no context, bad
 * index, or lock not currently held by this context).
 */
static void schedule_order_unlock(uint32_t lock_index)
{
	struct reorder_context *rctx;

	rctx = _odp_sched_ts->rctx;
	if (odp_unlikely(rctx == NULL ||
			 rctx->rwin == NULL ||
			 lock_index >= rctx->rwin->lock_count ||
			 rctx->rwin->olock[lock_index] != rctx->sn)) {
		_ODP_ERR("Invalid call to odp_schedule_order_unlock\n");
		return;
	}
	/* Release-store hands the lock to the next sequence number. */
	atomic_store_release(&rctx->rwin->olock[lock_index],
			     rctx->sn + 1,
			     /*readonly=*/false);
	rctx->olock_flags |= 1U << lock_index;
}
+
/* Atomically-in-order transition between two ordered locks: release
 * 'unlock_index' then acquire 'lock_index'.
 */
static void schedule_order_unlock_lock(uint32_t unlock_index,
				       uint32_t lock_index)
{
	schedule_order_unlock(unlock_index);
	schedule_order_lock(lock_index);
}
+
/* Start phase of a split ordered-lock acquire: no-op in this
 * implementation (the wait happens in schedule_order_lock_wait()).
 */
static void schedule_order_lock_start(uint32_t lock_index)
{
	(void)lock_index;
}
+
/* Wait phase of a split ordered-lock acquire: equivalent to a full
 * schedule_order_lock() in this implementation.
 */
static void schedule_order_lock_wait(uint32_t lock_index)
{
	schedule_order_lock(lock_index);
}
+
/* Public release of the caller's current atomic queue context, if any.
 * With CONFIG_QSCHST_LOCK the queue's qschlock is taken around the
 * internal release.
 */
static void schedule_release_atomic(void)
{
	sched_scalable_thread_state_t *ts;

	ts = _odp_sched_ts;
	if (odp_likely(ts->atomq != NULL)) {
#ifdef CONFIG_QSCHST_LOCK
		sched_elem_t *atomq;

		atomq = ts->atomq;
		LOCK(&atomq->qschlock);
#endif
		_schedule_release_atomic(ts);
#ifdef CONFIG_QSCHST_LOCK
		UNLOCK(&atomq->qschlock);
#endif
	}
}
+
+static void schedule_release_ordered(void)
+{
+ sched_scalable_thread_state_t *ts;
+
+ ts = _odp_sched_ts;
+ if (ts->rctx != NULL)
+ _schedule_release_ordered(ts);
+}
+
/* Schedule up to 'num' events with the given wait policy: no wait, wait
 * forever, or wait up to 'wait' nanoseconds. Releases any previously held
 * ordered context first; when paused, only releases contexts and returns 0.
 */
static int schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t ev[],
			  int num)
{
	sched_scalable_thread_state_t *ts;
	int n;
	odp_time_t start;
	odp_time_t deadline;

	ts = _odp_sched_ts;
	/* Release any previous reorder context. */
	if (ts->rctx != NULL)
		_schedule_release_ordered(ts);

	if (odp_unlikely(ts->pause)) {
		/* Paused: drop any held atomic queue, deliver nothing. */
		if (ts->atomq != NULL) {
#ifdef CONFIG_QSCHST_LOCK
			sched_elem_t *atomq;

			atomq = ts->atomq;
			LOCK(&atomq->qschlock);
#endif
			_schedule_release_atomic(ts);
#ifdef CONFIG_QSCHST_LOCK
			UNLOCK(&atomq->qschlock);
#endif
		}
		return 0;
	}

	if (wait == ODP_SCHED_NO_WAIT)
		return _schedule(from, ev, num);

	if (wait == ODP_SCHED_WAIT) {
		/* Busy-poll until events arrive. */
		for (;;) {
			n = _schedule(from, ev, num);
			if (odp_likely(n > 0))
				return n;
		}
	}

	/* Timed wait: poll until the deadline passes. */
	start = odp_time_local();

	n = _schedule(from, ev, num);
	if (odp_likely(n > 0))
		return n;

	deadline = odp_time_add_ns(start, wait);

	while (odp_time_cmp(deadline, odp_time_local()) > 0) {
		n = _schedule(from, ev, num);
		if (odp_likely(n > 0))
			return n;
	}

	return 0;
}
+
/* Schedule a single event with the given wait policy. Mirrors
 * schedule_multi() with num == 1; returns ODP_EVENT_INVALID when nothing
 * was scheduled.
 */
static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
{
	odp_event_t ev = ODP_EVENT_INVALID;
	const int num = 1;
	sched_scalable_thread_state_t *ts;
	int n;
	odp_time_t start;
	odp_time_t deadline;

	ts = _odp_sched_ts;
	/* Release any previous reorder context. */
	if (ts->rctx != NULL)
		_schedule_release_ordered(ts);

	if (odp_unlikely(ts->pause)) {
		/* Paused: drop any held atomic queue, deliver nothing. */
		if (ts->atomq != NULL) {
#ifdef CONFIG_QSCHST_LOCK
			sched_elem_t *atomq;

			atomq = ts->atomq;
			LOCK(&atomq->qschlock);
#endif
			_schedule_release_atomic(ts);
#ifdef CONFIG_QSCHST_LOCK
			UNLOCK(&atomq->qschlock);
#endif
		}
		return ev;
	}

	if (wait == ODP_SCHED_NO_WAIT) {
		(void)_schedule(from, &ev, num);
		return ev;
	}

	if (wait == ODP_SCHED_WAIT) {
		/* Busy-poll until an event arrives. */
		for (;;) {
			n = _schedule(from, &ev, num);
			if (odp_likely(n > 0))
				return ev;
		}
	}

	/* Timed wait: poll until the deadline passes. */
	start = odp_time_local();

	n = _schedule(from, &ev, num);
	if (odp_likely(n > 0))
		return ev;

	deadline = odp_time_add_ns(start, wait);

	while (odp_time_cmp(deadline, odp_time_local()) > 0) {
		n = _schedule(from, &ev, num);
		if (odp_likely(n > 0))
			return ev;
	}

	return ev;
}
+
/* Convenience wrapper: schedule multiple events, waiting indefinitely. */
static int schedule_multi_wait(odp_queue_t *from, odp_event_t events[],
			       int max_num)
{
	return schedule_multi(from, ODP_SCHED_WAIT, events, max_num);
}
+
/* Convenience wrapper: schedule multiple events without waiting. */
static int schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[],
				  int max_num)
{
	return schedule_multi(from, ODP_SCHED_NO_WAIT, events, max_num);
}
+
/* Pause scheduling for the calling thread (takes effect on next call). */
static void schedule_pause(void)
{
	_odp_sched_ts->pause = true;
}
+
/* Resume scheduling for the calling thread. */
static void schedule_resume(void)
{
	_odp_sched_ts->pause = false;
}
+
/* Convert a wait time in nanoseconds to the scheduler's internal wait
 * representation. This implementation measures waits directly in
 * nanoseconds, so the value passes through unchanged.
 */
static uint64_t schedule_wait_time(uint64_t ns)
{
	return ns;
}
+
/* Number of priority levels visible to the application (the internal
 * pktin priority level is not exposed).
 */
static int schedule_num_prio(void)
{
	return ODP_SCHED_PRIO_NUM - 1; /* Discount the pktin priority level */
}
+
/* Lowest (numerically smallest) priority value usable by applications. */
static int schedule_min_prio(void)
{
	return 0;
}
+
/* Highest priority value usable by applications. */
static int schedule_max_prio(void)
{
	return schedule_num_prio() - 1;
}
+
/* Default priority: the middle of the application-visible range. */
static int schedule_default_prio(void)
{
	return schedule_max_prio() / 2;
}
+
/* Add (SCHED_GROUP_JOIN) or remove (SCHED_GROUP_LEAVE) the threads in
 * 'mask' to/from scheduler group 'sg' (index 'sgi'), notifying each
 * affected thread on every priority level that currently has queues.
 * Internal helper: caller holds sched_grp_lock; inputs not validated.
 * Always returns 0.
 */
static int schedule_group_update(sched_group_t *sg,
				 uint32_t sgi,
				 const odp_thrmask_t *mask,
				 int join_leave)
{
	int thr;
	uint32_t p;

	/* Internal function, do not validate inputs */

	/* Notify relevant threads about the change */
	thr = odp_thrmask_first(mask);
	while (0 <= thr) {
		/* Add thread to scheduler group's wanted thread mask */
		if (join_leave == SCHED_GROUP_JOIN)
			atom_bitset_set(&sg->thr_wanted, thr, __ATOMIC_RELAXED);
		else
			atom_bitset_clr(&sg->thr_wanted, thr, __ATOMIC_RELAXED);
		for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) {
			if (sg->xcount[p] != 0) {
				sched_scalable_thread_state_t *state;

				state = &global->thread_state[thr];

				/* This priority level has ODP queues
				 * Notify the thread about membership in
				 * this group/priority
				 */
				if (join_leave == SCHED_GROUP_JOIN)
					atom_bitset_set(&state->sg_wanted[p],
							sgi, __ATOMIC_RELEASE);
				else
					atom_bitset_clr(&state->sg_wanted[p],
							sgi, __ATOMIC_RELEASE);
				__atomic_store_n(&state->sg_sem, 1,
						 __ATOMIC_RELEASE);
			}
		}
		thr = odp_thrmask_next(mask, thr);
	}

	return 0;
}
+
+static int _schedule_group_thrmask(sched_group_t *sg, odp_thrmask_t *mask)
+{
+ bitset_t bs;
+ uint32_t bit;
+
+ /* Internal function, do not validate inputs */
+
+ odp_thrmask_zero(mask);
+ bs = sg->thr_wanted;
+ while (!bitset_is_null(bs)) {
+ bit = bitset_ffs(bs) - 1;
+ bs = bitset_clr(bs, bit);
+ odp_thrmask_set(mask, bit);
+ }
+
+ return 0;
+}
+
+static odp_schedule_group_t schedule_group_create(const char *name,
+ const odp_thrmask_t *mask)
+{
+ uint32_t sgi;
+ sched_group_mask_t free;
+ uint32_t xfactor;
+ sched_group_t *sg;
+ uint32_t p;
+ uint32_t x;
+ uint32_t size;
+
+ /* Validate inputs */
+ if (mask == NULL)
+ _ODP_ABORT("mask is NULL\n");
+
+ odp_spinlock_lock(&global->sched_grp_lock);
+
+ /* Allocate a scheduler group */
+ free = atom_bitset_load(&global->sg_free, __ATOMIC_RELAXED);
+ do {
+ /* All sched_groups in use */
+ if (bitset_is_null(free))
+ goto no_free_sched_group;
+
+ sgi = bitset_ffs(free) - 1;
+ /* All sched_groups in use */
+ if (sgi >= MAX_SCHED_GROUP)
+ goto no_free_sched_group;
+ } while (!atom_bitset_cmpxchg(&global->sg_free,
+ &free,
+ bitset_clr(free, sgi),
+ true,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE));
+
+ /* Compute xfactor (spread factor) from the number of threads
+ * present in the thread mask. Preferable this would be an
+ * explicit parameter.
+ */
+ xfactor = odp_thrmask_count(mask);
+ if (xfactor < 1)
+ xfactor = CONFIG_DEFAULT_XFACTOR;
+
+ size = sizeof(sched_group_t) +
+ (ODP_SCHED_PRIO_NUM * xfactor - 1) * sizeof(sched_queue_t);
+ sg = (sched_group_t *)shm_pool_alloc_align(global->sched_shm_pool,
+ size);
+ if (sg == NULL)
+ goto shm_pool_alloc_failed;
+
+ strncpy(sg->name, name ? name : "", ODP_SCHED_GROUP_NAME_LEN - 1);
+ global->sg_vec[sgi] = sg;
+ memset(sg->thr_actual, 0, sizeof(sg->thr_actual));
+ sg->thr_wanted = bitset_null();
+ sg->xfactor = xfactor;
+ for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) {
+ sg->xcount[p] = 0;
+ for (x = 0; x < xfactor; x++)
+ schedq_init(&sg->schedq[p * xfactor + x], p);
+ }
+ if (odp_thrmask_count(mask) != 0)
+ schedule_group_update(sg, sgi, mask, SCHED_GROUP_JOIN);
+
+ odp_spinlock_unlock(&global->sched_grp_lock);
+
+ return (odp_schedule_group_t)(sgi);
+
+shm_pool_alloc_failed:
+ /* Free the allocated group index */
+ atom_bitset_set(&global->sg_free, sgi, __ATOMIC_RELAXED);
+
+no_free_sched_group:
+ odp_spinlock_unlock(&global->sched_grp_lock);
+
+ return ODP_SCHED_GROUP_INVALID;
+}
+
/* Destroy a scheduler group. Fails (-1) if the group handle is invalid,
 * the group is not allocated, or threads/queues still belong to it.
 * Waits for all threads to process pending join/leave requests before
 * checking emptiness. Returns 0 on success.
 */
static int schedule_group_destroy(odp_schedule_group_t group)
{
	uint32_t sgi;
	sched_group_t *sg;
	uint32_t p;
	int ret = 0;

	/* Validate inputs */
	if (group < 0 || group >= (odp_schedule_group_t)MAX_SCHED_GROUP) {
		ret = -1;
		goto invalid_group;
	}

	/* Process our own pending group updates first so we don't block
	 * the membership wait below on ourselves. */
	if (_odp_sched_ts &&
	    odp_unlikely(__atomic_load_n(&_odp_sched_ts->sg_sem,
					 __ATOMIC_RELAXED) != 0)) {
		(void)__atomic_load_n(&_odp_sched_ts->sg_sem,
				      __ATOMIC_ACQUIRE);
		_odp_sched_ts->sg_sem = 0;
		update_sg_membership(_odp_sched_ts);
	}
	odp_spinlock_lock(&global->sched_grp_lock);

	sgi = (uint32_t)group;
	if (bitset_is_set(global->sg_free, sgi)) {
		ret = -1;
		goto group_not_found;
	}

	sg = global->sg_vec[sgi];
	/* First ensure all threads have processed group_join/group_leave
	 * requests.
	 */
	for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) {
		if (sg->xcount[p] != 0) {
			bitset_t wanted = atom_bitset_load(&sg->thr_wanted, __ATOMIC_RELAXED);

			_odp_wait_until_eq_bitset(&sg->thr_actual[p], wanted);
		}
		/* Else ignore because no ODP queues on this prio */
	}

	/* Check if all threads/queues have left the group */
	for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) {
		if (!bitset_is_null(sg->thr_actual[p])) {
			_ODP_ERR("Group has threads\n");
			ret = -1;
			goto thrd_q_present_in_group;
		}
		if (p != ODP_SCHED_PRIO_PKTIN && sg->xcount[p] != 0) {
			_ODP_ERR("Group has queues\n");
			ret = -1;
			goto thrd_q_present_in_group;
		}
	}

	_odp_ishm_pool_free(global->sched_shm_pool, sg);
	global->sg_vec[sgi] = NULL;
	/* Return the group index to the free set. */
	atom_bitset_set(&global->sg_free, sgi, __ATOMIC_RELEASE);

	odp_spinlock_unlock(&global->sched_grp_lock);

	return ret;

thrd_q_present_in_group:

group_not_found:
	odp_spinlock_unlock(&global->sched_grp_lock);

invalid_group:

	return ret;
}
+
+/* Find a schedule group by name.
+ * Scans the whole group vector under the group lock; returns the handle of
+ * the first group whose name matches, or ODP_SCHED_GROUP_INVALID if none.
+ * Aborts on a NULL name.
+ */
+static odp_schedule_group_t schedule_group_lookup(const char *name)
+{
+	uint32_t sgi;
+	odp_schedule_group_t group;
+
+	/* Validate inputs */
+	if (name == NULL)
+		_ODP_ABORT("name or mask is NULL\n");
+
+	group = ODP_SCHED_GROUP_INVALID;
+
+	odp_spinlock_lock(&global->sched_grp_lock);
+
+	/* Scan through the schedule group array */
+	for (sgi = 0; sgi < MAX_SCHED_GROUP; sgi++) {
+		if ((global->sg_vec[sgi] != NULL) &&
+		    (strncmp(name, global->sg_vec[sgi]->name,
+			     ODP_SCHED_GROUP_NAME_LEN) == 0)) {
+			group = (odp_schedule_group_t)sgi;
+			break;
+		}
+	}
+
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+	return group;
+}
+
+/* Add the threads in 'mask' to a schedule group.
+ * Returns 0 on success, -1 if the group handle is out of range or the slot
+ * is free (group does not exist). Aborts on a NULL mask.
+ */
+static int schedule_group_join(odp_schedule_group_t group,
+			       const odp_thrmask_t *mask)
+{
+	uint32_t sgi;
+	sched_group_t *sg;
+	int ret;
+
+	/* Validate inputs */
+	if (group < 0 || group >= ((odp_schedule_group_t)MAX_SCHED_GROUP))
+		return -1;
+
+	if (mask == NULL)
+		_ODP_ABORT("name or mask is NULL\n");
+
+	odp_spinlock_lock(&global->sched_grp_lock);
+
+	sgi = (uint32_t)group;
+	/* A set free-bit means the slot holds no live group */
+	if (bitset_is_set(global->sg_free, sgi)) {
+		odp_spinlock_unlock(&global->sched_grp_lock);
+		return -1;
+	}
+
+	sg = global->sg_vec[sgi];
+	/* Membership change is applied by the shared update helper */
+	ret = schedule_group_update(sg, sgi, mask, SCHED_GROUP_JOIN);
+
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+	return ret;
+}
+
+/* Remove the threads in 'mask' from a schedule group.
+ * Returns 0 on success, -1 on an invalid handle or non-existent group.
+ * Aborts on a NULL mask.
+ */
+static int schedule_group_leave(odp_schedule_group_t group,
+				const odp_thrmask_t *mask)
+{
+	uint32_t sgi;
+	sched_group_t *sg;
+	int ret = 0;
+
+	/* Validate inputs */
+	if (group < 0 || group >= (odp_schedule_group_t)MAX_SCHED_GROUP) {
+		ret = -1;
+		goto invalid_group;
+	}
+
+	if (mask == NULL)
+		_ODP_ABORT("name or mask is NULL\n");
+
+	odp_spinlock_lock(&global->sched_grp_lock);
+
+	sgi = (uint32_t)group;
+	/* A set free-bit means the slot holds no live group */
+	if (bitset_is_set(global->sg_free, sgi)) {
+		ret = -1;
+		goto group_not_found;
+	}
+
+	sg = global->sg_vec[sgi];
+
+	ret = schedule_group_update(sg, sgi, mask, SCHED_GROUP_LEAVE);
+
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+	return ret;
+
+group_not_found:
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+invalid_group:
+	return ret;
+}
+
+/* Retrieve the thread mask of a schedule group into 'mask'.
+ * Returns 0 on success, -1 on an invalid handle or non-existent group.
+ * Aborts on a NULL mask.
+ */
+static int schedule_group_thrmask(odp_schedule_group_t group,
+				  odp_thrmask_t *mask)
+{
+	uint32_t sgi;
+	sched_group_t *sg;
+	int ret = 0;
+
+	/* Validate inputs */
+	if (group < 0 || group >= ((odp_schedule_group_t)MAX_SCHED_GROUP)) {
+		ret = -1;
+		goto invalid_group;
+	}
+
+	if (mask == NULL)
+		_ODP_ABORT("name or mask is NULL\n");
+
+	odp_spinlock_lock(&global->sched_grp_lock);
+
+	sgi = (uint32_t)group;
+	/* A set free-bit means the slot holds no live group */
+	if (bitset_is_set(global->sg_free, sgi)) {
+		ret = -1;
+		goto group_not_found;
+	}
+
+	sg = global->sg_vec[sgi];
+	ret = _schedule_group_thrmask(sg, mask);
+
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+	return ret;
+
+group_not_found:
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+invalid_group:
+	return ret;
+}
+
+/* Fill 'info' (name pointer and thread mask) for a schedule group.
+ * Returns 0 on success, -1 on an invalid handle or non-existent group.
+ * Aborts on a NULL info pointer.
+ * NOTE(review): info->name points into the group's own storage; it becomes
+ * dangling if the group is destroyed afterwards — presumably callers only
+ * use it transiently.
+ */
+static int schedule_group_info(odp_schedule_group_t group,
+			       odp_schedule_group_info_t *info)
+{
+	uint32_t sgi;
+	sched_group_t *sg;
+	int ret = 0;
+
+	/* Validate inputs */
+	if (group < 0 || group >= ((odp_schedule_group_t)MAX_SCHED_GROUP)) {
+		ret = -1;
+		goto invalid_group;
+	}
+
+	if (info == NULL)
+		_ODP_ABORT("name or mask is NULL\n");
+
+	odp_spinlock_lock(&global->sched_grp_lock);
+
+	sgi = (uint32_t)group;
+	/* A set free-bit means the slot holds no live group */
+	if (bitset_is_set(global->sg_free, sgi)) {
+		ret = -1;
+		goto group_not_found;
+	}
+
+	sg = global->sg_vec[sgi];
+
+	ret = _schedule_group_thrmask(sg, &info->thrmask);
+
+	info->name = sg->name;
+
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+	return ret;
+
+group_not_found:
+	odp_spinlock_unlock(&global->sched_grp_lock);
+
+invalid_group:
+	return ret;
+}
+
+/* Global init of the scalable scheduler.
+ * Reserves the global state in shared memory, creates the slab pool used
+ * for sched_group_t allocations, initialises the free-group bitset and all
+ * per-thread state, then creates the three predefined groups (ALL, WORKER,
+ * CONTROL) which must land on their fixed handles.
+ * Returns 0 on success, -1 on any failure (with created groups rolled back).
+ */
+static int schedule_init_global(void)
+{
+	odp_thrmask_t mask;
+	odp_schedule_group_t tmp_all;
+	odp_schedule_group_t tmp_wrkr;
+	odp_schedule_group_t tmp_ctrl;
+	odp_shm_t shm;
+	_odp_ishm_pool_t *pool;
+	uint32_t bits;
+	uint32_t pool_size;
+	uint64_t min_alloc;
+	uint64_t max_alloc;
+
+	shm = odp_shm_reserve("_odp_sched_scalable_global",
+			      sizeof(sched_global_t),
+			      ODP_CACHE_LINE_SIZE, 0);
+
+	global = odp_shm_addr(shm);
+	if (global == NULL) {
+		_ODP_ERR("Schedule init: Shm reserve failed.\n");
+		return -1;
+	}
+
+	memset(global, 0, sizeof(sched_global_t));
+	global->shm = shm;
+
+	/* Add storage required for sched groups. Assume worst case
+	 * xfactor of MAXTHREADS.
+	 */
+	pool_size = (sizeof(sched_group_t) +
+		     (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) *
+		     sizeof(sched_queue_t)) * MAX_SCHED_GROUP;
+	/* Choose min_alloc and max_alloc such that slab allocator
+	 * is selected.
+	 */
+	min_alloc = sizeof(sched_group_t) +
+		    (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) *
+		    sizeof(sched_queue_t);
+	max_alloc = min_alloc;
+	pool = _odp_ishm_pool_create("sched_shm_pool", pool_size,
+				     min_alloc, max_alloc, 0);
+	if (pool == NULL) {
+		_ODP_ERR("Failed to allocate shared memory pool "
+			 "for sched\n");
+		goto failed_sched_shm_pool_create;
+	}
+	global->sched_shm_pool = pool;
+
+	odp_spinlock_init(&global->sched_grp_lock);
+	odp_spinlock_init(&global->init_lock);
+
+	/* Mark every group slot free; avoid an undefined full-width shift
+	 * when MAX_SCHED_GROUP equals the bitset width.
+	 */
+	bits = MAX_SCHED_GROUP;
+	if (MAX_SCHED_GROUP == sizeof(global->sg_free) * CHAR_BIT)
+		global->sg_free = ~0;
+	else
+		global->sg_free = (1 << bits) - 1;
+
+	for (uint32_t i = 0; i < MAX_SCHED_GROUP; i++)
+		global->sg_vec[i] = NULL;
+	for (uint32_t i = 0; i < MAXTHREADS; i++) {
+		global->thread_state[i].sg_sem = 0;
+		for (uint32_t j = 0; j < ODP_SCHED_PRIO_NUM; j++)
+			global->thread_state[i].sg_wanted[j] = bitset_null();
+	}
+
+	/* Create sched groups for default GROUP_ALL, GROUP_WORKER and
+	 * GROUP_CONTROL groups.
+	 */
+	odp_thrmask_zero(&mask);
+	tmp_all = schedule_group_create("__group_all", &mask);
+	if (tmp_all != ODP_SCHED_GROUP_ALL) {
+		_ODP_ERR("Could not create ODP_SCHED_GROUP_ALL()\n");
+		goto failed_create_group_all;
+	}
+
+	tmp_wrkr = schedule_group_create("__group_worker", &mask);
+	if (tmp_wrkr != ODP_SCHED_GROUP_WORKER) {
+		_ODP_ERR("Could not create ODP_SCHED_GROUP_WORKER()\n");
+		goto failed_create_group_worker;
+	}
+
+	tmp_ctrl = schedule_group_create("__group_control", &mask);
+	if (tmp_ctrl != ODP_SCHED_GROUP_CONTROL) {
+		_ODP_ERR("Could not create ODP_SCHED_GROUP_CONTROL()\n");
+		goto failed_create_group_control;
+	}
+
+	/* Predefined groups are enabled until odp_schedule_config() says
+	 * otherwise.
+	 */
+	global->config_if.group_enable.all = 1;
+	global->config_if.group_enable.control = 1;
+	global->config_if.group_enable.worker = 1;
+
+	return 0;
+
+	/* Unwind: destroy any group that was created on a wrong handle */
+failed_create_group_control:
+	if (tmp_ctrl != ODP_SCHED_GROUP_INVALID)
+		schedule_group_destroy(ODP_SCHED_GROUP_CONTROL);
+
+failed_create_group_worker:
+	if (tmp_wrkr != ODP_SCHED_GROUP_INVALID)
+		schedule_group_destroy(ODP_SCHED_GROUP_WORKER);
+
+failed_create_group_all:
+	if (tmp_all != ODP_SCHED_GROUP_INVALID)
+		schedule_group_destroy(ODP_SCHED_GROUP_ALL);
+
+failed_sched_shm_pool_create:
+
+	return -1;
+}
+
+/* Global teardown: destroy the still-enabled predefined groups, then free
+ * the slab pool and the global shm. Returns 0 on success, -1 if the shm
+ * free fails (group destroy failures are only logged).
+ */
+static int schedule_term_global(void)
+{
+	/* Destroy enabled sched groups for default GROUP_ALL, GROUP_WORKER and
+	 * GROUP_CONTROL groups. */
+	if (global->config_if.group_enable.all) {
+		if (schedule_group_destroy(ODP_SCHED_GROUP_ALL) != 0)
+			_ODP_ERR("Failed to destroy ODP_SCHED_GROUP_ALL\n");
+	}
+	if (global->config_if.group_enable.worker) {
+		if (schedule_group_destroy(ODP_SCHED_GROUP_WORKER) != 0)
+			_ODP_ERR("Failed to destroy ODP_SCHED_GROUP_WORKER\n");
+	}
+	if (global->config_if.group_enable.control) {
+		if (schedule_group_destroy(ODP_SCHED_GROUP_CONTROL) != 0)
+			_ODP_ERR("Failed to destroy ODP_SCHED_GROUP_CONTROL\n");
+	}
+
+	_odp_ishm_pool_destroy(global->sched_shm_pool);
+
+	if (odp_shm_free(global->shm)) {
+		_ODP_ERR("Shm free failed for scalable scheduler");
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Per-thread init: set up this thread's scheduler state and join the
+ * enabled predefined groups that match the thread type.
+ * Returns 0 on success, -1 on failure (joins already made are rolled back).
+ */
+static int schedule_init_local(void)
+{
+	int thr_id;
+	odp_thread_type_t thr_type;
+	odp_thrmask_t mask;
+
+	thr_id = odp_thread_id();
+	if (thread_state_init(thr_id))
+		goto failed_to_init_ts;
+
+	/* Add this thread to default schedule groups */
+	thr_type = odp_thread_type();
+	odp_thrmask_zero(&mask);
+	odp_thrmask_set(&mask, thr_id);
+
+	/* init_lock serialises against odp_schedule_config() toggling the
+	 * predefined groups.
+	 */
+	odp_spinlock_lock(&global->init_lock);
+
+	if (global->config_if.group_enable.all) {
+		if (schedule_group_join(ODP_SCHED_GROUP_ALL, &mask) != 0) {
+			_ODP_ERR("Failed to join ODP_SCHED_GROUP_ALL\n");
+			goto failed_to_join_grp_all;
+		}
+	}
+	if (global->config_if.group_enable.control && thr_type == ODP_THREAD_CONTROL) {
+		if (schedule_group_join(ODP_SCHED_GROUP_CONTROL, &mask) != 0) {
+			_ODP_ERR("Failed to join ODP_SCHED_GROUP_CONTROL\n");
+			goto failed_to_join_grp_ctrl;
+		}
+	}
+	if (global->config_if.group_enable.worker && thr_type == ODP_THREAD_WORKER) {
+		if (schedule_group_join(ODP_SCHED_GROUP_WORKER, &mask) != 0) {
+			_ODP_ERR("Failed to join ODP_SCHED_GROUP_WORKER\n");
+			goto failed_to_join_grp_wrkr;
+		}
+	}
+
+	odp_spinlock_unlock(&global->init_lock);
+
+	return 0;
+
+	/* Unwind: only GROUP_ALL can have been joined before the failure */
+failed_to_join_grp_wrkr:
+failed_to_join_grp_ctrl:
+	if (global->config_if.group_enable.all)
+		schedule_group_leave(ODP_SCHED_GROUP_ALL, &mask);
+
+failed_to_join_grp_all:
+	odp_spinlock_unlock(&global->init_lock);
+
+failed_to_init_ts:
+	return -1;
+}
+
+/* Per-thread teardown: leave the enabled predefined groups, apply the
+ * resulting membership changes, and verify the thread belongs to no group.
+ * Returns 0 on success, -1 if the thread still has scheduler queues
+ * (i.e. is still a member of some group).
+ */
+static int schedule_term_local(void)
+{
+	int thr_id;
+	odp_thread_type_t thr_type;
+	odp_thrmask_t mask;
+	int rc = 0;
+
+	/* Remove this thread from default schedule groups */
+	thr_id = odp_thread_id();
+	thr_type = odp_thread_type();
+	odp_thrmask_zero(&mask);
+	odp_thrmask_set(&mask, thr_id);
+
+	if (global->config_if.group_enable.all) {
+		if (schedule_group_leave(ODP_SCHED_GROUP_ALL, &mask) != 0)
+			_ODP_ERR("Failed to leave ODP_SCHED_GROUP_ALL\n");
+	}
+	if (global->config_if.group_enable.control && thr_type == ODP_THREAD_CONTROL) {
+		if (schedule_group_leave(ODP_SCHED_GROUP_CONTROL, &mask) != 0)
+			_ODP_ERR("Failed to leave ODP_SCHED_GROUP_CONTROL\n");
+	}
+	if (global->config_if.group_enable.worker && thr_type == ODP_THREAD_WORKER) {
+		if (schedule_group_leave(ODP_SCHED_GROUP_WORKER, &mask) != 0)
+			_ODP_ERR("Failed to leave ODP_SCHED_GROUP_WORKER\n");
+	}
+
+	/* Apply the pending leave requests to this thread's local state */
+	update_sg_membership(_odp_sched_ts);
+
+	/* Check if the thread is still part of any groups */
+	if (_odp_sched_ts->num_schedq != 0) {
+		_ODP_ERR("Thread %d still part of scheduler group(s)\n", _odp_sched_ts->tidx);
+		rc = -1;
+	}
+
+	return rc;
+}
+
+/* Fill 'config' with this scheduler's defaults: all predefined groups
+ * enabled; queue_size 0 meaning no configurable per-queue size here.
+ */
+static void schedule_config_init(odp_schedule_config_t *config)
+{
+	config->num_queues = CONFIG_MAX_SCHED_QUEUES;
+	config->queue_size = 0; /* FIXME ? */
+	config->sched_group.all = true;
+	config->sched_group.control = true;
+	config->sched_group.worker = true;
+}
+
+/* Apply the application's schedule configuration: record which predefined
+ * groups stay enabled and destroy the disabled ones. Serialised with
+ * schedule_init_local() via init_lock. Always returns 0.
+ */
+static int schedule_config(const odp_schedule_config_t *config)
+{
+	odp_spinlock_lock(&global->init_lock);
+
+	global->config_if.group_enable.all = config->sched_group.all;
+	global->config_if.group_enable.control = config->sched_group.control;
+	global->config_if.group_enable.worker = config->sched_group.worker;
+
+	/* Destroy disabled predefined scheduling groups. */
+	if (!config->sched_group.all) {
+		if (schedule_group_destroy(ODP_SCHED_GROUP_ALL) != 0)
+			_ODP_ERR("Failed to destroy ODP_SCHED_GROUP_ALL\n");
+	}
+	if (!config->sched_group.worker) {
+		if (schedule_group_destroy(ODP_SCHED_GROUP_WORKER) != 0)
+			_ODP_ERR("Failed to destroy ODP_SCHED_GROUP_WORKER\n");
+	}
+
+	if (!config->sched_group.control) {
+		if (schedule_group_destroy(ODP_SCHED_GROUP_CONTROL) != 0)
+			_ODP_ERR("Failed to destroy ODP_SCHED_GROUP_CONTROL\n");
+	}
+
+	odp_spinlock_unlock(&global->init_lock);
+
+	return 0;
+}
+
+/* Number of application-creatable groups (excludes the auto groups). */
+static int num_grps(void)
+{
+	return MAX_SCHED_GROUP - NUM_AUTO_GROUPS;
+}
+
+/*
+ * Stub implementations for the internal scheduler abstraction layer.
+ * They must exist because callers invoke these function pointers without
+ * first checking them for NULL.
+ */
+
+/* No-op stub; the work is done in schedule_init_local(). Always returns 0. */
+static int thr_add(odp_schedule_group_t group, int thr)
+{
+	/* This function is a schedule_init_local duplicate. */
+	(void)group;
+	(void)thr;
+	return 0;
+}
+
+/* No-op stub; the work is done in schedule_term_local(). Always returns 0. */
+static int thr_rem(odp_schedule_group_t group, int thr)
+{
+	/* This function is a schedule_term_local duplicate. */
+	(void)group;
+	(void)thr;
+	return 0;
+}
+
+/* No-op stub; queue creation is handled elsewhere in this scheduler. */
+static int create_queue(uint32_t queue_index,
+			const odp_schedule_param_t *sched_param)
+{
+	/* Not used in scalable scheduler. */
+	(void)queue_index;
+	(void)sched_param;
+	return 0;
+}
+
+/* No-op stub; queue destruction is handled elsewhere in this scheduler. */
+static void destroy_queue(uint32_t queue_index)
+{
+	/* Not used in scalable scheduler. */
+	(void)queue_index;
+}
+
+/* No-op stub; queue (re)scheduling is handled elsewhere in this scheduler. */
+static int sched_queue(uint32_t queue_index)
+{
+	/* Not used in scalable scheduler. */
+	(void)queue_index;
+	return 0;
+}
+
+/* Intercept enqueues made from an out-of-order ordered-queue context.
+ * If the calling thread is currently out-of-order and the destination
+ * queue keeps order, the events are saved in the reorder context for
+ * deferred enqueue; *ret gets the number saved and 1 is returned (enqueue
+ * handled here). Returns 0 when the normal enqueue path should proceed.
+ */
+static int ord_enq_multi(odp_queue_t handle, void *event_hdr[], int num,
+			 int *ret)
+
+{
+	queue_entry_t *queue;
+	sched_scalable_thread_state_t *ts;
+	int actual;
+
+	ts = _odp_sched_ts;
+	queue = qentry_from_int(handle);
+	if (ts && odp_unlikely(ts->out_of_order) &&
+	    (queue->param.order == ODP_QUEUE_ORDER_KEEP)) {
+		actual = _odp_rctx_save(queue, (_odp_event_hdr_t **)event_hdr, num);
+		*ret = actual;
+		return 1;
+	}
+	return 0;
+}
+
+/* No-op: this scheduler implements no prefetch hint. */
+static void schedule_prefetch(int num)
+{
+	(void)num;
+}
+
+/* Wait until we are in-order (when processing an ordered queue)
+ * Note: this function may be called also when processing other queue types
+ */
+/* Block until this thread is in-order in its reorder window.
+ * No-op when the thread is already in-order (or not on an ordered queue).
+ */
+static void order_lock(void)
+{
+	sched_scalable_thread_state_t *ts;
+	reorder_window_t *rwin;
+	uint32_t sn;
+
+	ts = _odp_sched_ts;
+	if (odp_unlikely(ts->out_of_order)) {
+		/* We are processing ordered queue and are currently
+		 * out-of-order.
+		 * We are in-order when our reorder window slot number (sn)
+		 * equals the head of the reorder window.
+		 */
+		_ODP_ASSERT(ts->rctx != NULL);
+		rwin = ts->rctx->rwin;
+		sn = ts->rctx->sn;
+		/* Use acquire ordering to be on the safe side even if
+		 * this isn't an acquire/release situation (aka lock).
+		 */
+		_odp_wait_until_eq_acq_u32(&rwin->hc.head, sn);
+	}
+}
+
+/* This function is unnecessary.
+ * The next thread becomes in-order when we release our reorder context
+ * (i.e. when odp_schedule() is called again.
+ */
+/* No-op: order is released when the reorder context is released. */
+static void order_unlock(void)
+{
+	/* Nothing to do */
+}
+
+/* Maximum number of ordered locks supported per queue. */
+static uint32_t schedule_max_ordered_locks(void)
+{
+	return CONFIG_QUEUE_MAX_ORD_LOCKS;
+}
+
+/* Report this scheduler's capabilities. Always returns 0.
+ * max_queue_size 0 means queue size is not configurable here.
+ */
+static int schedule_capability(odp_schedule_capability_t *capa)
+{
+	memset(capa, 0, sizeof(odp_schedule_capability_t));
+
+	capa->max_ordered_locks = schedule_max_ordered_locks();
+	capa->max_groups = num_grps();
+	capa->max_prios = schedule_num_prio();
+	capa->max_queues = CONFIG_MAX_SCHED_QUEUES;
+	capa->max_queue_size = 0;
+	capa->order_wait = ODP_SUPPORT_YES;
+
+	return 0;
+}
+
+/* Print a short human-readable summary of the scheduler configuration. */
+static void schedule_print(void)
+{
+	odp_schedule_capability_t capa;
+
+	(void)schedule_capability(&capa);
+
+	_ODP_PRINT("\nScheduler debug info\n");
+	_ODP_PRINT("--------------------\n");
+	_ODP_PRINT("  scheduler:         scalable\n");
+	_ODP_PRINT("  max groups:        %u\n", capa.max_groups);
+	_ODP_PRINT("  max priorities:    %u\n", capa.max_prios);
+	_ODP_PRINT("\n");
+}
+
+/* Forward declaration: the API table is defined at the end of the file. */
+const _odp_schedule_api_fn_t _odp_schedule_scalable_api;
+
+/* Return the scalable scheduler's public API function table. */
+static const _odp_schedule_api_fn_t *sched_api(void)
+{
+	return &_odp_schedule_scalable_api;
+}
+
+/* Internal scheduler-interface hooks for the scalable scheduler. */
+const schedule_fn_t _odp_schedule_scalable_fn = {
+	.pktio_start = pktio_start,
+	.thr_add = thr_add,
+	.thr_rem = thr_rem,
+	.num_grps = num_grps,
+	.create_queue = create_queue,
+	.destroy_queue = destroy_queue,
+	.sched_queue = sched_queue,
+	.ord_enq_multi = ord_enq_multi,
+	.init_global = schedule_init_global,
+	.term_global = schedule_term_global,
+	.init_local = schedule_init_local,
+	.term_local = schedule_term_local,
+	.order_lock = order_lock,
+	.order_unlock = order_unlock,
+	.max_ordered_locks = schedule_max_ordered_locks,
+	.sched_api = sched_api,
+};
+
+/* Public odp_schedule_*() API implementation table for this scheduler. */
+const _odp_schedule_api_fn_t _odp_schedule_scalable_api = {
+	.schedule_wait_time = schedule_wait_time,
+	.schedule_capability = schedule_capability,
+	.schedule_config_init = schedule_config_init,
+	.schedule_config = schedule_config,
+	.schedule = schedule,
+	.schedule_multi = schedule_multi,
+	.schedule_multi_wait = schedule_multi_wait,
+	.schedule_multi_no_wait = schedule_multi_no_wait,
+	.schedule_pause = schedule_pause,
+	.schedule_resume = schedule_resume,
+	.schedule_release_atomic = schedule_release_atomic,
+	.schedule_release_ordered = schedule_release_ordered,
+	.schedule_prefetch = schedule_prefetch,
+	.schedule_min_prio = schedule_min_prio,
+	.schedule_max_prio = schedule_max_prio,
+	.schedule_default_prio = schedule_default_prio,
+	.schedule_num_prio = schedule_num_prio,
+	.schedule_group_create = schedule_group_create,
+	.schedule_group_destroy = schedule_group_destroy,
+	.schedule_group_lookup = schedule_group_lookup,
+	.schedule_group_join = schedule_group_join,
+	.schedule_group_leave = schedule_group_leave,
+	.schedule_group_thrmask = schedule_group_thrmask,
+	.schedule_group_info = schedule_group_info,
+	.schedule_order_lock = schedule_order_lock,
+	.schedule_order_unlock = schedule_order_unlock,
+	.schedule_order_unlock_lock = schedule_order_unlock_lock,
+	.schedule_order_lock_start = schedule_order_lock_start,
+	.schedule_order_lock_wait = schedule_order_lock_wait,
+	.schedule_order_wait = order_lock,
+	.schedule_print = schedule_print
+};
diff --git a/platform/linux-generic/odp_schedule_scalable_ordered.c b/platform/linux-generic/odp_schedule_scalable_ordered.c
new file mode 100644
index 000000000..f8568ce53
--- /dev/null
+++ b/platform/linux-generic/odp_schedule_scalable_ordered.c
@@ -0,0 +1,370 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/shared_memory.h>
+#include <odp/api/cpu.h>
+#include <odp/api/plat/cpu_inlines.h>
+
+#include <odp_bitset.h>
+#include <odp_event_internal.h>
+#include <odp_queue_scalable_internal.h>
+#include <odp_schedule_if.h>
+
+#include <string.h>
+
+extern __thread sched_scalable_thread_state_t *_odp_sched_ts;
+
+/* Allocate and initialise a reorder window from 'pool'.
+ * 'lock_count' is the number of ordered locks the window supports.
+ * Returns NULL if the pool allocation fails.
+ */
+reorder_window_t *_odp_rwin_alloc(_odp_ishm_pool_t *pool, unsigned int lock_count)
+{
+	reorder_window_t *rwin;
+	uint32_t i;
+
+	rwin = (reorder_window_t *)
+	       shm_pool_alloc_align(pool, sizeof(reorder_window_t));
+	if (rwin == NULL)
+		return NULL;
+
+	/* Empty window: head == tail, all ring slots free */
+	rwin->hc.head = 0;
+	rwin->hc.chgi = 0;
+	rwin->winmask = RWIN_SIZE - 1;
+	rwin->tail = 0;
+	rwin->turn = 0;
+	rwin->lock_count = (uint16_t)lock_count;
+	memset(rwin->olock, 0, sizeof(rwin->olock));
+	for (i = 0; i < RWIN_SIZE; i++)
+		rwin->ring[i] = NULL;
+
+	return rwin;
+}
+
+/* Return a reorder window to its pool; propagates the pool's return code. */
+int _odp_rwin_free(_odp_ishm_pool_t *pool, reorder_window_t *rwin)
+{
+	return _odp_ishm_pool_free(pool, rwin);
+}
+
+/* Reserve the next slot in the reorder window (multi-threaded version).
+ * On success writes the slot's sequence number to *sn and returns true;
+ * returns false when the window is full (tail would pass head + winmask).
+ */
+bool _odp_rwin_reserve(reorder_window_t *rwin, uint32_t *sn)
+{
+	uint32_t head;
+	uint32_t oldt;
+	uint32_t newt;
+	uint32_t winmask;
+
+	/* Read head and tail separately */
+	oldt = rwin->tail;
+	winmask = rwin->winmask;
+	do {
+		/* Need __atomic_load to avoid compiler reordering */
+		head = __atomic_load_n(&rwin->hc.head, __ATOMIC_RELAXED);
+		/* Unsigned wrap-around arithmetic handles sn wrap */
+		if (odp_unlikely(oldt - head >= winmask))
+			return false;
+
+		newt = oldt + 1;
+	} while (!__atomic_compare_exchange(&rwin->tail,
+					    &oldt,
+					    &newt,
+					    true,
+					    __ATOMIC_RELAXED,
+					    __ATOMIC_RELAXED));
+	*sn = oldt;
+
+	return true;
+}
+
+/* Reserve the next reorder window slot, single-consumer version:
+ * caller guarantees exclusive access so plain loads/stores suffice.
+ * Semantics match _odp_rwin_reserve().
+ */
+bool _odp_rwin_reserve_sc(reorder_window_t *rwin, uint32_t *sn)
+{
+	uint32_t head;
+	uint32_t oldt;
+	uint32_t newt;
+	uint32_t winmask;
+
+	/* Read head and tail separately */
+	oldt = rwin->tail;
+	winmask = rwin->winmask;
+	head = rwin->hc.head;
+	if (odp_unlikely(oldt - head >= winmask))
+		return false;
+	newt = oldt + 1;
+	rwin->tail = newt;
+	*sn = oldt;
+
+	return true;
+}
+
+/* Undo the most recent single-consumer reservation; 'sn' must be the
+ * sequence number returned by the matching _odp_rwin_reserve_sc() call.
+ */
+void _odp_rwin_unreserve_sc(reorder_window_t *rwin, uint32_t sn)
+{
+	_ODP_ASSERT(rwin->tail == sn + 1);
+	rwin->tail = sn;
+}
+
+/* Insert reorder context 'rctx' with sequence number 'sn' into the window.
+ * If we are out-of-order the context is parked in the ring for the
+ * in-order thread to retire later. If (or once) we are in-order, we retire
+ * our own context via 'callback' and then drain any consecutive parked
+ * contexts, advancing head. The head/chgi pair is CASed as one unit; chgi
+ * changes on every update so a parked insertion is never missed.
+ */
+static void rwin_insert(reorder_window_t *rwin,
+			reorder_context_t *rctx,
+			uint32_t sn,
+			void (*callback)(reorder_context_t *))
+{
+	/* Initialise to silence scan-build */
+	hc_t old = {0, 0};
+	hc_t new;
+	uint32_t winmask;
+
+	__atomic_load(&rwin->hc, &old, __ATOMIC_ACQUIRE);
+	winmask = rwin->winmask;
+	if (old.head != sn) {
+		/* We are out-of-order. Store context in reorder window,
+		 * releasing its content.
+		 */
+		_ODP_ASSERT(rwin->ring[sn & winmask] == NULL);
+		atomic_store_release(&rwin->ring[sn & winmask],
+				     rctx,
+				     /*readonly=*/false);
+		rctx = NULL;
+		do {
+			new.head = old.head;
+			new.chgi = old.chgi + 1; /* Changed value */
+			/* Update head & chgi, fail if any has changed */
+			if (__atomic_compare_exchange(&rwin->hc,
+						      /* Updated on fail */
+						      &old,
+						      &new,
+						      true,
+						      /* Rel our ring update */
+						      __ATOMIC_RELEASE,
+						      __ATOMIC_ACQUIRE))
+				/* CAS succeeded => head same (we are not
+				 * in-order), chgi updated.
+				 */
+				return;
+			/* CAS failed => head and/or chgi changed.
+			 * We might not be out-of-order anymore.
+			 */
+		} while (old.head != sn);
+	}
+
+	/* old.head == sn => we are now in-order! */
+	_ODP_ASSERT(old.head == sn);
+	/* We are in-order so our responsibility to retire contexts */
+	new.head = old.head;
+	new.chgi = old.chgi + 1;
+
+	/* Retire our in-order context (if we still have it) */
+	if (rctx != NULL) {
+		callback(rctx);
+		new.head++;
+	}
+
+	/* Retire in-order contexts in the ring
+	 * The first context might actually be ours (if we were originally
+	 * out-of-order)
+	 */
+	do {
+		for (;;) {
+			rctx = __atomic_load_n(&rwin->ring[new.head & winmask],
+					       __ATOMIC_ACQUIRE);
+			if (rctx == NULL)
+				break;
+			/* We are the only thread that are in-order
+			 * (until head updated) so don't have to use
+			 * atomic load-and-clear (exchange)
+			 */
+			rwin->ring[new.head & winmask] = NULL;
+			callback(rctx);
+			new.head++;
+		}
+		/* Update head&chgi, fail if chgi has changed (head cannot change) */
+	} while (!__atomic_compare_exchange(&rwin->hc,
+					    &old, /* Updated on failure */
+					    &new,
+					    false, /* weak */
+					    __ATOMIC_RELEASE, /* Release our ring updates */
+					    __ATOMIC_ACQUIRE));
+}
+
+/* (Re)initialise a reorder context for window 'rwin' and slot 'sn'.
+ * The context starts as a single-element list: next_idx and cur_idx both
+ * point at itself, with no stored events.
+ */
+void _odp_rctx_init(reorder_context_t *rctx, uint16_t idx,
+		    reorder_window_t *rwin, uint32_t sn)
+{
+	/* rctx->rvec_free and rctx->idx already initialised in
+	 * thread_state_init function.
+	 */
+	_ODP_ASSERT(rctx->idx == idx);
+	rctx->rwin = rwin;
+	rctx->sn = sn;
+	rctx->olock_flags = 0;
+	/* First => no next reorder context */
+	rctx->next_idx = idx;
+	/* Where to store next event */
+	rctx->cur_idx = idx;
+	rctx->numevts = 0;
+}
+
+/* Free a chain of reorder contexts starting at 'rctx' by setting each
+ * context's bit in its owner's free bitset. Contexts owned by the calling
+ * thread use a cheap non-atomic update of the private bitset; others need
+ * an atomic release so the owner observes the fully-consumed context.
+ */
+static inline void rctx_free(const reorder_context_t *rctx)
+{
+	/* Base of the owning thread's context vector (idx is the offset) */
+	const reorder_context_t *const base = &rctx[-(int)rctx->idx];
+	const uint32_t first = rctx->idx;
+	uint32_t next_idx;
+
+	next_idx = rctx->next_idx;
+
+	_ODP_ASSERT(rctx->rwin != NULL);
+	/* Set free bit */
+	if (rctx->rvec_free == &_odp_sched_ts->rvec_free)
+		/* Since it is our own reorder context, we can instead
+		 * perform a non-atomic and relaxed update on our private
+		 * rvec_free.
+		 */
+		_odp_sched_ts->priv_rvec_free =
+			bitset_set(_odp_sched_ts->priv_rvec_free, rctx->idx);
+	else
+		atom_bitset_set(rctx->rvec_free, rctx->idx, __ATOMIC_RELEASE);
+
+	/* Can't dereference rctx after the corresponding free bit is set */
+	while (next_idx != first) {
+		rctx = &base[next_idx];
+		next_idx = rctx->next_idx;
+		/* Set free bit */
+		if (rctx->rvec_free == &_odp_sched_ts->rvec_free)
+			_odp_sched_ts->priv_rvec_free =
+				bitset_set(_odp_sched_ts->priv_rvec_free, rctx->idx);
+		else
+			atom_bitset_set(rctx->rvec_free, rctx->idx,
+					__ATOMIC_RELEASE);
+	}
+}
+
+/* Release ordered lock 'lock_index' by advancing it past our sequence
+ * number — but only if this context did not already release it explicitly
+ * (tracked via olock_flags).
+ */
+static inline void olock_unlock(const reorder_context_t *rctx,
+				reorder_window_t *rwin,
+				uint32_t lock_index)
+{
+	if ((rctx->olock_flags & (1U << lock_index)) == 0) {
+		/* Use relaxed ordering, we are not releasing any updates */
+		rwin->olock[lock_index] = rctx->sn + 1;
+	}
+}
+
+/* Release all ordered locks held (implicitly) by this reorder context. */
+static void olock_release(const reorder_context_t *rctx)
+{
+	reorder_window_t *rwin;
+	uint32_t i;
+
+	rwin = rctx->rwin;
+
+	for (i = 0; i < rwin->lock_count; i++)
+		olock_unlock(rctx, rwin, i);
+}
+
+/* Enqueue 'num' events to queue 'q', retrying (with CPU back-off) until
+ * every event has been accepted. Partial success advances the event
+ * pointer; a hard error from enqueue_multi() is logged and treated as
+ * zero progress.
+ */
+static void blocking_enqueue(queue_entry_t *q, _odp_event_hdr_t **evts, int num)
+{
+	int actual;
+
+	/* Iterate until all events have been successfully enqueued */
+	for (;;) {
+		/* Attempt to enqueue remaining events */
+		actual = q->enqueue_multi(qentry_to_int(q), evts, num);
+		if (odp_unlikely(actual < 0)) {
+			_ODP_ERR("Failed to enqueue deferred events\n");
+			/* Clamp a hard error to zero progress: otherwise the
+			 * pointer/count update below would move 'evts'
+			 * backwards and grow 'num' with the negative value.
+			 */
+			actual = 0;
+		}
+		/* Update for potential partial success */
+		evts += actual;
+		num -= actual;
+		if (num == 0)
+			break;
+		/* Back-off to decrease load on the system */
+		odp_cpu_pause();
+	}
+}
+
+/* Retire a chain of reorder contexts: for each context, enqueue its stored
+ * events grouped by consecutive destination queue, then release the
+ * ordered locks and free the whole chain.
+ */
+static void rctx_retire(reorder_context_t *first)
+{
+	reorder_context_t *rctx;
+	queue_entry_t *q;
+	uint32_t i;
+	uint32_t j;
+	uint32_t num;
+
+	rctx = first;
+	do {
+		/* Process all events in this reorder context */
+		for (i = 0; i < rctx->numevts;) {
+			q = rctx->destq[i];
+			/* Find index of next different destq */
+			j = i + 1;
+			while (j < rctx->numevts && rctx->destq[j] == q)
+				j++;
+			num = j - i;
+			/* Blocking enqueue of events to this destq */
+			blocking_enqueue(q, &rctx->events[i], num);
+			i += num;
+		}
+		/* Update rctx pointer to point to 'next_idx' element */
+		rctx += (int)rctx->next_idx - (int)rctx->idx;
+	} while (rctx != first);
+	olock_release(first);
+	rctx_free(first);
+}
+
+/* Release a reorder context back to its reorder window; retires pending
+ * contexts (via rctx_retire) once the caller becomes in-order.
+ */
+void _odp_rctx_release(reorder_context_t *rctx)
+{
+	/* Insert reorder context into reorder window, potentially calling the
+	 * rctx_retire function for all pending reorder_contexts.
+	 */
+	rwin_insert(rctx->rwin, rctx, rctx->sn, rctx_retire);
+}
+
+/* Save destination queue and events in the reorder context for deferred
+ * enqueue.
+ */
+/* Store up to 'num' events (with their destination 'queue') in the calling
+ * thread's current reorder-context chain for deferred, in-order enqueue.
+ * Allocates and links additional contexts from the thread's free vector
+ * when the current one fills up. Returns the number of events stored,
+ * which is less than 'num' only when the thread runs out of contexts.
+ */
+int _odp_rctx_save(queue_entry_t *queue, _odp_event_hdr_t *event_hdr[], int num)
+{
+	int i;
+	sched_scalable_thread_state_t *ts;
+	reorder_context_t *first;
+	reorder_context_t *cur;
+	bitset_t next_idx;
+
+	ts = _odp_sched_ts;
+	first = ts->rctx;
+	_ODP_ASSERT(ts->rctx != NULL);
+	/* cur_idx identifies the context currently accepting events */
+	cur = &first[(int)first->cur_idx - (int)first->idx];
+	for (i = 0; i < num; i++) {
+		if (odp_unlikely(cur->numevts == RC_EVT_SIZE)) {
+			/* No more space in current reorder context
+			 * Try to allocate another.
+			 */
+			if (odp_unlikely(bitset_is_null(ts->priv_rvec_free))) {
+				/* Refill the private free set from the shared
+				 * one (contexts freed by other threads)
+				 */
+				ts->priv_rvec_free = atom_bitset_xchg(&ts->rvec_free, 0,
+								      __ATOMIC_RELAXED);
+
+				if (odp_unlikely(bitset_is_null(ts->priv_rvec_free)))
+					/* Out of reorder contexts.
+					 * Return the number of events
+					 * stored so far.
+					 */
+					return i;
+			}
+			next_idx = bitset_ffs(ts->priv_rvec_free) - 1;
+			ts->priv_rvec_free =
+				bitset_clr(ts->priv_rvec_free,
+					   next_idx);
+			/* Link current to next (for eventual
+			 * retiring)
+			 */
+			cur->next_idx = next_idx;
+			/* Link first to next (for next call to
+			 * queue_enq_multi())
+			 */
+			first->cur_idx = next_idx;
+			/* Update current to next */
+			cur = &ts->rvec[next_idx];
+			_odp_rctx_init(cur, next_idx, NULL, 0);
+			/* The last rctx (so far) */
+			cur->next_idx = first->idx;
+		}
+		cur->events[cur->numevts] = event_hdr[i];
+		cur->destq[cur->numevts] = queue;
+		cur->numevts++;
+	}
+	/* All events stored. */
+	return num;
+}
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 0fd4d87da..030e95171 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -1,30 +1,51 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <string.h>
+/*
+ * Suppress bounds warnings about interior zero length arrays. Such an array
+ * is used intentionally in prio_queue_t.
+ */
+#if __GNUC__ >= 10
+#pragma GCC diagnostic ignored "-Wzero-length-bounds"
+#endif
+
+#include <odp/api/packet.h>
#include <odp/api/ticketlock.h>
#include <odp/api/thread.h>
+#include <odp/api/plat/thread_inlines.h>
#include <odp/api/time.h>
+#include <odp/api/plat/time_inlines.h>
#include <odp/api/schedule.h>
#include <odp/api/shared_memory.h>
+
+#include <odp/api/plat/schedule_inline_types.h>
+
#include <odp_schedule_if.h>
#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
-#include <odp_ring_internal.h>
+#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ring_u32_internal.h>
+#include <odp_timer_internal.h>
+#include <odp_queue_basic_internal.h>
+#include <odp_global_data.h>
+
+#include <string.h>
#define NUM_THREAD ODP_THREAD_COUNT_MAX
-#define NUM_QUEUE ODP_CONFIG_QUEUES
-#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
+#define NUM_QUEUE CONFIG_MAX_SCHED_QUEUES
+#define NUM_PKTIO CONFIG_PKTIO_ENTRIES
#define NUM_ORDERED_LOCKS 1
-#define NUM_PRIO 3
#define NUM_STATIC_GROUP 3
#define NUM_GROUP (NUM_STATIC_GROUP + 9)
#define NUM_PKTIN 32
-#define LOWEST_QUEUE_PRIO (NUM_PRIO - 2)
+#define NUM_PRIO 3
+#define MAX_API_PRIO (NUM_PRIO - 2)
+/* Lowest internal priority */
#define PKTIN_PRIO (NUM_PRIO - 1)
#define CMD_QUEUE 0
#define CMD_PKTIO 1
@@ -34,20 +55,17 @@
#define GROUP_PKTIN GROUP_ALL
/* Maximum number of commands: one priority/group for all queues and pktios */
-#define RING_SIZE (ROUNDUP_POWER2_U32(NUM_QUEUE + NUM_PKTIO))
+#define RING_SIZE (_ODP_ROUNDUP_POWER2_U32(NUM_QUEUE + NUM_PKTIO))
#define RING_MASK (RING_SIZE - 1)
/* Ring size must be power of two */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(RING_SIZE),
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(RING_SIZE),
"Ring_size_is_not_power_of_two");
ODP_STATIC_ASSERT(NUM_ORDERED_LOCKS <= CONFIG_QUEUE_MAX_ORD_LOCKS,
"Too_many_ordered_locks");
-struct sched_cmd_t;
-
-struct sched_cmd_s {
- struct sched_cmd_t *next;
+typedef struct ODP_ALIGNED_CACHE {
uint32_t index;
uint32_t ring_idx;
int type;
@@ -56,22 +74,20 @@ struct sched_cmd_s {
int init;
int num_pktin;
int pktin_idx[NUM_PKTIN];
-};
-
-typedef struct sched_cmd_t {
- struct sched_cmd_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct sched_cmd_s)) -
- sizeof(struct sched_cmd_s)];
-} sched_cmd_t ODP_ALIGNED_CACHE;
+ odp_queue_t queue[NUM_PKTIN];
+} sched_cmd_t;
-typedef struct {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+typedef struct ODP_ALIGNED_CACHE {
/* Ring header */
- ring_t ring;
+ ring_u32_t ring;
/* Ring data: queue indexes */
- uint32_t ring_idx[RING_SIZE];
+ uint32_t ring_idx[RING_SIZE]; /* overlaps with ring.data[] */
-} prio_queue_t ODP_ALIGNED_CACHE;
+} prio_queue_t;
+#pragma GCC diagnostic pop
typedef struct thr_group_t {
/* A generation counter for fast comparison if groups have changed */
@@ -85,7 +101,7 @@ typedef struct thr_group_t {
} thr_group_t;
-typedef struct sched_group_t {
+typedef struct ODP_ALIGNED_CACHE sched_group_t {
struct {
odp_ticketlock_t lock;
@@ -101,14 +117,20 @@ typedef struct sched_group_t {
} s;
-} sched_group_t ODP_ALIGNED_CACHE;
+} sched_group_t;
typedef struct {
sched_cmd_t queue_cmd[NUM_QUEUE];
sched_cmd_t pktio_cmd[NUM_PKTIO];
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
prio_queue_t prio_queue[NUM_GROUP][NUM_PRIO];
+#pragma GCC diagnostic pop
sched_group_t sched_group;
odp_shm_t shm;
+ /* Scheduler interface config options (not used in fast path) */
+ schedule_config_t config_if;
} sched_global_t;
typedef struct {
@@ -123,6 +145,8 @@ typedef struct {
static sched_global_t *sched_global;
static __thread sched_local_t sched_local;
+static void remove_group(sched_group_t *sched_group, int thr, int group);
+
static inline uint32_t index_to_ring_idx(int pktio, uint32_t index)
{
if (pktio)
@@ -149,16 +173,16 @@ static int init_global(void)
odp_shm_t shm;
sched_group_t *sched_group = NULL;
- ODP_DBG("Using SP scheduler\n");
+ _ODP_DBG("Using SP scheduler\n");
- shm = odp_shm_reserve("sp_scheduler",
+ shm = odp_shm_reserve("_odp_sched_sp_global",
sizeof(sched_global_t),
ODP_CACHE_LINE_SIZE, 0);
sched_global = odp_shm_addr(shm);
if (sched_global == NULL) {
- ODP_ERR("Schedule init: Shm reserve failed.\n");
+ _ODP_ERR("Schedule init: Shm reserve failed.\n");
return -1;
}
@@ -166,22 +190,22 @@ static int init_global(void)
sched_global->shm = shm;
for (i = 0; i < NUM_QUEUE; i++) {
- sched_global->queue_cmd[i].s.type = CMD_QUEUE;
- sched_global->queue_cmd[i].s.index = i;
- sched_global->queue_cmd[i].s.ring_idx = index_to_ring_idx(0, i);
+ sched_global->queue_cmd[i].type = CMD_QUEUE;
+ sched_global->queue_cmd[i].index = i;
+ sched_global->queue_cmd[i].ring_idx = index_to_ring_idx(0, i);
}
for (i = 0; i < NUM_PKTIO; i++) {
- sched_global->pktio_cmd[i].s.type = CMD_PKTIO;
- sched_global->pktio_cmd[i].s.index = i;
- sched_global->pktio_cmd[i].s.ring_idx = index_to_ring_idx(1, i);
- sched_global->pktio_cmd[i].s.prio = PKTIN_PRIO;
- sched_global->pktio_cmd[i].s.group = GROUP_PKTIN;
+ sched_global->pktio_cmd[i].type = CMD_PKTIO;
+ sched_global->pktio_cmd[i].index = i;
+ sched_global->pktio_cmd[i].ring_idx = index_to_ring_idx(1, i);
+ sched_global->pktio_cmd[i].prio = PKTIN_PRIO;
+ sched_global->pktio_cmd[i].group = GROUP_PKTIN;
}
for (i = 0; i < NUM_GROUP; i++)
for (j = 0; j < NUM_PRIO; j++)
- ring_init(&sched_global->prio_queue[i][j].ring);
+ ring_u32_init(&sched_global->prio_queue[i][j].ring);
sched_group = &sched_global->sched_group;
odp_ticketlock_init(&sched_group->s.lock);
@@ -204,6 +228,10 @@ static int init_global(void)
odp_thrmask_zero(&sched_group->s.group[GROUP_CONTROL].mask);
sched_group->s.group[GROUP_CONTROL].allocated = 1;
+ sched_global->config_if.group_enable.all = 1;
+ sched_global->config_if.group_enable.control = 1;
+ sched_global->config_if.group_enable.worker = 1;
+
return 0;
}
@@ -217,18 +245,26 @@ static int init_local(void)
static int term_global(void)
{
+ odp_event_t event;
int qi, ret = 0;
for (qi = 0; qi < NUM_QUEUE; qi++) {
- if (sched_global->queue_cmd[qi].s.init) {
- /* todo: dequeue until empty ? */
- sched_cb_queue_destroy_finalize(qi);
+ int report = 1;
+
+ if (sched_global->queue_cmd[qi].init) {
+ while (_odp_sched_queue_deq(qi, &event, 1, 1) > 0) {
+ if (report) {
+ _ODP_ERR("Queue not empty\n");
+ report = 0;
+ }
+ odp_event_free(event);
+ }
}
}
ret = odp_shm_free(sched_global->shm);
if (ret < 0) {
- ODP_ERR("Shm free failed for sp_scheduler");
+ _ODP_ERR("Shm free failed for sp_scheduler");
ret = -1;
}
@@ -240,7 +276,61 @@ static int term_local(void)
return 0;
}
-static unsigned max_ordered_locks(void)
+static void schedule_config_init(odp_schedule_config_t *config)
+{
+ config->num_queues = CONFIG_MAX_SCHED_QUEUES;
+ config->queue_size = _odp_queue_glb->config.max_queue_size;
+ config->sched_group.all = true;
+ config->sched_group.control = true;
+ config->sched_group.worker = true;
+}
+
+static void schedule_group_clear(odp_schedule_group_t group)
+{
+ sched_group_t *sched_group = &sched_global->sched_group;
+ int thr;
+ const odp_thrmask_t *thrmask;
+
+ if (group < 0 || group >= NUM_STATIC_GROUP)
+ _ODP_ABORT("Invalid scheduling group\n");
+
+ thrmask = &sched_group->s.group[group].mask;
+
+ thr = odp_thrmask_first(thrmask);
+ while (thr >= 0) {
+ remove_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
+
+ memset(&sched_group->s.group[group], 0, sizeof(sched_group->s.group[0]));
+}
+
+static int schedule_config(const odp_schedule_config_t *config)
+{
+ sched_group_t *sched_group = &sched_global->sched_group;
+
+ odp_ticketlock_lock(&sched_group->s.lock);
+
+ sched_global->config_if.group_enable.all = config->sched_group.all;
+ sched_global->config_if.group_enable.control = config->sched_group.control;
+ sched_global->config_if.group_enable.worker = config->sched_group.worker;
+
+ /* Remove existing threads from predefined scheduling groups. */
+ if (!config->sched_group.all)
+ schedule_group_clear(ODP_SCHED_GROUP_ALL);
+
+ if (!config->sched_group.worker)
+ schedule_group_clear(ODP_SCHED_GROUP_WORKER);
+
+ if (!config->sched_group.control)
+ schedule_group_clear(ODP_SCHED_GROUP_CONTROL);
+
+ odp_ticketlock_unlock(&sched_group->s.lock);
+
+ return 0;
+}
+
+static uint32_t max_ordered_locks(void)
{
return NUM_ORDERED_LOCKS;
}
@@ -255,7 +345,7 @@ static void add_group(sched_group_t *sched_group, int thr, int group)
thr_group->group[num] = group;
thr_group->num_group = num + 1;
gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
- odp_atomic_store_u32(&thr_group->gen_cnt, gen_cnt + 1);
+ odp_atomic_store_rel_u32(&thr_group->gen_cnt, gen_cnt + 1);
}
static void remove_group(sched_group_t *sched_group, int thr, int group)
@@ -266,6 +356,12 @@ static void remove_group(sched_group_t *sched_group, int thr, int group)
num = thr_group->num_group;
+ /* Extra array bounds check to suppress warning on GCC 7.4 with -O3 */
+ if (num >= NUM_GROUP) {
+ _ODP_ERR("Too many groups");
+ return;
+ }
+
for (i = 0; i < num; i++) {
if (thr_group->group[i] == group) {
found = 1;
@@ -282,7 +378,7 @@ static void remove_group(sched_group_t *sched_group, int thr, int group)
thr_group->num_group = num - 1;
gen_cnt = odp_atomic_load_u32(&thr_group->gen_cnt);
- odp_atomic_store_u32(&thr_group->gen_cnt, gen_cnt + 1);
+ odp_atomic_store_rel_u32(&thr_group->gen_cnt, gen_cnt + 1);
}
}
@@ -290,7 +386,7 @@ static int thr_add(odp_schedule_group_t group, int thr)
{
sched_group_t *sched_group = &sched_global->sched_group;
- if (group < 0 || group >= NUM_GROUP)
+ if (group < 0 || group >= NUM_STATIC_GROUP)
return -1;
if (thr < 0 || thr >= NUM_THREAD)
@@ -300,7 +396,7 @@ static int thr_add(odp_schedule_group_t group, int thr)
if (!sched_group->s.group[group].allocated) {
odp_ticketlock_unlock(&sched_group->s.lock);
- return -1;
+ return 0;
}
odp_thrmask_set(&sched_group->s.group[group].mask, thr);
@@ -315,14 +411,14 @@ static int thr_rem(odp_schedule_group_t group, int thr)
{
sched_group_t *sched_group = &sched_global->sched_group;
- if (group < 0 || group >= NUM_GROUP)
+ if (group < 0 || group >= NUM_STATIC_GROUP)
return -1;
odp_ticketlock_lock(&sched_group->s.lock);
if (!sched_group->s.group[group].allocated) {
odp_ticketlock_unlock(&sched_group->s.lock);
- return -1;
+ return 0;
}
odp_thrmask_clr(&sched_group->s.group[group].mask, thr);
@@ -339,44 +435,49 @@ static int num_grps(void)
return NUM_GROUP - NUM_STATIC_GROUP;
}
-static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
+static int create_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
{
sched_group_t *sched_group = &sched_global->sched_group;
odp_schedule_group_t group = sched_param->group;
int prio = 0;
+ if (odp_global_rw->schedule_configured == 0) {
+ _ODP_ERR("Scheduler has not been configured\n");
+ return -1;
+ }
+
if (group < 0 || group >= NUM_GROUP)
return -1;
if (!sched_group->s.group[group].allocated)
return -1;
- if (sched_param->prio > 0)
- prio = LOWEST_QUEUE_PRIO;
+ /* Inverted prio value (max = 0) vs API */
+ prio = MAX_API_PRIO - sched_param->prio;
- sched_global->queue_cmd[qi].s.prio = prio;
- sched_global->queue_cmd[qi].s.group = group;
- sched_global->queue_cmd[qi].s.init = 1;
+ sched_global->queue_cmd[qi].prio = prio;
+ sched_global->queue_cmd[qi].group = group;
+ sched_global->queue_cmd[qi].init = 1;
return 0;
}
static void destroy_queue(uint32_t qi)
{
- sched_global->queue_cmd[qi].s.prio = 0;
- sched_global->queue_cmd[qi].s.group = 0;
- sched_global->queue_cmd[qi].s.init = 0;
+ sched_global->queue_cmd[qi].prio = 0;
+ sched_global->queue_cmd[qi].group = 0;
+ sched_global->queue_cmd[qi].init = 0;
}
static inline void add_tail(sched_cmd_t *cmd)
{
prio_queue_t *prio_queue;
- int group = cmd->s.group;
- int prio = cmd->s.prio;
- uint32_t idx = cmd->s.ring_idx;
+ int group = cmd->group;
+ int prio = cmd->prio;
+ uint32_t idx = cmd->ring_idx;
prio_queue = &sched_global->prio_queue[group][prio];
- ring_enq(&prio_queue->ring, RING_MASK, idx);
+ ring_u32_enq(&prio_queue->ring, RING_MASK, idx);
}
static inline sched_cmd_t *rem_head(int group, int prio)
@@ -386,9 +487,8 @@ static inline sched_cmd_t *rem_head(int group, int prio)
int pktio;
prio_queue = &sched_global->prio_queue[group][prio];
- ring_idx = ring_deq(&prio_queue->ring, RING_MASK);
- if (ring_idx == RING_EMPTY)
+ if (ring_u32_deq(&prio_queue->ring, RING_MASK, &ring_idx) == 0)
return NULL;
pktio = index_from_ring_idx(&index, ring_idx);
@@ -409,15 +509,10 @@ static int sched_queue(uint32_t qi)
return 0;
}
-static int unsched_queue(uint32_t qi ODP_UNUSED)
-{
- return 0;
-}
-
-static int ord_enq_multi(uint32_t queue_index, void *buf_hdr[], int num,
+static int ord_enq_multi(odp_queue_t queue, void *buf_hdr[], int num,
int *ret)
{
- (void)queue_index;
+ (void)queue;
(void)buf_hdr;
(void)num;
(void)ret;
@@ -426,24 +521,27 @@ static int ord_enq_multi(uint32_t queue_index, void *buf_hdr[], int num,
return 0;
}
-static void pktio_start(int pktio_index, int num, int pktin_idx[])
+static void pktio_start(int pktio_index,
+ int num,
+ int pktin_idx[],
+ odp_queue_t queue[])
{
int i;
sched_cmd_t *cmd;
- ODP_DBG("pktio index: %i, %i pktin queues %i\n",
- pktio_index, num, pktin_idx[0]);
+ _ODP_DBG("pktio index: %i, %i pktin queues %i\n", pktio_index, num, pktin_idx[0]);
cmd = &sched_global->pktio_cmd[pktio_index];
if (num > NUM_PKTIN)
- ODP_ABORT("Supports only %i pktin queues per interface\n",
- NUM_PKTIN);
+ _ODP_ABORT("Supports only %i pktin queues per interface\n", NUM_PKTIN);
- for (i = 0; i < num; i++)
- cmd->s.pktin_idx[i] = pktin_idx[i];
+ for (i = 0; i < num; i++) {
+ cmd->pktin_idx[i] = pktin_idx[i];
+ cmd->queue[i] = queue[i];
+ }
- cmd->s.num_pktin = num;
+ cmd->num_pktin = num;
add_tail(cmd);
}
@@ -495,6 +593,26 @@ static uint64_t schedule_wait_time(uint64_t ns)
return ns;
}
+static inline void enqueue_packets(odp_queue_t queue,
+ _odp_event_hdr_t *hdr_tbl[], int num_pkt)
+{
+ int num_enq, num_drop;
+
+ num_enq = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl,
+ num_pkt);
+
+ if (num_enq < 0)
+ num_enq = 0;
+
+ if (num_enq < num_pkt) {
+ num_drop = num_pkt - num_enq;
+
+ _ODP_DBG("Dropped %i packets\n", num_drop);
+ odp_packet_free_multi((odp_packet_t *)&hdr_tbl[num_enq],
+ num_drop);
+ }
+}
+
static int schedule_multi(odp_queue_t *from, uint64_t wait,
odp_event_t events[], int max_events ODP_UNUSED)
{
@@ -503,7 +621,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
if (sched_local.cmd) {
/* Continue scheduling if queue is not empty */
- if (sched_cb_queue_empty(sched_local.cmd->s.index) == 0)
+ if (_odp_sched_queue_empty(sched_local.cmd->index) == 0)
add_tail(sched_local.cmd);
sched_local.cmd = NULL;
@@ -519,12 +637,34 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
cmd = sched_cmd();
- if (cmd && cmd->s.type == CMD_PKTIO) {
- if (sched_cb_pktin_poll(cmd->s.index, cmd->s.num_pktin,
- cmd->s.pktin_idx)) {
- /* Pktio stopped or closed. */
- sched_cb_pktio_stop_finalize(cmd->s.index);
- } else {
+ if (cmd && cmd->type == CMD_PKTIO) {
+ _odp_event_hdr_t *hdr_tbl[CONFIG_BURST_SIZE];
+ int i;
+ int num_pkt = 0;
+ int max_num = CONFIG_BURST_SIZE;
+ int pktio_idx = cmd->index;
+ int num_pktin = cmd->num_pktin;
+ int *pktin_idx = cmd->pktin_idx;
+ odp_queue_t *queue = cmd->queue;
+
+ for (i = 0; i < num_pktin; i++) {
+ num_pkt = _odp_sched_cb_pktin_poll(pktio_idx,
+ pktin_idx[i],
+ hdr_tbl, max_num);
+
+ if (num_pkt < 0) {
+ /* Pktio stopped or closed. */
+ _odp_sched_cb_pktio_stop_finalize(pktio_idx);
+ break;
+ }
+
+ if (num_pkt == 0)
+ continue;
+
+ enqueue_packets(queue[i], hdr_tbl, num_pkt);
+ }
+
+ if (num_pkt >= 0) {
/* Continue polling pktio. */
add_tail(cmd);
}
@@ -534,6 +674,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
}
if (cmd == NULL) {
+ timer_run(1);
/* All priority queues are empty */
if (wait == ODP_SCHED_NO_WAIT)
return 0;
@@ -542,8 +683,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
continue;
if (update_t1) {
- t1 = odp_time_sum(odp_time_local(),
- odp_time_local_from_ns(wait));
+ t1 = odp_time_add_ns(odp_time_local(), wait);
update_t1 = 0;
continue;
}
@@ -554,31 +694,26 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
return 0;
}
- qi = cmd->s.index;
- num = sched_cb_queue_deq_multi(qi, events, 1);
+ qi = cmd->index;
+ num = _odp_sched_queue_deq(qi, events, 1, 1);
- if (num > 0) {
- sched_local.cmd = cmd;
+ if (num <= 0) {
+ timer_run(1);
+ /* Destroyed or empty queue. Remove empty queue from
+	 * scheduling. A dequeue operation on an already
+ * empty queue moves it to NOTSCHED state and
+ * sched_queue() will be called on next enqueue. */
+ continue;
+ }
- if (from)
- *from = sched_cb_queue_handle(qi);
+ timer_run(2);
- return num;
- }
+ sched_local.cmd = cmd;
- if (num < 0) {
- /* Destroyed queue */
- sched_cb_queue_destroy_finalize(qi);
- continue;
- }
+ if (from)
+ *from = queue_from_index(qi);
- if (num == 0) {
- /* Remove empty queue from scheduling. A dequeue
- * operation to on an already empty queue moves
- * it to NOTSCHED state and sched_queue() will
- * be called on next enqueue. */
- continue;
- }
+ return num;
}
}
@@ -592,6 +727,18 @@ static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
return ODP_EVENT_INVALID;
}
+static int schedule_multi_wait(odp_queue_t *from, odp_event_t events[],
+ int max_num)
+{
+ return schedule_multi(from, ODP_SCHED_WAIT, events, max_num);
+}
+
+static int schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[],
+ int max_num)
+{
+ return schedule_multi(from, ODP_SCHED_NO_WAIT, events, max_num);
+}
+
static void schedule_pause(void)
{
sched_local.pause = 1;
@@ -604,10 +751,12 @@ static void schedule_resume(void)
static void schedule_release_atomic(void)
{
+ /* Nothing to do */
}
static void schedule_release_ordered(void)
{
+ /* Nothing to do */
}
static void schedule_prefetch(int num)
@@ -615,6 +764,21 @@ static void schedule_prefetch(int num)
(void)num;
}
+static int schedule_min_prio(void)
+{
+ return 0;
+}
+
+static int schedule_max_prio(void)
+{
+ return MAX_API_PRIO;
+}
+
+static int schedule_default_prio(void)
+{
+ return schedule_max_prio() / 2;
+}
+
static int schedule_num_prio(void)
{
/* Lowest priority is used for pktin polling and is internal
@@ -627,7 +791,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
{
odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
sched_group_t *sched_group = &sched_global->sched_group;
- int i;
+ int i, thr;
odp_ticketlock_lock(&sched_group->s.lock);
@@ -635,6 +799,11 @@ static odp_schedule_group_t schedule_group_create(const char *name,
if (!sched_group->s.group[i].allocated) {
char *grp_name = sched_group->s.group[i].name;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#if __GNUC__ >= 13
+#pragma GCC diagnostic ignored "-Wstringop-overflow"
+#endif
if (name == NULL) {
grp_name[0] = 0;
} else {
@@ -642,10 +811,17 @@ static odp_schedule_group_t schedule_group_create(const char *name,
ODP_SCHED_GROUP_NAME_LEN - 1);
grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
}
- odp_thrmask_copy(&sched_group->s.group[i].mask,
- thrmask);
+#pragma GCC diagnostic pop
+
+ odp_thrmask_copy(&sched_group->s.group[i].mask, thrmask);
sched_group->s.group[i].allocated = 1;
group = i;
+
+ thr = odp_thrmask_first(thrmask);
+ while (thr >= 0) {
+ add_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
break;
}
}
@@ -658,6 +834,8 @@ static odp_schedule_group_t schedule_group_create(const char *name,
static int schedule_group_destroy(odp_schedule_group_t group)
{
sched_group_t *sched_group = &sched_global->sched_group;
+ int thr;
+ const odp_thrmask_t *thrmask;
if (group < NUM_STATIC_GROUP || group >= NUM_GROUP)
return -1;
@@ -669,6 +847,14 @@ static int schedule_group_destroy(odp_schedule_group_t group)
return -1;
}
+ thrmask = &sched_group->s.group[group].mask;
+
+ thr = odp_thrmask_first(thrmask);
+ while (thr >= 0) {
+ remove_group(sched_group, thr, group);
+ thr = odp_thrmask_next(thrmask, thr);
+ }
+
memset(&sched_group->s.group[group], 0,
sizeof(sched_group->s.group[0]));
@@ -809,59 +995,121 @@ static int schedule_group_info(odp_schedule_group_t group,
return 0;
}
-static void schedule_order_lock(unsigned lock_index)
+static void schedule_order_lock(uint32_t lock_index)
+{
+ (void)lock_index;
+}
+
+static void schedule_order_unlock(uint32_t lock_index)
+{
+ (void)lock_index;
+}
+
+static void schedule_order_unlock_lock(uint32_t unlock_index,
+ uint32_t lock_index)
+{
+ (void)unlock_index;
+ (void)lock_index;
+}
+
+static void schedule_order_lock_start(uint32_t lock_index)
{
(void)lock_index;
}
-static void schedule_order_unlock(unsigned lock_index)
+static void schedule_order_lock_wait(uint32_t lock_index)
{
(void)lock_index;
}
static void order_lock(void)
{
+ /* Nothing to do */
}
static void order_unlock(void)
{
+ /* Nothing to do */
}
-static void save_context(queue_entry_t *queue ODP_UNUSED)
+static int schedule_capability(odp_schedule_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_schedule_capability_t));
+
+ capa->max_ordered_locks = max_ordered_locks();
+ capa->max_groups = num_grps();
+ capa->max_prios = schedule_num_prio();
+ capa->max_queues = CONFIG_MAX_SCHED_QUEUES;
+ capa->max_queue_size = _odp_queue_glb->config.max_queue_size;
+
+ return 0;
+}
+
+static void schedule_print(void)
+{
+ odp_schedule_capability_t capa;
+
+ (void)schedule_capability(&capa);
+
+ _ODP_PRINT("\nScheduler debug info\n");
+ _ODP_PRINT("--------------------\n");
+ _ODP_PRINT(" scheduler: sp\n");
+ _ODP_PRINT(" max groups: %u\n", capa.max_groups);
+ _ODP_PRINT(" max priorities: %u\n", capa.max_prios);
+ _ODP_PRINT("\n");
+}
+
+static void get_config(schedule_config_t *config)
+{
+ *config = sched_global->config_if;
+}
+
+const _odp_schedule_api_fn_t _odp_schedule_sp_api;
+
+static const _odp_schedule_api_fn_t *sched_api(void)
{
+ return &_odp_schedule_sp_api;
}
/* Fill in scheduler interface */
-const schedule_fn_t schedule_sp_fn = {
+const schedule_fn_t _odp_schedule_sp_fn = {
.pktio_start = pktio_start,
.thr_add = thr_add,
.thr_rem = thr_rem,
.num_grps = num_grps,
- .init_queue = init_queue,
+ .create_queue = create_queue,
.destroy_queue = destroy_queue,
.sched_queue = sched_queue,
- .unsched_queue = unsched_queue,
.ord_enq_multi = ord_enq_multi,
.init_global = init_global,
.term_global = term_global,
.init_local = init_local,
.term_local = term_local,
- .order_lock = order_lock,
- .order_unlock = order_unlock,
+ .order_lock = order_lock,
+ .order_unlock = order_unlock,
.max_ordered_locks = max_ordered_locks,
- .save_context = save_context
+ .get_config = get_config,
+ .sched_api = sched_api,
};
/* Fill in scheduler API calls */
-const schedule_api_t schedule_sp_api = {
+const _odp_schedule_api_fn_t _odp_schedule_sp_api = {
.schedule_wait_time = schedule_wait_time,
+ .schedule_capability = schedule_capability,
+ .schedule_config_init = schedule_config_init,
+ .schedule_config = schedule_config,
.schedule = schedule,
.schedule_multi = schedule_multi,
+ .schedule_multi_wait = schedule_multi_wait,
+ .schedule_multi_no_wait = schedule_multi_no_wait,
.schedule_pause = schedule_pause,
.schedule_resume = schedule_resume,
.schedule_release_atomic = schedule_release_atomic,
.schedule_release_ordered = schedule_release_ordered,
.schedule_prefetch = schedule_prefetch,
+ .schedule_min_prio = schedule_min_prio,
+ .schedule_max_prio = schedule_max_prio,
+ .schedule_default_prio = schedule_default_prio,
.schedule_num_prio = schedule_num_prio,
.schedule_group_create = schedule_group_create,
.schedule_group_destroy = schedule_group_destroy,
@@ -871,5 +1119,10 @@ const schedule_api_t schedule_sp_api = {
.schedule_group_thrmask = schedule_group_thrmask,
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock
+ .schedule_order_unlock = schedule_order_unlock,
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
+ .schedule_order_lock_start = schedule_order_lock_start,
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
+ .schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index a6faff6e3..ef4df3a33 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -1,19 +1,24 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
#include <odp/api/debug.h>
#include <odp/api/std_types.h>
#include <odp/api/shared_memory.h>
#include <odp/api/plat/strong_types.h>
-#include <_ishm_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_init_internal.h>
+#include <odp_global_data.h>
#include <string.h>
-ODP_STATIC_ASSERT(ODP_CONFIG_SHM_BLOCKS >= ODP_CONFIG_POOLS,
- "ODP_CONFIG_SHM_BLOCKS < ODP_CONFIG_POOLS");
+/* Supported ODP_SHM_* flags */
+#define SUPPORTED_SHM_FLAGS (ODP_SHM_PROC | ODP_SHM_SINGLE_VA | ODP_SHM_EXPORT | \
+ ODP_SHM_HP | ODP_SHM_NO_HP)
static inline uint32_t from_handle(odp_shm_t shm)
{
@@ -44,9 +49,10 @@ int odp_shm_capability(odp_shm_capability_t *capa)
{
memset(capa, 0, sizeof(odp_shm_capability_t));
- capa->max_blocks = ODP_CONFIG_SHM_BLOCKS;
- capa->max_size = 0;
+ capa->max_blocks = CONFIG_SHM_BLOCKS;
+ capa->max_size = odp_global_ro.shm_max_size;
capa->max_align = 0;
+ capa->flags = SUPPORTED_SHM_FLAGS;
return 0;
}
@@ -55,11 +61,17 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
uint32_t flags)
{
int block_index;
- int flgs = 0; /* internal ishm flags */
+ uint32_t flgs = 0; /* internal ishm flags */
+ uint32_t supported_flgs = SUPPORTED_SHM_FLAGS;
+
+ if (flags & ~supported_flgs) {
+ _ODP_ERR("Unsupported SHM flag\n");
+ return ODP_SHM_INVALID;
+ }
flgs = get_ishm_flags(flags);
- block_index = _odp_ishm_reserve(name, size, -1, align, flgs, flags);
+ block_index = _odp_ishm_reserve(name, size, -1, align, 0, flgs, flags);
if (block_index >= 0)
return to_handle(block_index);
else
@@ -105,13 +117,43 @@ int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info)
info->size = ishm_info.size;
info->page_size = ishm_info.page_size;
info->flags = ishm_info.user_flags;
+ info->num_seg = 1;
+
+ return 0;
+}
+
+int odp_shm_segment_info(odp_shm_t shm, uint32_t index, uint32_t num,
+ odp_shm_segment_info_t seg_info[])
+{
+ odp_shm_info_t info;
+
+ /* No physical memory segment information available */
+ if (index != 0 || num != 1) {
+ _ODP_ERR("Only single segment supported (%u, %u)\n", index, num);
+ return -1;
+ }
+
+ if (odp_shm_info(shm, &info)) {
+ _ODP_ERR("SHM info call failed\n");
+ return -1;
+ }
+
+ seg_info[0].addr = (uintptr_t)info.addr;
+ seg_info[0].iova = ODP_SHM_IOVA_INVALID;
+ seg_info[0].pa = ODP_SHM_PA_INVALID;
+ seg_info[0].len = info.size;
return 0;
}
void odp_shm_print_all(void)
{
- _odp_ishm_status("Memory allocation status:");
+ _odp_ishm_status("ODP shared memory allocation status:");
+}
+
+void odp_shm_print(odp_shm_t shm)
+{
+ _odp_ishm_print(from_handle(shm));
}
uint64_t odp_shm_to_u64(odp_shm_t hdl)
diff --git a/platform/linux-generic/odp_sorted_list.c b/platform/linux-generic/odp_sorted_list.c
index 8a1dc3ac9..c93b11886 100644
--- a/platform/linux-generic/odp_sorted_list.c
+++ b/platform/linux-generic/odp_sorted_list.c
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -28,9 +28,12 @@ typedef struct {
uint32_t pad;
} sorted_list_desc_t;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef struct {
sorted_list_desc_t descs[0];
} sorted_list_descs_t;
+#pragma GCC diagnostic pop
typedef struct {
uint64_t total_inserts;
@@ -259,12 +262,12 @@ void _odp_sorted_list_stats_print(_odp_int_sorted_pool_t sorted_pool)
sorted_pool_t *pool;
pool = (sorted_pool_t *)(uintptr_t)sorted_pool;
- ODP_DBG("sorted_pool=0x%" PRIX64 "\n", sorted_pool);
- ODP_DBG(" max_sorted_lists=%u next_list_idx=%u\n",
- pool->max_sorted_lists, pool->next_list_idx);
- ODP_DBG(" total_inserts=%" PRIu64 " total_deletes=%" PRIu64
- " total_removes=%" PRIu64 "\n", pool->total_inserts,
- pool->total_deletes, pool->total_removes);
+ _ODP_PRINT(" sorted_pool=0x%" PRIX64 "\n", sorted_pool);
+ _ODP_PRINT(" max_sorted_lists=%u next_list_idx=%u\n",
+ pool->max_sorted_lists, pool->next_list_idx);
+ _ODP_PRINT(" total_inserts=%" PRIu64 " total_deletes=%" PRIu64
+ " total_removes=%" PRIu64 "\n", pool->total_inserts,
+ pool->total_deletes, pool->total_removes);
}
void _odp_sorted_pool_destroy(_odp_int_sorted_pool_t sorted_pool)
diff --git a/platform/linux-generic/odp_spinlock.c b/platform/linux-generic/odp_spinlock.c
deleted file mode 100644
index cb0f0533c..000000000
--- a/platform/linux-generic/odp_spinlock.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/spinlock.h>
-#include <odp/api/cpu.h>
-#include <odp_atomic_internal.h>
-
-void odp_spinlock_init(odp_spinlock_t *spinlock)
-{
- _odp_atomic_flag_init(&spinlock->lock, 0);
-}
-
-void odp_spinlock_lock(odp_spinlock_t *spinlock)
-{
- /* While the lock is already taken... */
- while (_odp_atomic_flag_tas(&spinlock->lock))
- /* ...spin reading the flag (relaxed MM),
- * the loop will exit when the lock becomes available
- * and we will retry the TAS operation above */
- while (_odp_atomic_flag_load(&spinlock->lock))
- odp_cpu_pause();
-}
-
-int odp_spinlock_trylock(odp_spinlock_t *spinlock)
-{
- return (_odp_atomic_flag_tas(&spinlock->lock) == 0);
-}
-
-void odp_spinlock_unlock(odp_spinlock_t *spinlock)
-{
- _odp_atomic_flag_clear(&spinlock->lock);
-}
-
-int odp_spinlock_is_locked(odp_spinlock_t *spinlock)
-{
- return _odp_atomic_flag_load(&spinlock->lock) != 0;
-}
diff --git a/platform/linux-generic/odp_spinlock_api.c b/platform/linux-generic/odp_spinlock_api.c
new file mode 100644
index 000000000..06925e9a5
--- /dev/null
+++ b/platform/linux-generic/odp_spinlock_api.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/spinlock.h>
+
+#define _ODP_NO_INLINE
+#include <odp/api/plat/spinlock_inlines.h>
diff --git a/platform/linux-generic/odp_spinlock_recursive.c b/platform/linux-generic/odp_spinlock_recursive.c
deleted file mode 100644
index 5ed481c4a..000000000
--- a/platform/linux-generic/odp_spinlock_recursive.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/spinlock_recursive.h>
-#include <odp/api/thread.h>
-
-#define NO_OWNER (-1)
-
-void odp_spinlock_recursive_init(odp_spinlock_recursive_t *rlock)
-{
- odp_spinlock_init(&rlock->lock);
- rlock->owner = NO_OWNER;
- rlock->cnt = 0;
-}
-
-void odp_spinlock_recursive_lock(odp_spinlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->owner == thr) {
- rlock->cnt++;
- return;
- }
-
- odp_spinlock_lock(&rlock->lock);
- rlock->owner = thr;
- rlock->cnt = 1;
-}
-
-int odp_spinlock_recursive_trylock(odp_spinlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->owner == thr) {
- rlock->cnt++;
- return 1;
- }
-
- if (odp_spinlock_trylock(&rlock->lock)) {
- rlock->owner = thr;
- rlock->cnt = 1;
- return 1;
- } else {
- return 0;
- }
-}
-
-void odp_spinlock_recursive_unlock(odp_spinlock_recursive_t *rlock)
-{
- rlock->cnt--;
-
- if (rlock->cnt > 0)
- return;
-
- rlock->owner = NO_OWNER;
- odp_spinlock_unlock(&rlock->lock);
-}
-
-int odp_spinlock_recursive_is_locked(odp_spinlock_recursive_t *rlock)
-{
- int thr = odp_thread_id();
-
- if (rlock->owner == thr)
- return 1;
-
- return odp_spinlock_is_locked(&rlock->lock);
-}
diff --git a/platform/linux-generic/odp_spinlock_recursive_api.c b/platform/linux-generic/odp_spinlock_recursive_api.c
new file mode 100644
index 000000000..2b1e8b200
--- /dev/null
+++ b/platform/linux-generic/odp_spinlock_recursive_api.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/spinlock_recursive.h>
+
+#define _ODP_NO_INLINE
+#include <odp/api/plat/spinlock_recursive_inlines.h>
diff --git a/platform/linux-generic/odp_stash.c b/platform/linux-generic/odp_stash.c
new file mode 100644
index 000000000..cb87a5fa5
--- /dev/null
+++ b/platform/linux-generic/odp_stash.c
@@ -0,0 +1,932 @@
+/* Copyright (c) 2020-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/align.h>
+#include <odp/api/shared_memory.h>
+#include <odp/api/stash.h>
+#include <odp/api/std_types.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/strong_types.h>
+
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_ring_mpmc_u32_internal.h>
+#include <odp_ring_mpmc_u64_internal.h>
+#include <odp_ring_u32_internal.h>
+#include <odp_ring_u64_internal.h>
+
+#include <stdio.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+
+ODP_STATIC_ASSERT(CONFIG_INTERNAL_STASHES < CONFIG_MAX_STASHES, "TOO_MANY_INTERNAL_STASHES");
+
+#define MIN_RING_SIZE 64
+
+enum {
+ STASH_FREE = 0,
+ STASH_RESERVED,
+ STASH_ACTIVE
+};
+
+typedef struct stash_t stash_t;
+
+typedef void (*ring_u32_init_fn_t)(stash_t *stash);
+typedef int32_t (*ring_u32_enq_multi_fn_t)(stash_t *stash, const uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_enq_batch_fn_t)(stash_t *stash, const uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_deq_multi_fn_t)(stash_t *stash, uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_deq_batch_fn_t)(stash_t *stash, uint32_t val[], int32_t num);
+typedef int32_t (*ring_u32_len_fn_t)(stash_t *stash);
+
+typedef void (*ring_u64_init_fn_t)(stash_t *stash);
+typedef int32_t (*ring_u64_enq_multi_fn_t)(stash_t *stash, const uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_enq_batch_fn_t)(stash_t *stash, const uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_deq_multi_fn_t)(stash_t *stash, uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_deq_batch_fn_t)(stash_t *stash, uint64_t val[], int32_t num);
+typedef int32_t (*ring_u64_len_fn_t)(stash_t *stash);
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
+typedef struct ODP_ALIGNED_CACHE stash_t {
+ /* Ring functions */
+ union {
+ struct {
+ ring_u32_enq_multi_fn_t enq_multi;
+ ring_u32_enq_batch_fn_t enq_batch;
+ ring_u32_deq_multi_fn_t deq_multi;
+ ring_u32_deq_batch_fn_t deq_batch;
+ ring_u32_init_fn_t init;
+ ring_u32_len_fn_t len;
+ } u32;
+
+ struct {
+ ring_u64_enq_multi_fn_t enq_multi;
+ ring_u64_enq_batch_fn_t enq_batch;
+ ring_u64_deq_multi_fn_t deq_multi;
+ ring_u64_deq_batch_fn_t deq_batch;
+ ring_u64_init_fn_t init;
+ ring_u64_len_fn_t len;
+ } u64;
+ } ring_fn;
+
+ uint32_t ring_mask;
+ uint32_t ring_size;
+ uint32_t obj_size;
+
+ char name[ODP_STASH_NAME_LEN];
+ int index;
+ uint8_t strict_size;
+
+ /* Ring header followed by variable sized data (object handles) */
+ union {
+ struct ODP_ALIGNED_CACHE {
+ ring_u32_t hdr;
+ uint32_t data[];
+ } ring_u32;
+
+ struct ODP_ALIGNED_CACHE {
+ ring_u64_t hdr;
+ uint64_t data[];
+ } ring_u64;
+
+ struct ODP_ALIGNED_CACHE {
+ ring_mpmc_u32_t hdr;
+ uint32_t data[];
+ } ring_mpmc_u32;
+
+ struct ODP_ALIGNED_CACHE {
+ ring_mpmc_u64_t hdr;
+ uint64_t data[];
+ } ring_mpmc_u64;
+ };
+
+} stash_t;
+#pragma GCC diagnostic pop
+
+typedef struct stash_global_t {
+ odp_ticketlock_t lock;
+ odp_shm_t shm;
+ uint32_t max_num;
+ uint32_t max_num_obj;
+ uint32_t num_internal;
+ uint8_t stash_state[CONFIG_MAX_STASHES];
+ stash_t *stash[CONFIG_MAX_STASHES];
+ uint8_t data[] ODP_ALIGNED_CACHE;
+
+} stash_global_t;
+
+static stash_global_t *stash_global;
+
+static inline stash_t *stash_entry(odp_stash_t st)
+{
+ return (stash_t *)(uintptr_t)st;
+}
+
+static inline odp_stash_t stash_handle(stash_t *stash)
+{
+ return (odp_stash_t)(uintptr_t)stash;
+}
+
+int _odp_stash_init_global(void)
+{
+ odp_shm_t shm;
+ uint32_t max_num, max_num_obj;
+ const char *str;
+ uint64_t ring_max_size, stash_max_size, stash_data_size, offset;
+ const uint32_t internal_stashes = odp_global_ro.disable.dma ? 0 : CONFIG_INTERNAL_STASHES;
+ uint8_t *stash_data;
+ int val = 0;
+
+ if (odp_global_ro.disable.stash && odp_global_ro.disable.dma) {
+ _ODP_PRINT("Stash is DISABLED\n");
+ return 0;
+ }
+
+ _ODP_PRINT("Stash config:\n");
+
+ str = "stash.max_num";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ _ODP_PRINT(" %s: %i\n", str, val);
+ max_num = val;
+
+ str = "stash.max_num_obj";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ _ODP_PRINT(" %s: %i\n", str, val);
+ max_num_obj = val;
+
+ _ODP_PRINT("\n");
+
+ /* Reserve resources for implementation internal stashes */
+ if (max_num > CONFIG_MAX_STASHES - internal_stashes) {
+ _ODP_ERR("Maximum supported number of stashes: %d\n",
+ CONFIG_MAX_STASHES - internal_stashes);
+ return -1;
+ }
+ max_num += internal_stashes;
+
+ /* Must have room for minimum sized ring */
+ if (max_num_obj < MIN_RING_SIZE)
+ max_num_obj = MIN_RING_SIZE - 1;
+
+ /* Ring size must be larger than the number of items stored */
+ ring_max_size = _ODP_ROUNDUP_POWER2_U32(max_num_obj + 1);
+
+ stash_max_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(stash_t) +
+ (ring_max_size * sizeof(uint64_t)));
+ stash_data_size = max_num * stash_max_size;
+
+ shm = odp_shm_reserve("_odp_stash_global", sizeof(stash_global_t) + stash_data_size,
+ ODP_CACHE_LINE_SIZE, 0);
+
+ stash_global = odp_shm_addr(shm);
+
+ if (stash_global == NULL) {
+ _ODP_ERR("SHM reserve of stash global data failed\n");
+ return -1;
+ }
+
+ memset(stash_global, 0, sizeof(stash_global_t));
+ stash_global->shm = shm;
+ stash_global->max_num = max_num;
+ stash_global->max_num_obj = max_num_obj;
+ stash_global->num_internal = internal_stashes;
+ odp_ticketlock_init(&stash_global->lock);
+
+ /* Initialize stash pointers */
+ stash_data = stash_global->data;
+ offset = 0;
+
+ for (uint32_t i = 0; i < max_num; i++) {
+ stash_global->stash[i] = (stash_t *)(uintptr_t)(stash_data + offset);
+ offset += stash_max_size;
+ }
+
+ return 0;
+}
+
+int _odp_stash_term_global(void)
+{
+ if (odp_global_ro.disable.stash)
+ return 0;
+
+ if (stash_global == NULL)
+ return 0;
+
+ if (odp_shm_free(stash_global->shm)) {
+ _ODP_ERR("SHM free failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int odp_stash_capability(odp_stash_capability_t *capa, odp_stash_type_t type)
+{
+ uint32_t max_stashes;
+
+ if (odp_global_ro.disable.stash) {
+ _ODP_ERR("Stash is disabled\n");
+ return -1;
+ }
+
+ (void)type;
+ max_stashes = stash_global->max_num - stash_global->num_internal;
+
+ memset(capa, 0, sizeof(odp_stash_capability_t));
+
+ capa->max_stashes_any_type = max_stashes;
+ capa->max_stashes = max_stashes;
+
+ capa->max_num_obj = stash_global->max_num_obj;
+ capa->max_num.u8 = stash_global->max_num_obj;
+ capa->max_num.u16 = stash_global->max_num_obj;
+ capa->max_num.u32 = stash_global->max_num_obj;
+ capa->max_num.u64 = stash_global->max_num_obj;
+ capa->max_num.max_obj_size = stash_global->max_num_obj;
+
+ capa->max_obj_size = sizeof(uint64_t);
+ capa->max_get_batch = MIN_RING_SIZE;
+ capa->max_put_batch = MIN_RING_SIZE;
+ capa->stats.bit.count = 1;
+
+ return 0;
+}
+
+void odp_stash_param_init(odp_stash_param_t *param)
+{
+ memset(param, 0, sizeof(odp_stash_param_t));
+ param->type = ODP_STASH_TYPE_DEFAULT;
+ param->put_mode = ODP_STASH_OP_MT;
+ param->get_mode = ODP_STASH_OP_MT;
+}
+
+static int reserve_index(void)
+{
+ int index = -1;
+
+ odp_ticketlock_lock(&stash_global->lock);
+
+ for (uint32_t i = 0; i < stash_global->max_num; i++) {
+ if (stash_global->stash_state[i] == STASH_FREE) {
+ index = i;
+ stash_global->stash_state[i] = STASH_RESERVED;
+ break;
+ }
+ }
+
+ odp_ticketlock_unlock(&stash_global->lock);
+
+ return index;
+}
+
+static void free_index(int i)
+{
+ odp_ticketlock_lock(&stash_global->lock);
+
+ stash_global->stash_state[i] = STASH_FREE;
+
+ odp_ticketlock_unlock(&stash_global->lock);
+}
+
+static inline void strict_ring_u32_init(stash_t *stash)
+{
+ ring_u32_init(&stash->ring_u32.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_u32.data[i] = 0;
+}
+
+static inline void strict_ring_u64_init(stash_t *stash)
+{
+ ring_u64_init(&stash->ring_u64.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_u64.data[i] = 0;
+}
+
+static inline int32_t strict_ring_u32_enq_multi(stash_t *stash, const uint32_t val[], int32_t num)
+{
+ /* Success always */
+ ring_u32_enq_multi(&stash->ring_u32.hdr, stash->ring_mask, (uint32_t *)(uintptr_t)val, num);
+
+ return num;
+}
+
+static inline int32_t strict_ring_u64_enq_multi(stash_t *stash, const uint64_t val[], int32_t num)
+{
+ /* Success always */
+ ring_u64_enq_multi(&stash->ring_u64.hdr, stash->ring_mask, (uint64_t *)(uintptr_t)val, num);
+
+ return num;
+}
+
+static inline int32_t strict_ring_u32_deq_multi(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_u32_deq_multi(&stash->ring_u32.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u64_deq_multi(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_u64_deq_multi(&stash->ring_u64.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u32_deq_batch(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_u32_deq_batch(&stash->ring_u32.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u64_deq_batch(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_u64_deq_batch(&stash->ring_u64.hdr, stash->ring_mask, val, num);
+}
+
+static inline int32_t strict_ring_u32_len(stash_t *stash)
+{
+ return ring_u32_len(&stash->ring_u32.hdr);
+}
+
+static inline int32_t strict_ring_u64_len(stash_t *stash)
+{
+ return ring_u64_len(&stash->ring_u64.hdr);
+}
+
+static inline void mpmc_ring_u32_init(stash_t *stash)
+{
+ ring_mpmc_u32_init(&stash->ring_mpmc_u32.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_mpmc_u32.data[i] = 0;
+}
+
+static inline void mpmc_ring_u64_init(stash_t *stash)
+{
+ ring_mpmc_u64_init(&stash->ring_mpmc_u64.hdr);
+
+ for (uint32_t i = 0; i < stash->ring_size; i++)
+ stash->ring_mpmc_u64.data[i] = 0;
+}
+
+static inline int32_t mpmc_ring_u32_enq_multi(stash_t *stash, const uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_enq_multi(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_enq_multi(stash_t *stash, const uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_enq_multi(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_enq_batch(stash_t *stash, const uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_enq_batch(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_enq_batch(stash_t *stash, const uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_enq_batch(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_deq_multi(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_deq_multi(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_deq_multi(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_deq_multi(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_deq_batch(stash_t *stash, uint32_t val[], int32_t num)
+{
+ return ring_mpmc_u32_deq_batch(&stash->ring_mpmc_u32.hdr, stash->ring_mpmc_u32.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u64_deq_batch(stash_t *stash, uint64_t val[], int32_t num)
+{
+ return ring_mpmc_u64_deq_batch(&stash->ring_mpmc_u64.hdr, stash->ring_mpmc_u64.data,
+ stash->ring_mask, val, num);
+}
+
+static inline int32_t mpmc_ring_u32_len(stash_t *stash)
+{
+ return ring_mpmc_u32_len(&stash->ring_mpmc_u32.hdr);
+}
+
+static inline int32_t mpmc_ring_u64_len(stash_t *stash)
+{
+ return ring_mpmc_u64_len(&stash->ring_mpmc_u64.hdr);
+}
+
+odp_stash_t odp_stash_create(const char *name, const odp_stash_param_t *param)
+{
+ stash_t *stash;
+ uint64_t ring_size;
+ int ring_u64, index;
+
+ if (odp_global_ro.disable.stash) {
+ _ODP_ERR("Stash is disabled\n");
+ return ODP_STASH_INVALID;
+ }
+
+ if (param->obj_size > sizeof(uint64_t)) {
+ _ODP_ERR("Too large object handle.\n");
+ return ODP_STASH_INVALID;
+ }
+
+ if (param->num_obj > stash_global->max_num_obj) {
+ _ODP_ERR("Too many objects.\n");
+ return ODP_STASH_INVALID;
+ }
+
+ if (name && strlen(name) >= ODP_STASH_NAME_LEN) {
+ _ODP_ERR("Too long name.\n");
+ return ODP_STASH_INVALID;
+ }
+
+ index = reserve_index();
+
+ if (index < 0) {
+ _ODP_ERR("Maximum number of stashes created already.\n");
+ return ODP_STASH_INVALID;
+ }
+
+ ring_u64 = 0;
+ if (param->obj_size > sizeof(uint32_t))
+ ring_u64 = 1;
+
+ ring_size = param->num_obj;
+
+ /* Ring size must be larger than the number of items stored */
+ if (ring_size + 1 <= MIN_RING_SIZE)
+ ring_size = MIN_RING_SIZE;
+ else
+ ring_size = _ODP_ROUNDUP_POWER2_U32(ring_size + 1);
+
+ stash = stash_global->stash[index];
+ memset(stash, 0, sizeof(stash_t));
+
+ /* Set ring function pointers */
+ stash->strict_size = !!param->strict_size;
+ if (stash->strict_size) {
+ if (ring_u64) {
+ stash->ring_fn.u64.init = strict_ring_u64_init;
+ stash->ring_fn.u64.enq_multi = strict_ring_u64_enq_multi;
+ stash->ring_fn.u64.enq_batch = strict_ring_u64_enq_multi;
+ stash->ring_fn.u64.deq_multi = strict_ring_u64_deq_multi;
+ stash->ring_fn.u64.deq_batch = strict_ring_u64_deq_batch;
+ stash->ring_fn.u64.len = strict_ring_u64_len;
+ } else {
+ stash->ring_fn.u32.init = strict_ring_u32_init;
+ stash->ring_fn.u32.enq_multi = strict_ring_u32_enq_multi;
+ stash->ring_fn.u32.enq_batch = strict_ring_u32_enq_multi;
+ stash->ring_fn.u32.deq_multi = strict_ring_u32_deq_multi;
+ stash->ring_fn.u32.deq_batch = strict_ring_u32_deq_batch;
+ stash->ring_fn.u32.len = strict_ring_u32_len;
+ }
+ } else {
+ if (ring_u64) {
+ stash->ring_fn.u64.init = mpmc_ring_u64_init;
+ stash->ring_fn.u64.enq_multi = mpmc_ring_u64_enq_multi;
+ stash->ring_fn.u64.enq_batch = mpmc_ring_u64_enq_batch;
+ stash->ring_fn.u64.deq_multi = mpmc_ring_u64_deq_multi;
+ stash->ring_fn.u64.deq_batch = mpmc_ring_u64_deq_batch;
+ stash->ring_fn.u64.len = mpmc_ring_u64_len;
+ } else {
+ stash->ring_fn.u32.init = mpmc_ring_u32_init;
+ stash->ring_fn.u32.enq_multi = mpmc_ring_u32_enq_multi;
+ stash->ring_fn.u32.enq_batch = mpmc_ring_u32_enq_batch;
+ stash->ring_fn.u32.deq_multi = mpmc_ring_u32_deq_multi;
+ stash->ring_fn.u32.deq_batch = mpmc_ring_u32_deq_batch;
+ stash->ring_fn.u32.len = mpmc_ring_u32_len;
+ }
+ }
+
+ if (name)
+ strcpy(stash->name, name);
+
+ stash->index = index;
+ stash->obj_size = param->obj_size;
+ stash->ring_mask = ring_size - 1;
+ stash->ring_size = ring_size;
+
+ if (ring_u64)
+ stash->ring_fn.u64.init(stash);
+ else
+ stash->ring_fn.u32.init(stash);
+
+ /* This makes stash visible to lookups */
+ odp_ticketlock_lock(&stash_global->lock);
+ stash_global->stash_state[index] = STASH_ACTIVE;
+ odp_ticketlock_unlock(&stash_global->lock);
+
+ return stash_handle(stash);
+}
+
+int odp_stash_destroy(odp_stash_t st)
+{
+ if (st == ODP_STASH_INVALID)
+ return -1;
+
+ free_index(stash_entry(st)->index);
+
+ return 0;
+}
+
+uint64_t odp_stash_to_u64(odp_stash_t st)
+{
+ return _odp_pri(st);
+}
+
+odp_stash_t odp_stash_lookup(const char *name)
+{
+ stash_t *stash;
+
+ if (name == NULL)
+ return ODP_STASH_INVALID;
+
+ odp_ticketlock_lock(&stash_global->lock);
+
+ for (uint32_t i = 0; i < stash_global->max_num; i++) {
+ stash = stash_global->stash[i];
+
+ if (stash_global->stash_state[i] == STASH_ACTIVE &&
+ strcmp(stash->name, name) == 0) {
+ odp_ticketlock_unlock(&stash_global->lock);
+ return stash_handle(stash);
+ }
+ }
+
+ odp_ticketlock_unlock(&stash_global->lock);
+
+ return ODP_STASH_INVALID;
+}
+
+static inline int32_t stash_put(odp_stash_t st, const void *obj, int32_t num, odp_bool_t is_batch)
+{
+ int32_t (*ring_u32_enq)(stash_t *stash, const uint32_t val[], int32_t num);
+ int32_t (*ring_u64_enq)(stash_t *stash, const uint64_t val[], int32_t num);
+ stash_t *stash = stash_entry(st);
+ uint32_t obj_size;
+ int32_t i;
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ if (is_batch) {
+ ring_u32_enq = stash->ring_fn.u32.enq_batch;
+ ring_u64_enq = stash->ring_fn.u64.enq_batch;
+ } else {
+ ring_u32_enq = stash->ring_fn.u32.enq_multi;
+ ring_u64_enq = stash->ring_fn.u64.enq_multi;
+ }
+
+ obj_size = stash->obj_size;
+
+ if (obj_size == sizeof(uint64_t))
+ return ring_u64_enq(stash, (uint64_t *)(uintptr_t)obj, num);
+
+ if (obj_size == sizeof(uint32_t))
+ return ring_u32_enq(stash, (uint32_t *)(uintptr_t)obj, num);
+
+ if (obj_size == sizeof(uint16_t)) {
+ const uint16_t *u16_ptr = obj;
+ uint32_t u32[num];
+
+ for (i = 0; i < num; i++)
+ u32[i] = u16_ptr[i];
+
+ return ring_u32_enq(stash, u32, num);
+ }
+
+ if (obj_size == sizeof(uint8_t)) {
+ const uint8_t *u8_ptr = obj;
+ uint32_t u32[num];
+
+ for (i = 0; i < num; i++)
+ u32[i] = u8_ptr[i];
+
+ return ring_u32_enq(stash, u32, num);
+ }
+
+ return -1;
+}
+
+int32_t odp_stash_put(odp_stash_t st, const void *obj, int32_t num)
+{
+ return stash_put(st, obj, num, false);
+}
+
+int32_t odp_stash_put_batch(odp_stash_t st, const void *obj, int32_t num)
+{
+ return stash_put(st, obj, num, true);
+}
+
+int32_t odp_stash_put_u32(odp_stash_t st, const uint32_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
+
+ return stash->ring_fn.u32.enq_multi(stash, val, num);
+}
+
+int32_t odp_stash_put_u32_batch(odp_stash_t st, const uint32_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
+
+ return stash->ring_fn.u32.enq_batch(stash, val, num);
+}
+
+int32_t odp_stash_put_u64(odp_stash_t st, const uint64_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
+
+ return stash->ring_fn.u64.enq_multi(stash, (uint64_t *)(uintptr_t)val, num);
+}
+
+int32_t odp_stash_put_u64_batch(odp_stash_t st, const uint64_t val[],
+ int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
+
+ return stash->ring_fn.u64.enq_batch(stash, (uint64_t *)(uintptr_t)val, num);
+}
+
+int32_t odp_stash_put_ptr(odp_stash_t st, const uintptr_t ptr[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
+
+ if (sizeof(uintptr_t) == sizeof(uint32_t))
+ return stash->ring_fn.u32.enq_multi(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.enq_multi(stash, (uint64_t *)(uintptr_t)ptr, num);
+
+ return -1;
+}
+
+int32_t odp_stash_put_ptr_batch(odp_stash_t st, const uintptr_t ptr[],
+ int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
+
+ if (sizeof(uintptr_t) == sizeof(uint32_t))
+ return stash->ring_fn.u32.enq_batch(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.enq_batch(stash, (uint64_t *)(uintptr_t)ptr, num);
+
+ return -1;
+}
+
+static inline int32_t stash_get(odp_stash_t st, void *obj, int32_t num, odp_bool_t is_batch)
+{
+ int32_t (*ring_u32_deq)(stash_t *stash, uint32_t val[], int32_t num);
+ int32_t (*ring_u64_deq)(stash_t *stash, uint64_t val[], int32_t num);
+ stash_t *stash = stash_entry(st);
+ uint32_t obj_size;
+ uint32_t i, num_deq;
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ if (is_batch) {
+ ring_u32_deq = stash->ring_fn.u32.deq_batch;
+ ring_u64_deq = stash->ring_fn.u64.deq_batch;
+ } else {
+ ring_u32_deq = stash->ring_fn.u32.deq_multi;
+ ring_u64_deq = stash->ring_fn.u64.deq_multi;
+ }
+
+ obj_size = stash->obj_size;
+
+ if (obj_size == sizeof(uint64_t))
+ return ring_u64_deq(stash, obj, num);
+
+ if (obj_size == sizeof(uint32_t))
+ return ring_u32_deq(stash, obj, num);
+
+ if (obj_size == sizeof(uint16_t)) {
+ uint16_t *u16_ptr = obj;
+ uint32_t u32[num];
+
+ num_deq = ring_u32_deq(stash, u32, num);
+
+ for (i = 0; i < num_deq; i++)
+ u16_ptr[i] = u32[i];
+
+ return num_deq;
+ }
+
+ if (obj_size == sizeof(uint8_t)) {
+ uint8_t *u8_ptr = obj;
+ uint32_t u32[num];
+
+ num_deq = ring_u32_deq(stash, u32, num);
+
+ for (i = 0; i < num_deq; i++)
+ u8_ptr[i] = u32[i];
+
+ return num_deq;
+ }
+
+ return -1;
+}
+
+int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
+{
+ return stash_get(st, obj, num, false);
+}
+
+int32_t odp_stash_get_batch(odp_stash_t st, void *obj, int32_t num)
+{
+ return stash_get(st, obj, num, true);
+}
+
+int32_t odp_stash_get_u32(odp_stash_t st, uint32_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
+
+ return stash->ring_fn.u32.deq_multi(stash, val, num);
+}
+
+int32_t odp_stash_get_u32_batch(odp_stash_t st, uint32_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
+
+ return stash->ring_fn.u32.deq_batch(stash, val, num);
+}
+
+int32_t odp_stash_get_u64(odp_stash_t st, uint64_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
+
+ return stash->ring_fn.u64.deq_multi(stash, val, num);
+}
+
+int32_t odp_stash_get_u64_batch(odp_stash_t st, uint64_t val[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
+
+ return stash->ring_fn.u64.deq_batch(stash, val, num);
+}
+
+int32_t odp_stash_get_ptr(odp_stash_t st, uintptr_t ptr[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
+
+ if (sizeof(uintptr_t) == sizeof(uint32_t))
+ return stash->ring_fn.u32.deq_multi(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.deq_multi(stash, (uint64_t *)(uintptr_t)ptr, num);
+
+ return -1;
+}
+
+int32_t odp_stash_get_ptr_batch(odp_stash_t st, uintptr_t ptr[], int32_t num)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ _ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
+
+ if (sizeof(uintptr_t) == sizeof(uint32_t))
+ return stash->ring_fn.u32.deq_batch(stash, (uint32_t *)(uintptr_t)ptr, num);
+
+ if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return stash->ring_fn.u64.deq_batch(stash, (uint64_t *)(uintptr_t)ptr, num);
+
+ return -1;
+}
+
+int odp_stash_flush_cache(odp_stash_t st)
+{
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ return 0;
+}
+
+static uint32_t stash_obj_count(stash_t *stash)
+{
+ uint32_t obj_size = stash->obj_size;
+
+ if (obj_size == sizeof(uint64_t))
+ return stash->ring_fn.u64.len(stash);
+
+ return stash->ring_fn.u32.len(stash);
+}
+
+void odp_stash_print(odp_stash_t st)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (st == ODP_STASH_INVALID) {
+ _ODP_ERR("Bad stash handle\n");
+ return;
+ }
+
+ _ODP_PRINT("\nStash info\n");
+ _ODP_PRINT("----------\n");
+ _ODP_PRINT(" handle 0x%" PRIx64 "\n", odp_stash_to_u64(st));
+ _ODP_PRINT(" name %s\n", stash->name);
+ _ODP_PRINT(" index %i\n", stash->index);
+ _ODP_PRINT(" obj size %u\n", stash->obj_size);
+ _ODP_PRINT(" obj count %u\n", stash_obj_count(stash));
+ _ODP_PRINT(" ring size %u\n", stash->ring_size);
+ _ODP_PRINT(" strict size %u\n", stash->strict_size);
+ _ODP_PRINT("\n");
+}
+
+int odp_stash_stats(odp_stash_t st, odp_stash_stats_t *stats)
+{
+ stash_t *stash = stash_entry(st);
+
+ if (st == ODP_STASH_INVALID) {
+ _ODP_ERR("Bad stash handle\n");
+ return -1;
+ }
+
+ stats->count = stash_obj_count(stash);
+ stats->cache_count = 0;
+
+ return 0;
+}
diff --git a/platform/linux-generic/odp_std.c b/platform/linux-generic/odp_std.c
new file mode 100644
index 000000000..9db5a35b3
--- /dev/null
+++ b/platform/linux-generic/odp_std.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/std.h>
+
+double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract)
+{
+ double fraction;
+
+ if (fract->numer == 0)
+ fraction = 0.0;
+ else
+ fraction = (double)fract->numer / fract->denom;
+
+ return fract->integer + fraction;
+}
diff --git a/platform/linux-generic/odp_std_api.c b/platform/linux-generic/odp_std_api.c
new file mode 100644
index 000000000..0bcd68de2
--- /dev/null
+++ b/platform/linux-generic/odp_std_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/std.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/std_inlines.h>
diff --git a/platform/linux-generic/odp_std_clib.c b/platform/linux-generic/odp_std_clib.c
deleted file mode 100644
index 24df249db..000000000
--- a/platform/linux-generic/odp_std_clib.c
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2016, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/std_clib.h>
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/std_clib_inlines.h>
-#endif
diff --git a/platform/linux-generic/odp_sync.c b/platform/linux-generic/odp_sync_api.c
index b7eb503ca..56c86db14 100644
--- a/platform/linux-generic/odp_sync.c
+++ b/platform/linux-generic/odp_sync_api.c
@@ -1,10 +1,11 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <odp/api/sync.h>
-#if ODP_ABI_COMPAT == 1
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
#include <odp/api/plat/sync_inlines.h>
-#endif
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index 18c61dbe7..a2593b531 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2020-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,13 +15,17 @@
#include <odp_posix_extensions.h>
#include <odp/api/system_info.h>
-#include <odp_internal.h>
+#include <odp/api/version.h>
+#include <odp_global_data.h>
+#include <odp_sysinfo_internal.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
#include <odp_debug_internal.h>
+#include <odp_config_internal.h>
#include <odp/api/align.h>
#include <odp/api/cpu.h>
+
#include <errno.h>
-#include <pthread.h>
-#include <sched.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>
@@ -38,19 +43,9 @@
"/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size"
/*
- * Report the number of logical CPUs detected at boot time
- */
-static int sysconf_cpu_count(void)
-{
- return odp_global_data.num_cpus_installed;
-}
-
-#if defined __x86_64__ || defined __i386__ || defined __OCTEON__ || \
-defined __powerpc__
-/*
* Analysis of /sys/devices/system/cpu/ files
*/
-static int systemcpu_cache_line_size(void)
+static int read_cache_line_size(void)
{
FILE *file;
char str[128];
@@ -59,7 +54,9 @@ static int systemcpu_cache_line_size(void)
file = fopen(CACHE_LNSZ_FILE, "rt");
if (file == NULL) {
/* File not found */
- return 0;
+ _ODP_WARN("Unable to read host CPU cache line size. "
+ "Using ODP_CACHE_LINE_SIZE instead.\n");
+ return ODP_CACHE_LINE_SIZE;
}
if (fgets(str, sizeof(str), file) != NULL) {
@@ -73,17 +70,6 @@ static int systemcpu_cache_line_size(void)
return size;
}
-#else
-/*
- * Use dummy data if not available from /sys/devices/system/cpu/
- */
-static int systemcpu_cache_line_size(void)
-{
- return 64;
-}
-#endif
-
-
static uint64_t default_huge_page_size(void)
{
char str[1024];
@@ -91,16 +77,18 @@ static uint64_t default_huge_page_size(void)
FILE *file;
file = fopen("/proc/meminfo", "rt");
+ if (!file)
+ return 0;
while (fgets(str, sizeof(str), file) != NULL) {
if (sscanf(str, "Hugepagesize: %8lu kB", &sz) == 1) {
- ODP_DBG("defaut hp size is %" PRIu64 " kB\n", sz);
+ _ODP_DBG("default hp size is %lu kB\n", sz);
fclose(file);
return (uint64_t)sz * 1024;
}
}
- ODP_ERR("unable to get default hp size\n");
+ _ODP_ERR("unable to get default hp size\n");
fclose(file);
return 0;
}
@@ -226,7 +214,7 @@ static char *get_hugepage_dir(uint64_t hugepage_sz)
while (fgets(buf, sizeof(buf), fd)) {
if (strsplit(buf, sizeof(buf), tokens,
_FIELDNAME_MAX, split_tok) != _FIELDNAME_MAX) {
- ODP_ERR("Error parsing %s\n", proc_mounts);
+ _ODP_ERR("Error parsing %s\n", proc_mounts);
break; /* return NULL */
}
@@ -240,11 +228,9 @@ static char *get_hugepage_dir(uint64_t hugepage_sz)
retval = strdup(tokens[MOUNTPT]);
break;
}
- }
- /* there is an explicit page size, so check it */
- else {
- pagesz =
- str_to_size(&pagesz_str[pagesize_opt_len]);
+ } else {
+ /* there is an explicit page size, so check it */
+ pagesz = str_to_size(&pagesz_str[pagesize_opt_len]);
if (pagesz == hugepage_sz) {
retval = strdup(tokens[MOUNTPT]);
break;
@@ -258,33 +244,61 @@ static char *get_hugepage_dir(uint64_t hugepage_sz)
}
/*
- * Analysis of /sys/devices/system/cpu/ files
+ * Analysis of /sys/devices/system/cpu/cpu%d/cpufreq/ files
*/
-static int systemcpu(system_info_t *sysinfo)
+static uint64_t read_cpufreq(const char *filename, int id)
{
- int ret;
+ char path[256], buffer[256], *endptr = NULL;
+ FILE *file;
+ uint64_t ret = 0;
- ret = sysconf_cpu_count();
- if (ret == 0) {
- ODP_ERR("sysconf_cpu_count failed.\n");
- return -1;
- }
+ snprintf(path, sizeof(path),
+ "/sys/devices/system/cpu/cpu%d/cpufreq/%s", id, filename);
+
+ file = fopen(path, "r");
+ if (file == NULL)
+ return ret;
- sysinfo->cpu_count = ret;
+ if (fgets(buffer, sizeof(buffer), file) != NULL)
+ ret = strtoull(buffer, &endptr, 0) * 1000;
+ fclose(file);
+
+ return ret;
+}
- ret = systemcpu_cache_line_size();
+static inline uint64_t cpu_hz_current(int id)
+{
+ uint64_t cur_hz = read_cpufreq("cpuinfo_cur_freq", id);
+
+ if (!cur_hz)
+ cur_hz = odp_cpu_arch_hz_current(id);
+
+ return cur_hz;
+}
+
+static inline uint64_t cpu_hz_static(int id)
+{
+ return odp_global_ro.system_info.cpu_hz[id];
+}
+
+/*
+ * Analysis of /sys/devices/system/cpu/ files
+ */
+static int system_cache_line(system_info_t *sysinfo)
+{
+ int ret;
+
+ ret = read_cache_line_size();
if (ret == 0) {
- ODP_ERR("systemcpu_cache_line_size failed.\n");
+ _ODP_ERR("read_cache_line_size failed.\n");
return -1;
}
sysinfo->cache_line_size = ret;
- if (ret != ODP_CACHE_LINE_SIZE) {
- ODP_ERR("Cache line sizes definitions don't match.\n");
- return -1;
- }
+ if (ret != ODP_CACHE_LINE_SIZE)
+ _ODP_WARN("Host CPU cache line size and ODP_CACHE_LINE_SIZE don't match.\n");
return 0;
}
@@ -302,33 +316,103 @@ static int system_hp(hugepage_info_t *hugeinfo)
return 0;
}
+static int read_config_file(void)
+{
+ const char *str;
+ int val = 0;
+
+ str = "system.cpu_mhz";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.default_cpu_hz = (uint64_t)val * 1000000;
+
+ str = "system.cpu_mhz_max";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.default_cpu_hz_max = (uint64_t)val * 1000000;
+
+ str = "system.cpu_hz_static";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ odp_global_ro.system_info.cpu_hz_static = !!val;
+
+ return 0;
+}
+
+static void print_compiler_info(void)
+{
+ _ODP_PRINT("Compiler defines:\n");
+ _ODP_PRINT(" __GCC_ATOMIC_LLONG_LOCK_FREE: %d\n", __GCC_ATOMIC_LLONG_LOCK_FREE);
+ _ODP_PRINT(" __GCC_ATOMIC_LONG_LOCK_FREE: %d\n", __GCC_ATOMIC_LONG_LOCK_FREE);
+ _ODP_PRINT(" __GCC_ATOMIC_INT_LOCK_FREE: %d\n", __GCC_ATOMIC_INT_LOCK_FREE);
+ _ODP_PRINT(" __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16: ");
+#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
+ _ODP_PRINT("1\n");
+#else
+ _ODP_PRINT("0\n");
+#endif
+ _ODP_PRINT("\n");
+}
+
/*
* System info initialisation
*/
-int odp_system_info_init(void)
+int _odp_system_info_init(void)
{
+ int num_cpus;
+ int i;
FILE *file;
- memset(&odp_global_data.system_info, 0, sizeof(system_info_t));
+ memset(&odp_global_ro.system_info, 0, sizeof(system_info_t));
- odp_global_data.system_info.page_size = ODP_PAGE_SIZE;
+ odp_global_ro.system_info.page_size = ODP_PAGE_SIZE;
- file = fopen("/proc/cpuinfo", "rt");
- if (file == NULL) {
- ODP_ERR("Failed to open /proc/cpuinfo\n");
+ /* Read default CPU Hz values from config file */
+ if (read_config_file())
return -1;
- }
- cpuinfo_parser(file, &odp_global_data.system_info);
+ /* Check that CONFIG_NUM_CPU_IDS is large enough */
+ num_cpus = get_nprocs_conf();
+ if (num_cpus > CONFIG_NUM_CPU_IDS)
+ _ODP_ERR("Unable to handle all %d "
+ "CPU IDs. Increase CONFIG_NUM_CPU_IDS value.\n",
+ num_cpus);
- fclose(file);
+ /* Read and save all CPU frequencies for static mode */
+ if (odp_global_ro.system_info.cpu_hz_static)
+ for (i = 0; i < CONFIG_NUM_CPU_IDS; i++)
+ odp_global_ro.system_info.cpu_hz[i] = cpu_hz_current(i);
- if (systemcpu(&odp_global_data.system_info)) {
- ODP_ERR("systemcpu failed\n");
- return -1;
+ /* By default, read max frequency from a cpufreq file */
+ for (i = 0; i < CONFIG_NUM_CPU_IDS; i++) {
+ uint64_t cpu_hz_max = read_cpufreq("cpuinfo_max_freq", i);
+
+ if (cpu_hz_max)
+ odp_global_ro.system_info.cpu_hz_max[i] = cpu_hz_max;
}
- system_hp(&odp_global_data.hugepage_info);
+ file = fopen("/proc/cpuinfo", "rt");
+ if (file != NULL) {
+ /* Read CPU model, and set max cpu frequency
+ * if not set from cpufreq. */
+ _odp_cpuinfo_parser(file, &odp_global_ro.system_info);
+ fclose(file);
+ } else {
+ _odp_dummy_cpuinfo(&odp_global_ro.system_info);
+ }
+
+ if (system_cache_line(&odp_global_ro.system_info))
+ return -1;
+
+ system_hp(&odp_global_ro.hugepage_info);
+
+ print_compiler_info();
return 0;
}
@@ -336,9 +420,9 @@ int odp_system_info_init(void)
/*
* System info termination
*/
-int odp_system_info_term(void)
+int _odp_system_info_term(void)
{
- free(odp_global_data.hugepage_info.default_huge_page_dir);
+ free(odp_global_ro.hugepage_info.default_huge_page_dir);
return 0;
}
@@ -350,14 +434,20 @@ int odp_system_info_term(void)
*/
uint64_t odp_cpu_hz(void)
{
- int id = sched_getcpu();
+ int id = odp_cpu_id();
- return odp_cpu_hz_current(id);
+ if (odp_global_ro.system_info.cpu_hz_static)
+ return cpu_hz_static(id);
+ return cpu_hz_current(id);
}
uint64_t odp_cpu_hz_id(int id)
{
- return odp_cpu_hz_current(id);
+ _ODP_ASSERT(id >= 0 && id < CONFIG_NUM_CPU_IDS);
+
+ if (odp_global_ro.system_info.cpu_hz_static)
+ return cpu_hz_static(id);
+ return cpu_hz_current(id);
}
uint64_t odp_cpu_hz_max(void)
@@ -367,20 +457,63 @@ uint64_t odp_cpu_hz_max(void)
uint64_t odp_cpu_hz_max_id(int id)
{
- if (id >= 0 && id < MAX_CPU_NUMBER)
- return odp_global_data.system_info.cpu_hz_max[id];
+ if (id >= 0 && id < CONFIG_NUM_CPU_IDS)
+ return odp_global_ro.system_info.cpu_hz_max[id];
else
return 0;
}
uint64_t odp_sys_huge_page_size(void)
{
- return odp_global_data.hugepage_info.default_huge_page_size;
+ return odp_global_ro.hugepage_info.default_huge_page_size;
+}
+
+static int pagesz_compare(const void *pagesz1, const void *pagesz2)
+{
+ const uint64_t val1 = *(const uint64_t *)pagesz1;
+ const uint64_t val2 = *(const uint64_t *)pagesz2;
+
+ if (val1 < val2)
+ return -1;
+ if (val1 > val2)
+ return 1;
+ return 0;
+}
+
+int odp_sys_huge_page_size_all(uint64_t size[], int num)
+{
+ DIR *dir;
+ struct dirent *entry;
+ int pagesz_num = 0;
+ int saved = 0;
+
+ /* See: kernel.org: hugetlbpage.txt */
+ dir = opendir("/sys/kernel/mm/hugepages");
+ if (!dir) {
+ _ODP_PRINT("Failed to open /sys/kernel/mm/hugepages: %s\n", strerror(errno));
+ return 0;
+ }
+
+ while ((entry = readdir(dir)) != NULL) {
+ unsigned long sz;
+
+ if (sscanf(entry->d_name, "hugepages-%8lukB", &sz) == 1) {
+ if (size != NULL && saved < num)
+ size[saved++] = sz * 1024;
+ pagesz_num++;
+ }
+ }
+ closedir(dir);
+
+ if (size != NULL && saved > 1)
+ qsort(size, saved, sizeof(uint64_t), pagesz_compare);
+
+ return pagesz_num;
}
uint64_t odp_sys_page_size(void)
{
- return odp_global_data.system_info.page_size;
+ return odp_global_ro.system_info.page_size;
}
const char *odp_cpu_model_str(void)
@@ -390,18 +523,112 @@ const char *odp_cpu_model_str(void)
const char *odp_cpu_model_str_id(int id)
{
- if (id >= 0 && id < MAX_CPU_NUMBER)
- return odp_global_data.system_info.model_str[id];
+ if (id >= 0 && id < CONFIG_NUM_CPU_IDS)
+ return odp_global_ro.system_info.model_str[id];
else
return NULL;
}
int odp_sys_cache_line_size(void)
{
- return odp_global_data.system_info.cache_line_size;
+ return odp_global_ro.system_info.cache_line_size;
}
int odp_cpu_count(void)
{
- return odp_global_data.system_info.cpu_count;
+ return odp_global_ro.num_cpus_installed;
+}
+
+int odp_system_info(odp_system_info_t *info)
+{
+ system_info_t *sys_info = &odp_global_ro.system_info;
+
+ memset(info, 0, sizeof(odp_system_info_t));
+
+ info->cpu_arch = sys_info->cpu_arch;
+ info->cpu_isa_sw = sys_info->cpu_isa_sw;
+ info->cpu_isa_hw = sys_info->cpu_isa_hw;
+
+ return 0;
+}
+
+void odp_sys_info_print(void)
+{
+ int len, num_cpu;
+ int max_len = 512;
+ odp_cpumask_t cpumask;
+ char cpumask_str[ODP_CPUMASK_STR_SIZE];
+ char str[max_len];
+
+ memset(cpumask_str, 0, sizeof(cpumask_str));
+
+ num_cpu = odp_cpumask_all_available(&cpumask);
+ odp_cpumask_to_str(&cpumask, cpumask_str, ODP_CPUMASK_STR_SIZE);
+
+ len = snprintf(str, max_len, "\n"
+ "ODP system info\n"
+ "---------------\n"
+ "ODP API version: %s\n"
+ "ODP impl name: %s\n"
+ "ODP impl details: %s\n"
+ "CPU model: %s\n"
+ "CPU freq (hz): %" PRIu64 "\n"
+ "Cache line size: %i\n"
+ "CPU count: %i\n"
+ "CPU mask: %s\n"
+ "\n",
+ odp_version_api_str(),
+ odp_version_impl_name(),
+ odp_version_impl_str(),
+ odp_cpu_model_str(),
+ odp_cpu_hz_max(),
+ odp_sys_cache_line_size(),
+ num_cpu, cpumask_str);
+
+ str[len] = '\0';
+ _ODP_PRINT("%s", str);
+
+ _odp_sys_info_print_arch();
+}
+
+void odp_sys_config_print(void)
+{
+ /* Print ODP_CONFIG_FILE default and override values */
+ if (_odp_libconfig_print())
+ _ODP_ERR("Config file print failed\n");
+
+ _ODP_PRINT("\n\nodp_config_internal.h values:\n"
+ "-----------------------------\n");
+ _ODP_PRINT("CONFIG_NUM_CPU_IDS: %i\n", CONFIG_NUM_CPU_IDS);
+ _ODP_PRINT("CONFIG_INTERNAL_QUEUES: %i\n", CONFIG_INTERNAL_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_PLAIN_QUEUES: %i\n", CONFIG_MAX_PLAIN_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_SCHED_QUEUES: %i\n", CONFIG_MAX_SCHED_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_QUEUES: %i\n", CONFIG_MAX_QUEUES);
+ _ODP_PRINT("CONFIG_QUEUE_MAX_ORD_LOCKS: %i\n", CONFIG_QUEUE_MAX_ORD_LOCKS);
+ _ODP_PRINT("CONFIG_MAX_DMA_SESSIONS: %i\n", CONFIG_MAX_DMA_SESSIONS);
+ _ODP_PRINT("CONFIG_INTERNAL_STASHES: %i\n", CONFIG_INTERNAL_STASHES);
+ _ODP_PRINT("CONFIG_MAX_STASHES: %i\n", CONFIG_MAX_STASHES);
+ _ODP_PRINT("CONFIG_PKTIO_ENTRIES: %i\n", CONFIG_PKTIO_ENTRIES);
+ _ODP_PRINT("CONFIG_BUFFER_ALIGN_MAX: %i\n", CONFIG_BUFFER_ALIGN_MAX);
+ _ODP_PRINT("CONFIG_PACKET_HEADROOM: %i\n", CONFIG_PACKET_HEADROOM);
+ _ODP_PRINT("CONFIG_PACKET_TAILROOM: %i\n", CONFIG_PACKET_TAILROOM);
+ _ODP_PRINT("CONFIG_PACKET_SEG_SIZE: %i\n", CONFIG_PACKET_SEG_SIZE);
+ _ODP_PRINT("CONFIG_PACKET_MAX_SEG_LEN: %i\n", CONFIG_PACKET_MAX_SEG_LEN);
+ _ODP_PRINT("CONFIG_PACKET_SEG_LEN_MIN: %i\n", CONFIG_PACKET_SEG_LEN_MIN);
+ _ODP_PRINT("CONFIG_PACKET_VECTOR_MAX_SIZE: %i\n", CONFIG_PACKET_VECTOR_MAX_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_SHM_BLOCKS: %i\n", CONFIG_INTERNAL_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_SHM_BLOCKS: %i\n", CONFIG_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_BURST_SIZE: %i\n", CONFIG_BURST_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_POOLS: %i\n", CONFIG_INTERNAL_POOLS);
+ _ODP_PRINT("CONFIG_POOLS: %i\n", CONFIG_POOLS);
+ _ODP_PRINT("CONFIG_POOL_MAX_NUM: %i\n", CONFIG_POOL_MAX_NUM);
+ _ODP_PRINT("CONFIG_POOL_CACHE_MAX_SIZE: %i\n", CONFIG_POOL_CACHE_MAX_SIZE);
+ _ODP_PRINT("CONFIG_POOL_STATISTICS: %i\n", CONFIG_POOL_STATISTICS);
+ _ODP_PRINT("CONFIG_IPSEC_MAX_NUM_SA: %i\n", CONFIG_IPSEC_MAX_NUM_SA);
+ _ODP_PRINT("CONFIG_TIMER_128BIT_ATOMICS: %i\n", CONFIG_TIMER_128BIT_ATOMICS);
+ _ODP_PRINT("CONFIG_TIMER_PROFILE_INLINE: %i\n", CONFIG_TIMER_PROFILE_INLINE);
+ _ODP_PRINT("CONFIG_ML_MAX_MODELS: %i\n", CONFIG_ML_MAX_MODELS);
+ _ODP_PRINT("CONFIG_ML_MAX_INPUTS: %i\n", CONFIG_ML_MAX_INPUTS);
+ _ODP_PRINT("CONFIG_ML_MAX_OUTPUTS: %i\n", CONFIG_ML_MAX_OUTPUTS);
+ _ODP_PRINT("\n");
}
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index 33a8a7f3c..88aec8f06 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -9,28 +10,23 @@
#include <sched.h>
#include <odp/api/thread.h>
#include <odp/api/thrmask.h>
-#include <odp_internal.h>
#include <odp/api/spinlock.h>
+#include <odp_init_internal.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
#include <odp/api/shared_memory.h>
#include <odp/api/align.h>
#include <odp/api/cpu.h>
#include <odp_schedule_if.h>
+#include <odp/api/plat/thread_inlines.h>
+#include <odp_libconfig_internal.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
typedef struct {
- int thr;
- int cpu;
- odp_thread_type_t type;
-} thread_state_t;
-
-
-typedef struct {
- thread_state_t thr[ODP_THREAD_COUNT_MAX];
+ _odp_thread_state_t thr[ODP_THREAD_COUNT_MAX];
struct {
odp_thrmask_t all;
@@ -41,23 +37,38 @@ typedef struct {
uint32_t num;
uint32_t num_worker;
uint32_t num_control;
+ uint32_t num_max;
odp_spinlock_t lock;
} thread_globals_t;
-
/* Globals */
static thread_globals_t *thread_globals;
+#include <odp/visibility_begin.h>
/* Thread local */
-static __thread thread_state_t *this_thread;
+__thread _odp_thread_state_t *_odp_this_thread;
+#include <odp/visibility_end.h>
-int odp_thread_init_global(void)
+int _odp_thread_init_global(void)
{
odp_shm_t shm;
+ int num_max = 0;
+ const char *str = "system.thread_count_max";
+
+ if (!_odp_libconfig_lookup_int(str, &num_max)) {
+ _ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+ if (num_max <= 0) {
+ _ODP_ERR("Config option '%s' not valid.\n", str);
+ return -1;
+ }
+ if (num_max > ODP_THREAD_COUNT_MAX)
+ num_max = ODP_THREAD_COUNT_MAX;
- shm = odp_shm_reserve("odp_thread_globals",
+ shm = odp_shm_reserve("_odp_thread_global",
sizeof(thread_globals_t),
ODP_CACHE_LINE_SIZE, 0);
@@ -68,17 +79,27 @@ int odp_thread_init_global(void)
memset(thread_globals, 0, sizeof(thread_globals_t));
odp_spinlock_init(&thread_globals->lock);
+ thread_globals->num_max = num_max;
+ _ODP_PRINT("System config:\n");
+ _ODP_PRINT(" system.thread_count_max: %d\n\n", num_max);
return 0;
}
-int odp_thread_term_global(void)
+int _odp_thread_term_global(void)
{
- int ret;
+ int ret, num;
+
+ odp_spinlock_lock(&thread_globals->lock);
+ num = thread_globals->num;
+ odp_spinlock_unlock(&thread_globals->lock);
- ret = odp_shm_free(odp_shm_lookup("odp_thread_globals"));
+ if (num)
+ _ODP_ERR("%u threads have not called odp_term_local().\n", num);
+
+ ret = odp_shm_free(odp_shm_lookup("_odp_thread_global"));
if (ret < 0)
- ODP_ERR("shm free failed for odp_thread_globals");
+ _ODP_ERR("shm free failed for _odp_thread_globals");
return ret;
}
@@ -88,10 +109,10 @@ static int alloc_id(odp_thread_type_t type)
int thr;
odp_thrmask_t *all = &thread_globals->all;
- if (thread_globals->num >= ODP_THREAD_COUNT_MAX)
+ if (thread_globals->num >= thread_globals->num_max)
return -1;
- for (thr = 0; thr < ODP_THREAD_COUNT_MAX; thr++) {
+ for (thr = 0; thr < (int)thread_globals->num_max; thr++) {
if (odp_thrmask_isset(all, thr) == 0) {
odp_thrmask_set(all, thr);
@@ -115,7 +136,7 @@ static int free_id(int thr)
{
odp_thrmask_t *all = &thread_globals->all;
- if (thr < 0 || thr >= ODP_THREAD_COUNT_MAX)
+ if (thr < 0 || thr >= (int)thread_globals->num_max)
return -1;
if (odp_thrmask_isset(all, thr) == 0)
@@ -135,24 +156,38 @@ static int free_id(int thr)
return thread_globals->num;
}
-int odp_thread_init_local(odp_thread_type_t type)
+int _odp_thread_init_local(odp_thread_type_t type)
{
int id;
int cpu;
+ int group_all, group_worker, group_control;
+
+ group_all = 1;
+ group_worker = 1;
+ group_control = 1;
+
+ if (_odp_sched_fn->get_config) {
+ schedule_config_t schedule_config;
+
+ _odp_sched_fn->get_config(&schedule_config);
+ group_all = schedule_config.group_enable.all;
+ group_worker = schedule_config.group_enable.worker;
+ group_control = schedule_config.group_enable.control;
+ }
odp_spinlock_lock(&thread_globals->lock);
id = alloc_id(type);
odp_spinlock_unlock(&thread_globals->lock);
if (id < 0) {
- ODP_ERR("Too many threads\n");
+ _ODP_ERR("Too many threads\n");
return -1;
}
cpu = sched_getcpu();
if (cpu < 0) {
- ODP_ERR("getcpu failed\n");
+ _ODP_ERR("getcpu failed\n");
return -1;
}
@@ -160,66 +195,91 @@ int odp_thread_init_local(odp_thread_type_t type)
thread_globals->thr[id].cpu = cpu;
thread_globals->thr[id].type = type;
- this_thread = &thread_globals->thr[id];
+ _odp_this_thread = &thread_globals->thr[id];
+
+ if (group_all)
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
- sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
+ if (type == ODP_THREAD_WORKER && group_worker)
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
- if (type == ODP_THREAD_WORKER)
- sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
- else if (type == ODP_THREAD_CONTROL)
- sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
+ if (type == ODP_THREAD_CONTROL && group_control)
+ _odp_sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
return 0;
}
-int odp_thread_term_local(void)
+int _odp_thread_term_local(void)
{
int num;
- int id = this_thread->thr;
- odp_thread_type_t type = this_thread->type;
+ int group_all, group_worker, group_control;
+ int id = _odp_this_thread->thr;
+ odp_thread_type_t type = _odp_this_thread->type;
+
+ group_all = 1;
+ group_worker = 1;
+ group_control = 1;
- sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+ if (_odp_sched_fn->get_config) {
+ schedule_config_t schedule_config;
+
+ _odp_sched_fn->get_config(&schedule_config);
+ group_all = schedule_config.group_enable.all;
+ group_worker = schedule_config.group_enable.worker;
+ group_control = schedule_config.group_enable.control;
+ }
- if (type == ODP_THREAD_WORKER)
- sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
- else if (type == ODP_THREAD_CONTROL)
- sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
+ if (group_all)
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+
+ if (type == ODP_THREAD_WORKER && group_worker)
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
+
+ if (type == ODP_THREAD_CONTROL && group_control)
+ _odp_sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
+
+ _odp_this_thread = NULL;
odp_spinlock_lock(&thread_globals->lock);
num = free_id(id);
odp_spinlock_unlock(&thread_globals->lock);
if (num < 0) {
- ODP_ERR("failed to free thread id %i", id);
+ _ODP_ERR("failed to free thread id %i", id);
return -1;
}
return num; /* return a number of threads left */
}
-int odp_thread_id(void)
+int odp_thread_count(void)
{
- return this_thread->thr;
+ return thread_globals->num;
}
-int odp_thread_count(void)
+int odp_thread_control_count(void)
{
- return thread_globals->num;
+ return thread_globals->num_control;
+}
+
+int odp_thread_worker_count(void)
+{
+ return thread_globals->num_worker;
}
int odp_thread_count_max(void)
{
- return ODP_THREAD_COUNT_MAX;
+ return thread_globals->num_max;
}
-odp_thread_type_t odp_thread_type(void)
+int odp_thread_control_count_max(void)
{
- return this_thread->type;
+ return thread_globals->num_max;
}
-int odp_cpu_id(void)
+int odp_thread_worker_count_max(void)
{
- return this_thread->cpu;
+ return thread_globals->num_max;
}
int odp_thrmask_worker(odp_thrmask_t *mask)
diff --git a/platform/linux-generic/odp_thread_api.c b/platform/linux-generic/odp_thread_api.c
new file mode 100644
index 000000000..470a82de7
--- /dev/null
+++ b/platform/linux-generic/odp_thread_api.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/thread.h>
+#include <odp/api/cpu.h>
+
+/* Include non-inlined versions of API functions */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/thread_inlines.h>
diff --git a/platform/linux-generic/odp_thrmask.c b/platform/linux-generic/odp_thrmask.c
index c176a5c6d..f8704a0dd 100644
--- a/platform/linux-generic/odp_thrmask.c
+++ b/platform/linux-generic/odp_thrmask.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/platform/linux-generic/odp_ticketlock.c b/platform/linux-generic/odp_ticketlock.c
deleted file mode 100644
index f73dd9ab4..000000000
--- a/platform/linux-generic/odp_ticketlock.c
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/api/plat/ticketlock_inlines.h>
-#include <odp/api/spec/ticketlock.h>
-
-void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
-{
- odp_atomic_init_u32(&ticketlock->next_ticket, 0);
- odp_atomic_init_u32(&ticketlock->cur_ticket, 0);
-}
-
-/* Include non-inlined versions of API functions */
-#if ODP_ABI_COMPAT == 1
-#include <odp/api/plat/ticketlock_inlines_api.h>
-#endif
diff --git a/platform/linux-generic/odp_ticketlock_api.c b/platform/linux-generic/odp_ticketlock_api.c
new file mode 100644
index 000000000..8995dd555
--- /dev/null
+++ b/platform/linux-generic/odp_ticketlock_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2013-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/ticketlock.h>
+#include <odp/api/plat/atomic_inlines.h>
+
+#define _ODP_NO_INLINE
+#include <odp/api/plat/ticketlock_inlines.h>
diff --git a/platform/linux-generic/odp_time.c b/platform/linux-generic/odp_time.c
deleted file mode 100644
index 81e05224c..000000000
--- a/platform/linux-generic/odp_time.c
+++ /dev/null
@@ -1,213 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_posix_extensions.h>
-
-#include <time.h>
-#include <odp/api/time.h>
-#include <odp/api/hints.h>
-#include <odp_debug_internal.h>
-
-static odp_time_t start_time;
-
-static inline
-uint64_t time_to_ns(odp_time_t time)
-{
- uint64_t ns;
-
- ns = time.tv_sec * ODP_TIME_SEC_IN_NS;
- ns += time.tv_nsec;
-
- return ns;
-}
-
-static inline odp_time_t time_diff(odp_time_t t2, odp_time_t t1)
-{
- odp_time_t time;
-
- time.tv_sec = t2.tv_sec - t1.tv_sec;
- time.tv_nsec = t2.tv_nsec - t1.tv_nsec;
-
- if (time.tv_nsec < 0) {
- time.tv_nsec += ODP_TIME_SEC_IN_NS;
- --time.tv_sec;
- }
-
- return time;
-}
-
-static inline odp_time_t time_local(void)
-{
- int ret;
- odp_time_t time;
- struct timespec sys_time;
-
- ret = clock_gettime(CLOCK_MONOTONIC_RAW, &sys_time);
- if (odp_unlikely(ret != 0))
- ODP_ABORT("clock_gettime failed\n");
-
- time.tv_sec = sys_time.tv_sec;
- time.tv_nsec = sys_time.tv_nsec;
-
- return time_diff(time, start_time);
-}
-
-static inline int time_cmp(odp_time_t t2, odp_time_t t1)
-{
- if (t2.tv_sec < t1.tv_sec)
- return -1;
-
- if (t2.tv_sec > t1.tv_sec)
- return 1;
-
- return t2.tv_nsec - t1.tv_nsec;
-}
-
-static inline odp_time_t time_sum(odp_time_t t1, odp_time_t t2)
-{
- odp_time_t time;
-
- time.tv_sec = t2.tv_sec + t1.tv_sec;
- time.tv_nsec = t2.tv_nsec + t1.tv_nsec;
-
- if (time.tv_nsec >= (long)ODP_TIME_SEC_IN_NS) {
- time.tv_nsec -= ODP_TIME_SEC_IN_NS;
- ++time.tv_sec;
- }
-
- return time;
-}
-
-static inline odp_time_t time_local_from_ns(uint64_t ns)
-{
- odp_time_t time;
-
- time.tv_sec = ns / ODP_TIME_SEC_IN_NS;
- time.tv_nsec = ns - time.tv_sec * ODP_TIME_SEC_IN_NS;
-
- return time;
-}
-
-static inline void time_wait_until(odp_time_t time)
-{
- odp_time_t cur;
-
- do {
- cur = time_local();
- } while (time_cmp(time, cur) > 0);
-}
-
-static inline uint64_t time_local_res(void)
-{
- int ret;
- struct timespec tres;
-
- ret = clock_getres(CLOCK_MONOTONIC_RAW, &tres);
- if (odp_unlikely(ret != 0))
- ODP_ABORT("clock_getres failed\n");
-
- return ODP_TIME_SEC_IN_NS / (uint64_t)tres.tv_nsec;
-}
-
-odp_time_t odp_time_local(void)
-{
- return time_local();
-}
-
-odp_time_t odp_time_global(void)
-{
- return time_local();
-}
-
-odp_time_t odp_time_diff(odp_time_t t2, odp_time_t t1)
-{
- return time_diff(t2, t1);
-}
-
-uint64_t odp_time_to_ns(odp_time_t time)
-{
- return time_to_ns(time);
-}
-
-odp_time_t odp_time_local_from_ns(uint64_t ns)
-{
- return time_local_from_ns(ns);
-}
-
-odp_time_t odp_time_global_from_ns(uint64_t ns)
-{
- return time_local_from_ns(ns);
-}
-
-int odp_time_cmp(odp_time_t t2, odp_time_t t1)
-{
- return time_cmp(t2, t1);
-}
-
-odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
-{
- return time_sum(t1, t2);
-}
-
-uint64_t odp_time_local_res(void)
-{
- return time_local_res();
-}
-
-uint64_t odp_time_global_res(void)
-{
- return time_local_res();
-}
-
-void odp_time_wait_ns(uint64_t ns)
-{
- odp_time_t cur = time_local();
- odp_time_t wait = time_local_from_ns(ns);
- odp_time_t end_time = time_sum(cur, wait);
-
- time_wait_until(end_time);
-}
-
-void odp_time_wait_until(odp_time_t time)
-{
- return time_wait_until(time);
-}
-
-uint64_t odp_time_to_u64(odp_time_t time)
-{
- int ret;
- struct timespec tres;
- uint64_t resolution;
-
- ret = clock_getres(CLOCK_MONOTONIC_RAW, &tres);
- if (odp_unlikely(ret != 0))
- ODP_ABORT("clock_getres failed\n");
-
- resolution = (uint64_t)tres.tv_nsec;
-
- return time_to_ns(time) / resolution;
-}
-
-int odp_time_init_global(void)
-{
- int ret;
- struct timespec time;
-
- ret = clock_gettime(CLOCK_MONOTONIC_RAW, &time);
- if (ret) {
- start_time = ODP_TIME_NULL;
- } else {
- start_time.tv_sec = time.tv_sec;
- start_time.tv_nsec = time.tv_nsec;
- }
-
- return ret;
-}
-
-int odp_time_term_global(void)
-{
- return 0;
-}
diff --git a/platform/linux-generic/odp_time_api.c b/platform/linux-generic/odp_time_api.c
new file mode 100644
index 000000000..d906e14b8
--- /dev/null
+++ b/platform/linux-generic/odp_time_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/time.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/time_inlines.h>
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index cf610bfa0..c8ea31078 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -10,49 +11,65 @@
* ODP timer service
*
*/
-
-#if __SIZEOF_POINTER__ != 8
-/* TB_NEEDS_PAD defined if sizeof(odp_buffer_t) != 8 */
-#define TB_NEEDS_PAD
-#define TB_SET_PAD(x) ((x).pad = 0)
-#else
-#define TB_SET_PAD(x) (void)(x)
-#endif
-
#include <odp_posix_extensions.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <time.h>
-#include <signal.h>
-#include <pthread.h>
-#include <unistd.h>
-#include <sys/syscall.h>
-#include <inttypes.h>
-#include <string.h>
-
#include <odp/api/align.h>
-#include <odp_align_internal.h>
#include <odp/api/atomic.h>
-#include <odp_atomic_internal.h>
-#include <odp/api/buffer.h>
-#include <odp_buffer_inlines.h>
#include <odp/api/cpu.h>
-#include <odp/api/pool.h>
-#include <odp_pool_internal.h>
#include <odp/api/debug.h>
-#include <odp_debug_internal.h>
#include <odp/api/event.h>
#include <odp/api/hints.h>
-#include <odp_internal.h>
+#include <odp/api/pool.h>
#include <odp/api/queue.h>
#include <odp/api/shared_memory.h>
#include <odp/api/spinlock.h>
-#include <odp/api/std_types.h>
+#include <odp/api/std.h>
#include <odp/api/sync.h>
#include <odp/api/time.h>
#include <odp/api/timer.h>
+
+/* Inlined API functions */
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp/api/plat/time_inlines.h>
+#include <odp/api/plat/timer_inlines.h>
+
+#include <odp/api/plat/timer_inline_types.h>
+
+#include <odp_atomic_internal.h>
+#include <odp_config_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_print_internal.h>
+#include <odp_queue_if.h>
#include <odp_timer_internal.h>
+#include <odp_types_internal.h>
+
+#include <errno.h>
+#include <inttypes.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <unistd.h>
+
+/* Check whether 128-bit atomics should be used */
+#if defined(ODP_ATOMIC_U128) && CONFIG_TIMER_128BIT_ATOMICS
+#define USE_128BIT_ATOMICS 1
+#else
+#define USE_128BIT_ATOMICS 0
+#endif
+
+/* One divided by one nanosecond in Hz */
+#define GIGA_HZ 1000000000
#define TMO_UNUSED ((uint64_t)0xFFFFFFFFFFFFFFFF)
/* TMO_INACTIVE is or-ed with the expiration tick to indicate an expired timer.
@@ -60,117 +77,103 @@
* for checking the freshness of received timeouts */
#define TMO_INACTIVE ((uint64_t)0x8000000000000000)
-/******************************************************************************
- * Mutual exclusion in the absence of CAS16
- *****************************************************************************/
+/* Flag set into periodic tick value when periodic timer cancel did not succeed.
+ * Ack call checks this. */
+#define PERIODIC_CANCELLED TMO_INACTIVE
-#ifndef ODP_ATOMIC_U128
-#define NUM_LOCKS 1024
-static _odp_atomic_flag_t locks[NUM_LOCKS]; /* Multiple locks per cache line! */
-#define IDX2LOCK(idx) (&locks[(idx) % NUM_LOCKS])
-#endif
+/* Max timeout in capability. One year in nsec (0x0070 09D3 2DA3 0000). */
+#define MAX_TMO_NSEC (365 * 24 * ODP_TIME_HOUR_IN_NS)
-/******************************************************************************
- * Translation between timeout buffer and timeout header
- *****************************************************************************/
+/* Max inline timer resolution */
+#define MAX_INLINE_RES_NS 500
-static odp_timeout_hdr_t *timeout_hdr_from_buf(odp_buffer_t buf)
-{
- return (odp_timeout_hdr_t *)(void *)buf_hdl_to_hdr(buf);
-}
+/* Timer pool may be reused after this period */
+#define TIMER_POOL_REUSE_NS ODP_TIME_SEC_IN_NS
-static odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
-{
- odp_buffer_t buf = odp_buffer_from_event(odp_timeout_to_event(tmo));
- return timeout_hdr_from_buf(buf);
-}
+/* Minimum periodic timer base frequency */
+#define MIN_BASE_HZ 1
-/******************************************************************************
- * odp_timer abstract datatype
- *****************************************************************************/
+/* Maximum periodic timer multiplier */
+#define MAX_MULTIPLIER 1000000
-typedef struct tick_buf_s {
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- /* No atomics support for 64-bit variables, will use separate lock */
- /* Use the same layout as odp_atomic_u64_t but without lock variable */
- struct {
- uint64_t v;
- } exp_tck;/* Expiration tick or TMO_xxx */
-#else
- odp_atomic_u64_t exp_tck;/* Expiration tick or TMO_xxx */
-#endif
- odp_buffer_t tmo_buf;/* ODP_BUFFER_INVALID if timer not active */
-#ifdef TB_NEEDS_PAD
- uint32_t pad;/* Need to be able to access padding for successful CAS */
+/* Maximum number of periodic timers per pool */
+#define MAX_PERIODIC_TIMERS 100
+
+/* Mutual exclusion in the absence of CAS16 */
+#if !USE_128BIT_ATOMICS
+#define NUM_LOCKS 256
+#define IDX2LOCK(tp, idx) (&(tp)->locks[(idx) % NUM_LOCKS])
#endif
-} tick_buf_t
-#ifdef ODP_ATOMIC_U128
+
+#define ACC_SIZE (1ull << 32)
+
+#include <odp/visibility_begin.h>
+
+/* Fill in timeout header field offsets for inline functions */
+const _odp_timeout_inline_offset_t
+_odp_timeout_inline_offset ODP_ALIGNED_CACHE = {
+ .expiration = offsetof(odp_timeout_hdr_t, expiration),
+ .timer = offsetof(odp_timeout_hdr_t, timer),
+ .user_ptr = offsetof(odp_timeout_hdr_t, user_ptr),
+ .uarea_addr = offsetof(odp_timeout_hdr_t, uarea_addr),
+};
+
+#include <odp/visibility_end.h>
+
+typedef union
+#if USE_128BIT_ATOMICS
ODP_ALIGNED(16) /* 16-byte atomic operations need properly aligned addresses */
#endif
-;
+tick_buf_s {
+#if USE_128BIT_ATOMICS
+ odp_atomic_u128_t tb_atomic_u128;
-#if __GCC_ATOMIC_LLONG_LOCK_FREE >= 2
-/* Only assert this when we perform atomic operations on tick_buf_t */
-ODP_STATIC_ASSERT(sizeof(tick_buf_t) == 16, "sizeof(tick_buf_t) == 16");
+ odp_u128_t tb_u128;
#endif
-typedef struct odp_timer_s {
- void *user_ptr;
- odp_queue_t queue;/* Used for free list when timer is free */
-} odp_timer;
-
-static void timer_init(odp_timer *tim,
- tick_buf_t *tb,
- odp_queue_t _q,
- void *_up)
-{
- tim->queue = _q;
- tim->user_ptr = _up;
- tb->tmo_buf = ODP_BUFFER_INVALID;
- /* All pad fields need a defined and constant value */
- TB_SET_PAD(*tb);
- /* Release the timer by setting timer state to inactive */
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- tb->exp_tck.v = TMO_INACTIVE;
-#else
- _odp_atomic_u64_store_mm(&tb->exp_tck, TMO_INACTIVE, _ODP_MEMMODEL_RLS);
+ struct {
+ /* Expiration tick or TMO_xxx */
+ odp_atomic_u64_t exp_tck;
+ union {
+ /* ODP_EVENT_INVALID if timer not active */
+ odp_event_t tmo_event;
+
+ /* Ensures that tick_buf_t is 128 bits */
+ uint64_t tmo_u64;
+ };
+ };
+} tick_buf_t;
+
+#if USE_128BIT_ATOMICS
+ODP_STATIC_ASSERT(sizeof(tick_buf_t) == 16, "sizeof(tick_buf_t) == 16");
#endif
-}
-/* Teardown when timer is freed */
-static void timer_fini(odp_timer *tim, tick_buf_t *tb)
-{
- ODP_ASSERT(tb->exp_tck.v == TMO_UNUSED);
- ODP_ASSERT(tb->tmo_buf == ODP_BUFFER_INVALID);
- tim->queue = ODP_QUEUE_INVALID;
- tim->user_ptr = NULL;
-}
+typedef struct {
+ const void *user_ptr;
+ odp_queue_t queue;
-static inline uint32_t get_next_free(odp_timer *tim)
-{
- /* Reusing 'queue' for next free index */
- return _odp_typeval(tim->queue);
-}
+ /* Period of periodic timer in ticks (nanoseconds), includes PERIODIC_CANCELLED flag. */
+ uint64_t periodic_ticks;
-static inline void set_next_free(odp_timer *tim, uint32_t nf)
-{
- ODP_ASSERT(tim->queue == ODP_QUEUE_INVALID);
- /* Reusing 'queue' for next free index */
- tim->queue = _odp_cast_scalar(odp_queue_t, nf);
-}
+ /* Periodic ticks fractional part. */
+ uint32_t periodic_ticks_frac;
-/******************************************************************************
- * odp_timer_pool abstract datatype
- * Inludes alloc and free timer
- *****************************************************************************/
+ /* Periodic ticks fractional part accumulator. */
+ uint32_t periodic_ticks_frac_acc;
-typedef struct odp_timer_pool_s {
-/* Put frequently accessed fields in the first cache line */
+ /* Used for free list of timers */
+ uint32_t next_free;
+
+} _odp_timer_t;
+
+typedef struct timer_pool_s {
+ /* Put frequently accessed fields in the first cache line */
+ uint64_t nsec_per_scan;
odp_atomic_u64_t cur_tick;/* Current tick value */
uint64_t min_rel_tck;
uint64_t max_rel_tck;
tick_buf_t *tick_buf; /* Expiration tick and timeout buffer */
- odp_timer *timers; /* User pointer and queue handle (and lock) */
+ _odp_timer_t *timers; /* User pointer and queue handle (and lock) */
odp_atomic_u32_t high_wm;/* High watermark of allocated timers */
odp_spinlock_t lock;
uint32_t num_alloc;/* Current number of allocated timers */
@@ -178,107 +181,146 @@ typedef struct odp_timer_pool_s {
uint32_t tp_idx;/* Index into timer_pool array */
odp_timer_pool_param_t param;
char name[ODP_TIMER_POOL_NAME_LEN];
- odp_shm_t shm;
timer_t timerid;
- int notify_overrun;
- pthread_t timer_thread; /* pthread_t of timer thread */
- pid_t timer_thread_id; /* gettid() for timer thread */
- int timer_thread_exit; /* request to exit for timer thread */
-} odp_timer_pool;
+ /*
+ * Timer pool overrun notification (debug print). Initialize to 0
+ * (don't notify). When value is 0 and a timer is started, set to 1
+ * (notify). When notification is done, set to 2 (don't notify).
+ */
+ odp_atomic_u32_t notify_overrun;
+ int owner;
+ pthread_t thr_pthread; /* pthread_t of timer thread */
+ pid_t thr_pid; /* gettid() for timer thread */
+ int thr_warm_up; /* number of warm up rounds */
+ odp_atomic_u32_t thr_ready; /* thread ready from warm up */
+ int thr_exit; /* request to exit for timer thread */
+ double base_freq;
+ uint64_t max_multiplier;
+ uint8_t periodic;
+#if !USE_128BIT_ATOMICS
+ /* Multiple locks per cache line! */
+ _odp_atomic_flag_t locks[NUM_LOCKS] ODP_ALIGNED_CACHE;
+#endif
+
+} timer_pool_t;
-#define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */
+/* Timer pool index must fit into 8 bits with one index value reserved to
+ * ODP_TIMER_POOL_INVALID. */
+#define MAX_TIMER_POOLS 32
#define INDEX_BITS 24
-static odp_atomic_u32_t num_timer_pools;
-static odp_timer_pool *timer_pool[MAX_TIMER_POOLS];
+#define TIMER_RES_TEST_LOOP_COUNT 10
+#define TIMER_RES_ROUNDUP_FACTOR 10
+
+typedef struct timer_global_t {
+ odp_ticketlock_t lock;
+ odp_shm_t shm;
+ /* Max timer resolution in nanoseconds */
+ uint64_t highest_res_ns;
+ uint64_t highest_res_hz;
+ uint64_t max_base_hz;
+ uint64_t poll_interval_nsec;
+ int num_timer_pools;
+ uint8_t timer_pool_used[MAX_TIMER_POOLS];
+ odp_time_t destroy_time[MAX_TIMER_POOLS];
+ odp_shm_t tp_shm[MAX_TIMER_POOLS];
+ timer_pool_t *timer_pool[MAX_TIMER_POOLS];
+
+ /* These are read frequently from inline timer */
+ odp_time_t poll_interval_time;
+ odp_bool_t use_inline_timers;
+ int poll_interval;
+ int highest_tp_idx;
+ uint8_t thread_type;
+
+} timer_global_t;
+
+typedef struct timer_local_t {
+ odp_time_t last_run;
+ int run_cnt;
+ uint8_t poll_shared;
+ uint64_t prof_nsec;
+ uint64_t prof_rounds;
+} timer_local_t;
+
+/* Points to timer global data */
+static timer_global_t *timer_global;
+
+/* Timer thread local data */
+static __thread timer_local_t timer_local;
+
+static inline void set_next_free(_odp_timer_t *tim, uint32_t nf)
+{
+ _ODP_ASSERT(tim->queue == ODP_QUEUE_INVALID);
+
+ tim->next_free = nf;
+}
-static inline odp_timer_pool *handle_to_tp(odp_timer_t hdl)
+static inline timer_pool_t *timer_pool_from_hdl(odp_timer_pool_t hdl)
+{
+ return (timer_pool_t *)(uintptr_t)hdl;
+}
+
+static inline odp_timer_pool_t timer_pool_to_hdl(timer_pool_t *tp)
+{
+ return (odp_timer_pool_t)tp;
+}
+
+static inline timer_pool_t *handle_to_tp(odp_timer_t hdl)
{
uint32_t tp_idx = _odp_typeval(hdl) >> INDEX_BITS;
- if (odp_likely(tp_idx < MAX_TIMER_POOLS)) {
- odp_timer_pool *tp = timer_pool[tp_idx];
- if (odp_likely(tp != NULL))
- return timer_pool[tp_idx];
- }
- ODP_ABORT("Invalid timer handle %#x\n", hdl);
+ timer_pool_t *tp;
+
+ _ODP_ASSERT(tp_idx < MAX_TIMER_POOLS);
+
+ tp = timer_global->timer_pool[tp_idx];
+
+ _ODP_ASSERT(tp != NULL);
+
+ return tp;
}
static inline uint32_t handle_to_idx(odp_timer_t hdl,
- struct odp_timer_pool_s *tp)
+ timer_pool_t *tp)
{
- uint32_t idx = _odp_typeval(hdl) & ((1U << INDEX_BITS) - 1U);
+ uint32_t idx = (_odp_typeval(hdl) & ((1U << INDEX_BITS) - 1U)) - 1;
+
+ _ODP_ASSERT(idx < odp_atomic_load_u32(&tp->high_wm));
+
__builtin_prefetch(&tp->tick_buf[idx], 0, 0);
- if (odp_likely(idx < odp_atomic_load_u32(&tp->high_wm)))
- return idx;
- ODP_ABORT("Invalid timer handle %#x\n", hdl);
+
+ return idx;
}
-static inline odp_timer_t tp_idx_to_handle(struct odp_timer_pool_s *tp,
- uint32_t idx)
+static inline odp_timer_t tp_idx_to_handle(timer_pool_t *tp,
+ uint32_t idx)
{
- ODP_ASSERT(idx < (1U << INDEX_BITS));
- return _odp_cast_scalar(odp_timer_t, (tp->tp_idx << INDEX_BITS) | idx);
+ _ODP_ASSERT((idx + 1) < (1U << INDEX_BITS));
+ return _odp_cast_scalar(odp_timer_t, (tp->tp_idx << INDEX_BITS) |
+ (idx + 1));
}
-/* Forward declarations */
-static void itimer_init(odp_timer_pool *tp);
-static void itimer_fini(odp_timer_pool *tp);
+static inline odp_timeout_hdr_t *timeout_hdr_from_event(odp_event_t event)
+{
+ return (odp_timeout_hdr_t *)(uintptr_t)event;
+}
-static odp_timer_pool_t odp_timer_pool_new(const char *name,
- const odp_timer_pool_param_t *param)
+static inline odp_timeout_hdr_t *timeout_hdr(odp_timeout_t tmo)
{
- uint32_t tp_idx = odp_atomic_fetch_add_u32(&num_timer_pools, 1);
- if (odp_unlikely(tp_idx >= MAX_TIMER_POOLS)) {
- /* Restore the previous value */
- odp_atomic_sub_u32(&num_timer_pools, 1);
- __odp_errno = ENFILE; /* Table overflow */
- return ODP_TIMER_POOL_INVALID;
- }
- size_t sz0 = ROUNDUP_CACHE_LINE(sizeof(odp_timer_pool));
- size_t sz1 = ROUNDUP_CACHE_LINE(sizeof(tick_buf_t) * param->num_timers);
- size_t sz2 = ROUNDUP_CACHE_LINE(sizeof(odp_timer) * param->num_timers);
- odp_shm_t shm = odp_shm_reserve(name, sz0 + sz1 + sz2,
- ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
- if (odp_unlikely(shm == ODP_SHM_INVALID))
- ODP_ABORT("%s: timer pool shm-alloc(%zuKB) failed\n",
- name, (sz0 + sz1 + sz2) / 1024);
- odp_timer_pool *tp = (odp_timer_pool *)odp_shm_addr(shm);
- odp_atomic_init_u64(&tp->cur_tick, 0);
+ return (odp_timeout_hdr_t *)(uintptr_t)tmo;
+}
- if (name == NULL) {
- tp->name[0] = 0;
- } else {
- strncpy(tp->name, name, ODP_TIMER_POOL_NAME_LEN - 1);
- tp->name[ODP_TIMER_POOL_NAME_LEN - 1] = 0;
- }
- tp->shm = shm;
- tp->param = *param;
- tp->min_rel_tck = odp_timer_ns_to_tick(tp, param->min_tmo);
- tp->max_rel_tck = odp_timer_ns_to_tick(tp, param->max_tmo);
- tp->num_alloc = 0;
- odp_atomic_init_u32(&tp->high_wm, 0);
- tp->first_free = 0;
- tp->notify_overrun = 1;
- tp->tick_buf = (void *)((char *)odp_shm_addr(shm) + sz0);
- tp->timers = (void *)((char *)odp_shm_addr(shm) + sz0 + sz1);
- /* Initialize all odp_timer entries */
- uint32_t i;
- for (i = 0; i < tp->param.num_timers; i++) {
- tp->timers[i].queue = ODP_QUEUE_INVALID;
- set_next_free(&tp->timers[i], i + 1);
- tp->timers[i].user_ptr = NULL;
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- tp->tick_buf[i].exp_tck.v = TMO_UNUSED;
-#else
- odp_atomic_init_u64(&tp->tick_buf[i].exp_tck, TMO_UNUSED);
-#endif
- tp->tick_buf[i].tmo_buf = ODP_BUFFER_INVALID;
- }
- tp->tp_idx = tp_idx;
- odp_spinlock_init(&tp->lock);
- timer_pool[tp_idx] = tp;
- if (tp->param.clk_src == ODP_CLOCK_CPU)
- itimer_init(tp);
- return tp;
+static uint64_t max_multiplier_capa(double freq)
+{
+ uint64_t mult;
+
+ if (freq < MIN_BASE_HZ)
+ return 0;
+
+ mult = timer_global->max_base_hz / freq;
+ if (mult > MAX_MULTIPLIER)
+ mult = MAX_MULTIPLIER;
+
+ return mult;
}
static void block_sigalarm(void)
@@ -290,384 +332,568 @@ static void block_sigalarm(void)
sigprocmask(SIG_BLOCK, &sigset, NULL);
}
-static void stop_timer_thread(odp_timer_pool *tp)
+static void posix_timer_stop(timer_pool_t *tp)
{
int ret;
- ODP_DBG("stop\n");
- tp->timer_thread_exit = 1;
- ret = pthread_join(tp->timer_thread, NULL);
+ /* Stop POSIX timer signals */
+ if (timer_delete(tp->timerid) != 0)
+ _ODP_ABORT("timer_delete() returned error: %s\n", strerror(errno));
+
+ /* Stop the thread */
+ _ODP_DBG("stop\n");
+ tp->thr_exit = 1;
+ ret = pthread_join(tp->thr_pthread, NULL);
if (ret != 0)
- ODP_ABORT("unable to join thread, err %d\n", ret);
+ _ODP_ABORT("Unable to join thread, err %d\n", ret);
}
-static void odp_timer_pool_del(odp_timer_pool *tp)
+static void odp_timer_pool_del(timer_pool_t *tp)
{
- odp_spinlock_lock(&tp->lock);
- timer_pool[tp->tp_idx] = NULL;
+ int highest;
+ uint32_t tp_idx = tp->tp_idx;
- /* Stop timer triggering */
- if (tp->param.clk_src == ODP_CLOCK_CPU)
- itimer_fini(tp);
+ odp_spinlock_lock(&tp->lock);
- stop_timer_thread(tp);
+ if (!odp_global_rw->inline_timers)
+ posix_timer_stop(tp);
if (tp->num_alloc != 0) {
/* It's a programming error to attempt to destroy a */
/* timer pool which is still in use */
- ODP_ABORT("%s: timers in use\n", tp->name);
+ odp_spinlock_unlock(&tp->lock);
+ _ODP_ABORT("%s: timers in use\n", tp->name);
+ }
+
+ odp_spinlock_unlock(&tp->lock);
+
+ odp_ticketlock_lock(&timer_global->lock);
+ timer_global->timer_pool[tp_idx] = NULL;
+ timer_global->timer_pool_used[tp_idx] = 0;
+ timer_global->num_timer_pools--;
+ timer_global->destroy_time[tp_idx] = odp_time_global();
+
+ highest = -1;
+
+ /* Disable inline timer polling */
+ if (timer_global->num_timer_pools == 0) {
+ odp_global_rw->inline_timers = false;
+ } else {
+ int i;
+
+ for (i = 0; i < MAX_TIMER_POOLS; i++)
+ if (timer_global->timer_pool_used[i])
+ highest = i;
}
- int rc = odp_shm_free(tp->shm);
- if (rc != 0)
- ODP_ABORT("Failed to free shared memory (%d)\n", rc);
- odp_atomic_sub_u32(&num_timer_pools, 1);
+ timer_global->highest_tp_idx = highest;
+
+ odp_ticketlock_unlock(&timer_global->lock);
}
-static inline odp_timer_t timer_alloc(odp_timer_pool *tp,
- odp_queue_t queue,
- void *user_ptr)
+static inline odp_timer_t timer_alloc(timer_pool_t *tp, odp_queue_t queue, const void *user_ptr)
{
odp_timer_t hdl;
+
odp_spinlock_lock(&tp->lock);
if (odp_likely(tp->num_alloc < tp->param.num_timers)) {
tp->num_alloc++;
/* Remove first unused timer from free list */
- ODP_ASSERT(tp->first_free != tp->param.num_timers);
+ _ODP_ASSERT(tp->first_free != tp->param.num_timers);
uint32_t idx = tp->first_free;
- odp_timer *tim = &tp->timers[idx];
- tp->first_free = get_next_free(tim);
- /* Initialize timer */
- timer_init(tim, &tp->tick_buf[idx], queue, user_ptr);
- if (odp_unlikely(tp->num_alloc >
- odp_atomic_load_u32(&tp->high_wm)))
+ _odp_timer_t *tim = &tp->timers[idx];
+ tick_buf_t *tb = &tp->tick_buf[idx];
+
+ tp->first_free = tim->next_free;
+ tim->queue = queue;
+ tim->user_ptr = user_ptr;
+ tb->tmo_u64 = 0;
+ tb->tmo_event = ODP_EVENT_INVALID;
+
+ /* Release the timer by setting timer state to inactive */
+ odp_atomic_store_rel_u64(&tb->exp_tck, TMO_INACTIVE);
+
+
+ if (odp_unlikely(tp->num_alloc > odp_atomic_load_u32(&tp->high_wm))) {
/* Update high_wm last with release model to
* ensure timer initialization is visible */
- _odp_atomic_u32_store_mm(&tp->high_wm,
- tp->num_alloc,
- _ODP_MEMMODEL_RLS);
+ odp_atomic_store_rel_u32(&tp->high_wm, tp->num_alloc);
+ }
+
hdl = tp_idx_to_handle(tp, idx);
+ /* Add timer to queue */
+ _odp_queue_fn->timer_add(queue);
} else {
- __odp_errno = ENFILE; /* Reusing file table overflow */
+ /* Reusing file table overflow */
hdl = ODP_TIMER_INVALID;
}
odp_spinlock_unlock(&tp->lock);
return hdl;
}
-static odp_buffer_t timer_cancel(odp_timer_pool *tp,
- uint32_t idx,
- uint64_t new_state);
-
-static inline odp_buffer_t timer_free(odp_timer_pool *tp, uint32_t idx)
-{
- odp_timer *tim = &tp->timers[idx];
-
- /* Free the timer by setting timer state to unused and
- * grab any timeout buffer */
- odp_buffer_t old_buf = timer_cancel(tp, idx, TMO_UNUSED);
-
- /* Destroy timer */
- timer_fini(tim, &tp->tick_buf[idx]);
-
- /* Insert timer into free list */
- odp_spinlock_lock(&tp->lock);
- set_next_free(tim, tp->first_free);
- tp->first_free = idx;
- ODP_ASSERT(tp->num_alloc != 0);
- tp->num_alloc--;
- odp_spinlock_unlock(&tp->lock);
-
- return old_buf;
-}
-
-/******************************************************************************
- * Operations on timers
- * expire/reset/cancel timer
- *****************************************************************************/
-
-static bool timer_reset(uint32_t idx,
- uint64_t abs_tck,
- odp_buffer_t *tmo_buf,
- odp_timer_pool *tp)
+static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_event_t *tmo_event,
+ timer_pool_t *tp)
{
bool success = true;
tick_buf_t *tb = &tp->tick_buf[idx];
- if (tmo_buf == NULL || *tmo_buf == ODP_BUFFER_INVALID) {
-#ifdef ODP_ATOMIC_U128 /* Target supports 128-bit atomic operations */
+ if (tmo_event == NULL || *tmo_event == ODP_EVENT_INVALID) {
+#if USE_128BIT_ATOMICS /* Target supports 128-bit atomic operations */
tick_buf_t new, old;
+
+ /* Init all bits, also when tmo_event is less than 64 bits */
+ new.tmo_u64 = 0;
+ old.tmo_u64 = 0;
+
+ /* Relaxed and non-atomic read of current values */
+ old.exp_tck.v = tb->exp_tck.v;
+ old.tmo_event = tb->tmo_event;
+
do {
- /* Relaxed and non-atomic read of current values */
- old.exp_tck.v = tb->exp_tck.v;
- old.tmo_buf = tb->tmo_buf;
- TB_SET_PAD(old);
- /* Check if there actually is a timeout buffer
+ /* Check if there actually is a timeout event
* present */
- if (old.tmo_buf == ODP_BUFFER_INVALID) {
+ if (old.tmo_event == ODP_EVENT_INVALID) {
/* Cannot reset a timer with neither old nor
- * new timeout buffer */
+ * new timeout event */
success = false;
break;
}
/* Set up new values */
new.exp_tck.v = abs_tck;
- new.tmo_buf = old.tmo_buf;
- TB_SET_PAD(new);
+ new.tmo_event = old.tmo_event;
+
/* Atomic CAS will fail if we experienced torn reads,
* retry update sequence until CAS succeeds */
- } while (!_odp_atomic_u128_cmp_xchg_mm(
- (_odp_atomic_u128_t *)tb,
- (_uint128_t *)&old,
- (_uint128_t *)&new,
- _ODP_MEMMODEL_RLS,
- _ODP_MEMMODEL_RLX));
-#elif __GCC_ATOMIC_LLONG_LOCK_FREE >= 2 && \
- defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8
- /* Target supports lock-free 64-bit CAS (and probably exchange) */
- /* Since locks/barriers are not good for C-A15, we take an
- * alternative approach using relaxed memory model */
- uint64_t old;
- /* Swap in new expiration tick, get back old tick which
- * will indicate active/inactive timer state */
- old = _odp_atomic_u64_xchg_mm(&tb->exp_tck, abs_tck,
- _ODP_MEMMODEL_RLX);
- if ((old & TMO_INACTIVE) != 0) {
- /* Timer was inactive (cancelled or expired),
- * we can't reset a timer without a timeout buffer.
- * Attempt to restore inactive state, we don't
- * want this timer to continue as active without
- * timeout as this will trigger unnecessary and
- * aborted expiration attempts.
- * We don't care if we fail, then some other thread
- * reset or cancelled the timer. Without any
- * synchronization between the threads, we have a
- * data race and the behavior is undefined */
- (void)_odp_atomic_u64_cmp_xchg_strong_mm(
- &tb->exp_tck,
- &abs_tck,
- old,
- _ODP_MEMMODEL_RLX,
- _ODP_MEMMODEL_RLX);
- success = false;
- }
-#else /* Target supports neither 128-bit nor 64-bit CAS => use lock */
+ } while (!odp_atomic_cas_rel_u128(&tb->tb_atomic_u128,
+ &old.tb_u128, new.tb_u128));
+#else
/* Take a related lock */
- while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
+ while (_odp_atomic_flag_tas(IDX2LOCK(tp, idx)))
/* While lock is taken, spin using relaxed loads */
- while (_odp_atomic_flag_load(IDX2LOCK(idx)))
+ while (_odp_atomic_flag_load(IDX2LOCK(tp, idx)))
odp_cpu_pause();
- /* Only if there is a timeout buffer can be reset the timer */
- if (odp_likely(tb->tmo_buf != ODP_BUFFER_INVALID)) {
+ /* Only if there is a timeout event can the timer be reset */
+ if (odp_likely(tb->tmo_event != ODP_EVENT_INVALID)) {
/* Write the new expiration tick */
tb->exp_tck.v = abs_tck;
} else {
/* Cannot reset a timer with neither old nor new
- * timeout buffer */
+ * timeout event */
success = false;
}
/* Release the lock */
- _odp_atomic_flag_clear(IDX2LOCK(idx));
+ _odp_atomic_flag_clear(IDX2LOCK(tp, idx));
#endif
} else {
- /* We have a new timeout buffer which replaces any old one */
+ /* We have a new timeout event which replaces any old one */
/* Fill in some (constant) header fields for timeout events */
- if (odp_event_type(odp_buffer_to_event(*tmo_buf)) ==
- ODP_EVENT_TIMEOUT) {
- /* Convert from buffer to timeout hdr */
+ if (odp_event_type(*tmo_event) == ODP_EVENT_TIMEOUT) {
+ /* Convert from event to timeout hdr */
odp_timeout_hdr_t *tmo_hdr =
- timeout_hdr_from_buf(*tmo_buf);
+ timeout_hdr_from_event(*tmo_event);
tmo_hdr->timer = tp_idx_to_handle(tp, idx);
tmo_hdr->user_ptr = tp->timers[idx].user_ptr;
/* expiration field filled in when timer expires */
}
- /* Else ignore buffers of other types */
- odp_buffer_t old_buf = ODP_BUFFER_INVALID;
-#ifdef ODP_ATOMIC_U128
+ /* Else ignore events of other types */
+ odp_event_t old_event = ODP_EVENT_INVALID;
+#if USE_128BIT_ATOMICS
tick_buf_t new, old;
+
+ /* Init all bits, also when tmo_event is less than 64 bits */
+ new.tmo_u64 = 0;
+
new.exp_tck.v = abs_tck;
- new.tmo_buf = *tmo_buf;
- TB_SET_PAD(new);
- /* We are releasing the new timeout buffer to some other
+ new.tmo_event = *tmo_event;
+
+ /* We are releasing the new timeout event to some other
* thread */
_odp_atomic_u128_xchg_mm((_odp_atomic_u128_t *)tb,
- (_uint128_t *)&new,
- (_uint128_t *)&old,
+ (_odp_u128_t *)&new,
+ (_odp_u128_t *)&old,
_ODP_MEMMODEL_ACQ_RLS);
- old_buf = old.tmo_buf;
+ old_event = old.tmo_event;
#else
/* Take a related lock */
- while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
+ while (_odp_atomic_flag_tas(IDX2LOCK(tp, idx)))
/* While lock is taken, spin using relaxed loads */
- while (_odp_atomic_flag_load(IDX2LOCK(idx)))
+ while (_odp_atomic_flag_load(IDX2LOCK(tp, idx)))
odp_cpu_pause();
- /* Swap in new buffer, save any old buffer */
- old_buf = tb->tmo_buf;
- tb->tmo_buf = *tmo_buf;
+ /* Swap in new event, save any old event */
+ old_event = tb->tmo_event;
+ tb->tmo_event = *tmo_event;
/* Write the new expiration tick */
tb->exp_tck.v = abs_tck;
/* Release the lock */
- _odp_atomic_flag_clear(IDX2LOCK(idx));
+ _odp_atomic_flag_clear(IDX2LOCK(tp, idx));
#endif
- /* Return old timeout buffer */
- *tmo_buf = old_buf;
+ /* Return old timeout event */
+ *tmo_event = old_event;
}
return success;
}
-static odp_buffer_t timer_cancel(odp_timer_pool *tp,
- uint32_t idx,
- uint64_t new_state)
+static odp_event_t timer_set_unused(timer_pool_t *tp, uint32_t idx)
{
tick_buf_t *tb = &tp->tick_buf[idx];
- odp_buffer_t old_buf;
+ odp_event_t old_event;
-#ifdef ODP_ATOMIC_U128
+#if USE_128BIT_ATOMICS
tick_buf_t new, old;
+
+ /* Init all bits, also when tmo_event is less than 64 bits */
+ new.tmo_u64 = 0;
+
/* Update the timer state (e.g. cancel the current timeout) */
- new.exp_tck.v = new_state;
- /* Swap out the old buffer */
- new.tmo_buf = ODP_BUFFER_INVALID;
- TB_SET_PAD(new);
+ new.exp_tck.v = TMO_UNUSED;
+ /* Swap out the old event */
+ new.tmo_event = ODP_EVENT_INVALID;
+
_odp_atomic_u128_xchg_mm((_odp_atomic_u128_t *)tb,
- (_uint128_t *)&new, (_uint128_t *)&old,
- _ODP_MEMMODEL_RLX);
- old_buf = old.tmo_buf;
+ (_odp_u128_t *)&new, (_odp_u128_t *)&old,
+ _ODP_MEMMODEL_ACQ_RLS);
+ old_event = old.tmo_event;
#else
/* Take a related lock */
- while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
+ while (_odp_atomic_flag_tas(IDX2LOCK(tp, idx)))
/* While lock is taken, spin using relaxed loads */
- while (_odp_atomic_flag_load(IDX2LOCK(idx)))
+ while (_odp_atomic_flag_load(IDX2LOCK(tp, idx)))
odp_cpu_pause();
/* Update the timer state (e.g. cancel the current timeout) */
- tb->exp_tck.v = new_state;
+ tb->exp_tck.v = TMO_UNUSED;
- /* Swap out the old buffer */
- old_buf = tb->tmo_buf;
- tb->tmo_buf = ODP_BUFFER_INVALID;
+ /* Swap out the old event */
+ old_event = tb->tmo_event;
+ tb->tmo_event = ODP_EVENT_INVALID;
/* Release the lock */
- _odp_atomic_flag_clear(IDX2LOCK(idx));
+ _odp_atomic_flag_clear(IDX2LOCK(tp, idx));
#endif
- /* Return the old buffer */
- return old_buf;
+ /* Return the old event */
+ return old_event;
}
-static unsigned timer_expire(odp_timer_pool *tp, uint32_t idx, uint64_t tick)
+int odp_timer_free(odp_timer_t hdl)
{
- odp_timer *tim = &tp->timers[idx];
+ timer_pool_t *tp = handle_to_tp(hdl);
+ uint32_t idx = handle_to_idx(hdl, tp);
+ _odp_timer_t *tim = &tp->timers[idx];
tick_buf_t *tb = &tp->tick_buf[idx];
- odp_buffer_t tmo_buf = ODP_BUFFER_INVALID;
+
+ /* Free the timer by setting timer state to unused and
+ * grab any timeout event */
+ odp_event_t old_event = timer_set_unused(tp, idx);
+ if (old_event != ODP_EVENT_INVALID) {
+ _ODP_ERR("Timer is active\n");
+ return -1;
+ }
+
+ /* Remove timer from queue */
+ _odp_queue_fn->timer_rem(tim->queue);
+
+ /* Destroy timer */
+ _ODP_ASSERT(tb->exp_tck.v == TMO_UNUSED);
+ _ODP_ASSERT(tb->tmo_event == ODP_EVENT_INVALID);
+ tim->queue = ODP_QUEUE_INVALID;
+ tim->user_ptr = NULL;
+
+ /* Insert timer into free list */
+ odp_spinlock_lock(&tp->lock);
+ set_next_free(tim, tp->first_free);
+ tp->first_free = idx;
+ _ODP_ASSERT(tp->num_alloc != 0);
+ tp->num_alloc--;
+ odp_spinlock_unlock(&tp->lock);
+
+ return 0;
+}
+
+static odp_event_t timer_cancel(timer_pool_t *tp, uint32_t idx)
+{
+ tick_buf_t *tb = &tp->tick_buf[idx];
+ odp_event_t old_event;
+
+#if USE_128BIT_ATOMICS
+ tick_buf_t new, old;
+
+ /* Init all bits, also when tmo_event is less than 64 bits */
+ new.tmo_u64 = 0;
+ old.tmo_u64 = 0;
+
+ /* Relaxed and non-atomic read of current values */
+ old.exp_tck.v = tb->exp_tck.v;
+ old.tmo_event = tb->tmo_event;
+
+ do {
+ /* Check if it is not expired already */
+ if (old.exp_tck.v & TMO_INACTIVE) {
+ old.tmo_event = ODP_EVENT_INVALID;
+ break;
+ }
+
+ /* Set up new values */
+ new.exp_tck.v = TMO_INACTIVE;
+ new.tmo_event = ODP_EVENT_INVALID;
+
+ /* Atomic CAS will fail if we experienced torn reads,
+ * retry update sequence until CAS succeeds */
+ } while (!odp_atomic_cas_rel_u128(&tb->tb_atomic_u128, &old.tb_u128,
+ new.tb_u128));
+
+ old_event = old.tmo_event;
+#else
+ /* Take a related lock */
+ while (_odp_atomic_flag_tas(IDX2LOCK(tp, idx)))
+ /* While lock is taken, spin using relaxed loads */
+ while (_odp_atomic_flag_load(IDX2LOCK(tp, idx)))
+ odp_cpu_pause();
+
+ /* Swap in new event, save any old event */
+ old_event = tb->tmo_event;
+ tb->tmo_event = ODP_EVENT_INVALID;
+
+	/* Write the new expiration tick if it is not cancelled */
+ if (tb->exp_tck.v & TMO_INACTIVE)
+ old_event = ODP_EVENT_INVALID;
+ else
+ tb->exp_tck.v = TMO_INACTIVE;
+
+ /* Release the lock */
+ _odp_atomic_flag_clear(IDX2LOCK(tp, idx));
+#endif
+ /* Return the old event */
+ return old_event;
+}
+
+static inline void timer_expire(timer_pool_t *tp, uint32_t idx, uint64_t tick)
+{
uint64_t exp_tck;
-#ifdef ODP_ATOMIC_U128
+ odp_queue_t queue = ODP_QUEUE_INVALID;
+ _odp_timer_t *tim = &tp->timers[idx];
+ tick_buf_t *tb = &tp->tick_buf[idx];
+ odp_event_t tmo_event = ODP_EVENT_INVALID;
+
+#if USE_128BIT_ATOMICS
/* Atomic re-read for correctness */
- exp_tck = _odp_atomic_u64_load_mm(&tb->exp_tck, _ODP_MEMMODEL_RLX);
+ exp_tck = odp_atomic_load_acq_u64(&tb->exp_tck);
/* Re-check exp_tck */
if (odp_likely(exp_tck <= tick)) {
- /* Attempt to grab timeout buffer, replace with inactive timer
- * and invalid buffer */
+ /* Attempt to grab timeout event, replace with inactive timer
+ * and invalid event. */
tick_buf_t new, old;
+
+ /* Read queue handle between acq and rel. Timer_free overwrites the handle after
+ * it sets tick value to inactive. */
+ queue = tim->queue;
+
+ /* Init all bits, also when tmo_event is less than 64 bits. */
+ new.tmo_u64 = 0;
+ old.tmo_u64 = 0;
+
old.exp_tck.v = exp_tck;
- old.tmo_buf = tb->tmo_buf;
- TB_SET_PAD(old);
+ old.tmo_event = tb->tmo_event;
+
/* Set the inactive/expired bit keeping the expiration tick so
* that we can check against the expiration tick of the timeout
* when it is received */
new.exp_tck.v = exp_tck | TMO_INACTIVE;
- new.tmo_buf = ODP_BUFFER_INVALID;
- TB_SET_PAD(new);
- int succ = _odp_atomic_u128_cmp_xchg_mm(
- (_odp_atomic_u128_t *)tb,
- (_uint128_t *)&old, (_uint128_t *)&new,
- _ODP_MEMMODEL_RLS, _ODP_MEMMODEL_RLX);
+ new.tmo_event = ODP_EVENT_INVALID;
+
+ int succ = odp_atomic_cas_rel_u128(&tb->tb_atomic_u128,
+ &old.tb_u128, new.tb_u128);
if (succ)
- tmo_buf = old.tmo_buf;
+ tmo_event = old.tmo_event;
/* Else CAS failed, something changed => skip timer
* this tick, it will be checked again next tick */
}
/* Else false positive, ignore */
#else
- /* Take a related lock */
- while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
- /* While lock is taken, spin using relaxed loads */
- while (_odp_atomic_flag_load(IDX2LOCK(idx)))
- odp_cpu_pause();
+ /* Try to take a related lock */
+ if (_odp_atomic_flag_tas(IDX2LOCK(tp, idx)))
+ return;
+
/* Proper check for timer expired */
exp_tck = tb->exp_tck.v;
if (odp_likely(exp_tck <= tick)) {
- /* Verify that there is a timeout buffer */
- if (odp_likely(tb->tmo_buf != ODP_BUFFER_INVALID)) {
- /* Grab timeout buffer, replace with inactive timer
- * and invalid buffer */
- tmo_buf = tb->tmo_buf;
- tb->tmo_buf = ODP_BUFFER_INVALID;
+ /* Verify that there is a timeout event */
+ if (odp_likely(tb->tmo_event != ODP_EVENT_INVALID)) {
+ queue = tim->queue;
+
+ /* Grab timeout event, replace with inactive timer
+ * and invalid event. */
+ tmo_event = tb->tmo_event;
+ tb->tmo_event = ODP_EVENT_INVALID;
/* Set the inactive/expired bit keeping the expiration
* tick so that we can check against the expiration
* tick of the timeout when it is received */
tb->exp_tck.v |= TMO_INACTIVE;
}
- /* Else somehow active timer without user buffer */
+ /* Else somehow active timer without user event */
}
/* Else false positive, ignore */
/* Release the lock */
- _odp_atomic_flag_clear(IDX2LOCK(idx));
+ _odp_atomic_flag_clear(IDX2LOCK(tp, idx));
#endif
- if (odp_likely(tmo_buf != ODP_BUFFER_INVALID)) {
+ if (odp_likely(tmo_event != ODP_EVENT_INVALID)) {
/* Fill in expiration tick for timeout events */
- if (odp_event_type(odp_buffer_to_event(tmo_buf)) ==
- ODP_EVENT_TIMEOUT) {
- /* Convert from buffer to timeout hdr */
+ if (odp_event_type(tmo_event) == ODP_EVENT_TIMEOUT) {
+ /* Convert from event to timeout hdr */
odp_timeout_hdr_t *tmo_hdr =
- timeout_hdr_from_buf(tmo_buf);
+ timeout_hdr_from_event(tmo_event);
tmo_hdr->expiration = exp_tck;
/* timer and user_ptr fields filled in when timer
* was set */
}
/* Else ignore events of other types */
/* Post the timeout to the destination queue */
- int rc = odp_queue_enq(tim->queue,
- odp_buffer_to_event(tmo_buf));
+ int rc = odp_queue_enq(queue, tmo_event);
+
if (odp_unlikely(rc != 0)) {
- odp_buffer_free(tmo_buf);
- ODP_ABORT("Failed to enqueue timeout buffer (%d)\n",
- rc);
+ _odp_event_free(tmo_event);
+ _ODP_ABORT("Failed to enqueue timeout event (%d)\n", rc);
}
- return 1;
- } else {
- /* Else false positive, ignore */
- return 0;
}
}
-static unsigned odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick)
+static inline uint64_t timer_pool_scan(timer_pool_t *tp, uint64_t tick)
{
- tick_buf_t *array = &tpid->tick_buf[0];
- uint32_t high_wm = _odp_atomic_u32_load_mm(&tpid->high_wm,
- _ODP_MEMMODEL_ACQ);
- unsigned nexp = 0;
+ tick_buf_t *array = &tp->tick_buf[0];
+ uint32_t high_wm = odp_atomic_load_acq_u32(&tp->high_wm);
uint32_t i;
+ uint64_t min = UINT64_MAX;
- ODP_ASSERT(high_wm <= tpid->param.num_timers);
- for (i = 0; i < high_wm;) {
+ _ODP_ASSERT(high_wm <= tp->param.num_timers);
+ for (i = 0; i < high_wm; i++) {
/* As a rare occurrence, we can outsmart the HW prefetcher
* and the compiler (GCC -fprefetch-loop-arrays) with some
* tuned manual prefetching (32x16=512B ahead), seems to
* give 30% better performance on ARM C-A15 */
__builtin_prefetch(&array[i + 32], 0, 0);
/* Non-atomic read for speed */
- uint64_t exp_tck = array[i++].exp_tck.v;
+ uint64_t exp_tck = array[i].exp_tck.v;
+
if (odp_unlikely(exp_tck <= tick)) {
/* Attempt to expire timer */
- nexp += timer_expire(tpid, i - 1, tick);
+ timer_expire(tp, i, tick);
+ min = 0;
+ } else {
+ min = _ODP_MIN(min, exp_tck - tick);
+ }
+ }
+
+ return min;
+}
+
+/******************************************************************************
+ * Inline timer processing
+ *****************************************************************************/
+
+static inline uint64_t timer_pool_scan_inline(int num, odp_time_t now, int force)
+{
+ timer_pool_t *tp;
+ uint64_t new_tick, old_tick, ticks_to_next_expire, nsec, min = UINT64_MAX;
+ int64_t diff;
+ int i;
+
+ for (i = 0; i < num; i++) {
+ tp = timer_global->timer_pool[i];
+
+ if (tp == NULL)
+ continue;
+
+ if (odp_likely(tp->owner < 0)) {
+ /* Skip shared pool, if this thread is not configured
+ * to process those */
+ if (odp_unlikely(timer_local.poll_shared == 0))
+ continue;
+ } else {
+ /* Skip private pool, if this thread is not the owner */
+ if (tp->owner != odp_thread_id())
+ continue;
}
+
+ nsec = odp_time_to_ns(now);
+ new_tick = nsec / tp->nsec_per_scan;
+ old_tick = odp_atomic_load_u64(&tp->cur_tick);
+ diff = new_tick - old_tick;
+
+ if (diff < 1 && !force)
+ continue;
+
+ if (odp_atomic_cas_u64(&tp->cur_tick, &old_tick, new_tick)) {
+ if (ODP_DEBUG_PRINT && odp_atomic_load_u32(&tp->notify_overrun) == 1 &&
+ diff > 1) {
+ if (old_tick == 0) {
+ _ODP_DBG("Timer pool (%s) missed %" PRIi64 " scans in start up\n",
+ tp->name, diff - 1);
+ } else {
+ _ODP_DBG("Timer pool (%s) resolution too high: %" PRIi64 " scans missed\n",
+ tp->name, diff - 1);
+ odp_atomic_store_u32(&tp->notify_overrun, 2);
+ }
+ }
+ ticks_to_next_expire = timer_pool_scan(tp, nsec);
+ min = _ODP_MIN(min, ticks_to_next_expire);
+ }
+ }
+
+ return min;
+}
+
+uint64_t _odp_timer_run_inline(int dec)
+{
+ odp_time_t now;
+ int num = timer_global->highest_tp_idx + 1;
+ int force = (dec == TIMER_SCAN_FORCE);
+ int poll_interval = force ? 0 : timer_global->poll_interval;
+
+ if (num == 0)
+ return UINT64_MAX;
+
+ /* Rate limit how often this thread checks the timer pools. */
+
+ if (poll_interval > 1) {
+ timer_local.run_cnt -= dec;
+ if (timer_local.run_cnt > 0)
+ return UINT64_MAX;
+ timer_local.run_cnt = poll_interval;
+ }
+
+ now = odp_time_global();
+
+ if (poll_interval > 1) {
+ odp_time_t period = odp_time_diff(now, timer_local.last_run);
+
+ if (odp_time_cmp(period,
+ timer_global->poll_interval_time) < 0)
+ return UINT64_MAX;
+ timer_local.last_run = now;
+ }
+
+ if (force) {
+ timer_local.run_cnt = poll_interval;
+ timer_local.last_run = now;
+ }
+
+ /* Check the timer pools. */
+ if (CONFIG_TIMER_PROFILE_INLINE) {
+ odp_time_t t1 = odp_time_local_strict();
+
+ uint64_t ret = timer_pool_scan_inline(num, now, force);
+ odp_time_t t2 = odp_time_local_strict();
+
+ timer_local.prof_nsec += odp_time_diff_ns(t2, t1);
+ timer_local.prof_rounds++;
+ return ret;
+ } else {
+ return timer_pool_scan_inline(num, now, force);
}
- return nexp;
}
/******************************************************************************
@@ -675,99 +901,190 @@ static unsigned odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick)
* Functions that use Linux/POSIX per-process timers and related facilities
*****************************************************************************/
-static void timer_notify(odp_timer_pool *tp)
+static inline void timer_run_posix(timer_pool_t *tp)
{
+ uint64_t nsec;
int overrun;
- int64_t prev_tick;
- if (tp->notify_overrun) {
+ if (ODP_DEBUG_PRINT && odp_atomic_load_u32(&tp->notify_overrun) == 1) {
overrun = timer_getoverrun(tp->timerid);
if (overrun) {
- ODP_ERR("\n\t%d ticks overrun on timer pool \"%s\", timer resolution too high\n",
- overrun, tp->name);
- tp->notify_overrun = 0;
+ _ODP_DBG("\n\t%d ticks overrun on timer pool \"%s\", timer resolution too high\n",
+ overrun, tp->name);
+ odp_atomic_store_u32(&tp->notify_overrun, 2);
}
}
- odp_timer *array = &tp->timers[0];
+ _odp_timer_t *array = &tp->timers[0];
uint32_t i;
/* Prefetch initial cache lines (match 32 above) */
for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / sizeof(array[0]))
__builtin_prefetch(&array[i], 0, 0);
- prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick);
-
- /* Scan timer array, looking for timers to expire */
- (void)odp_timer_pool_expire(tp, prev_tick + 1);
- /* Else skip scan of timers. cur_tick was updated and next itimer
- * invocation will process older expiration ticks as well */
+ nsec = odp_time_global_ns();
+ timer_pool_scan(tp, nsec);
}
static void *timer_thread(void *arg)
{
- odp_timer_pool *tp = (odp_timer_pool *)arg;
+ timer_pool_t *tp = (timer_pool_t *)arg;
sigset_t sigset;
int ret;
struct timespec tmo;
siginfo_t si;
+ int warm_up = tp->thr_warm_up;
+ int num = 0;
- tp->timer_thread_id = (pid_t)syscall(SYS_gettid);
-
- tmo.tv_sec = 0;
+ tmo.tv_sec = 0;
tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100;
+ /* Unblock sigalarm in this thread */
sigemptyset(&sigset);
- /* unblock sigalarm in this thread */
sigprocmask(SIG_BLOCK, &sigset, NULL);
-
sigaddset(&sigset, SIGALRM);
+ /* Signal that this thread has started */
+ odp_mb_full();
+ tp->thr_pid = (pid_t)syscall(SYS_gettid);
+ odp_mb_full();
+
while (1) {
ret = sigtimedwait(&sigset, &si, &tmo);
- if (tp->timer_thread_exit) {
- tp->timer_thread_id = 0;
+
+ if (tp->thr_exit) {
+ tp->thr_pid = 0;
return NULL;
}
- if (ret > 0)
- timer_notify(tp);
+
+ if (ret <= 0)
+ continue;
+
+ timer_run_posix(tp);
+
+ if (num < warm_up) {
+ num++;
+
+ if (num == warm_up)
+ odp_atomic_store_rel_u32(&tp->thr_ready, 1);
+ }
}
return NULL;
}
-static void itimer_init(odp_timer_pool *tp)
+/* Get the max timer resolution without overrun and fill in timer_res variable.
+ *
+ * Set timer's interval with candidate resolutions to get the max resolution
+ * that the timer would not be overrun.
+ * The candidate resolution value is from 1ms to 100us, 10us...1ns etc.
+ */
+static int timer_res_init(void)
+{
+ struct sigevent sigev;
+ timer_t timerid;
+ uint64_t res, sec, nsec;
+ struct itimerspec ispec;
+ sigset_t sigset;
+ siginfo_t si;
+ int loop_cnt;
+ struct timespec tmo;
+
+ sigev.sigev_notify = SIGEV_THREAD_ID;
+ sigev._sigev_un._tid = (pid_t)syscall(SYS_gettid);
+ sigev.sigev_value.sival_ptr = NULL;
+ sigev.sigev_signo = SIGUSR1;
+
+ /* Create timer */
+ if (timer_create(CLOCK_MONOTONIC, &sigev, &timerid))
+ _ODP_ABORT("timer_create() returned error %s\n", strerror(errno));
+
+ /* Timer resolution start from 1ms */
+ res = ODP_TIME_MSEC_IN_NS;
+ /* Set initial value of timer_res */
+ timer_global->highest_res_ns = res;
+ sigemptyset(&sigset);
+ /* Add SIGUSR1 to sigset */
+ sigaddset(&sigset, SIGUSR1);
+ sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+ while (res > 0) {
+ /* Loop for 10 times to test the result */
+ loop_cnt = TIMER_RES_TEST_LOOP_COUNT;
+ sec = res / ODP_TIME_SEC_IN_NS;
+ nsec = res - sec * ODP_TIME_SEC_IN_NS;
+
+ memset(&ispec, 0, sizeof(ispec));
+ ispec.it_interval.tv_sec = (time_t)sec;
+ ispec.it_interval.tv_nsec = (long)nsec;
+ ispec.it_value.tv_sec = (time_t)sec;
+ ispec.it_value.tv_nsec = (long)nsec;
+
+ if (timer_settime(timerid, 0, &ispec, NULL))
+ _ODP_ABORT("timer_settime() returned error %s\n", strerror(errno));
+ /* Set signal wait timeout to 10*res */
+ tmo.tv_sec = 0;
+ tmo.tv_nsec = res * 10;
+ while (loop_cnt--) {
+ if (sigtimedwait(&sigset, &si, &tmo) > 0) {
+ if (timer_getoverrun(timerid))
+ /* overrun at this resolution */
+ /* goto the end */
+ goto timer_res_init_done;
+ }
+ }
+ /* Set timer_res */
+ timer_global->highest_res_ns = res;
+ /* Test the next timer resolution candidate */
+ res /= 10;
+ }
+
+timer_res_init_done:
+ timer_global->highest_res_ns *= TIMER_RES_ROUNDUP_FACTOR;
+ if (timer_delete(timerid) != 0)
+ _ODP_ABORT("timer_delete() returned error %s\n", strerror(errno));
+ sigemptyset(&sigset);
+ sigprocmask(SIG_BLOCK, &sigset, NULL);
+ return 0;
+}
+
+static void posix_timer_start(timer_pool_t *tp)
{
struct sigevent sigev;
struct itimerspec ispec;
uint64_t res, sec, nsec;
int ret;
- ODP_DBG("Creating POSIX timer for timer pool %s, period %"
- PRIu64" ns\n", tp->name, tp->param.res_ns);
+ _ODP_DBG("Creating POSIX timer for timer pool %s, period %" PRIu64 " ns\n",
+ tp->name, tp->param.res_ns);
+
+ res = tp->param.res_ns;
+ sec = res / ODP_TIME_SEC_IN_NS;
+ nsec = res - sec * ODP_TIME_SEC_IN_NS;
+
+ tp->thr_pid = 0;
+ tp->thr_warm_up = 1;
+
+ /* 20ms warm up */
+ if (res < (20 * ODP_TIME_MSEC_IN_NS))
+ tp->thr_warm_up = (20 * ODP_TIME_MSEC_IN_NS) / res;
- tp->timer_thread_id = 0;
- ret = pthread_create(&tp->timer_thread, NULL, timer_thread, tp);
+ odp_atomic_init_u32(&tp->thr_ready, 0);
+ ret = pthread_create(&tp->thr_pthread, NULL, timer_thread, tp);
if (ret)
- ODP_ABORT("unable to create timer thread\n");
+ _ODP_ABORT("Unable to create timer thread: %d\n", ret);
- /* wait thread set tp->timer_thread_id */
- do {
+ /* wait thread set tp->thr_pid */
+ while (tp->thr_pid == 0)
sched_yield();
- } while (tp->timer_thread_id == 0);
memset(&sigev, 0, sizeof(sigev));
sigev.sigev_notify = SIGEV_THREAD_ID;
sigev.sigev_value.sival_ptr = tp;
- sigev._sigev_un._tid = tp->timer_thread_id;
+ sigev._sigev_un._tid = tp->thr_pid;
sigev.sigev_signo = SIGALRM;
if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid))
- ODP_ABORT("timer_create() returned error %s\n",
- strerror(errno));
-
- res = tp->param.res_ns;
- sec = res / ODP_TIME_SEC_IN_NS;
- nsec = res - sec * ODP_TIME_SEC_IN_NS;
+ _ODP_ABORT("timer_create() returned error %s\n", strerror(errno));
memset(&ispec, 0, sizeof(ispec));
ispec.it_interval.tv_sec = (time_t)sec;
@@ -776,15 +1093,225 @@ static void itimer_init(odp_timer_pool *tp)
ispec.it_value.tv_nsec = (long)nsec;
if (timer_settime(tp->timerid, 0, &ispec, NULL))
- ODP_ABORT("timer_settime() returned error %s\n",
- strerror(errno));
+ _ODP_ABORT("timer_settime() returned error %s\n", strerror(errno));
+
+ /* Wait response from timer thread that warm up signals have been
+ * processed. Warm up helps avoiding overrun on the first timeout. */
+ while (odp_atomic_load_acq_u32(&tp->thr_ready) == 0)
+ sched_yield();
+
+ if (ODP_DEBUG_PRINT) {
+ uint32_t old_val = 0;
+
+ odp_atomic_cas_u32(&tp->notify_overrun, &old_val, 1);
+ }
}
-static void itimer_fini(odp_timer_pool *tp)
+static odp_timer_pool_t timer_pool_new(const char *name, const odp_timer_pool_param_t *param)
{
- if (timer_delete(tp->timerid) != 0)
- ODP_ABORT("timer_delete() returned error %s\n",
- strerror(errno));
+ uint32_t i;
+ int tp_idx;
+ size_t sz0, sz1, sz2;
+ uint64_t tp_size;
+ uint64_t res_ns, nsec_per_scan;
+ odp_shm_t shm;
+ timer_pool_t *tp;
+ odp_time_t diff, time;
+ odp_time_t max_diff = ODP_TIME_NULL;
+ double base_freq = 0.0;
+ uint64_t max_multiplier = 0;
+ uint32_t flags = 0;
+ int periodic = (param->timer_type == ODP_TIMER_TYPE_PERIODIC) ? 1 : 0;
+
+ if (param->res_ns)
+ res_ns = param->res_ns;
+ else
+ res_ns = GIGA_HZ / param->res_hz;
+
+ if (periodic) {
+ uint64_t max_capa, min_period_ns;
+
+ base_freq = odp_fract_u64_to_dbl(&param->periodic.base_freq_hz);
+ max_multiplier = param->periodic.max_multiplier;
+
+ if (base_freq < MIN_BASE_HZ || base_freq > timer_global->max_base_hz) {
+ _ODP_ERR("Bad base frequency: %f\n", base_freq);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ max_capa = max_multiplier_capa(base_freq);
+
+ if (max_multiplier == 0 || max_multiplier > max_capa) {
+ _ODP_ERR("Bad max multiplier: %" PRIu64 "\n", max_multiplier);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ min_period_ns = GIGA_HZ / (base_freq * max_multiplier);
+
+ if (res_ns > min_period_ns)
+ res_ns = min_period_ns;
+ }
+
+ if (odp_global_ro.shm_single_va)
+ flags |= ODP_SHM_SINGLE_VA;
+
+ time = odp_time_global();
+
+ odp_ticketlock_lock(&timer_global->lock);
+
+ if (timer_global->num_timer_pools >= MAX_TIMER_POOLS) {
+ odp_ticketlock_unlock(&timer_global->lock);
+ _ODP_DBG("No more free timer pools\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ /* Find timer pool that has not been used for a while, or is used least recently.
+ * This ensures that inline scan of an old timer pool has completed and its memory
+ * can be freed. */
+ tp_idx = -1;
+ for (i = 0; i < MAX_TIMER_POOLS; i++) {
+ if (timer_global->timer_pool_used[i] == 0) {
+ diff = odp_time_diff(time, timer_global->destroy_time[i]);
+
+ if (odp_time_to_ns(diff) > TIMER_POOL_REUSE_NS) {
+ tp_idx = i;
+ break;
+ }
+
+ if (odp_time_cmp(diff, max_diff) > 0) {
+ max_diff = diff;
+ tp_idx = i;
+ }
+ }
+ }
+
+ if (tp_idx < 0) {
+ odp_ticketlock_unlock(&timer_global->lock);
+ _ODP_DBG("Did not find free timer pool\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ shm = timer_global->tp_shm[tp_idx];
+ timer_global->timer_pool_used[tp_idx] = 1;
+ timer_global->num_timer_pools++;
+
+ odp_ticketlock_unlock(&timer_global->lock);
+
+ /* Free memory of previously destroyed timer pool */
+ if (shm != ODP_SHM_INVALID) {
+ if (odp_shm_free(shm)) {
+ _ODP_ERR("Failed to free shared memory: tp_idx %i\n", tp_idx);
+ goto error;
+ }
+ }
+
+ sz0 = _ODP_ROUNDUP_CACHE_LINE(sizeof(timer_pool_t));
+ sz1 = _ODP_ROUNDUP_CACHE_LINE(sizeof(tick_buf_t) * param->num_timers);
+ sz2 = _ODP_ROUNDUP_CACHE_LINE(sizeof(_odp_timer_t) * param->num_timers);
+ tp_size = sz0 + sz1 + sz2;
+
+ shm = odp_shm_reserve(name, tp_size, ODP_CACHE_LINE_SIZE, flags);
+
+ if (odp_unlikely(shm == ODP_SHM_INVALID)) {
+ _ODP_ERR("Timer pool shm reserve failed %" PRIu64 "kB\n", tp_size / 1024);
+ goto error;
+ }
+
+ tp = (timer_pool_t *)odp_shm_addr(shm);
+ memset(tp, 0, tp_size);
+
+ tp->periodic = periodic;
+
+ /* Scan timer pool twice during resolution interval */
+ if (res_ns > ODP_TIME_USEC_IN_NS)
+ nsec_per_scan = res_ns / 2;
+ else
+ nsec_per_scan = res_ns;
+
+ tp->nsec_per_scan = nsec_per_scan;
+
+ odp_atomic_init_u64(&tp->cur_tick, 0);
+
+ if (name == NULL) {
+ tp->name[0] = 0;
+ } else {
+ strncpy(tp->name, name, ODP_TIMER_POOL_NAME_LEN - 1);
+ tp->name[ODP_TIMER_POOL_NAME_LEN - 1] = 0;
+ }
+
+ tp->param = *param;
+ tp->param.res_ns = res_ns;
+ if (periodic) {
+ tp->base_freq = base_freq;
+ tp->max_multiplier = max_multiplier;
+ } else {
+ tp->min_rel_tck = odp_timer_ns_to_tick(timer_pool_to_hdl(tp), param->min_tmo);
+ tp->max_rel_tck = odp_timer_ns_to_tick(timer_pool_to_hdl(tp), param->max_tmo);
+ }
+ tp->num_alloc = 0;
+ odp_atomic_init_u32(&tp->high_wm, 0);
+ odp_atomic_init_u32(&tp->notify_overrun, 0);
+ tp->first_free = 0;
+ tp->owner = -1;
+
+ if (param->priv)
+ tp->owner = odp_thread_id();
+
+ tp->tick_buf = (void *)((char *)odp_shm_addr(shm) + sz0);
+ tp->timers = (void *)((char *)odp_shm_addr(shm) + sz0 + sz1);
+
+#if !USE_128BIT_ATOMICS
+ for (i = 0; i < NUM_LOCKS; i++)
+ _odp_atomic_flag_clear(&tp->locks[i]);
+#endif
+
+ /* Initialize all odp_timer entries */
+ for (i = 0; i < tp->param.num_timers; i++) {
+ tp->timers[i].queue = ODP_QUEUE_INVALID;
+ set_next_free(&tp->timers[i], i + 1);
+ tp->timers[i].user_ptr = NULL;
+ odp_atomic_init_u64(&tp->tick_buf[i].exp_tck, TMO_UNUSED);
+ tp->tick_buf[i].tmo_event = ODP_EVENT_INVALID;
+ }
+ tp->tp_idx = tp_idx;
+ odp_spinlock_init(&tp->lock);
+
+ odp_ticketlock_lock(&timer_global->lock);
+
+ /* Inline timer scan may find the timer pool after this */
+ odp_mb_release();
+ timer_global->timer_pool[tp_idx] = tp;
+ timer_global->tp_shm[tp_idx] = shm;
+
+ if (timer_global->num_timer_pools == 1)
+ odp_global_rw->inline_timers = timer_global->use_inline_timers;
+
+ /* Increase poll rate to match the highest resolution */
+ if (timer_global->poll_interval_nsec > nsec_per_scan) {
+ timer_global->poll_interval_nsec = nsec_per_scan;
+ timer_global->poll_interval_time =
+ odp_time_global_from_ns(nsec_per_scan);
+ }
+
+ /* Update the highest index for inline timer scan */
+ if (tp_idx > timer_global->highest_tp_idx)
+ timer_global->highest_tp_idx = tp_idx;
+
+ odp_ticketlock_unlock(&timer_global->lock);
+
+ if (!odp_global_rw->inline_timers)
+ posix_timer_start(tp);
+
+ return timer_pool_to_hdl(tp);
+
+error:
+ odp_ticketlock_lock(&timer_global->lock);
+ timer_global->tp_shm[tp_idx] = shm;
+ timer_global->timer_pool_used[tp_idx] = 0;
+ timer_global->num_timer_pools--;
+ odp_ticketlock_unlock(&timer_global->lock);
+
+ return ODP_TIMER_POOL_INVALID;
}
/******************************************************************************
@@ -792,17 +1319,147 @@ static void itimer_fini(odp_timer_pool *tp)
* Some parameter checks and error messages
* No modificatios of internal state
*****************************************************************************/
-odp_timer_pool_t
-odp_timer_pool_create(const char *name,
- const odp_timer_pool_param_t *param)
-{
- /* Verify that buffer pool can be used for timeouts */
- /* Verify that we have a valid (non-zero) timer resolution */
- if (param->res_ns == 0) {
- __odp_errno = EINVAL;
+int odp_timer_capability(odp_timer_clk_src_t clk_src,
+ odp_timer_capability_t *capa)
+{
+ if (clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", clk_src);
+ return -1;
+ }
+
+ memset(capa, 0, sizeof(odp_timer_capability_t));
+
+ capa->max_pools_combined = MAX_TIMER_POOLS;
+ capa->max_pools = MAX_TIMER_POOLS;
+ capa->max_timers = 0;
+ capa->periodic.max_pools = MAX_TIMER_POOLS;
+ capa->periodic.max_timers = MAX_PERIODIC_TIMERS;
+ capa->highest_res_ns = timer_global->highest_res_ns;
+ capa->max_res.res_ns = timer_global->highest_res_ns;
+ capa->max_res.res_hz = timer_global->highest_res_hz;
+ capa->max_res.min_tmo = 0;
+ capa->max_res.max_tmo = MAX_TMO_NSEC;
+ capa->max_tmo.res_ns = timer_global->highest_res_ns;
+ capa->max_tmo.res_hz = timer_global->highest_res_hz;
+ capa->max_tmo.min_tmo = 0;
+ capa->max_tmo.max_tmo = MAX_TMO_NSEC;
+ capa->queue_type_sched = true;
+ capa->queue_type_plain = true;
+
+ capa->periodic.min_base_freq_hz.integer = MIN_BASE_HZ;
+ capa->periodic.max_base_freq_hz.integer = timer_global->max_base_hz;
+
+ return 0;
+}
+
+int odp_timer_res_capability(odp_timer_clk_src_t clk_src,
+ odp_timer_res_capability_t *res_capa)
+{
+ if (clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", clk_src);
+ return -1;
+ }
+
+ if (res_capa->min_tmo) {
+ _ODP_ERR("Only res_ns or max_tmo based queries supported\n");
+ return -1;
+ }
+
+ if (res_capa->res_ns || res_capa->res_hz) {
+ if (res_capa->res_ns && res_capa->res_ns < timer_global->highest_res_ns) {
+ _ODP_DBG("Timeout resolution capability (res_ns) exceeded\n");
+ return -1;
+ }
+ if (res_capa->res_hz && res_capa->res_hz > timer_global->highest_res_hz) {
+ _ODP_DBG("Timeout resolution capability (res_hz) exceeded\n");
+ return -1;
+ }
+ res_capa->min_tmo = 0;
+ res_capa->max_tmo = MAX_TMO_NSEC;
+ } else { /* max_tmo */
+ if (res_capa->max_tmo > MAX_TMO_NSEC) {
+ _ODP_DBG("Maximum relative timeout capability (max_tmo) exceeded\n");
+ return -1;
+ }
+ res_capa->min_tmo = 0;
+ res_capa->res_ns = timer_global->highest_res_ns;
+ res_capa->res_hz = timer_global->highest_res_hz;
+ }
+
+ return 0;
+}
+
+int odp_timer_periodic_capability(odp_timer_clk_src_t clk_src,
+ odp_timer_periodic_capability_t *capa)
+{
+ double freq;
+ uint64_t multiplier;
+
+ if (clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", clk_src);
+ return -1;
+ }
+
+ freq = odp_fract_u64_to_dbl(&capa->base_freq_hz);
+ if (freq < MIN_BASE_HZ || freq > timer_global->max_base_hz) {
+ _ODP_ERR("Base frequency not supported (min: %f, max %f)\n",
+ (double)MIN_BASE_HZ, (double)timer_global->max_base_hz);
+ return -1;
+ }
+
+ multiplier = max_multiplier_capa(freq);
+
+ if (capa->max_multiplier > multiplier)
+ return -1;
+
+ if (capa->res_ns && capa->res_ns < timer_global->highest_res_ns)
+ return -1;
+
+ /* Update capa with supported values */
+ capa->max_multiplier = multiplier;
+ capa->res_ns = timer_global->highest_res_ns;
+
+ /* All base frequencies within the range are supported */
+ return 1;
+}
+
+void odp_timer_pool_param_init(odp_timer_pool_param_t *param)
+{
+ memset(param, 0, sizeof(odp_timer_pool_param_t));
+ param->timer_type = ODP_TIMER_TYPE_SINGLE;
+ param->clk_src = ODP_CLOCK_DEFAULT;
+ param->exp_mode = ODP_TIMER_EXP_AFTER;
+}
+
+odp_timer_pool_t odp_timer_pool_create(const char *name,
+ const odp_timer_pool_param_t *param)
+{
+ if (odp_global_ro.init_param.not_used.feat.timer) {
+ _ODP_ERR("Trying to use disabled ODP feature.\n");
return ODP_TIMER_POOL_INVALID;
}
- return odp_timer_pool_new(name, param);
+
+ if (param->clk_src != ODP_CLOCK_DEFAULT) {
+ _ODP_ERR("Only ODP_CLOCK_DEFAULT supported. Requested %i.\n", param->clk_src);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->timer_type != ODP_TIMER_TYPE_SINGLE &&
+ param->timer_type != ODP_TIMER_TYPE_PERIODIC) {
+ _ODP_ERR("Bad timer type %i\n", param->timer_type);
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if ((param->res_ns && param->res_hz) || (param->res_ns == 0 && param->res_hz == 0))
+ return ODP_TIMER_POOL_INVALID;
+
+ if (param->res_hz == 0 && param->res_ns < timer_global->highest_res_ns)
+ return ODP_TIMER_POOL_INVALID;
+
+ if (param->res_ns == 0 && param->res_hz > timer_global->highest_res_hz)
+ return ODP_TIMER_POOL_INVALID;
+
+ return timer_pool_new(name, param);
}
void odp_timer_pool_start(void)
@@ -810,34 +1467,76 @@ void odp_timer_pool_start(void)
/* Nothing to do here, timer pools are started by the create call */
}
-void odp_timer_pool_destroy(odp_timer_pool_t tpid)
+int odp_timer_pool_start_multi(odp_timer_pool_t timer_pool[], int num)
{
- odp_timer_pool_del(tpid);
-}
+ _ODP_ASSERT(timer_pool != NULL);
+ _ODP_ASSERT(num > 0);
+ if (ODP_DEBUG) {
+ for (int i = 0; i < num; i++)
+ _ODP_ASSERT(timer_pool[i] != ODP_TIMER_POOL_INVALID);
+ }
-uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tpid, uint64_t ticks)
-{
- return ticks * tpid->param.res_ns;
+ /* Nothing to do here, timer pools are started by the create call. */
+ return num;
}
-uint64_t odp_timer_ns_to_tick(odp_timer_pool_t tpid, uint64_t ns)
+void odp_timer_pool_destroy(odp_timer_pool_t tpid)
{
- return (uint64_t)(ns / tpid->param.res_ns);
+ odp_timer_pool_del(timer_pool_from_hdl(tpid));
}
-uint64_t odp_timer_current_tick(odp_timer_pool_t tpid)
+int odp_timer_sample_ticks(odp_timer_pool_t timer_pool[], uint64_t tick[], uint64_t clk_count[],
+ int num)
{
- /* Relaxed atomic read for lowest overhead */
- return odp_atomic_load_u64(&tpid->cur_tick);
+ uint64_t nsec;
+ int i;
+
+ if (num <= 0 || num > MAX_TIMER_POOLS) {
+ _ODP_ERR("Bad number of timer pools: %i\n", num);
+ return -1;
+ }
+
+ for (i = 0; i < num; i++) {
+ if (odp_unlikely(timer_pool[i] == ODP_TIMER_POOL_INVALID)) {
+ _ODP_ERR("Invalid timer pool\n");
+ return -1;
+ }
+ }
+
+ nsec = odp_time_global_ns();
+
+ for (i = 0; i < num; i++) {
+ tick[i] = nsec;
+
+ if (clk_count)
+ clk_count[i] = tick[i];
+ }
+
+ return 0;
}
-int odp_timer_pool_info(odp_timer_pool_t tpid,
- odp_timer_pool_info_t *buf)
+int odp_timer_pool_info(odp_timer_pool_t tpid, odp_timer_pool_info_t *tp_info)
{
- buf->param = tpid->param;
- buf->cur_timers = tpid->num_alloc;
- buf->hwm_timers = odp_atomic_load_u32(&tpid->high_wm);
- buf->name = tpid->name;
+ timer_pool_t *tp;
+
+ if (odp_unlikely(tpid == ODP_TIMER_POOL_INVALID)) {
+ _ODP_ERR("Invalid timer pool.\n");
+ return -1;
+ }
+
+ tp = timer_pool_from_hdl(tpid);
+
+ memset(tp_info, 0, sizeof(odp_timer_pool_info_t));
+ tp_info->param = tp->param;
+ tp_info->cur_timers = tp->num_alloc;
+ tp_info->hwm_timers = odp_atomic_load_u32(&tp->high_wm);
+ tp_info->name = tp->name;
+
+ /* One API timer tick is one nsec. Leave source clock information to zero
+ * as there is no direct link between a source clock signal and a timer tick. */
+ tp_info->tick_info.freq.integer = ODP_TIME_SEC_IN_NS;
+ tp_info->tick_info.nsec.integer = 1;
+
return 0;
}
@@ -846,96 +1545,269 @@ uint64_t odp_timer_pool_to_u64(odp_timer_pool_t tpid)
return _odp_pri(tpid);
}
-odp_timer_t odp_timer_alloc(odp_timer_pool_t tpid,
- odp_queue_t queue,
- void *user_ptr)
+odp_timer_t odp_timer_alloc(odp_timer_pool_t tpid, odp_queue_t queue, const void *user_ptr)
{
+ timer_pool_t *tp = timer_pool_from_hdl(tpid);
+
if (odp_unlikely(tpid == ODP_TIMER_POOL_INVALID)) {
- ODP_ERR("Invalid timer pool.\n");
+ _ODP_ERR("Invalid timer pool.\n");
return ODP_TIMER_INVALID;
}
if (odp_unlikely(queue == ODP_QUEUE_INVALID)) {
- ODP_ERR("%s: Invalid queue handle\n", tpid->name);
+ _ODP_ERR("%s: Invalid queue handle\n", tp->name);
return ODP_TIMER_INVALID;
}
/* We don't care about the validity of user_ptr because we will not
* attempt to dereference it */
- return timer_alloc(tpid, queue, user_ptr);
+ return timer_alloc(tp, queue, user_ptr);
}
-odp_event_t odp_timer_free(odp_timer_t hdl)
+int odp_timer_start(odp_timer_t timer, const odp_timer_start_t *start_param)
{
- odp_timer_pool *tp = handle_to_tp(hdl);
- uint32_t idx = handle_to_idx(hdl, tp);
- odp_buffer_t old_buf = timer_free(tp, idx);
- return odp_buffer_to_event(old_buf);
+ uint64_t abs_tick, rel_tick;
+ timer_pool_t *tp = handle_to_tp(timer);
+ uint64_t cur_tick = odp_time_global_ns();
+ uint32_t idx = handle_to_idx(timer, tp);
+ odp_event_t tmo_ev = start_param->tmo_ev;
+
+ if (start_param->tick_type == ODP_TIMER_TICK_ABS) {
+ abs_tick = start_param->tick;
+ rel_tick = abs_tick - cur_tick;
+
+ if (odp_unlikely(abs_tick < cur_tick + tp->min_rel_tck))
+ return ODP_TIMER_TOO_NEAR;
+ } else {
+ rel_tick = start_param->tick;
+ abs_tick = rel_tick + cur_tick;
+
+ if (odp_unlikely(rel_tick < tp->min_rel_tck))
+ return ODP_TIMER_TOO_NEAR;
+ }
+
+ if (odp_unlikely(rel_tick > tp->max_rel_tck))
+ return ODP_TIMER_TOO_FAR;
+
+ if (!timer_reset(idx, abs_tick, &tmo_ev, tp))
+ return ODP_TIMER_FAIL;
+
+ /* Check that timer was not active */
+ if (odp_unlikely(tmo_ev != ODP_EVENT_INVALID)) {
+ _ODP_ERR("Timer was active already\n");
+ odp_event_free(tmo_ev);
+ }
+
+ if (ODP_DEBUG_PRINT) {
+ uint32_t old_val = 0;
+
+ odp_atomic_cas_u32(&tp->notify_overrun, &old_val, 1);
+ }
+
+ return ODP_TIMER_SUCCESS;
}
-int odp_timer_set_abs(odp_timer_t hdl,
- uint64_t abs_tck,
- odp_event_t *tmo_ev)
+int odp_timer_restart(odp_timer_t timer, const odp_timer_start_t *start_param)
{
- odp_timer_pool *tp = handle_to_tp(hdl);
- uint32_t idx = handle_to_idx(hdl, tp);
- uint64_t cur_tick = odp_atomic_load_u64(&tp->cur_tick);
- if (odp_unlikely(abs_tck < cur_tick + tp->min_rel_tck))
- return ODP_TIMER_TOOEARLY;
- if (odp_unlikely(abs_tck > cur_tick + tp->max_rel_tck))
- return ODP_TIMER_TOOLATE;
- if (timer_reset(idx, abs_tck, (odp_buffer_t *)tmo_ev, tp))
- return ODP_TIMER_SUCCESS;
- else
- return ODP_TIMER_NOEVENT;
+ uint64_t abs_tick, rel_tick;
+ timer_pool_t *tp = handle_to_tp(timer);
+ uint64_t cur_tick = odp_time_global_ns();
+ uint32_t idx = handle_to_idx(timer, tp);
+
+ if (start_param->tick_type == ODP_TIMER_TICK_ABS) {
+ abs_tick = start_param->tick;
+ rel_tick = abs_tick - cur_tick;
+
+ if (odp_unlikely(abs_tick < cur_tick + tp->min_rel_tck))
+ return ODP_TIMER_TOO_NEAR;
+ } else {
+ rel_tick = start_param->tick;
+ abs_tick = rel_tick + cur_tick;
+
+ if (odp_unlikely(rel_tick < tp->min_rel_tck))
+ return ODP_TIMER_TOO_NEAR;
+ }
+
+ if (odp_unlikely(rel_tick > tp->max_rel_tck))
+ return ODP_TIMER_TOO_FAR;
+
+ /* Reset timer without changing the event */
+ if (!timer_reset(idx, abs_tick, NULL, tp))
+ return ODP_TIMER_FAIL;
+
+ return ODP_TIMER_SUCCESS;
}
-int odp_timer_set_rel(odp_timer_t hdl,
- uint64_t rel_tck,
- odp_event_t *tmo_ev)
+int odp_timer_periodic_start(odp_timer_t timer, const odp_timer_periodic_start_t *start_param)
{
- odp_timer_pool *tp = handle_to_tp(hdl);
- uint32_t idx = handle_to_idx(hdl, tp);
- uint64_t abs_tck = odp_atomic_load_u64(&tp->cur_tick) + rel_tck;
- if (odp_unlikely(rel_tck < tp->min_rel_tck))
- return ODP_TIMER_TOOEARLY;
- if (odp_unlikely(rel_tck > tp->max_rel_tck))
- return ODP_TIMER_TOOLATE;
- if (timer_reset(idx, abs_tck, (odp_buffer_t *)tmo_ev, tp))
- return ODP_TIMER_SUCCESS;
- else
- return ODP_TIMER_NOEVENT;
+ uint64_t abs_tick, period_ns;
+ timer_pool_t *tp = handle_to_tp(timer);
+ uint64_t cur_tick = odp_time_global_ns();
+ uint32_t idx = handle_to_idx(timer, tp);
+ odp_event_t tmo_ev = start_param->tmo_ev;
+ _odp_timer_t *tim = &tp->timers[idx];
+ uint64_t multiplier = start_param->freq_multiplier;
+ double freq = multiplier * tp->base_freq;
+ double period_ns_dbl;
+
+ if (odp_unlikely(!tp->periodic)) {
+ _ODP_ERR("Not a periodic timer\n");
+ return ODP_TIMER_FAIL;
+ }
+
+ if (odp_unlikely(multiplier == 0 || multiplier > tp->max_multiplier)) {
+ _ODP_ERR("Bad frequency multiplier: %" PRIu64 "\n", multiplier);
+ return ODP_TIMER_FAIL;
+ }
+
+ if (odp_unlikely(odp_event_type(tmo_ev) != ODP_EVENT_TIMEOUT)) {
+ _ODP_ERR("Event type is not timeout\n");
+ return ODP_TIMER_FAIL;
+ }
+
+ period_ns_dbl = (double)ODP_TIME_SEC_IN_NS / freq;
+ period_ns = period_ns_dbl;
+
+ if (period_ns == 0) {
+ _ODP_ERR("Too high periodic timer frequency: %f\n", freq);
+ return ODP_TIMER_FAIL;
+ }
+
+ if (period_ns & PERIODIC_CANCELLED) {
+ _ODP_ERR("Periodic timer frequency error: %f\n", freq);
+ return ODP_TIMER_FAIL;
+ }
+
+ tim->periodic_ticks = period_ns;
+ tim->periodic_ticks_frac = (period_ns_dbl - period_ns) * ACC_SIZE;
+ tim->periodic_ticks_frac_acc = 0;
+ abs_tick = start_param->first_tick;
+
+ if (abs_tick) {
+ if (odp_unlikely(abs_tick < cur_tick))
+ return ODP_TIMER_TOO_NEAR;
+
+ if (odp_unlikely(abs_tick > cur_tick + tim->periodic_ticks))
+ return ODP_TIMER_TOO_FAR;
+ } else {
+ abs_tick = cur_tick + tim->periodic_ticks;
+ }
+
+ if (!timer_reset(idx, abs_tick, &tmo_ev, tp))
+ return ODP_TIMER_FAIL;
+
+ /* Check that timer was not active */
+ if (odp_unlikely(tmo_ev != ODP_EVENT_INVALID)) {
+ _ODP_ERR("Timer was active already\n");
+ odp_event_free(tmo_ev);
+ }
+
+ return ODP_TIMER_SUCCESS;
+}
+
+int odp_timer_periodic_ack(odp_timer_t timer, odp_event_t tmo_ev)
+{
+ uint64_t abs_tick, acc;
+ odp_timeout_t tmo = odp_timeout_from_event(tmo_ev);
+ timer_pool_t *tp = handle_to_tp(timer);
+ uint32_t idx = handle_to_idx(timer, tp);
+ _odp_timer_t *tim = &tp->timers[idx];
+
+ if (odp_unlikely(odp_event_type(tmo_ev) != ODP_EVENT_TIMEOUT)) {
+ _ODP_ERR("Event type is not timeout\n");
+ return -1;
+ }
+
+ abs_tick = tim->periodic_ticks;
+
+ if (odp_unlikely(abs_tick & PERIODIC_CANCELLED)) {
+ /* Timer was tried to cancel earlier, stop now. */
+ return 2;
+ }
+
+ acc = (uint64_t)tim->periodic_ticks_frac_acc + (uint64_t)tim->periodic_ticks_frac;
+
+ if (acc >= ACC_SIZE) {
+ abs_tick++;
+ acc -= ACC_SIZE;
+ }
+
+ tim->periodic_ticks_frac_acc = acc;
+ abs_tick += odp_timeout_tick(tmo);
+
+ if (!timer_reset(idx, abs_tick, &tmo_ev, tp))
+ return -1;
+
+ /* This should never happen. Timer should be always inactive before
+ * timer_reset() call above. */
+ if (odp_unlikely(tmo_ev != ODP_EVENT_INVALID)) {
+ /* Reset returned an event, free it. */
+ _ODP_ERR("Timer was active already\n");
+ odp_event_free(tmo_ev);
+ }
+
+ return 0;
}
int odp_timer_cancel(odp_timer_t hdl, odp_event_t *tmo_ev)
{
- odp_timer_pool *tp = handle_to_tp(hdl);
+ timer_pool_t *tp = handle_to_tp(hdl);
uint32_t idx = handle_to_idx(hdl, tp);
/* Set the expiration tick of the timer to TMO_INACTIVE */
- odp_buffer_t old_buf = timer_cancel(tp, idx, TMO_INACTIVE);
- if (old_buf != ODP_BUFFER_INVALID) {
- *tmo_ev = odp_buffer_to_event(old_buf);
- return 0; /* Active timer cancelled, timeout returned */
- } else {
- return -1; /* Timer already expired, no timeout returned */
+ odp_event_t old_event = timer_cancel(tp, idx);
+
+ if (old_event != ODP_EVENT_INVALID) {
+ /* Active timer cancelled, timeout returned */
+ *tmo_ev = old_event;
+ return ODP_TIMER_SUCCESS;
}
-}
-uint64_t odp_timer_to_u64(odp_timer_t hdl)
-{
- return _odp_pri(hdl);
+ /* Timer already expired, no timeout returned */
+ return ODP_TIMER_TOO_NEAR;
}
-odp_timeout_t odp_timeout_from_event(odp_event_t ev)
+int odp_timer_periodic_cancel(odp_timer_t hdl)
{
- /* This check not mandated by the API specification */
- if (odp_event_type(ev) != ODP_EVENT_TIMEOUT)
- ODP_ABORT("Event not a timeout");
- return (odp_timeout_t)ev;
+ timer_pool_t *tp;
+ uint32_t idx;
+ _odp_timer_t *tim;
+ odp_event_t ev;
+
+ if (odp_unlikely(hdl == ODP_TIMER_INVALID)) {
+ _ODP_ERR("Bad timer pool handle\n");
+ return -1;
+ }
+
+ tp = handle_to_tp(hdl);
+
+ if (odp_unlikely(tp->periodic == 0)) {
+ _ODP_ERR("Not a periodic timer\n");
+ return -1;
+ }
+
+ idx = handle_to_idx(hdl, tp);
+ tim = &tp->timers[idx];
+ ev = timer_cancel(tp, idx);
+
+ /* Cancel failed on a periodic timer. Mark timer cancelled, so that
+ * a following ack call stops restarting it. */
+ tim->periodic_ticks |= PERIODIC_CANCELLED;
+
+ if (ev != ODP_EVENT_INVALID) {
+ /* Timer cancelled and timeout returned. Enqueue tmo, ack call will flag
+ * it as the last event. */
+ if (odp_unlikely(odp_queue_enq(tim->queue, ev))) {
+ _ODP_ERR("Failed to enqueue timeout event\n");
+ _odp_event_free(ev);
+ return -1;
+ }
+ }
+
+ return 0;
}
-odp_event_t odp_timeout_to_event(odp_timeout_t tmo)
+uint64_t odp_timer_to_u64(odp_timer_t hdl)
{
- return (odp_event_t)tmo;
+ return _odp_pri(hdl);
}
uint64_t odp_timeout_to_u64(odp_timeout_t tmo)
@@ -943,69 +1815,366 @@ uint64_t odp_timeout_to_u64(odp_timeout_t tmo)
return _odp_pri(tmo);
}
-int odp_timeout_fresh(odp_timeout_t tmo)
+int ODP_DEPRECATE(odp_timeout_fresh)(odp_timeout_t tmo)
{
const odp_timeout_hdr_t *hdr = timeout_hdr(tmo);
odp_timer_t hdl = hdr->timer;
- odp_timer_pool *tp = handle_to_tp(hdl);
+
+ /* Timeout not connected to a timer */
+ if (odp_unlikely(hdl == ODP_TIMER_INVALID))
+ return 0;
+
+ timer_pool_t *tp = handle_to_tp(hdl);
uint32_t idx = handle_to_idx(hdl, tp);
tick_buf_t *tb = &tp->tick_buf[idx];
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- uint64_t exp_tck = tb->exp_tck.v;
-#else
uint64_t exp_tck = odp_atomic_load_u64(&tb->exp_tck);
-#endif
+
/* Return true if the timer still has the same expiration tick
* (ignoring the inactive/expired bit) as the timeout */
return hdr->expiration == (exp_tck & ~TMO_INACTIVE);
}
-odp_timer_t odp_timeout_timer(odp_timeout_t tmo)
+odp_timeout_t odp_timeout_alloc(odp_pool_t pool_hdl)
+{
+ odp_timeout_hdr_t *hdr;
+ odp_event_t event;
+ pool_t *pool;
+
+ _ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_TIMEOUT);
+
+ event = _odp_event_alloc(pool);
+ if (odp_unlikely(event == ODP_EVENT_INVALID))
+ return ODP_TIMEOUT_INVALID;
+
+ hdr = timeout_hdr_from_event(event);
+ hdr->timer = ODP_TIMER_INVALID;
+
+ return odp_timeout_from_event(event);
+}
+
+int odp_timeout_alloc_multi(odp_pool_t pool_hdl, odp_timeout_t tmo[], int num)
{
- return timeout_hdr(tmo)->timer;
+ pool_t *pool;
+ int ret;
+
+ _ODP_ASSERT(pool_hdl != ODP_POOL_INVALID);
+ _ODP_ASSERT(tmo != NULL);
+ _ODP_ASSERT(num > 0);
+
+ pool = _odp_pool_entry(pool_hdl);
+
+ _ODP_ASSERT(pool->type == ODP_POOL_TIMEOUT);
+
+ ret = _odp_event_alloc_multi(pool, (_odp_event_hdr_t **)tmo, num);
+
+ for (int i = 0; i < ret; i++)
+ timeout_hdr(tmo[i])->timer = ODP_TIMER_INVALID;
+
+ return ret;
}
-uint64_t odp_timeout_tick(odp_timeout_t tmo)
+void odp_timeout_free(odp_timeout_t tmo)
{
- return timeout_hdr(tmo)->expiration;
+ _odp_event_free(odp_timeout_to_event(tmo));
}
-void *odp_timeout_user_ptr(odp_timeout_t tmo)
+void odp_timeout_free_multi(odp_timeout_t tmo[], int num)
{
- return timeout_hdr(tmo)->user_ptr;
+ _ODP_ASSERT(tmo != NULL);
+ _ODP_ASSERT(num > 0);
+
+ _odp_event_free_multi((_odp_event_hdr_t **)(uintptr_t)tmo, num);
}
-odp_timeout_t odp_timeout_alloc(odp_pool_t pool)
+void odp_timer_pool_print(odp_timer_pool_t timer_pool)
{
- odp_buffer_t buf = odp_buffer_alloc(pool);
- if (odp_unlikely(buf == ODP_BUFFER_INVALID))
- return ODP_TIMEOUT_INVALID;
- return odp_timeout_from_event(odp_buffer_to_event(buf));
+ timer_pool_t *tp;
+ int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
+
+ if (timer_pool == ODP_TIMER_POOL_INVALID) {
+ _ODP_ERR("Bad timer pool handle\n");
+ return;
+ }
+
+ tp = timer_pool_from_hdl(timer_pool);
+
+ len += _odp_snprint(&str[len], n - len, "Timer pool info\n");
+ len += _odp_snprint(&str[len], n - len, "---------------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_timer_pool_to_u64(timer_pool));
+ len += _odp_snprint(&str[len], n - len, " tp index %u\n", tp->tp_idx);
+ len += _odp_snprint(&str[len], n - len, " num timers %u\n", tp->num_alloc);
+ len += _odp_snprint(&str[len], n - len, " num tp %i\n",
+ timer_global->num_timer_pools);
+ len += _odp_snprint(&str[len], n - len, " inline timers %i\n",
+ timer_global->use_inline_timers);
+ len += _odp_snprint(&str[len], n - len, " periodic %i\n", tp->periodic);
+ str[len] = 0;
+
+ _ODP_PRINT("%s\n", str);
}
-void odp_timeout_free(odp_timeout_t tmo)
+void odp_timer_print(odp_timer_t timer)
+{
+ timer_pool_t *tp;
+ uint32_t idx;
+ _odp_timer_t *tim;
+ int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
+
+ if (timer == ODP_TIMER_INVALID) {
+ _ODP_ERR("Bad timer handle\n");
+ return;
+ }
+
+ tp = handle_to_tp(timer);
+ idx = handle_to_idx(timer, tp);
+ tim = &tp->timers[idx];
+
+ len += _odp_snprint(&str[len], n - len, "Timer info\n");
+ len += _odp_snprint(&str[len], n - len, "----------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_timer_to_u64(timer));
+ len += _odp_snprint(&str[len], n - len, " timer pool 0x%" PRIx64 "\n",
+ odp_timer_pool_to_u64(timer_pool_to_hdl(tp)));
+ len += _odp_snprint(&str[len], n - len, " timer index %u\n", idx);
+ len += _odp_snprint(&str[len], n - len, " dest queue 0x%" PRIx64 "\n",
+ odp_queue_to_u64(tim->queue));
+ len += _odp_snprint(&str[len], n - len, " user ptr %p\n", tim->user_ptr);
+ len += _odp_snprint(&str[len], n - len, " periodic ticks %" PRIu64 "\n",
+ tim->periodic_ticks & ~PERIODIC_CANCELLED);
+ str[len] = 0;
+
+ _ODP_PRINT("%s\n", str);
+}
+
+void odp_timeout_print(odp_timeout_t tmo)
{
- odp_event_t ev = odp_timeout_to_event(tmo);
- odp_buffer_free(odp_buffer_from_event(ev));
+ const odp_timeout_hdr_t *tmo_hdr;
+ odp_timer_t timer;
+ int len = 0;
+ int max_len = 512;
+ int n = max_len - 1;
+ char str[max_len];
+
+ if (tmo == ODP_TIMEOUT_INVALID) {
+ _ODP_ERR("Bad timeout handle\n");
+ return;
+ }
+
+ tmo_hdr = timeout_hdr(tmo);
+ timer = tmo_hdr->timer;
+
+ len += _odp_snprint(&str[len], n - len, "Timeout info\n");
+ len += _odp_snprint(&str[len], n - len, "------------\n");
+ len += _odp_snprint(&str[len], n - len, " handle 0x%" PRIx64 "\n",
+ odp_timeout_to_u64(tmo));
+ len += _odp_snprint(&str[len], n - len, " expiration %" PRIu64 "\n",
+ tmo_hdr->expiration);
+ len += _odp_snprint(&str[len], n - len, " user ptr %p\n", tmo_hdr->user_ptr);
+ len += _odp_snprint(&str[len], n - len, " user area %p\n", tmo_hdr->uarea_addr);
+
+ if (timer != ODP_TIMER_INVALID) {
+ timer_pool_t *tp = handle_to_tp(timer);
+ uint32_t idx = handle_to_idx(timer, tp);
+
+ len += _odp_snprint(&str[len], n - len, " timer pool 0x%" PRIx64 "\n",
+ odp_timer_pool_to_u64(timer_pool_to_hdl(tp)));
+ len += _odp_snprint(&str[len], n - len, " timer 0x%" PRIx64 "\n",
+ odp_timer_to_u64(timer));
+ len += _odp_snprint(&str[len], n - len, " timer index %u\n", idx);
+ len += _odp_snprint(&str[len], n - len, " periodic %i\n", tp->periodic);
+ }
+ str[len] = 0;
+
+ _ODP_PRINT("%s\n", str);
}
-int odp_timer_init_global(void)
+int _odp_timer_init_global(const odp_init_t *params)
{
-#ifndef ODP_ATOMIC_U128
+ odp_shm_t shm;
+ odp_time_t time;
+ const char *conf_str;
uint32_t i;
- for (i = 0; i < NUM_LOCKS; i++)
- _odp_atomic_flag_clear(&locks[i]);
+ int val = 0;
+
+ if (params && params->not_used.feat.timer) {
+ _ODP_DBG("Timers disabled\n");
+ timer_global = NULL;
+ return 0;
+ }
+
+ shm = odp_shm_reserve("_odp_timer_global", sizeof(timer_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ timer_global = odp_shm_addr(shm);
+
+ if (timer_global == NULL) {
+ _ODP_ERR("Shm reserve failed for odp_timer\n");
+ return -1;
+ }
+
+ memset(timer_global, 0, sizeof(timer_global_t));
+ odp_ticketlock_init(&timer_global->lock);
+ timer_global->shm = shm;
+ timer_global->highest_res_ns = MAX_INLINE_RES_NS;
+ timer_global->highest_tp_idx = -1;
+
+ time = odp_time_global();
+ for (i = 0; i < MAX_TIMER_POOLS; i++) {
+ timer_global->destroy_time[i] = time;
+ timer_global->tp_shm[i] = ODP_SHM_INVALID;
+ }
+
+#if USE_128BIT_ATOMICS
+ _ODP_PRINT("Timer using lock-less implementation\n");
#else
- ODP_DBG("Using lock-less timer implementation\n");
+ _ODP_PRINT("Timer using lock-based implementation\n");
#endif
- odp_atomic_init_u32(&num_timer_pools, 0);
- block_sigalarm();
+ _ODP_PRINT("Timer config:\n");
+
+ conf_str = "timer.inline";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ goto error;
+ }
+ timer_global->use_inline_timers = val;
+ _ODP_PRINT(" %s: %i\n", conf_str, val);
+
+ conf_str = "timer.inline_poll_interval";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ goto error;
+ }
+ timer_global->poll_interval = val;
+ _ODP_PRINT(" %s: %i\n", conf_str, val);
+
+ conf_str = "timer.inline_poll_interval_nsec";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ goto error;
+ }
+ timer_global->poll_interval_nsec = val;
+ timer_global->poll_interval_time =
+ odp_time_global_from_ns(timer_global->poll_interval_nsec);
+ _ODP_PRINT(" %s: %i\n", conf_str, val);
+
+ conf_str = "timer.inline_thread_type";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ _ODP_ERR("Config option '%s' not found.\n", conf_str);
+ goto error;
+ }
+ timer_global->thread_type = val;
+ _ODP_PRINT(" %s: %i\n", conf_str, val);
+ _ODP_PRINT("\n");
+
+ if (!timer_global->use_inline_timers) {
+ timer_res_init();
+ block_sigalarm();
+ }
+
+ /* timer_res_init() may update highest_res_ns */
+ timer_global->highest_res_hz = GIGA_HZ / timer_global->highest_res_ns;
+ timer_global->max_base_hz = timer_global->highest_res_hz;
return 0;
+
+error:
+ odp_shm_free(shm);
+ return -1;
}
-int odp_timer_term_global(void)
+int _odp_timer_term_global(void)
{
+ odp_shm_t shm;
+ int i;
+ int rc = 0;
+
+ if (timer_global == NULL)
+ return 0;
+
+ for (i = 0; i < MAX_TIMER_POOLS; i++) {
+ shm = timer_global->tp_shm[i];
+
+ if (timer_global->timer_pool_used[i]) {
+ _ODP_ERR("Not destroyed timer pool: %i\n", i);
+ rc = -1;
+
+ /* Prevent crash from timer thread */
+ if (!odp_global_rw->inline_timers) {
+ timer_pool_t *tp = timer_global->timer_pool[i];
+
+ if (tp != NULL)
+ posix_timer_stop(tp);
+ }
+ }
+ if (shm != ODP_SHM_INVALID) {
+ if (odp_shm_free(shm)) {
+ _ODP_ERR("Shm free failed for timer pool: %i\n", i);
+ rc = -1;
+ }
+ }
+ }
+
+ if (odp_shm_free(timer_global->shm)) {
+ _ODP_ERR("Shm free failed for timer_global\n");
+ return -1;
+ }
+
+ return rc;
+}
+
+int _odp_timer_init_local(void)
+{
+ int conf_thr_type;
+ odp_thread_type_t thr_type;
+
+ timer_local.last_run = odp_time_global_from_ns(0);
+ timer_local.run_cnt = 1;
+ timer_local.poll_shared = 0;
+ timer_local.prof_nsec = 0;
+ timer_local.prof_rounds = 0;
+
+ /* Timer feature disabled */
+ if (timer_global == NULL)
+ return 0;
+
+ /* Check if this thread polls shared (non-private) timer pools */
+ conf_thr_type = timer_global->thread_type;
+ thr_type = odp_thread_type();
+
+ if (conf_thr_type == 0)
+ timer_local.poll_shared = 1;
+ else if (conf_thr_type == 1 && thr_type == ODP_THREAD_WORKER)
+ timer_local.poll_shared = 1;
+ else if (conf_thr_type == 2 && thr_type == ODP_THREAD_CONTROL)
+ timer_local.poll_shared = 1;
+
+ return 0;
+}
+
+int _odp_timer_term_local(void)
+{
+ if (CONFIG_TIMER_PROFILE_INLINE) {
+ if (timer_local.prof_rounds) {
+ _ODP_PRINT("\n"
+ "Inline timer profiling for thread %i:\n"
+ "scan rounds: %" PRIu64 "\n"
+ "ave scan nsec: %.1f\n",
+ odp_thread_id(), timer_local.prof_rounds,
+ (double)timer_local.prof_nsec / timer_local.prof_rounds);
+ }
+ }
+
return 0;
}
diff --git a/platform/linux-generic/odp_timer_api.c b/platform/linux-generic/odp_timer_api.c
new file mode 100644
index 000000000..cd657956b
--- /dev/null
+++ b/platform/linux-generic/odp_timer_api.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/timer.h>
+
+/* Non-inlined functions for ABI compat mode */
+#define _ODP_NO_INLINE
+#include <odp/api/plat/timer_inlines.h>
diff --git a/platform/linux-generic/odp_timer_wheel.c b/platform/linux-generic/odp_timer_wheel.c
index f2c802a85..876e06730 100644
--- a/platform/linux-generic/odp_timer_wheel.c
+++ b/platform/linux-generic/odp_timer_wheel.c
@@ -1,6 +1,6 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -14,6 +14,7 @@
#include <odp_timer_wheel_internal.h>
#include <odp_traffic_mngr_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
/* The following constants can be changed either at compile time or run time
* as long as the following constraints are met (by the way REV stands for
@@ -93,9 +94,12 @@ typedef union {
timer_blk_t *timer_blk_list;
} current_timer_slot_t;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef struct {
current_timer_slot_t slots[0];
} current_wheel_t;
+#pragma GCC diagnostic pop
typedef struct {
uint32_t count;
@@ -104,7 +108,7 @@ typedef struct {
uint32_t head_idx;
uint32_t tail_idx;
uint32_t max_idx;
- current_timer_slot_t entries[0];
+ current_timer_slot_t entries[];
} expired_ring_t;
typedef struct {
@@ -129,9 +133,12 @@ typedef union { /* Each general_timer_slot is 16 bytes long. */
list_entry_t list_entry;
} general_timer_slot_t;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wpedantic"
typedef struct {
general_timer_slot_t slots[0];
} general_wheel_t;
+#pragma GCC diagnostic pop
typedef struct {
/* Note that rev stands for revolution - one complete sweep through
@@ -625,7 +632,7 @@ static int timer_current_wheel_update(timer_wheels_t *timer_wheels,
slot_idx = wheel_desc->slot_idx;
num_slots = wheel_desc->num_slots;
max_ticks = wheel_desc->max_ticks;
- max_cnt = (uint32_t)MIN(elapsed_ticks, 32);
+ max_cnt = _ODP_MIN(elapsed_ticks, UINT32_C(32));
current_wheel = timer_wheels->current_wheel;
ret_code = 0;
rc = -1;
@@ -636,9 +643,7 @@ static int timer_current_wheel_update(timer_wheels_t *timer_wheels,
if (timer_slot->user_data != 0) {
rc = expired_timers_append(timer_wheels, timer_slot);
if (rc < 0)
- timer_wheels->
- expired_timers_ring->
- expired_ring_full_cnt++;
+ timer_wheels->expired_timers_ring->expired_ring_full_cnt++;
timer_slot->user_data = 0;
}
@@ -935,10 +940,10 @@ uint32_t _odp_timer_wheel_count(_odp_timer_wheel_t timer_wheel)
static void _odp_int_timer_wheel_desc_print(wheel_desc_t *wheel_desc,
uint32_t wheel_idx)
{
- ODP_DBG(" wheel=%u num_slots=%u ticks_shift=%u ticks_per_slot=%u"
- " ticks_per_rev=%" PRIu64 "\n",
- wheel_idx, wheel_desc->num_slots, wheel_desc->ticks_shift,
- wheel_desc->ticks_per_slot, wheel_desc->ticks_per_rev);
+ _ODP_PRINT(" wheel=%u num_slots=%u ticks_shift=%u ticks_per_slot=%u"
+ " ticks_per_rev=%" PRIu64 "\n",
+ wheel_idx, wheel_desc->num_slots, wheel_desc->ticks_shift,
+ wheel_desc->ticks_per_slot, wheel_desc->ticks_per_rev);
}
void _odp_timer_wheel_stats_print(_odp_timer_wheel_t timer_wheel)
@@ -950,28 +955,28 @@ void _odp_timer_wheel_stats_print(_odp_timer_wheel_t timer_wheel)
timer_wheels = (timer_wheels_t *)(uintptr_t)timer_wheel;
expired_ring = timer_wheels->expired_timers_ring;
- ODP_DBG("_odp_int_timer_wheel_stats current_ticks=%" PRIu64 "\n",
- timer_wheels->current_ticks);
+ _ODP_PRINT(" _odp_int_timer_wheel_stats current_ticks=%" PRIu64 "\n",
+ timer_wheels->current_ticks);
for (wheel_idx = 0; wheel_idx < 4; wheel_idx++)
- _odp_int_timer_wheel_desc_print(
- &timer_wheels->wheel_descs[wheel_idx],
- wheel_idx);
-
- ODP_DBG(" total timer_inserts=%" PRIu64 " timer_removes=%" PRIu64
- " insert_fails=%" PRIu64 "\n",
- timer_wheels->total_timer_inserts,
- timer_wheels->total_timer_removes,
- timer_wheels->insert_fail_cnt);
- ODP_DBG(" total_promote_cnt=%" PRIu64 " promote_fail_cnt=%"
- PRIu64 "\n", timer_wheels->total_promote_cnt,
- timer_wheels->promote_fail_cnt);
- ODP_DBG(" free_list_size=%u min_size=%u peak_size=%u\n",
- timer_wheels->free_list_size, timer_wheels->min_free_list_size,
- timer_wheels->peak_free_list_size);
- ODP_DBG(" expired_timers_ring size=%u count=%u "
- "peak_count=%u full_cnt=%u\n",
- expired_ring->max_idx + 1, expired_ring->count,
- expired_ring->peak_count, expired_ring->expired_ring_full_cnt);
+ _odp_int_timer_wheel_desc_print(&timer_wheels->wheel_descs[wheel_idx], wheel_idx);
+
+ _ODP_PRINT(" total timer_inserts=%" PRIu64 " timer_removes=%" PRIu64
+ " insert_fails=%" PRIu64 "\n",
+ timer_wheels->total_timer_inserts,
+ timer_wheels->total_timer_removes,
+ timer_wheels->insert_fail_cnt);
+ _ODP_PRINT(" total_promote_cnt=%" PRIu64 " promote_fail_cnt=%"
+ PRIu64 "\n", timer_wheels->total_promote_cnt,
+ timer_wheels->promote_fail_cnt);
+ _ODP_PRINT(" free_list_size=%u min_size=%u peak_size=%u\n",
+ timer_wheels->free_list_size,
+ timer_wheels->min_free_list_size,
+ timer_wheels->peak_free_list_size);
+ _ODP_PRINT(" expired_timers_ring size=%u count=%u "
+ "peak_count=%u full_cnt=%u\n",
+ expired_ring->max_idx + 1, expired_ring->count,
+ expired_ring->peak_count,
+ expired_ring->expired_ring_full_cnt);
}
void _odp_timer_wheel_destroy(_odp_timer_wheel_t timer_wheel)
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index 4e9358b96..f0327fad0 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -1,12 +1,35 @@
/* Copyright 2015 EZchip Semiconductor Ltd. All Rights Reserved.
*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2022, Marvell
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#define _GNU_SOURCE
+#include <odp_posix_extensions.h>
+
+#include <odp/api/packet.h>
+#include <odp/api/packet_flags.h>
+#include <odp/api/std_types.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/byteorder_inlines.h>
+#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/time_inlines.h>
+
+#include <odp_packet_io_internal.h>
+#include <odp_traffic_mngr_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_init_internal.h>
+#include <odp_global_data.h>
+#include <odp_schedule_if.h>
+#include <odp_event_internal.h>
+
+#include <protocols/eth.h>
+#include <protocols/ip.h>
+
#include <stdint.h>
#include <string.h>
#include <malloc.h>
@@ -18,10 +41,6 @@
#include <sched.h>
#include <unistd.h>
#include <pthread.h>
-#include <odp/api/std_types.h>
-#include <protocols/eth.h>
-#include <protocols/ip.h>
-#include <odp_traffic_mngr_internal.h>
/* Local vars */
static const
@@ -35,60 +54,122 @@ _odp_int_name_kind_t PROFILE_TO_HANDLE_KIND[ODP_TM_NUM_PROFILES] = {
static const pkt_desc_t EMPTY_PKT_DESC = { .word = 0 };
#define MAX_PRIORITIES ODP_TM_MAX_PRIORITIES
-#define NUM_SHAPER_COLORS ODP_NUM_SHAPER_COLORS
-/* Traffic manager queue */
-#define QUEUE_TYPE_TM 4
+/* Shaper BW limits in bits/sec */
+#define TM_MIN_SHAPER_BW 8000ULL
+#define TM_MAX_SHAPER_BW (100ULL * 1000ULL * 1000ULL * 1000ULL)
+
+/* Possible values for running the shaper algorithm. TM_SHAPER_GREEN means that
+ * the traffic is within the commit specification (rate and burst size),
+ * TM_SHAPER_YELLOW means that the traffic is within the peak specification
+ * (rate and burst size) and TM_SHAPER_RED means that the traffic is exceeding
+ * both its commit and peak specifications. Note that packets can also have an
+ * assigned packet color of ODP_PACKET_GREEN, ODP_PACKET_YELLOW or
+ * ODP_PACKET_RED, which has a different meaning and purpose than the shaper
+ * colors.
+ */
+typedef enum {
+ TM_SHAPER_GREEN, TM_SHAPER_YELLOW, TM_SHAPER_RED
+} tm_shaper_color_t;
+
+/* Number of enumeration values defined in tm_shaper_color_t type. */
+#define NUM_SHAPER_COLORS 3
-static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
+static const tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
[0] = {
- [ODP_TM_SHAPER_GREEN] = { 0, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 0, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 0, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 0, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 0, DECR_BOTH },
+ [TM_SHAPER_RED] = { 0, DELAY_PKT } },
[1] = {
- [ODP_TM_SHAPER_GREEN] = { 1, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 1, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 1, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 1, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 1, DECR_BOTH },
+ [TM_SHAPER_RED] = { 1, DELAY_PKT } },
[2] = {
- [ODP_TM_SHAPER_GREEN] = { 2, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 2, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 2, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 2, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 2, DECR_BOTH },
+ [TM_SHAPER_RED] = { 2, DELAY_PKT } },
[3] = {
- [ODP_TM_SHAPER_GREEN] = { 3, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 3, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 3, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 3, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 3, DECR_BOTH },
+ [TM_SHAPER_RED] = { 3, DELAY_PKT } },
[4] = {
- [ODP_TM_SHAPER_GREEN] = { 4, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 4, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 4, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 4, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 4, DECR_BOTH },
+ [TM_SHAPER_RED] = { 4, DELAY_PKT } },
[5] = {
- [ODP_TM_SHAPER_GREEN] = { 5, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 5, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 5, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 5, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 5, DECR_BOTH },
+ [TM_SHAPER_RED] = { 5, DELAY_PKT } },
[6] = {
- [ODP_TM_SHAPER_GREEN] = { 6, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 6, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 6, DELAY_PKT } },
+ [TM_SHAPER_GREEN] = { 6, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 6, DECR_BOTH },
+ [TM_SHAPER_RED] = { 6, DELAY_PKT } },
[7] = {
- [ODP_TM_SHAPER_GREEN] = { 7, DECR_BOTH },
- [ODP_TM_SHAPER_YELLOW] = { 7, DECR_BOTH },
- [ODP_TM_SHAPER_RED] = { 7, DELAY_PKT } }
+ [TM_SHAPER_GREEN] = { 7, DECR_BOTH },
+ [TM_SHAPER_YELLOW] = { 7, DECR_BOTH },
+ [TM_SHAPER_RED] = { 7, DELAY_PKT } }
};
-/* Profile tables. */
-static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES];
-
-/* TM systems table. */
-static tm_system_t *odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS];
-
-static tm_system_group_t *tm_group_list;
-
-static odp_ticketlock_t tm_create_lock;
-static odp_ticketlock_t tm_profile_lock;
-static odp_barrier_t tm_first_enq;
-
-static int g_main_thread_cpu = -1;
-static int g_tm_cpu_num;
+#define MAX_SHAPER_PROFILES 128
+#define MAX_SCHED_PROFILES 128
+#define MAX_THRESHOLD_PROFILES 128
+#define MAX_WRED_PROFILES 128
+
+typedef struct {
+ struct {
+ tm_shaper_params_t profile[MAX_SHAPER_PROFILES];
+ odp_ticketlock_t lock;
+ } shaper;
+ struct {
+ tm_sched_params_t profile[MAX_SCHED_PROFILES];
+ odp_ticketlock_t lock;
+ } sched;
+ struct {
+ tm_queue_thresholds_t profile[MAX_THRESHOLD_PROFILES];
+ odp_ticketlock_t lock;
+ } threshold;
+ struct {
+ tm_wred_params_t profile[MAX_WRED_PROFILES];
+ odp_ticketlock_t lock;
+ } wred;
+} profile_tbl_t;
+
+typedef struct {
+ tm_system_t system[ODP_TM_MAX_NUM_SYSTEMS];
+
+ struct {
+ tm_system_group_t group[ODP_TM_MAX_NUM_SYSTEMS];
+ odp_ticketlock_t lock;
+ } system_group;
+ struct {
+ tm_queue_obj_t obj[ODP_TM_MAX_TM_QUEUES];
+ odp_ticketlock_t lock;
+ } queue_obj;
+ struct {
+ tm_node_obj_t obj[ODP_TM_MAX_NUM_TM_NODES];
+ odp_ticketlock_t lock;
+ } node_obj;
+
+ profile_tbl_t profile_tbl;
+
+ odp_ticketlock_t create_lock;
+ odp_ticketlock_t profile_lock;
+ odp_barrier_t first_enq;
+
+ int main_thread_cpu;
+ int cpu_num;
+
+ /* Service threads */
+ uint64_t busy_wait_counter;
+ odp_bool_t main_loop_running;
+ odp_atomic_u64_t atomic_request_cnt;
+ odp_atomic_u64_t currently_serving_cnt;
+ odp_atomic_u64_t atomic_done_cnt;
+
+ odp_shm_t shm;
+} tm_global_t;
+
+static tm_global_t *tm_glb;
/* Forward function declarations. */
static void tm_queue_cnts_decrement(tm_system_t *tm_system,
@@ -102,22 +183,14 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
tm_shaper_obj_t *timer_shaper,
pkt_desc_t *demoted_pkt_desc);
-static int queue_tm_reenq(queue_entry_t *queue, odp_buffer_hdr_t *buf_hdr)
+static inline tm_queue_obj_t *tm_qobj_from_index(uint32_t queue_id)
{
- odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE((uint8_t *)queue -
- offsetof(tm_queue_obj_t,
- tm_qentry));
- odp_packet_t pkt = _odp_packet_from_buffer(buf_hdr->handle.handle);
-
- return odp_tm_enq(tm_queue, pkt);
+ return &tm_glb->queue_obj.obj[queue_id];
}
-static int queue_tm_reenq_multi(queue_entry_t *queue ODP_UNUSED,
- odp_buffer_hdr_t *buf[] ODP_UNUSED,
- int num ODP_UNUSED)
+static inline tm_node_obj_t *tm_nobj_from_index(uint32_t node_id)
{
- ODP_ABORT("Invalid call to queue_tm_reenq_multi()\n");
- return 0;
+ return &tm_glb->node_obj.obj[node_id];
}
static tm_queue_obj_t *get_tm_queue_obj(tm_system_t *tm_system,
@@ -174,7 +247,7 @@ static void tm_init_random_data(tm_random_data_t *tm_random_data)
byte_cnt = 0;
while (byte_cnt < 256)
byte_cnt += odp_random_data(&tm_random_data->buf[byte_cnt],
- 256 - byte_cnt, 1);
+ 256 - byte_cnt, ODP_RANDOM_BASIC);
tm_random_data->next_random_byte = 0;
}
@@ -212,74 +285,131 @@ static odp_bool_t tm_random_drop(tm_random_data_t *tm_random_data,
return drop;
}
-static void *alloc_entry_in_dynamic_tbl(dynamic_tbl_t *dynamic_tbl,
- uint32_t record_size,
- uint32_t *dynamic_idx_ptr)
+static void *alloc_entry_in_tbl(profile_tbl_t *profile_tbl,
+ profile_kind_t profile_kind,
+ uint32_t *idx)
{
- uint32_t num_allocd, new_num_allocd, idx;
- void **new_array_ptrs, *new_record;
-
- num_allocd = dynamic_tbl->num_allocd;
- if (num_allocd <= dynamic_tbl->num_used) {
- /* Need to alloc or realloc the array of ptrs. */
- if (num_allocd <= 32)
- new_num_allocd = 64;
- else
- new_num_allocd = 4 * num_allocd;
-
- new_array_ptrs = malloc(new_num_allocd * sizeof(void *));
- memset(new_array_ptrs, 0, new_num_allocd * sizeof(void *));
+ uint32_t i;
- if (dynamic_tbl->num_used != 0)
- memcpy(new_array_ptrs, dynamic_tbl->array_ptrs,
- dynamic_tbl->num_used * sizeof(void *));
+ switch (profile_kind) {
+ case TM_SHAPER_PROFILE: {
+ tm_shaper_params_t *profile = NULL;
- if (dynamic_tbl->array_ptrs)
- free(dynamic_tbl->array_ptrs);
+ odp_ticketlock_lock(&profile_tbl->shaper.lock);
+ for (i = 0; i < MAX_SHAPER_PROFILES; i++) {
+ if (profile_tbl->shaper.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
- dynamic_tbl->num_allocd = new_num_allocd;
- dynamic_tbl->array_ptrs = new_array_ptrs;
+ profile = &profile_tbl->shaper.profile[i];
+ memset(profile, 0, sizeof(tm_shaper_params_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->shaper.lock);
+ return profile;
+ }
+ case TM_SCHED_PROFILE: {
+ tm_sched_params_t *profile = NULL;
+
+ odp_ticketlock_lock(&profile_tbl->sched.lock);
+ for (i = 0; i < MAX_SCHED_PROFILES; i++) {
+ if (profile_tbl->sched.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
+
+ profile = &profile_tbl->sched.profile[i];
+ memset(profile, 0, sizeof(tm_sched_params_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->sched.lock);
+ return profile;
+ }
+ case TM_THRESHOLD_PROFILE: {
+ tm_queue_thresholds_t *profile = NULL;
+
+ odp_ticketlock_lock(&profile_tbl->threshold.lock);
+ for (i = 0; i < MAX_THRESHOLD_PROFILES; i++) {
+ if (profile_tbl->threshold.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
+
+ profile = &profile_tbl->threshold.profile[i];
+ memset(profile, 0, sizeof(tm_queue_thresholds_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->threshold.lock);
+ return profile;
+ }
+ case TM_WRED_PROFILE: {
+ tm_wred_params_t *profile = NULL;
+
+ odp_ticketlock_lock(&profile_tbl->wred.lock);
+ for (i = 0; i < MAX_WRED_PROFILES; i++) {
+ if (profile_tbl->wred.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
+
+ profile = &profile_tbl->wred.profile[i];
+ memset(profile, 0, sizeof(tm_wred_params_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->wred.lock);
+ return profile;
+ }
+ default:
+ _ODP_ERR("Invalid TM profile\n");
+ return NULL;
}
+}
- idx = dynamic_tbl->num_used;
- new_record = malloc(record_size);
- memset(new_record, 0, record_size);
+static void free_tbl_entry(profile_tbl_t *profile_tbl,
+ profile_kind_t profile_kind,
+ uint32_t idx)
+{
+ switch (profile_kind) {
+ case TM_SHAPER_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->shaper.lock);
+ profile_tbl->shaper.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->shaper.lock);
+ return;
- dynamic_tbl->array_ptrs[idx] = new_record;
- dynamic_tbl->num_used++;
- if (dynamic_idx_ptr)
- *dynamic_idx_ptr = idx;
+ case TM_SCHED_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->sched.lock);
+ profile_tbl->sched.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->sched.lock);
+ return;
- return new_record;
-}
+ case TM_THRESHOLD_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->threshold.lock);
+ profile_tbl->threshold.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->threshold.lock);
+ return;
-static void free_dynamic_tbl_entry(dynamic_tbl_t *dynamic_tbl,
- uint32_t record_size ODP_UNUSED,
- uint32_t dynamic_idx)
-{
- void *record;
+ case TM_WRED_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->wred.lock);
+ profile_tbl->wred.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->wred.lock);
+ return;
- record = dynamic_tbl->array_ptrs[dynamic_idx];
- if (record) {
- free(record);
- dynamic_tbl->array_ptrs[dynamic_idx] = NULL;
- dynamic_tbl->num_freed++;
- if (dynamic_tbl->num_freed == dynamic_tbl->num_used) {
- free(dynamic_tbl->array_ptrs);
- memset(dynamic_tbl, 0, sizeof(dynamic_tbl_t));
- }
+ default:
+ _ODP_ERR("Invalid TM profile\n");
+ return;
}
}
-static input_work_queue_t *input_work_queue_create(void)
+static void input_work_queue_init(input_work_queue_t *input_work_queue)
{
- input_work_queue_t *input_work_queue;
-
- input_work_queue = malloc(sizeof(input_work_queue_t));
memset(input_work_queue, 0, sizeof(input_work_queue_t));
odp_atomic_init_u64(&input_work_queue->queue_cnt, 0);
odp_ticketlock_init(&input_work_queue->lock);
- return input_work_queue;
}
static void input_work_queue_destroy(input_work_queue_t *input_work_queue)
@@ -289,7 +419,7 @@ static void input_work_queue_destroy(input_work_queue_t *input_work_queue)
* stopped new tm_enq() (et al) calls from succeeding.
*/
odp_ticketlock_lock(&input_work_queue->lock);
- free(input_work_queue);
+ memset(input_work_queue, 0, sizeof(input_work_queue_t));
}
static int input_work_queue_append(tm_system_t *tm_system,
@@ -299,7 +429,7 @@ static int input_work_queue_append(tm_system_t *tm_system,
input_work_item_t *entry_ptr;
uint32_t queue_cnt, tail_idx;
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
queue_cnt = odp_atomic_load_u64(&input_work_queue->queue_cnt);
if (INPUT_WORK_RING_SIZE <= queue_cnt) {
input_work_queue->enqueue_fail_cnt++;
@@ -357,11 +487,11 @@ static tm_system_t *tm_system_alloc(void)
/* Find an open slot in the odp_tm_systems array. */
for (tm_idx = 0; tm_idx < ODP_TM_MAX_NUM_SYSTEMS; tm_idx++) {
- if (!odp_tm_systems[tm_idx]) {
- tm_system = malloc(sizeof(tm_system_t));
+ if (tm_glb->system[tm_idx].status == TM_STATUS_FREE) {
+ tm_system = &tm_glb->system[tm_idx];
memset(tm_system, 0, sizeof(tm_system_t));
- odp_tm_systems[tm_idx] = tm_system;
tm_system->tm_idx = tm_idx;
+ tm_system->status = TM_STATUS_RESERVED;
return tm_system;
}
}
@@ -371,47 +501,38 @@ static tm_system_t *tm_system_alloc(void)
static void tm_system_free(tm_system_t *tm_system)
{
- if (tm_system->root_node)
- free(tm_system->root_node);
-
- if (tm_system->queue_num_tbl)
- free(tm_system->queue_num_tbl);
-
- odp_tm_systems[tm_system->tm_idx] = NULL;
- free(tm_system);
+ tm_glb->system[tm_system->tm_idx].status = TM_STATUS_FREE;
}
static void *tm_common_profile_create(const char *name,
profile_kind_t profile_kind,
- uint32_t object_size,
tm_handle_t *profile_handle_ptr,
_odp_int_name_t *name_tbl_id_ptr)
{
_odp_int_name_kind_t handle_kind;
_odp_int_name_t name_tbl_id;
- dynamic_tbl_t *dynamic_tbl;
tm_handle_t profile_handle;
- uint32_t dynamic_tbl_idx;
void *object_ptr;
-
- /* Note that alloc_entry_in_dynamic_tbl will zero out all of the memory
- * that it allocates, so an additional memset here is unnnecessary. */
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
- object_ptr = alloc_entry_in_dynamic_tbl(dynamic_tbl, object_size,
- &dynamic_tbl_idx);
- if (!object_ptr)
+ uint32_t idx = 0;
+
+ /* Note that alloc_entry_in_tbl will zero out all of the memory that it
+ * allocates, so an additional memset here is unnecessary. */
+ object_ptr = alloc_entry_in_tbl(&tm_glb->profile_tbl, profile_kind,
+ &idx);
+ if (!object_ptr) {
+ _ODP_ERR("No free profiles left\n");
return NULL;
+ }
handle_kind = PROFILE_TO_HANDLE_KIND[profile_kind];
- profile_handle = MAKE_PROFILE_HANDLE(profile_kind, dynamic_tbl_idx);
+ profile_handle = MAKE_PROFILE_HANDLE(profile_kind, idx);
name_tbl_id = ODP_INVALID_NAME;
if ((name != NULL) && (name[0] != '\0')) {
name_tbl_id = _odp_int_name_tbl_add(name, handle_kind,
profile_handle);
if (name_tbl_id == ODP_INVALID_NAME) {
- free_dynamic_tbl_entry(dynamic_tbl, object_size,
- dynamic_tbl_idx);
+ free_tbl_entry(&tm_glb->profile_tbl, profile_kind, idx);
return NULL;
}
}
@@ -423,20 +544,18 @@ static void *tm_common_profile_create(const char *name,
}
static int tm_common_profile_destroy(tm_handle_t profile_handle,
- uint32_t object_size,
_odp_int_name_t name_tbl_id)
{
profile_kind_t profile_kind;
- dynamic_tbl_t *dynamic_tbl;
- uint32_t dynamic_tbl_idx;
+ uint32_t idx;
if (name_tbl_id != ODP_INVALID_NAME)
_odp_int_name_tbl_delete(name_tbl_id);
- profile_kind = GET_PROFILE_KIND(profile_handle);
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
- dynamic_tbl_idx = GET_TBL_IDX(profile_handle);
- free_dynamic_tbl_entry(dynamic_tbl, object_size, dynamic_tbl_idx);
+ profile_kind = GET_PROFILE_KIND(profile_handle);
+ idx = GET_TBL_IDX(profile_handle);
+ free_tbl_entry(&tm_glb->profile_tbl, profile_kind, idx);
+
return 0;
}
@@ -444,16 +563,31 @@ static void *tm_get_profile_params(tm_handle_t profile_handle,
profile_kind_t expected_profile_kind)
{
profile_kind_t profile_kind;
- dynamic_tbl_t *dynamic_tbl;
- uint32_t dynamic_tbl_idx;
+ uint32_t idx;
profile_kind = GET_PROFILE_KIND(profile_handle);
if (profile_kind != expected_profile_kind)
return NULL;
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
- dynamic_tbl_idx = GET_TBL_IDX(profile_handle);
- return dynamic_tbl->array_ptrs[dynamic_tbl_idx];
+ idx = GET_TBL_IDX(profile_handle);
+
+ switch (profile_kind) {
+ case TM_SHAPER_PROFILE:
+ return &tm_glb->profile_tbl.shaper.profile[idx];
+
+ case TM_SCHED_PROFILE:
+ return &tm_glb->profile_tbl.sched.profile[idx];
+
+ case TM_THRESHOLD_PROFILE:
+ return &tm_glb->profile_tbl.threshold.profile[idx];
+
+ case TM_WRED_PROFILE:
+ return &tm_glb->profile_tbl.wred.profile[idx];
+
+ default:
+ _ODP_ERR("Invalid TM profile\n");
+ return NULL;
+ }
}
static uint64_t tm_bps_to_rate(uint64_t bps)
@@ -479,7 +613,7 @@ static uint64_t tm_max_time_delta(uint64_t rate)
return (1ULL << (26 + 30)) / rate;
}
-static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
+static void tm_shaper_params_cvt_to(const odp_tm_shaper_params_t *shaper_params,
tm_shaper_params_t *tm_shaper_params)
{
uint64_t commit_rate, peak_rate, max_commit_time_delta, highest_rate;
@@ -487,8 +621,8 @@ static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
uint32_t min_time_delta;
int64_t commit_burst, peak_burst;
- commit_rate = tm_bps_to_rate(odp_shaper_params->commit_bps);
- if ((odp_shaper_params->commit_bps == 0) || (commit_rate == 0)) {
+ commit_rate = tm_bps_to_rate(shaper_params->commit_rate);
+ if ((shaper_params->commit_rate == 0) || (commit_rate == 0)) {
tm_shaper_params->max_commit_time_delta = 0;
tm_shaper_params->max_peak_time_delta = 0;
tm_shaper_params->commit_rate = 0;
@@ -503,18 +637,18 @@ static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
}
max_commit_time_delta = tm_max_time_delta(commit_rate);
- commit_burst = (int64_t)odp_shaper_params->commit_burst;
+ commit_burst = (int64_t)shaper_params->commit_burst;
- peak_rate = tm_bps_to_rate(odp_shaper_params->peak_bps);
- if ((odp_shaper_params->peak_bps == 0) || (peak_rate == 0)) {
+ peak_rate = tm_bps_to_rate(shaper_params->peak_rate);
+ if ((!shaper_params->dual_rate) || (peak_rate == 0)) {
peak_rate = 0;
max_peak_time_delta = 0;
peak_burst = 0;
min_time_delta = (uint32_t)((1 << 26) / commit_rate);
} else {
max_peak_time_delta = tm_max_time_delta(peak_rate);
- peak_burst = (int64_t)odp_shaper_params->peak_burst;
- highest_rate = MAX(commit_rate, peak_rate);
+ peak_burst = (int64_t)shaper_params->peak_burst;
+ highest_rate = _ODP_MAX(commit_rate, peak_rate);
min_time_delta = (uint32_t)((1 << 26) / highest_rate);
}
@@ -526,8 +660,8 @@ static void tm_shaper_params_cvt_to(odp_tm_shaper_params_t *odp_shaper_params,
tm_shaper_params->max_commit = commit_burst << (26 - 3);
tm_shaper_params->max_peak = peak_burst << (26 - 3);
tm_shaper_params->min_time_delta = min_time_delta;
- tm_shaper_params->len_adjust = odp_shaper_params->shaper_len_adjust;
- tm_shaper_params->dual_rate = odp_shaper_params->dual_rate;
+ tm_shaper_params->len_adjust = shaper_params->shaper_len_adjust;
+ tm_shaper_params->dual_rate = shaper_params->dual_rate;
tm_shaper_params->enabled = 1;
}
@@ -546,8 +680,8 @@ static void tm_shaper_params_cvt_from(tm_shaper_params_t *tm_shaper_params,
commit_burst = tm_shaper_params->max_commit >> (26 - 3);
peak_burst = tm_shaper_params->max_peak >> (26 - 3);
- odp_shaper_params->commit_bps = commit_bps;
- odp_shaper_params->peak_bps = peak_bps;
+ odp_shaper_params->commit_rate = commit_bps;
+ odp_shaper_params->peak_rate = peak_bps;
odp_shaper_params->commit_burst = (uint32_t)commit_burst;
odp_shaper_params->peak_burst = (uint32_t)peak_burst;
odp_shaper_params->shaper_len_adjust = tm_shaper_params->len_adjust;
@@ -618,8 +752,8 @@ static void tm_sched_config_set(tm_shaper_obj_t *shaper_obj,
}
/* Any locking required and validity checks must be done by the caller! */
-static void tm_threshold_config_set(tm_wred_node_t *wred_node,
- odp_tm_threshold_t thresholds_profile)
+static int tm_threshold_config_set(tm_wred_node_t *wred_node,
+ odp_tm_threshold_t thresholds_profile)
{
tm_queue_thresholds_t *threshold_params;
@@ -629,15 +763,18 @@ static void tm_threshold_config_set(tm_wred_node_t *wred_node,
}
if (thresholds_profile == ODP_TM_INVALID)
- return;
+ return 0;
threshold_params = tm_get_profile_params(thresholds_profile,
TM_THRESHOLD_PROFILE);
- if (threshold_params == NULL)
- return;
+ if (threshold_params == NULL) {
+ _ODP_DBG("threshold_params is NULL\n");
+ return -1;
+ }
threshold_params->ref_cnt++;
wred_node->threshold_params = threshold_params;
+ return 0;
}
/* Any locking required and validity checks must be done by the caller! */
@@ -698,9 +835,9 @@ static void update_shaper_elapsed_time(tm_system_t *tm_system,
else
commit_inc = time_delta * shaper_params->commit_rate;
- shaper_obj->commit_cnt = (int64_t)MIN(max_commit, commit + commit_inc);
+ shaper_obj->commit_cnt = (int64_t)_ODP_MIN(max_commit, commit + commit_inc);
- if (shaper_params->peak_rate != 0) {
+ if (shaper_params->dual_rate) {
peak = shaper_obj->peak_cnt;
max_peak = shaper_params->max_peak;
if (shaper_params->max_peak_time_delta <= time_delta)
@@ -708,7 +845,7 @@ static void update_shaper_elapsed_time(tm_system_t *tm_system,
else
peak_inc = time_delta * shaper_params->peak_rate;
- shaper_obj->peak_cnt = (int64_t)MIN(max_peak, peak + peak_inc);
+ shaper_obj->peak_cnt = (int64_t)_ODP_MIN(max_peak, peak + peak_inc);
}
shaper_obj->last_update_time = tm_system->current_time;
@@ -728,22 +865,22 @@ static uint64_t time_till_not_red(tm_shaper_params_t *shaper_params,
commit_delay = (-shaper_obj->commit_cnt)
/ shaper_params->commit_rate;
- min_time_delay = MAX(shaper_obj->shaper_params->min_time_delta, 256);
- commit_delay = MAX(commit_delay, min_time_delay);
- if (shaper_params->peak_rate == 0)
+ min_time_delay = _ODP_MAX(shaper_obj->shaper_params->min_time_delta, UINT64_C(256));
+ commit_delay = _ODP_MAX(commit_delay, min_time_delay);
+ if (!shaper_params->dual_rate)
return commit_delay;
peak_delay = 0;
if (shaper_obj->peak_cnt < 0)
peak_delay = (-shaper_obj->peak_cnt) / shaper_params->peak_rate;
- peak_delay = MAX(peak_delay, min_time_delay);
+ peak_delay = _ODP_MAX(peak_delay, min_time_delay);
if (0 < shaper_obj->commit_cnt)
return peak_delay;
else if (0 < shaper_obj->peak_cnt)
return commit_delay;
else
- return MIN(commit_delay, peak_delay);
+ return _ODP_MIN(commit_delay, peak_delay);
}
static int delete_timer(tm_system_t *tm_system ODP_UNUSED,
@@ -815,7 +952,7 @@ static void tm_block_pkt(tm_system_t *tm_system,
tm_queue_obj->timer_shaper, pkt_desc);
else if (tm_queue_obj->timer_reason != NO_CALLBACK)
- ODP_DBG("%s timer_reason != NO_CALLBACK\n", __func__);
+ _ODP_DBG("%s timer_reason != NO_CALLBACK\n", __func__);
tm_queue_obj->blocked_cnt = 1;
tm_queue_obj->blocked_scheduler = schedulers_obj;
@@ -845,8 +982,7 @@ static odp_bool_t delay_pkt(tm_system_t *tm_system,
rc = _odp_timer_wheel_insert(tm_system->_odp_int_timer_wheel,
wakeup_time, timer_context);
if (rc < 0) {
- ODP_DBG("%s odp_timer_wheel_insert() failed rc=%d\n",
- __func__, rc);
+ _ODP_DBG("%s odp_timer_wheel_insert() failed rc=%d\n", __func__, rc);
return false;
}
@@ -934,7 +1070,7 @@ static odp_bool_t rm_pkt_from_shaper(tm_system_t *tm_system,
(shaper_action == DECR_COMMIT))
shaper_obj->commit_cnt -= tkn_count;
- if (shaper_params->peak_rate != 0)
+ if (shaper_params->dual_rate)
if ((shaper_action == DECR_BOTH) ||
(shaper_action == DECR_PEAK))
shaper_obj->peak_cnt -= tkn_count;
@@ -952,30 +1088,30 @@ static odp_bool_t run_shaper(tm_system_t *tm_system,
pkt_desc_t *pkt_desc,
uint8_t priority)
{
- odp_tm_shaper_color_t shaper_color;
+ tm_shaper_color_t shaper_color;
tm_shaper_params_t *shaper_params;
odp_bool_t output_change;
tm_prop_t propagation;
shaper_params = shaper_obj->shaper_params;
- shaper_color = ODP_TM_SHAPER_GREEN;
+ shaper_color = TM_SHAPER_GREEN;
if (shaper_params) {
update_shaper_elapsed_time(tm_system, shaper_params,
shaper_obj);
if (shaper_params->enabled) {
if (0 < shaper_obj->commit_cnt)
- shaper_color = ODP_TM_SHAPER_GREEN;
- else if (shaper_params->peak_rate == 0)
- shaper_color = ODP_TM_SHAPER_RED;
+ shaper_color = TM_SHAPER_GREEN;
+ else if (!shaper_params->dual_rate)
+ shaper_color = TM_SHAPER_RED;
else if (shaper_obj->peak_cnt <= 0)
- shaper_color = ODP_TM_SHAPER_RED;
+ shaper_color = TM_SHAPER_RED;
else
- shaper_color = ODP_TM_SHAPER_YELLOW;
+ shaper_color = TM_SHAPER_YELLOW;
- if (shaper_color == ODP_TM_SHAPER_GREEN)
+ if (shaper_color == TM_SHAPER_GREEN)
tm_system->shaper_green_cnt++;
- else if (shaper_color == ODP_TM_SHAPER_YELLOW)
+ else if (shaper_color == TM_SHAPER_YELLOW)
tm_system->shaper_yellow_cnt++;
else
tm_system->shaper_red_cnt++;
@@ -1053,8 +1189,8 @@ static int tm_set_finish_time(tm_schedulers_obj_t *schedulers_obj,
frame_weight = ((inverted_weight * frame_len) + (1 << 15)) >> 16;
sched_state = &schedulers_obj->sched_states[new_priority];
- base_virtual_time = MAX(prod_shaper_obj->virtual_finish_time,
- sched_state->base_virtual_time);
+ base_virtual_time = _ODP_MAX(prod_shaper_obj->virtual_finish_time,
+ sched_state->base_virtual_time);
virtual_finish_time = base_virtual_time + frame_weight;
prod_shaper_obj->virtual_finish_time = virtual_finish_time;
@@ -1086,7 +1222,7 @@ static odp_bool_t run_sched(tm_system_t *tm_system,
new_sched_state = &schedulers_obj->sched_states[priority];
prev_best_pkt_desc = new_sched_state->smallest_pkt_desc;
if (pkt_descs_equal(new_pkt_desc, &prev_best_pkt_desc)) {
- ODP_DBG("%s spurious execution ****\n", __func__);
+ _ODP_DBG("%s spurious execution ****\n", __func__);
return false;
}
}
@@ -1109,10 +1245,9 @@ static odp_bool_t run_sched(tm_system_t *tm_system,
* virtual finish time, just insert it into this
* sched_state's list sorted by virtual finish times.
*/
- rc = _odp_sorted_list_insert(
- tm_system->_odp_int_sorted_pool,
- new_sched_state->sorted_list,
- new_finish_time, new_pkt_desc->word);
+ rc = _odp_sorted_list_insert(tm_system->_odp_int_sorted_pool,
+ new_sched_state->sorted_list,
+ new_finish_time, new_pkt_desc->word);
if (0 <= rc) {
new_sched_state->sorted_list_cnt++;
@@ -1305,7 +1440,7 @@ static odp_bool_t tm_propagate_pkt_desc(tm_system_t *tm_system,
if (!shaper_change)
return false;
- schedulers_obj = tm_node_obj->schedulers_obj;
+ schedulers_obj = &tm_node_obj->schedulers_obj;
prev_sched_pkt = schedulers_obj->out_pkt_desc;
sched_was_empty = prev_sched_pkt.queue_num == 0;
sched_change = false;
@@ -1399,7 +1534,7 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
if ((!blocked_scheduler) && (!timer_shaper))
return false;
- if (tm_node_obj->schedulers_obj == blocked_scheduler)
+ if (&tm_node_obj->schedulers_obj == blocked_scheduler)
return false;
/* See if this first shaper_obj is delaying the demoted_pkt_desc */
@@ -1425,7 +1560,7 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
if ((!demoted_pkt_desc) && (!shaper_change))
return false;
- schedulers_obj = tm_node_obj->schedulers_obj;
+ schedulers_obj = &tm_node_obj->schedulers_obj;
prev_sched_pkt = schedulers_obj->out_pkt_desc;
sched_was_empty = prev_sched_pkt.queue_num == 0;
sched_change = false;
@@ -1538,17 +1673,17 @@ static odp_bool_t tm_consume_pkt_desc(tm_system_t *tm_system,
shaper_is_empty = new_shaper_pkt.queue_num == 0;
if (pkt_descs_equal(&new_shaper_pkt, sent_pkt_desc))
- ODP_DBG("%s shaper has old pkt_desc\n", __func__);
+ _ODP_DBG("%s shaper has old pkt_desc\n", __func__);
tm_node_obj = shaper_obj->next_tm_node;
while (!tm_node_obj->is_root_node) { /* not at egress */
- schedulers_obj = tm_node_obj->schedulers_obj;
+ schedulers_obj = &tm_node_obj->schedulers_obj;
prev_sched_pkt = schedulers_obj->out_pkt_desc;
sent_priority = schedulers_obj->highest_priority;
/* Verify that the scheduler output is the sent_pkt_desc. */
if (pkt_descs_not_equal(&prev_sched_pkt, sent_pkt_desc)) {
- ODP_DBG("%s sched has bad out pkt_desc\n", __func__);
+ _ODP_DBG("%s sched has bad out pkt_desc\n", __func__);
return false;
}
@@ -1566,17 +1701,17 @@ static odp_bool_t tm_consume_pkt_desc(tm_system_t *tm_system,
sched_is_empty = new_sched_pkt.queue_num == 0;
if (pkt_descs_equal(&new_sched_pkt, sent_pkt_desc))
- ODP_DBG("%s sched has old pkt_desc\n", __func__);
+ _ODP_DBG("%s sched has old pkt_desc\n", __func__);
if (pkt_descs_equal(&new_sched_pkt, sent_pkt_desc))
- ODP_DBG("%s scheduler has old pkt_desc\n", __func__);
+ _ODP_DBG("%s scheduler has old pkt_desc\n", __func__);
shaper_obj = &tm_node_obj->shaper_obj;
prev_shaper_pkt = shaper_obj->out_pkt_desc;
/* Verify that the shaper output is the sent_pkt_desc. */
if (pkt_descs_not_equal(&prev_shaper_pkt, sent_pkt_desc)) {
- ODP_DBG("%s shaper has bad out pkt_desc\n", __func__);
+ _ODP_DBG("%s shaper has bad out pkt_desc\n", __func__);
return false;
}
@@ -1593,7 +1728,7 @@ static odp_bool_t tm_consume_pkt_desc(tm_system_t *tm_system,
shaper_is_empty = new_shaper_pkt.queue_num == 0;
if (pkt_descs_equal(&new_shaper_pkt, sent_pkt_desc))
- ODP_DBG("%s shaper has old pkt_desc\n", __func__);
+ _ODP_DBG("%s shaper has old pkt_desc\n", __func__);
tm_node_obj = shaper_obj->next_tm_node;
}
@@ -1626,7 +1761,7 @@ static odp_bool_t tm_consume_sent_pkt(tm_system_t *tm_system,
pkt_len = sent_pkt_desc->pkt_len;
tm_queue_obj->pkts_consumed_cnt++;
- tm_queue_cnts_decrement(tm_system, tm_queue_obj->tm_wred_node,
+ tm_queue_cnts_decrement(tm_system, &tm_queue_obj->tm_wred_node,
tm_queue_obj->priority, pkt_len);
/* Get the next pkt in the tm_queue, if there is one. */
@@ -1667,7 +1802,7 @@ static odp_tm_percent_t tm_queue_fullness(tm_wred_params_t *wred_params,
return 0;
fullness = (10000 * current_cnt) / max_cnt;
- return (odp_tm_percent_t)MIN(fullness, 50000);
+ return (odp_tm_percent_t)_ODP_MIN(fullness, UINT64_C(50000));
}
static odp_bool_t tm_local_random_drop(tm_system_t *tm_system,
@@ -1871,6 +2006,12 @@ static void tm_queue_cnts_decrement(tm_system_t *tm_system,
odp_atomic_sub_u64(&queue_cnts->byte_cnt, frame_len);
}
+static inline void activate_packet_aging(odp_packet_hdr_t *pkt_hdr)
+{
+ if (odp_unlikely(pkt_hdr->p.flags.tx_aging))
+ pkt_hdr->tx_aging_ns = pkt_hdr->tx_aging_ns + odp_time_global_ns();
+}
+
static int tm_enqueue(tm_system_t *tm_system,
tm_queue_obj_t *tm_queue_obj,
odp_packet_t pkt)
@@ -1892,22 +2033,27 @@ static int tm_enqueue(tm_system_t *tm_system,
pkt_color = odp_packet_color(pkt);
drop_eligible = odp_packet_drop_eligible(pkt);
- initial_tm_wred_node = tm_queue_obj->tm_wred_node;
+ initial_tm_wred_node = &tm_queue_obj->tm_wred_node;
if (drop_eligible) {
drop = random_early_discard(tm_system, tm_queue_obj,
initial_tm_wred_node, pkt_color);
if (drop)
- return -1;
+ return -2;
}
work_item.queue_num = tm_queue_obj->queue_num;
work_item.pkt = pkt;
- sched_fn->order_lock();
+ if (tm_queue_obj->ordered_enqueue)
+ _odp_sched_fn->order_lock();
+
+ activate_packet_aging(packet_hdr(pkt));
rc = input_work_queue_append(tm_system, &work_item);
- sched_fn->order_unlock();
+
+ if (tm_queue_obj->ordered_enqueue)
+ _odp_sched_fn->order_unlock();
if (rc < 0) {
- ODP_DBG("%s work queue full\n", __func__);
+ _ODP_DBG("%s work queue full\n", __func__);
return rc;
}
@@ -1923,8 +2069,8 @@ static void egress_vlan_marking(tm_vlan_marking_t *vlan_marking,
_odp_vlanhdr_t vlan_hdr, *vlan_hdr_ptr;
_odp_ethhdr_t *ether_hdr_ptr;
odp_bool_t split_hdr;
- uint32_t hdr_len;
uint16_t old_tci, new_tci;
+ uint32_t hdr_len = 0;
ether_hdr_ptr = odp_packet_l2_ptr(odp_pkt, &hdr_len);
vlan_hdr_ptr = (_odp_vlanhdr_t *)(ether_hdr_ptr + 1);
@@ -1938,8 +2084,7 @@ static void egress_vlan_marking(tm_vlan_marking_t *vlan_marking,
* correctness rather then performance. */
split_hdr = hdr_len < (_ODP_ETHHDR_LEN + _ODP_VLANHDR_LEN);
if (split_hdr) {
- odp_packet_copy_to_mem(odp_pkt, _ODP_ETHHDR_LEN,
- _ODP_VLANHDR_LEN, &vlan_hdr);
+ odp_packet_copy_to_mem(odp_pkt, _ODP_ETHHDR_LEN, _ODP_VLANHDR_LEN, &vlan_hdr);
vlan_hdr_ptr = &vlan_hdr;
}
@@ -1953,8 +2098,7 @@ static void egress_vlan_marking(tm_vlan_marking_t *vlan_marking,
vlan_hdr_ptr->tci = odp_cpu_to_be_16(new_tci);
if (split_hdr)
- odp_packet_copy_from_mem(odp_pkt, _ODP_ETHHDR_LEN,
- _ODP_VLANHDR_LEN, &vlan_hdr);
+ odp_packet_copy_from_mem(odp_pkt, _ODP_ETHHDR_LEN, _ODP_VLANHDR_LEN, &vlan_hdr);
}
static void egress_ipv4_tos_marking(tm_tos_marking_t *tos_marking,
@@ -1962,8 +2106,9 @@ static void egress_ipv4_tos_marking(tm_tos_marking_t *tos_marking,
{
_odp_ipv4hdr_t ipv4_hdr, *ipv4_hdr_ptr;
odp_bool_t split_hdr;
- uint32_t hdr_len, l3_offset, old_chksum, ones_compl_sum, tos_diff;
+ uint32_t l3_offset, old_chksum, ones_compl_sum, tos_diff;
uint8_t old_tos, new_tos, ecn;
+ uint32_t hdr_len = 0;
l3_offset = odp_packet_l3_offset(odp_pkt);
ipv4_hdr_ptr = odp_packet_l3_ptr(odp_pkt, &hdr_len);
@@ -1977,8 +2122,7 @@ static void egress_ipv4_tos_marking(tm_tos_marking_t *tos_marking,
* correctness rather then performance. */
split_hdr = hdr_len < 12;
if (split_hdr) {
- odp_packet_copy_to_mem(odp_pkt, l3_offset,
- _ODP_IPV4HDR_LEN, &ipv4_hdr);
+ odp_packet_copy_to_mem(odp_pkt, l3_offset, _ODP_IPV4HDR_LEN, &ipv4_hdr);
ipv4_hdr_ptr = &ipv4_hdr;
}
@@ -2019,8 +2163,7 @@ static void egress_ipv4_tos_marking(tm_tos_marking_t *tos_marking,
ipv4_hdr_ptr->tos = new_tos;
ipv4_hdr_ptr->chksum = odp_cpu_to_be_16((~ones_compl_sum) & 0xFFFF);
if (split_hdr)
- odp_packet_copy_from_mem(odp_pkt, l3_offset,
- _ODP_IPV4HDR_LEN, &ipv4_hdr);
+ odp_packet_copy_from_mem(odp_pkt, l3_offset, _ODP_IPV4HDR_LEN, &ipv4_hdr);
}
static void egress_ipv6_tc_marking(tm_tos_marking_t *tos_marking,
@@ -2028,8 +2171,9 @@ static void egress_ipv6_tc_marking(tm_tos_marking_t *tos_marking,
{
_odp_ipv6hdr_t ipv6_hdr, *ipv6_hdr_ptr;
odp_bool_t split_hdr;
- uint32_t hdr_len, old_ver_tc_flow, new_ver_tc_flow, l3_offset;
+ uint32_t old_ver_tc_flow, new_ver_tc_flow, l3_offset;
uint8_t old_tc, new_tc, ecn;
+ uint32_t hdr_len = 0;
l3_offset = odp_packet_l3_offset(odp_pkt);
ipv6_hdr_ptr = odp_packet_l3_ptr(odp_pkt, &hdr_len);
@@ -2043,8 +2187,7 @@ static void egress_ipv6_tc_marking(tm_tos_marking_t *tos_marking,
* correctness rather then performance. */
split_hdr = hdr_len < 4;
if (split_hdr) {
- odp_packet_copy_to_mem(odp_pkt, l3_offset,
- _ODP_IPV6HDR_LEN, &ipv6_hdr);
+ odp_packet_copy_to_mem(odp_pkt, l3_offset, _ODP_IPV6HDR_LEN, &ipv6_hdr);
ipv6_hdr_ptr = &ipv6_hdr;
}
@@ -2072,8 +2215,7 @@ static void egress_ipv6_tc_marking(tm_tos_marking_t *tos_marking,
ipv6_hdr_ptr->ver_tc_flow = odp_cpu_to_be_32(new_ver_tc_flow);
if (split_hdr)
- odp_packet_copy_from_mem(odp_pkt, l3_offset,
- _ODP_IPV6HDR_LEN, &ipv6_hdr);
+ odp_packet_copy_from_mem(odp_pkt, l3_offset, _ODP_IPV6HDR_LEN, &ipv6_hdr);
}
static void tm_egress_marking(tm_system_t *tm_system, odp_packet_t odp_pkt)
@@ -2083,6 +2225,7 @@ static void tm_egress_marking(tm_system_t *tm_system, odp_packet_t odp_pkt)
tm_tos_marking_t *ip_marking;
color = odp_packet_color(odp_pkt);
+ _ODP_ASSERT(color < ODP_NUM_PACKET_COLORS);
if (odp_packet_has_vlan(odp_pkt)) {
vlan_marking = &tm_system->marking.vlan_marking[color];
@@ -2101,12 +2244,19 @@ static void tm_egress_marking(tm_system_t *tm_system, odp_packet_t odp_pkt)
}
}
+static inline odp_bool_t is_packet_aged(odp_packet_hdr_t *pkt_hdr)
+{
+ return pkt_hdr->p.flags.tx_aging && pkt_hdr->tx_aging_ns < odp_time_global_ns();
+}
+
static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
{
tm_queue_obj_t *tm_queue_obj;
odp_packet_t odp_pkt;
pkt_desc_t *pkt_desc;
uint32_t cnt;
+ int ret;
+ pktio_entry_t *pktio_entry;
for (cnt = 1; cnt <= max_sends; cnt++) {
pkt_desc = &tm_system->egress_pkt_desc;
@@ -2124,12 +2274,29 @@ static void tm_send_pkt(tm_system_t *tm_system, uint32_t max_sends)
tm_egress_marking(tm_system, odp_pkt);
tm_system->egress_pkt_desc = EMPTY_PKT_DESC;
- if (tm_system->egress.egress_kind == ODP_TM_EGRESS_PKT_IO)
- odp_pktout_send(tm_system->pktout, &odp_pkt, 1);
- else if (tm_system->egress.egress_kind == ODP_TM_EGRESS_FN)
+ if (tm_system->egress.egress_kind == ODP_TM_EGRESS_PKT_IO) {
+ pktio_entry = get_pktio_entry(tm_system->pktout.pktio);
+ if (odp_unlikely(_odp_pktio_tx_aging_enabled(pktio_entry) &&
+ is_packet_aged(packet_hdr(odp_pkt))))
+ ret = 0; /* Aged packet handled as a discard */
+ else
+ ret = odp_pktout_send(tm_system->pktout, &odp_pkt, 1);
+ if (odp_unlikely(ret != 1)) {
+ if (odp_unlikely(_odp_pktio_tx_compl_enabled(pktio_entry)))
+ _odp_pktio_process_tx_compl(pktio_entry, &odp_pkt, 1);
+ odp_packet_free(odp_pkt);
+ if (odp_unlikely(ret < 0))
+ odp_atomic_inc_u64(&tm_queue_obj->stats.errors);
+ else
+ odp_atomic_inc_u64(&tm_queue_obj->stats.discards);
+ } else {
+ odp_atomic_inc_u64(&tm_queue_obj->stats.packets);
+ }
+ } else if (tm_system->egress.egress_kind == ODP_TM_EGRESS_FN) {
tm_system->egress.egress_fcn(odp_pkt);
- else
+ } else {
return;
+ }
tm_queue_obj->sent_pkt = tm_queue_obj->pkt;
tm_queue_obj->sent_pkt_desc = tm_queue_obj->in_pkt_desc;
@@ -2158,8 +2325,7 @@ static int tm_process_input_work_queue(tm_system_t *tm_system,
for (cnt = 1; cnt <= pkts_to_process; cnt++) {
rc = input_work_queue_remove(input_work_queue, &work_item);
if (rc < 0) {
- ODP_DBG("%s input_work_queue_remove() failed\n",
- __func__);
+ _ODP_DBG("%s input_work_queue_remove() failed\n", __func__);
return rc;
}
@@ -2176,9 +2342,8 @@ static int tm_process_input_work_queue(tm_system_t *tm_system,
/* If the tm_queue_obj already has a pkt to work with,
* then just add this new pkt to the associated
* _odp_int_pkt_queue. */
- (void)_odp_pkt_queue_append(
- tm_system->_odp_int_queue_pool,
- tm_queue_obj->_odp_int_pkt_queue, pkt);
+ (void)_odp_pkt_queue_append(tm_system->_odp_int_queue_pool,
+ tm_queue_obj->_odp_int_pkt_queue, pkt);
tm_queue_obj->pkts_enqueued_cnt++;
} else {
/* If the tm_queue_obj doesn't have a pkt to work
@@ -2229,7 +2394,7 @@ static int tm_process_expired_timers(tm_system_t *tm_system,
if (tm_queue_obj->timer_cancels_outstanding != 0)
tm_queue_obj->timer_cancels_outstanding--;
else
- ODP_DBG("%s bad timer return\n", __func__);
+ _ODP_DBG("%s bad timer return\n", __func__);
return work_done;
}
@@ -2250,31 +2415,24 @@ static int tm_process_expired_timers(tm_system_t *tm_system,
return work_done;
}
-static volatile uint64_t busy_wait_counter;
-
-static odp_bool_t main_loop_running;
-static odp_atomic_u64_t atomic_request_cnt;
-static odp_atomic_u64_t currently_serving_cnt;
-static odp_atomic_u64_t atomic_done_cnt;
-
static void busy_wait(uint32_t iterations)
{
uint32_t cnt;
for (cnt = 1; cnt <= iterations; cnt++)
- busy_wait_counter++;
+ tm_glb->busy_wait_counter++;
}
static void signal_request(void)
{
- uint64_t my_request_num, serving_cnt;
+ uint64_t request_num, serving;
- my_request_num = odp_atomic_fetch_inc_u64(&atomic_request_cnt) + 1;
+ request_num = odp_atomic_fetch_inc_u64(&tm_glb->atomic_request_cnt) + 1;
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt);
- while (serving_cnt != my_request_num) {
+ serving = odp_atomic_load_u64(&tm_glb->currently_serving_cnt);
+ while (serving != request_num) {
busy_wait(100);
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt);
+ serving = odp_atomic_load_u64(&tm_glb->currently_serving_cnt);
}
}
@@ -2282,26 +2440,26 @@ static void check_for_request(void)
{
uint64_t request_num, serving_cnt, done_cnt;
- request_num = odp_atomic_load_u64(&atomic_request_cnt);
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt);
+ request_num = odp_atomic_load_u64(&tm_glb->atomic_request_cnt);
+ serving_cnt = odp_atomic_load_u64(&tm_glb->currently_serving_cnt);
if (serving_cnt == request_num)
return;
/* Signal the other requesting thread to proceed and then
* wait for their done indication */
- odp_atomic_inc_u64(&currently_serving_cnt);
+ odp_atomic_inc_u64(&tm_glb->currently_serving_cnt);
busy_wait(100);
- done_cnt = odp_atomic_load_u64(&atomic_done_cnt);
+ done_cnt = odp_atomic_load_u64(&tm_glb->atomic_done_cnt);
while (done_cnt != request_num) {
busy_wait(100);
- done_cnt = odp_atomic_load_u64(&atomic_done_cnt);
+ done_cnt = odp_atomic_load_u64(&tm_glb->atomic_done_cnt);
}
}
static void signal_request_done(void)
{
- odp_atomic_inc_u64(&atomic_done_cnt);
+ odp_atomic_inc_u64(&tm_glb->atomic_done_cnt);
}
static int thread_affinity_get(odp_cpumask_t *odp_cpu_mask)
@@ -2313,8 +2471,7 @@ static int thread_affinity_get(odp_cpumask_t *odp_cpu_mask)
CPU_ZERO(&linux_cpu_set);
rc = sched_getaffinity(0, sizeof(cpu_set_t), &linux_cpu_set);
if (rc != 0) {
- ODP_DBG("%s sched_getaffinity failed with rc=%d\n",
- __func__, rc);
+ _ODP_DBG("%s sched_getaffinity failed with rc=%d\n", __func__, rc);
return -1;
}
@@ -2336,18 +2493,18 @@ static void *tm_system_thread(void *arg)
uint32_t destroying, work_queue_cnt, timer_cnt;
int rc;
- rc = odp_init_local((odp_instance_t)odp_global_data.main_pid,
+ rc = odp_init_local((odp_instance_t)odp_global_ro.main_pid,
ODP_THREAD_WORKER);
- ODP_ASSERT(rc == 0);
+ _ODP_ASSERT(rc == 0);
tm_group = arg;
tm_system = tm_group->first_tm_system;
_odp_int_timer_wheel = tm_system->_odp_int_timer_wheel;
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
/* Wait here until we have seen the first enqueue operation. */
odp_barrier_wait(&tm_group->tm_group_barrier);
- main_loop_running = true;
+ tm_glb->main_loop_running = true;
destroying = odp_atomic_load_u64(&tm_system->destroying);
@@ -2397,11 +2554,13 @@ static void *tm_system_thread(void *arg)
/* Advance to the next tm_system in the tm_system_group. */
tm_system = tm_system->next;
_odp_int_timer_wheel = tm_system->_odp_int_timer_wheel;
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
}
odp_barrier_wait(&tm_system->tm_system_destroy_barrier);
- odp_term_local();
+ if (odp_term_local() < 0)
+ _ODP_ERR("Term local failed\n");
+
return NULL;
}
@@ -2416,6 +2575,8 @@ odp_bool_t odp_tm_is_idle(odp_tm_t odp_tm)
void odp_tm_requirements_init(odp_tm_requirements_t *requirements)
{
memset(requirements, 0, sizeof(odp_tm_requirements_t));
+
+ requirements->pkt_prio_mode = ODP_TM_PKT_PRIO_MODE_PRESERVE;
}
void odp_tm_egress_init(odp_tm_egress_t *egress)
@@ -2423,21 +2584,8 @@ void odp_tm_egress_init(odp_tm_egress_t *egress)
memset(egress, 0, sizeof(odp_tm_egress_t));
}
-static tm_node_obj_t *create_dummy_root_node(void)
-{
- tm_node_obj_t *tm_node_obj;
-
- tm_node_obj = malloc(sizeof(tm_node_obj_t));
- if (!tm_node_obj)
- return NULL;
-
- memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
- tm_node_obj->is_root_node = true;
- return tm_node_obj;
-}
-
-int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
- uint32_t capabilities_size)
+static int tm_capabilities(odp_tm_capabilities_t capabilities[],
+ uint32_t capabilities_size)
{
odp_tm_level_capabilities_t *per_level_cap;
odp_tm_capabilities_t *cap_ptr;
@@ -2450,6 +2598,9 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
cap_ptr = &capabilities[0];
memset(cap_ptr, 0, sizeof(odp_tm_capabilities_t));
+ if (odp_global_ro.init_param.mem_model == ODP_MEM_MODEL_PROCESS)
+ return 1;
+
cap_ptr->max_tm_queues = ODP_TM_MAX_TM_QUEUES;
cap_ptr->max_levels = ODP_TM_MAX_LEVELS;
cap_ptr->tm_queue_shaper_supported = true;
@@ -2460,6 +2611,24 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
cap_ptr->ecn_marking_supported = true;
cap_ptr->drop_prec_marking_supported = true;
+ cap_ptr->tm_queue_threshold.byte = true;
+ cap_ptr->tm_queue_threshold.packet = true;
+ cap_ptr->tm_queue_threshold.byte_and_packet = true;
+
+ cap_ptr->tm_queue_query_flags = (ODP_TM_QUERY_PKT_CNT |
+ ODP_TM_QUERY_BYTE_CNT |
+ ODP_TM_QUERY_THRESHOLDS);
+ cap_ptr->max_schedulers_per_node = ODP_TM_MAX_PRIORITIES;
+
+ cap_ptr->dynamic_topology_update = true;
+ cap_ptr->dynamic_shaper_update = true;
+ cap_ptr->dynamic_sched_update = true;
+ cap_ptr->dynamic_wred_update = true;
+ cap_ptr->dynamic_threshold_update = true;
+
+ /* We only support pkt priority mode preserve */
+ cap_ptr->pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] = true;
+
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
cap_ptr->marking_colors_supported[color] = true;
@@ -2471,17 +2640,55 @@ int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
per_level_cap->max_priority = ODP_TM_MAX_PRIORITIES - 1;
per_level_cap->min_weight = ODP_TM_MIN_SCHED_WEIGHT;
per_level_cap->max_weight = ODP_TM_MAX_SCHED_WEIGHT;
+ per_level_cap->min_burst = 0;
+ per_level_cap->max_burst = UINT32_MAX;
+ per_level_cap->min_rate = TM_MIN_SHAPER_BW;
+ per_level_cap->max_rate = TM_MAX_SHAPER_BW;
per_level_cap->tm_node_shaper_supported = true;
per_level_cap->tm_node_wred_supported = true;
per_level_cap->tm_node_dual_slope_supported = true;
per_level_cap->fair_queuing_supported = true;
per_level_cap->weights_supported = true;
+
+ per_level_cap->tm_node_threshold.byte = true;
+ per_level_cap->tm_node_threshold.packet = true;
+ per_level_cap->tm_node_threshold.byte_and_packet = true;
}
+ cap_ptr->queue_stats.counter.discards = 1;
+ cap_ptr->queue_stats.counter.errors = 1;
+ cap_ptr->queue_stats.counter.packets = 1;
+
return 1;
}
+int odp_tm_egress_capabilities(odp_tm_capabilities_t *capabilities,
+ const odp_tm_egress_t *egress)
+{
+ pktio_entry_t *entry;
+ int ret;
+
+ memset(capabilities, 0, sizeof(odp_tm_capabilities_t));
+ if (egress->egress_kind == ODP_TM_EGRESS_PKT_IO) {
+ entry = get_pktio_entry(egress->pktio);
+ if (entry == NULL) {
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n",
+ (uintptr_t)egress->pktio);
+ return -1;
+ }
+
+ /* Report not capable if pktout mode is not TM */
+ if (entry->param.out_mode != ODP_PKTOUT_MODE_TM)
+ return 0;
+ }
+
+ ret = tm_capabilities(capabilities, 1);
+ if (ret <= 0)
+ return -1;
+ return 0;
+}
+
static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
odp_tm_requirements_t *req_ptr)
{
@@ -2489,19 +2696,20 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
odp_tm_level_capabilities_t *per_level_cap;
odp_packet_color_t color;
odp_bool_t shaper_supported, wred_supported;
- odp_bool_t dual_slope;
+ odp_bool_t dual_slope, threshold;
uint32_t num_levels, level_idx, max_nodes;
uint32_t max_queues, max_fanin;
- uint8_t max_priority, min_weight, max_weight;
+ uint32_t min_weight, max_weight;
+ uint8_t max_priority;
- num_levels = MAX(MIN(req_ptr->num_levels, ODP_TM_MAX_LEVELS), 1);
+ num_levels = _ODP_MAX(_ODP_MIN(req_ptr->num_levels, ODP_TM_MAX_LEVELS), 1);
memset(cap_ptr, 0, sizeof(odp_tm_capabilities_t));
- max_queues = MIN(req_ptr->max_tm_queues,
- ODP_TM_MAX_NUM_TM_NODES);
+ max_queues = _ODP_MIN(req_ptr->max_tm_queues, (uint32_t)ODP_TM_MAX_NUM_TM_NODES);
shaper_supported = req_ptr->tm_queue_shaper_needed;
wred_supported = req_ptr->tm_queue_wred_needed;
dual_slope = req_ptr->tm_queue_dual_slope_needed;
+ threshold = req_ptr->tm_queue_threshold_needed;
cap_ptr->max_tm_queues = max_queues;
cap_ptr->max_levels = num_levels;
@@ -2513,6 +2721,23 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
cap_ptr->drop_prec_marking_supported =
req_ptr->drop_prec_marking_needed;
+ cap_ptr->tm_queue_threshold.byte = threshold;
+ cap_ptr->tm_queue_threshold.packet = threshold;
+ cap_ptr->tm_queue_threshold.byte_and_packet = threshold;
+
+ cap_ptr->tm_queue_query_flags = (ODP_TM_QUERY_PKT_CNT |
+ ODP_TM_QUERY_BYTE_CNT |
+ ODP_TM_QUERY_THRESHOLDS);
+ cap_ptr->max_schedulers_per_node = ODP_TM_MAX_PRIORITIES;
+
+ cap_ptr->dynamic_topology_update = true;
+ cap_ptr->dynamic_shaper_update = true;
+ cap_ptr->dynamic_sched_update = true;
+ cap_ptr->dynamic_wred_update = true;
+ cap_ptr->dynamic_threshold_update = true;
+
+ cap_ptr->pkt_prio_modes[ODP_TM_PKT_PRIO_MODE_PRESERVE] = true;
+
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
cap_ptr->marking_colors_supported[color] =
req_ptr->marking_colors_needed[color];
@@ -2521,31 +2746,45 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
per_level_cap = &cap_ptr->per_level[level_idx];
per_level_req = &req_ptr->per_level[level_idx];
- max_nodes = MIN(per_level_req->max_num_tm_nodes,
- ODP_TM_MAX_NUM_TM_NODES);
- max_fanin = MIN(per_level_req->max_fanin_per_node, 1024);
- max_priority = MIN(per_level_req->max_priority,
- ODP_TM_MAX_PRIORITIES - 1);
- min_weight = MAX(per_level_req->min_weight,
- ODP_TM_MIN_SCHED_WEIGHT);
- max_weight = MIN(per_level_req->max_weight,
- ODP_TM_MAX_SCHED_WEIGHT);
+ max_nodes = _ODP_MIN(per_level_req->max_num_tm_nodes,
+ (uint32_t)ODP_TM_MAX_NUM_TM_NODES);
+ max_fanin = _ODP_MIN(per_level_req->max_fanin_per_node,
+ UINT32_C(1024));
+ max_priority = _ODP_MIN(per_level_req->max_priority,
+ ODP_TM_MAX_PRIORITIES - 1);
+ min_weight = _ODP_MAX(per_level_req->min_weight,
+ ODP_TM_MIN_SCHED_WEIGHT);
+ max_weight = _ODP_MIN(per_level_req->max_weight,
+ ODP_TM_MAX_SCHED_WEIGHT);
shaper_supported = per_level_req->tm_node_shaper_needed;
wred_supported = per_level_req->tm_node_wred_needed;
dual_slope = per_level_req->tm_node_dual_slope_needed;
+ threshold = per_level_req->tm_node_threshold_needed;
per_level_cap->max_num_tm_nodes = max_nodes;
per_level_cap->max_fanin_per_node = max_fanin;
per_level_cap->max_priority = max_priority;
per_level_cap->min_weight = min_weight;
per_level_cap->max_weight = max_weight;
+ per_level_cap->min_burst = 0;
+ per_level_cap->max_burst = UINT32_MAX;
+ per_level_cap->min_rate = TM_MIN_SHAPER_BW;
+ per_level_cap->max_rate = TM_MAX_SHAPER_BW;
per_level_cap->tm_node_shaper_supported = shaper_supported;
per_level_cap->tm_node_wred_supported = wred_supported;
per_level_cap->tm_node_dual_slope_supported = dual_slope;
per_level_cap->fair_queuing_supported = true;
per_level_cap->weights_supported = true;
+
+ per_level_cap->tm_node_threshold.byte = threshold;
+ per_level_cap->tm_node_threshold.packet = threshold;
+ per_level_cap->tm_node_threshold.byte_and_packet = threshold;
}
+
+ cap_ptr->queue_stats.counter.discards = 1;
+ cap_ptr->queue_stats.counter.errors = 1;
+ cap_ptr->queue_stats.counter.packets = 1;
}
static int affinitize_main_thread(void)
@@ -2563,7 +2802,7 @@ static int affinitize_main_thread(void)
* just record this value and return. */
cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count == 1) {
- g_main_thread_cpu = odp_cpumask_first(&odp_cpu_mask);
+ tm_glb->main_thread_cpu = odp_cpumask_first(&odp_cpu_mask);
return 0;
} else if (cpu_count == 0) {
return -1;
@@ -2575,10 +2814,9 @@ static int affinitize_main_thread(void)
CPU_SET(cpu_num, &linux_cpu_set);
rc = sched_setaffinity(0, sizeof(cpu_set_t), &linux_cpu_set);
if (rc == 0)
- g_main_thread_cpu = cpu_num;
+ tm_glb->main_thread_cpu = cpu_num;
else
- ODP_DBG("%s sched_setaffinity failed with rc=%d\n",
- __func__, rc);
+ _ODP_DBG("%s sched_setaffinity failed with rc=%d\n", __func__, rc);
return rc;
}
@@ -2588,32 +2826,32 @@ static uint32_t tm_thread_cpu_select(void)
int cpu_count, cpu;
odp_cpumask_default_worker(&odp_cpu_mask, 0);
- if ((g_main_thread_cpu != -1) &&
- odp_cpumask_isset(&odp_cpu_mask, g_main_thread_cpu))
- odp_cpumask_clr(&odp_cpu_mask, g_main_thread_cpu);
+ if ((tm_glb->main_thread_cpu != -1) &&
+ odp_cpumask_isset(&odp_cpu_mask, tm_glb->main_thread_cpu))
+ odp_cpumask_clr(&odp_cpu_mask, tm_glb->main_thread_cpu);
cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count < 1) {
odp_cpumask_all_available(&odp_cpu_mask);
- if ((g_main_thread_cpu != -1) &&
- odp_cpumask_isset(&odp_cpu_mask, g_main_thread_cpu))
+ if ((tm_glb->main_thread_cpu != -1) &&
+ odp_cpumask_isset(&odp_cpu_mask, tm_glb->main_thread_cpu))
cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count < 1)
odp_cpumask_all_available(&odp_cpu_mask);
}
- if (g_tm_cpu_num == 0) {
+ if (tm_glb->cpu_num == 0) {
cpu = odp_cpumask_first(&odp_cpu_mask);
} else {
- cpu = odp_cpumask_next(&odp_cpu_mask, g_tm_cpu_num);
+ cpu = odp_cpumask_next(&odp_cpu_mask, tm_glb->cpu_num);
if (cpu == -1) {
- g_tm_cpu_num = 0;
+ tm_glb->cpu_num = 0;
cpu = odp_cpumask_first(&odp_cpu_mask);
}
}
- g_tm_cpu_num++;
+ tm_glb->cpu_num++;
return cpu;
}
@@ -2633,68 +2871,28 @@ static int tm_thread_create(tm_system_group_t *tm_group)
rc = pthread_create(&tm_group->thread, &tm_group->attr,
tm_system_thread, tm_group);
if (rc != 0)
- ODP_DBG("Failed to start thread on cpu num=%u\n", cpu_num);
-
+ _ODP_ERR("Failed to start TM thread on CPU #%u: %d\n", cpu_num, rc);
return rc;
}
-static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
-{
- tm_system_group_t *tm_group, *first_tm_group, *second_tm_group;
-
- tm_group = malloc(sizeof(tm_system_group_t));
- memset(tm_group, 0, sizeof(tm_system_group_t));
- odp_barrier_init(&tm_group->tm_group_barrier, 2);
-
- /* Add this group to the tm_group_list linked list. */
- if (tm_group_list == NULL) {
- tm_group_list = tm_group;
- tm_group->next = tm_group;
- tm_group->prev = tm_group;
- } else {
- first_tm_group = tm_group_list;
- second_tm_group = first_tm_group->next;
- first_tm_group->next = tm_group;
- second_tm_group->prev = tm_group;
- tm_group->next = second_tm_group;
- tm_group->prev = first_tm_group;
- }
-
- return MAKE_ODP_TM_SYSTEM_GROUP(tm_group);
-}
-
static void _odp_tm_group_destroy(_odp_tm_group_t odp_tm_group)
{
- tm_system_group_t *tm_group, *prev_tm_group, *next_tm_group;
+ tm_system_group_t *tm_group;
int rc;
tm_group = GET_TM_GROUP(odp_tm_group);
/* Wait for the thread to exit. */
- ODP_ASSERT(tm_group->num_tm_systems <= 1);
+ _ODP_ASSERT(tm_group->num_tm_systems <= 1);
rc = pthread_join(tm_group->thread, NULL);
- ODP_ASSERT(rc == 0);
+ _ODP_ASSERT(rc == 0);
pthread_attr_destroy(&tm_group->attr);
- if (g_tm_cpu_num > 0)
- g_tm_cpu_num--;
-
- /* Remove this group from the tm_group_list linked list. Special case
- * when this is the last tm_group in the linked list. */
- prev_tm_group = tm_group->prev;
- next_tm_group = tm_group->next;
- if (prev_tm_group == next_tm_group) {
- ODP_ASSERT(tm_group_list == tm_group);
- tm_group_list = NULL;
- } else {
- prev_tm_group->next = next_tm_group;
- next_tm_group->prev = prev_tm_group;
- if (tm_group_list == tm_group)
- tm_group_list = next_tm_group;
- }
+ if (tm_glb->cpu_num > 0)
+ tm_glb->cpu_num--;
- tm_group->prev = NULL;
- tm_group->next = NULL;
- free(tm_group);
+ odp_ticketlock_lock(&tm_glb->system_group.lock);
+ tm_group->status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&tm_glb->system_group.lock);
}
static int _odp_tm_group_add(_odp_tm_group_t odp_tm_group, odp_tm_t odp_tm)
@@ -2766,12 +2964,21 @@ static int _odp_tm_group_remove(_odp_tm_group_t odp_tm_group, odp_tm_t odp_tm)
return 0;
}
+static void _odp_tm_init_tm_group(tm_system_group_t *tm_group)
+{
+ memset(tm_group, 0, sizeof(tm_system_group_t));
+
+ tm_group->status = TM_STATUS_RESERVED;
+ odp_barrier_init(&tm_group->tm_group_barrier, 2);
+}
+
static int tm_group_attach(odp_tm_t odp_tm)
{
tm_system_group_t *tm_group, *min_tm_group;
_odp_tm_group_t odp_tm_group;
odp_cpumask_t all_cpus, worker_cpus;
uint32_t total_cpus, avail_cpus;
+ uint32_t i;
/* If this platform has a small number of cpu's then allocate one
* tm_group and assign all tm_system's to this tm_group. Otherwise in
@@ -2785,34 +2992,37 @@ static int tm_group_attach(odp_tm_t odp_tm)
avail_cpus = odp_cpumask_count(&worker_cpus);
if (total_cpus < 24) {
- tm_group = tm_group_list;
- odp_tm_group = MAKE_ODP_TM_SYSTEM_GROUP(tm_group);
- if (tm_group == NULL)
- odp_tm_group = _odp_tm_group_create("");
+ tm_group = &tm_glb->system_group.group[0];
- _odp_tm_group_add(odp_tm_group, odp_tm);
- return 0;
- }
+ odp_ticketlock_lock(&tm_glb->system_group.lock);
+ if (tm_group->status == TM_STATUS_FREE)
+ _odp_tm_init_tm_group(tm_group);
+ odp_ticketlock_unlock(&tm_glb->system_group.lock);
- /* Manycore case. */
- if ((tm_group_list == NULL) || (avail_cpus > 1)) {
- odp_tm_group = _odp_tm_group_create("");
+ odp_tm_group = MAKE_ODP_TM_SYSTEM_GROUP(tm_group);
_odp_tm_group_add(odp_tm_group, odp_tm);
return 0;
}
/* Pick a tm_group according to the smallest number of tm_systems. */
- tm_group = tm_group_list;
min_tm_group = NULL;
- while (tm_group != NULL) {
+ odp_ticketlock_lock(&tm_glb->system_group.lock);
+ for (i = 0; i < ODP_TM_MAX_NUM_SYSTEMS && i < avail_cpus; i++) {
+ tm_group = &tm_glb->system_group.group[i];
+
+ if (tm_group->status == TM_STATUS_FREE) {
+ _odp_tm_init_tm_group(tm_group);
+ min_tm_group = tm_group;
+ break;
+ }
+
if (min_tm_group == NULL)
min_tm_group = tm_group;
else if (tm_group->num_tm_systems <
min_tm_group->num_tm_systems)
min_tm_group = tm_group;
-
- tm_group = tm_group->next;
}
+ odp_ticketlock_unlock(&tm_glb->system_group.lock);
if (min_tm_group == NULL)
return -1;
@@ -2831,22 +3041,42 @@ odp_tm_t odp_tm_create(const char *name,
odp_bool_t create_fail;
odp_tm_t odp_tm;
odp_pktout_queue_t pktout;
- uint32_t malloc_len, max_num_queues, max_queued_pkts, max_timers;
+ uint32_t max_num_queues, max_queued_pkts, max_timers;
uint32_t max_tm_queues, max_sorted_lists;
int rc;
+ if (odp_global_ro.disable.traffic_mngr) {
+ _ODP_ERR("TM has been disabled\n");
+ return ODP_TM_INVALID;
+ }
+
+ if (odp_global_ro.init_param.mem_model == ODP_MEM_MODEL_PROCESS) {
+ _ODP_ERR("TM is not supported in process mode\n");
+ return ODP_TM_INVALID;
+ }
+
+ /* We only support global pkt priority mode */
+ if (requirements->pkt_prio_mode != ODP_TM_PKT_PRIO_MODE_PRESERVE) {
+ _ODP_ERR("Unsupported Packet priority mode\n");
+ return ODP_TM_INVALID;
+ }
+ odp_ticketlock_lock(&tm_glb->create_lock);
+
/* If we are using pktio output (usual case) get the first associated
* pktout_queue for this pktio and fail if there isn't one.
*/
- if (egress->egress_kind == ODP_TM_EGRESS_PKT_IO &&
- odp_pktout_queue(egress->pktio, &pktout, 1) != 1)
- return ODP_TM_INVALID;
+ if (egress->egress_kind == ODP_TM_EGRESS_PKT_IO) {
+ rc = _odp_pktio_pktout_tm_config(egress->pktio, &pktout, false);
+ if (rc) {
+ odp_ticketlock_unlock(&tm_glb->create_lock);
+ return ODP_TM_INVALID;
+ }
+ }
/* Allocate tm_system_t record. */
- odp_ticketlock_lock(&tm_create_lock);
tm_system = tm_system_alloc();
if (!tm_system) {
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return ODP_TM_INVALID;
}
@@ -2854,7 +3084,7 @@ odp_tm_t odp_tm_create(const char *name,
name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_HANDLE, odp_tm);
if (name_tbl_id == ODP_INVALID_NAME) {
tm_system_free(tm_system);
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return ODP_TM_INVALID;
}
@@ -2870,10 +3100,7 @@ odp_tm_t odp_tm_create(const char *name,
tm_system_capabilities_set(&tm_system->capabilities,
&tm_system->requirements);
- malloc_len = max_tm_queues * sizeof(tm_queue_obj_t *);
- tm_system->queue_num_tbl = malloc(malloc_len);
- memset(tm_system->queue_num_tbl, 0, malloc_len);
- tm_system->next_queue_num = 1;
+ tm_system->root_node.is_root_node = true;
tm_init_random_data(&tm_system->tm_random_data);
@@ -2890,34 +3117,24 @@ odp_tm_t odp_tm_create(const char *name,
odp_ticketlock_init(&tm_system->tm_system_lock);
odp_atomic_init_u64(&tm_system->destroying, 0);
- tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(
- max_sorted_lists);
+ tm_system->_odp_int_sorted_pool = _odp_sorted_pool_create(max_sorted_lists);
create_fail |= tm_system->_odp_int_sorted_pool
== _ODP_INT_SORTED_POOL_INVALID;
if (create_fail == 0) {
- tm_system->_odp_int_queue_pool = _odp_queue_pool_create(
- max_num_queues, max_queued_pkts);
+ tm_system->_odp_int_queue_pool = _odp_queue_pool_create(max_num_queues,
+ max_queued_pkts);
create_fail |= tm_system->_odp_int_queue_pool
== _ODP_INT_QUEUE_POOL_INVALID;
}
if (create_fail == 0) {
- tm_system->_odp_int_timer_wheel = _odp_timer_wheel_create(
- max_timers, tm_system);
+ tm_system->_odp_int_timer_wheel = _odp_timer_wheel_create(max_timers, tm_system);
create_fail |= tm_system->_odp_int_timer_wheel
== _ODP_INT_TIMER_WHEEL_INVALID;
}
- if (create_fail == 0) {
- tm_system->root_node = create_dummy_root_node();
- create_fail |= tm_system->root_node == NULL;
- }
-
- if (create_fail == 0) {
- tm_system->input_work_queue = input_work_queue_create();
- create_fail |= !tm_system->input_work_queue;
- }
+ input_work_queue_init(&tm_system->input_work_queue);
if (create_fail == 0) {
/* Pass any odp_groups or hints to tm_group_attach here. */
@@ -2928,30 +3145,22 @@ odp_tm_t odp_tm_create(const char *name,
if (create_fail) {
_odp_int_name_tbl_delete(name_tbl_id);
- if (tm_system->input_work_queue)
- input_work_queue_destroy(tm_system->input_work_queue);
- if (tm_system->_odp_int_sorted_pool
- != _ODP_INT_SORTED_POOL_INVALID)
- _odp_sorted_pool_destroy(
- tm_system->_odp_int_sorted_pool);
+ if (tm_system->_odp_int_sorted_pool != _ODP_INT_SORTED_POOL_INVALID)
+ _odp_sorted_pool_destroy(tm_system->_odp_int_sorted_pool);
- if (tm_system->_odp_int_queue_pool !=
- _ODP_INT_QUEUE_POOL_INVALID)
- _odp_queue_pool_destroy(
- tm_system->_odp_int_queue_pool);
+ if (tm_system->_odp_int_queue_pool != _ODP_INT_QUEUE_POOL_INVALID)
+ _odp_queue_pool_destroy(tm_system->_odp_int_queue_pool);
- if (tm_system->_odp_int_timer_wheel
- != _ODP_INT_TIMER_WHEEL_INVALID)
- _odp_timer_wheel_destroy(
- tm_system->_odp_int_timer_wheel);
+ if (tm_system->_odp_int_timer_wheel != _ODP_INT_TIMER_WHEEL_INVALID)
+ _odp_timer_wheel_destroy(tm_system->_odp_int_timer_wheel);
tm_system_free(tm_system);
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return ODP_TM_INVALID;
}
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return odp_tm;
}
@@ -2979,6 +3188,22 @@ int odp_tm_capability(odp_tm_t odp_tm, odp_tm_capabilities_t *capabilities)
return 0;
}
+int odp_tm_start(odp_tm_t odp_tm)
+{
+ (void)odp_tm;
+
+ /* Nothing more to do after TM create */
+ return 0;
+}
+
+int odp_tm_stop(odp_tm_t odp_tm)
+{
+ (void)odp_tm;
+
+ /* Nothing more to do for topology changes */
+ return 0;
+}
+
int odp_tm_destroy(odp_tm_t odp_tm)
{
tm_system_t *tm_system;
@@ -2997,11 +3222,12 @@ int odp_tm_destroy(odp_tm_t odp_tm)
* allocated by this group. */
_odp_tm_group_remove(tm_system->odp_tm_group, odp_tm);
- input_work_queue_destroy(tm_system->input_work_queue);
+ input_work_queue_destroy(&tm_system->input_work_queue);
_odp_sorted_pool_destroy(tm_system->_odp_int_sorted_pool);
_odp_queue_pool_destroy(tm_system->_odp_int_queue_pool);
_odp_timer_wheel_destroy(tm_system->_odp_int_timer_wheel);
+ _odp_int_name_tbl_delete(tm_system->name_tbl_id);
tm_system_free(tm_system);
return 0;
}
@@ -3135,14 +3361,17 @@ void odp_tm_shaper_params_init(odp_tm_shaper_params_t *params)
}
odp_tm_shaper_t odp_tm_shaper_create(const char *name,
- odp_tm_shaper_params_t *params)
+ const odp_tm_shaper_params_t *params)
{
tm_shaper_params_t *profile_obj;
odp_tm_shaper_t shaper_handle;
_odp_int_name_t name_tbl_id;
+ /* We don't support shaper in packet mode */
+ if (params->packet_mode)
+ return ODP_TM_INVALID;
+
profile_obj = tm_common_profile_create(name, TM_SHAPER_PROFILE,
- sizeof(tm_shaper_params_t),
&shaper_handle, &name_tbl_id);
if (!profile_obj)
return ODP_TM_INVALID;
@@ -3168,7 +3397,6 @@ int odp_tm_shaper_destroy(odp_tm_shaper_t shaper_profile)
return -1;
return tm_common_profile_destroy(shaper_profile,
- sizeof(tm_shaper_params_t),
profile_obj->name_tbl_id);
}
@@ -3189,7 +3417,7 @@ int odp_tm_shaper_params_read(odp_tm_shaper_t shaper_profile,
}
int odp_tm_shaper_params_update(odp_tm_shaper_t shaper_profile,
- odp_tm_shaper_params_t *params)
+ const odp_tm_shaper_params_t *params)
{
tm_shaper_params_t *profile_obj;
@@ -3200,7 +3428,7 @@ int odp_tm_shaper_params_update(odp_tm_shaper_t shaper_profile,
if (!profile_obj)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
tm_shaper_params_cvt_to(params, profile_obj);
return 0;
}
@@ -3226,19 +3454,19 @@ void odp_tm_sched_params_init(odp_tm_sched_params_t *params)
memset(params, 0, sizeof(odp_tm_sched_params_t));
}
-static void tm_sched_params_cvt_to(odp_tm_sched_params_t *odp_sched_params,
+static void tm_sched_params_cvt_to(const odp_tm_sched_params_t *sched_params,
tm_sched_params_t *tm_sched_params)
{
odp_tm_sched_mode_t sched_mode;
uint32_t priority, weight, inv_weight;
for (priority = 0; priority < ODP_TM_MAX_PRIORITIES; priority++) {
- sched_mode = odp_sched_params->sched_modes[priority];
- weight = odp_sched_params->sched_weights[priority];
+ sched_mode = sched_params->sched_modes[priority];
+ weight = sched_params->sched_weights[priority];
if (weight == 0)
inv_weight = 0;
else
- inv_weight = 0x10000 / weight;
+ inv_weight = 0xFFFF / weight;
tm_sched_params->sched_modes[priority] = sched_mode;
tm_sched_params->inverted_weights[priority] = inv_weight;
@@ -3254,7 +3482,7 @@ static void tm_sched_params_cvt_from(tm_sched_params_t *tm_sched_params,
for (priority = 0; priority < ODP_TM_MAX_PRIORITIES; priority++) {
sched_mode = tm_sched_params->sched_modes[priority];
inv_weight = tm_sched_params->inverted_weights[priority];
- weight = 0x10000 / inv_weight;
+ weight = 0xFFFF / inv_weight;
odp_sched_params->sched_modes[priority] = sched_mode;
odp_sched_params->sched_weights[priority] = weight;
@@ -3262,14 +3490,13 @@ static void tm_sched_params_cvt_from(tm_sched_params_t *tm_sched_params,
}
odp_tm_sched_t odp_tm_sched_create(const char *name,
- odp_tm_sched_params_t *params)
+ const odp_tm_sched_params_t *params)
{
tm_sched_params_t *profile_obj;
_odp_int_name_t name_tbl_id;
odp_tm_sched_t sched_handle;
profile_obj = tm_common_profile_create(name, TM_SCHED_PROFILE,
- sizeof(tm_sched_params_t),
&sched_handle, &name_tbl_id);
if (!profile_obj)
return ODP_TM_INVALID;
@@ -3295,7 +3522,6 @@ int odp_tm_sched_destroy(odp_tm_sched_t sched_profile)
return -1;
return tm_common_profile_destroy(sched_profile,
- sizeof(tm_sched_params_t),
profile_obj->name_tbl_id);
}
@@ -3316,7 +3542,7 @@ int odp_tm_sched_params_read(odp_tm_sched_t sched_profile,
}
int odp_tm_sched_params_update(odp_tm_sched_t sched_profile,
- odp_tm_sched_params_t *params)
+ const odp_tm_sched_params_t *params)
{
tm_sched_params_t *profile_obj;
@@ -3327,7 +3553,7 @@ int odp_tm_sched_params_update(odp_tm_sched_t sched_profile,
if (!profile_obj)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
tm_sched_params_cvt_to(params, profile_obj);
return 0;
}
@@ -3354,14 +3580,14 @@ void odp_tm_threshold_params_init(odp_tm_threshold_params_t *params)
}
odp_tm_threshold_t odp_tm_threshold_create(const char *name,
- odp_tm_threshold_params_t *params)
+ const odp_tm_threshold_params_t
+ *params)
{
tm_queue_thresholds_t *profile_obj;
odp_tm_threshold_t threshold_handle;
_odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_THRESHOLD_PROFILE,
- sizeof(tm_queue_thresholds_t),
&threshold_handle, &name_tbl_id);
if (!profile_obj)
return ODP_TM_INVALID;
@@ -3390,7 +3616,6 @@ int odp_tm_threshold_destroy(odp_tm_threshold_t threshold_profile)
return -1;
return tm_common_profile_destroy(threshold_profile,
- sizeof(odp_tm_threshold_params_t),
threshold_params->name_tbl_id);
}
@@ -3415,7 +3640,7 @@ int odp_tm_thresholds_params_read(odp_tm_threshold_t threshold_profile,
}
int odp_tm_thresholds_params_update(odp_tm_threshold_t threshold_profile,
- odp_tm_threshold_params_t *params)
+ const odp_tm_threshold_params_t *params)
{
tm_queue_thresholds_t *profile_obj;
@@ -3427,7 +3652,7 @@ int odp_tm_thresholds_params_update(odp_tm_threshold_t threshold_profile,
if (!profile_obj)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
profile_obj->max_pkts =
params->enable_max_pkts ? params->max_pkts : 0;
profile_obj->max_bytes =
@@ -3459,15 +3684,15 @@ void odp_tm_wred_params_init(odp_tm_wred_params_t *params)
memset(params, 0, sizeof(odp_tm_wred_params_t));
}
-static void tm_wred_params_cvt_to(odp_tm_wred_params_t *odp_tm_wred_params,
+static void tm_wred_params_cvt_to(const odp_tm_wred_params_t *params,
tm_wred_params_t *wred_params)
{
- wred_params->min_threshold = odp_tm_wred_params->min_threshold;
- wred_params->med_threshold = odp_tm_wred_params->med_threshold;
- wred_params->med_drop_prob = odp_tm_wred_params->med_drop_prob;
- wred_params->max_drop_prob = odp_tm_wred_params->max_drop_prob;
- wred_params->enable_wred = odp_tm_wred_params->enable_wred;
- wred_params->use_byte_fullness = odp_tm_wred_params->use_byte_fullness;
+ wred_params->min_threshold = params->min_threshold;
+ wred_params->med_threshold = params->med_threshold;
+ wred_params->med_drop_prob = params->med_drop_prob;
+ wred_params->max_drop_prob = params->max_drop_prob;
+ wred_params->enable_wred = params->enable_wred;
+ wred_params->use_byte_fullness = params->use_byte_fullness;
}
static void tm_wred_params_cvt_from(tm_wred_params_t *wred_params,
@@ -3481,14 +3706,14 @@ static void tm_wred_params_cvt_from(tm_wred_params_t *wred_params,
odp_tm_wred_params->use_byte_fullness = wred_params->use_byte_fullness;
}
-odp_tm_wred_t odp_tm_wred_create(const char *name, odp_tm_wred_params_t *params)
+odp_tm_wred_t odp_tm_wred_create(const char *name,
+ const odp_tm_wred_params_t *params)
{
tm_wred_params_t *profile_obj;
odp_tm_wred_t wred_handle;
_odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_WRED_PROFILE,
- sizeof(tm_wred_params_t),
&wred_handle, &name_tbl_id);
if (!profile_obj)
@@ -3515,8 +3740,7 @@ int odp_tm_wred_destroy(odp_tm_wred_t wred_profile)
return -1;
return tm_common_profile_destroy(wred_profile,
- sizeof(tm_wred_params_t),
- ODP_INVALID_NAME);
+ wred_params->name_tbl_id);
}
int odp_tm_wred_params_read(odp_tm_wred_t wred_profile,
@@ -3536,7 +3760,7 @@ int odp_tm_wred_params_read(odp_tm_wred_t wred_profile,
}
int odp_tm_wred_params_update(odp_tm_wred_t wred_profile,
- odp_tm_wred_params_t *params)
+ const odp_tm_wred_params_t *params)
{
tm_wred_params_t *wred_params;
@@ -3547,7 +3771,7 @@ int odp_tm_wred_params_update(odp_tm_wred_t wred_profile,
if (!wred_params)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
tm_wred_params_cvt_to(params, wred_params);
return 0;
}
@@ -3573,77 +3797,73 @@ void odp_tm_node_params_init(odp_tm_node_params_t *params)
memset(params, 0, sizeof(odp_tm_node_params_t));
}
-odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
- const char *name,
- odp_tm_node_params_t *params)
+odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm, const char *name,
+ const odp_tm_node_params_t *params)
{
odp_tm_level_requirements_t *requirements;
_odp_int_sorted_list_t sorted_list;
tm_schedulers_obj_t *schedulers_obj;
_odp_int_name_t name_tbl_id;
tm_wred_node_t *tm_wred_node;
- tm_node_obj_t *tm_node_obj;
+ tm_node_obj_t *tm_node_obj = NULL;
odp_tm_node_t odp_tm_node;
odp_tm_wred_t wred_profile;
tm_system_t *tm_system;
- uint32_t level, num_priorities, priority, schedulers_obj_len, color;
+ uint32_t level, num_priorities, priority, color;
+ uint32_t i;
/* Allocate a tm_node_obj_t record. */
tm_system = GET_TM_SYSTEM(odp_tm);
- tm_node_obj = malloc(sizeof(tm_node_obj_t));
- if (!tm_node_obj)
- return ODP_TM_INVALID;
- tm_wred_node = malloc(sizeof(tm_wred_node_t));
- if (!tm_wred_node) {
- free(tm_node_obj);
- return ODP_TM_INVALID;
- }
+ odp_ticketlock_lock(&tm_glb->node_obj.lock);
- level = params->level;
- requirements = &tm_system->requirements.per_level[level];
- num_priorities = requirements->max_priority + 1;
- schedulers_obj_len = sizeof(tm_schedulers_obj_t)
- + (sizeof(tm_sched_state_t) * num_priorities);
- schedulers_obj = malloc(schedulers_obj_len);
- if (!schedulers_obj) {
- free(tm_wred_node);
- free(tm_node_obj);
- return ODP_TM_INVALID;
- }
+ for (i = 0; i < ODP_TM_MAX_NUM_TM_NODES; i++) {
+ tm_node_obj_t *cur_node_obj = tm_nobj_from_index(i);
- memset(schedulers_obj, 0, schedulers_obj_len);
- odp_tm_node = MAKE_ODP_TM_NODE(tm_node_obj);
- name_tbl_id = ODP_INVALID_NAME;
- if ((name) && (name[0] != '\0')) {
- name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_NODE_HANDLE,
- odp_tm_node);
- if (name_tbl_id == ODP_INVALID_NAME) {
- free(schedulers_obj);
- free(tm_wred_node);
- free(tm_node_obj);
- return ODP_TM_INVALID;
+ if (cur_node_obj->status != TM_STATUS_FREE)
+ continue;
+
+ level = params->level;
+ requirements = &tm_system->requirements.per_level[level];
+ num_priorities = requirements->max_priority + 1;
+
+ odp_tm_node = MAKE_ODP_TM_NODE(cur_node_obj);
+ name_tbl_id = ODP_INVALID_NAME;
+ if ((name) && (name[0] != '\0')) {
+ name_tbl_id = _odp_int_name_tbl_add(name,
+ ODP_TM_NODE_HANDLE,
+ odp_tm_node);
+ if (name_tbl_id == ODP_INVALID_NAME)
+ break;
}
+ tm_node_obj = cur_node_obj;
+
+ memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
+ tm_node_obj->status = TM_STATUS_RESERVED;
+
+ break;
}
- memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
- memset(tm_wred_node, 0, sizeof(tm_wred_node_t));
- memset(schedulers_obj, 0, schedulers_obj_len);
+ odp_ticketlock_unlock(&tm_glb->node_obj.lock);
+
+ if (!tm_node_obj)
+ return ODP_TM_INVALID;
+
tm_node_obj->user_context = params->user_context;
tm_node_obj->name_tbl_id = name_tbl_id;
tm_node_obj->max_fanin = params->max_fanin;
tm_node_obj->is_root_node = false;
tm_node_obj->level = params->level;
tm_node_obj->tm_idx = tm_system->tm_idx;
- tm_node_obj->tm_wred_node = tm_wred_node;
- tm_node_obj->schedulers_obj = schedulers_obj;
+
+ tm_wred_node = &tm_node_obj->tm_wred_node;
odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+ schedulers_obj = &tm_node_obj->schedulers_obj;
schedulers_obj->num_priorities = num_priorities;
for (priority = 0; priority < num_priorities; priority++) {
- sorted_list = _odp_sorted_list_create(
- tm_system->_odp_int_sorted_pool,
- params->max_fanin);
+ sorted_list = _odp_sorted_list_create(tm_system->_odp_int_sorted_pool,
+ params->max_fanin);
schedulers_obj->sched_states[priority].sorted_list =
sorted_list;
}
@@ -3666,7 +3886,7 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
tm_node_obj->magic_num = TM_NODE_MAGIC_NUM;
tm_node_obj->shaper_obj.enclosing_entity = tm_node_obj;
tm_node_obj->shaper_obj.in_tm_node_obj = 1;
- tm_node_obj->schedulers_obj->enclosing_entity = tm_node_obj;
+ tm_node_obj->schedulers_obj.enclosing_entity = tm_node_obj;
odp_ticketlock_unlock(&tm_system->tm_system_lock);
return odp_tm_node;
@@ -3691,7 +3911,7 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node)
if (!tm_node_obj)
return -1;
- tm_system = odp_tm_systems[tm_node_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_node_obj->tm_idx];
if (!tm_system)
return -1;
@@ -3709,16 +3929,14 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node)
if (shaper_obj->shaper_params != NULL)
return -1;
- tm_wred_node = tm_node_obj->tm_wred_node;
- if (tm_wred_node != NULL) {
- if (tm_wred_node->threshold_params != NULL)
- return -1;
+ tm_wred_node = &tm_node_obj->tm_wred_node;
+ if (tm_wred_node->threshold_params != NULL)
+ return -1;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_params = tm_wred_node->wred_params[color];
- if (wred_params != NULL)
- return -1;
- }
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
+ wred_params = tm_wred_node->wred_params[color];
+ if (wred_params != NULL)
+ return -1;
}
/* Now that all of the checks are done, time to so some freeing. */
@@ -3726,25 +3944,22 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node)
if (tm_node_obj->name_tbl_id != ODP_INVALID_NAME)
_odp_int_name_tbl_delete(tm_node_obj->name_tbl_id);
- if (tm_node_obj->tm_wred_node != NULL)
- free(tm_node_obj->tm_wred_node);
-
- schedulers_obj = tm_node_obj->schedulers_obj;
- if (schedulers_obj != NULL) {
- num_priorities = schedulers_obj->num_priorities;
- for (priority = 0; priority < num_priorities; priority++) {
- sched_state = &schedulers_obj->sched_states[priority];
- sorted_list = sched_state->sorted_list;
- sorted_pool = tm_system->_odp_int_sorted_pool;
- rc = _odp_sorted_list_destroy(sorted_pool,
- sorted_list);
- if (rc != 0)
- return rc;
- }
+ schedulers_obj = &tm_node_obj->schedulers_obj;
+ num_priorities = schedulers_obj->num_priorities;
+ for (priority = 0; priority < num_priorities; priority++) {
+ sched_state = &schedulers_obj->sched_states[priority];
+ sorted_list = sched_state->sorted_list;
+ sorted_pool = tm_system->_odp_int_sorted_pool;
+ rc = _odp_sorted_list_destroy(sorted_pool,
+ sorted_list);
+ if (rc != 0)
+ return rc;
}
- free(schedulers_obj);
- free(tm_node_obj);
+ odp_ticketlock_lock(&tm_glb->node_obj.lock);
+ tm_node_obj->status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&tm_glb->node_obj.lock);
+
odp_ticketlock_unlock(&tm_system->tm_system_lock);
return 0;
}
@@ -3754,19 +3969,25 @@ int odp_tm_node_shaper_config(odp_tm_node_t tm_node,
{
tm_node_obj_t *tm_node_obj;
tm_system_t *tm_system;
+ odp_bool_t sync_needed;
tm_node_obj = GET_TM_NODE_OBJ(tm_node);
if (!tm_node_obj)
return -1;
- tm_system = odp_tm_systems[tm_node_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_node_obj->tm_idx];
if (!tm_system)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
+ sync_needed = tm_glb->main_loop_running;
+ if (sync_needed)
+ signal_request();
tm_shaper_config_set(tm_system, shaper_profile,
&tm_node_obj->shaper_obj);
- odp_ticketlock_unlock(&tm_profile_lock);
+ if (sync_needed)
+ signal_request_done();
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -3785,10 +4006,10 @@ int odp_tm_node_sched_config(odp_tm_node_t tm_node,
if (!child_tm_node_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
child_shaper_obj = &child_tm_node_obj->shaper_obj;
tm_sched_config_set(child_shaper_obj, sched_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -3798,12 +4019,12 @@ int odp_tm_node_threshold_config(odp_tm_node_t tm_node,
tm_node_obj_t *tm_node_obj;
tm_node_obj = GET_TM_NODE_OBJ(tm_node);
- if ((!tm_node_obj) || (!tm_node_obj->tm_wred_node))
+ if (!tm_node_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
- tm_threshold_config_set(tm_node_obj->tm_wred_node, thresholds_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
+ tm_threshold_config_set(&tm_node_obj->tm_wred_node, thresholds_profile);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -3820,9 +4041,9 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node,
if (!tm_node_obj)
return -1;
- wred_node = tm_node_obj->tm_wred_node;
+ wred_node = &tm_node_obj->tm_wred_node;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
rc = 0;
if (pkt_color == ODP_PACKET_ALL_COLORS) {
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
@@ -3833,7 +4054,7 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node,
rc = -1;
}
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return rc;
}
@@ -3873,94 +4094,99 @@ int odp_tm_node_context_set(odp_tm_node_t tm_node, void *user_context)
void odp_tm_queue_params_init(odp_tm_queue_params_t *params)
{
memset(params, 0, sizeof(odp_tm_queue_params_t));
+
+ params->ordered_enqueue = true;
}
odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
- odp_tm_queue_params_t *params)
+ const odp_tm_queue_params_t *params)
{
_odp_int_pkt_queue_t _odp_int_pkt_queue;
- tm_queue_obj_t *tm_queue_obj;
- tm_wred_node_t *tm_wred_node;
- odp_tm_queue_t odp_tm_queue;
+ tm_queue_obj_t *queue_obj;
+ odp_tm_queue_t odp_tm_queue = ODP_TM_INVALID;
odp_tm_wred_t wred_profile;
tm_system_t *tm_system;
uint32_t color;
+ uint32_t i;
/* Allocate a tm_queue_obj_t record. */
tm_system = GET_TM_SYSTEM(odp_tm);
- tm_queue_obj = malloc(sizeof(tm_queue_obj_t));
- if (!tm_queue_obj)
- return ODP_TM_INVALID;
- tm_wred_node = malloc(sizeof(tm_wred_node_t));
- if (!tm_wred_node) {
- free(tm_queue_obj);
- return ODP_TM_INVALID;
- }
+ odp_ticketlock_lock(&tm_glb->queue_obj.lock);
- _odp_int_pkt_queue = _odp_pkt_queue_create(
- tm_system->_odp_int_queue_pool);
- if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID) {
- free(tm_wred_node);
- free(tm_queue_obj);
- return ODP_TM_INVALID;
- }
+ for (i = 0; i < ODP_TM_MAX_TM_QUEUES; i++) {
+ _odp_int_queue_pool_t int_queue_pool;
- odp_tm_queue = MAKE_ODP_TM_QUEUE(tm_queue_obj);
- memset(tm_queue_obj, 0, sizeof(tm_queue_obj_t));
- memset(tm_wred_node, 0, sizeof(tm_wred_node_t));
- tm_queue_obj->user_context = params->user_context;
- tm_queue_obj->priority = params->priority;
- tm_queue_obj->tm_idx = tm_system->tm_idx;
- tm_queue_obj->queue_num = tm_system->next_queue_num++;
- tm_queue_obj->tm_wred_node = tm_wred_node;
- tm_queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue;
- tm_queue_obj->pkt = ODP_PACKET_INVALID;
- odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+ queue_obj = tm_qobj_from_index(i);
- tm_queue_obj->tm_qentry.s.type = QUEUE_TYPE_TM;
- tm_queue_obj->tm_qentry.s.enqueue = queue_tm_reenq;
- tm_queue_obj->tm_qentry.s.enqueue_multi = queue_tm_reenq_multi;
+ if (queue_obj->status != TM_STATUS_FREE)
+ continue;
- tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = tm_queue_obj;
- odp_ticketlock_lock(&tm_system->tm_system_lock);
- if (params->shaper_profile != ODP_TM_INVALID)
- tm_shaper_config_set(tm_system, params->shaper_profile,
- &tm_queue_obj->shaper_obj);
+ int_queue_pool = tm_system->_odp_int_queue_pool;
+ _odp_int_pkt_queue = _odp_pkt_queue_create(int_queue_pool);
+ if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID)
+ continue;
- if (params->threshold_profile != ODP_TM_INVALID)
- tm_threshold_config_set(tm_wred_node,
- params->threshold_profile);
+ odp_tm_queue = MAKE_ODP_TM_QUEUE(queue_obj);
+ memset(queue_obj, 0, sizeof(tm_queue_obj_t));
+ queue_obj->user_context = params->user_context;
+ queue_obj->priority = params->priority;
+ queue_obj->ordered_enqueue = params->ordered_enqueue;
+ queue_obj->tm_idx = tm_system->tm_idx;
+ queue_obj->queue_num = (uint32_t)_odp_int_pkt_queue;
+ queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue;
+ queue_obj->pkt = ODP_PACKET_INVALID;
+ odp_ticketlock_init(&queue_obj->tm_wred_node.tm_wred_node_lock);
+ odp_atomic_init_u64(&queue_obj->stats.discards, 0);
+ odp_atomic_init_u64(&queue_obj->stats.errors, 0);
+ odp_atomic_init_u64(&queue_obj->stats.packets, 0);
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_profile = params->wred_profile[color];
- if (wred_profile != ODP_TM_INVALID)
- tm_wred_config_set(tm_wred_node, color, wred_profile);
+ tm_system->queue_num_tbl[queue_obj->queue_num - 1] = queue_obj;
+
+ odp_ticketlock_lock(&tm_system->tm_system_lock);
+
+ if (params->shaper_profile != ODP_TM_INVALID)
+ tm_shaper_config_set(tm_system, params->shaper_profile,
+ &queue_obj->shaper_obj);
+
+ if (params->threshold_profile != ODP_TM_INVALID)
+ tm_threshold_config_set(&queue_obj->tm_wred_node,
+ params->threshold_profile);
+
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
+ wred_profile = params->wred_profile[color];
+ if (wred_profile != ODP_TM_INVALID)
+ tm_wred_config_set(&queue_obj->tm_wred_node,
+ color, wred_profile);
+ }
+
+ queue_obj->magic_num = TM_QUEUE_MAGIC_NUM;
+ queue_obj->shaper_obj.enclosing_entity = queue_obj;
+ queue_obj->shaper_obj.in_tm_node_obj = 0;
+
+ odp_ticketlock_unlock(&tm_system->tm_system_lock);
+
+ queue_obj->status = TM_STATUS_RESERVED;
+ break;
}
- tm_queue_obj->magic_num = TM_QUEUE_MAGIC_NUM;
- tm_queue_obj->shaper_obj.enclosing_entity = tm_queue_obj;
- tm_queue_obj->shaper_obj.in_tm_node_obj = 0;
+ odp_ticketlock_unlock(&tm_glb->queue_obj.lock);
- odp_ticketlock_unlock(&tm_system->tm_system_lock);
return odp_tm_queue;
}
int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
{
- tm_wred_params_t *wred_params;
tm_shaper_obj_t *shaper_obj;
tm_queue_obj_t *tm_queue_obj;
- tm_wred_node_t *tm_wred_node;
tm_system_t *tm_system;
- uint32_t color;
/* First lookup tm_queue. */
tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
@@ -3971,31 +4197,16 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
(tm_queue_obj->pkt != ODP_PACKET_INVALID))
return -1;
- /* Check that there is no shaper profile, threshold profile or wred
- * profile currently associated with this tm_queue. */
- if (shaper_obj->shaper_params != NULL)
- return -1;
-
- tm_wred_node = tm_queue_obj->tm_wred_node;
- if (tm_wred_node != NULL) {
- if (tm_wred_node->threshold_params != NULL)
- return -1;
-
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_params = tm_wred_node->wred_params[color];
- if (wred_params != NULL)
- return -1;
- }
- }
-
/* Now that all of the checks are done, time to so some freeing. */
odp_ticketlock_lock(&tm_system->tm_system_lock);
tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = NULL;
- /* First delete any associated tm_wred_node and then the tm_queue_obj
- * itself */
- free(tm_queue_obj->tm_wred_node);
- free(tm_queue_obj);
+ odp_ticketlock_lock(&tm_glb->queue_obj.lock);
+ _odp_pkt_queue_destroy(tm_system->_odp_int_queue_pool,
+ tm_queue_obj->_odp_int_pkt_queue);
+ tm_queue_obj->status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&tm_glb->queue_obj.lock);
+
odp_ticketlock_unlock(&tm_system->tm_system_lock);
return 0;
}
@@ -4033,14 +4244,14 @@ int odp_tm_queue_shaper_config(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
tm_shaper_config_set(tm_system, shaper_profile,
&tm_queue_obj->shaper_obj);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4060,10 +4271,10 @@ int odp_tm_queue_sched_config(odp_tm_node_t tm_node,
if (!child_tm_queue_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
child_shaper_obj = &child_tm_queue_obj->shaper_obj;
tm_sched_config_set(child_shaper_obj, sched_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4071,15 +4282,17 @@ int odp_tm_queue_threshold_config(odp_tm_queue_t tm_queue,
odp_tm_threshold_t thresholds_profile)
{
tm_queue_obj_t *tm_queue_obj;
+ int ret;
tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
if (!tm_queue_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
- tm_threshold_config_set(tm_queue_obj->tm_wred_node, thresholds_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
- return 0;
+ odp_ticketlock_lock(&tm_glb->profile_lock);
+ ret = tm_threshold_config_set(&tm_queue_obj->tm_wred_node,
+ thresholds_profile);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
+ return ret;
}
int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
@@ -4095,9 +4308,9 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- wred_node = tm_queue_obj->tm_wred_node;
+ wred_node = &tm_queue_obj->tm_wred_node;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
rc = 0;
if (pkt_color == ODP_PACKET_ALL_COLORS) {
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
@@ -4108,7 +4321,7 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
rc = -1;
}
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return rc;
}
@@ -4173,13 +4386,15 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
if ((!src_tm_node_obj) || src_tm_node_obj->is_root_node)
return -1;
- tm_system = odp_tm_systems[src_tm_node_obj->tm_idx];
+ tm_system = &tm_glb->system[src_tm_node_obj->tm_idx];
if (!tm_system)
return -1;
- src_tm_wred_node = src_tm_node_obj->tm_wred_node;
+ src_tm_wred_node = &src_tm_node_obj->tm_wred_node;
if (dst_tm_node == ODP_TM_ROOT) {
- src_tm_node_obj->shaper_obj.next_tm_node = tm_system->root_node;
+ tm_node_obj_t *root_node = &tm_system->root_node;
+
+ src_tm_node_obj->shaper_obj.next_tm_node = root_node;
src_tm_wred_node->next_tm_wred_node = NULL;
return 0;
}
@@ -4188,7 +4403,7 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
if ((!dst_tm_node_obj) || dst_tm_node_obj->is_root_node)
return -1;
- dst_tm_wred_node = dst_tm_node_obj->tm_wred_node;
+ dst_tm_wred_node = &dst_tm_node_obj->tm_wred_node;
if (src_tm_node_obj->tm_idx != dst_tm_node_obj->tm_idx)
return -1;
@@ -4219,9 +4434,8 @@ int odp_tm_node_disconnect(odp_tm_node_t src_tm_node)
dst_tm_node_obj->current_tm_node_fanin--;
}
- src_tm_wred_node = src_tm_node_obj->tm_wred_node;
- if (src_tm_wred_node != NULL)
- src_tm_wred_node->next_tm_wred_node = NULL;
+ src_tm_wred_node = &src_tm_node_obj->tm_wred_node;
+ src_tm_wred_node->next_tm_wred_node = NULL;
src_tm_node_obj->shaper_obj.next_tm_node = NULL;
return 0;
@@ -4242,13 +4456,13 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
if (!src_tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[src_tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[src_tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
- src_tm_wred_node = src_tm_queue_obj->tm_wred_node;
+ src_tm_wred_node = &src_tm_queue_obj->tm_wred_node;
if (dst_tm_node == ODP_TM_ROOT) {
- root_node = tm_system->root_node;
+ root_node = &tm_system->root_node;
src_tm_queue_obj->shaper_obj.next_tm_node = root_node;
src_tm_wred_node->next_tm_wred_node = NULL;
return 0;
@@ -4258,7 +4472,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
if ((!dst_tm_node_obj) || dst_tm_node_obj->is_root_node)
return -1;
- dst_tm_wred_node = dst_tm_node_obj->tm_wred_node;
+ dst_tm_wred_node = &dst_tm_node_obj->tm_wred_node;
if (src_tm_queue_obj->tm_idx != dst_tm_node_obj->tm_idx)
return -1;
@@ -4290,9 +4504,8 @@ int odp_tm_queue_disconnect(odp_tm_queue_t tm_queue)
dst_tm_node_obj->current_tm_queue_fanin--;
}
- src_tm_wred_node = src_tm_queue_obj->tm_wred_node;
- if (src_tm_wred_node != NULL)
- src_tm_wred_node->next_tm_wred_node = NULL;
+ src_tm_wred_node = &src_tm_queue_obj->tm_wred_node;
+ src_tm_wred_node->next_tm_wred_node = NULL;
src_tm_queue_obj->shaper_obj.next_tm_node = NULL;
return 0;
@@ -4302,19 +4515,23 @@ int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt)
{
tm_queue_obj_t *tm_queue_obj;
tm_system_t *tm_system;
+ int rc;
tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
if (odp_atomic_load_u64(&tm_system->destroying))
return -1;
- return tm_enqueue(tm_system, tm_queue_obj, pkt);
+ rc = tm_enqueue(tm_system, tm_queue_obj, pkt);
+ if (rc < 0)
+ return rc;
+ return 0;
}
int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
@@ -4328,7 +4545,7 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
@@ -4343,6 +4560,127 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
return pkt_cnt;
}
+int odp_tm_enq_multi(odp_tm_queue_t tm_queue, const odp_packet_t packets[],
+ int num)
+{
+ tm_queue_obj_t *tm_queue_obj;
+ tm_system_t *tm_system;
+ int i, rc;
+
+ tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+ if (!tm_queue_obj)
+ return -1;
+
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
+ if (!tm_system)
+ return -1;
+
+ if (odp_atomic_load_u64(&tm_system->destroying))
+ return -1;
+
+ for (i = 0; i < num; i++) {
+ rc = tm_enqueue(tm_system, tm_queue_obj, packets[i]);
+ if (rc < 0 && rc != -2)
+ break;
+ /* For RED failure, just drop current pkt but
+ * continue with next pkts.
+ */
+ if (rc == -2) {
+ odp_packet_free(packets[i]);
+ odp_atomic_inc_u64(&tm_queue_obj->stats.discards);
+ }
+ }
+
+ return i;
+}
+
+int odp_tm_enq_multi_lso(odp_tm_queue_t tm_queue, const odp_packet_t packets[], int num,
+ const odp_packet_lso_opt_t *opt)
+{
+ int i, ret, num_pkt;
+ uint32_t payload_len, left_over_len;
+ odp_packet_t pkt;
+ odp_packet_lso_opt_t lso_opt;
+ const odp_packet_lso_opt_t *opt_ptr = &lso_opt;
+
+ if (odp_unlikely(num <= 0)) {
+ _ODP_ERR("No packets\n");
+ return -1;
+ }
+
+ memset(&lso_opt, 0, sizeof(odp_packet_lso_opt_t));
+ if (opt)
+ opt_ptr = opt;
+
+ for (i = 0; i < num; i++) {
+ pkt = packets[i];
+
+ if (opt == NULL) {
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ if (pkt_hdr->p.flags.lso == 0) {
+ _ODP_ERR("No LSO options on packet %i\n", i);
+ goto error;
+ }
+ /* Fill in LSO options from packet */
+ lso_opt.lso_profile = _odp_lso_prof_from_idx(pkt_hdr->lso_profile_idx);
+ lso_opt.payload_offset = odp_packet_payload_offset(pkt);
+ lso_opt.max_payload_len = pkt_hdr->lso_max_payload;
+ }
+
+ /* Calculate number of packets */
+ num_pkt = _odp_lso_num_packets(pkt, opt_ptr, &payload_len, &left_over_len);
+ if (odp_unlikely(num_pkt <= 0)) {
+ _ODP_DBG("LSO num packets failed on packet %i\n", i);
+ goto error;
+ }
+
+ if (odp_unlikely(num_pkt == 1)) {
+ /* Segmentation not needed */
+ if (odp_tm_enq_multi(tm_queue, &pkt, 1) != 1) {
+ _ODP_DBG("TM enqueue failed on packet %i\n", i);
+
+ goto error;
+ }
+
+ continue;
+ }
+
+ /* Create packets */
+ odp_packet_t pkt_out[num_pkt];
+
+ ret = _odp_lso_create_packets(pkt, opt_ptr, payload_len, left_over_len, pkt_out,
+ num_pkt);
+
+ if (odp_unlikely(ret))
+ goto error;
+
+ /* Enqueue resulting packets */
+ ret = odp_tm_enq_multi(tm_queue, pkt_out, num_pkt);
+
+ if (odp_unlikely(ret < num_pkt)) {
+ _ODP_DBG("TM enqueue failed on packet %i\n", i);
+
+ if (ret < 0)
+ ret = 0;
+
+ odp_packet_free_multi(&pkt_out[ret], num_pkt - ret);
+ goto error;
+ }
+
+ /* Free original packet */
+ odp_packet_free(pkt);
+ }
+
+ return i;
+
+error:
+ if (i == 0)
+ return -1;
+
+ return i;
+}
+
int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info)
{
tm_queue_thresholds_t *threshold_params;
@@ -4378,19 +4716,17 @@ int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info)
if (shaper_params != NULL)
info->shaper_profile = shaper_params->shaper_profile;
- tm_wred_node = tm_node_obj->tm_wred_node;
- if (tm_wred_node != NULL) {
- threshold_params = tm_wred_node->threshold_params;
- if (threshold_params != NULL)
- info->threshold_profile =
- threshold_params->thresholds_profile;
+ tm_wred_node = &tm_node_obj->tm_wred_node;
+ threshold_params = tm_wred_node->threshold_params;
+ if (threshold_params != NULL)
+ info->threshold_profile =
+ threshold_params->thresholds_profile;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_params = tm_wred_node->wred_params[color];
- if (wred_params != NULL)
- info->wred_profile[color] =
- wred_params->wred_profile;
- }
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
+ wred_params = tm_wred_node->wred_params[color];
+ if (wred_params != NULL)
+ info->wred_profile[color] =
+ wred_params->wred_profile;
}
return 0;
@@ -4494,7 +4830,7 @@ int odp_tm_queue_info(odp_tm_queue_t tm_queue, odp_tm_queue_info_t *info)
if (shaper_params != NULL)
info->shaper_profile = shaper_params->shaper_profile;
- tm_wred_node = tm_queue_obj->tm_wred_node;
+ tm_wred_node = &tm_queue_obj->tm_wred_node;
if (tm_wred_node != NULL) {
threshold_params = tm_wred_node->threshold_params;
if (threshold_params != NULL)
@@ -4516,7 +4852,8 @@ static int tm_query_info_copy(tm_queue_info_t *queue_info,
uint32_t query_flags,
odp_tm_query_info_t *info)
{
- tm_queue_thresholds_t *threshold_params;
+ if ((query_flags & ODP_TM_QUERY_THRESHOLDS) && !queue_info->threshold_params)
+ return -1;
memset(info, 0, sizeof(odp_tm_query_info_t));
info->total_pkt_cnt =
@@ -4528,9 +4865,7 @@ static int tm_query_info_copy(tm_queue_info_t *queue_info,
info->approx_byte_cnt = 0;
if (query_flags & ODP_TM_QUERY_THRESHOLDS) {
- threshold_params = queue_info->threshold_params;
- if (!threshold_params)
- return -1;
+ tm_queue_thresholds_t *threshold_params = queue_info->threshold_params;
info->max_pkt_cnt = threshold_params->max_pkts;
info->max_byte_cnt = threshold_params->max_bytes;
@@ -4553,9 +4888,7 @@ int odp_tm_queue_query(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- tm_wred_node = tm_queue_obj->tm_wred_node;
- if (!tm_wred_node)
- return -1;
+ tm_wred_node = &tm_queue_obj->tm_wred_node;
/* **TBD** Where do we get the queue_info from. */
queue_info.threshold_params = tm_wred_node->threshold_params;
@@ -4598,11 +4931,11 @@ int odp_tm_priority_threshold_config(odp_tm_t odp_tm,
if (thresholds_profile == ODP_TM_INVALID)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
tm_system->priority_info[priority].threshold_params =
tm_get_profile_params(thresholds_profile,
TM_THRESHOLD_PROFILE);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4615,10 +4948,10 @@ int odp_tm_total_threshold_config(odp_tm_t odp_tm,
if (thresholds_profile == ODP_TM_INVALID)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
- tm_system->total_info.threshold_params = tm_get_profile_params(
- thresholds_profile, TM_THRESHOLD_PROFILE);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
+ tm_system->total_info.threshold_params = tm_get_profile_params(thresholds_profile,
+ TM_THRESHOLD_PROFILE);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4627,57 +4960,140 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
input_work_queue_t *input_work_queue;
tm_queue_obj_t *tm_queue_obj;
tm_system_t *tm_system;
- uint32_t queue_num, max_queue_num;
+ uint32_t queue_num;
tm_system = GET_TM_SYSTEM(odp_tm);
- input_work_queue = tm_system->input_work_queue;
-
- ODP_DBG("odp_tm_stats_print - tm_system=0x%" PRIX64 " tm_idx=%u\n",
- odp_tm, tm_system->tm_idx);
- ODP_DBG(" input_work_queue size=%u current cnt=%u peak cnt=%u\n",
- INPUT_WORK_RING_SIZE, input_work_queue->queue_cnt,
- input_work_queue->peak_cnt);
- ODP_DBG(" input_work_queue enqueues=%" PRIu64 " dequeues=% " PRIu64
- " fail_cnt=%" PRIu64 "\n", input_work_queue->total_enqueues,
- input_work_queue->total_dequeues,
- input_work_queue->enqueue_fail_cnt);
- ODP_DBG(" green_cnt=%" PRIu64 " yellow_cnt=%" PRIu64 " red_cnt=%"
- PRIu64 "\n", tm_system->shaper_green_cnt,
- tm_system->shaper_yellow_cnt,
- tm_system->shaper_red_cnt);
+ input_work_queue = &tm_system->input_work_queue;
+
+ _ODP_PRINT("\nTM stats\n");
+ _ODP_PRINT("--------\n");
+ _ODP_PRINT(" tm_system=0x%" PRIX64 " tm_idx=%u\n", odp_tm, tm_system->tm_idx);
+ _ODP_PRINT(" input_work_queue size=%u current cnt=%" PRIu64 " peak cnt=%" PRIu32 "\n",
+ INPUT_WORK_RING_SIZE, odp_atomic_load_u64(&input_work_queue->queue_cnt),
+ input_work_queue->peak_cnt);
+ _ODP_PRINT(" input_work_queue enqueues=%" PRIu64 " dequeues=%" PRIu64
+ " fail_cnt=%" PRIu64 "\n", input_work_queue->total_enqueues,
+ input_work_queue->total_dequeues,
+ input_work_queue->enqueue_fail_cnt);
+ _ODP_PRINT(" green_cnt=%" PRIu64 " yellow_cnt=%" PRIu64 " red_cnt=%" PRIu64 "\n",
+ tm_system->shaper_green_cnt,
+ tm_system->shaper_yellow_cnt,
+ tm_system->shaper_red_cnt);
_odp_pkt_queue_stats_print(tm_system->_odp_int_queue_pool);
_odp_timer_wheel_stats_print(tm_system->_odp_int_timer_wheel);
_odp_sorted_list_stats_print(tm_system->_odp_int_sorted_pool);
- max_queue_num = tm_system->next_queue_num;
- for (queue_num = 1; queue_num < max_queue_num; queue_num++) {
+ for (queue_num = 1; queue_num <= ODP_TM_MAX_TM_QUEUES; queue_num++) {
tm_queue_obj = tm_system->queue_num_tbl[queue_num - 1];
if (tm_queue_obj && tm_queue_obj->pkts_rcvd_cnt != 0)
- ODP_DBG("queue_num=%u priority=%u rcvd=%u enqueued=%u "
- "dequeued=%u consumed=%u\n",
- queue_num,
- tm_queue_obj->priority,
- tm_queue_obj->pkts_rcvd_cnt,
- tm_queue_obj->pkts_enqueued_cnt,
- tm_queue_obj->pkts_dequeued_cnt,
- tm_queue_obj->pkts_consumed_cnt);
+ _ODP_PRINT("queue_num=%u priority=%u rcvd=%u enqueued=%u "
+ "dequeued=%u consumed=%u\n",
+ queue_num,
+ tm_queue_obj->priority,
+ tm_queue_obj->pkts_rcvd_cnt,
+ tm_queue_obj->pkts_enqueued_cnt,
+ tm_queue_obj->pkts_dequeued_cnt,
+ tm_queue_obj->pkts_consumed_cnt);
}
}
-int odp_tm_init_global(void)
+int odp_tm_queue_stats(odp_tm_queue_t tm_queue, odp_tm_queue_stats_t *stats)
+{
+ tm_queue_obj_t *tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
+
+ if (!tm_queue_obj) {
+ _ODP_ERR("Invalid TM queue handle\n");
+ return -1;
+ }
+
+ memset(stats, 0, sizeof(odp_tm_queue_stats_t));
+ stats->discards = odp_atomic_load_u64(&tm_queue_obj->stats.discards);
+ stats->errors = odp_atomic_load_u64(&tm_queue_obj->stats.errors);
+ stats->packets = odp_atomic_load_u64(&tm_queue_obj->stats.packets);
+
+ return 0;
+}
+
+uint64_t odp_tm_to_u64(odp_tm_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+uint64_t odp_tm_queue_to_u64(odp_tm_queue_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+uint64_t odp_tm_node_to_u64(odp_tm_node_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+uint64_t odp_tm_shaper_to_u64(odp_tm_shaper_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+uint64_t odp_tm_sched_to_u64(odp_tm_sched_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+uint64_t odp_tm_threshold_to_u64(odp_tm_threshold_t hdl)
+{
+ return _odp_pri(hdl);
+}
+
+uint64_t odp_tm_wred_to_u64(odp_tm_wred_t hdl)
{
- odp_ticketlock_init(&tm_create_lock);
- odp_ticketlock_init(&tm_profile_lock);
- odp_barrier_init(&tm_first_enq, 2);
+ return _odp_pri(hdl);
+}
- odp_atomic_init_u64(&atomic_request_cnt, 0);
- odp_atomic_init_u64(&currently_serving_cnt, 0);
- odp_atomic_init_u64(&atomic_done_cnt, 0);
+int _odp_tm_init_global(void)
+{
+ odp_shm_t shm;
+
+ if (odp_global_ro.disable.traffic_mngr) {
+ _ODP_PRINT("\nODP traffic manager is DISABLED\n");
+ return 0;
+ }
+
+ shm = odp_shm_reserve("_odp_traffic_mng_global", sizeof(tm_global_t), 0, 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ tm_glb = odp_shm_addr(shm);
+ memset(tm_glb, 0, sizeof(tm_global_t));
+
+ tm_glb->shm = shm;
+ tm_glb->main_thread_cpu = -1;
+
+ odp_ticketlock_init(&tm_glb->queue_obj.lock);
+ odp_ticketlock_init(&tm_glb->node_obj.lock);
+ odp_ticketlock_init(&tm_glb->system_group.lock);
+ odp_ticketlock_init(&tm_glb->create_lock);
+ odp_ticketlock_init(&tm_glb->profile_lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.sched.lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.shaper.lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.threshold.lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.wred.lock);
+ odp_barrier_init(&tm_glb->first_enq, 2);
+
+ odp_atomic_init_u64(&tm_glb->atomic_request_cnt, 0);
+ odp_atomic_init_u64(&tm_glb->currently_serving_cnt, 0);
+ odp_atomic_init_u64(&tm_glb->atomic_done_cnt, 0);
return 0;
}
-int odp_tm_term_global(void)
+int _odp_tm_term_global(void)
{
+ if (odp_global_ro.disable.traffic_mngr)
+ return 0;
+
+ if (odp_shm_free(tm_glb->shm)) {
+ _ODP_ERR("shm free failed\n");
+ return -1;
+ }
return 0;
}
diff --git a/platform/linux-generic/odp_version.c b/platform/linux-generic/odp_version.c
index 7b704d054..baa336839 100644
--- a/platform/linux-generic/odp_version.c
+++ b/platform/linux-generic/odp_version.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
diff --git a/platform/linux-generic/odp_weak.c b/platform/linux-generic/odp_weak.c
index 0fbf6645e..747886d4e 100644
--- a/platform/linux-generic/odp_weak.c
+++ b/platform/linux-generic/odp_weak.c
@@ -1,10 +1,9 @@
-/* Copyright (c) 2014, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_internal.h>
#include <odp/api/debug.h>
#include <odp_debug_internal.h>
#include <odp/api/hints.h>
@@ -21,6 +20,7 @@ int odp_override_log(odp_log_level_t level, const char *fmt, ...)
switch (level) {
case ODP_LOG_ERR:
case ODP_LOG_UNIMPLEMENTED:
+ case ODP_LOG_WARN:
case ODP_LOG_ABORT:
logfd = stderr;
break;
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 1922109a7..3fa796007 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -1,209 +1,1163 @@
-/* Copyright (c) 2016, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#ifdef ODP_PKTIO_DPDK
+#include <odp/autoheader_internal.h>
-#include <odp_posix_extensions.h>
+#ifdef _ODP_PKTIO_DPDK
-#include <sched.h>
-#include <ctype.h>
-#include <unistd.h>
+#include <odp_posix_extensions.h>
#include <odp/api/cpumask.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/packet_io_stats.h>
+#include <odp/api/std_types.h>
+#include <odp/api/time.h>
#include <odp/api/plat/packet_inlines.h>
-#include <odp/api/packet.h>
+#include <odp/api/plat/time_inlines.h>
-#include <odp_packet_io_internal.h>
#include <odp_classification_internal.h>
-#include <odp_packet_dpdk.h>
#include <odp_debug_internal.h>
-
+#include <odp_global_data.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_dpdk.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
#include <protocols/eth.h>
+#include <protocols/udp.h>
+#include <odp_pool_internal.h>
+#include <odp_socket_common.h>
#include <rte_config.h>
-#include <rte_mbuf.h>
+#include <rte_common.h>
#include <rte_ethdev.h>
+#include <rte_mbuf.h>
+#include <rte_malloc.h>
+#include <rte_mempool.h>
+#include <rte_ip.h>
+#include <rte_ip_frag.h>
+#include <rte_log.h>
#include <rte_string_fns.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_version.h>
+#include <rte_vfio.h>
+
+/* NUMA is not supported on all platforms */
+#ifdef _ODP_HAVE_NUMA_LIBRARY
+#include <numa.h>
+#else
+#define numa_num_configured_nodes() 1
+#endif
-static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
+#include <ctype.h>
+#include <errno.h>
+#include <sched.h>
+#include <stdint.h>
+#include <unistd.h>
-/* Has dpdk_pktio_init() been called */
-static odp_bool_t dpdk_initialized;
-
-#define PMD_EXT(drv) \
-extern void devinitfn_##drv(void)
-
-PMD_EXT(aesni_gcm_pmd_drv);
-PMD_EXT(cryptodev_aesni_mb_pmd_drv);
-PMD_EXT(cryptodev_kasumi_pmd_drv);
-PMD_EXT(cryptodev_null_pmd_drv);
-PMD_EXT(cryptodev_snow3g_pmd_drv);
-PMD_EXT(pmd_qat_drv);
-PMD_EXT(pmd_af_packet_drv);
-PMD_EXT(rte_bnx2x_driver);
-PMD_EXT(rte_bnx2xvf_driver);
-PMD_EXT(bnxt_pmd_drv);
-PMD_EXT(bond_drv);
-PMD_EXT(rte_cxgbe_driver);
-PMD_EXT(em_pmd_drv);
-PMD_EXT(pmd_igb_drv);
-PMD_EXT(pmd_igbvf_drv);
-PMD_EXT(ena_pmd_drv);
-PMD_EXT(rte_enic_driver);
-PMD_EXT(rte_fm10k_driver);
-PMD_EXT(rte_i40e_driver);
-PMD_EXT(rte_i40evf_driver);
-PMD_EXT(rte_ixgbe_driver);
-PMD_EXT(rte_ixgbevf_driver);
-PMD_EXT(rte_mlx4_driver);
-PMD_EXT(rte_mlx5_driver);
-PMD_EXT(pmd_mpipe_xgbe_drv);
-PMD_EXT(pmd_mpipe_gbe_drv);
-PMD_EXT(rte_nfp_net_driver);
-PMD_EXT(pmd_null_drv);
-PMD_EXT(pmd_pcap_drv);
-PMD_EXT(rte_qede_driver);
-PMD_EXT(rte_qedevf_driver);
-PMD_EXT(pmd_ring_drv);
-PMD_EXT(pmd_szedata2_drv);
-PMD_EXT(rte_nicvf_driver);
-PMD_EXT(pmd_vhost_drv);
-PMD_EXT(rte_virtio_driver);
-PMD_EXT(virtio_user_driver);
-PMD_EXT(rte_vmxnet3_driver);
-PMD_EXT(pmd_xenvirt_drv);
-
-#define MEMPOOL_OPS(hdl) \
-extern void mp_hdlr_init_##hdl(void)
-
-MEMPOOL_OPS(ops_mp_mc);
-MEMPOOL_OPS(ops_sp_sc);
-MEMPOOL_OPS(ops_mp_sc);
-MEMPOOL_OPS(ops_sp_mc);
-MEMPOOL_OPS(ops_stack);
-
-/*
- * This function is not called from anywhere, it's only purpose is to make sure
- * that if ODP and DPDK are statically linked to an application, the GCC
- * constructors of the PMDs are linked as well. Otherwise the linker would omit
- * them. It's not an issue with dynamic linking. */
-void refer_constructors(void);
-void refer_constructors(void)
-{
-#ifdef RTE_LIBRTE_PMD_AESNI_GCM
- devinitfn_aesni_gcm_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_AESNI_MB
- devinitfn_cryptodev_aesni_mb_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_KASUMI
- devinitfn_cryptodev_kasumi_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_NULL_CRYPTO
- devinitfn_cryptodev_null_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_SNOW3G
- devinitfn_cryptodev_snow3g_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_QAT
- devinitfn_pmd_qat_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_AF_PACKET
- devinitfn_pmd_af_packet_drv();
-#endif
-#ifdef RTE_LIBRTE_BNX2X_PMD
- devinitfn_rte_bnx2x_driver();
- devinitfn_rte_bnx2xvf_driver();
-#endif
-#ifdef RTE_LIBRTE_BNXT_PMD
- devinitfn_bnxt_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_BOND
- devinitfn_bond_drv();
-#endif
-#ifdef RTE_LIBRTE_CXGBE_PMD
- devinitfn_rte_cxgbe_driver();
-#endif
-#ifdef RTE_LIBRTE_EM_PMD
- devinitfn_em_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_IGB_PMD
- devinitfn_pmd_igb_drv();
- devinitfn_pmd_igbvf_drv();
-#endif
-#ifdef RTE_LIBRTE_ENA_PMD
- devinitfn_ena_pmd_drv();
-#endif
-#ifdef RTE_LIBRTE_ENIC_PMD
- devinitfn_rte_enic_driver();
-#endif
-#ifdef RTE_LIBRTE_FM10K_PMD
- devinitfn_rte_fm10k_driver();
-#endif
-#ifdef RTE_LIBRTE_I40E_PMD
- devinitfn_rte_i40e_driver();
- devinitfn_rte_i40evf_driver();
-#endif
-#ifdef RTE_LIBRTE_IXGBE_PMD
- devinitfn_rte_ixgbe_driver();
- devinitfn_rte_ixgbevf_driver();
-#endif
-#ifdef RTE_LIBRTE_MLX4_PMD
- devinitfn_rte_mlx4_driver();
-#endif
-#ifdef RTE_LIBRTE_MLX5_PMD
- devinitfn_rte_mlx5_driver();
-#endif
-#ifdef RTE_LIBRTE_MPIPE_PMD
- devinitfn_pmd_mpipe_xgbe_drv()
- devinitfn_pmd_mpipe_gbe_drv()
-#endif
-#ifdef RTE_LIBRTE_NFP_PMD
- devinitfn_rte_nfp_net_driver();
-#endif
-#ifdef RTE_LIBRTE_PMD_NULL
- devinitfn_pmd_null_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_PCAP
- devinitfn_pmd_pcap_drv();
-#endif
-#ifdef RTE_LIBRTE_QEDE_PMD
- devinitfn_rte_qede_driver();
- devinitfn_rte_qedevf_driver();
-#endif
-#ifdef RTE_LIBRTE_PMD_RING
- devinitfn_pmd_ring_drv();
-#endif
-#ifdef RTE_LIBRTE_PMD_SZEDATA2
- devinitfn_pmd_szedata2_drv();
-#endif
-#ifdef RTE_LIBRTE_THUNDERX_NICVF_PMD
- devinitfn_rte_nicvf_driver();
-#endif
-#ifdef RTE_LIBRTE_PMD_VHOST
- devinitfn_pmd_vhost_drv();
-#endif
-#ifdef RTE_LIBRTE_VIRTIO_PMD
- devinitfn_rte_virtio_driver();
-#endif
-#ifdef RTE_VIRTIO_USER
- devinitfn_rte_virtio_driver();
-#endif
-#ifdef RTE_LIBRTE_VMXNET3_PMD
- devinitfn_rte_vmxnet3_driver();
+#if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
+ #define RTE_MBUF_F_RX_RSS_HASH PKT_RX_RSS_HASH
+ #define RTE_MBUF_F_TX_IPV4 PKT_TX_IPV4
+ #define RTE_MBUF_F_TX_IPV6 PKT_TX_IPV6
+ #define RTE_MBUF_F_TX_IP_CKSUM PKT_TX_IP_CKSUM
+ #define RTE_MBUF_F_TX_UDP_CKSUM PKT_TX_UDP_CKSUM
+ #define RTE_MBUF_F_TX_TCP_CKSUM PKT_TX_TCP_CKSUM
+ #define RTE_MEMPOOL_REGISTER_OPS MEMPOOL_REGISTER_OPS
+
+ #define RTE_ETH_RSS_IPV4 ETH_RSS_IPV4
+ #define RTE_ETH_RSS_FRAG_IPV4 ETH_RSS_FRAG_IPV4
+ #define RTE_ETH_RSS_NONFRAG_IPV4_TCP ETH_RSS_NONFRAG_IPV4_TCP
+ #define RTE_ETH_RSS_NONFRAG_IPV4_UDP ETH_RSS_NONFRAG_IPV4_UDP
+ #define RTE_ETH_RSS_NONFRAG_IPV4_OTHER ETH_RSS_NONFRAG_IPV4_OTHER
+
+ #define RTE_ETH_RSS_IPV6 ETH_RSS_IPV6
+ #define RTE_ETH_RSS_IPV6_EX ETH_RSS_IPV6_EX
+ #define RTE_ETH_RSS_IPV6_UDP_EX ETH_RSS_IPV6_UDP_EX
+ #define RTE_ETH_RSS_IPV6_TCP_EX ETH_RSS_IPV6_TCP_EX
+ #define RTE_ETH_RSS_FRAG_IPV6 ETH_RSS_FRAG_IPV6
+ #define RTE_ETH_RSS_NONFRAG_IPV6_TCP ETH_RSS_NONFRAG_IPV6_TCP
+ #define RTE_ETH_RSS_NONFRAG_IPV6_UDP ETH_RSS_NONFRAG_IPV6_UDP
+ #define RTE_ETH_RSS_NONFRAG_IPV6_OTHER ETH_RSS_NONFRAG_IPV6_OTHER
+
+ #define RTE_ETH_MQ_RX_RSS ETH_MQ_RX_RSS
+ #define RTE_ETH_MQ_TX_NONE ETH_MQ_TX_NONE
+
+ #define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM DEV_RX_OFFLOAD_IPV4_CKSUM
+ #define RTE_ETH_RX_OFFLOAD_TCP_CKSUM DEV_RX_OFFLOAD_TCP_CKSUM
+ #define RTE_ETH_RX_OFFLOAD_UDP_CKSUM DEV_RX_OFFLOAD_UDP_CKSUM
+
+ #define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM DEV_TX_OFFLOAD_IPV4_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM DEV_TX_OFFLOAD_SCTP_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_TCP_CKSUM DEV_TX_OFFLOAD_TCP_CKSUM
+ #define RTE_ETH_TX_OFFLOAD_UDP_CKSUM DEV_TX_OFFLOAD_UDP_CKSUM
+
+ #define RTE_ETH_FC_FULL RTE_FC_FULL
+ #define RTE_ETH_FC_RX_PAUSE RTE_FC_RX_PAUSE
+ #define RTE_ETH_FC_TX_PAUSE RTE_FC_TX_PAUSE
+ #define RTE_ETH_LINK_AUTONEG ETH_LINK_AUTONEG
+ #define RTE_ETH_LINK_FULL_DUPLEX ETH_LINK_FULL_DUPLEX
+ #define RTE_ETH_LINK_UP ETH_LINK_UP
+ #define RTE_ETH_SPEED_NUM_NONE ETH_SPEED_NUM_NONE
#endif
-#ifdef RTE_LIBRTE_PMD_XENVIRT
- devinitfn_pmd_xenvirt_drv();
+
+/* No special rte_mempool behavior flags are needed */
+#define MEMPOOL_FLAGS 0
+
+#if _ODP_DPDK_ZERO_COPY
+/* Zero-copy shares the buffer between ODP and DPDK, so the headroom layouts
+ * must match exactly. */
+ODP_STATIC_ASSERT(CONFIG_PACKET_HEADROOM == RTE_PKTMBUF_HEADROOM,
+		  "ODP and DPDK headroom sizes not matching!");
#endif
+
+/* DPDK poll mode drivers requiring minimum RX burst size DPDK_MIN_RX_BURST */
+#define IXGBE_DRV_NAME "net_ixgbe"
+#define I40E_DRV_NAME "net_i40e"
+
+/* Defaults for the non-zero-copy DPDK mbuf pool */
+#define DPDK_MEMORY_MB 512
+#define DPDK_NB_MBUF 16384
+#define DPDK_MBUF_BUF_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
+#define DPDK_MEMPOOL_CACHE_SIZE 64
+
+/* Constraints documented for rte_mempool_create(): cache size must divide the
+ * element count and stay within DPDK's allowed bounds. */
+ODP_STATIC_ASSERT((DPDK_NB_MBUF % DPDK_MEMPOOL_CACHE_SIZE == 0) &&
+		  (DPDK_MEMPOOL_CACHE_SIZE <= RTE_MEMPOOL_CACHE_MAX_SIZE) &&
+		  (DPDK_MEMPOOL_CACHE_SIZE <= DPDK_MBUF_BUF_SIZE * 10 / 15)
+		  , "DPDK mempool cache size failure");
+
+/* Minimum RX burst size */
+#define DPDK_MIN_RX_BURST 4
+
+/* min_rx_burst is stored in a uint8_t field of pkt_dpdk_t */
+ODP_STATIC_ASSERT(DPDK_MIN_RX_BURST <= UINT8_MAX, "DPDK_MIN_RX_BURST too large");
+
+/* Limits for setting link MTU */
+#define DPDK_MTU_MIN (RTE_ETHER_MIN_MTU + _ODP_ETHHDR_LEN)
+#define DPDK_MTU_MAX (9000 + _ODP_ETHHDR_LEN)
+
+/** DPDK runtime configuration options (read via libconfig, see init_options()) */
+typedef struct {
+	/* Default number of RX descriptors per queue */
+	int num_rx_desc_default;
+	/* Default number of TX descriptors per queue */
+	int num_tx_desc_default;
+	/* Enable multicast reception */
+	uint8_t multicast_en;
+	/* Drop packets on RX descriptor exhaustion */
+	uint8_t rx_drop_en;
+	/* Store RX RSS hash as ODP flow hash */
+	uint8_t set_flow_hash;
+} dpdk_opt_t;
+
+/* Per-queue cache holding mbufs received beyond the caller's request, needed
+ * because some PMDs require bursts of at least DPDK_MIN_RX_BURST. */
+typedef struct {
+	/* Array for storing extra RX packets */
+	struct rte_mbuf *pkt[DPDK_MIN_RX_BURST];
+	/* Head of cache */
+	uint8_t idx;
+	/* Packets in cache */
+	uint8_t count;
+} pkt_cache_t;
+
+/* DPDK pktio specific data, stored in the pktio entry's private area
+ * (see the PKTIO_PRIVATE_SIZE assert below). Fast path fields first. */
+typedef struct ODP_ALIGNED_CACHE {
+	/* --- Fast path data --- */
+
+	/* Function for mbuf to ODP packet conversion */
+	int (*mbuf_to_pkt_fn)(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+			      struct rte_mbuf *mbuf_table[], uint16_t mbuf_num, odp_time_t *ts);
+	/* Packet output capabilities */
+	odp_pktout_config_opt_t pktout_capa;
+	/* DPDK port identifier */
+	uint16_t port_id;
+	/* Maximum transmission unit */
+	uint16_t mtu;
+	struct {
+		/* No locking for rx */
+		uint8_t lockless_rx;
+		/* No locking for tx */
+		uint8_t lockless_tx;
+		/* Store RX RSS hash as flow hash */
+		uint8_t set_flow_hash;
+	} flags;
+	/* Minimum RX burst size */
+	uint8_t min_rx_burst;
+	/* Cache for storing extra RX packets */
+	pkt_cache_t rx_cache[ODP_PKTIN_MAX_QUEUES];
+
+	/* --- Control path data --- */
+
+	/* Runtime config options */
+	dpdk_opt_t opt;
+	/* ODP packet pool */
+	odp_pool_t pool;
+	/* DPDK packet pool */
+	struct rte_mempool *pkt_pool;
+	/* RSS configuration */
+	struct rte_eth_rss_conf rss_conf;
+	/* Maximum packet length */
+	uint32_t data_room;
+	/* Maximum supported MTU value */
+	uint32_t mtu_max;
+	/* DPDK MTU has been modified */
+	uint8_t mtu_set;
+	/* Use system call to get/set vdev promisc mode */
+	uint8_t vdev_sysc_promisc;
+	/* Number of RX descriptors per queue */
+	uint16_t num_rx_desc[ODP_PKTIN_MAX_QUEUES];
+	/* Number of TX descriptors per queue */
+	uint16_t num_tx_desc[ODP_PKTOUT_MAX_QUEUES];
+
+	/* --- Locks for MT safe operations --- */
+
+	/* RX queue locks */
+	odp_ticketlock_t rx_lock[ODP_PKTIN_MAX_QUEUES] ODP_ALIGNED_CACHE;
+	/* TX queue locks */
+	odp_ticketlock_t tx_lock[ODP_PKTOUT_MAX_QUEUES] ODP_ALIGNED_CACHE;
+} pkt_dpdk_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_dpdk_t),
+		  "PKTIO_PRIVATE_SIZE too small");
+
+/* Per-pool data stored in the ODP pool's memory-source private area */
+typedef struct {
+	/* DPDK mempool element size (from rte_mempool_calc_obj_size()) */
+	uint32_t dpdk_elt_size;
+	/* Non-zero while the DPDK pool may hand out/receive buffers */
+	uint8_t pool_in_use;
+	/* The DPDK mempool wrapping this ODP pool (NULL if not created) */
+	struct rte_mempool *pkt_pool;
+} mem_src_data_t;
+
+ODP_STATIC_ASSERT(_ODP_POOL_MEM_SRC_DATA_SIZE >= sizeof(mem_src_data_t),
+		  "_ODP_POOL_MEM_SRC_DATA_SIZE too small");
+
+/* Return the rte_mbuf immediately preceding the ODP packet header: pool
+ * elements are laid out [rte_mbuf][odp_packet_hdr_t] back to back. */
+static inline struct rte_mbuf *mbuf_from_pkt_hdr(odp_packet_hdr_t *pkt_hdr)
+{
+	return ((struct rte_mbuf *)pkt_hdr) - 1;
+}
+
+/* Inverse of mbuf_from_pkt_hdr(): the ODP packet header lives directly after
+ * the rte_mbuf in the same pool element. */
+static inline odp_packet_hdr_t *pkt_hdr_from_mbuf(struct rte_mbuf *mbuf)
+{
+	return (odp_packet_hdr_t *)(mbuf + 1);
+}
+
+/* Access the DPDK-specific private data of a pktio entry */
+static inline pkt_dpdk_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+	return (pkt_dpdk_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
+
+/* Reinterpret a pool's memory-source private area as mem_src_data_t
+ * (size guaranteed by the _ODP_POOL_MEM_SRC_DATA_SIZE assert above) */
+static inline mem_src_data_t *mem_src_priv(uint8_t *data)
+{
+	return (mem_src_data_t *)data;
+}
+
+static int disable_pktio; /* non-zero: this pktio type disabled, 0: enabled */
+
+/* Forward declaration: full DPDK EAL init, also called from pool_obj_size() */
+static int dpdk_pktio_init(void);
+
+/* Look up a "pktio_dpdk" libconfig option, optionally in a per-driver
+ * subsection (drv_name). Returns the libconfig lookup status (0 on failure,
+ * in which case an error is logged). */
+static int lookup_opt(const char *opt_name, const char *drv_name, int *val)
+{
+	const char *base = "pktio_dpdk";
+	int ret;
+
+	ret = _odp_libconfig_lookup_ext_int(base, drv_name, opt_name, val);
+	if (ret == 0)
+		_ODP_ERR("Unable to find DPDK configuration option: %s\n", opt_name);
+
+	return ret;
+}
+
+/* Populate the pktio's dpdk_opt_t from runtime configuration. Descriptor
+ * counts and rx_drop_en are looked up per driver; set_flow_hash and
+ * multicast_en are global options. Returns 0 on success, -1 if any option
+ * is missing. */
+static int init_options(pktio_entry_t *pktio_entry,
+			const struct rte_eth_dev_info *dev_info)
+{
+	dpdk_opt_t *opt = &pkt_priv(pktio_entry)->opt;
+	int val;
+
+	if (!lookup_opt("num_rx_desc", dev_info->driver_name,
+			&opt->num_rx_desc_default))
+		return -1;
+
+	if (!lookup_opt("num_tx_desc", dev_info->driver_name,
+			&opt->num_tx_desc_default))
+		return -1;
+
+	if (!lookup_opt("rx_drop_en", dev_info->driver_name, &val))
+		return -1;
+	opt->rx_drop_en = !!val;
+
+	if (!lookup_opt("set_flow_hash", NULL, &val))
+		return -1;
+	opt->set_flow_hash = !!val;
+
+	if (!lookup_opt("multicast_en", NULL, &val))
+		return -1;
+	opt->multicast_en = !!val;
+
+	_ODP_DBG("DPDK interface (%s): %" PRIu16 "\n", dev_info->driver_name,
+		 pkt_priv(pktio_entry)->port_id);
+	_ODP_DBG("  multicast_en: %d\n", opt->multicast_en);
+	_ODP_DBG("  num_rx_desc: %d\n", opt->num_rx_desc_default);
+	_ODP_DBG("  num_tx_desc: %d\n", opt->num_tx_desc_default);
+	_ODP_DBG("  rx_drop_en: %d\n", opt->rx_drop_en);
+
+	return 0;
+}
+
+/**
+ * Calculate valid cache size for DPDK packet pool
+ *
+ * Finds the largest divisor of 'num' that fits under
+ * RTE_MEMPOOL_CACHE_MAX_SIZE, so that cached elements always divide the pool
+ * evenly. Returns 0 when no acceptable size exists (caching disabled).
+ */
+static uint32_t cache_size(uint32_t num)
+{
+	uint32_t size = 0;
+	uint32_t i;
+
+	if (!RTE_MEMPOOL_CACHE_MAX_SIZE)
+		return 0;
+
+	/* Smallest divisor count that keeps num/i within the cache limit */
+	i = (num + RTE_MEMPOOL_CACHE_MAX_SIZE - 1) / RTE_MEMPOOL_CACHE_MAX_SIZE;
+	i = RTE_MAX(i, 2UL); /* NOTE(review): mixes uint32_t with 2UL — verify
+			      * RTE_MAX handles the type promotion as intended */
+	for (; i <= (num / 2); ++i)
+		if ((num % i) == 0) {
+			size = num / i;
+			break;
+		}
+	/* Sanity check: cache must not exceed DPDK's limit nor hold more than
+	 * ~2/3 of the pool (floating-point comparison, per DPDK guidance) */
+	if (odp_unlikely(size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
+			 (uint32_t)size * 1.5 > num)) {
+		_ODP_ERR("Cache size calc failure: %d\n", size);
+		size = 0;
+	}
+
+	return size;
+}
+
+/* Offset of the ODP segment data from the mbuf buffer start, used to
+ * resynchronize mbuf->data_off after ODP head/tail manipulation */
+static inline uint16_t mbuf_data_off(struct rte_mbuf *mbuf,
+				     odp_packet_hdr_t *pkt_hdr)
+{
+	return (uintptr_t)pkt_hdr->seg_data - (uintptr_t)mbuf->buf_addr;
+}
+
+/**
+ * Update mbuf
+ *
+ * Called always before rte_mbuf is passed to DPDK. Refreshes the length,
+ * refcount and offload fields from the ODP packet state, and realigns
+ * data_off if ODP moved the segment data pointer.
+ */
+static inline void mbuf_update(struct rte_mbuf *mbuf, odp_packet_hdr_t *pkt_hdr,
+			       uint16_t pkt_len)
+{
+	mbuf->data_len = pkt_len;
+	mbuf->pkt_len = pkt_len;
+	mbuf->refcnt = 1;
+	mbuf->ol_flags = 0;
+
+	/* ODP push/pull head operations may have moved seg_data */
+	if (odp_unlikely(((uint8_t *)mbuf->buf_addr + mbuf->data_off) != pkt_hdr->seg_data))
+		mbuf->data_off = mbuf_data_off(mbuf, pkt_hdr);
+}
+
+/**
+ * Initialize packet mbuf. Modified version of standard rte_pktmbuf_init()
+ * function.
+ *
+ * Differences from the stock version: the buffer address is derived from the
+ * ODP packet header stored in the mbuf private area (which is deliberately
+ * NOT zeroed), and the IO virtual address is resolved per buffer.
+ */
+static void pktmbuf_init(struct rte_mempool *mp, void *opaque_arg ODP_UNUSED,
+			 void *_m, unsigned i ODP_UNUSED)
+{
+	struct rte_mbuf *m = _m;
+	uint32_t mbuf_size, buf_len, priv_size;
+	odp_packet_hdr_t *pkt_hdr;
+	void *buf_addr;
+
+	pkt_hdr = pkt_hdr_from_mbuf(m);
+	/* Data area starts RTE_PKTMBUF_HEADROOM before the ODP base data */
+	buf_addr = pkt_hdr->event_hdr.base_data - RTE_PKTMBUF_HEADROOM;
+
+	priv_size = rte_pktmbuf_priv_size(mp);
+	mbuf_size = sizeof(struct rte_mbuf);
+	buf_len = rte_pktmbuf_data_room_size(mp);
+
+	/* odp_packet_hdr_t stored in private data so don't zero */
+	memset(m, 0, mbuf_size);
+	m->priv_size = priv_size;
+	m->buf_addr = buf_addr;
+
+	m->buf_iova = rte_mem_virt2iova(buf_addr);
+	if (odp_unlikely(m->buf_iova == 0))
+		_ODP_ABORT("Bad IO virtual address\n");
+
+	m->buf_len = (uint16_t)buf_len;
+	m->data_off = RTE_PKTMBUF_HEADROOM;
+
+	/* Init some constant fields */
+	m->pool = mp;
+	m->nb_segs = 1;
+	m->port = MBUF_INVALID_PORT;
+	rte_mbuf_refcnt_set(m, 1);
+	m->next = NULL;
+}
+
+/**
+ * Create custom DPDK packet pool
+ *
+ * Wraps an existing ODP pool (which must be backed by huge pages) in a DPDK
+ * mempool using the "odp_pool" ops, populates it page by page from the ODP
+ * pool's SHM, maps the pages for VFIO DMA and initializes every mbuf.
+ * Returns the mempool or NULL on failure.
+ */
+static struct rte_mempool *mbuf_pool_create(const char *name,
+					    pool_t *pool_entry,
+					    uint32_t dpdk_elt_size)
+{
+	odp_shm_info_t shm_info;
+	struct rte_mempool *mp = NULL;
+	struct rte_pktmbuf_pool_private mbp_priv;
+	struct rte_mempool_objsz sz;
+	unsigned int elt_size = dpdk_elt_size;
+	unsigned int num = pool_entry->num, populated = 0;
+	uint32_t total_size;
+	uint64_t page_size, offset = 0, remainder = 0;
+	uint8_t *addr;
+	int ret;
+
+	if (!(pool_entry->mem_from_huge_pages)) {
+		_ODP_ERR("DPDK requires memory is allocated from huge pages\n");
+		goto fail;
+	}
+
+	if (pool_entry->seg_len < RTE_MBUF_DEFAULT_BUF_SIZE) {
+		_ODP_ERR("Some NICs need at least %dB buffers to not segment "
+			 "standard ethernet frames. Increase pool seg_len.\n",
+			 RTE_MBUF_DEFAULT_BUF_SIZE);
+		goto fail;
+	}
+
+	if (odp_shm_info(pool_entry->shm, &shm_info)) {
+		_ODP_ERR("Failed to query SHM info.\n");
+		goto fail;
+	}
+
+	/* ODP and DPDK must agree on the per-element size, otherwise the
+	 * shared layout assumption (mbuf + header per block) breaks */
+	page_size = shm_info.page_size;
+	total_size = rte_mempool_calc_obj_size(elt_size, MEMPOOL_FLAGS, &sz);
+	if (total_size != pool_entry->block_size) {
+		_ODP_ERR("DPDK pool block size not matching to ODP pool: "
+			 "%" PRIu32 "/%" PRIu32 "\n", total_size,
+			 pool_entry->block_size);
+		goto fail;
+	}
+
+	mp = rte_mempool_create_empty(name, num, elt_size, cache_size(num),
+				      sizeof(struct rte_pktmbuf_pool_private),
+				      rte_socket_id(), MEMPOOL_FLAGS);
+	if (mp == NULL) {
+		_ODP_ERR("Failed to create empty DPDK packet pool\n");
+		goto fail;
+	}
+
+	mp->pool_data = _odp_pool_handle(pool_entry);
+
+	if (rte_mempool_set_ops_byname(mp, "odp_pool", pool_entry)) {
+		_ODP_ERR("Failed setting mempool operations\n");
+		goto fail;
+	}
+
+	mbp_priv.mbuf_data_room_size = pool_entry->headroom +
+			pool_entry->seg_len + pool_entry->tailroom;
+	mbp_priv.mbuf_priv_size = RTE_ALIGN(sizeof(odp_packet_hdr_t),
+					    RTE_MBUF_PRIV_ALIGN);
+	rte_pktmbuf_pool_init(mp, &mbp_priv);
+
+	/* DPDK expects buffers that would be crossing a hugepage boundary to be aligned to the
+	 * boundary. This isn't the case with ODP pools as boundary-crossing buffers are skipped
+	 * and unused but still part of the pool. Thus, populate the mempool with several virtually
+	 * and physically contiguous chunks as dictated by the skipped buffers. */
+	for (uint64_t i = 0; i < pool_entry->shm_size; i += page_size) {
+		remainder = (page_size - offset) % total_size;
+		addr = pool_entry->base_addr + i + offset;
+		ret = rte_mempool_populate_iova(mp, (char *)addr, rte_mem_virt2iova(addr),
+						page_size - remainder - offset,
+						NULL, NULL);
+
+		if (ret <= 0) {
+			_ODP_ERR("Failed to populate mempool: %d\n", ret);
+			goto fail;
+		}
+
+		populated += ret;
+		/* Next page starts mid-element if this one ended with a
+		 * partial (skipped) element */
+		offset = remainder ? total_size - remainder : 0;
+	}
+
+	if (populated != num) {
+		_ODP_ERR("Failed to populate mempool with all requested blocks, populated: %u, "
+			 "requested: %u\n", populated, num);
+		goto fail;
+	}
+
+	/* Map pages for DMA access to enable VFIO usage */
+	for (uint64_t i = 0; i < pool_entry->shm_size; i += page_size) {
+		addr = pool_entry->base_addr + i;
+
+		rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+					   (uint64_t)(uintptr_t)addr,
+					   rte_mem_virt2iova(addr), page_size);
+	}
+
+	rte_mempool_obj_iter(mp, pktmbuf_init, NULL);
+
+	return mp;
+
+fail:
+	if (mp)
+		rte_mempool_free(mp);
+	return NULL;
+}
+
+/* DPDK external memory pool operations */
+
+/* Mempool enqueue op: returning mbufs to the pool means freeing the
+ * corresponding ODP packets. No-op once the pool is being torn down
+ * (pool_in_use cleared by pool_destroy()). Always reports success. */
+static int pool_enqueue(struct rte_mempool *mp,
+			void * const *obj_table, unsigned num)
+{
+	odp_packet_t pkt_tbl[num];
+	pool_t *pool_entry = (pool_t *)mp->pool_config;
+	mem_src_data_t *mem_src_data = mem_src_priv(pool_entry->mem_src_data);
+	unsigned i;
+
+	if (odp_unlikely(num == 0 || !mem_src_data->pool_in_use))
+		return 0;
+
+	for (i = 0; i < num; i++) {
+		struct rte_mbuf *mbuf = (struct rte_mbuf *)obj_table[i];
+		odp_packet_hdr_t *pkt_hdr = pkt_hdr_from_mbuf(mbuf);
+
+		pkt_tbl[i] = packet_handle(pkt_hdr);
+	}
+
+	odp_packet_free_multi(pkt_tbl, num);
+
+	return 0;
+}
+
+/* Mempool dequeue op: allocate ODP packets and hand back their embedded
+ * mbufs. All-or-nothing: on partial allocation the packets are released and
+ * -ENOENT is returned, as DPDK expects from a bulk dequeue. */
+static int pool_dequeue_bulk(struct rte_mempool *mp, void **obj_table,
+			     unsigned num)
+{
+	odp_pool_t pool = (odp_pool_t)mp->pool_data;
+	pool_t *pool_entry = (pool_t *)mp->pool_config;
+	odp_packet_t packet_tbl[num];
+	int pkts;
+	int i;
+
+	pkts = _odp_packet_alloc_multi(pool, pool_entry->seg_len, packet_tbl,
+				       num);
+
+	if (odp_unlikely(pkts != (int)num)) {
+		if (pkts > 0)
+			odp_packet_free_multi(packet_tbl, pkts);
+		return -ENOENT;
+	}
+
+	for (i = 0; i < pkts; i++) {
+		odp_packet_hdr_t *pkt_hdr = packet_hdr(packet_tbl[i]);
+
+		obj_table[i] = mbuf_from_pkt_hdr(pkt_hdr);
+	}
+
+	return 0;
+}
+
+/* Mempool alloc op: nothing to do, backing memory is owned by the ODP pool */
+static int pool_alloc(struct rte_mempool *mp ODP_UNUSED)
+{
+	return 0;
+}
+
+/* Mempool get_count op: report the ODP pool's configured packet count
+ * (0 if the pool info cannot be read) */
+static unsigned pool_get_count(const struct rte_mempool *mp)
+{
+	odp_pool_t pool = (odp_pool_t)mp->pool_data;
+	odp_pool_info_t info;
+
+	if (odp_pool_info(pool, &info)) {
+		_ODP_ERR("Failed to read pool info\n");
+		return 0;
+	}
+	return info.params.pkt.num;
+}
+
+/* Mempool free op: flush every lcore's default cache so cached mbufs are
+ * returned (and thus their ODP packets freed) before teardown */
+static void pool_free(struct rte_mempool *mp)
+{
+	unsigned lcore_id;
+
+	RTE_LCORE_FOREACH(lcore_id) {
+		struct rte_mempool_cache *cache;
+
+		cache = rte_mempool_default_cache(mp, lcore_id);
+		if (cache != NULL)
+			rte_mempool_cache_flush(cache, mp);
+	}
+}
+
+/* ODP mem-src destroy hook: clear pool_in_use first so pool_enqueue()
+ * becomes a no-op while the DPDK mempool is being freed */
+static void pool_destroy(uint8_t *data)
+{
+	mem_src_data_t *mem_src_data = mem_src_priv(data);
+
+	if (mem_src_data->pkt_pool != NULL) {
+		mem_src_data->pool_in_use = 0;
+		rte_mempool_free(mem_src_data->pkt_pool);
+	}
+
+	mem_src_data->pkt_pool = NULL;
+}
+
+/* ODP mem-src create hook: build the wrapping DPDK mempool for a new ODP
+ * pool. No-op (success) unless zero-copy mode is compiled in. Returns 0 on
+ * success, -1 on DPDK pool creation failure. */
+static int pool_create(uint8_t *data, pool_t *pool)
+{
+	struct rte_mempool *pkt_pool;
+	char pool_name[RTE_MEMPOOL_NAMESIZE];
+	mem_src_data_t *mem_src_data = mem_src_priv(data);
+
+	mem_src_data->pkt_pool = NULL;
+
+	if (!_ODP_DPDK_ZERO_COPY)
+		return 0;
+
+	mem_src_data->pool_in_use = 0;
+	/* Name includes PID and pool index for uniqueness across processes */
+	snprintf(pool_name, sizeof(pool_name),
+		 "dpdk_pktpool_%" PRIu32 "_%" PRIu32 "", odp_global_ro.main_pid,
+		 pool->pool_idx);
+	pkt_pool = mbuf_pool_create(pool_name, pool, mem_src_data->dpdk_elt_size);
+
+	if (pkt_pool == NULL) {
+		_ODP_ERR("Creating external DPDK pool failed\n");
+		return -1;
+	}
+
+	mem_src_data->pkt_pool = pkt_pool;
+	mem_src_data->pool_in_use = 1;
+
+	return 0;
+}
+
+/* ODP mem-src obj-size hook: grow the ODP pool's block size/offset so each
+ * element can carry a leading rte_mbuf plus DPDK mempool header, and request
+ * huge pages. Lazily initializes DPDK on first use; on init failure signals
+ * the caller by zeroing *block_size. Zero-copy only. */
+static void pool_obj_size(uint8_t *data, uint32_t *block_size, uint32_t *block_offset,
+			  uint32_t *flags)
+{
+	struct rte_mempool_objsz sz;
+	uint32_t size;
+	uint32_t total_size;
+	mem_src_data_t *mem_src_data = mem_src_priv(data);
+
+	if (!_ODP_DPDK_ZERO_COPY)
+		return;
+
+	if (odp_global_rw->dpdk_initialized == 0) {
+		if (dpdk_pktio_init()) {
+			_ODP_ERR("Initializing DPDK failed\n");
+			*block_size = 0;
+			return;
+		}
+		odp_global_rw->dpdk_initialized = 1;
+	}
+
+	*flags |= ODP_SHM_HP;
+	size = *block_size + sizeof(struct rte_mbuf);
+	total_size = rte_mempool_calc_obj_size(size, MEMPOOL_FLAGS, &sz);
+	mem_src_data->dpdk_elt_size = sz.elt_size;
+	*block_size = total_size;
+	/* ODP data begins after the mempool header and the embedded mbuf */
+	*block_offset = sz.header_size + sizeof(struct rte_mbuf);
+}
+
+/* Mempool ops vtable backing DPDK pools with ODP pool memory; registered
+ * under the name "odp_pool" (looked up in mbuf_pool_create()) */
+static struct rte_mempool_ops odp_pool_ops = {
+	.name = "odp_pool",
+	.alloc = pool_alloc,
+	.free = pool_free,
+	.enqueue = pool_enqueue,
+	.dequeue = pool_dequeue_bulk,
+	.get_count = pool_get_count
+};
+
+RTE_MEMPOOL_REGISTER_OPS(odp_pool_ops)
+
+/* Copy-mode RX conversion: allocate ODP packets, copy each single-segment
+ * mbuf's data into them (optionally parsing/classifying), then free the
+ * mbufs. Returns the number of packets left in pkt_table[] for the caller
+ * (classified packets are enqueued directly and not counted), or -1 when
+ * nothing could be converted. */
+static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
+			      odp_packet_t pkt_table[],
+			      struct rte_mbuf *mbuf_table[],
+			      uint16_t mbuf_num, odp_time_t *ts)
+{
+	odp_packet_t pkt;
+	odp_packet_hdr_t *pkt_hdr;
+	uint16_t pkt_len;
+	struct rte_mbuf *mbuf;
+	void *data;
+	int i, j, num;
+	uint32_t max_len;
+	int nb_pkts = 0;
+	int nb_cls = 0;
+	const int cls_enabled = pktio_cls_enabled(pktio_entry);
+	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+	odp_pool_t pool = pkt_dpdk->pool;
+	odp_pktin_config_opt_t pktin_cfg = pktio_entry->config.pktin;
+	odp_pktio_t input = pktio_entry->handle;
+	uint16_t frame_offset = pktio_entry->pktin_frame_offset;
+	const odp_proto_layer_t layer = pktio_entry->parse_layer;
+
+	/* Allocate maximum sized packets */
+	max_len = pkt_dpdk->data_room;
+
+	num = _odp_packet_alloc_multi(pool, max_len + frame_offset,
+				      pkt_table, mbuf_num);
+	if (num != mbuf_num) {
+		_ODP_DBG("_odp_packet_alloc_multi() unable to allocate all packets: "
+			 "%d/%" PRIu16 " allocated\n", num, mbuf_num);
+		/* Drop the mbufs we have no packets for */
+		for (i = num; i < mbuf_num; i++)
+			rte_pktmbuf_free(mbuf_table[i]);
+	}
+
+	for (i = 0; i < num; i++) {
+		mbuf = mbuf_table[i];
+		if (odp_unlikely(mbuf->nb_segs != 1)) {
+			_ODP_ERR("Segmented buffers not supported\n");
+			goto fail;
+		}
+
+		data = rte_pktmbuf_mtod(mbuf, char *);
+		odp_prefetch(data);
+
+		pkt_len = rte_pktmbuf_pkt_len(mbuf);
+		pkt = pkt_table[i];
+		pkt_hdr = packet_hdr(pkt);
+
+		if (layer) {
+			int ret;
+
+			packet_parse_reset(pkt_hdr, 1);
+			ret = _odp_dpdk_packet_parse_common(pkt_hdr, data, pkt_len, pkt_len,
+							    mbuf, layer, pktin_cfg);
+			/* ret > 0: parse error counted but packet kept;
+			 * ret < 0: packet dropped */
+			if (ret)
+				odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
+
+			if (ret < 0) {
+				odp_packet_free(pkt);
+				rte_pktmbuf_free(mbuf);
+				continue;
+			}
+
+			if (cls_enabled) {
+				odp_pool_t new_pool;
+
+				ret = _odp_cls_classify_packet(pktio_entry, (const uint8_t *)data,
+							       &new_pool, pkt_hdr);
+				if (ret < 0)
+					odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+
+				if (ret) {
+					odp_packet_free(pkt);
+					rte_pktmbuf_free(mbuf);
+					continue;
+				}
+
+				/* Classifier may redirect to a different pool */
+				if (odp_unlikely(_odp_pktio_packet_to_pool(
+					    &pkt, &pkt_hdr, new_pool))) {
+					odp_packet_free(pkt);
+					rte_pktmbuf_free(mbuf);
+					odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+					continue;
+				}
+			}
+		}
+
+		/* Shrink the max-sized allocation down to the actual frame */
+		pull_tail(pkt_hdr, max_len - pkt_len);
+		if (frame_offset)
+			pull_head(pkt_hdr, frame_offset);
+
+		if (odp_packet_copy_from_mem(pkt, 0, pkt_len, data) != 0)
+			goto fail;
+
+		pkt_hdr->input = input;
+
+		if (mbuf->ol_flags & RTE_MBUF_F_RX_RSS_HASH)
+			packet_set_flow_hash(pkt_hdr, mbuf->hash.rss);
+
+		packet_set_ts(pkt_hdr, ts);
+
+		rte_pktmbuf_free(mbuf);
+
+		if (cls_enabled) {
+			/* Enqueue packets directly to classifier destination queue */
+			pkt_table[nb_cls++] = pkt;
+			nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == num));
+		} else {
+			pkt_table[nb_pkts++] = pkt;
+		}
+	}
+
+	/* Enqueue remaining classified packets */
+	if (odp_unlikely(nb_cls))
+		_odp_cls_enq(pkt_table, nb_cls, true);
+
+	return nb_pkts;
+
+fail:
+	/* Release the unconverted tail of both tables */
+	odp_packet_free_multi(&pkt_table[i], num - i);
+
+	for (j = i; j < num; j++)
+		rte_pktmbuf_free(mbuf_table[j]);
+
+	return (i > 0 ? i : -1);
+}
+
+/* Inspect an L3 header: report whether it is IPv4 or IPv6 and extract the
+ * L4 protocol id (0 for fragmented IPv4, where L4 offloads don't apply).
+ * Returns 0 on success, -1 for an unrecognized IP version. */
+static inline int check_proto(void *l3_hdr, odp_bool_t *l3_proto_v4,
+			      uint8_t *l4_proto)
+{
+	uint8_t l3_proto_ver = _ODP_IPV4HDR_VER(*(uint8_t *)l3_hdr);
+
+	if (l3_proto_ver == _ODP_IPV4) {
+		struct rte_ipv4_hdr *ip = (struct rte_ipv4_hdr *)l3_hdr;
+
+		*l3_proto_v4 = 1;
+		if (!rte_ipv4_frag_pkt_is_fragmented(ip))
+			*l4_proto = ip->next_proto_id;
+		else
+			*l4_proto = 0;
+
+		return 0;
+	} else if (l3_proto_ver == _ODP_IPV6) {
+		struct rte_ipv6_hdr *ipv6 = (struct rte_ipv6_hdr *)l3_hdr;
+
+		*l3_proto_v4 = 0;
+		*l4_proto = ipv6->proto;
+		return 0;
+	}
+
+	return -1;
+}
+
+/* Pseudo-header checksum for TX L4 checksum offload, dispatched on IP
+ * version (DPDK requires it pre-seeded in the L4 checksum field) */
+static inline uint16_t phdr_csum(odp_bool_t ipv4, void *l3_hdr,
+				 uint64_t ol_flags)
+{
+	if (ipv4)
+		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
+	else /*ipv6*/
+		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
+}
+
+/* Decide whether a checksum offload applies to this packet: capability and
+ * protocol must match, and the per-packet override wins over the interface
+ * level configuration when set. */
+#define OL_TX_CHKSUM_PKT(_cfg, _capa, _proto, _ovr_set, _ovr) \
+	(_capa && _proto && (_ovr_set ? _ovr : _cfg))
+
+/* Set up mbuf TX checksum offload fields (ol_flags, l2_len/l3_len, and the
+ * zeroed/pseudo-header-seeded checksum fields) for a packet about to be
+ * transmitted. Silently returns if the packet has no usable L3 info. */
+static inline void pkt_set_ol_tx(odp_pktout_config_opt_t *pktout_cfg,
+				 odp_pktout_config_opt_t *pktout_capa,
+				 odp_packet_hdr_t *pkt_hdr,
+				 struct rte_mbuf *mbuf,
+				 char *mbuf_data)
+{
+	void *l3_hdr, *l4_hdr;
+	uint8_t l4_proto;
+	odp_bool_t l3_proto_v4;
+	odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt;
+	packet_parser_t *pkt_p = &pkt_hdr->p;
+
+	if (pkt_p->l3_offset == ODP_PACKET_OFFSET_INVALID)
+		return;
+
+	l3_hdr = (void *)(mbuf_data + pkt_p->l3_offset);
+
+	if (check_proto(l3_hdr, &l3_proto_v4, &l4_proto))
+		return;
+
+	ipv4_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.ipv4_chksum,
+					   pktout_capa->bit.ipv4_chksum,
+					   l3_proto_v4,
+					   pkt_p->flags.l3_chksum_set,
+					   pkt_p->flags.l3_chksum);
+	udp_chksum_pkt =  OL_TX_CHKSUM_PKT(pktout_cfg->bit.udp_chksum,
+					   pktout_capa->bit.udp_chksum,
+					   (l4_proto == _ODP_IPPROTO_UDP),
+					   pkt_p->flags.l4_chksum_set,
+					   pkt_p->flags.l4_chksum);
+	tcp_chksum_pkt =  OL_TX_CHKSUM_PKT(pktout_cfg->bit.tcp_chksum,
+					   pktout_capa->bit.tcp_chksum,
+					   (l4_proto == _ODP_IPPROTO_TCP),
+					   pkt_p->flags.l4_chksum_set,
+					   pkt_p->flags.l4_chksum);
+
+	if (!ipv4_chksum_pkt && !udp_chksum_pkt && !tcp_chksum_pkt)
+		return;
+
+	mbuf->l2_len = pkt_p->l3_offset - pkt_p->l2_offset;
+
+	if (l3_proto_v4)
+		mbuf->ol_flags = RTE_MBUF_F_TX_IPV4;
+	else
+		mbuf->ol_flags = RTE_MBUF_F_TX_IPV6;
+
+	if (ipv4_chksum_pkt) {
+		mbuf->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
+
+		/* Checksum field must be zero before HW computes it */
+		((struct rte_ipv4_hdr *)l3_hdr)->hdr_checksum = 0;
+		mbuf->l3_len = _ODP_IPV4HDR_IHL(*(uint8_t *)l3_hdr) * 4;
+	}
+
+	if (pkt_p->l4_offset == ODP_PACKET_OFFSET_INVALID)
+		return;
+
+	mbuf->l3_len = pkt_p->l4_offset - pkt_p->l3_offset;
+
+	l4_hdr = (void *)(mbuf_data + pkt_p->l4_offset);
+
+	if (udp_chksum_pkt) {
+		mbuf->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
+
+		/* Seed with pseudo-header checksum as DPDK requires */
+		((struct rte_udp_hdr *)l4_hdr)->dgram_cksum =
+			phdr_csum(l3_proto_v4, l3_hdr, mbuf->ol_flags);
+	} else if (tcp_chksum_pkt) {
+		mbuf->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
+
+		((struct rte_tcp_hdr *)l4_hdr)->cksum =
+			phdr_csum(l3_proto_v4, l3_hdr, mbuf->ol_flags);
+	}
+}
+
+/* Copy-mode TX conversion: allocate mbufs from the DPDK pool and copy each
+ * ODP packet's data into them. On an oversized packet the remaining mbufs
+ * are freed and the count converted so far is returned (-1 if none).
+ * tx_ts_idx receives the 1-based index of the first packet requesting a TX
+ * timestamp (0 if none). */
+static inline int pkt_to_mbuf(pktio_entry_t *pktio_entry,
+			      struct rte_mbuf *mbuf_table[],
+			      const odp_packet_t pkt_table[], uint16_t num,
+			      uint16_t *tx_ts_idx)
+{
+	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+	char *data;
+	uint16_t i, j, pkt_len;
+	uint8_t chksum_enabled = pktio_entry->enabled.chksum_insert;
+	uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
+	odp_pktout_config_opt_t *pktout_cfg = &pktio_entry->config.pktout;
+
+	if (odp_unlikely((rte_pktmbuf_alloc_bulk(pkt_dpdk->pkt_pool,
+						 mbuf_table, num)))) {
+		_ODP_ERR("Failed to alloc mbuf\n");
+		return 0;
+	}
+	for (i = 0; i < num; i++) {
+		odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt_table[i]);
+
+		pkt_len = packet_len(pkt_hdr);
+
+		if (odp_unlikely(pkt_len > pkt_dpdk->mtu))
+			goto fail;
+
+		/* Packet always fits in mbuf */
+		/* NOTE(review): rte_pktmbuf_append() return not checked —
+		 * relies on the MTU check above guaranteeing room; confirm */
+		data = rte_pktmbuf_append(mbuf_table[i], pkt_len);
+
+		odp_packet_copy_to_mem(pkt_table[i], 0, pkt_len, data);
+
+		if (odp_unlikely(chksum_enabled))
+			pkt_set_ol_tx(pktout_cfg, &pkt_dpdk->pktout_capa, pkt_hdr,
+				      mbuf_table[i], data);
+
+		if (odp_unlikely(tx_ts_enabled)) {
+			if (odp_unlikely(*tx_ts_idx == 0 && pkt_hdr->p.flags.ts_set))
+				*tx_ts_idx = i + 1;
+		}
+	}
+	return i;
+
+fail:
+	for (j = i; j < num; j++)
+		rte_pktmbuf_free(mbuf_table[j]);
+
+	return i > 0 ? i : -1;
+}
+
+/* Prefetch the ODP header (first two cache lines, second for write) and the
+ * packet data of an mbuf about to be processed on the zero-copy RX path */
+static inline void prefetch_pkt(struct rte_mbuf *mbuf)
+{
+	odp_packet_hdr_t *pkt_hdr = pkt_hdr_from_mbuf(mbuf);
+	void *data = rte_pktmbuf_mtod(mbuf, char *);
+
+	odp_prefetch(pkt_hdr);
+	odp_prefetch_store((uint8_t *)pkt_hdr + ODP_CACHE_LINE_SIZE);
+	odp_prefetch(data);
+}
+
+/**
+ * Convert mbufs when packet parsing is required
+ *
+ * Zero-copy RX path: reuses the ODP header embedded in each mbuf (no data
+ * copy), parses up to the configured layer and optionally classifies.
+ * Returns the number of packets left in pkt_table[]; classified packets are
+ * enqueued to their CoS queues directly and not counted.
+ */
+static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
+				   odp_packet_t pkt_table[],
+				   struct rte_mbuf *mbuf_table[],
+				   uint16_t mbuf_num, odp_time_t *ts)
+{
+	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+	const uint8_t set_flow_hash = pkt_dpdk->flags.set_flow_hash;
+	const odp_pktio_t input = pktio_entry->handle;
+	const odp_pktin_config_opt_t pktin_cfg = pktio_entry->config.pktin;
+	const odp_proto_layer_t layer = pktio_entry->parse_layer;
+	const int cls_enabled = pktio_cls_enabled(pktio_entry);
+	int nb_cls = 0;
+	int nb_pkts = 0;
+
+	_ODP_ASSERT(layer != ODP_PROTO_LAYER_NONE);
+
+	/* Software pipeline: keep two mbufs ahead prefetched */
+	prefetch_pkt(mbuf_table[0]);
+
+	if (odp_likely(mbuf_num > 1))
+		prefetch_pkt(mbuf_table[1]);
+
+	for (uint16_t i = 0; i < mbuf_num; i++) {
+		odp_packet_t pkt;
+		odp_packet_hdr_t *pkt_hdr;
+		struct rte_mbuf *mbuf;
+		void *data;
+		uint16_t pkt_len;
+		int ret;
+
+		if (odp_likely((i + 2) < mbuf_num))
+			prefetch_pkt(mbuf_table[i + 2]);
+
+		mbuf = mbuf_table[i];
+		if (odp_unlikely(mbuf->nb_segs != 1)) {
+			_ODP_ERR("Segmented buffers not supported\n");
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
+		data = rte_pktmbuf_mtod(mbuf, char *);
+		pkt_len = rte_pktmbuf_pkt_len(mbuf);
+		pkt_hdr = pkt_hdr_from_mbuf(mbuf);
+		pkt = packet_handle(pkt_hdr);
+		packet_init(pkt_hdr, pkt_len);
+
+		/* Init buffer segments. Currently, only single segment packets
+		 * are supported. */
+		pkt_hdr->seg_data = data;
+		pkt_hdr->input = input;
+
+		if (set_flow_hash && (mbuf->ol_flags & RTE_MBUF_F_RX_RSS_HASH))
+			packet_set_flow_hash(pkt_hdr, mbuf->hash.rss);
+
+		packet_set_ts(pkt_hdr, ts);
+
+		ret = _odp_dpdk_packet_parse_common(pkt_hdr, data, pkt_len, pkt_len,
+						    mbuf, layer, pktin_cfg);
+		/* ret > 0: parse error counted but packet kept;
+		 * ret < 0: packet dropped */
+		if (ret)
+			odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
+
+		if (ret < 0) {
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
+		if (cls_enabled) {
+			odp_pool_t new_pool;
+
+			ret = _odp_cls_classify_packet(pktio_entry, (const uint8_t *)data,
+						       &new_pool, pkt_hdr);
+			if (ret < 0)
+				odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+
+			if (ret) {
+				rte_pktmbuf_free(mbuf);
+				continue;
+			}
+
+			/* Pool change forces a copy out of the mbuf's pool */
+			if (odp_unlikely(_odp_pktio_packet_to_pool(&pkt, &pkt_hdr, new_pool))) {
+				rte_pktmbuf_free(mbuf);
+				odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+				continue;
+			}
+
+			/* Enqueue packet directly to CoS destination queue */
+			pkt_table[nb_cls++] = pkt;
+			nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == mbuf_num));
+		} else {
+			pkt_table[nb_pkts++] = pkt;
+		}
+	}
+
+	/* Enqueue remaining classified packets */
+	if (odp_unlikely(nb_cls))
+		_odp_cls_enq(pkt_table, nb_cls, true);
+
+	return nb_pkts;
+}
+
+/**
+ * Convert mbufs when packet parsing and classifier are disabled
+ *
+ * Fastest zero-copy RX path: no data copy, no parsing, no classification —
+ * only header init, optional flow hash and timestamp. Segmented mbufs are
+ * dropped. Returns the number of packets written to pkt_table[].
+ */
+static inline int mbuf_to_pkt_zero_minimal(pktio_entry_t *pktio_entry,
+					   odp_packet_t pkt_table[], struct rte_mbuf *mbuf_table[],
+					   uint16_t mbuf_num, odp_time_t *ts)
+{
+	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+	const uint8_t set_flow_hash = pkt_dpdk->flags.set_flow_hash;
+	const odp_pktio_t input = pktio_entry->handle;
+	uint16_t nb_pkts = 0;
+
+	_ODP_ASSERT(pktio_entry->parse_layer == ODP_PROTO_LAYER_NONE);
+
+	/* Software pipeline: keep two mbufs ahead prefetched */
+	prefetch_pkt(mbuf_table[0]);
+
+	if (odp_likely(mbuf_num > 1))
+		prefetch_pkt(mbuf_table[1]);
+
+	for (int i = 0; i < mbuf_num; i++) {
+		odp_packet_hdr_t *pkt_hdr;
+		struct rte_mbuf *mbuf;
+		uint16_t pkt_len;
+
+		if (odp_likely((i + 2) < mbuf_num))
+			prefetch_pkt(mbuf_table[i + 2]);
+
+		mbuf = mbuf_table[i];
+		if (odp_unlikely(mbuf->nb_segs != 1)) {
+			_ODP_ERR("Segmented buffers not supported\n");
+			rte_pktmbuf_free(mbuf);
+			continue;
+		}
+
+		pkt_len = rte_pktmbuf_pkt_len(mbuf);
+		pkt_hdr = pkt_hdr_from_mbuf(mbuf);
+		packet_init(pkt_hdr, pkt_len);
+
+		/* Init buffer segments. Currently, only single segment packets
+		 * are supported. */
+		pkt_hdr->seg_data = rte_pktmbuf_mtod(mbuf, uint8_t *);
+		pkt_hdr->input = input;
+
+		if (set_flow_hash && (mbuf->ol_flags & RTE_MBUF_F_RX_RSS_HASH))
+			packet_set_flow_hash(pkt_hdr, mbuf->hash.rss);
+
+		packet_set_ts(pkt_hdr, ts);
+
+		pkt_table[nb_pkts++] = packet_handle(pkt_hdr);
+	}
+
+	return nb_pkts;
+}
+
+/* Zero-copy TX conversion: reuse each single-segment packet's embedded mbuf;
+ * multi-segment packets fall back to a copy via pkt_to_mbuf() and their
+ * indices are recorded in cpy_idx[] (count in *copy_count) so the caller can
+ * free the copies after transmit. Stops at the first oversized packet,
+ * returning the count converted so far (-1 if none). tx_ts_idx receives the
+ * 1-based index of the first packet requesting a TX timestamp (0 if none). */
+static inline int pkt_to_mbuf_zero(pktio_entry_t *pktio_entry,
+				   struct rte_mbuf *mbuf_table[],
+				   const odp_packet_t pkt_table[], uint16_t num,
+				   uint16_t *copy_count, uint16_t cpy_idx[], uint16_t *tx_ts_idx)
+{
+	pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+	odp_pktout_config_opt_t *pktout_cfg = &pktio_entry->config.pktout;
+	odp_pktout_config_opt_t *pktout_capa = &pkt_dpdk->pktout_capa;
+	uint16_t mtu = pkt_dpdk->mtu;
+	uint16_t i;
+	uint8_t chksum_enabled = pktio_entry->enabled.chksum_insert;
+	uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
+	*copy_count = 0;
+
+	for (i = 0; i < num; i++) {
+		odp_packet_t pkt = pkt_table[i];
+		odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+		struct rte_mbuf *mbuf = mbuf_from_pkt_hdr(pkt_hdr);
+		uint16_t pkt_len = odp_packet_len(pkt);
+
+		if (odp_unlikely(pkt_len > mtu))
+			goto fail;
+
+		if (odp_likely(pkt_hdr->seg_count == 1)) {
+			mbuf_update(mbuf, pkt_hdr, pkt_len);
+
+			if (odp_unlikely(chksum_enabled))
+				pkt_set_ol_tx(pktout_cfg, pktout_capa, pkt_hdr,
+					      mbuf, odp_packet_data(pkt));
+		} else {
+			uint16_t dummy_idx = 0;
+
+			/* Fall back to packet copy */
+			if (odp_unlikely(pkt_to_mbuf(pktio_entry, &mbuf,
+						     &pkt, 1, &dummy_idx) != 1))
+				goto fail;
+			cpy_idx[(*copy_count)++] = i;
+		}
+		mbuf_table[i] = mbuf;
+
+		if (odp_unlikely(tx_ts_enabled)) {
+			if (odp_unlikely(*tx_ts_idx == 0 && pkt_hdr->p.flags.ts_set))
+				*tx_ts_idx = i + 1;
+		}
+	}
+	return i;
+
+fail:
+	return i > 0 ? i : -1;
}
/* Test if s has only digits or not. Dpdk pktio uses only digits.*/
@@ -217,7 +1171,7 @@ static int dpdk_netdev_is_valid(const char *s)
return 1;
}
-static uint32_t dpdk_vdev_mtu_get(uint8_t port_id)
+static uint32_t dpdk_vdev_mtu_get(uint16_t port_id)
{
struct rte_eth_dev_info dev_info;
struct ifreq ifr;
@@ -231,19 +1185,19 @@ static uint32_t dpdk_vdev_mtu_get(uint8_t port_id)
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (sockfd < 0) {
- ODP_ERR("Failed to create control socket\n");
+ _ODP_ERR("Failed to create control socket\n");
return 0;
}
- mtu = mtu_get_fd(sockfd, ifr.ifr_name);
+ mtu = _odp_mtu_get_fd(sockfd, ifr.ifr_name);
close(sockfd);
return mtu;
}
static uint32_t dpdk_mtu_get(pktio_entry_t *pktio_entry)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- uint32_t mtu;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint32_t mtu = 0;
if (rte_eth_dev_get_mtu(pkt_dpdk->port_id, (uint16_t *)&mtu))
return 0;
@@ -261,7 +1215,34 @@ static uint32_t dpdk_mtu_get(pktio_entry_t *pktio_entry)
return mtu;
}
-static int dpdk_vdev_promisc_mode_get(uint8_t port_id)
+static uint32_t dpdk_maxlen(pktio_entry_t *pktio_entry)
+{
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+
+ return pkt_dpdk->mtu;
+}
+
+static int dpdk_maxlen_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t mtu;
+ int ret;
+
+ /* DPDK MTU value does not include Ethernet header */
+ mtu = maxlen_input - _ODP_ETHHDR_LEN;
+
+ ret = rte_eth_dev_set_mtu(pkt_dpdk->port_id, mtu);
+ if (odp_unlikely(ret))
+ _ODP_ERR("rte_eth_dev_set_mtu() failed: %d\n", ret);
+
+ pkt_dpdk->mtu = maxlen_input;
+ pkt_dpdk->mtu_set = 1;
+
+ return ret;
+}
+
+static int dpdk_vdev_promisc_mode_get(uint16_t port_id)
{
struct rte_eth_dev_info dev_info;
struct ifreq ifr;
@@ -275,16 +1256,16 @@ static int dpdk_vdev_promisc_mode_get(uint8_t port_id)
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (sockfd < 0) {
- ODP_ERR("Failed to create control socket\n");
+ _ODP_ERR("Failed to create control socket\n");
return -1;
}
- mode = promisc_mode_get_fd(sockfd, ifr.ifr_name);
+ mode = _odp_promisc_mode_get_fd(sockfd, ifr.ifr_name);
close(sockfd);
return mode;
}
-static int dpdk_vdev_promisc_mode_set(uint8_t port_id, int enable)
+static int dpdk_vdev_promisc_mode_set(uint16_t port_id, int enable)
{
struct rte_eth_dev_info dev_info;
struct ifreq ifr;
@@ -298,79 +1279,88 @@ static int dpdk_vdev_promisc_mode_set(uint8_t port_id, int enable)
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (sockfd < 0) {
- ODP_ERR("Failed to create control socket\n");
+ _ODP_ERR("Failed to create control socket\n");
return -1;
}
- mode = promisc_mode_set_fd(sockfd, ifr.ifr_name, enable);
+ mode = _odp_promisc_mode_set_fd(sockfd, ifr.ifr_name, enable);
close(sockfd);
return mode;
}
-static void rss_conf_to_hash_proto(struct rte_eth_rss_conf *rss_conf,
+static void hash_proto_to_rss_conf(struct rte_eth_rss_conf *rss_conf,
const odp_pktin_hash_proto_t *hash_proto)
{
- memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
-
if (hash_proto->proto.ipv4_udp)
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
if (hash_proto->proto.ipv4_tcp)
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (hash_proto->proto.ipv4)
- rss_conf->rss_hf |= ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 |
+ RTE_ETH_RSS_NONFRAG_IPV4_OTHER;
if (hash_proto->proto.ipv6_udp)
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP |
- ETH_RSS_IPV6_UDP_EX;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
+ RTE_ETH_RSS_IPV6_UDP_EX;
if (hash_proto->proto.ipv6_tcp)
- rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP |
- ETH_RSS_IPV6_TCP_EX;
+ rss_conf->rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
+ RTE_ETH_RSS_IPV6_TCP_EX;
if (hash_proto->proto.ipv6)
- rss_conf->rss_hf |= ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER |
- ETH_RSS_IPV6_EX;
+ rss_conf->rss_hf |= RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 |
+ RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
+ RTE_ETH_RSS_IPV6_EX;
rss_conf->rss_key = NULL;
}
-static int dpdk_setup_port(pktio_entry_t *pktio_entry)
+static int dpdk_setup_eth_dev(pktio_entry_t *pktio_entry)
{
int ret;
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- struct rte_eth_rss_conf rss_conf;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ struct rte_eth_conf eth_conf;
+ uint64_t rx_offloads = 0;
+ uint64_t tx_offloads = 0;
- /* Always set some hash functions to enable DPDK RSS hash calculation */
- if (pkt_dpdk->hash.all_bits == 0) {
- memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
- rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
- } else {
- rss_conf_to_hash_proto(&rss_conf, &pkt_dpdk->hash);
- }
-
- struct rte_eth_conf port_conf = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = pkt_dpdk->data_room,
- .split_hdr_size = 0,
- .header_split = 0,
- .hw_ip_checksum = 0,
- .hw_vlan_filter = 0,
- .jumbo_frame = 1,
- .hw_strip_crc = 0,
- },
- .rx_adv_conf = {
- .rss_conf = rss_conf,
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
- };
+ memset(&eth_conf, 0, sizeof(eth_conf));
+
+ eth_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
+ eth_conf.txmode.mq_mode = RTE_ETH_MQ_TX_NONE;
+ eth_conf.rx_adv_conf.rss_conf = pkt_dpdk->rss_conf;
+
+ /* Setup RX checksum offloads */
+ if (pktio_entry->config.pktin.bit.ipv4_chksum)
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
+
+ if (pktio_entry->config.pktin.bit.udp_chksum)
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
+
+ if (pktio_entry->config.pktin.bit.tcp_chksum)
+ rx_offloads |= RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
+
+ eth_conf.rxmode.offloads = rx_offloads;
+
+ /* Setup TX checksum offloads */
+ if (pktio_entry->config.pktout.bit.ipv4_chksum_ena)
+ tx_offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
+
+ if (pktio_entry->config.pktout.bit.udp_chksum_ena)
+ tx_offloads |= RTE_ETH_TX_OFFLOAD_UDP_CKSUM;
+
+ if (pktio_entry->config.pktout.bit.tcp_chksum_ena)
+ tx_offloads |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
+
+ if (pktio_entry->config.pktout.bit.sctp_chksum_ena)
+ tx_offloads |= RTE_ETH_TX_OFFLOAD_SCTP_CKSUM;
+
+ eth_conf.txmode.offloads = tx_offloads;
+
+ if (tx_offloads)
+ pktio_entry->enabled.chksum_insert = 1;
ret = rte_eth_dev_configure(pkt_dpdk->port_id,
- pktio_entry->s.num_in_queue,
- pktio_entry->s.num_out_queue, &port_conf);
+ pktio_entry->num_in_queue,
+ pktio_entry->num_out_queue, &eth_conf);
if (ret < 0) {
- ODP_ERR("Failed to setup device: err=%d, port=%" PRIu8 "\n",
- ret, pkt_dpdk->port_id);
+ _ODP_ERR("Failed to setup device: err=%d, port=%" PRIu8 "\n",
+ ret, pkt_dpdk->port_id);
return -1;
}
return 0;
@@ -378,23 +1368,18 @@ static int dpdk_setup_port(pktio_entry_t *pktio_entry)
static int dpdk_close(pktio_entry_t *pktio_entry)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
unsigned idx;
unsigned i, j;
/* Free cache packets */
- for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
- idx = pkt_dpdk->rx_cache[i].s.idx;
+ for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++) {
+ idx = pkt_dpdk->rx_cache[i].idx;
- for (j = 0; j < pkt_dpdk->rx_cache[i].s.count; j++)
- rte_pktmbuf_free(pkt_dpdk->rx_cache[i].s.pkt[idx++]);
+ for (j = 0; j < pkt_dpdk->rx_cache[i].count; j++)
+ rte_pktmbuf_free(pkt_dpdk->rx_cache[i].pkt[idx++]);
}
- if (pktio_entry->s.state != PKTIO_STATE_OPENED)
- rte_eth_dev_close(pkt_dpdk->port_id);
-
- rte_mempool_free(pkt_dpdk->pkt_pool);
-
return 0;
}
@@ -408,8 +1393,8 @@ static int dpdk_pktio_init(void)
int32_t masklen;
int mem_str_len;
int cmd_len;
+ int numa_nodes;
cpu_set_t original_cpuset;
- struct rte_config *cfg;
/**
* DPDK init changes the affinity of the calling thread, so after it
@@ -424,7 +1409,7 @@ static int dpdk_pktio_init(void)
i = pthread_getaffinity_np(pthread_self(),
sizeof(original_cpuset), &original_cpuset);
if (i != 0) {
- ODP_ERR("Failed to read thread affinity: %d\n", i);
+ _ODP_ERR("Failed to read thread affinity: %d\n", i);
return -1;
}
@@ -438,25 +1423,39 @@ static int dpdk_pktio_init(void)
masklen = odp_cpumask_to_str(&mask, mask_str, ODP_CPUMASK_STR_SIZE);
if (masklen < 0) {
- ODP_ERR("CPU mask error: d\n", masklen);
+ _ODP_ERR("CPU mask error: %" PRId32 "\n", masklen);
return -1;
}
- mem_str_len = snprintf(NULL, 0, "%d", DPDK_MEMORY_MB);
+ mem_str_len = snprintf(NULL, 0, "%d,", DPDK_MEMORY_MB);
+
+ /* numa_num_configured_nodes() may return 0 on some platforms */
+ numa_nodes = numa_num_configured_nodes();
+ if (numa_nodes <= 0)
+ numa_nodes = 1;
+
+ char mem_str[mem_str_len * numa_nodes];
+
+ for (i = 0; i < numa_nodes; i++)
+ sprintf(&mem_str[i * mem_str_len], "%d,", DPDK_MEMORY_MB);
+ mem_str[mem_str_len * numa_nodes - 1] = '\0';
cmdline = getenv("ODP_PKTIO_DPDK_PARAMS");
if (cmdline == NULL)
cmdline = "";
/* masklen includes the terminating null as well */
- cmd_len = strlen("odpdpdk -c -m ") + masklen + mem_str_len +
- strlen(cmdline) + strlen(" ");
+ cmd_len = snprintf(NULL, 0, "odpdpdk --file-prefix %" PRIu32 "_ "
+ "--proc-type auto -c %s --socket-mem %s %s ",
+ odp_global_ro.main_pid, mask_str, mem_str, cmdline);
char full_cmd[cmd_len];
/* first argument is facility log, simply bind it to odpdpdk for now.*/
- cmd_len = snprintf(full_cmd, cmd_len, "odpdpdk -c %s -m %d %s",
- mask_str, DPDK_MEMORY_MB, cmdline);
+ cmd_len = snprintf(full_cmd, cmd_len,
+ "odpdpdk --file-prefix %" PRIu32 "_ "
+ "--proc-type auto -c %s --socket-mem %s %s ",
+ odp_global_ro.main_pid, mask_str, mem_str, cmdline);
for (i = 0, dpdk_argc = 1; i < cmd_len; ++i) {
if (isspace(full_cmd[i]))
@@ -468,29 +1467,30 @@ static int dpdk_pktio_init(void)
dpdk_argc = rte_strsplit(full_cmd, strlen(full_cmd), dpdk_argv,
dpdk_argc, ' ');
for (i = 0; i < dpdk_argc; ++i)
- ODP_DBG("arg[%d]: %s\n", i, dpdk_argv[i]);
+ _ODP_DBG("arg[%d]: %s\n", i, dpdk_argv[i]);
i = rte_eal_init(dpdk_argc, dpdk_argv);
+ /* Force getopt() to reset its internal state */
+ optind = 0;
+
if (i < 0) {
- ODP_ERR("Cannot init the Intel DPDK EAL!\n");
+ _ODP_ERR("Cannot init the Intel DPDK EAL!\n");
return -1;
} else if (i + 1 != dpdk_argc) {
- ODP_DBG("Some DPDK args were not processed!\n");
- ODP_DBG("Passed: %d Consumed %d\n", dpdk_argc, i + 1);
+ _ODP_DBG("Some DPDK args were not processed!\n");
+ _ODP_DBG("Passed: %d Consumed %d\n", dpdk_argc, i + 1);
}
- ODP_DBG("rte_eal_init OK\n");
+ _ODP_DBG("rte_eal_init OK\n");
- rte_set_log_level(RTE_LOG_WARNING);
+ rte_log_set_global_level(RTE_LOG_WARNING);
i = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
&original_cpuset);
if (i)
- ODP_ERR("Failed to reset thread affinity: %d\n", i);
+ _ODP_ERR("Failed to reset thread affinity: %d\n", i);
- cfg = rte_eal_get_configuration();
- for (i = 0; i < RTE_MAX_LCORE; i++)
- cfg->lcore_role[i] = ROLE_RTE;
+ _ODP_PRINT("\nDPDK version: %s\n", rte_version());
return 0;
}
@@ -499,12 +1499,12 @@ static int dpdk_pktio_init(void)
static int dpdk_pktio_init_global(void)
{
if (getenv("ODP_PKTIO_DISABLE_DPDK")) {
- ODP_PRINT("PKTIO: dpdk pktio skipped,"
- " enabled export ODP_PKTIO_DISABLE_DPDK=1.\n");
+ _ODP_PRINT("PKTIO: dpdk pktio skipped,"
+ " enabled export ODP_PKTIO_DISABLE_DPDK=1.\n");
disable_pktio = 1;
} else {
- ODP_PRINT("PKTIO: initialized dpdk pktio,"
- " use export ODP_PKTIO_DISABLE_DPDK=1 to disable.\n");
+ _ODP_PRINT("PKTIO: initialized dpdk pktio,"
+ " use export ODP_PKTIO_DISABLE_DPDK=1 to disable.\n");
}
return 0;
}
@@ -515,7 +1515,7 @@ static int dpdk_pktio_init_local(void)
cpu = sched_getcpu();
if (cpu < 0) {
- ODP_ERR("getcpu failed\n");
+ _ODP_ERR("getcpu failed\n");
return -1;
}
@@ -524,25 +1524,129 @@ static int dpdk_pktio_init_local(void)
return 0;
}
+static void dpdk_mempool_free(struct rte_mempool *mp, void *arg ODP_UNUSED)
+{
+ rte_mempool_free(mp);
+}
+
+static int dpdk_pktio_term(void)
+{
+ uint16_t port_id;
+
+ if (!odp_global_rw->dpdk_initialized)
+ return 0;
+
+ RTE_ETH_FOREACH_DEV(port_id) {
+ rte_eth_dev_close(port_id);
+ }
+
+ if (!_ODP_DPDK_ZERO_COPY)
+ rte_mempool_walk(dpdk_mempool_free, NULL);
+
+ return 0;
+}
+
+static void prepare_rss_conf(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *p)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t rss_hf_capa;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t port_id = pkt_dpdk->port_id;
+
+ memset(&pkt_dpdk->rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+
+ if (!p->hash_enable)
+ return;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ rss_hf_capa = dev_info.flow_type_rss_offloads;
+
+ /* Print debug info about unsupported hash protocols */
+ if (p->hash_proto.proto.ipv4 &&
+ ((rss_hf_capa & RTE_ETH_RSS_IPV4) == 0))
+ _ODP_PRINT("DPDK: hash_proto.ipv4 not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
+
+ if (p->hash_proto.proto.ipv4_udp &&
+ ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV4_UDP) == 0))
+ _ODP_PRINT("DPDK: hash_proto.ipv4_udp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
+
+ if (p->hash_proto.proto.ipv4_tcp &&
+ ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV4_TCP) == 0))
+ _ODP_PRINT("DPDK: hash_proto.ipv4_tcp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
+
+ if (p->hash_proto.proto.ipv6 &&
+ ((rss_hf_capa & RTE_ETH_RSS_IPV6) == 0))
+ _ODP_PRINT("DPDK: hash_proto.ipv6 not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
+
+ if (p->hash_proto.proto.ipv6_udp &&
+ ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV6_UDP) == 0))
+ _ODP_PRINT("DPDK: hash_proto.ipv6_udp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
+
+ if (p->hash_proto.proto.ipv6_tcp &&
+ ((rss_hf_capa & RTE_ETH_RSS_NONFRAG_IPV6_TCP) == 0))
+ _ODP_PRINT("DPDK: hash_proto.ipv6_tcp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
+
+ hash_proto_to_rss_conf(&pkt_dpdk->rss_conf, &p->hash_proto);
+
+ /* Filter out unsupported hash functions */
+ pkt_dpdk->rss_conf.rss_hf &= rss_hf_capa;
+}
+
static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
const odp_pktin_queue_param_t *p)
{
- odp_pktin_mode_t mode = pktio_entry->s.param.in_mode;
- odp_bool_t lockless;
+ struct rte_eth_dev_info dev_info;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ odp_pktin_mode_t mode = pktio_entry->param.in_mode;
+ uint8_t lockless;
+ int ret;
+
+ prepare_rss_conf(pktio_entry, p);
/**
* Scheduler synchronizes input queue polls. Only single thread
* at a time polls a queue */
- if (mode == ODP_PKTIN_MODE_SCHED ||
- p->op_mode == ODP_PKTIO_OP_MT_UNSAFE)
+ if (mode == ODP_PKTIN_MODE_SCHED || p->op_mode == ODP_PKTIO_OP_MT_UNSAFE)
lockless = 1;
else
lockless = 0;
- if (p->hash_enable && p->num_queues > 1)
- pktio_entry->s.pkt_dpdk.hash = p->hash_proto;
+ pkt_dpdk->flags.lockless_rx = lockless;
+
+ ret = rte_eth_dev_info_get(pkt_dpdk->port_id, &dev_info);
+ if (ret) {
+ _ODP_ERR("DPDK: rte_eth_dev_info_get() failed: %d\n", ret);
+ return -1;
+ }
+
+ /* Configure RX descriptors */
+ for (uint32_t i = 0; i < p->num_queues; i++) {
+ uint16_t num_rx_desc = pkt_dpdk->opt.num_rx_desc_default;
- pktio_entry->s.pkt_dpdk.lockless_rx = lockless;
+ if (mode == ODP_PKTIN_MODE_DIRECT && p->queue_size[i] != 0) {
+ num_rx_desc = p->queue_size[i];
+ /* Make sure correct alignment is used */
+ if (dev_info.rx_desc_lim.nb_align)
+ num_rx_desc = RTE_ALIGN_MUL_CEIL(num_rx_desc,
+ dev_info.rx_desc_lim.nb_align);
+ }
+
+ if (num_rx_desc < dev_info.rx_desc_lim.nb_min ||
+ num_rx_desc > dev_info.rx_desc_lim.nb_max ||
+ num_rx_desc % dev_info.rx_desc_lim.nb_align) {
+ _ODP_ERR("DPDK: invalid number of RX descriptors (%" PRIu16 ") for queue "
+ "%" PRIu32 "\n", num_rx_desc, i);
+ return -1;
+ }
+ pkt_dpdk->num_rx_desc[i] = num_rx_desc;
+ }
return 0;
}
@@ -550,38 +1654,212 @@ static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
static int dpdk_output_queues_config(pktio_entry_t *pktio_entry,
const odp_pktout_queue_param_t *p)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- odp_bool_t lockless;
+ struct rte_eth_dev_info dev_info;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint8_t lockless;
+ int ret;
if (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE)
lockless = 1;
else
lockless = 0;
- pkt_dpdk->lockless_tx = lockless;
+ pkt_dpdk->flags.lockless_tx = lockless;
+
+ ret = rte_eth_dev_info_get(pkt_dpdk->port_id, &dev_info);
+ if (ret) {
+ _ODP_ERR("DPDK: rte_eth_dev_info_get() failed: %d\n", ret);
+ return -1;
+ }
+
+ /* Configure TX descriptors */
+ for (uint32_t i = 0; i < p->num_queues; i++) {
+ uint16_t num_tx_desc = pkt_dpdk->opt.num_tx_desc_default;
+
+ if (p->queue_size[i] != 0) {
+ num_tx_desc = p->queue_size[i];
+ /* Make sure correct alignment is used */
+ if (dev_info.tx_desc_lim.nb_align)
+ num_tx_desc = RTE_ALIGN_MUL_CEIL(num_tx_desc,
+ dev_info.tx_desc_lim.nb_align);
+ }
+ if (num_tx_desc < dev_info.tx_desc_lim.nb_min ||
+ num_tx_desc > dev_info.tx_desc_lim.nb_max ||
+ num_tx_desc % dev_info.tx_desc_lim.nb_align) {
+ _ODP_ERR("DPDK: invalid number of TX descriptors (%" PRIu16 ") for queue "
+ "%" PRIu32 "\n", num_tx_desc, i);
+ return -1;
+ }
+ pkt_dpdk->num_tx_desc[i] = num_tx_desc;
+ }
return 0;
}
-static void dpdk_init_capability(pktio_entry_t *pktio_entry,
- struct rte_eth_dev_info *dev_info)
+static int dpdk_init_capability(pktio_entry_t *pktio_entry,
+ const struct rte_eth_dev_info *dev_info)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- odp_pktio_capability_t *capa = &pkt_dpdk->capa;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ odp_pktio_capability_t *capa = &pktio_entry->capa;
+ struct rte_ether_addr mac_addr;
+ int ptype_cnt;
+ int ptype_l3_ipv4 = 0;
+ int ptype_l4_tcp = 0;
+ int ptype_l4_udp = 0;
+ int ret;
+ uint32_t ptype_mask = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK;
- memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
memset(capa, 0, sizeof(odp_pktio_capability_t));
- rte_eth_dev_info_get(pkt_dpdk->port_id, dev_info);
capa->max_input_queues = RTE_MIN(dev_info->max_rx_queues,
- PKTIO_MAX_QUEUES);
+ ODP_PKTIN_MAX_QUEUES);
+ capa->min_input_queue_size = dev_info->rx_desc_lim.nb_min;
+ capa->max_input_queue_size = dev_info->rx_desc_lim.nb_max;
+
+ /* ixgbe devices support only 16 rx queues in RSS mode */
+ if (!strncmp(dev_info->driver_name, IXGBE_DRV_NAME,
+ strlen(IXGBE_DRV_NAME)))
+ capa->max_input_queues = RTE_MIN((unsigned)16,
+ capa->max_input_queues);
+
capa->max_output_queues = RTE_MIN(dev_info->max_tx_queues,
- PKTIO_MAX_QUEUES);
+ ODP_PKTOUT_MAX_QUEUES);
+ capa->min_output_queue_size = dev_info->tx_desc_lim.nb_min;
+ capa->max_output_queue_size = dev_info->tx_desc_lim.nb_max;
+
capa->set_op.op.promisc_mode = 1;
+ /* Check if setting default MAC address is supported */
+ rte_eth_macaddr_get(pkt_dpdk->port_id, &mac_addr);
+ ret = rte_eth_dev_default_mac_addr_set(pkt_dpdk->port_id, &mac_addr);
+ if (ret == 0) {
+ capa->set_op.op.mac_addr = 1;
+ } else if (ret != -ENOTSUP && ret != -EPERM) {
+ _ODP_ERR("Failed to set interface default MAC: %d\n", ret);
+ return -1;
+ }
+
+ /* Check if setting MTU is supported */
+ ret = rte_eth_dev_set_mtu(pkt_dpdk->port_id, pkt_dpdk->mtu - _ODP_ETHHDR_LEN);
+ /* From DPDK 21.11 onwards, calling rte_eth_dev_set_mtu() before device is configured with
+ * rte_eth_dev_configure() will result in failure. The least hacky (unfortunately still
+ * very hacky) way to continue checking the support is to take into account that the
+ * function will fail earlier with -ENOTSUP if MTU setting is not supported by device than
+ * if the device was not yet configured. */
+ if (ret != -ENOTSUP) {
+ capa->set_op.op.maxlen = 1;
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = DPDK_MTU_MIN;
+ capa->maxlen.max_input = pkt_dpdk->mtu_max;
+ capa->maxlen.min_output = DPDK_MTU_MIN;
+ capa->maxlen.max_output = pkt_dpdk->mtu_max;
+ }
+
+ ptype_cnt = rte_eth_dev_get_supported_ptypes(pkt_dpdk->port_id,
+ ptype_mask, NULL, 0);
+ if (ptype_cnt > 0) {
+ uint32_t ptypes[ptype_cnt];
+ int i;
+
+ ptype_cnt = rte_eth_dev_get_supported_ptypes(pkt_dpdk->port_id,
+ ptype_mask, ptypes,
+ ptype_cnt);
+ for (i = 0; i < ptype_cnt; i++)
+ switch (ptypes[i]) {
+ case RTE_PTYPE_L3_IPV4:
+ /* Fall through */
+ case RTE_PTYPE_L3_IPV4_EXT_UNKNOWN:
+ /* Fall through */
+ case RTE_PTYPE_L3_IPV4_EXT:
+ ptype_l3_ipv4 = 1;
+ break;
+ case RTE_PTYPE_L4_TCP:
+ ptype_l4_tcp = 1;
+ break;
+ case RTE_PTYPE_L4_UDP:
+ ptype_l4_udp = 1;
+ break;
+ }
+ }
+
odp_pktio_config_init(&capa->config);
capa->config.pktin.bit.ts_all = 1;
capa->config.pktin.bit.ts_ptp = 1;
+
+ capa->config.pktin.bit.ipv4_chksum = ptype_l3_ipv4 &&
+ (dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) ? 1 : 0;
+ if (capa->config.pktin.bit.ipv4_chksum)
+ capa->config.pktin.bit.drop_ipv4_err = 1;
+
+ capa->config.pktin.bit.udp_chksum = ptype_l4_udp &&
+ (dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ? 1 : 0;
+ if (capa->config.pktin.bit.udp_chksum)
+ capa->config.pktin.bit.drop_udp_err = 1;
+
+ capa->config.pktin.bit.tcp_chksum = ptype_l4_tcp &&
+ (dev_info->rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ? 1 : 0;
+ if (capa->config.pktin.bit.tcp_chksum)
+ capa->config.pktin.bit.drop_tcp_err = 1;
+
+ capa->config.pktout.bit.ipv4_chksum =
+ (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) ? 1 : 0;
+ capa->config.pktout.bit.udp_chksum =
+ (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ? 1 : 0;
+ capa->config.pktout.bit.tcp_chksum =
+ (dev_info->tx_offload_capa & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ? 1 : 0;
+
+ capa->config.pktout.bit.ipv4_chksum_ena =
+ capa->config.pktout.bit.ipv4_chksum;
+ capa->config.pktout.bit.udp_chksum_ena =
+ capa->config.pktout.bit.udp_chksum;
+ capa->config.pktout.bit.tcp_chksum_ena =
+ capa->config.pktout.bit.tcp_chksum;
+ capa->config.pktout.bit.ts_ena = 1;
+
+ if (!_ODP_DPDK_ZERO_COPY) {
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
+ capa->free_ctrl.dont_free = 1;
+ }
+
+ /* Copy for fast path access */
+ pkt_dpdk->pktout_capa = capa->config.pktout;
+
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+
+ capa->stats.pktin_queue.counter.octets = 1;
+ capa->stats.pktin_queue.counter.packets = 1;
+ capa->stats.pktin_queue.counter.errors = 1;
+
+ capa->stats.pktout_queue.counter.octets = 1;
+ capa->stats.pktout_queue.counter.packets = 1;
+
+ return 0;
+}
+
+/* Some DPDK PMD virtual devices, like PCAP, do not support promisc
+ * mode change. Use system call for them. */
+static void promisc_mode_check(pkt_dpdk_t *pkt_dpdk)
+{
+ int ret;
+
+ ret = rte_eth_promiscuous_enable(pkt_dpdk->port_id);
+
+ if (!rte_eth_promiscuous_get(pkt_dpdk->port_id))
+ pkt_dpdk->vdev_sysc_promisc = 1;
+
+ ret += rte_eth_promiscuous_disable(pkt_dpdk->port_id);
+
+ if (ret)
+ pkt_dpdk->vdev_sysc_promisc = 1;
}
static int dpdk_open(odp_pktio_t id ODP_UNUSED,
@@ -589,282 +1867,287 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
const char *netdev,
odp_pool_t pool)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
struct rte_eth_dev_info dev_info;
struct rte_mempool *pkt_pool;
- odp_pool_info_t pool_info;
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
uint16_t data_room;
uint32_t mtu;
- int i;
+ int i, ret;
+ pool_t *pool_entry;
+ uint16_t port_id;
if (disable_pktio)
return -1;
if (pool == ODP_POOL_INVALID)
return -1;
+ pool_entry = _odp_pool_entry(pool);
+
+ /* Init pktio entry */
+ memset(pkt_dpdk, 0, sizeof(*pkt_dpdk));
- if (!dpdk_netdev_is_valid(netdev)) {
- ODP_ERR("Invalid dpdk netdev: %s\n", netdev);
+ if (!rte_eth_dev_get_port_by_name(netdev, &port_id))
+ pkt_dpdk->port_id = port_id;
+ else if (dpdk_netdev_is_valid(netdev))
+ pkt_dpdk->port_id = atoi(netdev);
+ else {
+ _ODP_ERR("Invalid DPDK interface name: %s\n", netdev);
return -1;
}
/* Initialize DPDK here instead of odp_init_global() to enable running
* 'make check' without root privileges */
- if (dpdk_initialized == 0) {
+ if (odp_global_rw->dpdk_initialized == 0) {
dpdk_pktio_init();
- dpdk_initialized = 1;
+ odp_global_rw->dpdk_initialized = 1;
}
- /* Init pktio entry */
- memset(pkt_dpdk, 0, sizeof(*pkt_dpdk));
-
pkt_dpdk->pool = pool;
- pkt_dpdk->port_id = atoi(netdev);
-
- snprintf(pkt_dpdk->pool_name, sizeof(pkt_dpdk->pool_name), "pktpool_%s",
- netdev);
- if (rte_eth_dev_count() == 0) {
- ODP_ERR("No DPDK ports found\n");
+ if (rte_eth_dev_count_avail() == 0) {
+ _ODP_ERR("No DPDK ports found\n");
return -1;
}
- if (odp_pool_info(pool, &pool_info) < 0) {
- ODP_ERR("Failed to read pool info\n");
+ memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
+ ret = rte_eth_dev_info_get(pkt_dpdk->port_id, &dev_info);
+ if (ret) {
+ _ODP_ERR("Failed to read device info: %d\n", ret);
return -1;
}
- dpdk_init_capability(pktio_entry, &dev_info);
+ /* Initialize runtime options */
+ if (init_options(pktio_entry, &dev_info)) {
+ _ODP_ERR("Initializing runtime options failed\n");
+ return -1;
+ }
+ pkt_dpdk->flags.set_flow_hash = pkt_dpdk->opt.set_flow_hash; /* Copy for fast path access */
mtu = dpdk_mtu_get(pktio_entry);
if (mtu == 0) {
- ODP_ERR("Failed to read interface MTU\n");
+ _ODP_ERR("Failed to read interface MTU\n");
return -1;
}
pkt_dpdk->mtu = mtu + _ODP_ETHHDR_LEN;
+ pkt_dpdk->mtu_max = RTE_MAX(pkt_dpdk->mtu, DPDK_MTU_MAX);
+ pkt_dpdk->mtu_set = 0;
- /* Some DPDK PMD virtual devices, like PCAP, do not support promisc
- * mode change. Use system call for them. */
- rte_eth_promiscuous_enable(pkt_dpdk->port_id);
- if (!rte_eth_promiscuous_get(pkt_dpdk->port_id))
- pkt_dpdk->vdev_sysc_promisc = 1;
- rte_eth_promiscuous_disable(pkt_dpdk->port_id);
+ promisc_mode_check(pkt_dpdk);
- if (!strcmp(dev_info.driver_name, "rte_ixgbe_pmd"))
- pkt_dpdk->min_rx_burst = DPDK_IXGBE_MIN_RX_BURST;
+ if (pkt_dpdk->opt.multicast_en)
+ ret = rte_eth_allmulticast_enable(pkt_dpdk->port_id);
+ else
+ ret = rte_eth_allmulticast_disable(pkt_dpdk->port_id);
+
+ /* Not supported by all PMDs, so ignore the return value */
+ if (ret)
+ _ODP_DBG("Configuring multicast reception not supported by the PMD\n");
+
+ /* Drivers requiring minimum burst size. Also supports *_vf versions
+ * of the drivers. */
+ if (!strncmp(dev_info.driver_name, IXGBE_DRV_NAME,
+ strlen(IXGBE_DRV_NAME)) ||
+ !strncmp(dev_info.driver_name, I40E_DRV_NAME,
+ strlen(I40E_DRV_NAME)))
+ pkt_dpdk->min_rx_burst = DPDK_MIN_RX_BURST;
else
pkt_dpdk->min_rx_burst = 0;
- pkt_pool = rte_pktmbuf_pool_create(pkt_dpdk->pool_name, DPDK_NB_MBUF,
- DPDK_MEMPOOL_CACHE_SIZE, 0,
- DPDK_MBUF_BUF_SIZE, rte_socket_id());
+ if (_ODP_DPDK_ZERO_COPY) {
+ mem_src_data_t *mem_src_data = mem_src_priv(pool_entry->mem_src_data);
+
+ pkt_pool = mem_src_data->pkt_pool;
+ } else {
+ snprintf(pool_name, sizeof(pool_name), "pktpool_%s", netdev);
+ /* Check if the pool exists already */
+ pkt_pool = rte_mempool_lookup(pool_name);
+ if (pkt_pool == NULL) {
+ unsigned cache_size = DPDK_MEMPOOL_CACHE_SIZE;
+
+ pkt_pool = rte_pktmbuf_pool_create(pool_name,
+ DPDK_NB_MBUF,
+ cache_size, 0,
+ DPDK_MBUF_BUF_SIZE,
+ rte_socket_id());
+ }
+ }
if (pkt_pool == NULL) {
- ODP_ERR("Cannot init mbuf packet pool\n");
+ _ODP_ERR("Cannot init mbuf packet pool\n");
return -1;
}
+
pkt_dpdk->pkt_pool = pkt_pool;
data_room = rte_pktmbuf_data_room_size(pkt_dpdk->pkt_pool) -
RTE_PKTMBUF_HEADROOM;
- pkt_dpdk->data_room = RTE_MIN(pool_info.params.pkt.len, data_room);
+ pkt_dpdk->data_room = RTE_MIN(pool_entry->seg_len, data_room);
+
+ /* Reserve room for packet input offset */
+ pkt_dpdk->data_room -= pktio_entry->pktin_frame_offset;
/* Mbuf chaining not yet supported */
- pkt_dpdk->mtu = RTE_MIN(pkt_dpdk->mtu, pkt_dpdk->data_room);
+ pkt_dpdk->mtu = RTE_MIN(pkt_dpdk->mtu, pkt_dpdk->data_room);
+ pkt_dpdk->mtu_max = RTE_MIN(pkt_dpdk->mtu_max, pkt_dpdk->data_room);
- for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+ if (dpdk_init_capability(pktio_entry, &dev_info)) {
+ _ODP_ERR("Failed to initialize capability\n");
+ return -1;
+ }
+
+ for (i = 0; i < ODP_PKTIN_MAX_QUEUES; i++)
odp_ticketlock_init(&pkt_dpdk->rx_lock[i]);
+
+ for (i = 0; i < ODP_PKTOUT_MAX_QUEUES; i++)
odp_ticketlock_init(&pkt_dpdk->tx_lock[i]);
- }
rte_eth_stats_reset(pkt_dpdk->port_id);
return 0;
}
-static int dpdk_start(pktio_entry_t *pktio_entry)
+static int dpdk_setup_eth_tx(pktio_entry_t *pktio_entry,
+ const pkt_dpdk_t *pkt_dpdk,
+ const struct rte_eth_dev_info *dev_info)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- uint8_t port_id = pkt_dpdk->port_id;
+ uint32_t i;
int ret;
- unsigned i;
+ uint16_t port_id = pkt_dpdk->port_id;
- /* DPDK doesn't support nb_rx_q/nb_tx_q being 0 */
- if (!pktio_entry->s.num_in_queue)
- pktio_entry->s.num_in_queue = 1;
- if (!pktio_entry->s.num_out_queue)
- pktio_entry->s.num_out_queue = 1;
-
- /* init port */
- if (dpdk_setup_port(pktio_entry)) {
- ODP_ERR("Failed to configure device\n");
- return -1;
- }
- /* Init TX queues */
- for (i = 0; i < pktio_entry->s.num_out_queue; i++) {
- ret = rte_eth_tx_queue_setup(port_id, i, DPDK_NM_TX_DESC,
+ for (i = 0; i < pktio_entry->num_out_queue; i++) {
+ ret = rte_eth_tx_queue_setup(port_id, i,
+ pkt_dpdk->num_tx_desc[i],
rte_eth_dev_socket_id(port_id),
- NULL);
+ &dev_info->default_txconf);
if (ret < 0) {
- ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n",
- ret, port_id);
+ _ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n", ret, port_id);
return -1;
}
}
- /* Init RX queues */
- for (i = 0; i < pktio_entry->s.num_in_queue; i++) {
- ret = rte_eth_rx_queue_setup(port_id, i, DPDK_NM_RX_DESC,
- rte_eth_dev_socket_id(port_id),
- NULL, pkt_dpdk->pkt_pool);
- if (ret < 0) {
- ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n",
- ret, port_id);
- return -1;
+
+ /* Set per queue statistics mappings. Not supported by all PMDs, so
+ * ignore the return value. */
+ for (i = 0; i < pktio_entry->num_out_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
+ if (ret) {
+ _ODP_DBG("Mapping per TX queue statistics not supported: %d\n", ret);
+ break;
}
}
- /* Start device */
- ret = rte_eth_dev_start(port_id);
- if (ret < 0) {
- ODP_ERR("Device start failed: err=%d, port=%" PRIu8 "\n",
- ret, port_id);
- return -1;
- }
-
- return 0;
-}
-
-static int dpdk_stop(pktio_entry_t *pktio_entry)
-{
- rte_eth_dev_stop(pktio_entry->s.pkt_dpdk.port_id);
+ _ODP_DBG("Mapped %" PRIu32 "/%d TX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);
return 0;
}
-static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
- odp_packet_t pkt_table[],
- struct rte_mbuf *mbuf_table[],
- uint16_t mbuf_num, odp_time_t *ts)
+static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry,
+ const pkt_dpdk_t *pkt_dpdk,
+ const struct rte_eth_dev_info *dev_info)
{
- odp_packet_t pkt;
- odp_packet_hdr_t *pkt_hdr;
- uint16_t pkt_len;
- struct rte_mbuf *mbuf;
- void *buf;
- int i, j;
- int nb_pkts = 0;
- int alloc_len, num;
- odp_pool_t pool = pktio_entry->s.pkt_dpdk.pool;
-
- /* Allocate maximum sized packets */
- alloc_len = pktio_entry->s.pkt_dpdk.data_room;
+ struct rte_eth_rxconf rxconf;
+ uint32_t i;
+ int ret;
+ uint16_t port_id = pkt_dpdk->port_id;
- num = packet_alloc_multi(pool, alloc_len, pkt_table, mbuf_num);
- if (num != mbuf_num) {
- ODP_DBG("packet_alloc_multi() unable to allocate all packets: "
- "%d/%" PRIu16 " allocated\n", num, mbuf_num);
- for (i = num; i < mbuf_num; i++)
- rte_pktmbuf_free(mbuf_table[i]);
- }
+ rxconf = dev_info->default_rxconf;
- for (i = 0; i < num; i++) {
- odp_packet_hdr_t parsed_hdr;
+ rxconf.rx_drop_en = pkt_dpdk->opt.rx_drop_en;
- mbuf = mbuf_table[i];
- if (odp_unlikely(mbuf->nb_segs != 1)) {
- ODP_ERR("Segmented buffers not supported\n");
- goto fail;
+ for (i = 0; i < pktio_entry->num_in_queue; i++) {
+ ret = rte_eth_rx_queue_setup(port_id, i,
+ pkt_dpdk->num_rx_desc[i],
+ rte_eth_dev_socket_id(port_id),
+ &rxconf, pkt_dpdk->pkt_pool);
+ if (ret < 0) {
+ _ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n", ret, port_id);
+ return -1;
}
+ }
- buf = rte_pktmbuf_mtod(mbuf, char *);
- odp_prefetch(buf);
-
- pkt_len = rte_pktmbuf_pkt_len(mbuf);
-
- if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry,
- (const uint8_t *)buf,
- pkt_len, pkt_len, &pool,
- &parsed_hdr))
- goto fail;
+ /* Set per queue statistics mappings. Not supported by all PMDs, so
+ * ignore the return value. */
+ for (i = 0; i < pktio_entry->num_in_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
+ if (ret) {
+ _ODP_DBG("Mapping per RX queue statistics not supported: %d\n", ret);
+ break;
}
+ }
+ _ODP_DBG("Mapped %" PRIu32 "/%d RX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);
- pkt = pkt_table[i];
- pkt_hdr = odp_packet_hdr(pkt);
- pull_tail(pkt_hdr, alloc_len - pkt_len);
+ return 0;
+}
- /* For now copy the data in the mbuf,
- worry about zero-copy later */
- if (odp_packet_copy_from_mem(pkt, 0, pkt_len, buf) != 0)
- goto fail;
+static int dpdk_start(pktio_entry_t *pktio_entry)
+{
+ struct rte_eth_dev_info dev_info;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t port_id = pkt_dpdk->port_id;
+ int ret;
- pkt_hdr->input = pktio_entry->s.handle;
+ /* DPDK doesn't support nb_rx_q/nb_tx_q being 0 */
+ if (!pktio_entry->num_in_queue)
+ pktio_entry->num_in_queue = 1;
+ if (!pktio_entry->num_out_queue)
+ pktio_entry->num_out_queue = 1;
- if (pktio_cls_enabled(pktio_entry))
- copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
- else
- packet_parse_l2(&pkt_hdr->p, pkt_len);
+ rte_eth_dev_info_get(port_id, &dev_info);
- if (mbuf->ol_flags & PKT_RX_RSS_HASH)
- odp_packet_flow_hash_set(pkt, mbuf->hash.rss);
+ /* Setup device */
+ if (dpdk_setup_eth_dev(pktio_entry)) {
+ _ODP_ERR("Failed to configure device\n");
+ return -1;
+ }
- packet_set_ts(pkt_hdr, ts);
+ /* Setup TX queues */
+ if (dpdk_setup_eth_tx(pktio_entry, pkt_dpdk, &dev_info))
+ return -1;
- pkt_table[nb_pkts++] = pkt;
+ /* Setup RX queues */
+ if (dpdk_setup_eth_rx(pktio_entry, pkt_dpdk, &dev_info))
+ return -1;
- rte_pktmbuf_free(mbuf);
+ /* Restore MTU value reset by dpdk_setup_eth_rx() */
+ if (pkt_dpdk->mtu_set && pktio_entry->capa.set_op.op.maxlen) {
+ ret = dpdk_maxlen_set(pktio_entry, pkt_dpdk->mtu, 0);
+ if (ret) {
+ _ODP_ERR("Restoring device MTU failed: err=%d, port=%" PRIu8 "\n",
+ ret, port_id);
+ return -1;
+ }
}
- return nb_pkts;
+ if (_ODP_DPDK_ZERO_COPY) {
+ /* Use simpler function when packet parsing and classifying are not required */
+ if (pktio_entry->parse_layer == ODP_PROTO_LAYER_NONE)
+ pkt_dpdk->mbuf_to_pkt_fn = mbuf_to_pkt_zero_minimal;
+ else
+ pkt_dpdk->mbuf_to_pkt_fn = mbuf_to_pkt_zero;
-fail:
- odp_packet_free_multi(&pkt_table[i], num - i);
+ } else {
+ pkt_dpdk->mbuf_to_pkt_fn = mbuf_to_pkt;
+ }
- for (j = i; j < num; j++)
- rte_pktmbuf_free(mbuf_table[j]);
+ /* Start device */
+ ret = rte_eth_dev_start(port_id);
+ if (ret < 0) {
+ _ODP_ERR("Device start failed: err=%d, port=%" PRIu8 "\n", ret, port_id);
+ return -1;
+ }
- return (i > 0 ? i : -1);
+ return 0;
}
-static inline int pkt_to_mbuf(pktio_entry_t *pktio_entry,
- struct rte_mbuf *mbuf_table[],
- const odp_packet_t pkt_table[], uint16_t num)
+static int dpdk_stop(pktio_entry_t *pktio_entry)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- int i, j;
- char *data;
- uint16_t pkt_len;
+ rte_eth_dev_stop(pkt_priv(pktio_entry)->port_id);
- if (odp_unlikely((rte_pktmbuf_alloc_bulk(pkt_dpdk->pkt_pool,
- mbuf_table, num)))) {
- ODP_ERR("Failed to alloc mbuf\n");
- return 0;
- }
- for (i = 0; i < num; i++) {
- pkt_len = _odp_packet_len(pkt_table[i]);
-
- if (pkt_len > pkt_dpdk->mtu) {
- if (i == 0)
- __odp_errno = EMSGSIZE;
- goto fail;
- }
-
- /* Packet always fits in mbuf */
- data = rte_pktmbuf_append(mbuf_table[i], pkt_len);
-
- odp_packet_copy_to_mem(pkt_table[i], 0, pkt_len, data);
- }
- return i;
-
-fail:
- for (j = i; j < num; j++)
- rte_pktmbuf_free(mbuf_table[j]);
-
- return i;
+ return 0;
}
static int dpdk_recv(pktio_entry_t *pktio_entry, int index,
odp_packet_t pkt_table[], int num)
{
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
pkt_cache_t *rx_cache = &pkt_dpdk->rx_cache[index];
odp_time_t ts_val;
odp_time_t *ts = NULL;
@@ -873,59 +2156,56 @@ static int dpdk_recv(pktio_entry_t *pktio_entry, int index,
int i;
unsigned cache_idx;
- if (odp_unlikely(pktio_entry->s.state != PKTIO_STATE_STARTED))
- return 0;
-
- if (!pkt_dpdk->lockless_rx)
+ if (!pkt_dpdk->flags.lockless_rx)
odp_ticketlock_lock(&pkt_dpdk->rx_lock[index]);
/**
- * ixgbe_pmd has a minimum supported RX burst size ('min_rx_burst'). If
- * 'num' < 'min_rx_burst', 'min_rx_burst' is used as rte_eth_rx_burst()
- * argument and the possibly received extra packets are cached for the
- * next dpdk_recv_queue() call to use.
+ * ixgbe and i40e drivers have a minimum supported RX burst size
+ * ('min_rx_burst'). If 'num' < 'min_rx_burst', 'min_rx_burst' is used
+ * as rte_eth_rx_burst() argument and the possibly received extra
+ * packets are cached for the next dpdk_recv_queue() call to use.
*
* Either use cached packets or receive new ones. Not both during the
* same call. */
- if (rx_cache->s.count > 0) {
- for (i = 0; i < num && rx_cache->s.count; i++) {
- rx_mbufs[i] = rx_cache->s.pkt[rx_cache->s.idx];
- rx_cache->s.idx++;
- rx_cache->s.count--;
+ if (rx_cache->count > 0) {
+ for (i = 0; i < num && rx_cache->count; i++) {
+ rx_mbufs[i] = rx_cache->pkt[rx_cache->idx];
+ rx_cache->idx++;
+ rx_cache->count--;
}
nb_rx = i;
} else if ((unsigned)num < pkt_dpdk->min_rx_burst) {
struct rte_mbuf *new_mbufs[pkt_dpdk->min_rx_burst];
- nb_rx = rte_eth_rx_burst(pktio_entry->s.pkt_dpdk.port_id, index,
+ nb_rx = rte_eth_rx_burst(pkt_priv(pktio_entry)->port_id, index,
new_mbufs, pkt_dpdk->min_rx_burst);
- rx_cache->s.idx = 0;
+ rx_cache->idx = 0;
for (i = 0; i < nb_rx; i++) {
if (i < num) {
rx_mbufs[i] = new_mbufs[i];
} else {
- cache_idx = rx_cache->s.count;
- rx_cache->s.pkt[cache_idx] = new_mbufs[i];
- rx_cache->s.count++;
+ cache_idx = rx_cache->count;
+ rx_cache->pkt[cache_idx] = new_mbufs[i];
+ rx_cache->count++;
}
}
nb_rx = RTE_MIN(num, nb_rx);
} else {
- nb_rx = rte_eth_rx_burst(pktio_entry->s.pkt_dpdk.port_id, index,
+ nb_rx = rte_eth_rx_burst(pkt_priv(pktio_entry)->port_id, index,
rx_mbufs, num);
}
- if (!pkt_dpdk->lockless_rx)
+ if (!pkt_dpdk->flags.lockless_rx)
odp_ticketlock_unlock(&pkt_dpdk->rx_lock[index]);
if (nb_rx > 0) {
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp) {
+ if (pktio_entry->config.pktin.bit.ts_all ||
+ pktio_entry->config.pktin.bit.ts_ptp) {
ts_val = odp_time_global();
ts = &ts_val;
}
- nb_rx = mbuf_to_pkt(pktio_entry, pkt_table, rx_mbufs, nb_rx,
- ts);
+
+ nb_rx = pkt_dpdk->mbuf_to_pkt_fn(pktio_entry, pkt_table, rx_mbufs, nb_rx, ts);
}
return nb_rx;
@@ -935,35 +2215,80 @@ static int dpdk_send(pktio_entry_t *pktio_entry, int index,
const odp_packet_t pkt_table[], int num)
{
struct rte_mbuf *tx_mbufs[num];
- pkt_dpdk_t *pkt_dpdk = &pktio_entry->s.pkt_dpdk;
- int tx_pkts;
- int i;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t copy_count = 0;
+ uint16_t cpy_idx[num];
+ uint16_t tx_pkts;
int mbufs;
+ uint16_t tx_ts_idx = 0;
- if (odp_unlikely(pktio_entry->s.state != PKTIO_STATE_STARTED))
- return 0;
+ if (_ODP_DPDK_ZERO_COPY)
+ mbufs = pkt_to_mbuf_zero(pktio_entry, tx_mbufs, pkt_table, num,
+ &copy_count, cpy_idx, &tx_ts_idx);
+ else
+ mbufs = pkt_to_mbuf(pktio_entry, tx_mbufs, pkt_table, num,
+ &tx_ts_idx);
- mbufs = pkt_to_mbuf(pktio_entry, tx_mbufs, pkt_table, num);
+ if (odp_unlikely(mbufs < 1))
+ return mbufs;
- if (!pkt_dpdk->lockless_tx)
+ if (!pkt_dpdk->flags.lockless_tx)
odp_ticketlock_lock(&pkt_dpdk->tx_lock[index]);
tx_pkts = rte_eth_tx_burst(pkt_dpdk->port_id, index,
tx_mbufs, mbufs);
- if (!pkt_dpdk->lockless_tx)
+ if (!pkt_dpdk->flags.lockless_tx)
odp_ticketlock_unlock(&pkt_dpdk->tx_lock[index]);
- if (odp_unlikely(tx_pkts < num)) {
- for (i = tx_pkts; i < mbufs; i++)
- rte_pktmbuf_free(tx_mbufs[i]);
- }
+ if (odp_unlikely(tx_ts_idx && tx_pkts >= tx_ts_idx))
+ _odp_pktio_tx_ts_set(pktio_entry);
- if (odp_unlikely(tx_pkts == 0)) {
- if (__odp_errno != 0)
- return -1;
+ if (_ODP_DPDK_ZERO_COPY) {
+ /* Free copied packets */
+ if (odp_unlikely(copy_count)) {
+ uint16_t idx;
+
+ for (uint16_t i = 0; i < copy_count; i++) {
+ idx = cpy_idx[i];
+
+ if (odp_likely(idx < tx_pkts))
+ odp_packet_free(pkt_table[idx]);
+ else
+ rte_pktmbuf_free(tx_mbufs[idx]);
+ }
+ }
} else {
- odp_packet_free_multi(pkt_table, tx_pkts);
+ int i;
+ int first = tx_pkts;
+
+ if (odp_unlikely(tx_pkts < mbufs)) {
+ for (i = tx_pkts; i < mbufs; i++)
+ rte_pktmbuf_free(tx_mbufs[i]);
+ }
+
+ if (odp_unlikely(tx_pkts == 0))
+ return 0;
+
+ /* Find the first packet with (possible) don't free flag */
+ for (i = 0; i < tx_pkts; i++) {
+ if (odp_packet_free_ctrl(pkt_table[i]) == ODP_PACKET_FREE_CTRL_DONT_FREE) {
+ first = i;
+ break;
+ }
+ }
+
+ /* Free first N packets that don't have the flag */
+ if (odp_likely(first > 0))
+ odp_packet_free_multi(pkt_table, first);
+
+ /* Free rest of the packets (according to the flag) */
+ for (i = first; i < tx_pkts; i++) {
+ if (odp_packet_free_ctrl(pkt_table[i]) == ODP_PACKET_FREE_CTRL_DONT_FREE)
+ continue;
+
+ odp_packet_free(pkt_table[i]);
+ }
}
return tx_pkts;
@@ -971,16 +2296,24 @@ static int dpdk_send(pktio_entry_t *pktio_entry, int index,
static int dpdk_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
{
- rte_eth_macaddr_get(pktio_entry->s.pkt_dpdk.port_id,
- (struct ether_addr *)mac_addr);
+ rte_eth_macaddr_get(pkt_priv(pktio_entry)->port_id,
+ (struct rte_ether_addr *)mac_addr);
return ETH_ALEN;
}
+static int dpdk_mac_addr_set(pktio_entry_t *pktio_entry, const void *mac_addr)
+{
+ struct rte_ether_addr addr = *(const struct rte_ether_addr *)mac_addr;
+
+ return rte_eth_dev_default_mac_addr_set(pkt_priv(pktio_entry)->port_id,
+ &addr);
+}
+
static int dpdk_promisc_mode_set(pktio_entry_t *pktio_entry, odp_bool_t enable)
{
- uint8_t port_id = pktio_entry->s.pkt_dpdk.port_id;
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
- if (pktio_entry->s.pkt_dpdk.vdev_sysc_promisc)
+ if (pkt_priv(pktio_entry)->vdev_sysc_promisc)
return dpdk_vdev_promisc_mode_set(port_id, enable);
if (enable)
@@ -993,9 +2326,9 @@ static int dpdk_promisc_mode_set(pktio_entry_t *pktio_entry, odp_bool_t enable)
static int dpdk_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- uint8_t port_id = pktio_entry->s.pkt_dpdk.port_id;
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
- if (pktio_entry->s.pkt_dpdk.vdev_sysc_promisc)
+ if (pkt_priv(pktio_entry)->vdev_sysc_promisc)
return dpdk_vdev_promisc_mode_get(port_id);
else
return rte_eth_promiscuous_get(port_id);
@@ -1004,7 +2337,7 @@ static int dpdk_promisc_mode_get(pktio_entry_t *pktio_entry)
static int dpdk_capability(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa)
{
- *capa = pktio_entry->s.pkt_dpdk.capa;
+ *capa = pktio_entry->capa;
return 0;
}
@@ -1014,9 +2347,64 @@ static int dpdk_link_status(pktio_entry_t *pktio_entry)
memset(&link, 0, sizeof(struct rte_eth_link));
- rte_eth_link_get_nowait(pktio_entry->s.pkt_dpdk.port_id, &link);
+ rte_eth_link_get_nowait(pkt_priv(pktio_entry)->port_id, &link);
+ if (link.link_status)
+ return ODP_PKTIO_LINK_STATUS_UP;
+ return ODP_PKTIO_LINK_STATUS_DOWN;
+}
+
+static int dpdk_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ struct rte_eth_link link;
+ struct rte_eth_fc_conf fc_conf;
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int ret;
+
+ memset(&fc_conf, 0, sizeof(struct rte_eth_fc_conf));
+ memset(&link, 0, sizeof(struct rte_eth_link));
+
+ ret = rte_eth_dev_flow_ctrl_get(port_id, &fc_conf);
+ if (ret && ret != -ENOTSUP) {
+ _ODP_ERR("rte_eth_dev_flow_ctrl_get() failed\n");
+ return -1;
+ }
+
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
+ if (fc_conf.mode == RTE_ETH_FC_RX_PAUSE) {
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
+ } else if (fc_conf.mode == RTE_ETH_FC_TX_PAUSE) {
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_ON;
+ } else if (fc_conf.mode == RTE_ETH_FC_FULL) {
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_ON;
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_ON;
+ }
+
+ rte_eth_link_get_nowait(port_id, &link);
+ if (link.link_autoneg == RTE_ETH_LINK_AUTONEG)
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_ON;
+ else
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+
+ if (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX)
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ else
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_HALF;
+
+ if (link.link_speed == RTE_ETH_SPEED_NUM_NONE)
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ else
+ info->speed = link.link_speed;
+
+ if (link.link_status == RTE_ETH_LINK_UP)
+ info->status = ODP_PKTIO_LINK_STATUS_UP;
+ else
+ info->status = ODP_PKTIO_LINK_STATUS_DOWN;
+
+ info->media = "unknown";
- return link.link_status;
+ return 0;
}
static void stats_convert(const struct rte_eth_stats *rte_stats,
@@ -1025,9 +2413,11 @@ static void stats_convert(const struct rte_eth_stats *rte_stats,
memset(stats, 0, sizeof(odp_pktio_stats_t));
stats->in_octets = rte_stats->ibytes;
+ stats->in_packets = rte_stats->ipackets;
stats->in_discards = rte_stats->imissed;
stats->in_errors = rte_stats->ierrors;
stats->out_octets = rte_stats->obytes;
+ stats->out_packets = rte_stats->opackets;
stats->out_errors = rte_stats->oerrors;
}
@@ -1036,7 +2426,7 @@ static int dpdk_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
int ret;
struct rte_eth_stats rte_stats;
- ret = rte_eth_stats_get(pktio_entry->s.pkt_dpdk.port_id, &rte_stats);
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
if (ret == 0) {
stats_convert(&rte_stats, stats);
@@ -1047,34 +2437,195 @@ static int dpdk_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
static int dpdk_stats_reset(pktio_entry_t *pktio_entry)
{
- rte_eth_stats_reset(pktio_entry->s.pkt_dpdk.port_id);
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+
+ (void)rte_eth_stats_reset(port_id);
+ (void)rte_eth_xstats_reset(port_id);
return 0;
}
-const pktio_if_ops_t dpdk_pktio_ops = {
+static int dpdk_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get_names(port_id, NULL, 0);
+ if (num_stats < 0) {
+ _ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (info == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat_name xstats_names[num_stats];
+
+ ret = rte_eth_xstats_get_names(port_id, xstats_names, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ _ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ strncpy(info[i].name, xstats_names[i].name,
+ ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1);
+
+ return num_stats;
+}
+
+static int dpdk_extra_stats(pktio_entry_t *pktio_entry,
+ uint64_t stats[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get(port_id, NULL, 0);
+ if (num_stats < 0) {
+ _ODP_ERR("rte_eth_xstats_get() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (stats == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat xstats[num_stats];
+
+ ret = rte_eth_xstats_get(port_id, xstats, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ _ODP_ERR("rte_eth_xstats_get() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ stats[i] = xstats[i].value;
+
+ return num_stats;
+}
+
+static int dpdk_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ uint64_t xstat_id = id;
+ int ret;
+
+ ret = rte_eth_xstats_get_by_id(port_id, &xstat_id, stat, 1);
+ if (ret != 1) {
+ _ODP_ERR("rte_eth_xstats_get_by_id() failed: %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int dpdk_pktin_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ _ODP_ERR("DPDK supports max %d per queue counters\n", RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ _ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
+
+ pktin_stats->packets = rte_stats.q_ipackets[index];
+ pktin_stats->octets = rte_stats.q_ibytes[index];
+ pktin_stats->errors = rte_stats.q_errors[index];
+
+ return 0;
+}
+
+static int dpdk_pktout_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ _ODP_ERR("DPDK supports max %d per queue counters\n", RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ _ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
+
+ pktout_stats->packets = rte_stats.q_opackets[index];
+ pktout_stats->octets = rte_stats.q_obytes[index];
+
+ return 0;
+}
+
+const pktio_if_ops_t _odp_dpdk_pktio_ops = {
.name = "dpdk",
.init_global = dpdk_pktio_init_global,
.init_local = dpdk_pktio_init_local,
- .term = NULL,
+ .term = dpdk_pktio_term,
.open = dpdk_open,
.close = dpdk_close,
.start = dpdk_start,
.stop = dpdk_stop,
.stats = dpdk_stats,
.stats_reset = dpdk_stats_reset,
+ .pktin_queue_stats = dpdk_pktin_stats,
+ .pktout_queue_stats = dpdk_pktout_stats,
+ .extra_stat_info = dpdk_extra_stat_info,
+ .extra_stats = dpdk_extra_stats,
+ .extra_stat_counter = dpdk_extra_stat_counter,
.recv = dpdk_recv,
.send = dpdk_send,
.link_status = dpdk_link_status,
- .mtu_get = dpdk_mtu_get,
+ .link_info = dpdk_link_info,
+ .maxlen_get = dpdk_maxlen,
+ .maxlen_set = dpdk_maxlen_set,
.promisc_mode_set = dpdk_promisc_mode_set,
.promisc_mode_get = dpdk_promisc_mode_get,
.mac_get = dpdk_mac_addr_get,
+ .mac_set = dpdk_mac_addr_set,
.capability = dpdk_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL,
.input_queues_config = dpdk_input_queues_config,
.output_queues_config = dpdk_output_queues_config
};
-#endif /* ODP_PKTIO_DPDK */
+static odp_bool_t is_mem_src_active(void)
+{
+ return !disable_pktio && _ODP_DPDK_ZERO_COPY;
+}
+
+static void force_mem_src_disable(void)
+{
+ if (_ODP_DPDK_ZERO_COPY)
+ disable_pktio = 1;
+}
+
+const _odp_pool_mem_src_ops_t _odp_pool_dpdk_mem_src_ops = {
+ .name = "dpdk_zc",
+ .is_active = is_mem_src_active,
+ .force_disable = force_mem_src_disable,
+ .adjust_size = pool_obj_size,
+ .bind = pool_create,
+ .unbind = pool_destroy
+};
+
+#else
+/* Avoid warning about empty translation unit */
+typedef int _odp_dummy;
+#endif /* _ODP_PKTIO_DPDK */
diff --git a/platform/linux-generic/pktio/ethtool.c b/platform/linux-generic/pktio/ethtool.c
deleted file mode 100644
index 1b0f25b2c..000000000
--- a/platform/linux-generic/pktio/ethtool.c
+++ /dev/null
@@ -1,164 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <sys/ioctl.h>
-#include <netinet/in.h>
-#include <linux/sockios.h>
-#include <linux/if.h>
-#include <linux/ethtool.h>
-#include <errno.h>
-#include <net/if.h>
-
-#include <odp_api.h>
-#include <odp_packet_socket.h>
-#include <odp_debug_internal.h>
-
-static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
-{
- struct {
- struct ethtool_sset_info hdr;
- uint32_t buf[1];
- } sset_info;
- struct ethtool_drvinfo drvinfo;
- uint32_t len;
- struct ethtool_gstrings *strings;
- ptrdiff_t drvinfo_offset = offsetof(struct ethtool_drvinfo, n_stats);
-
- sset_info.hdr.cmd = ETHTOOL_GSSET_INFO;
- sset_info.hdr.reserved = 0;
- sset_info.hdr.sset_mask = 1ULL << ETH_SS_STATS;
- ifr->ifr_data = &sset_info;
- if (ioctl(fd, SIOCETHTOOL, ifr) == 0) {
- len = sset_info.hdr.sset_mask ? sset_info.hdr.data[0] : 0;
- } else if (errno == EOPNOTSUPP && drvinfo_offset != 0) {
- /* Fallback for old kernel versions */
- drvinfo.cmd = ETHTOOL_GDRVINFO;
- ifr->ifr_data = &drvinfo;
- if (ioctl(fd, SIOCETHTOOL, ifr)) {
- __odp_errno = errno;
- ODP_ERR("Cannot get stats information\n");
- return NULL;
- }
- len = *(uint32_t *)(void *)((char *)&drvinfo + drvinfo_offset);
- } else {
- __odp_errno = errno;
- return NULL;
- }
-
- if (!len) {
- ODP_ERR("len is zero");
- return NULL;
- }
-
- strings = calloc(1, sizeof(*strings) + len * ETH_GSTRING_LEN);
- if (!strings) {
- ODP_ERR("alloc failed\n");
- return NULL;
- }
-
- strings->cmd = ETHTOOL_GSTRINGS;
- strings->string_set = ETH_SS_STATS;
- strings->len = len;
- ifr->ifr_data = strings;
- if (ioctl(fd, SIOCETHTOOL, ifr)) {
- __odp_errno = errno;
- ODP_ERR("Cannot get stats information\n");
- free(strings);
- return NULL;
- }
-
- return strings;
-}
-
-static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
-{
- struct ethtool_gstrings *strings;
- struct ethtool_stats *estats;
- unsigned int n_stats, i;
- int err;
- int cnts;
-
- strings = get_stringset(fd, ifr);
- if (!strings)
- return -1;
-
- n_stats = strings->len;
- if (n_stats < 1) {
- ODP_ERR("no stats available\n");
- free(strings);
- return -1;
- }
-
- estats = calloc(1, n_stats * sizeof(uint64_t) +
- sizeof(struct ethtool_stats));
- if (!estats) {
- free(strings);
- return -1;
- }
-
- estats->cmd = ETHTOOL_GSTATS;
- estats->n_stats = n_stats;
- ifr->ifr_data = estats;
- err = ioctl(fd, SIOCETHTOOL, ifr);
- if (err < 0) {
- __odp_errno = errno;
- free(strings);
- free(estats);
- return -1;
- }
-
- cnts = 0;
- for (i = 0; i < n_stats; i++) {
- char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
- uint64_t val = estats->data[i];
-
- if (!strcmp(cnt, "rx_octets")) {
- stats->in_octets = val;
- cnts++;
- } else if (!strcmp(cnt, "rx_ucast_packets")) {
- stats->in_ucast_pkts = val;
- cnts++;
- } else if (!strcmp(cnt, "rx_discards")) {
- stats->in_discards = val;
- cnts++;
- } else if (!strcmp(cnt, "rx_errors")) {
- stats->in_errors = val;
- cnts++;
- } else if (!strcmp(cnt, "tx_octets")) {
- stats->out_octets = val;
- cnts++;
- } else if (!strcmp(cnt, "tx_ucast_packets")) {
- stats->out_ucast_pkts = val;
- cnts++;
- } else if (!strcmp(cnt, "tx_discards")) {
- stats->out_discards = val;
- cnts++;
- } else if (!strcmp(cnt, "tx_errors")) {
- stats->out_errors = val;
- cnts++;
- }
- }
-
- free(strings);
- free(estats);
-
- /* Ethtool strings came from kernel driver. Name of that
- * strings is not universal. Current function needs to be updated
- * if your driver has different names for counters */
- if (cnts < 8)
- return -1;
-
- return 0;
-}
-
-int ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
-{
- struct ifreq ifr;
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
-
- return ethtool_stats(fd, &ifr, stats);
-}
diff --git a/platform/linux-generic/pktio/ethtool_rss.c b/platform/linux-generic/pktio/ethtool_rss.c
new file mode 100644
index 000000000..df97e2417
--- /dev/null
+++ b/platform/linux-generic/pktio/ethtool_rss.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <odp_ethtool_rss.h>
+#include <odp_debug_internal.h>
+
+/**
+ * Get enabled hash options of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param flow_type Packet flow type
+ * @param[out] options Enabled hash options
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+static inline int get_rss_hash_options(int fd, const char *name,
+ uint32_t flow_type, uint64_t *options)
+{
+ struct ifreq ifr;
+ struct ethtool_rxnfc rsscmd;
+
+ memset(&ifr, 0, sizeof(ifr));
+ memset(&rsscmd, 0, sizeof(rsscmd));
+ *options = 0;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ rsscmd.cmd = ETHTOOL_GRXFH;
+ rsscmd.flow_type = flow_type;
+
+ ifr.ifr_data = (caddr_t)&rsscmd;
+
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
+ return -1;
+
+ *options = rsscmd.data;
+ return 0;
+}
+
+int _odp_rss_conf_get_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto)
+{
+ uint64_t options;
+ int rss_enabled = 0;
+
+ memset(hash_proto, 0, sizeof(odp_pktin_hash_proto_t));
+
+ get_rss_hash_options(fd, name, IPV4_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST)) {
+ hash_proto->proto.ipv4 = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, TCP_V4_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv4_tcp = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, UDP_V4_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv4_udp = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, IPV6_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST)) {
+ hash_proto->proto.ipv6 = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, TCP_V6_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv6_tcp = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, UDP_V6_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv6_udp = 1;
+ rss_enabled++;
+ }
+ return rss_enabled;
+}
+
+/**
+ * Set hash options of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param flow_type Packet flow type
+ * @param options Hash options
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+static inline int set_rss_hash(int fd, const char *name,
+ uint32_t flow_type, uint64_t options)
+{
+ struct ifreq ifr;
+ struct ethtool_rxnfc rsscmd;
+
+ memset(&rsscmd, 0, sizeof(rsscmd));
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ rsscmd.cmd = ETHTOOL_SRXFH;
+ rsscmd.flow_type = flow_type;
+ rsscmd.data = options;
+
+ ifr.ifr_data = (caddr_t)&rsscmd;
+
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
+ return -1;
+
+ return 0;
+}
+
+int _odp_rss_conf_set_fd(int fd, const char *name,
+ const odp_pktin_hash_proto_t *hash_proto)
+{
+ uint64_t options;
+ odp_pktin_hash_proto_t cur_hash;
+
+ /* Compare to currently set hash protocols */
+ _odp_rss_conf_get_fd(fd, name, &cur_hash);
+
+ if (hash_proto->proto.ipv4_udp && !cur_hash.proto.ipv4_udp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, UDP_V4_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv4_tcp && !cur_hash.proto.ipv4_tcp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, TCP_V4_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv6_udp && !cur_hash.proto.ipv6_udp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, UDP_V6_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv6_tcp && !cur_hash.proto.ipv6_tcp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, TCP_V6_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv4 && !cur_hash.proto.ipv4) {
+ options = RXH_IP_SRC | RXH_IP_DST;
+ if (set_rss_hash(fd, name, IPV4_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv6 && !cur_hash.proto.ipv6) {
+ options = RXH_IP_SRC | RXH_IP_DST;
+ if (set_rss_hash(fd, name, IPV6_FLOW, options))
+ return -1;
+ }
+ return 0;
+}
+
+int _odp_rss_conf_get_supported_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto)
+{
+ uint64_t options;
+ int rss_supported = 0;
+
+ memset(hash_proto, 0, sizeof(odp_pktin_hash_proto_t));
+
+ if (!get_rss_hash_options(fd, name, IPV4_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, IPV4_FLOW, options)) {
+ hash_proto->proto.ipv4 = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, TCP_V4_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, TCP_V4_FLOW, options)) {
+ hash_proto->proto.ipv4_tcp = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, UDP_V4_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, UDP_V4_FLOW, options)) {
+ hash_proto->proto.ipv4_udp = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, IPV6_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, IPV6_FLOW, options)) {
+ hash_proto->proto.ipv6 = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, TCP_V6_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, TCP_V6_FLOW, options)) {
+ hash_proto->proto.ipv6_tcp = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, UDP_V6_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, UDP_V6_FLOW, options)) {
+ hash_proto->proto.ipv6_udp = 1;
+ rss_supported++;
+ }
+ }
+ return rss_supported;
+}
+
+void _odp_rss_conf_print(const odp_pktin_hash_proto_t *hash_proto)
+{ int max_len = 512;
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+
+ len += snprintf(&str[len], n - len, " rss conf\n");
+
+ if (hash_proto->proto.ipv4)
+ len += snprintf(&str[len], n - len,
+ " IPV4\n");
+ if (hash_proto->proto.ipv4_tcp)
+ len += snprintf(&str[len], n - len,
+ " IPV4 TCP\n");
+ if (hash_proto->proto.ipv4_udp)
+ len += snprintf(&str[len], n - len,
+ " IPV4 UDP\n");
+ if (hash_proto->proto.ipv6)
+ len += snprintf(&str[len], n - len,
+ " IPV6\n");
+ if (hash_proto->proto.ipv6_tcp)
+ len += snprintf(&str[len], n - len,
+ " IPV6 TCP\n");
+ if (hash_proto->proto.ipv6_udp)
+ len += snprintf(&str[len], n - len,
+ " IPV6 UDP\n");
+ str[len] = '\0';
+
+ _ODP_PRINT("%s\n", str);
+}
+
diff --git a/platform/linux-generic/pktio/io_ops.c b/platform/linux-generic/pktio/io_ops.c
index fbf30ca7a..141b881e3 100644
--- a/platform/linux-generic/pktio/io_ops.c
+++ b/platform/linux-generic/pktio/io_ops.c
@@ -1,31 +1,31 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/autoheader_internal.h>
#include <odp_packet_io_internal.h>
/* Ops for all implementation of pktio.
* Order matters. The first implementation to setup successfully
* will be picked.
* Array must be NULL terminated */
-const pktio_if_ops_t * const pktio_if_ops[] = {
- &loopback_pktio_ops,
-#ifdef ODP_PKTIO_DPDK
- &dpdk_pktio_ops,
+const pktio_if_ops_t * const _odp_pktio_if_ops[] = {
+ &_odp_loopback_pktio_ops,
+#ifdef _ODP_PKTIO_DPDK
+ &_odp_dpdk_pktio_ops,
#endif
-#ifdef ODP_NETMAP
- &netmap_pktio_ops,
+#ifdef _ODP_PKTIO_XDP
+ &_odp_sock_xdp_pktio_ops,
#endif
-#ifdef HAVE_PCAP
- &pcap_pktio_ops,
+#ifdef _ODP_PKTIO_PCAP
+ &_odp_pcap_pktio_ops,
#endif
-#ifdef _ODP_PKTIO_IPC
- &ipc_pktio_ops,
-#endif
- &tap_pktio_ops,
- &sock_mmap_pktio_ops,
- &sock_mmsg_pktio_ops,
+ &_odp_ipc_pktio_ops,
+ &_odp_tap_pktio_ops,
+ &_odp_null_pktio_ops,
+ &_odp_sock_mmap_pktio_ops,
+ &_odp_sock_mmsg_pktio_ops,
NULL
};
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c
index 06175e5a0..dd286328a 100644
--- a/platform/linux-generic/pktio/ipc.c
+++ b/platform/linux-generic/pktio/ipc.c
@@ -1,49 +1,204 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_packet_io_ipc_internal.h>
+
+#include <odp/api/hints.h>
+#include <odp/api/system_info.h>
+
#include <odp_debug_internal.h>
#include <odp_packet_io_internal.h>
-#include <odp/api/system_info.h>
+#include <odp_pool_internal.h>
+#include <odp_macros_internal.h>
#include <odp_shm_internal.h>
-#include <_ishm_internal.h>
+#include <odp_ring_ptr_internal.h>
+#include <odp_global_data.h>
+#include <fcntl.h>
+#include <stdint.h>
#include <sys/mman.h>
#include <sys/stat.h>
-#include <fcntl.h>
+#include <unistd.h>
+
+/* Debug level for IPC */
+#define IPC_DBG 3
-#define IPC_ODP_DEBUG_PRINT 0
+/* Burst size for IPC free operations */
+#define IPC_BURST_SIZE 32
-#define IPC_ODP_DBG(fmt, ...) \
- do { \
- if (IPC_ODP_DEBUG_PRINT == 1) \
- ODP_DBG(fmt, ##__VA_ARGS__);\
- } while (0)
+/* This struct is exported to shared memory so that processes can find
+ * each other.
+ */
+struct pktio_info {
+ struct {
+ /* Pool base address */
+ void *base_addr;
+ /* Name of the pool */
+ char pool_name[ODP_POOL_NAME_LEN];
+ /* 1 if master finished creation of all shared objects */
+ int init_done;
+ /* IPC ring size */
+ uint32_t ring_size;
+ /* IPC ring mask */
+ uint32_t ring_mask;
+ } master;
+ struct {
+ /* Pool base address */
+ void *base_addr;
+ char pool_name[ODP_POOL_NAME_LEN];
+ /* pid of the slave process written to shm and
+ * used by master to look up memory created by
+ * slave
+ */
+ int pid;
+ int init_done;
+ } slave;
+} ODP_PACKED;
+
+typedef struct {
+ /* TX */
+ struct {
+ /* ODP ring for IPC msg packets indexes transmitted to shared
+ * memory */
+ ring_ptr_t *send;
+ /* ODP ring for IPC msg packets indexes already processed by
+ * remote process */
+ ring_ptr_t *free;
+ } tx;
+ /* RX */
+ struct {
+ /* ODP ring for IPC msg packets indexes received from shared
+ * memory (from remote process) */
+ ring_ptr_t *recv;
+ /* ODP ring for IPC msg packets indexes already processed by
+ * current process */
+ ring_ptr_t *free;
+ /* local cache to keep packet order right */
+ ring_ptr_t *cache;
+ } rx; /* slave */
+ /* Remote pool mdata base addr */
+ void *pool_mdata_base;
+ /* Remote pool base address for offset calculation */
+ void *remote_base_addr;
+ odp_pool_t pool; /**< Pool of main process */
+ enum {
+ PKTIO_TYPE_IPC_MASTER = 0, /**< Master is the process which
+ creates shm */
+ PKTIO_TYPE_IPC_SLAVE /**< Slave is the process which
+ connects to shm */
+ } type; /**< define if it's master or slave process */
+ odp_atomic_u32_t ready; /**< 1 - pktio is ready and can recv/send
+ packet, 0 - not yet ready */
+ /* Local copy of IPC ring size */
+ uint32_t ring_size;
+ /* Local copy IPC ring mask */
+ uint32_t ring_mask;
+ struct pktio_info *pinfo;
+ odp_shm_t pinfo_shm;
+ odp_shm_t remote_pool_shm; /**< shm of remote pool get with
+ _ipc_map_remote_pool() */
+} pkt_ipc_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_ipc_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static inline pkt_ipc_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_ipc_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
/* MAC address for the "ipc" interface */
-static const char pktio_ipc_mac[] = {0x12, 0x12, 0x12, 0x12, 0x12, 0x12};
+static const uint8_t pktio_ipc_mac[] = {0x12, 0x12, 0x12, 0x12, 0x12, 0x12};
static odp_shm_t _ipc_map_remote_pool(const char *name, int pid);
+/* create the ring */
+static ring_ptr_t *_ring_create(const char *name, uint32_t count,
+ uint32_t shm_flags)
+{
+ ring_ptr_t *r;
+ size_t ring_size;
+ odp_shm_t shm;
+
+ if (odp_global_ro.shm_single_va)
+ shm_flags |= ODP_SHM_SINGLE_VA;
+
+ /* count must be a power of 2 */
+ if (!_ODP_CHECK_IS_POWER2(count)) {
+ _ODP_ERR("Requested size is invalid, must be a power of 2\n");
+ return NULL;
+ }
+
+ ring_size = sizeof(ring_ptr_t) + count * sizeof(void *);
+
+ /* reserve a memory zone for this ring.*/
+ shm = odp_shm_reserve(name, ring_size, ODP_CACHE_LINE_SIZE, shm_flags);
+
+ r = odp_shm_addr(shm);
+ if (r != NULL) {
+ /* init the ring structure */
+ ring_ptr_init(r);
+
+ } else {
+ _ODP_ERR("Cannot reserve memory\n");
+ }
+
+ return r;
+}
+
+static int _ring_destroy(const char *name)
+{
+ odp_shm_t shm = odp_shm_lookup(name);
+
+ if (shm != ODP_SHM_INVALID)
+ return odp_shm_free(shm);
+
+ return 0;
+}
+
+/**
+ * Return the number of entries in a ring.
+ */
+static uint32_t _ring_count(ring_ptr_t *r, uint32_t mask)
+{
+ uint32_t prod_tail = odp_atomic_load_u32(&r->r.w_tail);
+ uint32_t cons_tail = odp_atomic_load_u32(&r->r.r_tail);
+
+ return (prod_tail - cons_tail) & mask;
+}
+
+/**
+ * Return the number of free entries in a ring.
+ */
+static uint32_t _ring_free_count(ring_ptr_t *r, uint32_t mask)
+{
+ uint32_t prod_tail = odp_atomic_load_u32(&r->r.w_tail);
+ uint32_t cons_tail = odp_atomic_load_u32(&r->r.r_tail);
+
+ return (cons_tail - prod_tail - 1) & mask;
+}
+
static const char *_ipc_odp_buffer_pool_shm_name(odp_pool_t pool_hdl)
{
pool_t *pool;
odp_shm_t shm;
odp_shm_info_t info;
- pool = pool_entry_from_hdl(pool_hdl);
+ pool = _odp_pool_entry(pool_hdl);
shm = pool->shm;
- odp_shm_info(shm, &info);
+ if (odp_shm_info(shm, &info))
+ return "name_unknown";
return info.name;
}
static int _ipc_master_start(pktio_entry_t *pktio_entry)
{
- struct pktio_info *pinfo = pktio_entry->s.ipc.pinfo;
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ struct pktio_info *pinfo = pktio_ipc->pinfo;
odp_shm_t shm;
if (pinfo->slave.init_done == 0)
@@ -52,19 +207,17 @@ static int _ipc_master_start(pktio_entry_t *pktio_entry)
shm = _ipc_map_remote_pool(pinfo->slave.pool_name,
pinfo->slave.pid);
if (shm == ODP_SHM_INVALID) {
- ODP_DBG("no pool file %s for pid %d\n",
- pinfo->slave.pool_name, pinfo->slave.pid);
+ _ODP_DBG("no pool file %s for pid %d\n", pinfo->slave.pool_name, pinfo->slave.pid);
return -1;
}
- pktio_entry->s.ipc.remote_pool_shm = shm;
- pktio_entry->s.ipc.pool_base = odp_shm_addr(shm);
- pktio_entry->s.ipc.pool_mdata_base = (char *)odp_shm_addr(shm) +
- pinfo->slave.base_addr_offset;
+ pktio_ipc->remote_pool_shm = shm;
+ pktio_ipc->remote_base_addr = pinfo->slave.base_addr;
+ pktio_ipc->pool_mdata_base = (char *)odp_shm_addr(shm);
- odp_atomic_store_u32(&pktio_entry->s.ipc.ready, 1);
+ odp_atomic_store_u32(&pktio_ipc->ready, 1);
- IPC_ODP_DBG("%s started.\n", pktio_entry->s.name);
+ ODP_DBG_LVL(IPC_DBG, "%s started.\n", pktio_entry->name);
return 0;
}
@@ -72,16 +225,38 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
const char *dev,
odp_pool_t pool_hdl)
{
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_m_prod")];
- pool_t *pool;
struct pktio_info *pinfo;
const char *pool_name;
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+ uint32_t ring_size;
+ uint32_t ring_mask;
+
+ if ((uint64_t)_ODP_ROUNDUP_POWER2_U32(pool->num + 1) > UINT32_MAX) {
+ _ODP_ERR("Too large packet pool\n");
+ return -1;
+ }
+
+ /* Ring must be able to store all packets in the pool */
+ ring_size = _ODP_ROUNDUP_POWER2_U32(pool->num + 1);
- pool = pool_entry_from_hdl(pool_hdl);
- (void)pool;
+ /* Ring size has to larger than burst size */
+ if (ring_size <= IPC_BURST_SIZE)
+ ring_size = _ODP_ROUNDUP_POWER2_U32(IPC_BURST_SIZE + 1);
+ ring_mask = ring_size - 1;
+
+ pktio_ipc->ring_size = ring_size;
+ pktio_ipc->ring_mask = ring_mask;
if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_m_prod"))) {
- ODP_ERR("too big ipc name\n");
+ _ODP_ERR("too big ipc name\n");
+ return -1;
+ }
+
+ pktio_ipc->rx.cache = _ring_create("ipc_rx_cache", ring_size, 0);
+ if (!pktio_ipc->rx.cache) {
+ _ODP_ERR("pid %d unable to create ipc rx cache\n", getpid());
return -1;
}
@@ -89,78 +264,75 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
* to be processed packets ring.
*/
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
- pktio_entry->s.ipc.tx.send = _ring_create(ipc_shm_name,
- PKTIO_IPC_ENTRIES,
- _RING_SHM_PROC | _RING_NO_LIST);
- if (!pktio_entry->s.ipc.tx.send) {
- ODP_ERR("pid %d unable to create ipc ring %s name\n",
- getpid(), ipc_shm_name);
+ pktio_ipc->tx.send = _ring_create(ipc_shm_name, ring_size,
+ ODP_SHM_PROC | ODP_SHM_EXPORT);
+ if (!pktio_ipc->tx.send) {
+ _ODP_ERR("pid %d unable to create ipc ring %s name\n", getpid(), ipc_shm_name);
return -1;
}
- ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.tx.send),
- _ring_free_count(pktio_entry->s.ipc.tx.send));
+ _ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->tx.send, ring_mask),
+ _ring_free_count(pktio_ipc->tx.send, ring_mask));
/* generate name in shm like ipc_pktio_p for
* already processed packets
*/
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
- pktio_entry->s.ipc.tx.free = _ring_create(ipc_shm_name,
- PKTIO_IPC_ENTRIES,
- _RING_SHM_PROC | _RING_NO_LIST);
- if (!pktio_entry->s.ipc.tx.free) {
- ODP_ERR("pid %d unable to create ipc ring %s name\n",
- getpid(), ipc_shm_name);
+ pktio_ipc->tx.free = _ring_create(ipc_shm_name, ring_size,
+ ODP_SHM_PROC | ODP_SHM_EXPORT);
+ if (!pktio_ipc->tx.free) {
+ _ODP_ERR("pid %d unable to create ipc ring %s name\n", getpid(), ipc_shm_name);
goto free_m_prod;
}
- ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.tx.free),
- _ring_free_count(pktio_entry->s.ipc.tx.free));
+ _ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->tx.free, ring_mask),
+ _ring_free_count(pktio_ipc->tx.free, ring_mask));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
- pktio_entry->s.ipc.rx.recv = _ring_create(ipc_shm_name,
- PKTIO_IPC_ENTRIES,
- _RING_SHM_PROC | _RING_NO_LIST);
- if (!pktio_entry->s.ipc.rx.recv) {
- ODP_ERR("pid %d unable to create ipc ring %s name\n",
- getpid(), ipc_shm_name);
+ pktio_ipc->rx.recv = _ring_create(ipc_shm_name, ring_size,
+ ODP_SHM_PROC | ODP_SHM_EXPORT);
+ if (!pktio_ipc->rx.recv) {
+ _ODP_ERR("pid %d unable to create ipc ring %s name\n", getpid(), ipc_shm_name);
goto free_m_cons;
}
- ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.rx.recv),
- _ring_free_count(pktio_entry->s.ipc.rx.recv));
+ _ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->rx.recv, ring_mask),
+ _ring_free_count(pktio_ipc->rx.recv, ring_mask));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", dev);
- pktio_entry->s.ipc.rx.free = _ring_create(ipc_shm_name,
- PKTIO_IPC_ENTRIES,
- _RING_SHM_PROC | _RING_NO_LIST);
- if (!pktio_entry->s.ipc.rx.free) {
- ODP_ERR("pid %d unable to create ipc ring %s name\n",
- getpid(), ipc_shm_name);
+ pktio_ipc->rx.free = _ring_create(ipc_shm_name, ring_size,
+ ODP_SHM_PROC | ODP_SHM_EXPORT);
+ if (!pktio_ipc->rx.free) {
+ _ODP_ERR("pid %d unable to create ipc ring %s name\n", getpid(), ipc_shm_name);
goto free_s_prod;
}
- ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.rx.free),
- _ring_free_count(pktio_entry->s.ipc.rx.free));
+ _ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->rx.free, ring_mask),
+ _ring_free_count(pktio_ipc->rx.free, ring_mask));
/* Set up pool name for remote info */
- pinfo = pktio_entry->s.ipc.pinfo;
+ pinfo = pktio_ipc->pinfo;
pool_name = _ipc_odp_buffer_pool_shm_name(pool_hdl);
if (strlen(pool_name) > ODP_POOL_NAME_LEN) {
- ODP_ERR("pid %d ipc pool name %s is too big %d\n",
- getpid(), pool_name, strlen(pool_name));
+ _ODP_ERR("pid %d ipc pool name %s is too big %zu\n",
+ getpid(), pool_name, strlen(pool_name));
goto free_s_prod;
}
- memcpy(pinfo->master.pool_name, pool_name, strlen(pool_name));
- pinfo->slave.base_addr_offset = 0;
+ strcpy(pinfo->master.pool_name, pool_name);
+
+ /* Export ring info for the slave process to use */
+ pinfo->master.ring_size = ring_size;
+ pinfo->master.ring_mask = ring_mask;
+ pinfo->master.base_addr = odp_shm_addr(pool->shm);
+
pinfo->slave.base_addr = 0;
pinfo->slave.pid = 0;
pinfo->slave.init_done = 0;
- pktio_entry->s.ipc.pool = pool_hdl;
+ pktio_ipc->pool = pool_hdl;
- ODP_DBG("Pre init... DONE.\n");
+ _ODP_DBG("Pre init... DONE.\n");
pinfo->master.init_done = 1;
_ipc_master_start(pktio_entry);
@@ -182,13 +354,12 @@ free_m_prod:
static void _ipc_export_pool(struct pktio_info *pinfo,
odp_pool_t pool_hdl)
{
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
snprintf(pinfo->slave.pool_name, ODP_POOL_NAME_LEN, "%s",
_ipc_odp_buffer_pool_shm_name(pool_hdl));
- pinfo->slave.pid = odp_global_data.main_pid;
- pinfo->slave.block_size = pool->block_size;
- pinfo->slave.base_addr = pool->base_addr;
+ pinfo->slave.pid = odp_global_ro.main_pid;
+ pinfo->slave.base_addr = odp_shm_addr(pool->shm);
}
static odp_shm_t _ipc_map_remote_pool(const char *name, int pid)
@@ -199,11 +370,11 @@ static odp_shm_t _ipc_map_remote_pool(const char *name, int pid)
snprintf(rname, ODP_SHM_NAME_LEN, "remote-%s", name);
shm = odp_shm_import(name, pid, rname);
if (shm == ODP_SHM_INVALID) {
- ODP_ERR("unable map %s\n", name);
+ _ODP_ERR("unable map %s\n", name);
return ODP_SHM_INVALID;
}
- IPC_ODP_DBG("Mapped remote pool %s to local %s\n", name, rname);
+ ODP_DBG_LVL(IPC_DBG, "Mapped remote pool %s to local %s\n", name, rname);
return shm;
}
@@ -213,100 +384,117 @@ static void *_ipc_shm_map(char *name, int pid)
shm = odp_shm_import(name, pid, name);
if (ODP_SHM_INVALID == shm) {
- ODP_ERR("unable to map: %s\n", name);
+ _ODP_ERR("unable to map: %s\n", name);
return NULL;
}
return odp_shm_addr(shm);
}
-static int _ipc_init_slave(const char *dev,
- pktio_entry_t *pktio_entry,
- odp_pool_t pool)
+static int _ipc_init_slave(const char *dev, pktio_entry_t *pktio_entry,
+ odp_pool_t pool_hdl)
{
- if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r")))
- ODP_ABORT("too big ipc name\n");
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
+ uint32_t ring_size = pktio_ipc->pinfo->master.ring_size;
+
+ if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) {
+ _ODP_ERR("Too big ipc name\n");
+ return -1;
+ }
+
+ /* Check that IPC rings are able to store all packets */
+ if (pool->num >= ring_size) {
+ _ODP_ERR("Slave process packet pool too large. Master process "
+ "packet pool has to be larger than slave pool.\n");
+ return -1;
+ }
+
+ pktio_ipc->rx.cache = _ring_create("ipc_rx_cache", ring_size, 0);
+ if (!pktio_ipc->rx.cache) {
+ _ODP_ERR("Pid %d unable to create ipc rx cache\n", getpid());
+ return -1;
+ }
+ pktio_ipc->ring_size = ring_size;
+ pktio_ipc->ring_mask = pktio_ipc->pinfo->master.ring_mask;
+ pktio_ipc->pool = pool_hdl;
- pktio_entry->s.ipc.pool = pool;
return 0;
}
static int _ipc_slave_start(pktio_entry_t *pktio_entry)
{
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")];
struct pktio_info *pinfo;
odp_shm_t shm;
char tail[ODP_POOL_NAME_LEN];
char dev[ODP_POOL_NAME_LEN];
int pid;
+ uint32_t ring_mask = pktio_ipc->ring_mask;
- if (sscanf(pktio_entry->s.name, "ipc:%d:%s", &pid, tail) != 2) {
- ODP_ERR("wrong pktio name\n");
+ if (sscanf(pktio_entry->name, "ipc:%d:%s", &pid, tail) != 2) {
+ _ODP_ERR("wrong pktio name\n");
return -1;
}
sprintf(dev, "ipc:%s", tail);
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
- pktio_entry->s.ipc.rx.recv = _ipc_shm_map(ipc_shm_name, pid);
- if (!pktio_entry->s.ipc.rx.recv) {
- ODP_DBG("pid %d unable to find ipc ring %s name\n",
- getpid(), dev);
+ pktio_ipc->rx.recv = _ipc_shm_map(ipc_shm_name, pid);
+ if (!pktio_ipc->rx.recv) {
+ _ODP_DBG("pid %d unable to find ipc ring %s name\n", getpid(), dev);
sleep(1);
return -1;
}
- ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.rx.recv),
- _ring_free_count(pktio_entry->s.ipc.rx.recv));
+ _ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->rx.recv, ring_mask),
+ _ring_free_count(pktio_ipc->rx.recv, ring_mask));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
- pktio_entry->s.ipc.rx.free = _ipc_shm_map(ipc_shm_name, pid);
- if (!pktio_entry->s.ipc.rx.free) {
- ODP_ERR("pid %d unable to find ipc ring %s name\n",
- getpid(), dev);
+ pktio_ipc->rx.free = _ipc_shm_map(ipc_shm_name, pid);
+ if (!pktio_ipc->rx.free) {
+ _ODP_ERR("pid %d unable to find ipc ring %s name\n", getpid(), dev);
goto free_m_prod;
}
- ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.rx.free),
- _ring_free_count(pktio_entry->s.ipc.rx.free));
+ _ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->rx.free, ring_mask),
+ _ring_free_count(pktio_ipc->rx.free, ring_mask));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
- pktio_entry->s.ipc.tx.send = _ipc_shm_map(ipc_shm_name, pid);
- if (!pktio_entry->s.ipc.tx.send) {
- ODP_ERR("pid %d unable to find ipc ring %s name\n",
- getpid(), dev);
+ pktio_ipc->tx.send = _ipc_shm_map(ipc_shm_name, pid);
+ if (!pktio_ipc->tx.send) {
+ _ODP_ERR("pid %d unable to find ipc ring %s name\n", getpid(), dev);
goto free_m_cons;
}
- ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.tx.send),
- _ring_free_count(pktio_entry->s.ipc.tx.send));
+ _ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->tx.send, ring_mask),
+ _ring_free_count(pktio_ipc->tx.send, ring_mask));
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", dev);
- pktio_entry->s.ipc.tx.free = _ipc_shm_map(ipc_shm_name, pid);
- if (!pktio_entry->s.ipc.tx.free) {
- ODP_ERR("pid %d unable to find ipc ring %s name\n",
- getpid(), dev);
+ pktio_ipc->tx.free = _ipc_shm_map(ipc_shm_name, pid);
+ if (!pktio_ipc->tx.free) {
+ _ODP_ERR("pid %d unable to find ipc ring %s name\n", getpid(), dev);
goto free_s_prod;
}
- ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
- ipc_shm_name, _ring_count(pktio_entry->s.ipc.tx.free),
- _ring_free_count(pktio_entry->s.ipc.tx.free));
+ _ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, _ring_count(pktio_ipc->tx.free, ring_mask),
+ _ring_free_count(pktio_ipc->tx.free, ring_mask));
/* Get info about remote pool */
- pinfo = pktio_entry->s.ipc.pinfo;
+ pinfo = pktio_ipc->pinfo;
shm = _ipc_map_remote_pool(pinfo->master.pool_name,
pid);
- pktio_entry->s.ipc.remote_pool_shm = shm;
- pktio_entry->s.ipc.pool_mdata_base = (char *)odp_shm_addr(shm) +
- pinfo->master.base_addr_offset;
- pktio_entry->s.ipc.pkt_size = pinfo->master.block_size;
+ pktio_ipc->remote_pool_shm = shm;
+ pktio_ipc->pool_mdata_base = (char *)odp_shm_addr(shm);
+ pktio_ipc->remote_base_addr = pinfo->master.base_addr;
- _ipc_export_pool(pinfo, pktio_entry->s.ipc.pool);
+ _ipc_export_pool(pinfo, pktio_ipc->pool);
- odp_atomic_store_u32(&pktio_entry->s.ipc.ready, 1);
+ odp_atomic_store_u32(&pktio_ipc->ready, 1);
pinfo->slave.init_done = 1;
- ODP_DBG("%s started.\n", pktio_entry->s.name);
+ _ODP_DBG("%s started.\n", pktio_entry->name);
return 0;
free_s_prod:
@@ -329,87 +517,96 @@ static int ipc_pktio_open(odp_pktio_t id ODP_UNUSED,
const char *dev,
odp_pool_t pool)
{
- int ret = -1;
- int pid ODP_UNUSED;
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ int ret = 0;
+ int pid;
struct pktio_info *pinfo;
char name[ODP_POOL_NAME_LEN + sizeof("_info")];
char tail[ODP_POOL_NAME_LEN];
odp_shm_t shm;
- ODP_STATIC_ASSERT(ODP_POOL_NAME_LEN == _RING_NAMESIZE,
- "mismatch pool and ring name arrays");
-
if (strncmp(dev, "ipc", 3))
return -1;
- odp_atomic_init_u32(&pktio_entry->s.ipc.ready, 0);
+ odp_atomic_init_u32(&pktio_ipc->ready, 0);
/* Shared info about remote pktio */
if (sscanf(dev, "ipc:%d:%s", &pid, tail) == 2) {
- pktio_entry->s.ipc.type = PKTIO_TYPE_IPC_SLAVE;
+ pktio_ipc->type = PKTIO_TYPE_IPC_SLAVE;
snprintf(name, sizeof(name), "ipc:%s_info", tail);
- IPC_ODP_DBG("lookup for name %s for pid %d\n", name, pid);
+ ODP_DBG_LVL(IPC_DBG, "lookup for name %s for pid %d\n", name, pid);
shm = odp_shm_import(name, pid, name);
if (ODP_SHM_INVALID == shm)
return -1;
+
pinfo = odp_shm_addr(shm);
if (!pinfo->master.init_done) {
odp_shm_free(shm);
return -1;
}
- pktio_entry->s.ipc.pinfo = pinfo;
- pktio_entry->s.ipc.pinfo_shm = shm;
- ODP_DBG("process %d is slave\n", getpid());
+ pktio_ipc->pinfo = pinfo;
+ pktio_ipc->pinfo_shm = shm;
+ _ODP_DBG("process %d is slave\n", getpid());
ret = _ipc_init_slave(name, pktio_entry, pool);
} else {
- pktio_entry->s.ipc.type = PKTIO_TYPE_IPC_MASTER;
+ pktio_ipc->type = PKTIO_TYPE_IPC_MASTER;
snprintf(name, sizeof(name), "%s_info", dev);
shm = odp_shm_reserve(name, sizeof(struct pktio_info),
ODP_CACHE_LINE_SIZE,
- _ODP_ISHM_EXPORT | _ODP_ISHM_LOCK);
+ ODP_SHM_EXPORT | ODP_SHM_SINGLE_VA);
if (ODP_SHM_INVALID == shm) {
- ODP_ERR("can not create shm %s\n", name);
+ _ODP_ERR("can not create shm %s\n", name);
return -1;
}
pinfo = odp_shm_addr(shm);
pinfo->master.init_done = 0;
pinfo->master.pool_name[0] = 0;
- pktio_entry->s.ipc.pinfo = pinfo;
- pktio_entry->s.ipc.pinfo_shm = shm;
- ODP_DBG("process %d is master\n", getpid());
+
+ pktio_ipc->pinfo = pinfo;
+ pktio_ipc->pinfo_shm = shm;
+ _ODP_DBG("process %d is master\n", getpid());
ret = _ipc_init_master(pktio_entry, dev, pool);
}
+ if (ret)
+ odp_shm_free(shm);
+
return ret;
}
-static void _ipc_free_ring_packets(pktio_entry_t *pktio_entry, _ring_t *r)
+static void _ipc_free_ring_packets(pktio_entry_t *pktio_entry, ring_ptr_t *r,
+ uint32_t r_mask)
{
- uintptr_t offsets[PKTIO_IPC_ENTRIES];
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ uintptr_t offsets[IPC_BURST_SIZE];
int ret;
void **rbuf_p;
int i;
+ void *addr;
+ pool_t *pool;
if (!r)
return;
+ pool = _odp_pool_entry(pktio_ipc->pool);
+ addr = odp_shm_addr(pool->shm);
+
rbuf_p = (void *)&offsets;
while (1) {
- ret = _ring_mc_dequeue_burst(r, rbuf_p,
- PKTIO_IPC_ENTRIES);
- if (0 == ret)
+ ret = ring_ptr_deq_multi(r, r_mask, rbuf_p, IPC_BURST_SIZE);
+ if (ret <= 0)
break;
for (i = 0; i < ret; i++) {
odp_packet_hdr_t *phdr;
odp_packet_t pkt;
- void *mbase = pktio_entry->s.ipc.pool_mdata_base;
- phdr = (void *)((uint8_t *)mbase + offsets[i]);
+ phdr = (void *)((uint8_t *)addr + offsets[i]);
pkt = packet_handle(phdr);
+
odp_packet_free(pkt);
}
}
@@ -418,27 +615,39 @@ static void _ipc_free_ring_packets(pktio_entry_t *pktio_entry, _ring_t *r)
static int ipc_pktio_recv_lockless(pktio_entry_t *pktio_entry,
odp_packet_t pkt_table[], int len)
{
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ uint32_t ring_mask = pktio_ipc->ring_mask;
int pkts = 0;
int i;
- _ring_t *r;
- _ring_t *r_p;
- uintptr_t offsets[PKTIO_IPC_ENTRIES];
- void **ipcbufs_p = (void *)&offsets;
+ ring_ptr_t *r;
+ ring_ptr_t *r_p;
+ uintptr_t offsets[len];
+ void **ipcbufs_p = (void *)&offsets[0];
uint32_t ready;
- int pkts_ring;
- ready = odp_atomic_load_u32(&pktio_entry->s.ipc.ready);
+ ready = odp_atomic_load_u32(&pktio_ipc->ready);
if (odp_unlikely(!ready)) {
- IPC_ODP_DBG("start pktio is missing before usage?\n");
+ ODP_DBG_LVL(IPC_DBG, "start pktio is missing before usage?\n");
return 0;
}
- _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.free);
+ _ipc_free_ring_packets(pktio_entry, pktio_ipc->tx.free, ring_mask);
- r = pktio_entry->s.ipc.rx.recv;
- pkts = _ring_mc_dequeue_burst(r, ipcbufs_p, len);
+ /* rx from cache */
+ r = pktio_ipc->rx.cache;
+ pkts = ring_ptr_deq_multi(r, ring_mask, ipcbufs_p, len);
if (odp_unlikely(pkts < 0))
- ODP_ABORT("internal error dequeue\n");
+ _ODP_ABORT("internal error dequeue\n");
+
+ /* rx from other app */
+ if (pkts == 0) {
+ ipcbufs_p = (void *)&offsets[0];
+ r = pktio_ipc->rx.recv;
+ pkts = ring_ptr_deq_multi(r, ring_mask, ipcbufs_p,
+ len);
+ if (odp_unlikely(pkts < 0))
+ _ODP_ABORT("internal error dequeue\n");
+ }
/* fast path */
if (odp_likely(0 == pkts))
@@ -452,118 +661,133 @@ static int ipc_pktio_recv_lockless(pktio_entry_t *pktio_entry,
uint64_t data_pool_off;
void *rmt_data_ptr;
- phdr = (void *)((uint8_t *)pktio_entry->s.ipc.pool_mdata_base +
- offsets[i]);
+ phdr = (void *)((uint8_t *)pktio_ipc->pool_mdata_base +
+ offsets[i]);
- pool = pktio_entry->s.ipc.pool;
+ pool = pktio_ipc->pool;
if (odp_unlikely(pool == ODP_POOL_INVALID))
- ODP_ABORT("invalid pool");
+ _ODP_ABORT("invalid pool");
- data_pool_off = phdr->buf_hdr.ipc_data_offset;
+ data_pool_off = (uint8_t *)phdr->seg_data -
+ (uint8_t *)pktio_ipc->remote_base_addr;
pkt = odp_packet_alloc(pool, phdr->frame_len);
if (odp_unlikely(pkt == ODP_PACKET_INVALID)) {
/* Original pool might be smaller then
- * PKTIO_IPC_ENTRIES. If packet can not be
+ * ring size. If packet can not be
* allocated from pool at this time,
- * simple get in on next recv() call.
+ * simple get in on next recv() call. To keep
+ * packet ordering store such packets in local
+ * cache.
*/
- if (i == 0)
- return 0;
+ ODP_DBG_LVL(IPC_DBG, "unable to allocate packet %d/%d\n",
+ i, pkts);
break;
}
/* Copy packet data. */
pkt_data = odp_packet_data(pkt);
if (odp_unlikely(!pkt_data))
- ODP_ABORT("unable to map pkt_data ipc_slave %d\n",
- (PKTIO_TYPE_IPC_SLAVE ==
- pktio_entry->s.ipc.type));
+ _ODP_ABORT("unable to map pkt_data ipc_slave %d\n",
+ (PKTIO_TYPE_IPC_SLAVE == pktio_ipc->type));
/* Copy packet data from shared pool to local pool. */
- rmt_data_ptr = (uint8_t *)pktio_entry->s.ipc.pool_mdata_base +
- data_pool_off;
+ rmt_data_ptr = (uint8_t *)pktio_ipc->pool_mdata_base +
+ data_pool_off;
memcpy(pkt_data, rmt_data_ptr, phdr->frame_len);
/* Copy packets L2, L3 parsed offsets and size */
- copy_packet_cls_metadata(phdr, odp_packet_hdr(pkt));
+ _odp_packet_copy_cls_md(packet_hdr(pkt), phdr);
- odp_packet_hdr(pkt)->frame_len = phdr->frame_len;
- odp_packet_hdr(pkt)->headroom = phdr->headroom;
- odp_packet_hdr(pkt)->tailroom = phdr->tailroom;
+ packet_hdr(pkt)->frame_len = phdr->frame_len;
+ packet_hdr(pkt)->headroom = phdr->headroom;
+ packet_hdr(pkt)->tailroom = phdr->tailroom;
/* Take classification fields */
- odp_packet_hdr(pkt)->p = phdr->p;
+ packet_hdr(pkt)->p = phdr->p;
pkt_table[i] = pkt;
}
+ /* put back to rx ring dequeued but not processed packets*/
+ if (pkts != i) {
+ ipcbufs_p = (void *)&offsets[i];
+ r_p = pktio_ipc->rx.cache;
+ ring_ptr_enq_multi(r_p, ring_mask, ipcbufs_p,
+ pkts - i);
+
+ if (i == 0)
+ return 0;
+ }
+
+ /*num of actually received packets*/
+ pkts = i;
+
/* Now tell other process that we no longer need that buffers.*/
- r_p = pktio_entry->s.ipc.rx.free;
+ r_p = pktio_ipc->rx.free;
-repeat:
- pkts_ring = _ring_mp_enqueue_burst(r_p, ipcbufs_p, pkts);
- if (odp_unlikely(pkts < 0))
- ODP_ABORT("ipc: odp_ring_mp_enqueue_bulk r_p fail\n");
- if (odp_unlikely(pkts != pkts_ring)) {
- IPC_ODP_DBG("odp_ring_full: %d, odp_ring_count %d,"
- " _ring_free_count %d\n",
- _ring_full(r_p), _ring_count(r_p),
- _ring_free_count(r_p));
- ipcbufs_p = (void *)&offsets[pkts_ring - 1];
- pkts = pkts - pkts_ring;
- goto repeat;
+ ipcbufs_p = (void *)&offsets[0];
+ ring_ptr_enq_multi(r_p, ring_mask, ipcbufs_p, pkts);
+
+ for (i = 0; i < pkts; i++) {
+ ODP_DBG_LVL(IPC_DBG, "%d/%d send to be free packet offset %" PRIuPTR "\n",
+ i, pkts, offsets[i]);
}
return pkts;
}
static int ipc_pktio_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkt_table[], int len)
+ odp_packet_t pkt_table[], int num)
{
int ret;
- odp_ticketlock_lock(&pktio_entry->s.rxl);
+ odp_ticketlock_lock(&pktio_entry->rxl);
- ret = ipc_pktio_recv_lockless(pktio_entry, pkt_table, len);
+ ret = ipc_pktio_recv_lockless(pktio_entry, pkt_table, num);
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+ odp_ticketlock_unlock(&pktio_entry->rxl);
return ret;
}
static int ipc_pktio_send_lockless(pktio_entry_t *pktio_entry,
- const odp_packet_t pkt_table[], int len)
+ const odp_packet_t pkt_table[], int num)
{
- _ring_t *r;
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ uint32_t ring_mask = pktio_ipc->ring_mask;
+ ring_ptr_t *r;
void **rbuf_p;
- int ret;
int i;
- uint32_t ready = odp_atomic_load_u32(&pktio_entry->s.ipc.ready);
- odp_packet_t pkt_table_mapped[len]; /**< Ready to send packet has to be
+ uint32_t ready = odp_atomic_load_u32(&pktio_ipc->ready);
+ pool_t *ipc_pool = _odp_pool_entry(pktio_ipc->pool);
+ odp_packet_t pkt_table_mapped[num]; /**< Ready to send packet has to be
* in memory mapped pool. */
- uintptr_t offsets[len];
+ uintptr_t offsets[num];
if (odp_unlikely(!ready))
return 0;
- _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.free);
+ _ipc_free_ring_packets(pktio_entry, pktio_ipc->tx.free, ring_mask);
- /* Copy packets to shm shared pool if they are in different */
- for (i = 0; i < len; i++) {
+ /* Copy packets to shm shared pool if they are in different
+ * pool, or if they are references (we can't share across IPC).
+ */
+ for (i = 0; i < num; i++) {
odp_packet_t pkt = pkt_table[i];
- pool_t *ipc_pool = pool_entry_from_hdl(pktio_entry->s.ipc.pool);
- odp_buffer_bits_t handle;
- uint32_t pkt_pool_id;
+ odp_packet_hdr_t *pkt_hdr;
+ pool_t *pool;
- handle.handle = _odp_packet_to_buffer(pkt);
- pkt_pool_id = handle.pool_id;
- if (pkt_pool_id != ipc_pool->pool_idx) {
+ pkt_hdr = packet_hdr(pkt);
+ pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
+
+ if (pool->pool_idx != ipc_pool->pool_idx ||
+ odp_packet_has_ref(pkt)) {
odp_packet_t newpkt;
- newpkt = odp_packet_copy(pkt, pktio_entry->s.ipc.pool);
+ newpkt = odp_packet_copy(pkt, pktio_ipc->pool);
if (newpkt == ODP_PACKET_INVALID)
- ODP_ABORT("Unable to copy packet\n");
+ _ODP_ABORT("Unable to copy packet\n");
odp_packet_free(pkt);
pkt_table_mapped[i] = newpkt;
@@ -573,55 +797,44 @@ static int ipc_pktio_send_lockless(pktio_entry_t *pktio_entry,
}
/* Set offset to phdr for outgoing packets */
- for (i = 0; i < len; i++) {
- uint64_t data_pool_off;
+ for (i = 0; i < num; i++) {
odp_packet_t pkt = pkt_table_mapped[i];
- odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
odp_pool_t pool_hdl = odp_packet_pool(pkt);
- pool_t *pool = pool_entry_from_hdl(pool_hdl);
+ pool_t *pool = _odp_pool_entry(pool_hdl);
offsets[i] = (uint8_t *)pkt_hdr -
(uint8_t *)odp_shm_addr(pool->shm);
- data_pool_off = (uint8_t *)pkt_hdr->buf_hdr.seg[0].data -
- (uint8_t *)odp_shm_addr(pool->shm);
/* compile all function code even if ipc disabled with config */
- pkt_hdr->buf_hdr.ipc_data_offset = data_pool_off;
- IPC_ODP_DBG("%d/%d send packet %llx, pool %llx,"
- "phdr = %p, offset %x\n",
- i, len,
+ ODP_DBG_LVL(IPC_DBG, "%d/%d send packet %" PRIu64 ", pool %" PRIu64 ","
+ "phdr = %p, offset %td, sendoff %" PRIxPTR ", addr %p iaddr "
+ "%p\n", i, num,
odp_packet_to_u64(pkt), odp_pool_to_u64(pool_hdl),
- pkt_hdr, pkt_hdr->buf_hdr.ipc_data_offset);
+ (void *)pkt_hdr, (uint8_t *)pkt_hdr->seg_data -
+ (uint8_t *)odp_shm_addr(pool->shm), offsets[i],
+ odp_shm_addr(pool->shm),
+ odp_shm_addr(ipc_pool->shm));
}
/* Put packets to ring to be processed by other process. */
rbuf_p = (void *)&offsets[0];
- r = pktio_entry->s.ipc.tx.send;
- ret = _ring_mp_enqueue_burst(r, rbuf_p, len);
- if (odp_unlikely(ret < 0)) {
- ODP_ERR("pid %d odp_ring_mp_enqueue_bulk fail, ipc_slave %d, ret %d\n",
- getpid(),
- (PKTIO_TYPE_IPC_SLAVE == pktio_entry->s.ipc.type),
- ret);
- ODP_ERR("odp_ring_full: %d, odp_ring_count %d, _ring_free_count %d\n",
- _ring_full(r), _ring_count(r),
- _ring_free_count(r));
- ODP_ABORT("Unexpected!\n");
- }
+ r = pktio_ipc->tx.send;
+ ring_ptr_enq_multi(r, ring_mask, rbuf_p, num);
- return ret;
+ return num;
}
static int ipc_pktio_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkt_table[], int len)
+ const odp_packet_t pkt_table[], int num)
{
int ret;
- odp_ticketlock_lock(&pktio_entry->s.txl);
+ odp_ticketlock_lock(&pktio_entry->txl);
- ret = ipc_pktio_send_lockless(pktio_entry, pkt_table, len);
+ ret = ipc_pktio_send_lockless(pktio_entry, pkt_table, num);
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ odp_ticketlock_unlock(&pktio_entry->txl);
return ret;
}
@@ -641,14 +854,15 @@ static int ipc_mac_addr_get(pktio_entry_t *pktio_entry ODP_UNUSED,
static int ipc_start(pktio_entry_t *pktio_entry)
{
- uint32_t ready = odp_atomic_load_u32(&pktio_entry->s.ipc.ready);
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ uint32_t ready = odp_atomic_load_u32(&pktio_ipc->ready);
if (ready) {
- ODP_ABORT("%s Already started\n", pktio_entry->s.name);
+ _ODP_ABORT("%s Already started\n", pktio_entry->name);
return -1;
}
- if (pktio_entry->s.ipc.type == PKTIO_TYPE_IPC_MASTER)
+ if (pktio_ipc->type == PKTIO_TYPE_IPC_MASTER)
return _ipc_master_start(pktio_entry);
else
return _ipc_slave_start(pktio_entry);
@@ -656,41 +870,79 @@ static int ipc_start(pktio_entry_t *pktio_entry)
static int ipc_stop(pktio_entry_t *pktio_entry)
{
- unsigned tx_send = 0, tx_free = 0;
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+ uint32_t ring_mask = pktio_ipc->ring_mask;
- odp_atomic_store_u32(&pktio_entry->s.ipc.ready, 0);
+ odp_atomic_store_u32(&pktio_ipc->ready, 0);
- if (pktio_entry->s.ipc.tx.send)
- _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.send);
+ if (pktio_ipc->tx.send)
+ _ipc_free_ring_packets(pktio_entry, pktio_ipc->tx.send,
+ ring_mask);
/* other process can transfer packets from one ring to
* other, use delay here to free that packets. */
sleep(1);
- if (pktio_entry->s.ipc.tx.free)
- _ipc_free_ring_packets(pktio_entry, pktio_entry->s.ipc.tx.free);
-
- if (pktio_entry->s.ipc.tx.send)
- tx_send = _ring_count(pktio_entry->s.ipc.tx.send);
- if (pktio_entry->s.ipc.tx.free)
- tx_free = _ring_count(pktio_entry->s.ipc.tx.free);
- if (tx_send | tx_free) {
- ODP_DBG("IPC rings: tx send %d tx free %d\n",
- tx_send, tx_free);
- }
+ if (pktio_ipc->tx.free)
+ _ipc_free_ring_packets(pktio_entry, pktio_ipc->tx.free,
+ ring_mask);
+
+ return 0;
+}
+
+static int ipc_link_status(pktio_entry_t *pktio_entry)
+{
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+
+ if (odp_atomic_load_u32(&pktio_ipc->ready))
+ return ODP_PKTIO_LINK_STATUS_UP;
+ return ODP_PKTIO_LINK_STATUS_DOWN;
+}
+
+static int ipc_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
+
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ info->media = "virtual";
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ if (odp_atomic_load_u32(&pktio_ipc->ready))
+ info->status = ODP_PKTIO_LINK_STATUS_UP;
+ else
+ info->status = ODP_PKTIO_LINK_STATUS_DOWN;
+
+ return 0;
+}
+
+static int ipc_capability(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_pktio_capability_t));
+
+ capa->max_input_queues = 1;
+ capa->max_output_queues = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
return 0;
}
static int ipc_close(pktio_entry_t *pktio_entry)
{
+ pkt_ipc_t *pktio_ipc = pkt_priv(pktio_entry);
char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_m_prod")];
- char *dev = pktio_entry->s.name;
+ char *dev = pktio_entry->name;
char name[ODP_POOL_NAME_LEN];
char tail[ODP_POOL_NAME_LEN];
int pid = 0;
ipc_stop(pktio_entry);
- odp_shm_free(pktio_entry->s.ipc.remote_pool_shm);
+ odp_shm_free(pktio_ipc->remote_pool_shm);
if (sscanf(dev, "ipc:%d:%s", &pid, tail) == 2)
snprintf(name, sizeof(name), "ipc:%s", tail);
@@ -698,7 +950,7 @@ static int ipc_close(pktio_entry_t *pktio_entry)
snprintf(name, sizeof(name), "%s", dev);
/* unlink this pktio info for both master and slave */
- odp_shm_free(pktio_entry->s.ipc.pinfo_shm);
+ odp_shm_free(pktio_ipc->pinfo_shm);
/* destroy rings */
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", name);
@@ -709,21 +961,15 @@ static int ipc_close(pktio_entry_t *pktio_entry)
_ring_destroy(ipc_shm_name);
snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", name);
_ring_destroy(ipc_shm_name);
+ _ring_destroy("ipc_rx_cache");
return 0;
}
-static int ipc_pktio_init_global(void)
-{
- _ring_tailq_init();
- ODP_PRINT("PKTIO: initialized ipc interface.\n");
- return 0;
-}
-
-const pktio_if_ops_t ipc_pktio_ops = {
+const pktio_if_ops_t _odp_ipc_pktio_ops = {
.name = "ipc",
.print = NULL,
- .init_global = ipc_pktio_init_global,
+ .init_global = NULL,
.init_local = NULL,
.term = NULL,
.open = ipc_pktio_open,
@@ -732,11 +978,16 @@ const pktio_if_ops_t ipc_pktio_ops = {
.send = ipc_pktio_send,
.start = ipc_start,
.stop = ipc_stop,
- .mtu_get = ipc_mtu_get,
+ .link_status = ipc_link_status,
+ .link_info = ipc_link_info,
+ .capability = ipc_capability,
+ .maxlen_get = ipc_mtu_get,
.promisc_mode_set = NULL,
.promisc_mode_get = NULL,
.mac_get = ipc_mac_addr_get,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .mac_set = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL
};
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 70962839f..ff48525a3 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -1,255 +1,795 @@
-/* Copyright (c) 2013, Linaro Limited
- * Copyright (c) 2013, Nokia Solutions and Networks
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2013-2023, Nokia Solutions and Networks
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp_api.h>
-#include <odp_packet_internal.h>
-#include <odp_packet_io_internal.h>
+#include <odp/api/debug.h>
+#include <odp/api/event.h>
+#include <odp/api/hash.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/queue.h>
+#include <odp/api/time.h>
+
+#include <odp/api/plat/byteorder_inlines.h>
+#include <odp/api/plat/packet_flag_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
+
+#include <odp_parse_internal.h>
#include <odp_classification_internal.h>
#include <odp_debug_internal.h>
-#include <odp/api/hints.h>
+#include <odp_event_internal.h>
+#include <odp_global_data.h>
+#include <odp_ipsec_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_queue_if.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
-#include <errno.h>
#include <inttypes.h>
#include <limits.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#define MAX_QUEUES (ODP_PKTIN_MAX_QUEUES > ODP_PKTOUT_MAX_QUEUES ? \
+ ODP_PKTIN_MAX_QUEUES : ODP_PKTOUT_MAX_QUEUES)
+
+#define MAX_LOOP 16
+
+#define LOOP_MTU_MIN 68
+#define LOOP_MTU_MAX UINT16_MAX
+
+#define LOOP_MAX_QUEUE_SIZE 1024
+
+typedef struct {
+ odp_atomic_u64_t in_octets;
+ odp_atomic_u64_t in_packets;
+ odp_atomic_u64_t in_discards;
+ odp_atomic_u64_t in_errors;
+ odp_atomic_u64_t out_octets;
+ odp_atomic_u64_t out_packets;
+} stats_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ /* queue handle as the "wire" */
+ odp_queue_t queue;
+ /* queue specific statistics */
+ stats_t stats;
+ /* config input queue size */
+ uint32_t in_size;
+ /* config output queue size */
+ uint32_t out_size;
+} loop_queue_t;
+
+typedef struct {
+ /* loopback entries for "loop" device */
+ loop_queue_t loopqs[MAX_QUEUES];
+ /* hash config */
+ odp_pktin_hash_proto_t hash;
+ /* config queue count */
+ uint32_t num_conf_qs;
+ /* actual number queues */
+ uint32_t num_qs;
+ /* link MTU */
+ uint16_t mtu;
+ /* index of "loop" device */
+ uint8_t idx;
+ /* create or re-create queue during start */
+ uint8_t queue_create;
+} pkt_loop_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_loop_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static inline pkt_loop_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_loop_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
/* MAC address for the "loop" interface */
-static const char pktio_loop_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x01};
+static const uint8_t pktio_loop_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x01};
-static int loopback_stats_reset(pktio_entry_t *pktio_entry);
+static int loopback_init_capability(pktio_entry_t *pktio_entry);
-static int loopback_open(odp_pktio_t id, pktio_entry_t *pktio_entry,
+static int loopback_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
const char *devname, odp_pool_t pool ODP_UNUSED)
{
- if (strcmp(devname, "loop"))
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+ long idx;
+
+ if (!strcmp(devname, "loop")) {
+ idx = 0;
+ } else if (!strncmp(devname, "loop", 4)) {
+ char *end;
+
+ idx = strtol(devname + 4, &end, 10);
+ if (idx <= 0 || idx >= MAX_LOOP || *end)
+ return -1;
+ } else {
return -1;
+ }
+
+ memset(pkt_loop, 0, sizeof(pkt_loop_t));
+ pkt_loop->mtu = LOOP_MTU_MAX;
+ pkt_loop->idx = idx;
+ pkt_loop->queue_create = 1;
+ loopback_init_capability(pktio_entry);
+
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_octets, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_packets, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_discards, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.in_errors, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.out_octets, 0);
+ odp_atomic_init_u64(&pkt_loop->loopqs[i].stats.out_packets, 0);
+ }
+
+ return 0;
+}
+
+static int loopback_queue_destroy(odp_queue_t queue)
+{
+ odp_event_t event;
- char loopq_name[ODP_QUEUE_NAME_LEN];
+ do {
+ event = odp_queue_deq(queue);
+ if (event != ODP_EVENT_INVALID)
+ odp_event_free(event);
- snprintf(loopq_name, sizeof(loopq_name), "%" PRIu64 "-pktio_loopq",
- odp_pktio_to_u64(id));
- pktio_entry->s.pkt_loop.loopq =
- odp_queue_create(loopq_name, NULL);
+ } while (event != ODP_EVENT_INVALID);
- if (pktio_entry->s.pkt_loop.loopq == ODP_QUEUE_INVALID)
+ if (odp_queue_destroy(queue)) {
+ _ODP_ERR("Destroying loopback pktio queue failed\n");
return -1;
+ }
+ return 0;
+}
+
+static int loopback_queues_destroy(loop_queue_t *queues, uint32_t num_queues)
+{
+ int ret = 0;
+
+ for (uint32_t i = 0; i < num_queues; i++) {
+ if (loopback_queue_destroy(queues[i].queue))
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int loopback_start(pktio_entry_t *pktio_entry)
+{
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+ odp_queue_param_t queue_param;
+ char queue_name[ODP_QUEUE_NAME_LEN];
+
+ /* Re-create queue only when necessary */
+ if (!pkt_loop->queue_create)
+ return 0;
+
+ /* Destroy old queues */
+ if (loopback_queues_destroy(pkt_loop->loopqs, pkt_loop->num_qs))
+ return -1;
+
+ pkt_loop->num_qs = 0;
+
+ for (uint32_t i = 0; i < pkt_loop->num_conf_qs; i++) {
+ odp_queue_param_init(&queue_param);
+ queue_param.size = _ODP_MAX(pkt_loop->loopqs[i].in_size,
+ pkt_loop->loopqs[i].out_size);
+ snprintf(queue_name, sizeof(queue_name), "_odp_pktio_loopq-%" PRIu64 "-%u",
+ odp_pktio_to_u64(pktio_entry->handle), i);
+ pkt_loop->loopqs[i].queue = odp_queue_create(queue_name, &queue_param);
+
+ if (pkt_loop->loopqs[i].queue == ODP_QUEUE_INVALID) {
+ _ODP_ERR("Creating loopback pktio queue %s failed\n", queue_name);
+ (void)loopback_queues_destroy(pkt_loop->loopqs, i);
+ return -1;
+ }
+ }
- loopback_stats_reset(pktio_entry);
+ pkt_loop->num_qs = pkt_loop->num_conf_qs;
+
+ return 0;
+}
+
+static int loopback_pktin_queue_config(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *param)
+{
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ pkt_loop->num_conf_qs = param->num_queues;
+ pkt_loop->queue_create = 1;
+ pkt_loop->hash.all_bits = param->hash_enable ? param->hash_proto.all_bits : 0;
+
+ if (pktio_entry->param.in_mode == ODP_PKTIN_MODE_DIRECT) {
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ if (i < pkt_loop->num_conf_qs)
+ pkt_loop->loopqs[i].in_size = param->queue_size[i];
+ else
+ pkt_loop->loopqs[i].in_size = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int loopback_pktout_queue_config(pktio_entry_t *pktio_entry,
+ const odp_pktout_queue_param_t *param)
+{
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ pkt_loop->queue_create = 1;
+
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ if (i < param->num_queues)
+ pkt_loop->loopqs[i].out_size = param->queue_size[i];
+ else
+ pkt_loop->loopqs[i].out_size = 0;
+ }
return 0;
}
static int loopback_close(pktio_entry_t *pktio_entry)
{
- return odp_queue_destroy(pktio_entry->s.pkt_loop.loopq);
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ return loopback_queues_destroy(pkt_loop->loopqs, pkt_loop->num_qs);
}
-static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkts[], int len)
+static int loopback_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t pkts[], int num)
{
int nbr, i;
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- queue_entry_t *qentry;
+ loop_queue_t *entry = &pkt_priv(pktio_entry)->loopqs[index];
+ odp_queue_t queue = entry->queue;
+ stats_t *stats = &entry->stats;
+ _odp_event_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
+ odp_packet_t cls_tbl[QUEUE_MULTI_MAX];
odp_packet_hdr_t *pkt_hdr;
- odp_packet_hdr_t parsed_hdr;
odp_packet_t pkt;
odp_time_t ts_val;
odp_time_t *ts = NULL;
int num_rx = 0;
- int failed = 0;
-
- if (odp_unlikely(len > QUEUE_MULTI_MAX))
- len = QUEUE_MULTI_MAX;
+ int packets = 0;
+ int num_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
+ uint32_t octets = 0;
+ const odp_proto_layer_t layer = pktio_entry->parse_layer;
+ const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
- odp_ticketlock_lock(&pktio_entry->s.rxl);
+ if (odp_unlikely(num > QUEUE_MULTI_MAX))
+ num = QUEUE_MULTI_MAX;
- qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
- nbr = queue_deq_multi(qentry, hdr_tbl, len);
+ nbr = odp_queue_deq_multi(queue, (odp_event_t *)hdr_tbl, num);
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp) {
+ if (opt.bit.ts_all || opt.bit.ts_ptp) {
ts_val = odp_time_global();
ts = &ts_val;
}
for (i = 0; i < nbr; i++) {
uint32_t pkt_len;
+ int do_ipsec_enq = 0;
- pkt = _odp_packet_from_buffer(odp_hdr_to_buf(hdr_tbl[i]));
+ pkt = packet_from_event_hdr(hdr_tbl[i]);
pkt_len = odp_packet_len(pkt);
+ pkt_hdr = packet_hdr(pkt);
-
- if (pktio_cls_enabled(pktio_entry)) {
- odp_packet_t new_pkt;
- odp_pool_t new_pool;
+ if (layer) {
uint8_t *pkt_addr;
- uint8_t buf[PACKET_PARSE_SEG_LEN];
+ uint8_t buf[PARSE_BYTES];
int ret;
uint32_t seg_len = odp_packet_seg_len(pkt);
/* Make sure there is enough data for the packet
* parser in the case of a segmented packet. */
- if (odp_unlikely(seg_len < PACKET_PARSE_SEG_LEN &&
- pkt_len > PACKET_PARSE_SEG_LEN)) {
- odp_packet_copy_to_mem(pkt, 0,
- PACKET_PARSE_SEG_LEN,
- buf);
- seg_len = PACKET_PARSE_SEG_LEN;
+ if (odp_unlikely(seg_len < PARSE_BYTES &&
+ pkt_len > seg_len)) {
+ seg_len = _ODP_MIN(pkt_len, PARSE_BYTES);
+ odp_packet_copy_to_mem(pkt, 0, seg_len, buf);
pkt_addr = buf;
} else {
pkt_addr = odp_packet_data(pkt);
}
- ret = cls_classify_packet(pktio_entry, pkt_addr,
- pkt_len, seg_len,
- &new_pool, &parsed_hdr);
- if (ret) {
- failed++;
+
+ packet_parse_reset(pkt_hdr, 1);
+ ret = _odp_packet_parse_common(pkt_hdr, pkt_addr, pkt_len,
+ seg_len, layer, opt);
+ if (ret)
+ odp_atomic_inc_u64(&stats->in_errors);
+
+ if (ret < 0) {
odp_packet_free(pkt);
continue;
}
- if (new_pool != odp_packet_pool(pkt)) {
- new_pkt = odp_packet_copy(pkt, new_pool);
- odp_packet_free(pkt);
+ if (cls_enabled) {
+ odp_pool_t new_pool;
+
+ ret = _odp_cls_classify_packet(pktio_entry, pkt_addr,
+ &new_pool, pkt_hdr);
+ if (ret < 0)
+ odp_atomic_inc_u64(&stats->in_discards);
+
+ if (ret) {
+ odp_packet_free(pkt);
+ continue;
+ }
- if (new_pkt == ODP_PACKET_INVALID) {
- failed++;
+ if (odp_unlikely(_odp_pktio_packet_to_pool(
+ &pkt, &pkt_hdr, new_pool))) {
+ odp_packet_free(pkt);
+ odp_atomic_inc_u64(&stats->in_discards);
continue;
}
- pkt = new_pkt;
}
}
- pkt_hdr = odp_packet_hdr(pkt);
- pkt_hdr->input = pktio_entry->s.handle;
+ packet_set_ts(pkt_hdr, ts);
+ pkt_hdr->input = pktio_entry->handle;
+
+ /* Try IPsec inline processing */
+ if (pktio_entry->config.inbound_ipsec &&
+ !pkt_hdr->p.flags.ip_err &&
+ odp_packet_has_ipsec(pkt)) {
+ do_ipsec_enq = !_odp_ipsec_try_inline(&pkt);
+ pkt_hdr = packet_hdr(pkt);
+ }
+
+ if (!pkt_hdr->p.flags.all.error) {
+ octets += pkt_len;
+ packets++;
+ }
+
+ if (do_ipsec_enq) {
+ if (odp_unlikely(odp_queue_enq(pkt_hdr->dst_queue,
+ odp_packet_to_event(pkt)))) {
+ odp_atomic_inc_u64(&stats->in_discards);
+ if (!pkt_hdr->p.flags.all.error) {
+ octets -= pkt_len;
+ packets--;
+ }
+ odp_packet_free(pkt);
+ }
+ } else if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ cls_tbl[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(cls_tbl, num_cls, (i + 1 == nbr));
+ } else {
+ pkts[num_rx++] = pkt;
+ }
+ }
+
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(cls_tbl, num_cls, true);
+
+ odp_atomic_add_u64(&stats->in_octets, octets);
+ odp_atomic_add_u64(&stats->in_packets, packets);
+
+ return num_rx;
+}
- if (pktio_cls_enabled(pktio_entry))
- copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
+#define OL_TX_CHKSUM_PKT(_cfg, _capa, _proto, _ovr_set, _ovr) \
+ (_capa && _proto && (_ovr_set ? _ovr : _cfg))
+
+static inline int check_proto(void *l3_hdr,
+ uint32_t l3_len,
+ odp_bool_t *l3_proto_v4,
+ uint8_t *l4_proto)
+{
+ uint8_t l3_proto_ver = _ODP_IPV4HDR_VER(*(uint8_t *)l3_hdr);
+
+ if (l3_proto_ver == _ODP_IPV4 && l3_len >= _ODP_IPV4HDR_LEN) {
+ _odp_ipv4hdr_t *ip = l3_hdr;
+ uint16_t frag_offset = odp_be_to_cpu_16(ip->frag_offset);
+
+ *l3_proto_v4 = 1;
+ if (!_ODP_IPV4HDR_IS_FRAGMENT(frag_offset))
+ *l4_proto = ip->proto;
else
- packet_parse_l2(&pkt_hdr->p, pkt_len);
+ *l4_proto = 255;
- packet_set_ts(pkt_hdr, ts);
+ return 0;
+ } else if (l3_proto_ver == _ODP_IPV6 && l3_len >= _ODP_IPV6HDR_LEN) {
+ _odp_ipv6hdr_t *ipv6 = l3_hdr;
- pktio_entry->s.stats.in_octets += pkt_len;
+ *l3_proto_v4 = 0;
+ *l4_proto = ipv6->next_hdr;
- pkts[num_rx++] = pkt;
+ /* FIXME: check that packet is not a fragment !!!
+ * Might require parsing headers spanning several segments, so
+ * not implemented yet. */
+ return 0;
}
- pktio_entry->s.stats.in_errors += failed;
- pktio_entry->s.stats.in_ucast_pkts += num_rx - failed;
+ return -1;
+}
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+static inline void loopback_fix_checksums(odp_packet_t pkt,
+ odp_pktout_config_opt_t *pktout_cfg,
+ odp_pktout_config_opt_t *pktout_capa)
+{
+ odp_bool_t l3_proto_v4 = false;
+ uint8_t l4_proto;
+ void *l3_hdr;
+ uint32_t l3_len;
+ odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt,
+ sctp_chksum_pkt;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+
+ l3_hdr = odp_packet_l3_ptr(pkt, &l3_len);
+
+ if (l3_hdr == NULL ||
+ check_proto(l3_hdr, l3_len, &l3_proto_v4, &l4_proto))
+ return;
+
+ ipv4_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.ipv4_chksum,
+ pktout_capa->bit.ipv4_chksum,
+ l3_proto_v4,
+ pkt_hdr->p.flags.l3_chksum_set,
+ pkt_hdr->p.flags.l3_chksum);
+ udp_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.udp_chksum,
+ pktout_capa->bit.udp_chksum,
+ l4_proto == _ODP_IPPROTO_UDP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+ tcp_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.tcp_chksum,
+ pktout_capa->bit.tcp_chksum,
+ l4_proto == _ODP_IPPROTO_TCP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+ sctp_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.sctp_chksum,
+ pktout_capa->bit.sctp_chksum,
+ l4_proto == _ODP_IPPROTO_SCTP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+
+ if (ipv4_chksum_pkt)
+ _odp_packet_ipv4_chksum_insert(pkt);
+
+ if (tcp_chksum_pkt)
+ _odp_packet_tcp_chksum_insert(pkt);
+
+ if (udp_chksum_pkt)
+ _odp_packet_udp_chksum_insert(pkt);
+
+ if (sctp_chksum_pkt)
+ _odp_packet_sctp_chksum_insert(pkt);
+}
- return num_rx;
+static inline uint8_t *add_data(uint8_t *data, void *src, uint32_t len)
+{
+ return (uint8_t *)memcpy(data, src, len) + len;
}
-static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkt_tbl[], int len)
+static inline odp_queue_t get_dest_queue(const pkt_loop_t *pkt_loop, odp_packet_t pkt, int index)
{
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- queue_entry_t *qentry;
+ const odp_pktin_hash_proto_t *hash = &pkt_loop->hash;
+ _odp_udphdr_t udp;
+ _odp_tcphdr_t tcp;
+ _odp_ipv4hdr_t ipv4;
+ _odp_ipv6hdr_t ipv6;
+ uint32_t off;
+ /* Space for UDP/TCP source and destination ports and IPv4/IPv6 source and destination
+ * addresses. */
+ uint8_t data[2 * sizeof(uint16_t) + 2 * 4 * sizeof(uint32_t)];
+ uint8_t *head = data;
+
+ if (hash->all_bits == 0)
+ return pkt_loop->loopqs[index % pkt_loop->num_qs].queue;
+
+ memset(data, 0, sizeof(data));
+ off = odp_packet_l4_offset(pkt);
+
+ if (off != ODP_PACKET_OFFSET_INVALID) {
+ if ((hash->proto.ipv4_udp || hash->proto.ipv6_udp) && odp_packet_has_udp(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_UDPHDR_LEN, &udp) == 0) {
+ head = add_data(head, &udp.src_port, sizeof(udp.src_port));
+ head = add_data(head, &udp.dst_port, sizeof(udp.dst_port));
+ }
+ } else if ((hash->proto.ipv4_tcp || hash->proto.ipv6_tcp) &&
+ odp_packet_has_tcp(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_TCPHDR_LEN, &tcp) == 0) {
+ head = add_data(head, &tcp.src_port, sizeof(tcp.src_port));
+ head = add_data(head, &tcp.dst_port, sizeof(tcp.dst_port));
+ }
+ }
+ }
+
+ off = odp_packet_l3_offset(pkt);
+
+ if (off != ODP_PACKET_OFFSET_INVALID) {
+ if (hash->proto.ipv4 && odp_packet_has_ipv4(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_IPV4HDR_LEN, &ipv4) == 0) {
+ head = add_data(head, &ipv4.src_addr, sizeof(ipv4.src_addr));
+ head = add_data(head, &ipv4.dst_addr, sizeof(ipv4.dst_addr));
+ }
+ } else if (hash->proto.ipv6 && odp_packet_has_ipv6(pkt)) {
+ if (odp_packet_copy_to_mem(pkt, off, _ODP_IPV6HDR_LEN, &ipv6) == 0) {
+ head = add_data(head, &ipv6.src_addr, sizeof(ipv6.src_addr));
+ head = add_data(head, &ipv6.dst_addr, sizeof(ipv6.dst_addr));
+ }
+ }
+ }
+
+ return pkt_loop->loopqs[odp_hash_crc32c(data, head - data, 0) % pkt_loop->num_qs].queue;
+}
+
+static int loopback_send(pktio_entry_t *pktio_entry, int index, const odp_packet_t pkt_tbl[],
+ int num)
+{
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+ odp_queue_t queue;
+ stats_t *stats;
int i;
int ret;
- uint32_t bytes = 0;
+ int nb_tx = 0;
+ int tx_ts_idx = 0;
+ uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
+ odp_pktout_config_opt_t *pktout_cfg = &pktio_entry->config.pktout;
+ odp_pktout_config_opt_t *pktout_capa = &pktio_entry->capa.config.pktout;
- if (odp_unlikely(len > QUEUE_MULTI_MAX))
- len = QUEUE_MULTI_MAX;
+ if (pkt_loop->num_qs == 0)
+ return 0;
- for (i = 0; i < len; ++i) {
- hdr_tbl[i] = buf_hdl_to_hdr(_odp_packet_to_buffer(pkt_tbl[i]));
- bytes += odp_packet_len(pkt_tbl[i]);
- }
+ stats = &pkt_loop->loopqs[index].stats;
- odp_ticketlock_lock(&pktio_entry->s.txl);
+ if (odp_unlikely(num > QUEUE_MULTI_MAX))
+ num = QUEUE_MULTI_MAX;
- qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
- ret = queue_enq_multi(qentry, hdr_tbl, len);
+ for (i = 0; i < num; ++i) {
+ uint32_t pkt_len = odp_packet_len(pkt_tbl[i]);
- if (ret > 0) {
- pktio_entry->s.stats.out_ucast_pkts += ret;
- pktio_entry->s.stats.out_octets += bytes;
- } else {
- ODP_DBG("queue enqueue failed %i\n", ret);
- return -1;
+ if (odp_unlikely(pkt_len > pkt_loop->mtu)) {
+ if (nb_tx == 0)
+ return -1;
+ break;
+ }
+
+ if (tx_ts_enabled && tx_ts_idx == 0) {
+ if (odp_unlikely(packet_hdr(pkt_tbl[i])->p.flags.ts_set))
+ tx_ts_idx = i + 1;
+ }
+
+ packet_subtype_set(pkt_tbl[i], ODP_EVENT_PACKET_BASIC);
+ loopback_fix_checksums(pkt_tbl[i], pktout_cfg, pktout_capa);
+ queue = get_dest_queue(pkt_loop, pkt_tbl[i], index);
+ ret = odp_queue_enq(queue, odp_packet_to_event(pkt_tbl[i]));
+
+ if (ret < 0) {
+ _ODP_DBG("queue enqueue failed %i to queue: %" PRIu64 "\n", ret,
+ odp_queue_to_u64(queue));
+ break;
+ }
+
+ nb_tx++;
+ odp_atomic_inc_u64(&stats->out_packets);
+ odp_atomic_add_u64(&stats->out_octets, pkt_len);
}
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ if (nb_tx > 0) {
+ if (odp_unlikely(tx_ts_idx) && nb_tx >= tx_ts_idx)
+ _odp_pktio_tx_ts_set(pktio_entry);
+ }
- return ret;
+ return nb_tx;
}
-static uint32_t loopback_mtu_get(pktio_entry_t *pktio_entry ODP_UNUSED)
+static uint32_t loopback_mtu_get(pktio_entry_t *pktio_entry)
{
- /* the loopback interface imposes no maximum transmit size limit */
- return INT_MAX;
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ return pkt_loop->mtu;
}
-static int loopback_mac_addr_get(pktio_entry_t *pktio_entry ODP_UNUSED,
- void *mac_addr)
+static int loopback_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ pkt_loop->mtu = maxlen_input;
+
+ return 0;
+}
+
+static int loopback_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
{
memcpy(mac_addr, pktio_loop_mac, ETH_ALEN);
+ ((uint8_t *)mac_addr)[ETH_ALEN - 1] += pkt_priv(pktio_entry)->idx;
return ETH_ALEN;
}
static int loopback_link_status(pktio_entry_t *pktio_entry ODP_UNUSED)
{
/* loopback interfaces are always up */
- return 1;
+ return ODP_PKTIO_LINK_STATUS_UP;
+}
+
+static int loopback_link_info(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_link_info_t *info)
+{
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ info->media = "virtual";
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ info->status = ODP_PKTIO_LINK_STATUS_UP;
+
+ return 0;
}
-static int loopback_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
- odp_pktio_capability_t *capa)
+static int loopback_init_capability(pktio_entry_t *pktio_entry)
{
+ odp_pktio_capability_t *capa = &pktio_entry->capa;
+ odp_queue_capability_t queue_capa;
+
+ if (odp_queue_capability(&queue_capa)) {
+ _ODP_ERR("Queue capability failed\n");
+ return -1;
+ }
+
memset(capa, 0, sizeof(odp_pktio_capability_t));
- capa->max_input_queues = 1;
- capa->max_output_queues = 1;
- capa->set_op.op.promisc_mode = 1;
+ capa->max_input_queues = ODP_PKTIN_MAX_QUEUES;
+ capa->max_output_queues = ODP_PKTOUT_MAX_QUEUES;
+ capa->set_op.op.promisc_mode = 0;
+ capa->set_op.op.maxlen = 1;
+
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = LOOP_MTU_MIN;
+ capa->maxlen.max_input = LOOP_MTU_MAX;
+ capa->maxlen.min_output = LOOP_MTU_MIN;
+ capa->maxlen.max_output = LOOP_MTU_MAX;
+
+ capa->min_input_queue_size = 1;
+ capa->max_input_queue_size = queue_capa.plain.max_size;
+ if (capa->max_input_queue_size == 0)
+ capa->max_input_queue_size = LOOP_MAX_QUEUE_SIZE;
+
+ capa->min_output_queue_size = 1;
+ capa->max_output_queue_size = queue_capa.plain.max_size;
+ if (capa->max_output_queue_size == 0)
+ capa->max_output_queue_size = LOOP_MAX_QUEUE_SIZE;
odp_pktio_config_init(&capa->config);
+ capa->config.enable_loop = 1;
capa->config.pktin.bit.ts_all = 1;
capa->config.pktin.bit.ts_ptp = 1;
+ capa->config.pktin.bit.ipv4_chksum = 1;
+ capa->config.pktin.bit.tcp_chksum = 1;
+ capa->config.pktin.bit.udp_chksum = 1;
+ capa->config.pktin.bit.sctp_chksum = 1;
+ capa->config.pktout.bit.ipv4_chksum = 1;
+ capa->config.pktout.bit.tcp_chksum = 1;
+ capa->config.pktout.bit.udp_chksum = 1;
+ capa->config.pktout.bit.sctp_chksum = 1;
+ capa->config.pktout.bit.ts_ena = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
+
+ if (odp_global_ro.disable.ipsec == 0) {
+ capa->config.inbound_ipsec = 1;
+ capa->config.outbound_ipsec = 1;
+ }
+
+ capa->config.pktout.bit.ipv4_chksum_ena =
+ capa->config.pktout.bit.ipv4_chksum;
+ capa->config.pktout.bit.udp_chksum_ena =
+ capa->config.pktout.bit.udp_chksum;
+ capa->config.pktout.bit.tcp_chksum_ena =
+ capa->config.pktout.bit.tcp_chksum;
+ capa->config.pktout.bit.sctp_chksum_ena =
+ capa->config.pktout.bit.sctp_chksum;
+
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktin_queue.counter.octets = 1;
+ capa->stats.pktin_queue.counter.packets = 1;
+ capa->stats.pktin_queue.counter.errors = 1;
+ capa->stats.pktin_queue.counter.discards = 1;
+ capa->stats.pktout_queue.counter.octets = 1;
+ capa->stats.pktout_queue.counter.packets = 1;
+ return 0;
+}
+
+static int loopback_capability(pktio_entry_t *pktio_entry, odp_pktio_capability_t *capa)
+{
+ *capa = pktio_entry->capa;
return 0;
}
-static int loopback_promisc_mode_set(pktio_entry_t *pktio_entry,
- odp_bool_t enable)
+static int loopback_promisc_mode_get(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ return 1;
+}
+
+static int loopback_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
{
- pktio_entry->s.pkt_loop.promisc = enable;
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ memset(stats, 0, sizeof(odp_pktio_stats_t));
+
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ stats_t *qs = &pkt_loop->loopqs[i].stats;
+
+ stats->in_octets += odp_atomic_load_u64(&qs->in_octets);
+ stats->in_packets += odp_atomic_load_u64(&qs->in_packets);
+ stats->in_discards += odp_atomic_load_u64(&qs->in_discards);
+ stats->in_errors += odp_atomic_load_u64(&qs->in_errors);
+ stats->out_octets += odp_atomic_load_u64(&qs->out_octets);
+ stats->out_packets += odp_atomic_load_u64(&qs->out_packets);
+ }
+
return 0;
}
-static int loopback_promisc_mode_get(pktio_entry_t *pktio_entry)
+static int loopback_stats_reset(pktio_entry_t *pktio_entry)
{
- return pktio_entry->s.pkt_loop.promisc ? 1 : 0;
+ pkt_loop_t *pkt_loop = pkt_priv(pktio_entry);
+
+ for (uint32_t i = 0; i < MAX_QUEUES; i++) {
+ stats_t *qs = &pkt_loop->loopqs[i].stats;
+
+ odp_atomic_store_u64(&qs->in_octets, 0);
+ odp_atomic_store_u64(&qs->in_packets, 0);
+ odp_atomic_store_u64(&qs->in_discards, 0);
+ odp_atomic_store_u64(&qs->in_errors, 0);
+ odp_atomic_store_u64(&qs->out_octets, 0);
+ odp_atomic_store_u64(&qs->out_packets, 0);
+ }
+
+ return 0;
}
-static int loopback_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats)
+static int loopback_pktin_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats)
{
- memcpy(stats, &pktio_entry->s.stats, sizeof(odp_pktio_stats_t));
+ stats_t *qs = &pkt_priv(pktio_entry)->loopqs[index].stats;
+
+ memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
+ pktin_stats->octets = odp_atomic_load_u64(&qs->in_octets);
+ pktin_stats->packets = odp_atomic_load_u64(&qs->in_packets);
+ pktin_stats->discards = odp_atomic_load_u64(&qs->in_discards);
+ pktin_stats->errors = odp_atomic_load_u64(&qs->in_errors);
+
return 0;
}
-static int loopback_stats_reset(pktio_entry_t *pktio_entry ODP_UNUSED)
+static int loopback_pktout_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats)
{
- memset(&pktio_entry->s.stats, 0, sizeof(odp_pktio_stats_t));
+ stats_t *qs = &pkt_priv(pktio_entry)->loopqs[index].stats;
+
+ memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
+ pktout_stats->octets = odp_atomic_load_u64(&qs->out_octets);
+ pktout_stats->packets = odp_atomic_load_u64(&qs->out_packets);
+
return 0;
}
static int loop_init_global(void)
{
- ODP_PRINT("PKTIO: initialized loop interface.\n");
+ _ODP_PRINT("PKTIO: initialized loop interface.\n");
return 0;
}
-const pktio_if_ops_t loopback_pktio_ops = {
+const pktio_if_ops_t _odp_loopback_pktio_ops = {
.name = "loop",
.print = NULL,
.init_global = loop_init_global,
@@ -257,21 +797,27 @@ const pktio_if_ops_t loopback_pktio_ops = {
.term = NULL,
.open = loopback_open,
.close = loopback_close,
- .start = NULL,
+ .start = loopback_start,
.stop = NULL,
.stats = loopback_stats,
.stats_reset = loopback_stats_reset,
+ .pktin_queue_stats = loopback_pktin_stats,
+ .pktout_queue_stats = loopback_pktout_stats,
.recv = loopback_recv,
.send = loopback_send,
- .mtu_get = loopback_mtu_get,
- .promisc_mode_set = loopback_promisc_mode_set,
+ .maxlen_get = loopback_mtu_get,
+ .maxlen_set = loopback_mtu_set,
+ .promisc_mode_set = NULL,
.promisc_mode_get = loopback_promisc_mode_get,
.mac_get = loopback_mac_addr_get,
+ .mac_set = NULL,
.link_status = loopback_link_status,
+ .link_info = loopback_link_info,
.capability = loopback_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL,
- .input_queues_config = NULL,
- .output_queues_config = NULL,
+ .input_queues_config = loopback_pktin_queue_config,
+ .output_queues_config = loopback_pktout_queue_config,
};
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
deleted file mode 100644
index ae3db34d9..000000000
--- a/platform/linux-generic/pktio/netmap.c
+++ /dev/null
@@ -1,972 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifdef ODP_NETMAP
-
-#include <odp_posix_extensions.h>
-
-#include <odp/api/plat/packet_inlines.h>
-#include <odp/api/packet.h>
-
-#include <odp_packet_io_internal.h>
-#include <odp_packet_netmap.h>
-#include <odp_packet_socket.h>
-#include <odp_debug_internal.h>
-#include <protocols/eth.h>
-
-#include <sys/ioctl.h>
-#include <poll.h>
-#include <linux/ethtool.h>
-#include <linux/sockios.h>
-#include <odp_classification_datamodel.h>
-#include <odp_classification_inlines.h>
-#include <odp_classification_internal.h>
-
-#include <inttypes.h>
-
-/* Disable netmap debug prints */
-#ifndef ND
-#define ND(_fmt, ...) do {} while (0)
-#define D(_fmt, ...) do {} while (0)
-#define RD(lps, format, ...) do {} while (0)
-#endif
-
-#define NETMAP_WITH_LIBS
-#include <net/netmap_user.h>
-
-#define NM_WAIT_TIMEOUT 10 /* netmap_wait_for_link() timeout in seconds */
-#define NM_INJECT_RETRIES 10
-
-static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
-static int netmap_stats_reset(pktio_entry_t *pktio_entry);
-
-static int netmap_do_ioctl(pktio_entry_t *pktio_entry, unsigned long cmd,
- int subcmd)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- struct ethtool_value eval;
- struct ifreq ifr;
- int err;
- int fd = pkt_nm->sockfd;
-
- memset(&ifr, 0, sizeof(ifr));
- snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s",
- pktio_entry->s.pkt_nm.if_name);
-
- switch (cmd) {
- case SIOCSIFFLAGS:
- ifr.ifr_flags = pkt_nm->if_flags & 0xffff;
- break;
- case SIOCETHTOOL:
- eval.cmd = subcmd;
- eval.data = 0;
- ifr.ifr_data = (caddr_t)&eval;
- break;
- default:
- break;
- }
- err = ioctl(fd, cmd, &ifr);
- if (err)
- goto done;
-
- switch (cmd) {
- case SIOCGIFFLAGS:
- pkt_nm->if_flags = (ifr.ifr_flags << 16) |
- (0xffff & ifr.ifr_flags);
- break;
- case SIOCETHTOOL:
- if (subcmd == ETHTOOL_GLINK)
- return eval.data;
- break;
- default:
- break;
- }
-done:
- if (err)
- ODP_ERR("ioctl err %d %lu: %s\n", err, cmd, strerror(errno));
-
- return err;
-}
-
-/**
- * Map netmap rings to pktin/pktout queues
- *
- * @param rings Array of netmap descriptor rings
- * @param num_queues Number of pktin/pktout queues
- * @param num_rings Number of matching netmap rings
- */
-static inline void map_netmap_rings(netmap_ring_t *rings,
- unsigned num_queues, unsigned num_rings)
-{
- struct netmap_ring_t *desc_ring;
- unsigned rings_per_queue;
- unsigned remainder;
- unsigned mapped_rings;
- unsigned i;
- unsigned desc_id = 0;
-
- rings_per_queue = num_rings / num_queues;
- remainder = num_rings % num_queues;
-
- if (remainder)
- ODP_DBG("WARNING: Netmap rings mapped unevenly to queues\n");
-
- for (i = 0; i < num_queues; i++) {
- desc_ring = &rings[i].s;
- if (i < remainder)
- mapped_rings = rings_per_queue + 1;
- else
- mapped_rings = rings_per_queue;
-
- desc_ring->first = desc_id;
- desc_ring->cur = desc_id;
- desc_ring->last = desc_ring->first + mapped_rings - 1;
- desc_ring->num = mapped_rings;
-
- desc_id = desc_ring->last + 1;
- }
-}
-
-static int netmap_input_queues_config(pktio_entry_t *pktio_entry,
- const odp_pktin_queue_param_t *p)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- odp_pktin_mode_t mode = pktio_entry->s.param.in_mode;
- unsigned num_queues = p->num_queues;
- odp_bool_t lockless;
-
- /* Scheduler synchronizes input queue polls. Only single thread
- * at a time polls a queue */
- if (mode == ODP_PKTIN_MODE_SCHED)
- lockless = 1;
- else
- lockless = (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE);
-
- if (p->hash_enable && num_queues > 1) {
- if (rss_conf_set_fd(pktio_entry->s.pkt_nm.sockfd,
- pktio_entry->s.pkt_nm.if_name,
- &p->hash_proto)) {
- ODP_ERR("Failed to configure input hash\n");
- return -1;
- }
- }
-
- pkt_nm->lockless_rx = lockless;
-
- return 0;
-}
-
-static int netmap_output_queues_config(pktio_entry_t *pktio_entry,
- const odp_pktout_queue_param_t *p)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
-
- pkt_nm->lockless_tx = (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE);
-
- return 0;
-}
-
-/**
- * Close netmap descriptors
- *
- * Can be reopened using netmap_start() function.
- *
- * @param pktio_entry Packet IO entry
- */
-static inline void netmap_close_descriptors(pktio_entry_t *pktio_entry)
-{
- int i, j;
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
-
- for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
- for (j = 0; j < NM_MAX_DESC; j++) {
- if (pkt_nm->rx_desc_ring[i].s.desc[j] != NULL) {
- nm_close(pkt_nm->rx_desc_ring[i].s.desc[j]);
- pkt_nm->rx_desc_ring[i].s.desc[j] = NULL;
- }
- }
- for (j = 0; j < NM_MAX_DESC; j++) {
- if (pkt_nm->tx_desc_ring[i].s.desc[j] != NULL) {
- nm_close(pkt_nm->tx_desc_ring[i].s.desc[j]);
- pkt_nm->tx_desc_ring[i].s.desc[j] = NULL;
- }
- }
- }
-
- pkt_nm->num_rx_desc_rings = 0;
- pkt_nm->num_tx_desc_rings = 0;
-}
-
-static int netmap_close(pktio_entry_t *pktio_entry)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
-
- netmap_close_descriptors(pktio_entry);
-
- if (pkt_nm->sockfd != -1 && close(pkt_nm->sockfd) != 0) {
- __odp_errno = errno;
- ODP_ERR("close(sockfd): %s\n", strerror(errno));
- return -1;
- }
- return 0;
-}
-
-static int netmap_link_status(pktio_entry_t *pktio_entry)
-{
- if (pktio_entry->s.pkt_nm.is_virtual)
- return 1;
-
- return link_status_fd(pktio_entry->s.pkt_nm.sockfd,
- pktio_entry->s.pkt_nm.if_name);
-}
-
-/**
- * Wait for netmap link to come up
- *
- * @param pktio_entry Packet IO entry
- *
- * @retval 1 link is up
- * @retval 0 link is down
- * @retval <0 on failure
- */
-static inline int netmap_wait_for_link(pktio_entry_t *pktio_entry)
-{
- int i;
- int ret;
-
- /* Wait for the link to come up */
- for (i = 0; i <= NM_WAIT_TIMEOUT; i++) {
- ret = netmap_link_status(pktio_entry);
- if (ret == -1)
- return -1;
- /* nm_open() causes the physical link to reset. When using a
- * direct attached loopback cable there may be a small delay
- * until the opposing end's interface comes back up again. In
- * this case without the additional sleep pktio validation
- * tests fail. */
- if (!pktio_entry->s.pkt_nm.is_virtual)
- sleep(1);
- if (ret == 1)
- return 1;
- }
- ODP_DBG("%s link is down\n", pktio_entry->s.pkt_nm.if_name);
- return 0;
-}
-
-/**
- * Initialize netmap capability values
- *
- * @param pktio_entry Packet IO entry
- */
-static void netmap_init_capability(pktio_entry_t *pktio_entry)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- odp_pktio_capability_t *capa = &pkt_nm->capa;
-
- memset(&pkt_nm->capa, 0, sizeof(odp_pktio_capability_t));
-
- capa->max_input_queues = PKTIO_MAX_QUEUES;
- if (pkt_nm->num_rx_rings < PKTIO_MAX_QUEUES)
- capa->max_input_queues = pkt_nm->num_rx_rings;
- if (capa->max_input_queues > NM_MAX_DESC) {
- /* Have to use a single descriptor to fetch packets from all
- * netmap rings */
- capa->max_input_queues = 1;
- ODP_DBG("Unable to store all %" PRIu32 " rx rings (max %d)\n"
- " max input queues: %u\n", pkt_nm->num_rx_rings,
- NM_MAX_DESC, capa->max_input_queues);
- }
-
- capa->max_output_queues = PKTIO_MAX_QUEUES;
- if (pkt_nm->num_tx_rings < PKTIO_MAX_QUEUES)
- capa->max_output_queues = pkt_nm->num_tx_rings;
- if (capa->max_output_queues > NM_MAX_DESC) {
- capa->max_output_queues = NM_MAX_DESC;
- ODP_DBG("Unable to store all %" PRIu32 " tx rings (max %d)\n"
- " max output queues: %u\n", pkt_nm->num_tx_rings,
- NM_MAX_DESC, capa->max_output_queues);
- }
-
- capa->set_op.op.promisc_mode = 1;
-
- odp_pktio_config_init(&capa->config);
- capa->config.pktin.bit.ts_all = 1;
- capa->config.pktin.bit.ts_ptp = 1;
-}
-
-/**
- * Open a netmap interface
- *
- * In addition to standard interfaces (with or without modified netmap drivers)
- * virtual VALE and pipe interfaces are also supported. These can be used for
- * example for testing packet IO functionality without any physical interfaces.
- *
- * To use virtual interfaces the 'netdev' device name has to begin with 'vale'
- * prefix. A valid VALE device name would be e.g. 'vale0'. Pipe device names
- * have to include also '{NN' (master) or '}NN' (slave) suffix. A valid pipe
- * master would be e.g. 'vale0{0' and a slave to the same pipe 'vale0}0'.
- *
- * Netmap requires standard interface names to begin with 'netmap:' prefix.
- * netmap_open() adds the prefix if it is missing. Virtual interfaces don't
- * require the 'netmap:' prefix.
- *
- * @param id Packet IO handle
- * @param pktio_entry Packet IO entry
- * @param netdev Packet IO device name
- * @param pool Default pool from which to allocate storage for packets
- *
- * @retval 0 on success
- * @retval <0 on failure
- */
-static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
- const char *netdev, odp_pool_t pool)
-{
- int i;
- int err;
- int sockfd;
- const char *prefix;
- uint32_t mtu;
- uint32_t buf_size;
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- struct nm_desc *desc;
- struct netmap_ring *ring;
- odp_pktin_hash_proto_t hash_proto;
- odp_pktio_stats_t cur_stats;
-
- if (disable_pktio)
- return -1;
-
- if (pool == ODP_POOL_INVALID)
- return -1;
-
- /* Init pktio entry */
- memset(pkt_nm, 0, sizeof(*pkt_nm));
- pkt_nm->sockfd = -1;
- pkt_nm->pool = pool;
-
- /* max frame len taking into account the l2-offset */
- pkt_nm->max_frame_len = CONFIG_PACKET_MAX_SEG_LEN;
-
- /* allow interface to be opened with or without the 'netmap:' prefix */
- prefix = "netmap:";
- if (strncmp(netdev, "netmap:", 7) == 0)
- netdev += 7;
- if (strncmp(netdev, "vale", 4) == 0) {
- pkt_nm->is_virtual = 1;
- prefix = "";
- }
-
- snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "%s%s", prefix,
- netdev);
- snprintf(pkt_nm->if_name, sizeof(pkt_nm->if_name), "%s", netdev);
-
- /* Dummy open here to check if netmap module is available and to read
- * capability info. */
- desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
- if (desc == NULL) {
- ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
- goto error;
- }
- pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
- pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
-
- netmap_init_capability(pktio_entry);
-
- ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
- buf_size = ring->nr_buf_size;
- nm_close(desc);
-
- for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
- odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
- odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
- }
-
- if (pkt_nm->is_virtual) {
- static unsigned mac;
-
- pkt_nm->capa.max_input_queues = 1;
- pkt_nm->capa.set_op.op.promisc_mode = 0;
- pkt_nm->mtu = buf_size;
- pktio_entry->s.stats_type = STATS_UNSUPPORTED;
- /* Set MAC address for virtual interface */
- pkt_nm->if_mac[0] = 0x2;
- pkt_nm->if_mac[5] = ++mac;
-
- return 0;
- }
-
- sockfd = socket(AF_INET, SOCK_DGRAM, 0);
- if (sockfd == -1) {
- ODP_ERR("Cannot get device control socket\n");
- goto error;
- }
- pkt_nm->sockfd = sockfd;
-
- /* Use either interface MTU (+ ethernet header length) or netmap buffer
- * size as MTU, whichever is smaller. */
- mtu = mtu_get_fd(pktio_entry->s.pkt_nm.sockfd, pkt_nm->if_name);
- if (mtu == 0) {
- ODP_ERR("Unable to read interface MTU\n");
- goto error;
- }
- mtu += _ODP_ETHHDR_LEN;
- pkt_nm->mtu = (mtu < buf_size) ? mtu : buf_size;
-
- /* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
- if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
- ODP_DBG("RSS not supported\n");
- pkt_nm->capa.max_input_queues = 1;
- }
-
- err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
- if (err)
- goto error;
- if ((pkt_nm->if_flags & IFF_UP) == 0)
- ODP_DBG("%s is down\n", pkt_nm->if_name);
-
- err = mac_addr_get_fd(sockfd, netdev, pkt_nm->if_mac);
- if (err)
- goto error;
-
- /* netmap uses only ethtool to get statistics counters */
- err = ethtool_stats_get_fd(pktio_entry->s.pkt_nm.sockfd,
- pkt_nm->if_name, &cur_stats);
- if (err) {
- ODP_ERR("netmap pktio %s does not support statistics counters\n",
- pkt_nm->if_name);
- pktio_entry->s.stats_type = STATS_UNSUPPORTED;
- } else {
- pktio_entry->s.stats_type = STATS_ETHTOOL;
- }
-
- (void)netmap_stats_reset(pktio_entry);
-
- return 0;
-
-error:
- netmap_close(pktio_entry);
- return -1;
-}
-
-static int netmap_start(pktio_entry_t *pktio_entry)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- netmap_ring_t *desc_ring;
- struct nm_desc *desc_ptr;
- unsigned i;
- unsigned j;
- unsigned num_rx_desc = 0;
- uint64_t flags;
- odp_pktin_mode_t in_mode = pktio_entry->s.param.in_mode;
- odp_pktout_mode_t out_mode = pktio_entry->s.param.out_mode;
-
- /* If no pktin/pktout queues have been configured. Configure one
- * for each direction. */
- if (!pktio_entry->s.num_in_queue &&
- in_mode != ODP_PKTIN_MODE_DISABLED) {
- odp_pktin_queue_param_t param;
-
- odp_pktin_queue_param_init(&param);
- param.num_queues = 1;
- if (odp_pktin_queue_config(pktio_entry->s.handle, &param))
- return -1;
- }
- if (!pktio_entry->s.num_out_queue &&
- out_mode == ODP_PKTOUT_MODE_DIRECT) {
- odp_pktout_queue_param_t param;
-
- odp_pktout_queue_param_init(&param);
- param.num_queues = 1;
- if (odp_pktout_queue_config(pktio_entry->s.handle, &param))
- return -1;
- }
-
- if (pkt_nm->num_rx_desc_rings == pktio_entry->s.num_in_queue &&
- pkt_nm->num_tx_desc_rings == pktio_entry->s.num_out_queue)
- return (netmap_wait_for_link(pktio_entry) == 1) ? 0 : -1;
-
- netmap_close_descriptors(pktio_entry);
-
- /* Map pktin/pktout queues to netmap rings */
- if (pktio_entry->s.num_in_queue) {
- /* In single queue case only one netmap descriptor is
- * required. */
- num_rx_desc = (pktio_entry->s.num_in_queue == 1) ? 1 :
- pkt_nm->num_rx_rings;
-
- map_netmap_rings(pkt_nm->rx_desc_ring,
- pktio_entry->s.num_in_queue, num_rx_desc);
- }
- if (pktio_entry->s.num_out_queue)
- /* Enough to map only one netmap tx ring per pktout queue */
- map_netmap_rings(pkt_nm->tx_desc_ring,
- pktio_entry->s.num_out_queue,
- pktio_entry->s.num_out_queue);
-
- /* Use nm_open() to parse netmap flags from interface name */
- desc_ptr = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
- if (desc_ptr == NULL) {
- ODP_ERR("nm_start(%s) failed\n", pkt_nm->nm_name);
- goto error;
- }
- struct nm_desc base_desc = *desc_ptr;
-
- nm_close(desc_ptr);
-
- base_desc.self = &base_desc;
- base_desc.mem = NULL;
- base_desc.req.nr_ringid = 0;
- if ((base_desc.req.nr_flags & NR_REG_MASK) == NR_REG_ALL_NIC ||
- (base_desc.req.nr_flags & NR_REG_MASK) == NR_REG_ONE_NIC) {
- base_desc.req.nr_flags &= ~NR_REG_MASK;
- if (num_rx_desc == 1)
- base_desc.req.nr_flags |= NR_REG_ALL_NIC;
- else
- base_desc.req.nr_flags |= NR_REG_ONE_NIC;
- }
-
- /* Only the first rx descriptor does mmap */
- desc_ring = pkt_nm->rx_desc_ring;
- flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL;
- desc_ring[0].s.desc[0] = nm_open(pkt_nm->nm_name, NULL, flags,
- &base_desc);
- if (desc_ring[0].s.desc[0] == NULL) {
- ODP_ERR("nm_start(%s) failed\n", pkt_nm->nm_name);
- goto error;
- }
- /* Open rest of the rx descriptors (one per netmap ring) */
- flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL | NM_OPEN_NO_MMAP;
- for (i = 0; i < pktio_entry->s.num_in_queue; i++) {
- for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
- if (i == 0 && j == 0) { /* First already opened */
- if (num_rx_desc > 1)
- continue;
- else
- break;
- }
- base_desc.req.nr_ringid = j;
- desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
- flags, &base_desc);
- if (desc_ring[i].s.desc[j] == NULL) {
- ODP_ERR("nm_start(%s) failed\n",
- pkt_nm->nm_name);
- goto error;
- }
- }
- }
- /* Open tx descriptors */
- desc_ring = pkt_nm->tx_desc_ring;
- flags = NM_OPEN_IFNAME | NM_OPEN_NO_MMAP;
-
- if ((base_desc.req.nr_flags & NR_REG_MASK) == NR_REG_ALL_NIC) {
- base_desc.req.nr_flags &= ~NR_REG_ALL_NIC;
- base_desc.req.nr_flags |= NR_REG_ONE_NIC;
- }
-
- for (i = 0; i < pktio_entry->s.num_out_queue; i++) {
- for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
- base_desc.req.nr_ringid = j;
- desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
- flags, &base_desc);
- if (desc_ring[i].s.desc[j] == NULL) {
- ODP_ERR("nm_start(%s) failed\n",
- pkt_nm->nm_name);
- goto error;
- }
- }
- }
- pkt_nm->num_rx_desc_rings = pktio_entry->s.num_in_queue;
- pkt_nm->num_tx_desc_rings = pktio_entry->s.num_out_queue;
- /* Wait for the link to come up */
- return (netmap_wait_for_link(pktio_entry) == 1) ? 0 : -1;
-
-error:
- netmap_close_descriptors(pktio_entry);
- return -1;
-}
-
-static int netmap_stop(pktio_entry_t *pktio_entry ODP_UNUSED)
-{
- return 0;
-}
-
-/**
- * Create ODP packets from netmap packets
- *
- * @param pktio_entry Packet IO entry
- * @param pkt_tbl Array for new ODP packet handles
- * @param slot_tbl Array of netmap ring slots
- * @param slot_num Number of netmap ring slots
- * @param ts Pointer to pktin timestamp
- *
- * @retval Number of created packets
- */
-static inline int netmap_pkt_to_odp(pktio_entry_t *pktio_entry,
- odp_packet_t pkt_tbl[],
- netmap_slot_t slot_tbl[], int16_t slot_num,
- odp_time_t *ts)
-{
- odp_packet_t pkt;
- odp_pool_t pool = pktio_entry->s.pkt_nm.pool;
- odp_packet_hdr_t *pkt_hdr;
- odp_packet_hdr_t parsed_hdr;
- int i;
- int num;
- int alloc_len;
-
- /* Allocate maximum sized packets */
- alloc_len = pktio_entry->s.pkt_nm.mtu;
-
- num = packet_alloc_multi(pool, alloc_len, pkt_tbl, slot_num);
-
- for (i = 0; i < num; i++) {
- netmap_slot_t slot;
- uint16_t len;
-
- slot = slot_tbl[i];
- len = slot.len;
-
- odp_prefetch(slot.buf);
-
- if (odp_unlikely(len > pktio_entry->s.pkt_nm.max_frame_len)) {
- ODP_ERR("RX: frame too big %" PRIu16 " %zu!\n", len,
- pktio_entry->s.pkt_nm.max_frame_len);
- goto fail;
- }
-
- if (odp_unlikely(len < _ODP_ETH_LEN_MIN)) {
- ODP_ERR("RX: Frame truncated: %" PRIu16 "\n", len);
- goto fail;
- }
-
- if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry,
- (const uint8_t *)slot.buf, len,
- len, &pool, &parsed_hdr))
- goto fail;
- }
-
- pkt = pkt_tbl[i];
- pkt_hdr = odp_packet_hdr(pkt);
- pull_tail(pkt_hdr, alloc_len - len);
-
- /* For now copy the data in the mbuf,
- worry about zero-copy later */
- if (odp_packet_copy_from_mem(pkt, 0, len, slot.buf) != 0)
- goto fail;
-
- pkt_hdr->input = pktio_entry->s.handle;
-
- if (pktio_cls_enabled(pktio_entry))
- copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
- else
- packet_parse_l2(&pkt_hdr->p, len);
-
- packet_set_ts(pkt_hdr, ts);
- }
-
- return i;
-
-fail:
- odp_packet_free_multi(&pkt_tbl[i], num - i);
- return i;
-}
-
-static inline int netmap_recv_desc(pktio_entry_t *pktio_entry,
- struct nm_desc *desc,
- odp_packet_t pkt_table[], int num)
-{
- struct netmap_ring *ring;
- odp_time_t ts_val;
- odp_time_t *ts = NULL;
- netmap_slot_t slot_tbl[num];
- char *buf;
- uint32_t slot_id;
- int i;
- int ring_id = desc->cur_rx_ring;
- int num_rx = 0;
- int num_rings = desc->last_rx_ring - desc->first_rx_ring + 1;
-
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp)
- ts = &ts_val;
-
- for (i = 0; i < num_rings && num_rx != num; i++) {
- if (ring_id > desc->last_rx_ring)
- ring_id = desc->first_rx_ring;
-
- ring = NETMAP_RXRING(desc->nifp, ring_id);
-
- while (!nm_ring_empty(ring) && num_rx != num) {
- slot_id = ring->cur;
- buf = NETMAP_BUF(ring, ring->slot[slot_id].buf_idx);
-
- slot_tbl[num_rx].buf = buf;
- slot_tbl[num_rx].len = ring->slot[slot_id].len;
- num_rx++;
-
- ring->cur = nm_ring_next(ring, slot_id);
- }
- ring->head = ring->cur;
- ring_id++;
- }
- desc->cur_rx_ring = ring_id;
-
- if (num_rx) {
- if (ts != NULL)
- ts_val = odp_time_global();
- return netmap_pkt_to_odp(pktio_entry, pkt_table, slot_tbl,
- num_rx, ts);
- }
- return 0;
-}
-
-static int netmap_recv(pktio_entry_t *pktio_entry, int index,
- odp_packet_t pkt_table[], int num)
-{
- struct nm_desc *desc;
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- unsigned first_desc_id = pkt_nm->rx_desc_ring[index].s.first;
- unsigned last_desc_id = pkt_nm->rx_desc_ring[index].s.last;
- unsigned desc_id;
- int num_desc = pkt_nm->rx_desc_ring[index].s.num;
- int i;
- int num_rx = 0;
- int max_fd = 0;
- fd_set empty_rings;
-
- if (odp_unlikely(pktio_entry->s.state != PKTIO_STATE_STARTED))
- return 0;
-
- FD_ZERO(&empty_rings);
-
- if (!pkt_nm->lockless_rx)
- odp_ticketlock_lock(&pkt_nm->rx_desc_ring[index].s.lock);
-
- desc_id = pkt_nm->rx_desc_ring[index].s.cur;
-
- for (i = 0; i < num_desc && num_rx != num; i++) {
- if (desc_id > last_desc_id)
- desc_id = first_desc_id;
-
- desc = pkt_nm->rx_desc_ring[index].s.desc[desc_id];
-
- num_rx += netmap_recv_desc(pktio_entry, desc,
- &pkt_table[num_rx], num - num_rx);
-
- if (num_rx != num) {
- FD_SET(desc->fd, &empty_rings);
- if (desc->fd > max_fd)
- max_fd = desc->fd;
- }
- desc_id++;
- }
- pkt_nm->rx_desc_ring[index].s.cur = desc_id;
-
- if (num_rx != num) {
- struct timeval tout = {.tv_sec = 0, .tv_usec = 0};
-
- if (select(max_fd + 1, &empty_rings, NULL, NULL, &tout) == -1)
- ODP_ERR("RX: select error\n");
- }
- if (!pkt_nm->lockless_rx)
- odp_ticketlock_unlock(&pkt_nm->rx_desc_ring[index].s.lock);
-
- return num_rx;
-}
-
-static int netmap_send(pktio_entry_t *pktio_entry, int index,
- const odp_packet_t pkt_table[], int num)
-{
- pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
- struct pollfd polld;
- struct nm_desc *desc;
- struct netmap_ring *ring;
- int i;
- int nb_tx;
- int desc_id;
- odp_packet_t pkt;
- uint32_t pkt_len;
- unsigned slot_id;
- char *buf;
-
- if (odp_unlikely(pktio_entry->s.state != PKTIO_STATE_STARTED))
- return 0;
-
- /* Only one netmap tx ring per pktout queue */
- desc_id = pkt_nm->tx_desc_ring[index].s.cur;
- desc = pkt_nm->tx_desc_ring[index].s.desc[desc_id];
- ring = NETMAP_TXRING(desc->nifp, desc->cur_tx_ring);
-
- if (!pkt_nm->lockless_tx)
- odp_ticketlock_lock(&pkt_nm->tx_desc_ring[index].s.lock);
-
- polld.fd = desc->fd;
- polld.events = POLLOUT;
-
- for (nb_tx = 0; nb_tx < num; nb_tx++) {
- pkt = pkt_table[nb_tx];
- pkt_len = _odp_packet_len(pkt);
-
- if (pkt_len > pkt_nm->mtu) {
- if (nb_tx == 0)
- __odp_errno = EMSGSIZE;
- break;
- }
- for (i = 0; i < NM_INJECT_RETRIES; i++) {
- if (nm_ring_empty(ring)) {
- poll(&polld, 1, 0);
- continue;
- }
- slot_id = ring->cur;
- ring->slot[slot_id].flags = 0;
- ring->slot[slot_id].len = pkt_len;
-
- buf = NETMAP_BUF(ring, ring->slot[slot_id].buf_idx);
-
- if (odp_packet_copy_to_mem(pkt, 0, pkt_len, buf)) {
- i = NM_INJECT_RETRIES;
- break;
- }
- ring->cur = nm_ring_next(ring, slot_id);
- ring->head = ring->cur;
- break;
- }
- if (i == NM_INJECT_RETRIES)
- break;
- }
- /* Send pending packets */
- poll(&polld, 1, 0);
-
- if (!pkt_nm->lockless_tx)
- odp_ticketlock_unlock(&pkt_nm->tx_desc_ring[index].s.lock);
-
- if (odp_unlikely(nb_tx == 0)) {
- if (__odp_errno != 0)
- return -1;
- } else {
- odp_packet_free_multi(pkt_table, nb_tx);
- }
-
- return nb_tx;
-}
-
-static int netmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
-{
- memcpy(mac_addr, pktio_entry->s.pkt_nm.if_mac, ETH_ALEN);
- return ETH_ALEN;
-}
-
-static uint32_t netmap_mtu_get(pktio_entry_t *pktio_entry)
-{
- return pktio_entry->s.pkt_nm.mtu;
-}
-
-static int netmap_promisc_mode_set(pktio_entry_t *pktio_entry,
- odp_bool_t enable)
-{
- if (pktio_entry->s.pkt_nm.is_virtual) {
- __odp_errno = ENOTSUP;
- return -1;
- }
-
- return promisc_mode_set_fd(pktio_entry->s.pkt_nm.sockfd,
- pktio_entry->s.pkt_nm.if_name, enable);
-}
-
-static int netmap_promisc_mode_get(pktio_entry_t *pktio_entry)
-{
- if (pktio_entry->s.pkt_nm.is_virtual)
- return 0;
-
- return promisc_mode_get_fd(pktio_entry->s.pkt_nm.sockfd,
- pktio_entry->s.pkt_nm.if_name);
-}
-
-static int netmap_capability(pktio_entry_t *pktio_entry,
- odp_pktio_capability_t *capa)
-{
- *capa = pktio_entry->s.pkt_nm.capa;
- return 0;
-}
-
-static int netmap_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats)
-{
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(stats, 0, sizeof(*stats));
- return 0;
- }
-
- return sock_stats_fd(pktio_entry,
- stats,
- pktio_entry->s.pkt_nm.sockfd);
-}
-
-static int netmap_stats_reset(pktio_entry_t *pktio_entry)
-{
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(&pktio_entry->s.stats, 0,
- sizeof(odp_pktio_stats_t));
- return 0;
- }
-
- return sock_stats_reset_fd(pktio_entry,
- pktio_entry->s.pkt_nm.sockfd);
-}
-
-static void netmap_print(pktio_entry_t *pktio_entry)
-{
- odp_pktin_hash_proto_t hash_proto;
-
- if (rss_conf_get_fd(pktio_entry->s.pkt_nm.sockfd,
- pktio_entry->s.pkt_nm.if_name, &hash_proto))
- rss_conf_print(&hash_proto);
-}
-
-static int netmap_init_global(void)
-{
- if (getenv("ODP_PKTIO_DISABLE_NETMAP")) {
- ODP_PRINT("PKTIO: netmap pktio skipped,"
- " enabled export ODP_PKTIO_DISABLE_NETMAP=1.\n");
- disable_pktio = 1;
- } else {
- ODP_PRINT("PKTIO: initialized netmap pktio,"
- " use export ODP_PKTIO_DISABLE_NETMAP=1 to disable.\n"
- " Netmap prefixes are netmap:eth0 or vale:eth0. Refer to"
- " Netmap documentation for usage information.\n");
- }
- return 0;
-}
-
-const pktio_if_ops_t netmap_pktio_ops = {
- .name = "netmap",
- .print = netmap_print,
- .init_global = netmap_init_global,
- .init_local = NULL,
- .term = NULL,
- .open = netmap_open,
- .close = netmap_close,
- .start = netmap_start,
- .stop = netmap_stop,
- .link_status = netmap_link_status,
- .stats = netmap_stats,
- .stats_reset = netmap_stats_reset,
- .mtu_get = netmap_mtu_get,
- .promisc_mode_set = netmap_promisc_mode_set,
- .promisc_mode_get = netmap_promisc_mode_get,
- .mac_get = netmap_mac_addr_get,
- .capability = netmap_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
- .config = NULL,
- .input_queues_config = netmap_input_queues_config,
- .output_queues_config = netmap_output_queues_config,
- .recv = netmap_recv,
- .send = netmap_send
-};
-
-#endif /* ODP_NETMAP */
diff --git a/platform/linux-generic/pktio/null.c b/platform/linux-generic/pktio/null.c
new file mode 100644
index 000000000..00c45f84a
--- /dev/null
+++ b/platform/linux-generic/pktio/null.c
@@ -0,0 +1,216 @@
+/* Copyright (c) 2018-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+
+#include <odp_packet_io_internal.h>
+
+#include <stdint.h>
+
+static int null_close(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ return 0;
+}
+
+static int null_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry ODP_UNUSED,
+ const char *devname, odp_pool_t pool ODP_UNUSED)
+{
+ if (strncmp(devname, "null:", 5) != 0)
+ return -1;
+
+ return 0;
+}
+
+static int null_recv(pktio_entry_t *pktio_entry ODP_UNUSED,
+ int index ODP_UNUSED, odp_packet_t pkt_table[] ODP_UNUSED,
+ int len ODP_UNUSED)
+{
+ return 0;
+}
+
+static int null_fd_set(pktio_entry_t *pktio_entry ODP_UNUSED,
+ int index ODP_UNUSED, fd_set *readfds ODP_UNUSED)
+{
+ return 0;
+}
+
+static int null_recv_tmo(pktio_entry_t *pktio_entry ODP_UNUSED,
+ int index ODP_UNUSED,
+ odp_packet_t pkt_table[] ODP_UNUSED,
+ int num ODP_UNUSED, uint64_t usecs)
+{
+ struct timeval timeout;
+ int maxfd = -1;
+ fd_set readfds;
+
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
+ FD_ZERO(&readfds);
+
+ select(maxfd + 1, &readfds, NULL, NULL, &timeout);
+
+ return 0;
+}
+
+static int null_recv_mq_tmo(pktio_entry_t *pktio_entry[] ODP_UNUSED,
+ int index[] ODP_UNUSED, uint32_t num_q ODP_UNUSED,
+ odp_packet_t pkt_table[] ODP_UNUSED,
+ int num ODP_UNUSED, uint32_t *from ODP_UNUSED,
+ uint64_t usecs)
+{
+ struct timeval timeout;
+ int maxfd = -1;
+ fd_set readfds;
+
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
+
+ FD_ZERO(&readfds);
+
+ select(maxfd + 1, &readfds, NULL, NULL, &timeout);
+
+ return 0;
+}
+
+static int null_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
+ const odp_packet_t pkt_table[], int num)
+{
+ odp_bool_t set_tx_ts = false;
+
+ if (_odp_pktio_tx_ts_enabled(pktio_entry)) {
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (odp_unlikely(packet_hdr(pkt_table[i])->p.flags.ts_set)) {
+ set_tx_ts = true;
+ break;
+ }
+ }
+ }
+
+ odp_packet_free_multi(pkt_table, num);
+
+ if (odp_unlikely(set_tx_ts))
+ _odp_pktio_tx_ts_set(pktio_entry);
+
+ return num;
+}
+
+#define PKTIO_NULL_MTU (64 * 1024)
+
+static uint32_t null_mtu_get(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ return PKTIO_NULL_MTU;
+}
+
+static const uint8_t null_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x05};
+
+static int null_mac_addr_get(pktio_entry_t *pktio_entry ODP_UNUSED,
+ void *mac_addr)
+{
+ memcpy(mac_addr, null_mac, ETH_ALEN);
+ return ETH_ALEN;
+}
+
+static int null_promisc_mode_get(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ /* Promisc mode disabled. Mode does not matter, as packet input does not
+ * return any packets.*/
+ return 0;
+}
+
+static int null_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
+ odp_pktio_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_pktio_capability_t));
+
+ capa->max_input_queues = ODP_PKTIN_MAX_QUEUES;
+ capa->max_output_queues = ODP_PKTOUT_MAX_QUEUES;
+ capa->set_op.op.promisc_mode = 0;
+
+ odp_pktio_config_init(&capa->config);
+ capa->config.pktin.bit.ts_all = 1;
+ capa->config.pktin.bit.ts_ptp = 1;
+
+ capa->config.pktout.bit.ts_ena = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
+
+ return 0;
+}
+
+static int null_inqueues_config(pktio_entry_t *pktio_entry ODP_UNUSED,
+ const odp_pktin_queue_param_t *p ODP_UNUSED)
+{
+ return 0;
+}
+
+static int null_outqueues_config(pktio_entry_t *pktio_entry ODP_UNUSED,
+ const odp_pktout_queue_param_t *p ODP_UNUSED)
+{
+ return 0;
+}
+
+static int null_init_global(void)
+{
+ _ODP_PRINT("PKTIO: initialized null interface.\n");
+ return 0;
+}
+
+static int null_link_status(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ return ODP_PKTIO_LINK_STATUS_UP;
+}
+
+static int null_link_info(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_link_info_t *info)
+{
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ info->media = "virtual";
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ info->status = ODP_PKTIO_LINK_STATUS_UP;
+
+ return 0;
+}
+
+const pktio_if_ops_t _odp_null_pktio_ops = {
+ .name = "null",
+ .print = NULL,
+ .init_global = null_init_global,
+ .init_local = NULL,
+ .term = NULL,
+ .open = null_open,
+ .close = null_close,
+ .start = NULL,
+ .stop = NULL,
+ .recv = null_recv,
+ .recv_tmo = null_recv_tmo,
+ .recv_mq_tmo = null_recv_mq_tmo,
+ .fd_set = null_fd_set,
+ .send = null_send,
+ .maxlen_get = null_mtu_get,
+ .promisc_mode_set = NULL,
+ .promisc_mode_get = null_promisc_mode_get,
+ .mac_get = null_mac_addr_get,
+ .capability = null_capability,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
+ .config = NULL,
+ .input_queues_config = null_inqueues_config,
+ .output_queues_config = null_outqueues_config,
+ .link_status = null_link_status,
+ .link_info = null_link_info
+};
diff --git a/platform/linux-generic/pktio/pcap.c b/platform/linux-generic/pktio/pcap.c
index e54a56f5f..78b9876d7 100644
--- a/platform/linux-generic/pktio/pcap.c
+++ b/platform/linux-generic/pktio/pcap.c
@@ -1,4 +1,5 @@
-/* Copyright (c) 2015, Linaro Limited
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -29,14 +30,26 @@
* doesn't exist it will be created, if it does exist it will
* be overwritten.
* loops the number of times to iterate through the input file, set
- * to 0 to loop indefinitely. The default value is 1.
+ * to 0 to loop indefinitely. The default value is 1. Looping is
+ * only supported in thread mode (ODP_MEM_MODEL_THREAD).
*
* The total length of the string is limited by PKTIO_NAME_LEN.
*/
#include <odp_posix_extensions.h>
-#include <odp_api.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/packet_inlines.h>
+
+#include <odp_parse_internal.h>
+#include <odp_classification_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_global_data.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
@@ -45,9 +58,34 @@
#include <errno.h>
#include <pcap/pcap.h>
#include <pcap/bpf.h>
+#include <stdint.h>
+
+typedef struct {
+ char *fname_rx; /**< name of pcap file for rx */
+ char *fname_tx; /**< name of pcap file for tx */
+ void *rx; /**< rx pcap handle */
+ void *tx; /**< tx pcap handle */
+ void *tx_dump; /**< tx pcap dumper handle */
+ odp_pool_t pool; /**< rx pool */
+ uint32_t mtu; /**< link MTU */
+ int loops; /**< number of times to loop rx pcap */
+ int loop_cnt; /**< number of loops completed */
+ odp_bool_t promisc; /**< promiscuous mode state */
+} pkt_pcap_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_pcap_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static inline pkt_pcap_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_pcap_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
-#define PKTIO_PCAP_MTU (64 * 1024)
-static const char pcap_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x04};
+#define PKTIO_PCAP_MTU_MIN (68 + _ODP_ETHHDR_LEN)
+#define PKTIO_PCAP_MTU_MAX (64 * 1024)
+
+/* Dummy MAC address used in .pcap test files, when not in promisc mode */
+static const uint8_t pcap_mac[] = {0x02, 0x00, 0x00, 0x00, 0x00, 0x02};
static int pcapif_stats_reset(pktio_entry_t *pktio_entry);
@@ -71,7 +109,7 @@ static int _pcapif_parse_devname(pkt_pcap_t *pcap, const char *devname)
} else if (strncmp(tok, "loops=", 6) == 0) {
pcap->loops = atoi(tok + 6);
if (pcap->loops < 0) {
- ODP_ERR("invalid loop count\n");
+ _ODP_ERR("invalid loop count\n");
return -1;
}
}
@@ -87,14 +125,13 @@ static int _pcapif_init_rx(pkt_pcap_t *pcap)
pcap->rx = pcap_open_offline(pcap->fname_rx, errbuf);
if (!pcap->rx) {
- ODP_ERR("failed to open pcap file %s (%s)\n",
- pcap->fname_rx, errbuf);
+ _ODP_ERR("failed to open pcap file %s (%s)\n", pcap->fname_rx, errbuf);
return -1;
}
linktype = pcap_datalink(pcap->rx);
if (linktype != DLT_EN10MB) {
- ODP_ERR("unsupported datalink type: %d\n", linktype);
+ _ODP_ERR("unsupported datalink type: %d\n", linktype);
return -1;
}
@@ -108,42 +145,76 @@ static int _pcapif_init_tx(pkt_pcap_t *pcap)
if (!tx) {
/* if there is no rx pcap_t already open for rx, a dummy
* one needs to be opened for writing the dump */
- tx = pcap_open_dead(DLT_EN10MB, PKTIO_PCAP_MTU);
+ tx = pcap_open_dead(DLT_EN10MB, PKTIO_PCAP_MTU_MAX);
if (!tx) {
- ODP_ERR("failed to open TX dump\n");
+ _ODP_ERR("failed to open TX dump\n");
return -1;
}
pcap->tx = tx;
}
- pcap->buf = malloc(PKTIO_PCAP_MTU);
- if (!pcap->buf) {
- ODP_ERR("failed to malloc temp buffer\n");
- return -1;
- }
-
pcap->tx_dump = pcap_dump_open(tx, pcap->fname_tx);
if (!pcap->tx_dump) {
- ODP_ERR("failed to open dump file %s (%s)\n",
- pcap->fname_tx, pcap_geterr(tx));
+ _ODP_ERR("failed to open dump file %s (%s)\n", pcap->fname_tx, pcap_geterr(tx));
return -1;
}
return pcap_dump_flush(pcap->tx_dump);
}
+static int pcapif_promisc_mode_set(pktio_entry_t *pktio_entry,
+ odp_bool_t enable)
+{
+ char filter_exp[64] = {0};
+ struct bpf_program bpf;
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
+
+ if (!pcap->rx) {
+ pcap->promisc = enable;
+ return 0;
+ }
+
+ if (!enable) {
+ char mac_str[18];
+
+ snprintf(mac_str, sizeof(mac_str),
+ "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+ pcap_mac[0], pcap_mac[1], pcap_mac[2],
+ pcap_mac[3], pcap_mac[4], pcap_mac[5]);
+
+ snprintf(filter_exp, sizeof(filter_exp),
+ "ether dst %s or broadcast or multicast",
+ mac_str);
+ }
+
+ if (pcap_compile(pcap->rx, &bpf, filter_exp,
+ 0, PCAP_NETMASK_UNKNOWN) != 0) {
+ _ODP_ERR("failed to compile promisc mode filter: %s\n", pcap_geterr(pcap->rx));
+ return -1;
+ }
+
+ if (pcap_setfilter(pcap->rx, &bpf) != 0) {
+ _ODP_ERR("failed to set promisc mode filter: %s\n", pcap_geterr(pcap->rx));
+ return -1;
+ }
+
+ pcap->promisc = enable;
+
+ return 0;
+}
+
static int pcapif_init(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
const char *devname, odp_pool_t pool)
{
- pkt_pcap_t *pcap = &pktio_entry->s.pkt_pcap;
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
int ret;
memset(pcap, 0, sizeof(pkt_pcap_t));
pcap->loop_cnt = 1;
pcap->loops = 1;
pcap->pool = pool;
- pcap->promisc = 1;
+ pcap->mtu = PKTIO_PCAP_MTU_MAX;
ret = _pcapif_parse_devname(pcap, devname);
@@ -158,12 +229,15 @@ static int pcapif_init(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
(void)pcapif_stats_reset(pktio_entry);
+ if (pcapif_promisc_mode_set(pktio_entry, 0))
+ ret = -1;
+
return ret;
}
static int pcapif_close(pktio_entry_t *pktio_entry)
{
- pkt_pcap_t *pcap = &pktio_entry->s.pkt_pcap;
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
if (pcap->tx_dump)
pcap_dump_close(pcap->tx_dump);
@@ -174,7 +248,6 @@ static int pcapif_close(pktio_entry_t *pktio_entry)
if (pcap->rx)
pcap_close(pcap->rx);
- free(pcap->buf);
free(pcap->fname_rx);
free(pcap->fname_tx);
@@ -185,6 +258,10 @@ static int _pcapif_reopen(pkt_pcap_t *pcap)
{
char errbuf[PCAP_ERRBUF_SIZE];
+ /* Reopen causes pcap internal failure in process mode */
+ if (odp_global_ro.init_param.mem_model == ODP_MEM_MODEL_PROCESS)
+ return 1;
+
if (pcap->loops != 0 && ++pcap->loop_cnt >= pcap->loops)
return 1;
@@ -193,8 +270,7 @@ static int _pcapif_reopen(pkt_pcap_t *pcap)
pcap->rx = pcap_open_offline(pcap->fname_rx, errbuf);
if (!pcap->rx) {
- ODP_ERR("failed to reopen pcap file %s (%s)\n",
- pcap->fname_rx, errbuf);
+ _ODP_ERR("failed to reopen pcap file %s (%s)\n", pcap->fname_rx, errbuf);
return 1;
}
@@ -202,7 +278,7 @@ static int _pcapif_reopen(pkt_pcap_t *pcap)
}
static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkts[], int len)
+ odp_packet_t pkts[], int num)
{
int i;
struct pcap_pkthdr *hdr;
@@ -210,21 +286,28 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
odp_packet_t pkt;
odp_packet_hdr_t *pkt_hdr;
uint32_t pkt_len;
- pkt_pcap_t *pcap = &pktio_entry->s.pkt_pcap;
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
odp_time_t ts_val;
odp_time_t *ts = NULL;
-
- odp_ticketlock_lock(&pktio_entry->s.rxl);
-
- if (pktio_entry->s.state != PKTIO_STATE_STARTED || !pcap->rx) {
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+ int packets = 0;
+ uint32_t octets = 0;
+ int num_pkts = 0;
+ int num_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
+ uint16_t frame_offset = pktio_entry->pktin_frame_offset;
+ const odp_proto_layer_t layer = pktio_entry->parse_layer;
+ const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
+
+ odp_ticketlock_lock(&pktio_entry->rxl);
+
+ if (odp_unlikely(!pcap->rx)) {
+ odp_ticketlock_unlock(&pktio_entry->rxl);
return 0;
}
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp)
+ if (opt.bit.ts_all || opt.bit.ts_ptp)
ts = &ts_val;
- for (i = 0; i < len; ) {
+ for (i = 0; i < num; i++) {
int ret;
ret = pcap_next_ex(pcap->rx, &hdr, &data);
@@ -238,40 +321,89 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
pkt_len = hdr->caplen;
- ret = packet_alloc_multi(pcap->pool, pkt_len, &pkt, 1);
+ ret = _odp_packet_alloc_multi(pcap->pool, pkt_len + frame_offset,
+ &pkt, 1);
if (odp_unlikely(ret != 1))
break;
if (ts != NULL)
ts_val = odp_time_global();
- pkt_hdr = odp_packet_hdr(pkt);
+ pkt_hdr = packet_hdr(pkt);
+ if (frame_offset)
+ pull_head(pkt_hdr, frame_offset);
- if (odp_packet_copy_from_mem(pkt, 0, hdr->caplen, data) != 0) {
- ODP_ERR("failed to copy packet data\n");
+ if (odp_packet_copy_from_mem(pkt, 0, pkt_len, data) != 0) {
+ _ODP_ERR("failed to copy packet data\n");
break;
}
- packet_parse_l2(&pkt_hdr->p, pkt_len);
- pktio_entry->s.stats.in_octets += pkt_hdr->frame_len;
+ if (layer) {
+ ret = _odp_packet_parse_common(pkt_hdr, data, pkt_len,
+ pkt_len, layer, opt);
+ if (ret)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
+
+ if (ret < 0) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (cls_enabled) {
+ odp_pool_t new_pool;
+
+ ret = _odp_cls_classify_packet(pktio_entry, data,
+ &new_pool, pkt_hdr);
+ if (ret < 0)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+
+ if (ret) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(_odp_pktio_packet_to_pool(
+ &pkt, &pkt_hdr, new_pool))) {
+ odp_packet_free(pkt);
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+ continue;
+ }
+ }
+ }
packet_set_ts(pkt_hdr, ts);
- pkt_hdr->input = pktio_entry->s.handle;
+ pkt_hdr->input = pktio_entry->handle;
- pkts[i] = pkt;
+ if (!pkt_hdr->p.flags.all.error) {
+ octets += pkt_len;
+ packets++;
+ }
- i++;
+ /* Enqueue packets directly to classifier destination queue */
+ if (cls_enabled) {
+ pkts[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(pkts, num_cls, (i + 1 == num));
+ } else {
+ pkts[num_pkts++] = pkt;
+ }
}
- pktio_entry->s.stats.in_ucast_pkts += i;
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(pkts, num_cls, true);
- return i;
+ pktio_entry->stats.in_octets += octets;
+ pktio_entry->stats.in_packets += packets;
+
+ odp_ticketlock_unlock(&pktio_entry->rxl);
+
+ return num_pkts;
}
static int _pcapif_dump_pkt(pkt_pcap_t *pcap, odp_packet_t pkt)
{
struct pcap_pkthdr hdr;
+ uint8_t tx_buf[PKTIO_PCAP_MTU_MAX];
if (!pcap->tx_dump)
return 0;
@@ -280,34 +412,30 @@ static int _pcapif_dump_pkt(pkt_pcap_t *pcap, odp_packet_t pkt)
hdr.len = hdr.caplen;
(void)gettimeofday(&hdr.ts, NULL);
- if (odp_packet_copy_to_mem(pkt, 0, hdr.len, pcap->buf) != 0)
+ if (odp_packet_copy_to_mem(pkt, 0, hdr.len, tx_buf) != 0)
return -1;
- pcap_dump(pcap->tx_dump, &hdr, pcap->buf);
+ pcap_dump(pcap->tx_dump, &hdr, tx_buf);
(void)pcap_dump_flush(pcap->tx_dump);
return 0;
}
static int pcapif_send_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkts[], int len)
+ const odp_packet_t pkts[], int num)
{
- pkt_pcap_t *pcap = &pktio_entry->s.pkt_pcap;
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
int i;
+ uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
- odp_ticketlock_lock(&pktio_entry->s.txl);
-
- if (pktio_entry->s.state != PKTIO_STATE_STARTED) {
- odp_ticketlock_unlock(&pktio_entry->s.txl);
- return 0;
- }
+ odp_ticketlock_lock(&pktio_entry->txl);
- for (i = 0; i < len; ++i) {
- int pkt_len = odp_packet_len(pkts[i]);
+ for (i = 0; i < num; ++i) {
+ uint32_t pkt_len = odp_packet_len(pkts[i]);
- if (pkt_len > PKTIO_PCAP_MTU) {
+ if (odp_unlikely(pkt_len > pcap->mtu)) {
if (i == 0) {
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ odp_ticketlock_unlock(&pktio_entry->txl);
return -1;
}
break;
@@ -316,20 +444,36 @@ static int pcapif_send_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
if (_pcapif_dump_pkt(pcap, pkts[i]) != 0)
break;
- pktio_entry->s.stats.out_octets += pkt_len;
+ pktio_entry->stats.out_octets += pkt_len;
+
+ if (odp_unlikely(tx_ts_enabled && packet_hdr(pkts[i])->p.flags.ts_set))
+ _odp_pktio_tx_ts_set(pktio_entry);
+
odp_packet_free(pkts[i]);
}
- pktio_entry->s.stats.out_ucast_pkts += i;
+ pktio_entry->stats.out_packets += i;
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ odp_ticketlock_unlock(&pktio_entry->txl);
return i;
}
static uint32_t pcapif_mtu_get(pktio_entry_t *pktio_entry ODP_UNUSED)
{
- return PKTIO_PCAP_MTU;
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
+
+ return pcap->mtu;
+}
+
+static int pcapif_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_pcap_t *pcap = pkt_priv(pktio_entry);
+
+ pcap->mtu = maxlen_input;
+
+ return 0;
}
static int pcapif_mac_addr_get(pktio_entry_t *pktio_entry ODP_UNUSED,
@@ -348,81 +492,79 @@ static int pcapif_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
capa->max_input_queues = 1;
capa->max_output_queues = 1;
capa->set_op.op.promisc_mode = 1;
+ capa->set_op.op.maxlen = 1;
+
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = PKTIO_PCAP_MTU_MIN;
+ capa->maxlen.max_input = PKTIO_PCAP_MTU_MAX;
+ capa->maxlen.min_output = PKTIO_PCAP_MTU_MIN;
+ capa->maxlen.max_output = PKTIO_PCAP_MTU_MAX;
odp_pktio_config_init(&capa->config);
capa->config.pktin.bit.ts_all = 1;
capa->config.pktin.bit.ts_ptp = 1;
- return 0;
-}
-static int pcapif_promisc_mode_set(pktio_entry_t *pktio_entry,
- odp_bool_t enable)
-{
- char filter_exp[64] = {0};
- struct bpf_program bpf;
- pkt_pcap_t *pcap = &pktio_entry->s.pkt_pcap;
+ capa->config.pktout.bit.ts_ena = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
- if (!pcap->rx) {
- pcap->promisc = enable;
- return 0;
- }
-
- if (!enable) {
- char mac_str[18];
-
- snprintf(mac_str, sizeof(mac_str),
- "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
- pcap_mac[0], pcap_mac[1], pcap_mac[2],
- pcap_mac[3], pcap_mac[4], pcap_mac[5]);
-
- snprintf(filter_exp, sizeof(filter_exp),
- "ether dst %s or broadcast or multicast",
- mac_str);
- }
-
- if (pcap_compile(pcap->rx, &bpf, filter_exp,
- 0, PCAP_NETMASK_UNKNOWN) != 0) {
- ODP_ERR("failed to compile promisc mode filter: %s\n",
- pcap_geterr(pcap->rx));
- return -1;
- }
-
- if (pcap_setfilter(pcap->rx, &bpf) != 0) {
- ODP_ERR("failed to set promisc mode filter: %s\n",
- pcap_geterr(pcap->rx));
- return -1;
- }
-
- pcap->promisc = enable;
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
return 0;
}
static int pcapif_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return pktio_entry->s.pkt_pcap.promisc;
+ return pkt_priv(pktio_entry)->promisc;
}
static int pcapif_stats_reset(pktio_entry_t *pktio_entry)
{
- memset(&pktio_entry->s.stats, 0, sizeof(odp_pktio_stats_t));
+ memset(&pktio_entry->stats, 0, sizeof(odp_pktio_stats_t));
return 0;
}
static int pcapif_stats(pktio_entry_t *pktio_entry,
odp_pktio_stats_t *stats)
{
- memcpy(stats, &pktio_entry->s.stats, sizeof(odp_pktio_stats_t));
+ memcpy(stats, &pktio_entry->stats, sizeof(odp_pktio_stats_t));
return 0;
}
static int pcapif_init_global(void)
{
- ODP_PRINT("PKTIO: initialized pcap interface.\n");
+ _ODP_PRINT("PKTIO: initialized pcap interface.\n");
+ return 0;
+}
+
+static int pcapif_link_status(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ return ODP_PKTIO_LINK_STATUS_UP;
+}
+
+static int pcapif_link_info(pktio_entry_t *pktio_entry ODP_UNUSED, odp_pktio_link_info_t *info)
+{
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ info->media = "virtual";
+ info->pause_rx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = ODP_PKTIO_LINK_PAUSE_OFF;
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ info->status = ODP_PKTIO_LINK_STATUS_UP;
+
return 0;
}
-const pktio_if_ops_t pcap_pktio_ops = {
+const pktio_if_ops_t _odp_pcap_pktio_ops = {
.name = "pcap",
.print = NULL,
.init_global = pcapif_init_global,
@@ -433,14 +575,19 @@ const pktio_if_ops_t pcap_pktio_ops = {
.stats_reset = pcapif_stats_reset,
.recv = pcapif_recv_pkt,
.send = pcapif_send_pkt,
- .mtu_get = pcapif_mtu_get,
+ .maxlen_get = pcapif_mtu_get,
+ .maxlen_set = pcapif_mtu_set,
.promisc_mode_set = pcapif_promisc_mode_set,
.promisc_mode_get = pcapif_promisc_mode_get,
.mac_get = pcapif_mac_addr_get,
+ .mac_set = NULL,
.capability = pcapif_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL,
.input_queues_config = NULL,
.output_queues_config = NULL,
+ .link_status = pcapif_link_status,
+ .link_info = pcapif_link_info
};
diff --git a/platform/linux-generic/pktio/pktio_common.c b/platform/linux-generic/pktio/pktio_common.c
index 611bb451a..097b8cf29 100644
--- a/platform/linux-generic/pktio/pktio_common.c
+++ b/platform/linux-generic/pktio/pktio_common.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2013-2018, Linaro Limited
* Copyright (c) 2013, Nokia Solutions and Networks
* All rights reserved.
*
@@ -6,77 +6,132 @@
*/
#include <odp_packet_io_internal.h>
-#include <odp_classification_internal.h>
#include <errno.h>
+#include <inttypes.h>
-int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
+static int sock_recv_mq_tmo_select(pktio_entry_t * const *entry,
+ const int index[],
+ uint32_t num_q, uint32_t *from,
+ odp_packet_t packets[], int num,
+ uint64_t usecs, fd_set *readfds,
+ int maxfd)
{
- int err = 0;
- odp_pktio_stats_t cur_stats;
+ struct timeval timeout;
+ uint32_t i;
+ int ret;
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(&pktio_entry->s.stats, 0,
- sizeof(odp_pktio_stats_t));
- return 0;
- }
+ for (i = 0; i < num_q; i++) {
+ ret = entry[i]->ops->recv(entry[i], index[i], packets, num);
- memset(&cur_stats, 0, sizeof(odp_pktio_stats_t));
+ if (ret > 0 && from)
+ *from = i;
- if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
- (void)ethtool_stats_get_fd(fd,
- pktio_entry->s.name,
- &cur_stats);
- } else if (pktio_entry->s.stats_type == STATS_SYSFS) {
- err = sysfs_stats(pktio_entry, &cur_stats);
- if (err != 0)
- ODP_ERR("stats error\n");
+ if (ret != 0)
+ return ret;
}
- if (err == 0)
- memcpy(&pktio_entry->s.stats, &cur_stats,
- sizeof(odp_pktio_stats_t));
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
+
+ if (select(maxfd + 1, readfds, NULL, NULL, &timeout) == 0)
+ return 0;
+
+ for (i = 0; i < num_q; i++) {
+ ret = entry[i]->ops->recv(entry[i], index[i], packets, num);
- return err;
+ if (ret > 0 && from)
+ *from = i;
+
+ if (ret != 0)
+ return ret;
+ }
+
+ return 0;
}
-int sock_stats_fd(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats,
- int fd)
+int _odp_sock_recv_mq_tmo_try_int_driven(const struct odp_pktin_queue_t queues[],
+ uint32_t num_q, uint32_t *from,
+ odp_packet_t packets[], int num,
+ uint64_t usecs, int *trial_successful)
{
- odp_pktio_stats_t cur_stats;
- int ret = 0;
+ uint32_t i;
+ pktio_entry_t *entry[num_q];
+ int index[num_q];
+ fd_set readfds;
+ int maxfd = -1;
+ int (*impl)(pktio_entry_t *entry[], int index[], uint32_t num_q,
+ odp_packet_t packets[], int num, uint32_t *from,
+ uint64_t wait_usecs) = NULL;
+ int impl_set = 0;
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
- return 0;
+ /* First, we get pktio entries and queue indices. We then see if the
+ implementation function pointers are the same. If they are the
+ same, impl will be set to non-NULL; otherwise it will be NULL. */
+
+ for (i = 0; i < num_q; i++) {
+ entry[i] = get_pktio_entry(queues[i].pktio);
+ index[i] = queues[i].index;
+ if (entry[i] == NULL) {
+ _ODP_DBG("pktio entry %" PRIuPTR " does not exist\n",
+ (uintptr_t)queues[i].pktio);
+ *trial_successful = 0;
+ return -1;
+ }
+
+ if (odp_unlikely(entry[i]->state != PKTIO_STATE_STARTED)) {
+ *trial_successful = 0;
+ return 0;
+ }
+
+ if (entry[i]->ops->recv_mq_tmo == NULL &&
+ entry[i]->ops->fd_set == NULL) {
+ *trial_successful = 0;
+ return 0;
+ }
+ if (!impl_set) {
+ impl = entry[i]->ops->recv_mq_tmo;
+ impl_set = 1;
+ } else {
+ if (impl != entry[i]->ops->recv_mq_tmo)
+ impl = NULL;
+ }
+ }
+
+ /* Check whether we can call the compatible implementation */
+ if (impl != NULL) {
+ *trial_successful = 1;
+ return impl(entry, index, num_q, packets, num, from, usecs);
+ }
+
+ /* Get file descriptor sets of devices. maxfd will be -1 if this
+ fails. */
+ FD_ZERO(&readfds);
+ for (i = 0; i < num_q; i++) {
+ if (entry[i]->ops->fd_set) {
+ int maxfd2;
+
+ maxfd2 = entry[i]->ops->fd_set(entry[i], queues[i].index, &readfds);
+ if (maxfd2 < 0) {
+ maxfd = -1;
+ break;
+ }
+ if (maxfd2 > maxfd)
+ maxfd = maxfd2;
+ } else {
+ maxfd = -1;
+ }
+ }
- memset(&cur_stats, 0, sizeof(odp_pktio_stats_t));
- if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
- (void)ethtool_stats_get_fd(fd,
- pktio_entry->s.name,
- &cur_stats);
- } else if (pktio_entry->s.stats_type == STATS_SYSFS) {
- sysfs_stats(pktio_entry, &cur_stats);
+ /* Check whether we can call the select() implementation */
+ if (maxfd >= 0) {
+ *trial_successful = 1;
+ return sock_recv_mq_tmo_select(entry, index, num_q, from,
+ packets, num, usecs,
+ &readfds, maxfd);
}
- stats->in_octets = cur_stats.in_octets -
- pktio_entry->s.stats.in_octets;
- stats->in_ucast_pkts = cur_stats.in_ucast_pkts -
- pktio_entry->s.stats.in_ucast_pkts;
- stats->in_discards = cur_stats.in_discards -
- pktio_entry->s.stats.in_discards;
- stats->in_errors = cur_stats.in_errors -
- pktio_entry->s.stats.in_errors;
- stats->in_unknown_protos = cur_stats.in_unknown_protos -
- pktio_entry->s.stats.in_unknown_protos;
-
- stats->out_octets = cur_stats.out_octets -
- pktio_entry->s.stats.out_octets;
- stats->out_ucast_pkts = cur_stats.out_ucast_pkts -
- pktio_entry->s.stats.out_ucast_pkts;
- stats->out_discards = cur_stats.out_discards -
- pktio_entry->s.stats.out_discards;
- stats->out_errors = cur_stats.out_errors -
- pktio_entry->s.stats.out_errors;
-
- return ret;
+ /* No mechanism worked. Set trial_successful to 0 so that polling will
+ be used by the main implementation. */
+ *trial_successful = 0;
+ return 0;
}
diff --git a/platform/linux-generic/pktio/ring.c b/platform/linux-generic/pktio/ring.c
deleted file mode 100644
index aeda04b26..000000000
--- a/platform/linux-generic/pktio/ring.c
+++ /dev/null
@@ -1,660 +0,0 @@
-/* Copyright (c) 2014, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Derived from FreeBSD's bufring.c
- *
- **************************************************************************
- *
- * Copyright (c) 2007,2008 Kip Macy kmacy@freebsd.org
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. The name of Kip Macy nor the names of other
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- ***************************************************************************/
-
-#include <odp_api.h>
-#include <fcntl.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdbool.h>
-#include <inttypes.h>
-#include <odp_packet_io_ring_internal.h>
-#include <odp_internal.h>
-
-static TAILQ_HEAD(, _ring) odp_ring_list;
-
-#define RING_VAL_IS_POWER_2(x) ((((x) - 1) & (x)) == 0)
-
-/*
- * the enqueue of pointers on the ring.
- */
-#define ENQUEUE_PTRS() do { \
- const uint32_t size = r->prod.size; \
- uint32_t idx = prod_head & mask; \
- if (odp_likely(idx + n < size)) { \
- for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
- r->ring[idx] = obj_table[i]; \
- r->ring[idx + 1] = obj_table[i + 1]; \
- r->ring[idx + 2] = obj_table[i + 2]; \
- r->ring[idx + 3] = obj_table[i + 3]; \
- } \
- switch (n & 0x3) { \
- case 3: \
- r->ring[idx++] = obj_table[i++]; \
- case 2: \
- r->ring[idx++] = obj_table[i++]; \
- case 1: \
- r->ring[idx++] = obj_table[i++]; \
- } \
- } else { \
- for (i = 0; idx < size; i++, idx++)\
- r->ring[idx] = obj_table[i]; \
- for (idx = 0; i < n; i++, idx++) \
- r->ring[idx] = obj_table[i]; \
- } \
-} while (0)
-
-/*
- * the actual copy of pointers on the ring to obj_table.
- */
-#define DEQUEUE_PTRS() do { \
- uint32_t idx = cons_head & mask; \
- const uint32_t size = r->cons.size; \
- if (odp_likely(idx + n < size)) { \
- for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) {\
- obj_table[i] = r->ring[idx]; \
- obj_table[i + 1] = r->ring[idx + 1]; \
- obj_table[i + 2] = r->ring[idx + 2]; \
- obj_table[i + 3] = r->ring[idx + 3]; \
- } \
- switch (n & 0x3) { \
- case 3: \
- obj_table[i++] = r->ring[idx++]; \
- case 2: \
- obj_table[i++] = r->ring[idx++]; \
- case 1: \
- obj_table[i++] = r->ring[idx++]; \
- } \
- } else { \
- for (i = 0; idx < size; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
- for (idx = 0; i < n; i++, idx++) \
- obj_table[i] = r->ring[idx]; \
- } \
-} while (0)
-
-static odp_rwlock_t qlock; /* rings tailq lock */
-
-/* init tailq_ring */
-void _ring_tailq_init(void)
-{
- TAILQ_INIT(&odp_ring_list);
- odp_rwlock_init(&qlock);
-}
-
-/* create the ring */
-_ring_t *
-_ring_create(const char *name, unsigned count, unsigned flags)
-{
- char ring_name[_RING_NAMESIZE];
- _ring_t *r;
- size_t ring_size;
- uint32_t shm_flag;
- odp_shm_t shm;
-
- if (flags & _RING_SHM_PROC)
- shm_flag = ODP_SHM_PROC | ODP_SHM_EXPORT;
- else
- shm_flag = 0;
-
- /* count must be a power of 2 */
- if (!RING_VAL_IS_POWER_2(count) || (count > _RING_SZ_MASK)) {
- ODP_ERR("Requested size is invalid, must be power of 2,"
- "and do not exceed the size limit %u\n",
- _RING_SZ_MASK);
- __odp_errno = EINVAL;
- return NULL;
- }
-
- snprintf(ring_name, sizeof(ring_name), "%s", name);
- ring_size = count * sizeof(void *) + sizeof(_ring_t);
-
- odp_rwlock_write_lock(&qlock);
- /* reserve a memory zone for this ring.*/
- shm = odp_shm_reserve(ring_name, ring_size, ODP_CACHE_LINE_SIZE,
- shm_flag);
-
- r = odp_shm_addr(shm);
-
- if (r != NULL) {
- /* init the ring structure */
- snprintf(r->name, sizeof(r->name), "%s", name);
- r->flags = flags;
- r->prod.watermark = count;
- r->prod.sp_enqueue = !!(flags & _RING_F_SP_ENQ);
- r->cons.sc_dequeue = !!(flags & _RING_F_SC_DEQ);
- r->prod.size = count;
- r->cons.size = count;
- r->prod.mask = count - 1;
- r->cons.mask = count - 1;
- r->prod.head = 0;
- r->cons.head = 0;
- r->prod.tail = 0;
- r->cons.tail = 0;
-
- if (!(flags & _RING_NO_LIST))
- TAILQ_INSERT_TAIL(&odp_ring_list, r, next);
- } else {
- __odp_errno = ENOMEM;
- ODP_ERR("Cannot reserve memory\n");
- }
-
- odp_rwlock_write_unlock(&qlock);
- return r;
-}
-
-int _ring_destroy(const char *name)
-{
- odp_shm_t shm = odp_shm_lookup(name);
-
- if (shm != ODP_SHM_INVALID) {
- _ring_t *r = odp_shm_addr(shm);
-
- odp_rwlock_write_lock(&qlock);
- if (!(r->flags & _RING_NO_LIST))
- TAILQ_REMOVE(&odp_ring_list, r, next);
- odp_rwlock_write_unlock(&qlock);
-
- return odp_shm_free(shm);
- }
- return 0;
-}
-
-/*
- * change the high water mark. If *count* is 0, water marking is
- * disabled
- */
-int _ring_set_water_mark(_ring_t *r, unsigned count)
-{
- if (count >= r->prod.size)
- return -EINVAL;
-
- /* if count is 0, disable the watermarking */
- if (count == 0)
- count = r->prod.size;
-
- r->prod.watermark = count;
- return 0;
-}
-
-/**
- * Enqueue several objects on the ring (multi-producers safe).
- */
-int ___ring_mp_do_enqueue(_ring_t *r, void * const *obj_table,
- unsigned n, enum _ring_queue_behavior behavior)
-{
- uint32_t prod_head, prod_next;
- uint32_t cons_tail, free_entries;
- const unsigned max = n;
- int success;
- unsigned i;
- uint32_t mask = r->prod.mask;
- int ret;
-
- /* move prod.head atomically */
- do {
- /* Reset n to the initial burst count */
- n = max;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = (mask + cons_tail - prod_head);
-
- /* check that we have enough room in ring */
- if (odp_unlikely(n > free_entries)) {
- if (behavior == _RING_QUEUE_FIXED)
- return -ENOBUFS;
- /* No free entry available */
- if (odp_unlikely(free_entries == 0))
- return 0;
-
- n = free_entries;
- }
-
- prod_next = prod_head + n;
- success = __atomic_compare_exchange_n(&r->prod.head,
- &prod_head,
- prod_next,
- false/*strong*/,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED);
- } while (odp_unlikely(success == 0));
-
- /* write entries in ring */
- ENQUEUE_PTRS();
-
- /* if we exceed the watermark */
- if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
- ret = (behavior == _RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | _RING_QUOT_EXCEED);
- } else {
- ret = (behavior == _RING_QUEUE_FIXED) ? 0 : n;
- }
-
- /*
- * If there are other enqueues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (odp_unlikely(r->prod.tail != prod_head))
- odp_cpu_pause();
-
- /* Release our entries and the memory they refer to */
- __atomic_thread_fence(__ATOMIC_RELEASE);
- r->prod.tail = prod_next;
- return ret;
-}
-
-/**
- * Enqueue several objects on a ring (NOT multi-producers safe).
- */
-int ___ring_sp_do_enqueue(_ring_t *r, void * const *obj_table,
- unsigned n, enum _ring_queue_behavior behavior)
-{
- uint32_t prod_head, cons_tail;
- uint32_t prod_next, free_entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
- int ret;
-
- prod_head = r->prod.head;
- cons_tail = r->cons.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * prod_head > cons_tail). So 'free_entries' is always between 0
- * and size(ring)-1. */
- free_entries = mask + cons_tail - prod_head;
-
- /* check that we have enough room in ring */
- if (odp_unlikely(n > free_entries)) {
- if (behavior == _RING_QUEUE_FIXED)
- return -ENOBUFS;
- /* No free entry available */
- if (odp_unlikely(free_entries == 0))
- return 0;
-
- n = free_entries;
- }
-
- prod_next = prod_head + n;
- r->prod.head = prod_next;
-
- /* write entries in ring */
- ENQUEUE_PTRS();
-
- /* if we exceed the watermark */
- if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
- ret = (behavior == _RING_QUEUE_FIXED) ? -EDQUOT :
- (int)(n | _RING_QUOT_EXCEED);
- } else {
- ret = (behavior == _RING_QUEUE_FIXED) ? 0 : n;
- }
-
- /* Release our entries and the memory they refer to */
- __atomic_thread_fence(__ATOMIC_RELEASE);
- r->prod.tail = prod_next;
- return ret;
-}
-
-/**
- * Dequeue several objects from a ring (multi-consumers safe).
- */
-
-int ___ring_mc_do_dequeue(_ring_t *r, void **obj_table,
- unsigned n, enum _ring_queue_behavior behavior)
-{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- const unsigned max = n;
- int success;
- unsigned i;
- uint32_t mask = r->prod.mask;
-
- /* move cons.head atomically */
- do {
- /* Restore n as it may change every loop */
- n = max;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = (prod_tail - cons_head);
-
- /* Set the actual entries for dequeue */
- if (n > entries) {
- if (behavior == _RING_QUEUE_FIXED)
- return -ENOENT;
- if (odp_unlikely(entries == 0))
- return 0;
-
- n = entries;
- }
-
- cons_next = cons_head + n;
- success = __atomic_compare_exchange_n(&r->cons.head,
- &cons_head,
- cons_next,
- false/*strong*/,
- __ATOMIC_ACQUIRE,
- __ATOMIC_RELAXED);
- } while (odp_unlikely(success == 0));
-
- /* copy in table */
- DEQUEUE_PTRS();
-
- /*
- * If there are other dequeues in progress that preceded us,
- * we need to wait for them to complete
- */
- while (odp_unlikely(r->cons.tail != cons_head))
- odp_cpu_pause();
-
- /* Release our entries and the memory they refer to */
- __atomic_thread_fence(__ATOMIC_RELEASE);
- r->cons.tail = cons_next;
-
- return behavior == _RING_QUEUE_FIXED ? 0 : n;
-}
-
-/**
- * Dequeue several objects from a ring (NOT multi-consumers safe).
- */
-int ___ring_sc_do_dequeue(_ring_t *r, void **obj_table,
- unsigned n, enum _ring_queue_behavior behavior)
-{
- uint32_t cons_head, prod_tail;
- uint32_t cons_next, entries;
- unsigned i;
- uint32_t mask = r->prod.mask;
-
- cons_head = r->cons.head;
- prod_tail = r->prod.tail;
- /* The subtraction is done between two unsigned 32bits value
- * (the result is always modulo 32 bits even if we have
- * cons_head > prod_tail). So 'entries' is always between 0
- * and size(ring)-1. */
- entries = prod_tail - cons_head;
-
- if (n > entries) {
- if (behavior == _RING_QUEUE_FIXED)
- return -ENOENT;
- if (odp_unlikely(entries == 0))
- return 0;
-
- n = entries;
- }
-
- cons_next = cons_head + n;
- r->cons.head = cons_next;
-
- /* Acquire the pointers and the memory they refer to */
- __atomic_thread_fence(__ATOMIC_ACQUIRE);
- /* copy in table */
- DEQUEUE_PTRS();
-
- r->cons.tail = cons_next;
- return behavior == _RING_QUEUE_FIXED ? 0 : n;
-}
-
-/**
- * Enqueue several objects on the ring (multi-producers safe).
- */
-int _ring_mp_enqueue_bulk(_ring_t *r, void * const *obj_table,
- unsigned n)
-{
- return ___ring_mp_do_enqueue(r, obj_table, n,
- _RING_QUEUE_FIXED);
-}
-
-/**
- * Enqueue several objects on a ring (NOT multi-producers safe).
- */
-int _ring_sp_enqueue_bulk(_ring_t *r, void * const *obj_table,
- unsigned n)
-{
- return ___ring_sp_do_enqueue(r, obj_table, n,
- _RING_QUEUE_FIXED);
-}
-
-/**
- * Dequeue several objects from a ring (multi-consumers safe).
- */
-int _ring_mc_dequeue_bulk(_ring_t *r, void **obj_table, unsigned n)
-{
- return ___ring_mc_do_dequeue(r, obj_table, n,
- _RING_QUEUE_FIXED);
-}
-
-/**
- * Dequeue several objects from a ring (NOT multi-consumers safe).
- */
-int _ring_sc_dequeue_bulk(_ring_t *r, void **obj_table, unsigned n)
-{
- return ___ring_sc_do_dequeue(r, obj_table, n,
- _RING_QUEUE_FIXED);
-}
-
-/**
- * Test if a ring is full.
- */
-int _ring_full(const _ring_t *r)
-{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
-
- return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
-}
-
-/**
- * Test if a ring is empty.
- */
-int _ring_empty(const _ring_t *r)
-{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
-
- return !!(cons_tail == prod_tail);
-}
-
-/**
- * Return the number of entries in a ring.
- */
-unsigned _ring_count(const _ring_t *r)
-{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
-
- return (prod_tail - cons_tail) & r->prod.mask;
-}
-
-/**
- * Return the number of free entries in a ring.
- */
-unsigned _ring_free_count(const _ring_t *r)
-{
- uint32_t prod_tail = r->prod.tail;
- uint32_t cons_tail = r->cons.tail;
-
- return (cons_tail - prod_tail - 1) & r->prod.mask;
-}
-
-/* dump the status of the ring on the console */
-void _ring_dump(const _ring_t *r)
-{
- ODP_DBG("ring <%s>@%p\n", r->name, r);
- ODP_DBG(" flags=%x\n", r->flags);
- ODP_DBG(" size=%" PRIu32 "\n", r->prod.size);
- ODP_DBG(" ct=%" PRIu32 "\n", r->cons.tail);
- ODP_DBG(" ch=%" PRIu32 "\n", r->cons.head);
- ODP_DBG(" pt=%" PRIu32 "\n", r->prod.tail);
- ODP_DBG(" ph=%" PRIu32 "\n", r->prod.head);
- ODP_DBG(" used=%u\n", _ring_count(r));
- ODP_DBG(" avail=%u\n", _ring_free_count(r));
- if (r->prod.watermark == r->prod.size)
- ODP_DBG(" watermark=0\n");
- else
- ODP_DBG(" watermark=%" PRIu32 "\n", r->prod.watermark);
-}
-
-/* dump the status of all rings on the console */
-void _ring_list_dump(void)
-{
- const _ring_t *mp = NULL;
-
- odp_rwlock_read_lock(&qlock);
-
- TAILQ_FOREACH(mp, &odp_ring_list, next) {
- _ring_dump(mp);
- }
-
- odp_rwlock_read_unlock(&qlock);
-}
-
-/* search a ring from its name */
-_ring_t *_ring_lookup(const char *name)
-{
- _ring_t *r;
-
- odp_rwlock_read_lock(&qlock);
- TAILQ_FOREACH(r, &odp_ring_list, next) {
- if (strncmp(name, r->name, _RING_NAMESIZE) == 0)
- break;
- }
- odp_rwlock_read_unlock(&qlock);
-
- return r;
-}
-
-/**
- * Enqueue several objects on the ring (multi-producers safe).
- */
-int _ring_mp_enqueue_burst(_ring_t *r, void * const *obj_table,
- unsigned n)
-{
- return ___ring_mp_do_enqueue(r, obj_table, n,
- _RING_QUEUE_VARIABLE);
-}
-
-/**
- * Enqueue several objects on a ring (NOT multi-producers safe).
- */
-int _ring_sp_enqueue_burst(_ring_t *r, void * const *obj_table,
- unsigned n)
-{
- return ___ring_sp_do_enqueue(r, obj_table, n,
- _RING_QUEUE_VARIABLE);
-}
-
-/**
- * Enqueue several objects on a ring.
- */
-int _ring_enqueue_burst(_ring_t *r, void * const *obj_table,
- unsigned n)
-{
- if (r->prod.sp_enqueue)
- return _ring_sp_enqueue_burst(r, obj_table, n);
- else
- return _ring_mp_enqueue_burst(r, obj_table, n);
-}
-
-/**
- * Dequeue several objects from a ring (multi-consumers safe).
- */
-int _ring_mc_dequeue_burst(_ring_t *r, void **obj_table, unsigned n)
-{
- return ___ring_mc_do_dequeue(r, obj_table, n,
- _RING_QUEUE_VARIABLE);
-}
-
-/**
- * Dequeue several objects from a ring (NOT multi-consumers safe).
- */
-int _ring_sc_dequeue_burst(_ring_t *r, void **obj_table, unsigned n)
-{
- return ___ring_sc_do_dequeue(r, obj_table, n,
- _RING_QUEUE_VARIABLE);
-}
-
-/**
- * Dequeue multiple objects from a ring up to a maximum number.
- */
-int _ring_dequeue_burst(_ring_t *r, void **obj_table, unsigned n)
-{
- if (r->cons.sc_dequeue)
- return _ring_sc_dequeue_burst(r, obj_table, n);
- else
- return _ring_mc_dequeue_burst(r, obj_table, n);
-}
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index 7d2396866..2a037e51f 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
- * Copyright (c) 2013, Nokia Solutions and Networks
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2013-2023, Nokia Solutions and Networks
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,47 +7,52 @@
#include <odp_posix_extensions.h>
+#include <odp/api/align.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_socket_common.h>
+#include <odp_parse_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_packet_io_stats.h>
+#include <odp_debug_internal.h>
+#include <odp_classification_internal.h>
+#include <odp_macros_internal.h>
+
#include <sys/socket.h>
#include <stdio.h>
#include <stdlib.h>
-#include <sys/types.h>
-#include <sys/stat.h>
#include <linux/if_packet.h>
-#include <linux/filter.h>
-#include <ctype.h>
-#include <fcntl.h>
#include <unistd.h>
-#include <bits/wordsize.h>
-#include <net/ethernet.h>
-#include <netinet/ip.h>
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <net/if.h>
-#include <inttypes.h>
-#include <poll.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <sys/syscall.h>
-#include <linux/ethtool.h>
-#include <linux/sockios.h>
-#include <odp_api.h>
-#include <odp_packet_socket.h>
-#include <odp_packet_internal.h>
-#include <odp_packet_io_internal.h>
-#include <odp_align_internal.h>
-#include <odp_debug_internal.h>
-#include <odp_classification_datamodel.h>
-#include <odp_classification_inlines.h>
-#include <odp_classification_internal.h>
-#include <odp/api/hints.h>
+typedef struct {
+ odp_ticketlock_t rx_lock ODP_ALIGNED_CACHE;
+ odp_ticketlock_t tx_lock ODP_ALIGNED_CACHE;
+ int sockfd; /**< socket descriptor */
+ odp_pool_t pool; /**< pool to alloc packets from */
+ uint32_t mtu; /**< maximum transmission unit */
+ uint32_t mtu_max; /**< maximum supported MTU value */
+ unsigned char if_mac[ETH_ALEN]; /**< IF eth mac addr */
+} pkt_sock_t;
-#include <protocols/eth.h>
-#include <protocols/ip.h>
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_sock_t),
+ "PKTIO_PRIVATE_SIZE too small");
-#define MAX_SEGS CONFIG_PACKET_MAX_SEGS
-#define PACKET_JUMBO_LEN (9 * 1024)
+static inline pkt_sock_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_sock_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
@@ -85,403 +90,28 @@ int sendmmsg(int fd, struct mmsghdr *vmessages, unsigned int vlen, int flags)
#endif
}
-
-/** Eth buffer start offset from u32-aligned address to make sure the following
- * header (e.g. IP) starts at a 32-bit aligned address.
- */
-#define ETHBUF_OFFSET (ODP_ALIGN_ROUNDUP(_ODP_ETHHDR_LEN, sizeof(uint32_t)) \
- - _ODP_ETHHDR_LEN)
-
-/** Round up buffer address to get a properly aliged eth buffer, i.e. aligned
- * so that the next header always starts at a 32bit aligned address.
- */
-#define ETHBUF_ALIGN(buf_ptr) ((uint8_t *)ODP_ALIGN_ROUNDUP_PTR((buf_ptr), \
- sizeof(uint32_t)) + ETHBUF_OFFSET)
-
-/**
- * ODP_PACKET_SOCKET_MMSG:
- * ODP_PACKET_SOCKET_MMAP:
- * ODP_PACKET_NETMAP:
- */
-int mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[])
-{
- struct ifreq ethreq;
- int ret;
-
- memset(&ethreq, 0, sizeof(ethreq));
- snprintf(ethreq.ifr_name, IF_NAMESIZE, "%s", name);
- ret = ioctl(fd, SIOCGIFHWADDR, &ethreq);
- if (ret != 0) {
- __odp_errno = errno;
- ODP_ERR("ioctl(SIOCGIFHWADDR): %s: \"%s\".\n", strerror(errno),
- ethreq.ifr_name);
- return -1;
- }
-
- memcpy(mac_dst, (unsigned char *)ethreq.ifr_ifru.ifru_hwaddr.sa_data,
- ETH_ALEN);
- return 0;
-}
-
-/*
- * ODP_PACKET_SOCKET_MMSG:
- * ODP_PACKET_SOCKET_MMAP:
- * ODP_PACKET_NETMAP:
- */
-uint32_t mtu_get_fd(int fd, const char *name)
-{
- struct ifreq ifr;
- int ret;
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
- ret = ioctl(fd, SIOCGIFMTU, &ifr);
- if (ret < 0) {
- __odp_errno = errno;
- ODP_DBG("ioctl(SIOCGIFMTU): %s: \"%s\".\n", strerror(errno),
- ifr.ifr_name);
- return 0;
- }
- return ifr.ifr_mtu;
-}
-
-/*
- * ODP_PACKET_SOCKET_MMSG:
- * ODP_PACKET_SOCKET_MMAP:
- * ODP_PACKET_NETMAP:
- */
-int promisc_mode_set_fd(int fd, const char *name, int enable)
-{
- struct ifreq ifr;
- int ret;
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
- ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
- if (ret < 0) {
- __odp_errno = errno;
- ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno),
- ifr.ifr_name);
- return -1;
- }
-
- if (enable)
- ifr.ifr_flags |= IFF_PROMISC;
- else
- ifr.ifr_flags &= ~(IFF_PROMISC);
-
- ret = ioctl(fd, SIOCSIFFLAGS, &ifr);
- if (ret < 0) {
- __odp_errno = errno;
- ODP_DBG("ioctl(SIOCSIFFLAGS): %s: \"%s\".\n", strerror(errno),
- ifr.ifr_name);
- return -1;
- }
- return 0;
-}
-
-/*
- * ODP_PACKET_SOCKET_MMSG:
- * ODP_PACKET_SOCKET_MMAP:
- * ODP_PACKET_NETMAP:
- */
-int promisc_mode_get_fd(int fd, const char *name)
-{
- struct ifreq ifr;
- int ret;
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
- ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
- if (ret < 0) {
- __odp_errno = errno;
- ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno),
- ifr.ifr_name);
- return -1;
- }
-
- return !!(ifr.ifr_flags & IFF_PROMISC);
-}
-
-int link_status_fd(int fd, const char *name)
-{
- struct ifreq ifr;
- int ret;
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
- ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
- if (ret < 0) {
- __odp_errno = errno;
- ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno),
- ifr.ifr_name);
- return -1;
- }
-
- return !!(ifr.ifr_flags & IFF_RUNNING);
-}
-
-/**
- * Get enabled hash options of a packet socket
- *
- * @param fd Socket file descriptor
- * @param name Interface name
- * @param flow_type Packet flow type
- * @param options[out] Enabled hash options
- *
- * @retval 0 on success
- * @retval <0 on failure
- */
-static inline int get_rss_hash_options(int fd, const char *name,
- uint32_t flow_type, uint64_t *options)
-{
- struct ifreq ifr;
- struct ethtool_rxnfc rsscmd;
-
- memset(&rsscmd, 0, sizeof(rsscmd));
- *options = 0;
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
-
- rsscmd.cmd = ETHTOOL_GRXFH;
- rsscmd.flow_type = flow_type;
-
- ifr.ifr_data = (caddr_t)&rsscmd;
-
- if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
- return -1;
-
- *options = rsscmd.data;
- return 0;
-}
-
-int rss_conf_get_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto)
-{
- uint64_t options;
- int rss_enabled = 0;
-
- memset(hash_proto, 0, sizeof(odp_pktin_hash_proto_t));
-
- get_rss_hash_options(fd, name, IPV4_FLOW, &options);
- if ((options & RXH_IP_SRC) && (options & RXH_IP_DST)) {
- hash_proto->proto.ipv4 = 1;
- rss_enabled++;
- }
- get_rss_hash_options(fd, name, TCP_V4_FLOW, &options);
- if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
- (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
- hash_proto->proto.ipv4_tcp = 1;
- rss_enabled++;
- }
- get_rss_hash_options(fd, name, UDP_V4_FLOW, &options);
- if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
- (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
- hash_proto->proto.ipv4_udp = 1;
- rss_enabled++;
- }
- get_rss_hash_options(fd, name, IPV6_FLOW, &options);
- if ((options & RXH_IP_SRC) && (options & RXH_IP_DST)) {
- hash_proto->proto.ipv6 = 1;
- rss_enabled++;
- }
- get_rss_hash_options(fd, name, TCP_V6_FLOW, &options);
- if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
- (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
- hash_proto->proto.ipv6_tcp = 1;
- rss_enabled++;
- }
- get_rss_hash_options(fd, name, UDP_V6_FLOW, &options);
- if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
- (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
- hash_proto->proto.ipv6_udp = 1;
- rss_enabled++;
- }
- return rss_enabled;
-}
-
-/**
- * Set hash options of a packet socket
- *
- * @param fd Socket file descriptor
- * @param name Interface name
- * @param flow_type Packet flow type
- * @param options Hash options
- *
- * @retval 0 on success
- * @retval <0 on failure
- */
-static inline int set_rss_hash(int fd, const char *name,
- uint32_t flow_type, uint64_t options)
-{
- struct ifreq ifr;
- struct ethtool_rxnfc rsscmd;
-
- memset(&rsscmd, 0, sizeof(rsscmd));
-
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
-
- rsscmd.cmd = ETHTOOL_SRXFH;
- rsscmd.flow_type = flow_type;
- rsscmd.data = options;
-
- ifr.ifr_data = (caddr_t)&rsscmd;
-
- if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
- return -1;
-
- return 0;
-}
-
-int rss_conf_set_fd(int fd, const char *name,
- const odp_pktin_hash_proto_t *hash_proto)
-{
- uint64_t options;
- odp_pktin_hash_proto_t cur_hash;
-
- /* Compare to currently set hash protocols */
- rss_conf_get_fd(fd, name, &cur_hash);
-
- if (hash_proto->proto.ipv4_udp && !cur_hash.proto.ipv4_udp) {
- options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
- if (set_rss_hash(fd, name, UDP_V4_FLOW, options))
- return -1;
- }
- if (hash_proto->proto.ipv4_tcp && !cur_hash.proto.ipv4_tcp) {
- options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
- if (set_rss_hash(fd, name, TCP_V4_FLOW, options))
- return -1;
- }
- if (hash_proto->proto.ipv6_udp && !cur_hash.proto.ipv6_udp) {
- options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
- if (set_rss_hash(fd, name, UDP_V6_FLOW, options))
- return -1;
- }
- if (hash_proto->proto.ipv6_tcp && !cur_hash.proto.ipv6_tcp) {
- options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
- if (set_rss_hash(fd, name, TCP_V6_FLOW, options))
- return -1;
- }
- if (hash_proto->proto.ipv4 && !cur_hash.proto.ipv4) {
- options = RXH_IP_SRC | RXH_IP_DST;
- if (set_rss_hash(fd, name, IPV4_FLOW, options))
- return -1;
- }
- if (hash_proto->proto.ipv6 && !cur_hash.proto.ipv6) {
- options = RXH_IP_SRC | RXH_IP_DST;
- if (set_rss_hash(fd, name, IPV6_FLOW, options))
- return -1;
- }
- return 0;
-}
-
-int rss_conf_get_supported_fd(int fd, const char *name,
- odp_pktin_hash_proto_t *hash_proto)
-{
- uint64_t options;
- int rss_supported = 0;
-
- memset(hash_proto, 0, sizeof(odp_pktin_hash_proto_t));
-
- if (!get_rss_hash_options(fd, name, IPV4_FLOW, &options)) {
- if (!set_rss_hash(fd, name, IPV4_FLOW, options)) {
- hash_proto->proto.ipv4 = 1;
- rss_supported++;
- }
- }
- if (!get_rss_hash_options(fd, name, TCP_V4_FLOW, &options)) {
- if (!set_rss_hash(fd, name, TCP_V4_FLOW, options)) {
- hash_proto->proto.ipv4_tcp = 1;
- rss_supported++;
- }
- }
- if (!get_rss_hash_options(fd, name, UDP_V4_FLOW, &options)) {
- if (!set_rss_hash(fd, name, UDP_V4_FLOW, options)) {
- hash_proto->proto.ipv4_udp = 1;
- rss_supported++;
- }
- }
- if (!get_rss_hash_options(fd, name, IPV6_FLOW, &options)) {
- if (!set_rss_hash(fd, name, IPV6_FLOW, options)) {
- hash_proto->proto.ipv6 = 1;
- rss_supported++;
- }
- }
- if (!get_rss_hash_options(fd, name, TCP_V6_FLOW, &options)) {
- if (!set_rss_hash(fd, name, TCP_V6_FLOW, options)) {
- hash_proto->proto.ipv6_tcp = 1;
- rss_supported++;
- }
- }
- if (!get_rss_hash_options(fd, name, UDP_V6_FLOW, &options)) {
- if (!set_rss_hash(fd, name, UDP_V6_FLOW, options)) {
- hash_proto->proto.ipv6_udp = 1;
- rss_supported++;
- }
- }
- return rss_supported;
-}
-
-void rss_conf_print(const odp_pktin_hash_proto_t *hash_proto)
-{ int max_len = 512;
- char str[max_len];
- int len = 0;
- int n = max_len - 1;
-
- len += snprintf(&str[len], n - len, " rss conf\n");
-
- if (hash_proto->proto.ipv4)
- len += snprintf(&str[len], n - len,
- " IPV4\n");
- if (hash_proto->proto.ipv4_tcp)
- len += snprintf(&str[len], n - len,
- " IPV4 TCP\n");
- if (hash_proto->proto.ipv4_udp)
- len += snprintf(&str[len], n - len,
- " IPV4 UDP\n");
- if (hash_proto->proto.ipv6)
- len += snprintf(&str[len], n - len,
- " IPV6\n");
- if (hash_proto->proto.ipv6_tcp)
- len += snprintf(&str[len], n - len,
- " IPV6 TCP\n");
- if (hash_proto->proto.ipv6_udp)
- len += snprintf(&str[len], n - len,
- " IPV6 UDP\n");
- str[len] = '\0';
-
- ODP_PRINT("%s\n", str);
-}
-
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_close(pktio_entry_t *pktio_entry)
{
- pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
+
if (pkt_sock->sockfd != -1 && close(pkt_sock->sockfd) != 0) {
- __odp_errno = errno;
- ODP_ERR("close(sockfd): %s\n", strerror(errno));
+ _ODP_ERR("close(sockfd): %s\n", strerror(errno));
return -1;
}
- odp_shm_free(pkt_sock->shm);
-
return 0;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
odp_pool_t pool)
{
int sockfd;
int err;
- int i;
unsigned int if_idx;
struct ifreq ethreq;
struct sockaddr_ll sa_ll;
char shm_name[ODP_SHM_NAME_LEN];
- pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
- uint8_t *addr;
- odp_pktio_stats_t cur_stats;
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
/* Init pktio entry */
memset(pkt_sock, 0, sizeof(*pkt_sock));
@@ -494,22 +124,9 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
snprintf(shm_name, ODP_SHM_NAME_LEN, "%s-%s", "pktio", netdev);
shm_name[ODP_SHM_NAME_LEN - 1] = '\0';
- pkt_sock->shm = odp_shm_reserve(shm_name, PACKET_JUMBO_LEN,
- PACKET_JUMBO_LEN *
- ODP_PACKET_SOCKET_MAX_BURST_RX, 0);
- if (pkt_sock->shm == ODP_SHM_INVALID)
- return -1;
-
- addr = odp_shm_addr(pkt_sock->shm);
- for (i = 0; i < ODP_PACKET_SOCKET_MAX_BURST_RX; i++) {
- pkt_sock->cache_ptr[i] = addr;
- addr += PACKET_JUMBO_LEN;
- }
-
sockfd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (sockfd == -1) {
- __odp_errno = errno;
- ODP_ERR("socket(): %s\n", strerror(errno));
+ _ODP_ERR("socket(): %s\n", strerror(errno));
goto error;
}
pkt_sock->sockfd = sockfd;
@@ -519,20 +136,21 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
snprintf(ethreq.ifr_name, IF_NAMESIZE, "%s", netdev);
err = ioctl(sockfd, SIOCGIFINDEX, &ethreq);
if (err != 0) {
- __odp_errno = errno;
- ODP_ERR("ioctl(SIOCGIFINDEX): %s: \"%s\".\n", strerror(errno),
- ethreq.ifr_name);
+ _ODP_ERR("ioctl(SIOCGIFINDEX): %s: \"%s\".\n", strerror(errno), ethreq.ifr_name);
goto error;
}
if_idx = ethreq.ifr_ifindex;
- err = mac_addr_get_fd(sockfd, netdev, pkt_sock->if_mac);
+ err = _odp_mac_addr_get_fd(sockfd, netdev, pkt_sock->if_mac);
if (err != 0)
goto error;
- pkt_sock->mtu = mtu_get_fd(sockfd, netdev);
+ pkt_sock->mtu = _odp_mtu_get_fd(sockfd, netdev);
if (!pkt_sock->mtu)
goto error;
+ pkt_sock->mtu_max = _ODP_SOCKET_MTU_MAX;
+ if (pkt_sock->mtu > pkt_sock->mtu_max)
+ pkt_sock->mtu_max = pkt_sock->mtu;
/* bind socket to if */
memset(&sa_ll, 0, sizeof(sa_ll));
@@ -540,31 +158,22 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
sa_ll.sll_ifindex = if_idx;
sa_ll.sll_protocol = htons(ETH_P_ALL);
if (bind(sockfd, (struct sockaddr *)&sa_ll, sizeof(sa_ll)) < 0) {
- __odp_errno = errno;
- ODP_ERR("bind(to IF): %s\n", strerror(errno));
+ _ODP_ERR("bind(to IF): %s\n", strerror(errno));
goto error;
}
- err = ethtool_stats_get_fd(pktio_entry->s.pkt_sock.sockfd,
- pktio_entry->s.name,
- &cur_stats);
- if (err != 0) {
- err = sysfs_stats(pktio_entry, &cur_stats);
- if (err != 0) {
- pktio_entry->s.stats_type = STATS_UNSUPPORTED;
- ODP_DBG("pktio: %s unsupported stats\n",
- pktio_entry->s.name);
- } else {
- pktio_entry->s.stats_type = STATS_SYSFS;
- }
- } else {
- pktio_entry->s.stats_type = STATS_ETHTOOL;
- }
+ pktio_entry->stats_type = _odp_sock_stats_type_fd(pktio_entry,
+ pkt_sock->sockfd);
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED)
+ _ODP_DBG("pktio: %s unsupported stats\n", pktio_entry->name);
err = sock_stats_reset(pktio_entry);
if (err != 0)
goto error;
+ odp_ticketlock_init(&pkt_sock->rx_lock);
+ odp_ticketlock_init(&pkt_sock->tx_lock);
+
return 0;
error:
@@ -573,9 +182,6 @@ error:
return -1;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_mmsg_open(odp_pktio_t id ODP_UNUSED,
pktio_entry_t *pktio_entry,
const char *devname, odp_pool_t pool)
@@ -585,241 +191,310 @@ static int sock_mmsg_open(odp_pktio_t id ODP_UNUSED,
return sock_setup_pkt(pktio_entry, devname, pool);
}
-static uint32_t _rx_pkt_to_iovec(odp_packet_t pkt,
- struct iovec iovecs[MAX_SEGS])
+static inline uint32_t _rx_pkt_to_iovec(odp_packet_t pkt, struct iovec *iovecs)
{
- odp_packet_seg_t seg = odp_packet_first_seg(pkt);
+ odp_packet_seg_t seg;
uint32_t seg_count = odp_packet_num_segs(pkt);
- uint32_t seg_id = 0;
- uint32_t iov_count = 0;
- uint8_t *ptr;
- uint32_t seglen;
-
- for (seg_id = 0; seg_id < seg_count; ++seg_id) {
- ptr = odp_packet_seg_data(pkt, seg);
- seglen = odp_packet_seg_data_len(pkt, seg);
-
- if (ptr) {
- iovecs[iov_count].iov_base = ptr;
- iovecs[iov_count].iov_len = seglen;
- iov_count++;
- }
- seg = odp_packet_next_seg(pkt, seg);
+ uint32_t i;
+
+ if (odp_likely(seg_count == 1)) {
+ iovecs[0].iov_base = odp_packet_data(pkt);
+ iovecs[0].iov_len = odp_packet_len(pkt);
+ return 1;
}
- return iov_count;
+ seg = odp_packet_first_seg(pkt);
+
+ for (i = 0; i < seg_count; i++) {
+ iovecs[i].iov_base = odp_packet_seg_data(pkt, seg);
+ iovecs[i].iov_len = odp_packet_seg_data_len(pkt, seg);
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+ return i;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkt_table[], int len)
+ odp_packet_t pkt_table[], int num)
{
- pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
+ odp_pool_t pool = pkt_sock->pool;
odp_time_t ts_val;
odp_time_t *ts = NULL;
const int sockfd = pkt_sock->sockfd;
- int msgvec_len;
- struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_RX];
+ struct mmsghdr msgvec[num];
+ struct iovec iovecs[num][PKT_MAX_SEGS];
int nb_rx = 0;
+ int nb_cls = 0;
+ int nb_pkts;
int recv_msgs;
- uint8_t **recv_cache;
int i;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
+ uint16_t frame_offset = pktio_entry->pktin_frame_offset;
+ uint32_t alloc_len = pkt_sock->mtu + frame_offset;
+ const odp_proto_layer_t layer = pktio_entry->parse_layer;
+ const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
- if (odp_unlikely(len > ODP_PACKET_SOCKET_MAX_BURST_RX))
- return -1;
+ memset(msgvec, 0, sizeof(msgvec));
- odp_ticketlock_lock(&pktio_entry->s.rxl);
+ nb_pkts = _odp_packet_alloc_multi(pool, alloc_len, pkt_table, num);
+ for (i = 0; i < nb_pkts; i++) {
+ if (frame_offset)
+ pull_head(packet_hdr(pkt_table[i]), frame_offset);
+ msgvec[i].msg_hdr.msg_iovlen =
+ _rx_pkt_to_iovec(pkt_table[i], iovecs[i]);
+ msgvec[i].msg_hdr.msg_iov = iovecs[i];
+ }
+
+ odp_ticketlock_lock(&pkt_sock->rx_lock);
+ recv_msgs = recvmmsg(sockfd, msgvec, nb_pkts, MSG_DONTWAIT, NULL);
+ odp_ticketlock_unlock(&pkt_sock->rx_lock);
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp)
+ if (opt.bit.ts_all || opt.bit.ts_ptp) {
+ ts_val = odp_time_global();
ts = &ts_val;
+ }
- memset(msgvec, 0, sizeof(msgvec));
- recv_cache = pkt_sock->cache_ptr;
+ for (i = 0; i < recv_msgs; i++) {
+ void *base = msgvec[i].msg_hdr.msg_iov->iov_base;
+ struct ethhdr *eth_hdr = base;
+ odp_packet_t pkt = pkt_table[i];
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint16_t pkt_len = msgvec[i].msg_len;
+ int ret;
- if (pktio_cls_enabled(pktio_entry)) {
- struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_RX];
+ if (odp_unlikely(msgvec[i].msg_hdr.msg_flags & MSG_TRUNC)) {
+ odp_packet_free(pkt);
+ _ODP_DBG("dropped truncated packet\n");
+ continue;
+ }
- for (i = 0; i < (int)len; i++) {
- msgvec[i].msg_hdr.msg_iovlen = 1;
- iovecs[i].iov_base = recv_cache[i];
- iovecs[i].iov_len = PACKET_JUMBO_LEN;
- msgvec[i].msg_hdr.msg_iov = &iovecs[i];
+ ret = odp_packet_trunc_tail(&pkt, odp_packet_len(pkt) - pkt_len,
+ NULL, NULL);
+ if (ret < 0) {
+ _ODP_ERR("trunc_tail failed");
+ odp_packet_free(pkt);
+ continue;
}
- msgvec_len = i;
-
- recv_msgs = recvmmsg(sockfd, msgvec, msgvec_len,
- MSG_DONTWAIT, NULL);
-
- if (ts != NULL)
- ts_val = odp_time_global();
-
- for (i = 0; i < recv_msgs; i++) {
- odp_packet_hdr_t *pkt_hdr;
- odp_packet_t pkt;
- odp_pool_t pool = pkt_sock->pool;
- odp_packet_hdr_t parsed_hdr;
- void *base = msgvec[i].msg_hdr.msg_iov->iov_base;
- struct ethhdr *eth_hdr = base;
- uint16_t pkt_len = msgvec[i].msg_len;
- int num;
-
- /* Don't receive packets sent by ourselves */
- if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac,
- eth_hdr->h_source)))
- continue;
- if (cls_classify_packet(pktio_entry, base, pkt_len,
- pkt_len, &pool, &parsed_hdr))
- continue;
+ if (layer) {
+ uint8_t buf[PARSE_BYTES];
+ uint16_t seg_len = msgvec[i].msg_hdr.msg_iov->iov_len;
- num = packet_alloc_multi(pool, pkt_len, &pkt, 1);
- if (num != 1)
- continue;
+ /* Make sure there is enough data for the packet
+ * parser in the case of a segmented packet. */
+ if (odp_unlikely(seg_len < PARSE_BYTES && pkt_len > seg_len)) {
+ seg_len = _ODP_MIN(pkt_len, PARSE_BYTES);
+ odp_packet_copy_to_mem(pkt, 0, seg_len, buf);
+ base = buf;
+ }
- pkt_hdr = odp_packet_hdr(pkt);
+ ret = _odp_packet_parse_common(pkt_hdr, base, pkt_len,
+ seg_len, layer, opt);
+ if (ret)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
- if (odp_packet_copy_from_mem(pkt, 0, pkt_len,
- base) != 0) {
+ if (ret < 0) {
odp_packet_free(pkt);
continue;
}
- pkt_hdr->input = pktio_entry->s.handle;
- copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
- packet_set_ts(pkt_hdr, ts);
+ if (cls_enabled) {
+ odp_pool_t new_pool;
+
+ ret = _odp_cls_classify_packet(pktio_entry, base,
+ &new_pool, pkt_hdr);
+ if (ret < 0)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+
+ if (ret) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(_odp_pktio_packet_to_pool(
+ &pkt, &pkt_hdr, new_pool))) {
+ odp_packet_free(pkt);
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+ continue;
+ }
+ }
+ }
+
+ /* Don't receive packets sent by ourselves */
+ if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac,
+ eth_hdr->h_source))) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ pkt_hdr->input = pktio_entry->handle;
+ packet_set_ts(pkt_hdr, ts);
+
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkt_table[nb_cls++] = pkt;
+ nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == recv_msgs));
+ } else {
pkt_table[nb_rx++] = pkt;
}
- } else {
- struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_RX]
- [MAX_SEGS];
+ }
- for (i = 0; i < (int)len; i++) {
- int num;
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(nb_cls))
+ _odp_cls_enq(pkt_table, nb_cls, true);
- num = packet_alloc_multi(pkt_sock->pool, pkt_sock->mtu,
- &pkt_table[i], 1);
+ /* Free unused pkt buffers */
+ if (i < nb_pkts)
+ odp_packet_free_multi(&pkt_table[i], nb_pkts - i);
- if (odp_unlikely(num != 1)) {
- pkt_table[i] = ODP_PACKET_INVALID;
- break;
- }
+ return nb_rx;
+}
- msgvec[i].msg_hdr.msg_iovlen =
- _rx_pkt_to_iovec(pkt_table[i], iovecs[i]);
+static int sock_fd_set(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
+ fd_set *readfds)
+{
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
+ const int sockfd = pkt_sock->sockfd;
- msgvec[i].msg_hdr.msg_iov = iovecs[i];
- }
+ FD_SET(sockfd, readfds);
+ return sockfd;
+}
- /* number of successfully allocated pkt buffers */
- msgvec_len = i;
+static int sock_recv_tmo(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num, uint64_t usecs)
+{
+ struct timeval timeout;
+ int ret;
+ int maxfd;
+ fd_set readfds;
- recv_msgs = recvmmsg(sockfd, msgvec, msgvec_len,
- MSG_DONTWAIT, NULL);
+ ret = sock_mmsg_recv(pktio_entry, index, pkt_table, num);
+ if (ret != 0)
+ return ret;
- if (ts != NULL)
- ts_val = odp_time_global();
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
- for (i = 0; i < recv_msgs; i++) {
- void *base = msgvec[i].msg_hdr.msg_iov->iov_base;
- struct ethhdr *eth_hdr = base;
- odp_packet_hdr_t *pkt_hdr;
- odp_packet_t pkt;
- int ret;
+ FD_ZERO(&readfds);
+ maxfd = sock_fd_set(pktio_entry, index, &readfds);
- pkt = pkt_table[i];
+ while (1) {
+ ret = select(maxfd + 1, &readfds, NULL, NULL, &timeout);
+ if (ret <= 0)
+ return 0;
- /* Don't receive packets sent by ourselves */
- if (odp_unlikely(ethaddrs_equal(pkt_sock->if_mac,
- eth_hdr->h_source))) {
- odp_packet_free(pkt);
- continue;
- }
+ ret = sock_mmsg_recv(pktio_entry, index, pkt_table, num);
+ if (odp_likely(ret))
+ return ret;
- /* Parse and set packet header data */
- ret = odp_packet_trunc_tail(&pkt, odp_packet_len(pkt) -
- msgvec[i].msg_len,
- NULL, NULL);
- if (ret < 0) {
- ODP_ERR("trunk_tail failed");
- odp_packet_free(pkt);
- continue;
- }
+ /* If no packets, continue wait until timeout expires */
+ }
+}
- pkt_hdr = odp_packet_hdr(pkt);
- packet_parse_l2(&pkt_hdr->p, pkt_hdr->frame_len);
- packet_set_ts(pkt_hdr, ts);
- pkt_hdr->input = pktio_entry->s.handle;
+static int sock_recv_mq_tmo(pktio_entry_t *pktio_entry[], int index[],
+ uint32_t num_q, odp_packet_t pkt_table[], int num,
+ uint32_t *from, uint64_t usecs)
+{
+ struct timeval timeout;
+ uint32_t i;
+ int ret;
+ int maxfd = -1, maxfd2;
+ fd_set readfds;
- pkt_table[nb_rx] = pkt;
- nb_rx++;
- }
+ for (i = 0; i < num_q; i++) {
+ ret = sock_mmsg_recv(pktio_entry[i], index[i], pkt_table, num);
+
+ if (ret > 0 && from)
+ *from = i;
- /* Free unused pkt buffers */
- for (; i < msgvec_len; i++)
- odp_packet_free(pkt_table[i]);
+ if (ret != 0)
+ return ret;
}
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+ FD_ZERO(&readfds);
- return nb_rx;
+ for (i = 0; i < num_q; i++) {
+ maxfd2 = sock_fd_set(pktio_entry[i], index[i], &readfds);
+ if (maxfd2 > maxfd)
+ maxfd = maxfd2;
+ }
+
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
+
+ while (1) {
+ ret = select(maxfd + 1, &readfds, NULL, NULL, &timeout);
+ if (ret <= 0)
+ return ret;
+
+ for (i = 0; i < num_q; i++) {
+ ret = sock_mmsg_recv(pktio_entry[i], index[i],
+ pkt_table, num);
+
+ if (ret > 0 && from)
+ *from = i;
+
+ if (ret)
+ return ret;
+ }
+
+ /* If no packets, continue wait until timeout expires */
+ }
}
-static uint32_t _tx_pkt_to_iovec(odp_packet_t pkt,
- struct iovec iovecs[MAX_SEGS])
+static inline uint32_t _tx_pkt_to_iovec(odp_packet_t pkt, struct iovec *iovecs)
{
- uint32_t pkt_len = odp_packet_len(pkt);
- uint32_t offset = odp_packet_l2_offset(pkt);
- uint32_t iov_count = 0;
-
- while (offset < pkt_len) {
- uint32_t seglen;
-
- iovecs[iov_count].iov_base = odp_packet_offset(pkt, offset,
- &seglen, NULL);
- iovecs[iov_count].iov_len = seglen;
- iov_count++;
- offset += seglen;
+ odp_packet_seg_t seg;
+ int seg_count = odp_packet_num_segs(pkt);
+ int i;
+
+ if (odp_likely(seg_count == 1)) {
+ iovecs[0].iov_base = odp_packet_data(pkt);
+ iovecs[0].iov_len = odp_packet_len(pkt);
+ return 1;
}
- return iov_count;
+
+ seg = odp_packet_first_seg(pkt);
+ for (i = 0; i < seg_count; i++) {
+ iovecs[i].iov_base = odp_packet_seg_data(pkt, seg);
+ iovecs[i].iov_len = odp_packet_seg_data_len(pkt, seg);
+ seg = odp_packet_next_seg(pkt, seg);
+ }
+ return i;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_mmsg_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkt_table[], int len)
+ const odp_packet_t pkt_table[], int num)
{
- pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
- struct mmsghdr msgvec[ODP_PACKET_SOCKET_MAX_BURST_TX];
- struct iovec iovecs[ODP_PACKET_SOCKET_MAX_BURST_TX][MAX_SEGS];
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
+ struct mmsghdr msgvec[num];
+ struct iovec iovecs[num][PKT_MAX_SEGS];
int ret;
- int sockfd;
- int n, i;
-
- if (odp_unlikely(len > ODP_PACKET_SOCKET_MAX_BURST_TX))
- return -1;
-
- odp_ticketlock_lock(&pktio_entry->s.txl);
+ int sockfd = pkt_sock->sockfd;
+ int i;
+ int tx_ts_idx = 0;
+ uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
- sockfd = pkt_sock->sockfd;
memset(msgvec, 0, sizeof(msgvec));
- for (i = 0; i < len; i++) {
+ for (i = 0; i < num; i++) {
msgvec[i].msg_hdr.msg_iov = iovecs[i];
msgvec[i].msg_hdr.msg_iovlen = _tx_pkt_to_iovec(pkt_table[i],
- iovecs[i]);
+ iovecs[i]);
+ if (tx_ts_enabled && tx_ts_idx == 0) {
+ if (odp_unlikely(packet_hdr(pkt_table[i])->p.flags.ts_set))
+ tx_ts_idx = i + 1;
+ }
}
- for (i = 0; i < len; ) {
- ret = sendmmsg(sockfd, &msgvec[i], len - i, MSG_DONTWAIT);
+ odp_ticketlock_lock(&pkt_sock->tx_lock);
+
+ for (i = 0; i < num; ) {
+ ret = sendmmsg(sockfd, &msgvec[i], num - i, MSG_DONTWAIT);
if (odp_unlikely(ret <= -1)) {
if (i == 0 && SOCK_ERR_REPORT(errno)) {
- __odp_errno = errno;
- ODP_ERR("sendmmsg(): %s\n", strerror(errno));
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ _ODP_ERR("sendmmsg(): %s\n", strerror(errno));
+ odp_ticketlock_unlock(&pkt_sock->tx_lock);
return -1;
}
break;
@@ -828,111 +503,148 @@ static int sock_mmsg_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
i += ret;
}
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ if (odp_unlikely(tx_ts_idx && i >= tx_ts_idx))
+ _odp_pktio_tx_ts_set(pktio_entry);
- for (n = 0; n < i; ++n)
- odp_packet_free(pkt_table[n]);
+ odp_ticketlock_unlock(&pkt_sock->tx_lock);
+
+ odp_packet_free_multi(pkt_table, i);
return i;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static uint32_t sock_mtu_get(pktio_entry_t *pktio_entry)
{
- return pktio_entry->s.pkt_sock.mtu;
+ return pkt_priv(pktio_entry)->mtu;
+}
+
+static int sock_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
+ int ret;
+
+ ret = _odp_mtu_set_fd(pkt_sock->sockfd, pktio_entry->name, maxlen_input);
+ if (ret)
+ return ret;
+
+ pkt_sock->mtu = maxlen_input;
+
+ return 0;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_mac_addr_get(pktio_entry_t *pktio_entry,
void *mac_addr)
{
- memcpy(mac_addr, pktio_entry->s.pkt_sock.if_mac, ETH_ALEN);
+ memcpy(mac_addr, pkt_priv(pktio_entry)->if_mac, ETH_ALEN);
return ETH_ALEN;
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_promisc_mode_set(pktio_entry_t *pktio_entry,
odp_bool_t enable)
{
- return promisc_mode_set_fd(pktio_entry->s.pkt_sock.sockfd,
- pktio_entry->s.name, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name, enable);
}
-/*
- * ODP_PACKET_SOCKET_MMSG:
- */
static int sock_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return promisc_mode_get_fd(pktio_entry->s.pkt_sock.sockfd,
- pktio_entry->s.name);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name);
}
static int sock_link_status(pktio_entry_t *pktio_entry)
{
- return link_status_fd(pktio_entry->s.pkt_sock.sockfd,
- pktio_entry->s.name);
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name);
+}
+
+static int sock_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->name, info);
}
-static int sock_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
+static int sock_capability(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa)
{
+ pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
+
memset(capa, 0, sizeof(odp_pktio_capability_t));
capa->max_input_queues = 1;
capa->max_output_queues = 1;
capa->set_op.op.promisc_mode = 1;
+ capa->set_op.op.maxlen = 1;
+
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_input = pkt_sock->mtu_max;
+ capa->maxlen.min_output = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_output = pkt_sock->mtu_max;
odp_pktio_config_init(&capa->config);
capa->config.pktin.bit.ts_all = 1;
capa->config.pktin.bit.ts_ptp = 1;
+
+ capa->config.pktout.bit.ts_ena = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
+
+ /* Fill statistics capabilities */
+ _odp_sock_stats_capa(pktio_entry, capa);
+
return 0;
}
static int sock_stats(pktio_entry_t *pktio_entry,
odp_pktio_stats_t *stats)
{
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(stats, 0, sizeof(*stats));
- return 0;
- }
-
- return sock_stats_fd(pktio_entry,
- stats,
- pktio_entry->s.pkt_sock.sockfd);
+ return _odp_sock_stats_fd(pktio_entry, stats, pkt_priv(pktio_entry)->sockfd);
}
static int sock_stats_reset(pktio_entry_t *pktio_entry)
{
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(&pktio_entry->s.stats, 0,
- sizeof(odp_pktio_stats_t));
- return 0;
- }
+ return _odp_sock_stats_reset_fd(pktio_entry, pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[],
+ int num)
+{
+ return _odp_sock_extra_stat_info(pktio_entry, info, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
- return sock_stats_reset_fd(pktio_entry,
- pktio_entry->s.pkt_sock.sockfd);
+static int sock_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ return _odp_sock_extra_stats(pktio_entry, stats, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ return _odp_sock_extra_stat_counter(pktio_entry, id, stat,
+ pkt_priv(pktio_entry)->sockfd);
}
static int sock_init_global(void)
{
if (getenv("ODP_PKTIO_DISABLE_SOCKET_MMSG")) {
- ODP_PRINT("PKTIO: socket mmsg skipped,"
+ _ODP_PRINT("PKTIO: socket mmsg skipped,"
" enabled export ODP_PKTIO_DISABLE_SOCKET_MMSG=1.\n");
disable_pktio = 1;
} else {
- ODP_PRINT("PKTIO: initialized socket mmsg,"
- "use export ODP_PKTIO_DISABLE_SOCKET_MMSG=1 to disable.\n");
+ _ODP_PRINT("PKTIO: initialized socket mmsg,"
+ " use export ODP_PKTIO_DISABLE_SOCKET_MMSG=1 to disable.\n");
}
return 0;
}
-const pktio_if_ops_t sock_mmsg_pktio_ops = {
+const pktio_if_ops_t _odp_sock_mmsg_pktio_ops = {
.name = "socket",
.print = NULL,
.init_global = sock_init_global,
@@ -944,16 +656,26 @@ const pktio_if_ops_t sock_mmsg_pktio_ops = {
.stop = NULL,
.stats = sock_stats,
.stats_reset = sock_stats_reset,
+ .extra_stat_info = sock_extra_stat_info,
+ .extra_stats = sock_extra_stats,
+ .extra_stat_counter = sock_extra_stat_counter,
.recv = sock_mmsg_recv,
+ .recv_tmo = sock_recv_tmo,
+ .recv_mq_tmo = sock_recv_mq_tmo,
+ .fd_set = sock_fd_set,
.send = sock_mmsg_send,
- .mtu_get = sock_mtu_get,
+ .maxlen_get = sock_mtu_get,
+ .maxlen_set = sock_mtu_set,
.promisc_mode_set = sock_promisc_mode_set,
.promisc_mode_get = sock_promisc_mode_get,
.mac_get = sock_mac_addr_get,
+ .mac_set = NULL,
.link_status = sock_link_status,
+ .link_info = sock_link_info,
.capability = sock_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL,
.input_queues_config = NULL,
.output_queues_config = NULL,
diff --git a/platform/linux-generic/pktio/socket_common.c b/platform/linux-generic/pktio/socket_common.c
new file mode 100644
index 000000000..dabe86aa2
--- /dev/null
+++ b/platform/linux-generic/pktio/socket_common.c
@@ -0,0 +1,297 @@
+/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2019-2020, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/ethtool.h>
+#include <linux/if_packet.h>
+#include <linux/sockios.h>
+#include <errno.h>
+#include <odp_debug_internal.h>
+#include <odp_socket_common.h>
+#include <protocols/eth.h>
+
+/* Fallback for old kernels (< v4.6) */
+#ifndef ETHTOOL_GLINKSETTINGS
+#define ETHTOOL_GLINKSETTINGS 0x0000004c
+
+struct ethtool_link_settings {
+ uint32_t cmd;
+ uint32_t speed;
+ uint8_t duplex;
+ uint8_t port;
+ uint8_t phy_address;
+ uint8_t autoneg;
+ uint8_t mdio_support;
+ uint8_t eth_tp_mdix;
+ uint8_t eth_tp_mdix_ctrl;
+ int8_t link_mode_masks_nwords;
+ uint32_t reserved[8];
+ uint32_t link_mode_masks[0];
+};
+#endif
+
+/**
+ * ODP_PACKET_SOCKET_MMSG:
+ * ODP_PACKET_SOCKET_MMAP:
+ */
+int _odp_mac_addr_get_fd(int fd, const char *name, unsigned char mac_dst[])
+{
+ struct ifreq ethreq;
+ int ret;
+
+ memset(&ethreq, 0, sizeof(ethreq));
+ snprintf(ethreq.ifr_name, IF_NAMESIZE, "%s", name);
+ ret = ioctl(fd, SIOCGIFHWADDR, &ethreq);
+ if (ret != 0) {
+ _ODP_ERR("ioctl(SIOCGIFHWADDR): %s: \"%s\".\n", strerror(errno), ethreq.ifr_name);
+ return -1;
+ }
+
+ memcpy(mac_dst, (unsigned char *)ethreq.ifr_ifru.ifru_hwaddr.sa_data,
+ ETH_ALEN);
+ return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMSG:
+ * ODP_PACKET_SOCKET_MMAP:
+ * ODP_PACKET_TAP:
+ */
+uint32_t _odp_mtu_get_fd(int fd, const char *name)
+{
+ struct ifreq ifr;
+ int ret;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ ret = ioctl(fd, SIOCGIFMTU, &ifr);
+ if (ret < 0) {
+ _ODP_ERR("ioctl(SIOCGIFMTU): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return 0;
+ }
+ return ifr.ifr_mtu + _ODP_ETHHDR_LEN;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMAP:
+ * ODP_PACKET_SOCKET_MMSG:
+ * ODP_PACKET_TAP:
+ */
+int _odp_mtu_set_fd(int fd, const char *name, int mtu)
+{
+ struct ifreq ifr;
+ int ret;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ ifr.ifr_mtu = mtu - _ODP_ETHHDR_LEN;
+
+ ret = ioctl(fd, SIOCSIFMTU, &ifr);
+ if (ret < 0) {
+ _ODP_ERR("ioctl(SIOCSIFMTU): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMSG:
+ * ODP_PACKET_SOCKET_MMAP:
+ */
+int _odp_promisc_mode_set_fd(int fd, const char *name, int enable)
+{
+ struct ifreq ifr;
+ int ret;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
+ if (ret < 0) {
+ _ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return -1;
+ }
+
+ if (enable)
+ ifr.ifr_flags |= IFF_PROMISC;
+ else
+ ifr.ifr_flags &= ~(IFF_PROMISC);
+
+ ret = ioctl(fd, SIOCSIFFLAGS, &ifr);
+ if (ret < 0) {
+ _ODP_DBG("ioctl(SIOCSIFFLAGS): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * ODP_PACKET_SOCKET_MMSG:
+ * ODP_PACKET_SOCKET_MMAP:
+ */
+int _odp_promisc_mode_get_fd(int fd, const char *name)
+{
+ struct ifreq ifr;
+ int ret;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
+ if (ret < 0) {
+ _ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return -1;
+ }
+
+ return !!(ifr.ifr_flags & IFF_PROMISC);
+}
+
+int _odp_link_status_fd(int fd, const char *name)
+{
+ struct ifreq ifr;
+ int ret;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
+ if (ret < 0) {
+ _ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return ODP_PKTIO_LINK_STATUS_UNKNOWN;
+ }
+
+ if (ifr.ifr_flags & IFF_RUNNING)
+ return ODP_PKTIO_LINK_STATUS_UP;
+ return ODP_PKTIO_LINK_STATUS_DOWN;
+}
+
+int _odp_link_info_fd(int fd, const char *name, odp_pktio_link_info_t *info)
+{
+ struct ethtool_link_settings hcmd = {.cmd = ETHTOOL_GLINKSETTINGS};
+ struct ethtool_link_settings *ecmd;
+ struct ethtool_pauseparam pcmd = {.cmd = ETHTOOL_GPAUSEPARAM};
+ struct ifreq ifr;
+ int status;
+
+ status = _odp_link_status_fd(fd, name);
+ if (status < 0)
+ return -1;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ /* Link pause status */
+ ifr.ifr_data = (void *)&pcmd;
+ if (ioctl(fd, SIOCETHTOOL, &ifr) && errno != EOPNOTSUPP) {
+ _ODP_ERR("ioctl(SIOCETHTOOL): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return -1;
+ }
+
+ /* Try to perform handshake and fall back to old API if failed */
+ ifr.ifr_data = (void *)&hcmd;
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+ struct ethtool_cmd ecmd_old = {.cmd = ETHTOOL_GSET};
+
+ ifr.ifr_data = (void *)&ecmd_old;
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+ _ODP_ERR("ioctl(SIOCETHTOOL): %s: \"%s\".\n", strerror(errno),
+ ifr.ifr_name);
+ return -1;
+ }
+
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+ info->speed = ethtool_cmd_speed(&ecmd_old);
+ if (info->speed == (uint32_t)SPEED_UNKNOWN)
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+
+ if (ecmd_old.autoneg == AUTONEG_ENABLE)
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_ON;
+ else if (ecmd_old.autoneg == AUTONEG_DISABLE)
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+ else
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_UNKNOWN;
+
+ if (ecmd_old.duplex == DUPLEX_HALF)
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_HALF;
+ else if (ecmd_old.duplex == DUPLEX_FULL)
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ else
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_UNKNOWN;
+
+ info->pause_rx = pcmd.rx_pause ? ODP_PKTIO_LINK_PAUSE_ON : ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = pcmd.tx_pause ? ODP_PKTIO_LINK_PAUSE_ON : ODP_PKTIO_LINK_PAUSE_OFF;
+
+ if (ecmd_old.port == PORT_TP)
+ info->media = "copper";
+ else if (ecmd_old.port == PORT_FIBRE)
+ info->media = "fiber";
+ else if (ecmd_old.port == PORT_OTHER)
+ info->media = "other";
+ else
+ info->media = "unknown";
+
+ info->status = status;
+
+ return 0;
+ }
+
+ if (hcmd.link_mode_masks_nwords >= 0 || hcmd.cmd != ETHTOOL_GLINKSETTINGS) {
+ _ODP_ERR("ETHTOOL_GLINKSETTINGS handshake failed\n");
+ return -1;
+ }
+ /* Absolute value indicates kernel recommended 'link_mode_masks_nwords' value. */
+ hcmd.link_mode_masks_nwords = -hcmd.link_mode_masks_nwords;
+
+ /* Reserve space for the three bitmasks (map_supported, map_advertising, map_lp_advertising)
+ * at the end of struct ethtool_link_settings. 'link_mode_masks_nwords' defines the bitmask
+ * length in 32-bit words. */
+ uint8_t data[offsetof(struct ethtool_link_settings, link_mode_masks) +
+ (3 * sizeof(uint32_t) * hcmd.link_mode_masks_nwords)] ODP_ALIGNED_CACHE;
+
+ ecmd = (void *)data;
+ *ecmd = hcmd;
+ ifr.ifr_data = (void *)ecmd;
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
+ _ODP_ERR("ioctl(SIOCETHTOOL): %s: \"%s\".\n", strerror(errno), ifr.ifr_name);
+ return -1;
+ }
+
+ memset(info, 0, sizeof(odp_pktio_link_info_t));
+ if (ecmd->speed == (uint32_t)SPEED_UNKNOWN)
+ info->speed = ODP_PKTIO_LINK_SPEED_UNKNOWN;
+ else
+ info->speed = ecmd->speed;
+
+ if (ecmd->autoneg == AUTONEG_ENABLE)
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_ON;
+ else if (ecmd->autoneg == AUTONEG_DISABLE)
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_OFF;
+ else
+ info->autoneg = ODP_PKTIO_LINK_AUTONEG_UNKNOWN;
+
+ if (ecmd->duplex == DUPLEX_HALF)
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_HALF;
+ else if (ecmd->duplex == DUPLEX_FULL)
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_FULL;
+ else
+ info->duplex = ODP_PKTIO_LINK_DUPLEX_UNKNOWN;
+
+ info->pause_rx = pcmd.rx_pause ? ODP_PKTIO_LINK_PAUSE_ON : ODP_PKTIO_LINK_PAUSE_OFF;
+ info->pause_tx = pcmd.tx_pause ? ODP_PKTIO_LINK_PAUSE_ON : ODP_PKTIO_LINK_PAUSE_OFF;
+
+ if (ecmd->port == PORT_TP)
+ info->media = "copper";
+ else if (ecmd->port == PORT_FIBRE)
+ info->media = "fiber";
+ else if (ecmd->port == PORT_OTHER)
+ info->media = "other";
+ else
+ info->media = "unknown";
+
+ info->status = status;
+
+ return 0;
+}
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 666aae6af..92bf8a4bf 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -1,5 +1,5 @@
-/* Copyright (c) 2013, Linaro Limited
- * Copyright (c) 2013, Nokia Solutions and Networks
+/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2013-2023, Nokia Solutions and Networks
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,7 +7,27 @@
#include <odp_posix_extensions.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/packet_inlines.h>
+
+#include <odp_socket_common.h>
+#include <odp_parse_internal.h>
+#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
+#include <odp_packet_io_stats.h>
+#include <odp_debug_internal.h>
+#include <odp_classification_datamodel.h>
+#include <odp_classification_internal.h>
+#include <odp_global_data.h>
+#include <odp_macros_internal.h>
+
+#include <protocols/eth.h>
+#include <protocols/ip.h>
#include <sys/socket.h>
#include <stdio.h>
@@ -21,51 +41,69 @@
#include <poll.h>
#include <sys/ioctl.h>
#include <errno.h>
+#include <time.h>
+#include <linux/if_packet.h>
+
+/* VLAN flags in tpacket2_hdr status */
+#ifdef TP_STATUS_VLAN_TPID_VALID
+#define VLAN_VALID (TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID)
+#else
+#define VLAN_VALID (TP_STATUS_VLAN_VALID)
+#endif
+
+/* Reserve 4MB memory for frames in a RX/TX ring */
+#define FRAME_MEM_SIZE (4 * 1024 * 1024)
+#define BLOCK_SIZE (4 * 1024)
+
+/** packet mmap ring */
+struct ring {
+ odp_ticketlock_t lock;
+ struct iovec *rd;
+ unsigned int frame_num;
+ int rd_num;
+
+ odp_shm_t shm;
+ int sock;
+ int type;
+ int version;
+ uint8_t *mm_space;
+ size_t mm_len;
+ size_t rd_len;
+ int flen;
+
+ struct tpacket_req req;
+};
-#include <odp_api.h>
-#include <odp_packet_socket.h>
-#include <odp_packet_internal.h>
-#include <odp_packet_io_internal.h>
-#include <odp_debug_internal.h>
-#include <odp_classification_datamodel.h>
-#include <odp_classification_inlines.h>
-#include <odp_classification_internal.h>
-#include <odp/api/hints.h>
-
-#include <protocols/eth.h>
-#include <protocols/ip.h>
-
-static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
-
-static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock,
- int sock_group_idx)
+ODP_STATIC_ASSERT(offsetof(struct ring, mm_space) <= ODP_CACHE_LINE_SIZE,
+ "ERR_STRUCT_RING");
+
+/** Packet socket using mmap rings for both Rx and Tx */
+typedef struct {
+ /** Packet mmap ring for Rx */
+ struct ring rx_ring ODP_ALIGNED_CACHE;
+ /** Packet mmap ring for Tx */
+ struct ring tx_ring ODP_ALIGNED_CACHE;
+
+ int sockfd ODP_ALIGNED_CACHE;
+ odp_pool_t pool;
+ int mtu; /**< maximum transmission unit */
+ uint32_t mtu_max; /**< maximum supported MTU value */
+ size_t frame_offset; /**< frame start offset from start of pkt buf */
+ uint8_t *mmap_base;
+ unsigned int mmap_len;
+ unsigned char if_mac[ETH_ALEN];
+ struct sockaddr_ll ll;
+} pkt_sock_mmap_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_sock_mmap_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static inline pkt_sock_mmap_t *pkt_priv(pktio_entry_t *pktio_entry)
{
- int sockfd = pkt_sock->sockfd;
- int val;
- int err;
- uint16_t fanout_group;
-
- fanout_group = (uint16_t)(sock_group_idx & 0xffff);
- val = (PACKET_FANOUT_HASH << 16) | fanout_group;
-
- err = setsockopt(sockfd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
- if (err != 0) {
- __odp_errno = errno;
- ODP_ERR("setsockopt(PACKET_FANOUT): %s\n", strerror(errno));
- return -1;
- }
- return 0;
+ return (pkt_sock_mmap_t *)(uintptr_t)(pktio_entry->pkt_priv);
}
-union frame_map {
- struct {
- struct tpacket2_hdr tp_h ODP_ALIGNED(TPACKET_ALIGNMENT);
- struct sockaddr_ll s_ll
- ODP_ALIGNED(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)));
- } *v2;
-
- void *raw;
-};
+static int disable_pktio; /** !0 this pktio disabled, 0 enabled */
static int mmap_pkt_socket(void)
{
@@ -74,15 +112,13 @@ static int mmap_pkt_socket(void)
int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
if (sock == -1) {
- __odp_errno = errno;
- ODP_ERR("socket(SOCK_RAW): %s\n", strerror(errno));
+ _ODP_ERR("socket(SOCK_RAW): %s\n", strerror(errno));
return -1;
}
ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
if (ret == -1) {
- __odp_errno = errno;
- ODP_ERR("setsockopt(PACKET_VERSION): %s\n", strerror(errno));
+ _ODP_ERR("setsockopt(PACKET_VERSION): %s\n", strerror(errno));
close(sock);
return -1;
}
@@ -90,320 +126,379 @@ static int mmap_pkt_socket(void)
return sock;
}
-static inline int mmap_rx_kernel_ready(struct tpacket2_hdr *hdr)
-{
- return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER);
-}
-
-static inline void mmap_rx_user_ready(struct tpacket2_hdr *hdr)
-{
- hdr->tp_status = TP_STATUS_KERNEL;
- __sync_synchronize();
-}
-
-static inline int mmap_tx_kernel_ready(struct tpacket2_hdr *hdr)
-{
- return !(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING));
-}
-
-static inline void mmap_tx_user_ready(struct tpacket2_hdr *hdr)
+static inline unsigned next_frame(unsigned cur_frame, unsigned frame_count)
{
- hdr->tp_status = TP_STATUS_SEND_REQUEST;
- __sync_synchronize();
-}
-
-static uint8_t *pkt_mmap_vlan_insert(uint8_t *l2_hdr_ptr,
- uint16_t mac_offset,
- uint16_t vlan_tci,
- int *pkt_len_ptr)
-{
- _odp_ethhdr_t *eth_hdr;
- _odp_vlanhdr_t *vlan_hdr;
- uint8_t *new_l2_ptr;
- int orig_pkt_len;
-
- /* First try to see if the mac_offset is large enough to accommodate
- * shifting the Ethernet header down to open up space for the IEEE
- * 802.1Q vlan header.
- */
- if (_ODP_VLANHDR_LEN < mac_offset) {
- orig_pkt_len = *pkt_len_ptr;
- new_l2_ptr = l2_hdr_ptr - _ODP_VLANHDR_LEN;
- memmove(new_l2_ptr, l2_hdr_ptr, _ODP_ETHHDR_LEN);
-
- eth_hdr = (_odp_ethhdr_t *)new_l2_ptr;
- vlan_hdr = (_odp_vlanhdr_t *)(new_l2_ptr + _ODP_ETHHDR_LEN);
- vlan_hdr->tci = odp_cpu_to_be_16(vlan_tci);
- vlan_hdr->type = eth_hdr->type;
- eth_hdr->type = odp_cpu_to_be_16(_ODP_ETHTYPE_VLAN);
- *pkt_len_ptr = orig_pkt_len + _ODP_VLANHDR_LEN;
- return new_l2_ptr;
- }
-
- return l2_hdr_ptr;
+ return odp_unlikely(cur_frame + 1 >= frame_count) ? 0 : cur_frame + 1;
}
static inline unsigned pkt_mmap_v2_rx(pktio_entry_t *pktio_entry,
pkt_sock_mmap_t *pkt_sock,
- odp_packet_t pkt_table[], unsigned len,
+ odp_packet_t pkt_table[], unsigned num,
unsigned char if_mac[])
{
- union frame_map ppd;
odp_time_t ts_val;
odp_time_t *ts = NULL;
- unsigned frame_num, next_frame_num;
- uint8_t *pkt_buf;
+ unsigned int frame_num, next_frame_num;
+ uint8_t *pkt_buf, *next_ptr;
int pkt_len;
+ uint32_t alloc_len;
struct ethhdr *eth_hdr;
- unsigned i;
- unsigned nb_rx;
+ unsigned int i;
+ unsigned int nb_rx = 0;
+ unsigned int nb_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
struct ring *ring;
- int ret;
+ odp_pool_t pool = pkt_sock->pool;
+ uint16_t frame_offset = pktio_entry->pktin_frame_offset;
+ uint16_t vlan_len = 0;
+ const odp_proto_layer_t layer = pktio_entry->parse_layer;
+ const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp)
+ if (opt.bit.ts_all || opt.bit.ts_ptp)
ts = &ts_val;
ring = &pkt_sock->rx_ring;
frame_num = ring->frame_num;
+ next_ptr = ring->rd[frame_num].iov_base;
- for (i = 0, nb_rx = 0; i < len; i++) {
+ for (i = 0; i < num; i++) {
+ struct tpacket2_hdr *tp_hdr;
+ odp_packet_t pkt;
odp_packet_hdr_t *hdr;
- odp_packet_hdr_t parsed_hdr;
- odp_pool_t pool = pkt_sock->pool;
- int num;
+ int ret;
- if (!mmap_rx_kernel_ready(ring->rd[frame_num].iov_base))
+ tp_hdr = (void *)next_ptr;
+
+ if (tp_hdr->tp_status == TP_STATUS_KERNEL)
break;
+ next_frame_num = next_frame(frame_num, ring->rd_num);
+ next_ptr = ring->rd[next_frame_num].iov_base;
+ odp_prefetch(next_ptr);
+ odp_prefetch(next_ptr + ODP_CACHE_LINE_SIZE);
+
if (ts != NULL)
ts_val = odp_time_global();
- ppd.raw = ring->rd[frame_num].iov_base;
- next_frame_num = (frame_num + 1) % ring->rd_num;
+ pkt_buf = (uint8_t *)(void *)tp_hdr + tp_hdr->tp_mac;
+ pkt_len = tp_hdr->tp_snaplen;
+
+ if (odp_unlikely(pkt_len > pkt_sock->mtu)) {
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
+ frame_num = next_frame_num;
+ _ODP_DBG("dropped oversized packet\n");
+ continue;
+ }
- pkt_buf = (uint8_t *)ppd.raw + ppd.v2->tp_h.tp_mac;
- pkt_len = ppd.v2->tp_h.tp_snaplen;
+ /* Check if packet had a VLAN header */
+ if ((tp_hdr->tp_status & VLAN_VALID) == VLAN_VALID)
+ vlan_len = 4;
+
+ alloc_len = pkt_len + frame_offset + vlan_len;
+ ret = _odp_packet_alloc_multi(pool, alloc_len, &pkt, 1);
+
+ if (odp_unlikely(ret != 1)) {
+ /* Stop receiving packets when pool is empty. Leave
+ * the current frame into the ring. */
+ break;
+ }
/* Don't receive packets sent by ourselves */
eth_hdr = (struct ethhdr *)pkt_buf;
if (odp_unlikely(ethaddrs_equal(if_mac,
eth_hdr->h_source))) {
- mmap_rx_user_ready(ppd.raw); /* drop */
+ odp_packet_free(pkt);
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
frame_num = next_frame_num;
continue;
}
- if (ppd.v2->tp_h.tp_status & TP_STATUS_VLAN_VALID)
- pkt_buf = pkt_mmap_vlan_insert(pkt_buf,
- ppd.v2->tp_h.tp_mac,
- ppd.v2->tp_h.tp_vlan_tci,
- &pkt_len);
+ hdr = packet_hdr(pkt);
- if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry, pkt_buf, pkt_len,
- pkt_len, &pool, &parsed_hdr)) {
- mmap_rx_user_ready(ppd.raw); /* drop */
- frame_num = next_frame_num;
- continue;
- }
- }
+ if (frame_offset)
+ pull_head(hdr, frame_offset);
- num = packet_alloc_multi(pool, pkt_len, &pkt_table[nb_rx], 1);
+ if (vlan_len)
+ pull_head(hdr, vlan_len);
- if (odp_unlikely(num != 1)) {
- pkt_table[nb_rx] = ODP_PACKET_INVALID;
- mmap_rx_user_ready(ppd.raw); /* drop */
- frame_num = next_frame_num;
- continue;
- }
- hdr = odp_packet_hdr(pkt_table[nb_rx]);
- ret = odp_packet_copy_from_mem(pkt_table[nb_rx], 0,
- pkt_len, pkt_buf);
+ ret = odp_packet_copy_from_mem(pkt, 0, pkt_len, pkt_buf);
if (ret != 0) {
- odp_packet_free(pkt_table[nb_rx]);
- mmap_rx_user_ready(ppd.raw); /* drop */
+ odp_packet_free(pkt);
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
frame_num = next_frame_num;
continue;
}
- hdr->input = pktio_entry->s.handle;
- if (pktio_cls_enabled(pktio_entry))
- copy_packet_cls_metadata(&parsed_hdr, hdr);
- else
- packet_parse_l2(&hdr->p, pkt_len);
+ if (vlan_len) {
+ /* Recreate VLAN header. Move MAC addresses and
+ * insert a VLAN header in between source MAC address
+ * and Ethernet type. */
+ uint8_t *mac;
+ uint16_t *type, *tci;
+
+ push_head(hdr, vlan_len);
+ mac = packet_data(hdr);
+ memmove(mac, mac + vlan_len, 2 * _ODP_ETHADDR_LEN);
+ type = (uint16_t *)(uintptr_t)
+ (mac + 2 * _ODP_ETHADDR_LEN);
+
+ #ifdef TP_STATUS_VLAN_TPID_VALID
+ *type = odp_cpu_to_be_16(tp_hdr->tp_vlan_tpid);
+ #else
+ /* Fallback for old kernels (< v3.14) */
+ uint16_t *type2;
+ static int warning_printed;
+
+ if (warning_printed == 0) {
+ _ODP_DBG("Original TPID value lost. Using 0x8100 for single tagged and 0x88a8 for double tagged.\n");
+ warning_printed = 1;
+ }
+ type2 = (uint16_t *)(uintptr_t)(mac + (2 * _ODP_ETHADDR_LEN) + vlan_len);
+ /* Recreate TPID 0x88a8 for double tagged and 0x8100 for single tagged */
+ if (*type2 == odp_cpu_to_be_16(0x8100))
+ *type = odp_cpu_to_be_16(0x88a8);
+ else
+ *type = odp_cpu_to_be_16(0x8100);
+ #endif
+
+ tci = type + 1;
+ *tci = odp_cpu_to_be_16(tp_hdr->tp_vlan_tci);
+ }
+ if (layer) {
+ ret = _odp_packet_parse_common(hdr, pkt_buf, pkt_len,
+ pkt_len, layer, opt);
+ if (ret)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_errors);
+
+ if (ret < 0) {
+ odp_packet_free(pkt);
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
+ frame_num = next_frame_num;
+ continue;
+ }
+
+ if (cls_enabled) {
+ odp_pool_t new_pool;
+
+ ret = _odp_cls_classify_packet(pktio_entry, pkt_buf,
+ &new_pool, hdr);
+ if (ret < 0)
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+
+ if (ret) {
+ odp_packet_free(pkt);
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
+ frame_num = next_frame_num;
+ continue;
+ }
+
+ if (odp_unlikely(_odp_pktio_packet_to_pool(
+ &pkt, &hdr, new_pool))) {
+ odp_packet_free(pkt);
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
+ frame_num = next_frame_num;
+ odp_atomic_inc_u64(&pktio_entry->stats_extra.in_discards);
+ continue;
+ }
+ }
+ }
+
+ hdr->input = pktio_entry->handle;
packet_set_ts(hdr, ts);
- mmap_rx_user_ready(ppd.raw);
+ tp_hdr->tp_status = TP_STATUS_KERNEL;
frame_num = next_frame_num;
- nb_rx++;
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkt_table[nb_cls++] = pkt;
+ nb_cls = _odp_cls_enq(pkt_table, nb_cls, (i + 1 == num));
+ } else {
+ pkt_table[nb_rx++] = pkt;
+ }
}
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(nb_cls))
+ _odp_cls_enq(pkt_table, nb_cls, true);
+
ring->frame_num = frame_num;
+
return nb_rx;
}
-static inline unsigned pkt_mmap_v2_tx(int sock, struct ring *ring,
- const odp_packet_t pkt_table[],
- unsigned len)
+static inline int pkt_mmap_v2_tx(pktio_entry_t *pktio_entry, int sock,
+ struct ring *ring,
+ const odp_packet_t pkt_table[], uint32_t num)
{
- union frame_map ppd;
- uint32_t pkt_len;
- unsigned first_frame_num, frame_num, frame_count;
+ uint32_t i, pkt_len, num_tx, tp_status;
+ uint32_t first_frame_num, frame_num, next_frame_num, frame_count;
int ret;
uint8_t *buf;
- unsigned n, i = 0;
- unsigned nb_tx = 0;
- int send_errno;
+ void *next_ptr;
+ struct tpacket2_hdr *tp_hdr[num];
int total_len = 0;
+ uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
+ uint32_t tx_ts_idx = 0;
- first_frame_num = ring->frame_num;
- frame_num = first_frame_num;
+ frame_num = ring->frame_num;
+ first_frame_num = frame_num;
frame_count = ring->rd_num;
+ next_ptr = ring->rd[frame_num].iov_base;
+
+ if (num > frame_count)
+ num = frame_count;
+
+ for (i = 0; i < num; i++) {
+ tp_hdr[i] = next_ptr;
+ tp_status = tp_hdr[i]->tp_status & 0x7;
+
+ if (tp_status != TP_STATUS_AVAILABLE) {
+ if (tp_status == TP_STATUS_WRONG_FORMAT) {
+ _ODP_ERR("Socket mmap: wrong format\n");
+ return -1;
+ }
- while (i < len) {
- ppd.raw = ring->rd[frame_num].iov_base;
- if (!odp_unlikely(mmap_tx_kernel_ready(ppd.raw)))
break;
+ }
+
+ next_frame_num = next_frame(frame_num, frame_count);
+ next_ptr = ring->rd[next_frame_num].iov_base;
+ odp_prefetch(next_ptr);
pkt_len = odp_packet_len(pkt_table[i]);
- ppd.v2->tp_h.tp_snaplen = pkt_len;
- ppd.v2->tp_h.tp_len = pkt_len;
+ tp_hdr[i]->tp_len = pkt_len;
total_len += pkt_len;
- buf = (uint8_t *)ppd.raw + TPACKET2_HDRLEN -
+ buf = (uint8_t *)(void *)tp_hdr[i] + TPACKET2_HDRLEN -
sizeof(struct sockaddr_ll);
odp_packet_copy_to_mem(pkt_table[i], 0, pkt_len, buf);
- mmap_tx_user_ready(ppd.raw);
+ tp_hdr[i]->tp_status = TP_STATUS_SEND_REQUEST;
- if (++frame_num >= frame_count)
- frame_num = 0;
+ frame_num = next_frame_num;
- i++;
+ if (tx_ts_enabled && tx_ts_idx == 0) {
+ if (odp_unlikely(packet_hdr(pkt_table[i])->p.flags.ts_set))
+ tx_ts_idx = i + 1;
+ }
}
- ret = sendto(sock, NULL, 0, MSG_DONTWAIT, NULL, 0);
- send_errno = errno;
-
- /* On success, the return value indicates the number of bytes sent. On
- * failure a value of -1 is returned, even if the failure occurred
- * after some of the packets in the ring have already been sent, so we
- * need to inspect the packet status to determine which were sent. */
- if (odp_likely(ret == total_len)) {
- nb_tx = i;
- ring->frame_num = frame_num;
- } else if (ret == -1) {
- for (frame_num = first_frame_num, n = 0; n < i; ++n) {
- struct tpacket2_hdr *hdr = ring->rd[frame_num].iov_base;
-
- if (odp_likely(hdr->tp_status == TP_STATUS_AVAILABLE ||
- hdr->tp_status == TP_STATUS_SENDING)) {
- nb_tx++;
- } else {
- /* The remaining frames weren't sent, clear
- * their status to indicate we're not waiting
- * for the kernel to process them. */
- hdr->tp_status = TP_STATUS_AVAILABLE;
- }
+ num = i;
+ num_tx = num;
- if (++frame_num >= frame_count)
- frame_num = 0;
- }
+ /* Ping kernel to send packets */
+ ret = send(sock, NULL, 0, MSG_DONTWAIT);
+
+ ring->frame_num = frame_num;
- ring->frame_num = (first_frame_num + nb_tx) % frame_count;
+ if (odp_unlikely(ret != total_len)) {
+ uint32_t frame_sum;
- if (nb_tx == 0 && SOCK_ERR_REPORT(send_errno)) {
- __odp_errno = send_errno;
- /* ENOBUFS indicates that the transmit queue is full,
- * which will happen regularly when overloaded so don't
- * print it */
- if (errno != ENOBUFS)
- ODP_ERR("sendto(pkt mmap): %s\n",
- strerror(send_errno));
+ /* Returns -1 when nothing is sent (send() would block) */
+ if (ret < 0 && errno != EWOULDBLOCK) {
+ _ODP_ERR("Socket mmap: send failed, ret %i, errno %i\n", ret, errno);
return -1;
}
- } else {
- /* Short send, return value is number of bytes sent so use this
- * to determine number of complete frames sent. */
- for (n = 0; n < i && ret > 0; ++n) {
- ret -= odp_packet_len(pkt_table[n]);
- if (ret >= 0)
- nb_tx++;
+
+ /* Check how many first packets have been sent
+ * (TP_STATUS_AVAILABLE or TP_STATUS_SENDING). Assuming that
+ * the rest will not be sent. */
+ for (i = 0; i < num; i++) {
+ tp_status = tp_hdr[i]->tp_status & 0x7;
+
+ if (tp_status == TP_STATUS_SEND_REQUEST)
+ break;
+
+ if (tp_status == TP_STATUS_WRONG_FORMAT) {
+ _ODP_ERR("Socket mmap: wrong format\n");
+ break;
+ }
}
- ring->frame_num = (first_frame_num + nb_tx) % frame_count;
- }
+ num_tx = i;
- for (i = 0; i < nb_tx; ++i)
- odp_packet_free(pkt_table[i]);
+ /* Clear status of unsent packets */
+ for (i = num_tx; i < num; i++)
+ tp_hdr[i]->tp_status = TP_STATUS_AVAILABLE;
- return nb_tx;
-}
+ frame_sum = first_frame_num + num_tx;
+ ring->frame_num = frame_sum;
-static void mmap_fill_ring(struct ring *ring, odp_pool_t pool_hdl, int fanout)
-{
- int pz = getpagesize();
- pool_t *pool;
-
- if (pool_hdl == ODP_POOL_INVALID)
- ODP_ABORT("Invalid pool handle\n");
-
- pool = pool_entry_from_hdl(pool_hdl);
-
- /* Frame has to capture full packet which can fit to the pool block.*/
- ring->req.tp_frame_size = (pool->data_size +
- TPACKET_HDRLEN + TPACKET_ALIGNMENT +
- + (pz - 1)) & (-pz);
-
- /* Calculate how many pages do we need to hold all pool packets
- * and align size to page boundary.
- */
- ring->req.tp_block_size = (ring->req.tp_frame_size *
- pool->num + (pz - 1)) & (-pz);
-
- if (!fanout) {
- /* Single socket is in use. Use 1 block with buf_num frames. */
- ring->req.tp_block_nr = 1;
- } else {
- /* Fanout is in use, more likely taffic split accodring to
- * number of cpu threads. Use cpu blocks and buf_num frames. */
- ring->req.tp_block_nr = odp_cpu_count();
+ if (frame_sum >= frame_count)
+ ring->frame_num = frame_sum - frame_count;
}
- ring->req.tp_frame_nr = ring->req.tp_block_size /
- ring->req.tp_frame_size * ring->req.tp_block_nr;
+ if (odp_unlikely(tx_ts_idx && num_tx >= tx_ts_idx))
+ _odp_pktio_tx_ts_set(pktio_entry);
- ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
- ring->rd_num = ring->req.tp_frame_nr;
- ring->flen = ring->req.tp_frame_size;
+ /* Free sent packets */
+ odp_packet_free_multi(pkt_table, num_tx);
+
+ return num_tx;
}
-static int mmap_setup_ring(int sock, struct ring *ring, int type,
- odp_pool_t pool_hdl, int fanout)
+static int mmap_setup_ring(pkt_sock_mmap_t *pkt_sock, struct ring *ring,
+ int type)
{
+ odp_shm_t shm;
+ uint32_t block_size, block_nr, frame_size, frame_nr;
+ uint32_t ring_size;
+ int flags;
+ int sock = pkt_sock->sockfd;
+ int mtu = pkt_sock->mtu_max;
int ret = 0;
ring->sock = sock;
ring->type = type;
ring->version = TPACKET_V2;
- mmap_fill_ring(ring, pool_hdl, fanout);
+ frame_size = _ODP_ROUNDUP_POWER2_U32(mtu + TPACKET_HDRLEN + TPACKET_ALIGNMENT);
+ block_size = BLOCK_SIZE;
+ if (frame_size > block_size)
+ block_size = frame_size;
+
+ block_nr = FRAME_MEM_SIZE / block_size;
+ frame_nr = (block_size / frame_size) * block_nr;
+ ring_size = frame_nr * sizeof(struct iovec);
+ flags = 0;
+
+ if (odp_global_ro.shm_single_va)
+ flags += ODP_SHM_SINGLE_VA;
+
+ shm = odp_shm_reserve(NULL, ring_size, ODP_CACHE_LINE_SIZE, flags);
+
+ if (shm == ODP_SHM_INVALID) {
+ _ODP_ERR("Reserving shm failed\n");
+ return -1;
+ }
+ ring->shm = shm;
+
+ ring->req.tp_block_size = block_size;
+ ring->req.tp_block_nr = block_nr;
+ ring->req.tp_frame_size = frame_size;
+ ring->req.tp_frame_nr = frame_nr;
+
+ ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
+ ring->rd_num = ring->req.tp_frame_nr;
+ ring->flen = ring->req.tp_frame_size;
+ ring->rd_len = ring_size;
+
+ _ODP_DBG(" tp_block_size %u\n", ring->req.tp_block_size);
+ _ODP_DBG(" tp_block_nr %u\n", ring->req.tp_block_nr);
+ _ODP_DBG(" tp_frame_size %u\n", ring->req.tp_frame_size);
+ _ODP_DBG(" tp_frame_nr %u\n", ring->req.tp_frame_nr);
ret = setsockopt(sock, SOL_PACKET, type, &ring->req, sizeof(ring->req));
if (ret == -1) {
- __odp_errno = errno;
- ODP_ERR("setsockopt(pkt mmap): %s\n", strerror(errno));
+ _ODP_ERR("setsockopt(pkt mmap): %s\n", strerror(errno));
return -1;
}
- ring->rd_len = ring->rd_num * sizeof(*ring->rd);
- ring->rd = malloc(ring->rd_len);
+ ring->rd = odp_shm_addr(shm);
if (!ring->rd) {
- __odp_errno = errno;
- ODP_ERR("malloc(): %s\n", strerror(errno));
+ _ODP_ERR("Reading shm addr failed\n");
return -1;
}
@@ -427,8 +522,7 @@ static int mmap_sock(pkt_sock_mmap_t *pkt_sock)
MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0);
if (pkt_sock->mmap_base == MAP_FAILED) {
- __odp_errno = errno;
- ODP_ERR("mmap rx&tx buffer failed: %s\n", strerror(errno));
+ _ODP_ERR("mmap rx&tx buffer failed: %s\n", strerror(errno));
return -1;
}
@@ -454,11 +548,19 @@ static int mmap_sock(pkt_sock_mmap_t *pkt_sock)
return 0;
}
-static void mmap_unmap_sock(pkt_sock_mmap_t *pkt_sock)
+static int mmap_unmap_sock(pkt_sock_mmap_t *pkt_sock)
{
- munmap(pkt_sock->mmap_base, pkt_sock->mmap_len);
- free(pkt_sock->rx_ring.rd);
- free(pkt_sock->tx_ring.rd);
+ int ret = 0;
+
+ if (pkt_sock->rx_ring.shm != ODP_SHM_INVALID)
+ odp_shm_free(pkt_sock->rx_ring.shm);
+ if (pkt_sock->tx_ring.shm != ODP_SHM_INVALID)
+ odp_shm_free(pkt_sock->tx_ring.shm);
+
+ if (pkt_sock->mmap_base != MAP_FAILED)
+ ret = munmap(pkt_sock->mmap_base, pkt_sock->mmap_len);
+
+ return ret;
}
static int mmap_bind_sock(pkt_sock_mmap_t *pkt_sock, const char *netdev)
@@ -475,8 +577,7 @@ static int mmap_bind_sock(pkt_sock_mmap_t *pkt_sock, const char *netdev)
ret = bind(pkt_sock->sockfd, (struct sockaddr *)&pkt_sock->ll,
sizeof(pkt_sock->ll));
if (ret == -1) {
- __odp_errno = errno;
- ODP_ERR("bind(to IF): %s\n", strerror(errno));
+ _ODP_ERR("bind(to IF): %s\n", strerror(errno));
return -1;
}
@@ -485,12 +586,17 @@ static int mmap_bind_sock(pkt_sock_mmap_t *pkt_sock, const char *netdev)
static int sock_mmap_close(pktio_entry_t *entry)
{
- pkt_sock_mmap_t *const pkt_sock = &entry->s.pkt_sock_mmap;
+ pkt_sock_mmap_t *const pkt_sock = pkt_priv(entry);
+ int ret;
+
+ ret = mmap_unmap_sock(pkt_sock);
+ if (ret != 0) {
+ _ODP_ERR("mmap_unmap_sock() %s\n", strerror(errno));
+ return -1;
+ }
- mmap_unmap_sock(pkt_sock);
if (pkt_sock->sockfd != -1 && close(pkt_sock->sockfd) != 0) {
- __odp_errno = errno;
- ODP_ERR("close(sockfd): %s\n", strerror(errno));
+ _ODP_ERR("close(sockfd): %s\n", strerror(errno));
return -1;
}
@@ -503,18 +609,17 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
{
int if_idx;
int ret = 0;
- odp_pktio_stats_t cur_stats;
if (disable_pktio)
return -1;
- pkt_sock_mmap_t *const pkt_sock = &pktio_entry->s.pkt_sock_mmap;
- int fanout = 1;
+ pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry);
/* Init pktio entry */
memset(pkt_sock, 0, sizeof(*pkt_sock));
/* set sockfd to -1, because a valid socked might be initialized to 0 */
pkt_sock->sockfd = -1;
+ pkt_sock->mmap_base = MAP_FAILED;
if (pool == ODP_POOL_INVALID)
return -1;
@@ -523,6 +628,10 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
pkt_sock->frame_offset = 0;
pkt_sock->pool = pool;
+ odp_ticketlock_init(&pkt_sock->rx_ring.lock);
+ odp_ticketlock_init(&pkt_sock->tx_ring.lock);
+ pkt_sock->rx_ring.shm = ODP_SHM_INVALID;
+ pkt_sock->tx_ring.shm = ODP_SHM_INVALID;
pkt_sock->sockfd = mmap_pkt_socket();
if (pkt_sock->sockfd == -1)
goto error;
@@ -531,13 +640,22 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
if (ret != 0)
goto error;
- ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->tx_ring,
- PACKET_TX_RING, pool, fanout);
+ pkt_sock->mtu = _odp_mtu_get_fd(pkt_sock->sockfd, netdev);
+ if (!pkt_sock->mtu)
+ goto error;
+ pkt_sock->mtu_max = _ODP_SOCKET_MTU_MAX;
+ if (pkt_sock->mtu > _ODP_SOCKET_MTU_MAX)
+ pkt_sock->mtu_max = pkt_sock->mtu;
+
+ _ODP_DBG("MTU size: %i\n", pkt_sock->mtu);
+
+ _ODP_DBG("TX ring setup:\n");
+ ret = mmap_setup_ring(pkt_sock, &pkt_sock->tx_ring, PACKET_TX_RING);
if (ret != 0)
goto error;
- ret = mmap_setup_ring(pkt_sock->sockfd, &pkt_sock->rx_ring,
- PACKET_RX_RING, pool, fanout);
+ _ODP_DBG("RX ring setup:\n");
+ ret = mmap_setup_ring(pkt_sock, &pkt_sock->rx_ring, PACKET_RX_RING);
if (ret != 0)
goto error;
@@ -545,42 +663,23 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
if (ret != 0)
goto error;
- ret = mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac);
+ ret = _odp_mac_addr_get_fd(pkt_sock->sockfd, netdev, pkt_sock->if_mac);
if (ret != 0)
goto error;
if_idx = if_nametoindex(netdev);
if (if_idx == 0) {
- __odp_errno = errno;
- ODP_ERR("if_nametoindex(): %s\n", strerror(errno));
+ _ODP_ERR("if_nametoindex(): %s\n", strerror(errno));
goto error;
}
- pkt_sock->fanout = fanout;
- if (fanout) {
- ret = set_pkt_sock_fanout_mmap(pkt_sock, if_idx);
- if (ret != 0)
- goto error;
- }
-
- ret = ethtool_stats_get_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
- pktio_entry->s.name,
- &cur_stats);
- if (ret != 0) {
- ret = sysfs_stats(pktio_entry, &cur_stats);
- if (ret != 0) {
- pktio_entry->s.stats_type = STATS_UNSUPPORTED;
- ODP_DBG("pktio: %s unsupported stats\n",
- pktio_entry->s.name);
- } else {
- pktio_entry->s.stats_type = STATS_SYSFS;
- }
- } else {
- pktio_entry->s.stats_type = STATS_ETHTOOL;
- }
+ pktio_entry->stats_type = _odp_sock_stats_type_fd(pktio_entry,
+ pkt_sock->sockfd);
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED)
+ _ODP_DBG("pktio: %s unsupported stats\n", pktio_entry->name);
- ret = sock_stats_reset_fd(pktio_entry,
- pktio_entry->s.pkt_sock_mmap.sockfd);
+ ret = _odp_sock_stats_reset_fd(pktio_entry,
+ pkt_priv(pktio_entry)->sockfd);
if (ret != 0)
goto error;
@@ -591,119 +690,268 @@ error:
return -1;
}
+static int sock_mmap_fd_set(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
+ fd_set *readfds)
+{
+ pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry);
+ int fd;
+
+ odp_ticketlock_lock(&pktio_entry->rxl);
+ fd = pkt_sock->sockfd;
+ FD_SET(fd, readfds);
+ odp_ticketlock_unlock(&pktio_entry->rxl);
+
+ return fd;
+}
+
static int sock_mmap_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkt_table[], int len)
+ odp_packet_t pkt_table[], int num)
{
- pkt_sock_mmap_t *const pkt_sock = &pktio_entry->s.pkt_sock_mmap;
+ pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry);
int ret;
- odp_ticketlock_lock(&pktio_entry->s.rxl);
- ret = pkt_mmap_v2_rx(pktio_entry, pkt_sock, pkt_table, len,
+ odp_ticketlock_lock(&pkt_sock->rx_ring.lock);
+ ret = pkt_mmap_v2_rx(pktio_entry, pkt_sock, pkt_table, num,
pkt_sock->if_mac);
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+ odp_ticketlock_unlock(&pkt_sock->rx_ring.lock);
return ret;
}
+static int sock_mmap_recv_tmo(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num, uint64_t usecs)
+{
+ struct timeval timeout;
+ int ret;
+ int maxfd;
+ fd_set readfds;
+
+ ret = sock_mmap_recv(pktio_entry, index, pkt_table, num);
+ if (ret != 0)
+ return ret;
+
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
+
+ FD_ZERO(&readfds);
+ maxfd = sock_mmap_fd_set(pktio_entry, index, &readfds);
+
+ while (1) {
+ ret = select(maxfd + 1, &readfds, NULL, NULL, &timeout);
+
+ if (ret <= 0)
+ return ret;
+
+ ret = sock_mmap_recv(pktio_entry, index, pkt_table, num);
+
+ if (ret)
+ return ret;
+
+ /* If no packets, continue waiting until the timeout expires */
+ }
+}
+
+static int sock_mmap_recv_mq_tmo(pktio_entry_t *pktio_entry[], int index[],
+ uint32_t num_q, odp_packet_t pkt_table[], int num,
+ uint32_t *from, uint64_t usecs)
+{
+ struct timeval timeout;
+ uint32_t i;
+ int ret;
+ int maxfd = -1, maxfd2;
+ fd_set readfds;
+
+ for (i = 0; i < num_q; i++) {
+ ret = sock_mmap_recv(pktio_entry[i], index[i], pkt_table, num);
+
+ if (ret > 0 && from)
+ *from = i;
+
+ if (ret != 0)
+ return ret;
+ }
+
+ FD_ZERO(&readfds);
+
+ for (i = 0; i < num_q; i++) {
+ maxfd2 = sock_mmap_fd_set(pktio_entry[i], index[i], &readfds);
+ if (maxfd2 > maxfd)
+ maxfd = maxfd2;
+ }
+
+ timeout.tv_sec = usecs / (1000 * 1000);
+ timeout.tv_usec = usecs - timeout.tv_sec * (1000ULL * 1000ULL);
+
+ while (1) {
+ ret = select(maxfd + 1, &readfds, NULL, NULL, &timeout);
+
+ if (ret <= 0)
+ return ret;
+
+ for (i = 0; i < num_q; i++) {
+ ret = sock_mmap_recv(pktio_entry[i], index[i],
+ pkt_table, num);
+
+ if (ret > 0 && from)
+ *from = i;
+
+ if (ret)
+ return ret;
+ }
+
+ /* If no packets, continue waiting until the timeout expires */
+ }
+}
+
static int sock_mmap_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkt_table[], int len)
+ const odp_packet_t pkt_table[], int num)
{
int ret;
- pkt_sock_mmap_t *const pkt_sock = &pktio_entry->s.pkt_sock_mmap;
+ pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry);
- odp_ticketlock_lock(&pktio_entry->s.txl);
- ret = pkt_mmap_v2_tx(pkt_sock->tx_ring.sock, &pkt_sock->tx_ring,
- pkt_table, len);
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ odp_ticketlock_lock(&pkt_sock->tx_ring.lock);
+ ret = pkt_mmap_v2_tx(pktio_entry, pkt_sock->tx_ring.sock,
+ &pkt_sock->tx_ring, pkt_table, num);
+ odp_ticketlock_unlock(&pkt_sock->tx_ring.lock);
return ret;
}
static uint32_t sock_mmap_mtu_get(pktio_entry_t *pktio_entry)
{
- return mtu_get_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
- pktio_entry->s.name);
+ return _odp_mtu_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name);
+}
+
+static int sock_mmap_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_sock_mmap_t *pkt_sock = pkt_priv(pktio_entry);
+ int ret;
+
+ ret = _odp_mtu_set_fd(pkt_sock->sockfd, pktio_entry->name, maxlen_input);
+ if (ret)
+ return ret;
+
+ pkt_sock->mtu = maxlen_input;
+
+ return 0;
}
static int sock_mmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
{
- memcpy(mac_addr, pktio_entry->s.pkt_sock_mmap.if_mac, ETH_ALEN);
+ memcpy(mac_addr, pkt_priv(pktio_entry)->if_mac, ETH_ALEN);
return ETH_ALEN;
}
static int sock_mmap_promisc_mode_set(pktio_entry_t *pktio_entry,
odp_bool_t enable)
{
- return promisc_mode_set_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
- pktio_entry->s.name, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name, enable);
}
static int sock_mmap_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return promisc_mode_get_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
- pktio_entry->s.name);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name);
}
static int sock_mmap_link_status(pktio_entry_t *pktio_entry)
{
- return link_status_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
- pktio_entry->s.name);
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->sockfd,
+ pktio_entry->name);
+}
+
+static int sock_mmap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->name, info);
}
-static int sock_mmap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
+static int sock_mmap_capability(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa)
{
+ pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry);
+
memset(capa, 0, sizeof(odp_pktio_capability_t));
capa->max_input_queues = 1;
capa->max_output_queues = 1;
capa->set_op.op.promisc_mode = 1;
+ capa->set_op.op.maxlen = 1;
+
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_input = pkt_sock->mtu_max;
+ capa->maxlen.min_output = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_output = pkt_sock->mtu_max;
odp_pktio_config_init(&capa->config);
capa->config.pktin.bit.ts_all = 1;
capa->config.pktin.bit.ts_ptp = 1;
+
+ capa->config.pktout.bit.ts_ena = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
+
+ /* Fill statistics capabilities */
+ _odp_sock_stats_capa(pktio_entry, capa);
+
return 0;
}
static int sock_mmap_stats(pktio_entry_t *pktio_entry,
odp_pktio_stats_t *stats)
{
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(stats, 0, sizeof(*stats));
- return 0;
- }
-
- return sock_stats_fd(pktio_entry,
- stats,
- pktio_entry->s.pkt_sock_mmap.sockfd);
+ return _odp_sock_stats_fd(pktio_entry,
+ stats,
+ pkt_priv(pktio_entry)->sockfd);
}
static int sock_mmap_stats_reset(pktio_entry_t *pktio_entry)
{
- if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
- memset(&pktio_entry->s.stats, 0,
- sizeof(odp_pktio_stats_t));
- return 0;
- }
+ return _odp_sock_stats_reset_fd(pktio_entry,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_mmap_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[],
+ int num)
+{
+ return _odp_sock_extra_stat_info(pktio_entry, info, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_mmap_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ return _odp_sock_extra_stats(pktio_entry, stats, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
- return sock_stats_reset_fd(pktio_entry,
- pktio_entry->s.pkt_sock_mmap.sockfd);
+static int sock_mmap_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ return _odp_sock_extra_stat_counter(pktio_entry, id, stat,
+ pkt_priv(pktio_entry)->sockfd);
}
static int sock_mmap_init_global(void)
{
if (getenv("ODP_PKTIO_DISABLE_SOCKET_MMAP")) {
- ODP_PRINT("PKTIO: socket mmap skipped,"
+ _ODP_PRINT("PKTIO: socket mmap skipped,"
" enabled export ODP_PKTIO_DISABLE_SOCKET_MMAP=1.\n");
disable_pktio = 1;
} else {
- ODP_PRINT("PKTIO: initialized socket mmap,"
+ _ODP_PRINT("PKTIO: initialized socket mmap,"
" use export ODP_PKTIO_DISABLE_SOCKET_MMAP=1 to disable.\n");
}
return 0;
}
-const pktio_if_ops_t sock_mmap_pktio_ops = {
+const pktio_if_ops_t _odp_sock_mmap_pktio_ops = {
.name = "socket_mmap",
.print = NULL,
.init_global = sock_mmap_init_global,
@@ -715,16 +963,26 @@ const pktio_if_ops_t sock_mmap_pktio_ops = {
.stop = NULL,
.stats = sock_mmap_stats,
.stats_reset = sock_mmap_stats_reset,
+ .extra_stat_info = sock_mmap_extra_stat_info,
+ .extra_stats = sock_mmap_extra_stats,
+ .extra_stat_counter = sock_mmap_extra_stat_counter,
.recv = sock_mmap_recv,
+ .recv_tmo = sock_mmap_recv_tmo,
+ .recv_mq_tmo = sock_mmap_recv_mq_tmo,
.send = sock_mmap_send,
- .mtu_get = sock_mmap_mtu_get,
+ .fd_set = sock_mmap_fd_set,
+ .maxlen_get = sock_mmap_mtu_get,
+ .maxlen_set = sock_mmap_mtu_set,
.promisc_mode_set = sock_mmap_promisc_mode_set,
.promisc_mode_get = sock_mmap_promisc_mode_get,
.mac_get = sock_mmap_mac_addr_get,
+ .mac_set = NULL,
.link_status = sock_mmap_link_status,
+ .link_info = sock_mmap_link_info,
.capability = sock_mmap_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL,
.input_queues_config = NULL,
.output_queues_config = NULL,
diff --git a/platform/linux-generic/pktio/socket_xdp.c b/platform/linux-generic/pktio/socket_xdp.c
new file mode 100644
index 000000000..599942657
--- /dev/null
+++ b/platform/linux-generic/pktio/socket_xdp.c
@@ -0,0 +1,1249 @@
+/* Copyright (c) 2022-2023, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+
+#ifdef _ODP_PKTIO_XDP
+
+#include <odp_posix_extensions.h>
+#include <odp/api/cpu.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io_stats.h>
+#include <odp/api/system_info.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_classification_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_parse_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_socket_common.h>
+
+#include <errno.h>
+#include <linux/ethtool.h>
+#include <linux/if_xdp.h>
+#include <linux/sockios.h>
+#include <net/if.h>
+#include <poll.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include <xdp/xsk.h>
+
+#define NUM_DESCS_DEFAULT 1024U
+#define MIN_FRAME_SIZE 2048U
+
+#define MAX_QUEUES (ODP_PKTIN_MAX_QUEUES > ODP_PKTOUT_MAX_QUEUES ? \
+ ODP_PKTIN_MAX_QUEUES : ODP_PKTOUT_MAX_QUEUES)
+
+#define IF_DELIM " "
+#define Q_DELIM ':'
+#define CONF_BASE_STR "pktio_xdp"
+#define RX_DESCS_STR "num_rx_desc"
+#define TX_DESCS_STR "num_tx_desc"
+
+enum {
+ RX_PKT_ALLOC_ERR,
+ RX_DESC_RSV_ERR,
+ TX_PKT_ALLOC_ERR,
+ TX_DESC_RSV_ERR
+};
+
+static const char * const internal_stats_strs[] = {
+ "rx_packet_allocation_errors",
+ "rx_umem_descriptor_reservation_errors",
+ "tx_packet_allocation_errors",
+ "tx_umem_descriptor_reservation_errors"
+};
+
+#define MAX_INTERNAL_STATS _ODP_ARRAY_SIZE(internal_stats_strs)
+
+static const char * const shadow_q_driver_strs[] = {
+ "mlx",
+};
+
+typedef struct {
+ uint64_t rx_dropped;
+ uint64_t rx_inv_descs;
+ uint64_t tx_inv_descs;
+} xdp_sock_stats_t;
+
+typedef struct {
+ odp_ticketlock_t rx_lock ODP_ALIGNED_CACHE;
+ odp_ticketlock_t tx_lock ODP_ALIGNED_CACHE;
+ struct xsk_ring_cons rx;
+ struct xsk_ring_cons compl_q;
+ struct xsk_ring_prod tx;
+ struct xsk_ring_prod fill_q;
+ odp_pktin_queue_stats_t qi_stats;
+ odp_pktout_queue_stats_t qo_stats;
+ xdp_sock_stats_t xdp_stats;
+ struct xsk_socket *xsk;
+ uint64_t i_stats[MAX_INTERNAL_STATS];
+} xdp_sock_t;
+
+typedef struct {
+ struct xsk_ring_prod fill_q;
+ struct xsk_ring_cons compl_q;
+ struct xsk_umem *umem;
+ pool_t *pool;
+ int num_rx_desc;
+ int num_tx_desc;
+ uint32_t ref_cnt;
+} xdp_umem_info_t;
+
+typedef struct {
+ uint32_t rx;
+ uint32_t tx;
+ uint32_t other;
+ uint32_t combined;
+} drv_channels_t;
+
+typedef struct {
+ /* Queue counts for getting/setting driver's ethtool queue configuration. */
+ drv_channels_t drv_channels;
+ /* Packet I/O level requested input queue count. */
+ uint32_t num_in_conf_qs;
+ /* Packet I/O level requested output queue count. */
+ uint32_t num_out_conf_qs;
+ /* Actual internal queue count. */
+ uint32_t num_qs;
+ /* Length of driver's ethtool RSS indirection table. */
+ uint32_t drv_num_rss;
+} q_num_conf_t;
+
+typedef struct {
+ xdp_sock_t qs[MAX_QUEUES];
+ xdp_umem_info_t *umem_info;
+ q_num_conf_t q_num_conf;
+ int pktio_idx;
+ int helper_sock;
+ uint32_t mtu;
+ uint32_t max_mtu;
+ uint32_t bind_q;
+ odp_bool_t lockless_rx;
+ odp_bool_t lockless_tx;
+ odp_bool_t is_shadow_q;
+} xdp_sock_info_t;
+
+typedef struct {
+ odp_packet_hdr_t *pkt_hdr;
+ odp_packet_t pkt;
+ uint8_t *data;
+ uint32_t len;
+} pkt_data_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(xdp_sock_info_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static odp_bool_t disable_pktio;
+
+static int sock_xdp_init_global(void)
+{
+ if (getenv("ODP_PKTIO_DISABLE_SOCKET_XDP")) {
+ _ODP_PRINT("PKTIO: socket xdp skipped,"
+ " enabled export ODP_PKTIO_DISABLE_SOCKET_XDP=1.\n");
+ disable_pktio = true;
+ } else {
+ _ODP_PRINT("PKTIO: initialized socket xdp,"
+ " use export ODP_PKTIO_DISABLE_SOCKET_XDP=1 to disable.\n");
+ }
+
+ return 0;
+}
+
+static inline xdp_sock_info_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (xdp_sock_info_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
+
+static odp_bool_t get_nic_queue_count(int fd, const char *devname, drv_channels_t *cur_channels)
+{
+ struct ethtool_channels channels;
+ struct ifreq ifr;
+ int ret;
+
+ memset(&channels, 0, sizeof(struct ethtool_channels));
+ channels.cmd = ETHTOOL_GCHANNELS;
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname);
+ ifr.ifr_data = (char *)&channels;
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+
+ if (ret == -1) {
+ _ODP_DBG("Unable to query NIC queue capabilities: %s\n", strerror(errno));
+ return false;
+ }
+
+ cur_channels->rx = channels.rx_count;
+ cur_channels->tx = channels.tx_count;
+ cur_channels->other = channels.other_count;
+ cur_channels->combined = channels.combined_count;
+
+ return true;
+}
+
+static odp_bool_t get_nic_rss_indir_count(int fd, const char *devname, uint32_t *drv_num_rss)
+{
+ struct ethtool_rxfh indir;
+ struct ifreq ifr;
+ int ret;
+
+ memset(&indir, 0, sizeof(struct ethtool_rxfh));
+ indir.cmd = ETHTOOL_GRSSH;
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname);
+ ifr.ifr_data = (char *)&indir;
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+
+ if (ret == -1) {
+ _ODP_DBG("Unable to query NIC RSS indirection table size: %s\n", strerror(errno));
+ return false;
+ }
+
+ *drv_num_rss = indir.indir_size;
+
+ return true;
+}
+
+static odp_bool_t is_shadow_q_driver(int fd, const char *devname)
+{
+ struct ethtool_drvinfo info;
+ struct ifreq ifr;
+ int ret;
+
+ memset(&info, 0, sizeof(struct ethtool_drvinfo));
+ info.cmd = ETHTOOL_GDRVINFO;
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname);
+ ifr.ifr_data = (char *)&info;
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+
+ if (ret == -1) {
+ _ODP_DBG("Unable to query NIC driver information: %s\n", strerror(errno));
+ return false;
+ }
+
+ for (uint32_t i = 0U; i < _ODP_ARRAY_SIZE(shadow_q_driver_strs); ++i) {
+ if (strstr(info.driver, shadow_q_driver_strs[i]) != NULL) {
+ _ODP_PRINT("Driver with XDP shadow queues in use: %s, manual RSS"
+ " configuration likely required\n", info.driver);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void parse_options(xdp_umem_info_t *umem_info)
+{
+ if (!_odp_libconfig_lookup_ext_int(CONF_BASE_STR, NULL, RX_DESCS_STR,
+ &umem_info->num_rx_desc) ||
+ !_odp_libconfig_lookup_ext_int(CONF_BASE_STR, NULL, TX_DESCS_STR,
+ &umem_info->num_tx_desc)) {
+ _ODP_ERR("Unable to parse xdp descriptor configuration, using defaults (%d)\n",
+ NUM_DESCS_DEFAULT);
+ goto defaults;
+ }
+
+ if (umem_info->num_rx_desc <= 0 || umem_info->num_tx_desc <= 0 ||
+ !_ODP_CHECK_IS_POWER2(umem_info->num_rx_desc) ||
+ !_ODP_CHECK_IS_POWER2(umem_info->num_tx_desc)) {
+ _ODP_ERR("Invalid xdp descriptor configuration, using defaults (%d)\n",
+ NUM_DESCS_DEFAULT);
+ goto defaults;
+ }
+
+ return;
+
+defaults:
+ umem_info->num_rx_desc = NUM_DESCS_DEFAULT;
+ umem_info->num_tx_desc = NUM_DESCS_DEFAULT;
+}
+
+static int sock_xdp_open(odp_pktio_t pktio, pktio_entry_t *pktio_entry, const char *devname,
+ odp_pool_t pool_hdl)
+{
+ xdp_sock_info_t *priv;
+ pool_t *pool;
+ int ret;
+
+ if (disable_pktio)
+ return -1;
+
+ priv = pkt_priv(pktio_entry);
+ memset(priv, 0, sizeof(xdp_sock_info_t));
+ pool = _odp_pool_entry(pool_hdl);
+ priv->umem_info = (xdp_umem_info_t *)pool->mem_src_data;
+ priv->umem_info->pool = pool;
+ /* Mark transitory kernel-owned packets with the pktio index, so that they can be freed on
+ * close. */
+ priv->pktio_idx = 1 + odp_pktio_index(pktio);
+ /* Querying with ioctl() via AF_XDP socket doesn't seem to work, so
+ * create a helper socket for this. */
+ ret = socket(AF_INET, SOCK_DGRAM, 0);
+
+ if (ret == -1) {
+ _ODP_ERR("Error creating helper socket for xdp: %s\n", strerror(errno));
+ return -1;
+ }
+
+ priv->helper_sock = ret;
+ priv->mtu = _odp_mtu_get_fd(priv->helper_sock, devname);
+
+ if (priv->mtu == 0U)
+ goto mtu_err;
+
+ priv->max_mtu = pool->seg_len;
+
+ for (int i = 0; i < MAX_QUEUES; ++i) {
+ odp_ticketlock_init(&priv->qs[i].rx_lock);
+ odp_ticketlock_init(&priv->qs[i].tx_lock);
+ }
+
+ if (!get_nic_queue_count(priv->helper_sock, devname, &priv->q_num_conf.drv_channels) ||
+ !get_nic_rss_indir_count(priv->helper_sock, devname, &priv->q_num_conf.drv_num_rss))
+ _ODP_WARN("Unable to query NIC queue count/RSS, manual cleanup required\n");
+
+ priv->is_shadow_q = is_shadow_q_driver(priv->helper_sock, pktio_entry->name);
+ parse_options(priv->umem_info);
+ _ODP_DBG("Socket xdp interface (%s):\n", pktio_entry->name);
+ _ODP_DBG(" num_rx_desc: %d\n", priv->umem_info->num_rx_desc);
+ _ODP_DBG(" num_tx_desc: %d\n", priv->umem_info->num_tx_desc);
+
+ return 0;
+
+mtu_err:
+ close(priv->helper_sock);
+
+ return -1;
+}
+
+static odp_bool_t set_nic_queue_count(int fd, const char *devname, drv_channels_t *new_channels)
+{
+ struct ethtool_channels channels;
+ struct ifreq ifr;
+ int ret;
+
+ memset(&channels, 0, sizeof(struct ethtool_channels));
+ channels.cmd = ETHTOOL_SCHANNELS;
+ channels.rx_count = new_channels->rx;
+ channels.tx_count = new_channels->tx;
+ channels.other_count = new_channels->other;
+ channels.combined_count = new_channels->combined;
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname);
+ ifr.ifr_data = (char *)&channels;
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+
+ if (ret == -1) {
+ _ODP_DBG("Unable to set NIC queue count: %s\n", strerror(errno));
+ return false;
+ }
+
+ return true;
+}
+
+static odp_bool_t set_nic_rss_indir(int fd, const char *devname, struct ethtool_rxfh *indir)
+{
+ struct ifreq ifr;
+ int ret;
+
+ indir->cmd = ETHTOOL_SRSSH;
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname);
+ ifr.ifr_data = (char *)indir;
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+
+ if (ret == -1) {
+ _ODP_DBG("Unable to set NIC RSS indirection table: %s\n", strerror(errno));
+ return false;
+ }
+
+ return true;
+}
+
+static int sock_xdp_close(pktio_entry_t *pktio_entry)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ struct ethtool_rxfh indir;
+
+ memset(&indir, 0, sizeof(struct ethtool_rxfh));
+
+ if (priv->q_num_conf.num_qs != 0U)
+ (void)set_nic_queue_count(priv->helper_sock, pktio_entry->name,
+ &priv->q_num_conf.drv_channels);
+
+ if (priv->q_num_conf.drv_num_rss != 0U && !priv->is_shadow_q)
+ (void)set_nic_rss_indir(priv->helper_sock, pktio_entry->name, &indir);
+
+ close(priv->helper_sock);
+
+ return 0;
+}
+
+static int umem_create(xdp_umem_info_t *umem_info)
+{
+ struct xsk_umem_config cfg;
+
+ if (umem_info->ref_cnt++ > 0U)
+ return 0;
+
+ /* Fill queue size is recommended to be >= HW RX ring size + AF_XDP RX
+ * ring size, so use size twice the size of AF_XDP RX ring. */
+ cfg.fill_size = umem_info->num_rx_desc * 2U;
+ cfg.comp_size = umem_info->num_tx_desc;
+ cfg.frame_size = umem_info->pool->block_size;
+ cfg.frame_headroom = sizeof(odp_packet_hdr_t) + umem_info->pool->headroom;
+ cfg.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+
+ return xsk_umem__create(&umem_info->umem, umem_info->pool->base_addr,
+ umem_info->pool->shm_size, &umem_info->fill_q, &umem_info->compl_q,
+ &cfg);
+}
+
+static void fill_socket_config(struct xsk_socket_config *config, xdp_umem_info_t *umem_info)
+{
+ config->rx_size = umem_info->num_rx_desc * 2U;
+ config->tx_size = umem_info->num_tx_desc;
+ config->libxdp_flags = 0U;
+ config->xdp_flags = 0U;
+ config->bind_flags = XDP_ZEROCOPY;
+}
+
/* Allocate up to 'num' packets from the UMEM pool and publish their frame
 * offsets on the socket's fill ring, making the frames available for kernel
 * RX. Failures are counted in the socket's internal stats.
 *
 * Returns true when descriptors were published, false when allocation or
 * ring reservation failed. */
static odp_bool_t reserve_fill_queue_elements(xdp_sock_info_t *sock_info, xdp_sock_t *sock,
					      int num)
{
	pool_t *pool;
	odp_packet_t packets[num];
	int count;
	struct xsk_ring_prod *fill_q;
	uint32_t start_idx;
	int pktio_idx;
	uint32_t block_size;
	odp_packet_hdr_t *pkt_hdr;

	pool = sock_info->umem_info->pool;
	/* May return fewer than 'num' packets; 'mtu' is the requested length. */
	count = odp_packet_alloc_multi(_odp_pool_handle(pool), sock_info->mtu, packets, num);

	if (count <= 0) {
		++sock->i_stats[RX_PKT_ALLOC_ERR];
		return false;
	}

	fill_q = &sock->fill_q;

	/* All-or-nothing reservation: on failure return the packets to the pool. */
	if (xsk_ring_prod__reserve(fill_q, count, &start_idx) == 0U) {
		odp_packet_free_multi(packets, count);
		++sock->i_stats[RX_DESC_RSV_ERR];
		return false;
	}

	pktio_idx = sock_info->pktio_idx;
	block_size = pool->block_size;

	for (int i = 0; i < count; ++i) {
		pkt_hdr = packet_hdr(packets[i]);
		/* Mark the packet as owned by this pktio so sock_xdp_stop()
		 * can reclaim frames still sitting in kernel rings. */
		pkt_hdr->ms_pktio_idx = pktio_idx;
		/* UMEM addresses are byte offsets from the pool base:
		 * event index * block size gives the frame offset. */
		*xsk_ring_prod__fill_addr(fill_q, start_idx++) =
			pkt_hdr->event_hdr.index.event * block_size;
	}

	xsk_ring_prod__submit(&sock->fill_q, count);

	return true;
}
+
/* Create one AF_XDP socket per configured queue, all sharing the same UMEM,
 * binding consecutive NIC queues starting at sock_info->bind_q, and pre-fill
 * each socket's fill ring. On any failure all sockets created so far are
 * destroyed. Returns true on success. */
static odp_bool_t create_sockets(xdp_sock_info_t *sock_info, const char *devname)
{
	struct xsk_socket_config config;
	uint32_t bind_q, i;
	struct xsk_umem *umem;
	xdp_sock_t *sock;
	int ret;

	bind_q = sock_info->bind_q;
	umem = sock_info->umem_info->umem;

	for (i = 0U; i < sock_info->q_num_conf.num_qs;) {
		sock = &sock_info->qs[i];
		fill_socket_config(&config, sock_info->umem_info);
		ret = xsk_socket__create_shared(&sock->xsk, devname, bind_q, umem, &sock->rx,
						&sock->tx, &sock->fill_q, &sock->compl_q, &config);

		if (ret) {
			_ODP_ERR("Error creating xdp socket for bind queue %u: %d\n", bind_q, ret);
			goto err;
		}

		/* Increment before the fill-queue step so the error path below
		 * (j < i) also deletes this just-created socket if the
		 * reservation fails. */
		++i;

		if (!reserve_fill_queue_elements(sock_info, sock, config.rx_size)) {
			_ODP_ERR("Unable to reserve fill queue descriptors for queue: %u\n",
				 bind_q);
			goto err;
		}

		++bind_q;
	}

	/* Ring setup/clean up routines seem to be asynchronous with some drivers and might not be
	 * ready yet after xsk_socket__create_shared(). */
	sleep(1U);

	return true;

err:
	/* Tear down every socket created so far (including the failing one). */
	for (uint32_t j = 0U; j < i; ++j) {
		xsk_socket__delete(sock_info->qs[j].xsk);
		sock_info->qs[j].xsk = NULL;
	}

	return false;
}
+
+static void umem_delete(xdp_umem_info_t *umem_info)
+{
+ if (umem_info->ref_cnt-- != 1U)
+ return;
+
+ while (xsk_umem__delete(umem_info->umem) == -EBUSY)
+ continue;
+}
+
+static int sock_xdp_start(pktio_entry_t *pktio_entry)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ int ret;
+ drv_channels_t channels = priv->q_num_conf.drv_channels;
+ struct ethtool_rxfh *indir = calloc(1U, sizeof(struct ethtool_rxfh)
+ + sizeof(((struct ethtool_rxfh *)0)->rss_config[0U])
+ * priv->q_num_conf.drv_num_rss);
+
+ if (indir == NULL) {
+ _ODP_ERR("Error allocating NIC RSS table\n");
+ return -1;
+ }
+
+ ret = umem_create(priv->umem_info);
+
+ if (ret) {
+ _ODP_ERR("Error creating UMEM pool for xdp: %d\n", ret);
+ goto err;
+ }
+
+ priv->q_num_conf.num_qs = _ODP_MAX(priv->q_num_conf.num_in_conf_qs,
+ priv->q_num_conf.num_out_conf_qs);
+ priv->bind_q = priv->is_shadow_q ? priv->q_num_conf.num_qs : 0U;
+ channels.combined = priv->q_num_conf.num_qs;
+
+ if (!set_nic_queue_count(priv->helper_sock, pktio_entry->name, &channels))
+ _ODP_WARN("Unable to configure NIC queue count, manual configuration required\n");
+
+ if (priv->q_num_conf.num_in_conf_qs > 0U && !priv->is_shadow_q) {
+ indir->indir_size = priv->q_num_conf.drv_num_rss;
+
+ for (uint32_t i = 0U; i < indir->indir_size; ++i)
+ indir->rss_config[i] = (i % priv->q_num_conf.num_in_conf_qs);
+
+ if (!set_nic_rss_indir(priv->helper_sock, pktio_entry->name, indir))
+ _ODP_WARN("Unable to configure NIC RSS, manual configuration required\n");
+ }
+
+ if (!create_sockets(priv, pktio_entry->name))
+ goto sock_err;
+
+ return 0;
+
+sock_err:
+ umem_delete(priv->umem_info);
+
+err:
+ free(indir);
+
+ return -1;
+}
+
/* Stop hook: destroy all AF_XDP sockets, drop the UMEM reference, and
 * reclaim any packets still owned by kernel rings. Always returns 0. */
static int sock_xdp_stop(pktio_entry_t *pktio_entry)
{
	xdp_sock_info_t *priv = pkt_priv(pktio_entry);
	pool_t *pool = priv->umem_info->pool;
	odp_packet_hdr_t *pkt_hdr;

	for (uint32_t i = 0U; i < priv->q_num_conf.num_qs; ++i) {
		if (priv->qs[i].xsk != NULL) {
			xsk_socket__delete(priv->qs[i].xsk);
			priv->qs[i].xsk = NULL;
		}
	}

	umem_delete(priv->umem_info);
	/* Ring setup/clean up routines seem to be asynchronous with some drivers and might not be
	 * ready yet after xsk_socket__delete(). */
	sleep(1U);

	/* Free all packets that were in fill or completion queues at the time of closing.
	 * Ownership is tracked via ms_pktio_idx, stamped when a packet was
	 * handed to a ring; scan the whole pool for our index. */
	for (uint32_t i = 0U; i < pool->num + pool->skipped_blocks; ++i) {
		pkt_hdr = packet_hdr(packet_from_event_hdr(event_hdr_from_index(pool, i)));

		if (pkt_hdr->ms_pktio_idx == priv->pktio_idx) {
			pkt_hdr->ms_pktio_idx = 0U;
			odp_packet_free(packet_handle(pkt_hdr));
		}
	}

	return 0;
}
+
/* Aggregate pktio-level statistics over all queues. Software counters are
 * summed as-is; kernel XDP counters are added as deltas relative to the
 * snapshot taken at the last stats reset. Always returns 0. */
static int sock_xdp_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
{
	xdp_sock_info_t *priv = pkt_priv(pktio_entry);
	xdp_sock_t *sock;
	odp_pktin_queue_stats_t qi_stats;
	odp_pktout_queue_stats_t qo_stats;
	struct xdp_statistics xdp_stats;
	socklen_t optlen = sizeof(struct xdp_statistics);

	memset(stats, 0, sizeof(odp_pktio_stats_t));

	for (uint32_t i = 0U; i < priv->q_num_conf.num_qs; ++i) {
		sock = &priv->qs[i];
		qi_stats = sock->qi_stats;
		qo_stats = sock->qo_stats;
		stats->in_octets += qi_stats.octets;
		stats->in_packets += qi_stats.packets;
		stats->in_errors += qi_stats.errors;
		stats->out_octets += qo_stats.octets;
		stats->out_packets += qo_stats.packets;

		/* Kernel counters are cumulative; subtract the reset-time
		 * snapshot (sock->xdp_stats) to report per-period values. */
		if (!getsockopt(xsk_socket__fd(sock->xsk), SOL_XDP, XDP_STATISTICS, &xdp_stats,
				&optlen)) {
			stats->in_errors += (xdp_stats.rx_dropped - sock->xdp_stats.rx_dropped);
			stats->in_discards +=
				(xdp_stats.rx_invalid_descs - sock->xdp_stats.rx_inv_descs);
			stats->out_discards +=
				(xdp_stats.tx_invalid_descs - sock->xdp_stats.tx_inv_descs);
		}
	}

	return 0;
}
+
+static int sock_xdp_stats_reset(pktio_entry_t *pktio_entry)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ xdp_sock_t *sock;
+ struct xdp_statistics xdp_stats;
+ socklen_t optlen = sizeof(struct xdp_statistics);
+
+ for (uint32_t i = 0U; i < priv->q_num_conf.num_qs; ++i) {
+ sock = &priv->qs[i];
+ memset(&sock->qi_stats, 0, sizeof(odp_pktin_queue_stats_t));
+ memset(&sock->qo_stats, 0, sizeof(odp_pktout_queue_stats_t));
+ memset(sock->i_stats, 0, sizeof(sock->i_stats));
+
+ if (!getsockopt(xsk_socket__fd(sock->xsk), SOL_XDP, XDP_STATISTICS, &xdp_stats,
+ &optlen)) {
+ sock->xdp_stats.rx_dropped = xdp_stats.rx_dropped;
+ sock->xdp_stats.rx_inv_descs = xdp_stats.rx_invalid_descs;
+ sock->xdp_stats.tx_inv_descs = xdp_stats.tx_invalid_descs;
+ }
+ }
+
+ return 0;
+}
+
+static int sock_xdp_pktin_queue_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ xdp_sock_t *sock;
+ struct xdp_statistics xdp_stats;
+ socklen_t optlen = sizeof(struct xdp_statistics);
+
+ sock = &priv->qs[index];
+ *pktin_stats = sock->qi_stats;
+
+ if (!getsockopt(xsk_socket__fd(sock->xsk), SOL_XDP, XDP_STATISTICS, &xdp_stats, &optlen)) {
+ pktin_stats->errors += (xdp_stats.rx_dropped - sock->xdp_stats.rx_dropped);
+ pktin_stats->discards +=
+ (xdp_stats.rx_invalid_descs - sock->xdp_stats.rx_inv_descs);
+ }
+
+ return 0;
+}
+
+static int sock_xdp_pktout_queue_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ xdp_sock_t *sock;
+ struct xdp_statistics xdp_stats;
+ socklen_t optlen = sizeof(struct xdp_statistics);
+
+ sock = &priv->qs[index];
+ *pktout_stats = sock->qo_stats;
+
+ if (!getsockopt(xsk_socket__fd(sock->xsk), SOL_XDP, XDP_STATISTICS, &xdp_stats, &optlen))
+ pktout_stats->discards +=
+ (xdp_stats.tx_invalid_descs - sock->xdp_stats.tx_inv_descs);
+
+ return 0;
+}
+
+static int sock_xdp_extra_stat_info(pktio_entry_t *pktio_entry, odp_pktio_extra_stat_info_t info[],
+ int num)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ const int total_stats = MAX_INTERNAL_STATS * priv->q_num_conf.num_qs;
+
+ if (info != NULL && num > 0) {
+ for (int i = 0; i < _ODP_MIN(num, total_stats); ++i)
+ snprintf(info[i].name, ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1,
+ "q%" PRIu64 "_%s", i / MAX_INTERNAL_STATS,
+ internal_stats_strs[i % MAX_INTERNAL_STATS]);
+ }
+
+ return total_stats;
+}
+
+static int sock_xdp_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[], int num)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ const int total_stats = MAX_INTERNAL_STATS * priv->q_num_conf.num_qs;
+ uint64_t *i_stats;
+
+ if (stats != NULL && num > 0) {
+ for (int i = 0; i < _ODP_MIN(num, total_stats); ++i) {
+ i_stats = priv->qs[i / MAX_INTERNAL_STATS].i_stats;
+ stats[i] = i_stats[i % MAX_INTERNAL_STATS];
+ }
+ }
+
+ return total_stats;
+}
+
+static int sock_xdp_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id, uint64_t *stat)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ const uint32_t total_stats = MAX_INTERNAL_STATS * priv->q_num_conf.num_qs;
+
+ if (id >= total_stats) {
+ _ODP_ERR("Invalid counter id: %u (allowed range: 0-%u)\n", id, total_stats - 1U);
+ return -1;
+ }
+
+ *stat = priv->qs[id / MAX_INTERNAL_STATS].i_stats[id % MAX_INTERNAL_STATS];
+
+ return 0;
+}
+
/* Decode one RX descriptor into the packet header pointer, packet handle,
 * data pointer and length for further processing. */
static inline void extract_data(const struct xdp_desc *rx_desc, uint8_t *pool_base_addr,
				pkt_data_t *pkt_data)
{
	uint64_t frame_off;
	uint64_t pkt_off;

	/* UMEM "addresses" are offsets from start of a registered UMEM area.
	 * Additionally, the packet data offset (where received packet data
	 * starts within a UMEM frame) is encoded to the UMEM address with
	 * XSK_UNALIGNED_BUF_OFFSET_SHIFT left bitshift when XDP_ZEROCOPY and
	 * XDP_UMEM_UNALIGNED_CHUNK_FLAG are enabled. */
	frame_off = rx_desc->addr;
	pkt_off = xsk_umem__add_offset_to_addr(frame_off);
	frame_off = xsk_umem__extract_addr(frame_off);
	/* The frame start maps to the ODP packet header (see frame_headroom
	 * in umem_create()); the data offset maps to the payload start. */
	pkt_data->pkt_hdr = xsk_umem__get_data(pool_base_addr, frame_off);
	pkt_data->pkt = packet_handle(pkt_data->pkt_hdr);
	pkt_data->data = xsk_umem__get_data(pool_base_addr, pkt_off);
	pkt_data->len = rx_desc->len;
}
+
/* Convert 'num' received AF_XDP descriptors (starting at ring index
 * 'start_idx') into ODP packets: initialize headers, optionally parse and
 * classify, and update queue statistics. Non-classified packets are stored
 * into 'packets'; classified ones are enqueued straight to their destination
 * queues. Returns the number of packets stored in 'packets'. */
static uint32_t process_received(pktio_entry_t *pktio_entry, xdp_sock_t *sock, pool_t *pool,
				 uint32_t start_idx, odp_packet_t packets[], int num)
{
	struct xsk_ring_cons *rx = &sock->rx;
	uint8_t *base_addr = pool->base_addr;
	pkt_data_t pkt_data;
	const odp_proto_layer_t layer = pktio_entry->parse_layer;
	int ret;
	const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
	uint64_t errors = 0U, octets = 0U;
	odp_pktio_t pktio_hdl = pktio_entry->handle;
	uint32_t num_rx = 0U;
	uint32_t num_cls = 0U;
	uint32_t num_pkts = 0U;
	const int cls_enabled = pktio_cls_enabled(pktio_entry);

	for (int i = 0; i < num; ++i) {
		extract_data(xsk_ring_cons__rx_desc(rx, start_idx++), base_addr, &pkt_data);
		/* Frame is back from the kernel: clear the ring-ownership mark. */
		pkt_data.pkt_hdr->ms_pktio_idx = 0U;
		packet_init(pkt_data.pkt_hdr, pkt_data.len);
		/* Received data may start at a driver-chosen offset within the
		 * frame, so point the segment at the decoded data address. */
		pkt_data.pkt_hdr->seg_data = pkt_data.data;
		pkt_data.pkt_hdr->event_hdr.base_data = pkt_data.data;

		if (layer) {
			ret = _odp_packet_parse_common(pkt_data.pkt_hdr, pkt_data.data,
						       pkt_data.len, pkt_data.len,
						       layer, opt);

			/* Positive return: counted as error but packet kept;
			 * negative return: packet is dropped. */
			if (ret)
				++errors;

			if (ret < 0) {
				odp_packet_free(pkt_data.pkt);
				continue;
			}

			if (cls_enabled) {
				odp_pool_t new_pool;

				ret = _odp_cls_classify_packet(pktio_entry, pkt_data.data,
							       &new_pool, pkt_data.pkt_hdr);
				if (ret) {
					odp_packet_free(pkt_data.pkt);
					continue;
				}

				/* Classifier may redirect the packet to another
				 * pool; drop it if the move fails. */
				if (odp_unlikely(_odp_pktio_packet_to_pool(
					    &pkt_data.pkt, &pkt_data.pkt_hdr, new_pool))) {
					odp_packet_free(pkt_data.pkt);
					continue;
				}
			}
		}

		pkt_data.pkt_hdr->input = pktio_hdl;
		num_pkts++;
		octets += pkt_data.len;

		if (cls_enabled) {
			/* Enqueue packets directly to classifier destination queue */
			packets[num_cls++] = pkt_data.pkt;
			num_cls = _odp_cls_enq(packets, num_cls, (i + 1 == num));
		} else {
			packets[num_rx++] = pkt_data.pkt;
		}
	}

	/* Enqueue remaining classified packets */
	if (odp_unlikely(num_cls))
		_odp_cls_enq(packets, num_cls, true);

	sock->qi_stats.octets += octets;
	sock->qi_stats.packets += num_pkts;
	sock->qi_stats.errors += errors;

	return num_rx;
}
+
/* Receive hook: kick the kernel if the fill ring needs a wakeup, consume up
 * to 'num' descriptors from the RX ring, convert them to ODP packets and
 * replenish the fill ring. Per-queue locking is skipped when the configured
 * mode guarantees single-threaded access (lockless_rx). Returns the number
 * of packets delivered to 'packets'. */
static int sock_xdp_recv(pktio_entry_t *pktio_entry, int index, odp_packet_t packets[], int num)
{
	xdp_sock_info_t *priv;
	xdp_sock_t *sock;
	struct pollfd fd;
	uint32_t start_idx = 0U, recvd, procd;

	priv = pkt_priv(pktio_entry);
	_ODP_ASSERT((uint32_t)index < priv->q_num_conf.num_in_conf_qs);
	sock = &priv->qs[index];

	if (!priv->lockless_rx)
		odp_ticketlock_lock(&sock->rx_lock);

	/* Non-blocking poll triggers the kernel to process the fill ring. */
	if (odp_unlikely(xsk_ring_prod__needs_wakeup(&sock->fill_q))) {
		fd.fd = xsk_socket__fd(sock->xsk);
		fd.events = POLLIN;
		(void)poll(&fd, 1U, 0);
	}

	recvd = xsk_ring_cons__peek(&sock->rx, num, &start_idx);

	if (recvd == 0U) {
		if (!priv->lockless_rx)
			odp_ticketlock_unlock(&sock->rx_lock);
		return 0;
	}

	/* 'procd' may be less than 'recvd' if packets were dropped or routed
	 * to classifier queues. */
	procd = process_received(pktio_entry, sock, priv->umem_info->pool, start_idx, packets,
				 recvd);
	xsk_ring_cons__release(&sock->rx, recvd);
	/* Best-effort replenish of the fill ring with fresh frames. */
	(void)reserve_fill_queue_elements(priv, sock, recvd);

	if (!priv->lockless_rx)
		odp_ticketlock_unlock(&sock->rx_lock);

	return procd;
}
+
+static void handle_pending_tx(xdp_sock_t *sock, uint8_t *base_addr, int num)
+{
+ struct xsk_ring_cons *compl_q;
+ uint32_t sent;
+ uint32_t start_idx;
+ uint64_t frame_off;
+ odp_packet_t pkt;
+
+ if (odp_unlikely(xsk_ring_prod__needs_wakeup(&sock->tx)))
+ (void)sendto(xsk_socket__fd(sock->xsk), NULL, 0U, MSG_DONTWAIT, NULL, 0U);
+
+ compl_q = &sock->compl_q;
+ sent = xsk_ring_cons__peek(compl_q, num, &start_idx);
+
+ if (sent) {
+ odp_packet_t packets[sent];
+
+ for (uint32_t i = 0U; i < sent; ++i) {
+ frame_off = *xsk_ring_cons__comp_addr(compl_q, start_idx++);
+ frame_off = xsk_umem__extract_addr(frame_off);
+ pkt = xsk_umem__get_data(base_addr, frame_off);
+ packets[i] = pkt;
+ packet_hdr(packets[i])->ms_pktio_idx = 0U;
+ }
+
+ odp_packet_free_multi(packets, sent);
+ xsk_ring_cons__release(compl_q, sent);
+ }
+}
+
/* Build one TX descriptor for a packet (segment). The UMEM address encodes
 * the frame offset plus the intra-frame data offset shifted left by
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT, the format used in unaligned chunk mode. */
static inline void populate_tx_desc(odp_packet_hdr_t *pkt_hdr, pool_t *pool,
				    struct xdp_desc *tx_desc, uint32_t len)
{
	uint64_t frame_off;
	uint64_t pkt_off;

	/* Frame offset within the UMEM area: event index * block size. */
	frame_off = pkt_hdr->event_hdr.index.event * pool->block_size;
	/* Data offset within the frame, relative to the frame start. */
	pkt_off = (uint64_t)(uintptr_t)pkt_hdr->seg_data - (uint64_t)(uintptr_t)pool->base_addr
		  - frame_off;
	pkt_off <<= XSK_UNALIGNED_BUF_OFFSET_SHIFT;
	tx_desc->addr = frame_off | pkt_off;
	tx_desc->len = len;
}
+
/* Populate 'seg_cnt' consecutive TX descriptors starting at 'start_idx', one
 * per packet segment, and stamp each segment header with the pktio index so
 * in-flight frames can be attributed to this interface. */
static inline void populate_tx_descs(odp_packet_hdr_t *pkt_hdr, pool_t *pool,
				     struct xsk_ring_prod *tx, int seg_cnt, uint32_t start_idx,
				     int pktio_idx)
{
	/* Single-segment fast path uses the full frame length directly. */
	if (odp_likely(seg_cnt == 1)) {
		populate_tx_desc(pkt_hdr, pool, xsk_ring_prod__tx_desc(tx, start_idx),
				 pkt_hdr->frame_len);
		pkt_hdr->ms_pktio_idx = pktio_idx;
	} else {
		for (int i = 0; i < seg_cnt; ++i) {
			populate_tx_desc(pkt_hdr, pool, xsk_ring_prod__tx_desc(tx, start_idx++),
					 pkt_hdr->seg_len);
			pkt_hdr->ms_pktio_idx = pktio_idx;
			pkt_hdr = pkt_hdr->seg_next;
		}
	}
}
+
/* Send hook: enqueue up to 'num' packets on the TX ring. Packets from a
 * foreign pool are first copied into the UMEM pool (the originals are freed
 * once the copy is queued). Stops at the first allocation or descriptor
 * reservation failure. Returns the number of packets accepted. */
static int sock_xdp_send(pktio_entry_t *pktio_entry, int index, const odp_packet_t packets[],
			 int num)
{
	xdp_sock_info_t *priv;
	xdp_sock_t *sock;
	pool_t *pool;
	odp_pool_t pool_hdl;
	int pktio_idx, i, seg_cnt;
	struct xsk_ring_prod *tx;
	uint8_t *base_addr;
	odp_packet_t pkt;
	odp_packet_hdr_t *pkt_hdr;
	uint32_t tx_descs, start_idx, sent = 0U;
	uint64_t octets = 0U;

	if (odp_unlikely(num == 0))
		return 0;

	priv = pkt_priv(pktio_entry);
	_ODP_ASSERT((uint32_t)index < priv->q_num_conf.num_out_conf_qs);
	sock = &priv->qs[index];

	if (!priv->lockless_tx)
		odp_ticketlock_lock(&sock->tx_lock);

	pool = priv->umem_info->pool;
	pool_hdl = _odp_pool_handle(pool);
	pktio_idx = priv->pktio_idx;
	tx = &sock->tx;
	base_addr = priv->umem_info->pool->base_addr;
	tx_descs = priv->umem_info->num_tx_desc;

	for (i = 0; i < num; ++i) {
		/* 'pkt' stays invalid unless a copy into the UMEM pool is made. */
		pkt = ODP_PACKET_INVALID;
		pkt_hdr = packet_hdr(packets[i]);
		seg_cnt = pkt_hdr->seg_count;

		/* Only frames that live inside the UMEM pool can be queued;
		 * copy packets from other pools. */
		if (_odp_pool_entry(pkt_hdr->event_hdr.pool) != pool) {
			pkt = odp_packet_copy(packets[i], pool_hdl);

			if (odp_unlikely(pkt == ODP_PACKET_INVALID)) {
				++sock->i_stats[TX_PKT_ALLOC_ERR];
				break;
			}

			pkt_hdr = packet_hdr(pkt);
			seg_cnt = pkt_hdr->seg_count;
		}

		/* One descriptor per segment; on a full ring, drain pending
		 * completions once and retry before giving up. */
		if (xsk_ring_prod__reserve(tx, seg_cnt, &start_idx) == 0U) {
			handle_pending_tx(sock, base_addr, tx_descs);

			if (xsk_ring_prod__reserve(tx, seg_cnt, &start_idx) == 0U) {
				if (pkt != ODP_PACKET_INVALID)
					odp_packet_free(pkt);

				++sock->i_stats[TX_DESC_RSV_ERR];

				break;
			}
		}

		/* The copy is what gets transmitted; free the caller's original. */
		if (pkt != ODP_PACKET_INVALID)
			odp_packet_free(packets[i]);

		populate_tx_descs(pkt_hdr, pool, tx, seg_cnt, start_idx, pktio_idx);
		sent += seg_cnt;
		octets += pkt_hdr->frame_len;
	}

	xsk_ring_prod__submit(tx, sent);
	handle_pending_tx(sock, base_addr, tx_descs);
	sock->qo_stats.octets += octets;
	sock->qo_stats.packets += i;

	if (!priv->lockless_tx)
		odp_ticketlock_unlock(&sock->tx_lock);

	return i;
}
+
+static uint32_t sock_xdp_mtu_get(pktio_entry_t *pktio_entry)
+{
+ return pkt_priv(pktio_entry)->mtu;
+}
+
+static int sock_xdp_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+ int ret;
+
+ ret = _odp_mtu_set_fd(priv->helper_sock, pktio_entry->name, maxlen_input);
+ if (ret)
+ return ret;
+
+ priv->mtu = maxlen_input;
+
+ return 0;
+}
+
+static int sock_xdp_promisc_mode_set(pktio_entry_t *pktio_entry, int enable)
+{
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->helper_sock,
+ pktio_entry->name, enable);
+}
+
+static int sock_xdp_promisc_mode_get(pktio_entry_t *pktio_entry)
+{
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->helper_sock,
+ pktio_entry->name);
+}
+
+static int sock_xdp_mac_addr_get(pktio_entry_t *pktio_entry ODP_UNUSED, void *mac_addr)
+{
+ return _odp_mac_addr_get_fd(pkt_priv(pktio_entry)->helper_sock,
+ pktio_entry->name, mac_addr) ? -1 : ETH_ALEN;
+}
+
+static int sock_xdp_link_status(pktio_entry_t *pktio_entry)
+{
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->helper_sock,
+ pktio_entry->name);
+}
+
+static int sock_xdp_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->helper_sock,
+ pktio_entry->name, info);
+}
+
+static int get_nic_queue_capability(int fd, const char *devname, odp_pktio_capability_t *capa)
+{
+ struct ethtool_channels channels;
+ struct ifreq ifr;
+ int ret;
+ const uint32_t cc = odp_cpu_count();
+ uint32_t max_channels;
+
+ memset(&channels, 0, sizeof(struct ethtool_channels));
+ channels.cmd = ETHTOOL_GCHANNELS;
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname);
+ ifr.ifr_data = (char *)&channels;
+ ret = ioctl(fd, SIOCETHTOOL, &ifr);
+
+ if (ret == -1 || channels.max_combined == 0U) {
+ if (ret == -1 && errno != EOPNOTSUPP) {
+ _ODP_ERR("Unable to query NIC queue capabilities: %s\n", strerror(errno));
+ return -1;
+ }
+
+ channels.max_combined = 1U;
+ }
+
+ max_channels = _ODP_MIN(cc, channels.max_combined);
+ capa->max_input_queues = _ODP_MIN((uint32_t)ODP_PKTIN_MAX_QUEUES, max_channels);
+ capa->max_output_queues = _ODP_MIN((uint32_t)ODP_PKTOUT_MAX_QUEUES, max_channels);
+
+ return 0;
+}
+
/* Capability hook: report queue limits (derived from NIC channels and CPU
 * count), MTU range, full parser support and the set of implemented
 * statistics counters. Returns 0 on success, -1 on query failure. */
static int sock_xdp_capability(pktio_entry_t *pktio_entry, odp_pktio_capability_t *capa)
{
	xdp_sock_info_t *priv = pkt_priv(pktio_entry);

	memset(capa, 0, sizeof(odp_pktio_capability_t));

	if (get_nic_queue_capability(priv->helper_sock, pktio_entry->name, capa))
		return -1;

	capa->set_op.op.promisc_mode = 1U;
	capa->set_op.op.maxlen = 1U;

	/* Input and output maxlen must be set to the same value. */
	capa->maxlen.equal = true;
	capa->maxlen.min_input = _ODP_SOCKET_MTU_MIN;
	capa->maxlen.max_input = priv->max_mtu;
	capa->maxlen.min_output = _ODP_SOCKET_MTU_MIN;
	capa->maxlen.max_output = priv->max_mtu;

	capa->config.parser.layer = ODP_PROTO_LAYER_ALL;

	capa->stats.pktio.counter.in_octets = 1U;
	capa->stats.pktio.counter.in_packets = 1U;
	capa->stats.pktio.counter.in_errors = 1U;
	capa->stats.pktio.counter.in_discards = 1U;
	capa->stats.pktio.counter.out_octets = 1U;
	capa->stats.pktio.counter.out_packets = 1U;
	capa->stats.pktio.counter.out_discards = 1U;

	capa->stats.pktin_queue.counter.octets = 1U;
	capa->stats.pktin_queue.counter.packets = 1U;
	capa->stats.pktin_queue.counter.errors = 1U;
	capa->stats.pktin_queue.counter.discards = 1U;
	capa->stats.pktout_queue.counter.octets = 1U;
	capa->stats.pktout_queue.counter.packets = 1U;
	capa->stats.pktout_queue.counter.discards = 1U;

	return 0;
}
+
+static int sock_xdp_input_queues_config(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *param)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+
+ priv->q_num_conf.num_in_conf_qs = param->num_queues;
+ priv->lockless_rx = pktio_entry->param.in_mode == ODP_PKTIN_MODE_SCHED ||
+ param->op_mode == ODP_PKTIO_OP_MT_UNSAFE;
+
+ return 0;
+}
+
+static int sock_xdp_output_queues_config(pktio_entry_t *pktio_entry,
+ const odp_pktout_queue_param_t *param)
+{
+ xdp_sock_info_t *priv = pkt_priv(pktio_entry);
+
+ priv->q_num_conf.num_out_conf_qs = param->num_queues;
+ priv->lockless_tx = param->op_mode == ODP_PKTIO_OP_MT_UNSAFE;
+
+ return 0;
+}
+
/* Operation table for the AF_XDP socket pktio interface. NULL entries are
 * handled by the generic pktio layer (default or unsupported operation). */
const pktio_if_ops_t _odp_sock_xdp_pktio_ops = {
	.name = "socket_xdp",
	.print = NULL,
	.init_global = sock_xdp_init_global,
	.init_local = NULL,
	.term = NULL,
	.open = sock_xdp_open,
	.close = sock_xdp_close,
	.start = sock_xdp_start,
	.stop = sock_xdp_stop,
	.stats = sock_xdp_stats,
	.stats_reset = sock_xdp_stats_reset,
	.pktin_queue_stats = sock_xdp_pktin_queue_stats,
	.pktout_queue_stats = sock_xdp_pktout_queue_stats,
	.extra_stat_info = sock_xdp_extra_stat_info,
	.extra_stats = sock_xdp_extra_stats,
	.extra_stat_counter = sock_xdp_extra_stat_counter,
	.pktio_ts_res = NULL,
	.pktio_ts_from_ns = NULL,
	.pktio_time = NULL,
	.recv = sock_xdp_recv,
	.recv_tmo = NULL,
	.recv_mq_tmo = NULL,
	.fd_set = NULL,
	.send = sock_xdp_send,
	.maxlen_get = sock_xdp_mtu_get,
	.maxlen_set = sock_xdp_mtu_set,
	.promisc_mode_set = sock_xdp_promisc_mode_set,
	.promisc_mode_get = sock_xdp_promisc_mode_get,
	.mac_get = sock_xdp_mac_addr_get,
	.mac_set = NULL,
	.link_status = sock_xdp_link_status,
	.link_info = sock_xdp_link_info,
	.capability = sock_xdp_capability,
	.config = NULL,
	.input_queues_config = sock_xdp_input_queues_config,
	.output_queues_config = sock_xdp_output_queues_config
};
+
+static odp_bool_t sock_xdp_is_mem_src_active(void)
+{
+ return !disable_pktio;
+}
+
/* Permanently disable the AF_XDP pktio memory source; no re-enable path
 * exists in this file. */
static void sock_xdp_force_mem_src_disable(void)
{
	disable_pktio = true;
}
+
+static void sock_xdp_adjust_block_size(uint8_t *data ODP_UNUSED, uint32_t *block_size,
+ uint32_t *block_offset ODP_UNUSED, uint32_t *flags)
+{
+ const uint32_t size = *block_size + XDP_PACKET_HEADROOM;
+ const uint64_t ps = odp_sys_page_size();
+ /* AF_XDP requires frames to be between 2kB and page size, so with
+ * XDP_ZEROCOPY, if block size is less than 2kB, adjust it to 2kB, if
+ * it is larger than page size, make pool creation fail. */
+ if (disable_pktio)
+ return;
+
+ if (size > ps) {
+ _ODP_ERR("Adjusted pool block size larger than page size: %u > %" PRIu64 "\n",
+ size, ps);
+ *block_size = 0U;
+ }
+
+ *flags |= ODP_SHM_HP;
+ *block_size = _ODP_MAX(size, MIN_FRAME_SIZE);
+}
+
/* Pool memory-source hooks: let the pool code size blocks for zerocopy UMEM
 * use and check/force the XDP memory source state. bind/unbind are not
 * needed for this source. */
const _odp_pool_mem_src_ops_t _odp_pool_sock_xdp_mem_src_ops = {
	.name = "xdp_zc",
	.is_active = sock_xdp_is_mem_src_active,
	.force_disable = sock_xdp_force_mem_src_disable,
	.adjust_size = sock_xdp_adjust_block_size,
	.bind = NULL,
	.unbind = NULL
};
+
+#else
+/* Avoid warning about empty translation unit */
+typedef int _odp_dummy;
+#endif
diff --git a/platform/linux-generic/pktio/stats/ethtool_stats.c b/platform/linux-generic/pktio/stats/ethtool_stats.c
new file mode 100644
index 000000000..bbf0729f1
--- /dev/null
+++ b/platform/linux-generic/pktio/stats/ethtool_stats.c
@@ -0,0 +1,281 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_posix_extensions.h>
+
+#include <odp/api/packet_io_stats.h>
+
+#include <odp_debug_internal.h>
+#include <odp_ethtool_stats.h>
+
+#include <sys/ioctl.h>
+#include <netinet/in.h>
+#include <linux/sockios.h>
+#include <linux/ethtool.h>
+#include <errno.h>
+#include <net/if.h>
+
+/*
+ * Suppress bounds warnings about interior zero length arrays. Such an array
+ * is used intentionally in sset_info.
+ */
+#if __GNUC__ >= 10
+#pragma GCC diagnostic ignored "-Wzero-length-bounds"
+#endif
+
+static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
+{
+ union {
+ struct ethtool_sset_info hdr;
+ /* Reserve space for hdr.data. */
+ uint8_t buf[sizeof(struct ethtool_sset_info) +
+ sizeof(((struct ethtool_sset_info *)0)->data[0])];
+ } sset_info;
+ struct ethtool_drvinfo drvinfo;
+ uint32_t len;
+ struct ethtool_gstrings *strings;
+ ptrdiff_t drvinfo_offset = offsetof(struct ethtool_drvinfo, n_stats);
+
+ sset_info.hdr.cmd = ETHTOOL_GSSET_INFO;
+ sset_info.hdr.reserved = 0;
+ sset_info.hdr.sset_mask = 1ULL << ETH_SS_STATS;
+ ifr->ifr_data = (void *)&sset_info;
+ if (ioctl(fd, SIOCETHTOOL, ifr) == 0) {
+ len = sset_info.hdr.sset_mask ? sset_info.hdr.data[0] : 0;
+ } else if (errno == EOPNOTSUPP && drvinfo_offset != 0) {
+ /* Fallback for old kernel versions */
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr->ifr_data = (void *)&drvinfo;
+ if (ioctl(fd, SIOCETHTOOL, ifr)) {
+ _ODP_ERR("Cannot get stats information: %s\n", strerror(errno));
+ return NULL;
+ }
+ len = *(uint32_t *)(void *)((char *)&drvinfo + drvinfo_offset);
+ } else {
+ return NULL;
+ }
+
+ if (!len) {
+ _ODP_ERR("len is zero");
+ return NULL;
+ }
+
+ strings = calloc(1, sizeof(*strings) + len * ETH_GSTRING_LEN);
+ if (!strings) {
+ _ODP_ERR("alloc failed\n");
+ return NULL;
+ }
+
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = len;
+ ifr->ifr_data = (void *)strings;
+ if (ioctl(fd, SIOCETHTOOL, ifr)) {
+ _ODP_ERR("Cannot get stats information: %s\n", strerror(errno));
+ free(strings);
+ return NULL;
+ }
+
+ return strings;
+}
+
/* Fetch ethtool counter names and values for interface 'name'.
 *
 * Each of 'strings_out', 'estats_out' and 'nstats_out' may be NULL when the
 * caller does not need it; non-NULL outputs receive malloc'd buffers whose
 * ownership transfers to the caller (caller frees).
 *
 * Returns 0 on success, -1 on failure (nothing is returned to the caller). */
static int ethtool_stats_get(int fd, const char *name,
			     struct ethtool_gstrings **strings_out,
			     struct ethtool_stats **estats_out,
			     unsigned int *nstats_out)
{
	struct ethtool_gstrings *strings;
	struct ethtool_stats *estats;
	struct ifreq ifr;
	unsigned int n_stats;
	int err;

	memset(&ifr, 0, sizeof(ifr));
	snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);

	strings = get_stringset(fd, &ifr);
	if (!strings)
		return -1;

	n_stats = strings->len;
	if (n_stats < 1) {
		_ODP_ERR("no stats available\n");
		free(strings);
		return -1;
	}

	/* One uint64_t value per counter, appended to the stats header. */
	estats = calloc(1, n_stats * sizeof(uint64_t) +
			sizeof(struct ethtool_stats));
	if (!estats) {
		free(strings);
		return -1;
	}

	estats->cmd = ETHTOOL_GSTATS;
	estats->n_stats = n_stats;
	ifr.ifr_data = (void *)estats;
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err < 0) {
		free(strings);
		free(estats);
		return -1;
	}

	/* Hand buffers to the caller or release them if not wanted. */
	if (strings_out)
		*strings_out = strings;
	else
		free(strings);

	if (estats_out)
		*estats_out = estats;
	else
		free(estats);

	if (nstats_out)
		*nstats_out = n_stats;

	return 0;
}
+
/* Fill ODP pktio statistics from ethtool counters, matching kernel driver
 * counter names against the known naming variants. Returns 0 when all 14
 * expected counters were found, -1 otherwise (counters that were found are
 * still written to 'stats'). */
int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
{
	struct ethtool_gstrings *strings;
	struct ethtool_stats *estats;
	unsigned int i, n_stats;
	int cnts = 0;

	if (ethtool_stats_get(fd, name, &strings, &estats, &n_stats))
		return -1;

	/* Counter i's name lives at offset i * ETH_GSTRING_LEN in the
	 * strings blob; its value is estats->data[i]. */
	for (i = 0; i < n_stats; i++) {
		char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
		uint64_t val = estats->data[i];

		if (!strcmp(cnt, "rx_octets") ||
		    !strcmp(cnt, "rx_bytes")) {
			stats->in_octets = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_packets")) {
			stats->in_packets = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_ucast_packets") ||
			   !strcmp(cnt, "rx_unicast")) {
			stats->in_ucast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_broadcast") ||
			   !strcmp(cnt, "rx_bcast_packets")) {
			stats->in_bcast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_multicast") ||
			   !strcmp(cnt, "rx_mcast_packets")) {
			stats->in_mcast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_discards") ||
			   !strcmp(cnt, "rx_dropped")) {
			stats->in_discards = val;
			cnts++;
		} else if (!strcmp(cnt, "rx_errors")) {
			stats->in_errors = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_octets") ||
			   !strcmp(cnt, "tx_bytes")) {
			stats->out_octets = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_packets")) {
			stats->out_packets = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_ucast_packets") ||
			   !strcmp(cnt, "tx_unicast")) {
			stats->out_ucast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_broadcast") ||
			   !strcmp(cnt, "tx_bcast_packets")) {
			stats->out_bcast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_multicast") ||
			   !strcmp(cnt, "tx_mcast_packets")) {
			stats->out_mcast_pkts = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_discards") ||
			   !strcmp(cnt, "tx_dropped")) {
			stats->out_discards = val;
			cnts++;
		} else if (!strcmp(cnt, "tx_errors")) {
			stats->out_errors = val;
			cnts++;
		}
	}

	free(strings);
	free(estats);

	/* Ethtool strings came from kernel driver. Name of that
	 * strings is not universal. Current function needs to be updated
	 * if your driver has different names for counters */
	if (cnts < 14)
		return -1;

	return 0;
}
+
+int _odp_ethtool_extra_stat_info(int fd, const char *name,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ struct ethtool_gstrings *strings;
+ unsigned int i, n_stats;
+
+ if (ethtool_stats_get(fd, name, &strings, NULL, &n_stats))
+ return -1;
+
+ for (i = 0; i < n_stats && i < (unsigned int)num; i++) {
+ char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
+
+ strncpy(info[i].name, cnt, ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1);
+ }
+
+ free(strings);
+
+ return n_stats;
+}
+
+int _odp_ethtool_extra_stats(int fd, const char *name, uint64_t stats[], int num)
+{
+ struct ethtool_stats *estats;
+ unsigned int i, n_stats;
+
+ if (ethtool_stats_get(fd, name, NULL, &estats, &n_stats))
+ return -1;
+
+ for (i = 0; i < n_stats && i < (unsigned int)num; i++)
+ stats[i] = estats->data[i];
+
+ free(estats);
+
+ return n_stats;
+}
+
+int _odp_ethtool_extra_stat_counter(int fd, const char *name, uint32_t id,
+ uint64_t *stat)
+{
+ struct ethtool_stats *estats;
+ unsigned int n_stats;
+ int ret = 0;
+
+ if (ethtool_stats_get(fd, name, NULL, &estats, &n_stats))
+ return -1;
+
+ if (id >= n_stats) {
+ _ODP_ERR("Invalid counter id\n");
+ ret = -1;
+ } else {
+ *stat = estats->data[id];
+ }
+
+ free(estats);
+
+ return ret;
+}
diff --git a/platform/linux-generic/pktio/stats/packet_io_stats.c b/platform/linux-generic/pktio/stats/packet_io_stats.c
new file mode 100644
index 000000000..280aca250
--- /dev/null
+++ b/platform/linux-generic/pktio/stats/packet_io_stats.c
@@ -0,0 +1,192 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_packet_io_stats.h>
+#include <odp_ethtool_stats.h>
+#include <odp_sysfs_stats.h>
+
+#include <string.h>
+
+static int sock_stats_get(pktio_entry_t *e, odp_pktio_stats_t *stats, int fd)
+{
+ int ret = 0;
+
+ memset(stats, 0, sizeof(*stats));
+
+ if (e->stats_type == STATS_ETHTOOL)
+ ret = _odp_ethtool_stats_get_fd(fd, e->name, stats);
+ else if (e->stats_type == STATS_SYSFS)
+ ret = _odp_sysfs_stats(e, stats);
+
+ if (ret)
+ _ODP_ERR("Failed to get pktio statistics.\n");
+
+ return ret;
+}
+
+int _odp_sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
+{
+ odp_pktio_stats_t cur_stats;
+ int ret;
+
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED) {
+ memset(&pktio_entry->stats, 0,
+ sizeof(odp_pktio_stats_t));
+ return 0;
+ }
+
+ ret = sock_stats_get(pktio_entry, &cur_stats, fd);
+
+ if (!ret)
+ memcpy(&pktio_entry->stats, &cur_stats,
+ sizeof(odp_pktio_stats_t));
+
+ return ret;
+}
+
+int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats,
+ int fd)
+{
+ odp_pktio_stats_t cur_stats;
+
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED) {
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+ }
+
+ if (sock_stats_get(pktio_entry, &cur_stats, fd))
+ return -1;
+
+ stats->in_octets = cur_stats.in_octets -
+ pktio_entry->stats.in_octets;
+ stats->in_packets = cur_stats.in_packets -
+ pktio_entry->stats.in_packets;
+ stats->in_ucast_pkts = cur_stats.in_ucast_pkts -
+ pktio_entry->stats.in_ucast_pkts;
+ stats->in_bcast_pkts = cur_stats.in_bcast_pkts -
+ pktio_entry->stats.in_bcast_pkts;
+ stats->in_mcast_pkts = cur_stats.in_mcast_pkts -
+ pktio_entry->stats.in_mcast_pkts;
+ stats->in_discards = cur_stats.in_discards -
+ pktio_entry->stats.in_discards;
+ stats->in_errors = cur_stats.in_errors -
+ pktio_entry->stats.in_errors;
+ stats->out_octets = cur_stats.out_octets -
+ pktio_entry->stats.out_octets;
+ stats->out_packets = cur_stats.out_packets -
+ pktio_entry->stats.out_packets;
+ stats->out_ucast_pkts = cur_stats.out_ucast_pkts -
+ pktio_entry->stats.out_ucast_pkts;
+ stats->out_bcast_pkts = cur_stats.out_bcast_pkts -
+ pktio_entry->stats.out_bcast_pkts;
+ stats->out_mcast_pkts = cur_stats.out_mcast_pkts -
+ pktio_entry->stats.out_mcast_pkts;
+ stats->out_discards = cur_stats.out_discards -
+ pktio_entry->stats.out_discards;
+ stats->out_errors = cur_stats.out_errors -
+ pktio_entry->stats.out_errors;
+
+ return 0;
+}
+
+int _odp_sock_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num,
+ int fd)
+{
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED)
+ return 0;
+
+ if (pktio_entry->stats_type == STATS_ETHTOOL)
+ return _odp_ethtool_extra_stat_info(fd, pktio_entry->name,
+ info, num);
+ else if (pktio_entry->stats_type == STATS_SYSFS)
+ return _odp_sysfs_extra_stat_info(pktio_entry, info, num);
+
+ return 0;
+}
+
+int _odp_sock_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[], int num,
+ int fd)
+{
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED)
+ return 0;
+
+ if (pktio_entry->stats_type == STATS_ETHTOOL)
+ return _odp_ethtool_extra_stats(fd, pktio_entry->name,
+ stats, num);
+ else if (pktio_entry->stats_type == STATS_SYSFS)
+ return _odp_sysfs_extra_stats(pktio_entry, stats, num);
+
+ return 0;
+}
+
+int _odp_sock_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat, int fd)
+{
+ if (pktio_entry->stats_type == STATS_UNSUPPORTED)
+ return -1;
+
+ if (pktio_entry->stats_type == STATS_ETHTOOL) {
+ return _odp_ethtool_extra_stat_counter(fd, pktio_entry->name,
+ id, stat);
+ } else if (pktio_entry->stats_type == STATS_SYSFS) {
+ return _odp_sysfs_extra_stat_counter(pktio_entry, id, stat);
+ }
+
+ return 0;
+}
+
+pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd)
+{
+ odp_pktio_stats_t cur_stats;
+
+ if (!_odp_ethtool_stats_get_fd(fd, pktio_entry->name, &cur_stats))
+ return STATS_ETHTOOL;
+
+ if (!_odp_sysfs_stats(pktio_entry, &cur_stats))
+ return STATS_SYSFS;
+
+ return STATS_UNSUPPORTED;
+}
+
+void _odp_sock_stats_capa(pktio_entry_t *pktio_entry,
+ odp_pktio_capability_t *capa)
+{
+ capa->stats.pktio.all_counters = 0;
+ capa->stats.pktin_queue.all_counters = 0;
+ capa->stats.pktout_queue.all_counters = 0;
+
+ if (pktio_entry->stats_type == STATS_SYSFS) {
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_ucast_pkts = 1;
+ capa->stats.pktio.counter.in_mcast_pkts = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_ucast_pkts = 1;
+ capa->stats.pktio.counter.out_discards = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+ } else if (pktio_entry->stats_type == STATS_ETHTOOL) {
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_ucast_pkts = 1;
+ capa->stats.pktio.counter.in_bcast_pkts = 1;
+ capa->stats.pktio.counter.in_mcast_pkts = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_ucast_pkts = 1;
+ capa->stats.pktio.counter.out_bcast_pkts = 1;
+ capa->stats.pktio.counter.out_mcast_pkts = 1;
+ capa->stats.pktio.counter.out_discards = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+ }
+}
diff --git a/platform/linux-generic/pktio/stats/sysfs_stats.c b/platform/linux-generic/pktio/stats/sysfs_stats.c
new file mode 100644
index 000000000..2b47d4b83
--- /dev/null
+++ b/platform/linux-generic/pktio/stats/sysfs_stats.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021-2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/packet_io_stats.h>
+
+#include <odp_debug_internal.h>
+#include <odp_sysfs_stats.h>
+
+#include <dirent.h>
+#include <errno.h>
+#include <string.h>
+#include <inttypes.h>
+#include <linux/limits.h>
+
+#define SYSFS_DIR "/sys/class/net/%s/statistics"
+
+static int sysfs_get_val(const char *fname, uint64_t *val)
+{
+ FILE *file;
+ char str[128];
+ int ret = -1;
+
+ file = fopen(fname, "rt");
+ if (file == NULL) {
+ /* do not print debug err if sysfs is not supported by
+ * kernel driver.
+ */
+ if (errno != ENOENT)
+ _ODP_ERR("fopen %s: %s\n", fname, strerror(errno));
+ return 0;
+ }
+
+ if (fgets(str, sizeof(str), file) != NULL)
+ ret = sscanf(str, "%" SCNx64, val);
+
+ (void)fclose(file);
+
+ if (ret != 1) {
+ _ODP_ERR("read %s\n", fname);
+ return -1;
+ }
+
+ return 0;
+}
+
+int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ char fname[256];
+ const char *dev = pktio_entry->name;
+ int ret = 0;
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_bytes", dev);
+ ret -= sysfs_get_val(fname, &stats->in_octets);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_packets", dev);
+ ret -= sysfs_get_val(fname, &stats->in_packets);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_packets", dev);
+ ret -= sysfs_get_val(fname, &stats->in_ucast_pkts);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/multicast", dev);
+ ret -= sysfs_get_val(fname, &stats->in_mcast_pkts);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_dropped", dev);
+ ret -= sysfs_get_val(fname, &stats->in_discards);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_errors", dev);
+ ret -= sysfs_get_val(fname, &stats->in_errors);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_bytes", dev);
+ ret -= sysfs_get_val(fname, &stats->out_octets);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_packets", dev);
+ ret -= sysfs_get_val(fname, &stats->out_packets);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_packets", dev);
+ ret -= sysfs_get_val(fname, &stats->out_ucast_pkts);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_dropped", dev);
+ ret -= sysfs_get_val(fname, &stats->out_discards);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_errors", dev);
+ ret -= sysfs_get_val(fname, &stats->out_errors);
+
+ return ret;
+}
+
+int _odp_sysfs_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ struct dirent *e;
+ DIR *dir;
+ char sysfs_dir[PATH_MAX];
+ int counters = 0;
+
+ snprintf(sysfs_dir, PATH_MAX, SYSFS_DIR, pktio_entry->name);
+ dir = opendir(sysfs_dir);
+ if (!dir) {
+ _ODP_ERR("Failed to open sysfs dir: %s\n", sysfs_dir);
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ /* Skip . and .. */
+ if (strncmp(e->d_name, ".", 1) == 0)
+ continue;
+
+ if (info && counters < num)
+ snprintf(info[counters].name,
+ ODP_PKTIO_STATS_EXTRA_NAME_LEN, "%s",
+ e->d_name);
+ counters++;
+ }
+ (void)closedir(dir);
+
+ return counters;
+}
+
+int _odp_sysfs_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ struct dirent *e;
+ DIR *dir;
+ char sysfs_dir[PATH_MAX];
+ char file_path[PATH_MAX];
+ int counters = 0;
+
+ snprintf(sysfs_dir, PATH_MAX, SYSFS_DIR, pktio_entry->name);
+ dir = opendir(sysfs_dir);
+ if (!dir) {
+ _ODP_ERR("Failed to open dir: %s\n", sysfs_dir);
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ uint64_t val;
+
+ /* Skip . and .. */
+ if (strncmp(e->d_name, ".", 1) == 0)
+ continue;
+
+ snprintf(file_path, PATH_MAX, "%s/%s", sysfs_dir, e->d_name);
+ if (sysfs_get_val(file_path, &val)) {
+			_ODP_ERR("Failed to read file: %s\n", file_path);
+ counters = -1;
+ break;
+ }
+
+ if (stats && counters < num)
+ stats[counters] = val;
+
+ counters++;
+ }
+ (void)closedir(dir);
+
+ return counters;
+}
+
+int _odp_sysfs_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ struct dirent *e;
+ DIR *dir;
+ char sysfs_dir[PATH_MAX];
+ char file_path[PATH_MAX];
+ uint32_t counters = 0;
+ int ret = -1;
+
+ snprintf(sysfs_dir, PATH_MAX, SYSFS_DIR, pktio_entry->name);
+ dir = opendir(sysfs_dir);
+ if (!dir) {
+ _ODP_ERR("Failed to open dir: %s\n", sysfs_dir);
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ /* Skip . and .. */
+ if (strncmp(e->d_name, ".", 1) == 0)
+ continue;
+
+ if (counters == id) {
+ uint64_t val;
+
+ snprintf(file_path, PATH_MAX, "%s/%s",
+ sysfs_dir, e->d_name);
+ if (sysfs_get_val(file_path, &val)) {
+				_ODP_ERR("Failed to read file: %s\n", file_path);
+ } else {
+ *stat = val;
+ ret = 0;
+ }
+ break;
+ }
+ counters++;
+ }
+ (void)closedir(dir);
+
+ return ret;
+}
diff --git a/platform/linux-generic/pktio/sysfs.c b/platform/linux-generic/pktio/sysfs.c
deleted file mode 100644
index be0822ddd..000000000
--- a/platform/linux-generic/pktio/sysfs.c
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_api.h>
-#include <odp_packet_io_internal.h>
-#include <errno.h>
-#include <string.h>
-#include <inttypes.h>
-
-static int sysfs_get_val(const char *fname, uint64_t *val)
-{
- FILE *file;
- char str[128];
- int ret = -1;
-
- file = fopen(fname, "rt");
- if (file == NULL) {
- __odp_errno = errno;
- /* do not print debug err if sysfs is not supported by
- * kernel driver.
- */
- if (errno != ENOENT)
- ODP_ERR("fopen %s: %s\n", fname, strerror(errno));
- return 0;
- }
-
- if (fgets(str, sizeof(str), file) != NULL)
- ret = sscanf(str, "%" SCNx64, val);
-
- (void)fclose(file);
-
- if (ret != 1) {
- ODP_ERR("read %s\n", fname);
- return -1;
- }
-
- return 0;
-}
-
-int sysfs_stats(pktio_entry_t *pktio_entry,
- odp_pktio_stats_t *stats)
-{
- char fname[256];
- const char *dev = pktio_entry->s.name;
- int ret = 0;
-
- sprintf(fname, "/sys/class/net/%s/statistics/rx_bytes", dev);
- ret -= sysfs_get_val(fname, &stats->in_octets);
-
- sprintf(fname, "/sys/class/net/%s/statistics/rx_packets", dev);
- ret -= sysfs_get_val(fname, &stats->in_ucast_pkts);
-
- sprintf(fname, "/sys/class/net/%s/statistics/rx_droppped", dev);
- ret -= sysfs_get_val(fname, &stats->in_discards);
-
- sprintf(fname, "/sys/class/net/%s/statistics/rx_errors", dev);
- ret -= sysfs_get_val(fname, &stats->in_errors);
-
- /* stats->in_unknown_protos is not supported in sysfs */
-
- sprintf(fname, "/sys/class/net/%s/statistics/tx_bytes", dev);
- ret -= sysfs_get_val(fname, &stats->out_octets);
-
- sprintf(fname, "/sys/class/net/%s/statistics/tx_packets", dev);
- ret -= sysfs_get_val(fname, &stats->out_ucast_pkts);
-
- sprintf(fname, "/sys/class/net/%s/statistics/tx_dropped", dev);
- ret -= sysfs_get_val(fname, &stats->out_discards);
-
- sprintf(fname, "/sys/class/net/%s/statistics/tx_errors", dev);
- ret -= sysfs_get_val(fname, &stats->out_errors);
-
- return ret;
-}
diff --git a/platform/linux-generic/pktio/tap.c b/platform/linux-generic/pktio/tap.c
index ac2045606..baac09646 100644
--- a/platform/linux-generic/pktio/tap.c
+++ b/platform/linux-generic/pktio/tap.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015, Ilya Maximets <i.maximets@samsung.com>
+ * Copyright (c) 2021-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -29,6 +30,21 @@
#include <odp_posix_extensions.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/packet_io.h>
+#include <odp/api/random.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/packet_inlines.h>
+
+#include <odp_parse_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_socket_common.h>
+#include <odp_packet_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_classification_internal.h>
+
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
@@ -39,24 +55,55 @@
#include <sys/types.h>
#include <linux/if_tun.h>
-#include <odp_api.h>
-#include <odp_packet_socket.h>
-#include <odp_packet_internal.h>
-#include <odp_packet_io_internal.h>
-#include <odp_classification_internal.h>
+typedef struct {
+ int fd; /**< file descriptor for tap interface*/
+ int skfd; /**< socket descriptor */
+ uint32_t mtu; /**< cached mtu */
+ uint32_t mtu_max; /**< maximum supported MTU value */
+ unsigned char if_mac[ETH_ALEN]; /**< MAC address of pktio side (not a
+ MAC address of kernel interface)*/
+ odp_pool_t pool; /**< pool to alloc packets from */
+} pkt_tap_t;
-#define BUF_SIZE 65536
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_tap_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static inline pkt_tap_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_tap_t *)(uintptr_t)(pktio_entry->pkt_priv);
+}
static int gen_random_mac(unsigned char *mac)
{
mac[0] = 0x7a; /* not multicast and local assignment bit is set */
- if (odp_random_data(mac + 1, 5, false) < 5) {
- ODP_ERR("odp_random_data failed.\n");
+ if (odp_random_data(mac + 1, 5, ODP_RANDOM_BASIC) < 5) {
+ _ODP_ERR("odp_random_data failed.\n");
return -1;
}
return 0;
}
+static int mac_addr_set_fd(int fd, const char *name,
+ const unsigned char mac_dst[])
+{
+ struct ifreq ethreq;
+ int ret;
+
+ memset(&ethreq, 0, sizeof(ethreq));
+ snprintf(ethreq.ifr_name, IF_NAMESIZE, "%s", name);
+
+ ethreq.ifr_hwaddr.sa_family = AF_UNIX;
+ memcpy(ethreq.ifr_hwaddr.sa_data, mac_dst, ETH_ALEN);
+
+ ret = ioctl(fd, SIOCSIFHWADDR, &ethreq);
+ if (ret != 0) {
+ _ODP_ERR("ioctl(SIOCSIFHWADDR): %s: \"%s\".\n", strerror(errno), ethreq.ifr_name);
+ return -1;
+ }
+
+ return 0;
+}
+
static int tap_pktio_open(odp_pktio_t id ODP_UNUSED,
pktio_entry_t *pktio_entry,
const char *devname, odp_pool_t pool)
@@ -64,7 +111,7 @@ static int tap_pktio_open(odp_pktio_t id ODP_UNUSED,
int fd, skfd, flags;
uint32_t mtu;
struct ifreq ifr;
- pkt_tap_t *tap = &pktio_entry->s.pkt_tap;
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
if (strncmp(devname, "tap:", 4) != 0)
return -1;
@@ -79,8 +126,7 @@ static int tap_pktio_open(odp_pktio_t id ODP_UNUSED,
fd = open("/dev/net/tun", O_RDWR);
if (fd < 0) {
- __odp_errno = errno;
- ODP_ERR("failed to open /dev/net/tun: %s\n", strerror(errno));
+ _ODP_ERR("failed to open /dev/net/tun: %s\n", strerror(errno));
return -1;
}
@@ -94,23 +140,19 @@ static int tap_pktio_open(odp_pktio_t id ODP_UNUSED,
snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", devname + 4);
if (ioctl(fd, TUNSETIFF, (void *)&ifr) < 0) {
- __odp_errno = errno;
- ODP_ERR("%s: creating tap device failed: %s\n",
- ifr.ifr_name, strerror(errno));
+ _ODP_ERR("%s: creating tap device failed: %s\n", ifr.ifr_name, strerror(errno));
goto tap_err;
}
/* Set nonblocking mode on interface. */
flags = fcntl(fd, F_GETFL, 0);
if (flags < 0) {
- __odp_errno = errno;
- ODP_ERR("fcntl(F_GETFL) failed: %s\n", strerror(errno));
+ _ODP_ERR("fcntl(F_GETFL) failed: %s\n", strerror(errno));
goto tap_err;
}
if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) < 0) {
- __odp_errno = errno;
- ODP_ERR("fcntl(F_SETFL) failed: %s\n", strerror(errno));
+ _ODP_ERR("fcntl(F_SETFL) failed: %s\n", strerror(errno));
goto tap_err;
}
@@ -120,61 +162,102 @@ static int tap_pktio_open(odp_pktio_t id ODP_UNUSED,
/* Create AF_INET socket for network interface related operations. */
skfd = socket(AF_INET, SOCK_DGRAM, 0);
if (skfd < 0) {
- __odp_errno = errno;
- ODP_ERR("socket creation failed: %s\n", strerror(errno));
+ _ODP_ERR("socket creation failed: %s\n", strerror(errno));
goto tap_err;
}
- mtu = mtu_get_fd(skfd, devname + 4);
+ mtu = _odp_mtu_get_fd(skfd, devname + 4);
if (mtu == 0) {
- __odp_errno = errno;
- ODP_ERR("mtu_get_fd failed: %s\n", strerror(errno));
+ _ODP_ERR("_odp_mtu_get_fd failed: %s\n", strerror(errno));
goto sock_err;
}
+ tap->mtu_max = _ODP_SOCKET_MTU_MAX;
+ if (mtu > tap->mtu_max)
+ tap->mtu_max = mtu;
+
+ tap->fd = fd;
+ tap->skfd = skfd;
+ tap->mtu = mtu;
+ tap->pool = pool;
+ return 0;
+sock_err:
+ close(skfd);
+tap_err:
+ close(fd);
+ _ODP_ERR("Tap device alloc failed.\n");
+ return -1;
+}
+
+static int tap_pktio_start(pktio_entry_t *pktio_entry)
+{
+ struct ifreq ifr;
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
- /* Up interface by default. */
- if (ioctl(skfd, SIOCGIFFLAGS, &ifr) < 0) {
- __odp_errno = errno;
- ODP_ERR("ioctl(SIOCGIFFLAGS) failed: %s\n", strerror(errno));
+ odp_memset(&ifr, 0, sizeof(ifr));
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s",
+ (char *)pktio_entry->name + 4);
+
+ /* Up interface by default. */
+ if (ioctl(tap->skfd, SIOCGIFFLAGS, &ifr) < 0) {
+ _ODP_ERR("ioctl(SIOCGIFFLAGS) failed: %s\n", strerror(errno));
goto sock_err;
}
ifr.ifr_flags |= IFF_UP;
ifr.ifr_flags |= IFF_RUNNING;
- if (ioctl(skfd, SIOCSIFFLAGS, &ifr) < 0) {
- __odp_errno = errno;
- ODP_ERR("failed to come up: %s\n", strerror(errno));
+ if (ioctl(tap->skfd, SIOCSIFFLAGS, &ifr) < 0) {
+ _ODP_ERR("failed to come up: %s\n", strerror(errno));
goto sock_err;
}
- tap->fd = fd;
- tap->skfd = skfd;
- tap->mtu = mtu;
- tap->pool = pool;
return 0;
sock_err:
- close(skfd);
-tap_err:
- close(fd);
- ODP_ERR("Tap device alloc failed.\n");
+	_ODP_ERR("Tap device start failed.\n");
+ return -1;
+}
+
+static int tap_pktio_stop(pktio_entry_t *pktio_entry)
+{
+ struct ifreq ifr;
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
+
+ odp_memset(&ifr, 0, sizeof(ifr));
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s",
+ (char *)pktio_entry->name + 4);
+
+	/* Take the interface down. */
+ if (ioctl(tap->skfd, SIOCGIFFLAGS, &ifr) < 0) {
+ _ODP_ERR("ioctl(SIOCGIFFLAGS) failed: %s\n", strerror(errno));
+ goto sock_err;
+ }
+
+ ifr.ifr_flags &= ~IFF_UP;
+ ifr.ifr_flags &= ~IFF_RUNNING;
+
+ if (ioctl(tap->skfd, SIOCSIFFLAGS, &ifr) < 0) {
+		_ODP_ERR("failed to bring interface down: %s\n", strerror(errno));
+ goto sock_err;
+ }
+
+ return 0;
+sock_err:
+	_ODP_ERR("Tap device stop failed.\n");
return -1;
}
static int tap_pktio_close(pktio_entry_t *pktio_entry)
{
int ret = 0;
- pkt_tap_t *tap = &pktio_entry->s.pkt_tap;
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
if (tap->fd != -1 && close(tap->fd) != 0) {
- __odp_errno = errno;
- ODP_ERR("close(tap->fd): %s\n", strerror(errno));
+ _ODP_ERR("close(tap->fd): %s\n", strerror(errno));
ret = -1;
}
if (tap->skfd != -1 && close(tap->skfd) != 0) {
- __odp_errno = errno;
- ODP_ERR("close(tap->skfd): %s\n", strerror(errno));
+ _ODP_ERR("close(tap->skfd): %s\n", strerror(errno));
ret = -1;
}
@@ -186,102 +269,135 @@ static odp_packet_t pack_odp_pkt(pktio_entry_t *pktio_entry, const void *data,
{
odp_packet_t pkt;
odp_packet_hdr_t *pkt_hdr;
- odp_packet_hdr_t parsed_hdr;
int num;
+ uint16_t frame_offset = pktio_entry->pktin_frame_offset;
+ const odp_proto_layer_t layer = pktio_entry->parse_layer;
+ const odp_pktin_config_opt_t opt = pktio_entry->config.pktin;
- if (pktio_cls_enabled(pktio_entry)) {
- if (cls_classify_packet(pktio_entry, data, len, len,
- &pktio_entry->s.pkt_tap.pool,
- &parsed_hdr)) {
- return ODP_PACKET_INVALID;
- }
- }
-
- num = packet_alloc_multi(pktio_entry->s.pkt_tap.pool, len, &pkt, 1);
-
+ num = _odp_packet_alloc_multi(pkt_priv(pktio_entry)->pool,
+ len + frame_offset, &pkt, 1);
if (num != 1)
return ODP_PACKET_INVALID;
+ pkt_hdr = packet_hdr(pkt);
+
+ if (frame_offset)
+ pull_head(pkt_hdr, frame_offset);
+
if (odp_packet_copy_from_mem(pkt, 0, len, data) < 0) {
- ODP_ERR("failed to copy packet data\n");
+ _ODP_ERR("failed to copy packet data\n");
odp_packet_free(pkt);
return ODP_PACKET_INVALID;
}
- pkt_hdr = odp_packet_hdr(pkt);
+ if (layer) {
+ if (_odp_packet_parse_common(pkt_hdr, data, len, len, layer,
+ opt) < 0) {
+ odp_packet_free(pkt);
+ return ODP_PACKET_INVALID;
+ }
- if (pktio_cls_enabled(pktio_entry))
- copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
- else
- packet_parse_l2(&pkt_hdr->p, len);
+ if (pktio_cls_enabled(pktio_entry)) {
+ odp_pool_t new_pool;
+
+ if (_odp_cls_classify_packet(pktio_entry, data,
+ &new_pool, pkt_hdr)) {
+ odp_packet_free(pkt);
+ return ODP_PACKET_INVALID;
+ }
+
+ if (odp_unlikely(_odp_pktio_packet_to_pool(
+ &pkt, &pkt_hdr, new_pool))) {
+ odp_packet_free(pkt);
+ return ODP_PACKET_INVALID;
+ }
+ }
+ }
packet_set_ts(pkt_hdr, ts);
- pkt_hdr->input = pktio_entry->s.handle;
+ pkt_hdr->input = pktio_entry->handle;
return pkt;
}
static int tap_pktio_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- odp_packet_t pkts[], int len)
+ odp_packet_t pkts[], int num)
{
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
ssize_t retval;
int i;
- uint8_t buf[BUF_SIZE];
- pkt_tap_t *tap = &pktio_entry->s.pkt_tap;
+ uint32_t mtu = tap->mtu;
+ uint8_t buf[mtu];
odp_time_t ts_val;
odp_time_t *ts = NULL;
+ int num_rx = 0;
+ int num_cls = 0;
+ const int cls_enabled = pktio_cls_enabled(pktio_entry);
+ odp_packet_t pkt;
- odp_ticketlock_lock(&pktio_entry->s.rxl);
+ odp_ticketlock_lock(&pktio_entry->rxl);
- if (pktio_entry->s.config.pktin.bit.ts_all ||
- pktio_entry->s.config.pktin.bit.ts_ptp)
+ if (pktio_entry->config.pktin.bit.ts_all ||
+ pktio_entry->config.pktin.bit.ts_ptp)
ts = &ts_val;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < num; i++) {
do {
- retval = read(tap->fd, buf, BUF_SIZE);
+ retval = read(tap->fd, buf, mtu);
} while (retval < 0 && errno == EINTR);
if (ts != NULL)
ts_val = odp_time_global();
if (retval < 0) {
- __odp_errno = errno;
break;
}
- pkts[i] = pack_odp_pkt(pktio_entry, buf, retval, ts);
- if (pkts[i] == ODP_PACKET_INVALID)
+ pkt = pack_odp_pkt(pktio_entry, buf, retval, ts);
+ if (pkt == ODP_PACKET_INVALID)
break;
+
+ if (cls_enabled) {
+ /* Enqueue packets directly to classifier destination queue */
+ pkts[num_cls++] = pkt;
+ num_cls = _odp_cls_enq(pkts, num_cls, (i + 1 == num));
+ } else {
+ pkts[num_rx++] = pkt;
+ }
}
- odp_ticketlock_unlock(&pktio_entry->s.rxl);
+ /* Enqueue remaining classified packets */
+ if (odp_unlikely(num_cls))
+ _odp_cls_enq(pkts, num_cls, true);
- return i;
+ odp_ticketlock_unlock(&pktio_entry->rxl);
+
+ return num_rx;
}
static int tap_pktio_send_lockless(pktio_entry_t *pktio_entry,
- const odp_packet_t pkts[], int len)
+ const odp_packet_t pkts[], int num)
{
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
ssize_t retval;
int i, n;
uint32_t pkt_len;
- uint8_t buf[BUF_SIZE];
- pkt_tap_t *tap = &pktio_entry->s.pkt_tap;
+ uint32_t mtu = tap->mtu;
+ uint8_t tx_ts_enabled = _odp_pktio_tx_ts_enabled(pktio_entry);
+ uint8_t buf[mtu];
- for (i = 0; i < len; i++) {
+ for (i = 0; i < num; i++) {
pkt_len = odp_packet_len(pkts[i]);
- if (pkt_len > tap->mtu) {
+ if (odp_unlikely(pkt_len > mtu)) {
if (i == 0) {
- __odp_errno = EMSGSIZE;
return -1;
}
break;
}
if (odp_packet_copy_to_mem(pkts[i], 0, pkt_len, buf) < 0) {
- ODP_ERR("failed to copy packet data\n");
+ _ODP_ERR("failed to copy packet data\n");
break;
}
@@ -291,19 +407,22 @@ static int tap_pktio_send_lockless(pktio_entry_t *pktio_entry,
if (retval < 0) {
if (i == 0 && SOCK_ERR_REPORT(errno)) {
- __odp_errno = errno;
- ODP_ERR("write(): %s\n", strerror(errno));
+ _ODP_ERR("write(): %s\n", strerror(errno));
return -1;
}
break;
} else if ((uint32_t)retval != pkt_len) {
- ODP_ERR("sent partial ethernet packet\n");
+ _ODP_ERR("sent partial ethernet packet\n");
if (i == 0) {
- __odp_errno = EMSGSIZE;
return -1;
}
break;
}
+
+ if (tx_ts_enabled) {
+ if (odp_unlikely(packet_hdr(pkts[i])->p.flags.ts_set))
+ _odp_pktio_tx_ts_set(pktio_entry);
+ }
}
for (n = 0; n < i; n++)
@@ -313,15 +432,15 @@ static int tap_pktio_send_lockless(pktio_entry_t *pktio_entry,
}
static int tap_pktio_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
- const odp_packet_t pkts[], int len)
+ const odp_packet_t pkts[], int num)
{
int ret;
- odp_ticketlock_lock(&pktio_entry->s.txl);
+ odp_ticketlock_lock(&pktio_entry->txl);
- ret = tap_pktio_send_lockless(pktio_entry, pkts, len);
+ ret = tap_pktio_send_lockless(pktio_entry, pkts, num);
- odp_ticketlock_unlock(&pktio_entry->s.txl);
+ odp_ticketlock_unlock(&pktio_entry->txl);
return ret;
}
@@ -330,49 +449,102 @@ static uint32_t tap_mtu_get(pktio_entry_t *pktio_entry)
{
uint32_t ret;
- ret = mtu_get_fd(pktio_entry->s.pkt_tap.skfd,
- pktio_entry->s.name + 4);
+ ret = _odp_mtu_get_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->name + 4);
if (ret > 0)
- pktio_entry->s.pkt_tap.mtu = ret;
+ pkt_priv(pktio_entry)->mtu = ret;
return ret;
}
+static int tap_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
+ int ret;
+
+ ret = _odp_mtu_set_fd(tap->skfd, pktio_entry->name + 4, maxlen_input);
+ if (ret)
+ return ret;
+
+ tap->mtu = maxlen_input;
+
+ return 0;
+}
+
static int tap_promisc_mode_set(pktio_entry_t *pktio_entry,
odp_bool_t enable)
{
- return promisc_mode_set_fd(pktio_entry->s.pkt_tap.skfd,
- pktio_entry->s.name + 4, enable);
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->name + 4, enable);
}
static int tap_promisc_mode_get(pktio_entry_t *pktio_entry)
{
- return promisc_mode_get_fd(pktio_entry->s.pkt_tap.skfd,
- pktio_entry->s.name + 4);
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->name + 4);
}
static int tap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
{
- memcpy(mac_addr, pktio_entry->s.pkt_tap.if_mac, ETH_ALEN);
+ memcpy(mac_addr, pkt_priv(pktio_entry)->if_mac, ETH_ALEN);
return ETH_ALEN;
}
+static int tap_mac_addr_set(pktio_entry_t *pktio_entry, const void *mac_addr)
+{
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
+
+ memcpy(tap->if_mac, mac_addr, ETH_ALEN);
+
+ return mac_addr_set_fd(tap->fd, (char *)pktio_entry->name + 4,
+ tap->if_mac);
+}
+
+static int tap_link_status(pktio_entry_t *pktio_entry)
+{
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->skfd,
+ pktio_entry->name + 4);
+}
+
+static int tap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->skfd, pktio_entry->name + 4, info);
+}
+
static int tap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
odp_pktio_capability_t *capa)
{
+ pkt_tap_t *tap = pkt_priv(pktio_entry);
+
memset(capa, 0, sizeof(odp_pktio_capability_t));
capa->max_input_queues = 1;
capa->max_output_queues = 1;
capa->set_op.op.promisc_mode = 1;
+ capa->set_op.op.mac_addr = 1;
+ capa->set_op.op.maxlen = 1;
+
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_input = tap->mtu_max;
+ capa->maxlen.min_output = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_output = tap->mtu_max;
odp_pktio_config_init(&capa->config);
capa->config.pktin.bit.ts_all = 1;
capa->config.pktin.bit.ts_ptp = 1;
+
+ capa->config.pktout.bit.ts_ena = 1;
+ capa->config.pktout.bit.tx_compl_ena = 1;
+ capa->tx_compl.mode_all = 1;
+ capa->tx_compl.mode_event = 1;
+ capa->tx_compl.mode_poll = 1;
+
return 0;
}
-const pktio_if_ops_t tap_pktio_ops = {
+const pktio_if_ops_t _odp_tap_pktio_ops = {
.name = "tap",
.print = NULL,
.init_global = NULL,
@@ -380,16 +552,21 @@ const pktio_if_ops_t tap_pktio_ops = {
.term = NULL,
.open = tap_pktio_open,
.close = tap_pktio_close,
- .start = NULL,
- .stop = NULL,
+ .start = tap_pktio_start,
+ .stop = tap_pktio_stop,
.recv = tap_pktio_recv,
.send = tap_pktio_send,
- .mtu_get = tap_mtu_get,
+ .maxlen_get = tap_mtu_get,
+ .maxlen_set = tap_mtu_set,
.promisc_mode_set = tap_promisc_mode_set,
.promisc_mode_get = tap_promisc_mode_get,
.mac_get = tap_mac_addr_get,
+ .mac_set = tap_mac_addr_set,
+ .link_status = tap_link_status,
+ .link_info = tap_link_info,
.capability = tap_capability,
- .pktin_ts_res = NULL,
- .pktin_ts_from_ns = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
.config = NULL
};
diff --git a/platform/linux-generic/test/.gitignore b/platform/linux-generic/test/.gitignore
new file mode 100644
index 000000000..88eb4dce8
--- /dev/null
+++ b/platform/linux-generic/test/.gitignore
@@ -0,0 +1,3 @@
+*.log
+*.trs
+*.env
diff --git a/platform/linux-generic/test/Makefile.am b/platform/linux-generic/test/Makefile.am
new file mode 100644
index 000000000..7aca5fd3f
--- /dev/null
+++ b/platform/linux-generic/test/Makefile.am
@@ -0,0 +1,58 @@
+include $(top_srcdir)/test/Makefile.inc
+TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation
+
+if WITH_OPENSSL_CRYPTO
+TESTS_ENVIRONMENT += WITH_OPENSSL_CRYPTO=1
+else
+TESTS_ENVIRONMENT += WITH_OPENSSL_CRYPTO=0
+endif
+
+SUBDIRS =
+TESTS =
+
+if test_vald
+TESTS += validation/api/pktio/pktio_run.sh \
+ validation/api/pktio/pktio_run_tap.sh \
+ validation/api/shmem/shmem_linux$(EXEEXT)
+
+SUBDIRS += validation/api/pktio \
+ validation/api/shmem \
+ pktio_ipc \
+ example \
+ performance
+
+if WITH_ML
+TESTS += validation/api/ml/ml_linux$(EXEEXT)
+SUBDIRS += validation/api/ml
+endif
+
+if ODP_PKTIO_PCAP
+TESTS += validation/api/pktio/pktio_run_pcap.sh
+endif
+if PKTIO_DPDK
+TESTS += validation/api/pktio/pktio_run_dpdk.sh
+endif
+TESTS += pktio_ipc/pktio_ipc_run.sh
+SUBDIRS += pktio_ipc
+else
+#performance tests refer to pktio_env
+if test_perf
+SUBDIRS += validation/api/pktio \
+ performance
+endif
+endif
+
+TEST_EXTENSIONS = .sh
+
+TESTNAME = linux-generic
+
+TESTENV = tests-$(TESTNAME).env
+
+test_DATA = $(TESTENV)
+
+DISTCLEANFILES = $(TESTENV)
+.PHONY: $(TESTENV)
+$(TESTENV):
+ echo "TESTS=\"$(TESTS)\"" > $@
+ echo "$(TESTS_ENVIRONMENT)" >> $@
+ echo "$(LOG_COMPILER)" >> $@
diff --git a/platform/linux-generic/test/example/Makefile.am b/platform/linux-generic/test/example/Makefile.am
new file mode 100644
index 000000000..947647cd4
--- /dev/null
+++ b/platform/linux-generic/test/example/Makefile.am
@@ -0,0 +1,11 @@
+SUBDIRS = \
+ classifier \
+ generator \
+ ipsec_api \
+ ipsec_crypto \
+ l2fwd_simple \
+ l3fwd \
+ packet \
+ ping \
+ simple_pipeline \
+ switch
diff --git a/platform/linux-generic/test/example/classifier/Makefile.am b/platform/linux-generic/test/example/classifier/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/classifier/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/classifier/pktio_env b/platform/linux-generic/test/example/classifier/pktio_env
new file mode 100644
index 000000000..eb8f2a80a
--- /dev/null
+++ b/platform/linux-generic/test/example/classifier/pktio_env
@@ -0,0 +1,44 @@
+#!/bin/sh
+#
+# Copyright (c) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create one pcap interface
+# which uses udp64.pcap to inject traffic.
+#
+# Network set-up
+# +---------+ +-----------+
+# |pcap intf| IF0<---> | Classifier|
+# +---------+           +-----------+
+#
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+echo "using PCAP in=${PCAP_IN}"
+
+IF0=pcap:in=${PCAP_IN}
+TIME_OUT_VAL=1
+CPASS_COUNT_ARG1=100
+CPASS_COUNT_ARG2=100
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ return 0;
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-generic/test/example/generator/Makefile.am b/platform/linux-generic/test/example/generator/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/generator/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/generator/pktio_env b/platform/linux-generic/test/example/generator/pktio_env
new file mode 100644
index 000000000..06af667e8
--- /dev/null
+++ b/platform/linux-generic/test/example/generator/pktio_env
@@ -0,0 +1,34 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# Generator uses null interfaces to validate udp mode.
+#
+# Network set-up
+# IF0 ---> null:0
+
+IF0=null:0
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-generic/test/example/ipsec_api/Makefile.am b/platform/linux-generic/test/example/ipsec_api/Makefile.am
new file mode 100644
index 000000000..2535ad466
--- /dev/null
+++ b/platform/linux-generic/test/example/ipsec_api/Makefile.am
@@ -0,0 +1,21 @@
+EXTRA_DIST = pktio_env
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/test/example/ipsec_api/pktio_env b/platform/linux-generic/test/example/ipsec_api/pktio_env
new file mode 100644
index 000000000..b3a073631
--- /dev/null
+++ b/platform/linux-generic/test/example/ipsec_api/pktio_env
@@ -0,0 +1,77 @@
+#!/bin/sh
+#
+# Copyright (C) 2021, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# ipsec_api application uses two loop devices loop1 and loop2.
+#
+
+if [ "$0" == "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+ exit 1
+fi
+
+# Absolute path to the .env file.
+LINUX_ENV_PATH=$PWD/../../platform/linux-generic/test
+
+TESTENV="tests-linux-generic.env"
+
+if [ -f $LINUX_ENV_PATH/$TESTENV ]; then
+ source $LINUX_ENV_PATH/$TESTENV
+else
+ echo "BUG: unable to find $TESTENV!"
+ echo "$TESTENV has to be in following directory: "
+ echo " $LINUX_ENV_PATH"
+ exit 1
+fi
+
+# Skip IPsec example tests when there's no OpenSSL.
+if [ -n "$WITH_OPENSSL_CRYPTO" ] && [ ${WITH_OPENSSL_CRYPTO} -eq 0 ]; then
+ echo "Crypto not supported. Skipping."
+ exit 77
+fi
+
+if [ -n "$ODPH_PROC_MODE" ] && [ ${ODPH_PROC_MODE} -ne 0 ]; then
+ echo "Process mode not supported. Skipping."
+ exit 77
+fi
+
+# Skip live and router mode tests.
+if [ ${IPSEC_APP_MODE} -eq 1 ] || [ ${IPSEC_APP_MODE} -eq 2 ]; then
+ echo "IPsec Live / Router mode test. Skipping."
+ exit 77
+fi
+
+IF0=p7p1
+IF1=p8p1
+
+NEXT_HOP_MAC0=08:00:27:76:B5:E0
+NEXT_HOP_MAC1=08:00:27:F5:8B:DB
+
+LIF0=loop1
+LIF1=loop2
+
+IF_LIST=$LIF0,$LIF1
+ROUTE_IF_INB=$LIF0
+ROUTE_IF_OUTB=$LIF1
+OUT_IF=$LIF1
+IN_IF=$LIF0
+
+validate_result()
+{
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-generic/test/example/ipsec_crypto/Makefile.am b/platform/linux-generic/test/example/ipsec_crypto/Makefile.am
new file mode 100644
index 000000000..2535ad466
--- /dev/null
+++ b/platform/linux-generic/test/example/ipsec_crypto/Makefile.am
@@ -0,0 +1,21 @@
+EXTRA_DIST = pktio_env
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/test/example/ipsec_crypto/pktio_env b/platform/linux-generic/test/example/ipsec_crypto/pktio_env
new file mode 100644
index 000000000..fd770ac41
--- /dev/null
+++ b/platform/linux-generic/test/example/ipsec_crypto/pktio_env
@@ -0,0 +1,77 @@
+#!/bin/sh
+#
+# Copyright (C) 2021, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# ipsec_crypto application uses two loop devices loop1 and loop2.
+#
+
+if [ "$0" == "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+ exit 1
+fi
+
+# Absolute path to the .env file.
+LINUX_ENV_PATH=$PWD/../../platform/linux-generic/test
+
+TESTENV="tests-linux-generic.env"
+
+if [ -f $LINUX_ENV_PATH/$TESTENV ]; then
+ source $LINUX_ENV_PATH/$TESTENV
+else
+ echo "BUG: unable to find $TESTENV!"
+ echo "$TESTENV has to be in following directory: "
+ echo " $LINUX_ENV_PATH"
+ exit 1
+fi
+
+# Skip IPsec example tests when there's no OpenSSL.
+if [ -n "$WITH_OPENSSL_CRYPTO" ] && [ ${WITH_OPENSSL_CRYPTO} -eq 0 ]; then
+ echo "Crypto not supported. Skipping."
+ exit 77
+fi
+
+if [ -n "$ODPH_PROC_MODE" ] && [ ${ODPH_PROC_MODE} -ne 0 ]; then
+ echo "Process mode not supported. Skipping."
+ exit 77
+fi
+
+# Skip live and router mode tests.
+if [ ${IPSEC_APP_MODE} -eq 1 ] || [ ${IPSEC_APP_MODE} -eq 2 ]; then
+ echo "Live / Router mode test. Skipping."
+ exit 77
+fi
+
+IF0=p7p1
+IF1=p8p1
+
+NEXT_HOP_MAC0=08:00:27:76:B5:E0
+NEXT_HOP_MAC1=08:00:27:F5:8B:DB
+
+LIF0=loop1
+LIF1=loop2
+
+IF_LIST=$LIF0,$LIF1
+ROUTE_IF_INB=$LIF0
+ROUTE_IF_OUTB=$LIF1
+OUT_IF=$LIF1
+IN_IF=$LIF0
+
+validate_result()
+{
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-generic/test/example/l2fwd_simple/Makefile.am b/platform/linux-generic/test/example/l2fwd_simple/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/l2fwd_simple/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/l2fwd_simple/pktio_env b/platform/linux-generic/test/example/l2fwd_simple/pktio_env
new file mode 100644
index 000000000..e1fbe87cc
--- /dev/null
+++ b/platform/linux-generic/test/example/l2fwd_simple/pktio_env
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create two pcap interfaces
+# and one interface uses udp64.pcap to inject traffic. An output pcap file
+# is generated via second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+echo "using PCAP_IN = ${PCAP_IN}"
+
+IF0=pcap:in=${PCAP_IN}
+IF1=pcap:out=pcapout.pcap
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ if [ `stat -c %s pcapout.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
+ echo "File sizes disagree"
+ exit 1
+ fi
+
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-generic/test/example/l3fwd/Makefile.am b/platform/linux-generic/test/example/l3fwd/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/l3fwd/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/l3fwd/pktio_env b/platform/linux-generic/test/example/l3fwd/pktio_env
new file mode 100644
index 000000000..a692d79bc
--- /dev/null
+++ b/platform/linux-generic/test/example/l3fwd/pktio_env
@@ -0,0 +1,51 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create two pcap interfaces
+# and one interface uses udp64.pcap to inject traffic. An output pcap file
+# is generated via second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT="pcapout.pcap"
+PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
+echo "using PCAP_IN = ${PCAP_IN}, PCAP_OUT = ${PCAP_OUT}"
+
+IF0=pcap:in=${PCAP_IN}
+IF1=pcap:out=${PCAP_OUT}
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
+ if [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
+ echo "in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ exit 1
+ fi
+
+ echo "Pass: in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-generic/test/example/packet/Makefile.am b/platform/linux-generic/test/example/packet/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/packet/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/packet/pktio_env b/platform/linux-generic/test/example/packet/pktio_env
new file mode 100644
index 000000000..4e1914e2e
--- /dev/null
+++ b/platform/linux-generic/test/example/packet/pktio_env
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create two pcap interfaces
+# and one interface uses udp64.pcap to inject traffic. An output pcap file
+# is generated via second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT="pcapout.pcap"
+PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
+echo "using PCAP in=${PCAP_IN}:out=${PCAP_OUT} size %${PCAP_IN_SIZE}"
+
+IF0=pcap:in=${PCAP_IN}:loops=10
+IF1=pcap:in=${PCAP_IN}:out=${PCAP_OUT}
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
+ if [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
+ echo "Error: in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ exit 1
+ fi
+
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-generic/test/example/ping/Makefile.am b/platform/linux-generic/test/example/ping/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/ping/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/ping/pktio_env b/platform/linux-generic/test/example/ping/pktio_env
new file mode 100644
index 000000000..90106da9d
--- /dev/null
+++ b/platform/linux-generic/test/example/ping/pktio_env
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create two pcap interfaces
+# and one interface uses udp64.pcap to inject traffic. An output pcap file
+# is generated via second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name icmp_echo_req.pcap -print -quit`
+PCAP_OUT="pcapout.pcap"
+PCAP_IN_SIZE=`stat -c %s ${PCAP_IN}`
+echo "using PCAP in=${PCAP_IN}:out=${PCAP_OUT} size %${PCAP_IN_SIZE}"
+
+IF0=pcap:in=${PCAP_IN}:out=${PCAP_OUT}
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ PCAP_OUT_SIZE=`stat -c %s ${PCAP_OUT}`
+ if [ ${PCAP_IN_SIZE} -ne ${PCAP_OUT_SIZE} ]; then
+ echo "Error: in:${PCAP_IN_SIZE} out:${PCAP_OUT_SIZE}"
+ exit 1
+ fi
+
+ echo "pcap in size:${PCAP_IN_SIZE} pcap out size:${PCAP_OUT_SIZE}"
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-generic/test/example/simple_pipeline/Makefile.am b/platform/linux-generic/test/example/simple_pipeline/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/simple_pipeline/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/simple_pipeline/pktio_env b/platform/linux-generic/test/example/simple_pipeline/pktio_env
new file mode 100644
index 000000000..e1fbe87cc
--- /dev/null
+++ b/platform/linux-generic/test/example/simple_pipeline/pktio_env
@@ -0,0 +1,47 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create two pcap interfaces
+# and one interface uses udp64.pcap to inject traffic. An output pcap file
+# is generated via second interface.
+#
+# Network set-up
+# IF0 <---> IF1
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+echo "using PCAP_IN = ${PCAP_IN}"
+
+IF0=pcap:in=${PCAP_IN}
+IF1=pcap:out=pcapout.pcap
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ if [ `stat -c %s pcapout.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
+ echo "File sizes disagree"
+ exit 1
+ fi
+
+ rm -f pcapout.pcap
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1."
+ return 0
+}
diff --git a/platform/linux-generic/test/example/switch/Makefile.am b/platform/linux-generic/test/example/switch/Makefile.am
new file mode 100644
index 000000000..2ffced539
--- /dev/null
+++ b/platform/linux-generic/test/example/switch/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = pktio_env
diff --git a/platform/linux-generic/test/example/switch/pktio_env b/platform/linux-generic/test/example/switch/pktio_env
new file mode 100644
index 000000000..78201cec7
--- /dev/null
+++ b/platform/linux-generic/test/example/switch/pktio_env
@@ -0,0 +1,54 @@
+#!/bin/sh
+#
+# Copyright (C) 2020, Marvell
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Script to setup interfaces used for running application on linux-generic.
+#
+# For linux-generic the default behavior is to create two pcap interfaces
+# and one interface uses udp64.pcap to inject traffic. An output pcap file
+# is generated via second interface.
+#
+# Network set-up
+# IF0 |---> IF1
+# |---> IF2
+# |---> IF3
+
+NUM_RX_PORT=3
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+echo "Switch test using PCAP_IN = ${PCAP_IN}"
+
+IF0=pcap:in=${PCAP_IN}
+IF1=pcap:out=pcapout1.pcap
+IF2=pcap:out=pcapout2.pcap
+IF3=pcap:out=pcapout3.pcap
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ for i in `seq 1 $NUM_RX_PORT`;
+ do
+ if [ `stat -c %s pcapout${i}.pcap` -ne `stat -c %s ${PCAP_IN}` ]; then
+ echo "Error: Output file $i size not matching"
+ exit 1
+ fi
+ rm -f pcapout${i}.pcap
+ done
+}
+
+setup_interfaces()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1, $IF2, $IF3."
+ return 0
+}
+
+cleanup_interfaces()
+{
+ echo "pktio: cleaning up test interfaces $IF0, $IF1, $IF2, $IF3."
+ return 0
+}
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
new file mode 100644
index 000000000..fa3b6982f
--- /dev/null
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.28"
+
+timer: {
+ # Enable inline timer implementation
+ inline = 1
+}
diff --git a/platform/linux-generic/test/packet_align.conf b/platform/linux-generic/test/packet_align.conf
new file mode 100644
index 000000000..fb1418348
--- /dev/null
+++ b/platform/linux-generic/test/packet_align.conf
@@ -0,0 +1,21 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.28"
+
+pool: {
+ pkt: {
+ # Non-zero, larger than cache line size, power of two value.
+ base_align = 128
+ }
+
+ buf: {
+ # Non-zero, larger than cache line size, power of two value.
+ min_align = 256
+ }
+}
+
+pktio: {
+ # Ethernet header offset is 2 bytes, so that Ethernet payload
+ # starts at 16 byte alignment.
+ pktin_frame_offset = 2
+}
diff --git a/platform/linux-generic/test/performance/Makefile.am b/platform/linux-generic/test/performance/Makefile.am
new file mode 100644
index 000000000..4070f09f2
--- /dev/null
+++ b/platform/linux-generic/test/performance/Makefile.am
@@ -0,0 +1 @@
+SUBDIRS = dmafwd
diff --git a/platform/linux-generic/test/performance/dmafwd/Makefile.am b/platform/linux-generic/test/performance/dmafwd/Makefile.am
new file mode 100644
index 000000000..91d42cc74
--- /dev/null
+++ b/platform/linux-generic/test/performance/dmafwd/Makefile.am
@@ -0,0 +1,18 @@
+EXTRA_DIST = pktio_env
+
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/test/performance/dmafwd/pktio_env b/platform/linux-generic/test/performance/dmafwd/pktio_env
new file mode 100644
index 000000000..91075973e
--- /dev/null
+++ b/platform/linux-generic/test/performance/dmafwd/pktio_env
@@ -0,0 +1,57 @@
+#!/bin/sh
+#
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+PCAP_OUT=dmafwd_out.pcap
+IF0=pcap:in=${PCAP_IN}:out=${PCAP_OUT}
+DUMP=tcpdump
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "ERROR: Platform specific env file has to be sourced."
+fi
+
+validate_result()
+{
+ local RET=0
+
+ if command -v ${DUMP}; then
+ local VALIN=valin
+ local VALOUT=valout
+
+ ${DUMP} -r ${PCAP_IN} -t -x > ${VALIN}
+ ${DUMP} -r ${PCAP_OUT} -t -x > ${VALOUT}
+ diff ${VALIN} ${VALOUT}
+ RET=$?
+ rm -f ${VALIN}
+ rm -f ${VALOUT}
+ else
+ echo "WARNING: No ${DUMP} available, using \"stat\" for diff"
+ local SZIN=$(stat -c %s ${PCAP_IN})
+ local SZOUT=$(stat -c %s ${PCAP_OUT})
+
+ if [ ${SZIN} -ne ${SZOUT} ]; then
+ RET=1
+ fi
+ fi
+
+ rm -f ${PCAP_OUT}
+
+ if [ $RET -ne 0 ]; then
+ echo "ERROR: Input and output captures do not match, exiting"
+ exit 1
+ fi
+
+ return 0
+}
+
+setup_interfaces()
+{
+ return 0
+}
+
+cleanup_interfaces()
+{
+ return 0
+}
diff --git a/platform/linux-generic/test/pktio_ipc/.gitignore b/platform/linux-generic/test/pktio_ipc/.gitignore
new file mode 100644
index 000000000..49ee4fd29
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/.gitignore
@@ -0,0 +1,2 @@
+pktio_ipc1
+pktio_ipc2
diff --git a/platform/linux-generic/test/pktio_ipc/Makefile.am b/platform/linux-generic/test/pktio_ipc/Makefile.am
new file mode 100644
index 000000000..b9623cc76
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/Makefile.am
@@ -0,0 +1,31 @@
+include $(top_srcdir)/test/Makefile.inc
+TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation
+
+test_PROGRAMS = pktio_ipc1\
+ pktio_ipc2
+
+pktio_ipc1_SOURCES = pktio_ipc1.c ipc_common.c ipc_common.h
+pktio_ipc2_SOURCES = pktio_ipc2.c ipc_common.c ipc_common.h
+
+dist_check_SCRIPTS = pktio_ipc_run.sh
+test_SCRIPTS = $(dist_check_SCRIPTS)
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/test/pktio_ipc/ipc_common.c b/platform/linux-generic/test/pktio_ipc/ipc_common.c
new file mode 100644
index 000000000..128a7c6e1
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/ipc_common.c
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
+ */
+
+#include "ipc_common.h"
+
+/** Start time in seconds */
+int start_time_sec;
+/** Run time in seconds */
+int run_time_sec;
+/** Pid of the master process */
+int master_pid;
+
+int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
+ odp_packet_t pkt_tbl[], int num)
+{
+ int ret;
+ int sent = 0;
+ odp_time_t start_time;
+ odp_time_t end_time;
+ odp_pktout_queue_t pktout;
+ int i;
+
+ memset(&pktout, 0, sizeof(pktout));
+ start_time = odp_time_local();
+ end_time = odp_time_add_ns(start_time, ODP_TIME_SEC_IN_NS);
+
+ if (odp_pktout_queue(pktio, &pktout, 1) != 1) {
+ ODPH_ERR("no output queue\n");
+ return -1;
+ }
+
+ while (sent != num) {
+ ret = odp_pktout_send(pktout, &pkt_tbl[sent], num - sent);
+ if (ret < 0) {
+ ODPH_ERR("odp_pktout_send return %d\n", ret);
+ for (i = sent; i < num; i++)
+ odp_packet_free(pkt_tbl[i]);
+ return -1;
+ }
+
+ sent += ret;
+
+ if (odp_time_cmp(end_time, odp_time_local()) < 0) {
+ for (i = sent; i < num; i++)
+ odp_packet_free(pkt_tbl[i]);
+ ODPH_ERR("Send Timeout!\n");
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+odp_pktio_t create_pktio(odp_pool_t pool, int master_pid)
+{
+ odp_pktio_param_t pktio_param;
+ odp_pktio_t ipc_pktio;
+ char name[30];
+
+ odp_pktio_param_init(&pktio_param);
+
+ if (master_pid)
+ sprintf(name, TEST_IPC_PKTIO_PID_NAME, master_pid);
+ else
+ sprintf(name, TEST_IPC_PKTIO_NAME);
+
+ printf("pid: %d, create IPC pktio %s\n", getpid(), name);
+ ipc_pktio = odp_pktio_open(name, pool, &pktio_param);
+ if (ipc_pktio == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Error: ipc pktio %s create failed.\n", name);
+ return ODP_PKTIO_INVALID;
+ }
+
+ if (odp_pktin_queue_config(ipc_pktio, NULL)) {
+ ODPH_ERR("Input queue config failed\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ if (odp_pktout_queue_config(ipc_pktio, NULL)) {
+ ODPH_ERR("Output queue config failed\n");
+ return ODP_PKTIO_INVALID;
+ }
+
+ return ipc_pktio;
+}
+
+/**
+ * Parse and store the command line arguments
+ *
+ * @param argc argument count
+ * @param argv[] argument vector
+ * @param appl_args Store application arguments here
+ */
+void parse_args(int argc, char *argv[])
+{
+ int opt;
+ int long_index;
+ static struct option longopts[] = {
+ {"start-timeout", required_argument, NULL, 's'},
+ {"run-time", required_argument, NULL, 't'},
+ {"pid", required_argument, NULL, 'p'}, /* master process pid */
+ {"help", no_argument, NULL, 'h'}, /* return 'h' */
+ {NULL, 0, NULL, 0}
+ };
+
+ start_time_sec = 0; /* wait forever if time is 0 */
+ run_time_sec = 0; /* loop forever if time to run is 0 */
+ master_pid = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, "+s:t:p:h",
+ longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 's':
+ start_time_sec = atoi(optarg);
+ break;
+ case 't':
+ run_time_sec = atoi(optarg);
+ break;
+ case 'p':
+ master_pid = atoi(optarg);
+ break;
+ case 'h':
+ default:
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+ }
+ }
+
+ optind = 1; /* reset 'extern optind' from the getopt lib */
+}
+
+/**
+ * Print system and application info
+ */
+void print_info(char *progname)
+{
+ odp_sys_info_print();
+
+ printf("Running ODP appl: \"%s\"\n",
+ progname);
+ printf("\n\n");
+ fflush(NULL);
+}
+
+/**
+ * Print usage information
+ */
+void usage(char *progname)
+{
+ printf("\n"
+ "Usage: %s OPTIONS\n"
+ "\n"
+ "OpenDataPlane odp-linux ipc test application.\n"
+ "\n"
+ "Optional OPTIONS\n"
+ " -h, --help Display help and exit.\n"
+ " -p, --pid PID of the master process.\n"
+ " -t, --run-time Time to run in seconds.\n"
+ " -s, --start-timeout Maximum time for pktio startup.\n"
+ "\n", NO_PATH(progname)
+ );
+}
diff --git a/platform/linux-generic/test/pktio_ipc/ipc_common.h b/platform/linux-generic/test/pktio_ipc/ipc_common.h
new file mode 100644
index 000000000..94ec21460
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/ipc_common.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
+ */
+
+#define _POSIX_C_SOURCE 200809L
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+#include <sched.h>
+
+#include <stdlib.h>
+#include <inttypes.h>
+#include <string.h>
+#include <getopt.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <signal.h>
+#include <sys/wait.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+/** @def SHM_PKT_POOL_SIZE
+ * @brief Size of the shared memory block
+ */
+#define SHM_PKT_POOL_SIZE 8192
+
+/** @def SHM_PKT_POOL_BUF_SIZE
+ * @brief Buffer size of the packet pool buffer
+ */
+#define SHM_PKT_POOL_BUF_SIZE 100
+
+/** @def MAX_PKT_BURST
+ * @brief Maximum number of packet bursts
+ */
+#define MAX_PKT_BURST 16
+
+/** Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
+ strrchr((file_name), '/') + 1 : (file_name))
+
+#define TEST_SEQ_MAGIC 0x92749451
+#define TEST_SEQ_MAGIC_2 0x81638340
+
+#define TEST_ALLOC_MAGIC 0x1234adcd
+
+#define TEST_IPC_PKTIO_NAME "ipc:ipktio"
+#define TEST_IPC_PKTIO_PID_NAME "ipc:%d:ipktio"
+
+/** Can be any name, same or not the same. */
+#define TEST_IPC_POOL_NAME "ipc_packet_pool"
+
+/** magic number and sequence at start of packet payload */
+typedef struct ODP_PACKED {
+ odp_u32be_t magic;
+ odp_u32be_t seq;
+} pkt_head_t;
+
+/** magic number at end of packet payload */
+typedef struct ODP_PACKED {
+ odp_u32be_t magic;
+} pkt_tail_t;
+
+/** Start time in seconds */
+extern int start_time_sec;
+
+/** Run time in seconds */
+extern int run_time_sec;
+
+/** PID of the master process */
+extern int master_pid;
+
+/* helper funcs */
+void parse_args(int argc, char *argv[]);
+void print_info(char *progname);
+void usage(char *progname);
+
+/**
+ * Create a ipc pktio handle.
+ *
+ * @param pool Pool to associate with device for packet RX/TX
+ * @param master_pid Pid of master process
+ *
+ * @return The handle of the created pktio object.
+ * @retval ODP_PKTIO_INVALID if the create fails.
+ */
+odp_pktio_t create_pktio(odp_pool_t pool, int master_pid);
+
+/** Spin and send all packet from table
+ *
+ * @param pktio pktio device
+ * @param pkt_tbl packets table
+ * @param num number of packets
+ */
+int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
+ odp_packet_t pkt_tbl[],
+ int num);
diff --git a/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c b/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c
new file mode 100644
index 000000000..df7a5ca3f
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
+ */
+
+#include "ipc_common.h"
+
+/**
+ * @file
+ * @example pktio_ipc1.c ODP IPC example application.
+ * This application works in pair with pktio_ipc2 application.
+ * It opens ipc pktio, allocates packets, sets magic number and
+ * sends packets to ipc pktio. Then app reads packets and checks
+ * that magic number was properly updated and there is no packet
+ * loss (i.e. sequence counter continuously incrementing).
+ */
+
+/**
+ * Packet IO loopback worker thread using bursts from/to IO resources
+ *
+ * @param arg thread arguments of type 'thread_args_t *'
+ */
+static int pktio_run_loop(odp_pool_t pool)
+{
+ int pkts;
+ odp_pktio_t ipc_pktio = ODP_PKTIO_INVALID;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ uint64_t cnt = 0; /* increasing counter on each send packet */
+ uint64_t cnt_recv = 0; /* increasing counter to validate
+ cnt on receive */
+ uint64_t stat_pkts = 0;
+ uint64_t stat_pkts_alloc = 0;
+ uint64_t stat_pkts_prev = 0;
+ uint64_t stat_errors = 0;
+ uint64_t stat_free = 0;
+ odp_time_t start_cycle;
+ odp_time_t current_cycle;
+ odp_time_t cycle;
+ odp_time_t diff;
+ odp_time_t wait;
+ int ret;
+ odp_pktin_queue_t pktin;
+ char name[30];
+ int sync_cnt = 0;
+
+ if (master_pid)
+ sprintf(name, TEST_IPC_PKTIO_PID_NAME, master_pid);
+ else
+ sprintf(name, TEST_IPC_PKTIO_NAME);
+
+ wait = odp_time_local_from_ns(start_time_sec * ODP_TIME_SEC_IN_NS);
+ start_cycle = odp_time_local();
+ current_cycle = start_cycle;
+
+ for (;;) {
+ if (start_time_sec) {
+ cycle = odp_time_local();
+ diff = odp_time_diff(cycle, start_cycle);
+ if (odp_time_cmp(wait, diff) < 0) {
+ printf("timeout exit 1, start_time_sec %d\n",
+ start_time_sec);
+ return -1;
+ }
+ }
+
+ ipc_pktio = create_pktio(pool, master_pid);
+ if (ipc_pktio != ODP_PKTIO_INVALID)
+ break;
+ if (!master_pid)
+ break;
+
+ odp_time_wait_ns(50 * ODP_TIME_MSEC_IN_NS);
+ }
+
+ if (ipc_pktio == ODP_PKTIO_INVALID)
+ return -1;
+
+ if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) {
+ ODPH_ERR("no input queue\n");
+ return -1;
+ }
+
+ /* start ipc pktio, i.e. wait until other process connects */
+ for (;;) {
+ if (start_time_sec) {
+ cycle = odp_time_local();
+ diff = odp_time_diff(cycle, start_cycle);
+ if (odp_time_cmp(wait, diff) < 0) {
+ printf("timeout exit 2, start_time_sec %d\n",
+ start_time_sec);
+ goto exit;
+ }
+ }
+
+ ret = odp_pktio_start(ipc_pktio);
+ if (!ret)
+ break;
+
+ /* Reduce polling frequency to once per 50ms */
+ odp_time_wait_ns(50 * ODP_TIME_MSEC_IN_NS);
+ }
+
+ /* packets loop */
+ wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS);
+ start_cycle = odp_time_local();
+ for (;;) {
+ int i;
+
+ /* 1. exit loop if time specified */
+ if (run_time_sec) {
+ cycle = odp_time_local();
+ diff = odp_time_diff(cycle, start_cycle);
+ if (odp_time_cmp(wait, diff) < 0) {
+ ODPH_DBG("exit after %d seconds\n",
+ run_time_sec);
+ break;
+ }
+ }
+
+ /* 2. Receive packets back from ipc_pktio, validate magic
+ * number sequence counter and free that packet
+ */
+ while (1) {
+ pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST);
+ if (pkts <= 0)
+ break;
+
+ for (i = 0; i < pkts; i++) {
+ odp_packet_t pkt = pkt_tbl[i];
+ pkt_head_t head;
+ pkt_tail_t tail;
+ size_t off;
+
+ off = odp_packet_l4_offset(pkt);
+ if (off == ODP_PACKET_OFFSET_INVALID) {
+ stat_errors++;
+ stat_free++;
+ odp_packet_free(pkt);
+ ODPH_ERR("invalid l4 offset\n");
+ }
+
+ off += ODPH_UDPHDR_LEN;
+ ret = odp_packet_copy_to_mem(pkt, off,
+ sizeof(head),
+ &head);
+ if (ret) {
+ stat_errors++;
+ stat_free++;
+ odp_packet_free(pkt);
+ ODPH_DBG("error\n");
+ continue;
+ }
+
+ if (head.magic == TEST_ALLOC_MAGIC) {
+ stat_free++;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (head.magic != TEST_SEQ_MAGIC_2) {
+ stat_errors++;
+ stat_free++;
+ odp_packet_free(pkt);
+ ODPH_DBG("error\n");
+ continue;
+ }
+
+ off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
+ ret = odp_packet_copy_to_mem(pkt, off,
+ sizeof(tail),
+ &tail);
+ if (ret) {
+ stat_errors++;
+ stat_free++;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (tail.magic != TEST_SEQ_MAGIC) {
+ stat_errors++;
+ stat_free++;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ cnt_recv++;
+
+ if (head.seq != cnt_recv && sync_cnt) {
+ stat_errors++;
+ odp_packet_free(pkt);
+ ODPH_DBG("head.seq %d - cnt_recv "
+ "%" PRIu64 " = %" PRIu64 "\n",
+ head.seq, cnt_recv,
+ head.seq - cnt_recv);
+ cnt_recv = head.seq;
+ stat_free++;
+ continue;
+ }
+
+ stat_pkts++;
+ odp_packet_free(pkt);
+ }
+ }
+
+ /* 3. emulate that pkts packets were received */
+ ret = odp_random_data((uint8_t *)&pkts, sizeof(pkts),
+ ODP_RANDOM_BASIC);
+ if (ret != sizeof(pkts)) {
+ ODPH_ABORT("random failed");
+ break;
+ }
+ pkts = ((pkts & 0xffff) % MAX_PKT_BURST) + 1;
+
+ for (i = 0; i < pkts; i++) {
+ odp_packet_t pkt;
+
+ pkt = odp_packet_alloc(pool, SHM_PKT_POOL_BUF_SIZE);
+ if (pkt == ODP_PACKET_INVALID)
+ break;
+
+ stat_pkts_alloc++;
+ odp_packet_l4_offset_set(pkt, 30);
+ pkt_tbl[i] = pkt;
+ }
+
+ pkts = i;
+
+ /* 4. Copy counter and magic numbers to that packets */
+ for (i = 0; i < pkts; i++) {
+ pkt_head_t head;
+ pkt_tail_t tail;
+ size_t off;
+ odp_packet_t pkt = pkt_tbl[i];
+
+ off = odp_packet_l4_offset(pkt);
+ if (off == ODP_PACKET_OFFSET_INVALID)
+ ODPH_ABORT("packet L4 offset not set");
+
+ head.magic = TEST_SEQ_MAGIC;
+ head.seq = cnt++;
+
+ off += ODPH_UDPHDR_LEN;
+ ret = odp_packet_copy_from_mem(pkt, off, sizeof(head),
+ &head);
+ if (ret)
+ ODPH_ABORT("unable to copy in head data");
+
+ tail.magic = TEST_SEQ_MAGIC;
+ off = odp_packet_len(pkt) - sizeof(pkt_tail_t);
+ ret = odp_packet_copy_from_mem(pkt, off, sizeof(tail),
+ &tail);
+ if (ret)
+ ODPH_ABORT("unable to copy in tail data");
+ }
+
+ /* 5. Send packets to ipc_pktio */
+ ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, pkts);
+ if (ret < 0) {
+ ODPH_DBG("unable to sending to ipc pktio\n");
+ break;
+ }
+
+ cycle = odp_time_local();
+ diff = odp_time_diff(cycle, current_cycle);
+ if (odp_time_cmp(odp_time_local_from_ns(ODP_TIME_SEC_IN_NS),
+ diff) < 0) {
+ current_cycle = cycle;
+ if (!sync_cnt && stat_errors == (MAX_PKT_BURST + 2)) {
+ stat_errors = 0;
+ sync_cnt = 1;
+ }
+ printf("\rpkts: %" PRIu64 ", alloc %" PRIu64 ","
+ " errors %" PRIu64 ", pps %" PRIu64 ","
+ " free %" PRIu64 ".",
+ stat_pkts, stat_pkts_alloc, stat_errors,
+ (stat_pkts + stat_pkts_alloc - stat_pkts_prev),
+ stat_free);
+ fflush(stdout);
+ stat_pkts_prev = stat_pkts + stat_pkts_alloc;
+ }
+ }
+
+ /* cleanup and exit */
+ ret = odp_pktio_stop(ipc_pktio);
+ if (ret) {
+ ODPH_DBG("odp_pktio_stop error %d\n", ret);
+ return -1;
+ }
+
+exit:
+ ret = odp_pktio_close(ipc_pktio);
+ if (ret) {
+ ODPH_DBG("odp_pktio_close error %d\n", ret);
+ return -1;
+ }
+
+ return (stat_errors || stat_pkts < 1000) ? -1 : 0;
+}
+
+/**
+ * ODP packet example main function
+ */
+int main(int argc, char *argv[])
+{
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ odp_instance_t instance;
+ int ret;
+ cpu_set_t cpu_set;
+ odp_cpumask_t mask;
+ int cpu;
+ pid_t pid;
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv);
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, NULL, NULL)) {
+ ODPH_ERR("Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_cpumask_default_worker(&mask, 0);
+ cpu = odp_cpumask_first(&mask);
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(cpu, &cpu_set);
+
+ pid = getpid();
+
+ if (sched_setaffinity(pid, sizeof(cpu_set_t), &cpu_set)) {
+ printf("Set CPU affinity failed.\n");
+ return -1;
+ }
+
+ printf("ipc_pktio1 %d run on cpu %d\n", pid, cpu);
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_WORKER)) {
+ ODPH_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Print both system and application information */
+ print_info(NO_PATH(argv[0]));
+
+ /* Create packet pool */
+ memset(&params, 0, sizeof(params));
+ params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.num = SHM_PKT_POOL_SIZE;
+ params.type = ODP_POOL_PACKET;
+
+ pool = odp_pool_create(TEST_IPC_POOL_NAME, &params);
+ if (pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error: packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_pool_print(pool);
+
+ ret = pktio_run_loop(pool);
+
+ if (odp_pool_destroy(pool)) {
+ ODPH_ERR("Error: odp_pool_destroy() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_local()) {
+ ODPH_ERR("Error: odp_term_local() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Error: odp_term_global() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ODPH_DBG("return %d\n", ret);
+ return ret;
+}
diff --git a/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c b/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c
new file mode 100644
index 000000000..fc3b6833a
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c
@@ -0,0 +1,268 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
+ */
+
+/**
+ * @file
+ *
+ * @example pktio_ipc2.c ODP IPC example application.
+ * This application works in pair with pktio_ipc1 application.
+ * It opens ipc pktio, reads packets and updates magic number.
+ * Also it allocates some packets from internal pool and sends
+ * to ipc pktio.
+ */
+
+#include "ipc_common.h"
+
/* Peer process of the IPC pktio test pair (works with pktio_ipc1).
 *
 * Receives packets, rewrites their head magic from TEST_SEQ_MAGIC to
 * TEST_SEQ_MAGIC_2 and sends them back. Each round also allocates one
 * packet from a local pool, marks it TEST_ALLOC_MAGIC and sends it
 * (the peer only counts and frees those).
 *
 * @param master_pid  PID of the master process (0 when this side acts
 *                    as master)
 *
 * @retval 0  when more than 1000 packets were relayed
 * @retval -1 on error or timeout
 */
static int ipc_second_process(int master_pid)
{
	odp_pktio_t ipc_pktio = ODP_PKTIO_INVALID;
	odp_pool_param_t params;
	odp_pool_t pool;
	odp_packet_t pkt_tbl[MAX_PKT_BURST];
	odp_packet_t alloc_pkt;
	int pkts;
	int ret;
	int i;
	odp_time_t start_cycle;
	odp_time_t cycle;
	odp_time_t diff;
	odp_time_t wait;
	uint64_t stat_pkts = 0;
	odp_pktin_queue_t pktin;

	/* Create packet pool */
	memset(&params, 0, sizeof(params));
	params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
	params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
	params.pkt.num = SHM_PKT_POOL_SIZE;
	params.type = ODP_POOL_PACKET;

	pool = odp_pool_create(TEST_IPC_POOL_NAME, &params);
	if (pool == ODP_POOL_INVALID) {
		ODPH_ERR("Error: packet pool create failed.\n");
		exit(EXIT_FAILURE);
	}

	wait = odp_time_local_from_ns(start_time_sec * ODP_TIME_SEC_IN_NS);
	start_cycle = odp_time_local();

	/* Create the IPC pktio, retrying until the peer appears or the
	 * start timeout expires */
	for (;;) {
		/* exit loop if time specified */
		if (start_time_sec) {
			cycle = odp_time_local();
			diff = odp_time_diff(cycle, start_cycle);
			if (odp_time_cmp(wait, diff) < 0) {
				printf("timeout exit 1, start_time_sec %d\n",
				       start_time_sec);
				goto not_started;
			}
		}

		ipc_pktio = create_pktio(pool, master_pid);
		if (ipc_pktio != ODP_PKTIO_INVALID)
			break;
		/* As master there is no peer to wait for */
		if (!master_pid)
			break;

		odp_time_wait_ns(50 * ODP_TIME_MSEC_IN_NS);
	}

	if (ipc_pktio == ODP_PKTIO_INVALID) {
		odp_pool_destroy(pool);
		return -1;
	}

	memset(&pktin, 0, sizeof(pktin)); /* not needed but makes GCC happy */
	if (odp_pktin_queue(ipc_pktio, &pktin, 1) != 1) {
		odp_pool_destroy(pool);
		ODPH_ERR("no input queue\n");
		return -1;
	}

	/* start ipc pktio, i.e. wait until other process connects */
	for (;;) {
		/* 1. exit loop if time specified */
		if (start_time_sec) {
			cycle = odp_time_local();
			diff = odp_time_diff(cycle, start_cycle);
			if (odp_time_cmp(wait, diff) < 0) {
				printf("timeout exit 2, start_time_sec %d\n",
				       start_time_sec);
				goto not_started;
			}
		}

		ret = odp_pktio_start(ipc_pktio);
		if (!ret)
			break;

		/* Reduce polling frequency to once per 50ms */
		odp_time_wait_ns(50 * ODP_TIME_MSEC_IN_NS);
	}

	/* Main relay loop, bounded by run_time_sec when given */
	wait = odp_time_local_from_ns(run_time_sec * ODP_TIME_SEC_IN_NS);
	start_cycle = odp_time_local();
	for (;;) {
		/* exit loop if time specified */
		if (run_time_sec) {
			cycle = odp_time_local();
			diff = odp_time_diff(cycle, start_cycle);
			if (odp_time_cmp(wait, diff) < 0) {
				ODPH_DBG("exit after %d seconds\n",
					 run_time_sec);
				break;
			}
		}

		/* recv some packets and change MAGIC to MAGIC_2 */
		pkts = odp_pktin_recv(pktin, pkt_tbl, MAX_PKT_BURST);
		if (pkts <= 0)
			continue;

		for (i = 0; i < pkts; i++) {
			odp_packet_t pkt = pkt_tbl[i];
			pkt_head_t head;
			size_t off;

			off = odp_packet_l4_offset(pkt);
			if (off == ODP_PACKET_OFFSET_INVALID) {
				ODPH_ERR("invalid l4 offset\n");
				/* Free this and all remaining packets of
				 * the burst; only pkt_tbl[0..i) get sent */
				for (int j = i; j < pkts; j++)
					odp_packet_free(pkt_tbl[j]);
				break;
			}

			off += ODPH_UDPHDR_LEN;
			ret = odp_packet_copy_to_mem(pkt, off, sizeof(head),
						     &head);
			if (ret)
				ODPH_ABORT("unable copy out head data");

			if (head.magic != TEST_SEQ_MAGIC) {
				ODPH_ERR("Wrong head magic! %x", head.magic);
				for (int j = i; j < pkts; j++)
					odp_packet_free(pkt_tbl[j]);
				break;
			}

			/* Modify magic number in packet */
			head.magic = TEST_SEQ_MAGIC_2;
			ret = odp_packet_copy_from_mem(pkt, off, sizeof(head),
						       &head);
			if (ret)
				ODPH_ABORT("unable to copy in head data");
		}

		/* send all packets back (i, not pkts: the validated prefix) */
		ret = ipc_odp_packet_send_or_free(ipc_pktio, pkt_tbl, i);
		if (ret < 0)
			ODPH_ABORT("can not send packets\n");

		stat_pkts += ret;

		/* alloc packet from local pool, set magic to ALLOC_MAGIC,
		 * and send it.*/
		alloc_pkt = odp_packet_alloc(pool, SHM_PKT_POOL_BUF_SIZE);
		if (alloc_pkt != ODP_PACKET_INVALID) {
			pkt_head_t head;
			size_t off;

			odp_packet_l4_offset_set(alloc_pkt, 30);

			head.magic = TEST_ALLOC_MAGIC;

			off = odp_packet_l4_offset(alloc_pkt);
			off += ODPH_UDPHDR_LEN;
			ret = odp_packet_copy_from_mem(alloc_pkt, off,
						       sizeof(head),
						       &head);
			if (ret)
				ODPH_ABORT("unable to copy in head data");

			pkt_tbl[0] = alloc_pkt;
			ret = ipc_odp_packet_send_or_free(ipc_pktio,
							  pkt_tbl, 1);
			if (ret < 0)
				ODPH_ABORT("can not send packets\n");
			stat_pkts += 1;
		}
	}

	/* cleanup and exit */
	ret = odp_pktio_stop(ipc_pktio);
	if (ret) {
		/* NOTE(review): the local pool is not destroyed on this
		 * error return - acceptable for a test program that exits
		 * right after, but worth confirming */
		ODPH_DBG("ipc2: odp_pktio_stop error %d\n", ret);
		return -1;
	}

not_started:
	/* NOTE(review): when the timeout paths above jump here before
	 * create_pktio() succeeded, ipc_pktio is still ODP_PKTIO_INVALID
	 * when passed to odp_pktio_close() - confirm close handles that */
	ret = odp_pktio_close(ipc_pktio);
	if (ret) {
		ODPH_DBG("ipc2: odp_pktio_close error %d\n", ret);
		return -1;
	}

	ret = odp_pool_destroy(pool);
	if (ret)
		ODPH_DBG("ipc2: pool_destroy error %d\n", ret);

	/* Pass only when a meaningful amount of traffic was relayed */
	return stat_pkts > 1000 ? 0 : -1;
}
+
+int main(int argc, char *argv[])
+{
+ odp_instance_t instance;
+ int ret;
+ cpu_set_t cpu_set;
+ odp_cpumask_t mask;
+ int cpu;
+ pid_t pid;
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv);
+
+ if (odp_init_global(&instance, NULL, NULL)) {
+ ODPH_ERR("Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_cpumask_default_worker(&mask, 0);
+ cpu = odp_cpumask_first(&mask);
+ ret = odp_cpumask_next(&mask, cpu);
+ if (ret != -1)
+ cpu = ret;
+
+ CPU_ZERO(&cpu_set);
+ CPU_SET(cpu, &cpu_set);
+
+ pid = getpid();
+
+ if (sched_setaffinity(pid, sizeof(cpu_set_t), &cpu_set)) {
+ printf("Set CPU affinity failed to cpu %d.\n", cpu);
+ return -1;
+ }
+
+ printf("ipc_pktio2 %d run on cpu %d\n", pid, cpu);
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_WORKER)) {
+ ODPH_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ret = ipc_second_process(master_pid);
+
+ if (odp_term_local()) {
+ ODPH_ERR("Error: odp_term_local() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(instance)) {
+ ODPH_ERR("Error: odp_term_global() failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/platform/linux-generic/test/pktio_ipc/pktio_ipc_run.sh b/platform/linux-generic/test/pktio_ipc/pktio_ipc_run.sh
new file mode 100755
index 000000000..b181668e8
--- /dev/null
+++ b/platform/linux-generic/test/pktio_ipc/pktio_ipc_run.sh
@@ -0,0 +1,85 @@
#!/bin/sh
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2015-2018 Linaro Limited
# Copyright (c) 2023 Nokia
#

# directories where test binary can be found:
# -in the validation dir when running make check (intree or out of tree)
# -in the script directory, when running after 'make install', or
# -in the validation when running standalone (./pktio_ipc_run) intree.
# -in the current directory.
# running stand alone out of tree requires setting PATH
PATH=./pktio_ipc:$PATH
PATH=$(dirname $0):$PATH
PATH=$(dirname $0)/../../../../platform/linux-generic/test/pktio_ipc:$PATH
PATH=.:$PATH

# Seconds the apps wait for each other to connect
STARTTIME=30
# Seconds the packet loop runs
RUNTIME=1

# Run the IPC application pair twice, once in each start order.
# Exits with the first failing stage's status, 0 when both pass.
run()
{
	local ret=0

	echo "==== run pktio_ipc1 then pktio_ipc2 ===="
	pktio_ipc1${EXEEXT} -s ${STARTTIME} -t ${RUNTIME} &
	IPC_PID=$!

	pktio_ipc2${EXEEXT} -p ${IPC_PID} -s ${STARTTIME} -t ${RUNTIME}
	ret=$?

	# If kill succeeds the background app was still alive (it hung);
	# reap it and remove its leftover shared memory files
	(kill ${IPC_PID} 2>&1 > /dev/null ) > /dev/null
	if [ $? -eq 0 ]; then
		wait $IPC_PID
		echo "pktio_ipc1${EXEEXT} was killed"
		ls -l /dev/shm/${UID}/odp* 2> /dev/null
		rm -rf /dev/shm/${UID}/odp-${IPC_PID}* 2>&1 > /dev/null
	else
		echo "normal exit of 2 application"
		ls -l /dev/shm/${UID}/odp* 2> /dev/null
	fi

	if [ $ret -ne 0 ]; then
		echo "!!!First stage FAILED $ret!!!"
		exit $ret
	else
		echo "First stage PASSED"
	fi

	# Second stage: same test with reversed start order
	echo "==== run pktio_ipc2 then pktio_ipc1 ===="
	pktio_ipc2${EXEEXT} -s ${STARTTIME} -t ${RUNTIME} &
	IPC_PID=$!

	pktio_ipc1${EXEEXT} -p ${IPC_PID} -s ${STARTTIME} -t ${RUNTIME}
	ret=$?

	(kill ${IPC_PID} 2>&1 > /dev/null ) > /dev/null
	if [ $? -eq 0 ]; then
		wait $IPC_PID
		echo "pktio_ipc2${EXEEXT} was killed"
		ls -l /dev/shm/${UID}/odp* 2> /dev/null
		rm -rf /dev/shm/${UID}/odp-${IPC_PID}* 2>&1 > /dev/null
	else
		echo "normal exit of 2 application"
		ls -l /dev/shm/${UID}/odp* 2> /dev/null
	fi

	if [ $ret -ne 0 ]; then
		echo "!!! FAILED !!!"
		ls -l /dev/shm/${UID}/odp* 2> /dev/null
		rm -rf /dev/shm/${UID}/odp-${IPC_PID}* 2>&1 > /dev/null
		exit $ret
	else
		ls -l /dev/shm/${UID}/odp* 2> /dev/null
		echo "Second stage PASSED"
	fi

	echo "!!!PASSED!!!"
	exit 0
}

case "$1" in
	*) run ;;
esac
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
new file mode 100644
index 000000000..f4c6f7952
--- /dev/null
+++ b/platform/linux-generic/test/process-mode.conf
@@ -0,0 +1,9 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.28"
+
+# Shared memory options
+shm: {
+ # Increase the amount of single VA memory
+ single_va_size_kb = 1048576
+}
diff --git a/platform/linux-generic/test/sched-basic.conf b/platform/linux-generic/test/sched-basic.conf
new file mode 100644
index 000000000..8a6d0ac98
--- /dev/null
+++ b/platform/linux-generic/test/sched-basic.conf
@@ -0,0 +1,13 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.28"
+
+# Test scheduler with an odd spread value and without dynamic load balance
+sched_basic: {
+ prio_spread = 3
+ load_balance = 0
+ powersave: {
+ poll_time_nsec = 5000
+ sleep_time_nsec = 50000
+ }
+}
diff --git a/platform/linux-generic/test/stash-custom.conf b/platform/linux-generic/test/stash-custom.conf
new file mode 100644
index 000000000..6a2496303
--- /dev/null
+++ b/platform/linux-generic/test/stash-custom.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.28"
+
+# Test overflow safe stash variant
+stash: {
+ strict_size = 0
+}
diff --git a/platform/linux-generic/test/validation/api/Makefile.inc b/platform/linux-generic/test/validation/api/Makefile.inc
new file mode 100644
index 000000000..cda6237ea
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/Makefile.inc
@@ -0,0 +1 @@
+include $(top_srcdir)/test/validation/api/Makefile.inc
diff --git a/platform/linux-generic/test/validation/api/ml/.gitignore b/platform/linux-generic/test/validation/api/ml/.gitignore
new file mode 100644
index 000000000..e31f902c4
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/.gitignore
@@ -0,0 +1 @@
+ml_linux
diff --git a/platform/linux-generic/test/validation/api/ml/Makefile.am b/platform/linux-generic/test/validation/api/ml/Makefile.am
new file mode 100644
index 000000000..f4b9e9755
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/Makefile.am
@@ -0,0 +1,34 @@
+include ../Makefile.inc
+
+test_PROGRAMS = ml_linux
+ml_linux_SOURCES = ml_linux.c
+
+EXTRA_DIST = \
+ batch_add_gen.py \
+ batch_add.onnx \
+ gen_models.sh \
+ README.md \
+ requirements.txt \
+ simple_linear_gen.py \
+ simple_linear.onnx
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(EXTRA_DIST); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/test/validation/api/ml/README.md b/platform/linux-generic/test/validation/api/ml/README.md
new file mode 100644
index 000000000..80ad30e96
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/README.md
@@ -0,0 +1,23 @@
+# How to run ML validation test
+
+Simple onnx models are used to test ML API.
+
+## Generate models
+
+### Install python requirements
+
+```bash
+python3 -m pip install -r <this directory>/requirements.txt
+```
+
+### Generate models for validation tests
+
+```bash
+<this directory>/gen_models.sh
+```
+
+## Run ML validation tests
+
+```bash
+<this directory>/ml_linux
+```
diff --git a/platform/linux-generic/test/validation/api/ml/batch_add.onnx b/platform/linux-generic/test/validation/api/ml/batch_add.onnx
new file mode 100644
index 000000000..43485f463
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/batch_add.onnx
Binary files differ
diff --git a/platform/linux-generic/test/validation/api/ml/batch_add_gen.py b/platform/linux-generic/test/validation/api/ml/batch_add_gen.py
new file mode 100644
index 000000000..33515bd2f
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/batch_add_gen.py
@@ -0,0 +1,32 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Nokia
#

"""Generate batch_add.onnx: a minimal ONNX model that computes
y = x1 + x2 on double tensors of shape [c, 3], where "c" is a
dynamic (symbolic) batch dimension."""

import onnx
from onnx import helper
from onnx import TensorProto

graph = helper.make_graph(
    [  # nodes
        helper.make_node("Add", ["x1", "x2"], ["y"], "Batch Add"),
    ],
    "Batch Add",  # name
    [  # inputs
        helper.make_tensor_value_info('x1', TensorProto.DOUBLE, ["c", 3]),
        helper.make_tensor_value_info('x2', TensorProto.DOUBLE, ["c", 3]),
    ],
    [  # outputs
        helper.make_tensor_value_info('y', TensorProto.DOUBLE, ["c", 3]),
    ]
)

model = helper.make_model(
    graph,
    opset_imports=[helper.make_opsetid("", 14)],
    producer_name='ODP validation tests',
    model_version=1,
    doc_string="y = x1 + x2",
    # PEP 8: no spaces around '=' in keyword arguments (was 'ir_version = 8')
    ir_version=8,
)

onnx.save(model, 'batch_add.onnx')
diff --git a/platform/linux-generic/test/validation/api/ml/gen_models.sh b/platform/linux-generic/test/validation/api/ml/gen_models.sh
new file mode 100755
index 000000000..d88f3c432
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/gen_models.sh
@@ -0,0 +1,14 @@
#!/bin/bash
#
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2023 Nokia
#

# Regenerate the ONNX models used by the ML validation tests.
# Requires the python packages listed in requirements.txt.

set -e

# cd to the directory where this script is in
cd "$( dirname "${BASH_SOURCE[0]}" )"

python3 simple_linear_gen.py

python3 batch_add_gen.py
diff --git a/platform/linux-generic/test/validation/api/ml/ml_linux.c b/platform/linux-generic/test/validation/api/ml/ml_linux.c
new file mode 100644
index 000000000..28e18fbb5
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/ml_linux.c
@@ -0,0 +1,1167 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <unistd.h>
+#include <string.h>
+#include <libgen.h>
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+#include "odp_cunit_common.h"
+
+#define TIMEOUT 5
+#define MODEL_NAME "Test"
+#define NUM_INPUTS 1
+#define NUM_OUTPUTS 1
+#define RUN_NUM 2
+#define BUF_LEN 256
+#define CONFIG_MAX_MODEL_SIZE 500
+
+#define COMPL_POOL_NAME "ML compl pool"
+#define NUM_COMPL 10
+
+/**
+ * About model simple_linear.onnx being tested in this suite
+ *
+ * Model info:
+ * Version: 1
+ * Inputs: name: x, type: int32, shape: [1]
+ * Outputs: name: y, type: int32, shape: [1]
+ *
+ * The model is of form y = 3 * x + 4
+ * Thus when x = 5, the output y should be 19.
+ */
+typedef struct global_t {
+ int disabled;
+ odp_ml_capability_t ml_capa;
+ odp_ml_config_t ml_config;
+ odp_ml_model_param_t model_param;
+ odp_ml_model_t ml_model;
+ odp_pool_t compl_pool;
+ odp_queue_t queue;
+ odp_ml_data_t data;
+ odp_ml_data_seg_t input_seg;
+ odp_ml_data_seg_t output_seg;
+ odp_ml_run_param_t run_param;
+ uint64_t wait_ns;
+ int32_t x;
+ int32_t y;
+ int32_t y_expected;
+
+} global_t;
+
+static global_t global;
+
+static int fill_model_param(const char *model_name, odp_ml_model_param_t *model_param)
+{
+ size_t size;
+ char *pos;
+ char *exe_dir;
+ size_t exe_dir_len;
+ FILE *model_file;
+ char exe_path[BUF_LEN];
+ ssize_t exe_path_len;
+ char model_path[BUF_LEN];
+
+ /* Model file is placed in the same directory as the executable ml_linux */
+ exe_path_len = readlink("/proc/self/exe", exe_path, BUF_LEN - 1);
+ if (exe_path_len != -1) {
+ exe_path[exe_path_len] = '\0';
+
+ pos = strstr(exe_path, ".libs");
+ if (pos)
+ *(pos + 5) = '\0';
+
+ exe_dir = dirname(exe_path);
+ exe_dir_len = strlen(exe_dir);
+
+ memcpy(model_path, exe_dir, exe_dir_len);
+ model_path[exe_dir_len] = '/';
+ model_path[exe_dir_len + 1] = '\0';
+
+ strncat(model_path, model_name, BUF_LEN - strlen(model_path) - 1);
+ ODPH_DBG("model_path: %s\n", model_path);
+ model_file = fopen(model_path, "rb");
+ } else { /* Can't get executable path, try to find model file at current dir*/
+ model_file = fopen(model_name, "rb");
+ }
+
+ if (model_file == NULL) {
+ perror("Failed to open model file");
+ return -1;
+ }
+
+ /* Get the model file size in bytes */
+ fseek(model_file, 0, SEEK_END);
+ model_param->size = ftell(model_file);
+ rewind(model_file);
+
+ model_param->model = malloc(model_param->size);
+ if (!model_param->model) {
+ ODPH_ERR("\n\nMemory allocation failed\n");
+ fclose(model_file);
+ return -1;
+ }
+ size = fread(model_param->model, model_param->size, 1, model_file);
+
+ fclose(model_file);
+ if (size != 1) {
+ ODPH_ERR("\n\nRead model file failed\n");
+ return -1;
+ }
+
+ model_param->max_compl_id = 0;
+
+ return 0;
+}
+
/* Suite init: query ML capability, configure ML, prepare run parameters
 * for the simple_linear model (y = 3x + 4, so x = 5 -> y = 19) and
 * create the model. When event completion is supported, additionally
 * create a scheduled completion queue and a completion pool.
 *
 * Sets global.disabled when the implementation supports no models.
 *
 * @retval 0  on success (or when ML is disabled)
 * @retval -1 on failure
 */
static int ml_suite_init(void)
{
	odp_ml_capability_t *ml_capa = &global.ml_capa;
	odp_queue_param_t queue_param;
	odp_ml_compl_pool_param_t ml_pool_param;

	/* Zeroing also leaves model_param.model NULL, keeping the
	 * free() at the error label safe */
	memset(&global, 0, sizeof(global_t));
	global.queue = ODP_QUEUE_INVALID;
	global.compl_pool = ODP_POOL_INVALID;

	if (odp_ml_capability(ml_capa)) {
		ODPH_ERR("ML capability failed\n");
		return -1;
	}

	if (ml_capa->max_models == 0) {
		global.disabled = 1;
		ODPH_DBG("ML test disabled\n");
		return 0;
	}

	/* Configure ML */
	odp_ml_config_init(&global.ml_config);
	global.ml_config.max_models_created = ml_capa->max_models;
	global.ml_config.max_models_loaded = ml_capa->max_models_loaded;
	global.ml_config.max_model_size = CONFIG_MAX_MODEL_SIZE;

	/* Enable every completion mode the implementation supports,
	 * for both model load and model run */
	if (ml_capa->load.compl_mode_mask & ODP_ML_COMPL_MODE_SYNC)
		global.ml_config.load_mode_mask |= ODP_ML_COMPL_MODE_SYNC;

	if (ml_capa->load.compl_mode_mask & ODP_ML_COMPL_MODE_POLL)
		global.ml_config.load_mode_mask |= ODP_ML_COMPL_MODE_POLL;

	if (ml_capa->load.compl_mode_mask & ODP_ML_COMPL_MODE_EVENT)
		global.ml_config.load_mode_mask |= ODP_ML_COMPL_MODE_EVENT;

	if (ml_capa->run.compl_mode_mask & ODP_ML_COMPL_MODE_SYNC)
		global.ml_config.run_mode_mask |= ODP_ML_COMPL_MODE_SYNC;

	if (ml_capa->run.compl_mode_mask & ODP_ML_COMPL_MODE_POLL)
		global.ml_config.run_mode_mask |= ODP_ML_COMPL_MODE_POLL;

	if (ml_capa->run.compl_mode_mask & ODP_ML_COMPL_MODE_EVENT)
		global.ml_config.run_mode_mask |= ODP_ML_COMPL_MODE_EVENT;

	if (odp_ml_config(&global.ml_config)) {
		ODPH_ERR("\n\nConfiguring ML failed\n");
		return -1;
	}

	global.x = 5;
	global.wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
	global.y_expected = 19; /* y = 3 * x + 4 = 3 * 5 + 4 = 19 */

	/* Prepare data for running model inference */
	odp_ml_run_param_init(&global.run_param);

	/* Single int32 input segment (x) and output segment (y) */
	global.data.num_input_seg = NUM_INPUTS;
	global.data.input_seg = &global.input_seg;
	global.input_seg.size = sizeof(int32_t);
	global.input_seg.addr = &global.x;

	global.data.num_output_seg = NUM_OUTPUTS;
	global.data.output_seg = &global.output_seg;
	global.output_seg.size = sizeof(int32_t);
	global.output_seg.addr = &global.y;

	/* Reads the model file into model_param (freed in ml_suite_term) */
	if (fill_model_param("simple_linear.onnx", &global.model_param))
		return -1;

	/* Create ML model */
	global.ml_model = odp_ml_model_create(MODEL_NAME, &global.model_param);
	if (global.ml_model == ODP_ML_MODEL_INVALID) {
		ODPH_ERR("Create ML model failed\n");
		goto error;
	}

	/* Asynchronous mode with event completion is not supported */
	if (!((ml_capa->load.compl_mode_mask & ODP_ML_COMPL_MODE_EVENT) ||
	      (ml_capa->run.compl_mode_mask & ODP_ML_COMPL_MODE_EVENT)))
		return 0;

	/* Create a queue for sending ML completion event to */
	odp_queue_param_init(&queue_param);
	queue_param.type = ODP_QUEUE_TYPE_SCHED;
	queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
	queue_param.sched.prio = odp_schedule_default_prio();
	queue_param.sched.group = ODP_SCHED_GROUP_ALL;

	global.queue = odp_queue_create("ML compl queue", &queue_param);
	if (global.queue == ODP_QUEUE_INVALID) {
		ODPH_ERR("Queue create failed\n");
		goto error;
	}

	/* Create an ML job completion pool */
	if (ml_capa->pool.max_num < NUM_COMPL) {
		ODPH_ERR("Too small ML compl pool %u\n", ml_capa->pool.max_num);
		goto error;
	}

	odp_ml_compl_pool_param_init(&ml_pool_param);
	ml_pool_param.num = NUM_COMPL;

	global.compl_pool = odp_ml_compl_pool_create(COMPL_POOL_NAME, &ml_pool_param);
	if (global.compl_pool == ODP_POOL_INVALID) {
		ODPH_ERR("Create ML completion pool failed\n");
		goto error;
	}

	return 0;

error:
	/* NOTE(review): a successfully created ml_model (and queue) is not
	 * destroyed when a later creation step fails - confirm whether the
	 * test framework still calls ml_suite_term() after a failed init */
	free(global.model_param.model);
	return -1;
}
+
/* Suite termination: release the completion pool, the model, the
 * completion queue and the model file buffer allocated in
 * ml_suite_init().
 *
 * @retval 0  on success
 * @retval -1 when any resource destruction fails
 */
static int ml_suite_term(void)
{
	if (global.compl_pool != ODP_POOL_INVALID &&
	    odp_pool_destroy(global.compl_pool)) {
		ODPH_ERR("Completion pool destroy failed\n");
		return -1;
	}

	/* NOTE(review): this truthiness test assumes ODP_ML_MODEL_INVALID
	 * is 0 (the handle is zero-initialized by memset in ml_suite_init);
	 * an explicit != ODP_ML_MODEL_INVALID comparison like the other
	 * checks here would be clearer - confirm the invalid value */
	if (global.ml_model && odp_ml_model_destroy(global.ml_model)) {
		ODPH_ERR("Destroy ML model failed\n");
		return -1;
	}

	if (global.queue != ODP_QUEUE_INVALID &&
	    odp_queue_destroy(global.queue)) {
		ODPH_ERR("Destroy ML queue failed\n");
		return -1;
	}

	/* free(NULL) is a no-op, so this is safe when loading failed */
	free(global.model_param.model);

	return 0;
}
+
+static int check_ml_support(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static int check_load_sync(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_SYNC)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_load_poll(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_POLL)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_load_event(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ if (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_EVENT)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_run_sync(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ /* Model run test uses synchronous load */
+ if ((global.ml_config.run_mode_mask & ODP_ML_COMPL_MODE_SYNC) &&
+ (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_SYNC))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_run_poll(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ /* Poll mode model run test uses synchronous load */
+ if ((global.ml_config.run_mode_mask & ODP_ML_COMPL_MODE_POLL) &&
+ (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_SYNC))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_run_event(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+	/* Event mode model run test uses synchronous load */
+ if ((global.ml_config.run_mode_mask & ODP_ML_COMPL_MODE_EVENT) &&
+ (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_SYNC))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static int check_run_poll_event(void)
+{
+ if (global.disabled)
+ return ODP_TEST_INACTIVE;
+
+ /* test_ml_run_start_multi uses synchronous load, poll mode and event mode run */
+ if ((global.ml_config.run_mode_mask & ODP_ML_COMPL_MODE_EVENT) &&
+ (global.ml_config.run_mode_mask & ODP_ML_COMPL_MODE_POLL) &&
+ (global.ml_config.load_mode_mask & ODP_ML_COMPL_MODE_SYNC))
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
+static void test_ml_debug(void)
+{
+ uint64_t u64;
+
+ u64 = odp_ml_model_to_u64(global.ml_model);
+ CU_ASSERT(u64 != odp_ml_model_to_u64(ODP_ML_MODEL_INVALID));
+ printf("\n ML model handle: 0x%" PRIx64 "\n", u64);
+
+ odp_ml_model_print(global.ml_model);
+}
+
+static void test_ml_model_create(void)
+{
+ uint32_t i;
+ /* One for global.ml_model */
+ uint32_t max_models = global.ml_config.max_models_created - 1;
+ odp_ml_model_t models[max_models];
+
+ for (i = 0; i < max_models; i++) {
+ models[i] = odp_ml_model_create(NULL, &global.model_param);
+
+ if (models[i] == ODP_ML_MODEL_INVALID) {
+ ODPH_ERR("ML model create failed: %u / %u\n", i, max_models);
+ break;
+ }
+ }
+
+ CU_ASSERT(i == max_models);
+ max_models = i;
+
+ /* Destroy valid models */
+ for (i = 0; i < max_models; i++)
+ CU_ASSERT_FATAL(odp_ml_model_destroy(models[i]) == 0);
+}
+
+static void test_ml_model_lookup(void)
+{
+ odp_ml_model_t model2;
+ odp_ml_model_t model_lookup;
+
+ /* Look up model with the same name, should find one with equal handle */
+ model_lookup = odp_ml_model_lookup(MODEL_NAME);
+ CU_ASSERT_FATAL(model_lookup != ODP_ML_MODEL_INVALID);
+ CU_ASSERT(odp_ml_model_to_u64(global.ml_model) == odp_ml_model_to_u64(model_lookup));
+
+ /* Look up model with a different name, should return invalid handle */
+ model_lookup = odp_ml_model_lookup("diff");
+ CU_ASSERT_FATAL(model_lookup == ODP_ML_MODEL_INVALID);
+
+ model2 = odp_ml_model_create(MODEL_NAME, &global.model_param);
+ CU_ASSERT_FATAL(model2 != ODP_ML_MODEL_INVALID);
+ CU_ASSERT(odp_ml_model_to_u64(global.ml_model) != odp_ml_model_to_u64(model2));
+
+ model_lookup = odp_ml_model_lookup(MODEL_NAME);
+ CU_ASSERT(odp_ml_model_to_u64(model_lookup) == odp_ml_model_to_u64(global.ml_model) ||
+ odp_ml_model_to_u64(model_lookup) == odp_ml_model_to_u64(model2));
+
+ CU_ASSERT(odp_ml_model_destroy(model2) == 0);
+}
+
+static void test_ml_model_info(void)
+{
+ int ret;
+ uint32_t num_ret;
+ odp_ml_model_info_t ml_info;
+ odp_ml_input_info_t input_info[2];
+ odp_ml_output_info_t output_info[2];
+
+ /* Verify model info about global.ml_model, namely, simple_linear.onnx */
+ memset(&ml_info, 0x88, sizeof(odp_ml_model_info_t));
+ ret = odp_ml_model_info(global.ml_model, &ml_info);
+ CU_ASSERT(ret == 0);
+ CU_ASSERT(!strcmp(ml_info.name, MODEL_NAME));
+ CU_ASSERT(ml_info.model_version == 1);
+ CU_ASSERT(ml_info.num_inputs == NUM_INPUTS);
+ CU_ASSERT(ml_info.num_outputs == NUM_OUTPUTS);
+
+ num_ret = odp_ml_model_input_info(global.ml_model, input_info, NUM_INPUTS);
+ CU_ASSERT(num_ret == NUM_INPUTS);
+ CU_ASSERT(!strcmp(input_info[0].name, "x"));
+ CU_ASSERT(input_info[0].shape.num_dim == 1);
+ CU_ASSERT(input_info[0].shape.dim[0] == 1);
+ CU_ASSERT((int)input_info[0].data_type == ODP_ML_DATA_TYPE_INT32);
+
+ /* When num is 0, return normally, and input_info is ignored */
+ num_ret = odp_ml_model_input_info(global.ml_model, input_info, 0);
+ CU_ASSERT(num_ret == NUM_INPUTS);
+
+ /* When num is bigger than actual number of inputs, extra input_info is left untouched */
+ input_info[1].data_type = (odp_ml_data_type_t)-1;
+ num_ret = odp_ml_model_input_info(global.ml_model, input_info, NUM_INPUTS + 1);
+ CU_ASSERT(num_ret == NUM_INPUTS);
+ CU_ASSERT(!strcmp(input_info[0].name, "x"));
+ CU_ASSERT(input_info[0].shape.num_dim == 1);
+ CU_ASSERT(input_info[0].shape.dim[0] == 1);
+ CU_ASSERT((int)input_info[0].data_type == ODP_ML_DATA_TYPE_INT32);
+ /* input_info[1] is left untouched */
+ CU_ASSERT(input_info[1].data_type == (odp_ml_data_type_t)-1);
+
+ num_ret = odp_ml_model_output_info(global.ml_model, output_info, NUM_OUTPUTS);
+ CU_ASSERT(num_ret == NUM_OUTPUTS);
+ CU_ASSERT(!strcmp(output_info[0].name, "y"));
+ CU_ASSERT(output_info[0].shape.num_dim == 1);
+ CU_ASSERT(output_info[0].shape.dim[0] == 1);
+ CU_ASSERT((int)output_info[0].data_type == ODP_ML_DATA_TYPE_INT32);
+
+	/* When num is 0, return normally, and output_info is ignored */
+ num_ret = odp_ml_model_output_info(global.ml_model, output_info, 0);
+ CU_ASSERT(num_ret == NUM_OUTPUTS);
+
+	/* When num is bigger than actual number of outputs, extra output_info is left untouched */
+	output_info[1].shape.num_dim = 98876;
+	num_ret = odp_ml_model_output_info(global.ml_model, output_info, NUM_OUTPUTS + 1);
+ CU_ASSERT(num_ret == NUM_OUTPUTS);
+ CU_ASSERT(!strcmp(output_info[0].name, "y"));
+ CU_ASSERT(output_info[0].shape.num_dim == 1);
+ CU_ASSERT(output_info[0].shape.dim[0] == 1);
+ CU_ASSERT((int)output_info[0].data_type == ODP_ML_DATA_TYPE_INT32);
+ /* output_info[1] is left untouched */
+ CU_ASSERT(output_info[1].shape.num_dim == 98876);
+}
+
+static void test_ml_model_load(void)
+{
+ int ret;
+ odp_ml_model_t test_model;
+ odp_ml_load_result_t result;
+
+ test_model = odp_ml_model_create(NULL, &global.model_param);
+ CU_ASSERT_FATAL(test_model != ODP_ML_MODEL_INVALID);
+
+ ret = odp_ml_model_load(test_model, &result);
+ CU_ASSERT(ret == 0);
+ CU_ASSERT(result.error_code == 0);
+
+ ret = odp_ml_model_unload(test_model, NULL);
+ CU_ASSERT(ret == 0);
+
+ CU_ASSERT(odp_ml_model_destroy(test_model) == 0);
+}
+
+/* Test asynchronous model loading in ODP_ML_COMPL_MODE_POLL mode */
+static void test_ml_model_load_async_poll(void)
+{
+ int ret;
+ odp_ml_load_result_t result;
+ odp_ml_compl_param_t compl_param;
+ int dummy = 6;
+ void *user_ptr = &dummy;
+ uint64_t wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
+
+ memset(&result, 0, sizeof(result));
+ odp_ml_compl_param_init(&compl_param);
+ compl_param.mode = ODP_ML_COMPL_MODE_POLL;
+ compl_param.compl_id = 0;
+ compl_param.user_ptr = user_ptr;
+
+ ret = odp_ml_model_load_start(global.ml_model, &compl_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* When odp_ml_model_load_start() succeeded, continue to check completion status */
+ for (int i = 0; i < TIMEOUT; i++) {
+ ret = odp_ml_model_load_status(global.ml_model, 0, &result);
+ if (ret)
+ break;
+
+ /* ret = 0 meaning run has not finished, continue to check status */
+ odp_time_wait_ns(wait_ns);
+ }
+
+ CU_ASSERT(ret > 0);
+ CU_ASSERT(result.error_code == 0);
+ CU_ASSERT(result.user_ptr == user_ptr);
+ /* odp_ml_model_load does not modify data in user_ptr */
+ if (result.user_ptr)
+ CU_ASSERT(*(int *)result.user_ptr == dummy);
+
+ ret = odp_ml_model_unload_start(global.ml_model, &compl_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* When odp_ml_model_unload_start() succeeded, continue to check completion
+ * status */
+ for (int i = 0; i < TIMEOUT; i++) {
+ ret = odp_ml_model_unload_status(global.ml_model, 0, &result);
+ if (ret)
+ break;
+
+ /* ret = 0 meaning run has not finished, continue to check status */
+ odp_time_wait_ns(wait_ns);
+ }
+
+ CU_ASSERT_FATAL(ret > 0);
+ CU_ASSERT(result.error_code == 0);
+ CU_ASSERT(result.user_ptr == user_ptr);
+
+ /* odp_ml_model_unload does not modify data in user_ptr */
+ if (result.user_ptr)
+ CU_ASSERT(*(int *)result.user_ptr == dummy);
+}
+
+static int
+get_result_from_ml_compl_event(odp_ml_load_result_t *load_result, odp_ml_run_result_t *run_result)
+{
+ int ret;
+ odp_event_t ev;
+ odp_ml_compl_t compl;
+ odp_event_type_t ev_type;
+ odp_queue_t from_queue = ODP_QUEUE_INVALID;
+ uint64_t sched_wait = odp_schedule_wait_time(global.wait_ns);
+
+ /* Run event scheduler to find the ml completion event */
+ for (int i = 0; i < TIMEOUT; i++) {
+ ev = odp_schedule(&from_queue, sched_wait);
+ if (ev != ODP_EVENT_INVALID)
+ break;
+ }
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ if (ev == ODP_EVENT_INVALID) {
+ ODPH_ERR("Timeout while waiting for completion event\n");
+ return -1;
+ }
+
+ ev_type = odp_event_type(ev);
+ CU_ASSERT(from_queue == global.queue);
+ CU_ASSERT(ev_type == ODP_EVENT_ML_COMPL);
+ if (from_queue != global.queue || ev_type != ODP_EVENT_ML_COMPL) {
+ odp_event_free(ev);
+ ODPH_ERR("Received unexpected event while waiting for completion\n");
+ return -1;
+ }
+
+ compl = odp_ml_compl_from_event(ev);
+ CU_ASSERT(compl != ODP_ML_COMPL_INVALID);
+
+ if (load_result) {
+ CU_ASSERT(odp_ml_compl_load_result(compl, NULL) == 0);
+ ret = odp_ml_compl_load_result(compl, load_result);
+ } else {
+ CU_ASSERT(odp_ml_compl_run_result(compl, NULL) == 0);
+ ret = odp_ml_compl_run_result(compl, run_result);
+ }
+
+ CU_ASSERT(ret == 0);
+ odp_ml_compl_free(compl);
+
+ return ret;
+}
+
+/* Test asynchronous model loading in ODP_ML_COMPL_MODE_EVENT mode */
+static void test_ml_model_load_async_event(void)
+{
+ int ret;
+ odp_ml_compl_t compl;
+ odp_ml_load_result_t result;
+ odp_ml_compl_param_t compl_param;
+ int dummy = 6;
+ void *user_ptr = &dummy;
+
+ compl = odp_ml_compl_alloc(global.compl_pool);
+ CU_ASSERT_FATAL(compl != ODP_ML_COMPL_INVALID);
+
+ odp_ml_compl_param_init(&compl_param);
+ compl_param.mode = ODP_ML_COMPL_MODE_EVENT;
+ compl_param.event = odp_ml_compl_to_event(compl);
+ compl_param.queue = global.queue;
+ compl_param.user_ptr = user_ptr;
+
+ ret = odp_ml_model_load_start(global.ml_model, &compl_param);
+ CU_ASSERT(ret == 0);
+
+ /* Return when odp_ml_model_load_start() failed */
+ if (ret) {
+ odp_ml_compl_free(compl);
+ ODPH_ERR("ML model odp_ml_model_load_start() failed\n");
+ return;
+ }
+
+ /* Run event scheduler to find the ml completion event and verify it */
+ if (get_result_from_ml_compl_event(&result, NULL))
+ return;
+
+ CU_ASSERT(result.error_code == 0);
+ CU_ASSERT(result.user_ptr == user_ptr);
+
+ /* Model load does not modify data in user_ptr */
+ if (result.user_ptr)
+ CU_ASSERT(*(int *)result.user_ptr == dummy);
+
+ compl = odp_ml_compl_alloc(global.compl_pool);
+ CU_ASSERT(compl != ODP_ML_COMPL_INVALID);
+
+ if (compl == ODP_ML_COMPL_INVALID)
+ return;
+
+ compl_param.event = odp_ml_compl_to_event(compl);
+ ret = odp_ml_model_unload_start(global.ml_model, &compl_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ /* Run event scheduler to find the ml completion event and verify it */
+ if (get_result_from_ml_compl_event(&result, NULL))
+ return;
+
+ CU_ASSERT(result.error_code == 0);
+ CU_ASSERT(result.user_ptr == user_ptr);
+
+ /* odp_ml_model_unload does not modify data in user_ptr */
+ if (result.user_ptr)
+ CU_ASSERT(*(int *)result.user_ptr == dummy);
+}
+
+/* About model batch_add.onnx being tested in this function
+ *
+ * Model info:
+ * Version: 1
+ * Inputs:
+ * inputs[0]: name: x1, type: double, shape: [c, 3]
+ * inputs[1]: name: x2, type: double, shape: [c, 3]
+ * Outputs:
+ * Outputs[0]: name: y, type: double, shape: [c, 3]
+ *
+ * The model computes element-wise sum of input tensors x1 and x2 and stores them
+ * in y. The first dimension of input and output tensors represent batch size,
+ * thus it must be the same for all tensors here. The dynamic dimension size
+ * in the output tensor here can be deduced from the given batch size, thus no
+ * need for the implementation to fill it.
+ */
+#define NUM_COLUMN 3
+#define MAX_BATCH_SIZE 4
+#define SIZE (NUM_COLUMN * MAX_BATCH_SIZE * sizeof(double))
+static void run_model_batch_add(void)
+{
+ int ret;
+ odp_ml_data_t data;
+ odp_ml_model_t model;
+ odp_ml_data_seg_t input_segs[SIZE * 2];
+ odp_ml_data_seg_t output_segs[SIZE];
+ odp_ml_run_result_t result;
+ odp_ml_run_param_t run_param;
+ odp_ml_model_param_t model_param;
+
+ double y[12];
+ double y_expected[12];
+ uint32_t batch_size = MAX_BATCH_SIZE;
+ double x1[12] = {97, 47, 62, 19, 93, 59, 67, 42, 28, 55, 46, 31};
+ double x2[12] = {81, 56, 27, 4, 69, 12, 91, 98, 23, 90, 52, 64};
+
+ for (int i = 0; i < 12; i++)
+ y_expected[i] = x1[i] + x2[i];
+
+ odp_ml_model_param_init(&model_param);
+
+ odp_ml_data_format_t input_format[2] = {
+ {
+ .data_type = ODP_ML_DATA_TYPE_FP64,
+ .data_type_size = 8,
+ .shape.type = ODP_ML_SHAPE_BATCH,
+ .shape.num_dim = 2,
+ .shape.dim = {ODP_ML_DIM_DYNAMIC, NUM_COLUMN},
+ .shape.dim_max = {MAX_BATCH_SIZE, NUM_COLUMN}
+ },
+ {
+ .data_type = ODP_ML_DATA_TYPE_FP64,
+ .data_type_size = 8,
+ .shape.type = ODP_ML_SHAPE_BATCH,
+ .shape.num_dim = 2,
+ .shape.dim = {ODP_ML_DIM_DYNAMIC, NUM_COLUMN},
+ .shape.dim_max = {MAX_BATCH_SIZE, NUM_COLUMN}
+ }
+ };
+
+ model_param.extra_info.num_inputs = 2;
+ model_param.extra_info.input_format = input_format;
+
+	/* Fill model parameters from batch_add.onnx */
+ if (fill_model_param("batch_add.onnx", &model_param))
+ return;
+
+ model = odp_ml_model_create("batch_add", &model_param);
+ free(model_param.model);
+ CU_ASSERT(model != ODP_ML_MODEL_INVALID);
+	if (model == ODP_ML_MODEL_INVALID)
+ return;
+
+ if (odp_ml_model_load(model, NULL)) {
+ CU_ASSERT(odp_ml_model_destroy(model) == 0);
+ return;
+ }
+
+ odp_ml_model_print(model);
+
+ /* Prepare parameters for running inference */
+ odp_ml_run_param_init(&run_param);
+ run_param.result = &result;
+
+ data.num_input_seg = 2;
+ data.input_seg = input_segs;
+ input_segs[0].addr = x1;
+ input_segs[1].addr = x2;
+
+ data.num_output_seg = 1;
+ data.output_seg = output_segs;
+ output_segs[0].size = sizeof(y);
+ output_segs[0].addr = y;
+
+ /* Test different batch sizes */
+ for (int i = 0; i < MAX_BATCH_SIZE; i++) {
+ run_param.batch_size = batch_size;
+ input_segs[0].size = sizeof(double) * NUM_COLUMN * batch_size;
+ input_segs[1].size = sizeof(double) * NUM_COLUMN * batch_size;
+ ret = odp_ml_run(model, &data, &run_param);
+ CU_ASSERT(ret == 1);
+ if (ret != 1)
+ goto fail;
+
+ for (uint32_t j = 0; j < batch_size * NUM_COLUMN; j++)
+ CU_ASSERT(y[j] == y_expected[j]);
+
+ batch_size--;
+ }
+
+ /* Test also without run results */
+ run_param.result = NULL;
+ ret = odp_ml_run(model, &data, &run_param);
+ CU_ASSERT(ret == 1);
+
+ /* Test different segment sizes */
+ batch_size = MAX_BATCH_SIZE;
+ odp_ml_run_param_init(&run_param);
+ run_param.result = &result;
+ run_param.batch_size = batch_size;
+ data.input_seg = input_segs;
+ data.output_seg = output_segs;
+
+ for (int seg_size = SIZE; seg_size > 0; seg_size--) {
+ int num_seg = (SIZE + seg_size - 1) / seg_size;
+
+ if ((uint32_t)num_seg > global.ml_capa.max_segs_per_input ||
+ (uint32_t)num_seg > global.ml_capa.max_segs_per_output)
+ break;
+
+ data.num_input_seg = num_seg * 2;
+ data.num_output_seg = num_seg;
+
+ for (int seg = 0; seg < num_seg; seg++) {
+ int size = seg_size;
+
+ if (seg == num_seg - 1)
+ size = SIZE - seg * seg_size;
+
+ input_segs[seg].addr = (char *)x1 + seg * seg_size;
+ input_segs[seg].size = size;
+ input_segs[seg + num_seg].addr = (char *)x2 + seg * seg_size;
+ input_segs[seg + num_seg].size = size;
+ output_segs[seg].addr = (char *)y + seg * seg_size;
+ output_segs[seg].size = size;
+ }
+
+ memset(y, 0, sizeof(y));
+ ret = odp_ml_run(model, &data, &run_param);
+ CU_ASSERT(ret == 1);
+ if (ret != 1)
+ goto fail;
+
+ for (uint32_t j = 0; j < batch_size * NUM_COLUMN; j++)
+ CU_ASSERT(y[j] == y_expected[j]);
+ }
+
+fail:
+ CU_ASSERT_FATAL(odp_ml_model_unload(model, NULL) == 0);
+ CU_ASSERT(odp_ml_model_destroy(model) == 0);
+}
+
+static void run_global_ml_model(void)
+{
+ int ret = 0;
+ odp_ml_run_result_t result;
+
+ ret = odp_ml_model_load(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ global.run_param.result = &result;
+
+ ret = odp_ml_run(global.ml_model, &global.data, &global.run_param);
+ CU_ASSERT(ret == 1);
+ CU_ASSERT(!result.error_code);
+ CU_ASSERT(*(int32_t *)global.output_seg.addr == global.y_expected);
+
+ ret = odp_ml_model_unload(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+ global.run_param.result = NULL;
+}
+
+static void test_ml_run(void)
+{
+ run_global_ml_model();
+ run_model_batch_add();
+}
+
+static void test_ml_run_multi(void)
+{
+ int ret;
+ int32_t y;
+ int32_t x = 8;
+ int32_t y_expected = 28;
+ odp_ml_data_t data[RUN_NUM];
+ odp_ml_data_seg_t input_seg;
+ odp_ml_data_seg_t output_seg;
+ odp_ml_run_param_t param[RUN_NUM];
+ odp_ml_run_result_t result[RUN_NUM];
+ uint64_t wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
+
+ ret = odp_ml_model_load(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ param[0] = global.run_param;
+ param[0].result = &result[0];
+ odp_ml_run_param_init(&param[1]);
+ param[1].result = &result[1];
+
+ /* Prepare data for running model inference */
+ data[0] = global.data;
+ data[1].num_input_seg = NUM_INPUTS;
+ data[1].input_seg = &input_seg;
+ input_seg.size = sizeof(int32_t);
+ input_seg.addr = &x;
+
+ data[1].num_output_seg = NUM_OUTPUTS;
+ data[1].output_seg = &output_seg;
+ output_seg.size = sizeof(int32_t);
+ output_seg.addr = &y;
+
+ int num_completed = 0;
+
+ for (int i = 0; i < TIMEOUT; i++) {
+ ret = odp_ml_run_multi(global.ml_model, data + num_completed, param + num_completed,
+ RUN_NUM - num_completed);
+ CU_ASSERT(ret >= 0);
+ if (ret < 0)
+ break;
+
+ num_completed += ret;
+
+ if (num_completed >= RUN_NUM)
+ break;
+
+ odp_time_wait_ns(wait_ns);
+ }
+
+ CU_ASSERT(num_completed == RUN_NUM);
+ CU_ASSERT(!result[0].error_code);
+ CU_ASSERT(!result[1].error_code);
+ CU_ASSERT(*(int32_t *)global.output_seg.addr == global.y_expected);
+ CU_ASSERT(*(int32_t *)output_seg.addr == y_expected);
+
+ ret = odp_ml_model_unload(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+/* Test asynchronous inference running in ODP_ML_COMPL_MODE_EVENT mode */
+static void test_ml_model_run_async_event(void)
+{
+ int ret;
+ void *user_ptr;
+ odp_ml_compl_t compl;
+ odp_ml_run_result_t result;
+ odp_ml_data_seg_t *outputs;
+ odp_ml_compl_param_t compl_param;
+
+ /* Load model in order to run inference */
+ ret = odp_ml_model_load(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ compl = odp_ml_compl_alloc(global.compl_pool);
+ CU_ASSERT_FATAL(compl != ODP_ML_COMPL_INVALID);
+
+ odp_ml_compl_param_init(&compl_param);
+ compl_param.mode = ODP_ML_COMPL_MODE_EVENT;
+ compl_param.event = odp_ml_compl_to_event(compl);
+ compl_param.queue = global.queue;
+
+ /* user_ptr structure maintains the output data pointer for output retrieval */
+ user_ptr = &global.output_seg;
+ compl_param.user_ptr = user_ptr;
+
+ memset(global.output_seg.addr, 0, global.output_seg.size);
+ ret = odp_ml_run_start(global.ml_model, &global.data, &compl_param, NULL);
+ CU_ASSERT_FATAL(ret == 1);
+
+ /* Run event scheduler to find the ml completion event and verify it */
+ if (get_result_from_ml_compl_event(NULL, &result))
+ return;
+
+ CU_ASSERT(!result.error_code);
+ CU_ASSERT(result.user_ptr == user_ptr);
+
+ outputs = (odp_ml_data_seg_t *)result.user_ptr;
+ CU_ASSERT(*(int32_t *)outputs[0].addr == global.y_expected);
+
+ /* Unload model */
+ ret = odp_ml_model_unload(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+/* Test asynchronous inference running in ODP_ML_COMPL_MODE_POLL mode */
+static void test_ml_model_run_async_poll(void)
+{
+ int ret;
+ void *user_ptr;
+ odp_ml_run_result_t result;
+ odp_ml_data_seg_t *outputs;
+ odp_ml_compl_param_t compl_param;
+ uint64_t wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
+
+ memset(&result, 0, sizeof(result));
+ /* Load model in order to run inference */
+ ret = odp_ml_model_load(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_ml_compl_param_init(&compl_param);
+ compl_param.mode = ODP_ML_COMPL_MODE_POLL;
+ compl_param.compl_id = 0;
+
+ /* user_ptr structure maintains the output data pointer for output retrieval */
+ user_ptr = &global.output_seg;
+ compl_param.user_ptr = user_ptr;
+
+ memset(global.output_seg.addr, 0, global.output_seg.size);
+ ret = odp_ml_run_start(global.ml_model, &global.data, &compl_param, NULL);
+ CU_ASSERT_FATAL(ret == 1);
+
+ /* When odp_ml_run_start() succeeded, continue to check completion status */
+ for (int i = 0; i < TIMEOUT; i++) {
+ ret = odp_ml_run_status(global.ml_model, 0, &result);
+ if (ret)
+ break;
+
+ /* ret = 0 meaning run has not finished, continue to check status */
+ odp_time_wait_ns(wait_ns);
+ }
+
+ outputs = (odp_ml_data_seg_t *)result.user_ptr;
+
+ CU_ASSERT(ret > 0);
+ CU_ASSERT(!result.error_code);
+ CU_ASSERT(result.user_ptr == user_ptr);
+ CU_ASSERT(*(int32_t *)outputs[0].addr == global.y_expected);
+
+ /* Unload model */
+ ret = odp_ml_model_unload(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+static void test_ml_run_start_multi(void)
+{
+ int ret;
+ int32_t y;
+ odp_ml_compl_t compl;
+ odp_ml_data_t data[RUN_NUM];
+ odp_ml_data_seg_t input_seg;
+ odp_ml_data_seg_t output_seg;
+ odp_ml_data_seg_t *outputs[RUN_NUM];
+ odp_ml_compl_param_t compl_param[RUN_NUM];
+ odp_ml_run_result_t run_result[RUN_NUM];
+ int32_t x = 5;
+ int32_t y_expected = 19;
+ uint64_t wait_ns = 500 * ODP_TIME_MSEC_IN_NS;
+
+ /* Load model in order to run inference */
+ ret = odp_ml_model_load(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+
+ compl = odp_ml_compl_alloc(global.compl_pool);
+ CU_ASSERT_FATAL(compl != ODP_ML_COMPL_INVALID);
+
+ /* Prepare data for running model inference */
+ data[0] = global.data;
+
+ data[1].num_input_seg = NUM_INPUTS;
+ data[1].input_seg = &input_seg;
+ input_seg.size = sizeof(int32_t);
+ input_seg.addr = &x;
+
+ data[1].num_output_seg = NUM_OUTPUTS;
+ data[1].output_seg = &output_seg;
+ output_seg.size = sizeof(int32_t);
+ output_seg.addr = &y;
+
+ /* Two completion parameters: one use event mode, another poll mode */
+ odp_ml_compl_param_init(&compl_param[0]);
+ compl_param[0].mode = ODP_ML_COMPL_MODE_EVENT;
+ compl_param[0].event = odp_ml_compl_to_event(compl);
+ compl_param[0].queue = global.queue;
+ /* user_ptr structure maintains the output data pointer for output retrieval */
+ compl_param[0].user_ptr = &global.output_seg;
+
+ odp_ml_compl_param_init(&compl_param[1]);
+ compl_param[1].mode = ODP_ML_COMPL_MODE_POLL;
+ compl_param[1].compl_id = 0;
+ /* user_ptr structure maintains the output data pointer for output retrieval */
+ compl_param[1].user_ptr = &output_seg;
+
+ memset(global.output_seg.addr, 0, sizeof(int32_t));
+
+ int num_completed = 0;
+
+ for (int i = 0; i < TIMEOUT; i++) {
+ ret = odp_ml_run_start_multi(global.ml_model, data + num_completed,
+ compl_param + num_completed, NULL,
+ RUN_NUM - num_completed);
+ CU_ASSERT(ret >= 0);
+ if (ret < 0)
+ break;
+
+ num_completed += ret;
+
+ if (num_completed >= RUN_NUM)
+ break;
+
+ odp_time_wait_ns(wait_ns);
+ }
+
+ CU_ASSERT(num_completed == RUN_NUM);
+
+ /* Run event scheduler to find the ml completion event and verify it */
+ if (get_result_from_ml_compl_event(NULL, &run_result[0])) {
+ ret = odp_ml_model_unload(global.ml_model, NULL);
+ return;
+ }
+
+ CU_ASSERT(!run_result[0].error_code);
+ CU_ASSERT(run_result[0].user_ptr == &global.output_seg);
+ outputs[0] = (odp_ml_data_seg_t *)run_result[0].user_ptr;
+ CU_ASSERT(*(int32_t *)outputs[0][0].addr == global.y_expected);
+
+ /* Check completion status for the poll mode */
+ for (int i = 0; i < TIMEOUT; i++) {
+ ret = odp_ml_run_status(global.ml_model, 0, &run_result[1]);
+ if (ret)
+ break;
+
+ /* ret = 0 meaning run has not finished, continue to check status */
+ odp_time_wait_ns(wait_ns);
+ }
+
+ outputs[1] = (odp_ml_data_seg_t *)run_result[1].user_ptr;
+ CU_ASSERT(ret > 0);
+ CU_ASSERT(!run_result[1].error_code);
+ CU_ASSERT(run_result[1].user_ptr == &output_seg);
+ CU_ASSERT(*(int32_t *)outputs[1][0].addr == y_expected);
+
+ /* Unload model */
+ ret = odp_ml_model_unload(global.ml_model, NULL);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+static void test_ml_model_extra_stat_info(void)
+{
+ int ret;
+
+ ret = odp_ml_model_extra_stat_info(global.ml_model, NULL, 0);
+ CU_ASSERT(ret >= 0);
+}
+
+static void test_ml_model_extra_stats(void)
+{
+ int ret;
+
+ ret = odp_ml_model_extra_stats(global.ml_model, NULL, 0);
+ CU_ASSERT(ret >= 0);
+}
+
+odp_testinfo_t ml_suite[] = {
+ ODP_TEST_INFO_CONDITIONAL(test_ml_debug, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_create, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_lookup, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_info, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_load, check_load_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_load_async_poll, check_load_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_load_async_event, check_load_event),
+ /* Synchronous load/unload is used load/unload model before/after model run */
+ ODP_TEST_INFO_CONDITIONAL(test_ml_run, check_run_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_run_multi, check_run_sync),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_run_async_event, check_run_event),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_run_async_poll, check_run_poll),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_run_start_multi, check_run_poll_event),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_extra_stat_info, check_ml_support),
+ ODP_TEST_INFO_CONDITIONAL(test_ml_model_extra_stats, check_ml_support),
+ ODP_TEST_INFO_NULL
+};
+
+odp_suiteinfo_t ml_suites[] = {
+ {"ML", ml_suite_init, ml_suite_term, ml_suite},
+ ODP_SUITE_INFO_NULL
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(ml_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/platform/linux-generic/test/validation/api/ml/requirements.txt b/platform/linux-generic/test/validation/api/ml/requirements.txt
new file mode 100644
index 000000000..2dcba7a3a
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/requirements.txt
@@ -0,0 +1,2 @@
+onnx
+numpy
diff --git a/platform/linux-generic/test/validation/api/ml/simple_linear.onnx b/platform/linux-generic/test/validation/api/ml/simple_linear.onnx
new file mode 100644
index 000000000..45c4b95b9
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/simple_linear.onnx
Binary files differ
diff --git a/platform/linux-generic/test/validation/api/ml/simple_linear_gen.py b/platform/linux-generic/test/validation/api/ml/simple_linear_gen.py
new file mode 100644
index 000000000..b3e6124cd
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/ml/simple_linear_gen.py
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2023 Nokia
+#
+
+import onnx
+from onnx import helper
+from onnx import TensorProto
+
+weight = helper.make_tensor(name='w', data_type=TensorProto.INT32, dims=[1], vals=[3])
+w = helper.make_node('Constant', inputs=[], outputs=['w'], name='weight', value=weight)
+
+bias = helper.make_tensor(name='b', data_type=TensorProto.INT32, dims=[1], vals=[4])
+b = helper.make_node('Constant', inputs=[], outputs=['b'], name='bias', value=bias)
+
+# The functional nodes:
+mul = helper.make_node('Mul', inputs=['x', 'w'], outputs=['wx'], name='Mul')
+add = helper.make_node('Add', inputs=['wx', 'b'], outputs=['y'], name='Add')
+
+# Create the graph
+g = helper.make_graph([w, mul, b, add], 'linear',
+ [helper.make_tensor_value_info('x', TensorProto.INT32, [1])],
+ [helper.make_tensor_value_info('y', TensorProto.INT32, [1])]
+)
+
+model = helper.make_model(
+ producer_name='ODP validation tests',
+ model_version=1,
+ doc_string="y = 3x + 4",
+ graph=g,
+ opset_imports=[helper.make_opsetid("", 13)]
+)
+
+# Save the model
+onnx.save(model, 'simple_linear.onnx')
diff --git a/platform/linux-generic/test/validation/api/pktio/.gitignore b/platform/linux-generic/test/validation/api/pktio/.gitignore
new file mode 100644
index 000000000..7e563b8b3
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/.gitignore
@@ -0,0 +1,2 @@
+*.log
+*.trs
diff --git a/platform/linux-generic/test/validation/api/pktio/Makefile.am b/platform/linux-generic/test/validation/api/pktio/Makefile.am
new file mode 100644
index 000000000..1646743fe
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/Makefile.am
@@ -0,0 +1,32 @@
+dist_check_SCRIPTS = pktio_env \
+ pktio_run.sh \
+ pktio_run_tap.sh
+
+if ODP_PKTIO_PCAP
+dist_check_SCRIPTS += pktio_run_pcap.sh
+endif
+if PKTIO_DPDK
+dist_check_SCRIPTS += pktio_run_dpdk.sh
+endif
+
+test_SCRIPTS = $(dist_check_SCRIPTS)
+
+# If building out-of-tree, make check will not copy the scripts and data to the
+# $(builddir) assuming that all commands are run locally. However this prevents
+# running tests on a remote target using LOG_COMPILER.
+# So copy all script and data files explicitly here.
+all-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS); do \
+ if [ -e $(srcdir)/$$f ]; then \
+ mkdir -p $(builddir)/$$(dirname $$f); \
+ cp -f $(srcdir)/$$f $(builddir)/$$f; \
+ fi \
+ done \
+ fi
+clean-local:
+ if [ "x$(srcdir)" != "x$(builddir)" ]; then \
+ for f in $(dist_check_SCRIPTS); do \
+ rm -f $(builddir)/$$f; \
+ done \
+ fi
diff --git a/platform/linux-generic/test/validation/api/pktio/pktio_env b/platform/linux-generic/test/validation/api/pktio/pktio_env
new file mode 100644
index 000000000..ba9998e6a
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/pktio_env
@@ -0,0 +1,120 @@
+#!/bin/sh
+#
+# Copyright (c) 2015-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# Test script wrapper for running ODP pktio apps on linux-generic.
+#
+# For linux-generic the default behavior is to create two pairs of
+# virtual Ethernet interfaces and provide the names of these via
+# environment variables to pktio apps, the interfaces will be removed
+# before the script exits.
+#
+# Note that the creation of virtual Ethernet devices depends on having
+# CONFIG_VETH enabled in the kernel, if not enabled the env setup will be skipped.
+#
+# Network set up
+# IF0 <---> IF1
+# IF2 <---> IF3
+IF0=pktiop0p1
+IF1=pktiop1p0
+IF2=pktiop2p3
+IF3=pktiop3p2
+
+if [ "$0" = "$BASH_SOURCE" ]; then
+ echo "Error: Platform specific env file has to be sourced."
+fi
+
+check_for_root()
+{
+ if [ "$(id -u)" != "0" ]; then
+ echo "check_for_root(): need to be root to setup VETH"
+ return 1
+ fi
+ return 0
+}
+
+# wait for a network interface's operational state to be "up"
+wait_for_iface_up()
+{
+ iface=$1
+ cnt=0
+
+ while [ $cnt -lt 50 ]; do
+ read operstate < /sys/class/net/$iface/operstate
+
+ if [ $? -ne 0 ]; then
+ break
+ elif [ "$operstate" = "up" ]; then
+ return 0
+ fi
+
+ sleep 0.1
+ cnt=`expr $cnt + 1`
+ done
+
+ return 1
+}
+
+setup_pktio_env()
+{
+ echo "pktio: setting up test interfaces $IF0, $IF1, $IF2, $IF3."
+
+ check_for_root
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+
+ for iface in $IF0 $IF1 $IF2 $IF3; do
+ ip link show $iface 2> /dev/null
+ if [ $? -eq 0 ]; then
+ echo "pktio: interface $iface already exist $?"
+ return 2
+ fi
+ done
+
+ if [ "$1" = "clean" ]; then
+ trap cleanup_pktio_env EXIT
+ fi
+
+ ip link add $IF0 type veth peer name $IF1
+ if [ $? -ne 0 ]; then
+ echo "pktio: error: unable to create veth pair"
+ return 3
+ fi
+ ip link add $IF2 type veth peer name $IF3
+ if [ $? -ne 0 ]; then
+ echo "pktio: error: unable to create veth pair"
+ return 4
+ fi
+
+ for iface in $IF0 $IF1 $IF2 $IF3; do
+ ip link set $iface mtu 9216 up
+ ifconfig $iface -arp
+ done
+
+ # check that the interface has come up before starting the test
+ for iface in $IF0 $IF1 $IF2 $IF3; do
+ wait_for_iface_up $iface
+ if [ $? -ne 0 ]; then
+ echo "pktio: interface $iface failed to come up"
+ return 5
+ fi
+ done
+}
+
+cleanup_pktio_env()
+{
+ echo "pktio: removing test interfaces $IF0, $IF1, $IF2, $IF3"
+ check_for_root
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+
+ for iface in $IF0 $IF1 $IF2 $IF3; do
+ ip link del $iface 2> /dev/null
+ done
+ return 0
+}
diff --git a/platform/linux-generic/test/validation/api/pktio/pktio_run.sh b/platform/linux-generic/test/validation/api/pktio/pktio_run.sh
new file mode 100755
index 000000000..cac297768
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/pktio_run.sh
@@ -0,0 +1,117 @@
+#!/bin/sh
+#
+# Copyright (c) 2015-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Perform the pktio tests. This script expects at least one argument:
+# setup) setup the pktio test environment
+# cleanup) cleanup the pktio test environment
+# run) run the pktio tests (setup, run, cleanup)
+# extra arguments are passed unchanged to the test itself (pktio_main)
+# Without arguments, "run" is assumed and no extra argument is passed to the
+# test (legacy mode).
+#
+
+# directories where pktio_main binary can be found:
+# -in the validation dir when running make check (intree or out of tree)
+# -in the script directory, when running after 'make install', or
+# -in the validation when running standalone (./pktio_run) intree.
+# -in the current directory.
+# running stand alone out of tree requires setting PATH
+PATH=${TEST_DIR}/api/pktio:$PATH
+PATH=$(dirname $0):$PATH
+PATH=$(dirname $0)/../../../../../../test/validation/api/pktio:$PATH
+PATH=.:$PATH
+
+pktio_main_path=$(which pktio_main${EXEEXT})
+if [ -x "$pktio_main_path" ] ; then
+	echo "running with pktio_main: $pktio_main_path"
+else
+	echo "cannot find pktio_main: please set your PATH for it."
+	exit 1
+fi
+
+# directory where platform test sources are, including scripts
+TEST_SRC_DIR=$(dirname $0)
+
+# exit codes expected by automake for skipped tests
+TEST_SKIPPED=77
+
+# Use installed pktio env or for make check take it from platform directory
+if [ -f "./pktio_env" ]; then
+ . ./pktio_env
+elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
+ . ${TEST_SRC_DIR}/pktio_env
+else
+ echo "BUG: unable to find pktio_env!"
+ echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test."
+ echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
+ exit 1
+fi
+
+run_test()
+{
+	local ret=0
+
+	# environment variables are used to control which socket method is
+	# used, so try each combination to ensure decent coverage.
+	for distype in MMAP MMSG; do
+		unset ODP_PKTIO_DISABLE_SOCKET_${distype}
+	done
+
+	# first pass ("SKIP") runs with no socket method disabled, second
+	# pass disables MMAP so the MMSG fallback gets exercised too.
+	for distype in SKIP MMAP; do
+		if [ "$distype" != "SKIP" ]; then
+			export ODP_PKTIO_DISABLE_SOCKET_${distype}=y
+		fi
+		pktio_main${EXEEXT} $*
+		if [ $? -ne 0 ]; then
+			ret=1
+		fi
+	done
+
+	if [ $ret -ne 0 ]; then
+		echo "!!! FAILED !!!"
+	fi
+
+	return $ret
+}
+
+run()
+{
+	# need to be root to run tests with real interfaces; report the
+	# test as skipped otherwise ($ret was previously used here while
+	# still unset, leaking the status of the last command)
+	if [ "$(id -u)" != "0" ]; then
+		exit $TEST_SKIPPED
+	fi
+
+	if [ "$ODP_PKTIO_IF0" = "" ]; then
+		# no interfaces specified, use default veth interfaces
+		# setup by the pktio_env script
+		setup_pktio_env clean
+		if [ $? != 0 ]; then
+			echo "Failed to setup test environment, skipping test."
+			exit $TEST_SKIPPED
+		fi
+		export ODP_PKTIO_IF0=$IF0
+		export ODP_PKTIO_IF1=$IF1
+	fi
+
+	run_test
+	ret=$?
+
+	exit $ret
+}
+
+if [ $# != 0 ]; then
+ action=$1
+ shift
+fi
+
+case "$action" in
+ setup) setup_pktio_env ;;
+ cleanup) cleanup_pktio_env ;;
+ run) run ;;
+ *) run ;;
+esac
diff --git a/platform/linux-generic/test/validation/api/pktio/pktio_run_dpdk.sh b/platform/linux-generic/test/validation/api/pktio/pktio_run_dpdk.sh
new file mode 100755
index 000000000..39b540228
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/pktio_run_dpdk.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+# Copyright (c) 2016-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# Perform the pktio tests. This script expects at least one argument:
+# setup) setup the pktio test environment
+# cleanup) cleanup the pktio test environment
+# run) run the pktio tests (setup, run, cleanup)
+# extra arguments are passed unchanged to the test itself (pktio_main)
+# Without arguments, "run" is assumed and no extra argument is passed to the
+# test (legacy mode).
+#
+
+# directories where pktio_main binary can be found:
+# -in the validation dir when running make check (intree or out of tree)
+# -in the script directory, when running after 'make install', or
+# -in the validation when running standalone (./pktio_run) intree.
+# -in the current directory.
+# running stand alone out of tree requires setting PATH
+PATH=${TEST_DIR}/api/pktio:$PATH
+PATH=$(dirname $0):$PATH
+PATH=$(dirname $0)/../../../../../../test/validation/api/pktio:$PATH
+PATH=.:$PATH
+
+pktio_main_path=$(which pktio_main${EXEEXT})
+if [ -x "$pktio_main_path" ] ; then
+	echo "running with pktio_main: $pktio_main_path"
+else
+	echo "cannot find pktio_main: please set your PATH for it."
+	exit 1
+fi
+
+# directory where platform test sources are, including scripts
+TEST_SRC_DIR=$(dirname $0)
+
+# exit codes expected by automake for skipped tests
+TEST_SKIPPED=77
+
+# Use installed pktio env or for make check take it from platform directory
+if [ -f "./pktio_env" ]; then
+ . ./pktio_env
+elif [ -f ${TEST_SRC_DIR}/pktio_env ]; then
+ . ${TEST_SRC_DIR}/pktio_env
+else
+ echo "BUG: unable to find pktio_env!"
+ echo "pktio_env has to be in current directory or in platform/\$ODP_PLATFORM/test."
+ echo "ODP_PLATFORM=\"$ODP_PLATFORM\""
+ exit 1
+fi
+
+run_test()
+{
+ local ret=0
+
+ pktio_main${EXEEXT} $*
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ echo "!!! FAILED !!!"
+ fi
+
+ exit $ret
+}
+
+run()
+{
+ # need to be root to set the interface.
+ if [ "$(id -u)" != "0" ]; then
+ echo "pktio: need to be root to setup DPDK interfaces."
+ return $TEST_SKIPPED
+ fi
+
+ if [ "$ODP_PKTIO_IF0" = "" ]; then
+ setup_pktio_env clean
+ export ODP_PKTIO_DPDK_PARAMS="--no-pci --vdev net_pcap0,iface=$IF0 --vdev net_pcap1,iface=$IF1"
+ export ODP_PKTIO_IF0=dpdk:0
+ export ODP_PKTIO_IF1=dpdk:1
+ fi
+
+ run_test
+}
+
+if [ $# != 0 ]; then
+	action=$1
+	shift
+fi
+
+# dispatch on the saved action; $1 was consumed by the shift above
+case "$action" in
+	setup)   setup_pktio_env ;;
+	cleanup) cleanup_pktio_env ;;
+	run)     run ;;
+	*)       run ;;
+esac
diff --git a/platform/linux-generic/test/validation/api/pktio/pktio_run_pcap.sh b/platform/linux-generic/test/validation/api/pktio/pktio_run_pcap.sh
new file mode 100755
index 000000000..290bc81d5
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/pktio_run_pcap.sh
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# Copyright (c) 2015-2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+# any parameter passed as arguments to this script is passed unchanged to
+# the test itself (pktio_main)
+
+# directories where pktio_main binary can be found:
+# -in the validation dir when running make check (intree or out of tree)
+# -in the script directory, when running after 'make install', or
+# -in the validation when running standalone intree.
+# -in the current directory.
+# running stand alone out of tree requires setting PATH
+PATH=${TEST_DIR}/api/pktio:$PATH
+PATH=$(dirname $0):$PATH
+PATH=$(dirname $0)/../../../../../../test/validation/api/pktio:$PATH
+PATH=.:$PATH
+
+pktio_main_path=$(which pktio_main${EXEEXT})
+if [ -x "$pktio_main_path" ] ; then
+	echo "running with $pktio_main_path"
+else
+	echo "cannot find pktio_main${EXEEXT}: please set your PATH for it."
+	exit 1
+fi
+
+export ODP_PKTIO_TEST_DISABLE_START_STOP=1
+
+PCAP_FNAME=vald.pcap
+export ODP_PKTIO_IF0="pcap:out=${PCAP_FNAME}"
+export ODP_PKTIO_IF1="pcap:in=${PCAP_FNAME}"
+pktio_main${EXEEXT} $*
+ret=$?
+rm -f ${PCAP_FNAME}
+exit $ret
diff --git a/platform/linux-generic/test/validation/api/pktio/pktio_run_tap.sh b/platform/linux-generic/test/validation/api/pktio/pktio_run_tap.sh
new file mode 100755
index 000000000..ecfe5a126
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/pktio/pktio_run_tap.sh
@@ -0,0 +1,119 @@
+#!/bin/sh
+#
+# Copyright (c) 2015, Ilya Maximets <i.maximets@samsung.com>
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+
+# any parameter passed as arguments to this script is passed unchanged to
+# the test itself (pktio_main)
+
+# directories where pktio_main binary can be found:
+# -in the validation dir when running make check (intree or out of tree)
+# -in the script directory, when running after 'make install', or
+# -in the validation when running standalone intree.
+# -in the current directory.
+# running stand alone out of tree requires setting PATH
+PATH=${TEST_DIR}/api/pktio:$PATH
+PATH=$(dirname $0):$PATH
+PATH=$(dirname $0)/../../../../../../test/validation/api/pktio:$PATH
+PATH=.:$PATH
+
+pktio_main_path=$(which pktio_main${EXEEXT})
+if [ -x "$pktio_main_path" ] ; then
+	echo "running with $pktio_main_path"
+else
+	echo "cannot find pktio_main${EXEEXT}: please set your PATH for it."
+fi
+
+# exit code expected by automake for skipped tests
+TEST_SKIPPED=77
+
+TAP_BASE_NAME=iotap_vald
+IF0=${TAP_BASE_NAME}0
+IF1=${TAP_BASE_NAME}1
+BR=${TAP_BASE_NAME}_br
+
+export ODP_PKTIO_IF0="tap:$IF0"
+export ODP_PKTIO_IF1="tap:$IF1"
+
+tap_cleanup()
+{
+ ret=$?
+
+ for iface in $IF0 $IF1; do
+ ip link set dev $iface nomaster
+ done
+
+ ip link delete $BR type bridge
+
+ for iface in $IF0 $IF1; do
+ ip tuntap del mode tap $iface
+ done
+
+ trap - EXIT
+ exit $ret
+}
+
+tap_setup()
+{
+ if [ "$(id -u)" != "0" ]; then
+ echo "pktio: need to be root to setup TAP interfaces."
+ return $TEST_SKIPPED
+ fi
+
+ for iface in $IF0 $IF1 $BR; do
+ ip link show $iface 2> /dev/null
+ if [ $? -eq 0 ]; then
+ echo "pktio: interface $iface already exist $?"
+ return 2
+ fi
+ done
+
+ trap tap_cleanup EXIT
+
+ for iface in $IF0 $IF1; do
+ ip tuntap add mode tap $iface
+ if [ $? -ne 0 ]; then
+ echo "pktio: error: unable to create TAP device $iface"
+ return 3
+ fi
+ done
+
+ ip link add name $BR type bridge
+ if [ $? -ne 0 ]; then
+ echo "pktio: error: unable to create bridge $BR"
+ return 3
+ fi
+
+ for iface in $IF0 $IF1; do
+ ip link set dev $iface master $BR
+ if [ $? -ne 0 ]; then
+ echo "pktio: error: unable to add $iface to bridge $BR"
+ return 4
+ fi
+ done
+
+ for iface in $IF0 $IF1 $BR; do
+ ifconfig $iface -arp
+ sysctl -w net.ipv6.conf.${iface}.disable_ipv6=1
+ ip link set dev $iface mtu 9216 up
+ done
+
+ return 0
+}
+
+tap_setup
+ret=$?
+if [ $ret -ne 0 ]; then
+ echo "pktio: tap_setup() FAILED!"
+ exit $TEST_SKIPPED
+fi
+
+# Using ODP_WAIT_FOR_NETWORK to prevent fail if tap still not enabled in bridge
+ODP_WAIT_FOR_NETWORK=yes pktio_main${EXEEXT} $*
+ret=$?
+
+exit $ret
diff --git a/platform/linux-generic/test/validation/api/shmem/.gitignore b/platform/linux-generic/test/validation/api/shmem/.gitignore
new file mode 100644
index 000000000..74195f576
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/.gitignore
@@ -0,0 +1,3 @@
+shmem_linux
+shmem_odp1
+shmem_odp2
diff --git a/platform/linux-generic/test/validation/api/shmem/Makefile.am b/platform/linux-generic/test/validation/api/shmem/Makefile.am
new file mode 100644
index 000000000..309eceb92
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/Makefile.am
@@ -0,0 +1,14 @@
+include ../Makefile.inc
+
+#the main test program is shmem_linux, which, in turn, starts a shmem_odp:
+test_PROGRAMS = shmem_linux shmem_odp1 shmem_odp2
+
+#shmem_linux is stand alone, pure linux (no ODP):
+shmem_linux_SOURCES = shmem_linux.c shmem_linux.h shmem_common.h
+shmem_linux_LDFLAGS =
+shmem_linux_LDADD =
+
+#shmem_odp1 and shmem_odp2 are the 2 ODP processes:
+shmem_odp1_SOURCES = shmem_odp1.c shmem_odp1.h shmem_common.h
+
+shmem_odp2_SOURCES = shmem_odp2.c shmem_odp2.h shmem_common.h
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_common.h b/platform/linux-generic/test/validation/api/shmem/shmem_common.h
new file mode 100644
index 000000000..33df4476c
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_common.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _COMMON_TEST_SHMEM_H_
+#define _COMMON_TEST_SHMEM_H_
+
+#define SHM_NAME "odp_linux_shared_mem"
+#define DEFAULT_SHM_DIR "/dev/shm"
+#define FIFO_NAME_FMT "%s/%d/shmem_test_fifo-%d"
+#define ALIGN_SIZE (128)
+#define TEST_SHARE_FOO (0xf0f0f0f0)
+#define TEST_SHARE_BAR (0xf0f0f0f)
+#define TEST_FAILURE 'F'
+#define TEST_SUCCESS 'S'
+
+typedef struct {
+ uint32_t foo;
+ uint32_t bar;
+} test_shared_linux_data_t;
+
+#endif
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_linux.c b/platform/linux-generic/test/validation/api/shmem/shmem_linux.c
new file mode 100644
index 000000000..03a9255f7
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_linux.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/* this test makes sure that odp shared memory created with the ODP_SHM_PROC
+ * flag is visible under linux, and checks that memory created with the
+ * ODP_SHM_EXPORT flag is visible by other ODP instances.
+ * It therefore checks both that the link
+ * name under /dev/shm is correct, and also checks that the memory contents
+ * is indeed shared.
+ * we want:
+ * -the odp test to run using C UNIT
+ * -the main process to return the correct return code.
+ * (for the autotools test harness)
+ *
+ * To achieve this, the flow of operations is as follows:
+ *
+ * linux process (main, non odp) |
+ * (shmem_linux.c) |
+ * |
+ * |
+ * |
+ * main() |
+ * forks odp_app1 process |
+ * wait for named pipe creation |
+ * |
+ * | ODP_APP1 process
+ * | (shmem_odp1.c)
+ * |
+ * | allocate shmem
+ * | populate shmem
+ * | create named pipe
+ * | wait for test report in fifo...
+ * read shared memory |
+ * check if memory contents is OK |
+ * If not OK, write "F" in fifo and |
+ * exit with failure code. | -------------------
+ * |
+ * forks odp app2 process | ODP APP2 process
+ * wait for child termination & status| (shmem_odp2.c)
+ * | lookup ODP_APP1 shared memory,
+ * | check if memory contents is OK
+ * | Exit(0) on success, exit(1) on fail
+ * If child failed, write "F" in fifo |
+ * exit with failure code. | -------------------
+ * |
+ * OK, write "S" in fifo, |
+ * wait for child termination & status|
+ * terminate with same status as child|
+ * | ODP APP1 process
+ * | (shmem_odp1.c)
+ * |
+ * | ...(continued)
+ * | read S(success) or F(fail) from fifo
+ * | report success or failure to C-Unit
+ * | Exit(0) on success, exit(1) on fail
+ * wait for child termination & status |
+ * terminate with same status as child |
+ * |
+ * \|/
+ * time
+ */
+
+#include <stdint.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <linux/limits.h>
+#include <stdio.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <libgen.h>
+#include <linux/limits.h>
+#include <inttypes.h>
+#include <pwd.h>
+#include <stdlib.h>
+#include "shmem_linux.h"
+#include "shmem_common.h"
+
+#define ODP_APP1_NAME "shmem_odp1" /* name of the odp1 program, in this dir */
+#define ODP_APP2_NAME "shmem_odp2" /* name of the odp2 program, in this dir */
+/* odp-<pid>-shm-<name> */
+#define DEVNAME_DEFAULT_DIR "/dev/shm"
+#define DEVNAME_FMT "%s/%d/odp-%" PRIu64 "-shm-%s"
+#define MAX_FIFO_WAIT 30 /* Max time waiting for the fifo (sec) */
+
+/*
+ * read the attributes of an externally shared mem object:
+ * input: ext_odp_pid, blockname: the remote ODP instance and the exported
+ * block name to be searched.
+ * Output: filename: the memory block underlying file to be opened
+ * (the given buffer should be big enough i.e. at
+ * least ISHM_FILENAME_MAXLEN bytes)
+ * The 3 following parameters are really here for debug
+ * as they are really meaningless in a non-odp process:
+ * len: the block real length (bytes, multiple of page sz)
+ * flags: the _ishm flags setting the block was created with
+ * align: the alignment setting the block was created with
+ *
+ * return 0 on success, non zero on error
+ */
+static int read_shmem_attributes(uint64_t ext_odp_pid, const char *blockname,
+				 char *filename, uint64_t *len,
+				 uint32_t *flags, uint64_t *user_len,
+				 uint32_t *user_flags, uint32_t *align,
+				 uint64_t *offset)
+{
+	char shm_attr_filename[PATH_MAX];
+	FILE *export_file;
+	char *shm_dir = getenv("ODP_SHM_DIR");
+
+	/* ODP_SHM_DIR is externally controlled: build the path with a
+	 * bounded snprintf and reject truncated names */
+	if (snprintf(shm_attr_filename, sizeof(shm_attr_filename),
+		     DEVNAME_FMT,
+		     shm_dir ? shm_dir : DEVNAME_DEFAULT_DIR,
+		     getuid(),
+		     ext_odp_pid, blockname) >= (int)sizeof(shm_attr_filename))
+		return -1;
+
+	/* O_CREAT flag not given => failure if shm_attr_filename does not
+	 * already exist */
+	export_file = fopen(shm_attr_filename, "r");
+	if (export_file == NULL)
+		return -1;
+
+	if (fscanf(export_file, "ODP exported shm block info: ") != 0)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "ishm_blockname: %*s ") != 0)
+		goto export_file_read_err;
+
+	/* NOTE(review): %s is unbounded; caller's buffer is assumed to be
+	 * at least ISHM_FILENAME_MAXLEN bytes as documented above */
+	if (fscanf(export_file, "file: %s ", filename) != 1)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "length: %" PRIu64 " ", len) != 1)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "flags: %" PRIu32 " ", flags) != 1)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "user_length: %" PRIu64 " ", user_len) != 1)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "user_flags: %" PRIu32 " ", user_flags) != 1)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "align: %" PRIu32 " ", align) != 1)
+		goto export_file_read_err;
+
+	if (fscanf(export_file, "offset: %" PRIu64 " ", offset) != 1)
+		goto export_file_read_err;
+
+	fclose(export_file);
+	return 0;
+
+export_file_read_err:
+	fclose(export_file);
+	return -1;
+}
+
+void test_success(char *fifo_name, int fd, pid_t odp_app)
+{
+	int status;
+	int nb_char;
+	char result = TEST_SUCCESS;
+	/* write "Success" to the FIFO */
+	nb_char = write(fd, &result, sizeof(char));
+	close(fd);
+	/* wait for the odp app1 to terminate */
+	waitpid(odp_app, &status, 0);
+	/* decode the wait status: the child's exit code sits in the high
+	 * byte of the raw status, so exit(status) would be truncated to 0
+	 * by exit() and failures would be reported as success */
+	if (WIFEXITED(status))
+		status = WEXITSTATUS(status);
+	else
+		status = 1; /* killed by signal or other abnormal end */
+	/* if the write failed, report an error anyway */
+	if (nb_char != 1)
+		status = 1;
+	unlink(fifo_name);
+	exit(status); /* the status reported by the odp side is returned */
+}
+
+void test_failure(char *fifo_name, int fd, pid_t odp_app)
+{
+ int status;
+ char result;
+
+ int nb_char __attribute__((unused)); /*ignored: we fail anyway */
+
+ result = TEST_FAILURE;
+ /* write "Failure" to the FIFO */
+ nb_char = write(fd, &result, sizeof(char));
+ close(fd);
+ /* wait for the odp app1 to terminate */
+ waitpid(odp_app, &status, 0);
+ unlink(fifo_name);
+ exit(1); /* error */
+}
+
+int main(int argc __attribute__((unused)), char *argv[])
+{
+	char prg_name[PATH_MAX];
+	char odp_name1[PATH_MAX];
+	char odp_name2[PATH_MAX];
+	int nb_sec;
+	int size;
+	pid_t odp_app1;
+	pid_t odp_app2;
+	char *odp_params1 = NULL;
+	char *odp_params2[3];
+	char pid1[12]; /* large enough for any 32-bit pid_t in decimal */
+	char fifo_name[PATH_MAX]; /* fifo for linux->odp feedback */
+	int fifo_fd = -1;
+	char shm_filename[PATH_MAX];/* shared mem device name, under /dev/shm */
+	uint64_t len;
+	uint64_t offset;
+	uint32_t flags;
+	uint64_t user_len;
+	uint32_t user_flags;
+	uint32_t align;
+	int shm_fd;
+	test_shared_linux_data_t *addr;
+	int app2_status;
+	uid_t uid = getuid();
+	char *shm_dir = getenv("ODP_SHM_DIR");
+	const char *exeext = getenv("EXEEXT");
+
+	if (exeext == NULL)
+		exeext = "";
+
+	/* odp_app1 is in the same directory as this file: */
+	strncpy(prg_name, argv[0], PATH_MAX - 1);
+	prg_name[PATH_MAX - 1] = '\0'; /* strncpy does not guarantee NUL */
+	snprintf(odp_name1, sizeof(odp_name1), "%s/%s%s",
+		 dirname(prg_name), ODP_APP1_NAME, exeext);
+
+	/* start the ODP application: */
+	odp_app1 = fork();
+	if (odp_app1 < 0) /* error */
+		exit(1);
+
+	if (odp_app1 == 0) { /* child */
+		execv(odp_name1, &odp_params1); /* no return unless error */
+		fprintf(stderr, "execv failed: %s\n", strerror(errno));
+		/* must not fall through into the parent's code below;
+		 * _exit avoids flushing the parent's stdio buffers twice */
+		_exit(1);
+	}
+
+	/* wait max 30 sec for the fifo to be created by the ODP side.
+	 * Just die if time expire as there is no fifo to communicate
+	 * through... */
+	snprintf(fifo_name, sizeof(fifo_name), FIFO_NAME_FMT,
+		 shm_dir ? shm_dir : DEFAULT_SHM_DIR,
+		 uid, odp_app1);
+	for (nb_sec = 0; nb_sec < MAX_FIFO_WAIT; nb_sec++) {
+		fifo_fd = open(fifo_name, O_WRONLY);
+		if (fifo_fd >= 0)
+			break;
+		sleep(1);
+	}
+	if (fifo_fd < 0)
+		exit(1);
+	printf("pipe found\n");
+
+	/* the linux named pipe has now been found, meaning that the
+	 * ODP application is up and running, and has allocated shmem.
+	 * check to see if linux can see the created shared memory: */
+
+	/* read the shared memory attributes (includes the shm filename): */
+	if (read_shmem_attributes(odp_app1, SHM_NAME,
+				  shm_filename, &len, &flags,
+				  &user_len, &user_flags, &align,
+				  &offset) != 0) {
+		printf("error read_shmem_attributes\n");
+		test_failure(fifo_name, fifo_fd, odp_app1);
+	}
+
+	/* open the shm filename (which is either on /dev/shm/ or on hugetlbfs)
+	 * O_CREAT flag not given => failure if shm_devname does not already
+	 * exist */
+	shm_fd = open(shm_filename, O_RDONLY,
+		      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+	if (shm_fd == -1) {
+		fprintf(stderr, "unable to open %s\n", shm_filename);
+		test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
+	}
+
+	/* linux ODP guarantees page size alignment. Larger alignment may
+	 * fail as 2 different processes will have fully unrelated
+	 * virtual spaces.
+	 */
+	size = sizeof(test_shared_linux_data_t);
+
+	addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, offset);
+	if (addr == MAP_FAILED) {
+		fprintf(stderr, "shmem_linux: mmap failed: %s\n",
+			strerror(errno));
+		test_failure(fifo_name, fifo_fd, odp_app1);
+	}
+
+	/* check that we see what the ODP application wrote in the memory */
+	if ((addr->foo != TEST_SHARE_FOO) || (addr->bar != TEST_SHARE_BAR)) {
+		fprintf(stderr, "ERROR: addr->foo %x addr->bar %x\n",
+			addr->foo, addr->bar);
+		test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
+	}
+
+	/* odp_app2 is in the same directory as this file: */
+	strncpy(prg_name, argv[0], PATH_MAX - 1);
+	prg_name[PATH_MAX - 1] = '\0'; /* dirname() above modified prg_name */
+	snprintf(odp_name2, sizeof(odp_name2), "%s/%s%s",
+		 dirname(prg_name), ODP_APP2_NAME, exeext);
+
+	/* start the second ODP application with pid of ODP_APP1 as parameter:*/
+	snprintf(pid1, sizeof(pid1), "%d", odp_app1);
+	odp_params2[0] = odp_name2;
+	odp_params2[1] = pid1;
+	odp_params2[2] = NULL;
+	odp_app2 = fork();
+	if (odp_app2 < 0) /* error */
+		exit(1);
+
+	if (odp_app2 == 0) { /* child */
+		execv(odp_name2, odp_params2); /* no return unless error */
+		fprintf(stderr, "execv failed: %s\n", strerror(errno));
+		_exit(1); /* don't fall through into the parent's code */
+	}
+
+	/* wait for the second ODP application to terminate:
+	 * status is OK if that second ODP application could see the
+	 * memory shared by the first one. */
+	waitpid(odp_app2, &app2_status, 0);
+
+	if (app2_status)
+		test_failure(fifo_name, fifo_fd, odp_app1); /* no return */
+
+	/* everything looked good: */
+	test_success(fifo_name, fifo_fd, odp_app1);
+}
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_linux.h b/platform/linux-generic/test/validation/api/shmem/shmem_linux.h
new file mode 100644
index 000000000..24646ae7c
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_linux.h
@@ -0,0 +1,9 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+void test_success(char *fifo_name, int fd, pid_t odp_app);
+void test_failure(char *fifo_name, int fd, pid_t odp_app);
+int main(int argc, char *argv[]);
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_odp1.c b/platform/linux-generic/test/validation/api/shmem/shmem_odp1.c
new file mode 100644
index 000000000..98148d6c7
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_odp1.c
@@ -0,0 +1,91 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <linux/limits.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+
+#include <odp_cunit_common.h>
+#include "shmem_odp1.h"
+#include "shmem_common.h"
+
+#define TEST_SHARE_FOO (0xf0f0f0f0)
+#define TEST_SHARE_BAR (0xf0f0f0f)
+
+void shmem_test_odp_shm_proc(void)
+{
+ char fifo_name[PATH_MAX];
+ int fd;
+ odp_shm_t shm;
+ test_shared_data_t *test_shared_data;
+ char test_result;
+ char *shm_dir = getenv("ODP_SHM_DIR");
+
+ printf("start with pid %d\n", getpid());
+ /* reminder: ODP_SHM_PROC => export to linux, ODP_SHM_EXPORT=>to odp */
+ shm = odp_shm_reserve(SHM_NAME,
+ sizeof(test_shared_data_t),
+ ALIGN_SIZE, ODP_SHM_PROC | ODP_SHM_EXPORT);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
+ test_shared_data = odp_shm_addr(shm);
+ CU_ASSERT_FATAL(NULL != test_shared_data);
+ test_shared_data->foo = TEST_SHARE_FOO;
+ test_shared_data->bar = TEST_SHARE_BAR;
+
+ odp_mb_full();
+
+ /* open the fifo: this will indicate to linux process that it can
+ * start the shmem lookups and check if it sees the data */
+ sprintf(fifo_name, FIFO_NAME_FMT,
+ shm_dir ? shm_dir : DEFAULT_SHM_DIR,
+ getuid(), getpid());
+ CU_ASSERT_FATAL(mkfifo(fifo_name, 0666) == 0);
+
+ /* read from the fifo: the linux process result: */
+ printf("shmem_odp1: opening fifo: %s\n", fifo_name);
+ fd = open(fifo_name, O_RDONLY);
+ CU_ASSERT_FATAL(fd >= 0);
+
+ printf("shmem_odp1: reading fifo: %s\n", fifo_name);
+ CU_ASSERT(read(fd, &test_result, sizeof(char)) == 1);
+ printf("shmem_odp1: read fifo: %d\n", test_result);
+ printf("shmem_odp1: closing fifo: %s\n", fifo_name);
+ close(fd);
+ CU_ASSERT_FATAL(test_result == TEST_SUCCESS);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+}
+
+odp_testinfo_t shmem_suite[] = {
+ ODP_TEST_INFO(shmem_test_odp_shm_proc),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t shmem_suites[] = {
+ {"Shared Memory", NULL, NULL, shmem_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(&argc, argv))
+ return -1;
+
+ ret = odp_cunit_register(shmem_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_odp1.h b/platform/linux-generic/test/validation/api/shmem/shmem_odp1.h
new file mode 100644
index 000000000..241637e58
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_odp1.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+void shmem_test_odp_shm_proc(void);
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_odp2.c b/platform/linux-generic/test/validation/api/shmem/shmem_odp2.c
new file mode 100644
index 000000000..14ad2d82e
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_odp2.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#include <linux/limits.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdlib.h>
+
+#include <odp_cunit_common.h>
+#include "shmem_odp2.h"
+#include "shmem_common.h"
+
+#define TEST_SHARE_FOO (0xf0f0f0f0)
+#define TEST_SHARE_BAR (0xf0f0f0f)
+
+/* The C unit test harness is run by ODP1 app which will be told the return
+ * status of this process. See top of shmem_linux.c for chart flow of events
+ */
+int main(int argc, char *argv[])
+{
+ odp_instance_t odp1;
+ odp_instance_t odp2;
+ odp_shm_t shm;
+ odp_shm_info_t info;
+ test_shared_data_t *test_shared_data;
+
+ /* odp init: */
+ if (0 != odp_init_global(&odp2, NULL, NULL)) {
+ ODPH_ERR("odp_init_global() failed\n");
+ return 1;
+ }
+ if (0 != odp_init_local(odp2, ODP_THREAD_CONTROL)) {
+ ODPH_ERR("odp_init_local() failed\n");
+ return 1;
+ }
+
+ /* test: map ODP1 memory and check its contents:
+ * The pid of the ODP instantiation process sharing its memory
+ * is given as first arg. In linux-generic ODP, this pid is actually
+ * the ODP instance */
+ if (argc != 2) {
+ ODPH_ERR("One single parameter expected, %d found\n", argc);
+ return 1;
+ }
+ odp1 = (odp_instance_t)atoi(argv[1]);
+
+ printf("shmem_odp2: trying to grab %s from pid %d\n",
+ SHM_NAME, (int)odp1);
+ shm = odp_shm_import(SHM_NAME, odp1, SHM_NAME);
+ if (shm == ODP_SHM_INVALID) {
+ ODPH_ERR("odp_shm_import() failed\n");
+ return 1;
+ }
+
+ /* check that the read size matches the allocated size (in other ODP):*/
+ if ((odp_shm_info(shm, &info)) ||
+ (info.size != sizeof(*test_shared_data))) {
+ ODPH_ERR("odp_shm_info() failed\n");
+ return 1;
+ }
+
+ test_shared_data = odp_shm_addr(shm);
+ if (test_shared_data == NULL) {
+ ODPH_ERR("odp_shm_addr() failed\n");
+ return 1;
+ }
+
+ if (test_shared_data->foo != TEST_SHARE_FOO) {
+ ODPH_ERR("Invalid data TEST_SHARE_FOO\n");
+ return 1;
+ }
+
+ if (test_shared_data->bar != TEST_SHARE_BAR) {
+ ODPH_ERR("Invalid data TEST_SHARE_BAR\n");
+ return 1;
+ }
+
+ if (odp_shm_free(shm) != 0) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ return 1;
+ }
+
+ /* odp term: */
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return 1;
+ }
+
+ if (0 != odp_term_global(odp2)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return 1;
+ }
+
+ printf("%s SUCCESS\n", __FILE__);
+ return 0;
+}
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_odp2.h b/platform/linux-generic/test/validation/api/shmem/shmem_odp2.h
new file mode 100644
index 000000000..0493d6caa
--- /dev/null
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_odp2.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+int main(int argc, char *argv[]);