-rw-r--r--  .shippable.yml | 17
-rw-r--r--  .travis.yml | 507
-rw-r--r--  CHANGELOG | 312
-rw-r--r--  DEPENDENCIES | 40
-rw-r--r--  Makefile.am | 18
-rw-r--r--  config/odp-linux-generic.conf | 114
-rw-r--r--  configure.ac | 21
-rw-r--r--  doc/application-api-guide/Doxyfile | 1
-rw-r--r--  doc/implementers-guide/implementers-guide.adoc | 163
-rw-r--r--  doc/platform-api-guide/Doxyfile | 4
-rw-r--r--  doc/users-guide/Makefile.am | 1
-rw-r--r--  doc/users-guide/users-guide-comp.adoc | 168
-rw-r--r--  doc/users-guide/users-guide.adoc | 2
-rw-r--r--  example/classifier/odp_classifier.c | 27
-rw-r--r--  example/generator/odp_generator.c | 190
-rw-r--r--  example/ipsec/odp_ipsec.c | 159
-rw-r--r--  example/ipsec_api/odp_ipsec.c | 151
-rw-r--r--  example/ipsec_offload/odp_ipsec_offload.c | 115
-rw-r--r--  example/l2fwd_simple/odp_l2fwd_simple.c | 122
-rw-r--r--  example/l3fwd/odp_l3fwd.c | 122
-rw-r--r--  example/m4/configure.m4 | 12
-rw-r--r--  example/packet/.gitignore | 1
-rw-r--r--  example/packet/Makefile.am | 9
-rw-r--r--  example/packet/odp_packet_dump.c | 685
-rw-r--r--  example/packet/odp_pktio.c | 46
-rwxr-xr-x  example/packet/packet_dump_run.sh | 20
-rw-r--r--  example/switch/odp_switch.c | 46
-rw-r--r--  example/sysinfo/odp_sysinfo.c | 232
-rw-r--r--  example/time/time_global_test.c | 15
-rw-r--r--  example/timer/odp_timer_accuracy.c | 3
-rw-r--r--  example/timer/odp_timer_simple.c | 3
-rw-r--r--  example/timer/odp_timer_test.c | 20
-rw-r--r--  example/traffic_mgmt/odp_traffic_mgmt.c | 64
-rw-r--r--  helper/Makefile.am | 1
-rw-r--r--  helper/chksum.c | 75
-rw-r--r--  helper/include/odp/helper/chksum.h | 42
-rw-r--r--  helper/include/odp/helper/ip.h | 2
-rw-r--r--  helper/include/odp/helper/odph_api.h | 1
-rw-r--r--  helper/include/odp/helper/sctp.h | 49
-rw-r--r--  helper/include/odp/helper/threads.h | 26
-rw-r--r--  helper/iplookuptable.c | 73
-rw-r--r--  helper/test/odpthreads.c | 13
-rw-r--r--  helper/threads.c | 70
-rw-r--r--  include/Makefile.am | 11
-rw-r--r--  include/odp/api/abi-default/classification.h | 4
-rw-r--r--  include/odp/api/abi-default/cpumask.h | 3
-rw-r--r--  include/odp/api/abi-default/event.h | 3
-rw-r--r--  include/odp/api/abi-default/ipsec.h | 2
-rw-r--r--  include/odp/api/abi-default/packet.h | 2
-rw-r--r--  include/odp/api/abi-default/pool.h | 2
-rw-r--r--  include/odp/api/abi-default/schedule_types.h | 10
-rw-r--r--  include/odp/api/abi-default/thread.h | 2
-rw-r--r--  include/odp/api/abi-default/timer.h | 6
-rw-r--r--  include/odp/api/abi-default/traffic_mngr.h | 8
-rw-r--r--  include/odp/api/comp.h | 30
-rw-r--r--  include/odp/api/spec/comp.h | 10
-rw-r--r--  include/odp/api/spec/crypto.h | 37
-rw-r--r--  include/odp/api/spec/event.h | 42
-rw-r--r--  include/odp/api/spec/init.h | 32
-rw-r--r--  include/odp/api/spec/ipsec.h | 50
-rw-r--r--  include/odp/api/spec/queue.h | 277
-rw-r--r--  include/odp/api/spec/queue_types.h | 316
-rw-r--r--  include/odp/api/spec/schedule.h | 120
-rw-r--r--  include/odp/api/spec/schedule_types.h | 117
-rw-r--r--  include/odp/arch/arm32-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp/arch/arm64-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp/arch/default-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp/arch/mips64-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp/arch/power64-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp/arch/power64-linux/odp/api/abi/cpu.h | 1
-rw-r--r--  include/odp/arch/x86_32-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp/arch/x86_64-linux/odp/api/abi/comp.h | 7
-rw-r--r--  include/odp_api.h | 1
-rw-r--r--  m4/ax_prog_doxygen.m4 | 177
-rw-r--r--  m4/odp_dpdk.m4 | 8
-rw-r--r--  platform/linux-generic/Makefile.am | 9
-rw-r--r--  platform/linux-generic/arch/aarch64/odp_atomic.h | 49
-rw-r--r--  platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c | 71
-rw-r--r--  platform/linux-generic/arch/default/odp_sysinfo_parse.c | 16
-rw-r--r--  platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h | 1
-rw-r--r--  platform/linux-generic/arch/x86/odp_sysinfo_parse.c | 32
-rw-r--r--  platform/linux-generic/dumpconfig/.gitignore | 1
-rw-r--r--  platform/linux-generic/dumpconfig/Makefile.am | 10
-rw-r--r--  platform/linux-generic/dumpconfig/dumpconfig.c | 43
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/classification.h | 4
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/comp.h | 34
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/event.h | 3
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/ipsec.h | 2
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/pool.h | 2
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/timer.h | 6
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inline_types.h | 7
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inlines.h | 4
-rw-r--r--  platform/linux-generic/include/odp/api/plat/queue_inline_types.h | 26
-rw-r--r--  platform/linux-generic/include/odp/api/plat/queue_inlines.h | 29
-rw-r--r--  platform/linux-generic/include/odp_buffer_internal.h | 19
-rw-r--r--  platform/linux-generic/include/odp_config_internal.h | 33
-rw-r--r--  platform/linux-generic/include/odp_debug_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_global_data.h | 18
-rw-r--r--  platform/linux-generic/include/odp_init_internal.h | 4
-rw-r--r--  platform/linux-generic/include/odp_ipsec_internal.h | 45
-rw-r--r--  platform/linux-generic/include/odp_ishm_internal.h | 54
-rw-r--r--  platform/linux-generic/include/odp_ishmphy_internal.h | 6
-rw-r--r--  platform/linux-generic/include/odp_ishmpool_internal.h | 1
-rw-r--r--  platform/linux-generic/include/odp_libconfig_internal.h | 1
-rw-r--r--  platform/linux-generic/include/odp_packet_dpdk.h | 45
-rw-r--r--  platform/linux-generic/include/odp_packet_internal.h | 29
-rw-r--r--  platform/linux-generic/include/odp_packet_io_internal.h | 3
-rw-r--r--  platform/linux-generic/include/odp_packet_io_ring_internal.h | 9
-rw-r--r--  platform/linux-generic/include/odp_packet_socket.h | 2
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h | 18
-rw-r--r--  platform/linux-generic/include/odp_queue_basic_internal.h | 36
-rw-r--r--  platform/linux-generic/include/odp_queue_if.h | 61
-rw-r--r--  platform/linux-generic/include/odp_queue_scalable_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_ring_internal.h | 62
-rw-r--r--  platform/linux-generic/include/odp_ring_mpmc_internal.h | 169
-rw-r--r--  platform/linux-generic/include/odp_ring_spsc_internal.h | 33
-rw-r--r--  platform/linux-generic/include/odp_ring_st_internal.h | 34
-rw-r--r--  platform/linux-generic/include/odp_schedule_if.h | 42
-rw-r--r--  platform/linux-generic/include/odp_schedule_scalable_config.h | 3
-rw-r--r--  platform/linux-generic/include/odp_shm_internal.h | 41
-rw-r--r--  platform/linux-generic/include/odp_sysinfo_internal.h | 19
-rw-r--r--  platform/linux-generic/include/odp_timer_internal.h | 22
-rw-r--r--  platform/linux-generic/include/odp_traffic_mngr_internal.h | 39
-rw-r--r--  platform/linux-generic/include/protocols/sctp.h | 49
-rw-r--r--  platform/linux-generic/m4/configure.m4 | 6
-rw-r--r--  platform/linux-generic/m4/performance.m4 | 10
-rw-r--r--  platform/linux-generic/miniz/miniz.c | 619
-rw-r--r--  platform/linux-generic/miniz/miniz.h | 363
-rw-r--r--  platform/linux-generic/miniz/miniz_common.h | 68
-rw-r--r--  platform/linux-generic/miniz/miniz_tdef.c | 1564
-rw-r--r--  platform/linux-generic/miniz/miniz_tdef.h | 183
-rw-r--r--  platform/linux-generic/miniz/miniz_tinfl.c | 725
-rw-r--r--  platform/linux-generic/miniz/miniz_tinfl.h | 146
-rw-r--r--  platform/linux-generic/odp_classification.c | 160
-rw-r--r--  platform/linux-generic/odp_comp.c | 678
-rw-r--r--  platform/linux-generic/odp_cpumask.c | 42
-rw-r--r--  platform/linux-generic/odp_cpumask_task.c | 16
-rw-r--r--  platform/linux-generic/odp_crypto_null.c | 10
-rw-r--r--  platform/linux-generic/odp_crypto_openssl.c | 12
-rw-r--r--  platform/linux-generic/odp_event.c | 10
-rw-r--r--  platform/linux-generic/odp_fdserver.c | 26
-rw-r--r--  platform/linux-generic/odp_hash_crc_gen.c | 60
-rw-r--r--  platform/linux-generic/odp_init.c | 115
-rw-r--r--  platform/linux-generic/odp_ipsec.c | 159
-rw-r--r--  platform/linux-generic/odp_ipsec_events.c | 10
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 305
-rw-r--r--  platform/linux-generic/odp_ishm.c | 978
-rw-r--r--  platform/linux-generic/odp_ishmphy.c | 146
-rw-r--r--  platform/linux-generic/odp_ishmpool.c | 26
-rw-r--r--  platform/linux-generic/odp_libconfig.c | 98
-rw-r--r--  platform/linux-generic/odp_packet.c | 103
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 215
-rw-r--r--  platform/linux-generic/odp_pcapng.c | 46
-rw-r--r--  platform/linux-generic/odp_pool.c | 164
-rw-r--r--  platform/linux-generic/odp_queue_basic.c | 588
-rw-r--r--  platform/linux-generic/odp_queue_if.c | 60
-rw-r--r--  platform/linux-generic/odp_queue_lf.c | 23
-rw-r--r--  platform/linux-generic/odp_queue_scalable.c | 169
-rw-r--r--  platform/linux-generic/odp_queue_spsc.c | 40
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 537
-rw-r--r--  platform/linux-generic/odp_schedule_if.c | 71
-rw-r--r--  platform/linux-generic/odp_schedule_iquery.c | 1585
-rw-r--r--  platform/linux-generic/odp_schedule_scalable.c | 319
-rw-r--r--  platform/linux-generic/odp_schedule_sp.c | 194
-rw-r--r--  platform/linux-generic/odp_shared_memory.c | 33
-rw-r--r--  platform/linux-generic/odp_system_info.c | 48
-rw-r--r--  platform/linux-generic/odp_thread.c | 50
-rw-r--r--  platform/linux-generic/odp_timer.c | 199
-rw-r--r--  platform/linux-generic/odp_traffic_mngr.c | 996
-rw-r--r--  platform/linux-generic/pktio/dpdk.c | 642
-rw-r--r--  platform/linux-generic/pktio/dpdk_parse.c | 8
-rw-r--r--  platform/linux-generic/pktio/ipc.c | 18
-rw-r--r--  platform/linux-generic/pktio/loop.c | 29
-rw-r--r--  platform/linux-generic/pktio/netmap.c | 6
-rw-r--r--  platform/linux-generic/pktio/pcap.c | 13
-rw-r--r--  platform/linux-generic/pktio/ring.c | 72
-rw-r--r--  platform/linux-generic/pktio/socket_mmap.c | 33
-rw-r--r--  platform/linux-generic/test/Makefile.am | 10
-rw-r--r--  platform/linux-generic/test/inline-timer.conf | 8
-rw-r--r--  platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c | 73
-rw-r--r--  platform/linux-generic/test/performance/.gitignore | 2
-rw-r--r--  platform/linux-generic/test/performance/Makefile.am | 13
-rwxr-xr-x  platform/linux-generic/test/performance/odp_scheduling_run_proc.sh | 30
-rw-r--r--  platform/linux-generic/test/pktio_ipc/pktio_ipc1.c | 3
-rw-r--r--  platform/linux-generic/test/pktio_ipc/pktio_ipc2.c | 3
-rw-r--r--  platform/linux-generic/test/process-mode.conf | 9
-rw-r--r--  platform/linux-generic/test/ring/ring_stress.c | 61
-rw-r--r--  platform/linux-generic/test/validation/api/shmem/shmem_linux.c | 17
-rw-r--r--  scripts/Dockerfile | 28
-rwxr-xr-x  scripts/build-pktio-dpdk | 40
-rwxr-xr-x  scripts/checkpatch.pl | 4
-rwxr-xr-x  scripts/ci/build.sh | 22
-rwxr-xr-x  scripts/ci/build_arm64.sh | 14
-rwxr-xr-x  scripts/ci/build_armhf.sh | 16
-rwxr-xr-x  scripts/ci/build_i386.sh | 15
-rwxr-xr-x  scripts/ci/build_powerpc.sh | 15
-rwxr-xr-x  scripts/ci/build_x86_64.sh | 8
-rwxr-xr-x  scripts/ci/check.sh | 17
-rwxr-xr-x  scripts/ci/check_inline_timer.sh | 16
-rwxr-xr-x  scripts/ci/coverage.sh | 30
-rwxr-xr-x  scripts/ci/distcheck.sh | 19
-rw-r--r--  test/common/odp_cunit_common.c | 17
-rw-r--r--  test/common/test_packet_parser.h | 26
-rw-r--r--  test/m4/configure.m4 | 11
-rw-r--r--  test/performance/odp_bench_packet.c | 29
-rw-r--r--  test/performance/odp_cpu_bench.c | 49
-rw-r--r--  test/performance/odp_crypto.c | 18
-rw-r--r--  test/performance/odp_ipsec.c | 28
-rw-r--r--  test/performance/odp_l2fwd.c | 40
-rwxr-xr-x  test/performance/odp_l2fwd_run.sh | 2
-rw-r--r--  test/performance/odp_pktio_ordered.c | 55
-rwxr-xr-x  test/performance/odp_pktio_ordered_run.sh | 3
-rw-r--r--  test/performance/odp_pktio_perf.c | 56
-rw-r--r--  test/performance/odp_pool_perf.c | 2
-rw-r--r--  test/performance/odp_queue_perf.c | 383
-rw-r--r--  test/performance/odp_sched_latency.c | 32
-rw-r--r--  test/performance/odp_sched_perf.c | 148
-rw-r--r--  test/performance/odp_sched_pktio.c | 543
-rwxr-xr-x  test/performance/odp_sched_pktio_run.sh | 2
-rw-r--r--  test/performance/odp_scheduling.c | 42
-rwxr-xr-x  test/performance/odp_scheduling_run.sh | 9
-rw-r--r--  test/validation/api/Makefile.am | 2
-rw-r--r--  test/validation/api/atomic/atomic.c | 237
-rw-r--r--  test/validation/api/barrier/barrier.c | 13
-rw-r--r--  test/validation/api/classification/odp_classification_common.c | 2
-rw-r--r--  test/validation/api/classification/odp_classification_test_pmr.c | 144
-rw-r--r--  test/validation/api/classification/odp_classification_tests.c | 28
-rw-r--r--  test/validation/api/comp/.gitignore | 1
-rw-r--r--  test/validation/api/comp/Makefile.am | 7
-rw-r--r--  test/validation/api/comp/comp.c | 591
-rw-r--r--  test/validation/api/comp/test_vectors.h | 1997
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c | 24
-rw-r--r--  test/validation/api/init/init_main_ok.c | 6
-rw-r--r--  test/validation/api/ipsec/ipsec.c | 66
-rw-r--r--  test/validation/api/ipsec/ipsec.h | 3
-rw-r--r--  test/validation/api/ipsec/ipsec_test_in.c | 92
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c | 70
-rw-r--r--  test/validation/api/lock/lock.c | 13
-rw-r--r--  test/validation/api/packet/packet.c | 80
-rw-r--r--  test/validation/api/pktio/parser.c | 47
-rw-r--r--  test/validation/api/pktio/pktio.c | 428
-rw-r--r--  test/validation/api/pool/pool.c | 266
-rw-r--r--  test/validation/api/queue/queue.c | 67
-rw-r--r--  test/validation/api/scheduler/scheduler.c | 475
-rw-r--r--  test/validation/api/shmem/shmem.c | 14
-rw-r--r--  test/validation/api/system/system.c | 6
-rw-r--r--  test/validation/api/thread/thread.c | 88
-rw-r--r--  test/validation/api/time/time.c | 4
-rw-r--r--  test/validation/api/timer/timer.c | 166
249 files changed, 19235 insertions, 6814 deletions
diff --git a/.shippable.yml b/.shippable.yml
index 53e094bcb..0bf68f330 100644
--- a/.shippable.yml
+++ b/.shippable.yml
@@ -7,8 +7,6 @@ compiler:
env:
- CONF="--disable-test-perf --disable-test-perf-proc"
- CONF="--disable-abi-compat --disable-test-perf --disable-test-perf-proc"
- # - CONF="--enable-schedule-sp"
- # - CONF="--enable-schedule-iquery"
# - CONF="--enable-dpdk-zero-copy"
# - CROSS_ARCH="arm64"
# - CROSS_ARCH="armhf" CFLAGS="-march=armv7-a"
@@ -16,18 +14,9 @@ env:
# - CROSS_ARCH="i386"
build:
- pre_ci:
- # use Dockerfile to install additional CI dependencies
- - docker build -t=odp/dev ./scripts
-
- # use image built in 'pre_ci' for CI job
- pre_ci_boot:
- image_name: odp/dev
- image_tag: latest
- pull: false
- options:
-
ci:
+ - apt-get update
+ - apt-get install --no-install-recommends -yy asciidoctor autoconf automake build-essential ccache clang doxygen gcc graphviz libconfig-dev libcunit1-dev libnuma-dev libpcap-dev libssl-dev libtool mscgen xsltproc
- mkdir -p $HOME/odp-shmdir
- export CI=true ODP_SHM_DIR=$HOME/odp-shmdir ODP_TEST_OUT_XML=yes
- ./bootstrap
@@ -39,8 +28,6 @@ build:
- ./scripts/shippable-post.sh basic
- ODP_SCHEDULER=sp make check
- ./scripts/shippable-post.sh sp
- - ODP_SCHEDULER=iquery make check
- - ./scripts/shippable-post.sh iquery
- ODP_SCHEDULER=scalable make check
- ./scripts/shippable-post.sh scalable
- rm -rf $HOME/odp-shmdir
diff --git a/.travis.yml b/.travis.yml
index 26bbe1a32..c308d5eda 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -7,6 +7,12 @@
# pushing to github/coverity_scan will also launch a static analysis
# See https://scan.coverity.com/travis_ci
+#
+# Travis uses Docker images which are maintained here:
+# https://github.com/Linaro/odp-docker-images
+# CI scripts are maintained in the ./scripts/ci/ directory,
+# which is passed into the container during the test run.
+
language: c
sudo: required
dist: trusty
@@ -14,16 +20,7 @@ stages:
- "build only"
- test
-addons:
- apt:
- packages:
- - gcc
- - clang-3.8
- - automake autoconf libtool libssl-dev graphviz mscgen
- - libconfig-dev
- - codespell
- - libpcap-dev
- - libnuma-dev
+#addons:
# coverity_scan:
# project:
# name: "$TRAVIS_REPO_SLUG"
@@ -36,11 +33,13 @@ cache:
ccache: true
pip: true
directories:
- - dpdk
- netmap
- - $HOME/cunit-install
- $HOME/doxygen-install
+compiler:
+ - gcc
+ - clang
+
env:
global:
#
@@ -48,296 +47,157 @@ env:
# for individual commit validation. But if you want to track test history
# you need to generate a new one at https://codecov.io specific to your repo.
- CODECOV_TOKEN=a733c34c-5f5c-4ff1-af4b-e9f5edb1ab5e
- - DPDK_VERS="17.11.3"
+ - OS="ubuntu_16.04"
+ - CHECK=1
+ - NETMAP=0
matrix:
- CONF=""
- CONF="--disable-abi-compat"
+ - NETMAP=1 CONF=""
+ - CHECK=0 ARCH="arm64"
+ - CHECK=0 ARCH="armhf"
+ - CHECK=0 ARCH="powerpc"
+ - CHECK=0 ARCH="i386"
+ - CHECK=0 ARCH="arm64" CONF="--disable-abi-compat"
+ - CHECK=0 ARCH="armhf" CONF="--disable-abi-compat"
+ - CHECK=0 ARCH="powerpc" CONF="--disable-abi-compat"
+ - CHECK=0 ARCH="i386" CONF="--disable-abi-compat"
- CONF="--enable-deprecated"
- - CONF="--enable-dpdk-zero-copy"
- - CONF="--disable-static-applications"
+ - CONF="--enable-dpdk-zero-copy --disable-static-applications"
+ - NETMAP=1 CONF="--disable-static-applications"
- CONF="--disable-host-optimization"
- CONF="--disable-host-optimization --disable-abi-compat"
- - CONF="--enable-pcapng-support"
+ - CHECK=0 ARCH="x86_64" CONF="--enable-pcapng-support"
+ - CHECK=0 ARCH="x86_64" OS="centos_7"
- CONF="--without-openssl"
- - DPDK_SHARED="y" CONF="--disable-static-applications"
-
-compiler:
- - gcc
- - clang-3.8
-
-before_install:
-
- # Install cross toolchains, etc
- # apt-get update may fail thanks to Ubuntu removing Packages/indices while not removing relevant parts of Release file
- - if [ -n "$CROSS_ARCH" ] ;
- then
- BUILD_GNU_TYPE=`dpkg-architecture -a"$CROSS_ARCH" -qDEB_BUILD_GNU_TYPE` ;
- CROSS_GNU_TYPE=`dpkg-architecture -a"$CROSS_ARCH" -qDEB_HOST_GNU_TYPE` ;
- CROSS_MULTIARCH=`dpkg-architecture -a"$CROSS_ARCH" -qDEB_HOST_MULTIARCH` ;
- CROSS="--host="$CROSS_GNU_TYPE" --build="$BUILD_GNU_TYPE"" ;
- sudo dpkg --add-architecture "$CROSS_ARCH" ;
- PKGS="build-essential libc6-dev:$CROSS_ARCH libssl-dev:$CROSS_ARCH zlib1g-dev:$CROSS_ARCH libconfig-dev:$CROSS_ARCH libstdc++-4.8-dev:$CROSS_ARCH libpcap0.8-dev:$CROSS_ARCH" ;
- if [ "$CROSS_ARCH" = "i386" ] ;
- then
- PKGS="$PKGS g++-multilib" ;
- else
- PKGS="$PKGS g++-$CROSS_GNU_TYPE" ;
- sudo sed -e 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list -i ;
- sudo -E apt-add-repository -y "deb [arch=$CROSS_ARCH] http://ports.ubuntu.com trusty main" ;
- sudo -E apt-add-repository -y "deb [arch=$CROSS_ARCH] http://ports.ubuntu.com trusty-updates main" ;
- fi ;
- if [ "$CROSS_ARCH" != "armhf" ] ;
- then
- PKGS="$PKGS libnuma-dev:$CROSS_ARCH" ;
- fi ;
- sudo cat /etc/apt/sources.list ;
- sudo -E apt-get -y update || true ;
- sudo -E apt-get -y --no-install-suggests --no-install-recommends --force-yes install $PKGS || exit 1 ;
- export PKG_CONFIG_PATH=/usr/lib/${CROSS_MULTIARCH}/pkgconfig:/usr/${CROSS_MULTIARCH}/lib/pkgconfig ;
- fi
- - if [ "${CC#clang}" != "${CC}" ] ;
- then
- if [ -n "$CROSS_ARCH" ] ;
- then
- export CC="${CC} --target=$CROSS_GNU_TYPE" ;
- if [ "$CROSS_ARCH" = "i386" ] ;
- then
- DPDK_CFLAGS="-m32" ;
- else
- DPDK_CROSS="$CROSS_GNU_TYPE-" ;
- DPDK_CFLAGS="--target=$CROSS_GNU_TYPE" ;
- fi
- fi ;
- export CXX="${CC/clang/clang++}";
- elif [ "$CROSS_ARCH" = "i386" ] ;
- then
- export CC="gcc -m32" ;
- export CXX="g++ -m32" ;
- DPDK_CFLAGS="-m32" ;
- elif [ -n "$CROSS_ARCH" ] ;
- then
- export CC="$CROSS_GNU_TYPE"-gcc ;
- export CXX="$CROSS_GNU_TYPE"-g++ ;
- DPDK_CROSS="$CROSS_GNU_TYPE-" ;
- fi
- - if test ! -L /usr/lib/ccache/${CC%% *} ; then sudo ln -s -t /usr/lib/ccache/ `which ${CC%% *}` ; fi
- - ccache -s
- # Install cunit for the validation tests because distro version is too old and fails C99 compile
- - sudo apt-get remove libcunit1-dev libcunit1
- - export LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH"
- - |
- if [ ! -f "$HOME/cunit-install/$CROSS_ARCH/lib/libcunit.a" ]; then
- export CUNIT_VERSION=2.1-3
- curl -sSOL https://github.com/Linaro/libcunit/releases/download/${CUNIT_VERSION}/CUnit-${CUNIT_VERSION}.tar.bz2
- tar -jxf *.bz2
- pushd CUnit*
- libtoolize --force --copy
- aclocal
- autoheader
- automake --add-missing --include-deps --copy
- autoconf
- ./configure --prefix=$HOME/cunit-install/$CROSS_ARCH --enable-debug --enable-automated --enable-basic --enable-console --enable-examples --enable-test $CROSS || cat config.log
- make -j $(nproc)
- sudo make install
- popd
- fi
- - export PKG_CONFIG_PATH="$HOME/cunit-install/$CROSS_ARCH/lib/pkgconfig:${PKG_CONFIG_PATH}"
- - find $HOME/cunit-install
-
- # workaround for tap driver issues on recent Travis images
- # Allow forwaring on virtual interfaces used for testing
- - sudo iptables --policy FORWARD ACCEPT
+ - OS="ubuntu_18.04"
+matrix:
+ exclude:
+ - compiler: gcc
+ env: CHECK=0 ARCH="arm64"
+ - compiler: gcc
+ env: CHECK=0 ARCH="i386"
install:
- - echo 1000 | sudo tee /proc/sys/vm/nr_hugepages
- - sudo mkdir -p /mnt/huge
- - sudo mount -t hugetlbfs nodev /mnt/huge
-
- - if [ -z "$CROSS_ARCH" ] ;
- then
- sudo apt-get -qq update ;
- sudo apt-get install linux-headers-`uname -r` ;
- fi
- - gem install asciidoctor
-
- # DPDK pktio. Cache will be updated automatically to ${DPDK_VERS}
- - |
- case "$CROSS_ARCH" in
- "arm64")
- DPDK_TARGET="arm64-armv8a-linuxapp-"
- ;;
- "armhf")
- DPDK_TARGET="arm-armv7a-linuxapp-"
- ;;
- "i386")
- DPDK_TARGET="i686-native-linuxapp-"
- ;;
- "")
- DPDK_TARGET="x86_64-native-linuxapp-"
- DPDK_MACHINE=snb
- ;;
- esac
- - |
- if [ -n "$DPDK_TARGET" ] ; then
- if [ "${CC#clang}" != "${CC}" ] ; then
- DPDKCC=clang ;
- else
- DPDKCC=gcc ;
- fi
- if [ -n "$DPDK_SHARED" ] ; then
- TARGET="${DPDK_TARGET}$DPDKCC"-shared
- LIBDPDKEXT=so
- export LD_LIBRARY_PATH="`pwd`/${TARGET}:$LD_LIBRARY_PATH"
- echo $LD_LIBRARY_PATH
- else
- TARGET="${DPDK_TARGET}$DPDKCC"
- LIBDPDKEXT=a
- fi
- DPDK_TARGET="${DPDK_TARGET}gcc"
- CACHED_DPDK_VERS=`fgrep Version dpdk/pkg/dpdk.spec | cut -d " " -f 2`
- if [ ! -d dpdk -o "${CACHED_DPDK_VERS}" != "${DPDK_VERS}" ]; then
- rm -rf dpdk
- mkdir dpdk
- pushd dpdk
- git init
- git -c advice.detachedHead=false fetch -q --depth=1 http://dpdk.org/git/dpdk-stable v${DPDK_VERS}
- git checkout -f FETCH_HEAD
- popd
- fi
- if [ ! -f "dpdk/${TARGET}/usr/local/lib/libdpdk.$LIBDPDKEXT" ]; then
- pushd dpdk
- git log --oneline --decorate
- # AArch64 && ARMv7 fixup
- sed -i -e 's/40900/40800/g' lib/librte_eal/common/include/arch/arm/rte_vect.h
- sed -i -e 's/!(/!(defined(__arm__) \&\& defined(__clang__) || /g' lib/librte_eal/common/include/arch/arm/rte_byteorder.h
- sed -i -e 's/__GNUC__/defined(__arm__) \&\& defined(__clang__) || __GNUC__/' lib/librte_eal/common/include/generic/rte_byteorder.h
- sed -i -e 's,\$(CC),\0 $(EXTRA_CFLAGS),g' lib/librte_acl/Makefile
- make config T=${DPDK_TARGET} O=${TARGET}
- pushd ${TARGET}
- sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config
- # OCTEON TX driver includes ARM v8.1 instructions
- sed -ri 's,(CONFIG_RTE_LIBRTE_OCTEONTX_PMD=).*,\1n,' .config
- sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_OCTEONTX_SSOVF=).*,\1n,' .config
- sed -ri 's,(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL=).*,\1n,' .config
- if test -n "${DPDK_MACHINE}" ; then
- sed -ri 's,(CONFIG_RTE_MACHINE=).*,\1"'${DPDK_MACHINE}'",' .config
- fi
- if test -n "${DPDK_SHARED}" ; then
- sed -ri 's,(CONFIG_RTE_BUILD_SHARED_LIB=).*,\1y,' .config
- fi
- if test -n "$CROSS_ARCH" ; then
- sed -ri -e 's,(CONFIG_RTE_EAL_IGB_UIO=).*,\1n,' .config
- sed -ri -e 's,(CONFIG_RTE_KNI_KMOD=).*,\1n,' .config
- fi
- sed -ri -e 's,(CONFIG_RTE_TOOLCHAIN=).*,\1"'${DPDKCC}'",' .config
- sed -ri -e '/CONFIG_RTE_TOOLCHAIN_.*/d' .config
- echo CONFIG_RTE_TOOLCHAIN_${DPDKCC^^}=y >> .config
- popd
- make build O=${TARGET} EXTRA_CFLAGS="-fPIC $DPDK_CFLAGS" CROSS="$DPDK_CROSS" CC="$CC" HOSTCC=gcc -j $(nproc)
- make install O=${TARGET} DESTDIR=${TARGET}
- rm -r ./doc ./${TARGET}/app ./${TARGET}/build
- popd
- fi
- EXTRA_CONF="$EXTRA_CONF --with-dpdk-path=`pwd`/dpdk/${TARGET}/usr/local"
- fi
-
-# Netmap pktio
- - |
- if [ -z "$CROSS_ARCH" ]; then
- if [ ! -f "netmap/LINUX/netmap.ko" ]; then
- git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=v11.2 https://github.com/luigirizzo/netmap.git
- pushd netmap/LINUX
- ./configure
- make -j $(nproc)
- popd
- fi
- sudo insmod ./netmap/LINUX/netmap.ko
- EXTRA_CONF="$EXTRA_CONF --with-netmap-path=`pwd`/netmap"
+ - if [ ${NETMAP} -eq 1 ] ; then
+ echo "Installing NETMAP";
+ sudo apt-get install linux-headers-`uname -r` ;
+ CDIR=`pwd` ;
+ git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=v11.2 https://github.com/luigirizzo/netmap.git;
+ pushd netmap/LINUX;
+ ./configure --drivers= ;
+ make -j $(nproc);
+ popd;
+ sudo insmod ./netmap/LINUX/netmap.ko;
+ CONF="$CONF --with-netmap-path=/odp/netmap";
fi
-
script:
- - ./bootstrap
- - ./configure --prefix=$HOME/odp-install
- --enable-user-guides
- --enable-debug=full
- --enable-helper-linux
- $CROSS $EXTRA_CONF $CONF
- - make -j $(nproc)
- - mkdir /dev/shm/odp
- # Run all tests only for default configuration
- - if [ -z "$CROSS_ARCH" ] ; then
- if [ -n "$CONF" ] ; then
- sudo ODP_CONFIG_FILE="`pwd`/config/odp-linux-generic.conf" LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" ODP_SHM_DIR=/dev/shm/odp make check ;
- else
- sudo ODP_SCHEDULER=basic LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" ODP_SHM_DIR=/dev/shm/odp make check ;
- sudo ODP_SCHEDULER=sp LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" ODP_SHM_DIR=/dev/shm/odp make check ;
- sudo ODP_SCHEDULER=iquery LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" ODP_SHM_DIR=/dev/shm/odp make check ;
- sudo ODP_SCHEDULER=scalable LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" ODP_SHM_DIR=/dev/shm/odp make check ;
- fi
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - if [ ${CHECK} -eq 0 ] ; then
+ docker run -i -t -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF="${CONF}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-${OS} /odp/scripts/ci/build_${ARCH}.sh ;
+ else
+ echo "Running test" ;
+ docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF="${CONF}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-${OS} /odp/scripts/ci/check.sh ;
fi
- - make install
-
- - echo "Checking linking and run from install..."
- - pushd $HOME
- - echo "Dynamic link.."
- - ${CC} ${CFLAGS} ${OLDPWD}/example/hello/odp_hello.c -o odp_hello_inst_dynamic `PKG_CONFIG_PATH=${HOME}/odp-install/lib/pkgconfig:${PKG_CONFIG_PATH} pkg-config --cflags --libs libodp-linux`
- - if [ -z "$CROSS_ARCH" ] ; then
- LD_LIBRARY_PATH="${HOME}/odp-install/lib:$LD_LIBRARY_PATH" ./odp_hello_inst_dynamic ;
- fi
- - |
- # it is not possible to do static linking if we only have shared DPDK library. Compiler complains about missing -ldpdk
- if [ -z "$TARGET" -o -z "$DPDK_SHARED" ] ; then
- echo "Static link.."
- ${CC} ${CFLAGS} ${OLDPWD}/example/hello/odp_hello.c -o odp_hello_inst_static `PKG_CONFIG_PATH=${HOME}/odp-install/lib/pkgconfig:${PKG_CONFIG_PATH} pkg-config --cflags --libs libodp-linux --static` -static || exit 1
- if [ -z "$CROSS_ARCH" ] ; then
- ./odp_hello_inst_static;
- fi
- fi
- - popd
- - ccache -s
-
jobs:
include:
- stage: test
env: TEST=coverage
compiler: gcc
script:
- - sudo pip install coverage
- - ./bootstrap
- - ./configure --prefix=$HOME/odp-install
- --enable-user-guides
- --with-dpdk-path=`pwd`/dpdk/${TARGET}/usr/local
- --with-netmap-path=`pwd`/netmap CFLAGS="-O0 -coverage"
- CXXFLAGS="-O0 -coverage" LDFLAGS="--coverage"
- --enable-debug=full
- --enable-helper-linux
- - CCACHE_DISABLE=1 make -j $(nproc)
- - mkdir -p /dev/shm/odp
- - sudo CCACHE_DISABLE=1 ODP_SCHEDULER=basic ODP_SHM_DIR=/dev/shm/odp LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" make check
- - sudo CCACHE_DISABLE=1 ODP_SCHEDULER=sp ODP_SHM_DIR=/dev/shm/odp LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" make check
- - sudo CCACHE_DISABLE=1 ODP_SCHEDULER=iquery ODP_SHM_DIR=/dev/shm/odp LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" make check
- - sudo CCACHE_DISABLE=1 ODP_SCHEDULER=scalable ODP_SHM_DIR=/dev/shm/odp LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" make check
- - bash <(curl -s https://codecov.io/bash) -X coveragepy
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CODECOV_TOKEN="${CODECOV_TOKEN}"
+ -e CC="${CC}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/coverage.sh
+ - stage: test
+ env: TEST=scheduler_sp
+ compiler: gcc
+ script:
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF=""
+ -e ODP_SCHEDULER=sp
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh
+ - stage: test
+ env: TEST=scheduler_scalable
+ compiler: gcc
+ script:
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF=""
+ -e ODP_SCHEDULER=scalable
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh
+ - stage: test
+ env: TEST=process_mode
+ install:
+ - true
+ compiler: gcc
+ script:
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF=""
+ -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/process-mode.conf
+ -e ODPH_PROC_MODE=1
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check.sh
+ - stage: test
+ env: TEST=inline_timer
+ install:
+ - true
+ compiler: gcc
+ script:
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF=""
+ -e ODP_CONFIG_FILE=/odp/platform/linux-generic/test/inline-timer.conf
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/check_inline_timer.sh
- stage: test
env: TEST=distcheck
compiler: gcc
script:
- - ./bootstrap
- - ./configure --prefix=$HOME/odp-install
- --enable-user-guides
- - sudo PATH="$PATH" LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" make distcheck
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF="--enable-user-guides"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/distcheck.sh
- stage: test
- env: TEST=distcheck-non-abi
+ env: TEST=distcheck_nonabi
compiler: gcc
script:
- - ./bootstrap
- - ./configure --prefix=$HOME/odp-install
- --enable-user-guides
- - sudo PATH="$PATH" LD_LIBRARY_PATH="$HOME/cunit-install/$CROSS_ARCH/lib:$LD_LIBRARY_PATH" make distcheck DISTCHECK__CONFIGURE_FLAGS=--disable-abi-compat
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run --privileged -i -t
+ -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ -e CONF="--enable-user-guides --disable-abi-compat"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/distcheck.sh
- stage: "build only"
env: TEST=doxygen
compiler: gcc
+ addons:
+ apt:
+ packages:
+ - libconfig-dev
+ - libssl-dev
+ - cmake
+ - graphviz
install:
# Updated Doxygen
- |
@@ -364,85 +224,42 @@ jobs:
true
fi
- stage: "build only"
- env: CONF=""
- compiler: gcc
- install: true
+ env: ARCH=x86_64
+ install:
+ - true
script:
- - ./bootstrap
- - ./configure --enable-helper-linux
- - make -j $(nproc)
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run -i -t -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/build_${ARCH}.sh
- stage: "build only"
- env: CONF=""
- compiler: clang-3.8
- install: true
+ env: ARCH=x86_64
+ compiler: clang
+ install:
+ - true
script:
- - ./bootstrap
- - ./configure --enable-helper-linux
- - make -j $(nproc)
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run -i -t -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/build_${ARCH}.sh
- stage: "build only"
- env: CROSS_ARCH="i386"
- compiler: gcc
- install: true
+ env: ARCH=arm64
+ install:
+ - true
script:
- - ./bootstrap
- - ./configure --enable-helper-linux $CROSS
- - make -j $(nproc)
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run -i -t -v `pwd`:/odp
+ -e CC="${CC}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/build_${ARCH}.sh
- stage: "build only"
- env: CROSS_ARCH="arm64"
- compiler: gcc
- install: true
+ env: ARCH=i386
+ install:
+ - true
script:
- - ./bootstrap
- - ./configure --enable-helper-linux $CROSS
- - make -j $(nproc)
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="arm64"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="arm64"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="armhf" CFLAGS="-march=armv7-a"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="armhf" CFLAGS="-march=armv7-a"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="powerpc"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="powerpc"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="i386"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="i386"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="arm64" CONF="--disable-abi-compat"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="arm64" CONF="--disable-abi-compat"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="armhf" CFLAGS="-march=armv7-a" CONF="--disable-abi-compat"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="armhf" CFLAGS="-march=armv7-a" CONF="--disable-abi-compat"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="powerpc" CONF="--disable-abi-compat"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="powerpc" CONF="--disable-abi-compat"
- - stage: test
- compiler: gcc
- env: CROSS_ARCH="i386" CONF="--disable-abi-compat"
- - stage: test
- compiler: clang-3.8
- env: CROSS_ARCH="i386" CONF="--disable-abi-compat"
+ - if [ -z "${DOCKER_NAMESPACE}" ] ; then export DOCKER_NAMESPACE="opendataplane"; fi
+ - docker run -i -t -v `pwd`:/odp --shm-size 8g
+ -e CC="${CC}"
+ ${DOCKER_NAMESPACE}/travis-odp-lng-ubuntu_16.04 /odp/scripts/ci/build_${ARCH}.sh
- stage: test
canfail: yes
env: TEST=checkpatch
diff --git a/CHANGELOG b/CHANGELOG
index 7ee5670dc..a0bdf558f 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,315 @@
+== OpenDataPlane (1.20.0.0)
+=== Summary of Changes
+ODP v1.20.0.0 is a refresh of ODP, incorporating significant configurability
+and performance improvements as well as new APIs and API restructures.
+
+==== APIs
+===== Symbol `ODP_SHM_NULL` Removed
+An invalid `odp_shm_t` has the value `ODP_SHM_INVALID`, consistent with other
+ODP types. The legacy synonym `ODP_SHM_NULL` is now removed for consistency.
+
+===== New 3GPP Crypto Algorithm Support
+New support for 3GPP crypto algorithms is added by defining symbols for
+
+* `ODP_CIPHER_ALG_KASUMI_F8`
+* `ODP_CIPHER_ALG_SNOW3G_UEA2`
+* `ODP_CIPHER_ALG_ZUC_EEA3`
+
+In addition new authentication algorithm symbols are defined for
+
+* `ODP_AUTH_ALG_KASUMI_F9`
+* `ODP_AUTH_ALG_SNOW3G_UIA2`
+* `ODP_AUTH_ALG_ZUC_EIA3`
+
+These values are returned as ODP capabilities as well as being accepted in
+crypto session creation for implementations that indicate support for them.
+
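+As a non-normative sketch, an application can probe for the new algorithms
+before creating a session (assumes the implementation sets the corresponding
+capability bits):
+
+[source,c]
+-----
+odp_crypto_capability_t capa;
+
+if (odp_crypto_capability(&capa) == 0 &&
+    capa.ciphers.bit.kasumi_f8 && capa.auths.bit.kasumi_f9) {
+	/* KASUMI F8/F9 may be requested in session parameters */
+}
+-----
+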
+===== Crypto Capability for Bitwise Operation
+The new `bit_mode` capability Boolean is added to the
+`odp_crypto_cipher_capability_t` struct to indicate that an implementation
+supports operating in bit mode. When operating in bit
+mode, field offsets and lengths are expressed in terms of bits rather than
+bytes. However, such lengths must always be specified in multiples of 8.
+
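+For example, bit-mode support for a cipher could be probed as follows (a
+sketch only; the array size is an arbitrary application choice):
+
+[source,c]
+-----
+odp_crypto_cipher_capability_t cap[8];
+int i;
+int num = odp_crypto_cipher_capability(ODP_CIPHER_ALG_SNOW3G_UEA2, cap, 8);
+
+for (i = 0; i < num && i < 8; i++) {
+	if (cap[i].bit_mode)
+		break; /* this key/IV combination supports bit mode */
+}
+-----
+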
+===== Improved Crypto Spec Documentation
+The ODP crypto API specification is tightened to specify default values for
+cipher and authentication algorithms. It also now documents when key and IV
+parameters need to be set.
+
+===== IPsec Extensions
+IPsec requires "salt" (extra keying material) when the GMAC authentication
+algorithm is used. To accommodate this the `auth_key_extra` field is added to
+the `odp_ipsec_crypto_param_t` struct and documentation is added clarifying
+when this field is needed and how it should be used.
+
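+A sketch of setting the salt during SA creation (key and salt contents are
+placeholders; the required lengths come from the implementation's
+capabilities):
+
+[source,c]
+-----
+static uint8_t key[16] = {0}; /* real key material here */
+static uint8_t salt[4] = {0}; /* real salt here */
+odp_ipsec_sa_param_t sa_param;
+
+odp_ipsec_sa_param_init(&sa_param);
+sa_param.crypto.auth_alg = ODP_AUTH_ALG_AES_GMAC;
+sa_param.crypto.auth_key.data = key;
+sa_param.crypto.auth_key.length = sizeof(key);
+/* GMAC requires the extra keying material (salt) */
+sa_param.crypto.auth_key_extra.data = salt;
+sa_param.crypto.auth_key_extra.length = sizeof(salt);
+-----
+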
+===== Classifier Type Rename
+The `odp_pmr_t` type name for an invalid value is renamed from `ODP_PMR_INVAL`
+to `ODP_PMR_INVALID` for consistency with the rest of ODP type names. The old
+symbol is still available when ODP is configured with
+`--enable-deprecated`.
+
+===== New API for Packet Event Subtypes
+The `odp_packet_subtype()` API is added that returns the subtype of a packet
+event directly.
+
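+For example (a sketch; `pkt` is a previously received packet):
+
+[source,c]
+-----
+if (odp_packet_subtype(pkt) == ODP_EVENT_PACKET_IPSEC) {
+	/* packet carries IPsec operation results */
+}
+-----
+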
+===== Streamlined Packet Parsing Results
+The `odp_packet_parse_result()` API is added that returns the result of
+packet parsing as a single `odp_packet_parse_result_t` struct. This can
+offer efficiency improvements for applications that need all parse results
+rather than making individual parse result calls.
+
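+A sketch of fetching all results with one call (`pkt` as above; field names
+follow the new struct and `process()` is a hypothetical application function):
+
+[source,c]
+-----
+odp_packet_parse_result_t result;
+
+odp_packet_parse_result(pkt, &result);
+
+if (result.flag.has_ipv4 && result.flag.has_udp)
+	process(pkt, result.l3_offset, result.l4_offset);
+-----
+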
+===== PktIO Extensions to Support per-Queue Configuration
+PktIO interfaces support multiple input queues to enable increased parallelism
+in I/O processing. Previously, all of these input queues were required to
+belong to the same scheduler group. The `odp_pktin_queue_param_t` struct is
+now extended with an optional `odp_pktin_queue_param_ovr_t` struct that
+permits individual pktin queues to be assigned to separate scheduler groups.
+This may permit improved performance for advanced application use cases.
+
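+A sketch of steering the second of two pktin queues to its own scheduler
+group (`pktio` is an opened interface, `grp1` is assumed to have been created
+with `odp_schedule_group_create()`, and the override field name is assumed
+from the struct name above):
+
+[source,c]
+-----
+odp_pktin_queue_param_t pktin_param;
+odp_pktin_queue_param_ovr_t ovr[2];
+
+odp_pktin_queue_param_init(&pktin_param);
+pktin_param.num_queues = 2;
+ovr[0].group = ODP_SCHED_GROUP_WORKER;
+ovr[1].group = grp1;
+pktin_param.queue_param_ovr = ovr;
+
+odp_pktin_queue_config(pktio, &pktin_param);
+-----
+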
+===== Timer Pool Capabilities
+The `odp_timer_capability_t` struct is extended to return three additional
+pieces of information:
+
+`max_pools_combined`::
+The total number of timer pools that can be created combining different
+clock sources.
+
+`max_pools`::
+The maximum number of timer pools for a given clock source.
+
+`max_timers`::
+The maximum number of timers in a single pool. A zero value means the number
+is limited only by available memory.
+
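+For example (a sketch using the CPU clock source; error handling omitted):
+
+[source,c]
+-----
+odp_timer_capability_t capa;
+
+if (odp_timer_capability(ODP_CLOCK_CPU, &capa) == 0) {
+	/* capa.max_pools_combined, capa.max_pools and capa.max_timers
+	 * now bound how many pools and timers may be created */
+}
+-----
+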
+===== Add Scheduler min/max/default Priority Functions
+Three new APIs: `odp_schedule_max_prio()`, `odp_schedule_min_prio()`, and
+`odp_schedule_default_prio()` are added that return the min, max, and default
+values specified for the `prio` field in the `odp_schedule_param_t` struct.
+
+With the introduction of these scheduling priority functions the previously
+defined macros (`ODP_SCHED_PRIO_HIGHEST`, `ODP_SCHED_PRIO_NORMAL`, and
+`ODP_SCHED_PRIO_LOWEST`) are now deprecated and should no longer be used.
+
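+For example, a queue can request the highest priority portably, replacing the
+deprecated `ODP_SCHED_PRIO_HIGHEST` (a sketch):
+
+[source,c]
+-----
+odp_queue_param_t qparam;
+
+odp_queue_param_init(&qparam);
+qparam.type = ODP_QUEUE_TYPE_SCHED;
+qparam.sched.prio = odp_schedule_max_prio();
+-----
+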
+===== Specification of `odp_schedule_prio_t` as an `int`
+Previously, the `odp_schedule_prio_t` type definition was left to each
+implementation. With the addition of explicit schedule priority ranges, this
+type is now specified to be an `int` to permit efficient implementation
+(including inlining) of these functions.
+
+===== New Scheduler APIs
+The new scheduler APIs `odp_schedule_multi_wait()` and
+`odp_schedule_multi_no_wait()` are added to provide more efficiently
+implementable versions of these functions. The existing scheduler APIs remain
+unchanged. These new APIs can simply provide a fastpath for some
+applications/implementations as an alternative to specifying a parameter on
+`odp_schedule_multi()`.
+
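+A sketch of the two variants (`MAX_EVENTS` is an arbitrary application
+constant):
+
+[source,c]
+-----
+odp_event_t ev[MAX_EVENTS];
+odp_queue_t from;
+int num;
+
+/* Blocks until at least one event is available */
+num = odp_schedule_multi_wait(&from, ev, MAX_EVENTS);
+
+/* Returns immediately, possibly with zero events */
+num = odp_schedule_multi_no_wait(&from, ev, MAX_EVENTS);
+-----
+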
+===== Memory Model in `odp_init_global()`
+The `odp_init_t` parameter passed to `odp_init_global()` is extended to
+add the `mem_model` field. This field is defined by the new `odp_mem_model_t`
+struct and is used to specify whether the application will be using a
+thread (`ODP_MEM_MODEL_THREAD`) or process (`ODP_MEM_MODEL_PROCESS`)
+memory model. The default is the thread model, for compatibility with
+previous levels of ODP.
+
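+A sketch of requesting the process memory model at initialization (error
+handling omitted):
+
+[source,c]
+-----
+odp_instance_t instance;
+odp_init_t param;
+
+odp_init_param_init(&param);
+param.mem_model = ODP_MEM_MODEL_PROCESS;
+
+odp_init_global(&instance, &param, NULL);
+-----
+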
+==== ABI Changes
+A number of changes to the ODP ABI have also been made in this release to
+improve application binary portability.
+
+===== Strong Typing for Timer Pools
+The `odp_timer_pool_t` is now strongly typed.
+
+===== Consistent Initialization
+The values of the various `ODP_xxx_INVALID` symbols for ODP abstract types in
+the `odp-linux` reference implementation are now consistently zeros. This
+reduces errors and improves portability.
+
+=== Implementation Improvements
+==== Configuration File
+A new configuration file mechanism is introduced that makes use of
+https://www.hyperrealm.com/libconfig/libconfig_manual.html[libconfig] to
+enable various runtime ODP parameters to be specified dynamically.
+
+Default configuration values for the `odp-linux` reference implementation are
+contained in the `config/odp-linux-generic.conf` file. Users may override
+these default values by supplying their own configuration file. At
+`odp_init_global()` time, if the `ODP_CONFIG_FILE` environment variable is set,
+this is used to locate the path to the override configuration file.
+
+==== Process Mode Support
+The `odp-linux` reference implementation now supports applications that run in
+process mode (`mem_model` = `ODP_MEM_MODEL_PROCESS`) as well as the default
+thread mode. This support only applies within a single ODP instance, so any
+`fork()` calls must be done only _after_ `odp_init_global()` has been called
+to initialize ODP on a root process.
+
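+A sketch of the required ordering (error handling omitted; `param` selects
+`ODP_MEM_MODEL_PROCESS` as shown in the API section above):
+
+[source,c]
+-----
+odp_init_global(&instance, &param, NULL); /* on the root process */
+
+if (fork() == 0) { /* fork only after global init */
+	odp_init_local(instance, ODP_THREAD_WORKER);
+	/* worker processing within the same ODP instance */
+}
+-----
+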
+==== Removal of `iQuery` Scheduler
+The `iQuery` scheduler is removed from the `odp-linux` reference
+implementation, as it offers no performance advantages and has not seen
+application use.
+
+==== Number of CPUs
+The `odp-linux` reference implementation now supports up to 256 CPUs by
+default (increased from 128).
+
+==== Support for Large Burst Sizes
+The `odp-linux` reference implementation now supports large burst sizes for
+both I/O and non-I/O scheduled events. Large bursts (when available) are
+received directly to the application without any stashing for improved
+throughput. Burst sizes are configurable via the new configuration file
+mechanism, as described above.
+
+==== `--without-openssl` Warnings
+When building `odp-linux` using `--without-openssl` a warning will be issued
+cautioning that strong cryptography will not be available.
+
+==== Inline Queue Enq/Deq APIs
+The various enq/deq APIs are now subject to inlining when `odp-linux` is
+built with `--disable-abi-compat`.
+
+==== Configurable Timer Controls
+Inline timers are now controlled via a config file option. Timer polling
+frequency is similarly controlled via the config file.
+
+==== Huge Page Configuration
+The config file is now used to specify the huge page usage limit.
+
+==== Single and Multi-Consumer/Producer Rings
+The queue implementation in `odp-linux` now automatically makes use of
+optimized single and multi-consumer/producer rings to significantly speed
+up enq/deq processing.
+
+==== `odp_shm_print_all()` Improvements
+The output from `odp_shm_print_all()` is reworked to provide more useful
+and comprehensive shared memory usage information in `odp-linux`.
+
+==== IPsec Improvements
+SA lifetime checking is now more scalable to multiple threads. This
+significantly reduces overhead for multithreaded IPsec applications.
+
+==== Native Builds
+When running in non-ABI compatibility mode, `odp-linux` now enables
+native machine-specific optimizations for the CPU architecture of the
+local machine.
+
+=== Validation Test Improvements
+==== SCTP Test Packets
+SCTP test packets are now used in parser testing. SCTP headers are added to
+ODP and ODP helpers and SCTP checksums are now inserted and verified as part
+of validation testing.
+
+==== `odp_packet_reset()` Test
+The packet validation test suite now properly tests `odp_packet_reset()`.
+
+=== Helper Changes
+In support of process mode, ODP helper functions have been changed to
+better match these new capabilities.
+
+==== New `enum`
+The `odph_linux_thread_type_t` enum has been replaced with the new
+`odp_mem_model_t` type.
+
+==== Helper Options
+The new `odph_options()` getter function is added that returns
+applicable options in effect via the new `odph_helper_options_t` struct.
+This currently includes the memory model (thread or process) that is in use.
+
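+For example (a sketch; assumes the getter returns 0 on success):
+
+[source,c]
+-----
+odph_helper_options_t helper_options;
+
+if (odph_options(&helper_options) == 0 &&
+    helper_options.mem_model == ODP_MEM_MODEL_PROCESS) {
+	/* helpers are operating in process mode */
+}
+-----
+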
+==== SCTP Helpers
+The new helper APIs `odph_sctp_chksum_set()` and `odph_sctp_chksum_verify()`
+are added to facilitate working with SCTP packet checksums.
+
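+A sketch of their intended use (return conventions assumed to follow the
+existing `odph_udp_chksum_set()` style helpers):
+
+[source,c]
+-----
+/* Insert checksum before transmit */
+if (odph_sctp_chksum_set(pkt))
+	return -1;
+
+/* Verify on receive */
+if (odph_sctp_chksum_verify(pkt))
+	return -1; /* checksum mismatch or error */
+-----
+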
+=== Performance Test Improvements
+==== Pool Performance
+A new `odp_pool_perf` test has been added that stress-tests ODP pool
+functions in a multithreaded environment to generate performance statistics.
+
+==== Scheduler Performance
+A new `odp_sched_perf` test has been added that stress-tests the scheduler
+in a multithreaded environment.
+
+==== CPU Performance
+A new `odp_cpu_bench` performance test has been added that runs
+compute-intensive packet operations in a multithreaded environment and prints
+the observed maximum throughput for each thread.
+
+=== Example Improvements
+==== Classifier Example changes
+The `odp_classifier` example program now uses a reduced number of threads by
+default to reduce elapsed run time. `ODP_THREAD_COUNT_MAX` is also now used as
+the max worker count.
+
+==== Generator Improvements
+The `odp_generator` example has numerous cleanups and performance improvements.
+
+==== IPsec Example
+The `odp_ipsec` example now properly stops and closes pktio devices on exit.
+
+==== Packet Dumping
+A new `odp_packet_dump` example is added that prints received packets to the
+terminal. This is useful for debugging packet I/O interfaces.
+
+==== Sysinfo Example
+A new `odp_sysinfo` example is provided that prints system information. Useful
+for checking the ODP environment during debugging. This includes providing
+detailed information about the various crypto facilities supported, as well
+as the feature flags used at build time (_e.g.,_ if the binary was built with
+ARMv8.0 or ARMv8.1 instructions).
+
+==== Traffic Manager Example
+The traffic manager example now properly destroys all TM queues it creates
+for improved reliability. It also now always prints a proper termination
+summary message.
+
+=== Bug Fixes
+==== Numbered Bugs/Issues
+===== https://bugs.linaro.org/show_bug.cgi?id=3983[Bug 3983]
+Compile fails on OpenSuSE 42.2 Leap with error: negative width in bit field
+'__error_if_negative'
+
+===== https://bugs.linaro.org/show_bug.cgi?id=3989[Bug 3989]
+odp_system_info_init() issues
+
+===== https://bugs.linaro.org/show_bug.cgi?id=3999[Bug 3999]
+IPsec antireplay check drops packets when sequence number jumps.
+
+===== https://bugs.linaro.org/show_bug.cgi?id=4002[Bug 4002]
+IPsec SA creation must fail for ESN-enabled SAs
+
+===== https://bugs.linaro.org/show_bug.cgi?id=4013[Bug 4013]
+Per-SA IPv4 ID allocation may cause duplicate IDs.
+
+===== https://bugs.linaro.org/show_bug.cgi?id=4017[Bug 4017]
+Unexpected IP ID causes IPsec API validation to fail
+
+===== https://github.com/Linaro/odp/issues/662[Issue 662]
+rte_mempool_ops_alloc() is not dpdk api
+
+==== Unnumbered Bugs/Issues
+* Fixed enq/deq issues encountered on architectures with weak memory ordering.
+* Return 0 from `odp_sys_huge_page_size_all()` if hugepages are not
+supported/detected. Tests modified to not treat this as an error.
+* Set `ODP_CACHE_LINE_SIZE` to 128 on ppc64le systems.
+* Fixed iplookuptable insertion of values into the table.
+* DPDK pktio support now works properly across multiple ODP instances.
+* Zero timer pool memory on reserve (fixes timer failures due to uninitialized
+variables).
+* `-march=native` disabled for `clang`. This fixes a known issue with recent
+levels of clang.
+
+=== Known Issues
+==== https://bugs.linaro.org/show_bug.cgi?id=3998[Bug 3998]
+IPsec extended sequence number support is missing
+
+==== https://bugs.linaro.org/show_bug.cgi?id=4014[Bug 4014]
+Separate IP ID allocation for transport and tunnel mode SAs may cause
+duplicate IDs
+
+==== https://bugs.linaro.org/show_bug.cgi?id=4018[Bug 4018]
+Unexpected IV causes IPsec API validation to fail
+
+==== https://bugs.linaro.org/show_bug.cgi?id=4040[Bug 4040]
+Clang build fails on Ubuntu 18.04
+
== OpenDataPlane (1.19.0.2)
=== Summary of Changes
ODP v1.19.0.2 is the second service update for the Tiger Moth release. It
diff --git a/DEPENDENCIES b/DEPENDENCIES
index 6b345b9c3..10a9861ca 100644
--- a/DEPENDENCIES
+++ b/DEPENDENCIES
@@ -164,7 +164,14 @@ Prerequisites for building the OpenDataPlane (ODP) API
3.4 DPDK packet I/O support (optional)
- Use DPDK for ODP packet I/O.
+ Use DPDK for ODP packet I/O. Currently supported DPDK versions are v17.11,
+ v18.02, v18.05 and v18.11.
+
+ Note: only packet I/O is accelerated with DPDK. Use
+ https://github.com/Linaro/odp-dpdk.git
+ for the fully accelerated odp-dpdk platform.
+
+3.4.1 DPDK pktio requirements
DPDK pktio adds a dependency on the NUMA library.
# Debian/Ubuntu
@@ -172,17 +179,30 @@ Prerequisites for building the OpenDataPlane (ODP) API
# CentOS/RedHat/Fedora
$ sudo yum install numactl-devel
- Note: only packet I/O is accelerated with DPDK. Use
- https://github.com/Linaro/odp-dpdk.git
- for fully accelerated odp dpdk platform.
+3.4.2 Native DPDK install
+ # Debian/Ubuntu starting from 18.04
+ $ sudo apt-get install dpdk-dev
+
+3.4.3 Build DPDK from source
+ git clone --branch=17.11 http://dpdk.org/git/dpdk-stable dpdk
+
+ # Make and edit the DPDK configuration
+ TARGET="x86_64-native-linuxapp-gcc"
+ make config T=${TARGET} O=${TARGET}
+ pushd ${TARGET}
-3.4.1 Building DPDK and ODP with DPDK pktio support
+ # To use I/O without DPDK-supported NICs, enable the pcap PMD:
+ sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config
+ popd
- DPDK packet I/O has been tested to work with DPDK v17.11.
+ # Build DPDK
+ make build O=${TARGET} EXTRA_CFLAGS="-fPIC"
+ make install O=${TARGET} DESTDIR=${TARGET}
- Follow steps in ./scripts/build-pktio-dpdk
+ # Compile ODP
+ ./configure --with-dpdk-path=`pwd`/dpdk/${TARGET}/usr/local
-3.4.2 Setup system
+3.4.4 Setup system
# Load DPDK modules
$ sudo /sbin/modprobe uio
@@ -194,13 +214,13 @@ Prerequisites for building the OpenDataPlane (ODP) API
512 x 2MB hugepages. All this can be done with the DPDK setup script
(<dpdk-dir>/tools/dpdk-setup.sh).
-3.4.3 Running ODP with DPDK pktio
+3.4.5 Running ODP with DPDK pktio
ODP applications will try use DPDK for packet I/O by default. If some other
I/O type is desired instead, DPDK I/O can be disabled by setting the
environment variable ODP_PKTIO_DISABLE_DPDK.
- DPDK interfaces are accessed using indices. For example, two first DPDK
+ DPDK interfaces are accessed using indexes. For example, the first two DPDK
interfaces can be used with the odp_l2fwd example as follows:
$ cd <odp_dir>
$ sudo ./test/performance/odp_l2fwd -i 0,1 -c 2 -m 0
diff --git a/Makefile.am b/Makefile.am
index 4f3e00208..f651e9cac 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5,18 +5,26 @@ AM_DISTCHECK_CONFIGURE_FLAGS = --enable-user-guides \
if PLATFORM_IS_LINUX_GENERIC
PLATFORM_DIR = platform/linux-generic
+PLATFORM_DUMPCONF_DIR = platform/linux-generic/dumpconfig
PLATFORM_TEST_DIR = platform/linux-generic/test
endif
SUBDIRS = \
include \
$(PLATFORM_DIR) \
+ $(PLATFORM_DUMPCONF_DIR) \
helper \
- test \
- $(PLATFORM_TEST_DIR) \
- helper/test \
- doc \
- example
+ doc
+
+if WITH_EXAMPLES
+SUBDIRS += example
+endif
+
+if WITH_TESTS
+SUBDIRS += test
+SUBDIRS += helper/test
+SUBDIRS += $(PLATFORM_TEST_DIR)
+endif
@DX_RULES@
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf
index 85d5414ba..e00e4a518 100644
--- a/config/odp-linux-generic.conf
+++ b/config/odp-linux-generic.conf
@@ -16,7 +16,42 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.0.1"
+config_file_version = "0.1.6"
+
+# Shared memory options
+shm: {
+ # Number of cached default size huge pages. These pages are allocated
+ # during odp_init_global() and freed back to the kernel in
+ # odp_term_global(). A value of zero means no pages are cached.
+ # No negative values should be used here; they are reserved for future
+ # implementations.
+ #
+ # ODP will reserve as many huge pages as possible, which may be less
+ # than requested here if the system does not have enough huge pages
+ # available.
+ #
+ # When using process mode threads, this value should be set to 0
+ # because the current implementation won't work properly otherwise.
+ num_cached_hp = 0
+
+ # Huge page usage limit in kilobytes. Memory reservations larger than
+ # this value are done using huge pages (if available). Smaller
+ # reservations are done using normal pages to conserve memory.
+ huge_page_limit_kb = 64
+
+ # Amount of memory pre-reserved for ODP_SHM_SINGLE_VA usage in kilobytes
+ single_va_size_kb = 131072
+}
+
+# Pool options
+pool: {
+ # Packet pool options
+ pkt: {
+ # Maximum number of packets per pool. A power of two minus one
+ # results in optimal memory usage (e.g. (256 * 1024) - 1).
+ max_num = 262143
+ }
+}
# DPDK pktio options
pktio_dpdk: {
@@ -49,21 +84,68 @@ queue_basic: {
}
sched_basic: {
- # Priority level spread. Each priority level is spread into multiple
- # scheduler internal queues. A higher spread value typically improves
- # parallelism and thus is better for high thread counts, but causes
- # uneven service level for low thread counts. Typically, optimal
- # value is the number of threads using the scheduler.
+ # Priority level spread
+ #
+ # Each priority level is spread into multiple scheduler internal queues.
+ # This value defines the number of those queues. Minimum value is 1.
+ # Each thread prefers one of the queues over other queues. A higher
+ # spread value typically improves parallelism and thus is better for
+ # high thread counts, but causes uneven service level for low thread
+ # counts. Typically, optimal value is the number of threads using
+ # the scheduler.
prio_spread = 4
- # Default burst sizes for high and low priority queues. The default
- # and higher priority levels are considered as high. Scheduler
- # rounds up number of requested events up to these values. In general,
- # larger requests are not round down. So, larger bursts than these may
- # received when requested. A large burst size improves throughput,
- # but decreases application responsiveness to high priority events
- # due to head of line blocking cause by a burst of low priority
- # events.
- burst_size_hi = 32
- burst_size_low = 16
+ # Weight of the preferred scheduler internal queue
+ #
+ # Each thread prefers one of the internal queues over other queues.
+ # This value controls how many times the preferred queue is polled
+ # between polls of the other internal queues. Minimum value is 1. A higher
+ # value typically improves parallelism as threads work mostly on their
+ # preferred queues, but causes uneven service level for low thread
+ # counts as non-preferred queues are served less often.
+ prio_spread_weight = 63
+
+ # Burst size configuration per priority. The first array element
+ # represents the highest queue priority. The scheduler tries to get
+ # burst_size_default[prio] events from a queue and stashes those that
+ # cannot be passed to the application immediately. More events than the
+ # default burst size may be returned from an application request, but no
+ # more than burst_size_max[prio].
+ #
+ # Large burst sizes improve throughput, but decrease application
+ # responsiveness to higher priority events due to head of line blocking
+ # caused by a burst of lower priority events.
+ burst_size_default = [ 32, 32, 32, 32, 32, 16, 8, 4]
+ burst_size_max = [255, 255, 255, 255, 255, 16, 16, 8]
+
+ # Automatically updated schedule groups
+ #
+ # API specification defines that ODP_SCHED_GROUP_ALL,
+ # _WORKER and _CONTROL are updated automatically. These options can be
+ # used to disable these groups when they are not used. Set value to 0 to disable
+ # a group. Performance may improve when unused groups are disabled.
+ group_enable: {
+ all = 1
+ worker = 1
+ control = 1
+ }
+}
+
+timer: {
+ # Use inline timer implementation
+ #
+ # By default, timer processing is done in background threads (thread per
+ # timer pool). With inline implementation timers are processed on worker
+ # cores instead. When using inline timers the application has to call
+ # odp_schedule() or odp_queue_deq() to trigger timer processing.
+ #
+ # Set to 1 to enable
+ inline = 0
+
+ # Inline timer poll interval
+ #
+ # When set to 1 inline timers are polled during every schedule round.
+ # Increasing the value reduces timer processing overhead while
+ # decreasing accuracy. Ignored when inline timer is not enabled.
+ inline_poll_interval = 10
}
diff --git a/configure.ac b/configure.ac
index 0eacac323..8efb85253 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,9 +3,9 @@ AC_PREREQ([2.5])
# Set correct API version
##########################################################################
m4_define([odpapi_generation_version], [1])
-m4_define([odpapi_major_version], [19])
+m4_define([odpapi_major_version], [20])
m4_define([odpapi_minor_version], [0])
-m4_define([odpapi_point_version], [2])
+m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
[odpapi_generation_version.odpapi_major_version.odpapi_minor_version.odpapi_point_version])
AC_INIT([OpenDataPlane],[odpapi_version],[lng-odp@lists.linaro.org])
@@ -200,6 +200,12 @@ AC_ARG_ENABLE([abi-compat],
abi_compat=no
#if there is no ABI compatibility the .so numbers are meaningless
ODP_LIBSO_VERSION=0:0:0
+ # Do not try -march=native for clang due to possible failures in
+ # clang optimizations
+ $CC --version | grep -q clang
+ if test $? -ne 0; then
+ ODP_CHECK_CFLAG([-march=native])
+ fi
fi])
AM_CONDITIONAL(ODP_ABI_COMPAT, [test "x$ODP_ABI_COMPAT" = "x1"])
@@ -283,6 +289,8 @@ AM_CONDITIONAL([ARCH_IS_X86_64], [test "x${ARCH_ABI}" = "xx86_64-linux"])
DX_HTML_FEATURE(ON)
DX_PDF_FEATURE(OFF)
DX_PS_FEATURE(OFF)
+DX_ENV_APPEND(WITH_PLATFORM, $with_platform)
+
DX_INIT_DOXYGEN($PACKAGE_NAME,
${srcdir}/doc/application-api-guide/Doxyfile,
${builddir}/doc/application-api-guide/output,
@@ -381,11 +389,13 @@ AC_MSG_RESULT([
includedir: ${includedir}
testdir: ${testdir}
WITH_ARCH: ${WITH_ARCH}
+ with_openssl: ${with_openssl}
cc: ${CC}
cc version: ${CC_VERSION}
cppflags: ${CPPFLAGS}
cflags: ${CFLAGS}
+ ld: ${LD}
ldflags: ${LDFLAGS}
libs: ${LIBS}
defs: ${DEFS}
@@ -396,12 +406,17 @@ AC_MSG_RESULT([
debug: ${enable_debug}
cunit: ${cunit_support}
static tests linkage: ${enable_static_applications}
+ with_examples: ${with_examples}
+ with_tests: ${with_tests}
test_vald: ${test_vald}
test_perf: ${test_perf}
- test_perf_proc: ${test_perf_proc}
test_cpp: ${test_cpp}
test_helper: ${test_helper}
test_example: ${test_example}
user_guides: ${user_guides}
pcapng: ${have_pcapng}
])
+
+AS_IF([test "${with_openssl}" = "no"],
+ [AC_MSG_WARN([Strong cryptography is not available without OpenSSL])]
+ )
diff --git a/doc/application-api-guide/Doxyfile b/doc/application-api-guide/Doxyfile
index 00c18df2e..b101153d1 100644
--- a/doc/application-api-guide/Doxyfile
+++ b/doc/application-api-guide/Doxyfile
@@ -4,6 +4,7 @@ PROJECT_NAME = "API Reference Manual"
PROJECT_NUMBER = $(VERSION)
PROJECT_LOGO = $(SRCDIR)/doc/images/ODP-Logo-HQ.svg
INPUT = $(SRCDIR)/doc/application-api-guide \
+ include \
$(SRCDIR)/include
EXAMPLE_PATH = $(SRCDIR)/example $(SRCDIR)
WARNINGS = NO
diff --git a/doc/implementers-guide/implementers-guide.adoc b/doc/implementers-guide/implementers-guide.adoc
index f0ba03369..36b143b9e 100644
--- a/doc/implementers-guide/implementers-guide.adoc
+++ b/doc/implementers-guide/implementers-guide.adoc
@@ -684,4 +684,167 @@ creating a managed binary is itself a secondary compilation and optimization
step. The difference is that performing this step is a system rather than a
developer responsibility.
+== Configuration
+Each ODP implementation will choose various sizes, limits, and similar
+internal parameters that are well matched to its design and platform
+capabilities. However, it is often useful to expose at least some of these
+parameters and allow users to select specific values to use either
+at compile time or runtime. This section discusses options for doing this,
+using the configuration options offered in the `odp-linux` reference
+implementation as an example.
+
+=== Static Configuration
+Static configuration requires the ODP implementation to be recompiled. The
+reasons for choosing static configuration vary but typically involve either
+design simplicity (_e.g.,_ arrays can be statically configured) or
+performance considerations (_e.g.,_ whether to include optional debug code).
+Two approaches to static configuration are `#define` statements and the use
+of autotools.
+
+==== `#define` Statements
+Certain implementation limits can best be represented by `#define` statements
+that are set at compile time. Examples of this can be seen in the `odp-linux`
+reference implementation in the file
+`platform/linux-generic/include/odp_config_internal.h`.
+
+.Compile-time implementation limits (excerpt)
+[source,c]
+-----
+/*
+ * Maximum number of CPUs supported. Maximum CPU ID is CONFIG_NUM_CPU - 1.
+ */
+#define CONFIG_NUM_CPU 256
+
+/*
+ * Maximum number of pools
+ */
+#define ODP_CONFIG_POOLS 64
+-----
+
+Here two fundamental limits are defined: the number of CPUs supported and
+the maximum number of pools that can be created via the `odp_pool_create()`
+API. By using `#define`, the implementation can configure supporting
+structures (bit strings and arrays) statically, and can also perform static
+compile-time validation/consistency checks using facilities like
+`ODP_STATIC_ASSERT()`. This results in more efficient code since these limits
+need not be computed at runtime.
+
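+These limits can also be checked against each other at build time. The
+following sketch is illustrative only (it is not actual `odp-linux` code,
+and the derived limit and its name are hypothetical); it shows how
+`ODP_STATIC_ASSERT()` can turn an inconsistent configuration into a
+compile-time error:
+
+.Compile-time consistency check (illustrative sketch)
+[source,c]
+-----
+#include <stdint.h>
+
+/* Hypothetical derived limit: one stash slot per CPU per pool */
+#define CONFIG_STASH_SLOTS (CONFIG_NUM_CPU * ODP_CONFIG_POOLS)
+
+/* Fail the build if the derived limit no longer fits a 16 bit index,
+ * rather than failing mysteriously at runtime */
+ODP_STATIC_ASSERT(CONFIG_STASH_SLOTS <= UINT16_MAX,
+		  "Too many stash slots for a 16 bit index");
+-----
+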
+Users are able to change these limits (potentially within documented absolute
+bounds) by changing the relevant source file and recompiling that ODP
+implementation.
+
+==== Use of `autotools configure`
+The ODP reference implementation, like many open source projects, makes use of
+https://www.gnu.org/software/automake/faq/autotools-faq.html[autotools]
+to simplify project configuration and support for various build targets.
+These same tools permit compile-time configuration options to be specified
+without requiring changes to source files.
+
+In addition to the "standard" `configure` options for specifying prefixes,
+target install paths, etc., the `odp-linux` reference implementation supports
+a large number of static configuration options that control how ODP is
+built. Use the `./configure --help` command for a complete list. Here we
+discuss just a few for illustrative purposes:
+
+`--enable-debug`::
+The ODP API specification simply says that "results are undefined" when
+invalid parameters are passed to ODP APIs. This is done for performance
+reasons so that implementations don't need to insert extraneous parameter
+checking that would impact runtime performance in fast-path operations. While
+this is a reasonable trade-off, it can complicate application debugging.
+To address this, the ODP implementation makes use of the `ODP_ASSERT()`
+macro, which compiles away unless the `--enable-debug` configuration option
+was specified (see the sketch following this list). Running with a debug
+build of ODP trades off performance for improved parameter/bounds checking
+to make application debugging easier.
+
+`--enable-user-guides`::
+By default, building ODP only builds the code. When this option is specified,
+the supporting user documentation (including this file) is also built.
+
+`--disable-abi-compat`::
+By default ODP builds with support for the ODP ABI, which permits application
+binary portability across different ODP implementations targeting the same
+Instruction Set Architecture (ISA). While this is useful in cloud/host
+environments, it does involve some performance cost to provide binary
+compatibility. For embedded use of ODP, disabling ABI compatibility means
+tighter code can be generated by inlining more of the ODP implementation
+into the calling application code. When built without ABI compatibility,
+moving an application to another ODP implementation requires that the
+application be recompiled. For most embedded uses this is a reasonable
+trade-off in exchange for better application performance on a specific
+target platform.
+
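+The pattern behind such a debug-only assertion is simple and worth
+illustrating. The following sketch is hypothetical (it is not the actual
+`odp-linux` definition of `ODP_ASSERT()`); it assumes the build system
+defines `MY_DEBUG` to 1 when debugging is enabled and to 0 otherwise:
+
+.Debug-only assertion macro (illustrative sketch)
+[source,c]
+-----
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifndef MY_DEBUG
+#define MY_DEBUG 0 /* set to 1 by an --enable-debug style option */
+#endif
+
+/* When MY_DEBUG is 0, the condition is dead code and the compiler
+ * removes the whole check, so release builds pay nothing for it */
+#define MY_ASSERT(cond) \
+	do { \
+		if (MY_DEBUG && !(cond)) { \
+			fprintf(stderr, "assert failed: %s\n", #cond); \
+			abort(); \
+		} \
+	} while (0)
+-----
+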
+=== Dynamic Configuration
+While compile-time limits have the advantage of simplicity, they are also
+not very flexible, since changing them requires the ODP implementation to be
+rebuilt. The alternative is for implementations to support _dynamic
+configuration_ that enables ODP to change implementation behavior without
+source changes or recompilation.
+
+The options for dynamic configuration include command line parameters,
+environment variables, and configuration files.
+
+==== Command line parameters
+Applications that accept a command line passed to their `main()` function can
+use this to tailor how they use ODP. This may involve self-imposed limits
+driven by the application, or command line arguments may supply parameters
+that are passed to ODP initialization via the `odp_init_global()` API. The
+syntax of that API is:
+[source,c]
+-----
+int odp_init_global(odp_instance_t *instance,
+ const odp_init_t *params,
+ const odp_platform_init_t *platform_params);
+-----
+Here the `odp_init_t` struct is used to pass platform-independent parameters
+that control ODP behavior, while the `odp_platform_init_t` struct is used to
+pass platform-specific parameters. The `odp-linux` reference platform does
+not make use of platform-specific parameters; the `odp-dpdk` reference
+implementation, however, uses them to allow applications to pass DPDK
+initialization parameters.
+
+ODP itself uses the `odp_init_t` parameters to allow applications to
+override the default logging and abort functions. These routines are called
+on behalf of the ODP implementation, allowing ODP to interoperate with
+application-defined logging and error handling facilities.
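+
+As a minimal sketch of this mechanism, the following hypothetical
+application routes ODP log output to its own logger through the `log_fn`
+member of `odp_init_t` (the logger itself and its behavior are assumptions,
+not part of any ODP implementation):
+
+.Overriding the ODP log function (illustrative sketch)
+[source,c]
+-----
+#include <odp_api.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Hypothetical application logger, shaped to match odp_log_func_t */
+static int my_log(odp_log_level_t level, const char *fmt, ...)
+{
+	va_list args;
+	int n;
+
+	(void)level; /* a real logger would map ODP levels to its own */
+	va_start(args, fmt);
+	n = vfprintf(stderr, fmt, args);
+	va_end(args);
+	return n;
+}
+
+int main(void)
+{
+	odp_instance_t instance;
+	odp_init_t param;
+
+	odp_init_param_init(&param);
+	param.log_fn = my_log; /* ODP log output now goes to my_log() */
+
+	if (odp_init_global(&instance, &param, NULL))
+		return 1;
+
+	/* ... application body ... */
+
+	odp_term_global(instance);
+	return 0;
+}
+-----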
+
+==== Environment variables
+Linux environment variables set via the shell provide a convenient means of
+passing dynamic configuration values. Each ODP implementation defines which
+environment variables it looks for and how they are used. For example, the
+`odp-dpdk` implementation uses the variable `ODP_PLATFORM_PARAMS` as an
+alternate means of passing DPDK initialization parameters.
+
+Another important environment variable that ODP uses is `ODP_CONFIG_FILE`,
+which specifies the file path of a _configuration override file_, as
+described in the next section.
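+
+For example, an application (or its launch script) can select an override
+file before ODP is initialized. In the following sketch the file path is
+hypothetical:
+
+.Selecting an override configuration file (illustrative sketch)
+[source,c]
+-----
+#include <stdlib.h>
+#include <odp_api.h>
+
+int start_odp(odp_instance_t *instance)
+{
+	/* Must be set before odp_init_global() so that ODP reads the
+	 * override file during global initialization */
+	if (setenv("ODP_CONFIG_FILE", "/etc/myapp/odp-override.conf", 1))
+		return -1;
+
+	return odp_init_global(instance, NULL, NULL);
+}
+-----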
+
+==== Configuration files
+The https://hyperrealm.github.io/libconfig/[libconfig] library provides a
+standard set of APIs and tools for parsing configuration files. ODP uses this
+to provide a range of dynamic configuration options that users may
+wish to specify.
+
+ODP uses a _base configuration file_ that contains system-wide defaults, and
+is located in the `config/odp-linux-generic.conf` file within the ODP
+distribution. This specifies a range of overridable configuration options that
+control things like shared memory usage, queue and scheduler limits and
+tuning parameters, timer processing options, and I/O parameters for various
+pktio classes.
+
+While users of ODP may modify this base file before building ODP, they can
+also supply an override configuration file that sets specific values of
+interest while leaving other parameters set to their defaults as defined by
+the base configuration file. As noted earlier, the `ODP_CONFIG_FILE`
+environment variable points to the override file to be used.
+
+=== Summary
+There is a place for both static and dynamic configuration in any ODP
+implementation. This section described some of the most common mechanisms
+and discussed how the ODP-supplied reference implementations make use of
+them.
+Other ODP implementations are free to copy and/or build on these, or use
+whatever other mechanisms are native to the platforms supported by those ODP
+implementations.
+
include::../glossary.adoc[]
diff --git a/doc/platform-api-guide/Doxyfile b/doc/platform-api-guide/Doxyfile
index d716b4a3c..02e325cc6 100644
--- a/doc/platform-api-guide/Doxyfile
+++ b/doc/platform-api-guide/Doxyfile
@@ -5,8 +5,8 @@ PROJECT_NUMBER = $(VERSION)
PROJECT_LOGO = $(SRCDIR)/doc/images/ODP-Logo-HQ.svg
INPUT = $(SRCDIR)/doc/application-api-guide \
$(SRCDIR)/doc/platform-api-guide \
+ include/odp/api \
$(SRCDIR)/include/odp/api \
$(SRCDIR)/platform/$(WITH_PLATFORM)/doc \
- $(SRCDIR)/platform/$(WITH_PLATFORM)/include/odp/api \
- $(SRCDIR)/platform/$(WITH_PLATFORM)/arch/$(WITH_ARCH)
+ $(SRCDIR)/platform/$(WITH_PLATFORM)/include/odp/api
EXAMPLE_PATH = $(SRCDIR)/example $(SRCDIR)/platform $(SRCDIR)
diff --git a/doc/users-guide/Makefile.am b/doc/users-guide/Makefile.am
index 6b2e818d0..f5386dfa9 100644
--- a/doc/users-guide/Makefile.am
+++ b/doc/users-guide/Makefile.am
@@ -2,6 +2,7 @@ include ../Makefile.inc
SRC = users-guide.adoc \
users-guide-cls.adoc \
+ users-guide-comp.adoc \
users-guide-crypto.adoc \
users-guide-ipsec.adoc \
users-guide-packet.adoc \
diff --git a/doc/users-guide/users-guide-comp.adoc b/doc/users-guide/users-guide-comp.adoc
new file mode 100644
index 000000000..11a39a0c2
--- /dev/null
+++ b/doc/users-guide/users-guide-comp.adoc
@@ -0,0 +1,168 @@
+== Compression services
+ODP provides APIs to perform compression and decompression operations required
+by applications. ODP compression APIs are session based and provide
+compression algorithm offload services, with and without associated
+integrity hashing. This section covers the main compression APIs.
+
+ODP provides support for the following compression algorithms:
+
+`ODP_COMP_ALG_NONE`::
+The null compression algorithm. Used for testing as well as to
+specify hash-only operations.
+`ODP_COMP_ALG_DEFLATE`::
+The deflate compression algorithm specified by
+https://www.ietf.org/rfc/rfc1951.txt[RFC 1951].
+`ODP_COMP_ALG_ZLIB`::
+The ZLIB compression algorithm specified by
+https://www.ietf.org/rfc/rfc1950.txt[RFC 1950].
+`ODP_COMP_ALG_LZS`::
+The LZS compression algorithm as specified by ANSI X3.241.
+
+The following hash algorithms are also defined to be used in conjunction
+with these compression algorithms:
+
+`ODP_COMP_HASH_ALG_NONE`::
+A dummy value specifying that no associated hashing is to be performed.
+`ODP_COMP_HASH_ALG_SHA1`::
+SHA-1 hashing with a 160-bit digest length.
+`ODP_COMP_HASH_ALG_SHA256`::
+SHA-2 hashing with a 256-bit digest length.
+
+=== Compression Sessions
+ODP compression services are session based and operate on input packets and
+deliver output packets. A compression session (`odp_comp_session_t`) provides
+the context for controlling the operations performed on packets. All of the
+packets processed by a session share the parameters that define the
+session.
+
+ODP supports synchronous and asynchronous compression sessions. For
+asynchronous sessions, the output of a compression operation is posted to
+the completion queue defined in the session parameters.
+
+Other session parameters include the type of operation (compression or
+decompression), the operating mode (synchronous or asynchronous), the
+compression and hashing algorithms to be used, as well as any parameters
+needed by those algorithms to configure them. For asynchronous compression
+sessions, the application also specifies whether queue order must be
+maintained. Additional throughput may be achieved in some implementations if
+strict ordering is not required.
+
+The parameters that describe the characteristics of a compression session
+are encoded in the `odp_comp_session_param_t` struct that is passed to the
+`odp_comp_session_create()` API. A successful call returns an
+`odp_comp_session_t` handle that is then used as an input parameter to
+compression operation calls.
+
+When an application is finished with a compression session, the
+`odp_comp_session_destroy()` API is used to release the resources
+associated with an `odp_comp_session_t`.
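+
+A minimal session-setup sketch follows. It assumes the parameter field names
+described above (`op`, `mode`, `comp_algo`, `hash_algo`, `compl_queue`,
+`packet_order`) and an application-created completion queue; consult your
+platform's `comp.h` for the authoritative definitions:
+
+.Creating an asynchronous compression session (illustrative sketch)
+[source,c]
+-----
+#include <odp_api.h>
+
+static odp_comp_session_t make_session(odp_queue_t compl_queue)
+{
+	odp_comp_session_param_t param;
+
+	odp_comp_session_param_init(&param);
+	param.op = ODP_COMP_OP_COMPRESS;     /* compress, not decompress */
+	param.mode = ODP_COMP_OP_MODE_ASYNC; /* results via compl_queue */
+	param.comp_algo = ODP_COMP_ALG_DEFLATE;
+	param.hash_algo = ODP_COMP_HASH_ALG_NONE;
+	param.compl_queue = compl_queue;
+	param.packet_order = 1;              /* keep results in order */
+
+	/* Returns ODP_COMP_SESSION_INVALID on failure */
+	return odp_comp_session_create(&param);
+}
+-----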
+
+=== Compression operations
+After session creation, a compression operation can be applied to a packet
+in one of two ways, synchronously or asynchronously, depending on how the
+session was created.
+
+==== Synchronous compression operations
+Synchronous compression operations take the following form:
+
+.Invoking synchronous compression operations
+[source,c]
+-----
+int odp_comp_op(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+ int num_pkt, const odp_comp_packet_op_param_t param[]);
+-----
+An input packet array is compressed/decompressed into a supplied output
+packet array under the control of a supplied parameter struct.
+
+The supplied `odp_comp_packet_op_param_t` struct looks as follows:
+
+.ODP compression parameter structure
+[source,c]
+-----
+/**
+ * Compression per packet operation parameters
+ */
+typedef struct odp_comp_packet_op_param_t {
+ /** Session handle */
+ odp_comp_session_t session;
+
+	/** Input data range to process, where:
+	 *
+	 * offset - starting offset
+	 * length - length of data for compression operation
+	 */
+ odp_packet_data_range_t in_data_range;
+
+	/** Output packet data range.
+	 * Indicates where the processed packet will be written, where:
+	 *
+	 * offset - starting offset
+	 * length - length of buffer available for output
+	 *
+	 * Output packet data is not modified outside of this provided data
+	 * range. If the output data length is not sufficient for the
+	 * compression operation, an ODP_COMP_STATUS_OUT_OF_SPACE_TERM error
+	 * will occur.
+	 */
+ odp_packet_data_range_t out_data_range;
+} odp_comp_packet_op_param_t;
+-----
+Note that this struct identifies the session used to control the operation
+and specifies the input and output packet data ranges to be used for the
+operation. On input, the output data range must be sized large enough to
+contain the result of the operation, to avoid an out-of-space error. On
+output, this range is updated to reflect the actual data written. This
+information can then be used to trim off any excess padding before
+continuing processing of the output packet(s).
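+
+Putting this together, a minimal synchronous usage sketch looks as follows.
+It assumes `session` was created in `ODP_COMP_OP_MODE_SYNC` mode, that
+`pkt_out` was allocated large enough to hold the result, and that the return
+value is the number of packets processed:
+
+.Synchronously compressing one packet (illustrative sketch)
+[source,c]
+-----
+#include <odp_api.h>
+
+static int compress_one(odp_comp_session_t session,
+			odp_packet_t pkt_in, odp_packet_t pkt_out)
+{
+	odp_comp_packet_op_param_t param;
+
+	param.session = session;
+	param.in_data_range.offset = 0;
+	param.in_data_range.length = odp_packet_len(pkt_in);
+	param.out_data_range.offset = 0;
+	param.out_data_range.length = odp_packet_len(pkt_out);
+
+	if (odp_comp_op(&pkt_in, &pkt_out, 1, &param) != 1)
+		return -1;
+
+	/* param.out_data_range now reflects the bytes actually written */
+	return 0;
+}
+-----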
+
+==== Asynchronous compression operations
+Asynchronous compression operations are invoked with a slightly
+different API:
+
+.Invoking asynchronous compression operations
+[source,c]
+-----
+int odp_comp_op_enq(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+ int num_pkt, const odp_comp_packet_op_param_t param[]);
+-----
+Here the session pointed to by the `odp_comp_packet_op_param_t` indicates
+the completion queue to be used for the operation, so a zero return from
+`odp_comp_op_enq()` means only that the operation was successfully
+initiated.
+
+The session's completion queue can then be polled either directly
+via `odp_queue_deq()` or indirectly via the scheduler. The result is
+presented as an event of type `ODP_EVENT_PACKET` with subtype
+`ODP_EVENT_PACKET_COMP`.
+
+When receiving this event, the `odp_comp_packet_from_event()` API is used to
+convert the event into a usable `odp_packet_t`, and the `odp_comp_result()`
+API is used to retrieve the `odp_comp_packet_result_t` metadata associated
+with this packet. This struct looks as follows:
+
+.Compression output result
+[source,c]
+-----
+/**
+ * Compression packet operation result
+ */
+typedef struct odp_comp_packet_result_t {
+ /** Operation status code */
+ odp_comp_status_t status;
+
+ /** Input packet handle */
+ odp_packet_t pkt_in;
+
+	/** Output packet data range.
+	 * Specifies the offset and length of data resulting from the
+	 * compression operation. When hashing is configured,
+	 * output_data_range.length equals the length of the output data
+	 * plus the length of the digest.
+	 */
+ odp_packet_data_range_t output_data_range;
+} odp_comp_packet_result_t;
+-----
+Note that if the originating `odp_comp_op_enq()` call specified an array of
+input packets, each of these generates a separate result event. The order of
+these events on the completion queue associated with the compression session is
+controlled by the session's `packet_order` flag. If this flag is set, the
+results appear in the same order as the original input list. If not, results
+may be reordered so that each becomes available as soon as possible.
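+
+A minimal completion-handling sketch follows. It assumes the
+`odp_comp_result(&result, pkt)` argument order and the
+`ODP_COMP_STATUS_SUCCESS` status value; both should be checked against your
+platform's `comp.h`:
+
+.Handling a compression completion event (illustrative sketch)
+[source,c]
+-----
+#include <odp_api.h>
+
+static void handle_completion(odp_event_t ev)
+{
+	odp_packet_t pkt;
+	odp_comp_packet_result_t result;
+	odp_event_subtype_t subtype;
+
+	if (odp_event_types(ev, &subtype) != ODP_EVENT_PACKET ||
+	    subtype != ODP_EVENT_PACKET_COMP)
+		return; /* not a compression completion event */
+
+	pkt = odp_comp_packet_from_event(ev);
+
+	if (odp_comp_result(&result, pkt) < 0 ||
+	    result.status != ODP_COMP_STATUS_SUCCESS) {
+		odp_packet_free(pkt);
+		return;
+	}
+
+	/* result.output_data_range gives the offset and length of the
+	 * compressed data (plus the digest, if hashing was configured) */
+}
+-----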
diff --git a/doc/users-guide/users-guide.adoc b/doc/users-guide/users-guide.adoc
index 8ed581f57..dce457ff8 100644
--- a/doc/users-guide/users-guide.adoc
+++ b/doc/users-guide/users-guide.adoc
@@ -1200,6 +1200,8 @@ include::users-guide-crypto.adoc[]
include::users-guide-ipsec.adoc[]
+include::users-guide-comp.adoc[]
+
include::users-guide-tm.adoc[]
include::users-guide-cls.adoc[]
diff --git a/example/classifier/odp_classifier.c b/example/classifier/odp_classifier.c
index ca1219b70..274ffaf41 100644
--- a/example/classifier/odp_classifier.c
+++ b/example/classifier/odp_classifier.c
@@ -73,6 +73,7 @@ typedef struct {
unsigned int cpu_count; /**< Number of CPUs to use */
uint32_t time; /**< Number of seconds to run */
char *if_name; /**< pointer to interface names */
+ int shutdown; /**< Shutdown threads if !0 */
} appl_args_t;
enum packet_mode {
@@ -80,8 +81,6 @@ enum packet_mode {
APPL_MODE_REPLY /**< Packet is sent back */
};
-static int shutdown; /**< Shutdown threads if !0 */
-
/* helper funcs */
static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len);
static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len);
@@ -272,7 +271,7 @@ static int pktio_receive_thread(void *arg)
for (;;) {
odp_pktio_t pktio_tmp;
- if (shutdown)
+ if (appl->shutdown)
break;
/* Use schedule to get buf from any input queue */
@@ -469,6 +468,7 @@ static void configure_cos(odp_cos_t default_cos, appl_args_t *args)
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
odp_pool_t pool;
int num_workers;
@@ -482,10 +482,21 @@ int main(int argc, char *argv[])
odp_shm_t shm;
int ret;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -545,6 +556,9 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/* odp_pool_print(pool); */
odp_atomic_init_u64(&args->total_packets, 0);
@@ -574,7 +588,7 @@ int main(int argc, char *argv[])
print_cls_statistics(args);
odp_pktio_stop(pktio);
- shutdown = 1;
+ args->shutdown = 1;
odph_odpthreads_join(thread_tbl);
for (i = 0; i < args->policy_count; i++) {
@@ -803,9 +817,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:t:i:p:m:t:h";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->cpu_count = 1; /* Use one worker by default */
while (1) {
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index 0785e3ec8..bd6af7955 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -15,6 +15,7 @@
#include <unistd.h>
#include <inttypes.h>
#include <sys/time.h>
+#include <signal.h>
#include <example_debug.h>
@@ -31,6 +32,7 @@
#define MAX_UDP_TX_BURST 512
#define DEFAULT_RX_BURST 32
#define MAX_RX_BURST 512
+#define STATS_INTERVAL 10 /* Interval between stats prints (sec) */
#define APPL_MODE_UDP 0 /**< UDP mode */
#define APPL_MODE_PING 1 /**< ping mode */
@@ -132,10 +134,6 @@ typedef struct {
} rx;
};
odp_pool_t pool; /**< Pool for packet IO */
- odp_timer_pool_t tp; /**< Timer pool handle */
- odp_queue_t tq; /**< Queue for timeouts */
- odp_timer_t tim; /**< Timer handle */
- odp_timeout_t tmo_ev; /**< Timeout event */
int mode; /**< Thread mode */
} thread_args_t;
@@ -151,14 +149,13 @@ typedef struct {
int thread_cnt;
int tx_burst_size;
int rx_burst_size;
+ /** Barrier to sync threads execution */
+ odp_barrier_t barrier;
} args_t;
/** Global pointer to args */
static args_t *args;
-/** Barrier to sync threads execution */
-static odp_barrier_t barrier;
-
/** Packet processing function types */
typedef odp_packet_t (*setup_pkt_ref_fn_t)(odp_pool_t,
odp_pktout_config_opt_t *);
@@ -172,25 +169,12 @@ static void usage(char *progname);
static int scan_ip(char *buf, unsigned int *paddr);
static void print_global_stats(int num_workers);
-/**
- * Sleep for the specified amount of milliseconds
- * Use ODP timer, busy wait until timer expired and timeout event received
- */
-static void millisleep(uint32_t ms,
- odp_timer_pool_t tp,
- odp_timer_t tim,
- odp_queue_t q,
- odp_timeout_t tmo)
+static void sig_handler(int signo ODP_UNUSED)
{
- uint64_t ticks = odp_timer_ns_to_tick(tp, 1000000ULL * ms);
- odp_event_t ev = odp_timeout_to_event(tmo);
- int rc = odp_timer_set_rel(tim, ticks, &ev);
-
- if (rc != ODP_TIMER_SUCCESS)
- EXAMPLE_ABORT("odp_timer_set_rel() failed\n");
- /* Spin waiting for timeout event */
- while ((ev = odp_queue_deq(q)) == ODP_EVENT_INVALID)
- (void)0;
+ int i;
+
+ for (i = 0; i < args->thread_cnt; i++)
+ args->thread[i].stop = 1;
}
/**
@@ -758,7 +742,7 @@ static int gen_send_thread(void *arg)
printf(" [%02i] created mode: SEND\n", thr);
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&args->barrier);
for (;;) {
if (thr_args->stop)
@@ -801,17 +785,9 @@ static int gen_send_thread(void *arg)
counters->ctr_pkt_snd += pkt_array_size - burst_size;
- if (args->appl.interval != 0) {
- printf(" [%02i] send pkt no:%ju seq %ju\n",
- thr,
- counters->ctr_seq,
- counters->ctr_seq % 0xffff);
- millisleep(args->appl.interval,
- thr_args->tp,
- thr_args->tim,
- thr_args->tq,
- thr_args->tmo_ev);
- }
+ if (args->appl.interval != 0)
+ odp_time_wait_ns((uint64_t)args->appl.interval *
+ ODP_TIME_MSEC_IN_NS);
counters->ctr_seq += seq_step;
}
@@ -929,7 +905,7 @@ static int gen_recv_thread(void *arg)
burst_size = args->rx_burst_size;
printf(" [%02i] created mode: RECEIVE SCHEDULER\n", thr);
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&args->barrier);
for (;;) {
if (thr_args->stop)
@@ -980,7 +956,7 @@ static int gen_recv_direct_thread(void *arg)
burst_size = args->rx_burst_size;
printf(" [%02i] created mode: RECEIVE\n", thr);
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&args->barrier);
for (;;) {
if (thr_args->stop)
@@ -1043,10 +1019,10 @@ static void print_global_stats(int num_workers)
uint64_t pkts_rcv = 0, pkts_rcv_prev = 0;
uint64_t pps_rcv = 0, maximum_pps_rcv = 0;
uint64_t stall, pkts_snd_drop;
- int verbose_interval = 20, i;
+ int verbose_interval = STATS_INTERVAL, i;
odp_thrmask_t thrd_mask;
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&args->barrier);
wait = odp_time_local_from_ns(verbose_interval * ODP_TIME_SEC_IN_NS);
next = odp_time_sum(odp_time_local(), wait);
@@ -1123,6 +1099,7 @@ static void print_global_stats(int num_workers)
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
odp_pool_t pool;
int num_workers;
@@ -1132,18 +1109,27 @@ int main(int argc, char *argv[])
odp_cpumask_t cpumask;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_pool_param_t params;
- odp_timer_pool_param_t tparams;
- odp_timer_pool_t tp;
- odp_pool_t tmop;
- odp_queue_t tq;
- odp_event_t ev;
interface_t *ifs;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
- odp_timer_capability_t timer_capa;
+
+ /* Signal handler has to be registered before global init in case ODP
+ * implementation creates internal threads/processes. */
+ signal(SIGINT, sig_handler);
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1213,6 +1199,9 @@ int main(int argc, char *argv[])
args->rx_burst_size = args->appl.rx_burst;
}
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/* Create packet pool */
odp_pool_param_init(&params);
params.pkt.seg_len = POOL_PKT_LEN;
@@ -1228,36 +1217,6 @@ int main(int argc, char *argv[])
}
odp_pool_print(pool);
- /* Create timer pool */
- if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) {
- EXAMPLE_ERR("Error: get timer capacity failed.\n");
- exit(EXIT_FAILURE);
- }
- tparams.res_ns = MAX(1 * ODP_TIME_MSEC_IN_NS,
- timer_capa.highest_res_ns);
- tparams.min_tmo = 0;
- tparams.max_tmo = 10000 * ODP_TIME_SEC_IN_NS;
- tparams.num_timers = num_workers; /* One timer per worker */
- tparams.priv = 0; /* Shared */
- tparams.clk_src = ODP_CLOCK_CPU;
- tp = odp_timer_pool_create("timer_pool", &tparams);
- if (tp == ODP_TIMER_POOL_INVALID) {
- EXAMPLE_ERR("Timer pool create failed.\n");
- exit(EXIT_FAILURE);
- }
- odp_timer_pool_start();
-
- /* Create timeout pool */
- odp_pool_param_init(&params);
- params.tmo.num = tparams.num_timers; /* One timeout per timer */
- params.type = ODP_POOL_TIMEOUT;
-
- tmop = odp_pool_create("timeout_pool", &params);
- if (tmop == ODP_POOL_INVALID) {
- EXAMPLE_ERR("Error: timeout pool create failed.\n");
- exit(EXIT_FAILURE);
- }
-
ifs = malloc(sizeof(interface_t) * args->appl.if_count);
for (i = 0; i < args->appl.if_count; ++i) {
@@ -1293,7 +1252,7 @@ int main(int argc, char *argv[])
thr_params.instance = instance;
/* num workers + print thread */
- odp_barrier_init(&barrier, num_workers + 1);
+ odp_barrier_init(&args->barrier, num_workers + 1);
if (args->appl.mode == APPL_MODE_PING) {
odp_cpumask_t cpu_mask;
@@ -1304,27 +1263,10 @@ int main(int argc, char *argv[])
cpu_first = odp_cpumask_first(&cpumask);
odp_cpumask_set(&cpu_mask, cpu_first);
- tq = odp_queue_create("", NULL);
- if (tq == ODP_QUEUE_INVALID) {
- EXAMPLE_ERR("queue_create failed\n");
- abort();
- }
thr_args = &args->thread[PING_THR_RX];
if (!args->appl.sched)
thr_args->rx.pktin = ifs[0].pktin[0];
thr_args->pool = pool;
- thr_args->tp = tp;
- thr_args->tq = tq;
- thr_args->tim = odp_timer_alloc(tp, tq, NULL);
- if (thr_args->tim == ODP_TIMER_INVALID) {
- EXAMPLE_ERR("timer_alloc failed\n");
- abort();
- }
- thr_args->tmo_ev = odp_timeout_alloc(tmop);
- if (thr_args->tmo_ev == ODP_TIMEOUT_INVALID) {
- EXAMPLE_ERR("timeout_alloc failed\n");
- abort();
- }
thr_args->mode = args->appl.mode;
memset(&thr_params, 0, sizeof(thr_params));
@@ -1339,27 +1281,10 @@ int main(int argc, char *argv[])
odph_odpthreads_create(&thread_tbl[PING_THR_RX],
&cpu_mask, &thr_params);
- tq = odp_queue_create("", NULL);
- if (tq == ODP_QUEUE_INVALID) {
- EXAMPLE_ERR("queue_create failed\n");
- abort();
- }
thr_args = &args->thread[PING_THR_TX];
thr_args->tx.pktout = ifs[0].pktout[0];
thr_args->tx.pktout_cfg = &ifs[0].config.pktout;
thr_args->pool = pool;
- thr_args->tp = tp;
- thr_args->tq = tq;
- thr_args->tim = odp_timer_alloc(tp, tq, NULL);
- if (thr_args->tim == ODP_TIMER_INVALID) {
- EXAMPLE_ERR("timer_alloc failed\n");
- abort();
- }
- thr_args->tmo_ev = odp_timeout_alloc(tmop);
- if (thr_args->tmo_ev == ODP_TIMEOUT_INVALID) {
- EXAMPLE_ERR("timeout_alloc failed\n");
- abort();
- }
thr_args->mode = args->appl.mode;
cpu_next = odp_cpumask_next(&cpumask, cpu_first);
odp_cpumask_zero(&cpu_mask);
@@ -1428,24 +1353,7 @@ int main(int argc, char *argv[])
args->thread[i].counters.ctr_seq = start_seq;
}
- tq = odp_queue_create("", NULL);
- if (tq == ODP_QUEUE_INVALID) {
- EXAMPLE_ERR("queue_create failed\n");
- abort();
- }
args->thread[i].pool = pool;
- args->thread[i].tp = tp;
- args->thread[i].tq = tq;
- args->thread[i].tim = odp_timer_alloc(tp, tq, NULL);
- if (args->thread[i].tim == ODP_TIMER_INVALID) {
- EXAMPLE_ERR("timer_alloc failed\n");
- abort();
- }
- args->thread[i].tmo_ev = odp_timeout_alloc(tmop);
- if (args->thread[i].tmo_ev == ODP_TIMEOUT_INVALID) {
- EXAMPLE_ERR("timeout_alloc failed\n");
- abort();
- }
args->thread[i].mode = args->appl.mode;
if (args->appl.mode == APPL_MODE_UDP) {
@@ -1485,22 +1393,6 @@ int main(int argc, char *argv[])
for (i = 0; i < args->appl.if_count; ++i)
odp_pktio_stop(ifs[i].pktio);
- for (i = 0; i < num_workers; ++i) {
- odp_timer_cancel(args->thread[i].tim, &ev);
- odp_timer_free(args->thread[i].tim);
- odp_timeout_free(args->thread[i].tmo_ev);
- }
-
- for (i = 0; i < num_workers; ++i) {
- while (1) {
- ev = odp_queue_deq(args->thread[i].tq);
- if (ev == ODP_EVENT_INVALID)
- break;
- odp_event_free(ev);
- }
- odp_queue_destroy(args->thread[i].tq);
- }
-
for (i = 0; i < args->appl.if_count; ++i)
odp_pktio_close(ifs[i].pktio);
free(ifs);
@@ -1508,9 +1400,6 @@ int main(int argc, char *argv[])
free(args->appl.if_str);
if (0 != odp_pool_destroy(pool))
fprintf(stderr, "unable to destroy pool \"pool\"\n");
- odp_timer_pool_destroy(tp);
- if (0 != odp_pool_destroy(tmop))
- fprintf(stderr, "unable to destroy pool \"tmop\"\n");
if (0 != odp_shm_free(shm))
fprintf(stderr, "unable to free \"shm\"\n");
odp_term_local();
@@ -1563,9 +1452,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+I:a:b:s:d:p:i:m:n:t:w:c:x:he:j:f:k"
":yr:z";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->mode = -1; /* Invalid, must be changed by parsing */
appl_args->number = -1;
appl_args->payload = 56;
diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c
index 2624af5d2..1bbf7a000 100644
--- a/example/ipsec/odp_ipsec.c
+++ b/example/ipsec/odp_ipsec.c
@@ -66,6 +66,8 @@ static int create_stream_db_entry(char *input ODP_UNUSED)
/* maximum number of worker threads */
#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_POLL_QUEUES 256
+
/**
* Parsed command line application arguments
*/
@@ -79,21 +81,30 @@ typedef struct {
} appl_args_t;
/**
- * Grouping of both parsed CL args and thread specific args - alloc together
+ * Grouping of both parsed CL args and global application data
*/
typedef struct {
/** Application (parsed) arguments */
appl_args_t appl;
-} args_t;
+ odp_shm_t shm;
+ odp_pool_t ctx_pool;
+ odp_pool_t out_pool;
+ odp_pool_t pkt_pool;
+ /** ATOMIC queue for IPsec sequence number assignment */
+ odp_queue_t seqnumq;
+ /** ORDERED queue for per packet crypto API completion events */
+ odp_queue_t completionq;
+ /** Synchronize threads before packet processing begins */
+ odp_barrier_t sync_barrier;
+ odp_queue_t poll_queues[MAX_POLL_QUEUES];
+ int num_polled_queues;
+} global_data_t;
/* helper funcs */
static void parse_args(int argc, char *argv[], appl_args_t *appl_args);
static void print_info(char *progname, appl_args_t *appl_args);
static void usage(char *progname);
-/** Global pointer to args */
-static args_t *args;
-
/**
* Buffer pool for packet IO
*/
@@ -101,8 +112,6 @@ static args_t *args;
#define SHM_PKT_POOL_BUF_SIZE 4096
#define SHM_PKT_POOL_SIZE (SHM_PKT_POOL_BUF_COUNT * SHM_PKT_POOL_BUF_SIZE)
-static odp_pool_t pkt_pool = ODP_POOL_INVALID;
-
/**
* Buffer pool for crypto session output packets
*/
@@ -110,17 +119,6 @@ static odp_pool_t pkt_pool = ODP_POOL_INVALID;
#define SHM_OUT_POOL_BUF_SIZE 4096
#define SHM_OUT_POOL_SIZE (SHM_OUT_POOL_BUF_COUNT * SHM_OUT_POOL_BUF_SIZE)
-static odp_pool_t out_pool = ODP_POOL_INVALID;
-
-/** ATOMIC queue for IPsec sequence number assignment */
-static odp_queue_t seqnumq;
-
-/** ORDERED queue (eventually) for per packet crypto API completion events */
-static odp_queue_t completionq;
-
-/** Synchronize threads before packet processing begins */
-static odp_barrier_t sync_barrier;
-
/**
* Packet processing states/steps
*/
@@ -184,7 +182,7 @@ typedef struct {
#define SHM_CTX_POOL_BUF_COUNT (SHM_PKT_POOL_BUF_COUNT + SHM_OUT_POOL_BUF_COUNT)
#define SHM_CTX_POOL_SIZE (SHM_CTX_POOL_BUF_COUNT * SHM_CTX_POOL_BUF_SIZE)
-static odp_pool_t ctx_pool = ODP_POOL_INVALID;
+static global_data_t *global;
/**
* Get per packet processing context from packet buffer
@@ -210,7 +208,7 @@ pkt_ctx_t *get_pkt_ctx_from_pkt(odp_packet_t pkt)
static
pkt_ctx_t *alloc_pkt_ctx(odp_packet_t pkt)
{
- odp_buffer_t ctx_buf = odp_buffer_alloc(ctx_pool);
+ odp_buffer_t ctx_buf = odp_buffer_alloc(global->ctx_pool);
pkt_ctx_t *ctx;
if (odp_unlikely(ODP_BUFFER_INVALID == ctx_buf))
@@ -245,11 +243,6 @@ typedef odp_event_t (*schedule_func_t) (odp_queue_t *);
static queue_create_func_t queue_create;
static schedule_func_t schedule;
-#define MAX_POLL_QUEUES 256
-
-static odp_queue_t poll_queues[MAX_POLL_QUEUES];
-static int num_polled_queues;
-
/**
* odp_queue_create wrapper to enable polling versus scheduling
*/
@@ -275,7 +268,7 @@ odp_queue_t polled_odp_queue_create(const char *name,
my_queue = odp_queue_create(name, &qp);
if (ODP_QUEUE_TYPE_SCHED == type) {
- poll_queues[num_polled_queues++] = my_queue;
+ global->poll_queues[global->num_polled_queues++] = my_queue;
printf("%s: adding %"PRIu64"\n", __func__,
odp_queue_to_u64(my_queue));
}
@@ -298,10 +291,10 @@ odp_event_t polled_odp_schedule_cb(odp_queue_t *from)
int idx = 0;
while (1) {
- if (idx >= num_polled_queues)
+ if (idx >= global->num_polled_queues)
idx = 0;
- odp_queue_t queue = poll_queues[idx++];
+ odp_queue_t queue = global->poll_queues[idx++];
odp_event_t buf;
buf = odp_queue_deq(queue);
@@ -337,8 +330,8 @@ void ipsec_init_pre(void)
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- completionq = queue_create("completion", &qparam);
- if (ODP_QUEUE_INVALID == completionq) {
+ global->completionq = queue_create("completion", &qparam);
+ if (ODP_QUEUE_INVALID == global->completionq) {
EXAMPLE_ERR("Error: completion queue creation failed\n");
exit(EXIT_FAILURE);
}
@@ -348,8 +341,8 @@ void ipsec_init_pre(void)
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- seqnumq = queue_create("seqnum", &qparam);
- if (ODP_QUEUE_INVALID == seqnumq) {
+ global->seqnumq = queue_create("seqnum", &qparam);
+ if (ODP_QUEUE_INVALID == global->seqnumq) {
EXAMPLE_ERR("Error: sequence number queue creation failed\n");
exit(EXIT_FAILURE);
}
@@ -361,9 +354,9 @@ void ipsec_init_pre(void)
params.pkt.num = SHM_PKT_POOL_BUF_COUNT;
params.type = ODP_POOL_PACKET;
- out_pool = odp_pool_create("out_pool", &params);
+ global->out_pool = odp_pool_create("out_pool", &params);
- if (ODP_POOL_INVALID == out_pool) {
+ if (ODP_POOL_INVALID == global->out_pool) {
EXAMPLE_ERR("Error: message pool create failed.\n");
exit(EXIT_FAILURE);
}
@@ -414,8 +407,8 @@ void ipsec_init_post(crypto_api_mode_e api_mode)
tun,
api_mode,
entry->input,
- completionq,
- out_pool)) {
+ global->completionq,
+ global->out_pool)) {
EXAMPLE_ERR("Error: IPSec cache entry failed.\n"
);
exit(EXIT_FAILURE);
@@ -480,7 +473,7 @@ void initialize_intf(char *intf)
/*
* Open a packet IO instance for thread and get default output queue
*/
- pktio = odp_pktio_open(intf, pkt_pool, &pktio_param);
+ pktio = odp_pktio_open(intf, global->pkt_pool, &pktio_param);
if (ODP_PKTIO_INVALID == pktio) {
EXAMPLE_ERR("Error: pktio create failed for %s\n", intf);
exit(EXIT_FAILURE);
@@ -1052,7 +1045,7 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
printf("Pktio thread [%02i] starts\n", thr);
- odp_barrier_wait(&sync_barrier);
+ odp_barrier_wait(&global->sync_barrier);
/* Loop packets */
for (;;) {
@@ -1067,7 +1060,8 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
/* Determine new work versus completion or sequence number */
if (ODP_EVENT_PACKET == odp_event_types(ev, &subtype)) {
pkt = odp_packet_from_event(ev);
- if (seqnumq == dispatchq || completionq == dispatchq) {
+ if (global->seqnumq == dispatchq ||
+ global->completionq == dispatchq) {
ctx = get_pkt_ctx_from_pkt(pkt);
} else {
ctx = alloc_pkt_ctx(pkt);
@@ -1132,7 +1126,7 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
ctx->state = PKT_STATE_TRANSMIT;
} else {
ctx->state = PKT_STATE_IPSEC_OUT_SEQ;
- if (odp_queue_enq(seqnumq, ev))
+ if (odp_queue_enq(global->seqnumq, ev))
rc = PKT_DROP;
}
break;
@@ -1192,6 +1186,7 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
int
main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
int num_workers;
int i;
@@ -1201,6 +1196,7 @@ main(int argc, char *argv[])
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_pool_param_t params;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
/* create by default scheduled queues */
@@ -1213,8 +1209,18 @@ main(int argc, char *argv[])
schedule = polled_odp_schedule_cb;
}
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1226,16 +1232,17 @@ main(int argc, char *argv[])
}
/* Reserve memory for args from shared mem */
- shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE,
- 0);
+ shm = odp_shm_reserve("shm_args", sizeof(global_data_t),
+ ODP_CACHE_LINE_SIZE, 0);
- args = odp_shm_addr(shm);
+ global = odp_shm_addr(shm);
- if (NULL == args) {
+ if (NULL == global) {
EXAMPLE_ERR("Error: shared mem alloc failed.\n");
exit(EXIT_FAILURE);
}
- memset(args, 0, sizeof(*args));
+ memset(global, 0, sizeof(global_data_t));
+ global->shm = shm;
/* Must init our databases before parsing args */
ipsec_init_pre();
@@ -1243,14 +1250,14 @@ main(int argc, char *argv[])
init_stream_db();
/* Parse and store the application arguments */
- parse_args(argc, argv, &args->appl);
+ parse_args(argc, argv, &global->appl);
/* Print both system and application information */
- print_info(NO_PATH(argv[0]), &args->appl);
+ print_info(NO_PATH(argv[0]), &global->appl);
num_workers = MAX_WORKERS;
- if (args->appl.cpu_count && args->appl.cpu_count < MAX_WORKERS)
- num_workers = args->appl.cpu_count;
+ if (global->appl.cpu_count && global->appl.cpu_count < MAX_WORKERS)
+ num_workers = global->appl.cpu_count;
/* Get default worker cpumask */
num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
@@ -1261,7 +1268,7 @@ main(int argc, char *argv[])
printf("cpu mask: %s\n", cpumaskstr);
/* Create a barrier to synchronize thread startup */
- odp_barrier_init(&sync_barrier, num_workers);
+ odp_barrier_init(&global->sync_barrier, num_workers);
/* Create packet buffer pool */
odp_pool_param_init(&params);
@@ -1270,9 +1277,9 @@ main(int argc, char *argv[])
params.pkt.num = SHM_PKT_POOL_BUF_COUNT;
params.type = ODP_POOL_PACKET;
- pkt_pool = odp_pool_create("packet_pool", &params);
+ global->pkt_pool = odp_pool_create("packet_pool", &params);
- if (ODP_POOL_INVALID == pkt_pool) {
+ if (ODP_POOL_INVALID == global->pkt_pool) {
EXAMPLE_ERR("Error: packet pool create failed.\n");
exit(EXIT_FAILURE);
}
@@ -1283,24 +1290,26 @@ main(int argc, char *argv[])
params.buf.num = SHM_CTX_POOL_BUF_COUNT;
params.type = ODP_POOL_BUFFER;
- ctx_pool = odp_pool_create("ctx_pool", &params);
+ global->ctx_pool = odp_pool_create("ctx_pool", &params);
- if (ODP_POOL_INVALID == ctx_pool) {
+ if (ODP_POOL_INVALID == global->ctx_pool) {
EXAMPLE_ERR("Error: context pool create failed.\n");
exit(EXIT_FAILURE);
}
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/* Populate our IPsec cache */
printf("Using %s mode for crypto API\n\n",
- (CRYPTO_API_SYNC == args->appl.mode) ? "SYNC" :
- (CRYPTO_API_ASYNC_IN_PLACE == args->appl.mode) ?
+ (CRYPTO_API_SYNC == global->appl.mode) ? "SYNC" :
+ (CRYPTO_API_ASYNC_IN_PLACE == global->appl.mode) ?
"ASYNC_IN_PLACE" : "ASYNC_NEW_BUFFER");
- ipsec_init_post(args->appl.mode);
+ ipsec_init_post(global->appl.mode);
/* Initialize interfaces (which resolves FWD DB entries */
- for (i = 0; i < args->appl.if_count; i++) {
- initialize_intf(args->appl.if_names[i]);
- }
+ for (i = 0; i < global->appl.if_count; i++)
+ initialize_intf(global->appl.if_names[i]);
/* If we have test streams build them before starting workers */
resolve_stream_db();
@@ -1331,12 +1340,23 @@ main(int argc, char *argv[])
odph_odpthreads_join(thread_tbl);
}
- free(args->appl.if_names);
- free(args->appl.if_str);
+ /* Stop and close used pktio devices */
+ for (i = 0; i < global->appl.if_count; i++) {
+ odp_pktio_t pktio = odp_pktio_lookup(global->appl.if_names[i]);
+
+ if (pktio == ODP_PKTIO_INVALID)
+ continue;
+
+ if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) {
+ EXAMPLE_ERR("Error: failed to close pktio %s\n",
+ global->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ free(global->appl.if_names);
+ free(global->appl.if_str);
- shm = odp_shm_lookup("shm_args");
- if (odp_shm_free(shm) != 0)
- EXAMPLE_ERR("Error: shm free shm_args failed\n");
shm = odp_shm_lookup("shm_ipsec_cache");
if (odp_shm_free(shm) != 0)
EXAMPLE_ERR("Error: shm free shm_ipsec_cache failed\n");
@@ -1355,6 +1375,10 @@ main(int argc, char *argv[])
shm = odp_shm_lookup("stream_db");
if (odp_shm_free(shm) != 0)
EXAMPLE_ERR("Error: shm free stream_db failed\n");
+ if (odp_shm_free(global->shm)) {
+ EXAMPLE_ERR("Error: shm free global data failed\n");
+ exit(EXIT_FAILURE);
+ }
printf("Exit\n\n");
@@ -1393,9 +1417,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:i:m:h:r:p:a:e:t:s:";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
printf("\nParsing command line options\n");
appl_args->cpu_count = 1; /* use one worker by default */
diff --git a/example/ipsec_api/odp_ipsec.c b/example/ipsec_api/odp_ipsec.c
index f827eeeb0..ab0fa3c5c 100644
--- a/example/ipsec_api/odp_ipsec.c
+++ b/example/ipsec_api/odp_ipsec.c
@@ -66,6 +66,8 @@ static int create_stream_db_entry(char *input ODP_UNUSED)
/* maximum number of worker threads */
#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_POLL_QUEUES 256
+
/**
* Parsed command line application arguments
*/
@@ -80,21 +82,27 @@ typedef struct {
} appl_args_t;
/**
- * Grouping of both parsed CL args and thread specific args - alloc together
+ * Grouping of both parsed CL args and global application data
*/
typedef struct {
/** Application (parsed) arguments */
appl_args_t appl;
-} args_t;
+ odp_shm_t shm;
+ odp_pool_t ctx_pool;
+ odp_pool_t pkt_pool;
+ /** ORDERED queue for per packet crypto API completion events */
+ odp_queue_t completionq;
+ /** Synchronize threads before packet processing begins */
+ odp_barrier_t sync_barrier;
+ odp_queue_t poll_queues[MAX_POLL_QUEUES];
+ int num_polled_queues;
+} global_data_t;
/* helper funcs */
static void parse_args(int argc, char *argv[], appl_args_t *appl_args);
static void print_info(char *progname, appl_args_t *appl_args);
static void usage(char *progname);
-/** Global pointer to args */
-static args_t *args;
-
/**
* Buffer pool for packet IO
*/
@@ -102,14 +110,6 @@ static args_t *args;
#define SHM_PKT_POOL_BUF_SIZE 4096
#define SHM_PKT_POOL_SIZE (SHM_PKT_POOL_BUF_COUNT * SHM_PKT_POOL_BUF_SIZE)
-static odp_pool_t pkt_pool = ODP_POOL_INVALID;
-
-/** ORDERED queue (eventually) for per packet crypto API completion events */
-static odp_queue_t completionq = ODP_QUEUE_INVALID;
-
-/** Synchronize threads before packet processing begins */
-static odp_barrier_t sync_barrier;
-
/**
* Packet processing states/steps
*/
@@ -146,7 +146,7 @@ typedef struct {
#define SHM_CTX_POOL_BUF_COUNT (SHM_PKT_POOL_BUF_COUNT)
#define SHM_CTX_POOL_SIZE (SHM_CTX_POOL_BUF_COUNT * SHM_CTX_POOL_BUF_SIZE)
-static odp_pool_t ctx_pool = ODP_POOL_INVALID;
+static global_data_t *global;
/**
* Allocate per packet processing context and associate it with
@@ -159,7 +159,7 @@ static odp_pool_t ctx_pool = ODP_POOL_INVALID;
static
pkt_ctx_t *alloc_pkt_ctx(odp_packet_t pkt)
{
- odp_buffer_t ctx_buf = odp_buffer_alloc(ctx_pool);
+ odp_buffer_t ctx_buf = odp_buffer_alloc(global->ctx_pool);
pkt_ctx_t *ctx;
if (odp_unlikely(ODP_BUFFER_INVALID == ctx_buf))
@@ -194,11 +194,6 @@ typedef odp_event_t (*schedule_func_t) (odp_queue_t *);
static queue_create_func_t queue_create;
static schedule_func_t schedule;
-#define MAX_POLL_QUEUES 256
-
-static odp_queue_t poll_queues[MAX_POLL_QUEUES];
-static int num_polled_queues;
-
/**
* odp_queue_create wrapper to enable polling versus scheduling
*/
@@ -224,7 +219,7 @@ odp_queue_t polled_odp_queue_create(const char *name,
my_queue = odp_queue_create(name, &qp);
if (ODP_QUEUE_TYPE_SCHED == type) {
- poll_queues[num_polled_queues++] = my_queue;
+ global->poll_queues[global->num_polled_queues++] = my_queue;
printf("%s: adding %" PRIu64 "\n", __func__,
odp_queue_to_u64(my_queue));
}
@@ -247,10 +242,10 @@ odp_event_t polled_odp_schedule_cb(odp_queue_t *from)
int idx = 0;
while (1) {
- if (idx >= num_polled_queues)
+ if (idx >= global->num_polled_queues)
idx = 0;
- odp_queue_t queue = poll_queues[idx++];
+ odp_queue_t queue = global->poll_queues[idx++];
odp_event_t buf;
buf = odp_queue_deq(queue);
@@ -285,8 +280,8 @@ void ipsec_init_pre(void)
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- completionq = queue_create("completion", &qparam);
- if (ODP_QUEUE_INVALID == completionq) {
+ global->completionq = queue_create("completion", &qparam);
+ if (ODP_QUEUE_INVALID == global->completionq) {
EXAMPLE_ERR("Error: completion queue creation failed\n");
exit(EXIT_FAILURE);
}
@@ -326,7 +321,7 @@ void ipsec_init_post(odp_ipsec_op_mode_t api_mode)
ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_ALL;
ipsec_config.inbound_mode = api_mode;
ipsec_config.outbound_mode = api_mode;
- ipsec_config.inbound.default_queue = completionq;
+ ipsec_config.inbound.default_queue = global->completionq;
if (odp_ipsec_config(&ipsec_config) != ODP_IPSEC_OK) {
EXAMPLE_ERR("Error: failure setting IPSec config\n");
exit(EXIT_FAILURE);
@@ -358,7 +353,7 @@ void ipsec_init_post(odp_ipsec_op_mode_t api_mode)
auth_sa,
tun,
entry->input,
- completionq)) {
+ global->completionq)) {
EXAMPLE_ERR("Error: IPSec cache entry failed.\n"
);
exit(EXIT_FAILURE);
@@ -445,7 +440,7 @@ void initialize_intf(char *intf)
/*
* Open a packet IO instance for thread and get default output queue
*/
- pktio = odp_pktio_open(intf, pkt_pool, &pktio_param);
+ pktio = odp_pktio_open(intf, global->pkt_pool, &pktio_param);
if (ODP_PKTIO_INVALID == pktio) {
EXAMPLE_ERR("Error: pktio create failed for %s\n", intf);
exit(EXIT_FAILURE);
@@ -481,10 +476,10 @@ void initialize_intf(char *intf)
odp_pktio_config_init(&config);
if (check_stream_db_in(intf) &&
- args->appl.mode == ODP_IPSEC_OP_MODE_INLINE)
+ global->appl.mode == ODP_IPSEC_OP_MODE_INLINE)
config.inbound_ipsec = capa.config.inbound_ipsec;
if (check_stream_db_out(intf) &&
- args->appl.mode == ODP_IPSEC_OP_MODE_INLINE)
+ global->appl.mode == ODP_IPSEC_OP_MODE_INLINE)
config.outbound_ipsec = capa.config.outbound_ipsec;
if (odp_pktio_config(pktio, &config) != 0) {
@@ -571,7 +566,7 @@ pkt_disposition_e do_route_fwd_db(odp_packet_t pkt, pkt_ctx_t *ctx)
memcpy(&ctx->eth.src, entry->src_mac, ODPH_ETHADDR_LEN);
ctx->eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
- if (args->appl.mode != ODP_IPSEC_OP_MODE_INLINE) {
+ if (global->appl.mode != ODP_IPSEC_OP_MODE_INLINE) {
odp_packet_l2_offset_set(pkt, 0);
odp_packet_copy_from_mem(pkt, 0, ODPH_ETHHDR_LEN,
&ctx->eth);
@@ -620,7 +615,7 @@ pkt_disposition_e do_ipsec_in_classify(odp_packet_t *ppkt)
return PKT_CONTINUE;
memset(&in_param, 0, sizeof(in_param));
- if (args->appl.lookup) {
+ if (global->appl.lookup) {
in_param.num_sa = 0;
in_param.sa = NULL;
} else {
@@ -629,7 +624,7 @@ pkt_disposition_e do_ipsec_in_classify(odp_packet_t *ppkt)
}
/* Issue crypto request */
- if (args->appl.mode != ODP_IPSEC_OP_MODE_SYNC) {
+ if (global->appl.mode != ODP_IPSEC_OP_MODE_SYNC) {
rc = odp_ipsec_in_enq(ppkt, 1, &in_param);
if (rc <= 0)
return PKT_DROP;
@@ -682,7 +677,7 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t *ppkt, pkt_ctx_t *ctx)
out_param.opt = NULL;
/* Issue crypto request */
- if (args->appl.mode == ODP_IPSEC_OP_MODE_INLINE) {
+ if (global->appl.mode == ODP_IPSEC_OP_MODE_INLINE) {
odp_ipsec_out_inline_param_t inline_param;
inline_param.pktio = ctx->pktio;
@@ -693,7 +688,7 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t *ppkt, pkt_ctx_t *ctx)
return PKT_DROP;
return PKT_DONE;
- } else if (args->appl.mode != ODP_IPSEC_OP_MODE_SYNC) {
+ } else if (global->appl.mode != ODP_IPSEC_OP_MODE_SYNC) {
rc = odp_ipsec_out_enq(ppkt, 1, &out_param);
if (rc <= 0)
return PKT_DROP;
@@ -737,7 +732,7 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
printf("Pktio thread [%02i] starts\n", thr);
- odp_barrier_wait(&sync_barrier);
+ odp_barrier_wait(&global->sync_barrier);
/* Loop packets */
for (;;) {
@@ -890,6 +885,7 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
int
main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
int num_workers;
int i;
@@ -899,6 +895,7 @@ main(int argc, char *argv[])
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_pool_param_t params;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
/* create by default scheduled queues */
@@ -911,8 +908,18 @@ main(int argc, char *argv[])
schedule = polled_odp_schedule_cb;
}
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -924,16 +931,17 @@ main(int argc, char *argv[])
}
/* Reserve memory for args from shared mem */
- shm = odp_shm_reserve("shm_args", sizeof(args_t), ODP_CACHE_LINE_SIZE,
- 0);
+ shm = odp_shm_reserve("shm_args", sizeof(global_data_t),
+ ODP_CACHE_LINE_SIZE, 0);
- args = odp_shm_addr(shm);
+ global = odp_shm_addr(shm);
- if (NULL == args) {
+ if (NULL == global) {
EXAMPLE_ERR("Error: shared mem alloc failed.\n");
exit(EXIT_FAILURE);
}
- memset(args, 0, sizeof(*args));
+ memset(global, 0, sizeof(global_data_t));
+ global->shm = shm;
/* Must init our databases before parsing args */
ipsec_init_pre();
@@ -941,14 +949,14 @@ main(int argc, char *argv[])
init_stream_db();
/* Parse and store the application arguments */
- parse_args(argc, argv, &args->appl);
+ parse_args(argc, argv, &global->appl);
/* Print both system and application information */
- print_info(NO_PATH(argv[0]), &args->appl);
+ print_info(NO_PATH(argv[0]), &global->appl);
num_workers = MAX_WORKERS;
- if (args->appl.cpu_count && args->appl.cpu_count < MAX_WORKERS)
- num_workers = args->appl.cpu_count;
+ if (global->appl.cpu_count && global->appl.cpu_count < MAX_WORKERS)
+ num_workers = global->appl.cpu_count;
/* Get default worker cpumask */
num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
@@ -959,7 +967,7 @@ main(int argc, char *argv[])
printf("cpu mask: %s\n", cpumaskstr);
/* Create a barrier to synchronize thread startup */
- odp_barrier_init(&sync_barrier, num_workers);
+ odp_barrier_init(&global->sync_barrier, num_workers);
/* Create packet buffer pool */
odp_pool_param_init(&params);
@@ -968,9 +976,9 @@ main(int argc, char *argv[])
params.pkt.num = SHM_PKT_POOL_BUF_COUNT;
params.type = ODP_POOL_PACKET;
- pkt_pool = odp_pool_create("packet_pool", &params);
+ global->pkt_pool = odp_pool_create("packet_pool", &params);
- if (ODP_POOL_INVALID == pkt_pool) {
+ if (ODP_POOL_INVALID == global->pkt_pool) {
EXAMPLE_ERR("Error: packet pool create failed.\n");
exit(EXIT_FAILURE);
}
@@ -981,23 +989,26 @@ main(int argc, char *argv[])
params.buf.num = SHM_CTX_POOL_BUF_COUNT;
params.type = ODP_POOL_BUFFER;
- ctx_pool = odp_pool_create("ctx_pool", &params);
+ global->ctx_pool = odp_pool_create("ctx_pool", &params);
- if (ODP_POOL_INVALID == ctx_pool) {
+ if (ODP_POOL_INVALID == global->ctx_pool) {
EXAMPLE_ERR("Error: context pool create failed.\n");
exit(EXIT_FAILURE);
}
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/* Populate our IPsec cache */
printf("Using %s mode for IPsec API\n\n",
- (ODP_IPSEC_OP_MODE_SYNC == args->appl.mode) ? "SYNC" :
- (ODP_IPSEC_OP_MODE_ASYNC == args->appl.mode) ? "ASYNC" :
+ (ODP_IPSEC_OP_MODE_SYNC == global->appl.mode) ? "SYNC" :
+ (ODP_IPSEC_OP_MODE_ASYNC == global->appl.mode) ? "ASYNC" :
"INLINE");
- ipsec_init_post(args->appl.mode);
+ ipsec_init_post(global->appl.mode);
/* Initialize interfaces (which resolves FWD DB entries */
- for (i = 0; i < args->appl.if_count; i++)
- initialize_intf(args->appl.if_names[i]);
+ for (i = 0; i < global->appl.if_count; i++)
+ initialize_intf(global->appl.if_names[i]);
/* If we have test streams build them before starting workers */
resolve_stream_db();
@@ -1029,12 +1040,23 @@ main(int argc, char *argv[])
odph_odpthreads_join(thread_tbl);
}
- free(args->appl.if_names);
- free(args->appl.if_str);
+ /* Stop and close used pktio devices */
+ for (i = 0; i < global->appl.if_count; i++) {
+ odp_pktio_t pktio = odp_pktio_lookup(global->appl.if_names[i]);
+
+ if (pktio == ODP_PKTIO_INVALID)
+ continue;
+
+ if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) {
+ EXAMPLE_ERR("Error: failed to close pktio %s\n",
+ global->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ free(global->appl.if_names);
+ free(global->appl.if_str);
- shm = odp_shm_lookup("shm_args");
- if (odp_shm_free(shm) != 0)
- EXAMPLE_ERR("Error: shm free shm_args failed\n");
shm = odp_shm_lookup("shm_ipsec_cache");
if (odp_shm_free(shm) != 0)
EXAMPLE_ERR("Error: shm free shm_ipsec_cache failed\n");
@@ -1053,6 +1075,10 @@ main(int argc, char *argv[])
shm = odp_shm_lookup("stream_db");
if (odp_shm_free(shm) != 0)
EXAMPLE_ERR("Error: shm free stream_db failed\n");
+ if (odp_shm_free(global->shm)) {
+ EXAMPLE_ERR("Error: shm free global data failed\n");
+ exit(EXIT_FAILURE);
+ }
printf("Exit\n\n");
@@ -1092,9 +1118,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:i:h:lm:r:p:a:e:t:s:";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->cpu_count = 1; /* use one worker by default */
printf("\nParsing command line options\n");
diff --git a/example/ipsec_offload/odp_ipsec_offload.c b/example/ipsec_offload/odp_ipsec_offload.c
index 63c8f1264..4d95b2e5b 100644
--- a/example/ipsec_offload/odp_ipsec_offload.c
+++ b/example/ipsec_offload/odp_ipsec_offload.c
@@ -48,6 +48,8 @@
/* maximum number of worker threads */
#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_COMPL_QUEUES 32
+
/**
* Parsed command line application arguments
*/
@@ -61,12 +63,19 @@ typedef struct {
} appl_args_t;
/**
- * Grouping of both parsed CL args and thread specific args - alloc together
+ * Grouping of both parsed CL args and global application data
*/
typedef struct {
/** Application (parsed) arguments */
appl_args_t appl;
-} args_t;
+ odp_pool_t pkt_pool;
+ /** Atomic queues for IPSEC completion events */
+ odp_queue_t completionq[MAX_COMPL_QUEUES];
+ /** Synchronize threads before packet processing begins */
+ odp_barrier_t sync_barrier;
+ int num_compl_queues;
+ int num_workers;
+} global_data_t;
/* helper funcs */
static void parse_args(int argc, char *argv[], appl_args_t *appl_args);
@@ -74,7 +83,7 @@ static void print_info(char *progname, appl_args_t *appl_args);
static void usage(char *progname);
/** Global pointer to args */
-static args_t *args;
+static global_data_t *global;
/**
* Buffer pool for packet IO
@@ -83,11 +92,6 @@ static args_t *args;
#define SHM_PKT_POOL_BUF_SIZE 4096
#define SHM_PKT_POOL_SIZE (SHM_PKT_POOL_BUF_COUNT * SHM_PKT_POOL_BUF_SIZE)
-static odp_pool_t pkt_pool = ODP_POOL_INVALID;
-
-/** Synchronize threads before packet processing begins */
-static odp_barrier_t sync_barrier;
-
/**
* Packet processing result codes
*/
@@ -98,15 +102,8 @@ typedef enum {
PKT_DONE /**< Finished with packet, stop processing */
} pkt_disposition_e;
-#define MAX_COMPL_QUEUES 32
#define GET_THR_QUEUE_ID(x) ((odp_thread_id() - 1) % (x))
-/** Atomic queue IPSEC completion events */
-static odp_queue_t completionq[MAX_COMPL_QUEUES];
-
-static int num_compl_queues;
-static int num_workers;
-
/**
* Calculate hash value on given 2-tuple i.e. sip, dip
*
@@ -154,9 +151,9 @@ void ipsec_init_post(void)
sa_db_entry_t *auth_sa = NULL;
tun_db_entry_t *tun = NULL;
- queue_id %= num_workers;
- if (num_compl_queues < num_workers)
- num_compl_queues++;
+ queue_id %= global->num_workers;
+ if (global->num_compl_queues < global->num_workers)
+ global->num_compl_queues++;
queue_id++;
if (entry->esp) {
cipher_sa = find_sa_db_entry(&entry->src_subnet,
@@ -172,11 +169,13 @@ void ipsec_init_post(void)
}
if (cipher_sa && auth_sa) {
+ odp_queue_t queue = global->completionq[queue_id - 1];
+
if (create_ipsec_cache_entry(cipher_sa,
auth_sa,
tun,
entry->input,
- completionq[queue_id - 1])
+ queue)
) {
EXAMPLE_ABORT("Error: IPSec cache entry failed.\n");
}
@@ -213,7 +212,7 @@ static void initialize_intf(char *intf, int queue_type)
/*
* Open a packet IO instance for thread and get default output queue
*/
- pktio = odp_pktio_open(intf, pkt_pool, &pktio_param);
+ pktio = odp_pktio_open(intf, global->pkt_pool, &pktio_param);
if (ODP_PKTIO_INVALID == pktio)
EXAMPLE_ABORT("Error: pktio create failed for %s\n", intf);
@@ -431,7 +430,7 @@ int pktio_thread(void *arg EXAMPLE_UNUSED)
odp_event_t ev = ODP_EVENT_INVALID;
printf("Pktio thread [%02i] starts\n", thr);
- odp_barrier_wait(&sync_barrier);
+ odp_barrier_wait(&global->sync_barrier);
/* Loop packets */
for (;;) {
@@ -523,26 +522,26 @@ main(int argc, char *argv[])
if (odp_init_local(instance, ODP_THREAD_CONTROL))
EXAMPLE_ABORT("Error: ODP local init failed.\n");
/* Reserve memory for arguments from shared memory */
- shm = odp_shm_reserve("shm_args", sizeof(args_t),
+ shm = odp_shm_reserve("shm_args", sizeof(global_data_t),
ODP_CACHE_LINE_SIZE, 0);
- args = odp_shm_addr(shm);
+ global = odp_shm_addr(shm);
- if (NULL == args)
+ if (NULL == global)
EXAMPLE_ABORT("Error: shared mem alloc failed.\n");
- memset(args, 0, sizeof(*args));
+ memset(global, 0, sizeof(global_data_t));
/* Must init our databases before parsing args */
ipsec_init_pre();
init_fwd_db();
/* Parse and store the application arguments */
- parse_args(argc, argv, &args->appl);
+ parse_args(argc, argv, &global->appl);
/* Initialize route table for user given parameter */
init_routing_table();
/* Print both system and application information */
- print_info(NO_PATH(argv[0]), &args->appl);
+ print_info(NO_PATH(argv[0]), &global->appl);
if (odp_ipsec_capability(&capa))
EXAMPLE_ABORT("Error: Capability not configured.\n");
@@ -559,15 +558,16 @@ main(int argc, char *argv[])
if (odp_ipsec_config(&config))
EXAMPLE_ABORT("Error: IPSec not configured.\n");
- num_workers = MAX_WORKERS;
- if (args->appl.cpu_count && args->appl.cpu_count < MAX_WORKERS)
- num_workers = args->appl.cpu_count;
+ global->num_workers = MAX_WORKERS;
+ if (global->appl.cpu_count && global->appl.cpu_count < MAX_WORKERS)
+ global->num_workers = global->appl.cpu_count;
/*
* By default CPU #0 runs Linux kernel background tasks.
* Start mapping thread from CPU #1
*/
- num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+ global->num_workers = odp_cpumask_default_worker(&cpumask,
+ global->num_workers);
(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
/*
@@ -576,20 +576,21 @@ main(int argc, char *argv[])
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = args->appl.queue_type;
+ qparam.sched.sync = global->appl.queue_type;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- for (i = 0; i < num_workers; i++) {
- completionq[i] = odp_queue_create("completion", &qparam);
- if (ODP_QUEUE_INVALID == completionq[i])
+ for (i = 0; i < global->num_workers; i++) {
+ global->completionq[i] = odp_queue_create("completion",
+ &qparam);
+ if (ODP_QUEUE_INVALID == global->completionq[i])
EXAMPLE_ABORT("Error: completion queue creation failed\n");
}
- printf("num worker threads: %i\n", num_workers);
+ printf("num worker threads: %i\n", global->num_workers);
printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
printf("cpu mask: %s\n", cpumaskstr);
/* Create a barrier to synchronize thread startup */
- odp_barrier_init(&sync_barrier, num_workers);
+ odp_barrier_init(&global->sync_barrier, global->num_workers);
/* Create packet buffer pool */
odp_pool_param_init(&params);
@@ -598,21 +599,25 @@ main(int argc, char *argv[])
params.pkt.num = SHM_PKT_POOL_BUF_COUNT;
params.type = ODP_POOL_PACKET;
- pkt_pool = odp_pool_create("packet_pool", &params);
+ global->pkt_pool = odp_pool_create("packet_pool", &params);
- if (ODP_POOL_INVALID == pkt_pool)
+ if (ODP_POOL_INVALID == global->pkt_pool)
EXAMPLE_ABORT("Error: packet pool create failed.\n");
ipsec_init_post();
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/* Initialize interfaces (which resolves FWD DB entries) */
- for (i = 0; i < args->appl.if_count; i++)
- initialize_intf(args->appl.if_names[i], args->appl.queue_type);
+ for (i = 0; i < global->appl.if_count; i++)
+ initialize_intf(global->appl.if_names[i],
+ global->appl.queue_type);
printf(" Configured queues SYNC type: [%s]\n",
- (args->appl.queue_type == 0) ?
+ (global->appl.queue_type == 0) ?
"PARALLEL" :
- (args->appl.queue_type == 1) ?
+ (global->appl.queue_type == 1) ?
"ATOMIC" : "ORDERED");
memset(&thr_params, 0, sizeof(thr_params));
thr_params.start = pktio_thread;
@@ -625,8 +630,28 @@ main(int argc, char *argv[])
&thr_params);
odph_odpthreads_join(thread_tbl);
- free(args->appl.if_names);
- free(args->appl.if_str);
+ /* Stop and close used pktio devices */
+ for (i = 0; i < global->appl.if_count; i++) {
+ odp_pktio_t pktio = odp_pktio_lookup(global->appl.if_names[i]);
+
+ if (pktio == ODP_PKTIO_INVALID)
+ continue;
+
+ if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) {
+ EXAMPLE_ERR("Error: failed to close pktio %s\n",
+ global->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ free(global->appl.if_names);
+ free(global->appl.if_str);
+
+ if (odp_shm_free(shm)) {
+ EXAMPLE_ERR("Error: shm free global data\n");
+ exit(EXIT_FAILURE);
+ }
+
printf("Exit\n\n");
return 0;
}
diff --git a/example/l2fwd_simple/odp_l2fwd_simple.c b/example/l2fwd_simple/odp_l2fwd_simple.c
index fcdd81832..f7e7ba539 100644
--- a/example/l2fwd_simple/odp_l2fwd_simple.c
+++ b/example/l2fwd_simple/odp_l2fwd_simple.c
@@ -17,20 +17,22 @@
#define MAX_PKT_BURST 32
#define MAX_WORKERS 1
-static int exit_thr;
-static int wait_sec;
-
-struct {
+typedef struct {
odp_pktio_t if0, if1;
odp_pktin_queue_t if0in, if1in;
odp_pktout_queue_t if0out, if1out;
odph_ethaddr_t src, dst;
-} global;
+ odp_shm_t shm;
+ int exit_thr;
+ int wait_sec;
+} global_data_t;
+
+static global_data_t *global;
static void sig_handler(int signo ODP_UNUSED)
{
printf("sig_handler!\n");
- exit_thr = 1;
+ global->exit_thr = 1;
}
static odp_pktio_t create_pktio(const char *name, odp_pool_t pool,
@@ -89,25 +91,25 @@ static int run_worker(void *arg ODP_UNUSED)
int pkts, sent, tx_drops, i;
uint64_t wait_time = odp_pktin_wait_time(ODP_TIME_SEC_IN_NS);
- if (odp_pktio_start(global.if0)) {
+ if (odp_pktio_start(global->if0)) {
printf("unable to start input interface\n");
exit(1);
}
printf("started input interface\n");
- if (odp_pktio_start(global.if1)) {
+ if (odp_pktio_start(global->if1)) {
printf("unable to start output interface\n");
exit(1);
}
printf("started output interface\n");
printf("started all\n");
- while (!exit_thr) {
- pkts = odp_pktin_recv_tmo(global.if0in, pkt_tbl, MAX_PKT_BURST,
+ while (!global->exit_thr) {
+ pkts = odp_pktin_recv_tmo(global->if0in, pkt_tbl, MAX_PKT_BURST,
wait_time);
if (odp_unlikely(pkts <= 0)) {
- if (wait_sec > 0)
- if (!(--wait_sec))
+ if (global->wait_sec > 0)
+ if (!(--global->wait_sec))
break;
continue;
}
@@ -121,10 +123,10 @@ static int run_worker(void *arg ODP_UNUSED)
return 0;
}
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- eth->src = global.src;
- eth->dst = global.dst;
+ eth->src = global->src;
+ eth->dst = global->dst;
}
- sent = odp_pktout_send(global.if1out, pkt_tbl, pkts);
+ sent = odp_pktout_send(global->if1out, pkt_tbl, pkts);
if (sent < 0)
sent = 0;
tx_drops = pkts - sent;
@@ -137,21 +139,53 @@ static int run_worker(void *arg ODP_UNUSED)
int main(int argc, char **argv)
{
+ odph_helper_options_t helper_options;
odp_pool_t pool;
odp_pool_param_t params;
odp_cpumask_t cpumask;
odph_odpthread_t thd[MAX_WORKERS];
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
odph_ethaddr_t correct_src;
uint32_t mtu1, mtu2;
+ odp_shm_t shm;
- /* let helper collect its own arguments (e.g. --odph_proc) */
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ printf("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (odp_init_global(&instance, &init_param, NULL)) {
+ printf("Error: ODP global init failed.\n");
+ exit(1);
+ }
+
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ printf("Error: ODP local init failed.\n");
+ exit(1);
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("_appl_global_data", sizeof(global_data_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ printf("Error: shared mem alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(global, 0, sizeof(global_data_t));
+ global->shm = shm;
if (argc > 7 ||
- odph_eth_addr_parse(&global.dst, argv[3]) != 0 ||
- odph_eth_addr_parse(&global.src, argv[4]) != 0) {
+ odph_eth_addr_parse(&global->dst, argv[3]) != 0 ||
+ odph_eth_addr_parse(&global->src, argv[4]) != 0) {
printf("Usage: odp_l2fwd_simple eth0 eth1 01:02:03:04:05:06"
" 07:08:09:0a:0b:0c [-t sec]\n");
printf("Where eth0 and eth1 are the used interfaces"
@@ -161,20 +195,10 @@ int main(int argc, char **argv)
exit(1);
}
if (argc == 7 && !strncmp(argv[5], "-t", 2))
- wait_sec = atoi(argv[6]);
+ global->wait_sec = atoi(argv[6]);
- if (wait_sec)
- printf("running test for %d sec\n", wait_sec);
-
- if (odp_init_global(&instance, NULL, NULL)) {
- printf("Error: ODP global init failed.\n");
- exit(1);
- }
-
- if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- printf("Error: ODP local init failed.\n");
- exit(1);
- }
+ if (global->wait_sec)
+ printf("running test for %d sec\n", global->wait_sec);
/* Create packet pool */
odp_pool_param_init(&params);
@@ -190,23 +214,25 @@ int main(int argc, char **argv)
exit(1);
}
- global.if0 = create_pktio(argv[1], pool, &global.if0in, &global.if0out);
- global.if1 = create_pktio(argv[2], pool, &global.if1in, &global.if1out);
+ global->if0 = create_pktio(argv[1], pool, &global->if0in,
+ &global->if0out);
+ global->if1 = create_pktio(argv[2], pool, &global->if1in,
+ &global->if1out);
/* Do some operations to increase code coverage in tests */
- if (odp_pktio_mac_addr(global.if0, &correct_src, sizeof(correct_src))
+ if (odp_pktio_mac_addr(global->if0, &correct_src, sizeof(correct_src))
!= sizeof(correct_src))
printf("Warning: can't get MAC address\n");
- else if (memcmp(&correct_src, &global.src, sizeof(correct_src)) != 0)
+ else if (memcmp(&correct_src, &global->src, sizeof(correct_src)) != 0)
printf("Warning: src MAC invalid\n");
- odp_pktio_promisc_mode_set(global.if0, true);
- odp_pktio_promisc_mode_set(global.if1, true);
- (void)odp_pktio_promisc_mode(global.if0);
- (void)odp_pktio_promisc_mode(global.if1);
+ odp_pktio_promisc_mode_set(global->if0, true);
+ odp_pktio_promisc_mode_set(global->if1, true);
+ (void)odp_pktio_promisc_mode(global->if0);
+ (void)odp_pktio_promisc_mode(global->if1);
- mtu1 = odp_pktin_maxlen(global.if0);
- mtu2 = odp_pktout_maxlen(global.if1);
+ mtu1 = odp_pktin_maxlen(global->if0);
+ mtu2 = odp_pktout_maxlen(global->if1);
if (mtu1 && mtu2 && mtu1 > mtu2)
printf("Warning: input MTU bigger than output MTU\n");
@@ -223,11 +249,25 @@ int main(int argc, char **argv)
odph_odpthreads_create(thd, &cpumask, &thr_params);
odph_odpthreads_join(thd);
+ if (odp_pktio_stop(global->if0) || odp_pktio_close(global->if0)) {
+ printf("Error: failed to close interface %s\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+ if (odp_pktio_stop(global->if1) || odp_pktio_close(global->if1)) {
+ printf("Error: failed to close interface %s\n", argv[2]);
+ exit(EXIT_FAILURE);
+ }
+
if (odp_pool_destroy(pool)) {
printf("Error: pool destroy\n");
exit(EXIT_FAILURE);
}
+ if (odp_shm_free(global->shm)) {
+ printf("Error: shm free global data\n");
+ exit(EXIT_FAILURE);
+ }
+
if (odp_term_local()) {
printf("Error: term local\n");
exit(EXIT_FAILURE);
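For reference, the init-time boilerplate this and the other examples gain reduces to the following minimal main() (only the helper option handling is shown; error paths trimmed):

#include <odp_api.h>
#include <odp/helper/odph_api.h>

int main(int argc, char *argv[])
{
	odph_helper_options_t helper_options;
	odp_instance_t instance;
	odp_init_t init_param;

	/* Helper strips its own options (e.g. --odph_proc) from argv */
	argc = odph_parse_options(argc, argv);
	if (odph_options(&helper_options))
		return -1;

	/* Forward the selected thread/process memory model to ODP */
	odp_init_param_init(&init_param);
	init_param.mem_model = helper_options.mem_model;

	if (odp_init_global(&instance, &init_param, NULL))
		return -1;

	if (odp_init_local(instance, ODP_THREAD_CONTROL))
		return -1;

	/* ... application ... */
	return 0;
}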
diff --git a/example/l3fwd/odp_l3fwd.c b/example/l3fwd/odp_l3fwd.c
index 34e2bfce8..708c4df90 100644
--- a/example/l3fwd/odp_l3fwd.c
+++ b/example/l3fwd/odp_l3fwd.c
@@ -80,20 +80,24 @@ typedef struct {
int error_check; /* Check packets for errors */
} app_args_t;
-struct {
+typedef struct {
app_args_t cmd_args;
struct l3fwd_pktio_s l3fwd_pktios[MAX_NB_PKTIO];
odph_odpthread_t l3fwd_workers[MAX_NB_WORKER];
struct thread_arg_s worker_args[MAX_NB_WORKER];
odph_ethaddr_t eth_dest_mac[MAX_NB_PKTIO];
+ /** Global barrier to synchronize main and workers */
+ odp_barrier_t barrier;
+ /** Shm for storing global data */
+ odp_shm_t shm;
+ /** Break workers loop if set to 1 */
+ int exit_threads;
/* forward func, hash or lpm */
int (*fwd_func)(odp_packet_t pkt, int sif);
-} global;
+} global_data_t;
-/** Global barrier to synchronize main and workers */
-static odp_barrier_t barrier;
-static int exit_threads; /**< Break workers loop if set to 1 */
+static global_data_t *global;
static int create_pktio(const char *name, odp_pool_t pool,
struct l3fwd_pktio_s *fwd_pktio)
@@ -122,7 +126,7 @@ static int create_pktio(const char *name, odp_pool_t pool,
}
odp_pktio_config_init(&config);
- config.parser.layer = global.cmd_args.error_check ?
+ config.parser.layer = global->cmd_args.error_check ?
ODP_PROTO_LAYER_ALL :
ODP_PROTO_LAYER_L4;
odp_pktio_config(pktio, &config);
@@ -145,7 +149,7 @@ static void setup_fwd_db(void)
int if_idx;
app_args_t *args;
- args = &global.cmd_args;
+ args = &global->cmd_args;
if (args->hash_mode)
init_fwd_hash_cache();
else
@@ -157,9 +161,9 @@ static void setup_fwd_db(void)
fib_tbl_insert(entry->subnet.addr, if_idx,
entry->subnet.depth);
if (args->dest_mac_changed[if_idx])
- global.eth_dest_mac[if_idx] = entry->dst_mac;
+ global->eth_dest_mac[if_idx] = entry->dst_mac;
else
- entry->dst_mac = global.eth_dest_mac[if_idx];
+ entry->dst_mac = global->eth_dest_mac[if_idx];
}
}
@@ -237,8 +241,8 @@ static inline int l3fwd_pkt_lpm(odp_packet_t pkt, int sif)
if (ret)
dif = sif;
- eth->dst = global.eth_dest_mac[dif];
- eth->src = global.l3fwd_pktios[dif].mac_addr;
+ eth->dst = global->eth_dest_mac[dif];
+ eth->src = global->l3fwd_pktios[dif].mac_addr;
return dif;
}
@@ -265,7 +269,7 @@ static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
pkt = pkt_tbl[i];
err = 0;
- if (global.cmd_args.error_check)
+ if (global->cmd_args.error_check)
err = odp_packet_has_error(pkt);
if (odp_unlikely(err || !odp_packet_has_ipv4(pkt))) {
@@ -286,7 +290,7 @@ static int run_worker(void *arg)
odp_pktin_queue_t inq;
int input_ifs[thr_arg->nb_pktio];
odp_pktin_queue_t input_queues[thr_arg->nb_pktio];
- odp_pktout_queue_t output_queues[global.cmd_args.if_count];
+ odp_pktout_queue_t output_queues[global->cmd_args.if_count];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
odp_packet_t *tbl;
int pkts, drop, sent;
@@ -296,16 +300,16 @@ static int run_worker(void *arg)
int num_pktio = 0;
/* Copy all required handles to local memory */
- for (i = 0; i < global.cmd_args.if_count; i++) {
+ for (i = 0; i < global->cmd_args.if_count; i++) {
int txq_idx = thr_arg->pktio[i].txq_idx;
- output_queues[i] = global.l3fwd_pktios[i].ifout[txq_idx];
+ output_queues[i] = global->l3fwd_pktios[i].ifout[txq_idx];
if_idx = thr_arg->pktio[i].if_idx;
for (j = 0; j < thr_arg->pktio[i].nb_rxq; j++) {
int rxq_idx = thr_arg->pktio[i].rxq[j];
- inq = global.l3fwd_pktios[if_idx].ifin[rxq_idx];
+ inq = global->l3fwd_pktios[if_idx].ifin[rxq_idx];
input_ifs[num_pktio] = if_idx;
input_queues[num_pktio] = inq;
num_pktio++;
@@ -318,9 +322,9 @@ static int run_worker(void *arg)
if_idx = input_ifs[pktio];
inq = input_queues[pktio];
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&global->barrier);
- while (!exit_threads) {
+ while (!global->exit_threads) {
if (num_pktio > 1) {
if_idx = input_ifs[pktio];
inq = input_queues[pktio];
@@ -340,12 +344,12 @@ static int run_worker(void *arg)
if (odp_unlikely(pkts < 1))
continue;
- dif = global.fwd_func(pkt_tbl[0], if_idx);
+ dif = global->fwd_func(pkt_tbl[0], if_idx);
tbl = &pkt_tbl[0];
while (pkts) {
dst_port = dif;
for (i = 1; i < pkts; i++) {
- dif = global.fwd_func(tbl[i], if_idx);
+ dif = global->fwd_func(tbl[i], if_idx);
if (dif != dst_port)
break;
}
@@ -695,10 +699,10 @@ static void setup_worker_qconf(app_args_t *args)
if (!args->qconf_count) {
if (nb_worker > if_count) {
for (i = 0; i < nb_worker; i++) {
- arg = &global.worker_args[i];
+ arg = &global->worker_args[i];
arg->thr_idx = i;
j = i % if_count;
- port = &global.l3fwd_pktios[j];
+ port = &global->l3fwd_pktios[j];
arg->pktio[0].rxq[0] =
port->rxq_idx % port->nb_rxq;
arg->pktio[0].nb_rxq = 1;
@@ -709,9 +713,9 @@ static void setup_worker_qconf(app_args_t *args)
} else {
for (i = 0; i < if_count; i++) {
j = i % nb_worker;
- arg = &global.worker_args[j];
+ arg = &global->worker_args[j];
arg->thr_idx = j;
- port = &global.l3fwd_pktios[i];
+ port = &global->l3fwd_pktios[i];
rxq_idx = arg->pktio[i].nb_rxq;
pktio = arg->nb_pktio;
arg->pktio[pktio].rxq[rxq_idx] =
@@ -744,7 +748,7 @@ static void setup_worker_qconf(app_args_t *args)
q->if_idx, q->rxq_idx, q->core_idx);
queue_mask[q->if_idx][q->rxq_idx] = 1;
- port = &global.l3fwd_pktios[q->if_idx];
+ port = &global->l3fwd_pktios[q->if_idx];
if (port->rxq_idx < q->rxq_idx)
EXAMPLE_ABORT("Error queue (%d, %d, %d), queue should"
" be in sequence and start from 0, queue"
@@ -760,7 +764,7 @@ static void setup_worker_qconf(app_args_t *args)
port->rxq_idx = q->rxq_idx + 1;
/* put the queue into worker_args */
- arg = &global.worker_args[q->core_idx];
+ arg = &global->worker_args[q->core_idx];
/* Check if interface already has queues configured */
for (j = 0; j < args->if_count; j++) {
@@ -778,9 +782,9 @@ static void setup_worker_qconf(app_args_t *args)
}
/* distribute tx queues among threads */
for (i = 0; i < args->worker_count; i++) {
- arg = &global.worker_args[i];
+ arg = &global->worker_args[i];
for (j = 0; j < args->if_count; j++) {
- port = &global.l3fwd_pktios[j];
+ port = &global->l3fwd_pktios[j];
arg->pktio[j].txq_idx =
port->txq_idx % port->nb_txq;
port->txq_idx++;
@@ -796,7 +800,7 @@ static void setup_worker_qconf(app_args_t *args)
const char *name;
int nb_rxq, nb_txq;
- port = &global.l3fwd_pktios[i];
+ port = &global->l3fwd_pktios[i];
name = args->if_names[i];
odp_pktin_queue_param_init(&in_queue_param);
odp_pktout_queue_param_init(&out_queue_param);
@@ -855,7 +859,7 @@ static void print_qconf_table(app_args_t *args)
"port/id", "rxq", "thread");
for (i = 0; i < args->worker_count; i++) {
- thr_arg = &global.worker_args[i];
+ thr_arg = &global->worker_args[i];
for (j = 0; j < args->if_count; j++) {
if (!thr_arg->pktio[j].nb_rxq)
continue;
@@ -900,7 +904,7 @@ static int print_speed_stats(int num_workers, int duration, int timeout)
timeout = 1;
}
/* Wait for all threads to be ready */
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&global->barrier);
do {
pkts = 0;
@@ -909,9 +913,9 @@ static int print_speed_stats(int num_workers, int duration, int timeout)
sleep(timeout);
for (i = 0; i < num_workers; i++) {
- pkts += global.worker_args[i].packets;
- rx_drops += global.worker_args[i].rx_drops;
- tx_drops += global.worker_args[i].tx_drops;
+ pkts += global->worker_args[i].packets;
+ rx_drops += global->worker_args[i].rx_drops;
+ tx_drops += global->worker_args[i].tx_drops;
}
if (stats_enabled) {
pps = (pkts - pkts_prev) / timeout;
@@ -961,17 +965,28 @@ int main(int argc, char **argv)
exit(1);
}
- /* Clear global argument and initialize the dest mac as 2:0:0:0:0:x */
- memset(&global, 0, sizeof(global));
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("_appl_global_data", sizeof(global_data_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ printf("Error: shared mem alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(global, 0, sizeof(global_data_t));
+ global->shm = shm;
+
+ /* Initialize the dest mac as 2:0:0:0:0:x */
mac[0] = 2;
for (i = 0; i < MAX_NB_PKTIO; i++) {
mac[ODPH_ETHADDR_LEN - 1] = (uint8_t)i;
- memcpy(global.eth_dest_mac[i].addr, mac, ODPH_ETHADDR_LEN);
+ memcpy(global->eth_dest_mac[i].addr, mac, ODPH_ETHADDR_LEN);
}
/* Initialize the thread arguments */
for (i = 0; i < MAX_NB_WORKER; i++) {
- thr_arg = &global.worker_args[i];
+ thr_arg = &global->worker_args[i];
for (j = 0; j < MAX_NB_PKTIO; j++) {
thr_arg->thr_idx = INVALID_ID;
thr_arg->pktio[j].txq_idx = INVALID_ID;
@@ -982,7 +997,7 @@ int main(int argc, char **argv)
}
/* Parse cmdline arguments */
- args = &global.cmd_args;
+ args = &global->cmd_args;
parse_cmdline_args(argc, argv, args);
/* Init l3fwd table */
@@ -1030,7 +1045,7 @@ int main(int argc, char **argv)
char *if_name;
if_name = args->if_names[i];
- port = &global.l3fwd_pktios[i];
+ port = &global->l3fwd_pktios[i];
if (create_pktio(if_name, pool, port)) {
printf("Error: create pktio %s\n", if_name);
exit(1);
@@ -1054,9 +1069,9 @@ int main(int argc, char **argv)
/* Decide ip lookup method */
if (args->hash_mode)
- global.fwd_func = l3fwd_pkt_hash;
+ global->fwd_func = l3fwd_pkt_hash;
else
- global.fwd_func = l3fwd_pkt_lpm;
+ global->fwd_func = l3fwd_pkt_lpm;
/* Start all the available ports */
for (i = 0; i < args->if_count; i++) {
@@ -1065,7 +1080,7 @@ int main(int argc, char **argv)
char buf[32];
if_name = args->if_names[i];
- port = &global.l3fwd_pktios[i];
+ port = &global->l3fwd_pktios[i];
/* start pktio */
if (odp_pktio_start(port->pktio)) {
printf("unable to start pktio: %s\n", if_name);
@@ -1082,7 +1097,7 @@ int main(int argc, char **argv)
printf("start pktio: %s, mac %s\n", if_name, buf);
}
- odp_barrier_init(&barrier, nb_worker + 1);
+ odp_barrier_init(&global->barrier, nb_worker + 1);
memset(&thr_params, 0, sizeof(thr_params));
thr_params.start = run_worker;
@@ -1095,7 +1110,7 @@ int main(int argc, char **argv)
struct thread_arg_s *arg;
odp_cpumask_t thr_mask;
- arg = &global.worker_args[i];
+ arg = &global->worker_args[i];
odp_cpumask_zero(&thr_mask);
odp_cpumask_set(&thr_mask, cpu);
thr_params.arg = arg;
@@ -1105,12 +1120,22 @@ int main(int argc, char **argv)
}
print_speed_stats(nb_worker, args->duration, PRINT_INTERVAL);
- exit_threads = 1;
+ global->exit_threads = 1;
/* wait for other threads to join */
for (i = 0; i < nb_worker; i++)
odph_odpthreads_join(&thread_tbl[i]);
+ /* Stop and close used pktio devices */
+ for (i = 0; i < args->if_count; i++) {
+ odp_pktio_t pktio = global->l3fwd_pktios[i].pktio;
+
+ if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) {
+ printf("Error: failed to close pktio\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
/* if_names share a single buffer, so only one free */
free(args->if_names[0]);
@@ -1138,6 +1163,11 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ if (odp_shm_free(global->shm)) {
+ printf("Error: shm free global data\n");
+ exit(EXIT_FAILURE);
+ }
+
if (odp_term_local()) {
printf("Error: term local\n");
exit(EXIT_FAILURE);
diff --git a/example/m4/configure.m4 b/example/m4/configure.m4
index cbac09140..ee4f44bae 100644
--- a/example/m4/configure.m4
+++ b/example/m4/configure.m4
@@ -1,5 +1,15 @@
##########################################################################
-# Enable/disable test-example
+# Build and install example applications
+##########################################################################
+AC_ARG_WITH([examples],
+ [AS_HELP_STRING([--without-examples],
+ [don't build and install example applications])],
+ [],
+ [with_examples=yes])
+AM_CONDITIONAL([WITH_EXAMPLES], [test x$with_examples != xno])
+
+##########################################################################
+# Test examples during 'make check'
##########################################################################
AC_ARG_ENABLE([test-example],
[AS_HELP_STRING([--enable-test-example], [run basic test against examples])],
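In effect, example applications are now built and installed by default and ./configure --without-examples opts out; the pre-existing --enable-test-example switch continues to control only whether the examples are exercised during 'make check'.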
diff --git a/example/packet/.gitignore b/example/packet/.gitignore
index 4610a1922..02752853e 100644
--- a/example/packet/.gitignore
+++ b/example/packet/.gitignore
@@ -1,3 +1,4 @@
+odp_packet_dump
odp_pktio
*.log
*.trs
diff --git a/example/packet/Makefile.am b/example/packet/Makefile.am
index 228c3506d..6b5fe2312 100644
--- a/example/packet/Makefile.am
+++ b/example/packet/Makefile.am
@@ -1,12 +1,15 @@
include $(top_srcdir)/example/Makefile.inc
-bin_PROGRAMS = odp_pktio
+bin_PROGRAMS = odp_packet_dump \
+ odp_pktio
+
+odp_packet_dump_SOURCES = odp_packet_dump.c
odp_pktio_SOURCES = odp_pktio.c
if test_example
if HAVE_PCAP
-TESTS = pktio_run.sh
+TESTS = packet_dump_run.sh pktio_run.sh
endif
endif
-EXTRA_DIST = pktio_run.sh udp64.pcap
+EXTRA_DIST = packet_dump_run.sh pktio_run.sh udp64.pcap
diff --git a/example/packet/odp_packet_dump.c b/example/packet/odp_packet_dump.c
new file mode 100644
index 000000000..4e3aec8f1
--- /dev/null
+++ b/example/packet/odp_packet_dump.c
@@ -0,0 +1,685 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <odp_api.h>
+
+#define MAX_PKTIOS 32
+#define MAX_PKTIO_NAME 255
+#define MAX_PKT_NUM 1024
+#define MAX_FILTERS 32
+
+typedef struct test_options_t {
+ uint64_t num_packet;
+ uint32_t data_offset;
+ uint32_t data_len;
+ int verbose;
+ int num_pktio;
+ char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
+ int num_filter_l3;
+ int filter_l3[MAX_FILTERS];
+ int num_filter_l4;
+ int filter_l4[MAX_FILTERS];
+
+} test_options_t;
+
+typedef struct test_global_t {
+ test_options_t opt;
+ odp_pool_t pool;
+ int stop;
+
+ struct {
+ odp_pktio_t pktio;
+ int started;
+
+ } pktio[MAX_PKTIOS];
+
+} test_global_t;
+
+static test_global_t test_global;
+
+static void sig_handler(int signo)
+{
+ (void)signo;
+
+ test_global.stop = 1;
+ odp_mb_full();
+}
+
+static void print_usage(void)
+{
+ printf("\n"
+ "Print received packets\n"
+ "\n"
+ "OPTIONS:\n"
+ " -i, --interface <name> Packet IO interfaces (comma-separated, no spaces)\n"
+ " -n, --num_packet <number> Exit after this many packets. Use 0 to run infinitely. Default 0.\n"
+ " -o, --data_offset <number> Data print start offset in bytes. Default 0.\n"
+ " -l, --data_length <number> Data print length in bytes. Default 0.\n"
+ " --filter_l3 <type> Print only packets with matching L3 type. Comma-separated\n"
+ " list (no spaces) of ODP L3 type values (e.g. value of ODP_PROTO_L3_TYPE_IPV4).\n"
+ " --filter_l4 <type> Print only packets with matching L4 type. Comma-separated\n"
+ " list (no spaces) of ODP L4 type values (e.g. value of ODP_PROTO_L4_TYPE_TCP).\n"
+ " -v, --verbose Print extra packet information.\n"
+ " -h, --help Display help and exit.\n\n");
+}
+
+static int parse_int_list(char *str, int integer[], int max_num)
+{
+ int str_len, len;
+ int i = 0;
+
+ str_len = strlen(str);
+
+ while (str_len > 0) {
+ len = strcspn(str, ",");
+ str[len] = 0;
+
+ if (i == max_num) {
+ printf("Error: maximum number of options is %i\n",
+ max_num);
+ return -1;
+ }
+
+ integer[i] = atoi(str);
+
+ str_len -= len + 1;
+ str += len + 1;
+ i++;
+ }
+
+ return i;
+}
+
+static int parse_options(int argc, char *argv[], test_global_t *global)
+{
+ int i, opt, long_index;
+ char *name, *str;
+ int len, str_len, num;
+
+ const struct option longopts[] = {
+ {"interface", required_argument, NULL, 'i'},
+ {"num_packet", required_argument, NULL, 'n'},
+ {"data_offset", required_argument, NULL, 'o'},
+ {"data_length", required_argument, NULL, 'l'},
+ {"verbose", no_argument, NULL, 'v'},
+ {"help", no_argument, NULL, 'h'},
+ {"filter_l3", required_argument, NULL, 0 },
+ {"filter_l4", required_argument, NULL, 1 },
+ {NULL, 0, NULL, 0}
+ };
+ const char *shortopts = "+i:n:o:l:vh";
+ int ret = 0;
+
+ while (1) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 0:
+ /* --filter_l3 */
+ num = parse_int_list(optarg, global->opt.filter_l3,
+ MAX_FILTERS);
+ global->opt.num_filter_l3 = num;
+
+ if (num < 0)
+ ret = -1;
+ break;
+ case 1:
+ /* --filter_l4 */
+ num = parse_int_list(optarg, global->opt.filter_l4,
+ MAX_FILTERS);
+ global->opt.num_filter_l4 = num;
+
+ if (num < 0)
+ ret = -1;
+ break;
+ case 'i':
+ i = 0;
+ str = optarg;
+ str_len = strlen(str);
+
+ while (str_len > 0) {
+ len = strcspn(str, ",");
+ str_len -= len + 1;
+
+ if (i == MAX_PKTIOS) {
+ printf("Error: Too many interfaces\n");
+ ret = -1;
+ break;
+ }
+
+ if (len > MAX_PKTIO_NAME) {
+ printf("Error: Too long interface name %s\n",
+ str);
+ ret = -1;
+ break;
+ }
+
+ name = global->opt.pktio_name[i];
+ memcpy(name, str, len);
+ str += len + 1;
+ i++;
+ }
+
+ global->opt.num_pktio = i;
+
+ break;
+ case 'o':
+ global->opt.data_offset = atoi(optarg);
+ break;
+ case 'l':
+ global->opt.data_len = atoi(optarg);
+ break;
+ case 'n':
+ global->opt.num_packet = atoll(optarg);
+ break;
+ case 'v':
+ global->opt.verbose = 1;
+ break;
+ case 'h':
+ default:
+ print_usage();
+ return -1;
+ }
+ }
+
+ if (global->opt.num_pktio == 0) {
+ printf("Error: At least one pktio interface needed.\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int open_pktios(test_global_t *global)
+{
+ odp_pool_param_t pool_param;
+ odp_pktio_param_t pktio_param;
+ odp_pool_t pool;
+ odp_pool_capability_t pool_capa;
+ odp_pktio_t pktio;
+ odp_pktio_config_t pktio_config;
+ odp_pktin_queue_param_t pktin_param;
+ char *name;
+ int i, num_pktio;
+ uint32_t num_pkt = MAX_PKT_NUM;
+
+ num_pktio = global->opt.num_pktio;
+
+ if (odp_pool_capability(&pool_capa)) {
+ printf("Error: Pool capability failed.\n");
+ return -1;
+ }
+
+ if (pool_capa.pkt.max_num < MAX_PKT_NUM)
+ num_pkt = pool_capa.pkt.max_num;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.pkt.num = num_pkt;
+ pool_param.type = ODP_POOL_PACKET;
+
+ pool = odp_pool_create("packet pool", &pool_param);
+
+ global->pool = pool;
+
+ if (pool == ODP_POOL_INVALID) {
+ printf("Error: Pool create.\n");
+ return -1;
+ }
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DISABLED;
+
+ for (i = 0; i < num_pktio; i++)
+ global->pktio[i].pktio = ODP_PKTIO_INVALID;
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_pktio; i++) {
+ name = global->opt.pktio_name[i];
+ pktio = odp_pktio_open(name, pool, &pktio_param);
+
+ if (pktio == ODP_PKTIO_INVALID) {
+ printf("Error (%s): Pktio open failed.\n", name);
+ return -1;
+ }
+
+ global->pktio[i].pktio = pktio;
+
+ odp_pktio_print(pktio);
+
+ odp_pktio_config_init(&pktio_config);
+ pktio_config.pktin.bit.ts_all = 1;
+ pktio_config.parser.layer = ODP_PROTO_LAYER_ALL;
+
+ odp_pktio_config(pktio, &pktio_config);
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ pktin_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ pktin_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ pktin_param.num_queues = 1;
+
+ if (odp_pktin_queue_config(pktio, &pktin_param)) {
+ printf("Error (%s): Pktin config failed.\n", name);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static int start_pktios(test_global_t *global)
+{
+ int i;
+
+ for (i = 0; i < global->opt.num_pktio; i++) {
+ if (odp_pktio_start(global->pktio[i].pktio)) {
+ printf("Error (%s): Pktio start failed.\n",
+ global->opt.pktio_name[i]);
+
+ return -1;
+ }
+
+ global->pktio[i].started = 1;
+ }
+
+ return 0;
+}
+
+static int stop_pktios(test_global_t *global)
+{
+ odp_pktio_t pktio;
+ int i, ret = 0;
+
+ for (i = 0; i < global->opt.num_pktio; i++) {
+ pktio = global->pktio[i].pktio;
+
+ if (pktio == ODP_PKTIO_INVALID || global->pktio[i].started == 0)
+ continue;
+
+ if (odp_pktio_stop(pktio)) {
+ printf("Error (%s): Pktio stop failed.\n",
+ global->opt.pktio_name[i]);
+ ret = -1;
+ }
+ }
+
+ return ret;
+}
+
+static void empty_queues(void)
+{
+ odp_event_t ev;
+ uint64_t wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS / 2);
+
+ /* Drop all events from all queues */
+ while (1) {
+ ev = odp_schedule(NULL, wait_time);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+}
+
+static int close_pktios(test_global_t *global)
+{
+ odp_pktio_t pktio;
+ odp_pool_t pool;
+ int i, ret = 0;
+
+ for (i = 0; i < global->opt.num_pktio; i++) {
+ pktio = global->pktio[i].pktio;
+
+ if (pktio == ODP_PKTIO_INVALID)
+ continue;
+
+ if (odp_pktio_close(pktio)) {
+ printf("Error (%s): Pktio close failed.\n",
+ global->opt.pktio_name[i]);
+ ret = -1;
+ }
+ }
+
+ pool = global->pool;
+
+ if (pool == ODP_POOL_INVALID)
+ return ret;
+
+ if (odp_pool_destroy(pool)) {
+ printf("Error: Pool destroy failed.\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static void print_mac_addr(uint8_t *addr)
+{
+ printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+}
+
+static void print_ipv4_addr(uint8_t *addr)
+{
+ printf("%u.%u.%u.%u\n",
+ addr[0], addr[1], addr[2], addr[3]);
+}
+
+static void print_port(uint8_t *ptr)
+{
+ uint16_t *port = (uint16_t *)(uintptr_t)ptr;
+
+ printf("%u\n", odp_be_to_cpu_16(*port));
+}
+
+static void print_data(odp_packet_t pkt, uint32_t offset, uint32_t len)
+{
+ const uint32_t bytes_per_row = 16;
+ const uint32_t num_char = 1 + (bytes_per_row * 3) + 1;
+ uint8_t data[bytes_per_row];
+ char row[num_char];
+ uint32_t copy_len, i, j;
+ uint32_t data_len = odp_packet_len(pkt);
+
+ if (offset > data_len)
+ return;
+
+ if (offset + len > data_len)
+ len = data_len - offset;
+
+ while (len) {
+ i = 0;
+
+ if (len > bytes_per_row)
+ copy_len = bytes_per_row;
+ else
+ copy_len = len;
+
+ odp_packet_copy_to_mem(pkt, offset, copy_len, data);
+
+ i += snprintf(&row[i], num_char - i, " ");
+
+ for (j = 0; j < copy_len; j++)
+ i += snprintf(&row[i], num_char - i, " %02x", data[j]);
+
+ row[i] = 0;
+ printf("%s\n", row);
+
+ len -= copy_len;
+ offset += copy_len;
+ }
+}
+
+static int print_packet(test_global_t *global, odp_packet_t pkt,
+ uint64_t num_packet)
+{
+ odp_pktio_t pktio;
+ odp_pktio_info_t pktio_info;
+ odp_time_t time;
+ uint64_t sec, nsec;
+ uint32_t offset;
+ int i, type, match;
+ int num_filter_l3 = global->opt.num_filter_l3;
+ int num_filter_l4 = global->opt.num_filter_l4;
+ uint8_t *data = odp_packet_data(pkt);
+ uint32_t seg_len = odp_packet_seg_len(pkt);
+ uint32_t l2_offset = odp_packet_l2_offset(pkt);
+ uint32_t l3_offset = odp_packet_l3_offset(pkt);
+ uint32_t l4_offset = odp_packet_l4_offset(pkt);
+ int tcp = odp_packet_has_tcp(pkt);
+ int udp = odp_packet_has_udp(pkt);
+ int sctp = odp_packet_has_sctp(pkt);
+
+ if (odp_packet_has_ts(pkt))
+ time = odp_packet_ts(pkt);
+ else
+ time = odp_time_local();
+
+ /* Filter based on L3 type */
+ if (num_filter_l3) {
+ match = 0;
+
+ for (i = 0; i < num_filter_l3; i++) {
+ type = global->opt.filter_l3[i];
+
+ if (type == odp_packet_l3_type(pkt)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (!match)
+ return 0;
+ }
+
+ /* Filter based on L4 type */
+ if (num_filter_l4) {
+ match = 0;
+
+ for (i = 0; i < num_filter_l4; i++) {
+ type = global->opt.filter_l4[i];
+
+ if (type == odp_packet_l4_type(pkt)) {
+ match = 1;
+ break;
+ }
+ }
+
+ if (!match)
+ return 0;
+ }
+
+ nsec = odp_time_to_ns(time);
+ sec = nsec / ODP_TIME_SEC_IN_NS;
+ nsec = nsec - (sec * ODP_TIME_SEC_IN_NS);
+ pktio = odp_packet_input(pkt);
+
+ odp_pktio_info(pktio, &pktio_info);
+
+ printf("PACKET [%" PRIu64 "]\n", num_packet);
+ printf(" time: %" PRIu64 ".%09" PRIu64 " sec\n", sec, nsec);
+ printf(" interface name: %s\n", pktio_info.name);
+ printf(" packet length: %u bytes\n", odp_packet_len(pkt));
+
+ /* L2 */
+ if (odp_packet_has_eth(pkt)) {
+ printf(" Ethernet offset: %u bytes\n", l2_offset);
+ offset = l2_offset;
+ if (offset + 6 <= seg_len) {
+ printf(" dst address: ");
+ print_mac_addr(data + offset);
+ }
+
+ offset = l2_offset + 6;
+ if (offset + 6 <= seg_len) {
+ printf(" src address: ");
+ print_mac_addr(data + offset);
+ }
+ } else if (odp_packet_has_l2(pkt)) {
+ printf(" L2 (%i) offset: %u bytes\n",
+ odp_packet_l2_type(pkt), l2_offset);
+ }
+
+ /* L3 */
+ if (odp_packet_has_ipv4(pkt)) {
+ printf(" IPv4 offset: %u bytes\n", l3_offset);
+ offset = l3_offset + 12;
+ if (offset + 4 <= seg_len) {
+ printf(" src address: ");
+ print_ipv4_addr(data + offset);
+ }
+
+ offset = l3_offset + 16;
+ if (offset + 4 <= seg_len) {
+ printf(" dst address: ");
+ print_ipv4_addr(data + offset);
+ }
+ } else if (odp_packet_has_ipv6(pkt)) {
+ printf(" IPv6 offset: %u bytes\n", l3_offset);
+ } else if (odp_packet_has_l3(pkt)) {
+ printf(" L3 (%i) offset: %u bytes\n",
+ odp_packet_l3_type(pkt), l3_offset);
+ }
+
+ /* L4 */
+ if (tcp || udp || sctp) {
+ if (tcp)
+ printf(" TCP offset: %u bytes\n", l4_offset);
+ else if (udp)
+ printf(" UDP offset: %u bytes\n", l4_offset);
+ else
+ printf(" SCTP offset: %u bytes\n", l4_offset);
+
+ offset = l4_offset;
+ if (offset + 2 <= seg_len) {
+ printf(" src port: ");
+ print_port(data + offset);
+ }
+
+ offset = l4_offset + 2;
+ if (offset + 2 <= seg_len) {
+ printf(" dst port: ");
+ print_port(data + offset);
+ }
+ } else if (odp_packet_has_l4(pkt)) {
+ printf(" L4 (%i) offset: %u bytes\n",
+ odp_packet_l4_type(pkt), l4_offset);
+ }
+
+ /* User defined data range */
+ if (global->opt.data_len)
+ print_data(pkt, global->opt.data_offset, global->opt.data_len);
+
+ if (global->opt.verbose)
+ odp_packet_print(pkt);
+
+ printf("\n");
+
+ return 1;
+}
+
+static int receive_packets(test_global_t *global)
+{
+ odp_event_t ev;
+ odp_packet_t pkt;
+ int printed;
+ uint64_t num_packet = 0;
+
+ while (!global->stop) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ if (odp_event_type(ev) != ODP_EVENT_PACKET) {
+ printf("Bad event type: %i\n", odp_event_type(ev));
+ odp_event_free(ev);
+ continue;
+ }
+
+ pkt = odp_packet_from_event(ev);
+
+ printed = print_packet(global, pkt, num_packet);
+
+ odp_packet_free(pkt);
+
+ if (!printed)
+ continue;
+
+ num_packet++;
+ if (global->opt.num_packet &&
+ num_packet >= global->opt.num_packet)
+ break;
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ odp_instance_t instance;
+ test_global_t *global;
+ int ret = 0;
+
+ global = &test_global;
+ memset(global, 0, sizeof(test_global_t));
+
+ signal(SIGINT, sig_handler);
+
+ if (parse_options(argc, argv, global))
+ return -1;
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(&instance, NULL, NULL)) {
+ printf("Error: Global init failed.\n");
+ return -1;
+ }
+
+ /* Init this thread */
+ if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
+ printf("Error: Local init failed.\n");
+ return -1;
+ }
+
+ global->pool = ODP_POOL_INVALID;
+
+ odp_schedule_config(NULL);
+
+ odp_sys_info_print();
+
+ if (open_pktios(global)) {
+ printf("Error: pktio open failed\n");
+ return -1;
+ }
+
+ if (start_pktios(global)) {
+ printf("Error: pktio start failed\n");
+ return -1;
+ }
+
+ if (receive_packets(global)) {
+ printf("Error: packet receive failed\n");
+ return -1;
+ }
+
+ if (stop_pktios(global)) {
+ printf("Error: pktio stop failed\n");
+ return -1;
+ }
+
+ empty_queues();
+
+ if (close_pktios(global)) {
+ printf("Error: pktio close failed\n");
+ return -1;
+ }
+
+ if (odp_term_local()) {
+ printf("Error: term local failed.\n");
+ return -1;
+ }
+
+ if (odp_term_global(instance)) {
+ printf("Error: term global failed.\n");
+ return -1;
+ }
+
+ return ret;
+}
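A typical invocation of the new dumper, assuming the standard ODP protocol type values (ODP_PROTO_L3_TYPE_IPV4 is the IPv4 Ethertype, 0x0800 = 2048, and ODP_PROTO_L4_TYPE_UDP the IP protocol number 17):

    ./odp_packet_dump -i eth0 --filter_l3 2048 --filter_l4 17 -n 5 -v

This prints the first five IPv4/UDP packets received on eth0, each with the verbose odp_packet_print() output appended.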
diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c
index 7755efcb9..b1c4a79c8 100644
--- a/example/packet/odp_pktio.c
+++ b/example/packet/odp_pktio.c
@@ -85,6 +85,8 @@ typedef struct {
typedef struct {
/** Application (parsed) arguments */
appl_args_t appl;
+ /** Shm for global data */
+ odp_shm_t shm;
/** Thread specific arguments */
thread_args_t thread[MAX_WORKERS];
/** Flag to exit worker threads */
@@ -341,6 +343,7 @@ static int pktio_ifburst_thread(void *arg)
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
odp_pool_t pool;
int num_workers;
@@ -350,19 +353,22 @@ int main(int argc, char *argv[])
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_pool_param_t params;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
+ odp_shm_t shm;
- args = calloc(1, sizeof(args_t));
- if (args == NULL) {
- EXAMPLE_ERR("Error: args mem alloc failed.\n");
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
exit(EXIT_FAILURE);
}
- /* Parse and store the application arguments */
- parse_args(argc, argv, &args->appl);
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
EXAMPLE_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -373,6 +379,21 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("_appl_global_data", sizeof(args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ args = odp_shm_addr(shm);
+ if (args == NULL) {
+ EXAMPLE_ERR("Error: shared mem alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(args, 0, sizeof(args_t));
+ args->shm = shm;
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv, &args->appl);
+
/* Print both system and application information */
print_info(NO_PATH(argv[0]), &args->appl);
@@ -403,6 +424,9 @@ int main(int argc, char *argv[])
}
odp_pool_print(pool);
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/* Create a pktio instance for each interface */
for (i = 0; i < args->appl.if_count; ++i)
create_pktio(args->appl.if_names[i], pool, args->appl.mode);
@@ -467,9 +491,14 @@ int main(int argc, char *argv[])
free(args->appl.if_names);
free(args->appl.if_str);
- free(args);
odp_pool_destroy(pool);
+
+ if (odp_shm_free(args->shm)) {
+ EXAMPLE_ERR("Error: shm free global data\n");
+ exit(EXIT_FAILURE);
+ }
+
odp_term_local();
return odp_term_global(instance);
}
@@ -568,9 +597,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:i:+m:t:h";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->cpu_count = 1; /* use one worker by default */
appl_args->mode = APPL_MODE_PKT_SCHED;
appl_args->time = 0; /**< loop forever */
diff --git a/example/packet/packet_dump_run.sh b/example/packet/packet_dump_run.sh
new file mode 100755
index 000000000..fe43aa272
--- /dev/null
+++ b/example/packet/packet_dump_run.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Copyright (c) 2018, Linaro Limited
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+
+PCAP_IN=`find . ${TEST_DIR} $(dirname $0) -name udp64.pcap -print -quit`
+
+echo "Packet dump test using PCAP_IN = ${PCAP_IN}"
+
+./odp_packet_dump${EXEEXT} -i pcap:in=${PCAP_IN}:loops=10 -n 10 -o 0 -l 64
+STATUS=$?
+if [ "$STATUS" -ne 0 ]; then
+ echo "Error: status was: $STATUS, expected 0"
+ exit 1
+fi
+
+exit 0
diff --git a/example/switch/odp_switch.c b/example/switch/odp_switch.c
index 18771ea4b..a0162145b 100644
--- a/example/switch/odp_switch.c
+++ b/example/switch/odp_switch.c
@@ -61,8 +61,6 @@ typedef struct {
char *if_str; /**< Storage for interface names */
} appl_args_t;
-static int exit_threads; /**< Break workers loop if set to 1 */
-
/**
* Statistics
*/
@@ -118,6 +116,10 @@ typedef struct {
appl_args_t appl; /**< Parsed application arguments */
thread_args_t thread[MAX_WORKERS]; /**< Thread specific arguments */
odp_pool_t pool; /**< Packet pool */
+ /** Global barrier to synchronize main and workers */
+ odp_barrier_t barrier;
+ /** Break workers loop if set to 1 */
+ int exit_threads;
/** Table of pktio handles */
struct {
odp_pktio_t pktio;
@@ -136,9 +138,6 @@ typedef struct {
/** Global pointer to args */
static args_t *gbl_args;
-/** Global barrier to synchronize main and workers */
-static odp_barrier_t barrier;
-
/**
* Calculate MAC table index using Ethernet address hash
*
@@ -333,7 +332,7 @@ static int print_speed_stats(int num_workers, stats_t (*thr_stats)[MAX_PKTIOS],
timeout = 1;
}
/* Wait for all threads to be ready */
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&gbl_args->barrier);
do {
uint64_t rx_pkts[MAX_PKTIOS] = {0};
@@ -595,9 +594,9 @@ static int run_worker(void *arg)
pktin = thr_args->rx_pktio[pktio].pktin;
port_in = thr_args->rx_pktio[pktio].port_idx;
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&gbl_args->barrier);
- while (!exit_threads) {
+ while (!gbl_args->exit_threads) {
int sent;
unsigned drops;
@@ -759,9 +758,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:+t:+a:i:h";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->cpu_count = 1; /* use one worker by default */
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 10; /* get and print pps stats second */
@@ -871,6 +867,7 @@ static void gbl_args_init(args_t *args)
int main(int argc, char **argv)
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
int i, j;
int cpu;
@@ -883,10 +880,21 @@ int main(int argc, char **argv)
stats_t (*stats)[MAX_PKTIOS];
int if_count;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ printf("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
printf("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -974,7 +982,7 @@ int main(int argc, char **argv)
memset(thread_tbl, 0, sizeof(thread_tbl));
- odp_barrier_init(&barrier, num_workers + 1);
+ odp_barrier_init(&gbl_args->barrier, num_workers + 1);
stats = gbl_args->stats;
@@ -1014,12 +1022,22 @@ int main(int argc, char **argv)
ret = print_speed_stats(num_workers, gbl_args->stats,
gbl_args->appl.time, gbl_args->appl.accuracy);
- exit_threads = 1;
+ gbl_args->exit_threads = 1;
/* Master thread waits for other threads to exit */
for (i = 0; i < num_workers; ++i)
odph_odpthreads_join(&thread_tbl[i]);
+ /* Stop and close used pktio devices */
+ for (i = 0; i < if_count; i++) {
+ odp_pktio_t pktio = gbl_args->pktios[i].pktio;
+
+ if (odp_pktio_stop(pktio) || odp_pktio_close(pktio)) {
+ printf("Error: failed to close pktio\n");
+ exit(EXIT_FAILURE);
+ }
+ }
+
free(gbl_args->appl.if_names);
free(gbl_args->appl.if_str);
diff --git a/example/sysinfo/odp_sysinfo.c b/example/sysinfo/odp_sysinfo.c
index e177319bb..709f25d92 100644
--- a/example/sysinfo/odp_sysinfo.c
+++ b/example/sysinfo/odp_sysinfo.c
@@ -25,52 +25,206 @@ static const char *support_level(odp_support_t support)
}
}
+static const char *cipher_alg_name(odp_cipher_alg_t cipher)
+{
+ switch (cipher) {
+ case ODP_CIPHER_ALG_NULL:
+ return "null";
+ case ODP_CIPHER_ALG_DES:
+ return "des";
+ case ODP_CIPHER_ALG_3DES_CBC:
+ return "3des_cbc";
+ case ODP_CIPHER_ALG_AES_CBC:
+ return "aes_cbc";
+ case ODP_CIPHER_ALG_AES_CTR:
+ return "aes_ctr";
+ case ODP_CIPHER_ALG_AES_GCM:
+ return "aes_gcm";
+ case ODP_CIPHER_ALG_AES_CCM:
+ return "aes_ccm";
+ case ODP_CIPHER_ALG_CHACHA20_POLY1305:
+ return "chacha20_poly1305";
+ default:
+ return "Unknown";
+ }
+}
+
+static const char *auth_alg_name(odp_auth_alg_t auth)
+{
+ switch (auth) {
+ case ODP_AUTH_ALG_NULL:
+ return "null";
+ case ODP_AUTH_ALG_MD5_HMAC:
+ return "md5_hmac";
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ return "sha1_hmac";
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ return "sha256_hmac";
+ case ODP_AUTH_ALG_SHA384_HMAC:
+ return "sha384_hmac";
+ case ODP_AUTH_ALG_SHA512_HMAC:
+ return "sha512_hmac";
+ case ODP_AUTH_ALG_AES_XCBC_MAC:
+ return "aes_xcbc_mac";
+ case ODP_AUTH_ALG_AES_GCM:
+ return "aes_gcm";
+ case ODP_AUTH_ALG_AES_GMAC:
+ return "aes_gmac";
+ case ODP_AUTH_ALG_AES_CCM:
+ return "aes_ccm";
+ case ODP_AUTH_ALG_AES_CMAC:
+ return "aes_cmac";
+ case ODP_AUTH_ALG_CHACHA20_POLY1305:
+ return "chacha20_poly1305";
+ default:
+ return "Unknown";
+ }
+}
+
static void print_cipher_algos(odp_crypto_cipher_algos_t ciphers)
{
if (ciphers.bit.null)
- printf("null ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_NULL));
if (ciphers.bit.des)
- printf("des ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_DES));
if (ciphers.bit.trides_cbc)
- printf("trides_cbc ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_3DES_CBC));
if (ciphers.bit.aes_cbc)
- printf("aes_cbc ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_AES_CBC));
if (ciphers.bit.aes_ctr)
- printf("aes_ctr ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_AES_CTR));
if (ciphers.bit.aes_gcm)
- printf("aes_gcm ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_AES_GCM));
if (ciphers.bit.aes_ccm)
- printf("aes_ccm ");
+ printf("%s ", cipher_alg_name(ODP_CIPHER_ALG_AES_CCM));
if (ciphers.bit.chacha20_poly1305)
- printf("chacha20_poly1305 ");
+ printf("%s ",
+ cipher_alg_name(ODP_CIPHER_ALG_CHACHA20_POLY1305));
}
static void print_auth_algos(odp_crypto_auth_algos_t auths)
{
if (auths.bit.null)
- printf("null ");
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_NULL));
+ if (auths.bit.md5_hmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_MD5_HMAC));
+ if (auths.bit.sha1_hmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_SHA1_HMAC));
+ if (auths.bit.sha256_hmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_SHA256_HMAC));
+ if (auths.bit.sha384_hmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_SHA384_HMAC));
+ if (auths.bit.sha512_hmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_SHA512_HMAC));
+ if (auths.bit.aes_gcm)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_AES_GCM));
+ if (auths.bit.aes_gmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_AES_GMAC));
+ if (auths.bit.aes_ccm)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_AES_CCM));
+ if (auths.bit.aes_cmac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_AES_CMAC));
+ if (auths.bit.aes_xcbc_mac)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_AES_XCBC_MAC));
+ if (auths.bit.chacha20_poly1305)
+ printf("%s ", auth_alg_name(ODP_AUTH_ALG_CHACHA20_POLY1305));
+}
+
+static void print_cipher_capa(odp_cipher_alg_t cipher)
+{
+ int caps = odp_crypto_cipher_capability(cipher, NULL, 0);
+ int rc, i;
+
+ if (caps <= 0)
+ return;
+
+ odp_crypto_cipher_capability_t capa[caps];
+
+ rc = odp_crypto_cipher_capability(cipher, capa, caps);
+ if (rc < 0)
+ return;
+
+ printf(" %s:\n", cipher_alg_name(cipher));
+ for (i = 0; i < rc; i++)
+ printf(" key %d iv %d\n",
+ capa[i].key_len, capa[i].iv_len);
+}
+
+static void print_auth_capa(odp_auth_alg_t auth)
+{
+ int caps = odp_crypto_auth_capability(auth, NULL, 0);
+ int rc, i;
+
+ if (caps <= 0)
+ return;
+
+ odp_crypto_auth_capability_t capa[caps];
+
+ rc = odp_crypto_auth_capability(auth, capa, caps);
+ if (rc < 0)
+ return;
+
+ printf(" %s:\n", auth_alg_name(auth));
+ for (i = 0; i < rc; i++) {
+ printf(" digest %d", capa[i].digest_len);
+ if (capa[i].key_len != 0)
+ printf(" key %d", capa[i].key_len);
+ if (capa[i].iv_len != 0)
+ printf(" iv %d", capa[i].iv_len);
+ if (capa[i].aad_len.max != 0)
+ printf(" aad %d, %d, %d",
+ capa[i].aad_len.min, capa[i].aad_len.max,
+ capa[i].aad_len.inc);
+ printf("\n");
+ }
+}
+
+static void print_cipher_caps(odp_crypto_cipher_algos_t ciphers)
+{
+ if (ciphers.bit.null)
+ print_cipher_capa(ODP_CIPHER_ALG_NULL);
+ if (ciphers.bit.des)
+ print_cipher_capa(ODP_CIPHER_ALG_DES);
+ if (ciphers.bit.trides_cbc)
+ print_cipher_capa(ODP_CIPHER_ALG_3DES_CBC);
+ if (ciphers.bit.aes_cbc)
+ print_cipher_capa(ODP_CIPHER_ALG_AES_CBC);
+ if (ciphers.bit.aes_ctr)
+ print_cipher_capa(ODP_CIPHER_ALG_AES_CTR);
+ if (ciphers.bit.aes_gcm)
+ print_cipher_capa(ODP_CIPHER_ALG_AES_GCM);
+ if (ciphers.bit.aes_ccm)
+ print_cipher_capa(ODP_CIPHER_ALG_AES_CCM);
+ if (ciphers.bit.chacha20_poly1305)
+ print_cipher_capa(ODP_CIPHER_ALG_CHACHA20_POLY1305);
+}
+
+static void print_auth_caps(odp_crypto_auth_algos_t auths)
+{
+ if (auths.bit.null)
+ print_auth_capa(ODP_AUTH_ALG_NULL);
if (auths.bit.md5_hmac)
- printf("md5_hmac ");
+ print_auth_capa(ODP_AUTH_ALG_MD5_HMAC);
if (auths.bit.sha1_hmac)
- printf("sha1_hmac ");
+ print_auth_capa(ODP_AUTH_ALG_SHA1_HMAC);
if (auths.bit.sha256_hmac)
- printf("sha256_hmac ");
+ print_auth_capa(ODP_AUTH_ALG_SHA256_HMAC);
if (auths.bit.sha384_hmac)
- printf("sha384_hmac ");
+ print_auth_capa(ODP_AUTH_ALG_SHA384_HMAC);
if (auths.bit.sha512_hmac)
- printf("sha512_hmac ");
+ print_auth_capa(ODP_AUTH_ALG_SHA512_HMAC);
if (auths.bit.aes_gcm)
- printf("aes_gcm ");
+ print_auth_capa(ODP_AUTH_ALG_AES_GCM);
if (auths.bit.aes_gmac)
- printf("aes_gmac ");
+ print_auth_capa(ODP_AUTH_ALG_AES_GMAC);
if (auths.bit.aes_ccm)
- printf("aes_ccm ");
+ print_auth_capa(ODP_AUTH_ALG_AES_CCM);
if (auths.bit.aes_cmac)
- printf("aes_cmac ");
+ print_auth_capa(ODP_AUTH_ALG_AES_CMAC);
if (auths.bit.aes_xcbc_mac)
- printf("aes_xcbc_mac ");
+ print_auth_capa(ODP_AUTH_ALG_AES_XCBC_MAC);
if (auths.bit.chacha20_poly1305)
- printf("chacha20_poly1305 ");
+ print_auth_capa(ODP_AUTH_ALG_CHACHA20_POLY1305);
}
int main(void)
@@ -84,6 +238,7 @@ int main(void)
odp_queue_capability_t queue_capa;
odp_timer_capability_t timer_capa;
odp_crypto_capability_t crypto_capa;
+ odp_schedule_capability_t schedule_capa;
uint64_t huge_page[MAX_HUGE_PAGES];
char ava_mask_str[ODP_CPUMASK_STR_SIZE];
char work_mask_str[ODP_CPUMASK_STR_SIZE];
@@ -139,6 +294,11 @@ int main(void)
return -1;
}
+ if (odp_schedule_capability(&schedule_capa)) {
+ printf("schedule capability failed\n");
+ return -1;
+ }
+
if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa)) {
printf("timer capability failed\n");
return -1;
@@ -239,21 +399,17 @@ int main(void)
printf("\n");
printf(" SCHEDULER\n");
printf(" max ordered locks: %" PRIu32 "\n",
- queue_capa.max_ordered_locks);
- printf(" max groups: %u\n", queue_capa.max_sched_groups);
- printf(" priorities: %u\n", queue_capa.sched_prios);
- printf(" sched.max_num: %" PRIu32 "\n",
- queue_capa.sched.max_num);
- printf(" sched.max_size: %" PRIu32 "\n",
- queue_capa.sched.max_size);
- printf(" sched.lf.max_num: %" PRIu32 "\n",
- queue_capa.sched.lockfree.max_num);
- printf(" sched.lf.max_size: %" PRIu32 "\n",
- queue_capa.sched.lockfree.max_size);
- printf(" sched.wf.max_num: %" PRIu32 "\n",
- queue_capa.sched.waitfree.max_num);
- printf(" sched.wf.max_size: %" PRIu32 "\n",
- queue_capa.sched.waitfree.max_size);
+ schedule_capa.max_ordered_locks);
+ printf(" max groups: %u\n", schedule_capa.max_groups);
+ printf(" priorities: %u\n", schedule_capa.max_prios);
+ printf(" sched.max_queues: %" PRIu32 "\n",
+ schedule_capa.max_queues);
+ printf(" sched.max_queue_size: %" PRIu32 "\n",
+ schedule_capa.max_queue_size);
+ printf(" sched.lf_queues: %ssupported\n",
+ schedule_capa.lockfree_queues ? "" : "not ");
+ printf(" sched.wf_queues: %ssupported\n",
+ schedule_capa.waitfree_queues ? "" : "not ");
printf("\n");
printf(" TIMER\n");
@@ -271,9 +427,15 @@ int main(void)
printf(" cipher algorithms: ");
print_cipher_algos(crypto_capa.ciphers);
printf("\n");
+ print_cipher_caps(crypto_capa.ciphers);
printf(" auth algorithms: ");
print_auth_algos(crypto_capa.auths);
printf("\n");
+ print_auth_caps(crypto_capa.auths);
+ printf("\n");
+
+ printf(" SHM MEMORY BLOCKS:\n");
+ odp_shm_print_all();
printf("\n");
printf("***********************************************************\n");
diff --git a/example/time/time_global_test.c b/example/time/time_global_test.c
index 317f9a270..6c56d6c6e 100644
--- a/example/time/time_global_test.c
+++ b/example/time/time_global_test.c
@@ -254,16 +254,25 @@ int main(int argc, char *argv[])
odp_shm_t shm_glbls = ODP_SHM_INVALID;
odp_shm_t shm_log = ODP_SHM_INVALID;
int log_size, log_enries_num;
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
- /* let helper collect its own arguments (e.g. --odph_proc) */
+ printf("\nODP global time test starts\n");
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
- printf("\nODP global time test starts\n");
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
err = 1;
EXAMPLE_ERR("ODP global init failed.\n");
goto end;
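The same startup sequence recurs throughout this patch: let the helper consume its options, read them back, and propagate the chosen memory model into global init. A condensed sketch of that boilerplate (error handling trimmed):

    #include <odp_api.h>
    #include <odp/helper/odph_api.h>

    int main(int argc, char *argv[])
    {
            odph_helper_options_t helper_options;
            odp_instance_t instance;
            odp_init_t init_param;

            /* Helper consumes its own arguments (e.g. --odph_proc) */
            argc = odph_parse_options(argc, argv);
            if (odph_options(&helper_options))
                    return -1;

            odp_init_param_init(&init_param);
            init_param.mem_model = helper_options.mem_model;

            if (odp_init_global(&instance, &init_param, NULL))
                    return -1;
            if (odp_init_local(instance, ODP_THREAD_CONTROL))
                    return -1;

            /* ... application code ... */

            odp_term_local();
            odp_term_global(instance);
            return 0;
    }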
diff --git a/example/timer/odp_timer_accuracy.c b/example/timer/odp_timer_accuracy.c
index 3b0d7e380..9409e3404 100644
--- a/example/timer/odp_timer_accuracy.c
+++ b/example/timer/odp_timer_accuracy.c
@@ -426,6 +426,9 @@ int main(int argc, char *argv[])
odp_sys_info_print();
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
num = test_global.opt.num;
test_global.timer = calloc(num, sizeof(odp_timer_t));
diff --git a/example/timer/odp_timer_simple.c b/example/timer/odp_timer_simple.c
index 116f8ba60..ddefb0d2a 100644
--- a/example/timer/odp_timer_simple.c
+++ b/example/timer/odp_timer_simple.c
@@ -81,6 +81,9 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
goto err;
}
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/*
* Create a queue for timer test
*/
diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
index 9e431b477..ca3e8ddf5 100644
--- a/example/timer/odp_timer_test.c
+++ b/example/timer/odp_timer_test.c
@@ -274,9 +274,6 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const char *shortopts = "+c:r:m:x:p:t:h";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
/* defaults */
odp_timer_capability(ODP_CLOCK_CPU, &timer_capa);
@@ -334,6 +331,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
int num_workers;
odp_queue_t queue;
@@ -345,6 +343,7 @@ int main(int argc, char *argv[])
odp_cpumask_t cpumask;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
odp_shm_t shm = ODP_SHM_INVALID;
test_globals_t *gbls = NULL;
@@ -352,7 +351,17 @@ int main(int argc, char *argv[])
printf("\nODP timer example starts\n");
- if (odp_init_global(&instance, NULL, NULL)) {
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ EXAMPLE_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (odp_init_global(&instance, &init_param, NULL)) {
err = 1;
printf("ODP global init failed.\n");
goto err_global;
@@ -409,6 +418,9 @@ int main(int argc, char *argv[])
printf("period: %i usec\n", gbls->args.period_us);
printf("timeouts: %i\n", gbls->args.tmo_count);
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
/*
* Create pool for timeouts
*/
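These odp_schedule_config(NULL) calls reflect the new requirement that the scheduler is configured exactly once, after init and before scheduled queues are created or used. A minimal sketch; NULL selects the implementation default configuration:

    /* After odp_init_global()/odp_init_local(), before creating
     * scheduled queues: */
    if (odp_schedule_config(NULL))
            return -1;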
diff --git a/example/traffic_mgmt/odp_traffic_mgmt.c b/example/traffic_mgmt/odp_traffic_mgmt.c
index 2af106ab8..dbc659589 100644
--- a/example/traffic_mgmt/odp_traffic_mgmt.c
+++ b/example/traffic_mgmt/odp_traffic_mgmt.c
@@ -230,7 +230,7 @@ static odp_tm_t odp_tm_test;
static odp_pool_t odp_pool;
-static odp_tm_queue_t queue_num_tbls[NUM_SVC_CLASSES][TM_QUEUES_PER_CLASS + 1];
+static odp_tm_queue_t queue_num_tbls[NUM_SVC_CLASSES][TM_QUEUES_PER_CLASS];
static uint32_t next_queue_nums[NUM_SVC_CLASSES];
static uint8_t random_buf[RANDOM_BUF_LEN];
@@ -434,7 +434,7 @@ static int config_example_user(odp_tm_node_t cos_tm_node,
return rc;
svc_class_queue_num = next_queue_nums[svc_class]++;
- queue_num_tbls[svc_class][svc_class_queue_num + 1] =
+ queue_num_tbls[svc_class][svc_class_queue_num] =
tm_queue;
}
}
@@ -633,7 +633,7 @@ static int traffic_generator(uint32_t pkts_to_send)
while (pkt_cnt < pkts_to_send) {
svc_class = pkt_service_class();
queue_num = random_16() & (TM_QUEUES_PER_CLASS - 1);
- tm_queue = queue_num_tbls[svc_class][queue_num + 1];
+ tm_queue = queue_num_tbls[svc_class][queue_num];
pkt_len = ((uint32_t)((random_8() & 0x7F) + 2)) * 32;
pkt_len = MIN(pkt_len, 1500);
pkt = make_odp_packet(pkt_len);
@@ -743,6 +743,47 @@ static void signal_handler(int signal)
abort();
}
+static int destroy_tm_queues(void)
+{
+ int i;
+ int class;
+ int ret;
+
+ for (i = 0; i < NUM_SVC_CLASSES; i++)
+ for (class = 0; class < TM_QUEUES_PER_CLASS; class++) {
+ odp_tm_queue_t tm_queue;
+ odp_tm_queue_info_t info;
+
+ tm_queue = queue_num_tbls[i][class];
+
+ ret = odp_tm_queue_info(tm_queue, &info);
+ if (ret) {
+ printf("Err: odp_tm_queue_info %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_tm_node_disconnect(info.next_tm_node);
+ if (ret) {
+ printf("Err: odp_tm_node_disconnect %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_tm_queue_disconnect(tm_queue);
+ if (ret) {
+ printf("odp_tm_queue_disconnect %d\n", ret);
+ return -1;
+ }
+
+ ret = odp_tm_queue_destroy(tm_queue);
+ if (ret) {
+ printf("odp_tm_queue_destroy %d\n", ret);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
int main(int argc, char *argv[])
{
struct sigaction signal_action;
@@ -796,6 +837,12 @@ int main(int argc, char *argv[])
odp_tm_stats_print(odp_tm_test);
+ rc = destroy_tm_queues();
+ if (rc != 0) {
+ printf("Error: destroy_tm_queues() failed, rc = %d\n", rc);
+ return -1;
+ }
+
rc = odp_pool_destroy(odp_pool);
if (rc != 0) {
printf("Error: odp_pool_destroy() failed, rc = %d\n", rc);
@@ -814,11 +861,12 @@ int main(int argc, char *argv[])
return -1;
}
- /* Trying to keep this example as simple as possible we avoid
- * clean termination of TM queues. This will error on global
- * termination code
- */
- (void)odp_term_global(instance);
+ rc = odp_term_global(instance);
+ if (rc != 0) {
+ printf("Error: odp_term_global() failed, rc = %d\n", rc);
+ return -1;
+ }
+ printf("Quit\n");
return 0;
}
diff --git a/helper/Makefile.am b/helper/Makefile.am
index d1b0359cc..518a6a944 100644
--- a/helper/Makefile.am
+++ b/helper/Makefile.am
@@ -22,6 +22,7 @@ helperinclude_HEADERS = \
include/odp/helper/odph_hashtable.h\
include/odp/helper/odph_iplookuptable.h\
include/odp/helper/odph_lineartable.h\
+ include/odp/helper/sctp.h \
include/odp/helper/strong_types.h\
include/odp/helper/tcp.h\
include/odp/helper/table.h\
diff --git a/helper/chksum.c b/helper/chksum.c
index da5625a78..7c7c20a2f 100644
--- a/helper/chksum.c
+++ b/helper/chksum.c
@@ -9,6 +9,7 @@
#include <odp.h>
#include <odp/helper/ip.h>
#include <odp/helper/udp.h>
+#include <odp/helper/sctp.h>
#include <odp/helper/tcp.h>
#include <odp/helper/chksum.h>
#include <stddef.h>
@@ -351,3 +352,77 @@ int odph_udp_tcp_chksum(odp_packet_t odp_pkt,
return ret_code;
}
+
+static uint32_t odph_packet_crc32c(odp_packet_t pkt,
+ uint32_t offset,
+ uint32_t length,
+ uint32_t init_val)
+{
+ uint32_t sum = init_val;
+
+ if (offset + length > odp_packet_len(pkt))
+ return sum;
+
+ while (length > 0) {
+ uint32_t seg_len;
+ void *data = odp_packet_offset(pkt, offset, &seg_len, NULL);
+
+ if (seg_len > length)
+ seg_len = length;
+
+ sum = odp_hash_crc32c(data, seg_len, sum);
+ length -= seg_len;
+ offset += seg_len;
+ }
+
+ return sum;
+}
+
+int odph_sctp_chksum_set(odp_packet_t pkt)
+{
+ uint32_t l4_offset = odp_packet_l4_offset(pkt);
+ uint32_t sum = 0;
+
+ if (!odp_packet_has_sctp(pkt))
+ return -1;
+
+ if (l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
+
+ odp_packet_copy_from_mem(pkt,
+ l4_offset + ODPH_SCTPHDR_LEN - 4,
+ 4,
+ &sum);
+
+ sum = ~odph_packet_crc32c(pkt, l4_offset,
+ odp_packet_len(pkt) - l4_offset,
+ ~0);
+ return odp_packet_copy_from_mem(pkt,
+ l4_offset + ODPH_SCTPHDR_LEN - 4,
+ 4,
+ &sum);
+}
+
+int odph_sctp_chksum_verify(odp_packet_t pkt)
+{
+ uint32_t l4_offset = odp_packet_l4_offset(pkt);
+ uint32_t sum;
+ uint32_t temp = 0;
+
+ if (!odp_packet_has_sctp(pkt))
+ return -1;
+
+ sum = odph_packet_crc32c(pkt, l4_offset,
+ ODPH_SCTPHDR_LEN - 4,
+ ~0);
+ sum = odp_hash_crc32c(&temp, 4, sum);
+ sum = ~odph_packet_crc32c(pkt, l4_offset + ODPH_SCTPHDR_LEN,
+ odp_packet_len(pkt) - l4_offset -
+ ODPH_SCTPHDR_LEN,
+ sum);
+
+ odp_packet_copy_to_mem(pkt, l4_offset + ODPH_SCTPHDR_LEN - 4,
+ 4, &temp);
+
+ return (temp == sum) ? 0 : 2;
+}
diff --git a/helper/include/odp/helper/chksum.h b/helper/include/odp/helper/chksum.h
index c03abeb7b..148b98601 100644
--- a/helper/include/odp/helper/chksum.h
+++ b/helper/include/odp/helper/chksum.h
@@ -190,6 +190,48 @@ static inline int odph_udp_chksum_verify(odp_packet_t odp_pkt)
}
/**
+ * Generate SCTP checksum
+ *
+ * This function supports SCTP over either IPv4 or IPv6 - including handling
+ * any IPv4 header options and any IPv6 extension headers. However, it
+ * does not handle tunneled pkts (i.e. any case where there is more than
+ * one IPv4/IPv6 header).
+ * This function also handles non-contiguous pkts. In particular, it can
+ * handle arbitrary packet segmentation, including cases where the segments
+ * are not 2 byte aligned, nor have a length that is a multiple of 2. This
+ * function can also handle jumbo frames (at least up to 10K).
+ *
+ * This function will insert the calculated CRC32-c checksum into the proper
+ * location in the SCTP header.
+ *
+ * @param odp_pkt Calculate and insert chksum for this SCTP pkt, which can
+ * be over IPv4 or IPv6.
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odph_sctp_chksum_set(odp_packet_t odp_pkt);
+
+/**
+ * Verify SCTP checksum
+ *
+ * This function supports SCTP over either IPv4 or IPv6 - including handling
+ * any IPv4 header options and any IPv6 extension headers. However, it
+ * does not handle tunneled pkts (i.e. any case where there is more than
+ * one IPv4/IPv6 header).
+ * This function also handles non-contiguous pkts. In particular, it can
+ * handle arbitrary packet segmentation, including cases where the segments
+ * are not 2 byte aligned, nor have a length that is a multiple of 2. This
+ * function can also handle jumbo frames (at least up to 10K).
+ *
+ * @param odp_pkt Calculate and compare the chksum for this SCTP pkt,
+ * which can be over IPv4 or IPv6.
+ * @retval <0 on failure
+ * @retval 0 if the incoming chksum field is correct
+ * @retval 2 when the chksum field is incorrect
+ */
+int odph_sctp_chksum_verify(odp_packet_t odp_pkt);
+
+/**
* @}
*/
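A minimal usage sketch for the two new helpers; the wrapper names are hypothetical and pkt is assumed to be a packet already parsed as SCTP:

    static int sctp_tx_prepare(odp_packet_t pkt)
    {
            /* Compute CRC32-c over SCTP header and payload and insert
             * it into the checksum field */
            return odph_sctp_chksum_set(pkt);
    }

    static int sctp_rx_check(odp_packet_t pkt)
    {
            /* 0: checksum correct, 2: incorrect, <0: not SCTP/failure */
            return (odph_sctp_chksum_verify(pkt) == 0) ? 0 : -1;
    }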
diff --git a/helper/include/odp/helper/ip.h b/helper/include/odp/helper/ip.h
index cdc430627..b5bfed78c 100644
--- a/helper/include/odp/helper/ip.h
+++ b/helper/include/odp/helper/ip.h
@@ -259,6 +259,8 @@ typedef struct ODP_PACKED {
#define ODPH_IPPROTO_AH 0x33 /**< Authentication Header (51) */
#define ODPH_IPPROTO_ESP 0x32 /**< Encapsulating Security Payload (50) */
#define ODPH_IPPROTO_ICMPV6 0x3A /**< Internet Control Message Protocol (58) */
+#define ODPH_IPPROTO_SCTP 0x84 /**< Stream Control Transmission Protocol
+ (132) */
#define ODPH_IPPROTO_INVALID 0xFF /**< Reserved invalid by IANA */
/**@}*/
diff --git a/helper/include/odp/helper/odph_api.h b/helper/include/odp/helper/odph_api.h
index 8ad8a27fe..d46ab2ad2 100644
--- a/helper/include/odp/helper/odph_api.h
+++ b/helper/include/odp/helper/odph_api.h
@@ -27,6 +27,7 @@ extern "C" {
#include <odp/helper/ipsec.h>
#include <odp/helper/odph_lineartable.h>
#include <odp/helper/odph_iplookuptable.h>
+#include <odp/helper/sctp.h>
#include <odp/helper/strong_types.h>
#include <odp/helper/tcp.h>
#include <odp/helper/table.h>
diff --git a/helper/include/odp/helper/sctp.h b/helper/include/odp/helper/sctp.h
new file mode 100644
index 000000000..c81bd6313
--- /dev/null
+++ b/helper/include/odp/helper/sctp.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2014-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP SCTP header
+ */
+
+#ifndef ODPH_SCTP_H_
+#define ODPH_SCTP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+
+/** @addtogroup odph_header ODPH HEADER
+ * @{
+ */
+
+/** SCTP header length */
+#define ODPH_SCTPHDR_LEN 12
+
+/** SCTP header */
+typedef struct ODP_PACKED {
+ odp_u16be_t src_port; /**< Source port */
+ odp_u16be_t dst_port; /**< Destination port */
+ odp_u32be_t tag; /**< Verification tag */
+ odp_u32be_t chksum; /**< SCTP header and data checksum */
+} odph_sctphdr_t;
+
+/** @internal Compile time assert */
+ODP_STATIC_ASSERT(sizeof(odph_sctphdr_t) == ODPH_SCTPHDR_LEN,
+ "ODPH_SCTPHDR_T__SIZE_ERROR");
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
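The new header type overlays packet data in the usual helper style. A sketch, assuming the L4 offset is set and the header bytes are contiguous (the function name is illustrative):

    static uint16_t sctp_dst_port(odp_packet_t pkt)
    {
            uint32_t len = 0;
            odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, &len);

            if (sctp == NULL || len < ODPH_SCTPHDR_LEN)
                    return 0;

            return odp_be_to_cpu_16(sctp->dst_port);
    }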
diff --git a/helper/include/odp/helper/threads.h b/helper/include/odp/helper/threads.h
index 9d03c7192..6cee2522d 100644
--- a/helper/include/odp/helper/threads.h
+++ b/helper/include/odp/helper/threads.h
@@ -54,13 +54,6 @@ typedef struct {
int status; /**< Process state change status */
} odph_linux_process_t;
-/** odpthread linux type: whether an ODP thread is a linux thread or process */
-typedef enum odph_odpthread_linuxtype_e {
- ODPTHREAD_NOT_STARTED = 0,
- ODPTHREAD_PROCESS,
- ODPTHREAD_PTHREAD
-} odph_odpthread_linuxtype_t;
-
/** odpthread parameters for odp threads (pthreads and processes) */
typedef struct {
int (*start)(void *); /**< Thread entry point function */
@@ -71,7 +64,7 @@ typedef struct {
/** The odpthread starting arguments, used both in process or thread mode */
typedef struct {
- odph_odpthread_linuxtype_t linuxtype; /**< process or pthread */
+ odp_mem_model_t mem_model; /**< process or thread */
odph_odpthread_params_t thr_params; /**< odpthread start parameters */
} odph_odpthread_start_args_t;
@@ -95,6 +88,11 @@ typedef struct {
};
} odph_odpthread_t;
+/** Linux helper options */
+typedef struct {
+ odp_mem_model_t mem_model; /**< Process or thread */
+} odph_helper_options_t;
+
/**
* Creates and launches odpthreads (as linux threads or processes)
*
@@ -161,6 +159,18 @@ int odph_odpthread_getaffinity(void);
int odph_parse_options(int argc, char *argv[]);
/**
+ * Get linux helper options
+ *
+ * Returns the ODP helper options currently in use. odph_parse_options() must
+ * be called before using this function.
+ *
+ * @param[out] options ODP helper options
+ *
+ * @return 0 on success, -1 on failure
+ */
+int odph_options(odph_helper_options_t *options);
+
+/**
* @}
*/
diff --git a/helper/iplookuptable.c b/helper/iplookuptable.c
index 61f634022..84b4e2cbb 100644
--- a/helper/iplookuptable.c
+++ b/helper/iplookuptable.c
@@ -83,9 +83,9 @@ typedef struct trie_node {
} trie_node_t;
/** Number of L2\L3 entries(subtrees) per cache cube. */
-#define CACHE_NUM_SUBTREE (1 << 13)
+#define CACHE_NUM_SUBTREE (4 * 1024)
/** Number of trie nodes per cache cube. */
-#define CACHE_NUM_TRIE (1 << 20)
+#define CACHE_NUM_TRIE (4 * 1024)
/** @typedef cache_type_t
* Cache node type
@@ -187,12 +187,34 @@ cache_alloc_new_pool(
{
odp_pool_t pool;
odp_pool_param_t param;
+ odp_pool_capability_t pool_capa;
odp_queue_t queue = tbl->free_slots[type];
odp_buffer_t buffer;
char pool_name[ODPH_TABLE_NAME_LEN + 8];
uint32_t size = 0, num = 0;
+ if (odp_pool_capability(&pool_capa)) {
+ ODPH_ERR("pool capa failed\n");
+ return -1;
+ }
+
+ if (pool_capa.buf.max_num) {
+ if (pool_capa.buf.max_num < CACHE_NUM_TRIE ||
+ pool_capa.buf.max_num < CACHE_NUM_SUBTREE) {
+ ODPH_ERR("pool size too small\n");
+ return -1;
+ }
+ }
+
+ if (pool_capa.buf.max_size) {
+ if (pool_capa.buf.max_size < ENTRY_SIZE * ENTRY_NUM_SUBTREE ||
+ pool_capa.buf.max_size < sizeof(trie_node_t)) {
+ ODPH_ERR("buffer size too small\n");
+ return -1;
+ }
+ }
+
/* Create new pool (new free buffers). */
odp_pool_param_init(&param);
param.type = ODP_POOL_BUFFER;
@@ -223,7 +245,11 @@ cache_alloc_new_pool(
while ((buffer = odp_buffer_alloc(pool))
!= ODP_BUFFER_INVALID) {
cache_init_buffer(buffer, type, size);
- odp_queue_enq(queue, odp_buffer_to_event(buffer));
+ if (odp_queue_enq(queue, odp_buffer_to_event(buffer))) {
+ ODPH_DBG("queue enqueue failed\n");
+ odp_buffer_free(buffer);
+ break;
+ }
}
tbl->cache_count[type]++;
@@ -449,10 +475,28 @@ odph_table_t odph_iplookup_table_create(const char *name,
odp_shm_t shm_tbl;
odp_queue_t queue;
odp_queue_param_t qparam;
+ odp_queue_capability_t queue_capa;
unsigned i;
- uint32_t impl_size, l1_size;
+ uint32_t impl_size, l1_size, queue_size;
char queue_name[ODPH_TABLE_NAME_LEN + 2];
+ if (odp_queue_capability(&queue_capa)) {
+ ODPH_ERR("queue capa failed\n");
+ return NULL;
+ }
+
+ if (queue_capa.plain.max_size) {
+ if (queue_capa.plain.max_size < CACHE_NUM_TRIE ||
+ queue_capa.plain.max_size < CACHE_NUM_SUBTREE) {
+ ODPH_ERR("queue size too small\n");
+ return NULL;
+ }
+ }
+
+ queue_size = CACHE_NUM_TRIE;
+ if (CACHE_NUM_SUBTREE > CACHE_NUM_TRIE)
+ queue_size = CACHE_NUM_SUBTREE;
+
/* Check for valid parameters */
if (strlen(name) == 0) {
ODPH_DBG("invalid parameters\n");
@@ -502,6 +546,7 @@ odph_table_t odph_iplookup_table_create(const char *name,
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_PLAIN;
+ qparam.size = queue_size;
sprintf(queue_name, "%s_%d", name, i);
queue = odp_queue_create(queue_name, &qparam);
if (queue == ODP_QUEUE_INVALID) {
@@ -585,24 +630,25 @@ prefix_insert_into_lx(
odph_iplookup_table_impl *tbl, prefix_entry_t *entry,
uint8_t cidr, odp_buffer_t nexthop, uint8_t level)
{
- uint8_t ret = 0;
+ int ret = 0;
uint32_t i = 0, limit = (1 << (level - cidr));
prefix_entry_t *e = entry, *ne = NULL;
for (i = 0; i < limit; i++, e++) {
- if (e->child == 1) {
- if (e->cidr > cidr)
- continue;
+ if (e->cidr > cidr)
+ continue;
+ if (e->child == 1) {
e->cidr = cidr;
/* push to next level */
ne = (prefix_entry_t *)e->ptr;
ret = prefix_insert_into_lx(
tbl, ne, cidr, nexthop, cidr + 8);
+ if (ret == -1)
+ return -1;
+ if (ret == 0)
+ return ret;
} else {
- if (e->cidr > cidr)
- continue;
-
e->child = 0;
e->cidr = cidr;
e->nexthop = nexthop;
@@ -678,8 +724,9 @@ odph_iplookup_table_put_value(odph_table_t tbl, void *key, void *value)
nexthop = *((odp_buffer_t *)value);
- if (prefix->cidr == 0)
+ if (prefix->cidr == 0 || prefix->cidr > 32)
return -1;
+
prefix->ip = prefix->ip & (0xffffffff << (IP_LENGTH - prefix->cidr));
/* insert into trie */
@@ -899,7 +946,7 @@ odph_iplookup_table_remove_value(odph_table_t tbl, void *key)
ip = prefix->ip;
cidr = prefix->cidr;
- if (cidr == 0)
+ if (cidr == 0 || cidr > 32)
return -EINVAL;
prefix_entry_t *entry = &impl->l1e[ip >> 16];
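These additions follow the common ODP pattern of validating compile-time sizing against run-time capabilities before creating resources, where a zero capability field means no implementation limit. The pattern in isolation (the sizing constant is illustrative):

    #define NUM_BUFS_NEEDED 2048 /* illustrative sizing requirement */

    odp_pool_capability_t capa;

    if (odp_pool_capability(&capa))
            return -1;

    /* Zero means the implementation imposes no limit */
    if (capa.buf.max_num && capa.buf.max_num < NUM_BUFS_NEEDED)
            return -1;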
diff --git a/helper/test/odpthreads.c b/helper/test/odpthreads.c
index ad48ec2d5..55db37e0d 100644
--- a/helper/test/odpthreads.c
+++ b/helper/test/odpthreads.c
@@ -64,18 +64,27 @@ static int worker_fn(void *arg ODPH_UNUSED)
/* Create additional dataplane opdthreads */
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_params_t thr_params;
odph_odpthread_t thread_tbl[NUMBER_WORKERS];
odp_cpumask_t cpu_mask;
+ odp_init_t init_param;
int num_workers;
int cpu, affinity;
int ret;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- /* let helper collect its own arguments (e.g. --odph_proc) */
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ ODPH_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (odp_init_global(&odp_instance, NULL, NULL)) {
+ if (odp_init_global(&odp_instance, &init_param, NULL)) {
ODPH_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
diff --git a/helper/threads.c b/helper/threads.c
index 86d6bf7be..01bc33eac 100644
--- a/helper/threads.c
+++ b/helper/threads.c
@@ -10,6 +10,7 @@
#define _GNU_SOURCE
#endif
#include <sched.h>
+#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>
@@ -20,9 +21,9 @@
#include <odp/helper/threads.h>
#include "odph_debug.h"
-static struct {
- int proc; /* true when process mode is required, false otherwise */
-} helper_options;
+#define FAILED_CPU -1
+
+static odph_helper_options_t helper_options;
/*
* wrapper for odpthreads, either implemented as linux threads or processes.
@@ -41,7 +42,7 @@ static void *_odph_thread_run_start_routine(void *arg)
/* ODP thread local init */
if (odp_init_local(thr_params->instance, thr_params->thr_type)) {
ODPH_ERR("Local init failed\n");
- if (start_args->linuxtype == ODPTHREAD_PROCESS)
+ if (start_args->mem_model == ODP_MEM_MODEL_PROCESS)
_exit(EXIT_FAILURE);
return (void *)-1;
}
@@ -49,7 +50,7 @@ static void *_odph_thread_run_start_routine(void *arg)
ODPH_DBG("helper: ODP %s thread started as linux %s. (pid=%d)\n",
thr_params->thr_type == ODP_THREAD_WORKER ?
"worker" : "control",
- (start_args->linuxtype == ODPTHREAD_PTHREAD) ?
+ (start_args->mem_model == ODP_MEM_MODEL_THREAD) ?
"pthread" : "process",
(int)getpid());
@@ -60,7 +61,7 @@ static void *_odph_thread_run_start_routine(void *arg)
ODPH_ERR("Local term failed\n");
/* for process implementation of odp threads, just return status... */
- if (start_args->linuxtype == ODPTHREAD_PROCESS)
+ if (start_args->mem_model == ODP_MEM_MODEL_PROCESS)
_exit(status);
/* threads implementation return void* pointers: cast status to that. */
@@ -80,14 +81,14 @@ static int _odph_linux_process_create(odph_odpthread_t *thread_tbl,
CPU_ZERO(&cpu_set);
CPU_SET(cpu, &cpu_set);
- thread_tbl->start_args.thr_params = *thr_params; /* copy */
- thread_tbl->start_args.linuxtype = ODPTHREAD_PROCESS;
+ thread_tbl->start_args.thr_params = *thr_params; /* copy */
+ thread_tbl->start_args.mem_model = ODP_MEM_MODEL_PROCESS;
thread_tbl->cpu = cpu;
pid = fork();
if (pid < 0) {
ODPH_ERR("fork() failed\n");
- thread_tbl->start_args.linuxtype = ODPTHREAD_NOT_STARTED;
+ thread_tbl->cpu = FAILED_CPU;
return -1;
}
@@ -135,8 +136,8 @@ static int odph_linux_thread_create(odph_odpthread_t *thread_tbl,
pthread_attr_setaffinity_np(&thread_tbl->thread.attr,
sizeof(cpu_set_t), &cpu_set);
- thread_tbl->start_args.thr_params = *thr_params; /* copy */
- thread_tbl->start_args.linuxtype = ODPTHREAD_PTHREAD;
+ thread_tbl->start_args.thr_params = *thr_params; /* copy */
+ thread_tbl->start_args.mem_model = ODP_MEM_MODEL_THREAD;
ret = pthread_create(&thread_tbl->thread.thread_id,
&thread_tbl->thread.attr,
@@ -144,7 +145,7 @@ static int odph_linux_thread_create(odph_odpthread_t *thread_tbl,
&thread_tbl->start_args);
if (ret != 0) {
ODPH_ERR("Failed to start thread on cpu #%d\n", cpu);
- thread_tbl->start_args.linuxtype = ODPTHREAD_NOT_STARTED;
+ thread_tbl->cpu = FAILED_CPU;
return ret;
}
@@ -178,7 +179,7 @@ int odph_odpthreads_create(odph_odpthread_t *thread_tbl,
cpu = odp_cpumask_first(mask);
for (i = 0; i < num; i++) {
- if (!helper_options.proc) {
+ if (helper_options.mem_model == ODP_MEM_MODEL_THREAD) {
if (odph_linux_thread_create(&thread_tbl[i],
cpu,
thr_params))
@@ -214,9 +215,13 @@ int odph_odpthreads_join(odph_odpthread_t *thread_tbl)
/* joins linux threads or wait for processes */
do {
+ if (thread_tbl[i].cpu == FAILED_CPU) {
+ ODPH_DBG("ODP thread %d not started.\n", i);
+ continue;
+ }
/* pthreads: */
- switch (thread_tbl[i].start_args.linuxtype) {
- case ODPTHREAD_PTHREAD:
+ if (thread_tbl[i].start_args.mem_model ==
+ ODP_MEM_MODEL_THREAD) {
/* Wait thread to exit */
ret = pthread_join(thread_tbl[i].thread.thread_id,
&thread_ret);
@@ -233,10 +238,7 @@ int odph_odpthreads_join(odph_odpthread_t *thread_tbl)
}
}
pthread_attr_destroy(&thread_tbl[i].thread.attr);
- break;
-
- case ODPTHREAD_PROCESS:
-
+ } else {
/* processes: */
pid = waitpid(thread_tbl[i].proc.pid, &status, 0);
@@ -262,16 +264,7 @@ int odph_odpthreads_join(odph_odpthread_t *thread_tbl)
signo, strsignal(signo), (int)pid);
retval = -1;
}
- break;
-
- case ODPTHREAD_NOT_STARTED:
- ODPH_DBG("No join done on not started ODPthread.\n");
- break;
- default:
- ODPH_DBG("Invalid case statement value!\n");
- break;
}
-
} while (!thread_tbl[i++].last);
return (retval < 0) ? retval : terminated;
@@ -332,14 +325,22 @@ int odph_odpthread_getaffinity(void)
int odph_parse_options(int argc, char *argv[])
{
+ char *env;
int i, j;
- helper_options.proc = 0;
+ helper_options.mem_model = ODP_MEM_MODEL_THREAD;
+
+ /* Enable process mode using an environment variable. Setting an
+ * environment variable is easier for CI testing than passing a command
+ * line argument. */
+ env = getenv("ODPH_PROC_MODE");
+ if (env && atoi(env))
+ helper_options.mem_model = ODP_MEM_MODEL_PROCESS;
/* Find and remove option */
for (i = 0; i < argc;) {
if (strcmp(argv[i], "--odph_proc") == 0) {
- helper_options.proc = 1;
+ helper_options.mem_model = ODP_MEM_MODEL_PROCESS;
for (j = i; j < argc - 1; j++)
argv[j] = argv[j + 1];
@@ -353,3 +354,12 @@ int odph_parse_options(int argc, char *argv[])
return argc;
}
+
+int odph_options(odph_helper_options_t *options)
+{
+ memset(options, 0, sizeof(odph_helper_options_t));
+
+ options->mem_model = helper_options.mem_model;
+
+ return 0;
+}
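With this change process mode can be requested either with the existing --odph_proc argument or, more conveniently in CI scripts, by running e.g. "ODPH_PROC_MODE=1 ./odpthreads" (any non-zero integer value enables it; the binary name is illustrative).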
diff --git a/include/Makefile.am b/include/Makefile.am
index 512002f87..49ed0164e 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -16,6 +16,7 @@ odpapiinclude_HEADERS = \
odp/api/byteorder.h \
odp/api/chksum.h \
odp/api/classification.h \
+ odp/api/comp.h \
odp/api/cpu.h \
odp/api/cpumask.h \
odp/api/crypto.h \
@@ -65,6 +66,7 @@ odpapispecinclude_HEADERS = \
odp/api/spec/byteorder.h \
odp/api/spec/chksum.h \
odp/api/spec/classification.h \
+ odp/api/spec/comp.h \
odp/api/spec/cpu.h \
odp/api/spec/cpumask.h \
odp/api/spec/crypto.h \
@@ -82,6 +84,7 @@ odpapispecinclude_HEADERS = \
odp/api/spec/packet_io_stats.h \
odp/api/spec/pool.h \
odp/api/spec/queue.h \
+ odp/api/spec/queue_types.h \
odp/api/spec/random.h \
odp/api/spec/rwlock.h \
odp/api/spec/rwlock_recursive.h \
@@ -116,6 +119,7 @@ odpapiabidefaultinclude_HEADERS = \
odp/api/abi-default/buffer.h \
odp/api/abi-default/byteorder.h \
odp/api/abi-default/classification.h \
+ odp/api/abi-default/comp.h \
odp/api/abi-default/cpu.h \
odp/api/abi-default/cpumask.h \
odp/api/abi-default/crypto.h \
@@ -158,6 +162,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm32-linux/odp/api/abi/buffer.h \
odp/arch/arm32-linux/odp/api/abi/byteorder.h \
odp/arch/arm32-linux/odp/api/abi/classification.h \
+ odp/arch/arm32-linux/odp/api/abi/comp.h \
odp/arch/arm32-linux/odp/api/abi/cpu.h \
odp/arch/arm32-linux/odp/api/abi/cpumask.h \
odp/arch/arm32-linux/odp/api/abi/crypto.h \
@@ -196,6 +201,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm64-linux/odp/api/abi/buffer.h \
odp/arch/arm64-linux/odp/api/abi/byteorder.h \
odp/arch/arm64-linux/odp/api/abi/classification.h \
+ odp/arch/arm64-linux/odp/api/abi/comp.h \
odp/arch/arm64-linux/odp/api/abi/cpu.h \
odp/arch/arm64-linux/odp/api/abi/cpumask.h \
odp/arch/arm64-linux/odp/api/abi/crypto.h \
@@ -234,6 +240,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/default-linux/odp/api/abi/buffer.h \
odp/arch/default-linux/odp/api/abi/byteorder.h \
odp/arch/default-linux/odp/api/abi/classification.h \
+ odp/arch/default-linux/odp/api/abi/comp.h \
odp/arch/default-linux/odp/api/abi/cpu.h \
odp/arch/default-linux/odp/api/abi/cpumask.h \
odp/arch/default-linux/odp/api/abi/crypto.h \
@@ -272,6 +279,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/mips64-linux/odp/api/abi/buffer.h \
odp/arch/mips64-linux/odp/api/abi/byteorder.h \
odp/arch/mips64-linux/odp/api/abi/classification.h \
+ odp/arch/mips64-linux/odp/api/abi/comp.h \
odp/arch/mips64-linux/odp/api/abi/cpu.h \
odp/arch/mips64-linux/odp/api/abi/cpumask.h \
odp/arch/mips64-linux/odp/api/abi/crypto.h \
@@ -310,6 +318,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/power64-linux/odp/api/abi/buffer.h \
odp/arch/power64-linux/odp/api/abi/byteorder.h \
odp/arch/power64-linux/odp/api/abi/classification.h \
+ odp/arch/power64-linux/odp/api/abi/comp.h \
odp/arch/power64-linux/odp/api/abi/cpu.h \
odp/arch/power64-linux/odp/api/abi/cpumask.h \
odp/arch/power64-linux/odp/api/abi/crypto.h \
@@ -348,6 +357,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_32-linux/odp/api/abi/buffer.h \
odp/arch/x86_32-linux/odp/api/abi/byteorder.h \
odp/arch/x86_32-linux/odp/api/abi/classification.h \
+ odp/arch/x86_32-linux/odp/api/abi/comp.h \
odp/arch/x86_32-linux/odp/api/abi/cpu.h \
odp/arch/x86_32-linux/odp/api/abi/cpumask.h \
odp/arch/x86_32-linux/odp/api/abi/crypto.h \
@@ -386,6 +396,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_64-linux/odp/api/abi/buffer.h \
odp/arch/x86_64-linux/odp/api/abi/byteorder.h \
odp/arch/x86_64-linux/odp/api/abi/classification.h \
+ odp/arch/x86_64-linux/odp/api/abi/comp.h \
odp/arch/x86_64-linux/odp/api/abi/cpu.h \
odp/arch/x86_64-linux/odp/api/abi/cpumask.h \
odp/arch/x86_64-linux/odp/api/abi/crypto.h \
diff --git a/include/odp/api/abi-default/classification.h b/include/odp/api/abi-default/classification.h
index 95545e6e5..e0573fd40 100644
--- a/include/odp/api/abi-default/classification.h
+++ b/include/odp/api/abi-default/classification.h
@@ -26,8 +26,8 @@ typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_pmr_t;
typedef _odp_abi_cos_t *odp_cos_t;
typedef _odp_abi_pmr_t *odp_pmr_t;
-#define ODP_COS_INVALID ((odp_cos_t)~0)
-#define ODP_PMR_INVALID ((odp_pmr_t)~0)
+#define ODP_COS_INVALID ((odp_cos_t)0)
+#define ODP_PMR_INVALID ((odp_pmr_t)0)
#if ODP_DEPRECATED_API
#define ODP_PMR_INVAL ODP_PMR_INVALID
diff --git a/include/odp/api/abi-default/cpumask.h b/include/odp/api/abi-default/cpumask.h
index 5e7f24bf6..66445f7da 100644
--- a/include/odp/api/abi-default/cpumask.h
+++ b/include/odp/api/abi-default/cpumask.h
@@ -23,8 +23,9 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/align.h>
+#include <sched.h>
-#define ODP_CPUMASK_SIZE 1024
+#define ODP_CPUMASK_SIZE (sizeof(cpu_set_t) * 8)
#define ODP_CPUMASK_STR_SIZE ((ODP_CPUMASK_SIZE + 3) / 4 + 3)
diff --git a/include/odp/api/abi-default/event.h b/include/odp/api/abi-default/event.h
index b0eee9cb6..3f88681b7 100644
--- a/include/odp/api/abi-default/event.h
+++ b/include/odp/api/abi-default/event.h
@@ -36,7 +36,8 @@ typedef enum odp_event_subtype_t {
ODP_EVENT_NO_SUBTYPE = 0,
ODP_EVENT_PACKET_BASIC = 1,
ODP_EVENT_PACKET_CRYPTO = 2,
- ODP_EVENT_PACKET_IPSEC = 3
+ ODP_EVENT_PACKET_IPSEC = 3,
+ ODP_EVENT_PACKET_COMP = 4
} odp_event_subtype_t;
/**
diff --git a/include/odp/api/abi-default/ipsec.h b/include/odp/api/abi-default/ipsec.h
index 7ec433721..2c95fd4f5 100644
--- a/include/odp/api/abi-default/ipsec.h
+++ b/include/odp/api/abi-default/ipsec.h
@@ -28,7 +28,7 @@ typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_ipsec_sa_t;
typedef _odp_abi_ipsec_sa_t *odp_ipsec_sa_t;
-#define ODP_IPSEC_SA_INVALID ((odp_ipsec_sa_t)0xffffffff)
+#define ODP_IPSEC_SA_INVALID ((odp_ipsec_sa_t)0)
/**
* @}
diff --git a/include/odp/api/abi-default/packet.h b/include/odp/api/abi-default/packet.h
index d3bd5f6e4..c8eecbacf 100644
--- a/include/odp/api/abi-default/packet.h
+++ b/include/odp/api/abi-default/packet.h
@@ -27,7 +27,7 @@ typedef _odp_abi_packet_t *odp_packet_t;
typedef _odp_abi_packet_seg_t *odp_packet_seg_t;
#define ODP_PACKET_INVALID ((odp_packet_t)0)
-#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)0xffffffff)
+#define ODP_PACKET_SEG_INVALID ((odp_packet_seg_t)0)
#define ODP_PACKET_OFFSET_INVALID 0xffff
typedef uint8_t odp_proto_l2_type_t;
diff --git a/include/odp/api/abi-default/pool.h b/include/odp/api/abi-default/pool.h
index f3489ff9c..4c67e309b 100644
--- a/include/odp/api/abi-default/pool.h
+++ b/include/odp/api/abi-default/pool.h
@@ -22,7 +22,7 @@ typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_pool_t;
typedef _odp_abi_pool_t *odp_pool_t;
-#define ODP_POOL_INVALID ((odp_pool_t)0xffffffff)
+#define ODP_POOL_INVALID ((odp_pool_t)0)
#define ODP_POOL_NAME_LEN 32
diff --git a/include/odp/api/abi-default/schedule_types.h b/include/odp/api/abi-default/schedule_types.h
index 31ee27f1a..6c7730cd9 100644
--- a/include/odp/api/abi-default/schedule_types.h
+++ b/include/odp/api/abi-default/schedule_types.h
@@ -21,15 +21,13 @@ extern "C" {
* @{
*/
-typedef int odp_schedule_prio_t;
+#define ODP_SCHED_PRIO_HIGHEST (odp_schedule_max_prio())
-#define ODP_SCHED_PRIO_HIGHEST 0
+#define ODP_SCHED_PRIO_NORMAL (odp_schedule_default_prio())
-#define ODP_SCHED_PRIO_NORMAL 4
+#define ODP_SCHED_PRIO_LOWEST (odp_schedule_min_prio())
-#define ODP_SCHED_PRIO_LOWEST 7
-
-#define ODP_SCHED_PRIO_DEFAULT ODP_SCHED_PRIO_NORMAL
+#define ODP_SCHED_PRIO_DEFAULT (odp_schedule_default_prio())
typedef int odp_schedule_sync_t;
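Since priority limits are now run-time queries rather than fixed constants, portable code should fetch them from the API. A sketch of creating a scheduled queue at the highest available priority, assuming odp_schedule_config() has already been called:

    odp_queue_param_t qparam;
    odp_queue_t queue;

    odp_queue_param_init(&qparam);
    qparam.type = ODP_QUEUE_TYPE_SCHED;
    qparam.sched.prio = odp_schedule_max_prio();
    qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;

    queue = odp_queue_create("hi_prio", &qparam);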
diff --git a/include/odp/api/abi-default/thread.h b/include/odp/api/abi-default/thread.h
index 0420e2451..e31651a26 100644
--- a/include/odp/api/abi-default/thread.h
+++ b/include/odp/api/abi-default/thread.h
@@ -21,7 +21,7 @@ extern "C" {
* @{
*/
-#define ODP_THREAD_COUNT_MAX 128
+#define ODP_THREAD_COUNT_MAX 256
/**
* @}
diff --git a/include/odp/api/abi-default/timer.h b/include/odp/api/abi-default/timer.h
index 6654ab9ca..566d199e0 100644
--- a/include/odp/api/abi-default/timer.h
+++ b/include/odp/api/abi-default/timer.h
@@ -32,17 +32,17 @@ typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_timer_pool_t;
typedef _odp_abi_timer_pool_t *odp_timer_pool_t;
-#define ODP_TIMER_POOL_INVALID NULL
+#define ODP_TIMER_POOL_INVALID ((odp_timer_pool_t)0)
#define ODP_TIMER_POOL_NAME_LEN 32
typedef _odp_abi_timer_t *odp_timer_t;
-#define ODP_TIMER_INVALID ((odp_timer_t)0xffffffff)
+#define ODP_TIMER_INVALID ((odp_timer_t)0)
typedef _odp_abi_timeout_t *odp_timeout_t;
-#define ODP_TIMEOUT_INVALID ((odp_timeout_t)NULL)
+#define ODP_TIMEOUT_INVALID ((odp_timeout_t)0)
/**
* @}
diff --git a/include/odp/api/abi-default/traffic_mngr.h b/include/odp/api/abi-default/traffic_mngr.h
index b7b04b831..9c01ef98f 100644
--- a/include/odp/api/abi-default/traffic_mngr.h
+++ b/include/odp/api/abi-default/traffic_mngr.h
@@ -28,7 +28,7 @@ extern "C" {
* systems that may be created. On some platforms this might be much more
* limited to as little as one hardware TM system.
*/
-#define ODP_TM_MAX_NUM_SYSTEMS 64
+#define ODP_TM_MAX_NUM_SYSTEMS 8
/** The ODP_TM_MAX_PRIORITIES constant specifies the largest range of
* priorities that any TM system can support. All strict priority values MUST
@@ -57,7 +57,7 @@ extern "C" {
/** The ODP_TM_MAX_TM_QUEUES constant is the largest number of tm_queues
* that can be handled by any one TM system.
*/
-#define ODP_TM_MAX_TM_QUEUES (16 * 1024 * 1024)
+#define ODP_TM_MAX_TM_QUEUES (4 * 1024)
/** The ODP_TM_MAX_NUM_OUTPUTS constant is the largest number of outputs that
* can be configured for any one TM system.
@@ -67,13 +67,13 @@ extern "C" {
/** The ODP_TM_MAX_NUM_TM_NODES constant is the largest number of tm_nodes that
* can be in existence for any one TM system.
*/
-#define ODP_TM_MAX_NUM_TM_NODES (1024 * 1024)
+#define ODP_TM_MAX_NUM_TM_NODES (4 * 1024)
/** The ODP_TM_MAX_TM_NODE_FANIN constant is the largest number of fan-in
* "inputs" that can be simultaneously connected to a single tm_node.
* *TBD* Does this need to be as large as ODP_TM_MAX_TM_QUEUES? *TBD*
*/
-#define ODP_TM_MAX_TM_NODE_FANIN (1024 * 1024)
+#define ODP_TM_MAX_TM_NODE_FANIN (4 * 1024)
/** The ODP_TM_MIN_SHAPER_BW constant is the smallest amount of bandwidth that
* can a shaper's peak or commit rate can be set to. It is in units of
diff --git a/include/odp/api/comp.h b/include/odp/api/comp.h
new file mode 100644
index 000000000..59d4f52a3
--- /dev/null
+++ b/include/odp/api/comp.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP compression
+ */
+
+#ifndef ODP_API_COMP_H_
+#define ODP_API_COMP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/comp.h>
+#include <odp/api/abi/event.h>
+#include <odp/api/abi/queue.h>
+
+#include <odp/api/spec/comp.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/spec/comp.h b/include/odp/api/spec/comp.h
index a5eb5a232..e02719193 100644
--- a/include/odp/api/spec/comp.h
+++ b/include/odp/api/spec/comp.h
@@ -10,8 +10,8 @@
* ODP Compression
*/
-#ifndef ODP_API_COMP_H_
-#define ODP_API_COMP_H_
+#ifndef ODP_API_SPEC_COMP_H_
+#define ODP_API_SPEC_COMP_H_
#include <odp/visibility_begin.h>
#include <odp/api/support.h>
@@ -121,7 +121,7 @@ typedef union odp_comp_hash_algos_t {
/** hash algorithms */
struct {
/** ODP_COMP_HASH_ALG_NONE */
- uint32_t none : 1,
+ uint32_t none : 1;
/** ODP_COMP_HASH_ALG_SHA1 */
uint32_t sha1 : 1;
@@ -181,7 +181,7 @@ typedef struct odp_comp_capability_t {
/** Synchronous compression mode support (ODP_COMP_OP_MODE_SYNC) */
odp_support_t sync;
- /** Aynchronous compression mode support (ODP_COMP_OP_MODE_SSYNC) */
+ /** Asynchronous compression mode support (ODP_COMP_OP_MODE_ASYNC) */
odp_support_t async;
} odp_comp_capability_t;
@@ -343,7 +343,7 @@ typedef struct odp_comp_packet_result_t {
/** Output packet data range
* Specifies offset and length of data resulting from compression
* operation. When hashing is configured output_data_range.len equals
- * length of output data + 'digest+len'
+ * length of output data + length of digest.
*/
odp_packet_data_range_t output_data_range;
} odp_comp_packet_result_t;
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index 843fdefe8..cfb2bc42c 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -422,7 +422,10 @@ typedef odp_packet_data_range_t ODP_DEPRECATE(odp_crypto_data_range_t);
* Crypto API session creation parameters
*/
typedef struct odp_crypto_session_param_t {
- /** Encode vs. decode operation */
+ /** Encode vs. decode operation
+ *
+ * The default value is ODP_CRYPTO_OP_ENCODE.
+ */
odp_crypto_op_t op;
/** Authenticate cipher vs. plain text
@@ -435,18 +438,31 @@ typedef struct odp_crypto_session_param_t {
*
* true: Authenticate cipher text
* false: Authenticate plain text
+ *
+ * The default value is false.
*/
odp_bool_t auth_cipher_text;
- /** Preferred sync vs. async for odp_crypto_operation() */
+ /** Preferred sync vs. async for odp_crypto_operation()
+ *
+ * The default value is ODP_CRYPTO_SYNC.
+ */
odp_crypto_op_mode_t pref_mode;
- /** Operation mode when using packet interface: sync or async */
+ /** Operation mode when using packet interface: sync or async
+ *
+ * The default value is ODP_CRYPTO_SYNC.
+ */
odp_crypto_op_mode_t op_mode;
/** Cipher algorithm
*
- * Use odp_crypto_capability() for supported algorithms.
+ * Select cipher algorithm to be used. ODP_CIPHER_ALG_NULL indicates
+ * that ciphering is disabled. Use odp_crypto_capability() for
+ * supported algorithms. Note that some algorithms restrict choice of
+ * the pairing authentication algorithm. When ciphering is enabled
+ * cipher key and IV need to be set. The default value is
+ * ODP_CIPHER_ALG_NULL.
*/
odp_cipher_alg_t cipher_alg;
@@ -467,7 +483,18 @@ typedef struct odp_crypto_session_param_t {
/** Authentication algorithm
*
- * Use odp_crypto_capability() for supported algorithms.
+ * Select authentication algorithm to be used. ODP_AUTH_ALG_NULL
+ * indicates that authentication is disabled. Use
+ * odp_crypto_capability() for supported algorithms. Note that some
+ * algorithms restrict choice of the pairing cipher algorithm. When
+ * single algorithm provides both ciphering and authentication
+ * (i.e. Authenticated Encryption), authentication side key
+ * (auth_key) and IV (auth_iv) are ignored, and cipher side values are
+ * used instead. These algorithms ignore authentication side key
+ * and IV: ODP_AUTH_ALG_AES_GCM, ODP_AUTH_ALG_AES_CCM and
+ * ODP_AUTH_ALG_CHACHA20_POLY1305. Otherwise, all authentication side
+ * parameters must be set when authentication is enabled. The default
+ * value is ODP_AUTH_ALG_NULL.
*/
odp_auth_alg_t auth_alg;
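For the Authenticated Encryption case described above, cipher and auth algorithms are selected as a pair and the auth-side key material is ignored. A sketch of an AES-128-GCM session setup; key/IV contents are placeholders and the field names assume this API version's cipher_iv/auth_iv split:

    odp_crypto_session_param_t param;
    odp_crypto_session_t session;
    odp_crypto_ses_create_err_t status;
    static uint8_t key[16]; /* 128-bit key, contents app-specific */
    static uint8_t iv[12];  /* 96-bit IV, contents app-specific */

    odp_crypto_session_param_init(&param);
    param.op = ODP_CRYPTO_OP_ENCODE;
    param.cipher_alg = ODP_CIPHER_ALG_AES_GCM;
    param.cipher_key.data = key;
    param.cipher_key.length = sizeof(key);
    param.cipher_iv.data = iv;
    param.cipher_iv.length = sizeof(iv);
    /* AES-GCM pairs with itself; auth_key/auth_iv are ignored */
    param.auth_alg = ODP_AUTH_ALG_AES_GCM;

    if (odp_crypto_session_create(&param, &session, &status))
            return -1;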
diff --git a/include/odp/api/spec/event.h b/include/odp/api/spec/event.h
index d9f7ab73d..162fd5a72 100644
--- a/include/odp/api/spec/event.h
+++ b/include/odp/api/spec/event.h
@@ -78,6 +78,10 @@ extern "C" {
* List of event subtypes:
* - ODP_EVENT_PACKET_BASIC
* - Packet event (odp_packet_t) with basic packet metadata
+ * - ODP_EVENT_PACKET_COMP
+ * - Packet event (odp_packet_t) generated as a result of a compression/
+ * decompression operation. It contains compression specific metadata in
+ * addition to the basic packet metadata.
* - ODP_EVENT_PACKET_CRYPTO
* - Packet event (odp_packet_t) generated as a result of a Crypto
* operation. It contains crypto specific metadata in addition to the
@@ -210,6 +214,44 @@ void odp_event_free_multi(const odp_event_t event[], int num);
void odp_event_free_sp(const odp_event_t event[], int num);
/**
+ * Event flow id value
+ *
+ * Returns the flow id value set in the event.
+ * Usage of flow id enables the scheduler to maintain multiple
+ * synchronization contexts per single queue. For example, when multiple
+ * flows are assigned to an atomic queue, events of a single flow (events
+ * from the same queue with the same flow id value) are guaranteed to be
+ * processed by only a single thread at a time. For packets received
+ * through packet input, the initial event flow id is the flow hash
+ * generated for the packet. The hash algorithm, and therefore the
+ * resulting flow id value, is implementation specific. Use pktio API
+ * configuration options to select the fields used for initial flow id
+ * calculation. For all other events, the initial flow id is zero. An
+ * application can change the event flow id using odp_event_flow_id_set().
+ *
+ * @param event Event handle
+ *
+ * @return Flow id of the event
+ *
+ */
+uint32_t odp_event_flow_id(odp_event_t event);
+
+/**
+ * Set event flow id value
+ *
+ * Store the event flow id for the event and sets the flow id flag.
+ * When scheduler is configured as flow aware, scheduled queue synchronization
+ * will be based on this id within each queue.
+ * When scheduler is configured as flow unaware, event flow id is ignored by
+ * the implementation.
+ * The value of flow id must be less than the number of flows configured in the
+ * scheduler.
+ *
+ * @param event Event handle
+ * @param flow_id Flow id value to be set.
+ */
+void odp_event_flow_id_set(odp_event_t event, uint32_t flow_id);
+
+/**
* @}
*/
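A sketch of the intended use: many flows multiplexed onto one atomic queue, with the application re-tagging each event so the scheduler serializes per flow rather than per queue. The connection_id() helper and MAX_FLOWS bound are hypothetical, and the scheduler must be configured flow aware:

    odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);
    uint32_t flow = odp_event_flow_id(ev);

    /* Re-tag by connection; must stay below the configured flow count */
    if (flow == 0)
            odp_event_flow_id_set(ev, connection_id(ev) % MAX_FLOWS);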
diff --git a/include/odp/api/spec/init.h b/include/odp/api/spec/init.h
index ee33e7cd4..c37af464d 100644
--- a/include/odp/api/spec/init.h
+++ b/include/odp/api/spec/init.h
@@ -107,6 +107,32 @@ typedef int (*odp_log_func_t)(odp_log_level_t level, const char *fmt, ...);
typedef void (*odp_abort_func_t)(void) ODP_NORETURN;
/**
+ * Application memory model
+ */
+typedef enum {
+ /** Thread memory model: by default all memory is shareable between
+ * threads.
+ *
+ * Within a single ODP instance all ODP handles and pointers to ODP
+ * allocated data may be shared amongst threads independent of data
+ * allocation time (e.g. before or after thread creation). */
+ ODP_MEM_MODEL_THREAD = 0,
+
+ /** Process memory model: by default all memory is not shareable between
+ * processes.
+ *
+ * Within a single ODP instance all ODP handles and pointers to ODP
+ * allocated data (excluding non-single VA SHM blocks) may be shared
+ * amongst processes independent of data allocation time (e.g. before
+ * or after fork).
+ *
+ * @see ODP_SHM_SINGLE_VA
+ */
+ ODP_MEM_MODEL_PROCESS
+
+} odp_mem_model_t;
+
+/**
* Global initialization parameters
*
* These parameters may be used at global initialization time to configure and
@@ -172,6 +198,12 @@ typedef struct odp_init_t {
*/
odp_feature_t not_used;
+ /** Application memory model. The main application thread has to call
+ * odp_init_global() and odp_init_local() before creating threads that
+ * share ODP data. The default value is ODP_MEM_MODEL_THREAD.
+ */
+ odp_mem_model_t mem_model;
+
/** Shared memory parameters */
struct {
/** Maximum memory usage in bytes. This is the maximum
diff --git a/include/odp/api/spec/ipsec.h b/include/odp/api/spec/ipsec.h
index 2d1c4d9ba..1b65e8d06 100644
--- a/include/odp/api/spec/ipsec.h
+++ b/include/odp/api/spec/ipsec.h
@@ -364,27 +364,65 @@ typedef enum odp_ipsec_tunnel_type_t {
* IPSEC crypto parameters
*/
typedef struct odp_ipsec_crypto_param_t {
- /** Cipher algorithm */
+ /** Cipher algorithm
+ *
+ * Select cipher algorithm to be used. ODP_CIPHER_ALG_NULL indicates
+ * that ciphering is disabled. See 'ciphers' field of
+ * odp_ipsec_capability_t for supported cipher algorithms. Algorithm
+ * descriptions can be found from odp_cipher_alg_t documentation. Note
+ * that some algorithms restrict choice of the pairing authentication
+ * algorithm. When ciphering is enabled, cipher key and potential extra
+ * key material (cipher_key_extra) need to be set. The default value
+ * is ODP_CIPHER_ALG_NULL.
+ */
odp_cipher_alg_t cipher_alg;
/** Cipher key */
odp_crypto_key_t cipher_key;
- /** Extra keying material for cipher key
+ /** Extra keying material for cipher algorithm
*
* Additional data used as salt or nonce if the algorithm requires it,
* other algorithms ignore this field. These algorithms require this
- * field set:
- * - AES_GCM: 4 bytes of salt
- **/
+ * field to be set:
+ * - ODP_CIPHER_ALG_AES_CTR: 4 bytes of nonce
+ * - ODP_CIPHER_ALG_AES_GCM: 4 bytes of salt
+ * - ODP_CIPHER_ALG_AES_CCM: 3 bytes of salt
+ * - ODP_CIPHER_ALG_CHACHA20_POLY1305: 4 bytes of salt
+ */
odp_crypto_key_t cipher_key_extra;
- /** Authentication algorithm */
+ /** Authentication algorithm
+ *
+ * Select authentication algorithm to be used. ODP_AUTH_ALG_NULL
+ * indicates that authentication is disabled. See 'auths' field of
+ * odp_ipsec_capability_t for supported authentication algorithms.
+ * Algorithm descriptions can be found from odp_auth_alg_t
+ * documentation. Note that some algorithms restrict choice of the
+ * pairing cipher algorithm. When single algorithm provides both
+ * ciphering and authentication (i.e. Authenticated Encryption),
+ * authentication side key information ('auth_key' and
+ * 'auth_key_extra') is ignored, and cipher side values are
+ * used instead. These algorithms ignore authentication side key
+ * information: ODP_AUTH_ALG_AES_GCM, ODP_AUTH_ALG_AES_CCM and
+ * ODP_AUTH_ALG_CHACHA20_POLY1305. Otherwise, authentication side
+ * parameters must be set when authentication is enabled. The default
+ * value is ODP_AUTH_ALG_NULL.
+ */
odp_auth_alg_t auth_alg;
/** Authentication key */
odp_crypto_key_t auth_key;
+ /** Extra keying material for authentication algorithm
+ *
+ * Additional data used as salt or nonce if the algorithm requires it,
+ * other algorithms ignore this field. These algorithms require this
+ * field to be set:
+ * - ODP_AUTH_ALG_AES_GMAC: 4 bytes of salt
+ */
+ odp_crypto_key_t auth_key_extra;
+
} odp_ipsec_crypto_param_t;
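A sketch of filling the new fields for an AES-128-GCM SA, where the 4-byte salt travels in cipher_key_extra; array contents are application-specific placeholders:

    odp_ipsec_sa_param_t sa_param;
    static uint8_t key[16]; /* 128-bit cipher key */
    static uint8_t salt[4]; /* 4 bytes of salt for AES-GCM */

    odp_ipsec_sa_param_init(&sa_param);
    sa_param.crypto.cipher_alg = ODP_CIPHER_ALG_AES_GCM;
    sa_param.crypto.cipher_key.data = key;
    sa_param.crypto.cipher_key.length = sizeof(key);
    sa_param.crypto.cipher_key_extra.data = salt;
    sa_param.crypto.cipher_key_extra.length = sizeof(salt);
    /* AES-GCM provides authentication too; auth keys are ignored */
    sa_param.crypto.auth_alg = ODP_AUTH_ALG_AES_GCM;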
/** IPv4 header parameters */
diff --git a/include/odp/api/spec/queue.h b/include/odp/api/spec/queue.h
index 3015d7799..2f5e1230f 100644
--- a/include/odp/api/spec/queue.h
+++ b/include/odp/api/spec/queue.h
@@ -19,8 +19,8 @@
extern "C" {
#endif
-#include <odp/api/schedule_types.h>
#include <odp/api/event.h>
+#include <odp/api/spec/queue_types.h>
/** @defgroup odp_queue ODP QUEUE
* Macros and operation on a queue.
@@ -43,272 +43,6 @@ extern "C" {
*/
/**
- * Queue type
- */
-typedef enum odp_queue_type_t {
- /** Plain queue
- *
- * Plain queues offer simple FIFO storage of events. Application may
- * dequeue directly from these queues. */
- ODP_QUEUE_TYPE_PLAIN = 0,
-
- /** Scheduled queue
- *
- * Scheduled queues are connected to the scheduler. Application must
- * not dequeue events directly from these queues but use the scheduler
- * instead. */
- ODP_QUEUE_TYPE_SCHED
-} odp_queue_type_t;
-
-/**
- * Queue operation mode
- */
-typedef enum odp_queue_op_mode_t {
- /** Multithread safe operation
- *
- * Queue operation (enqueue or dequeue) is multithread safe. Any
- * number of application threads may perform the operation
- * concurrently. */
- ODP_QUEUE_OP_MT = 0,
-
- /** Not multithread safe operation
- *
- * Queue operation (enqueue or dequeue) may not be multithread safe.
- * Application ensures synchronization between threads so that
- * simultaneously only single thread attempts the operation on
- * the same queue. */
- ODP_QUEUE_OP_MT_UNSAFE,
-
- /** Disabled
- *
- * Direct enqueue or dequeue operation from application is disabled.
- * An attempt to enqueue/dequeue directly will result undefined
- * behaviour. Various ODP functions (e.g. packet input, timer,
- * crypto, scheduler, etc) are able to perform enqueue or
- * dequeue operations normally on the queue.
- * */
- ODP_QUEUE_OP_DISABLED
-
-} odp_queue_op_mode_t;
-
-/**
- * Non-blocking level
- *
- * A non-blocking level defines implementation guarantees for application
- * progress when multiple threads operate on the same resource (e.g. a queue)
- * simultaneously. The first level (ODP_BLOCKING) does not have any block
- * freedom guarantees, but a suspending thread may block the other threads for
- * the entire time it remains suspended (infinitely if crashed).
- * On the contrary, actual non-blocking levels provide guarantees of progress:
- *
- * ODP_NONBLOCKING_LF: A non-blocking and lock-free implementation guarantees
- * that at least one of the threads successfully completes
- * its operations, regardless of what other threads do.
- * Application progress is guaranteed, but individual
- * threads may starve while trying to execute their
- * operations on the shared resource.
- *
- * ODP_NONBLOCKING_WF: A non-blocking and wait-free implementation guarantees
- * application progress with starvation freedom. All
- * threads are guaranteed to complete their operations in
- * a bounded number of steps, regardless of what other
- * threads do.
- *
- * Non-blocking levels are listed from the weakest to the strongest guarantee of
- * block freedom. Performance of a non-blocking implementation may be lower than
- * the blocking one. Non-blocking guarantees are important e.g. for real-time
- * applications when real-time and non real-time threads share a resource.
- */
-typedef enum odp_nonblocking_t {
- /** Blocking implementation. A suspeding thread may block all other
- * threads, i.e. no block freedom guarantees. This is the lowest level.
- */
- ODP_BLOCKING = 0,
-
- /** Non-blocking and lock-free implementation. Other threads can make
- * progress while a thread is suspended. Starvation freedom is not
- * guaranteed.
- */
- ODP_NONBLOCKING_LF,
-
- /** Non-blocking and wait-free implementation. Other threads can make
- * progress while a thread is suspended. Starvation freedom is
- * guaranteed.
- */
- ODP_NONBLOCKING_WF
-
-} odp_nonblocking_t;
-
-/**
- * Queue capabilities
- */
-typedef struct odp_queue_capability_t {
- /** Maximum number of event queues of any type (default size). Use
- * this in addition to queue type specific 'max_num', if both queue
- * types are used simultaneously. */
- uint32_t max_queues;
-
- /** Maximum number of ordered locks per queue */
- uint32_t max_ordered_locks;
-
- /** Maximum number of scheduling groups */
- unsigned max_sched_groups;
-
- /** Number of scheduling priorities */
- unsigned sched_prios;
-
- /** Plain queue capabilities */
- struct {
- /** Maximum number of plain (ODP_BLOCKING) queues of the
- * default size. */
- uint32_t max_num;
-
- /** Maximum number of events a plain (ODP_BLOCKING) queue can
- * store simultaneously. The value of zero means that plain
- * queues do not have a size limit, but a single queue can
- * store all available events. */
- uint32_t max_size;
-
- /** Lock-free (ODP_NONBLOCKING_LF) implementation capabilities.
- * The specification is the same as for the blocking
- * implementation. */
- struct {
- /** Maximum number of queues. Lock-free queues are not
- * supported when zero. */
- uint32_t max_num;
-
- /** Maximum queue size */
- uint32_t max_size;
-
- } lockfree;
-
- /** Wait-free (ODP_NONBLOCKING_WF) implementation capabilities.
- * The specification is the same as for the blocking
- * implementation. */
- struct {
- /** Maximum number of queues. Wait-free queues are not
- * supported when zero. */
- uint32_t max_num;
-
- /** Maximum queue size */
- uint32_t max_size;
-
- } waitfree;
-
- } plain;
-
- /** Scheduled queue capabilities */
- struct {
- /** Maximum number of scheduled (ODP_BLOCKING) queues of the
- * default size. */
- uint32_t max_num;
-
- /** Maximum number of events a scheduled (ODP_BLOCKING) queue
- * can store simultaneously. The value of zero means that
- * scheduled queues do not have a size limit, but a single
- * queue can store all available events. */
- uint32_t max_size;
-
- /** Lock-free (ODP_NONBLOCKING_LF) implementation capabilities.
- * The specification is the same as for the blocking
- * implementation. */
- struct {
- /** Maximum number of queues. Lock-free queues are not
- * supported when zero. */
- uint32_t max_num;
-
- /** Maximum queue size */
- uint32_t max_size;
-
- } lockfree;
-
- /** Wait-free (ODP_NONBLOCKING_WF) implementation capabilities.
- * The specification is the same as for the blocking
- * implementation. */
- struct {
- /** Maximum number of queues. Wait-free queues are not
- * supported when zero. */
- uint32_t max_num;
-
- /** Maximum queue size */
- uint32_t max_size;
-
- } waitfree;
-
- } sched;
-
-} odp_queue_capability_t;
-
-/**
- * ODP Queue parameters
- */
-typedef struct odp_queue_param_t {
- /** Queue type
- *
- * Valid values for other parameters in this structure depend on
- * the queue type. */
- odp_queue_type_t type;
-
- /** Enqueue mode
- *
- * Default value for both queue types is ODP_QUEUE_OP_MT. Application
- * may enable performance optimizations by defining MT_UNSAFE or
- * DISABLED modes when applicable. */
- odp_queue_op_mode_t enq_mode;
-
- /** Dequeue mode
- *
- * For PLAIN queues, the default value is ODP_QUEUE_OP_MT. Application
- * may enable performance optimizations by defining MT_UNSAFE or
- * DISABLED modes when applicable. However, when a plain queue is input
- * to the implementation (e.g. a queue for packet output), the
- * parameter is ignored in queue creation and the value is
- * ODP_QUEUE_OP_DISABLED.
- *
- * For SCHED queues, the parameter is ignored in queue creation and
- * the value is ODP_QUEUE_OP_DISABLED. */
- odp_queue_op_mode_t deq_mode;
-
- /** Scheduler parameters
- *
- * These parameters are considered only when queue type is
- * ODP_QUEUE_TYPE_SCHED. */
- odp_schedule_param_t sched;
-
- /** Non-blocking level
- *
- * Queue implementation must guarantee at least this level of block
- * freedom for queue enqueue and dequeue/schedule operations.
- * The default value is ODP_BLOCKING. */
- odp_nonblocking_t nonblocking;
-
- /** Queue context pointer
- *
- * User defined context pointer associated with the queue. The same
- * pointer can be accessed with odp_queue_context() and
- * odp_queue_context_set() calls. The implementation may read the
- * pointer for prefetching the context data. Default value of the
- * pointer is NULL. */
- void *context;
-
- /** Queue context data length
- *
- * User defined context data length in bytes for prefetching.
- * The implementation may use this value as a hint for the number of
- * context data bytes to prefetch. Default value is zero (no hint). */
- uint32_t context_len;
-
- /** Queue size
- *
- * The queue must be able to store at minimum this many events
- * simultaneously. The value must not exceed 'max_size' queue
- * capability. The value of zero means implementation specific
- * default size. */
- uint32_t size;
-
-} odp_queue_param_t;
-
-/**
* Queue create
*
* Create a queue according to the queue parameters. Queue type is specified by
@@ -528,15 +262,6 @@ uint64_t odp_queue_to_u64(odp_queue_t hdl);
void odp_queue_param_init(odp_queue_param_t *param);
/**
- * Queue information
- * Retrieve information about a queue with odp_queue_info()
- */
-typedef struct odp_queue_info_t {
- const char *name; /**< queue name */
- odp_queue_param_t param; /**< queue parameters */
-} odp_queue_info_t;
-
-/**
* Retrieve information about a queue
*
* Invalid queue handles or handles to free/destroyed queues leads to
diff --git a/include/odp/api/spec/queue_types.h b/include/odp/api/spec/queue_types.h
new file mode 100644
index 000000000..c8f310461
--- /dev/null
+++ b/include/odp/api/spec/queue_types.h
@@ -0,0 +1,316 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP queue types
+ */
+
+#ifndef ODP_API_SPEC_QUEUE_TYPES_H_
+#define ODP_API_SPEC_QUEUE_TYPES_H_
+#include <odp/visibility_begin.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/schedule_types.h>
+#include <odp/api/deprecated.h>
+
+/** @addtogroup odp_queue
+ * @{
+ */
+
+/**
+ * Queue type
+ */
+typedef enum odp_queue_type_t {
+ /** Plain queue
+ *
+ * Plain queues offer simple FIFO storage of events. Application may
+ * dequeue directly from these queues. */
+ ODP_QUEUE_TYPE_PLAIN = 0,
+
+ /** Scheduled queue
+ *
+ * Scheduled queues are connected to the scheduler. Application must
+ * not dequeue events directly from these queues but use the scheduler
+ * instead. */
+ ODP_QUEUE_TYPE_SCHED
+} odp_queue_type_t;
+
+/**
+ * Queue operation mode
+ */
+typedef enum odp_queue_op_mode_t {
+ /** Multithread safe operation
+ *
+ * Queue operation (enqueue or dequeue) is multithread safe. Any
+ * number of application threads may perform the operation
+ * concurrently. */
+ ODP_QUEUE_OP_MT = 0,
+
+ /** Not multithread safe operation
+ *
+ * Queue operation (enqueue or dequeue) may not be multithread safe.
+	 * The application must ensure synchronization between threads so that
+	 * only a single thread at a time attempts the operation on the same
+	 * queue. */
+ ODP_QUEUE_OP_MT_UNSAFE,
+
+ /** Disabled
+ *
+ * Direct enqueue or dequeue operation from application is disabled.
+	 * An attempt to enqueue/dequeue directly will result in undefined
+	 * behaviour. Various ODP functions (e.g. packet input, timer,
+	 * crypto, scheduler, etc) are able to perform enqueue or
+	 * dequeue operations normally on the queue.
+	 */
+ ODP_QUEUE_OP_DISABLED
+
+} odp_queue_op_mode_t;
+
+/**
+ * Non-blocking level
+ *
+ * A non-blocking level defines implementation guarantees for application
+ * progress when multiple threads operate on the same resource (e.g. a queue)
+ * simultaneously. The first level (ODP_BLOCKING) does not offer any block
+ * freedom guarantees: a suspended thread may block the other threads for
+ * the entire time it remains suspended (indefinitely if it has crashed).
+ * In contrast, the actual non-blocking levels provide progress guarantees:
+ *
+ * ODP_NONBLOCKING_LF: A non-blocking and lock-free implementation guarantees
+ * that at least one of the threads successfully completes
+ * its operations, regardless of what other threads do.
+ * Application progress is guaranteed, but individual
+ * threads may starve while trying to execute their
+ * operations on the shared resource.
+ *
+ * ODP_NONBLOCKING_WF: A non-blocking and wait-free implementation guarantees
+ * application progress with starvation freedom. All
+ * threads are guaranteed to complete their operations in
+ * a bounded number of steps, regardless of what other
+ * threads do.
+ *
+ * Non-blocking levels are listed from the weakest to the strongest guarantee of
+ * block freedom. Performance of a non-blocking implementation may be lower
+ * than that of a blocking one. Non-blocking guarantees are important e.g.
+ * for real-time applications where real-time and non real-time threads
+ * share a resource.
+ */
+typedef enum odp_nonblocking_t {
+	/** Blocking implementation. A suspending thread may block all other
+ * threads, i.e. no block freedom guarantees. This is the lowest level.
+ */
+ ODP_BLOCKING = 0,
+
+ /** Non-blocking and lock-free implementation. Other threads can make
+ * progress while a thread is suspended. Starvation freedom is not
+ * guaranteed.
+ */
+ ODP_NONBLOCKING_LF,
+
+ /** Non-blocking and wait-free implementation. Other threads can make
+ * progress while a thread is suspended. Starvation freedom is
+ * guaranteed.
+ */
+ ODP_NONBLOCKING_WF
+
+} odp_nonblocking_t;
+
+/**
+ * Queue capabilities
+ */
+typedef struct odp_queue_capability_t {
+ /** Maximum number of event queues of any type (default size). Use
+ * this in addition to queue type specific 'max_num', if both queue
+ * types are used simultaneously. */
+ uint32_t max_queues;
+
+ /** @deprecated Use max_ordered_locks field of
+ * odp_schedule_capability_t instead */
+ uint32_t ODP_DEPRECATE(max_ordered_locks);
+
+ /** @deprecated Use max_groups field of odp_schedule_capability_t
+ * instead */
+ unsigned int ODP_DEPRECATE(max_sched_groups);
+
+ /** @deprecated Use max_prios field of odp_schedule_capability_t
+ * instead */
+ unsigned int ODP_DEPRECATE(sched_prios);
+
+ /** Plain queue capabilities */
+ struct {
+ /** Maximum number of plain (ODP_BLOCKING) queues of the
+ * default size. */
+ uint32_t max_num;
+
+ /** Maximum number of events a plain (ODP_BLOCKING) queue can
+ * store simultaneously. The value of zero means that plain
+ * queues do not have a size limit, but a single queue can
+ * store all available events. */
+ uint32_t max_size;
+
+ /** Lock-free (ODP_NONBLOCKING_LF) implementation capabilities.
+ * The specification is the same as for the blocking
+ * implementation. */
+ struct {
+ /** Maximum number of queues. Lock-free queues are not
+ * supported when zero. */
+ uint32_t max_num;
+
+ /** Maximum queue size */
+ uint32_t max_size;
+
+ } lockfree;
+
+ /** Wait-free (ODP_NONBLOCKING_WF) implementation capabilities.
+ * The specification is the same as for the blocking
+ * implementation. */
+ struct {
+ /** Maximum number of queues. Wait-free queues are not
+ * supported when zero. */
+ uint32_t max_num;
+
+ /** Maximum queue size */
+ uint32_t max_size;
+
+ } waitfree;
+
+ } plain;
+
+ /** @deprecated Use queue capabilities in odp_schedule_capability_t
+ * instead */
+ struct {
+ /** Maximum number of scheduled (ODP_BLOCKING) queues of the
+ * default size. */
+ uint32_t max_num;
+
+ /** Maximum number of events a scheduled (ODP_BLOCKING) queue
+ * can store simultaneously. The value of zero means that
+ * scheduled queues do not have a size limit, but a single
+ * queue can store all available events. */
+ uint32_t max_size;
+
+ /** Lock-free (ODP_NONBLOCKING_LF) implementation capabilities.
+ * The specification is the same as for the blocking
+ * implementation. */
+ struct {
+ /** Maximum number of queues. Lock-free queues are not
+ * supported when zero. */
+ uint32_t max_num;
+
+ /** Maximum queue size */
+ uint32_t max_size;
+
+ } lockfree;
+
+ /** Wait-free (ODP_NONBLOCKING_WF) implementation capabilities.
+ * The specification is the same as for the blocking
+ * implementation. */
+ struct {
+ /** Maximum number of queues. Wait-free queues are not
+ * supported when zero. */
+ uint32_t max_num;
+
+ /** Maximum queue size */
+ uint32_t max_size;
+
+ } waitfree;
+
+ } ODP_DEPRECATE(sched);
+
+} odp_queue_capability_t;
+
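
Taken together, odp_nonblocking_t and odp_queue_capability_t let an application discover and request the strongest block-freedom level the platform supports. A minimal sketch (an illustration only, not part of the patch; error handling elided):

    static odp_nonblocking_t pick_nonblocking_level(void)
    {
        odp_queue_capability_t capa;

        if (odp_queue_capability(&capa))
            return ODP_BLOCKING; /* query failed, assume the weakest level */

        /* Prefer the strongest guarantee the platform offers */
        if (capa.plain.waitfree.max_num > 0)
            return ODP_NONBLOCKING_WF;
        if (capa.plain.lockfree.max_num > 0)
            return ODP_NONBLOCKING_LF;

        return ODP_BLOCKING;
    }
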
+/**
+ * ODP Queue parameters
+ */
+typedef struct odp_queue_param_t {
+ /** Queue type
+ *
+ * Valid values for other parameters in this structure depend on
+ * the queue type. */
+ odp_queue_type_t type;
+
+ /** Enqueue mode
+ *
+ * Default value for both queue types is ODP_QUEUE_OP_MT. Application
+ * may enable performance optimizations by defining MT_UNSAFE or
+ * DISABLED modes when applicable. */
+ odp_queue_op_mode_t enq_mode;
+
+ /** Dequeue mode
+ *
+ * For PLAIN queues, the default value is ODP_QUEUE_OP_MT. Application
+ * may enable performance optimizations by defining MT_UNSAFE or
+ * DISABLED modes when applicable. However, when a plain queue is input
+ * to the implementation (e.g. a queue for packet output), the
+ * parameter is ignored in queue creation and the value is
+ * ODP_QUEUE_OP_DISABLED.
+ *
+ * For SCHED queues, the parameter is ignored in queue creation and
+ * the value is ODP_QUEUE_OP_DISABLED. */
+ odp_queue_op_mode_t deq_mode;
+
+ /** Scheduler parameters
+ *
+ * These parameters are considered only when queue type is
+ * ODP_QUEUE_TYPE_SCHED. */
+ odp_schedule_param_t sched;
+
+ /** Non-blocking level
+ *
+ * Queue implementation must guarantee at least this level of block
+ * freedom for queue enqueue and dequeue/schedule operations.
+ * The default value is ODP_BLOCKING. */
+ odp_nonblocking_t nonblocking;
+
+ /** Queue context pointer
+ *
+ * User defined context pointer associated with the queue. The same
+ * pointer can be accessed with odp_queue_context() and
+ * odp_queue_context_set() calls. The implementation may read the
+ * pointer for prefetching the context data. Default value of the
+ * pointer is NULL. */
+ void *context;
+
+ /** Queue context data length
+ *
+ * User defined context data length in bytes for prefetching.
+ * The implementation may use this value as a hint for the number of
+ * context data bytes to prefetch. Default value is zero (no hint). */
+ uint32_t context_len;
+
+ /** Queue size
+ *
+ * The queue must be able to store at minimum this many events
+ * simultaneously. The value must not exceed 'max_size' queue
+ * capability. The value of zero means implementation specific
+ * default size. */
+ uint32_t size;
+
+} odp_queue_param_t;
+
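
For reference, a typical creation flow using these parameters might look as follows (a sketch; the queue name, size and 'my_ctx' context object are illustrative):

    odp_queue_param_t param;
    odp_queue_t queue;

    odp_queue_param_init(&param);            /* start from defaults */
    param.type     = ODP_QUEUE_TYPE_PLAIN;
    param.deq_mode = ODP_QUEUE_OP_MT_UNSAFE; /* single consumer thread */
    param.size     = 1024;                   /* must not exceed 'max_size' capability */
    param.context  = &my_ctx;

    queue = odp_queue_create("loopback-q", &param);
    if (queue == ODP_QUEUE_INVALID)
        abort(); /* creation failed */
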
+/**
+ * Queue information
+ * Retrieve information about a queue with odp_queue_info()
+ */
+typedef struct odp_queue_info_t {
+ const char *name; /**< queue name */
+ odp_queue_param_t param; /**< queue parameters */
+} odp_queue_info_t;
+
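
The structure is filled in by odp_queue_info(); e.g. (assuming <stdio.h>):

    odp_queue_info_t info;

    if (odp_queue_info(queue, &info) == 0)
        printf("queue '%s', size %u\n", info.name, info.param.size);
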
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#include <odp/visibility_end.h>
+#endif
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index bbc749836..fa66f2600 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -113,6 +113,35 @@ int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
int num);
/**
+ * Schedule, wait for events
+ *
+ * Like odp_schedule_multi(), but waits indefinitely for events.
+ *
+ * @param[out] from Output parameter for the source queue (where the event
+ * was dequeued from). Ignored if NULL.
+ * @param[out] events Event array for output
+ * @param num Maximum number of events to output
+ *
+ * @return Number of events output (1 ... num)
+ */
+int odp_schedule_multi_wait(odp_queue_t *from, odp_event_t events[], int num);
+
+/**
+ * Schedule, do not wait for events
+ *
+ * Like odp_schedule_multi(), but does not wait for events.
+ *
+ * @param[out] from Output parameter for the source queue (where the event
+ * was dequeued from). Ignored if NULL.
+ * @param[out] events Event array for output
+ * @param num Maximum number of events to output
+ *
+ * @return Number of events output (0 ... num)
+ */
+int odp_schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[],
+ int num);
+
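
A typical worker loop built on the new variants might look like this (a sketch; 'running' and process() are application-side placeholders):

    odp_event_t ev[32];
    odp_queue_t from;
    int num, i;

    while (running) {
        num = odp_schedule_multi_no_wait(&from, ev, 32);
        for (i = 0; i < num; i++)
            process(from, ev[i]);
    }
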
+/**
* Pause scheduling
*
* Pause global scheduling for this thread. After this call, all schedule calls
@@ -181,13 +210,104 @@ void odp_schedule_release_ordered(void);
void odp_schedule_prefetch(int num);
/**
+ * Maximum scheduling priority level
+ *
+ * This is the maximum value that can be set to the 'prio' field in
+ * odp_schedule_param_t (e.g. @see odp_queue_create()). Queues with a higher
+ * priority value are served with higher priority than queues with a lower
+ * priority value.
+ *
+ * @return Maximum scheduling priority level
+ */
+int odp_schedule_max_prio(void);
+
+/**
+ * Minimum scheduling priority level
+ *
+ * This is the minimum value that can be set to the 'prio' field in
+ * odp_schedule_param_t (e.g. @see odp_queue_create()). Queues with a higher
+ * priority value are served with higher priority than queues with a lower
+ * priority value.
+ *
+ * @return Minimum scheduling priority level
+ */
+int odp_schedule_min_prio(void);
+
+/**
+ * Default scheduling priority level
+ *
+ * This is the default value of the 'prio' field in odp_schedule_param_t
+ * (e.g. @see odp_queue_param_init()). The default value should be suitable for
+ * an application that uses a single priority level for all its queues (i.e.
+ * uses the scheduler only for load balancing and synchronization). Typically,
+ * the default value is between minimum and maximum values, but with a few
+ * priority levels it may be close or equal to those.
+ *
+ * @return Default scheduling priority level
+ */
+int odp_schedule_default_prio(void);
+
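
Portable code now derives priority values from these functions instead of fixed macros, for example:

    odp_queue_param_t param;

    odp_queue_param_init(&param);
    param.type       = ODP_QUEUE_TYPE_SCHED;
    param.sched.prio = odp_schedule_max_prio(); /* serve this queue first */
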
+/**
* Number of scheduling priorities
*
+ * The number of priority levels supported by the scheduler. It equals
+ * odp_schedule_max_prio() - odp_schedule_min_prio() + 1.
+ *
* @return Number of scheduling priorities
*/
int odp_schedule_num_prio(void);
/**
+ * Initialize schedule configuration options
+ *
+ * Initialize an odp_schedule_config_t to its default values.
+ *
+ * @param[out] config Pointer to schedule configuration structure
+ */
+void odp_schedule_config_init(odp_schedule_config_t *config);
+
+/**
+ * Global schedule configuration
+ *
+ * Initialize and configure the scheduler with global configuration options
+ * for scheduling events across the scheduled queues.
+ * This function must be called exactly once, before the scheduler is used
+ * (i.e. before any other scheduler function is called, except
+ * odp_schedule_capability() and odp_schedule_config_init()) and before any
+ * queues are created (by the application itself or by other ODP modules).
+ * An application can pass a NULL value to use the default configuration.
+ * This has the same result as filling the structure with
+ * odp_schedule_config_init() and then passing it to odp_schedule_config().
+ *
+ * The initialization sequence should be (see the sketch below):
+ * odp_schedule_capability()
+ * odp_schedule_config_init()
+ * odp_schedule_config()
+ * odp_schedule()
+ *
+ * @param config Pointer to scheduler configuration structure or NULL for the
+ * default configuration
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ * @see odp_schedule_capability(), odp_schedule_config_init()
+ */
+int odp_schedule_config(const odp_schedule_config_t *config);
+
+/**
+ * Query scheduler capabilities
+ *
+ * Outputs schedule capabilities on success.
+ *
+ * @param[out] capa Pointer to capability structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_schedule_capability(odp_schedule_capability_t *capa);
+
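
The documented initialization sequence, written out (a sketch; the num_queues value is illustrative and error handling is elided):

    odp_schedule_capability_t capa;
    odp_schedule_config_t config;

    odp_schedule_capability(&capa);
    odp_schedule_config_init(&config);
    config.num_queues = 256; /* illustrative; must not exceed capa.max_queues */
    odp_schedule_config(&config);
    /* ... create scheduled queues, then call odp_schedule() from workers ... */
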
+/**
* Schedule group create
*
* Creates a schedule group with the thread mask. Only threads in the
diff --git a/include/odp/api/spec/schedule_types.h b/include/odp/api/spec/schedule_types.h
index 44eb663a2..2acec0dba 100644
--- a/include/odp/api/spec/schedule_types.h
+++ b/include/odp/api/spec/schedule_types.h
@@ -14,6 +14,8 @@
#define ODP_API_SPEC_SCHEDULE_TYPES_H_
#include <odp/visibility_begin.h>
+#include <odp/api/support.h>
+
#ifdef __cplusplus
extern "C" {
#endif
@@ -23,30 +25,27 @@ extern "C" {
*/
/**
- * @typedef odp_schedule_prio_t
- * Scheduler priority level
- */
-
-/**
* @def ODP_SCHED_PRIO_HIGHEST
- * Highest scheduling priority
+ * This macro is equivalent to calling odp_schedule_max_prio() and will be
+ * deprecated. Use the direct function call instead.
*/
/**
* @def ODP_SCHED_PRIO_NORMAL
- * Normal scheduling priority
+ * This macro is equivalent to calling odp_schedule_default_prio() and will be
+ * deprecated. Use the direct function call instead.
*/
/**
* @def ODP_SCHED_PRIO_LOWEST
- * Lowest scheduling priority
+ * This macro is equivalent to calling odp_schedule_min_prio() and will be
+ * deprecated. Use the direct function call instead.
*/
/**
* @def ODP_SCHED_PRIO_DEFAULT
- * Default scheduling priority. User does not care about the selected priority
- * level - throughput, load balancing and synchronization features are more
- * important than priority scheduling.
+ * This macro is equivalent to calling odp_schedule_default_prio() and will be
+ * deprecated. Use the direct function call instead.
*/
/**
@@ -79,6 +78,9 @@ extern "C" {
* requests another event from the scheduler, which implicitly releases the
* context. User may allow the scheduler to release the context earlier than
* that by calling odp_schedule_release_atomic().
+ * When the scheduler is enabled in flow-aware mode, the event flow ID value
+ * affects scheduling of the event, and synchronization is maintained per flow
+ * within each queue.
*/
/**
@@ -105,6 +107,9 @@ extern "C" {
* (e.g. freed or stored) within the context are considered missing from
* reordering and are skipped at this time (but can be ordered again within
* another context).
+ * When the scheduler is enabled in flow-aware mode, the event flow ID value
+ * affects scheduling of the event, and synchronization is maintained per flow
+ * within each queue.
*/
/**
@@ -136,11 +141,20 @@ extern "C" {
* Predefined scheduler group of all control threads
*/
+/**
+ * Scheduling priority level
+ *
+ * Priority level is an integer value between odp_schedule_min_prio() and
+ * odp_schedule_max_prio(). Queues with a higher priority value are served with
+ * higher priority than queues with a lower priority value.
+ */
+typedef int odp_schedule_prio_t;
+
/** Scheduler parameters */
typedef struct odp_schedule_param_t {
/** Priority level
*
- * Default value is ODP_SCHED_PRIO_DEFAULT. */
+ * Default value is returned by odp_schedule_default_prio(). */
odp_schedule_prio_t prio;
/** Synchronization method
@@ -160,6 +174,85 @@ typedef struct odp_schedule_param_t {
} odp_schedule_param_t;
/**
+ * Scheduler capabilities
+ */
+typedef struct odp_schedule_capability_t {
+ /** Maximum number of ordered locks per queue */
+ uint32_t max_ordered_locks;
+
+ /** Maximum number of scheduling groups */
+ uint32_t max_groups;
+
+ /** Number of scheduling priorities */
+ uint32_t max_prios;
+
+ /** Maximum number of scheduled (ODP_BLOCKING) queues of the default
+ * size. */
+ uint32_t max_queues;
+
+ /** Maximum number of events a scheduled (ODP_BLOCKING) queue can store
+ * simultaneously. The value of zero means that scheduled queues do not
+ * have a size limit, but a single queue can store all available
+ * events. */
+ uint32_t max_queue_size;
+
+ /** Maximum flow ID per queue
+ *
+	 * The valid flow ID range in flow-aware scheduling mode is from 0 to
+	 * this maximum value, so the maximum number of flows per queue is this
+	 * value plus one. A value of 0 indicates that flow-aware mode is not
+	 * supported. */
+ uint32_t max_flow_id;
+
+	/** Lock-free (ODP_NONBLOCKING_LF) queue support.
+ * The specification is the same as for the blocking implementation. */
+ odp_support_t lockfree_queues;
+
+	/** Wait-free (ODP_NONBLOCKING_WF) queue support.
+ * The specification is the same as for the blocking implementation. */
+ odp_support_t waitfree_queues;
+
+} odp_schedule_capability_t;
+
+/**
+ * Schedule configuration
+ */
+typedef struct odp_schedule_config_t {
+ /** Maximum number of scheduled queues to be supported.
+ *
+ * @see odp_schedule_capability_t
+ */
+ uint32_t num_queues;
+
+	/** Maximum number of events required to be stored simultaneously in
+	 * a scheduled queue. This number must not exceed the 'max_queue_size'
+	 * capability. A value of 0 configures the default queue size supported
+	 * by the implementation.
+ */
+ uint32_t queue_size;
+
+ /** Maximum flow ID per queue
+ *
+	 * This value must not exceed the 'max_flow_id' capability. Flow-aware
+	 * scheduling mode is enabled when the value is greater than 0.
+	 * The default value is 0.
+	 *
+	 * An application can assign events to specific flows by calling
+	 * odp_event_flow_id_set() before enqueuing events into a scheduled
+	 * queue. In flow-aware mode, the event flow ID value affects
+	 * scheduling of the event, and synchronization is maintained per flow
+	 * within each queue.
+	 *
+	 * Depending on the implementation, many more flows than queues may be
+	 * supported, as flows are lightweight entities.
+ *
+ * @see odp_schedule_capability_t, odp_event_flow_id()
+ */
+ uint32_t max_flow_id;
+
+} odp_schedule_config_t;
+
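
Enabling flow-aware scheduling then amounts to raising max_flow_id at configuration time and tagging events before enqueue. A sketch ('flow_hash' is an application-computed value; 'ev' and 'queue' are assumed to exist):

    odp_schedule_config_t config;

    odp_schedule_config_init(&config);
    config.max_flow_id = 15; /* 16 flows per queue; must not exceed capability */
    odp_schedule_config(&config);

    /* later, per event: */
    odp_event_flow_id_set(ev, flow_hash & 15);
    odp_queue_enq(queue, ev);
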
+/**
* @}
*/
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/comp.h b/include/odp/arch/arm32-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/comp.h b/include/odp/arch/arm64-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/comp.h b/include/odp/arch/default-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp/arch/mips64-linux/odp/api/abi/comp.h b/include/odp/arch/mips64-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/mips64-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/comp.h b/include/odp/arch/power64-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/cpu.h b/include/odp/arch/power64-linux/odp/api/abi/cpu.h
index 90bb87875..9e3338d60 100644
--- a/include/odp/arch/power64-linux/odp/api/abi/cpu.h
+++ b/include/odp/arch/power64-linux/odp/api/abi/cpu.h
@@ -5,4 +5,5 @@
*/
#define _ODP_NEED_GENERIC_CPU_PAUSE
+#define ODP_CACHE_LINE_SIZE 128
#include <odp/api/abi-default/cpu.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/comp.h b/include/odp/arch/x86_32-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/comp.h b/include/odp/arch/x86_64-linux/odp/api/abi/comp.h
new file mode 100644
index 000000000..a8f6439f4
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/comp.h
@@ -0,0 +1,7 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/abi-default/comp.h>
diff --git a/include/odp_api.h b/include/odp_api.h
index 2185e60a5..66b39e31e 100644
--- a/include/odp_api.h
+++ b/include/odp_api.h
@@ -24,6 +24,7 @@ extern "C" {
#include <odp/api/align.h>
#include <odp/api/hash.h>
#include <odp/api/chksum.h>
+#include <odp/api/comp.h>
#include <odp/api/hints.h>
#include <odp/api/debug.h>
#include <odp/api/byteorder.h>
diff --git a/m4/ax_prog_doxygen.m4 b/m4/ax_prog_doxygen.m4
index 426ba0dfb..ed1dc83b4 100644
--- a/m4/ax_prog_doxygen.m4
+++ b/m4/ax_prog_doxygen.m4
@@ -1,5 +1,5 @@
# ===========================================================================
-# http://www.gnu.org/software/autoconf-archive/ax_prog_doxygen.html
+# https://www.gnu.org/software/autoconf-archive/ax_prog_doxygen.html
# ===========================================================================
#
# SYNOPSIS
@@ -21,7 +21,7 @@
# The DX_*_FEATURE macros control the default setting for the given
# Doxygen feature. Supported features are 'DOXYGEN' itself, 'DOT' for
# generating graphics, 'HTML' for plain HTML, 'CHM' for compressed HTML
-# help (for MS users), 'CHI' for generating a seperate .chi file by the
+# help (for MS users), 'CHI' for generating a separate .chi file by the
# .chm file, and 'MAN', 'RTF', 'XML', 'PDF' and 'PS' for the appropriate
# output formats. The environment variable DOXYGEN_PAPER_SIZE may be
# specified to override the default 'a4wide' paper size.
@@ -97,7 +97,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.
-#serial 18
+#serial 24
## ----------##
## Defaults. ##
@@ -164,7 +164,7 @@ AC_DEFUN([DX_TEST_FEATURE], [test "$DX_FLAG_$1" = 1])
AC_DEFUN([DX_CHECK_DEPEND], [
test "$DX_FLAG_$1" = "$2" \
|| AC_MSG_ERROR([doxygen-DX_CURRENT_FEATURE ifelse([$2], 1,
- requires, contradicts) doxygen-DX_CURRENT_FEATURE])
+ requires, contradicts) doxygen-$1])
])
# DX_CLEAR_DEPEND(FEATURE, REQUIRED_FEATURE, REQUIRED_STATE)
@@ -265,14 +265,10 @@ m4_define([DX_loop], m4_dquote(m4_if(m4_eval(3 < m4_count($@)), 1,
[m4_for([DX_i], 4, m4_count($@), 2, [, m4_eval(DX_i[/2])])],
[])))dnl
-# Environment variables used inside Doxyfile:
+# Environment variables used inside doxygen.cfg:
DX_ENV_APPEND(SRCDIR, $srcdir)
-DX_ENV_APPEND(BUILDDIR, $builddir)
-DX_ENV_APPEND(VERSION, $VERSION)
-DX_ENV_APPEND(WITH_PLATFORM, $with_platform)
DX_ENV_APPEND(PROJECT, $DX_PROJECT)
DX_ENV_APPEND(VERSION, $PACKAGE_VERSION)
-DX_ENV_APPEND(WITH_ARCH, $ARCH_DIR)
# Doxygen itself:
DX_ARG_ABLE(doc, [generate any doxygen documentation],
@@ -325,8 +321,8 @@ DX_ARG_ABLE(chm, [generate doxygen compressed HTML help documentation],
DX_ENV_APPEND(GENERATE_HTMLHELP, YES)],
[DX_ENV_APPEND(GENERATE_HTMLHELP, NO)])
-# Seperate CHI file generation.
-DX_ARG_ABLE(chi, [generate doxygen seperate compressed HTML help index file],
+# Separate CHI file generation.
+DX_ARG_ABLE(chi, [generate doxygen separate compressed HTML help index file],
[DX_CHECK_DEPEND(chm, 1)],
[DX_CLEAR_DEPEND(chm, 1)],
[],
@@ -382,94 +378,82 @@ a4wide|a4|letter|legal|executive)
esac
# Rules:
-if test $DX_FLAG_html -eq 1; then
- DX_SNIPPET_html="## ------------------------------- ##
+AS_IF([[test $DX_FLAG_html -eq 1]],
+[[DX_SNIPPET_html="## ------------------------------- ##
## Rules specific for HTML output. ##
## ------------------------------- ##
-DX_CLEAN_HTML = \$(DX_DOCDIR)/html[]dnl
+DX_CLEAN_HTML = \$(DX_DOCDIR)/html]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/html]])
+ \$(DX_DOCDIR]DX_i[)/html]])[
-"
-else
- DX_SNIPPET_html=""
-fi
-if test $DX_FLAG_chi -eq 1; then
- DX_SNIPPET_chi="
-DX_CLEAN_CHI = \$(DX_DOCDIR)/\$(PACKAGE).chi[]dnl
+"]],
+[[DX_SNIPPET_html=""]])
+AS_IF([[test $DX_FLAG_chi -eq 1]],
+[[DX_SNIPPET_chi="
+DX_CLEAN_CHI = \$(DX_DOCDIR)/\$(PACKAGE).chi]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).chi]])"
-else
- DX_SNIPPET_chi=""
-fi
-if test $DX_FLAG_chm -eq 1; then
- DX_SNIPPET_chm="## ------------------------------ ##
+ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).chi]])["]],
+[[DX_SNIPPET_chi=""]])
+AS_IF([[test $DX_FLAG_chm -eq 1]],
+[[DX_SNIPPET_chm="## ------------------------------ ##
## Rules specific for CHM output. ##
## ------------------------------ ##
-DX_CLEAN_CHM = \$(DX_DOCDIR)/chm[]dnl
+DX_CLEAN_CHM = \$(DX_DOCDIR)/chm]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/chm]])\
+ \$(DX_DOCDIR]DX_i[)/chm]])[\
${DX_SNIPPET_chi}
-"
-else
- DX_SNIPPET_chm=""
-fi
-if test $DX_FLAG_man -eq 1; then
- DX_SNIPPET_man="## ------------------------------ ##
+"]],
+[[DX_SNIPPET_chm=""]])
+AS_IF([[test $DX_FLAG_man -eq 1]],
+[[DX_SNIPPET_man="## ------------------------------ ##
## Rules specific for MAN output. ##
## ------------------------------ ##
-DX_CLEAN_MAN = \$(DX_DOCDIR)/man[]dnl
+DX_CLEAN_MAN = \$(DX_DOCDIR)/man]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/man]])
+ \$(DX_DOCDIR]DX_i[)/man]])[
-"
-else
- DX_SNIPPET_man=""
-fi
-if test $DX_FLAG_rtf -eq 1; then
- DX_SNIPPET_rtf="## ------------------------------ ##
+"]],
+[[DX_SNIPPET_man=""]])
+AS_IF([[test $DX_FLAG_rtf -eq 1]],
+[[DX_SNIPPET_rtf="## ------------------------------ ##
## Rules specific for RTF output. ##
## ------------------------------ ##
-DX_CLEAN_RTF = \$(DX_DOCDIR)/rtf[]dnl
+DX_CLEAN_RTF = \$(DX_DOCDIR)/rtf]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/rtf]])
+ \$(DX_DOCDIR]DX_i[)/rtf]])[
-"
-else
- DX_SNIPPET_rtf=""
-fi
-if test $DX_FLAG_xml -eq 1; then
- DX_SNIPPET_xml="## ------------------------------ ##
+"]],
+[[DX_SNIPPET_rtf=""]])
+AS_IF([[test $DX_FLAG_xml -eq 1]],
+[[DX_SNIPPET_xml="## ------------------------------ ##
## Rules specific for XML output. ##
## ------------------------------ ##
-DX_CLEAN_XML = \$(DX_DOCDIR)/xml[]dnl
+DX_CLEAN_XML = \$(DX_DOCDIR)/xml]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/xml]])
+ \$(DX_DOCDIR]DX_i[)/xml]])[
-"
-else
- DX_SNIPPET_xml=""
-fi
-if test $DX_FLAG_ps -eq 1; then
- DX_SNIPPET_ps="## ----------------------------- ##
+"]],
+[[DX_SNIPPET_xml=""]])
+AS_IF([[test $DX_FLAG_ps -eq 1]],
+[[DX_SNIPPET_ps="## ----------------------------- ##
## Rules specific for PS output. ##
## ----------------------------- ##
-DX_CLEAN_PS = \$(DX_DOCDIR)/\$(PACKAGE).ps[]dnl
+DX_CLEAN_PS = \$(DX_DOCDIR)/\$(PACKAGE).ps]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).ps]])
+ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).ps]])[
DX_PS_GOAL = doxygen-ps
doxygen-ps: \$(DX_CLEAN_PS)
-m4_foreach([DX_i], [DX_loop],
+]m4_foreach([DX_i], [DX_loop],
[[\$(DX_DOCDIR]DX_i[)/\$(PACKAGE).ps: \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag
\$(DX_V_LATEX)cd \$(DX_DOCDIR]DX_i[)/latex; \\
rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \\
@@ -485,25 +469,22 @@ m4_foreach([DX_i], [DX_loop],
done; \\
\$(DX_DVIPS) -o ../\$(PACKAGE).ps refman.dvi
-]])dnl
-"
-else
- DX_SNIPPET_ps=""
-fi
-if test $DX_FLAG_pdf -eq 1; then
- DX_SNIPPET_pdf="## ------------------------------ ##
+]])["]],
+[[DX_SNIPPET_ps=""]])
+AS_IF([[test $DX_FLAG_pdf -eq 1]],
+[[DX_SNIPPET_pdf="## ------------------------------ ##
## Rules specific for PDF output. ##
## ------------------------------ ##
-DX_CLEAN_PDF = \$(DX_DOCDIR)/\$(PACKAGE).pdf[]dnl
+DX_CLEAN_PDF = \$(DX_DOCDIR)/\$(PACKAGE).pdf]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).pdf]])
+ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).pdf]])[
DX_PDF_GOAL = doxygen-pdf
doxygen-pdf: \$(DX_CLEAN_PDF)
-m4_foreach([DX_i], [DX_loop],
+]m4_foreach([DX_i], [DX_loop],
[[\$(DX_DOCDIR]DX_i[)/\$(PACKAGE).pdf: \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag
\$(DX_V_LATEX)cd \$(DX_DOCDIR]DX_i[)/latex; \\
rm -f *.aux *.toc *.idx *.ind *.ilg *.log *.out; \\
@@ -519,31 +500,26 @@ m4_foreach([DX_i], [DX_loop],
done; \\
mv refman.pdf ../\$(PACKAGE).pdf
-]])dnl
-"
-else
- DX_SNIPPET_pdf=""
-fi
-if test $DX_FLAG_ps -eq 1 -o $DX_FLAG_pdf -eq 1; then
- DX_SNIPPET_latex="## ------------------------------------------------- ##
+]])["]],
+[[DX_SNIPPET_pdf=""]])
+AS_IF([[test $DX_FLAG_ps -eq 1 -o $DX_FLAG_pdf -eq 1]],
+[[DX_SNIPPET_latex="## ------------------------------------------------- ##
## Rules specific for LaTeX (shared for PS and PDF). ##
## ------------------------------------------------- ##
DX_V_LATEX = \$(_DX_v_LATEX_\$(V))
_DX_v_LATEX_ = \$(_DX_v_LATEX_\$(AM_DEFAULT_VERBOSITY))
-_DX_v_LATEX_0 = @echo \" LATEX \" \$[]][[]@;
+_DX_v_LATEX_0 = @echo \" LATEX \" \$][@;
-DX_CLEAN_LATEX = \$(DX_DOCDIR)/latex[]dnl
+DX_CLEAN_LATEX = \$(DX_DOCDIR)/latex]dnl
m4_foreach([DX_i], [m4_shift(DX_loop)], [[\\
- \$(DX_DOCDIR]DX_i[)/latex]])
+ \$(DX_DOCDIR]DX_i[)/latex]])[
-"
-else
- DX_SNIPPET_latex=""
-fi
+"]],
+[[DX_SNIPPET_latex=""]])
-if test $DX_FLAG_doc -eq 1; then
- DX_SNIPPET_doc="## --------------------------------- ##
+AS_IF([[test $DX_FLAG_doc -eq 1]],
+[[DX_SNIPPET_doc="## --------------------------------- ##
## Format-independent Doxygen rules. ##
## --------------------------------- ##
@@ -563,23 +539,24 @@ _DX_v_DXGEN_0 = @echo \" DXGEN \" \$<;
.INTERMEDIATE: doxygen-run \$(DX_PS_GOAL) \$(DX_PDF_GOAL)
-doxygen-run:[]m4_foreach([DX_i], [DX_loop],
- [[ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag]])
+doxygen-run:]m4_foreach([DX_i], [DX_loop],
+ [[ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag]])[
doxygen-doc: doxygen-run \$(DX_PS_GOAL) \$(DX_PDF_GOAL)
-m4_foreach([DX_i], [DX_loop],
+]m4_foreach([DX_i], [DX_loop],
[[\$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag: \$(DX_CONFIG]DX_i[) \$(pkginclude_HEADERS)
\$(A""M_V_at)rm -rf \$(DX_DOCDIR]DX_i[)
\$(DX_V_DXGEN)\$(DX_ENV) DOCDIR=\$(DX_DOCDIR]DX_i[) \$(DX_DOXYGEN) \$(DX_CONFIG]DX_i[)
\$(A""M_V_at)echo Timestamp >\$][@
]])dnl
-DX_CLEANFILES = \\
+[DX_CLEANFILES = \\]
m4_foreach([DX_i], [DX_loop],
-[[ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag \\
+[[ \$(DX_DOCDIR]DX_i[)/doxygen_sqlite3.db \\
+ \$(DX_DOCDIR]DX_i[)/\$(PACKAGE).tag \\
]])dnl
- -r \\
+[ -r \\
\$(DX_CLEAN_HTML) \\
\$(DX_CLEAN_CHM) \\
\$(DX_CLEAN_CHI) \\
@@ -588,19 +565,17 @@ m4_foreach([DX_i], [DX_loop],
\$(DX_CLEAN_XML) \\
\$(DX_CLEAN_PS) \\
\$(DX_CLEAN_PDF) \\
- \$(DX_CLEAN_LATEX)"
-else
- DX_SNIPPET_doc=""
-fi
+ \$(DX_CLEAN_LATEX)"]],
+[[DX_SNIPPET_doc=""]])
AC_SUBST([DX_RULES],
["${DX_SNIPPET_doc}"])dnl
AM_SUBST_NOTMAKE([DX_RULES])
#For debugging:
-echo DX_FLAG_doc=$DX_FLAG_doc
+#echo DX_FLAG_doc=$DX_FLAG_doc
#echo DX_FLAG_dot=$DX_FLAG_dot
#echo DX_FLAG_man=$DX_FLAG_man
-echo DX_FLAG_html=$DX_FLAG_html
+#echo DX_FLAG_html=$DX_FLAG_html
#echo DX_FLAG_chm=$DX_FLAG_chm
#echo DX_FLAG_chi=$DX_FLAG_chi
#echo DX_FLAG_rtf=$DX_FLAG_rtf
diff --git a/m4/odp_dpdk.m4 b/m4/odp_dpdk.m4
index 2ef5253c8..2f6662e2b 100644
--- a/m4/odp_dpdk.m4
+++ b/m4/odp_dpdk.m4
@@ -6,6 +6,10 @@ AC_DEFUN([ODP_DPDK_PMDS], [dnl
AS_VAR_SET([DPDK_PMDS], ["-Wl,--whole-archive,"])
for filename in "$1"/librte_pmd_*.a; do
cur_driver=`basename "$filename" .a | sed -e 's/^lib//'`
+
+# If the glob matches nothing, 'filename' is set once to the pattern itself
+AS_IF([test "x$cur_driver" = "xrte_pmd_*"], [break])
+
AS_VAR_APPEND([DPDK_PMDS], [-l$cur_driver,])
AS_CASE([$cur_driver],
[rte_pmd_nfp], [AS_VAR_APPEND([DPDK_LIBS], [" -lm"])],
@@ -121,9 +125,9 @@ AC_DEFUN([ODP_DPDK], [dnl
AS_IF([test "x$1" = "xsystem"], [dnl
DPDK_CPPFLAGS="-isystem /usr/include/dpdk"
DPDK_LDFLAGS=""
- DPDK_LIB_PATH="`$CC --print-file-name=libdpdk.so`"
+ DPDK_LIB_PATH="`$CC $CFLAGS $LDFLAGS --print-file-name=libdpdk.so`"
if test "$DPDK_LIB_PATH" = "libdpdk.so" ; then
- DPDK_LIB_PATH="`$CC --print-file-name=libdpdk.a`"
+ DPDK_LIB_PATH="`$CC $CFLAGS $LDFLAGS --print-file-name=libdpdk.a`"
AS_IF([test "$DPDK_LIB_PATH" = "libdpdk.a"],
[AC_MSG_FAILURE([Could not locate system DPDK library directory])])
else
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 13101cfdc..1a5ca7a7d 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -52,6 +52,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/buffer.h \
include-abi/odp/api/abi/byteorder.h \
include-abi/odp/api/abi/classification.h \
+ include-abi/odp/api/abi/comp.h \
include-abi/odp/api/abi/cpumask.h \
include-abi/odp/api/abi/crypto.h \
include-abi/odp/api/abi/debug.h \
@@ -99,7 +100,6 @@ noinst_HEADERS = \
include/odp_global_data.h \
include/odp_init_internal.h \
include/odp_ipsec_internal.h \
- include/odp_ishm_internal.h \
include/odp_ishmphy_internal.h \
include/odp_ishmpool_internal.h \
include/odp_libconfig_internal.h \
@@ -127,6 +127,7 @@ noinst_HEADERS = \
include/odp_queue_lf.h \
include/odp_queue_scalable_internal.h \
include/odp_ring_internal.h \
+ include/odp_ring_mpmc_internal.h \
include/odp_ring_spsc_internal.h \
include/odp_ring_st_internal.h \
include/odp_schedule_if.h \
@@ -142,6 +143,7 @@ noinst_HEADERS = \
include/protocols/eth.h \
include/protocols/ip.h \
include/protocols/ipsec.h \
+ include/protocols/sctp.h \
include/protocols/tcp.h \
include/protocols/thash.h \
include/protocols/udp.h
@@ -155,6 +157,10 @@ __LIB__libodp_linux_la_SOURCES = \
odp_buffer.c \
odp_chksum.c \
odp_classification.c \
+ odp_comp.c \
+ miniz/miniz.c miniz/miniz.h miniz/miniz_common.h \
+ miniz/miniz_tdef.c miniz/miniz_tdef.h \
+ miniz/miniz_tinfl.c miniz/miniz_tinfl.h \
odp_cpumask.c \
odp_cpumask_task.c \
odp_errno.c \
@@ -187,7 +193,6 @@ __LIB__libodp_linux_la_SOURCES = \
odp_rwlock_recursive.c \
odp_schedule_basic.c \
odp_schedule_if.c \
- odp_schedule_iquery.c \
odp_schedule_scalable.c \
odp_schedule_scalable_ordered.c \
odp_schedule_sp.c \
diff --git a/platform/linux-generic/arch/aarch64/odp_atomic.h b/platform/linux-generic/arch/aarch64/odp_atomic.h
index 8b6c3aec0..8a0e7ce2b 100644
--- a/platform/linux-generic/arch/aarch64/odp_atomic.h
+++ b/platform/linux-generic/arch/aarch64/odp_atomic.h
@@ -108,32 +108,45 @@ static inline __int128 __lockfree_fetch_or_16(__int128 *var, __int128 mask,
#else
-static inline __int128 casp(__int128 *var, __int128 old, __int128 neu, int mo)
+static inline __int128_t cas_u128(__int128_t *ptr, __int128_t old_val,
+ __int128_t new_val, int mo)
{
+	/* CASP instructions require that each operand is an even/odd register pair */
+ register uint64_t old0 __asm__ ("x0");
+ register uint64_t old1 __asm__ ("x1");
+ register uint64_t new0 __asm__ ("x2");
+ register uint64_t new1 __asm__ ("x3");
+
+ old0 = (uint64_t)old_val;
+ old1 = (uint64_t)(old_val >> 64);
+ new0 = (uint64_t)new_val;
+ new1 = (uint64_t)(new_val >> 64);
+
if (mo == __ATOMIC_RELAXED) {
- __asm__ volatile("casp %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
+ __asm__ volatile("casp %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+ : [old0] "+r" (old0), [old1] "+r" (old1)
+ : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
: "memory");
} else if (mo == __ATOMIC_ACQUIRE) {
- __asm__ volatile("caspa %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
+ __asm__ volatile("caspa %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+ : [old0] "+r" (old0), [old1] "+r" (old1)
+ : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
: "memory");
} else if (mo == __ATOMIC_ACQ_REL) {
- __asm__ volatile("caspal %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
+ __asm__ volatile("caspal %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+ : [old0] "+r" (old0), [old1] "+r" (old1)
+ : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
: "memory");
} else if (mo == __ATOMIC_RELEASE) {
- __asm__ volatile("caspl %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
+ __asm__ volatile("caspl %[old0], %[old1], %[new0], %[new1], [%[ptr]]"
+ : [old0] "+r" (old0), [old1] "+r" (old1)
+ : [new0] "r" (new0), [new1] "r" (new1), [ptr] "r" (ptr)
: "memory");
} else {
abort();
}
- return old;
+
+ return ((__int128)old0) | (((__int128)old1) << 64);
}
static inline bool
@@ -147,7 +160,7 @@ __lockfree_compare_exchange_16(register __int128 *var, __int128 *exp,
__int128 expected;
expected = *exp;
- old = casp(var, expected, neu, mo_success);
+ old = cas_u128(var, expected, neu, mo_success);
*exp = old; /* Always update, atomically read value */
return old == expected;
}
@@ -160,7 +173,7 @@ static inline __int128 __lockfree_exchange_16(__int128 *var, __int128 neu,
do {
expected = *var;
- old = casp(var, expected, neu, mo);
+ old = cas_u128(var, expected, neu, mo);
} while (old != expected);
return old;
}
@@ -173,7 +186,7 @@ static inline __int128 __lockfree_fetch_and_16(__int128 *var, __int128 mask,
do {
expected = *var;
- old = casp(var, expected, expected & mask, mo);
+ old = cas_u128(var, expected, expected & mask, mo);
} while (old != expected);
return old;
}
@@ -186,7 +199,7 @@ static inline __int128 __lockfree_fetch_or_16(__int128 *var, __int128 mask,
do {
expected = *var;
- old = casp(var, expected, expected | mask, mo);
+ old = cas_u128(var, expected, expected | mask, mo);
} while (old != expected);
return old;
}
diff --git a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
index 85aec6a65..3a1486dd2 100644
--- a/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/aarch64/odp_sysinfo_parse.c
@@ -12,7 +12,6 @@
#include <odp_sysinfo_internal.h>
#include <odp_debug_internal.h>
-#define DUMMY_MAX_MHZ 1000
#define TMP_STR_LEN 64
static void aarch64_impl_str(char *str, int maxlen, int implementer)
@@ -202,6 +201,76 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
void sys_info_print_arch(void)
{
+ const char *ndef = "n/a";
+
+ /* Avoid compiler warning about unused variable */
+ (void)ndef;
+
+ /* See ARM C Language Extensions documentation for details */
+ printf("ARM FEATURES:\n");
+
+ printf(" __ARM_ARCH ");
+#ifdef __ARM_ARCH
+ printf("%i\n", __ARM_ARCH);
+#else
+ printf("%s\n", ndef);
+#endif
+
+ printf(" __ARM_ARCH_ISA_A64 ");
+#ifdef __ARM_ARCH_ISA_A64
+ printf("%i\n", __ARM_ARCH_ISA_A64);
+#else
+ printf("%s\n", ndef);
+#endif
+
+#if defined(__ARM_ARCH) && __ARM_ARCH >= 8
+ /* Actually, this checks for new NEON instructions in
+ * v8.1, but is currently the only way to distinguish
+ * v8.0 and >=v8.1. */
+ printf(" ARMv8 ISA version ");
+#ifdef __ARM_FEATURE_QRDMX
+ printf("v8.1 or higher\n");
+#else
+ printf("v8.0\n");
+#endif
+#endif
+
+#ifdef __ARM_FEATURE_QRDMX
+ /* Actually, this checks for new NEON instructions in
+ * v8.1, but is currently the only way to distinguish
+ * v8.0 and >=v8.1. */
+ printf(" ARMv8.1 instructions\n");
+#endif
+
+ printf(" __ARM_NEON ");
+#ifdef __ARM_NEON
+ printf("%i\n", __ARM_NEON);
+#else
+ printf("%s\n", ndef);
+#endif
+
+ printf(" __ARM_FEATURE_IDIV ");
+#ifdef __ARM_FEATURE_IDIV
+ printf("%i\n", __ARM_FEATURE_IDIV);
+#else
+ printf("%s\n", ndef);
+#endif
+
+ printf(" __ARM_FEATURE_CRYPTO ");
+#ifdef __ARM_FEATURE_CRYPTO
+ printf("%i\n", __ARM_FEATURE_CRYPTO);
+#else
+ printf("%s\n", ndef);
+#endif
+
+ printf(" __ARM_FEATURE_CRC32 ");
+#ifdef __ARM_FEATURE_CRC32
+ printf("%i\n", __ARM_FEATURE_CRC32);
+#else
+ printf("%s\n", ndef);
+#endif
+
+ printf("\n");
}
uint64_t odp_cpu_arch_hz_current(int id)
diff --git a/platform/linux-generic/arch/default/odp_sysinfo_parse.c b/platform/linux-generic/arch/default/odp_sysinfo_parse.c
index b93788872..da3f2eb3f 100644
--- a/platform/linux-generic/arch/default/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/default/odp_sysinfo_parse.c
@@ -7,24 +7,10 @@
#include "config.h"
#include <odp_sysinfo_internal.h>
-#include <odp_debug_internal.h>
-#include <string.h>
-
-#define DUMMY_MAX_MHZ 1400
int cpuinfo_parser(FILE *file ODP_UNUSED, system_info_t *sysinfo)
{
- int i;
-
- ODP_DBG("Warning: use dummy values for freq and model string\n");
- for (i = 0; i < CONFIG_NUM_CPU; i++) {
- ODP_PRINT("WARN: cpu[%i] uses dummy max frequency %u MHz\n",
- i, DUMMY_MAX_MHZ);
- sysinfo->cpu_hz_max[i] = DUMMY_MAX_MHZ * 1000000;
- strcpy(sysinfo->model_str[i], "UNKNOWN");
- }
-
- return 0;
+ return _odp_dummy_cpuinfo(sysinfo);
}
void sys_info_print_arch(void)
diff --git a/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h b/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h
index 90bb87875..9e3338d60 100644
--- a/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h
+++ b/platform/linux-generic/arch/powerpc/odp/api/abi/cpu.h
@@ -5,4 +5,5 @@
*/
#define _ODP_NEED_GENERIC_CPU_PAUSE
+#define ODP_CACHE_LINE_SIZE 128
#include <odp/api/abi-default/cpu.h>
diff --git a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
index 504aa3efa..7124e84f2 100644
--- a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
+++ b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
@@ -15,34 +15,54 @@ int cpuinfo_parser(FILE *file, system_info_t *sysinfo)
char str[1024];
char *pos, *pos_end;
double ghz = 0.0;
+ double mhz = 0.0;
uint64_t hz;
int id = 0;
+ bool freq_set = false;
strcpy(sysinfo->cpu_arch_str, "x86");
while (fgets(str, sizeof(str), file) != NULL && id < CONFIG_NUM_CPU) {
pos = strstr(str, "model name");
if (pos) {
+ freq_set = false;
+
/* Copy model name between : and @ characters */
pos = strchr(str, ':');
pos_end = strchr(str, '@');
- if (pos == NULL || pos_end == NULL)
+ if (pos == NULL)
continue;
- *(pos_end - 1) = '\0';
+ if (pos_end != NULL)
+ *(pos_end - 1) = '\0';
+
strncpy(sysinfo->model_str[id], pos + 2,
MODEL_STR_SIZE - 1);
if (sysinfo->cpu_hz_max[id]) {
+ freq_set = true;
id++;
continue;
}
/* max frequency needs to be set */
- if (sscanf(pos_end, "@ %lfGHz", &ghz) == 1) {
+ if (pos_end != NULL &&
+ sscanf(pos_end, "@ %lfGHz", &ghz) == 1) {
hz = (uint64_t)(ghz * 1000000000.0);
- sysinfo->cpu_hz_max[id] = hz;
+ sysinfo->cpu_hz_max[id++] = hz;
+ freq_set = true;
+ }
+ } else if (!freq_set &&
+ strstr(str, "bogomips") != NULL) {
+ pos = strchr(str, ':');
+ if (pos == NULL)
+ continue;
+
+ if (sscanf(pos + 2, "%lf", &mhz) == 1) {
+ /* On typical x86 BogoMIPS is freq * 2 */
+ hz = (uint64_t)(mhz * 1000000.0 / 2);
+ sysinfo->cpu_hz_max[id++] = hz;
+ freq_set = true;
}
- id++;
}
}
@@ -63,6 +83,8 @@ uint64_t odp_cpu_arch_hz_current(int id)
double mhz = 0.0;
file = fopen("/proc/cpuinfo", "rt");
+ if (!file)
+ return 0;
/* find the correct processor instance */
while (fgets(str, sizeof(str), file) != NULL) {
diff --git a/platform/linux-generic/dumpconfig/.gitignore b/platform/linux-generic/dumpconfig/.gitignore
new file mode 100644
index 000000000..44752b565
--- /dev/null
+++ b/platform/linux-generic/dumpconfig/.gitignore
@@ -0,0 +1 @@
+odp_linuxgen_dumpconfig
diff --git a/platform/linux-generic/dumpconfig/Makefile.am b/platform/linux-generic/dumpconfig/Makefile.am
new file mode 100644
index 000000000..933424f0a
--- /dev/null
+++ b/platform/linux-generic/dumpconfig/Makefile.am
@@ -0,0 +1,10 @@
+include $(top_srcdir)/Makefile.inc
+
+AM_CPPFLAGS = -I$(top_builddir)/platform/$(with_platform)/include
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/include
+
+bin_PROGRAMS = odp_linuxgen_dumpconfig
+
+odp_linuxgen_dumpconfig_SOURCES = dumpconfig.c
+
+TESTS = odp_linuxgen_dumpconfig
diff --git a/platform/linux-generic/dumpconfig/dumpconfig.c b/platform/linux-generic/dumpconfig/dumpconfig.c
new file mode 100644
index 000000000..a04f5c2dd
--- /dev/null
+++ b/platform/linux-generic/dumpconfig/dumpconfig.c
@@ -0,0 +1,43 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <odp_libconfig_config.h>
+
+int main(void)
+{
+ unsigned int i;
+ const char *filename;
+ FILE *f;
+	int c;
+
+ printf("# Builtin platform config\n\n");
+ for (i = 0; i < sizeof(config_builtin); i++)
+ printf("%c", config_builtin[i]);
+
+ filename = getenv("ODP_CONFIG_FILE");
+ if (filename == NULL)
+ return 0;
+
+ printf("# Overridden section with ODP_CONFIG_FILE=%s\n\n", filename);
+
+ f = fopen(filename, "r");
+ if (f == NULL) {
+		fprintf(stderr, "Error: could not open file %s\n", filename);
+ return -1;
+ }
+
+ while (1) {
+ c = fgetc(f);
+ if (feof(f))
+ break;
+ printf("%c", c);
+ }
+
+ fclose(f);
+ return 0;
+}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/classification.h b/platform/linux-generic/include-abi/odp/api/abi/classification.h
index 21db75375..843e15f6c 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/classification.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/classification.h
@@ -25,10 +25,10 @@ extern "C" {
*/
typedef ODP_HANDLE_T(odp_cos_t);
-#define ODP_COS_INVALID _odp_cast_scalar(odp_cos_t, ~0)
+#define ODP_COS_INVALID _odp_cast_scalar(odp_cos_t, 0)
typedef ODP_HANDLE_T(odp_pmr_t);
-#define ODP_PMR_INVALID _odp_cast_scalar(odp_pmr_t, ~0)
+#define ODP_PMR_INVALID _odp_cast_scalar(odp_pmr_t, 0)
#if ODP_DEPRECATED_API
#define ODP_PMR_INVAL ODP_PMR_INVALID
diff --git a/platform/linux-generic/include-abi/odp/api/abi/comp.h b/platform/linux-generic/include-abi/odp/api/abi/comp.h
new file mode 100644
index 000000000..ac3d3a4a9
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/comp.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_ABI_COMP_H_
+#define ODP_ABI_COMP_H_
+
+#include <odp/api/plat/strong_types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/** @ingroup odp_compression
+ * @{
+ */
+
+typedef ODP_HANDLE_T(odp_comp_session_t);
+
+#define ODP_COMP_SESSION_INVALID _odp_cast_scalar(odp_comp_session_t, 0)
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include-abi/odp/api/abi/event.h b/platform/linux-generic/include-abi/odp/api/abi/event.h
index d7bd57c0f..a8024654c 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/event.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/event.h
@@ -40,7 +40,8 @@ typedef enum odp_event_subtype_t {
ODP_EVENT_NO_SUBTYPE = 0,
ODP_EVENT_PACKET_BASIC = 1,
ODP_EVENT_PACKET_CRYPTO = 2,
- ODP_EVENT_PACKET_IPSEC = 3
+ ODP_EVENT_PACKET_IPSEC = 3,
+ ODP_EVENT_PACKET_COMP = 4
} odp_event_subtype_t;
/* Inlined functions for non-ABI compat mode */
diff --git a/platform/linux-generic/include-abi/odp/api/abi/ipsec.h b/platform/linux-generic/include-abi/odp/api/abi/ipsec.h
index b91da7959..a04bb1741 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/ipsec.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/ipsec.h
@@ -26,7 +26,7 @@ extern "C" {
typedef ODP_HANDLE_T(odp_ipsec_sa_t);
-#define ODP_IPSEC_SA_INVALID _odp_cast_scalar(odp_ipsec_sa_t, 0xffffffff)
+#define ODP_IPSEC_SA_INVALID _odp_cast_scalar(odp_ipsec_sa_t, 0)
/**
* @}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/pool.h b/platform/linux-generic/include-abi/odp/api/abi/pool.h
index cd161d5a9..9ac1cf673 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/pool.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/pool.h
@@ -27,7 +27,7 @@ extern "C" {
typedef ODP_HANDLE_T(odp_pool_t);
-#define ODP_POOL_INVALID _odp_cast_scalar(odp_pool_t, 0xffffffff)
+#define ODP_POOL_INVALID _odp_cast_scalar(odp_pool_t, 0)
#define ODP_POOL_NAME_LEN 32
diff --git a/platform/linux-generic/include-abi/odp/api/abi/timer.h b/platform/linux-generic/include-abi/odp/api/abi/timer.h
index db5c8250b..c08da1ce3 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/timer.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/timer.h
@@ -26,17 +26,17 @@ extern "C" {
typedef ODP_HANDLE_T(odp_timer_pool_t);
-#define ODP_TIMER_POOL_INVALID NULL
+#define ODP_TIMER_POOL_INVALID _odp_cast_scalar(odp_timer_pool_t, 0)
#define ODP_TIMER_POOL_NAME_LEN 32
typedef ODP_HANDLE_T(odp_timer_t);
-#define ODP_TIMER_INVALID _odp_cast_scalar(odp_timer_t, 0xffffffff)
+#define ODP_TIMER_INVALID _odp_cast_scalar(odp_timer_t, 0)
typedef ODP_HANDLE_T(odp_timeout_t);
-#define ODP_TIMEOUT_INVALID _odp_cast_scalar(odp_timeout_t, NULL)
+#define ODP_TIMEOUT_INVALID _odp_cast_scalar(odp_timeout_t, 0)
/**
* @}
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
index ca3abbfab..71065172d 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
@@ -112,7 +112,7 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 11;
+ uint32_t reserved1: 10;
/*
* Init flags
@@ -136,6 +136,7 @@ typedef union {
uint32_t l3_chksum_err: 1; /* L3 checksum error */
uint32_t tcp_err: 1; /* TCP error */
uint32_t udp_err: 1; /* UDP error */
+ uint32_t sctp_err: 1; /* SCTP error */
uint32_t l4_chksum_err: 1; /* L4 checksum error */
uint32_t ipsec_err: 1; /* IPsec error */
uint32_t crypto_err: 1; /* Crypto packet operation error */
@@ -143,9 +144,9 @@ typedef union {
/* Flag groups */
struct {
- uint32_t reserved2: 11;
+ uint32_t reserved2: 10;
uint32_t other: 13; /* All other flags */
- uint32_t error: 8; /* All error flags */
+ uint32_t error: 9; /* All error flags */
} all;
} _odp_packet_flags_t;
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
index ec5beb819..3851af4c7 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
@@ -87,12 +87,12 @@ extern const _odp_pool_inline_offset_t _odp_pool_inline;
#include <odp/api/plat/strong_types.h>
static inline uint32_t _odp_packet_seg_to_ndx(odp_packet_seg_t seg)
{
- return _odp_typeval(seg);
+ return _odp_typeval(seg) - 1;
}
static inline odp_packet_seg_t _odp_packet_seg_from_ndx(uint32_t ndx)
{
- return _odp_cast_scalar(odp_packet_seg_t, ndx);
+ return _odp_cast_scalar(odp_packet_seg_t, ndx + 1);
}
#endif
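
The +1/-1 shift keeps index 0 usable while reserving handle value 0 for the invalid handle, in line with the *_INVALID changes elsewhere in this patch:

    /* sketch: index 0 now maps to handle value 1 */
    _odp_packet_seg_from_ndx(0);                         /* handle value 1 */
    _odp_packet_seg_to_ndx(_odp_packet_seg_from_ndx(n)); /* == n, round trip */
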
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
index 3a8df7a4b..f81a84782 100644
--- a/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/queue_inline_types.h
@@ -12,6 +12,7 @@ extern "C" {
#endif
#include <stdint.h>
+#include <odp/api/spec/queue_types.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
@@ -26,6 +27,31 @@ typedef struct _odp_queue_inline_offset_t {
} _odp_queue_inline_offset_t;
+/* Queue API functions */
+typedef struct {
+ odp_queue_t (*queue_create)(const char *name,
+ const odp_queue_param_t *param);
+ int (*queue_destroy)(odp_queue_t queue);
+ odp_queue_t (*queue_lookup)(const char *name);
+ int (*queue_capability)(odp_queue_capability_t *capa);
+ int (*queue_context_set)(odp_queue_t queue, void *context,
+ uint32_t len);
+ int (*queue_enq)(odp_queue_t queue, odp_event_t ev);
+ int (*queue_enq_multi)(odp_queue_t queue, const odp_event_t events[],
+ int num);
+ odp_event_t (*queue_deq)(odp_queue_t queue);
+ int (*queue_deq_multi)(odp_queue_t queue, odp_event_t events[],
+ int num);
+ odp_queue_type_t (*queue_type)(odp_queue_t queue);
+ odp_schedule_sync_t (*queue_sched_type)(odp_queue_t queue);
+ odp_schedule_prio_t (*queue_sched_prio)(odp_queue_t queue);
+ odp_schedule_group_t (*queue_sched_group)(odp_queue_t queue);
+ uint32_t (*queue_lock_count)(odp_queue_t queue);
+ uint64_t (*queue_to_u64)(odp_queue_t queue);
+ void (*queue_param_init)(odp_queue_param_t *param);
+ int (*queue_info)(odp_queue_t queue, odp_queue_info_t *info);
+} _odp_queue_api_fn_t;
+
/** @endcond */
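
This function table lets the queue backend be selected at init time while the enqueue/dequeue wrappers stay inlined. A hypothetical wiring (the table names are illustrative, not the actual implementation's):

    extern const _odp_queue_api_fn_t queue_basic_api; /* assumed name */
    const _odp_queue_api_fn_t *_odp_queue_api;

    static void queue_api_select(void)
    {
        _odp_queue_api = &queue_basic_api; /* or a scalable variant */
    }
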
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp/api/plat/queue_inlines.h b/platform/linux-generic/include/odp/api/plat/queue_inlines.h
index f802c96e7..c557b4ba3 100644
--- a/platform/linux-generic/include/odp/api/plat/queue_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/queue_inlines.h
@@ -12,11 +12,16 @@
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
+extern const _odp_queue_api_fn_t *_odp_queue_api;
#ifndef _ODP_NO_INLINE
/* Inline functions by default */
#define _ODP_INLINE static inline
- #define odp_queue_context __odp_queue_context
+ #define odp_queue_context __odp_queue_context
+ #define odp_queue_enq __odp_queue_enq
+ #define odp_queue_enq_multi __odp_queue_enq_multi
+ #define odp_queue_deq __odp_queue_deq
+ #define odp_queue_deq_multi __odp_queue_deq_multi
#else
#define _ODP_INLINE
#endif
@@ -31,6 +36,28 @@ _ODP_INLINE void *odp_queue_context(odp_queue_t handle)
return context;
}
+_ODP_INLINE int odp_queue_enq(odp_queue_t queue, odp_event_t ev)
+{
+ return _odp_queue_api->queue_enq(queue, ev);
+}
+
+_ODP_INLINE int odp_queue_enq_multi(odp_queue_t queue,
+ const odp_event_t events[], int num)
+{
+ return _odp_queue_api->queue_enq_multi(queue, events, num);
+}
+
+_ODP_INLINE odp_event_t odp_queue_deq(odp_queue_t queue)
+{
+ return _odp_queue_api->queue_deq(queue);
+}
+
+_ODP_INLINE int odp_queue_deq_multi(odp_queue_t queue,
+ odp_event_t events[], int num)
+{
+ return _odp_queue_api->queue_deq_multi(queue, events, num);
+}
+
/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index 48e74dd21..5e0b2bcc2 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -54,6 +54,9 @@ ODP_STATIC_ASSERT(ODP_CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
/* Check that buffer index fits into the bit field */
ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
+/* Type size limits number of flow IDs supported */
+#define BUF_HDR_MAX_FLOW_ID 255
+
/* Common buffer header */
struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
/* Combined pool and buffer index */
@@ -94,6 +97,9 @@ struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
/* Event type. May be different from pool type (crypto compl event) */
int8_t event_type;
+ /* Event flow id */
+ uint8_t flow_id;
+
/* Initial buffer tail pointer */
uint8_t *buf_end;
@@ -120,9 +126,18 @@ static inline odp_buffer_t buf_from_buf_hdr(odp_buffer_hdr_t *hdr)
return (odp_buffer_t)hdr;
}
-static inline odp_event_t event_from_buf_hdr(odp_buffer_hdr_t *hdr)
+static inline uint32_t event_flow_id(odp_event_t ev)
{
- return (odp_event_t)hdr;
+ odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
+
+ return buf_hdr->flow_id;
+}
+
+static inline void event_flow_id_set(odp_event_t ev, uint32_t flow_id)
+{
+ odp_buffer_hdr_t *buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)ev;
+
+ buf_hdr->flow_id = flow_id;
}
#ifdef __cplusplus
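The accessors store the flow id in an 8-bit header field, so values above BUF_HDR_MAX_FLOW_ID (255) cannot be represented. A self-contained sketch of that constraint, using a stand-in for the buffer header:

    #include <assert.h>
    #include <stdint.h>

    #define BUF_HDR_MAX_FLOW_ID 255

    struct hdr { uint8_t flow_id; };   /* stand-in for odp_buffer_hdr_t */

    int main(void)
    {
        struct hdr h = { 0 };
        uint32_t flow = 42;

        if (flow <= BUF_HDR_MAX_FLOW_ID) {   /* 8-bit field: 0..255 */
            h.flow_id = flow;
            assert(h.flow_id == flow);
        }
        return 0;
    }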
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index 14fa4f6cf..810576b9a 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -27,11 +27,9 @@ extern "C" {
#define ODP_CONFIG_QUEUES 1024
/*
- * Maximum queue depth. Maximum number of elements that can be stored in a
- * queue. This value is used only when the size is not explicitly provided
- * during queue creation.
+ * Queues reserved for ODP internal use
*/
-#define CONFIG_QUEUE_SIZE 4096
+#define NUM_INTERNAL_QUEUES 64
/*
* Maximum number of ordered locks per queue
@@ -44,14 +42,6 @@ extern "C" {
#define ODP_CONFIG_PKTIO_ENTRIES 64
/*
- * Minimum buffer alignment
- *
- * This defines the minimum supported buffer alignment. Requests for values
- * below this will be rounded up to this value.
- */
-#define ODP_CONFIG_BUFFER_ALIGN_MIN 64
-
-/*
* Maximum buffer alignment
*
* This defines the maximum supported buffer alignment. Requests for values
@@ -125,7 +115,9 @@ extern "C" {
 * defined segment length (seg_len in odp_pool_param_t) will be rounded up to
* this value.
*/
-#define CONFIG_PACKET_SEG_LEN_MIN CONFIG_PACKET_MAX_SEG_LEN
+#define CONFIG_PACKET_SEG_LEN_MIN ((2 * 1024) + \
+ CONFIG_PACKET_HEADROOM + \
+ CONFIG_PACKET_TAILROOM)
/* Maximum number of shared memory blocks.
*
@@ -134,16 +126,6 @@ extern "C" {
#define ODP_CONFIG_SHM_BLOCKS (ODP_CONFIG_POOLS + 48)
/*
- * Size of the virtual address space pre-reserver for ISHM
- *
- * This is just virtual space preallocation size, not memory allocation.
- * This address space is used by ISHM to map things at a common address in
- * all ODP threads (when the _ODP_ISHM_SINGLE_VA flag is used).
- * In bytes.
- */
-#define ODP_CONFIG_ISHM_VA_PREALLOC_SZ (536870912L)
-
-/*
* Maximum event burst size
*
* This controls the burst size on various enqueue, dequeue, etc calls. Large
@@ -152,9 +134,10 @@ extern "C" {
#define CONFIG_BURST_SIZE 32
/*
- * Maximum number of events in a pool
+ * Maximum number of events in a pool. A power of two minus one results in
+ * optimal memory usage for the ring.
*/
-#define CONFIG_POOL_MAX_NUM (1 * 1024 * 1024)
+#define CONFIG_POOL_MAX_NUM ((1024 * 1024) - 1)
/*
* Maximum number of events in a thread local pool cache
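The new limit is one less than a power of two because the pool ring indexes with a mask: the ring size must be a power of two strictly larger than the element count, so a limit of 2^20 - 1 fills a 2^20-slot ring exactly (the ring array in odp_pool_internal.h below grows to CONFIG_POOL_MAX_NUM + 1 for the same reason). A small check of the sizing rule:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t max_num = (1024 * 1024) - 1;   /* CONFIG_POOL_MAX_NUM */
        uint32_t ring_size = max_num + 1;       /* 2^20 */

        assert((ring_size & (ring_size - 1)) == 0); /* power of two */
        assert(ring_size > max_num);   /* a slot for every element */
        return 0;
    }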
diff --git a/platform/linux-generic/include/odp_debug_internal.h b/platform/linux-generic/include/odp_debug_internal.h
index 2e92dd74f..4dbe01b59 100644
--- a/platform/linux-generic/include/odp_debug_internal.h
+++ b/platform/linux-generic/include/odp_debug_internal.h
@@ -34,14 +34,14 @@ extern "C" {
#define ODP_ASSERT(cond) \
do { if ((ODP_DEBUG == 1) && (!(cond))) { \
ODP_ERR("%s\n", #cond); \
- odp_global_data.abort_fn(); } \
+ odp_global_ro.abort_fn(); } \
} while (0)
/**
* This macro is used to indicate when a given function is not implemented
*/
#define ODP_UNIMPLEMENTED() \
- odp_global_data.log_fn(ODP_LOG_UNIMPLEMENTED, \
+ odp_global_ro.log_fn(ODP_LOG_UNIMPLEMENTED, \
"%s:%d:The function %s() is not implemented\n", \
__FILE__, __LINE__, __func__)
/**
@@ -66,14 +66,14 @@ extern "C" {
#define ODP_ABORT(fmt, ...) \
do { \
ODP_LOG(ODP_LOG_ABORT, fmt, ##__VA_ARGS__); \
- odp_global_data.abort_fn(); \
+ odp_global_ro.abort_fn(); \
} while (0)
/**
* ODP LOG macro.
*/
#define ODP_LOG(level, fmt, ...) \
- odp_global_data.log_fn(level, "%s:%d:%s():" fmt, __FILE__, \
+ odp_global_ro.log_fn(level, "%s:%d:%s():" fmt, __FILE__, \
__LINE__, __func__, ##__VA_ARGS__)
/**
@@ -81,7 +81,7 @@ extern "C" {
* specifically for dumping internal data.
*/
#define ODP_PRINT(fmt, ...) \
- odp_global_data.log_fn(ODP_LOG_PRINT, fmt, ##__VA_ARGS__)
+ odp_global_ro.log_fn(ODP_LOG_PRINT, fmt, ##__VA_ARGS__)
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_global_data.h b/platform/linux-generic/include/odp_global_data.h
index 009862a8b..a12d12973 100644
--- a/platform/linux-generic/include/odp_global_data.h
+++ b/platform/linux-generic/include/odp_global_data.h
@@ -13,6 +13,7 @@ extern "C" {
#include <odp/api/init.h>
#include <odp/api/cpumask.h>
+#include <odp/api/random.h>
#include <sys/types.h>
#include <pthread.h>
#include <stdint.h>
@@ -36,13 +37,17 @@ typedef struct {
char *default_huge_page_dir;
} hugepage_info_t;
-struct odp_global_data_s {
+/* Read-only global data. Members should not be modified after global init
+ * to enable process mode support. */
+struct odp_global_data_ro_t {
+ odp_init_t init_param;
/* directory for odp mmaped files */
char *shm_dir;
/* overload default with env */
int shm_dir_from_env;
uint64_t shm_max_memory;
uint64_t shm_max_size;
+ int shm_single_va;
pid_t main_pid;
char uid[UID_MAXLEN];
odp_log_func_t log_fn;
@@ -54,13 +59,22 @@ struct odp_global_data_s {
int num_cpus_installed;
config_t libconfig_default;
config_t libconfig_runtime;
+ odp_random_kind_t ipsec_rand_kind;
+};
+
+/* Modifiable global data. Memory region is shared and synchronized amongst all
+ * worker processes. */
+struct odp_global_data_rw_t {
int inotify_pcapng_fd;
int inotify_watch_fd;
pthread_t inotify_thread;
int inotify_pcapng_is_running;
+ odp_bool_t dpdk_initialized;
+ odp_bool_t inline_timers;
};
-extern struct odp_global_data_s odp_global_data;
+extern struct odp_global_data_ro_t odp_global_ro;
+extern struct odp_global_data_rw_t *odp_global_rw;
#ifdef __cplusplus
}
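Splitting the globals follows a common process-mode pattern: the read-only block can be a plain per-process global because nothing mutates it after init, while the read-write block must be reached through a pointer into shared memory so that all processes observe the same state. A hedged sketch of the pattern, with malloc() standing in for the shared-memory reservation:

    #include <stdlib.h>

    struct ro_data { int log_level; };   /* written only during init */
    struct rw_data { int running; };     /* mutated at runtime */

    static struct ro_data global_ro;     /* per-process copy is safe */
    static struct rw_data *global_rw;    /* would point into shared memory */

    int main(void)
    {
        global_ro.log_level = 1;                   /* init-time only */
        global_rw = malloc(sizeof(*global_rw));    /* stand-in for shm reserve */
        if (global_rw == NULL)
            return 1;
        global_rw->running = 1;                    /* shared runtime state */
        free(global_rw);
        return 0;
    }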
diff --git a/platform/linux-generic/include/odp_init_internal.h b/platform/linux-generic/include/odp_init_internal.h
index 79f710af5..57ea79f98 100644
--- a/platform/linux-generic/include/odp_init_internal.h
+++ b/platform/linux-generic/include/odp_init_internal.h
@@ -54,6 +54,9 @@ int odp_crypto_term_global(void);
int _odp_crypto_init_local(void);
int _odp_crypto_term_local(void);
+int _odp_comp_init_global(void);
+int _odp_comp_term_global(void);
+
int odp_timer_init_global(const odp_init_t *params);
int odp_timer_term_global(void);
int odp_timer_disarm_all(void);
@@ -87,6 +90,7 @@ int _odp_ipsec_events_term_global(void);
int _odp_cpu_cycles_init_global(void);
int _odp_hash_init_global(void);
+int _odp_hash_term_global(void);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_ipsec_internal.h b/platform/linux-generic/include/odp_ipsec_internal.h
index dfde4d574..3d7d40a37 100644
--- a/platform/linux-generic/include/odp_ipsec_internal.h
+++ b/platform/linux-generic/include/odp_ipsec_internal.h
@@ -94,6 +94,30 @@ int _odp_ipsec_status_send(odp_queue_t queue,
struct ipsec_sa_s {
odp_atomic_u32_t ODP_ALIGNED_CACHE state;
+ /*
+ * State that gets updated very frequently. Grouped separately
+ * to avoid false cache line sharing with other data.
+ */
+ struct ODP_ALIGNED_CACHE {
+ /* Statistics for soft/hard expiration */
+ odp_atomic_u64_t bytes;
+ odp_atomic_u64_t packets;
+
+ union {
+ struct {
+ odp_atomic_u64_t antireplay;
+ } in;
+
+ struct {
+ /*
+ * 64-bit sequence number that is also used as
+ * CTR/GCM IV
+ */
+ odp_atomic_u64_t seq;
+ } out;
+ };
+ } hot;
+
uint32_t ipsec_sa_idx;
odp_ipsec_sa_t ipsec_sa_hdl;
@@ -108,10 +132,6 @@ struct ipsec_sa_s {
uint64_t hard_limit_bytes;
uint64_t hard_limit_packets;
- /* Statistics for soft/hard expiration */
- odp_atomic_u64_t bytes;
- odp_atomic_u64_t packets;
-
odp_crypto_session_t session;
void *context;
odp_queue_t queue;
@@ -150,12 +170,9 @@ struct ipsec_sa_s {
odp_u32be_t lookup_dst_ipv4;
uint8_t lookup_dst_ipv6[_ODP_IPV6ADDR_LEN];
};
- odp_atomic_u64_t antireplay;
} in;
struct {
- odp_atomic_u64_t counter; /* for CTR/GCM */
- odp_atomic_u32_t seq;
odp_ipsec_frag_mode_t frag_mode;
uint32_t mtu;
@@ -164,9 +181,6 @@ struct ipsec_sa_s {
odp_ipsec_ipv4_param_t param;
odp_u32be_t src_ip;
odp_u32be_t dst_ip;
-
- /* 32-bit from which low 16 are used */
- odp_atomic_u32_t hdr_id;
} tun_ipv4;
struct {
odp_ipsec_ipv6_param_t param;
@@ -207,6 +221,11 @@ uint32_t _odp_ipsec_cipher_iv_len(odp_cipher_alg_t cipher);
/* Return digest length required for the cipher for IPsec use */
uint32_t _odp_ipsec_auth_digest_len(odp_auth_alg_t auth);
+/*
+ * Get SA entry from handle without obtaining a reference
+ */
+ipsec_sa_t *_odp_ipsec_sa_entry_from_hdl(odp_ipsec_sa_t sa);
+
/**
* Obtain SA reference
*/
@@ -251,6 +270,12 @@ int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
*/
int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
odp_ipsec_op_status_t *status);
+
+/**
+ * Allocate an IPv4 ID for an outgoing packet.
+ */
+uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa);
+
/**
* Try inline IPsec processing of provided packet.
*
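Grouping the per-packet counters and the sequence number into a cache-aligned 'hot' struct keeps their constant atomic stores from invalidating the line that holds read-mostly SA configuration. A minimal layout sketch of the idea (C11 alignment, illustrative field names, 64-byte line assumed):

    #include <stdint.h>

    #define CACHE_LINE 64   /* assumed line size */

    struct sa {
        uint32_t spi;       /* read-mostly configuration */

        /* frequently written state, pushed onto its own cache line */
        struct {
            _Alignas(CACHE_LINE) uint64_t bytes;
            uint64_t packets;
            uint64_t seq;
        } hot;
    };

    int main(void)
    {
        struct sa sa = { 0 };

        sa.hot.packets++;   /* does not touch the line holding 'spi' */
        return (int)sa.hot.packets - 1;
    }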
diff --git a/platform/linux-generic/include/odp_ishm_internal.h b/platform/linux-generic/include/odp_ishm_internal.h
deleted file mode 100644
index 56c7f5a93..000000000
--- a/platform/linux-generic/include/odp_ishm_internal.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_ISHM_INTERNAL_H_
-#define ODP_ISHM_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <sys/types.h>
-
-/* flags available at ishm_reserve: */
-#define _ODP_ISHM_SINGLE_VA 1
-#define _ODP_ISHM_LOCK 2
-#define _ODP_ISHM_EXPORT 4 /*create export descr file in /tmp */
-
-/**
- * Shared memory block info
- */
-typedef struct _odp_ishm_info_t {
- const char *name; /**< Block name */
- void *addr; /**< Block address */
- uint64_t size; /**< Block size in bytes */
- uint64_t page_size; /**< Memory page size */
- uint32_t flags; /**< _ODP_ISHM_* flags */
- uint32_t user_flags;/**< user specific flags */
-} _odp_ishm_info_t;
-
-int _odp_ishm_reserve(const char *name, uint64_t size, int fd, uint32_t align,
- uint32_t flags, uint32_t user_flags);
-int _odp_ishm_free_by_index(int block_index);
-int _odp_ishm_free_by_name(const char *name);
-int _odp_ishm_free_by_address(void *addr);
-void *_odp_ishm_lookup_by_index(int block_index);
-int _odp_ishm_lookup_by_name(const char *name);
-int _odp_ishm_lookup_by_address(void *addr);
-int _odp_ishm_find_exported(const char *remote_name,
- pid_t external_odp_pid,
- const char *local_name);
-void *_odp_ishm_address(int block_index);
-int _odp_ishm_info(int block_index, _odp_ishm_info_t *info);
-int _odp_ishm_status(const char *title);
-int _odp_ishm_cleanup_files(const char *dirpath);
-void _odp_ishm_print(int block_index);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_ishmphy_internal.h b/platform/linux-generic/include/odp_ishmphy_internal.h
index 05e3fcec7..2bc9911ce 100644
--- a/platform/linux-generic/include/odp_ishmphy_internal.h
+++ b/platform/linux-generic/include/odp_ishmphy_internal.h
@@ -13,9 +13,9 @@ extern "C" {
#include <stdint.h>
-void *_odp_ishmphy_book_va(uintptr_t len, intptr_t align);
-int _odp_ishmphy_unbook_va(void);
-void *_odp_ishmphy_map(int fd, void *start, uint64_t size, int flags);
+void *_odp_ishmphy_reserve_single_va(uint64_t len, int fd);
+int _odp_ishmphy_free_single_va(void);
+void *_odp_ishmphy_map(int fd, uint64_t size, uint64_t offset, int flags);
int _odp_ishmphy_unmap(void *start, uint64_t len, int flags);
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_ishmpool_internal.h b/platform/linux-generic/include/odp_ishmpool_internal.h
index 94bcddaeb..9db5a2b4b 100644
--- a/platform/linux-generic/include/odp_ishmpool_internal.h
+++ b/platform/linux-generic/include/odp_ishmpool_internal.h
@@ -46,7 +46,6 @@ int _odp_ishm_pool_destroy(_odp_ishm_pool_t *pool);
void *_odp_ishm_pool_alloc(_odp_ishm_pool_t *pool, uint64_t size);
int _odp_ishm_pool_free(_odp_ishm_pool_t *pool, void *addr);
int _odp_ishm_pool_status(const char *title, _odp_ishm_pool_t *pool);
-_odp_ishm_pool_t *_odp_ishm_pool_lookup(const char *pool_name);
void _odp_ishm_pool_init(void);
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_libconfig_internal.h b/platform/linux-generic/include/odp_libconfig_internal.h
index 727f68863..da574012e 100644
--- a/platform/linux-generic/include/odp_libconfig_internal.h
+++ b/platform/linux-generic/include/odp_libconfig_internal.h
@@ -21,6 +21,7 @@ int _odp_libconfig_init_global(void);
int _odp_libconfig_term_global(void);
int _odp_libconfig_lookup_int(const char *path, int *value);
+int _odp_libconfig_lookup_array(const char *path, int value[], int max_num);
int _odp_libconfig_lookup_ext_int(const char *base_path,
const char *local_path,
diff --git a/platform/linux-generic/include/odp_packet_dpdk.h b/platform/linux-generic/include/odp_packet_dpdk.h
index d457cfa30..1b660babf 100644
--- a/platform/linux-generic/include/odp_packet_dpdk.h
+++ b/platform/linux-generic/include/odp_packet_dpdk.h
@@ -7,29 +7,44 @@
#ifndef ODP_PACKET_DPDK_H
#define ODP_PACKET_DPDK_H
+#include <stdint.h>
+
#include <odp/api/packet_io.h>
+#include <odp_packet_internal.h>
+#include <odp_pool_internal.h>
+
struct rte_mbuf;
-/** Cache for storing packets */
+/**
+ * Calculate size of zero-copy DPDK packet pool object
+ */
+uint32_t _odp_dpdk_pool_obj_size(pool_t *pool, uint32_t block_size);
+
+/**
+ * Create zero-copy DPDK packet pool
+ */
+int _odp_dpdk_pool_create(pool_t *pool);
+
/** Packet parser using DPDK interface */
-int dpdk_packet_parse_common(packet_parser_t *pkt_hdr,
- const uint8_t *ptr,
- uint32_t pkt_len,
- uint32_t seg_len,
- struct rte_mbuf *mbuf,
- int layer,
- odp_pktin_config_opt_t pktin_cfg);
-
-static inline int dpdk_packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
- struct rte_mbuf *mbuf,
- odp_pktio_parser_layer_t layer,
- odp_pktin_config_opt_t pktin_cfg)
+int _odp_dpdk_packet_parse_common(packet_parser_t *pkt_hdr,
+ const uint8_t *ptr,
+ uint32_t pkt_len,
+ uint32_t seg_len,
+ struct rte_mbuf *mbuf,
+ int layer,
+ odp_pktin_config_opt_t pktin_cfg);
+
+static inline int _odp_dpdk_packet_parse_layer(odp_packet_hdr_t *pkt_hdr,
+ struct rte_mbuf *mbuf,
+ odp_pktio_parser_layer_t layer,
+ odp_pktin_config_opt_t pktin_cfg)
{
uint32_t seg_len = pkt_hdr->buf_hdr.seg[0].len;
void *base = pkt_hdr->buf_hdr.seg[0].data;
- return dpdk_packet_parse_common(&pkt_hdr->p, base, pkt_hdr->frame_len,
- seg_len, mbuf, layer, pktin_cfg);
+ return _odp_dpdk_packet_parse_common(&pkt_hdr->p, base,
+ pkt_hdr->frame_len, seg_len, mbuf,
+ layer, pktin_cfg);
}
#endif
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index 9b995c61c..603e7cf49 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -25,6 +25,7 @@ extern "C" {
#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
+#include <odp/api/comp.h>
#include <odp_ipsec_internal.h>
#include <odp/api/abi/packet.h>
#include <odp_queue_if.h>
@@ -95,9 +96,6 @@ typedef struct {
* Members below are not initialized by packet_init()
*/
- /* Type of extra data */
- uint8_t extra_type;
-
/* Flow hash value */
uint32_t flow_hash;
@@ -105,19 +103,22 @@ typedef struct {
odp_time_t timestamp;
/* Classifier destination queue */
- void *dst_queue;
+ odp_queue_t dst_queue;
- /* Result for crypto packet op */
- odp_crypto_packet_result_t crypto_op_result;
- /* Context for IPsec */
- odp_ipsec_packet_result_t ipsec_ctx;
+ union {
+ struct {
+ /* Result for crypto packet op */
+ odp_crypto_packet_result_t crypto_op_result;
+
+ /* Context for IPsec */
+ odp_ipsec_packet_result_t ipsec_ctx;
+ };
+
+ /* Result for comp packet op */
+ odp_comp_packet_result_t comp_op_result;
+ };
-#ifdef ODP_PKTIO_DPDK
- /* Extra space for packet descriptors. E.g. DPDK mbuf. Keep as the last
- * member before data. */
- uint8_t ODP_ALIGNED_CACHE extra[PKT_EXTRA_LEN];
-#endif
/* Packet data storage */
uint8_t data[0];
} odp_packet_hdr_t;
@@ -303,6 +304,8 @@ int _odp_packet_cmp_data(odp_packet_t pkt, uint32_t offset,
int _odp_packet_ipv4_chksum_insert(odp_packet_t pkt);
int _odp_packet_tcp_chksum_insert(odp_packet_t pkt);
int _odp_packet_udp_chksum_insert(odp_packet_t pkt);
+int _odp_packet_sctp_chksum_insert(odp_packet_t pkt);
+
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index d2446d19f..53db2c907 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -53,6 +53,8 @@ struct pktio_if_ops;
#if defined(ODP_NETMAP)
#define PKTIO_PRIVATE_SIZE 74752
+#elif defined(ODP_PKTIO_DPDK) && ODP_CACHE_LINE_SIZE == 128
+#define PKTIO_PRIVATE_SIZE 10240
#elif defined(ODP_PKTIO_DPDK)
#define PKTIO_PRIVATE_SIZE 5632
#else
@@ -105,7 +107,6 @@ struct pktio_entry {
struct {
odp_queue_t queue;
- void *queue_int;
odp_pktin_queue_t pktin;
} in_queue[PKTIO_MAX_QUEUES];
diff --git a/platform/linux-generic/include/odp_packet_io_ring_internal.h b/platform/linux-generic/include/odp_packet_io_ring_internal.h
index 889a65597..6b4e06a42 100644
--- a/platform/linux-generic/include/odp_packet_io_ring_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_ring_internal.h
@@ -398,9 +398,14 @@ int _ring_mc_dequeue_burst(_ring_t *r, void **obj_table, unsigned n);
void _ring_list_dump(void);
/**
- * initialise ring tailq
+ * Initialize ring tailq
*/
-void _ring_tailq_init(void);
+int _ring_tailq_init(void);
+
+/**
+ * Terminate ring tailq
+ */
+int _ring_tailq_term(void);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_packet_socket.h b/platform/linux-generic/include/odp_packet_socket.h
index 6f9443a3d..16cee16bc 100644
--- a/platform/linux-generic/include/odp_packet_socket.h
+++ b/platform/linux-generic/include/odp_packet_socket.h
@@ -20,6 +20,7 @@
#include <odp/api/pool.h>
#include <odp/api/packet.h>
#include <odp/api/packet_io.h>
+#include <odp/api/shared_memory.h>
#include <linux/version.h>
@@ -43,6 +44,7 @@ struct ring {
unsigned frame_num;
int rd_num;
+ odp_shm_t shm;
int sock;
int type;
int version;
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index e3de2b65d..94f859de0 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -38,7 +38,7 @@ typedef struct ODP_ALIGNED_CACHE {
ring_t hdr;
/* Ring data: buffer handles */
- uint32_t buf[CONFIG_POOL_MAX_NUM];
+ uint32_t buf[CONFIG_POOL_MAX_NUM + 1];
} pool_ring_t;
@@ -67,11 +67,15 @@ typedef struct pool_t {
uint32_t max_len;
uint32_t uarea_size;
uint32_t block_size;
+ uint32_t block_offset;
uint8_t *base_addr;
uint8_t *uarea_base_addr;
/* Used by DPDK zero-copy pktio */
- uint8_t mem_from_huge_pages;
+ uint32_t dpdk_elt_size;
+ uint32_t skipped_blocks;
+ uint8_t pool_in_use;
+ uint8_t mem_from_huge_pages;
pool_destroy_cb_fn ext_destroy;
void *ext_desc;
@@ -85,6 +89,11 @@ typedef struct pool_t {
typedef struct pool_table_t {
pool_t pool[ODP_CONFIG_POOLS];
odp_shm_t shm;
+
+ struct {
+ uint32_t pkt_max_num;
+ } config;
+
} pool_table_t;
extern pool_table_t *pool_tbl;
@@ -96,7 +105,7 @@ static inline pool_t *pool_entry(uint32_t pool_idx)
static inline pool_t *pool_entry_from_hdl(odp_pool_t pool_hdl)
{
- return &pool_tbl->pool[_odp_typeval(pool_hdl)];
+ return &pool_tbl->pool[_odp_typeval(pool_hdl) - 1];
}
static inline odp_buffer_hdr_t *buf_hdl_to_hdr(odp_buffer_t buf)
@@ -110,7 +119,8 @@ static inline odp_buffer_hdr_t *buf_hdr_from_index(pool_t *pool,
uint64_t block_offset;
odp_buffer_hdr_t *buf_hdr;
- block_offset = buffer_idx * (uint64_t)pool->block_size;
+ block_offset = (buffer_idx * (uint64_t)pool->block_size) +
+ pool->block_offset;
/* clang requires cast to uintptr_t */
buf_hdr = (odp_buffer_hdr_t *)(uintptr_t)&pool->base_addr[block_offset];
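Pool handles likewise become index + 1 so that the zero handle stays invalid, and each buffer header now sits at a fixed stride from the pool base plus the new block_offset. A small check of both calculations (the concrete values are illustrative):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t pool_idx = 3;
        uint32_t handle = pool_idx + 1;   /* 0 remains invalid */

        assert(handle != 0 && handle - 1 == pool_idx);

        uint64_t block_size = 2048, block_offset = 64, buffer_idx = 10;
        uint64_t off = buffer_idx * block_size + block_offset;

        assert(off == 10 * 2048 + 64);
        return 0;
    }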
diff --git a/platform/linux-generic/include/odp_queue_basic_internal.h b/platform/linux-generic/include/odp_queue_basic_internal.h
index 654b9e312..c8ed978ce 100644
--- a/platform/linux-generic/include/odp_queue_basic_internal.h
+++ b/platform/linux-generic/include/odp_queue_basic_internal.h
@@ -22,6 +22,7 @@ extern "C" {
#include <odp/api/hints.h>
#include <odp/api/ticketlock.h>
#include <odp_config_internal.h>
+#include <odp_ring_mpmc_internal.h>
#include <odp_ring_st_internal.h>
#include <odp_ring_spsc_internal.h>
#include <odp_queue_lf.h>
@@ -33,21 +34,30 @@ extern "C" {
#define QUEUE_STATUS_SCHED 4
struct queue_entry_s {
- odp_ticketlock_t ODP_ALIGNED_CACHE lock;
- union {
- ring_st_t ring_st;
- ring_spsc_t ring_spsc;
- };
- int status;
-
+ /* The first cache line is read only */
queue_enq_fn_t ODP_ALIGNED_CACHE enqueue;
queue_deq_fn_t dequeue;
queue_enq_multi_fn_t enqueue_multi;
queue_deq_multi_fn_t dequeue_multi;
+ uint32_t *ring_data;
+ uint32_t ring_mask;
+ uint32_t index;
+ odp_queue_t handle;
+ odp_queue_type_t type;
- uint32_t index;
- odp_queue_t handle;
- odp_queue_type_t type;
+ /* MPMC ring (2 cache lines). */
+ ring_mpmc_t ring_mpmc;
+
+ odp_ticketlock_t lock;
+ union {
+ ring_st_t ring_st;
+ ring_spsc_t ring_spsc;
+ };
+
+ odp_atomic_u64_t num_timers;
+ int status;
+
+ queue_deq_multi_fn_t orig_dequeue_multi;
odp_queue_param_t param;
odp_pktin_queue_t pktin;
odp_pktout_queue_t pktout;
@@ -96,10 +106,14 @@ static inline odp_queue_t queue_from_index(uint32_t queue_id)
return (odp_queue_t)qentry_from_index(queue_id);
}
+static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
+{
+ return (queue_entry_t *)(uintptr_t)handle;
+}
+
void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size);
/* Functions for schedulers */
-void sched_queue_destroy_finalize(uint32_t queue_index);
void sched_queue_set_status(uint32_t queue_index, int status);
int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int num,
int update_status);
diff --git a/platform/linux-generic/include/odp_queue_if.h b/platform/linux-generic/include/odp_queue_if.h
index 2eaf8771e..5fe28dac1 100644
--- a/platform/linux-generic/include/odp_queue_if.h
+++ b/platform/linux-generic/include/odp_queue_if.h
@@ -18,52 +18,29 @@ extern "C" {
#define QUEUE_MULTI_MAX CONFIG_BURST_SIZE
-/* Queue API functions */
-typedef struct {
- odp_queue_t (*queue_create)(const char *name,
- const odp_queue_param_t *param);
- int (*queue_destroy)(odp_queue_t queue);
- odp_queue_t (*queue_lookup)(const char *name);
- int (*queue_capability)(odp_queue_capability_t *capa);
- int (*queue_context_set)(odp_queue_t queue, void *context,
- uint32_t len);
- int (*queue_enq)(odp_queue_t queue, odp_event_t ev);
- int (*queue_enq_multi)(odp_queue_t queue, const odp_event_t events[],
- int num);
- odp_event_t (*queue_deq)(odp_queue_t queue);
- int (*queue_deq_multi)(odp_queue_t queue, odp_event_t events[],
- int num);
- odp_queue_type_t (*queue_type)(odp_queue_t queue);
- odp_schedule_sync_t (*queue_sched_type)(odp_queue_t queue);
- odp_schedule_prio_t (*queue_sched_prio)(odp_queue_t queue);
- odp_schedule_group_t (*queue_sched_group)(odp_queue_t queue);
- uint32_t (*queue_lock_count)(odp_queue_t queue);
- uint64_t (*queue_to_u64)(odp_queue_t hdl);
- void (*queue_param_init)(odp_queue_param_t *param);
- int (*queue_info)(odp_queue_t queue, odp_queue_info_t *info);
-} queue_api_t;
-
typedef int (*queue_init_global_fn_t)(void);
typedef int (*queue_term_global_fn_t)(void);
typedef int (*queue_init_local_fn_t)(void);
typedef int (*queue_term_local_fn_t)(void);
-typedef void *(*queue_from_ext_fn_t)(odp_queue_t handle);
-typedef odp_queue_t (*queue_to_ext_fn_t)(void *q_int);
-typedef int (*queue_enq_fn_t)(void *q_int, odp_buffer_hdr_t *);
-typedef int (*queue_enq_multi_fn_t)(void *q_int, odp_buffer_hdr_t **, int);
-typedef odp_buffer_hdr_t *(*queue_deq_fn_t)(void *q_int);
-typedef int (*queue_deq_multi_fn_t)(void *q_int, odp_buffer_hdr_t **, int);
-typedef odp_pktout_queue_t (*queue_get_pktout_fn_t)(void *q_int);
-typedef void (*queue_set_pktout_fn_t)(void *q_int, odp_pktio_t pktio,
+typedef int (*queue_enq_fn_t)(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr);
+typedef int (*queue_enq_multi_fn_t)(odp_queue_t queue,
+ odp_buffer_hdr_t **buf_hdr, int num);
+typedef odp_buffer_hdr_t *(*queue_deq_fn_t)(odp_queue_t queue);
+typedef int (*queue_deq_multi_fn_t)(odp_queue_t queue,
+ odp_buffer_hdr_t **buf_hdr, int num);
+typedef odp_pktout_queue_t (*queue_get_pktout_fn_t)(odp_queue_t queue);
+typedef void (*queue_set_pktout_fn_t)(odp_queue_t queue, odp_pktio_t pktio,
int index);
-typedef odp_pktin_queue_t (*queue_get_pktin_fn_t)(void *q_int);
-typedef void (*queue_set_pktin_fn_t)(void *q_int, odp_pktio_t pktio,
+typedef odp_pktin_queue_t (*queue_get_pktin_fn_t)(odp_queue_t queue);
+typedef void (*queue_set_pktin_fn_t)(odp_queue_t queue, odp_pktio_t pktio,
int index);
-typedef void (*queue_set_enq_deq_fn_t)(void *q_int,
+typedef void (*queue_set_enq_deq_fn_t)(odp_queue_t queue,
queue_enq_fn_t enq,
queue_enq_multi_fn_t enq_multi,
queue_deq_fn_t deq,
queue_deq_multi_fn_t deq_multi);
+typedef void (*queue_timer_add_fn_t)(odp_queue_t queue);
+typedef void (*queue_timer_rem_fn_t)(odp_queue_t queue);
/* Queue functions towards other internal components */
typedef struct {
@@ -71,17 +48,17 @@ typedef struct {
queue_term_global_fn_t term_global;
queue_init_local_fn_t init_local;
queue_term_local_fn_t term_local;
- queue_from_ext_fn_t from_ext;
- queue_to_ext_fn_t to_ext;
- queue_enq_fn_t enq;
- queue_enq_multi_fn_t enq_multi;
- queue_deq_fn_t deq;
- queue_deq_multi_fn_t deq_multi;
queue_get_pktout_fn_t get_pktout;
queue_set_pktout_fn_t set_pktout;
queue_get_pktin_fn_t get_pktin;
queue_set_pktin_fn_t set_pktin;
queue_set_enq_deq_fn_t set_enq_deq_fn;
+ queue_timer_add_fn_t timer_add;
+ queue_timer_rem_fn_t timer_rem;
+
+ /* Original queue dequeue multi function (before override). May be used
+ * by an overriding dequeue function. */
+ queue_deq_multi_fn_t orig_deq_multi;
} queue_fn_t;
extern const queue_fn_t *queue_fn;
diff --git a/platform/linux-generic/include/odp_queue_scalable_internal.h b/platform/linux-generic/include/odp_queue_scalable_internal.h
index 71aaa3ba7..3c582076c 100644
--- a/platform/linux-generic/include/odp_queue_scalable_internal.h
+++ b/platform/linux-generic/include/odp_queue_scalable_internal.h
@@ -35,12 +35,14 @@ struct queue_entry_s {
sched_elem_t sched_elem;
odp_ticketlock_t ODP_ALIGNED_CACHE lock;
+ odp_atomic_u64_t num_timers;
int status;
queue_enq_fn_t ODP_ALIGNED_CACHE enqueue;
queue_deq_fn_t dequeue;
queue_enq_multi_fn_t enqueue_multi;
queue_deq_multi_fn_t dequeue_multi;
+ queue_deq_multi_fn_t orig_dequeue_multi;
uint32_t index;
odp_queue_t handle;
@@ -80,14 +82,14 @@ static inline uint32_t queue_to_id(odp_queue_t handle)
return qentry_from_ext(handle)->s.index;
}
-static inline queue_entry_t *qentry_from_int(void *handle)
+static inline queue_entry_t *qentry_from_int(odp_queue_t handle)
{
- return (queue_entry_t *)handle;
+ return (queue_entry_t *)(uintptr_t)handle;
}
-static inline void *qentry_to_int(queue_entry_t *qentry)
+static inline odp_queue_t qentry_to_int(queue_entry_t *qentry)
{
- return qentry;
+ return (odp_queue_t)qentry;
}
static inline odp_queue_t queue_get_handle(queue_entry_t *queue)
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index 97673bef4..ad2f37ef2 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -18,16 +18,14 @@ extern "C" {
#include <odp/api/plat/atomic_inlines.h>
#include <odp/api/plat/cpu_inlines.h>
-/* Ring empty, not a valid data value. */
-#define RING_EMPTY ((uint32_t)-1)
-
/* Ring of uint32_t data
*
* Ring stores head and tail counters. Ring indexes are formed from these
* counters with a mask (mask = ring_size - 1), which requires that ring size
* must be a power of two. Also ring size must be larger than the maximum
- * number of data items that will be stored on it (there's no check against
- * overwriting). */
+ * number of data items that will be stored in it, since write operations are
+ * assumed to succeed eventually (after readers complete their current
+ * operations). */
typedef struct ODP_ALIGNED_CACHE {
/* Writer head and tail */
odp_atomic_u32_t w_head;
@@ -36,6 +34,7 @@ typedef struct ODP_ALIGNED_CACHE {
/* Reader head and tail */
odp_atomic_u32_t r_head;
+ odp_atomic_u32_t r_tail;
uint32_t data[0];
} ring_t;
@@ -56,10 +55,11 @@ static inline void ring_init(ring_t *ring)
odp_atomic_init_u32(&ring->w_head, 0);
odp_atomic_init_u32(&ring->w_tail, 0);
odp_atomic_init_u32(&ring->r_head, 0);
+ odp_atomic_init_u32(&ring->r_tail, 0);
}
/* Dequeue data from the ring head */
-static inline uint32_t ring_deq(ring_t *ring, uint32_t mask)
+static inline uint32_t ring_deq(ring_t *ring, uint32_t mask, uint32_t *data)
{
uint32_t head, tail, new_head;
@@ -73,17 +73,25 @@ static inline uint32_t ring_deq(ring_t *ring, uint32_t mask)
tail = odp_atomic_load_acq_u32(&ring->w_tail);
if (head == tail)
- return RING_EMPTY;
+ return 0;
new_head = head + 1;
} while (odp_unlikely(cas_mo_u32(&ring->r_head, &head, new_head,
- __ATOMIC_ACQ_REL,
+ __ATOMIC_ACQUIRE,
__ATOMIC_ACQUIRE) == 0));
- /* Read data. CAS acquire-release ensures that data read
- * does not move above from here. */
- return ring->data[new_head & mask];
+ /* Read data. */
+ *data = ring->data[new_head & mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != head))
+ odp_cpu_pause();
+
+ /* Update the tail. Writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
+ return 1;
}
/* Dequeue multiple data from the ring head. Num is smaller than ring size. */
@@ -112,14 +120,20 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
new_head = head + num;
} while (odp_unlikely(cas_mo_u32(&ring->r_head, &head, new_head,
- __ATOMIC_ACQ_REL,
+ __ATOMIC_ACQUIRE,
__ATOMIC_ACQUIRE) == 0));
- /* Read data. CAS acquire-release ensures that data read
- * does not move above from here. */
+ /* Read data. */
for (i = 0; i < num; i++)
data[i] = ring->data[(head + 1 + i) & mask];
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != head))
+ odp_cpu_pause();
+
+ /* Update the tail. Writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
return num;
}
@@ -127,16 +141,24 @@ static inline uint32_t ring_deq_multi(ring_t *ring, uint32_t mask,
static inline void ring_enq(ring_t *ring, uint32_t mask, uint32_t data)
{
uint32_t old_head, new_head;
+ uint32_t size = mask + 1;
/* Reserve a slot in the ring for writing */
old_head = odp_atomic_fetch_inc_u32(&ring->w_head);
new_head = old_head + 1;
+ /* Wait for the last reader to finish. This prevents overwrite when
+ * a reader has been left behind (e.g. due to an interrupt) and is
+ * still reading the same slot. */
+ while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r_tail)
+ >= size))
+ odp_cpu_pause();
+
/* Write data */
ring->data[new_head & mask] = data;
/* Wait until other writers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
odp_cpu_pause();
/* Release the new writer tail, readers acquire it. */
@@ -148,17 +170,25 @@ static inline void ring_enq_multi(ring_t *ring, uint32_t mask, uint32_t data[],
uint32_t num)
{
uint32_t old_head, new_head, i;
+ uint32_t size = mask + 1;
/* Reserve a slot in the ring for writing */
old_head = odp_atomic_fetch_add_u32(&ring->w_head, num);
new_head = old_head + 1;
+ /* Wait for the last reader to finish. This prevents overwrite when
+ * a reader has been left behind (e.g. due to an interrupt) and is
+ * still reading these slots. */
+ while (odp_unlikely(new_head - odp_atomic_load_acq_u32(&ring->r_tail)
+ >= size))
+ odp_cpu_pause();
+
/* Write data */
for (i = 0; i < num; i++)
ring->data[(new_head + i) & mask] = data[i];
/* Wait until other writers have updated the tail */
- while (odp_unlikely(odp_atomic_load_acq_u32(&ring->w_tail) != old_head))
+ while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
odp_cpu_pause();
/* Release the new writer tail, readers acquire it. */
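The added wait makes enqueue safe against a stalled reader: a writer may not claim a slot that is still within one ring size of the reader tail. A sketch of the wrap-safe distance test the loop above relies on:

    #include <stdint.h>

    /* Nonzero when new_head's slot can no longer be read by a lagging
     * reader; unsigned subtraction handles counter wrap-around. */
    static int slot_is_free(uint32_t new_head, uint32_t r_tail, uint32_t size)
    {
        return (uint32_t)(new_head - r_tail) < size;
    }

    int main(void)
    {
        return !slot_is_free(1, 0, 8);   /* fresh ring: slot is free */
    }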
diff --git a/platform/linux-generic/include/odp_ring_mpmc_internal.h b/platform/linux-generic/include/odp_ring_mpmc_internal.h
new file mode 100644
index 000000000..74bbb8fc7
--- /dev/null
+++ b/platform/linux-generic/include/odp_ring_mpmc_internal.h
@@ -0,0 +1,169 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_RING_MPMC_INTERNAL_H_
+#define ODP_RING_MPMC_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/atomic.h>
+#include <odp/api/cpu.h>
+#include <odp/api/hints.h>
+#include <odp_align_internal.h>
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/cpu_inlines.h>
+
+/* Ring of uint32_t data
+ *
+ * Ring stores head and tail counters. Ring indexes are formed from these
+ * counters with a mask (mask = ring_size - 1), which requires that ring size
+ * must be a power of two.
+ *
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * | E | E | | | | | | | | | | E | E | E | E | E |
+ * +---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+ * ^ ^ ^ ^
+ * | | | |
+ * r_tail r_head w_tail w_head
+ *
+ */
+typedef struct {
+ odp_atomic_u32_t ODP_ALIGNED_CACHE r_head;
+ odp_atomic_u32_t r_tail;
+
+ odp_atomic_u32_t ODP_ALIGNED_CACHE w_head;
+ odp_atomic_u32_t w_tail;
+
+} ring_mpmc_t;
+
+static inline int ring_mpmc_cas_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+}
+
+/* Initialize ring */
+static inline void ring_mpmc_init(ring_mpmc_t *ring)
+{
+ odp_atomic_init_u32(&ring->w_head, 0);
+ odp_atomic_init_u32(&ring->w_tail, 0);
+ odp_atomic_init_u32(&ring->r_head, 0);
+ odp_atomic_init_u32(&ring->r_tail, 0);
+}
+
+/* Dequeue data from the ring head. Num is smaller than ring size. */
+static inline uint32_t ring_mpmc_deq_multi(ring_mpmc_t *ring,
+ uint32_t *ring_data,
+ uint32_t ring_mask,
+ uint32_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, w_tail, num_data, i;
+
+ /* Load acquires ensure that w_tail load happens after r_head load,
+ * and thus r_head value is always behind or equal to w_tail value.
+ * When CAS operation succeeds, this thread owns data between old
+ * and new r_head. */
+ do {
+ old_head = odp_atomic_load_acq_u32(&ring->r_head);
+ odp_prefetch(&ring_data[(old_head + 1) & ring_mask]);
+ w_tail = odp_atomic_load_acq_u32(&ring->w_tail);
+ num_data = w_tail - old_head;
+
+ /* Ring is empty */
+ if (num_data == 0)
+ return 0;
+
+ /* Try to take all available */
+ if (num > num_data)
+ num = num_data;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->r_head, &old_head,
+ new_head) == 0));
+
+ /* Read data. This will not move above load acquire of r_head. */
+ for (i = 0; i < num; i++)
+ data[i] = ring_data[(old_head + 1 + i) & ring_mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new reader tail, writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r_tail, new_head);
+
+ return num;
+}
+
+/* Enqueue multiple data into the ring tail. Num is smaller than ring size. */
+static inline uint32_t ring_mpmc_enq_multi(ring_mpmc_t *ring,
+ uint32_t *ring_data,
+ uint32_t ring_mask,
+ const uint32_t data[],
+ uint32_t num)
+{
+ uint32_t old_head, new_head, r_tail, num_free, i;
+ uint32_t size = ring_mask + 1;
+
+ /* Load acquires ensure that w_head load happens after r_tail load,
+ * and thus r_tail value is always behind or equal to w_head value.
+ * When CAS operation succeeds, this thread owns data between old
+ * and new w_head. */
+ do {
+ r_tail = odp_atomic_load_acq_u32(&ring->r_tail);
+ old_head = odp_atomic_load_acq_u32(&ring->w_head);
+
+ num_free = size - (old_head - r_tail);
+
+ /* Ring is full */
+ if (num_free == 0)
+ return 0;
+
+ /* Try to use all available */
+ if (num > num_free)
+ num = num_free;
+
+ new_head = old_head + num;
+
+ } while (odp_unlikely(ring_mpmc_cas_u32(&ring->w_head, &old_head,
+ new_head) == 0));
+
+ /* Write data. This will not move above load acquire of w_head. */
+ for (i = 0; i < num; i++)
+ ring_data[(old_head + 1 + i) & ring_mask] = data[i];
+
+ /* Wait until other writers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->w_tail) != old_head))
+ odp_cpu_pause();
+
+ /* Release the new writer tail, readers acquire it. */
+ odp_atomic_store_rel_u32(&ring->w_tail, new_head);
+
+ return num;
+}
+
+/* Check if ring is empty */
+static inline int ring_mpmc_is_empty(ring_mpmc_t *ring)
+{
+ uint32_t head = odp_atomic_load_u32(&ring->r_head);
+ uint32_t tail = odp_atomic_load_u32(&ring->w_tail);
+
+ return head == tail;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
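Unlike the ring in odp_ring_internal.h, this ring owns no data pointer or mask; the caller allocates a power-of-two data array and passes it, with its mask, on every call. A hedged usage sketch against the functions above:

    #define RING_SIZE 8   /* must be a power of two */

    static void ring_mpmc_example(void)
    {
        ring_mpmc_t ring;
        uint32_t ring_data[RING_SIZE];
        uint32_t in[2] = { 1, 2 }, out[2];
        uint32_t num;

        ring_mpmc_init(&ring);
        num = ring_mpmc_enq_multi(&ring, ring_data, RING_SIZE - 1, in, 2);
        num = ring_mpmc_deq_multi(&ring, ring_data, RING_SIZE - 1, out, num);
        (void)num;
    }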
diff --git a/platform/linux-generic/include/odp_ring_spsc_internal.h b/platform/linux-generic/include/odp_ring_spsc_internal.h
index e38bda1da..de122bf57 100644
--- a/platform/linux-generic/include/odp_ring_spsc_internal.h
+++ b/platform/linux-generic/include/odp_ring_spsc_internal.h
@@ -29,31 +29,27 @@ extern "C" {
typedef struct {
odp_atomic_u32_t head;
odp_atomic_u32_t tail;
- uint32_t mask;
- uint32_t *data;
} ring_spsc_t;
/* Initialize ring. Ring size must be a power of two. */
-static inline void ring_spsc_init(ring_spsc_t *ring, uint32_t *data,
- uint32_t size)
+static inline void ring_spsc_init(ring_spsc_t *ring)
{
odp_atomic_init_u32(&ring->head, 0);
odp_atomic_init_u32(&ring->tail, 0);
- ring->mask = size - 1;
- ring->data = data;
}
/* Dequeue data from the ring head. Max_num is smaller than ring size.*/
-static inline uint32_t ring_spsc_deq_multi(ring_spsc_t *ring, uint32_t data[],
+static inline uint32_t ring_spsc_deq_multi(ring_spsc_t *ring,
+ uint32_t *ring_data,
+ uint32_t ring_mask, uint32_t data[],
uint32_t max_num)
{
- uint32_t head, tail, mask, idx;
+ uint32_t head, tail, idx;
uint32_t num, i;
tail = odp_atomic_load_acq_u32(&ring->tail);
head = odp_atomic_load_u32(&ring->head);
- mask = ring->mask;
num = tail - head;
/* Empty */
@@ -63,11 +59,11 @@ static inline uint32_t ring_spsc_deq_multi(ring_spsc_t *ring, uint32_t data[],
if (num > max_num)
num = max_num;
- idx = head & mask;
+ idx = head & ring_mask;
for (i = 0; i < num; i++) {
- data[i] = ring->data[idx];
- idx = (idx + 1) & mask;
+ data[i] = ring_data[idx];
+ idx = (idx + 1) & ring_mask;
}
odp_atomic_store_rel_u32(&ring->head, head + num);
@@ -77,16 +73,17 @@ static inline uint32_t ring_spsc_deq_multi(ring_spsc_t *ring, uint32_t data[],
/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
static inline uint32_t ring_spsc_enq_multi(ring_spsc_t *ring,
+ uint32_t *ring_data,
+ uint32_t ring_mask,
const uint32_t data[],
uint32_t num_data)
{
- uint32_t head, tail, mask, size, idx;
+ uint32_t head, tail, size, idx;
uint32_t num, i;
head = odp_atomic_load_acq_u32(&ring->head);
tail = odp_atomic_load_u32(&ring->tail);
- mask = ring->mask;
- size = mask + 1;
+ size = ring_mask + 1;
num = size - (tail - head);
/* Full */
@@ -96,11 +93,11 @@ static inline uint32_t ring_spsc_enq_multi(ring_spsc_t *ring,
if (num > num_data)
num = num_data;
- idx = tail & mask;
+ idx = tail & ring_mask;
for (i = 0; i < num; i++) {
- ring->data[idx] = data[i];
- idx = (idx + 1) & mask;
+ ring_data[idx] = data[i];
+ idx = (idx + 1) & ring_mask;
}
odp_atomic_store_rel_u32(&ring->tail, tail + num);
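With the mask and data pointer removed, ring_spsc_t shrinks to its two counters, and the storage can live wherever the queue entry places its ring_data buffer. Usage mirrors the MPMC ring; a brief sketch:

    static void ring_spsc_example(void)
    {
        ring_spsc_t ring;
        uint32_t store[16];   /* power-of-two storage, caller owned */
        uint32_t val = 7, out = 0;

        ring_spsc_init(&ring);
        ring_spsc_enq_multi(&ring, store, 16 - 1, &val, 1);
        ring_spsc_deq_multi(&ring, store, 16 - 1, &out, 1);
        (void)out;
    }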
diff --git a/platform/linux-generic/include/odp_ring_st_internal.h b/platform/linux-generic/include/odp_ring_st_internal.h
index 5fb37d4ef..1bc18cda0 100644
--- a/platform/linux-generic/include/odp_ring_st_internal.h
+++ b/platform/linux-generic/include/odp_ring_st_internal.h
@@ -19,30 +19,25 @@ extern "C" {
typedef struct {
uint32_t head;
uint32_t tail;
- uint32_t mask;
- uint32_t *data;
-
} ring_st_t;
/* Initialize ring. Ring size must be a power of two. */
-static inline void ring_st_init(ring_st_t *ring, uint32_t *data, uint32_t size)
+static inline void ring_st_init(ring_st_t *ring)
{
ring->head = 0;
ring->tail = 0;
- ring->mask = size - 1;
- ring->data = data;
}
/* Dequeue data from the ring head. Max_num is smaller than ring size.*/
-static inline uint32_t ring_st_deq_multi(ring_st_t *ring, uint32_t data[],
+static inline uint32_t ring_st_deq_multi(ring_st_t *ring, uint32_t *ring_data,
+ uint32_t ring_mask, uint32_t data[],
uint32_t max_num)
{
- uint32_t head, tail, mask, idx;
+ uint32_t head, tail, idx;
uint32_t num, i;
head = ring->head;
tail = ring->tail;
- mask = ring->mask;
num = tail - head;
/* Empty */
@@ -52,11 +47,11 @@ static inline uint32_t ring_st_deq_multi(ring_st_t *ring, uint32_t data[],
if (num > max_num)
num = max_num;
- idx = head & mask;
+ idx = head & ring_mask;
for (i = 0; i < num; i++) {
- data[i] = ring->data[idx];
- idx = (idx + 1) & mask;
+ data[i] = ring_data[idx];
+ idx = (idx + 1) & ring_mask;
}
ring->head = head + num;
@@ -65,16 +60,17 @@ static inline uint32_t ring_st_deq_multi(ring_st_t *ring, uint32_t data[],
}
/* Enqueue data into the ring tail. Num_data is smaller than ring size. */
-static inline uint32_t ring_st_enq_multi(ring_st_t *ring, const uint32_t data[],
+static inline uint32_t ring_st_enq_multi(ring_st_t *ring, uint32_t *ring_data,
+ uint32_t ring_mask,
+ const uint32_t data[],
uint32_t num_data)
{
- uint32_t head, tail, mask, size, idx;
+ uint32_t head, tail, size, idx;
uint32_t num, i;
head = ring->head;
tail = ring->tail;
- mask = ring->mask;
- size = mask + 1;
+ size = ring_mask + 1;
num = size - (tail - head);
/* Full */
@@ -84,11 +80,11 @@ static inline uint32_t ring_st_enq_multi(ring_st_t *ring, const uint32_t data[],
if (num > num_data)
num = num_data;
- idx = tail & mask;
+ idx = tail & ring_mask;
for (i = 0; i < num; i++) {
- ring->data[idx] = data[i];
- idx = (idx + 1) & mask;
+ ring_data[idx] = data[i];
+ idx = (idx + 1) & ring_mask;
}
ring->tail = tail + num;
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index 8f082aaaf..15c915904 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -19,6 +19,15 @@ extern "C" {
/* Number of ordered locks per queue */
#define SCHEDULE_ORDERED_LOCKS_PER_QUEUE 2
+typedef struct schedule_config_t {
+ struct {
+ int all;
+ int worker;
+ int control;
+ } group_enable;
+
+} schedule_config_t;
+
typedef void (*schedule_pktio_start_fn_t)(int pktio_index,
int num_in_queue,
int in_queue_idx[],
@@ -32,7 +41,7 @@ typedef int (*schedule_init_queue_fn_t)(uint32_t queue_index,
typedef void (*schedule_destroy_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_sched_queue_fn_t)(uint32_t queue_index);
typedef int (*schedule_unsched_queue_fn_t)(uint32_t queue_index);
-typedef int (*schedule_ord_enq_multi_fn_t)(void *q_int,
+typedef int (*schedule_ord_enq_multi_fn_t)(odp_queue_t queue,
void *buf_hdr[], int num, int *ret);
typedef int (*schedule_init_global_fn_t)(void);
typedef int (*schedule_term_global_fn_t)(void);
@@ -44,10 +53,9 @@ typedef void (*schedule_order_unlock_lock_fn_t)(void);
typedef void (*schedule_order_lock_start_fn_t)(void);
typedef void (*schedule_order_lock_wait_fn_t)(void);
typedef uint32_t (*schedule_max_ordered_locks_fn_t)(void);
-typedef void (*schedule_save_context_fn_t)(uint32_t queue_index);
+typedef void (*schedule_get_config_fn_t)(schedule_config_t *config);
typedef struct schedule_fn_t {
- int status_sync;
schedule_pktio_start_fn_t pktio_start;
schedule_thr_add_fn_t thr_add;
schedule_thr_rem_fn_t thr_rem;
@@ -66,10 +74,7 @@ typedef struct schedule_fn_t {
schedule_order_lock_wait_fn_t wait_order_lock;
schedule_order_unlock_lock_fn_t order_unlock_lock;
schedule_max_ordered_locks_fn_t max_ordered_locks;
-
- /* Called only when status_sync is set */
- schedule_unsched_queue_fn_t unsched_queue;
- schedule_save_context_fn_t save_context;
+ schedule_get_config_fn_t get_config;
} schedule_fn_t;
@@ -79,20 +84,35 @@ extern const schedule_fn_t *sched_fn;
/* Interface for the scheduler */
int sched_cb_pktin_poll(int pktio_index, int pktin_index,
odp_buffer_hdr_t *hdr_tbl[], int num);
-int sched_cb_pktin_poll_old(int pktio_index, int num_queue, int index[]);
int sched_cb_pktin_poll_one(int pktio_index, int rx_queue, odp_event_t evts[]);
void sched_cb_pktio_stop_finalize(int pktio_index);
+/* For debugging */
+#ifdef ODP_DEBUG
+extern int _odp_schedule_configured;
+#endif
+
/* API functions */
typedef struct {
- uint64_t (*schedule_wait_time)(uint64_t);
- odp_event_t (*schedule)(odp_queue_t *, uint64_t);
- int (*schedule_multi)(odp_queue_t *, uint64_t, odp_event_t [], int);
+ uint64_t (*schedule_wait_time)(uint64_t ns);
+ int (*schedule_capability)(odp_schedule_capability_t *capa);
+ void (*schedule_config_init)(odp_schedule_config_t *config);
+ int (*schedule_config)(const odp_schedule_config_t *config);
+ odp_event_t (*schedule)(odp_queue_t *from, uint64_t wait);
+ int (*schedule_multi)(odp_queue_t *from, uint64_t wait,
+ odp_event_t events[], int num);
+ int (*schedule_multi_wait)(odp_queue_t *from, odp_event_t events[],
+ int num);
+ int (*schedule_multi_no_wait)(odp_queue_t *from, odp_event_t events[],
+ int num);
void (*schedule_pause)(void);
void (*schedule_resume)(void);
void (*schedule_release_atomic)(void);
void (*schedule_release_ordered)(void);
void (*schedule_prefetch)(int);
+ int (*schedule_min_prio)(void);
+ int (*schedule_max_prio)(void);
+ int (*schedule_default_prio)(void);
int (*schedule_num_prio)(void);
odp_schedule_group_t (*schedule_group_create)(const char *,
const odp_thrmask_t *);
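The table gains entries for the new capability and configuration calls; the ODP_DEBUG-only _odp_schedule_configured flag above suggests the configuration step must run exactly once, after global init and before the first schedule call. A short usage sketch of that sequence:

    static int init_scheduler(void)
    {
        odp_schedule_config_t config;

        odp_schedule_config_init(&config);

        /* once, before the first odp_schedule() call */
        return odp_schedule_config(&config);
    }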
diff --git a/platform/linux-generic/include/odp_schedule_scalable_config.h b/platform/linux-generic/include/odp_schedule_scalable_config.h
index a84dc0724..3462d047b 100644
--- a/platform/linux-generic/include/odp_schedule_scalable_config.h
+++ b/platform/linux-generic/include/odp_schedule_scalable_config.h
@@ -9,6 +9,9 @@
#ifndef ODP_SCHEDULE_SCALABLE_CONFIG_H_
#define ODP_SCHEDULE_SCALABLE_CONFIG_H_
+/* Maximum number of events that can be stored in a queue */
+#define CONFIG_SCAL_QUEUE_SIZE 4096
+
/*
* Default scaling factor for the scheduler group
*
diff --git a/platform/linux-generic/include/odp_shm_internal.h b/platform/linux-generic/include/odp_shm_internal.h
index a835b8f32..c98f07deb 100644
--- a/platform/linux-generic/include/odp_shm_internal.h
+++ b/platform/linux-generic/include/odp_shm_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2018, Linaro Limited
+/* Copyright (c) 2016-2018, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -11,13 +11,44 @@
extern "C" {
#endif
+#include <sys/types.h>
+#include <inttypes.h>
+
#include <odp/api/shared_memory.h>
-#define SHM_DEVNAME_MAXLEN (ODP_SHM_NAME_LEN + 16)
-#define SHM_DEVNAME_FORMAT "/odp-%d-%s" /* /dev/shm/odp-<pid>-<name> */
+/* flags available at ishm_reserve: */
+#define _ODP_ISHM_SINGLE_VA 1
+#define _ODP_ISHM_LOCK 2
+#define _ODP_ISHM_EXPORT 4 /* create export descr file in /tmp */
+#define _ODP_ISHM_USE_HP 8 /* allocate memory from huge pages */
+
+/**
+ * Shared memory block info
+ */
+typedef struct _odp_ishm_info_t {
+ const char *name; /**< Block name */
+ void *addr; /**< Block address */
+ uint64_t size; /**< Block size in bytes */
+ uint64_t page_size; /**< Memory page size */
+ uint32_t flags; /**< _ODP_ISHM_* flags */
+ uint32_t user_flags;/**< user specific flags */
+} _odp_ishm_info_t;
+
+odp_shm_t _odp_shm_reserve(const char *name, uint64_t size, uint32_t align,
+ uint32_t flags, uint32_t extra_flags);
-#define _ODP_SHM_PROC_NOCREAT 0x40 /**< Do not create shm if not exist */
-#define _ODP_SHM_O_EXCL 0x80 /**< Do not create shm if exist */
+int _odp_ishm_reserve(const char *name, uint64_t size, int fd, uint32_t align,
+ uint64_t offset, uint32_t flags, uint32_t user_flags);
+int _odp_ishm_free_by_index(int block_index);
+int _odp_ishm_lookup_by_name(const char *name);
+int _odp_ishm_find_exported(const char *remote_name,
+ pid_t external_odp_pid,
+ const char *local_name);
+void *_odp_ishm_address(int block_index);
+int _odp_ishm_info(int block_index, _odp_ishm_info_t *info);
+int _odp_ishm_status(const char *title);
+int _odp_ishm_cleanup_files(const char *dirpath);
+void _odp_ishm_print(int block_index);
#ifdef __cplusplus
}
diff --git a/platform/linux-generic/include/odp_sysinfo_internal.h b/platform/linux-generic/include/odp_sysinfo_internal.h
index 2f01d18e4..492542303 100644
--- a/platform/linux-generic/include/odp_sysinfo_internal.h
+++ b/platform/linux-generic/include/odp_sysinfo_internal.h
@@ -12,12 +12,31 @@ extern "C" {
#endif
#include <odp_global_data.h>
+#include <odp_debug_internal.h>
+#include <string.h>
+
+#define DUMMY_MAX_MHZ 1400
int cpuinfo_parser(FILE *file, system_info_t *sysinfo);
uint64_t odp_cpu_hz_current(int id);
uint64_t odp_cpu_arch_hz_current(int id);
void sys_info_print_arch(void);
+static inline int _odp_dummy_cpuinfo(system_info_t *sysinfo)
+{
+ int i;
+
+ ODP_DBG("Warning: using dummy values for freq and model string\n");
+ for (i = 0; i < CONFIG_NUM_CPU; i++) {
+ ODP_PRINT("WARN: cpu[%i] uses dummy max frequency %u MHz\n",
+ i, DUMMY_MAX_MHZ);
+ sysinfo->cpu_hz_max[i] = DUMMY_MAX_MHZ * 1000000;
+ strcpy(sysinfo->model_str[i], "UNKNOWN");
+ }
+
+ return 0;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h
index 8dda9e6ff..cd80778a5 100644
--- a/platform/linux-generic/include/odp_timer_internal.h
+++ b/platform/linux-generic/include/odp_timer_internal.h
@@ -19,9 +19,7 @@
#include <odp_buffer_internal.h>
#include <odp_pool_internal.h>
#include <odp/api/timer.h>
-
-/* Minimum number of scheduling rounds between checking timer pools. */
-#define CONFIG_TIMER_RUN_RATELIMIT_ROUNDS 1
+#include <odp_global_data.h>
/**
* Internal Timeout header
@@ -38,22 +36,14 @@ typedef struct {
odp_timer_t timer;
} odp_timeout_hdr_t;
-/*
- * Whether to run timer pool processing 'inline' (on worker cores) or in
- * background threads (thread-per-timerpool).
- *
- * If the application will use both scheduler and timer this flag is set
- * to true, otherwise false. This application conveys this information via
- * the 'not_used' bits in odp_init_t which are passed to odp_global_init().
- */
-extern odp_bool_t inline_timers;
-
-unsigned _timer_run(void);
+/* A larger decrement value should be used after receiving events compared to
+ * an 'empty' call. */
+unsigned int _timer_run(int dec);
/* Static inline wrapper to minimize modification of schedulers. */
-static inline unsigned timer_run(void)
+static inline unsigned int timer_run(int dec)
{
- return inline_timers ? _timer_run() : 0;
+ return odp_global_rw->inline_timers ? _timer_run(dec) : 0;
}
#endif
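The inline/background decision now comes from odp_global_rw, and the dec argument lets a scheduler report how much progress a round made: rounds that delivered events should charge a larger decrement than empty polls. A hedged sketch of a scheduler loop using it; the decrement values are assumptions, not ODP constants:

    #define TIMER_SCAN_FAST 2   /* assumed values */
    #define TIMER_SCAN_SLOW 1

    static void schedule_round_timers(int num_events)
    {
        /* charge more progress toward the next timer scan when the
         * round actually delivered events */
        timer_run(num_events > 0 ? TIMER_SCAN_FAST : TIMER_SCAN_SLOW);
    }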
diff --git a/platform/linux-generic/include/odp_traffic_mngr_internal.h b/platform/linux-generic/include/odp_traffic_mngr_internal.h
index 919831a3d..040bb117a 100644
--- a/platform/linux-generic/include/odp_traffic_mngr_internal.h
+++ b/platform/linux-generic/include/odp_traffic_mngr_internal.h
@@ -83,13 +83,6 @@ typedef uint64_t tm_handle_t;
#define PF_REACHED_EGRESS 0x40
#define PF_ERROR 0x80
-typedef struct {
- uint32_t num_allocd;
- uint32_t num_used;
- uint32_t num_freed;
- void **array_ptrs; /* Ptr to an array of num_allocd void * ptrs. */
-} dynamic_tbl_t;
-
#define ODP_TM_NUM_PROFILES 4
typedef enum {
@@ -99,6 +92,11 @@ typedef enum {
TM_WRED_PROFILE
} profile_kind_t;
+typedef enum {
+ TM_STATUS_FREE = 0,
+ TM_STATUS_RESERVED
+} tm_status_t;
+
typedef struct tm_queue_obj_s tm_queue_obj_t;
typedef struct tm_node_obj_s tm_node_obj_t;
@@ -110,6 +108,7 @@ typedef struct {
_odp_int_name_t name_tbl_id;
odp_tm_threshold_t thresholds_profile;
uint32_t ref_cnt;
+ tm_status_t status;
} tm_queue_thresholds_t;
typedef struct {
@@ -122,6 +121,7 @@ typedef struct {
odp_tm_percent_t max_drop_prob;
odp_bool_t enable_wred;
odp_bool_t use_byte_fullness;
+ tm_status_t status;
} tm_wred_params_t;
typedef struct {
@@ -160,6 +160,7 @@ typedef struct {
uint32_t ref_cnt;
odp_tm_sched_mode_t sched_modes[ODP_TM_MAX_PRIORITIES];
uint16_t inverted_weights[ODP_TM_MAX_PRIORITIES];
+ tm_status_t status;
} tm_sched_params_t;
typedef enum {
@@ -195,6 +196,7 @@ typedef struct {
int8_t len_adjust;
odp_bool_t dual_rate;
odp_bool_t enabled;
+ tm_status_t status;
} tm_shaper_params_t;
typedef enum { NO_CALLBACK, UNDELAY_PKT } tm_shaper_callback_reason_t;
@@ -253,7 +255,7 @@ typedef struct {
uint8_t num_priorities;
uint8_t highest_priority;
uint8_t locked;
- tm_sched_state_t sched_states[0];
+ tm_sched_state_t sched_states[ODP_TM_MAX_PRIORITIES];
} tm_schedulers_obj_t;
struct tm_queue_obj_s {
@@ -264,7 +266,7 @@ struct tm_queue_obj_s {
uint32_t pkts_dequeued_cnt;
uint32_t pkts_consumed_cnt;
_odp_int_pkt_queue_t _odp_int_pkt_queue;
- tm_wred_node_t *tm_wred_node;
+ tm_wred_node_t tm_wred_node;
odp_packet_t pkt;
odp_packet_t sent_pkt;
uint32_t timer_seq;
@@ -282,13 +284,14 @@ struct tm_queue_obj_s {
uint8_t tm_idx;
uint8_t delayed_cnt;
uint8_t blocked_cnt;
- void *tm_qentry;
+ tm_status_t status;
+ odp_queue_t queue;
};
struct tm_node_obj_s {
void *user_context;
- tm_wred_node_t *tm_wred_node;
- tm_schedulers_obj_t *schedulers_obj;
+ tm_wred_node_t tm_wred_node;
+ tm_schedulers_obj_t schedulers_obj;
tm_shaper_obj_t *fanin_list_head;
tm_shaper_obj_t *fanin_list_tail;
tm_shaper_obj_t shaper_obj;
@@ -301,6 +304,7 @@ struct tm_node_obj_s {
uint8_t level; /* Primarily for debugging */
uint8_t tm_idx;
uint8_t marked;
+ tm_status_t status;
};
typedef struct {
@@ -369,8 +373,8 @@ struct tm_system_s {
void *trace_buffer;
uint32_t next_queue_num;
- tm_queue_obj_t **queue_num_tbl;
- input_work_queue_t *input_work_queue;
+ tm_queue_obj_t *queue_num_tbl[ODP_TM_MAX_TM_QUEUES];
+ input_work_queue_t input_work_queue;
tm_queue_cnts_t priority_queue_cnts;
tm_queue_cnts_t total_queue_cnts;
pkt_desc_t egress_pkt_desc;
@@ -379,7 +383,7 @@ struct tm_system_s {
_odp_timer_wheel_t _odp_int_timer_wheel;
_odp_int_sorted_pool_t _odp_int_sorted_pool;
- tm_node_obj_t *root_node;
+ tm_node_obj_t root_node;
odp_tm_egress_t egress;
odp_tm_requirements_t requirements;
odp_tm_capabilities_t capabilities;
@@ -396,6 +400,7 @@ struct tm_system_s {
uint8_t tm_idx;
uint8_t first_enq;
odp_bool_t is_idle;
+ tm_status_t status;
uint64_t shaper_green_cnt;
uint64_t shaper_yellow_cnt;
@@ -408,15 +413,13 @@ struct tm_system_s {
* while the input work queue is shared - timers are not. */
struct tm_system_group_s {
- tm_system_group_t *prev;
- tm_system_group_t *next;
-
odp_barrier_t tm_group_barrier;
tm_system_t *first_tm_system;
uint32_t num_tm_systems;
uint32_t first_enq;
pthread_t thread;
pthread_attr_t attr;
+ tm_status_t status;
};
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/protocols/sctp.h b/platform/linux-generic/include/protocols/sctp.h
new file mode 100644
index 000000000..dea8a8a6e
--- /dev/null
+++ b/platform/linux-generic/include/protocols/sctp.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2016-2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP SCTP header
+ */
+
+#ifndef ODP_SCTP_H_
+#define ODP_SCTP_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_api.h>
+
+/** @addtogroup odp_header ODP HEADER
+ * @{
+ */
+
+/** SCTP header length */
+#define _ODP_SCTPHDR_LEN 12
+
+/** SCTP header */
+typedef struct ODP_PACKED {
+ odp_u16be_t src_port; /**< Source port */
+ odp_u16be_t dst_port; /**< Destination port */
+ odp_u32be_t tag; /**< Verification tag */
+ odp_u32be_t chksum; /**< SCTP header and data checksum */
+} _odp_sctphdr_t;
+
+/** @internal Compile time assert */
+ODP_STATIC_ASSERT(sizeof(_odp_sctphdr_t) == _ODP_SCTPHDR_LEN,
+ "_ODP_SCTPHDR_T__SIZE_ERROR");
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
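To illustrate the new header layout, here is a small standalone sketch (not part of the patch) that decodes the 12-byte SCTP header from a raw buffer. It mirrors _odp_sctphdr_t with plain uint types, with ntohs()/ntohl() standing in for the odp_u16be_t/odp_u32be_t big-endian accessors:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

/* Standalone mirror of _odp_sctphdr_t (fields are network byte order on
 * the wire); the attribute stands in for ODP_PACKED. */
struct sctp_hdr {
	uint16_t src_port;
	uint16_t dst_port;
	uint32_t tag;
	uint32_t chksum;
} __attribute__((packed));

int main(void)
{
	/* SCTP header: src 5060, dst 80, tag 0x01020304, checksum 0. */
	const uint8_t wire[12] = { 0x13, 0xC4, 0x00, 0x50,
				   0x01, 0x02, 0x03, 0x04,
				   0x00, 0x00, 0x00, 0x00 };
	struct sctp_hdr h;

	memcpy(&h, wire, sizeof(h)); /* avoid unaligned access */
	printf("src=%u dst=%u tag=0x%08x\n",
	       ntohs(h.src_port), ntohs(h.dst_port),
	       (unsigned)ntohl(h.tag));
	return 0;
}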
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index e0fd099be..1dd14cd4d 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -20,18 +20,16 @@ m4_include([platform/linux-generic/m4/odp_netmap.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
ODP_SCHEDULER
-m4_include([platform/linux-generic/m4/performance.m4])
-
AC_CONFIG_COMMANDS_PRE([dnl
AM_CONDITIONAL([PLATFORM_IS_LINUX_GENERIC],
[test "${with_platform}" = "linux-generic"])
AC_CONFIG_FILES([platform/linux-generic/Makefile
platform/linux-generic/libodp-linux.pc
+ platform/linux-generic/dumpconfig/Makefile
platform/linux-generic/test/Makefile
platform/linux-generic/test/validation/api/shmem/Makefile
platform/linux-generic/test/validation/api/pktio/Makefile
platform/linux-generic/test/mmap_vlan_ins/Makefile
platform/linux-generic/test/pktio_ipc/Makefile
- platform/linux-generic/test/ring/Makefile
- platform/linux-generic/test/performance/Makefile])
+ platform/linux-generic/test/ring/Makefile])
])
diff --git a/platform/linux-generic/m4/performance.m4 b/platform/linux-generic/m4/performance.m4
deleted file mode 100644
index f2e7107c0..000000000
--- a/platform/linux-generic/m4/performance.m4
+++ /dev/null
@@ -1,10 +0,0 @@
-##########################################################################
-# Enable/disable test-perf-proc
-##########################################################################
-AC_ARG_ENABLE([test-perf-proc],
- [AS_HELP_STRING([--enable-test-perf-proc], [run test in test/performance in process mode])],
- [test_perf_proc=$enableval],
- [test_perf_proc=yes])
-AC_CONFIG_COMMANDS_PRE([dnl
-AM_CONDITIONAL([test_perf_proc], [test x$test_perf_proc = xyes ])
-])
diff --git a/platform/linux-generic/miniz/miniz.c b/platform/linux-generic/miniz/miniz.c
new file mode 100644
index 000000000..d0e39ec4a
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz.c
@@ -0,0 +1,619 @@
+/**************************************************************************
+ *
+ * Copyright 2013-2014 RAD Game Tools and Valve Software
+ * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "miniz.h"
+
+typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
+typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
+typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- zlib-style API's */
+
+mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
+{
+ mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
+ size_t block_len = buf_len % 5552;
+ if (!ptr)
+ return MZ_ADLER32_INIT;
+ while (buf_len)
+ {
+ for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
+ {
+ s1 += ptr[0], s2 += s1;
+ s1 += ptr[1], s2 += s1;
+ s1 += ptr[2], s2 += s1;
+ s1 += ptr[3], s2 += s1;
+ s1 += ptr[4], s2 += s1;
+ s1 += ptr[5], s2 += s1;
+ s1 += ptr[6], s2 += s1;
+ s1 += ptr[7], s2 += s1;
+ }
+ for (; i < block_len; ++i)
+ s1 += *ptr++, s2 += s1;
+ s1 %= 65521U, s2 %= 65521U;
+ buf_len -= block_len;
+ block_len = 5552;
+ }
+ return (s2 << 16) + s1;
+}
+
+/* Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/ */
+#if 0
+ mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
+ {
+ static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
+ 0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
+ mz_uint32 crcu32 = (mz_uint32)crc;
+ if (!ptr)
+ return MZ_CRC32_INIT;
+ crcu32 = ~crcu32;
+ while (buf_len--)
+ {
+ mz_uint8 b = *ptr++;
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
+ crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
+ }
+ return ~crcu32;
+ }
+#else
+/* Faster, but larger CPU cache footprint. */
+mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
+{
+ static const mz_uint32 s_crc_table[256] =
+ {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535,
+ 0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD,
+ 0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D,
+ 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4,
+ 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
+ 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC,
+ 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB,
+ 0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F,
+ 0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB,
+ 0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA,
+ 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE,
+ 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A,
+ 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409,
+ 0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
+ 0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739,
+ 0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268,
+ 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0,
+ 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8,
+ 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF,
+ 0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703,
+ 0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7,
+ 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE,
+ 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
+ 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6,
+ 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D,
+ 0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5,
+ 0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605,
+ 0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+ };
+
+ mz_uint32 crc32 = (mz_uint32)crc ^ 0xFFFFFFFF;
+ const mz_uint8 *pByte_buf = (const mz_uint8 *)ptr;
+
+ while (buf_len >= 4)
+ {
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF];
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[1]) & 0xFF];
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[2]) & 0xFF];
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[3]) & 0xFF];
+ pByte_buf += 4;
+ buf_len -= 4;
+ }
+
+ while (buf_len)
+ {
+ crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF];
+ ++pByte_buf;
+ --buf_len;
+ }
+
+ return ~crc32;
+}
+#endif
+
+void mz_free(void *p)
+{
+ MZ_FREE(p);
+}
+
+void *miniz_def_alloc_func(void *opaque, size_t items, size_t size)
+{
+ (void)opaque, (void)items, (void)size;
+ return MZ_MALLOC(items * size);
+}
+void miniz_def_free_func(void *opaque, void *address)
+{
+ (void)opaque, (void)address;
+ MZ_FREE(address);
+}
+void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size)
+{
+ (void)opaque, (void)address, (void)items, (void)size;
+ return MZ_REALLOC(address, items * size);
+}
+
+const char *mz_version(void)
+{
+ return MZ_VERSION;
+}
+
+#ifndef MINIZ_NO_ZLIB_APIS
+
+int mz_deflateInit(mz_streamp pStream, int level)
+{
+ return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY);
+}
+
+int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy)
+{
+ tdefl_compressor *pComp;
+ mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);
+
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
+ return MZ_PARAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = MZ_ADLER32_INIT;
+ pStream->msg = NULL;
+ pStream->reserved = 0;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ if (!pStream->zalloc)
+ pStream->zalloc = miniz_def_alloc_func;
+ if (!pStream->zfree)
+ pStream->zfree = miniz_def_free_func;
+
+ pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor));
+ if (!pComp)
+ return MZ_MEM_ERROR;
+
+ pStream->state = (struct mz_internal_state *)pComp;
+
+ if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY)
+ {
+ mz_deflateEnd(pStream);
+ return MZ_PARAM_ERROR;
+ }
+
+ return MZ_OK;
+}
+
+int mz_deflateReset(mz_streamp pStream)
+{
+ if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree))
+ return MZ_STREAM_ERROR;
+ pStream->total_in = pStream->total_out = 0;
+ tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags);
+ return MZ_OK;
+}
+
+int mz_deflate(mz_streamp pStream, int flush)
+{
+ size_t in_bytes, out_bytes;
+ mz_ulong orig_total_in, orig_total_out;
+ int mz_status = MZ_OK;
+
+ if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out))
+ return MZ_STREAM_ERROR;
+ if (!pStream->avail_out)
+ return MZ_BUF_ERROR;
+
+ if (flush == MZ_PARTIAL_FLUSH)
+ flush = MZ_SYNC_FLUSH;
+
+ if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE)
+ return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;
+
+ orig_total_in = pStream->total_in;
+ orig_total_out = pStream->total_out;
+ for (;;)
+ {
+ tdefl_status defl_status;
+ in_bytes = pStream->avail_in;
+ out_bytes = pStream->avail_out;
+
+ defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush);
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);
+
+ pStream->next_out += (mz_uint)out_bytes;
+ pStream->avail_out -= (mz_uint)out_bytes;
+ pStream->total_out += (mz_uint)out_bytes;
+
+ if (defl_status < 0)
+ {
+ mz_status = MZ_STREAM_ERROR;
+ break;
+ }
+ else if (defl_status == TDEFL_STATUS_DONE)
+ {
+ mz_status = MZ_STREAM_END;
+ break;
+ }
+ else if (!pStream->avail_out)
+ break;
+ else if ((!pStream->avail_in) && (flush != MZ_FINISH))
+ {
+ if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out))
+ break;
+ return MZ_BUF_ERROR; /* Can't make forward progress without some input. */
+ }
+ }
+ return mz_status;
+}
+
+int mz_deflateEnd(mz_streamp pStream)
+{
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if (pStream->state)
+ {
+ pStream->zfree(pStream->opaque, pStream->state);
+ pStream->state = NULL;
+ }
+ return MZ_OK;
+}
+
+mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len)
+{
+ (void)pStream;
+ /* This is really overly conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.) */
+ return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
+}
+
+int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level)
+{
+ int status;
+ mz_stream stream;
+ memset(&stream, 0, sizeof(stream));
+
+ /* In case mz_ulong is 64-bits (argh I hate longs). */
+ if ((source_len | *pDest_len) > 0xFFFFFFFFU)
+ return MZ_PARAM_ERROR;
+
+ stream.next_in = pSource;
+ stream.avail_in = (mz_uint32)source_len;
+ stream.next_out = pDest;
+ stream.avail_out = (mz_uint32)*pDest_len;
+
+ status = mz_deflateInit(&stream, level);
+ if (status != MZ_OK)
+ return status;
+
+ status = mz_deflate(&stream, MZ_FINISH);
+ if (status != MZ_STREAM_END)
+ {
+ mz_deflateEnd(&stream);
+ return (status == MZ_OK) ? MZ_BUF_ERROR : status;
+ }
+
+ *pDest_len = stream.total_out;
+ return mz_deflateEnd(&stream);
+}
+
+int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
+{
+ return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION);
+}
+
+mz_ulong mz_compressBound(mz_ulong source_len)
+{
+ return mz_deflateBound(NULL, source_len);
+}
+
+int mz_inflateInit2(mz_streamp pStream, int window_bits)
+{
+ inflate_state *pDecomp;
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))
+ return MZ_PARAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = 0;
+ pStream->msg = NULL;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ pStream->reserved = 0;
+ if (!pStream->zalloc)
+ pStream->zalloc = miniz_def_alloc_func;
+ if (!pStream->zfree)
+ pStream->zfree = miniz_def_free_func;
+
+ pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state));
+ if (!pDecomp)
+ return MZ_MEM_ERROR;
+
+ pStream->state = (struct mz_internal_state *)pDecomp;
+
+ tinfl_init(&pDecomp->m_decomp);
+ pDecomp->m_dict_ofs = 0;
+ pDecomp->m_dict_avail = 0;
+ pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
+ pDecomp->m_first_call = 1;
+ pDecomp->m_has_flushed = 0;
+ pDecomp->m_window_bits = window_bits;
+
+ return MZ_OK;
+}
+
+int mz_inflateInit(mz_streamp pStream)
+{
+ return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
+}
+
+int mz_inflateReset(mz_streamp pStream)
+{
+ inflate_state *pDecomp;
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+
+ pStream->data_type = 0;
+ pStream->adler = 0;
+ pStream->msg = NULL;
+ pStream->total_in = 0;
+ pStream->total_out = 0;
+ pStream->reserved = 0;
+
+ pDecomp = (inflate_state *)pStream->state;
+
+ tinfl_init(&pDecomp->m_decomp);
+ pDecomp->m_dict_ofs = 0;
+ pDecomp->m_dict_avail = 0;
+ pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
+ pDecomp->m_first_call = 1;
+ pDecomp->m_has_flushed = 0;
+ /* pDecomp->m_window_bits = window_bits */;
+
+ return MZ_OK;
+}
+
+int mz_inflate(mz_streamp pStream, int flush)
+{
+ inflate_state *pState;
+ mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
+ size_t in_bytes, out_bytes, orig_avail_in;
+ tinfl_status status;
+
+ if ((!pStream) || (!pStream->state))
+ return MZ_STREAM_ERROR;
+ if (flush == MZ_PARTIAL_FLUSH)
+ flush = MZ_SYNC_FLUSH;
+ if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
+ return MZ_STREAM_ERROR;
+
+ pState = (inflate_state *)pStream->state;
+ if (pState->m_window_bits > 0)
+ decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
+ orig_avail_in = pStream->avail_in;
+
+ first_call = pState->m_first_call;
+ pState->m_first_call = 0;
+ if (pState->m_last_status < 0)
+ return MZ_DATA_ERROR;
+
+ if (pState->m_has_flushed && (flush != MZ_FINISH))
+ return MZ_STREAM_ERROR;
+ pState->m_has_flushed |= (flush == MZ_FINISH);
+
+ if ((flush == MZ_FINISH) && (first_call))
+ {
+ /* MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file. */
+ decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
+ in_bytes = pStream->avail_in;
+ out_bytes = pStream->avail_out;
+ status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags);
+ pState->m_last_status = status;
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tinfl_get_adler32(&pState->m_decomp);
+ pStream->next_out += (mz_uint)out_bytes;
+ pStream->avail_out -= (mz_uint)out_bytes;
+ pStream->total_out += (mz_uint)out_bytes;
+
+ if (status < 0)
+ return MZ_DATA_ERROR;
+ else if (status != TINFL_STATUS_DONE)
+ {
+ pState->m_last_status = TINFL_STATUS_FAILED;
+ return MZ_BUF_ERROR;
+ }
+ return MZ_STREAM_END;
+ }
+ /* If flush != MZ_FINISH then we must assume there's more input. */
+ if (flush != MZ_FINISH)
+ decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;
+
+ if (pState->m_dict_avail)
+ {
+ n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
+ memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
+ pStream->next_out += n;
+ pStream->avail_out -= n;
+ pStream->total_out += n;
+ pState->m_dict_avail -= n;
+ pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
+ return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
+ }
+
+ for (;;)
+ {
+ in_bytes = pStream->avail_in;
+ out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;
+
+ status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
+ pState->m_last_status = status;
+
+ pStream->next_in += (mz_uint)in_bytes;
+ pStream->avail_in -= (mz_uint)in_bytes;
+ pStream->total_in += (mz_uint)in_bytes;
+ pStream->adler = tinfl_get_adler32(&pState->m_decomp);
+
+ pState->m_dict_avail = (mz_uint)out_bytes;
+
+ n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
+ memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
+ pStream->next_out += n;
+ pStream->avail_out -= n;
+ pStream->total_out += n;
+ pState->m_dict_avail -= n;
+ pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
+
+ if (status < 0)
+ return MZ_DATA_ERROR; /* Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). */
+ else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
+ return MZ_BUF_ERROR; /* Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. */
+ else if (flush == MZ_FINISH)
+ {
+ /* The output buffer MUST be large enough to hold the remaining uncompressed data when flush==MZ_FINISH. */
+ if (status == TINFL_STATUS_DONE)
+ return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
+ /* status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. */
+ else if (!pStream->avail_out)
+ return MZ_BUF_ERROR;
+ }
+ else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail))
+ break;
+ }
+
+ return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
+}
+
+int mz_inflateEnd(mz_streamp pStream)
+{
+ if (!pStream)
+ return MZ_STREAM_ERROR;
+ if (pStream->state)
+ {
+ pStream->zfree(pStream->opaque, pStream->state);
+ pStream->state = NULL;
+ }
+ return MZ_OK;
+}
+
+int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
+{
+ mz_stream stream;
+ int status;
+ memset(&stream, 0, sizeof(stream));
+
+ /* In case mz_ulong is 64-bits (argh I hate longs). */
+ if ((source_len | *pDest_len) > 0xFFFFFFFFU)
+ return MZ_PARAM_ERROR;
+
+ stream.next_in = pSource;
+ stream.avail_in = (mz_uint32)source_len;
+ stream.next_out = pDest;
+ stream.avail_out = (mz_uint32)*pDest_len;
+
+ status = mz_inflateInit(&stream);
+ if (status != MZ_OK)
+ return status;
+
+ status = mz_inflate(&stream, MZ_FINISH);
+ if (status != MZ_STREAM_END)
+ {
+ mz_inflateEnd(&stream);
+ return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status;
+ }
+ *pDest_len = stream.total_out;
+
+ return mz_inflateEnd(&stream);
+}
+
+const char *mz_error(int err)
+{
+ static struct
+ {
+ int m_err;
+ const char *m_pDesc;
+ } s_error_descs[] =
+ {
+ { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" }
+ };
+ mz_uint i;
+ for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
+ if (s_error_descs[i].m_err == err)
+ return s_error_descs[i].m_pDesc;
+ return NULL;
+}
+
+#endif /*MINIZ_NO_ZLIB_APIS */
+
+#ifdef __cplusplus
+}
+#endif
+
+/*
+ This is free and unencumbered software released into the public domain.
+
+ Anyone is free to copy, modify, publish, use, compile, sell, or
+ distribute this software, either in source code form or as a compiled
+ binary, for any purpose, commercial or non-commercial, and by any
+ means.
+
+ In jurisdictions that recognize copyright laws, the author or authors
+ of this software dedicate any and all copyright interest in the
+ software to the public domain. We make this dedication for the benefit
+ of the public at large and to the detriment of our heirs and
+ successors. We intend this dedication to be an overt act of
+ relinquishment in perpetuity of all present and future rights to this
+ software under copyright law.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ For more information, please refer to <http://unlicense.org/>
+*/
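A usage note on the zlib-style API just added: in this vendored copy, miniz_common.h (further below) stubs MZ_MALLOC and MZ_REALLOC to NULL, so the default miniz_def_alloc_func() cannot allocate and every stream must be given explicit zalloc/zfree callbacks. A hedged round-trip sketch under that assumption, using malloc-backed allocators (my_zalloc/my_zfree are illustrative names, not ODP code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "miniz.h"

static void *my_zalloc(void *opaque, size_t items, size_t size)
{
	(void)opaque;
	return calloc(items, size); /* real allocator; vendored default is a NULL stub */
}

static void my_zfree(void *opaque, void *address)
{
	(void)opaque;
	free(address);
}

int main(void)
{
	const unsigned char src[] = "the quick brown fox jumps over the lazy dog";
	unsigned char comp[256], out[256];
	mz_ulong clen;
	mz_stream s;

	/* Compress in a single MZ_FINISH call (buffers sized generously). */
	memset(&s, 0, sizeof(s));
	s.zalloc = my_zalloc;
	s.zfree = my_zfree;
	if (mz_deflateInit(&s, MZ_DEFAULT_COMPRESSION) != MZ_OK)
		return 1;
	s.next_in = src;
	s.avail_in = (mz_uint32)sizeof(src);
	s.next_out = comp;
	s.avail_out = sizeof(comp);
	if (mz_deflate(&s, MZ_FINISH) != MZ_STREAM_END)
		return 1;
	clen = s.total_out;
	mz_deflateEnd(&s);

	/* Decompress it back; MZ_FINISH on the first call takes the fast
	 * non-wrapping output path documented in mz_inflate(). */
	memset(&s, 0, sizeof(s));
	s.zalloc = my_zalloc;
	s.zfree = my_zfree;
	if (mz_inflateInit(&s) != MZ_OK)
		return 1;
	s.next_in = comp;
	s.avail_in = (mz_uint32)clen;
	s.next_out = out;
	s.avail_out = sizeof(out);
	if (mz_inflate(&s, MZ_FINISH) != MZ_STREAM_END)
		return 1;
	mz_inflateEnd(&s);
	printf("%lu -> %lu -> %lu bytes: %s\n",
	       (unsigned long)sizeof(src), (unsigned long)clen,
	       (unsigned long)s.total_out, out);
	return 0;
}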
diff --git a/platform/linux-generic/miniz/miniz.h b/platform/linux-generic/miniz/miniz.h
new file mode 100644
index 000000000..a2f90d907
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz.h
@@ -0,0 +1,363 @@
+/* miniz.c 2.1.0 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
+ See "unlicense" statement at the end of this file.
+ Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
+ Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt
+
+ Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define
+ MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).
+
+ * Low-level Deflate/Inflate implementation notes:
+
+ Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or
+ greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses
+ approximately as well as zlib.
+
+ Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function
+ coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory
+ block large enough to hold the entire file.
+
+ The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation.
+
+ * zlib-style API notes:
+
+ miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in
+ zlib replacement in many apps:
+ The z_stream struct, optional memory allocation callbacks
+ deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
+ inflateInit/inflateInit2/inflate/inflateReset/inflateEnd
+ compress, compress2, compressBound, uncompress
+ CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines.
+ Supports raw deflate streams or standard zlib streams with adler-32 checking.
+
+ Limitations:
+ The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries.
+ I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but
+ there are no guarantees that miniz.c pulls this off perfectly.
+
+ * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by
+ Alex Evans. Supports 1-4 bytes/pixel images.
+
+ * ZIP archive API notes:
+
+ The ZIP archive API's were designed with simplicity and efficiency in mind, with just enough abstraction to
+ get the job done with minimal fuss. There are simple API's to retrieve file information, read files from
+ existing archives, create new archives, append new files to existing archives, or clone archive data from
+ one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h),
+ or you can specify custom file read/write callbacks.
+
+ - Archive reading: Just call this function to read a single file from a disk archive:
+
+ void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name,
+ size_t *pSize, mz_uint zip_flags);
+
+ For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central
+ directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files.
+
+ - Archive file scanning: The simple way is to use this function to scan a loaded archive for a specific file:
+
+ int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags);
+
+ The locate operation can optionally check file comments too, which (as one example) can be used to identify
+ multiple versions of the same file in an archive. This function uses a simple linear search through the central
+ directory, so it's not very fast.
+
+ Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and
+ retrieve detailed info on each file by calling mz_zip_reader_file_stat().
+
+ - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data
+ to disk and builds an exact image of the central directory in memory. The central directory image is written
+ all at once at the end of the archive file when the archive is finalized.
+
+ The archive writer can optionally align each file's local header and file data to any power of 2 alignment,
+ which can be useful when the archive will be read from optical media. Also, the writer supports placing
+ arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still
+ readable by any ZIP tool.
+
+ - Archive appending: The simple way to add a single file to an archive is to call this function:
+
+ mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name,
+ const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);
+
+ The archive will be created if it doesn't already exist, otherwise it'll be appended to.
+ Note the appending is done in-place and is not an atomic operation, so if something goes wrong
+ during the operation it's possible the archive could be left without a central directory (although the local
+ file headers and file data will be fine, so the archive will be recoverable).
+
+ For more complex archive modification scenarios:
+ 1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to
+ preserve into a new archive using the mz_zip_writer_add_from_zip_reader() function (which copies the
+ compressed file data as-is). When you're done, delete the old archive and rename the newly written archive.
+ This is safe but requires a bunch of temporary disk space or heap memory.
+
+ 2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(),
+ append new files as needed, then finalize the archive which will write an updated central directory to the
+ original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a
+ possibility that the archive's central directory could be lost with this method if anything goes wrong, though.
+
+ - ZIP archive support limitations:
+ No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files.
+ Requires streams capable of seeking.
+
+ * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the
+ below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.
+
+ * Important: For best perf. be sure to customize the below macros for your target platform:
+ #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
+ #define MINIZ_LITTLE_ENDIAN 1
+ #define MINIZ_HAS_64BIT_REGISTERS 1
+
+ * On platforms using glibc, be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz
+ uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files
+ (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
+*/
+#pragma once
+
+/* Defines to completely disable specific portions of miniz.c:
+ If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. */
+
+/* Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression API's. */
+/*#define MINIZ_NO_ZLIB_APIS */
+
+#include <stddef.h>
+
+#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
+/* MINIZ_X86_OR_X64_CPU is only used to help set the below macros. */
+#define MINIZ_X86_OR_X64_CPU 1
+#else
+#define MINIZ_X86_OR_X64_CPU 0
+#endif
+
+#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
+/* Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. */
+#define MINIZ_LITTLE_ENDIAN 1
+#else
+#define MINIZ_LITTLE_ENDIAN 0
+#endif
+
+/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses. */
+#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
+/* #define MINIZ_UNALIGNED_USE_MEMCPY */
+
+#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__) || defined (__arch64__)
+/* Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions). */
+#define MINIZ_HAS_64BIT_REGISTERS 1
+#else
+#define MINIZ_HAS_64BIT_REGISTERS 0
+#endif
+
+#include "miniz_common.h"
+#include "miniz_tdef.h"
+#include "miniz_tinfl.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- zlib-style API Definitions. */
+
+/* For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! */
+typedef unsigned long mz_ulong;
+
+/* mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've modified the MZ_MALLOC macro) to release a block allocated from the heap. */
+void mz_free(void *p);
+
+#define MZ_ADLER32_INIT (1)
+/* mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL. */
+mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);
+
+#define MZ_CRC32_INIT (0)
+/* mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL. */
+mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);
+
+/* Compression strategies. */
+enum
+{
+ MZ_DEFAULT_STRATEGY = 0,
+ MZ_FILTERED = 1,
+ MZ_HUFFMAN_ONLY = 2,
+ MZ_RLE = 3,
+ MZ_FIXED = 4
+};
+
+/* Method */
+#define MZ_DEFLATED 8
+
+/* Heap allocation callbacks.
+Note that mz_alloc_func parameter types purposely differ from zlib's: items/size is size_t, not unsigned long. */
+typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
+typedef void (*mz_free_func)(void *opaque, void *address);
+typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size);
+
+/* Compression levels: 0-9 are the standard zlib-style levels, 10 is best possible compression (not zlib compatible, and may be very slow), MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. */
+enum
+{
+ MZ_NO_COMPRESSION = 0,
+ MZ_BEST_SPEED = 1,
+ MZ_BEST_COMPRESSION = 9,
+ MZ_UBER_COMPRESSION = 10,
+ MZ_DEFAULT_LEVEL = 6,
+ MZ_DEFAULT_COMPRESSION = -1
+};
+
+#define MZ_VERSION "10.1.0"
+#define MZ_VERNUM 0xA100
+#define MZ_VER_MAJOR 10
+#define MZ_VER_MINOR 1
+#define MZ_VER_REVISION 0
+#define MZ_VER_SUBREVISION 0
+
+#ifndef MINIZ_NO_ZLIB_APIS
+
+/* Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The other values are for advanced use (refer to the zlib docs). */
+enum
+{
+ MZ_NO_FLUSH = 0,
+ MZ_PARTIAL_FLUSH = 1,
+ MZ_SYNC_FLUSH = 2,
+ MZ_FULL_FLUSH = 3,
+ MZ_FINISH = 4,
+ MZ_BLOCK = 5
+};
+
+/* Return status codes. MZ_PARAM_ERROR is non-standard. */
+enum
+{
+ MZ_OK = 0,
+ MZ_STREAM_END = 1,
+ MZ_NEED_DICT = 2,
+ MZ_ERRNO = -1,
+ MZ_STREAM_ERROR = -2,
+ MZ_DATA_ERROR = -3,
+ MZ_MEM_ERROR = -4,
+ MZ_BUF_ERROR = -5,
+ MZ_VERSION_ERROR = -6,
+ MZ_PARAM_ERROR = -10000
+};
+
+/* Window bits */
+#define MZ_DEFAULT_WINDOW_BITS 15
+
+struct mz_internal_state;
+
+/* Compression/decompression stream struct. */
+typedef struct mz_stream_s
+{
+ const unsigned char *next_in; /* pointer to next byte to read */
+ unsigned int avail_in; /* number of bytes available at next_in */
+ mz_ulong total_in; /* total number of bytes consumed so far */
+
+ unsigned char *next_out; /* pointer to next byte to write */
+ unsigned int avail_out; /* number of bytes that can be written to next_out */
+ mz_ulong total_out; /* total number of bytes produced so far */
+
+ char *msg; /* error msg (unused) */
+ struct mz_internal_state *state; /* internal state, allocated by zalloc/zfree */
+
+ mz_alloc_func zalloc; /* optional heap allocation function (defaults to malloc) */
+ mz_free_func zfree; /* optional heap free function (defaults to free) */
+ void *opaque; /* heap alloc function user pointer */
+
+ int data_type; /* data_type (unused) */
+ mz_ulong adler; /* adler32 of the source or uncompressed data */
+ mz_ulong reserved; /* not used */
+} mz_stream;
+
+typedef mz_stream *mz_streamp;
+
+/* Returns the version string of miniz.c. */
+const char *mz_version(void);
+
+/* mz_deflateInit() initializes a compressor with default options: */
+/* Parameters: */
+/* pStream must point to an initialized mz_stream struct. */
+/* level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. */
+/* level 1 enables a specially optimized compression function that's been optimized purely for performance, not ratio. */
+/* (This special func. is currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) */
+/* Return values: */
+/* MZ_OK on success. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+/* MZ_PARAM_ERROR if the input parameters are bogus. */
+/* MZ_MEM_ERROR on out of memory. */
+int mz_deflateInit(mz_streamp pStream, int level);
+
+/* mz_deflateInit2() is like mz_deflateInit(), except with more control: */
+/* Additional parameters: */
+/* method must be MZ_DEFLATED */
+/* window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no header or footer) */
+/* mem_level must be between [1, 9] (it's checked but ignored by miniz.c) */
+int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy);
+
+/* Quickly resets a compressor without having to reallocate anything. Same as calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). */
+int mz_deflateReset(mz_streamp pStream);
+
+/* mz_deflate() compresses the input to output, consuming as much of the input and producing as much output as possible. */
+/* Parameters: */
+/* pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */
+/* flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH. */
+/* Return values: */
+/* MZ_OK on success (when flushing, or if more input is needed but not available, and/or there's more output to be written but the output buffer is full). */
+/* MZ_STREAM_END if all input has been consumed and all output bytes have been written. Don't call mz_deflate() on the stream anymore. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+/* MZ_PARAM_ERROR if one of the parameters is invalid. */
+/* MZ_BUF_ERROR if no forward progress is possible because the input and/or output buffers are empty. (Fill up the input buffer or free up some output space and try again.) */
+int mz_deflate(mz_streamp pStream, int flush);
+
+/* mz_deflateEnd() deinitializes a compressor: */
+/* Return values: */
+/* MZ_OK on success. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+int mz_deflateEnd(mz_streamp pStream);
+
+/* mz_deflateBound() returns a (very) conservative upper bound on the amount of data that could be generated by deflate(), assuming flush is set to only MZ_NO_FLUSH or MZ_FINISH. */
+mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
+
+/* Single-call compression functions mz_compress() and mz_compress2(): */
+/* Returns MZ_OK on success, or one of the error codes from mz_deflate() on failure. */
+int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
+int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level);
+
+/* mz_compressBound() returns a (very) conservative upper bound on the amount of data that could be generated by calling mz_compress(). */
+mz_ulong mz_compressBound(mz_ulong source_len);
+
+/* Initializes a decompressor. */
+int mz_inflateInit(mz_streamp pStream);
+
+/* mz_inflateInit2() is like mz_inflateInit() with an additional option that controls the window size and whether or not the stream has been wrapped with a zlib header/footer: */
+/* window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate). */
+int mz_inflateInit2(mz_streamp pStream, int window_bits);
+
+/* Quickly resets a decompressor without having to reallocate anything. Same as calling mz_inflateEnd() followed by mz_inflateInit()/mz_inflateInit2(). */
+int mz_inflateReset(mz_streamp pStream);
+
+/* Decompresses the input stream to the output, consuming only as much of the input as needed, and writing as much to the output as possible. */
+/* Parameters: */
+/* pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */
+/* flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. */
+/* On the first call, if flush is MZ_FINISH it's assumed the input and output buffers are both sized large enough to decompress the entire stream in a single call (this is slightly faster). */
+/* MZ_FINISH implies that there are no more source bytes available beside what's already in the input buffer, and that the output buffer is large enough to hold the rest of the decompressed data. */
+/* Return values: */
+/* MZ_OK on success. Either more input is needed but not available, and/or there's more output to be written but the output buffer is full. */
+/* MZ_STREAM_END if all needed input has been consumed and all output bytes have been written. For zlib streams, the adler-32 of the decompressed data has also been verified. */
+/* MZ_STREAM_ERROR if the stream is bogus. */
+/* MZ_DATA_ERROR if the deflate stream is invalid. */
+/* MZ_PARAM_ERROR if one of the parameters is invalid. */
+/* MZ_BUF_ERROR if no forward progress is possible because the input buffer is empty but the inflater needs more input to continue, or if the output buffer is not large enough. Call mz_inflate() again */
+/* with more input data, or with more room in the output buffer (except when using single call decompression, described above). */
+int mz_inflate(mz_streamp pStream, int flush);
+
+/* Deinitializes a decompressor. */
+int mz_inflateEnd(mz_streamp pStream);
+
+/* Single-call decompression. */
+/* Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. */
+int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
+
+/* Returns a string description of the specified error code, or NULL if the error code is invalid. */
+const char *mz_error(int err);
+
+#endif /* MINIZ_NO_ZLIB_APIS */
+
+#ifdef __cplusplus
+}
+#endif
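One subtlety in the declarations above: window_bits is a mode switch, not just a size. Positive MZ_DEFAULT_WINDOW_BITS wraps the stream in a zlib header plus adler-32 footer, while the negated value produces a raw deflate stream, and both ends must agree. A small hedged helper sketch (init_deflate is an illustrative name, reusing the malloc-backed allocators from the earlier sketch):

#include <string.h>
#include "miniz.h"

/* Pick zlib-wrapped or raw deflate at init time. Allocators are required
 * because the vendored MZ_MALLOC is a NULL stub. */
int init_deflate(mz_stream *s, int raw, mz_alloc_func a, mz_free_func f)
{
	memset(s, 0, sizeof(*s));
	s->zalloc = a;
	s->zfree = f;
	return mz_deflateInit2(s, MZ_DEFAULT_LEVEL, MZ_DEFLATED,
			       raw ? -MZ_DEFAULT_WINDOW_BITS
				   : MZ_DEFAULT_WINDOW_BITS,
			       9, MZ_DEFAULT_STRATEGY);
}

Once initialized, mz_deflateBound() gives a conservative output-buffer size to reserve before the first mz_deflate() call, avoiding MZ_BUF_ERROR retries.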
diff --git a/platform/linux-generic/miniz/miniz_common.h b/platform/linux-generic/miniz/miniz_common.h
new file mode 100644
index 000000000..0945775c8
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_common.h
@@ -0,0 +1,68 @@
+#pragma once
+#include <assert.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+/* ------------------- Types and macros */
+typedef unsigned char mz_uint8;
+typedef signed short mz_int16;
+typedef unsigned short mz_uint16;
+typedef unsigned int mz_uint32;
+typedef unsigned int mz_uint;
+typedef int64_t mz_int64;
+typedef uint64_t mz_uint64;
+typedef int mz_bool;
+
+#define MZ_FALSE (0)
+#define MZ_TRUE (1)
+
+/* Works around MSVC's spammy "warning C4127: conditional expression is constant" message. */
+#ifdef _MSC_VER
+#define MZ_MACRO_END while (0, 0)
+#else
+#define MZ_MACRO_END while (0)
+#endif
+
+#define MZ_ASSERT(x) assert(x)
+
+#define MZ_MALLOC(x) NULL
+#define MZ_FREE(x) (void)x, ((void)0)
+#define MZ_REALLOC(p, x) NULL
+
+#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
+#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
+#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
+#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
+#else
+#define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
+#define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
+#endif
+
+#define MZ_READ_LE64(p) (((mz_uint64)MZ_READ_LE32(p)) | (((mz_uint64)MZ_READ_LE32((const mz_uint8 *)(p) + sizeof(mz_uint32))) << 32U))
+
+#ifdef _MSC_VER
+#define MZ_FORCEINLINE __forceinline
+#elif defined(__GNUC__)
+#define MZ_FORCEINLINE __inline__ __attribute__((__always_inline__))
+#else
+#define MZ_FORCEINLINE inline
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern void *miniz_def_alloc_func(void *opaque, size_t items, size_t size);
+extern void miniz_def_free_func(void *opaque, void *address);
+extern void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size);
+
+#define MZ_UINT16_MAX (0xFFFFU)
+#define MZ_UINT32_MAX (0xFFFFFFFFU)
+
+#ifdef __cplusplus
+}
+#endif
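The MZ_READ_LE16/MZ_READ_LE32 macros above deserve a closer look: with the unaligned-load shortcut disabled (as it is here, since MINIZ_USE_UNALIGNED_LOADS_AND_STORES is 0), they assemble values byte by byte, which is safe at any alignment and on any host endianness. A standalone sketch of the same idiom:

#include <stdint.h>
#include <stdio.h>

/* Byte-wise form of MZ_READ_LE32: reads a 32-bit little-endian value from
 * any (possibly unaligned) address, independent of host byte order. */
static uint32_t read_le32(const void *p)
{
	const uint8_t *b = (const uint8_t *)p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

int main(void)
{
	const uint8_t buf[5] = { 0xFF, 0x78, 0x56, 0x34, 0x12 };

	/* Unaligned read starting at offset 1. */
	printf("0x%08x\n", read_le32(buf + 1)); /* prints 0x12345678 */
	return 0;
}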
diff --git a/platform/linux-generic/miniz/miniz_tdef.c b/platform/linux-generic/miniz/miniz_tdef.c
new file mode 100644
index 000000000..477a1c5df
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tdef.c
@@ -0,0 +1,1564 @@
+/**************************************************************************
+ *
+ * Copyright 2013-2014 RAD Game Tools and Valve Software
+ * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "miniz_tdef.h"
+#include "miniz.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- Low-level Compression (independent from all decompression API's) */
+
+/* Purposely making these tables static for faster init and thread safety. */
+static const mz_uint16 s_tdefl_len_sym[256] =
+ {
+ 257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
+ 273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
+ 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
+ 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
+ 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
+ 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
+ 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
+ 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
+ };
+
+static const mz_uint8 s_tdefl_len_extra[256] =
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
+ };
+
+static const mz_uint8 s_tdefl_small_dist_sym[512] =
+ {
+ 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+ 14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
+ 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
+ };
+
+static const mz_uint8 s_tdefl_small_dist_extra[512] =
+ {
+ 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7
+ };
+
+static const mz_uint8 s_tdefl_large_dist_sym[128] =
+ {
+ 0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+ 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
+ };
+
+static const mz_uint8 s_tdefl_large_dist_extra[128] =
+ {
+ 0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
+ };
+
+/* Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values. */
+typedef struct
+{
+ mz_uint16 m_key, m_sym_index;
+} tdefl_sym_freq;
+static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1)
+{
+ mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
+ tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
+ MZ_CLEAR_OBJ(hist);
+ for (i = 0; i < num_syms; i++)
+ {
+ mz_uint freq = pSyms0[i].m_key;
+ hist[freq & 0xFF]++;
+ hist[256 + ((freq >> 8) & 0xFF)]++;
+ }
+ while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
+ total_passes--;
+ for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
+ {
+ const mz_uint32 *pHist = &hist[pass << 8];
+ mz_uint offsets[256], cur_ofs = 0;
+ for (i = 0; i < 256; i++)
+ {
+ offsets[i] = cur_ofs;
+ cur_ofs += pHist[i];
+ }
+ for (i = 0; i < num_syms; i++)
+ pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
+ {
+ tdefl_sym_freq *t = pCur_syms;
+ pCur_syms = pNew_syms;
+ pNew_syms = t;
+ }
+ }
+ return pCur_syms;
+}
+
+/* tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. */
+static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
+{
+ int root, leaf, next, avbl, used, dpth;
+ if (n == 0)
+ return;
+ else if (n == 1)
+ {
+ A[0].m_key = 1;
+ return;
+ }
+ A[0].m_key += A[1].m_key;
+ root = 0;
+ leaf = 2;
+ for (next = 1; next < n - 1; next++)
+ {
+ if (leaf >= n || A[root].m_key < A[leaf].m_key)
+ {
+ A[next].m_key = A[root].m_key;
+ A[root++].m_key = (mz_uint16)next;
+ }
+ else
+ A[next].m_key = A[leaf++].m_key;
+ if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key))
+ {
+ A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
+ A[root++].m_key = (mz_uint16)next;
+ }
+ else
+ A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
+ }
+ A[n - 2].m_key = 0;
+ for (next = n - 3; next >= 0; next--)
+ A[next].m_key = A[A[next].m_key].m_key + 1;
+ avbl = 1;
+ used = dpth = 0;
+ root = n - 2;
+ next = n - 1;
+ while (avbl > 0)
+ {
+ while (root >= 0 && (int)A[root].m_key == dpth)
+ {
+ used++;
+ root--;
+ }
+ while (avbl > used)
+ {
+ A[next--].m_key = (mz_uint16)(dpth);
+ avbl--;
+ }
+ avbl = 2 * used;
+ dpth++;
+ used = 0;
+ }
+}
+
+/* Limits canonical Huffman code table's max code size. */
+enum
+{
+ TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32
+};
+static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
+{
+ int i;
+ mz_uint32 total = 0;
+ if (code_list_len <= 1)
+ return;
+ for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
+ pNum_codes[max_code_size] += pNum_codes[i];
+ for (i = max_code_size; i > 0; i--)
+ total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
+ while (total != (1UL << max_code_size))
+ {
+ pNum_codes[max_code_size]--;
+ for (i = max_code_size - 1; i > 0; i--)
+ if (pNum_codes[i])
+ {
+ pNum_codes[i]--;
+ pNum_codes[i + 1] += 2;
+ break;
+ }
+ total--;
+ }
+}
+
+static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
+{
+ int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
+ mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
+ MZ_CLEAR_OBJ(num_codes);
+ if (static_table)
+ {
+ for (i = 0; i < table_len; i++)
+ num_codes[d->m_huff_code_sizes[table_num][i]]++;
+ }
+ else
+ {
+ tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
+ int num_used_syms = 0;
+ const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
+ for (i = 0; i < table_len; i++)
+ if (pSym_count[i])
+ {
+ syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
+ syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
+ }
+
+ pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
+ tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);
+
+ for (i = 0; i < num_used_syms; i++)
+ num_codes[pSyms[i].m_key]++;
+
+ tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);
+
+ MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
+ MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
+ for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
+ for (l = num_codes[i]; l > 0; l--)
+ d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
+ }
+
+ next_code[1] = 0;
+ for (j = 0, i = 2; i <= code_size_limit; i++)
+ next_code[i] = j = ((j + num_codes[i - 1]) << 1);
+
+ for (i = 0; i < table_len; i++)
+ {
+ mz_uint rev_code = 0, code, code_size;
+ if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
+ continue;
+ code = next_code[code_size]++;
+ for (l = code_size; l > 0; l--, code >>= 1)
+ rev_code = (rev_code << 1) | (code & 1);
+ d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
+ }
+}
+
+#define TDEFL_PUT_BITS(b, l) \
+ do \
+ { \
+ mz_uint bits = b; \
+ mz_uint len = l; \
+ MZ_ASSERT(bits <= ((1U << len) - 1U)); \
+ d->m_bit_buffer |= (bits << d->m_bits_in); \
+ d->m_bits_in += len; \
+ while (d->m_bits_in >= 8) \
+ { \
+ if (d->m_pOutput_buf < d->m_pOutput_buf_end) \
+ *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
+ d->m_bit_buffer >>= 8; \
+ d->m_bits_in -= 8; \
+ } \
+ } \
+ MZ_MACRO_END
+
+#define TDEFL_RLE_PREV_CODE_SIZE() \
+ { \
+ if (rle_repeat_count) \
+ { \
+ if (rle_repeat_count < 3) \
+ { \
+ d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
+ while (rle_repeat_count--) \
+ packed_code_sizes[num_packed_code_sizes++] = prev_code_size; \
+ } \
+ else \
+ { \
+ d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 16; \
+ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3); \
+ } \
+ rle_repeat_count = 0; \
+ } \
+ }
+
+#define TDEFL_RLE_ZERO_CODE_SIZE() \
+ { \
+ if (rle_z_count) \
+ { \
+ if (rle_z_count < 3) \
+ { \
+ d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count); \
+ while (rle_z_count--) \
+ packed_code_sizes[num_packed_code_sizes++] = 0; \
+ } \
+ else if (rle_z_count <= 10) \
+ { \
+ d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 17; \
+ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3); \
+ } \
+ else \
+ { \
+ d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1); \
+ packed_code_sizes[num_packed_code_sizes++] = 18; \
+ packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
+ } \
+ rle_z_count = 0; \
+ } \
+ }
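+
+/* Editorial note (not upstream miniz): the two macros above emit the run symbols of the
+   DEFLATE code-length alphabet (RFC 1951): code 16 repeats the previous length 3-6 times,
+   code 17 emits a run of 3-10 zero lengths, code 18 a run of 11-138 zero lengths, and
+   runs too short to pack are written out literally. */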
+
+static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+
+static void tdefl_start_dynamic_block(tdefl_compressor *d)
+{
+ int num_lit_codes, num_dist_codes, num_bit_lengths;
+ mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
+ mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;
+
+ d->m_huff_count[0][256] = 1;
+
+ tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
+ tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);
+
+ for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
+ if (d->m_huff_code_sizes[0][num_lit_codes - 1])
+ break;
+ for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
+ if (d->m_huff_code_sizes[1][num_dist_codes - 1])
+ break;
+
+ memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
+ memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
+ total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
+ num_packed_code_sizes = 0;
+ rle_z_count = 0;
+ rle_repeat_count = 0;
+
+ memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
+ for (i = 0; i < total_code_sizes_to_pack; i++)
+ {
+ mz_uint8 code_size = code_sizes_to_pack[i];
+ if (!code_size)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ if (++rle_z_count == 138)
+ {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ }
+ }
+ else
+ {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ if (code_size != prev_code_size)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1);
+ packed_code_sizes[num_packed_code_sizes++] = code_size;
+ }
+ else if (++rle_repeat_count == 6)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ }
+ }
+ prev_code_size = code_size;
+ }
+ if (rle_repeat_count)
+ {
+ TDEFL_RLE_PREV_CODE_SIZE();
+ }
+ else
+ {
+ TDEFL_RLE_ZERO_CODE_SIZE();
+ }
+
+ tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);
+
+ TDEFL_PUT_BITS(2, 2);
+
+ TDEFL_PUT_BITS(num_lit_codes - 257, 5);
+ TDEFL_PUT_BITS(num_dist_codes - 1, 5);
+
+ for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
+ if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
+ break;
+ num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
+ TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
+ for (i = 0; (int)i < num_bit_lengths; i++)
+ TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);
+
+ for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;)
+ {
+ mz_uint code = packed_code_sizes[packed_code_sizes_index++];
+ MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
+ TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
+ if (code >= 16)
+ TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
+ }
+}
+
+static void tdefl_start_static_block(tdefl_compressor *d)
+{
+ mz_uint i;
+ mz_uint8 *p = &d->m_huff_code_sizes[0][0];
+
+ for (i = 0; i <= 143; ++i)
+ *p++ = 8;
+ for (; i <= 255; ++i)
+ *p++ = 9;
+ for (; i <= 279; ++i)
+ *p++ = 7;
+ for (; i <= 287; ++i)
+ *p++ = 8;
+
+ memset(d->m_huff_code_sizes[1], 5, 32);
+
+ tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
+ tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);
+
+ TDEFL_PUT_BITS(1, 2);
+}
+
+static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
+static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
+{
+ mz_uint flags;
+ mz_uint8 *pLZ_codes;
+ mz_uint8 *pOutput_buf = d->m_pOutput_buf;
+ mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
+ mz_uint64 bit_buffer = d->m_bit_buffer;
+ mz_uint bits_in = d->m_bits_in;
+
+#define TDEFL_PUT_BITS_FAST(b, l) \
+ { \
+ bit_buffer |= (((mz_uint64)(b)) << bits_in); \
+ bits_in += (l); \
+ }
+
+ flags = 1;
+ for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1)
+ {
+ if (flags == 1)
+ flags = *pLZ_codes++ | 0x100;
+
+ if (flags & 1)
+ {
+ mz_uint s0, s1, n0, n1, sym, num_extra_bits;
+ mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
+ pLZ_codes += 3;
+
+ MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
+
+ /* This sequence coaxes MSVC into using cmov's vs. jmp's. */
+ s0 = s_tdefl_small_dist_sym[match_dist & 511];
+ n0 = s_tdefl_small_dist_extra[match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[match_dist >> 8];
+ n1 = s_tdefl_large_dist_extra[match_dist >> 8];
+ sym = (match_dist < 512) ? s0 : s1;
+ num_extra_bits = (match_dist < 512) ? n0 : n1;
+
+ MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
+ }
+ else
+ {
+ mz_uint lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+
+ if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
+ {
+ flags >>= 1;
+ lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+
+ if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
+ {
+ flags >>= 1;
+ lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+ }
+ }
+ }
+
+ if (pOutput_buf >= d->m_pOutput_buf_end)
+ return MZ_FALSE;
+
+ *(mz_uint64 *)pOutput_buf = bit_buffer;
+ pOutput_buf += (bits_in >> 3);
+ bit_buffer >>= (bits_in & ~7);
+ bits_in &= 7;
+ }
+
+#undef TDEFL_PUT_BITS_FAST
+
+ d->m_pOutput_buf = pOutput_buf;
+ d->m_bits_in = 0;
+ d->m_bit_buffer = 0;
+
+ while (bits_in)
+ {
+ mz_uint32 n = MZ_MIN(bits_in, 16);
+ TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
+ bit_buffer >>= n;
+ bits_in -= n;
+ }
+
+ TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
+
+ return (d->m_pOutput_buf < d->m_pOutput_buf_end);
+}
+#else
+static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
+{
+ mz_uint flags;
+ mz_uint8 *pLZ_codes;
+
+ flags = 1;
+ for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1)
+ {
+ if (flags == 1)
+ flags = *pLZ_codes++ | 0x100;
+ if (flags & 1)
+ {
+ mz_uint sym, num_extra_bits;
+ mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
+ pLZ_codes += 3;
+
+ MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
+ TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);
+
+ if (match_dist < 512)
+ {
+ sym = s_tdefl_small_dist_sym[match_dist];
+ num_extra_bits = s_tdefl_small_dist_extra[match_dist];
+ }
+ else
+ {
+ sym = s_tdefl_large_dist_sym[match_dist >> 8];
+ num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
+ }
+ MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
+ TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
+ }
+ else
+ {
+ mz_uint lit = *pLZ_codes++;
+ MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
+ TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
+ }
+ }
+
+ TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);
+
+ return (d->m_pOutput_buf < d->m_pOutput_buf_end);
+}
+#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS */
+
+static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
+{
+ if (static_block)
+ tdefl_start_static_block(d);
+ else
+ tdefl_start_dynamic_block(d);
+ return tdefl_compress_lz_codes(d);
+}
+
+static int tdefl_flush_block(tdefl_compressor *d, int flush)
+{
+ mz_uint saved_bit_buf, saved_bits_in;
+ mz_uint8 *pSaved_output_buf;
+ mz_bool comp_block_succeeded = MZ_FALSE;
+ int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
+ mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;
+
+ d->m_pOutput_buf = pOutput_buf_start;
+ d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;
+
+ MZ_ASSERT(!d->m_output_flush_remaining);
+ d->m_output_flush_ofs = 0;
+ d->m_output_flush_remaining = 0;
+
+ *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
+ d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);
+
+ if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
+ {
+ TDEFL_PUT_BITS(0x78, 8);
+ TDEFL_PUT_BITS(0x01, 8);
+ }
+
+ TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);
+
+ pSaved_output_buf = d->m_pOutput_buf;
+ saved_bit_buf = d->m_bit_buffer;
+ saved_bits_in = d->m_bits_in;
+
+ if (!use_raw_block)
+ comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));
+
+ /* If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. */
+ if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
+ ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size))
+ {
+ mz_uint i;
+ d->m_pOutput_buf = pSaved_output_buf;
+ d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
+ TDEFL_PUT_BITS(0, 2);
+ if (d->m_bits_in)
+ {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF)
+ {
+ TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
+ }
+ for (i = 0; i < d->m_total_lz_bytes; ++i)
+ {
+ TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
+ }
+ }
+ /* Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes. */
+ else if (!comp_block_succeeded)
+ {
+ d->m_pOutput_buf = pSaved_output_buf;
+ d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
+ tdefl_compress_block(d, MZ_TRUE);
+ }
+
+ if (flush)
+ {
+ if (flush == TDEFL_FINISH)
+ {
+ if (d->m_bits_in)
+ {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER)
+ {
+ mz_uint i, a = d->m_adler32;
+ for (i = 0; i < 4; i++)
+ {
+ TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
+ a <<= 8;
+ }
+ }
+ }
+ else
+ {
+ mz_uint i, z = 0;
+ TDEFL_PUT_BITS(0, 3);
+ if (d->m_bits_in)
+ {
+ TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
+ }
+ for (i = 2; i; --i, z ^= 0xFFFF)
+ {
+ TDEFL_PUT_BITS(z & 0xFFFF, 16);
+ }
+ }
+ }
+
+ MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);
+
+ memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
+ memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
+
+ d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
+ d->m_pLZ_flags = d->m_lz_code_buf;
+ d->m_num_flags_left = 8;
+ d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
+ d->m_total_lz_bytes = 0;
+ d->m_block_index++;
+
+ if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0)
+ {
+ if (d->m_pPut_buf_func)
+ {
+ *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
+ if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
+ return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
+ }
+ else if (pOutput_buf_start == d->m_output_buf)
+ {
+ int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
+ memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
+ d->m_out_buf_ofs += bytes_to_copy;
+ if ((n -= bytes_to_copy) != 0)
+ {
+ d->m_output_flush_ofs = bytes_to_copy;
+ d->m_output_flush_remaining = n;
+ }
+ }
+ else
+ {
+ d->m_out_buf_ofs += n;
+ }
+ }
+
+ return d->m_output_flush_remaining;
+}
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+static mz_uint16 TDEFL_READ_UNALIGNED_WORD(const mz_uint8* p)
+{
+ mz_uint16 ret;
+ memcpy(&ret, p, sizeof(mz_uint16));
+ return ret;
+}
+static mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p)
+{
+ mz_uint16 ret;
+ memcpy(&ret, p, sizeof(mz_uint16));
+ return ret;
+}
+#else
+#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
+#define TDEFL_READ_UNALIGNED_WORD2(p) *(const mz_uint16 *)(p)
+#endif
+static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
+{
+ mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
+ mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
+ const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
+ mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD2(s);
+ MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
+ if (max_match_len <= match_len)
+ return;
+ for (;;)
+ {
+ for (;;)
+ {
+ if (--num_probes_left == 0)
+ return;
+#define TDEFL_PROBE \
+ next_probe_pos = d->m_next[probe_pos]; \
+ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
+ return; \
+ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
+ if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01) \
+ break;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ }
+ if (!dist)
+ break;
+ q = (const mz_uint16 *)(d->m_dict + probe_pos);
+ if (TDEFL_READ_UNALIGNED_WORD2(q) != s01)
+ continue;
+ p = s;
+ probe_len = 32;
+ do
+ {
+ } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
+ (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
+ if (!probe_len)
+ {
+ *pMatch_dist = dist;
+ *pMatch_len = MZ_MIN(max_match_len, (mz_uint)TDEFL_MAX_MATCH_LEN);
+ break;
+ }
+ else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len)
+ {
+ *pMatch_dist = dist;
+ if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len)
+ break;
+ c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
+ }
+ }
+}
+#else
+static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
+{
+ mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
+ mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
+ const mz_uint8 *s = d->m_dict + pos, *p, *q;
+ mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
+ MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
+ if (max_match_len <= match_len)
+ return;
+ for (;;)
+ {
+ for (;;)
+ {
+ if (--num_probes_left == 0)
+ return;
+#define TDEFL_PROBE \
+ next_probe_pos = d->m_next[probe_pos]; \
+ if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
+ return; \
+ probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK; \
+ if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) \
+ break;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ TDEFL_PROBE;
+ }
+ if (!dist)
+ break;
+ p = s;
+ q = d->m_dict + probe_pos;
+ for (probe_len = 0; probe_len < max_match_len; probe_len++)
+ if (*p++ != *q++)
+ break;
+ if (probe_len > match_len)
+ {
+ *pMatch_dist = dist;
+ if ((*pMatch_len = match_len = probe_len) == max_match_len)
+ return;
+ c0 = d->m_dict[pos + match_len];
+ c1 = d->m_dict[pos + match_len - 1];
+ }
+ }
+}
+#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES */
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+static mz_uint32 TDEFL_READ_UNALIGNED_WORD32(const mz_uint8* p)
+{
+ mz_uint32 ret;
+ memcpy(&ret, p, sizeof(mz_uint32));
+ return ret;
+}
+#else
+#define TDEFL_READ_UNALIGNED_WORD32(p) *(const mz_uint32 *)(p)
+#endif
+static mz_bool tdefl_compress_fast(tdefl_compressor *d)
+{
+ /* Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio. */
+ mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
+ mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
+ mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
+
+ while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size)))
+ {
+ const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
+ mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
+ mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
+ d->m_src_buf_left -= num_bytes_to_process;
+ lookahead_size += num_bytes_to_process;
+
+ while (num_bytes_to_process)
+ {
+ mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
+ memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
+ if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+ memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
+ d->m_pSrc += n;
+ dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
+ num_bytes_to_process -= n;
+ }
+
+ dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
+ if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
+ break;
+
+ while (lookahead_size >= 4)
+ {
+ mz_uint cur_match_dist, cur_match_len = 1;
+ mz_uint8 *pCur_dict = d->m_dict + cur_pos;
+ mz_uint first_trigram = TDEFL_READ_UNALIGNED_WORD32(pCur_dict) & 0xFFFFFF;
+ mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
+ mz_uint probe_pos = d->m_hash[hash];
+ d->m_hash[hash] = (mz_uint16)lookahead_pos;
+
+ if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((TDEFL_READ_UNALIGNED_WORD32(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram))
+ {
+ const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
+ const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
+ mz_uint32 probe_len = 32;
+ do
+ {
+ } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
+ (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
+ cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
+ if (!probe_len)
+ cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;
+
+ if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)))
+ {
+ cur_match_len = 1;
+ *pLZ_code_buf++ = (mz_uint8)first_trigram;
+ *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+ d->m_huff_count[0][(mz_uint8)first_trigram]++;
+ }
+ else
+ {
+ mz_uint32 s0, s1;
+ cur_match_len = MZ_MIN(cur_match_len, lookahead_size);
+
+ MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));
+
+ cur_match_dist--;
+
+ pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+ memcpy(&pLZ_code_buf[1], &cur_match_dist, sizeof(cur_match_dist));
+#else
+ *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
+#endif
+ pLZ_code_buf += 3;
+ *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);
+
+ s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
+ d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;
+
+ d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
+ }
+ }
+ else
+ {
+ *pLZ_code_buf++ = (mz_uint8)first_trigram;
+ *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+ d->m_huff_count[0][(mz_uint8)first_trigram]++;
+ }
+
+ if (--num_flags_left == 0)
+ {
+ num_flags_left = 8;
+ pLZ_flags = pLZ_code_buf++;
+ }
+
+ total_lz_bytes += cur_match_len;
+ lookahead_pos += cur_match_len;
+ dict_size = MZ_MIN(dict_size + cur_match_len, (mz_uint)TDEFL_LZ_DICT_SIZE);
+ cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
+ MZ_ASSERT(lookahead_size >= cur_match_len);
+ lookahead_size -= cur_match_len;
+
+ if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
+ {
+ int n;
+ d->m_lookahead_pos = lookahead_pos;
+ d->m_lookahead_size = lookahead_size;
+ d->m_dict_size = dict_size;
+ d->m_total_lz_bytes = total_lz_bytes;
+ d->m_pLZ_code_buf = pLZ_code_buf;
+ d->m_pLZ_flags = pLZ_flags;
+ d->m_num_flags_left = num_flags_left;
+ if ((n = tdefl_flush_block(d, 0)) != 0)
+ return (n < 0) ? MZ_FALSE : MZ_TRUE;
+ total_lz_bytes = d->m_total_lz_bytes;
+ pLZ_code_buf = d->m_pLZ_code_buf;
+ pLZ_flags = d->m_pLZ_flags;
+ num_flags_left = d->m_num_flags_left;
+ }
+ }
+
+ while (lookahead_size)
+ {
+ mz_uint8 lit = d->m_dict[cur_pos];
+
+ total_lz_bytes++;
+ *pLZ_code_buf++ = lit;
+ *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
+ if (--num_flags_left == 0)
+ {
+ num_flags_left = 8;
+ pLZ_flags = pLZ_code_buf++;
+ }
+
+ d->m_huff_count[0][lit]++;
+
+ lookahead_pos++;
+ dict_size = MZ_MIN(dict_size + 1, (mz_uint)TDEFL_LZ_DICT_SIZE);
+ cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
+ lookahead_size--;
+
+ if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
+ {
+ int n;
+ d->m_lookahead_pos = lookahead_pos;
+ d->m_lookahead_size = lookahead_size;
+ d->m_dict_size = dict_size;
+ d->m_total_lz_bytes = total_lz_bytes;
+ d->m_pLZ_code_buf = pLZ_code_buf;
+ d->m_pLZ_flags = pLZ_flags;
+ d->m_num_flags_left = num_flags_left;
+ if ((n = tdefl_flush_block(d, 0)) != 0)
+ return (n < 0) ? MZ_FALSE : MZ_TRUE;
+ total_lz_bytes = d->m_total_lz_bytes;
+ pLZ_code_buf = d->m_pLZ_code_buf;
+ pLZ_flags = d->m_pLZ_flags;
+ num_flags_left = d->m_num_flags_left;
+ }
+ }
+ }
+
+ d->m_lookahead_pos = lookahead_pos;
+ d->m_lookahead_size = lookahead_size;
+ d->m_dict_size = dict_size;
+ d->m_total_lz_bytes = total_lz_bytes;
+ d->m_pLZ_code_buf = pLZ_code_buf;
+ d->m_pLZ_flags = pLZ_flags;
+ d->m_num_flags_left = num_flags_left;
+ return MZ_TRUE;
+}
+#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */
+
+static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
+{
+ d->m_total_lz_bytes++;
+ *d->m_pLZ_code_buf++ = lit;
+ *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
+ if (--d->m_num_flags_left == 0)
+ {
+ d->m_num_flags_left = 8;
+ d->m_pLZ_flags = d->m_pLZ_code_buf++;
+ }
+ d->m_huff_count[0][lit]++;
+}
+
+static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
+{
+ mz_uint32 s0, s1;
+
+ MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));
+
+ d->m_total_lz_bytes += match_len;
+
+ d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);
+
+ match_dist -= 1;
+ d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
+ d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
+ d->m_pLZ_code_buf += 3;
+
+ *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
+ if (--d->m_num_flags_left == 0)
+ {
+ d->m_num_flags_left = 8;
+ d->m_pLZ_flags = d->m_pLZ_code_buf++;
+ }
+
+ s0 = s_tdefl_small_dist_sym[match_dist & 511];
+ s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
+ d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;
+
+ if (match_len >= TDEFL_MIN_MATCH_LEN)
+ d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
+}
+
+static mz_bool tdefl_compress_normal(tdefl_compressor *d)
+{
+ const mz_uint8 *pSrc = d->m_pSrc;
+ size_t src_buf_left = d->m_src_buf_left;
+ tdefl_flush flush = d->m_flush;
+
+ while ((src_buf_left) || ((flush) && (d->m_lookahead_size)))
+ {
+ mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
+ /* Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN. */
+ if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1))
+ {
+ mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
+ mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
+ mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
+ const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
+ src_buf_left -= num_bytes_to_process;
+ d->m_lookahead_size += num_bytes_to_process;
+ while (pSrc != pSrc_end)
+ {
+ mz_uint8 c = *pSrc++;
+ d->m_dict[dst_pos] = c;
+ if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+ d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
+ hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
+ d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
+ d->m_hash[hash] = (mz_uint16)(ins_pos);
+ dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
+ ins_pos++;
+ }
+ }
+ else
+ {
+ while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
+ {
+ mz_uint8 c = *pSrc++;
+ mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
+ src_buf_left--;
+ d->m_dict[dst_pos] = c;
+ if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
+ d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
+ if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN)
+ {
+ mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
+ mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
+ d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
+ d->m_hash[hash] = (mz_uint16)(ins_pos);
+ }
+ }
+ }
+ d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
+ if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
+ break;
+
+ /* Simple lazy/greedy parsing state machine. */
+ len_to_move = 1;
+ cur_match_dist = 0;
+ cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
+ cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
+ if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS))
+ {
+ if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))
+ {
+ mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
+ cur_match_len = 0;
+ while (cur_match_len < d->m_lookahead_size)
+ {
+ if (d->m_dict[cur_pos + cur_match_len] != c)
+ break;
+ cur_match_len++;
+ }
+ if (cur_match_len < TDEFL_MIN_MATCH_LEN)
+ cur_match_len = 0;
+ else
+ cur_match_dist = 1;
+ }
+ }
+ else
+ {
+ tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
+ }
+ if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5)))
+ {
+ cur_match_dist = cur_match_len = 0;
+ }
+ if (d->m_saved_match_len)
+ {
+ if (cur_match_len > d->m_saved_match_len)
+ {
+ tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
+ if (cur_match_len >= 128)
+ {
+ tdefl_record_match(d, cur_match_len, cur_match_dist);
+ d->m_saved_match_len = 0;
+ len_to_move = cur_match_len;
+ }
+ else
+ {
+ d->m_saved_lit = d->m_dict[cur_pos];
+ d->m_saved_match_dist = cur_match_dist;
+ d->m_saved_match_len = cur_match_len;
+ }
+ }
+ else
+ {
+ tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
+ len_to_move = d->m_saved_match_len - 1;
+ d->m_saved_match_len = 0;
+ }
+ }
+ else if (!cur_match_dist)
+ tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
+ else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128))
+ {
+ tdefl_record_match(d, cur_match_len, cur_match_dist);
+ len_to_move = cur_match_len;
+ }
+ else
+ {
+ d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
+ d->m_saved_match_dist = cur_match_dist;
+ d->m_saved_match_len = cur_match_len;
+ }
+ /* Move the lookahead forward by len_to_move bytes. */
+ d->m_lookahead_pos += len_to_move;
+ MZ_ASSERT(d->m_lookahead_size >= len_to_move);
+ d->m_lookahead_size -= len_to_move;
+ d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
+ /* Check if it's time to flush the current LZ codes to the internal output buffer. */
+ if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
+ ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))))
+ {
+ int n;
+ d->m_pSrc = pSrc;
+ d->m_src_buf_left = src_buf_left;
+ if ((n = tdefl_flush_block(d, 0)) != 0)
+ return (n < 0) ? MZ_FALSE : MZ_TRUE;
+ }
+ }
+
+ d->m_pSrc = pSrc;
+ d->m_src_buf_left = src_buf_left;
+ return MZ_TRUE;
+}
+
+static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
+{
+ if (d->m_pIn_buf_size)
+ {
+ *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
+ }
+
+ if (d->m_pOut_buf_size)
+ {
+ size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
+ memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
+ d->m_output_flush_ofs += (mz_uint)n;
+ d->m_output_flush_remaining -= (mz_uint)n;
+ d->m_out_buf_ofs += n;
+
+ *d->m_pOut_buf_size = d->m_out_buf_ofs;
+ }
+
+ return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
+}
+
+tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
+{
+ if (!d)
+ {
+ if (pIn_buf_size)
+ *pIn_buf_size = 0;
+ if (pOut_buf_size)
+ *pOut_buf_size = 0;
+ return TDEFL_STATUS_BAD_PARAM;
+ }
+
+ d->m_pIn_buf = pIn_buf;
+ d->m_pIn_buf_size = pIn_buf_size;
+ d->m_pOut_buf = pOut_buf;
+ d->m_pOut_buf_size = pOut_buf_size;
+ d->m_pSrc = (const mz_uint8 *)(pIn_buf);
+ d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
+ d->m_out_buf_ofs = 0;
+ d->m_flush = flush;
+
+ if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
+ (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf))
+ {
+ if (pIn_buf_size)
+ *pIn_buf_size = 0;
+ if (pOut_buf_size)
+ *pOut_buf_size = 0;
+ return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
+ }
+ d->m_wants_to_finish |= (flush == TDEFL_FINISH);
+
+ if ((d->m_output_flush_remaining) || (d->m_finished))
+ return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
+
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
+ if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
+ ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
+ ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
+ {
+ if (!tdefl_compress_fast(d))
+ return d->m_prev_return_status;
+ }
+ else
+#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */
+ {
+ if (!tdefl_compress_normal(d))
+ return d->m_prev_return_status;
+ }
+
+ if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
+ d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);
+
+ if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining))
+ {
+ if (tdefl_flush_block(d, flush) < 0)
+ return d->m_prev_return_status;
+ d->m_finished = (flush == TDEFL_FINISH);
+ if (flush == TDEFL_FULL_FLUSH)
+ {
+ MZ_CLEAR_OBJ(d->m_hash);
+ MZ_CLEAR_OBJ(d->m_next);
+ d->m_dict_size = 0;
+ }
+ }
+
+ return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
+}
+
+tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
+{
+ MZ_ASSERT(d->m_pPut_buf_func);
+ return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
+}
+
+tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
+{
+ d->m_pPut_buf_func = pPut_buf_func;
+ d->m_pPut_buf_user = pPut_buf_user;
+ d->m_flags = (mz_uint)(flags);
+ d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
+ d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
+ d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
+ if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
+ MZ_CLEAR_OBJ(d->m_hash);
+ d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
+ d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
+ d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
+ d->m_pLZ_flags = d->m_lz_code_buf;
+ d->m_num_flags_left = 8;
+ d->m_pOutput_buf = d->m_output_buf;
+ d->m_pOutput_buf_end = d->m_output_buf;
+ d->m_prev_return_status = TDEFL_STATUS_OKAY;
+ d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
+ d->m_adler32 = 1;
+ d->m_pIn_buf = NULL;
+ d->m_pOut_buf = NULL;
+ d->m_pIn_buf_size = NULL;
+ d->m_pOut_buf_size = NULL;
+ d->m_flush = TDEFL_NO_FLUSH;
+ d->m_pSrc = NULL;
+ d->m_src_buf_left = 0;
+ d->m_out_buf_ofs = 0;
+ if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
+ MZ_CLEAR_OBJ(d->m_dict);
+ memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
+ memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
+ return TDEFL_STATUS_OKAY;
+}
+
+tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
+{
+ return d->m_prev_return_status;
+}
+
+mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
+{
+ return d->m_adler32;
+}
+
+mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
+{
+ tdefl_compressor *pComp;
+ mz_bool succeeded;
+ if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
+ return MZ_FALSE;
+ pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
+ if (!pComp)
+ return MZ_FALSE;
+ succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
+ succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
+ MZ_FREE(pComp);
+ return succeeded;
+}
+
+typedef struct
+{
+ size_t m_size, m_capacity;
+ mz_uint8 *m_pBuf;
+ mz_bool m_expandable;
+} tdefl_output_buffer;
+
+static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
+{
+ tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
+ size_t new_size = p->m_size + len;
+ if (new_size > p->m_capacity)
+ {
+ size_t new_capacity = p->m_capacity;
+ mz_uint8 *pNew_buf;
+ if (!p->m_expandable)
+ return MZ_FALSE;
+ do
+ {
+ new_capacity = MZ_MAX(128U, new_capacity << 1U);
+ } while (new_size > new_capacity);
+ pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
+ if (!pNew_buf)
+ return MZ_FALSE;
+ p->m_pBuf = pNew_buf;
+ p->m_capacity = new_capacity;
+ }
+ memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
+ p->m_size = new_size;
+ return MZ_TRUE;
+}
+
+void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
+{
+ tdefl_output_buffer out_buf;
+ MZ_CLEAR_OBJ(out_buf);
+ if (!pOut_len)
+ return MZ_FALSE;
+ else
+ *pOut_len = 0;
+ out_buf.m_expandable = MZ_TRUE;
+ if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
+ return NULL;
+ *pOut_len = out_buf.m_size;
+ return out_buf.m_pBuf;
+}
+
+size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
+{
+ tdefl_output_buffer out_buf;
+ MZ_CLEAR_OBJ(out_buf);
+ if (!pOut_buf)
+ return 0;
+ out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
+ out_buf.m_capacity = out_buf_len;
+ if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
+ return 0;
+ return out_buf.m_size;
+}
+
+static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
+
+/* level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression and it's fine if throughput falls off a cliff on some files). */
+mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy)
+{
+ mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
+ if (window_bits > 0)
+ comp_flags |= TDEFL_WRITE_ZLIB_HEADER;
+
+ if (!level)
+ comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
+ else if (strategy == MZ_FILTERED)
+ comp_flags |= TDEFL_FILTER_MATCHES;
+ else if (strategy == MZ_HUFFMAN_ONLY)
+ comp_flags &= ~TDEFL_MAX_PROBES_MASK;
+ else if (strategy == MZ_FIXED)
+ comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
+ else if (strategy == MZ_RLE)
+ comp_flags |= TDEFL_RLE_MATCHES;
+
+ return comp_flags;
+}
+
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal) */
+#endif
+
+/* Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at
+ http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
+ This is actually a modification of Alex's original code so PNG files generated by this function pass pngcheck. */
+void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip)
+{
+ /* Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was defined. */
+ static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
+ tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
+ tdefl_output_buffer out_buf;
+ int i, bpl = w * num_chans, y, z;
+ mz_uint32 c;
+ *pLen_out = 0;
+ if (!pComp)
+ return NULL;
+ MZ_CLEAR_OBJ(out_buf);
+ out_buf.m_expandable = MZ_TRUE;
+ out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
+ if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity)))
+ {
+ MZ_FREE(pComp);
+ return NULL;
+ }
+ /* write dummy header */
+ for (z = 41; z; --z)
+ tdefl_output_buffer_putter(&z, 1, &out_buf);
+ /* compress image data */
+ tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
+ for (y = 0; y < h; ++y)
+ {
+ tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
+ tdefl_compress_buffer(pComp, (const mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH);
+ }
+ if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE)
+ {
+ MZ_FREE(pComp);
+ MZ_FREE(out_buf.m_pBuf);
+ return NULL;
+ }
+ /* write real header */
+ *pLen_out = out_buf.m_size - 41;
+ {
+ static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
+ mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d,
+ 0x0a, 0x1a, 0x0a, 0x00, 0x00,
+ 0x00, 0x0d, 0x49, 0x48, 0x44,
+ 0x52, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x49, 0x44, 0x41,
+ 0x54 };
+ pnghdr[18] = (mz_uint8)(w >> 8);
+ pnghdr[19] = (mz_uint8)w;
+ pnghdr[22] = (mz_uint8)(h >> 8);
+ pnghdr[23] = (mz_uint8)h;
+ pnghdr[25] = chans[num_chans];
+ pnghdr[33] = (mz_uint8)(*pLen_out >> 24);
+ pnghdr[34] = (mz_uint8)(*pLen_out >> 16);
+ pnghdr[35] = (mz_uint8)(*pLen_out >> 8);
+ pnghdr[36] = (mz_uint8)*pLen_out;
+ c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
+ for (i = 0; i < 4; ++i, c <<= 8)
+ ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
+ memcpy(out_buf.m_pBuf, pnghdr, 41);
+ }
+ /* write footer (IDAT CRC-32, followed by IEND chunk) */
+ if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf))
+ {
+ *pLen_out = 0;
+ MZ_FREE(pComp);
+ MZ_FREE(out_buf.m_pBuf);
+ return NULL;
+ }
+ c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4);
+ for (i = 0; i < 4; ++i, c <<= 8)
+ (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
+ /* compute final size of file, grab compressed data buffer and return */
+ *pLen_out += 57;
+ MZ_FREE(pComp);
+ return out_buf.m_pBuf;
+}
+void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out)
+{
+    /* Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were #defined out) */
+ return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE);
+}
+
+#ifdef _MSC_VER
+#pragma warning(pop)
+#endif
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tdef.h b/platform/linux-generic/miniz/miniz_tdef.h
new file mode 100644
index 000000000..25448b6fa
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tdef.h
@@ -0,0 +1,183 @@
+#pragma once
+#include "miniz.h"
+#include "miniz_common.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* ------------------- Low-level Compression API Definitions */
+
+/* Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently). */
+#define TDEFL_LESS_MEMORY 0
+
+/* tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search): */
+/* TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression). */
+enum
+{
+ TDEFL_HUFFMAN_ONLY = 0,
+ TDEFL_DEFAULT_MAX_PROBES = 128,
+ TDEFL_MAX_PROBES_MASK = 0xFFF
+};
+
+/* TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data. */
+/* TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers). */
+/* TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing. */
+/* TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory). */
+/* TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) */
+/* TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. */
+/* TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. */
+/* TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. */
+/* The low 12 bits are reserved to control the max # of hash probes per dictionary lookup (see TDEFL_MAX_PROBES_MASK). */
+enum
+{
+ TDEFL_WRITE_ZLIB_HEADER = 0x01000,
+ TDEFL_COMPUTE_ADLER32 = 0x02000,
+ TDEFL_GREEDY_PARSING_FLAG = 0x04000,
+ TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
+ TDEFL_RLE_MATCHES = 0x10000,
+ TDEFL_FILTER_MATCHES = 0x20000,
+ TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
+ TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
+};
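+
+/* Editorial example: a typical flags value ORs a probe count (low 12 bits) with the
+   behavior bits above, e.g. (TDEFL_WRITE_ZLIB_HEADER | TDEFL_GREEDY_PARSING_FLAG | 128)
+   requests zlib framing, greedy parsing and 128 dictionary probes per search. */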
+
+/* High level compression functions: */
+/* tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc(). */
+/* On entry: */
+/* pSrc_buf, src_buf_len: Pointer and size of source block to compress. */
+/*   flags: The max. number of match finder probes (default is 128) logically OR'd against the above flags. More probes are slower but improve compression. */
+/* On return: */
+/* Function returns a pointer to the compressed data, or NULL on failure. */
+/*  *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on incompressible data. */
+/* The caller must free() the returned block when it's no longer needed. */
+void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
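+/* Editorial usage sketch (assumes a caller-supplied src/src_len buffer):
+
+      size_t out_len = 0;
+      void *pComp_data = tdefl_compress_mem_to_heap(src, src_len, &out_len,
+                                                    TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
+      if (pComp_data)
+      {
+          ... use out_len bytes at pComp_data ...
+          free(pComp_data);
+      }
+*/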
+
+/* tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory. */
+/* Returns 0 on failure. */
+size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
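+/* Editorial usage sketch: because incompressible data can expand, the output buffer below
+   is sized with a deliberately generous margin (an editorial heuristic, not an upstream
+   bound):
+
+      size_t cap = src_len + src_len / 10 + 64;
+      mz_uint8 *pOut = (mz_uint8 *)malloc(cap);
+      size_t out_len = pOut ? tdefl_compress_mem_to_mem(pOut, cap, src, src_len,
+                                                        TDEFL_DEFAULT_MAX_PROBES) : 0;
+      if (!out_len)
+          ... allocation failed, or the output did not fit ...
+*/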
+
+/* Compresses an image to a compressed PNG file in memory. */
+/* On entry: */
+/* pImage, w, h, and num_chans describe the image to compress. num_chans may be 1, 2, 3, or 4. */
+/* The image pitch in bytes per scanline will be w*num_chans. The leftmost pixel on the top scanline is stored first in memory. */
+/*   level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a sensible default. */
+/* If flip is true, the image will be flipped on the Y axis (useful for OpenGL apps). */
+/* On return: */
+/* Function returns a pointer to the compressed data, or NULL on failure. */
+/* *pLen_out will be set to the size of the PNG image file. */
+/* The caller must mz_free() the returned heap block (which will typically be larger than *pLen_out) when it's no longer needed. */
+void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip);
+void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out);
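+/* Editorial usage sketch: write a 3-channel RGB image (caller-supplied pRGB, w, h) out as
+   "image.png":
+
+      size_t png_len = 0;
+      void *pPng = tdefl_write_image_to_png_file_in_memory(pRGB, w, h, 3, &png_len);
+      if (pPng)
+      {
+          FILE *pFile = fopen("image.png", "wb");
+          if (pFile) { fwrite(pPng, 1, png_len, pFile); fclose(pFile); }
+          mz_free(pPng);
+      }
+*/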
+
+/* Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at a time. */
+typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
+
+/* tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally. */
+mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
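+/* Editorial usage sketch: stream compressed bytes straight into a FILE* through the
+   callback, so no heap copy of the compressed data is ever made:
+
+      static mz_bool write_cb(const void *pBuf, int len, void *pUser)
+      {
+          return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
+      }
+
+      FILE *pFile = fopen("data.deflate", "wb");
+      mz_bool ok = pFile && tdefl_compress_mem_to_output(src, src_len, write_cb, pFile,
+                                                         TDEFL_DEFAULT_MAX_PROBES);
+      if (pFile)
+          fclose(pFile);
+*/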
+
+enum
+{
+ TDEFL_MAX_HUFF_TABLES = 3,
+ TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
+ TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
+ TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
+ TDEFL_LZ_DICT_SIZE = 32768,
+ TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
+ TDEFL_MIN_MATCH_LEN = 3,
+ TDEFL_MAX_MATCH_LEN = 258
+};
+
+/* TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes). */
+#if TDEFL_LESS_MEMORY
+enum
+{
+ TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
+ TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
+ TDEFL_MAX_HUFF_SYMBOLS = 288,
+ TDEFL_LZ_HASH_BITS = 12,
+ TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
+ TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
+ TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
+};
+#else
+enum
+{
+ TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
+ TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
+ TDEFL_MAX_HUFF_SYMBOLS = 288,
+ TDEFL_LZ_HASH_BITS = 15,
+ TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
+ TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
+ TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
+};
+#endif
+
+/* The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. Unlike the helpers, the low-level functions make no heap allocations. */
+typedef enum {
+ TDEFL_STATUS_BAD_PARAM = -2,
+ TDEFL_STATUS_PUT_BUF_FAILED = -1,
+ TDEFL_STATUS_OKAY = 0,
+ TDEFL_STATUS_DONE = 1
+} tdefl_status;
+
+/* Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums */
+typedef enum {
+ TDEFL_NO_FLUSH = 0,
+ TDEFL_SYNC_FLUSH = 2,
+ TDEFL_FULL_FLUSH = 3,
+ TDEFL_FINISH = 4
+} tdefl_flush;
+
+/* tdefl's compression state structure. */
+typedef struct
+{
+ tdefl_put_buf_func_ptr m_pPut_buf_func;
+ void *m_pPut_buf_user;
+ mz_uint m_flags, m_max_probes[2];
+ int m_greedy_parsing;
+ mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
+ mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
+ mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer;
+ mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish;
+ tdefl_status m_prev_return_status;
+ const void *m_pIn_buf;
+ void *m_pOut_buf;
+ size_t *m_pIn_buf_size, *m_pOut_buf_size;
+ tdefl_flush m_flush;
+ const mz_uint8 *m_pSrc;
+ size_t m_src_buf_left, m_out_buf_ofs;
+ mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
+ mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+ mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+ mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
+ mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
+ mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
+ mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
+ mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
+} tdefl_compressor;
+
+/* Initializes the compressor. */
+/* There is no corresponding deinit() function because the tdefl APIs do not dynamically allocate memory. */
+/* pPut_buf_func: If non-NULL, output data will be supplied to the specified callback. In this case, the user should call the tdefl_compress_buffer() API for compression. */
+/* If pPut_buf_func is NULL the user should always call the tdefl_compress() API. */
+/* flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.) */
+tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
+
+/* Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible. */
+tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);
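+/* Editorial usage sketch: a fixed-buffer streaming loop (assumes caller-supplied pSrc and
+   src_left, and a hypothetical sink() consumer). tdefl_compressor is roughly 300KB, so it
+   is heap-allocated here rather than placed on the stack:
+
+      tdefl_compressor *pComp = (tdefl_compressor *)malloc(sizeof(tdefl_compressor));
+      mz_uint8 out[4096];
+      tdefl_init(pComp, NULL, NULL, TDEFL_WRITE_ZLIB_HEADER | TDEFL_DEFAULT_MAX_PROBES);
+      for (;;)
+      {
+          size_t in_bytes = src_left, out_bytes = sizeof(out);
+          tdefl_status st = tdefl_compress(pComp, pSrc, &in_bytes, out, &out_bytes, TDEFL_FINISH);
+          sink(out, out_bytes);
+          pSrc += in_bytes;
+          src_left -= in_bytes;
+          if (st != TDEFL_STATUS_OKAY)
+              break;
+      }
+      free(pComp);
+
+   TDEFL_STATUS_DONE signals success; negative statuses are errors. */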
+
+/* tdefl_compress_buffer() is only usable when the tdefl_init() is called with a non-NULL tdefl_put_buf_func_ptr. */
+/* tdefl_compress_buffer() always consumes the entire input buffer. */
+tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush);
+
+tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
+mz_uint32 tdefl_get_adler32(tdefl_compressor *d);
+
+/* Create tdefl_compress() flags given zlib-style compression parameters. */
+/* level may range from [0,10] (where 10 is absolute max compression, but may be much slower on some files) */
+/* window_bits may be -15 (raw deflate) or 15 (zlib) */
+/* strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, MZ_RLE, or MZ_FIXED */
+mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy);
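+/* Editorial usage sketch: map zlib-style parameters (here level 9, zlib framing, default
+   strategy) onto tdefl flags and hand them to tdefl_init():
+
+      mz_uint flags = tdefl_create_comp_flags_from_zip_params(9, 15, MZ_DEFAULT_STRATEGY);
+      tdefl_init(pComp, NULL, NULL, (int)flags);
+*/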
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tinfl.c b/platform/linux-generic/miniz/miniz_tinfl.c
new file mode 100644
index 000000000..3dfa1d550
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tinfl.c
@@ -0,0 +1,725 @@
+/**************************************************************************
+ *
+ * Copyright 2013-2014 RAD Game Tools and Valve Software
+ * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "miniz_tinfl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* ------------------- Low-level Decompression (completely independent from all compression API's) */
+
+#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
+#define TINFL_MEMSET(p, c, l) memset(p, c, l)
+
+#define TINFL_CR_BEGIN \
+ switch (r->m_state) \
+ { \
+ case 0:
+#define TINFL_CR_RETURN(state_index, result) \
+ do \
+ { \
+ status = result; \
+ r->m_state = state_index; \
+ goto common_exit; \
+ case state_index:; \
+ } \
+ MZ_MACRO_END
+#define TINFL_CR_RETURN_FOREVER(state_index, result) \
+ do \
+ { \
+ for (;;) \
+ { \
+ TINFL_CR_RETURN(state_index, result); \
+ } \
+ } \
+ MZ_MACRO_END
+#define TINFL_CR_FINISH }
+
+#define TINFL_GET_BYTE(state_index, c) \
+ do \
+ { \
+ while (pIn_buf_cur >= pIn_buf_end) \
+ { \
+ TINFL_CR_RETURN(state_index, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS); \
+ } \
+ c = *pIn_buf_cur++; \
+ } \
+ MZ_MACRO_END
+
+#define TINFL_NEED_BITS(state_index, n) \
+ do \
+ { \
+ mz_uint c; \
+ TINFL_GET_BYTE(state_index, c); \
+ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
+ num_bits += 8; \
+ } while (num_bits < (mz_uint)(n))
+#define TINFL_SKIP_BITS(state_index, n) \
+ do \
+ { \
+ if (num_bits < (mz_uint)(n)) \
+ { \
+ TINFL_NEED_BITS(state_index, n); \
+ } \
+ bit_buf >>= (n); \
+ num_bits -= (n); \
+ } \
+ MZ_MACRO_END
+#define TINFL_GET_BITS(state_index, b, n) \
+ do \
+ { \
+ if (num_bits < (mz_uint)(n)) \
+ { \
+ TINFL_NEED_BITS(state_index, n); \
+ } \
+ b = bit_buf & ((1 << (n)) - 1); \
+ bit_buf >>= (n); \
+ num_bits -= (n); \
+ } \
+ MZ_MACRO_END
+
+/* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2. */
+/* It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a */
+/* Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the */
+/* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
+#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff) \
+ do \
+ { \
+ temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]; \
+ if (temp >= 0) \
+ { \
+ code_len = temp >> 9; \
+ if ((code_len) && (num_bits >= code_len)) \
+ break; \
+ } \
+ else if (num_bits > TINFL_FAST_LOOKUP_BITS) \
+ { \
+ code_len = TINFL_FAST_LOOKUP_BITS; \
+ do \
+ { \
+ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
+ } while ((temp < 0) && (num_bits >= (code_len + 1))); \
+ if (temp >= 0) \
+ break; \
+ } \
+ TINFL_GET_BYTE(state_index, c); \
+ bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
+ num_bits += 8; \
+ } while (num_bits < 15);
+
+/* TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read */
+/* beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully */
+/* decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32. */
+/* The slow path is only executed at the very end of the input buffer. */
+/* v1.16: The original macro handled the case at the very end of the passed-in input buffer, but we also need to handle the case where the user passes in 1+zillion bytes */
+/* following the deflate data and our non-conservative read-ahead path won't kick in here on this code. This is much trickier. */
+#define TINFL_HUFF_DECODE(state_index, sym, pHuff) \
+ do \
+ { \
+ int temp; \
+ mz_uint code_len, c; \
+ if (num_bits < 15) \
+ { \
+ if ((pIn_buf_end - pIn_buf_cur) < 2) \
+ { \
+ TINFL_HUFF_BITBUF_FILL(state_index, pHuff); \
+ } \
+ else \
+ { \
+ bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
+ pIn_buf_cur += 2; \
+ num_bits += 16; \
+ } \
+ } \
+ if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0) \
+ code_len = temp >> 9, temp &= 511; \
+ else \
+ { \
+ code_len = TINFL_FAST_LOOKUP_BITS; \
+ do \
+ { \
+ temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
+ } while (temp < 0); \
+ } \
+ sym = temp; \
+ bit_buf >>= code_len; \
+ num_bits -= code_len; \
+ } \
+ MZ_MACRO_END
+
+tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
+{
+ static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
+ static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };
+ static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
+ static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
+ static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
+ static const int s_min_table_sizes[3] = { 257, 1, 4 };
+
+ tinfl_status status = TINFL_STATUS_FAILED;
+ mz_uint32 num_bits, dist, counter, num_extra;
+ tinfl_bit_buf_t bit_buf;
+ const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
+ mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
+ size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;
+
+ /* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). */
+ if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start))
+ {
+ *pIn_buf_size = *pOut_buf_size = 0;
+ return TINFL_STATUS_BAD_PARAM;
+ }
+
+ num_bits = r->m_num_bits;
+ bit_buf = r->m_bit_buf;
+ dist = r->m_dist;
+ counter = r->m_counter;
+ num_extra = r->m_num_extra;
+ dist_from_out_buf_start = r->m_dist_from_out_buf_start;
+ TINFL_CR_BEGIN
+
+ bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
+ r->m_z_adler32 = r->m_check_adler32 = 1;
+ if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
+ {
+ TINFL_GET_BYTE(1, r->m_zhdr0);
+ TINFL_GET_BYTE(2, r->m_zhdr1);
+ counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
+ if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
+ counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
+ if (counter)
+ {
+ TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
+ }
+ }
+
+ do
+ {
+ TINFL_GET_BITS(3, r->m_final, 3);
+ r->m_type = r->m_final >> 1;
+ if (r->m_type == 0)
+ {
+ TINFL_SKIP_BITS(5, num_bits & 7);
+ for (counter = 0; counter < 4; ++counter)
+ {
+ if (num_bits)
+ TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
+ else
+ TINFL_GET_BYTE(7, r->m_raw_header[counter]);
+ }
+ if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8))))
+ {
+ TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
+ }
+ while ((counter) && (num_bits))
+ {
+ TINFL_GET_BITS(51, dist, 8);
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ *pOut_buf_cur++ = (mz_uint8)dist;
+ counter--;
+ }
+ while (counter)
+ {
+ size_t n;
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ while (pIn_buf_cur >= pIn_buf_end)
+ {
+ TINFL_CR_RETURN(38, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS);
+ }
+ n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
+ TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
+ pIn_buf_cur += n;
+ pOut_buf_cur += n;
+ counter -= (mz_uint)n;
+ }
+ }
+ else if (r->m_type == 3)
+ {
+ TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
+ }
+ else
+ {
+ if (r->m_type == 1)
+ {
+ mz_uint8 *p = r->m_tables[0].m_code_size;
+ mz_uint i;
+ r->m_table_sizes[0] = 288;
+ r->m_table_sizes[1] = 32;
+ TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
+ for (i = 0; i <= 143; ++i)
+ *p++ = 8;
+ for (; i <= 255; ++i)
+ *p++ = 9;
+ for (; i <= 279; ++i)
+ *p++ = 7;
+ for (; i <= 287; ++i)
+ *p++ = 8;
+ }
+ else
+ {
+ for (counter = 0; counter < 3; counter++)
+ {
+ TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
+ r->m_table_sizes[counter] += s_min_table_sizes[counter];
+ }
+ MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
+ for (counter = 0; counter < r->m_table_sizes[2]; counter++)
+ {
+ mz_uint s;
+ TINFL_GET_BITS(14, s, 3);
+ r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
+ }
+ r->m_table_sizes[2] = 19;
+ }
+ for (; (int)r->m_type >= 0; r->m_type--)
+ {
+ int tree_next, tree_cur;
+ tinfl_huff_table *pTable;
+ mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16];
+ pTable = &r->m_tables[r->m_type];
+ MZ_CLEAR_OBJ(total_syms);
+ MZ_CLEAR_OBJ(pTable->m_look_up);
+ MZ_CLEAR_OBJ(pTable->m_tree);
+ for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
+ total_syms[pTable->m_code_size[i]]++;
+ used_syms = 0, total = 0;
+ next_code[0] = next_code[1] = 0;
+ for (i = 1; i <= 15; ++i)
+ {
+ used_syms += total_syms[i];
+ next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
+ }
+ if ((65536 != total) && (used_syms > 1))
+ {
+ TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
+ }
+ for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
+ {
+ mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index];
+ if (!code_size)
+ continue;
+ cur_code = next_code[code_size]++;
+ for (l = code_size; l > 0; l--, cur_code >>= 1)
+ rev_code = (rev_code << 1) | (cur_code & 1);
+ if (code_size <= TINFL_FAST_LOOKUP_BITS)
+ {
+ mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
+ while (rev_code < TINFL_FAST_LOOKUP_SIZE)
+ {
+ pTable->m_look_up[rev_code] = k;
+ rev_code += (1 << code_size);
+ }
+ continue;
+ }
+ if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)]))
+ {
+ pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next;
+ tree_cur = tree_next;
+ tree_next -= 2;
+ }
+ rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
+ for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
+ {
+ tree_cur -= ((rev_code >>= 1) & 1);
+ if (!pTable->m_tree[-tree_cur - 1])
+ {
+ pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
+ tree_cur = tree_next;
+ tree_next -= 2;
+ }
+ else
+ tree_cur = pTable->m_tree[-tree_cur - 1];
+ }
+ tree_cur -= ((rev_code >>= 1) & 1);
+ pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
+ }
+ if (r->m_type == 2)
+ {
+ for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);)
+ {
+ mz_uint s;
+ TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
+ if (dist < 16)
+ {
+ r->m_len_codes[counter++] = (mz_uint8)dist;
+ continue;
+ }
+ if ((dist == 16) && (!counter))
+ {
+ TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
+ }
+ num_extra = "\02\03\07"[dist - 16];
+ TINFL_GET_BITS(18, s, num_extra);
+ s += "\03\03\013"[dist - 16];
+ TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
+ counter += s;
+ }
+ if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
+ {
+ TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
+ }
+ TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]);
+ TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
+ }
+ }
+ for (;;)
+ {
+ mz_uint8 *pSrc;
+ for (;;)
+ {
+ if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
+ {
+ TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
+ if (counter >= 256)
+ break;
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ *pOut_buf_cur++ = (mz_uint8)counter;
+ }
+ else
+ {
+ int sym2;
+ mz_uint code_len;
+#if TINFL_USE_64BIT_BITBUF
+ if (num_bits < 30)
+ {
+ bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
+ pIn_buf_cur += 4;
+ num_bits += 32;
+ }
+#else
+ if (num_bits < 15)
+ {
+ bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
+ pIn_buf_cur += 2;
+ num_bits += 16;
+ }
+#endif
+ if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
+ code_len = sym2 >> 9;
+ else
+ {
+ code_len = TINFL_FAST_LOOKUP_BITS;
+ do
+ {
+ sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
+ } while (sym2 < 0);
+ }
+ counter = sym2;
+ bit_buf >>= code_len;
+ num_bits -= code_len;
+ if (counter & 256)
+ break;
+
+#if !TINFL_USE_64BIT_BITBUF
+ if (num_bits < 15)
+ {
+ bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
+ pIn_buf_cur += 2;
+ num_bits += 16;
+ }
+#endif
+ if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
+ code_len = sym2 >> 9;
+ else
+ {
+ code_len = TINFL_FAST_LOOKUP_BITS;
+ do
+ {
+ sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
+ } while (sym2 < 0);
+ }
+ bit_buf >>= code_len;
+ num_bits -= code_len;
+
+ pOut_buf_cur[0] = (mz_uint8)counter;
+ if (sym2 & 256)
+ {
+ pOut_buf_cur++;
+ counter = sym2;
+ break;
+ }
+ pOut_buf_cur[1] = (mz_uint8)sym2;
+ pOut_buf_cur += 2;
+ }
+ }
+ if ((counter &= 511) == 256)
+ break;
+
+ num_extra = s_length_extra[counter - 257];
+ counter = s_length_base[counter - 257];
+ if (num_extra)
+ {
+ mz_uint extra_bits;
+ TINFL_GET_BITS(25, extra_bits, num_extra);
+ counter += extra_bits;
+ }
+
+ TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
+ num_extra = s_dist_extra[dist];
+ dist = s_dist_base[dist];
+ if (num_extra)
+ {
+ mz_uint extra_bits;
+ TINFL_GET_BITS(27, extra_bits, num_extra);
+ dist += extra_bits;
+ }
+
+ dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
+ if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
+ {
+ TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
+ }
+
+ pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);
+
+ if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
+ {
+ while (counter--)
+ {
+ while (pOut_buf_cur >= pOut_buf_end)
+ {
+ TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
+ }
+ *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
+ }
+ continue;
+ }
+#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
+ else if ((counter >= 9) && (counter <= dist))
+ {
+ const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
+ do
+ {
+#ifdef MINIZ_UNALIGNED_USE_MEMCPY
+ memcpy(pOut_buf_cur, pSrc, sizeof(mz_uint32)*2);
+#else
+ ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
+ ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
+#endif
+ pOut_buf_cur += 8;
+ } while ((pSrc += 8) < pSrc_end);
+ if ((counter &= 7) < 3)
+ {
+ if (counter)
+ {
+ pOut_buf_cur[0] = pSrc[0];
+ if (counter > 1)
+ pOut_buf_cur[1] = pSrc[1];
+ pOut_buf_cur += counter;
+ }
+ continue;
+ }
+ }
+#endif
+ while (counter > 2)
+ {
+ pOut_buf_cur[0] = pSrc[0];
+ pOut_buf_cur[1] = pSrc[1];
+ pOut_buf_cur[2] = pSrc[2];
+ pOut_buf_cur += 3;
+ pSrc += 3;
+ counter -= 3;
+ }
+ if (counter > 0)
+ {
+ pOut_buf_cur[0] = pSrc[0];
+ if (counter > 1)
+ pOut_buf_cur[1] = pSrc[1];
+ pOut_buf_cur += counter;
+ }
+ }
+ }
+ } while (!(r->m_final & 1));
+
+ /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
+ /* I'm being super conservative here. A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. */
+ TINFL_SKIP_BITS(32, num_bits & 7);
+ while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
+ {
+ --pIn_buf_cur;
+ num_bits -= 8;
+ }
+ bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
+ MZ_ASSERT(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). */
+
+ if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
+ {
+ for (counter = 0; counter < 4; ++counter)
+ {
+ mz_uint s;
+ if (num_bits)
+ TINFL_GET_BITS(41, s, 8);
+ else
+ TINFL_GET_BYTE(42, s);
+ r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
+ }
+ }
+ TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);
+
+ TINFL_CR_FINISH
+
+common_exit:
+ /* As long as we aren't telling the caller that we NEED more input to make forward progress: */
+ /* Put back any bytes from the bitbuf in case we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
+ /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop. */
+ if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS))
+ {
+ while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
+ {
+ --pIn_buf_cur;
+ num_bits -= 8;
+ }
+ }
+ r->m_num_bits = num_bits;
+ r->m_bit_buf = bit_buf & (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
+ r->m_dist = dist;
+ r->m_counter = counter;
+ r->m_num_extra = num_extra;
+ r->m_dist_from_out_buf_start = dist_from_out_buf_start;
+ *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
+ *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
+ if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
+ {
+ const mz_uint8 *ptr = pOut_buf_next;
+ size_t buf_len = *pOut_buf_size;
+ mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16;
+ size_t block_len = buf_len % 5552;
+ while (buf_len)
+ {
+ for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
+ {
+ s1 += ptr[0], s2 += s1;
+ s1 += ptr[1], s2 += s1;
+ s1 += ptr[2], s2 += s1;
+ s1 += ptr[3], s2 += s1;
+ s1 += ptr[4], s2 += s1;
+ s1 += ptr[5], s2 += s1;
+ s1 += ptr[6], s2 += s1;
+ s1 += ptr[7], s2 += s1;
+ }
+ for (; i < block_len; ++i)
+ s1 += *ptr++, s2 += s1;
+ s1 %= 65521U, s2 %= 65521U;
+ buf_len -= block_len;
+ block_len = 5552;
+ }
+ r->m_check_adler32 = (s2 << 16) + s1;
+ if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32))
+ status = TINFL_STATUS_ADLER32_MISMATCH;
+ }
+ return status;
+}
+
+/* Higher level helper functions. */
+void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
+{
+ tinfl_decompressor decomp;
+ void *pBuf = NULL, *pNew_buf;
+ size_t src_buf_ofs = 0, out_buf_capacity = 0;
+ *pOut_len = 0;
+ tinfl_init(&decomp);
+ for (;;)
+ {
+ size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
+ tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size,
+ (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
+ if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
+ {
+ MZ_FREE(pBuf);
+ *pOut_len = 0;
+ return NULL;
+ }
+ src_buf_ofs += src_buf_size;
+ *pOut_len += dst_buf_size;
+ if (status == TINFL_STATUS_DONE)
+ break;
+ new_out_buf_capacity = out_buf_capacity * 2;
+ if (new_out_buf_capacity < 128)
+ new_out_buf_capacity = 128;
+ pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
+ if (!pNew_buf)
+ {
+ MZ_FREE(pBuf);
+ *pOut_len = 0;
+ return NULL;
+ }
+ pBuf = pNew_buf;
+ out_buf_capacity = new_out_buf_capacity;
+ }
+ return pBuf;
+}
+
+size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
+{
+ tinfl_decompressor decomp;
+ tinfl_status status;
+ tinfl_init(&decomp);
+ status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
+ return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
+}
+
+int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
+{
+ int result = 0;
+ tinfl_decompressor decomp;
+ mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
+ size_t in_buf_ofs = 0, dict_ofs = 0;
+ if (!pDict)
+ return TINFL_STATUS_FAILED;
+ tinfl_init(&decomp);
+ for (;;)
+ {
+ size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
+ tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
+ (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
+ in_buf_ofs += in_buf_size;
+ if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
+ break;
+ if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
+ {
+ result = (status == TINFL_STATUS_DONE);
+ break;
+ }
+ dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
+ }
+ MZ_FREE(pDict);
+ *pIn_buf_size = in_buf_ofs;
+ return result;
+}
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/miniz/miniz_tinfl.h b/platform/linux-generic/miniz/miniz_tinfl.h
new file mode 100644
index 000000000..28ca15fcb
--- /dev/null
+++ b/platform/linux-generic/miniz/miniz_tinfl.h
@@ -0,0 +1,146 @@
+#pragma once
+#include "miniz.h"
+#include "miniz_common.h"
+/* ------------------- Low-level Decompression API Definitions */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/* Decompression flags used by tinfl_decompress(). */
+/* TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream. */
+/* TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input. */
+/* TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB). */
+/* TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes. */
+enum
+{
+ TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
+ TINFL_FLAG_HAS_MORE_INPUT = 2,
+ TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
+ TINFL_FLAG_COMPUTE_ADLER32 = 8
+};
+
+/* High level decompression functions: */
+/* tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc(). */
+/* On entry: */
+/* pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress. */
+/* On return: */
+/* Function returns a pointer to the decompressed data, or NULL on failure. */
+/* *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data. */
+/* The caller must call mz_free() on the returned block when it's no longer needed. */
+void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);
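+
+/* A minimal heap-decompression sketch (illustrative only, not part of this patch; */
+/* pSrc/src_len are assumed to hold a complete zlib stream): */
+/*
+ * size_t out_len = 0;
+ * void *p = tinfl_decompress_mem_to_heap(pSrc, src_len, &out_len,
+ *                                        TINFL_FLAG_PARSE_ZLIB_HEADER);
+ * if (p) {
+ *     ... use out_len decompressed bytes at p ...
+ *     mz_free(p);
+ * }
+ */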
+
+/* tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory. */
+/* Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success. */
+#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
+size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
+
+/* tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user-provided callback function will be called to flush the buffer. */
+/* Returns 1 on success or 0 on failure. */
+typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
+int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
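+
+/* A minimal callback sketch (illustrative only, not part of this patch; write_cb */
+/* streams decompressed bytes to an assumed open FILE handle passed as pPut_buf_user): */
+/*
+ * static int write_cb(const void *pBuf, int len, void *pUser)
+ * {
+ *     return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
+ * }
+ *
+ * size_t in_len = src_len;
+ * int ok = tinfl_decompress_mem_to_callback(pSrc, &in_len, write_cb, fp,
+ *                                           TINFL_FLAG_PARSE_ZLIB_HEADER);
+ */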
+
+struct tinfl_decompressor_tag;
+typedef struct tinfl_decompressor_tag tinfl_decompressor;
+
+/* Max size of LZ dictionary. */
+#define TINFL_LZ_DICT_SIZE 32768
+
+/* Return status. */
+typedef enum {
+ /* This flag indicates the inflator needs 1 or more input bytes to make forward progress, but the caller is indicating that no more are available. The compressed data */
+ /* is probably corrupted. If you call the inflator again with more bytes it'll try to continue processing the input, but this is a BAD sign (either the data is corrupted or you called it incorrectly). */
+ /* If you call it again with no input you'll just get TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS again. */
+ TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS = -4,
+
+ /* This flag indicates that one or more of the input parameters was obviously bogus. (You can try calling it again, but if you get this error the calling code is wrong.) */
+ TINFL_STATUS_BAD_PARAM = -3,
+
+ /* This flag indicates the inflator finished, but the adler32 check of the uncompressed data didn't match. If you call it again it'll return TINFL_STATUS_DONE. */
+ TINFL_STATUS_ADLER32_MISMATCH = -2,
+
+ /* This flag indicates the inflator has somehow failed (bad code, corrupted input, etc.). If you call it again without resetting via tinfl_init() it'll just keep returning the same failure status code. */
+ TINFL_STATUS_FAILED = -1,
+
+ /* Any status code less than TINFL_STATUS_DONE must indicate a failure. */
+
+ /* This flag indicates the inflator has returned every byte of uncompressed data that it can, has consumed every byte that it needed, has successfully reached the end of the deflate stream, and */
+ /* if zlib headers and adler32 checking are enabled, that it has successfully checked the uncompressed data's adler32. If you call it again you'll just get TINFL_STATUS_DONE over and over again. */
+ TINFL_STATUS_DONE = 0,
+
+ /* This flag indicates the inflator MUST have more input data (even 1 byte) before it can make any more forward progress, or you need to clear the TINFL_FLAG_HAS_MORE_INPUT */
+ /* flag on the next call if you don't have any more source data. If the source data was somehow corrupted it's also possible (but unlikely) for the inflator to keep on demanding input to */
+ /* proceed, so be sure to properly set the TINFL_FLAG_HAS_MORE_INPUT flag. */
+ TINFL_STATUS_NEEDS_MORE_INPUT = 1,
+
+ /* This flag indicates the inflator definitely has 1 or more bytes of uncompressed data available, but it cannot write this data into the output buffer. */
+ /* Note if the source compressed data was corrupted it's possible for the inflator to return a lot of uncompressed data to the caller. I've been assuming you know how much uncompressed data to expect */
+ /* (either exact or worst case) and will stop calling the inflator and fail after receiving too much. In pure streaming scenarios where you have no idea how many bytes to expect this may not be possible */
+ /* so I may need to add some code to address this. */
+ TINFL_STATUS_HAS_MORE_OUTPUT = 2
+} tinfl_status;
+
+/* Initializes the decompressor to its initial state. */
+#define tinfl_init(r) \
+ do \
+ { \
+ (r)->m_state = 0; \
+ } \
+ MZ_MACRO_END
+#define tinfl_get_adler32(r) (r)->m_check_adler32
+
+/* Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability. */
+/* This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output. */
+tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags);
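+
+/* A streaming sketch over a fixed 32KB window (illustrative only, not part of this */
+/* patch; refill() and consume() are assumed helpers that supply compressed input */
+/* and drain decompressed output): */
+/*
+ * tinfl_decompressor inflator;
+ * mz_uint8 in_buf[1024], dict[TINFL_LZ_DICT_SIZE];
+ * size_t dict_ofs = 0, in_avail = 0, in_ofs = 0;
+ * tinfl_status st;
+ *
+ * tinfl_init(&inflator);
+ * do {
+ *     if (in_ofs == in_avail) {
+ *         in_avail = refill(in_buf, sizeof(in_buf));
+ *         in_ofs = 0;
+ *     }
+ *     size_t in_sz = in_avail - in_ofs;
+ *     size_t out_sz = TINFL_LZ_DICT_SIZE - dict_ofs;
+ *     st = tinfl_decompress(&inflator, in_buf + in_ofs, &in_sz, dict,
+ *                           dict + dict_ofs, &out_sz,
+ *                           in_avail ? TINFL_FLAG_HAS_MORE_INPUT : 0);
+ *     in_ofs += in_sz;
+ *     consume(dict + dict_ofs, out_sz);
+ *     dict_ofs = (dict_ofs + out_sz) & (TINFL_LZ_DICT_SIZE - 1);
+ * } while (st == TINFL_STATUS_NEEDS_MORE_INPUT || st == TINFL_STATUS_HAS_MORE_OUTPUT);
+ */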
+
+/* Internal/private bits follow. */
+enum
+{
+ TINFL_MAX_HUFF_TABLES = 3,
+ TINFL_MAX_HUFF_SYMBOLS_0 = 288,
+ TINFL_MAX_HUFF_SYMBOLS_1 = 32,
+ TINFL_MAX_HUFF_SYMBOLS_2 = 19,
+ TINFL_FAST_LOOKUP_BITS = 10,
+ TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
+};
+
+typedef struct
+{
+ mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
+ mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
+} tinfl_huff_table;
+
+#if MINIZ_HAS_64BIT_REGISTERS
+#define TINFL_USE_64BIT_BITBUF 1
+#else
+#define TINFL_USE_64BIT_BITBUF 0
+#endif
+
+#if TINFL_USE_64BIT_BITBUF
+typedef mz_uint64 tinfl_bit_buf_t;
+#define TINFL_BITBUF_SIZE (64)
+#else
+typedef mz_uint32 tinfl_bit_buf_t;
+#define TINFL_BITBUF_SIZE (32)
+#endif
+
+struct tinfl_decompressor_tag
+{
+ mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
+ tinfl_bit_buf_t m_bit_buf;
+ size_t m_dist_from_out_buf_start;
+ tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
+ mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
+};
+
+typedef struct
+{
+ tinfl_decompressor m_decomp;
+ mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
+ int m_window_bits;
+ mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
+ tinfl_status m_last_status;
+} inflate_state;
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 26cd8efbd..e3f1cc6e0 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -35,6 +35,16 @@ static cos_tbl_t *cos_tbl;
static pmr_tbl_t *pmr_tbl;
static _cls_queue_grp_tbl_t *queue_grp_tbl;
+typedef struct cls_global_t {
+ cos_tbl_t cos_tbl;
+ pmr_tbl_t pmr_tbl;
+ _cls_queue_grp_tbl_t queue_grp_tbl;
+ odp_shm_t shm;
+
+} cls_global_t;
+
+static cls_global_t *cls_global;
+
static const rss_key default_rss = {
.u8 = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
@@ -45,115 +55,79 @@ static const rss_key default_rss = {
}
};
+static inline uint32_t _odp_cos_to_ndx(odp_cos_t cos)
+{
+ return _odp_typeval(cos) - 1;
+}
+
+static inline odp_cos_t _odp_cos_from_ndx(uint32_t ndx)
+{
+ return _odp_cast_scalar(odp_cos_t, ndx + 1);
+}
+
+static inline uint32_t _odp_pmr_to_ndx(odp_pmr_t pmr)
+{
+ return _odp_typeval(pmr) - 1;
+}
+
+static inline odp_pmr_t _odp_pmr_from_ndx(uint32_t ndx)
+{
+ return _odp_cast_scalar(odp_pmr_t, ndx + 1);
+}
+
static
-cos_t *get_cos_entry_internal(odp_cos_t cos_id)
+cos_t *get_cos_entry_internal(odp_cos_t cos)
{
- return &cos_tbl->cos_entry[_odp_typeval(cos_id)];
+ return &cos_tbl->cos_entry[_odp_cos_to_ndx(cos)];
}
static
-pmr_t *get_pmr_entry_internal(odp_pmr_t pmr_id)
+pmr_t *get_pmr_entry_internal(odp_pmr_t pmr)
{
- return &pmr_tbl->pmr[_odp_typeval(pmr_id)];
+ return &pmr_tbl->pmr[_odp_pmr_to_ndx(pmr)];
}
int odp_classification_init_global(void)
{
- odp_shm_t cos_shm;
- odp_shm_t pmr_shm;
- odp_shm_t queue_grp_shm;
+ odp_shm_t shm;
int i;
- cos_shm = odp_shm_reserve("shm_odp_cos_tbl",
- sizeof(cos_tbl_t),
- sizeof(cos_t), 0);
+ shm = odp_shm_reserve("_odp_cls_global", sizeof(cls_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
- if (cos_shm == ODP_SHM_INVALID) {
- ODP_ERR("shm allocation failed for shm_odp_cos_tbl");
- goto error;
- }
+ cls_global = odp_shm_addr(shm);
+ memset(cls_global, 0, sizeof(cls_global_t));
- cos_tbl = odp_shm_addr(cos_shm);
- if (cos_tbl == NULL)
- goto error_cos;
+ cls_global->shm = shm;
+ cos_tbl = &cls_global->cos_tbl;
+ pmr_tbl = &cls_global->pmr_tbl;
+ queue_grp_tbl = &cls_global->queue_grp_tbl;
- memset(cos_tbl, 0, sizeof(cos_tbl_t));
for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
/* init locks */
- cos_t *cos =
- get_cos_entry_internal(_odp_cast_scalar(odp_cos_t, i));
+ cos_t *cos = get_cos_entry_internal(_odp_cos_from_ndx(i));
LOCK_INIT(&cos->s.lock);
}
- pmr_shm = odp_shm_reserve("shm_odp_pmr_tbl",
- sizeof(pmr_tbl_t),
- sizeof(pmr_t), 0);
-
- if (pmr_shm == ODP_SHM_INVALID) {
- ODP_ERR("shm allocation failed for shm_odp_pmr_tbl");
- goto error_cos;
- }
-
- pmr_tbl = odp_shm_addr(pmr_shm);
- if (pmr_tbl == NULL)
- goto error_pmr;
-
- memset(pmr_tbl, 0, sizeof(pmr_tbl_t));
for (i = 0; i < CLS_PMR_MAX_ENTRY; i++) {
/* init locks */
- pmr_t *pmr =
- get_pmr_entry_internal(_odp_cast_scalar(odp_pmr_t, i));
+ pmr_t *pmr = get_pmr_entry_internal(_odp_pmr_from_ndx(i));
LOCK_INIT(&pmr->s.lock);
}
- queue_grp_shm = odp_shm_reserve("shm_odp_cls_queue_grp_tbl",
- sizeof(_cls_queue_grp_tbl_t),
- sizeof(queue_entry_t *), 0);
-
- if (queue_grp_shm == ODP_SHM_INVALID) {
- ODP_ERR("shm allocation failed for queue_grp_tbl");
- goto error_queue_grp;
- }
-
- queue_grp_tbl = odp_shm_addr(queue_grp_shm);
- memset(queue_grp_tbl, 0, sizeof(_cls_queue_grp_tbl_t));
-
return 0;
-
-error_queue_grp:
- odp_shm_free(queue_grp_shm);
-error_pmr:
- odp_shm_free(pmr_shm);
-error_cos:
- odp_shm_free(cos_shm);
-error:
- return -1;
}
int odp_classification_term_global(void)
{
- int ret = 0;
- int rc = 0;
-
- ret = odp_shm_free(odp_shm_lookup("shm_odp_cos_tbl"));
- if (ret < 0) {
- ODP_ERR("shm free failed for shm_odp_cos_tbl");
- rc = -1;
- }
-
- ret = odp_shm_free(odp_shm_lookup("shm_odp_pmr_tbl"));
- if (ret < 0) {
- ODP_ERR("shm free failed for shm_odp_pmr_tbl");
- rc = -1;
- }
-
- ret = odp_shm_free(odp_shm_lookup("shm_odp_cls_queue_grp_tbl"));
- if (ret < 0) {
- ODP_ERR("shm free failed for shm_odp_cls_queue_grp_tbl");
- rc = -1;
+ if (cls_global && odp_shm_free(cls_global->shm)) {
+ ODP_ERR("shm free failed");
+ return -1;
}
- return rc;
+ return 0;
}
void odp_cls_cos_param_init(odp_cls_cos_param_t *param)
@@ -283,7 +257,7 @@ odp_cos_t odp_cls_cos_create(const char *name, odp_cls_cos_param_t *param)
odp_atomic_init_u32(&cos->s.num_rule, 0);
cos->s.index = i;
UNLOCK(&cos->s.lock);
- return _odp_cast_scalar(odp_cos_t, i);
+ return _odp_cos_from_ndx(i);
}
UNLOCK(&cos->s.lock);
}
@@ -308,7 +282,7 @@ odp_pmr_t alloc_pmr(pmr_t **pmr)
pmr_tbl->pmr[i].s.num_pmr = 0;
*pmr = &pmr_tbl->pmr[i];
/* return as locked */
- return _odp_cast_scalar(odp_pmr_t, i);
+ return _odp_pmr_from_ndx(i);
}
UNLOCK(&pmr_tbl->pmr[i].s.lock);
}
@@ -317,25 +291,28 @@ odp_pmr_t alloc_pmr(pmr_t **pmr)
}
static
-cos_t *get_cos_entry(odp_cos_t cos_id)
+cos_t *get_cos_entry(odp_cos_t cos)
{
- if (_odp_typeval(cos_id) >= CLS_COS_MAX_ENTRY ||
- cos_id == ODP_COS_INVALID)
+ uint32_t cos_id = _odp_cos_to_ndx(cos);
+
+ if (cos_id >= CLS_COS_MAX_ENTRY || cos == ODP_COS_INVALID)
return NULL;
- if (cos_tbl->cos_entry[_odp_typeval(cos_id)].s.valid == 0)
+ if (cos_tbl->cos_entry[cos_id].s.valid == 0)
return NULL;
- return &cos_tbl->cos_entry[_odp_typeval(cos_id)];
+ return &cos_tbl->cos_entry[cos_id];
}
static
-pmr_t *get_pmr_entry(odp_pmr_t pmr_id)
+pmr_t *get_pmr_entry(odp_pmr_t pmr)
{
- if (_odp_typeval(pmr_id) >= CLS_PMR_MAX_ENTRY ||
- pmr_id == ODP_PMR_INVALID)
+ uint32_t pmr_id = _odp_pmr_to_ndx(pmr);
+
+ if (pmr_id >= CLS_PMR_MAX_ENTRY ||
+ pmr == ODP_PMR_INVALID)
return NULL;
- if (pmr_tbl->pmr[_odp_typeval(pmr_id)].s.valid == 0)
+ if (pmr_tbl->pmr[pmr_id].s.valid == 0)
return NULL;
- return &pmr_tbl->pmr[_odp_typeval(pmr_id)];
+ return &pmr_tbl->pmr[pmr_id];
}
int odp_cos_destroy(odp_cos_t cos_id)
@@ -1016,7 +993,7 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
pkt_hdr->p.input_flags.dst_queue = 1;
if (!cos->s.queue_group) {
- pkt_hdr->dst_queue = queue_fn->from_ext(cos->s.queue);
+ pkt_hdr->dst_queue = cos->s.queue;
return 0;
}
@@ -1025,8 +1002,7 @@ int cls_classify_packet(pktio_entry_t *entry, const uint8_t *base,
hash = hash & (CLS_COS_QUEUE_MAX - 1);
tbl_index = (cos->s.index * CLS_COS_QUEUE_MAX) + (hash %
cos->s.num_queue);
- pkt_hdr->dst_queue = queue_fn->from_ext(queue_grp_tbl->
- s.queue[tbl_index]);
+ pkt_hdr->dst_queue = queue_grp_tbl->s.queue[tbl_index];
return 0;
}
diff --git a/platform/linux-generic/odp_comp.c b/platform/linux-generic/odp_comp.c
new file mode 100644
index 000000000..25ed11f55
--- /dev/null
+++ b/platform/linux-generic/odp_comp.c
@@ -0,0 +1,678 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+#include "config.h"
+
+#include <string.h>
+
+#include <odp/api/comp.h>
+#include <odp/api/event.h>
+#include <odp/api/packet.h>
+#include <odp/api/plat/strong_types.h>
+#include <odp_packet_internal.h>
+
+#include <odp_debug_internal.h>
+#include <odp_init_internal.h>
+
+#include "miniz/miniz.h"
+
+#define MAX_SESSIONS 16
+#define MEM_LEVEL 8
+
+/** Forward declaration of session structure */
+typedef struct odp_comp_generic_session odp_comp_generic_session_t;
+
+#define to_gen_session(s) ((odp_comp_generic_session_t *)(intptr_t)(s))
+
+/**
+ * Algorithm handler function prototype
+ */
+typedef
+int (*comp_func_t)(odp_packet_t pkt_in,
+ odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *params,
+ odp_comp_generic_session_t *session);
+
+/**
+ * Per session data structure
+ */
+struct odp_comp_generic_session {
+ struct odp_comp_generic_session *next;
+ odp_comp_session_param_t params;
+ struct {
+ comp_func_t func;
+ mz_stream stream;
+ union {
+ tdefl_compressor comp;
+ inflate_state inflate;
+ } data;
+ } comp;
+};
+
+typedef struct odp_comp_global_s {
+ odp_spinlock_t lock;
+ odp_shm_t global_shm;
+ odp_comp_generic_session_t *free;
+ odp_comp_generic_session_t sessions[MAX_SESSIONS];
+} odp_comp_global_t;
+
+static odp_comp_global_t *global;
+
+static
+odp_comp_generic_session_t *alloc_session(void)
+{
+ odp_comp_generic_session_t *session = NULL;
+
+ odp_spinlock_lock(&global->lock);
+ session = global->free;
+ if (session) {
+ global->free = session->next;
+ session->next = NULL;
+ }
+ odp_spinlock_unlock(&global->lock);
+
+ return session;
+}
+
+static
+void free_session(odp_comp_generic_session_t *session)
+{
+ odp_spinlock_lock(&global->lock);
+ session->next = global->free;
+ global->free = session;
+ odp_spinlock_unlock(&global->lock);
+}
+
+static int
+null_comp_routine(odp_packet_t pkt_in ODP_UNUSED,
+ odp_packet_t pkt_out ODP_UNUSED,
+ const odp_comp_packet_op_param_t *params ODP_UNUSED,
+ odp_comp_generic_session_t *session ODP_UNUSED)
+{
+ return 0;
+}
+
+static
+odp_comp_packet_result_t *get_op_result_from_packet(odp_packet_t pkt)
+{
+ return &(packet_hdr(pkt)->comp_op_result);
+}
+
+void odp_comp_session_param_init(odp_comp_session_param_t *param)
+{
+ memset(param, 0, sizeof(odp_comp_session_param_t));
+}
+
+static void process_input(odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *params,
+ odp_comp_generic_session_t *session,
+ odp_comp_packet_result_t *result,
+ odp_bool_t sync)
+{
+ mz_streamp streamp = &session->comp.stream;
+ int ret = 0;
+ uint8_t *out_data = NULL;
+ uint32_t out_len = 0;
+ uint32_t written = 0;
+ uint32_t start = 0;
+ uint32_t output_end = 0;
+ uint32_t space_avail = 0;
+ odp_packet_seg_t cur_seg = ODP_PACKET_SEG_INVALID;
+ odp_packet_data_range_t *res_data_range;
+ int finish = 0;
+
+ res_data_range = &result->output_data_range;
+
+ start = res_data_range->offset + res_data_range->length;
+ space_avail = params->out_data_range.length -
+ res_data_range->length;
+ output_end = space_avail + start;
+
+ do {
+ out_data =
+ odp_packet_offset(pkt_out, start, &out_len, &cur_seg);
+ ODP_DBG("out_data 0x%x seg_data_ptr 0x%x out_len %d seg 0x%x\n",
+ out_data, odp_packet_seg_data(pkt_out, cur_seg),
+ out_len, cur_seg);
+
+ if (0 == out_len) {
+ /* there are no more segments */
+ ODP_DBG("Ran out of space. (streamp->avail_out) %d\n",
+ (streamp->avail_out));
+ result->status = ODP_COMP_STATUS_OUT_OF_SPACE_TERM;
+ break;
+ }
+
+ /* if the segment length is greater than the user-supplied
+ * available space, adjust the output length
+ */
+ if (out_len > space_avail)
+ out_len = space_avail;
+
+ streamp->next_out = out_data;
+ streamp->avail_out = out_len;
+
+ ODP_DBG("next_in 0x%x, avail_in %d next_out 0x%lx"
+ " avail_out %d, sync %d\n",
+ streamp->next_in, streamp->avail_in,
+ streamp->next_out,
+ streamp->avail_out,
+ sync);
+
+ if (session->params.op == ODP_COMP_OP_COMPRESS)
+ ret = mz_deflate(streamp,
+ sync ? MZ_FINISH : MZ_NO_FLUSH);
+ else
+ ret = mz_inflate(streamp, MZ_NO_FLUSH);
+
+ ODP_DBG("ret %d streamp->avail_out %d avail_in %d\n",
+ ret, streamp->avail_out, streamp->avail_in);
+
+ out_len = out_len - streamp->avail_out;
+ written += out_len;
+
+ /* increase next offset by amount of data written into
+ * output buffer and decrease available space by amount
+ * of space consumed.
+ */
+ start += out_len;
+ space_avail -= out_len;
+
+ ODP_DBG("ret %d,written %d\n", ret, out_len);
+
+ if (ret == MZ_STREAM_END) {
+ if (session->params.op == ODP_COMP_OP_COMPRESS) {
+ /* required to continue processing the next pkt with the same stream */
+ mz_deflateReset(streamp);
+ } else {
+ mz_inflateReset(streamp);
+ }
+ finish = 1;
+ break;
+ }
+ if ((ret != MZ_BUF_ERROR) && (ret != MZ_OK)) {
+ ODP_DBG("deflate failed. Err %s,ret %d"
+ "(streamp->avail_out) %d\n",
+ streamp->msg, ret, (streamp->avail_out));
+ result->status = ODP_COMP_STATUS_FAILURE;
+ return;
+ }
+ } while (!streamp->avail_out && (start < output_end));
+
+ res_data_range->length += written;
+
+ if ((!finish) && !(streamp->avail_out)) {
+ /* if writing stopped because the output space was
+ exhausted, report ODP_COMP_STATUS_OUT_OF_SPACE_TERM
+ */
+ ODP_DBG("Ran out of space. (out avail) %d,"
+ "to process %d\n", streamp->avail_out,
+ streamp->avail_in);
+ result->status = ODP_COMP_STATUS_OUT_OF_SPACE_TERM;
+ } else {
+ result->status = ODP_COMP_STATUS_SUCCESS;
+ }
+}
+
+/*
+ * Deflate routine to perform deflate-based compression/decompression
+ *
+ * NOTE: The current implementation does not support in-place operation
+ */
+static int deflate_comp(odp_packet_t pkt_in,
+ odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *params,
+ odp_comp_generic_session_t *session)
+{
+ mz_streamp streamp;
+ uint8_t *data = NULL;
+ uint32_t len;
+ uint32_t in_len = 0;
+ uint32_t read = 0;
+ uint32_t consumed = 0;
+ odp_bool_t sync = false;
+ odp_packet_seg_t in_seg = ODP_PACKET_SEG_INVALID;
+ odp_comp_packet_result_t *result = get_op_result_from_packet(pkt_out);
+
+ ODP_ASSERT(session != NULL);
+ ODP_ASSERT(params != NULL);
+ ODP_ASSERT(pkt_in != ODP_PACKET_INVALID);
+ ODP_ASSERT(pkt_out != ODP_PACKET_INVALID);
+
+ streamp = &session->comp.stream;
+
+ /* Adjust the pointer to the beginning of the area to compress.
+ Since miniz requires a physically contiguous area, we have to walk
+ the packet segment by segment: packets are inherently segmented and
+ the segments may not be contiguous.
+ */
+
+ read = params->in_data_range.offset;
+ len = params->in_data_range.length;
+
+ while (read < (len + params->in_data_range.offset)) {
+ data = odp_packet_offset(pkt_in,
+ read,
+ &in_len,
+ &in_seg);
+ ODP_DBG("data 0x%x in_len %d seg 0x%x len %d\n",
+ data, in_len, in_seg, len);
+
+ if (in_len > len)
+ in_len = len;
+
+ /* tracker for data consumed from input */
+ consumed += in_len;
+ streamp->next_in = data;
+ streamp->avail_in = in_len;
+
+ if (consumed >= len) {
+ ODP_DBG("This is last chunk\n");
+ sync = true;
+ }
+
+ process_input(pkt_out, params, session, result, sync);
+
+ if (result->status != ODP_COMP_STATUS_SUCCESS)
+ return -1;
+
+ read += in_len;
+ }
+
+ ODP_DBG("Read %d Written %d\n",
+ read,
+ result->output_data_range.length);
+
+ return 0;
+}
+
+static void *comp_zalloc(void *opaque, size_t items, size_t size)
+{
+ odp_comp_generic_session_t *session = opaque;
+
+ if (items * size > sizeof(session->comp.data))
+ return NULL;
+ else
+ return &session->comp.data;
+}
+
+static void comp_zfree(void *opaque ODP_UNUSED, void *data ODP_UNUSED)
+{
+ /* Do nothing */
+}
+
+static int deflate_init(odp_comp_generic_session_t *session)
+{
+ mz_streamp streamp = &session->comp.stream;
+ uint32_t level;
+ uint32_t strategy;
+ int32_t window_bits;
+ uint32_t cl;
+ odp_comp_huffman_code_t cc;
+
+ /* optional sanity check; strictly speaking not required */
+ ODP_ASSERT(strcmp(mz_version(), MZ_VERSION) == 0);
+
+ memset(&session->comp.stream, 0, sizeof(mz_stream));
+
+ /* Plug in session-local allocator callbacks so that zlib's internal
+ allocations are served from preallocated per-session storage; this
+ also helps catch any allocation issues between ODP-owned and
+ zlib-allocated memory.
+ */
+ streamp->zalloc = comp_zalloc;
+ streamp->zfree = comp_zfree;
+ streamp->opaque = session;
+
+ switch (session->params.comp_algo) {
+ case ODP_COMP_ALG_ZLIB:
+ cl = session->params.alg_param.zlib.deflate.comp_level;
+ cc = session->params.alg_param.zlib.deflate.huffman_code;
+ window_bits = MZ_DEFAULT_WINDOW_BITS;
+ break;
+ case ODP_COMP_ALG_DEFLATE:
+ cl = session->params.alg_param.deflate.comp_level;
+ cc = session->params.alg_param.deflate.huffman_code;
+ window_bits = -MZ_DEFAULT_WINDOW_BITS;
+ break;
+ default:
+ return -1;
+ }
+
+ level = MZ_DEFAULT_COMPRESSION; /* Z_BEST_COMPRESSION; */
+ if (cl)
+ level = cl;
+
+ switch (cc) {
+ case ODP_COMP_HUFFMAN_DEFAULT:
+ case ODP_COMP_HUFFMAN_DYNAMIC: /* Z_HUFFMAN_ONLY */
+ strategy = MZ_DEFAULT_STRATEGY;
+ break;
+ case ODP_COMP_HUFFMAN_FIXED:
+ strategy = MZ_FIXED;
+ break;
+ default:
+ return -1;
+ }
+ ODP_DBG(" level %d strategy %d window %d\n",
+ level, strategy, window_bits);
+
+ if (ODP_COMP_OP_COMPRESS == session->params.op) {
+ if (mz_deflateInit2(streamp, level, MZ_DEFLATED, window_bits,
+ MEM_LEVEL, strategy) != MZ_OK) {
+ ODP_DBG("Err in Deflate Initialization %s\n",
+ streamp->msg);
+ return -1;
+ }
+ } else {
+ if (mz_inflateInit2(streamp, window_bits) != MZ_OK) {
+ ODP_DBG("Err in Inflate Initialization %s\n",
+ streamp->msg);
+ return -1;
+ }
+ }
+
+ session->comp.func = deflate_comp;
+
+ return 0;
+}
+
+static int term_def(odp_comp_generic_session_t *session)
+{
+ int rc = 0;
+ mz_streamp streamp = &session->comp.stream;
+
+ if (ODP_COMP_OP_COMPRESS == session->params.op) {
+ rc = mz_deflateEnd(streamp);
+
+ if (rc != MZ_OK) {
+ ODP_ERR("deflateEnd failed. Err %s,rc %d\n",
+ streamp->msg, rc);
+ /* we choose to just return 0 with error info */
+ }
+ } else {
+ rc = mz_inflateEnd(streamp);
+ if (rc != MZ_OK) {
+ ODP_ERR("inflateEnd failed. Err %s\n", streamp->msg);
+ /* we choose to just return 0 with error info */
+ }
+ }
+
+ return 0;
+}
+
+odp_comp_session_t
+odp_comp_session_create(const odp_comp_session_param_t *params)
+{
+ odp_comp_generic_session_t *session;
+ int rc;
+
+ /* Allocate memory for this session */
+ session = alloc_session();
+ if (NULL == session)
+ return ODP_COMP_SESSION_INVALID;
+
+ /* Copy stuff over */
+ memcpy(&session->params, params, sizeof(*params));
+
+ /* Dispatch based on the compression algorithm */
+ switch (params->comp_algo) {
+ case ODP_COMP_ALG_NULL:
+ session->comp.func = null_comp_routine;
+ break;
+ case ODP_COMP_ALG_DEFLATE:
+ case ODP_COMP_ALG_ZLIB:
+ rc = deflate_init(session);
+ if (rc < 0)
+ goto cleanup;
+ break;
+ default:
+ rc = -1;
+ goto cleanup;
+ }
+
+ return (odp_comp_session_t)session;
+
+cleanup:
+ free_session(session);
+
+ return ODP_COMP_SESSION_INVALID;
+}
+
+int odp_comp_session_destroy(odp_comp_session_t session)
+{
+ odp_comp_generic_session_t *generic;
+ int32_t rc = 0;
+
+ generic = (odp_comp_generic_session_t *)(intptr_t)session;
+
+ switch (generic->params.comp_algo) {
+ case ODP_COMP_ALG_DEFLATE:
+ case ODP_COMP_ALG_ZLIB:
+ rc = term_def(generic);
+ break;
+ default:
+ break;
+ }
+ if (rc < 0) {
+ ODP_ERR("Compression Unit could not be terminated\n");
+ return -1;
+ }
+
+ memset(generic, 0, sizeof(*generic));
+ free_session(generic);
+ return 0;
+}
+
+int odp_comp_capability(odp_comp_capability_t *capa)
+{
+ if (NULL == capa)
+ return -1;
+
+ /* Initialize comp capability structure */
+ memset(capa, 0, sizeof(odp_comp_capability_t));
+
+ capa->comp_algos.bit.null = 1;
+ capa->comp_algos.bit.deflate = 1;
+ capa->comp_algos.bit.zlib = 1;
+ capa->hash_algos.bit.none = 1;
+ capa->sync = ODP_SUPPORT_YES;
+ capa->async = ODP_SUPPORT_YES;
+ capa->max_sessions = MAX_SESSIONS;
+ return 0;
+}
+
+int
+odp_comp_alg_capability(odp_comp_alg_t comp,
+ odp_comp_alg_capability_t *capa)
+{
+ switch (comp) {
+ case ODP_COMP_ALG_ZLIB:
+ capa->hash_algo.all_bits = 0;
+ capa->hash_algo.bit.none = 1;
+ capa->max_level = MZ_BEST_COMPRESSION;
+ capa->compression_ratio = 50;
+ return 0;
+ case ODP_COMP_ALG_DEFLATE:
+ capa->hash_algo.all_bits = 0;
+ capa->hash_algo.bit.none = 1;
+ capa->max_level = MZ_BEST_COMPRESSION;
+ capa->compression_ratio = 50;
+ return 0;
+ default:
+ /* Error unsupported enum */
+ return -1;
+ }
+ return -1;
+}
+
+int
+odp_comp_hash_alg_capability(odp_comp_hash_alg_t hash,
+ odp_comp_hash_alg_capability_t *capa)
+{
+ (void)capa;
+ switch (hash) {
+ case ODP_COMP_HASH_ALG_NONE:
+ capa[0].digest_len = 0;
+ return 0;
+ default:
+ return -1;
+ }
+ return -1;
+}
+
+static int _odp_comp_single(odp_packet_t pkt_in, odp_packet_t pkt_out,
+ const odp_comp_packet_op_param_t *param)
+{
+ odp_comp_generic_session_t *session;
+ odp_comp_packet_result_t *result;
+ int rc;
+
+ session = to_gen_session(param->session);
+ ODP_ASSERT(session);
+ ODP_ASSERT(pkt_in != ODP_PACKET_INVALID);
+ ODP_ASSERT(pkt_out != ODP_PACKET_INVALID);
+
+ result = get_op_result_from_packet(pkt_out);
+ ODP_ASSERT(result);
+
+ result->pkt_in = pkt_in;
+ result->output_data_range.offset = param->out_data_range.offset;
+ result->output_data_range.length = 0;
+
+ packet_subtype_set(pkt_out, ODP_EVENT_PACKET_COMP);
+
+ rc = session->comp.func(pkt_in, pkt_out, param, session);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int odp_comp_op(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+ int num_pkt, const odp_comp_packet_op_param_t param[])
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < num_pkt; i++) {
+ rc = _odp_comp_single(pkt_in[i], pkt_out[i], &param[i]);
+ if (rc < 0)
+ break;
+ }
+
+ return i;
+}
+
+int odp_comp_op_enq(const odp_packet_t pkt_in[], odp_packet_t pkt_out[],
+ int num_pkt, const odp_comp_packet_op_param_t param[])
+{
+ int i;
+ int rc;
+
+ for (i = 0; i < num_pkt; i++) {
+ odp_event_t event;
+ odp_comp_generic_session_t *session;
+
+ rc = _odp_comp_single(pkt_in[i], pkt_out[i], &param[i]);
+ if (rc < 0)
+ break;
+
+ event = odp_packet_to_event(pkt_out[i]);
+ session = to_gen_session(param[i].session);
+ if (odp_queue_enq(session->params.compl_queue, event)) {
+ odp_event_free(event);
+ break;
+ }
+ }
+
+ return i;
+}
+
+int odp_comp_result(odp_comp_packet_result_t *result,
+ odp_packet_t packet)
+{
+ odp_comp_packet_result_t *op_result;
+
+ ODP_ASSERT(odp_event_subtype(odp_packet_to_event(packet))
+ == ODP_EVENT_PACKET_COMP);
+
+ op_result = get_op_result_from_packet(packet);
+ ODP_DBG("Copy operational result back\n");
+ memcpy(result, op_result, sizeof(*result));
+ return 0;
+}
+
+int _odp_comp_init_global(void)
+{
+ size_t mem_size;
+ odp_shm_t shm;
+ int idx;
+
+ /* Calculate the memory size we need */
+ mem_size = sizeof(*global);
+
+ /* Allocate our globally shared memory */
+ shm = odp_shm_reserve("comp_pool", mem_size, ODP_CACHE_LINE_SIZE, 0);
+
+ global = odp_shm_addr(shm);
+
+ /* Clear it out */
+ memset(global, 0, mem_size);
+ global->global_shm = shm;
+
+ /* Initialize free list and lock */
+ for (idx = 0; idx < MAX_SESSIONS; idx++) {
+ global->sessions[idx].next = global->free;
+ global->free = &global->sessions[idx];
+ }
+ odp_spinlock_init(&global->lock);
+
+ return 0;
+}
+
+int _odp_comp_term_global(void)
+{
+ int rc = 0;
+ int ret;
+ int count = 0;
+ odp_comp_generic_session_t *session;
+
+ for (session = global->free; session != NULL; session = session->next)
+ count++;
+
+ if (count != MAX_SESSIONS) {
+ ODP_ERR("comp sessions still active\n");
+ rc = -1;
+ }
+
+ ret = odp_shm_free(global->global_shm);
+ if (ret < 0) {
+ ODP_ERR("shm free failed for comp_pool\n");
+ rc = -1;
+ }
+
+ return rc;
+}
+
+odp_packet_t odp_comp_packet_from_event(odp_event_t ev)
+{
+ /* This check is not mandated by the API specification */
+ ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+ ODP_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_COMP);
+
+ return odp_packet_from_event(ev);
+}
+
+odp_event_t odp_comp_packet_to_event(odp_packet_t pkt)
+{
+ return odp_packet_to_event(pkt);
+}
+
+/** Get printable format of odp_comp_session_t */
+uint64_t odp_comp_session_to_u64(odp_comp_session_t hdl)
+{
+ return _odp_pri(hdl);
+}
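+
+/* A minimal synchronous usage sketch (illustrative only, not part of this patch; */
+/* pool setup, packet allocation and error handling are omitted, and pkt_in/pkt_out */
+/* are assumed pre-allocated packets): */
+/*
+ * odp_comp_session_param_t prm;
+ * odp_comp_packet_op_param_t op;
+ * odp_comp_packet_result_t res;
+ * odp_comp_session_t ses;
+ *
+ * odp_comp_session_param_init(&prm);
+ * prm.op = ODP_COMP_OP_COMPRESS;
+ * prm.comp_algo = ODP_COMP_ALG_DEFLATE;
+ * ses = odp_comp_session_create(&prm);
+ *
+ * op.session = ses;
+ * op.in_data_range.offset = 0;
+ * op.in_data_range.length = odp_packet_len(pkt_in);
+ * op.out_data_range.offset = 0;
+ * op.out_data_range.length = odp_packet_len(pkt_out);
+ *
+ * if (odp_comp_op(&pkt_in, &pkt_out, 1, &op) == 1)
+ *     odp_comp_result(&res, pkt_out);
+ *
+ * res.output_data_range.length then holds the number of bytes produced.
+ */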
diff --git a/platform/linux-generic/odp_cpumask.c b/platform/linux-generic/odp_cpumask.c
index 1b384d4e5..551a80d82 100644
--- a/platform/linux-generic/odp_cpumask.c
+++ b/platform/linux-generic/odp_cpumask.c
@@ -236,8 +236,8 @@ static int get_available_cpus(void)
int ret;
/* Clear the global cpumasks for control and worker CPUs */
- odp_cpumask_zero(&odp_global_data.control_cpus);
- odp_cpumask_zero(&odp_global_data.worker_cpus);
+ odp_cpumask_zero(&odp_global_ro.control_cpus);
+ odp_cpumask_zero(&odp_global_ro.worker_cpus);
CPU_ZERO(&cpuset);
ret = sched_getaffinity(0, sizeof(cpuset), &cpuset);
@@ -249,12 +249,12 @@ static int get_available_cpus(void)
for (cpu_idnum = 0; cpu_idnum < CPU_SETSIZE - 1; cpu_idnum++) {
if (CPU_ISSET(cpu_idnum, &cpuset)) {
- odp_global_data.num_cpus_installed++;
+ odp_global_ro.num_cpus_installed++;
/* Add the CPU to our default cpumasks */
- odp_cpumask_set(&odp_global_data.control_cpus,
- (int)cpu_idnum);
- odp_cpumask_set(&odp_global_data.worker_cpus,
- (int)cpu_idnum);
+ odp_cpumask_set(&odp_global_ro.control_cpus,
+ (int)cpu_idnum);
+ odp_cpumask_set(&odp_global_ro.worker_cpus,
+ (int)cpu_idnum);
}
}
@@ -269,8 +269,8 @@ static int get_available_cpus(void)
*/
static void init_default_control_cpumask(int worker_cpus_default)
{
- odp_cpumask_t *control_mask = &odp_global_data.control_cpus;
- odp_cpumask_t *worker_mask = &odp_global_data.worker_cpus;
+ odp_cpumask_t *control_mask = &odp_global_ro.control_cpus;
+ odp_cpumask_t *worker_mask = &odp_global_ro.worker_cpus;
int i;
/* (Bits for all available CPUs are SET in control cpumask) */
@@ -281,7 +281,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* If only one or two CPUs installed, use CPU 0 for control.
* Otherwise leave it for the kernel and start with CPU 1.
*/
- if (odp_global_data.num_cpus_installed < 3) {
+ if (odp_global_ro.num_cpus_installed < 3) {
/*
* If only two CPUS, use CPU 0 for control and
* use CPU 1 for workers.
@@ -294,7 +294,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* reserve remaining CPUs for workers
*/
odp_cpumask_clr(control_mask, 0);
- for (i = 2; i < odp_global_data.num_cpus_installed; i++)
+ for (i = 2; i < odp_global_ro.num_cpus_installed; i++)
if (odp_cpumask_isset(worker_mask, i))
odp_cpumask_clr(control_mask, i);
}
@@ -303,7 +303,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* The worker cpumask was specified so first ensure
* the control cpumask does not overlap any worker CPUs
*/
- for (i = 0; i < odp_global_data.num_cpus_installed; i++)
+ for (i = 0; i < odp_global_ro.num_cpus_installed; i++)
if (odp_cpumask_isset(worker_mask, i))
odp_cpumask_clr(control_mask, i);
@@ -311,7 +311,7 @@ static void init_default_control_cpumask(int worker_cpus_default)
* If only one or two CPUs installed,
* ensure availability of CPU 0 for control threads
*/
- if (odp_global_data.num_cpus_installed < 3) {
+ if (odp_global_ro.num_cpus_installed < 3) {
odp_cpumask_set(control_mask, 0);
odp_cpumask_clr(control_mask, 1);
} else {
@@ -337,8 +337,8 @@ static void init_default_control_cpumask(int worker_cpus_default)
*/
static void init_default_worker_cpumask(int control_cpus_default)
{
- odp_cpumask_t *control_mask = &odp_global_data.control_cpus;
- odp_cpumask_t *worker_mask = &odp_global_data.worker_cpus;
+ odp_cpumask_t *control_mask = &odp_global_ro.control_cpus;
+ odp_cpumask_t *worker_mask = &odp_global_ro.worker_cpus;
int i;
/* (Bits for all available CPUs are SET in worker cpumask) */
@@ -348,10 +348,10 @@ static void init_default_worker_cpumask(int control_cpus_default)
* The control cpumask was also unspecified...
* CPU 0 is only used for workers on uniprocessor systems
*/
- if (odp_global_data.num_cpus_installed > 1)
+ if (odp_global_ro.num_cpus_installed > 1)
odp_cpumask_clr(worker_mask, 0);
- if (odp_global_data.num_cpus_installed > 2)
+ if (odp_global_ro.num_cpus_installed > 2)
/*
* If three or more CPUs, reserve CPU 0 for kernel,
* reserve CPU 1 for control, and
@@ -363,7 +363,7 @@ static void init_default_worker_cpumask(int control_cpus_default)
* The control cpumask was specified so first ensure
* the worker cpumask does not overlap any control CPUs
*/
- for (i = 0; i < odp_global_data.num_cpus_installed; i++)
+ for (i = 0; i < odp_global_ro.num_cpus_installed; i++)
if (odp_cpumask_isset(control_mask, i))
odp_cpumask_clr(worker_mask, i);
@@ -371,7 +371,7 @@ static void init_default_worker_cpumask(int control_cpus_default)
* If only one CPU installed, use CPU 0 for workers
* even though it is used for control as well.
*/
- if (odp_global_data.num_cpus_installed < 2)
+ if (odp_global_ro.num_cpus_installed < 2)
odp_cpumask_set(worker_mask, 0);
else
odp_cpumask_clr(worker_mask, 0);
@@ -386,8 +386,8 @@ static void init_default_worker_cpumask(int control_cpus_default)
*/
int odp_cpumask_init_global(const odp_init_t *params)
{
- odp_cpumask_t *control_mask = &odp_global_data.control_cpus;
- odp_cpumask_t *worker_mask = &odp_global_data.worker_cpus;
+ odp_cpumask_t *control_mask = &odp_global_ro.control_cpus;
+ odp_cpumask_t *worker_mask = &odp_global_ro.worker_cpus;
odp_cpumask_t check_mask;
int control_cpus_default = 1;
int worker_cpus_default = 1;
diff --git a/platform/linux-generic/odp_cpumask_task.c b/platform/linux-generic/odp_cpumask_task.c
index c1b79f07f..c269116ce 100644
--- a/platform/linux-generic/odp_cpumask_task.c
+++ b/platform/linux-generic/odp_cpumask_task.c
@@ -23,20 +23,20 @@ int odp_cpumask_default_worker(odp_cpumask_t *mask, int num)
* If no user supplied number or it's too large, then attempt
* to use all CPUs
*/
- cpu = odp_cpumask_count(&odp_global_data.worker_cpus);
+ cpu = odp_cpumask_count(&odp_global_ro.worker_cpus);
if (0 == num || cpu < num)
num = cpu;
/* build the mask, allocating down from highest numbered CPU */
odp_cpumask_zero(mask);
for (cpu = 0, i = CPU_SETSIZE - 1; i >= 0 && cpu < num; --i) {
- if (odp_cpumask_isset(&odp_global_data.worker_cpus, i)) {
+ if (odp_cpumask_isset(&odp_global_ro.worker_cpus, i)) {
odp_cpumask_set(mask, i);
cpu++;
}
}
- odp_cpumask_and(&overlap, mask, &odp_global_data.control_cpus);
+ odp_cpumask_and(&overlap, mask, &odp_global_ro.control_cpus);
if (odp_cpumask_count(&overlap))
ODP_DBG("\n\tWorker CPUs overlap with control CPUs...\n"
"\tthis will likely have a performance impact on the worker threads.\n");
@@ -59,7 +59,7 @@ int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
* If user supplied number is too large, then attempt
* to use all installed control CPUs
*/
- cpu = odp_cpumask_count(&odp_global_data.control_cpus);
+ cpu = odp_cpumask_count(&odp_global_ro.control_cpus);
if (cpu < num)
num = cpu;
}
@@ -67,13 +67,13 @@ int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
/* build the mask, allocating upwards from lowest numbered CPU */
odp_cpumask_zero(mask);
for (cpu = 0, i = 0; i < CPU_SETSIZE && cpu < num; i++) {
- if (odp_cpumask_isset(&odp_global_data.control_cpus, i)) {
+ if (odp_cpumask_isset(&odp_global_ro.control_cpus, i)) {
odp_cpumask_set(mask, i);
cpu++;
}
}
- odp_cpumask_and(&overlap, mask, &odp_global_data.worker_cpus);
+ odp_cpumask_and(&overlap, mask, &odp_global_ro.worker_cpus);
if (odp_cpumask_count(&overlap))
ODP_DBG("\n\tControl CPUs overlap with worker CPUs...\n"
"\tthis will likely have a performance impact on the worker threads.\n");
@@ -83,8 +83,8 @@ int odp_cpumask_default_control(odp_cpumask_t *mask, int num)
int odp_cpumask_all_available(odp_cpumask_t *mask)
{
- odp_cpumask_or(mask, &odp_global_data.worker_cpus,
- &odp_global_data.control_cpus);
+ odp_cpumask_or(mask, &odp_global_ro.worker_cpus,
+ &odp_global_ro.control_cpus);
return odp_cpumask_count(mask);
}
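These cpumask hunks only rename odp_global_data to odp_global_ro; the worker/control mask derivation is unchanged. For reference, a minimal sketch of how an application consumes the derived defaults, assuming only the standard ODP cpumask API (not part of this patch):

#include <odp_api.h>
#include <stdio.h>

/* Sketch: query the default worker and control masks computed above. */
static void print_default_masks(void)
{
	odp_cpumask_t workers, control;
	char str[ODP_CPUMASK_STR_SIZE];
	int num_w = odp_cpumask_default_worker(&workers, 0); /* 0: use all */
	int num_c = odp_cpumask_default_control(&control, 1);

	odp_cpumask_to_str(&workers, str, sizeof(str));
	printf("%d worker CPUs: %s\n", num_w, str);
	odp_cpumask_to_str(&control, str, sizeof(str));
	printf("%d control CPUs: %s\n", num_c, str);
}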
diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c
index 7d01f10eb..4dd1f8325 100644
--- a/platform/linux-generic/odp_crypto_null.c
+++ b/platform/linux-generic/odp_crypto_null.c
@@ -20,6 +20,7 @@
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/thread_inlines.h>
#include <odp_packet_internal.h>
+#include <odp/api/plat/queue_inlines.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -315,8 +316,9 @@ odp_crypto_init_global(void)
mem_size = sizeof(odp_crypto_global_t);
/* Allocate our globally shared memory */
- shm = odp_shm_reserve("crypto_pool", mem_size,
- ODP_CACHE_LINE_SIZE, 0);
+ shm = odp_shm_reserve("_odp_crypto_pool_null", mem_size,
+ ODP_CACHE_LINE_SIZE,
+ 0);
if (ODP_SHM_INVALID == shm) {
ODP_ERR("unable to allocate crypto pool\n");
return -1;
@@ -351,9 +353,9 @@ int odp_crypto_term_global(void)
rc = -1;
}
- ret = odp_shm_free(odp_shm_lookup("crypto_pool"));
+ ret = odp_shm_free(odp_shm_lookup("_odp_crypto_pool_null"));
if (ret < 0) {
- ODP_ERR("shm free failed for crypto_pool\n");
+ ODP_ERR("shm free failed for _odp_crypto_pool_null\n");
rc = -1;
}
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c
index 8473bf5ab..8feebefb3 100644
--- a/platform/linux-generic/odp_crypto_openssl.c
+++ b/platform/linux-generic/odp_crypto_openssl.c
@@ -20,6 +20,7 @@
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/thread_inlines.h>
#include <odp_packet_internal.h>
+#include <odp/api/plat/queue_inlines.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -124,10 +125,10 @@ static const odp_crypto_auth_capability_t auth_capa_aes_gmac[] = {
static const odp_crypto_auth_capability_t auth_capa_aes_cmac[] = {
{.digest_len = 12, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
-{.digest_len = 16, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
{.digest_len = 12, .key_len = 24, .aad_len = {.min = 0, .max = 0, .inc = 0} },
-{.digest_len = 16, .key_len = 24, .aad_len = {.min = 0, .max = 0, .inc = 0} },
{.digest_len = 12, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 16, .aad_len = {.min = 0, .max = 0, .inc = 0} },
+{.digest_len = 16, .key_len = 24, .aad_len = {.min = 0, .max = 0, .inc = 0} },
{.digest_len = 16, .key_len = 32, .aad_len = {.min = 0, .max = 0, .inc = 0} } };
#if _ODP_HAVE_CHACHA20_POLY1305
@@ -1855,8 +1856,9 @@ odp_crypto_init_global(void)
mem_size += nlocks * sizeof(odp_ticketlock_t);
/* Allocate our globally shared memory */
- shm = odp_shm_reserve("crypto_pool", mem_size,
- ODP_CACHE_LINE_SIZE, 0);
+ shm = odp_shm_reserve("_odp_crypto_pool_ssl", mem_size,
+ ODP_CACHE_LINE_SIZE,
+ 0);
if (ODP_SHM_INVALID == shm) {
ODP_ERR("unable to allocate crypto pool\n");
return -1;
@@ -1902,7 +1904,7 @@ int odp_crypto_term_global(void)
CRYPTO_set_locking_callback(NULL);
CRYPTO_set_id_callback(NULL);
- ret = odp_shm_free(odp_shm_lookup("crypto_pool"));
+ ret = odp_shm_free(odp_shm_lookup("_odp_crypto_pool_ssl"));
if (ret < 0) {
ODP_ERR("shm free failed for crypto_pool\n");
rc = -1;
diff --git a/platform/linux-generic/odp_event.c b/platform/linux-generic/odp_event.c
index bb378528b..efcbc1e26 100644
--- a/platform/linux-generic/odp_event.c
+++ b/platform/linux-generic/odp_event.c
@@ -59,6 +59,16 @@ int odp_event_type_multi(const odp_event_t event[], int num,
return i;
}
+uint32_t odp_event_flow_id(odp_event_t event)
+{
+ return event_flow_id(event);
+}
+
+void odp_event_flow_id_set(odp_event_t event, uint32_t flow_id)
+{
+ event_flow_id_set(event, flow_id);
+}
+
void odp_event_free(odp_event_t event)
{
switch (odp_event_type(event)) {
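The two new accessors above delegate to the internal event header helpers. A minimal usage sketch, assuming 'ev' is a valid event handle (hypothetical helper, not part of this patch):

#include <odp_api.h>

/* Sketch: tag an event with a flow ID, e.g. so a flow-aware scheduler can
 * keep per-flow ordering. A freshly allocated event carries flow ID 0. */
static void tag_flow(odp_event_t ev, uint32_t flow)
{
	if (odp_event_flow_id(ev) != flow)
		odp_event_flow_id_set(ev, flow);
}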
diff --git a/platform/linux-generic/odp_fdserver.c b/platform/linux-generic/odp_fdserver.c
index 0c9cc50e8..232d7f4e0 100644
--- a/platform/linux-generic/odp_fdserver.c
+++ b/platform/linux-generic/odp_fdserver.c
@@ -249,9 +249,9 @@ static int get_socket(void)
/* construct the named socket path: */
snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCK_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid,
- odp_global_data.main_pid);
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
+ odp_global_ro.main_pid);
s_sock = socket(AF_UNIX, SOCK_STREAM, 0);
if (s_sock == -1) {
@@ -566,16 +566,16 @@ int _odp_fdserver_init_global(void)
int res;
snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKDIR_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid);
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid);
mkdir(sockpath, 0744);
/* construct the server named socket path: */
snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCK_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid,
- odp_global_data.main_pid);
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
+ odp_global_ro.main_pid);
/* create UNIX domain socket: */
sock = socket(AF_UNIX, SOCK_STREAM, 0);
@@ -692,17 +692,17 @@ int _odp_fdserver_term_global(void)
/* construct the server named socket path: */
snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCK_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid,
- odp_global_data.main_pid);
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
+ odp_global_ro.main_pid);
/* delete the UNIX domain socket: */
unlink(sockpath);
/* delete shm files directory */
snprintf(sockpath, FDSERVER_SOCKPATH_MAXLEN, FDSERVER_SOCKDIR_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid);
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid);
rmdir(sockpath);
return 0;
diff --git a/platform/linux-generic/odp_hash_crc_gen.c b/platform/linux-generic/odp_hash_crc_gen.c
index 1ea6d8cd3..93ebe0e95 100644
--- a/platform/linux-generic/odp_hash_crc_gen.c
+++ b/platform/linux-generic/odp_hash_crc_gen.c
@@ -12,6 +12,7 @@
#include <odp/api/hash.h>
#include <odp/api/hints.h>
#include <odp/api/rwlock.h>
+#include <odp/api/shared_memory.h>
#include <odp_debug_internal.h>
#include <odp_init_internal.h>
@@ -22,16 +23,40 @@ typedef struct crc_table_t {
uint32_t poly;
int reflect;
odp_rwlock_t rwlock;
+ odp_shm_t shm;
} crc_table_t;
-static crc_table_t crc_table;
+static crc_table_t *crc_table;
int _odp_hash_init_global(void)
{
- memset(&crc_table, 0, sizeof(crc_table_t));
+ odp_shm_t shm;
- odp_rwlock_init(&crc_table.rwlock);
+ shm = odp_shm_reserve("_odp_hash_crc_gen", sizeof(crc_table_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ crc_table = odp_shm_addr(shm);
+
+ if (crc_table == NULL) {
+ ODP_ERR("Shm reserve failed for odp_hash_crc_gen\n");
+ return -1;
+ }
+
+ memset(crc_table, 0, sizeof(crc_table_t));
+
+ crc_table->shm = shm;
+ odp_rwlock_init(&crc_table->rwlock);
+
+ return 0;
+}
+
+int _odp_hash_term_global(void)
+{
+ if (odp_shm_free(crc_table->shm)) {
+ ODP_ERR("Shm free failed for odp_hash_crc_gen\n");
+ return -1;
+ }
return 0;
}
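The CRC table moves from a static variable into shared memory so that a single instance exists also when ODP threads are separate processes. A generic sketch of the reserve/free pattern applied here, with hypothetical names:

#include <odp_api.h>
#include <string.h>

typedef struct {
	odp_shm_t shm;
	uint32_t table[256]; /* hypothetical payload */
} my_global_t;

static my_global_t *my_global;

static int my_init_global(void)
{
	odp_shm_t shm = odp_shm_reserve("_my_global", sizeof(my_global_t),
					ODP_CACHE_LINE_SIZE, 0);

	if (shm == ODP_SHM_INVALID)
		return -1;

	my_global = odp_shm_addr(shm);
	memset(my_global, 0, sizeof(my_global_t));
	my_global->shm = shm;
	return 0;
}

static int my_term_global(void)
{
	return odp_shm_free(my_global->shm);
}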
@@ -98,9 +123,9 @@ static inline void crc_table_gen(uint32_t poly, int reflect, int width)
{
uint32_t i, crc, bit, shift, msb, mask;
- crc_table.width = width;
- crc_table.poly = poly;
- crc_table.reflect = reflect;
+ crc_table->width = width;
+ crc_table->poly = poly;
+ crc_table->reflect = reflect;
shift = width - 8;
mask = 0xffffffff >> (32 - width);
@@ -136,7 +161,7 @@ static inline void crc_table_gen(uint32_t poly, int reflect, int width)
}
}
- crc_table.crc[i] = crc & mask;
+ crc_table->crc[i] = crc & mask;
}
}
@@ -156,9 +181,10 @@ static inline uint32_t crc_calc(const uint8_t *data, uint32_t data_len,
byte = data[i];
if (reflect) {
- crc = crc_table.crc[(crc ^ byte) & 0xff] ^ (crc >> 8);
+ crc = crc_table->crc[(crc ^ byte) & 0xff] ^ (crc >> 8);
} else {
- crc = crc_table.crc[(crc >> shift) ^ byte] ^ (crc << 8);
+ crc = crc_table->crc[(crc >> shift) ^ byte] ^
+ (crc << 8);
crc = crc & mask;
}
}
@@ -192,16 +218,16 @@ int odp_hash_crc_gen64(const void *data_ptr, uint32_t data_len,
return -1;
}
- odp_rwlock_read_lock(&crc_table.rwlock);
+ odp_rwlock_read_lock(&crc_table->rwlock);
- update_table = (crc_table.width != width) ||
- (crc_table.poly != poly) ||
- (crc_table.reflect != reflect);
+ update_table = (crc_table->width != width) ||
+ (crc_table->poly != poly) ||
+ (crc_table->reflect != reflect);
/* Generate CRC table if not yet generated. */
if (odp_unlikely(update_table)) {
- odp_rwlock_read_unlock(&crc_table.rwlock);
- odp_rwlock_write_lock(&crc_table.rwlock);
+ odp_rwlock_read_unlock(&crc_table->rwlock);
+ odp_rwlock_write_lock(&crc_table->rwlock);
crc_table_gen(poly, reflect, width);
}
@@ -209,9 +235,9 @@ int odp_hash_crc_gen64(const void *data_ptr, uint32_t data_len,
crc = crc_calc(data_ptr, data_len, init_val, reflect, width);
if (odp_unlikely(update_table))
- odp_rwlock_write_unlock(&crc_table.rwlock);
+ odp_rwlock_write_unlock(&crc_table->rwlock);
else
- odp_rwlock_read_unlock(&crc_table.rwlock);
+ odp_rwlock_read_unlock(&crc_table->rwlock);
if (crc_param->xor_out)
crc = crc ^ (uint32_t)crc_param->xor_out;
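The rwlock scheme above regenerates the shared table only when the (width, poly, reflect) triple changes, taking the write lock for that case and the read lock otherwise. A usage sketch, assuming the odp_hash_crc_param_t field names from the ODP API spec:

#include <odp_api.h>

/* Sketch: standard CRC-32 through the generic CRC API. */
static int crc32_std(const void *data, uint32_t len, uint64_t *crc)
{
	odp_hash_crc_param_t param = {
		.width = 32,
		.poly = 0x04c11db7,
		.reflect_in = 1,
		.reflect_out = 1,
		.xor_out = 0xffffffff
	};

	return odp_hash_crc_gen64(data, len, 0xffffffff, &param, crc);
}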
diff --git a/platform/linux-generic/odp_init.c b/platform/linux-generic/odp_init.c
index 28f94cd37..a09583ee5 100644
--- a/platform/linux-generic/odp_init.c
+++ b/platform/linux-generic/odp_init.c
@@ -9,6 +9,7 @@
#include <odp_posix_extensions.h>
#include <odp/api/init.h>
+#include <odp/api/shared_memory.h>
#include <odp_debug_internal.h>
#include <odp_init_internal.h>
#include <odp_schedule_if.h>
@@ -22,11 +23,12 @@ enum init_stage {
LIBCONFIG_INIT,
CPUMASK_INIT,
CPU_CYCLES_INIT,
- HASH_INIT,
TIME_INIT,
SYSINFO_INIT,
ISHM_INIT,
FDSERVER_INIT,
+ GLOBAL_RW_DATA_INIT,
+ HASH_INIT,
THREAD_INIT,
POOL_INIT,
QUEUE_INIT,
@@ -35,6 +37,7 @@ enum init_stage {
TIMER_INIT,
RANDOM_INIT,
CRYPTO_INIT,
+ COMP_INIT,
CLASSIFICATION_INIT,
TRAFFIC_MNGR_INIT,
NAME_TABLE_INIT,
@@ -44,13 +47,51 @@ enum init_stage {
ALL_INIT /* All init stages completed */
};
-struct odp_global_data_s odp_global_data;
+struct odp_global_data_ro_t odp_global_ro;
+struct odp_global_data_rw_t *odp_global_rw;
void odp_init_param_init(odp_init_t *param)
{
memset(param, 0, sizeof(odp_init_t));
}
+static int global_rw_data_init(void)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("_odp_global_rw_data",
+ sizeof(struct odp_global_data_rw_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ odp_global_rw = odp_shm_addr(shm);
+ if (odp_global_rw == NULL) {
+ ODP_ERR("Global RW data shm reserve failed.\n");
+ return -1;
+ }
+
+ memset(odp_global_rw, 0, sizeof(struct odp_global_data_rw_t));
+
+ return 0;
+}
+
+static int global_rw_data_term(void)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup("_odp_global_rw_data");
+ if (shm == ODP_SHM_INVALID) {
+ ODP_ERR("Unable to find global RW data shm.\n");
+ return -1;
+ }
+
+ if (odp_shm_free(shm)) {
+ ODP_ERR("Global RW data shm free failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static int term_global(enum init_stage stage)
{
int rc = 0;
@@ -99,6 +140,13 @@ static int term_global(enum init_stage stage)
}
/* Fall through */
+ case COMP_INIT:
+ if (_odp_comp_term_global()) {
+ ODP_ERR("ODP comp term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
case CRYPTO_INIT:
if (odp_crypto_term_global()) {
ODP_ERR("ODP crypto term failed.\n");
@@ -151,6 +199,20 @@ static int term_global(enum init_stage stage)
}
/* Fall through */
+ case HASH_INIT:
+ if (_odp_hash_term_global()) {
+ ODP_ERR("ODP hash term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
+ case GLOBAL_RW_DATA_INIT:
+ if (global_rw_data_term()) {
+ ODP_ERR("ODP global RW data term failed.\n");
+ rc = -1;
+ }
+ /* Fall through */
+
case FDSERVER_INIT:
if (_odp_fdserver_term_global()) {
ODP_ERR("ODP fdserver term failed.\n");
@@ -179,8 +241,6 @@ static int term_global(enum init_stage stage)
}
/* Fall through */
- case HASH_INIT:
- /* Fall through */
case CPU_CYCLES_INIT:
/* Fall through */
case CPUMASK_INIT:
@@ -208,18 +268,23 @@ int odp_init_global(odp_instance_t *instance,
const odp_init_t *params,
const odp_platform_init_t *platform_params ODP_UNUSED)
{
- memset(&odp_global_data, 0, sizeof(struct odp_global_data_s));
- odp_global_data.main_pid = getpid();
+ memset(&odp_global_ro, 0, sizeof(struct odp_global_data_ro_t));
+ odp_global_ro.main_pid = getpid();
enum init_stage stage = NO_INIT;
- odp_global_data.log_fn = odp_override_log;
- odp_global_data.abort_fn = odp_override_abort;
+ odp_global_ro.log_fn = odp_override_log;
+ odp_global_ro.abort_fn = odp_override_abort;
+ odp_init_param_init(&odp_global_ro.init_param);
if (params != NULL) {
+ odp_global_ro.init_param = *params;
+
if (params->log_fn != NULL)
- odp_global_data.log_fn = params->log_fn;
+ odp_global_ro.log_fn = params->log_fn;
if (params->abort_fn != NULL)
- odp_global_data.abort_fn = params->abort_fn;
+ odp_global_ro.abort_fn = params->abort_fn;
+ if (params->mem_model == ODP_MEM_MODEL_PROCESS)
+ odp_global_ro.shm_single_va = 1;
}
if (_odp_libconfig_init_global()) {
@@ -240,12 +305,6 @@ int odp_init_global(odp_instance_t *instance,
}
stage = CPU_CYCLES_INIT;
- if (_odp_hash_init_global()) {
- ODP_ERR("ODP hash init failed.\n");
- goto init_failed;
- }
- stage = HASH_INIT;
-
if (odp_time_init_global()) {
ODP_ERR("ODP time init failed.\n");
goto init_failed;
@@ -270,6 +329,18 @@ int odp_init_global(odp_instance_t *instance,
}
stage = FDSERVER_INIT;
+ if (global_rw_data_init()) {
+ ODP_ERR("ODP global RW data init failed.\n");
+ goto init_failed;
+ }
+ stage = GLOBAL_RW_DATA_INIT;
+
+ if (_odp_hash_init_global()) {
+ ODP_ERR("ODP hash init failed.\n");
+ goto init_failed;
+ }
+ stage = HASH_INIT;
+
if (odp_thread_init_global()) {
ODP_ERR("ODP thread init failed.\n");
goto init_failed;
@@ -315,6 +386,12 @@ int odp_init_global(odp_instance_t *instance,
}
stage = CRYPTO_INIT;
+ if (_odp_comp_init_global()) {
+ ODP_ERR("ODP comp init failed.\n");
+ goto init_failed;
+ }
+ stage = COMP_INIT;
+
if (odp_classification_init_global()) {
ODP_ERR("ODP classification init failed.\n");
goto init_failed;
@@ -351,7 +428,7 @@ int odp_init_global(odp_instance_t *instance,
}
stage = IPSEC_INIT;
- *instance = (odp_instance_t)odp_global_data.main_pid;
+ *instance = (odp_instance_t)odp_global_ro.main_pid;
return 0;
@@ -362,7 +439,7 @@ init_failed:
int odp_term_global(odp_instance_t instance)
{
- if (instance != (odp_instance_t)odp_global_data.main_pid) {
+ if (instance != (odp_instance_t)odp_global_ro.main_pid) {
ODP_ERR("Bad instance.\n");
return -1;
}
@@ -441,7 +518,7 @@ int odp_init_local(odp_instance_t instance, odp_thread_type_t thr_type)
{
enum init_stage stage = NO_INIT;
- if (instance != (odp_instance_t)odp_global_data.main_pid) {
+ if (instance != (odp_instance_t)odp_global_ro.main_pid) {
ODP_ERR("Bad instance.\n");
goto init_fail;
}
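The init/term changes above follow the file's staged pattern: odp_init_global() records the last completed stage, and term_global() unwinds from that stage downwards through switch fall-through, so a mid-init failure tears down exactly what was set up. A condensed sketch with hypothetical stages:

enum stage { STAGE_NONE, STAGE_A, STAGE_B, STAGE_ALL };

static int a_term(void) { return 0; } /* hypothetical */
static int b_term(void) { return 0; } /* hypothetical */

static int term(enum stage stage)
{
	int rc = 0;

	switch (stage) {
	case STAGE_ALL:
		/* Fall through */
	case STAGE_B:
		if (b_term())
			rc = -1;
		/* Fall through */
	case STAGE_A:
		if (a_term())
			rc = -1;
		/* Fall through */
	case STAGE_NONE:
		break;
	}
	return rc;
}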
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 71258fe9a..8430d707c 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -13,10 +13,12 @@
#include <odp/api/byteorder.h>
#include <odp/api/plat/byteorder_inlines.h>
+#include <odp_global_data.h>
#include <odp_init_internal.h>
#include <odp_debug_internal.h>
#include <odp_packet_internal.h>
#include <odp_ipsec_internal.h>
+#include <odp/api/plat/queue_inlines.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
@@ -25,6 +27,8 @@
#include <string.h>
+static odp_ipsec_config_t *ipsec_config;
+
int odp_ipsec_capability(odp_ipsec_capability_t *capa)
{
int rc;
@@ -143,14 +147,12 @@ void odp_ipsec_config_init(odp_ipsec_config_t *config)
config->inbound.lookup.max_spi = UINT32_MAX;
}
-static odp_ipsec_config_t ipsec_config;
-
int odp_ipsec_config(const odp_ipsec_config_t *config)
{
if (ODP_CONFIG_IPSEC_SAS > config->max_num_sa)
return -1;
- ipsec_config = *config;
+ *ipsec_config = *config;
return 0;
}
@@ -786,8 +788,8 @@ static ipsec_sa_t *ipsec_in_single(odp_packet_t pkt,
parse_param.proto = state.is_ipv4 ? ODP_PROTO_IPV4 :
state.is_ipv6 ? ODP_PROTO_IPV6 :
ODP_PROTO_NONE;
- parse_param.last_layer = ipsec_config.inbound.parse_level;
- parse_param.chksums = ipsec_config.inbound.chksums;
+ parse_param.last_layer = ipsec_config->inbound.parse_level;
+ parse_param.chksums = ipsec_config->inbound.chksums;
/* We do not care about return code here.
* Parsing error should not result in IPsec error. */
@@ -809,9 +811,9 @@ err:
/* Generate sequence number */
static inline
-uint32_t ipsec_seq_no(ipsec_sa_t *ipsec_sa)
+uint64_t ipsec_seq_no(ipsec_sa_t *ipsec_sa)
{
- return odp_atomic_fetch_add_u32(&ipsec_sa->out.seq, 1);
+ return odp_atomic_fetch_add_u64(&ipsec_sa->hot.out.seq, 1);
}
/* Helper for calculating encode length using data length and block size */
@@ -870,14 +872,18 @@ static int ipsec_out_tunnel_ipv4(odp_packet_t *pkt,
state->ip_tot_len += _ODP_IPV4HDR_LEN;
out_ip.tot_len = odp_cpu_to_be_16(state->ip_tot_len);
- /* No need to convert to BE: ID just should not be duplicated */
- out_ip.id = odp_atomic_fetch_add_u32(&ipsec_sa->out.tun_ipv4.hdr_id,
- 1);
if (ipsec_sa->copy_df)
flags = state->out_tunnel.ip_df;
else
flags = ((uint16_t)ipv4_param->df) << 14;
out_ip.frag_offset = odp_cpu_to_be_16(flags);
+
+ /* Allocate unique IP ID only for non-atomic datagrams */
+ if (out_ip.frag_offset == 0)
+ out_ip.id = _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa);
+ else
+ out_ip.id = 0;
+
out_ip.ttl = ipv4_param->ttl;
/* Will be filled later by packet checksum update */
out_ip.chksum = 0;
@@ -974,23 +980,45 @@ static int ipsec_out_tunnel_ipv6(odp_packet_t *pkt,
return 0;
}
+#define IPSEC_RANDOM_BUF_SIZE 256
+
+static int ipsec_random_data(uint8_t *data, uint32_t len)
+{
+ static __thread uint8_t buffer[IPSEC_RANDOM_BUF_SIZE];
+ static __thread uint32_t buffer_used = IPSEC_RANDOM_BUF_SIZE;
+
+ if (odp_likely(buffer_used + len <= IPSEC_RANDOM_BUF_SIZE)) {
+ memcpy(data, &buffer[buffer_used], len);
+ buffer_used += len;
+ } else if (odp_likely(len <= IPSEC_RANDOM_BUF_SIZE)) {
+ uint32_t rnd_len;
+
+ rnd_len = odp_random_data(buffer, IPSEC_RANDOM_BUF_SIZE,
+ odp_global_ro.ipsec_rand_kind);
+ if (odp_unlikely(rnd_len != IPSEC_RANDOM_BUF_SIZE))
+ return -1;
+ memcpy(data, &buffer[0], len);
+ buffer_used = len;
+ } else {
+ return -1;
+ }
+ return 0;
+}
+
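A worked example of the amortization:

/*
 * Sketch: with IPSEC_RANDOM_BUF_SIZE = 256 and 8-byte ESP IVs,
 * odp_random_data() runs once per 32 packets; the other 31 IVs are
 * copied from the thread-local buffer.
 */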
static int ipsec_out_iv(ipsec_state_t *state,
- ipsec_sa_t *ipsec_sa)
+ ipsec_sa_t *ipsec_sa,
+ uint64_t seq_no)
{
if (ipsec_sa->use_counter_iv) {
- uint64_t ctr;
-
/* Both GCM and CTR use 8-bit counters */
- ODP_ASSERT(sizeof(ctr) == ipsec_sa->esp_iv_len);
+ ODP_ASSERT(sizeof(seq_no) == ipsec_sa->esp_iv_len);
- ctr = odp_atomic_fetch_add_u64(&ipsec_sa->out.counter,
- 1);
/* Check for overrun */
- if (ctr == 0)
+ if (seq_no == 0)
return -1;
memcpy(state->iv, ipsec_sa->salt, ipsec_sa->salt_length);
- memcpy(state->iv + ipsec_sa->salt_length, &ctr,
+ memcpy(state->iv + ipsec_sa->salt_length, &seq_no,
ipsec_sa->esp_iv_len);
if (ipsec_sa->aes_ctr_iv) {
@@ -1000,12 +1028,7 @@ static int ipsec_out_iv(ipsec_state_t *state,
state->iv[15] = 1;
}
} else if (ipsec_sa->esp_iv_len) {
- uint32_t len;
-
- len = odp_random_data(state->iv, ipsec_sa->esp_iv_len,
- ODP_RANDOM_CRYPTO);
-
- if (len != ipsec_sa->esp_iv_len)
+ if (ipsec_random_data(state->iv, ipsec_sa->esp_iv_len))
return -1;
}
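For clarity, a sketch of the counter-mode IV assembled above (sizes taken from the GCM/CTR cases in odp_ipsec_sa_create(): 4-byte salt, 8-byte IV):

/*
 * bytes 0..3    salt (from cipher_key_extra)
 * bytes 4..11   seq_no (the same value feeds the ESP sequence number)
 * bytes 12..15  block counter, set to 1 (AES-CTR only)
 */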
@@ -1034,6 +1057,7 @@ static int ipsec_out_esp(odp_packet_t *pkt,
unsigned trl_len;
unsigned pkt_len, new_len;
uint8_t proto = _ODP_IPPROTO_ESP;
+ uint64_t seq_no;
if (odp_unlikely(opt->flag.tfc_dummy)) {
ip_data_len = 0;
@@ -1067,7 +1091,9 @@ static int ipsec_out_esp(odp_packet_t *pkt,
return -1;
}
- if (ipsec_out_iv(state, ipsec_sa) < 0) {
+ seq_no = ipsec_seq_no(ipsec_sa);
+
+ if (ipsec_out_iv(state, ipsec_sa, seq_no) < 0) {
status->error.alg = 1;
return -1;
}
@@ -1077,7 +1103,7 @@ static int ipsec_out_esp(odp_packet_t *pkt,
memset(&esp, 0, sizeof(esp));
esp.spi = odp_cpu_to_be_32(ipsec_sa->spi);
- esp.seq_no = odp_cpu_to_be_32(ipsec_seq_no(ipsec_sa));
+ esp.seq_no = odp_cpu_to_be_32(seq_no & 0xffffffff);
state->esp.aad.spi = esp.spi;
state->esp.aad.seq_no = esp.seq_no;
@@ -1199,15 +1225,18 @@ static int ipsec_out_ah(odp_packet_t *pkt,
ipsec_sa->icv_len;
uint16_t ipsec_offset = state->ip_offset + state->ip_hdr_len;
uint8_t proto = _ODP_IPPROTO_AH;
+ uint64_t seq_no;
if (state->ip_tot_len + hdr_len > mtu) {
status->error.mtu = 1;
return -1;
}
+ seq_no = ipsec_seq_no(ipsec_sa);
+
memset(&ah, 0, sizeof(ah));
ah.spi = odp_cpu_to_be_32(ipsec_sa->spi);
- ah.seq_no = odp_cpu_to_be_32(ipsec_seq_no(ipsec_sa));
+ ah.seq_no = odp_cpu_to_be_32(seq_no & 0xffffffff);
ah.next_header = state->ip_next_hdr;
odp_packet_copy_from_mem(*pkt, state->ip_next_hdr_offset, 1, &proto);
@@ -1243,7 +1272,7 @@ static int ipsec_out_ah(odp_packet_t *pkt,
ah.ah_len = hdr_len / 4 - 2;
/* For GMAC */
- if (ipsec_out_iv(state, ipsec_sa) < 0) {
+ if (ipsec_out_iv(state, ipsec_sa, seq_no) < 0) {
status->error.alg = 1;
return -1;
}
@@ -1304,9 +1333,10 @@ static void ipsec_out_ah_post(ipsec_state_t *state, odp_packet_t pkt)
static void ipsec_out_checksums(odp_packet_t pkt,
ipsec_state_t *state)
{
- odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt;
+ odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt,
+ sctp_chksum_pkt;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- odp_ipsec_outbound_config_t outbound = ipsec_config.outbound;
+ odp_ipsec_outbound_config_t outbound = ipsec_config->outbound;
ipv4_chksum_pkt = OL_TX_CHKSUM_PKT(outbound.chksum.inner_ipv4,
state->is_ipv4,
@@ -1323,6 +1353,12 @@ static void ipsec_out_checksums(odp_packet_t pkt,
pkt_hdr->p.flags.l4_chksum_set,
pkt_hdr->p.flags.l4_chksum);
+ sctp_chksum_pkt = OL_TX_CHKSUM_PKT(outbound.chksum.inner_sctp,
+ state->ip_next_hdr ==
+ _ODP_IPPROTO_SCTP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
+
if (ipv4_chksum_pkt)
_odp_packet_ipv4_chksum_insert(pkt);
@@ -1331,6 +1367,9 @@ static void ipsec_out_checksums(odp_packet_t pkt,
if (udp_chksum_pkt)
_odp_packet_udp_chksum_insert(pkt);
+
+ if (sctp_chksum_pkt)
+ _odp_packet_sctp_chksum_insert(pkt);
}
static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
@@ -1348,7 +1387,14 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
odp_ipsec_frag_mode_t frag_mode;
uint32_t mtu;
- ipsec_sa = _odp_ipsec_sa_use(sa);
+ /*
+ * No need to do _odp_ipsec_sa_use() here since an ODP application
+ * is not allowed to call IPsec output before SA creation has
+ * completed nor call odp_ipsec_sa_disable() before IPsec output
+ * has completed. IOW, the needed synchronization between threads
+ * is done by the application.
+ */
+ ipsec_sa = _odp_ipsec_sa_entry_from_hdl(sa);
ODP_ASSERT(NULL != ipsec_sa);
if (opt->flag.tfc_dummy) {
@@ -1450,6 +1496,18 @@ static ipsec_sa_t *ipsec_out_single(odp_packet_t pkt,
param.session = ipsec_sa->session;
+ /*
+ * NOTE: Do not change to an asynchronous design without thinking
+ * concurrency and what changes are required to guarantee that
+ * used SAs are not destroyed when asynchronous operations are in
+ * progress.
+ *
+ * The containing code does not hold a reference to the SA but
+ * completes outbound processing synchronously and makes use of
+ * the fact that the application may not disable (and then destroy)
+ * the SA before this output routine returns (and all its side
+ * effects are visible to the disabling thread).
+ */
rc = odp_crypto_op(&pkt, &pkt, &param, 1);
if (rc < 0) {
ODP_DBG("Crypto failed\n");
@@ -1600,9 +1658,6 @@ int odp_ipsec_out(const odp_packet_t pkt_in[], int num_in,
out_pkt++;
sa_idx += sa_inc;
opt_idx += opt_inc;
-
- /* Last thing */
- _odp_ipsec_sa_unuse(ipsec_sa);
}
*num_out = out_pkt;
@@ -1645,7 +1700,7 @@ int odp_ipsec_in_enq(const odp_packet_t pkt_in[], int num_in,
queue = ipsec_sa->queue;
} else {
result->sa = ODP_IPSEC_SA_INVALID;
- queue = ipsec_config.inbound.default_queue;
+ queue = ipsec_config->inbound.default_queue;
}
if (odp_queue_enq(queue, odp_ipsec_packet_to_event(pkt))) {
@@ -1710,9 +1765,6 @@ int odp_ipsec_out_enq(const odp_packet_t pkt_in[], int num_in,
in_pkt++;
sa_idx += sa_inc;
opt_idx += opt_inc;
-
- /* Last thing */
- _odp_ipsec_sa_unuse(ipsec_sa);
}
return in_pkt;
@@ -1744,7 +1796,7 @@ int _odp_ipsec_try_inline(odp_packet_t *pkt)
pkt_hdr = packet_hdr(*pkt);
pkt_hdr->p.input_flags.dst_queue = 1;
- pkt_hdr->dst_queue = queue_fn->from_ext(ipsec_sa->queue);
+ pkt_hdr->dst_queue = ipsec_sa->queue;
/* Last thing */
_odp_ipsec_sa_unuse(ipsec_sa);
@@ -1852,9 +1904,6 @@ err:
in_pkt++;
sa_idx += sa_inc;
opt_idx += opt_inc;
-
- /* Last thing */
- _odp_ipsec_sa_unuse(ipsec_sa);
}
return in_pkt;
@@ -1888,15 +1937,37 @@ odp_event_t odp_ipsec_packet_to_event(odp_packet_t pkt)
int _odp_ipsec_init_global(void)
{
- odp_ipsec_config_init(&ipsec_config);
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("_odp_ipsec", sizeof(odp_ipsec_config_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ ipsec_config = odp_shm_addr(shm);
+
+ if (ipsec_config == NULL) {
+ ODP_ERR("Shm reserve failed for odp_ipsec\n");
+ return -1;
+ }
+
+ odp_ipsec_config_init(ipsec_config);
memset(&default_out_opt, 0, sizeof(default_out_opt));
+ odp_global_ro.ipsec_rand_kind = ODP_RANDOM_CRYPTO;
+ if (odp_global_ro.ipsec_rand_kind > odp_random_max_kind())
+ odp_global_ro.ipsec_rand_kind = odp_random_max_kind();
+
return 0;
}
int _odp_ipsec_term_global(void)
{
- /* Do nothing for now */
+ odp_shm_t shm = odp_shm_lookup("_odp_ipsec");
+
+ if (shm == ODP_SHM_INVALID || odp_shm_free(shm)) {
+ ODP_ERR("Shm free failed for odp_ipsec");
+ return -1;
+ }
+
return 0;
}
diff --git a/platform/linux-generic/odp_ipsec_events.c b/platform/linux-generic/odp_ipsec_events.c
index e39776492..c1d153c09 100644
--- a/platform/linux-generic/odp_ipsec_events.c
+++ b/platform/linux-generic/odp_ipsec_events.c
@@ -17,6 +17,7 @@
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
typedef struct {
/* common buffer header */
@@ -40,7 +41,7 @@ int _odp_ipsec_events_init_global(void)
param.buf.num = IPSEC_EVENTS_POOL_BUF_COUNT;
param.type = ODP_POOL_BUFFER;
- ipsec_status_pool = odp_pool_create("ipsec_status_pool", &param);
+ ipsec_status_pool = odp_pool_create("_odp_ipsec_status_pool", &param);
if (ODP_POOL_INVALID == ipsec_status_pool) {
ODP_ERR("Error: status pool create failed.\n");
goto err_status;
@@ -54,16 +55,15 @@ err_status:
int _odp_ipsec_events_term_global(void)
{
- int ret = 0;
- int rc = 0;
+ int ret;
ret = odp_pool_destroy(ipsec_status_pool);
if (ret < 0) {
ODP_ERR("status pool destroy failed");
- rc = -1;
+ return -1;
}
- return rc;
+ return 0;
}
ipsec_status_t _odp_ipsec_status_from_event(odp_event_t ev)
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 3b1686b86..6eb12f95f 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -14,6 +14,7 @@
#include <odp_init_internal.h>
#include <odp_debug_internal.h>
#include <odp_ipsec_internal.h>
+#include <odp_ring_mpmc_internal.h>
#include <odp/api/plat/atomic_inlines.h>
#include <odp/api/plat/cpu_inlines.h>
@@ -24,8 +25,69 @@
#define IPSEC_SA_STATE_FREE 0xc0000000
#define IPSEC_SA_STATE_RESERVED 0x80000000
+/*
+ * We do not have a global IPv4 ID counter that is accessed for every outbound
+ * packet. Instead, we split the IPv4 ID space into fixed size blocks that we
+ * allocate to threads on demand. When a thread has used its block of IDs,
+ * it frees it and allocates a new block. Free blocks are kept in a ring so
+ * that the block last freed is the one to be allocated last to maximize
+ * the time before IPv4 ID reuse.
+ */
+#define IPV4_ID_BLOCK_SIZE 64 /* must be power of 2 */
+#define IPV4_ID_RING_SIZE (UINT16_MAX / IPV4_ID_BLOCK_SIZE)
+#define IPV4_ID_RING_MASK (IPV4_ID_RING_SIZE - 1)
+
+#if IPV4_ID_RING_SIZE <= ODP_THREAD_COUNT_MAX
+#warning IPV4_ID_RING_SIZE is too small for the maximum number of threads.
+#endif
+
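A worked example of the scheme, referring to _odp_ipsec_sa_alloc_ipv4_id() below:

/*
 * Sketch: with IPV4_ID_BLOCK_SIZE = 64 the 16-bit ID space splits into
 * 1024 blocks. A thread holding block [8192, 8255] serves IDs from
 * next_ipv4_id; on the 65th allocation it pushes the exhausted block to
 * the ring tail and pops the least recently freed block from the head,
 * maximizing the time before any ID value repeats.
 */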
+/*
+ * To avoid checking and updating the packet and byte counters in the
+ * SA for every packet, we increment the global counters once for several
+ * packets. We decrement a preallocated thread-local quota for every
+ * packet. When the quota runs out, we get a new quota by incrementing the
+ * global counter.
+ *
+ * This improves performance but the looser synchronization between
+ * threads makes lifetime warnings and errors somewhat inaccurate.
+ * The warnings and errors may get triggered a bit too early since
+ * some threads may still have unused quota when the first thread
+ * hits the limit.
+ */
+#define SA_LIFE_PACKETS_PREALLOC 64
+#define SA_LIFE_BYTES_PREALLOC 4000
+
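A worked example of the quota slack:

/*
 * Sketch: with SA_LIFE_PACKETS_PREALLOC = 64 and 8 active threads, each
 * refill advances the SA-global packet counter by 64. When one thread's
 * refill crosses a limit, the other 7 threads may still hold up to
 * 7 * 64 = 448 granted but unused packets, so soft and hard limits can
 * trigger up to that many packets early.
 */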
+typedef struct sa_thread_local_s {
+ /*
+ * Packets that can be processed in this thread before looking at
+ * the SA-global packet counter and checking hard and soft limits.
+ */
+ uint32_t packet_quota;
+ /*
+ * Bytes that can be processed in this thread before looking at
+ * the SA-global byte counter and checking hard and soft limits.
+ */
+ uint32_t byte_quota;
+ /*
+ * Life time status when this thread last checked the global
+ * counter(s).
+ */
+ odp_ipsec_op_status_t lifetime_status;
+} sa_thread_local_t;
+
+typedef struct ODP_ALIGNED_CACHE ipsec_thread_local_s {
+ sa_thread_local_t sa[ODP_CONFIG_IPSEC_SAS];
+ uint16_t first_ipv4_id; /* first ID of current block of IDs */
+ uint16_t next_ipv4_id; /* next ID to be used */
+} ipsec_thread_local_t;
+
typedef struct ipsec_sa_table_t {
ipsec_sa_t ipsec_sa[ODP_CONFIG_IPSEC_SAS];
+ ipsec_thread_local_t per_thread[ODP_THREAD_COUNT_MAX];
+ struct ODP_ALIGNED_CACHE {
+ ring_mpmc_t ipv4_id_ring;
+ uint32_t ODP_ALIGNED_CACHE ipv4_id_data[IPV4_ID_RING_SIZE];
+ } hot;
odp_shm_t shm;
} ipsec_sa_table_t;
@@ -38,12 +100,36 @@ static inline ipsec_sa_t *ipsec_sa_entry(uint32_t ipsec_sa_idx)
static inline ipsec_sa_t *ipsec_sa_entry_from_hdl(odp_ipsec_sa_t ipsec_sa_hdl)
{
- return ipsec_sa_entry(_odp_typeval(ipsec_sa_hdl));
+ return ipsec_sa_entry(_odp_typeval(ipsec_sa_hdl) - 1);
}
static inline odp_ipsec_sa_t ipsec_sa_index_to_handle(uint32_t ipsec_sa_idx)
{
- return _odp_cast_scalar(odp_ipsec_sa_t, ipsec_sa_idx);
+ return _odp_cast_scalar(odp_ipsec_sa_t, ipsec_sa_idx + 1);
+}
+
+ipsec_sa_t *_odp_ipsec_sa_entry_from_hdl(odp_ipsec_sa_t sa)
+{
+ ODP_ASSERT(ODP_IPSEC_SA_INVALID != sa);
+ return ipsec_sa_entry_from_hdl(sa);
+}
+
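A note on the new mapping (assumption: the all-zero handle value is ODP_IPSEC_SA_INVALID on this platform):

/*
 * Sketch: index 0 maps to handle 1, index 1 to handle 2, and so on, so a
 * valid SA handle can never collide with the invalid (zero) handle value.
 */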
+static inline sa_thread_local_t *ipsec_sa_thread_local(ipsec_sa_t *sa)
+{
+ return &ipsec_sa_tbl->per_thread[odp_thread_id()].sa[sa->ipsec_sa_idx];
+}
+
+static void init_sa_thread_local(ipsec_sa_t *sa)
+{
+ sa_thread_local_t *sa_tl;
+ int n;
+
+ for (n = 0; n < ODP_THREAD_COUNT_MAX; n++) {
+ sa_tl = &ipsec_sa_tbl->per_thread[n].sa[sa->ipsec_sa_idx];
+ sa_tl->packet_quota = 0;
+ sa_tl->byte_quota = 0;
+ sa_tl->lifetime_status.all = 0;
+ }
}
int _odp_ipsec_sad_init_global(void)
@@ -51,25 +137,50 @@ int _odp_ipsec_sad_init_global(void)
odp_shm_t shm;
unsigned i;
- shm = odp_shm_reserve("ipsec_sa_table",
+ shm = odp_shm_reserve("_odp_ipsec_sa_table",
sizeof(ipsec_sa_table_t),
- ODP_CACHE_LINE_SIZE, 0);
-
- ipsec_sa_tbl = odp_shm_addr(shm);
- if (ipsec_sa_tbl == NULL)
+ ODP_CACHE_LINE_SIZE,
+ 0);
+ if (shm == ODP_SHM_INVALID)
return -1;
+ ipsec_sa_tbl = odp_shm_addr(shm);
memset(ipsec_sa_tbl, 0, sizeof(ipsec_sa_table_t));
ipsec_sa_tbl->shm = shm;
+ ring_mpmc_init(&ipsec_sa_tbl->hot.ipv4_id_ring);
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ /*
+ * Make the current ID block fully used, forcing allocation
+ * of a fresh block at first use.
+ */
+ ipsec_sa_tbl->per_thread[i].first_ipv4_id = 0;
+ ipsec_sa_tbl->per_thread[i].next_ipv4_id = IPV4_ID_BLOCK_SIZE;
+ }
+ /*
+ * Initialize IPv4 ID ring with ID blocks.
+ *
+ * The last ID block is left unused since the ring can hold
+ * only IPV4_ID_RING_SIZE - 1 entries.
+ */
+ for (i = 0; i < IPV4_ID_RING_SIZE - 1; i++) {
+ uint32_t data = i * IPV4_ID_BLOCK_SIZE;
+
+ ring_mpmc_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
+ }
+
for (i = 0; i < ODP_CONFIG_IPSEC_SAS; i++) {
ipsec_sa_t *ipsec_sa = ipsec_sa_entry(i);
ipsec_sa->ipsec_sa_hdl = ipsec_sa_index_to_handle(i);
ipsec_sa->ipsec_sa_idx = i;
odp_atomic_init_u32(&ipsec_sa->state, IPSEC_SA_STATE_FREE);
- odp_atomic_init_u64(&ipsec_sa->bytes, 0);
- odp_atomic_init_u64(&ipsec_sa->packets, 0);
+ odp_atomic_init_u64(&ipsec_sa->hot.bytes, 0);
+ odp_atomic_init_u64(&ipsec_sa->hot.packets, 0);
}
return 0;
@@ -276,6 +387,7 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa_t *ipsec_sa;
odp_crypto_session_param_t crypto_param;
odp_crypto_ses_create_err_t ses_create_rc;
+ const odp_crypto_key_t *salt_param = NULL;
ipsec_sa = ipsec_sa_reserve();
if (NULL == ipsec_sa) {
@@ -289,6 +401,10 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa->queue = param->dest_queue;
ipsec_sa->mode = param->mode;
ipsec_sa->flags = 0;
+ if (param->opt.esn) {
+ ODP_ERR("ESN is not supported!\n");
+ goto error; /* release the reserved SA entry */
+ }
if (ODP_IPSEC_DIR_INBOUND == param->dir) {
ipsec_sa->lookup_mode = param->inbound.lookup_mode;
if (ODP_IPSEC_LOOKUP_DSTADDR_SPI == ipsec_sa->lookup_mode) {
@@ -307,10 +423,10 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
if (param->inbound.antireplay_ws > IPSEC_ANTIREPLAY_WS)
goto error;
ipsec_sa->antireplay = (param->inbound.antireplay_ws != 0);
- odp_atomic_init_u64(&ipsec_sa->in.antireplay, 0);
+ odp_atomic_init_u64(&ipsec_sa->hot.in.antireplay, 0);
} else {
ipsec_sa->lookup_mode = ODP_IPSEC_LOOKUP_DISABLED;
- odp_atomic_store_u32(&ipsec_sa->out.seq, 1);
+ odp_atomic_store_u64(&ipsec_sa->hot.out.seq, 1);
ipsec_sa->out.frag_mode = param->outbound.frag_mode;
ipsec_sa->out.mtu = param->outbound.mtu;
}
@@ -320,8 +436,8 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa->copy_flabel = param->opt.copy_flabel;
ipsec_sa->udp_encap = param->opt.udp_encap;
- odp_atomic_store_u64(&ipsec_sa->bytes, 0);
- odp_atomic_store_u64(&ipsec_sa->packets, 0);
+ odp_atomic_store_u64(&ipsec_sa->hot.bytes, 0);
+ odp_atomic_store_u64(&ipsec_sa->hot.packets, 0);
ipsec_sa->soft_limit_bytes = param->lifetime.soft_limit.bytes;
ipsec_sa->soft_limit_packets = param->lifetime.soft_limit.packets;
ipsec_sa->hard_limit_bytes = param->lifetime.hard_limit.bytes;
@@ -337,7 +453,6 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
memcpy(&ipsec_sa->out.tun_ipv4.dst_ip,
param->outbound.tunnel.ipv4.dst_addr,
sizeof(ipsec_sa->out.tun_ipv4.dst_ip));
- odp_atomic_init_u32(&ipsec_sa->out.tun_ipv4.hdr_id, 0);
ipsec_sa->out.tun_ipv4.param.src_addr =
&ipsec_sa->out.tun_ipv4.src_ip;
ipsec_sa->out.tun_ipv4.param.dst_addr =
@@ -396,6 +511,8 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
(uint32_t)-1 == crypto_param.auth_digest_len)
goto error;
+ ipsec_sa->salt_length = 0;
+
switch (crypto_param.cipher_alg) {
case ODP_CIPHER_ALG_NULL:
ipsec_sa->esp_iv_len = 0;
@@ -418,20 +535,33 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa->aes_ctr_iv = 1;
ipsec_sa->esp_iv_len = 8;
ipsec_sa->esp_block_len = 1;
+ /* 4 byte nonce */
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.cipher_key_extra;
break;
#if ODP_DEPRECATED_API
case ODP_CIPHER_ALG_AES128_GCM:
#endif
case ODP_CIPHER_ALG_AES_GCM:
+ ipsec_sa->use_counter_iv = 1;
+ ipsec_sa->esp_iv_len = 8;
+ ipsec_sa->esp_block_len = 16;
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.cipher_key_extra;
+ break;
case ODP_CIPHER_ALG_AES_CCM:
ipsec_sa->use_counter_iv = 1;
ipsec_sa->esp_iv_len = 8;
ipsec_sa->esp_block_len = 16;
+ ipsec_sa->salt_length = 3;
+ salt_param = &param->crypto.cipher_key_extra;
break;
case ODP_CIPHER_ALG_CHACHA20_POLY1305:
ipsec_sa->use_counter_iv = 1;
ipsec_sa->esp_iv_len = 8;
ipsec_sa->esp_block_len = 1;
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.cipher_key_extra;
break;
default:
goto error;
@@ -451,6 +581,8 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
ipsec_sa->esp_iv_len = 8;
ipsec_sa->esp_block_len = 16;
crypto_param.auth_iv.length = 12;
+ ipsec_sa->salt_length = 4;
+ salt_param = &param->crypto.auth_key_extra;
break;
case ODP_AUTH_ALG_CHACHA20_POLY1305:
crypto_param.auth_aad_len = sizeof(ipsec_aad_t);
@@ -459,29 +591,29 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
break;
}
- if (1 == ipsec_sa->use_counter_iv &&
- ODP_IPSEC_DIR_OUTBOUND == param->dir)
- odp_atomic_init_u64(&ipsec_sa->out.counter, 1);
-
ipsec_sa->icv_len = crypto_param.auth_digest_len;
- if (param->crypto.cipher_key_extra.length) {
- if (param->crypto.cipher_key_extra.length >
- IPSEC_MAX_SALT_LEN)
+ if (ipsec_sa->salt_length) {
+ if (ipsec_sa->salt_length > IPSEC_MAX_SALT_LEN) {
+ ODP_ERR("IPSEC_MAX_SALT_LEN too small\n");
+ goto error;
+ }
+
+ if (ipsec_sa->salt_length != salt_param->length) {
+ ODP_ERR("Bad extra keying material length: %i\n",
+ salt_param->length);
goto error;
+ }
- ipsec_sa->salt_length = param->crypto.cipher_key_extra.length;
- memcpy(ipsec_sa->salt,
- param->crypto.cipher_key_extra.data,
- param->crypto.cipher_key_extra.length);
- } else {
- ipsec_sa->salt_length = 0;
+ memcpy(ipsec_sa->salt, salt_param->data, ipsec_sa->salt_length);
}
if (odp_crypto_session_create(&crypto_param, &ipsec_sa->session,
&ses_create_rc))
goto error;
+ init_sa_thread_local(ipsec_sa);
+
ipsec_sa_publish(ipsec_sa);
return ipsec_sa->ipsec_sa_hdl;
@@ -623,17 +755,11 @@ int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa,
odp_ipsec_op_status_t *status)
{
int rc = 0;
+ sa_thread_local_t *sa_tl = ipsec_sa_thread_local(ipsec_sa);
- if (ipsec_sa->hard_limit_bytes > 0 &&
- odp_atomic_load_u64(&ipsec_sa->bytes) >
- ipsec_sa->hard_limit_bytes) {
- status->error.hard_exp_bytes = 1;
- rc = -1;
- }
- if (ipsec_sa->hard_limit_packets > 0 &&
- odp_atomic_load_u64(&ipsec_sa->packets) >
- ipsec_sa->hard_limit_packets) {
- status->error.hard_exp_packets = 1;
+ if (sa_tl->lifetime_status.error.hard_exp_packets ||
+ sa_tl->lifetime_status.error.hard_exp_bytes) {
+ status->all |= sa_tl->lifetime_status.all;
rc = -1;
}
@@ -643,30 +769,47 @@ int _odp_ipsec_sa_stats_precheck(ipsec_sa_t *ipsec_sa,
int _odp_ipsec_sa_stats_update(ipsec_sa_t *ipsec_sa, uint32_t len,
odp_ipsec_op_status_t *status)
{
- uint64_t bytes = odp_atomic_fetch_add_u64(&ipsec_sa->bytes, len) + len;
- uint64_t packets = odp_atomic_fetch_add_u64(&ipsec_sa->packets, 1) + 1;
- int rc = 0;
+ sa_thread_local_t *sa_tl = ipsec_sa_thread_local(ipsec_sa);
+ uint64_t packets, bytes;
+
+ if (odp_unlikely(sa_tl->packet_quota == 0)) {
+ packets = odp_atomic_fetch_add_u64(&ipsec_sa->hot.packets,
+ SA_LIFE_PACKETS_PREALLOC);
+ packets += SA_LIFE_PACKETS_PREALLOC;
+ sa_tl->packet_quota += SA_LIFE_PACKETS_PREALLOC;
+
+ if (ipsec_sa->soft_limit_packets > 0 &&
+ packets >= ipsec_sa->soft_limit_packets)
+ sa_tl->lifetime_status.warn.soft_exp_packets = 1;
+
+ if (ipsec_sa->hard_limit_packets > 0 &&
+ packets >= ipsec_sa->hard_limit_packets)
+ sa_tl->lifetime_status.error.hard_exp_packets = 1;
+ }
+ sa_tl->packet_quota--;
- if (ipsec_sa->soft_limit_bytes > 0 &&
- bytes > ipsec_sa->soft_limit_bytes)
- status->warn.soft_exp_bytes = 1;
+ if (odp_unlikely(sa_tl->byte_quota < len)) {
+ bytes = odp_atomic_fetch_add_u64(&ipsec_sa->hot.bytes,
+ len + SA_LIFE_BYTES_PREALLOC);
+ bytes += len + SA_LIFE_BYTES_PREALLOC;
+ sa_tl->byte_quota += len + SA_LIFE_BYTES_PREALLOC;
- if (ipsec_sa->soft_limit_packets > 0 &&
- packets > ipsec_sa->soft_limit_packets)
- status->warn.soft_exp_packets = 1;
+ if (ipsec_sa->soft_limit_bytes > 0 &&
+ bytes >= ipsec_sa->soft_limit_bytes)
+ sa_tl->lifetime_status.warn.soft_exp_bytes = 1;
- if (ipsec_sa->hard_limit_bytes > 0 &&
- bytes > ipsec_sa->hard_limit_bytes) {
- status->error.hard_exp_bytes = 1;
- rc = -1;
- }
- if (ipsec_sa->hard_limit_packets > 0 &&
- packets > ipsec_sa->hard_limit_packets) {
- status->error.hard_exp_packets = 1;
- rc = -1;
+ if (ipsec_sa->hard_limit_bytes > 0 &&
+ bytes >= ipsec_sa->hard_limit_bytes)
+ sa_tl->lifetime_status.error.hard_exp_bytes = 1;
}
+ sa_tl->byte_quota -= len;
- return rc;
+ status->all |= sa_tl->lifetime_status.all;
+
+ if (sa_tl->lifetime_status.error.hard_exp_packets ||
+ sa_tl->lifetime_status.error.hard_exp_bytes)
+ return -1;
+ return 0;
}
int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
@@ -675,7 +818,7 @@ int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
/* Try to be as quick as possible, we will discard packets later */
if (ipsec_sa->antireplay &&
seq + IPSEC_ANTIREPLAY_WS <=
- (odp_atomic_load_u64(&ipsec_sa->in.antireplay) & 0xffffffff)) {
+ (odp_atomic_load_u64(&ipsec_sa->hot.in.antireplay) & 0xffffffff)) {
status->error.antireplay = 1;
return -1;
}
@@ -692,7 +835,7 @@ int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
if (!ipsec_sa->antireplay)
return 0;
- state = odp_atomic_load_u64(&ipsec_sa->in.antireplay);
+ state = odp_atomic_load_u64(&ipsec_sa->hot.in.antireplay);
while (0 == cas) {
uint32_t max_seq = state & 0xffffffff;
@@ -701,26 +844,54 @@ int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
if (seq + IPSEC_ANTIREPLAY_WS <= max_seq) {
status->error.antireplay = 1;
return -1;
- }
-
- if (seq > max_seq) {
+ } else if (seq >= max_seq + IPSEC_ANTIREPLAY_WS) {
+ mask = 1;
+ max_seq = seq;
+ } else if (seq > max_seq) {
mask <<= seq - max_seq;
mask |= 1;
max_seq = seq;
+ } else if (mask & (1U << (max_seq - seq))) {
+ status->error.antireplay = 1;
+ return -1;
} else {
- if (mask & (1U << (max_seq - seq))) {
- status->error.antireplay = 1;
- return -1;
- }
-
mask |= (1U << (max_seq - seq));
}
new_state = (((uint64_t)mask) << 32) | max_seq;
- cas = odp_atomic_cas_acq_rel_u64(&ipsec_sa->in.antireplay,
+ cas = odp_atomic_cas_acq_rel_u64(&ipsec_sa->hot.in.antireplay,
&state, new_state);
}
return 0;
}
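A sketch of the 64-bit antireplay state manipulated above:

/*
 * state = (mask << 32) | max_seq, window size 32. Example with
 * max_seq = 1000: seq <= 968 is rejected outright; seq = 998 tests mask
 * bit 2 (duplicate if set); seq = 1003 shifts the mask left by 3, sets
 * bit 0 and raises max_seq to 1003.
 */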
+
+uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa)
+{
+ (void) ipsec_sa;
+ ipsec_thread_local_t *tl = &ipsec_sa_tbl->per_thread[odp_thread_id()];
+ uint32_t data;
+
+ if (odp_unlikely(tl->next_ipv4_id ==
+ tl->first_ipv4_id + IPV4_ID_BLOCK_SIZE)) {
+ /* Return used ID block to the ring */
+ data = tl->first_ipv4_id;
+ ring_mpmc_enq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
+ /* Get new ID block */
+ ring_mpmc_deq_multi(&ipsec_sa_tbl->hot.ipv4_id_ring,
+ ipsec_sa_tbl->hot.ipv4_id_data,
+ IPV4_ID_RING_MASK,
+ &data,
+ 1);
+ tl->first_ipv4_id = data;
+ tl->next_ipv4_id = data;
+ }
+
+ /* No need to convert to BE: ID just should not be duplicated */
+ return tl->next_ipv4_id++;
+}
diff --git a/platform/linux-generic/odp_ishm.c b/platform/linux-generic/odp_ishm.c
index fc2f948cc..875f9f9d5 100644
--- a/platform/linux-generic/odp_ishm.c
+++ b/platform/linux-generic/odp_ishm.c
@@ -14,32 +14,20 @@
* internal shared memory is guaranteed to always be located at the same virtual
* address, i.e. pointers to internal shared memory are fully shareable
* between odp threads (regardless of thread type or fork time) in that case.
- * Internal shared memory is mainly meant to be used internaly within ODP
+ * Internal shared memory is mainly meant to be used internally within ODP
* (hence its name), but may also be allocated by odp applications and drivers,
* in the future (through these interfaces).
- * To guarrentee this full pointer shareability (when reserved with the
- * _ODP_ISHM_SINGLE_VA flag) internal shared memory is handled as follows:
- * At global_init time, a huge virtual address space reservation is performed.
- * Note that this is just reserving virtual space, not physical memory.
+ * To guarantee this full pointer shareability (when reserved with the
+ * _ODP_ISHM_SINGLE_VA flag) the whole internal shared memory area is reserved
+ * at global_init time.
* Because all ODP threads (pthreads or processes) are descendants of the ODP
- * instantiation process, this VA space is inherited by all ODP threads.
- * When internal shmem reservation actually occurs, and
- * when reserved with the _ODP_ISHM_SINGLE_VA flag, physical memory is
- * allocated, and mapped (MAP_FIXED) to some part in the huge preallocated
- * address space area:
- * because this virtual address space is common to all ODP threads, we
- * know this mapping will succeed, and not clash with anything else.
- * Hence, an ODP threads which perform a lookup for the same ishm block
- * can map it at the same VA address.
- * When internal shared memory is released, the physical memory is released
- * and the corresponding virtual space returned to its "pool" of preallocated
- * virtual space (assuming it was allocated from there).
- * Note, though, that, if 2 linux processes share the same ishm block,
- * the virtual space is marked as released as soon as one of the processes
- * releases the ishm block, but the physical memory space is actually released
- * by the kernel once all processes have done a ishm operation (i,e. a sync).
- * This is due to the fact that linux does not contain any syscall to unmap
- * memory from a different process.
+ * instantiation process, this address space is inherited by all ODP threads.
+ * When internal shmem reservation actually occurs, and when reserved with the
+ * _ODP_ISHM_SINGLE_VA flag, memory is allocated from the pre-reserved single
+ * VA memory.
+ * When an internal shared memory block is released, the memory is returned to
+ * its "pool" of pre-reserved memory (assuming it was allocated from there). The
+ * memory is not returned to the kernel until odp_term_global().
*
* This file contains functions to handle the VA area (handling fragmentation
* and defragmentation resulting from different allocs/release) and also
@@ -60,9 +48,10 @@
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
#include <odp_fdserver_internal.h>
-#include <odp_ishm_internal.h>
+#include <odp_shm_internal.h>
#include <odp_ishmphy_internal.h>
#include <odp_ishmpool_internal.h>
+#include <odp_libconfig_internal.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
@@ -84,7 +73,7 @@
*
* This is the number of separate ISHM areas that can be reserved concurrently
* (Note that freeing such blocks may take time, or possibly never happen
- * if some of the block ownwers never procsync() after free). This number
+ * if some of the block owners never procsync() after free). This number
* should take that into account)
*/
#define ISHM_MAX_NB_BLOCKS 128
@@ -137,6 +126,8 @@
#define EXPORT_FILE_LINE6_FMT "user_length: %" PRIu64
#define EXPORT_FILE_LINE7_FMT "user_flags: %" PRIu32
#define EXPORT_FILE_LINE8_FMT "align: %" PRIu32
+#define EXPORT_FILE_LINE9_FMT "offset: %" PRIu64
+
/*
* A fragment describes a piece of the shared virtual address space,
* and is allocated only when allocation is done with the _ODP_ISHM_SINGLE_VA
@@ -164,7 +155,7 @@ typedef struct ishm_fragment {
* will allocate both a block and a fragment.
* Blocks contain only global data common to all processes.
*/
-typedef enum {UNKNOWN, HUGE, NORMAL, EXTERNAL} huge_flag_t;
+typedef enum {UNKNOWN, HUGE, NORMAL, EXTERNAL, CACHED} huge_flag_t;
typedef struct ishm_block {
char name[ISHM_NAME_MAXLEN]; /* name for the ishm block (if any) */
char filename[ISHM_FILENAME_MAXLEN]; /* name of the .../odp-* file */
@@ -173,6 +164,7 @@ typedef struct ishm_block {
uint32_t flags; /* block creation flags. */
uint32_t external_fd:1; /* block FD was externally provided */
uint64_t user_len; /* length, as requested at reserve time. */
+ uint64_t offset; /* offset from beginning of the fd */
void *start; /* only valid if _ODP_ISHM_SINGLE_VA is set*/
uint64_t len; /* length. multiple of page size. 0 if free*/
ishm_fragment_t *fragment; /* used when _ODP_ISHM_SINGLE_VA is used */
@@ -191,8 +183,14 @@ typedef struct ishm_block {
typedef struct {
odp_spinlock_t lock;
uint64_t dev_seq; /* used when creating device names */
+ /* limit for reserving memory using huge pages */
+ uint64_t huge_page_limit;
uint32_t odpthread_cnt; /* number of running ODP threads */
ishm_block_t block[ISHM_MAX_NB_BLOCKS];
+ void *single_va_start; /* start of single VA memory */
+ int single_va_fd; /* single VA memory file descriptor */
+ odp_bool_t single_va_huge; /* single VA memory from huge pages */
+ char single_va_filename[ISHM_FILENAME_MAXLEN];
} ishm_table_t;
static ishm_table_t *ishm_tbl;
@@ -238,6 +236,16 @@ typedef struct {
} ishm_ftable_t;
static ishm_ftable_t *ishm_ftbl;
+struct huge_page_cache {
+ uint64_t len;
+ int max_fds; /* maximum number of pre-allocated huge pages requested */
+ int total; /* number of huge pages actually pre-allocated */
+ int idx; /* retrieve fd[idx] to get a free file descriptor */
+ int fd[]; /* list of file descriptors */
+};
+
+static struct huge_page_cache *hpc;
+
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
@@ -245,6 +253,159 @@ static ishm_ftable_t *ishm_ftbl;
/* prototypes: */
static void procsync(void);
+static int hp_create_file(uint64_t len, const char *filename)
+{
+ int fd;
+ void *addr;
+
+ if (len <= 0) {
+ ODP_ERR("Length is wrong\n");
+ return -1;
+ }
+
+ fd = open(filename, O_RDWR | O_CREAT | O_TRUNC,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+ if (fd < 0) {
+ ODP_ERR("Could not create cache file %s\n", filename);
+ return -1;
+ }
+
+ /* remove file from file system */
+ unlink(filename);
+
+ if (ftruncate(fd, len) == -1) {
+ ODP_ERR("Could not truncate file: %s\n", strerror(errno));
+ close(fd);
+ return -1;
+ }
+
+ /* commit huge page */
+ addr = _odp_ishmphy_map(fd, len, 0, 0);
+ if (addr == NULL) {
+ /* no more pages available */
+ close(fd);
+ return -1;
+ }
+ _odp_ishmphy_unmap(addr, len, 0);
+
+ ODP_DBG("Created HP cache file %s, fd: %d\n", filename, fd);
+
+ return fd;
+}
+
+static void hp_init(void)
+{
+ char filename[ISHM_FILENAME_MAXLEN];
+ char dir[ISHM_FILENAME_MAXLEN];
+ int count;
+ void *addr;
+
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "num_cached_hp",
+ &count)) {
+ return;
+ }
+
+ if (count <= 0)
+ return;
+
+ ODP_DBG("Init HP cache with up to %d pages\n", count);
+
+ if (!odp_global_ro.hugepage_info.default_huge_page_dir) {
+ ODP_ERR("No huge page dir\n");
+ return;
+ }
+
+ snprintf(dir, ISHM_FILENAME_MAXLEN, "%s/%s",
+ odp_global_ro.hugepage_info.default_huge_page_dir,
+ odp_global_ro.uid);
+
+ if (mkdir(dir, 0744) != 0) {
+ if (errno != EEXIST) {
+ ODP_ERR("Failed to create dir: %s\n", strerror(errno));
+ return;
+ }
+ }
+
+ snprintf(filename, ISHM_FILENAME_MAXLEN,
+ "%s/odp-%d-ishm_cached",
+ dir,
+ odp_global_ro.main_pid);
+
+ addr = mmap(NULL,
+ sizeof(struct huge_page_cache) + sizeof(int) * count,
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (addr == MAP_FAILED) {
+ ODP_ERR("Unable to mmap memory for huge page cache\n.");
+ return;
+ }
+
+ hpc = addr;
+
+ hpc->max_fds = count;
+ hpc->total = 0;
+ hpc->idx = -1;
+ hpc->len = odp_sys_huge_page_size();
+
+ for (int i = 0; i < count; ++i) {
+ int fd;
+
+ fd = hp_create_file(hpc->len, filename);
+ if (fd == -1) {
+ do {
+ hpc->fd[i++] = -1;
+ } while (i < count);
+ break;
+ }
+ hpc->total++;
+ hpc->fd[i] = fd;
+ }
+ hpc->idx = hpc->total - 1;
+
+ ODP_DBG("HP cache has %d huge pages of size 0x%08" PRIx64 "\n",
+ hpc->total, hpc->len);
+}
+
+static void hp_term(void)
+{
+ if (NULL == hpc)
+ return;
+
+ for (int i = 0; i < hpc->total; i++) {
+ if (hpc->fd[i] != -1)
+ close(hpc->fd[i]);
+ }
+
+ hpc->total = 0;
+ hpc->idx = -1;
+ hpc->len = 0;
+}
+
+static int hp_get_cached(uint64_t len)
+{
+ int fd;
+
+ if (NULL == hpc || hpc->idx < 0 || len != hpc->len)
+ return -1;
+
+ fd = hpc->fd[hpc->idx];
+ hpc->fd[hpc->idx--] = -1;
+
+ return fd;
+}
+
+static int hp_put_cached(int fd)
+{
+ if (NULL == hpc || odp_unlikely(++hpc->idx >= hpc->total)) {
+ hpc->idx--;
+ ODP_ERR("Trying to put more FD than allowed: %d\n", fd);
+ return -1;
+ }
+
+ hpc->fd[hpc->idx] = fd;
+
+ return 0;
+}
+
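A sketch of a caller, hypothetical and not part of this patch: a one-page huge page reservation tries the cache first and falls back to creating a fresh backing file.

static int get_huge_page_fd(int block_index, uint64_t len, uint32_t flags,
			    uint32_t align, huge_flag_t *huge)
{
	int fd = hp_get_cached(len);

	if (fd >= 0) {
		/* mark the block so the fd is returned via hp_put_cached() */
		*huge = CACHED;
		return fd;
	}
	*huge = HUGE;
	return create_file(block_index, HUGE, len, flags, align, 0);
}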
/*
* Take a piece of the preallocated virtual space to fit "size" bytes.
* (best fit). Size must be rounded up to an integer number of pages size.
@@ -260,7 +421,7 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
ishm_fragment_t *rem_fragmnt;
uintptr_t border;/* possible start of new fragment (next alignement) */
intptr_t left; /* room remaining after, if the segment is allocated */
- uintptr_t remainder = odp_global_data.shm_max_memory;
+ uintptr_t remainder = odp_global_ro.shm_max_memory;
/*
* search for the best bit, i.e. search for the unallocated fragment
@@ -286,7 +447,8 @@ static void *alloc_fragment(uintptr_t size, int block_index, intptr_t align,
}
if (!(*best_fragmnt)) {
- ODP_ERR("unable to get virtual address for shmem block!\n.");
+ ODP_ERR("Out of single VA memory. Try increasing "
+ "'shm.single_va_size_kb' in ODP config.\n");
return NULL;
}
@@ -408,51 +570,101 @@ static void free_fragment(ishm_fragment_t *fragmnt)
}
}
+static char *create_seq_string(char *output, size_t size)
+{
+ snprintf(output, size, "%08" PRIu64, ishm_tbl->dev_seq++);
+
+ return output;
+}
+
+static int create_export_file(ishm_block_t *new_block, const char *name,
+ uint64_t len, uint32_t flags, uint32_t align,
+ odp_bool_t single_va, uint64_t offset)
+{
+ FILE *export_file;
+
+ snprintf(new_block->exptname, ISHM_FILENAME_MAXLEN,
+ ISHM_EXPTNAME_FORMAT,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
+ odp_global_ro.main_pid,
+ name);
+ export_file = fopen(new_block->exptname, "w");
+ if (export_file == NULL) {
+ ODP_ERR("open failed: err=%s.\n",
+ strerror(errno));
+ new_block->exptname[0] = 0;
+ return -1;
+ }
+
+ fprintf(export_file, EXPORT_FILE_LINE1_FMT "\n");
+ fprintf(export_file, EXPORT_FILE_LINE2_FMT "\n", new_block->name);
+ if (single_va)
+ fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
+ ishm_tbl->single_va_filename);
+ else
+ fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
+ new_block->filename);
+
+ fprintf(export_file, EXPORT_FILE_LINE4_FMT "\n", len);
+ fprintf(export_file, EXPORT_FILE_LINE5_FMT "\n", flags);
+ fprintf(export_file, EXPORT_FILE_LINE6_FMT "\n",
+ new_block->user_len);
+ fprintf(export_file, EXPORT_FILE_LINE7_FMT "\n",
+ new_block->user_flags);
+ fprintf(export_file, EXPORT_FILE_LINE8_FMT "\n", align);
+ fprintf(export_file, EXPORT_FILE_LINE9_FMT "\n", offset);
+
+ fclose(export_file);
+
+ return 0;
+}
+
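The EXPORT_FILE_LINE*_FMT format strings are defined elsewhere in this file,
so the exact field markers are not visible in this hunk. As a rough
illustration only, the description file written above carries one value per
line in this order (field names are assumptions):

    <header line>
    name:       <block name>
    file:       <backing file, or the shared single VA file>
    length:     <mapped length in bytes>
    flags:      <internal ishm flags>
    user_len:   <length requested by the caller>
    user_flags: <caller flags>
    align:      <alignment>
    offset:     <offset into the single VA file; new in this patch>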
/*
* Create file with size len. returns -1 on error
* Creates a file to /dev/shm/odp-<pid>-<sequence_or_name> (for normal pages)
- * or /mnt/huge/odp-<pid>-<sequence_or_name> (for huge pages)
+ * or /mnt/huge/odp-<pid>-<sequence_or_name> (for huge pages).
* Return the new file descriptor, or -1 on error.
*/
static int create_file(int block_index, huge_flag_t huge, uint64_t len,
- uint32_t flags, uint32_t align)
+ uint32_t flags, uint32_t align, odp_bool_t single_va)
{
char *name;
int fd;
- ishm_block_t *new_block; /* entry in the main block table */
+ ishm_block_t *new_block = NULL; /* entry in the main block table */
char seq_string[ISHM_FILENAME_MAXLEN]; /* used to construct filename*/
char filename[ISHM_FILENAME_MAXLEN]; /* filename in /dev/shm or
* /mnt/huge */
int oflag = O_RDWR | O_CREAT | O_TRUNC; /* flags for open */
- FILE *export_file;
char dir[ISHM_FILENAME_MAXLEN];
- new_block = &ishm_tbl->block[block_index];
- name = new_block->name;
-
- /* create the filename: */
- snprintf(seq_string, ISHM_FILENAME_MAXLEN, "%08" PRIu64,
- ishm_tbl->dev_seq++);
+ /* No ishm_block_t for the master single VA memory file */
+ if (single_va) {
+ name = (char *)(uintptr_t)"single_va";
+ } else {
+ new_block = &ishm_tbl->block[block_index];
+ name = new_block->name;
+ if (!name || !name[0])
+ name = create_seq_string(seq_string,
+ ISHM_FILENAME_MAXLEN);
+ }
/* huge dir must be known to create files there!: */
if ((huge == HUGE) &&
- (!odp_global_data.hugepage_info.default_huge_page_dir))
+ (!odp_global_ro.hugepage_info.default_huge_page_dir))
return -1;
if (huge == HUGE)
snprintf(dir, ISHM_FILENAME_MAXLEN, "%s/%s",
- odp_global_data.hugepage_info.default_huge_page_dir,
- odp_global_data.uid);
+ odp_global_ro.hugepage_info.default_huge_page_dir,
+ odp_global_ro.uid);
else
snprintf(dir, ISHM_FILENAME_MAXLEN, "%s/%s",
- odp_global_data.shm_dir,
- odp_global_data.uid);
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid);
- snprintf(filename, ISHM_FILENAME_MAXLEN,
- ISHM_FILENAME_FORMAT,
- dir,
- odp_global_data.main_pid,
- (name && name[0]) ? name : seq_string);
+ snprintf(filename, ISHM_FILENAME_MAXLEN, ISHM_FILENAME_FORMAT, dir,
+ odp_global_ro.main_pid, name);
mkdir(dir, 0744);
@@ -472,38 +684,21 @@ static int create_file(int block_index, huge_flag_t huge, uint64_t len,
return -1;
}
+ /* No export file is created since this is only for internal use. */
+ if (single_va) {
+ snprintf(ishm_tbl->single_va_filename, ISHM_FILENAME_MAXLEN,
+ "%s", filename);
+ return fd;
+ }
/* if _ODP_ISHM_EXPORT is set, create a description file for
* external ref:
*/
if (flags & _ODP_ISHM_EXPORT) {
memcpy(new_block->filename, filename, ISHM_FILENAME_MAXLEN);
- snprintf(new_block->exptname, ISHM_FILENAME_MAXLEN,
- ISHM_EXPTNAME_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid,
- odp_global_data.main_pid,
- (name && name[0]) ? name : seq_string);
- export_file = fopen(new_block->exptname, "w");
- if (export_file == NULL) {
- ODP_ERR("open failed: err=%s.\n",
- strerror(errno));
- new_block->exptname[0] = 0;
- } else {
- fprintf(export_file, EXPORT_FILE_LINE1_FMT "\n");
- fprintf(export_file, EXPORT_FILE_LINE2_FMT "\n", name);
- fprintf(export_file, EXPORT_FILE_LINE3_FMT "\n",
- new_block->filename);
- fprintf(export_file, EXPORT_FILE_LINE4_FMT "\n", len);
- fprintf(export_file, EXPORT_FILE_LINE5_FMT "\n", flags);
- fprintf(export_file, EXPORT_FILE_LINE6_FMT "\n",
- new_block->user_len);
- fprintf(export_file, EXPORT_FILE_LINE7_FMT "\n",
- new_block->user_flags);
- fprintf(export_file, EXPORT_FILE_LINE8_FMT "\n", align);
-
- fclose(export_file);
- }
+
+ create_export_file(new_block, name, len, flags, align, false,
+ 0);
} else {
new_block->exptname[0] = 0;
/* remove the file from the filesystem, keeping its fd open */
@@ -516,8 +711,9 @@ static int create_file(int block_index, huge_flag_t huge, uint64_t len,
/* delete the files related to a given ishm block: */
static void delete_file(ishm_block_t *block)
{
- /* remove the .../odp-* file, unless fd was external: */
- if (block->filename[0] != 0)
+ /* remove the .../odp-* file, unless fd was external or single va */
+ if (block->filename[0] != 0 &&
+ strcmp(block->filename, ishm_tbl->single_va_filename))
unlink(block->filename);
/* also remove possible description file (if block was exported): */
if (block->exptname[0] != 0)
@@ -525,20 +721,18 @@ static void delete_file(ishm_block_t *block)
}
/*
- * performs the mapping, possibly allocating a fragment of the pre-reserved
- * VA space if the _ODP_ISHM_SINGLE_VA flag was given.
- * Sets fd, and returns the mapping address.
- * This function will also set the _ODP_ISHM_SINGLE_VA flag if the alignment
- * requires it
+ * Performs the mapping.
+ * Sets fd, and returns the mapping address. Not to be used with
+ * _ODP_ISHM_SINGLE_VA blocks.
* Mutex must be assured by the caller.
*/
static void *do_map(int block_index, uint64_t len, uint32_t align,
- uint32_t flags, huge_flag_t huge, int *fd)
+ uint64_t offset, uint32_t flags, huge_flag_t huge, int *fd)
{
ishm_block_t *new_block; /* entry in the main block table */
- void *addr = NULL;
void *mapped_addr;
- ishm_fragment_t *fragment = NULL;
+
+ ODP_ASSERT(!(flags & _ODP_ISHM_SINGLE_VA));
new_block = &ishm_tbl->block[block_index];
@@ -548,33 +742,16 @@ static void *do_map(int block_index, uint64_t len, uint32_t align,
* unless a fd was already given
*/
if (*fd < 0) {
- *fd = create_file(block_index, huge, len, flags, align);
+ *fd = create_file(block_index, huge, len, flags, align, false);
if (*fd < 0)
return NULL;
} else {
new_block->filename[0] = 0;
}
- /* allocate an address range in the prebooked VA area if needed */
- if (flags & _ODP_ISHM_SINGLE_VA) {
- addr = alloc_fragment(len, block_index, align, &fragment);
- if (!addr) {
- ODP_ERR("alloc_fragment failed.\n");
- if (!new_block->external_fd) {
- close(*fd);
- *fd = -1;
- delete_file(new_block);
- }
- return NULL;
- }
- new_block->fragment = fragment;
- }
-
/* try to mmap: */
- mapped_addr = _odp_ishmphy_map(*fd, addr, len, flags);
+ mapped_addr = _odp_ishmphy_map(*fd, len, offset, flags);
if (mapped_addr == NULL) {
- if (flags & _ODP_ISHM_SINGLE_VA)
- free_fragment(fragment);
if (!new_block->external_fd) {
close(*fd);
*fd = -1;
@@ -587,36 +764,86 @@ static void *do_map(int block_index, uint64_t len, uint32_t align,
}
/*
+ * Allocate block from pre-reserved single VA memory
+ */
+static void *alloc_single_va(const char *name, int new_index, uint64_t size,
+ uint32_t align, uint32_t flags, int *fd,
+ uint64_t *len_out)
+{
+ uint64_t len;
+ uint64_t page_sz;
+ char *file_name = (char *)(uintptr_t)name;
+ void *addr;
+ ishm_block_t *new_block = &ishm_tbl->block[new_index];
+ ishm_fragment_t *fragment = NULL;
+ char seq_string[ISHM_FILENAME_MAXLEN];
+
+ if (!file_name || !file_name[0])
+ file_name = create_seq_string(seq_string, ISHM_FILENAME_MAXLEN);
+
+ /* Common fd for all single VA blocks */
+ *fd = ishm_tbl->single_va_fd;
+
+ if (ishm_tbl->single_va_huge) {
+ page_sz = odp_sys_huge_page_size();
+ new_block->huge = HUGE;
+ } else {
+ page_sz = odp_sys_page_size();
+ new_block->huge = NORMAL;
+ }
+ new_block->filename[0] = 0;
+
+ len = (size + (page_sz - 1)) & (-page_sz);
+
+ if (align < page_sz)
+ align = page_sz;
+
+ /* Allocate memory from the pre-reserved single VA space */
+ addr = alloc_fragment(len, new_index, align, &fragment);
+ if (!addr) {
+ ODP_ERR("alloc_fragment failed.\n");
+ return NULL;
+ }
+ new_block->fragment = fragment;
+
+ /* Create export info file */
+ if (flags & _ODP_ISHM_EXPORT) {
+ uint64_t offset = (uintptr_t)addr -
+ (uintptr_t)ishm_tbl->single_va_start;
+ memcpy(new_block->filename, ishm_tbl->single_va_filename,
+ ISHM_FILENAME_MAXLEN);
+
+ create_export_file(new_block, file_name, len, flags, align,
+ true, offset);
+ } else {
+ new_block->exptname[0] = 0;
+ }
+
+ *len_out = len;
+ return addr;
+}
+
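The (size + (page_sz - 1)) & (-page_sz) expression used above (and in
several other places in this file) is the usual power-of-two round-up
idiom. Shown standalone with a worked value (round_up_pow2() is
illustrative; the file open-codes it):

    #include <stdint.h>

    /* Round size up to the next multiple of page_sz. Requires page_sz to
     * be a power of two: (-page_sz) is then the mask that clears the low
     * bits. */
    static inline uint64_t round_up_pow2(uint64_t size, uint64_t page_sz)
    {
        return (size + (page_sz - 1)) & (-page_sz);
    }

    /* round_up_pow2(5000, 4096) == 8192; round_up_pow2(8192, 4096) == 8192 */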
+/*
* Performs an extra mapping (for a process trying to see an existing block
- * i.e. performing a lookup).
+ * i.e. performing a lookup). Not to be used with _ODP_ISHM_SINGLE_VA blocks.
* Mutex must be assured by the caller.
*/
static void *do_remap(int block_index, int fd)
{
void *mapped_addr;
- ishm_fragment_t *fragment;
uint64_t len;
+ uint64_t offset;
uint32_t flags;
len = ishm_tbl->block[block_index].len;
+ offset = ishm_tbl->block[block_index].offset;
flags = ishm_tbl->block[block_index].flags;
- if (flags & _ODP_ISHM_SINGLE_VA) {
- fragment = ishm_tbl->block[block_index].fragment;
- if (!fragment) {
- ODP_ERR("invalid fragment failure.\n");
- return NULL;
- }
-
- /* try to mmap: */
- mapped_addr = _odp_ishmphy_map(fd, fragment->start, len, flags);
- if (mapped_addr == NULL)
- return NULL;
- return mapped_addr;
- }
+ ODP_ASSERT(!(flags & _ODP_ISHM_SINGLE_VA));
/* try to mmap: */
- mapped_addr = _odp_ishmphy_map(fd, NULL, len, flags);
+ mapped_addr = _odp_ishmphy_map(fd, len, offset, flags);
+
if (mapped_addr == NULL)
return NULL;
@@ -624,8 +851,8 @@ static void *do_remap(int block_index, int fd)
}
/*
- * Performs unmapping, possibly freeing a prereserved VA space fragment,
- * if the _ODP_ISHM_SINGLE_VA flag was set at alloc time
+ * Performs unmapping, possibly freeing a pre-reserved single VA memory
+ * fragment, if the _ODP_ISHM_SINGLE_VA flag was set at alloc time.
* Mutex must be assured by the caller.
*/
static int do_unmap(void *start, uint64_t size, uint32_t flags,
@@ -669,56 +896,6 @@ static int find_block_by_name(const char *name)
}
/*
- * Search for a block by address (only works when flag _ODP_ISHM_SINGLE_VA
- * was set at reserve() time, or if the block is already known by this
- * process).
- * Search is performed in the process table and in the global ishm table.
- * The provided address does not have to be at start: any address
- * within the fragment is OK.
- * Returns the index to the found block (if any) or -1 if none.
- * Mutex must be assured by the caller.
- */
-static int find_block_by_address(void *addr)
-{
- int block_index;
- int i;
- ishm_fragment_t *fragmnt;
-
- /*
- * first check if there is already a process known block for this
- * address
- */
- for (i = 0; i < ishm_proctable->nb_entries; i++) {
- block_index = ishm_proctable->entry[i].block_index;
- if ((addr > ishm_proctable->entry[i].start) &&
- ((char *)addr < ((char *)ishm_proctable->entry[i].start +
- ishm_tbl->block[block_index].len)))
- return block_index;
- }
-
- /*
- * then check if there is a existing single VA block known by some other
- * process and containing the given address
- */
- for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
- if ((!ishm_tbl->block[i].len) ||
- (!(ishm_tbl->block[i].flags & _ODP_ISHM_SINGLE_VA)))
- continue;
- fragmnt = ishm_tbl->block[i].fragment;
- if (!fragmnt) {
- ODP_ERR("find_fragment: invalid NULL fragment\n");
- return -1;
- }
- if ((addr >= fragmnt->start) &&
- ((char *)addr < ((char *)fragmnt->start + fragmnt->len)))
- return i;
- }
-
- /* address does not belong to any accessible block: */
- return -1;
-}
-
-/*
* Search a given ishm block in the process local table. Return its index
* in the process table or -1 if not found (meaning that the ishm table
* block index was not referenced in the process local table, i.e. the
@@ -755,7 +932,9 @@ static void procsync(void)
block = &ishm_tbl->block[ishm_proctable->entry[i].block_index];
if (ishm_proctable->entry[i].seq != block->seq) {
/* obsolete entry: free memory and remove proc entry */
- close(ishm_proctable->entry[i].fd);
+ if (ishm_proctable->entry[i].fd !=
+ ishm_tbl->single_va_fd)
+ close(ishm_proctable->entry[i].fd);
_odp_ishmphy_unmap(ishm_proctable->entry[i].start,
ishm_proctable->entry[i].len,
ishm_proctable->entry[i].flags);
@@ -791,6 +970,8 @@ static int block_free_internal(int block_index, int close_fd, int deregister)
proc_index = procfind_block(block_index);
if (proc_index >= 0) {
+ int fd = ishm_proctable->entry[proc_index].fd;
+
/* remove the mapping and possible fragment */
do_unmap(ishm_proctable->entry[proc_index].start,
block->len,
@@ -798,8 +979,12 @@ static int block_free_internal(int block_index, int close_fd, int deregister)
block_index);
/* close the related fd */
- if (close_fd)
- close(ishm_proctable->entry[proc_index].fd);
+ if (close_fd && (fd != ishm_tbl->single_va_fd)) {
+ if (block->huge == CACHED)
+ hp_put_cached(fd);
+ else
+ close(fd);
+ }
/* remove entry from process local table: */
last = ishm_proctable->nb_entries - 1;
@@ -839,17 +1024,17 @@ static int block_free_internal(int block_index, int close_fd, int deregister)
* main block table (>=0) or -1 on error.
*/
int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
- uint32_t align, uint32_t flags, uint32_t user_flags)
+ uint32_t align, uint64_t offset, uint32_t flags,
+ uint32_t user_flags)
{
int new_index; /* index in the main block table*/
ishm_block_t *new_block; /* entry in the main block table*/
uint64_t page_sz; /* normal page size. usually 4K*/
uint64_t page_hp_size; /* huge page size */
uint32_t hp_align;
- uint64_t len; /* mapped length */
+ uint64_t len = 0; /* mapped length */
void *addr = NULL; /* mapping address */
int new_proc_entry;
- struct stat statbuf;
static int huge_error_printed; /* to avoid millions of error...*/
odp_spinlock_lock(&ishm_tbl->lock);
@@ -884,24 +1069,20 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
else
new_block->name[0] = 0;
+ new_block->offset = 0;
+
/* save user data: */
new_block->user_flags = user_flags;
new_block->user_len = size;
/* If a file descriptor is provided, get the real size and map: */
if (fd >= 0) {
- if (fstat(fd, &statbuf) < 0) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- ODP_ERR("_ishm_reserve failed (fstat failed: %s).\n",
- strerror(errno));
- __odp_errno = errno;
- return -1;
- }
- len = statbuf.st_size;
new_block->external_fd = 1;
- /* note that the huge page flag is meningless here as huge
+ len = size;
+ /* note that the huge page flag is meaningless here as huge
* page is determined by the provided file descriptor: */
- addr = do_map(new_index, len, align, flags, EXTERNAL, &fd);
+ addr = do_map(new_index, len, align, offset, flags, EXTERNAL,
+ &fd);
if (addr == NULL) {
odp_spinlock_unlock(&ishm_tbl->lock);
ODP_ERR("_ishm_reserve failed.\n");
@@ -910,10 +1091,12 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
new_block->huge = EXTERNAL;
} else {
new_block->external_fd = 0;
+ new_block->huge = UNKNOWN;
}
/* Otherwise, Try first huge pages when possible and needed: */
- if ((fd < 0) && page_hp_size && (size > page_sz)) {
+ if ((fd < 0) && page_hp_size && ((flags & _ODP_ISHM_USE_HP) ||
+ size > ishm_tbl->huge_page_limit)) {
/* at least, alignment in VA should match page size, but user
* can request more: If the user requirement exceeds the page
* size then we have to make sure the block will be mapped at
@@ -925,19 +1108,42 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
else
flags |= _ODP_ISHM_SINGLE_VA;
+ if (flags & _ODP_ISHM_SINGLE_VA)
+ goto use_single_va;
+
/* roundup to page size */
len = (size + (page_hp_size - 1)) & (-page_hp_size);
- addr = do_map(new_index, len, hp_align, flags, HUGE, &fd);
- if (addr == NULL) {
- if (!huge_error_printed) {
- ODP_ERR("No huge pages, fall back to normal "
- "pages. "
- "check: /proc/sys/vm/nr_hugepages.\n");
- huge_error_printed = 1;
+ /* try pre-allocated pages */
+ fd = hp_get_cached(len);
+ if (fd != -1) {
+ /* do as if user provided a fd */
+ new_block->external_fd = 1;
+ addr = do_map(new_index, len, hp_align, 0, flags,
+ CACHED, &fd);
+ if (addr == NULL) {
+ ODP_ERR("Could not use cached hp %d\n",
+ fd);
+ hp_put_cached(fd);
+ fd = -1;
+ } else {
+ new_block->huge = CACHED;
+ }
+ }
+ if (fd == -1) {
+ addr = do_map(new_index, len, hp_align, 0, flags, HUGE,
+ &fd);
+
+ if (addr == NULL) {
+ if (!huge_error_printed) {
+ ODP_ERR("No huge pages, fall back to "
+ "normal pages. Check: "
+ "/proc/sys/vm/nr_hugepages.\n");
+ huge_error_printed = 1;
+ }
+ } else {
+ new_block->huge = HUGE;
}
- } else {
- new_block->huge = HUGE;
}
}
@@ -953,16 +1159,29 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
else
flags |= _ODP_ISHM_SINGLE_VA;
+ if (flags & _ODP_ISHM_SINGLE_VA)
+ goto use_single_va;
+
/* roundup to page size */
len = (size + (page_sz - 1)) & (-page_sz);
- addr = do_map(new_index, len, align, flags, NORMAL, &fd);
+ addr = do_map(new_index, len, align, 0, flags, NORMAL, &fd);
new_block->huge = NORMAL;
}
+use_single_va:
+ /* Reserve memory from single VA space */
+ if (fd < 0 && (flags & _ODP_ISHM_SINGLE_VA))
+ addr = alloc_single_va(name, new_index, size, align, flags, &fd,
+ &len);
+
/* if neither huge pages or normal pages works, we cannot proceed: */
if ((fd < 0) || (addr == NULL) || (len == 0)) {
- if ((!new_block->external_fd) && (fd >= 0))
+ if (new_block->external_fd) {
+ if (new_block->huge == CACHED)
+ hp_put_cached(fd);
+ } else if (fd >= 0 && (fd != ishm_tbl->single_va_fd)) {
close(fd);
+ }
delete_file(new_block);
odp_spinlock_unlock(&ishm_tbl->lock);
ODP_ERR("_ishm_reserve failed.\n");
@@ -998,6 +1217,65 @@ int _odp_ishm_reserve(const char *name, uint64_t size, int fd,
}
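With the extended signature, a typical internal caller passes 0 for the new
offset argument; non-zero offsets only occur when mapping a descriptor
exported by another ODP instance. A sketch (block name and size are
illustrative):

    int blk_idx = _odp_ishm_reserve("my_block", size, -1 /* no ext fd */,
                                    ODP_CACHE_LINE_SIZE, 0 /* offset */,
                                    _ODP_ISHM_SINGLE_VA, 0);
    if (blk_idx < 0)
        ODP_ERR("_odp_ishm_reserve failed.\n");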
/*
+ * Pre-reserve all single VA memory. Called only in global init.
+ */
+static void *reserve_single_va(uint64_t size, int *fd_out)
+{
+ uint64_t page_sz; /* normal page size. usually 4K*/
+ uint64_t page_hp_size; /* huge page size */
+ uint64_t len; /* mapped length */
+ int fd = -1;
+ void *addr = NULL;
+
+ /* Get system page sizes: page_hp_size is 0 if no huge page available*/
+ page_sz = odp_sys_page_size();
+ page_hp_size = odp_sys_huge_page_size();
+
+ /* Try first huge pages when possible and needed: */
+ if (page_hp_size && (size > page_sz)) {
+ /* roundup to page size */
+ len = (size + (page_hp_size - 1)) & (-page_hp_size);
+ fd = create_file(-1, HUGE, len, 0, 0, true);
+ if (fd >= 0) {
+ addr = _odp_ishmphy_reserve_single_va(len, fd);
+ if (!addr) {
+ close(fd);
+ unlink(ishm_tbl->single_va_filename);
+ fd = -1;
+ }
+ }
+ if (fd < 0)
+ ODP_ERR("No huge pages, fall back to normal pages. "
+ "Check: /proc/sys/vm/nr_hugepages.\n");
+ ishm_tbl->single_va_huge = true;
+ }
+
+ /* Fall back to normal pages if necessary */
+ if (fd < 0) {
+ /* roundup to page size */
+ len = (size + (page_sz - 1)) & (-page_sz);
+
+ fd = create_file(-1, NORMAL, len, 0, 0, true);
+ if (fd >= 0)
+ addr = _odp_ishmphy_reserve_single_va(len, fd);
+ ishm_tbl->single_va_huge = false;
+ }
+
+ /* If neither huge pages nor normal pages work, we cannot proceed: */
+ if ((fd < 0) || (len == 0) || !addr) {
+ if (fd >= 0) {
+ close(fd);
+ unlink(ishm_tbl->single_va_filename);
+ }
+ ODP_ERR("Reserving single VA memory failed.\n");
+ return NULL;
+ }
+
+ *fd_out = fd;
+ return addr;
+}
+
+/*
* Try to map an memory block mapped by another ODP instance into the
* current ODP instance.
* returns 0 on success.
@@ -1012,6 +1290,7 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
uint64_t len;
uint32_t flags;
uint64_t user_len;
+ uint64_t offset;
uint32_t user_flags;
uint32_t align;
int fd;
@@ -1020,8 +1299,8 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
/* try to read the block description file: */
snprintf(export_filename, ISHM_FILENAME_MAXLEN,
ISHM_EXPTNAME_FORMAT,
- odp_global_data.shm_dir,
- odp_global_data.uid,
+ odp_global_ro.shm_dir,
+ odp_global_ro.uid,
external_odp_pid,
remote_name);
@@ -1056,6 +1335,9 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
if (fscanf(export_file, EXPORT_FILE_LINE8_FMT " ", &align) != 1)
goto error_exp_file;
+ if (fscanf(export_file, EXPORT_FILE_LINE9_FMT " ", &offset) != 1)
+ goto error_exp_file;
+
fclose(export_file);
/* now open the filename given in the description file: */
@@ -1066,16 +1348,22 @@ int _odp_ishm_find_exported(const char *remote_name, pid_t external_odp_pid,
return -1;
}
- /* clear the _ODP_ISHM_EXPORT flag so we don't export that again*/
+ /* Clear the _ODP_ISHM_EXPORT flag so we don't export again. The single
+ * VA property is not preserved when an exported block is mapped. */
flags &= ~(uint32_t)_ODP_ISHM_EXPORT;
+ flags &= ~(uint32_t)_ODP_ISHM_SINGLE_VA;
/* reserve the memory, providing the opened file descriptor: */
- block_index = _odp_ishm_reserve(local_name, 0, fd, align, flags, 0);
+ block_index = _odp_ishm_reserve(local_name, len, fd, align, offset,
+ flags, 0);
if (block_index < 0) {
close(fd);
return block_index;
}
+ /* Offset is required to remap the block to other processes */
+ ishm_tbl->block[block_index].offset = offset;
+
/* set inherited info: */
ishm_tbl->block[block_index].user_flags = user_flags;
ishm_tbl->block[block_index].user_len = user_len;
@@ -1117,59 +1405,6 @@ int _odp_ishm_free_by_index(int block_index)
}
/*
- * free and unmap internal shared memory, identified by its block name:
- * return -1 on error. 0 if OK.
- */
-int _odp_ishm_free_by_name(const char *name)
-{
- int block_index;
- int ret;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- /* search the block in main ishm table */
- block_index = find_block_by_name(name);
- if (block_index < 0) {
- ODP_ERR("Request to free an non existing block..."
- " (double free?)\n");
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
-
- ret = block_free(block_index);
- odp_spinlock_unlock(&ishm_tbl->lock);
- return ret;
-}
-
-/*
- * Free and unmap internal shared memory identified by address:
- * return -1 on error. 0 if OK.
- */
-int _odp_ishm_free_by_address(void *addr)
-{
- int block_index;
- int ret;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- /* search the block in main ishm table */
- block_index = find_block_by_address(addr);
- if (block_index < 0) {
- ODP_ERR("Request to free an non existing block..."
- " (double free?)\n");
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
-
- ret = block_free(block_index);
-
- odp_spinlock_unlock(&ishm_tbl->lock);
- return ret;
-}
-
-/*
* Lookup for an ishm shared memory, identified by its block index
* in the main ishm block table.
* Map this ishm area in the process VA (if not already present).
@@ -1206,7 +1441,12 @@ static void *block_lookup(int block_index)
/* perform the mapping */
block = &ishm_tbl->block[block_index];
- mapped_addr = do_remap(block_index, fd);
+ /* No need to remap single VA */
+ if (block->flags & _ODP_ISHM_SINGLE_VA)
+ mapped_addr = block->start;
+ else
+ mapped_addr = do_remap(block_index, fd);
+
if (mapped_addr == NULL) {
ODP_ERR(" lookup: Could not map existing shared memory!\n");
return NULL;
@@ -1226,28 +1466,9 @@ static void *block_lookup(int block_index)
}
/*
- * Lookup for an ishm shared memory, identified by its block_index.
- * Maps this ishmem area in the process VA (if not already present).
- * Returns the block user address, or NULL if the index
- * does not match any known ishm blocks.
- */
-void *_odp_ishm_lookup_by_index(int block_index)
-{
- void *ret;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- ret = block_lookup(block_index);
- odp_spinlock_unlock(&ishm_tbl->lock);
- return ret;
-}
-
-/*
* Lookup for an ishm shared memory, identified by its block name.
- * Map this ishm area in the process VA (if not already present).
- * Return the block index, or -1 if the index
- * does not match any known ishm blocks.
+ * Return the block index, or -1 if the index does not match any known ishm
+ * blocks.
*/
int _odp_ishm_lookup_by_name(const char *name)
{
@@ -1258,68 +1479,25 @@ int _odp_ishm_lookup_by_name(const char *name)
/* search the block in main ishm table: return -1 if not found: */
block_index = find_block_by_name(name);
- if ((block_index < 0) || (!block_lookup(block_index))) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
odp_spinlock_unlock(&ishm_tbl->lock);
return block_index;
}
/*
- * Lookup for an ishm shared memory block, identified by its VA address.
- * This works only if the block has already been looked-up (mapped) by the
- * current process or it it was created with the _ODP_ISHM_SINGLE_VA flag.
- * Map this ishm area in the process VA (if not already present).
- * Return the block index, or -1 if the address
- * does not match any known ishm blocks.
- */
-int _odp_ishm_lookup_by_address(void *addr)
-{
- int block_index;
-
- odp_spinlock_lock(&ishm_tbl->lock);
- procsync();
-
- /* search the block in main ishm table: return -1 if not found: */
- block_index = find_block_by_address(addr);
- if ((block_index < 0) || (!block_lookup(block_index))) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- return -1;
- }
-
- odp_spinlock_unlock(&ishm_tbl->lock);
- return block_index;
-}
-
-/*
- * Returns the VA address of a given block (which has to be known in the current
- * process). Returns NULL if the block is unknown.
+ * Returns the VA address of a given block. Maps this ishm area in the process
+ * VA (if not already present).
+ * Returns NULL if the block is unknown.
*/
void *_odp_ishm_address(int block_index)
{
- int proc_index;
void *addr;
odp_spinlock_lock(&ishm_tbl->lock);
procsync();
- if ((block_index < 0) ||
- (block_index >= ISHM_MAX_NB_BLOCKS) ||
- (ishm_tbl->block[block_index].len == 0)) {
- ODP_ERR("Request for address on an invalid block\n");
- odp_spinlock_unlock(&ishm_tbl->lock);
- return NULL;
- }
-
- proc_index = procfind_block(block_index);
- if (proc_index < 0) {
- odp_spinlock_unlock(&ishm_tbl->lock);
- return NULL;
- }
+ addr = block_lookup(block_index);
- addr = ishm_proctable->entry[proc_index].start;
odp_spinlock_unlock(&ishm_tbl->lock);
return addr;
}
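With the by-index and by-address lookups removed, the remaining pattern is
a name lookup followed by _odp_ishm_address(), which now maps the block
into the calling process on demand. A minimal sketch (block name
illustrative):

    int idx = _odp_ishm_lookup_by_name("my_block");
    void *addr = (idx < 0) ? NULL : _odp_ishm_address(idx);

    if (addr == NULL)
        ODP_ERR("block not found or mapping failed\n");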
@@ -1415,7 +1593,7 @@ int _odp_ishm_cleanup_files(const char *dirpath)
int p_len;
int f_len;
- snprintf(userdir, PATH_MAX, "%s/%s", dirpath, odp_global_data.uid);
+ snprintf(userdir, PATH_MAX, "%s/%s", dirpath, odp_global_ro.uid);
dir = opendir(userdir);
if (!dir) {
@@ -1424,7 +1602,7 @@ int _odp_ishm_cleanup_files(const char *dirpath)
dirpath, strerror(errno));
return 0;
}
- snprintf(prefix, PATH_MAX, _ODP_FILES_FMT, odp_global_data.main_pid);
+ snprintf(prefix, PATH_MAX, _ODP_FILES_FMT, odp_global_ro.main_pid);
p_len = strlen(prefix);
while ((e = readdir(dir)) != NULL) {
if (strncmp(e->d_name, prefix, p_len) == 0) {
@@ -1451,39 +1629,60 @@ int _odp_ishm_cleanup_files(const char *dirpath)
int _odp_ishm_init_global(const odp_init_t *init)
{
void *addr;
- void *spce_addr;
+ void *spce_addr = NULL;
int i;
+ int val_kb;
uid_t uid;
- char *hp_dir = odp_global_data.hugepage_info.default_huge_page_dir;
- uint64_t align;
- uint64_t max_memory = ODP_CONFIG_ISHM_VA_PREALLOC_SZ;
- uint64_t internal = ODP_CONFIG_ISHM_VA_PREALLOC_SZ / 8;
+ char *hp_dir = odp_global_ro.hugepage_info.default_huge_page_dir;
+ uint64_t max_memory;
+ uint64_t internal;
+ uint64_t huge_page_limit;
+
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "single_va_size_kb",
+ &val_kb)) {
+ ODP_ERR("Unable to read single VA size from config\n");
+ return -1;
+ }
+
+ ODP_DBG("Shm single VA size: %dkB\n", val_kb);
+
+ max_memory = (uint64_t)val_kb * 1024;
+ internal = max_memory / 8;
+
+ if (!_odp_libconfig_lookup_ext_int("shm", NULL, "huge_page_limit_kb",
+ &val_kb)) {
+ ODP_ERR("Unable to read huge page usage limit from config\n");
+ return -1;
+ }
+ huge_page_limit = (uint64_t)val_kb * 1024;
+
+ ODP_DBG("Shm huge page usage limit: %dkB\n", val_kb);
/* user requested memory size + some extra for internal use */
if (init && init->shm.max_memory)
max_memory = init->shm.max_memory + internal;
- odp_global_data.shm_max_memory = max_memory;
- odp_global_data.shm_max_size = max_memory - internal;
- odp_global_data.main_pid = getpid();
- odp_global_data.shm_dir = getenv("ODP_SHM_DIR");
- if (odp_global_data.shm_dir) {
- odp_global_data.shm_dir_from_env = 1;
+ odp_global_ro.shm_max_memory = max_memory;
+ odp_global_ro.shm_max_size = max_memory - internal;
+ odp_global_ro.main_pid = getpid();
+ odp_global_ro.shm_dir = getenv("ODP_SHM_DIR");
+ if (odp_global_ro.shm_dir) {
+ odp_global_ro.shm_dir_from_env = 1;
} else {
- odp_global_data.shm_dir =
+ odp_global_ro.shm_dir =
calloc(1, sizeof(ISHM_FILENAME_NORMAL_PAGE_DIR));
- sprintf(odp_global_data.shm_dir, "%s",
+ sprintf(odp_global_ro.shm_dir, "%s",
ISHM_FILENAME_NORMAL_PAGE_DIR);
- odp_global_data.shm_dir_from_env = 0;
+ odp_global_ro.shm_dir_from_env = 0;
}
- ODP_DBG("ishm: using dir %s\n", odp_global_data.shm_dir);
+ ODP_DBG("ishm: using dir %s\n", odp_global_ro.shm_dir);
uid = getuid();
- snprintf(odp_global_data.uid, UID_MAXLEN, "%d",
+ snprintf(odp_global_ro.uid, UID_MAXLEN, "%d",
uid);
- if ((syscall(SYS_gettid)) != odp_global_data.main_pid) {
+ if ((syscall(SYS_gettid)) != odp_global_ro.main_pid) {
ODP_ERR("ishm init must be performed by the main "
"ODP process!\n.");
return -1;
@@ -1491,14 +1690,12 @@ int _odp_ishm_init_global(const odp_init_t *init)
if (!hp_dir) {
ODP_DBG("NOTE: No support for huge pages\n");
- align = odp_sys_page_size();
} else {
ODP_DBG("Huge pages mount point is: %s\n", hp_dir);
_odp_ishm_cleanup_files(hp_dir);
- align = odp_sys_huge_page_size();
}
- _odp_ishm_cleanup_files(odp_global_data.shm_dir);
+ _odp_ishm_cleanup_files(odp_global_ro.shm_dir);
/* allocate space for the internal shared mem block table: */
addr = mmap(NULL, sizeof(ishm_table_t),
@@ -1511,6 +1708,7 @@ int _odp_ishm_init_global(const odp_init_t *init)
memset(ishm_tbl, 0, sizeof(ishm_table_t));
ishm_tbl->dev_seq = 0;
ishm_tbl->odpthread_cnt = 0;
+ ishm_tbl->huge_page_limit = huge_page_limit;
odp_spinlock_init(&ishm_tbl->lock);
/* allocate space for the internal shared mem fragment table: */
@@ -1523,14 +1721,16 @@ int _odp_ishm_init_global(const odp_init_t *init)
ishm_ftbl = addr;
memset(ishm_ftbl, 0, sizeof(ishm_ftable_t));
- /*
- *reserve the address space for _ODP_ISHM_SINGLE_VA reserved blocks,
- * only address space!
- */
- spce_addr = _odp_ishmphy_book_va(max_memory, align);
- if (!spce_addr) {
- ODP_ERR("unable to reserve virtual space\n.");
- goto init_glob_err3;
+ /* Reserve memory for _ODP_ISHM_SINGLE_VA reserved blocks */
+ ishm_tbl->single_va_fd = -1;
+ if (max_memory) {
+ spce_addr = reserve_single_va(max_memory,
+ &ishm_tbl->single_va_fd);
+ if (!spce_addr) {
+ ODP_ERR("unable to reserve single VA memory\n.");
+ goto init_glob_err3;
+ }
+ ishm_tbl->single_va_start = spce_addr;
}
/* use the first fragment descriptor to describe to whole VA space: */
@@ -1564,11 +1764,14 @@ int _odp_ishm_init_global(const odp_init_t *init)
/* get ready to create pools: */
_odp_ishm_pool_init();
+ /* init cache files */
+ hp_init();
+
return 0;
init_glob_err4:
- if (_odp_ishmphy_unbook_va())
- ODP_ERR("unable to unbook virtual space\n.");
+ if (_odp_ishmphy_free_single_va())
+ ODP_ERR("unable to free single VA memory\n.");
init_glob_err3:
if (munmap(ishm_ftbl, sizeof(ishm_ftable_t)) < 0)
ODP_ERR("unable to munmap main fragment table\n.");
@@ -1585,7 +1788,7 @@ int _odp_ishm_init_local(void)
* Do not re-run this for the main ODP process, as it has already
* been done in advance at _odp_ishm_init_global() time:
*/
- if ((getpid() == odp_global_data.main_pid) &&
+ if ((getpid() == odp_global_ro.main_pid) &&
(syscall(SYS_gettid) == getpid()))
return 0;
@@ -1666,9 +1869,10 @@ int _odp_ishm_term_global(void)
{
int ret = 0;
int index;
+ int fd = ishm_tbl->single_va_fd;
ishm_block_t *block;
- if ((getpid() != odp_global_data.main_pid) ||
+ if ((getpid() != odp_global_ro.main_pid) ||
(syscall(SYS_gettid) != getpid()))
ODP_ERR("odp_term_global() must be performed by the main "
"ODP process!\n.");
@@ -1687,6 +1891,9 @@ int _odp_ishm_term_global(void)
/* perform the last thread terminate which was postponed: */
ret = do_odp_ishm_term_local();
+ /* remove the file from the filesystem, keeping its fd open */
+ unlink(ishm_tbl->single_va_filename);
+
/* free the fragment table */
if (munmap(ishm_ftbl, sizeof(ishm_ftable_t)) < 0) {
ret |= -1;
@@ -1698,12 +1905,18 @@ int _odp_ishm_term_global(void)
ODP_ERR("unable to munmap main table\n.");
}
- /* free the reserved VA space */
- if (_odp_ishmphy_unbook_va())
+ /* free the reserved single VA memory */
+ if (_odp_ishmphy_free_single_va())
+ ret |= -1;
+ if ((fd >= 0) && close(fd)) {
ret |= -1;
+ ODP_ERR("unable to close single VA\n.");
+ }
- if (!odp_global_data.shm_dir_from_env)
- free(odp_global_data.shm_dir);
+ if (!odp_global_ro.shm_dir_from_env)
+ free(odp_global_ro.shm_dir);
+
+ hp_term();
return ret;
}
@@ -1730,6 +1943,8 @@ int _odp_ishm_status(const char *title)
int nb_blocks = 0;
int single_va_blocks = 0;
int max_name_len = 0;
+ uint64_t lost_total = 0; /* statistics for total unused memory */
+ uint64_t len_total = 0; /* statistics for total allocated memory */
+ void *start; /* block start VA, when mapped in this process */
odp_spinlock_lock(&ishm_tbl->lock);
procsync();
@@ -1747,10 +1962,10 @@ int _odp_ishm_status(const char *title)
max_name_len = str_len;
}
- ODP_PRINT("ishm blocks allocated at: %s\n", title);
-
- ODP_PRINT(" %-*s flag len user_len seq ref start fd"
- " file\n", max_name_len, "name");
+ ODP_PRINT("%s\n", title);
+ ODP_PRINT(" %-*s flag %-29s %-08s %-08s %-3s %-3s %-3s file\n",
+ max_name_len, "name", "range", "user_len", "unused",
+ "seq", "ref", "fd");
/* display block table: 1 line per entry +1 extra line if mapped here */
for (i = 0; i < ISHM_MAX_NB_BLOCKS; i++) {
@@ -1776,27 +1991,43 @@ int _odp_ishm_status(const char *title)
case EXTERNAL:
huge = 'E';
break;
+ case CACHED:
+ huge = 'C';
+ break;
default:
huge = '?';
}
 proc_index = procfind_block(i);
- ODP_PRINT("%2i %-*s %s%c 0x%-08lx %-8lu %-3lu %-3lu",
+ lost_total += ishm_tbl->block[i].len -
+ ishm_tbl->block[i].user_len;
+ len_total += ishm_tbl->block[i].len;
+ /* The block may not be mapped in this process: guard the
+ * proc table access before printing the address range */
+ start = (proc_index < 0) ? NULL :
+ ishm_proctable->entry[proc_index].start;
+ ODP_PRINT("%2i %-*s %s%c 0x%-08lx-0x%08lx %-08ld %-08ld %-3lu %-3lu",
 i, max_name_len, ishm_tbl->block[i].name,
 flags, huge,
- ishm_tbl->block[i].len,
+ start,
+ (uintptr_t)start + ishm_tbl->block[i].len,
ishm_tbl->block[i].user_len,
+ ishm_tbl->block[i].len - ishm_tbl->block[i].user_len,
ishm_tbl->block[i].seq,
ishm_tbl->block[i].refcnt);
if (proc_index < 0)
continue;
- ODP_PRINT("%-08lx %-3d",
- ishm_proctable->entry[proc_index].start,
+ ODP_PRINT(" %-3d",
ishm_proctable->entry[proc_index].fd);
- ODP_PRINT("%s\n", ishm_tbl->block[i].filename);
+ ODP_PRINT("%s\n", ishm_tbl->block[i].filename[0] ?
+ ishm_tbl->block[i].filename : "(none)");
}
+ ODP_PRINT("TOTAL: %58s%-08ld %2s%-08ld\n",
+ "", len_total,
+ "", lost_total);
+ ODP_PRINT("%65s(%dMB) %4s(%dMB)\n",
+ "", len_total / 1024 / 1024,
+ "", lost_total / 1024 / 1024);
+
/* display the virtual space allocations... : */
ODP_PRINT("\nishm virtual space:\n");
@@ -1896,6 +2127,9 @@ void _odp_ishm_print(int block_index)
case EXTERNAL:
str = "external";
break;
+ case CACHED:
+ str = "cached";
+ break;
default:
str = "??";
}
diff --git a/platform/linux-generic/odp_ishmphy.c b/platform/linux-generic/odp_ishmphy.c
index e770b7bca..65e0cc826 100644
--- a/platform/linux-generic/odp_ishmphy.c
+++ b/platform/linux-generic/odp_ishmphy.c
@@ -17,7 +17,7 @@
#include <odp/api/debug.h>
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
-#include <odp_ishm_internal.h>
+#include <odp_shm_internal.h>
#include <odp_ishmphy_internal.h>
#include <stdlib.h>
@@ -30,6 +30,7 @@
#include <fcntl.h>
#include <sys/types.h>
#include <sys/wait.h>
+#include <inttypes.h>
#include <odp_ishmphy_internal.h>
static void *common_va_address;
@@ -39,125 +40,83 @@ static uint64_t common_va_len;
#define MAP_ANONYMOUS MAP_ANON
#endif
-/* Book some virtual address space
- * This function is called at odp_init_global() time to pre-book some
- * virtual address space inherited by all odpthreads (i.e. descendant
- * processes and threads) and later used to guarantee the unicity the
- * the mapping VA address when memory is reserver with the _ODP_ISHM_SINGLE_VA
- * flag.
+/* Reserve single VA memory
+ * This function is called at odp_init_global() time to pre-reserve some memory
+ * which is inherited by all odpthreads (i.e. descendant processes and threads).
+ * This memory block is later used when memory is reserved with the
+ * _ODP_ISHM_SINGLE_VA flag.
* returns the address of the mapping or NULL on error.
*/
-void *_odp_ishmphy_book_va(uintptr_t len, intptr_t align)
+void *_odp_ishmphy_reserve_single_va(uint64_t len, int fd)
{
void *addr;
- addr = mmap(NULL, len + align, PROT_NONE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
+ addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, fd, 0);
if (addr == MAP_FAILED) {
- ODP_ERR("_ishmphy_book_va failure\n");
+ ODP_ERR("mmap failed: %s\n", strerror(errno));
return NULL;
}
- if (mprotect(addr, len, PROT_NONE))
- ODP_ERR("failure for protect\n");
+ if (mprotect(addr, len, PROT_READ | PROT_WRITE))
+ ODP_ERR("mprotect failed: %s\n", strerror(errno));
- ODP_DBG("VA Reserved: %p, len=%p\n", addr, len + align);
+ ODP_DBG("VA Reserved: %p, len=%" PRIu64 "\n", addr, len);
common_va_address = addr;
common_va_len = len;
- /* return the nearest aligned address: */
- return (void *)(((uintptr_t)addr + align - 1) & (-align));
+ return addr;
}
-/* Un-book some virtual address space
- * This function is called at odp_term_global() time to unbook
- * the virtual address space booked by _ishmphy_book_va()
+/* Free single VA memory
+ * This function is called at odp_term_global() time to free the memory reserved
+ * by _odp_ishmphy_reserve_single_va()
*/
-int _odp_ishmphy_unbook_va(void)
+int _odp_ishmphy_free_single_va(void)
{
int ret;
+ if (!common_va_address)
+ return 0;
+
ret = munmap(common_va_address, common_va_len);
if (ret)
- ODP_ERR("_unishmphy_book_va failure\n");
+ ODP_ERR("munmap failed: %s\n", strerror(errno));
return ret;
}
/*
* do a mapping:
* Performs a mapping of the provided file descriptor to the process VA
- * space. If the _ODP_ISHM_SINGLE_VA flag is set, 'start' is assumed to be
- * the VA address where the mapping is to be done.
- * If the flag is not set, a new VA address is taken.
+ * space. Not to be used with _ODP_ISHM_SINGLE_VA blocks.
* returns the address of the mapping or NULL on error.
*/
-void *_odp_ishmphy_map(int fd, void *start, uint64_t size,
- int flags)
+void *_odp_ishmphy_map(int fd, uint64_t size, uint64_t offset, int flags)
{
- void *mapped_addr_tmp, *mapped_addr;
+ void *mapped_addr;
int mmap_flags = MAP_POPULATE;
- if (flags & _ODP_ISHM_SINGLE_VA) {
- if (!start) {
- ODP_ERR("failure: missing address\n");
- return NULL;
- }
- /* maps over fragment of reserved VA: */
- /* first, try a normal map. If that works, remap it where it
- * should (on the prereverved space), and remove the initial
- * normal mapping:
- * This is because it turned out that if a mapping fails
- * on a the prereserved virtual address space, then
- * the prereserved address space which was tried to be mapped
- * on becomes available to the kernel again! This was not
- * according to expectations: the assumption was that if a
- * mapping fails, the system should remain unchanged, but this
- * is obvioulsy not true (at least for huge pages when
- * exhausted).
- * So the strategy is to first map at a non reserved place
- * (which can then be freed and returned to the kernel on
- * failure) and peform a new map to the prereserved space on
- * success (which is then guaranteed to work).
- * The initial free maping can then be removed.
- */
- mapped_addr = MAP_FAILED;
- mapped_addr_tmp = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | mmap_flags, fd, 0);
- if (mapped_addr_tmp != MAP_FAILED) {
- /* If OK, do new map at right fixed location... */
- mapped_addr = mmap(start,
- size, PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_FIXED | mmap_flags,
- fd, 0);
- if (mapped_addr != start)
- ODP_ERR("new map failed:%s\n", strerror(errno));
- /* ... and remove initial mapping: */
- if (munmap(mapped_addr_tmp, size))
- ODP_ERR("munmap failed:%s\n", strerror(errno));
- }
- } else {
- /* just do a new mapping in the VA space: */
- mapped_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
- MAP_SHARED | mmap_flags, fd, 0);
- if ((mapped_addr >= common_va_address) &&
- ((char *)mapped_addr <
- (char *)common_va_address + common_va_len)) {
- ODP_ERR("VA SPACE OVERLAP!\n");
- }
+ ODP_ASSERT(!(flags & _ODP_ISHM_SINGLE_VA));
+
+ /* do a new mapping in the VA space: */
+ mapped_addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | mmap_flags, fd, offset);
+ if (mapped_addr == MAP_FAILED)
+ return NULL;
+
+ /* MAP_FAILED must be filtered out before the overlap check */
+ if ((mapped_addr >= common_va_address) &&
+ ((char *)mapped_addr <
+ (char *)common_va_address + common_va_len))
+ ODP_ERR("VA SPACE OVERLAP!\n");
- if (mapped_addr == MAP_FAILED) {
- ODP_ERR("mmap failed:%s\n", strerror(errno));
- return NULL;
- }
/* if locking is requested, lock it...*/
if (flags & _ODP_ISHM_LOCK) {
if (mlock(mapped_addr, size)) {
+ ODP_ERR("mlock failed: %s\n", strerror(errno));
if (munmap(mapped_addr, size))
- ODP_ERR("munmap failed:%s\n", strerror(errno));
- ODP_ERR("mlock failed:%s\n", strerror(errno));
+ ODP_ERR("munmap failed: %s\n", strerror(errno));
return NULL;
}
}
@@ -165,44 +124,25 @@ void *_odp_ishmphy_map(int fd, void *start, uint64_t size,
}
/* free a mapping:
- * If the _ODP_ISHM_SINGLE_VA flag was given at creation time the virtual
- * address range must be returned to the preoallocated "pool". this is
- * done by mapping non accessibly memory there (hence blocking the VA but
- * releasing the physical memory).
- * If the _ODP_ISHM_SINGLE_VA flag was not given, both physical memory and
- * virtual address space are realeased by calling the normal munmap.
+ * _ODP_ISHM_SINGLE_VA memory is not returned to Linux until global
+ * termination. If the _ODP_ISHM_SINGLE_VA flag was not given, both physical
+ * memory and virtual address space are released by calling the normal munmap.
* return 0 on success or -1 on error.
*/
int _odp_ishmphy_unmap(void *start, uint64_t len, int flags)
{
- void *addr;
int ret;
- int mmap_flgs;
-
- mmap_flgs = MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS | MAP_NORESERVE;
/* if locking was requested, unlock...*/
if (flags & _ODP_ISHM_LOCK)
munlock(start, len);
- if (flags & _ODP_ISHM_SINGLE_VA) {
- /* map unnaccessible memory overwrites previous mapping
- * and free the physical memory, but guarantees to block
- * the VA range from other mappings
- */
- addr = mmap(start, len, PROT_NONE, mmap_flgs, -1, 0);
- if (addr == MAP_FAILED) {
- ODP_ERR("_ishmphy_free failure for ISHM_SINGLE_VA\n");
- return -1;
- }
- if (mprotect(start, len, PROT_NONE))
- ODP_ERR("_ishmphy_free failure for protect\n");
+ if (flags & _ODP_ISHM_SINGLE_VA)
return 0;
- }
/* just release the mapping */
ret = munmap(start, len);
if (ret)
- ODP_ERR("_ishmphy_free failure: %s\n", strerror(errno));
+ ODP_ERR("munmap failed: %s\n", strerror(errno));
return ret;
}
diff --git a/platform/linux-generic/odp_ishmpool.c b/platform/linux-generic/odp_ishmpool.c
index 468653710..9a4e08fab 100644
--- a/platform/linux-generic/odp_ishmpool.c
+++ b/platform/linux-generic/odp_ishmpool.c
@@ -50,7 +50,7 @@
#include <odp_shm_internal.h>
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
-#include <odp_ishm_internal.h>
#include <odp_ishmpool_internal.h>
#include <stdlib.h>
#include <stdio.h>
@@ -223,7 +223,7 @@ static pool_t *_odp_ishmbud_pool_create(const char *pool_name, int store_idx,
/* allocate required memory: */
blk_idx = _odp_ishm_reserve(pool_name, total_sz, -1,
- ODP_CACHE_LINE_SIZE, flags, 0);
+ ODP_CACHE_LINE_SIZE, 0, flags, 0);
if (blk_idx < 0) {
ODP_ERR("_odp_ishm_reserve failed.");
return NULL;
@@ -558,7 +558,7 @@ static pool_t *_odp_ishmslab_pool_create(const char *pool_name, int store_idx,
/* allocate required memory: */
blk_idx = _odp_ishm_reserve(pool_name, total_sz, -1,
- ODP_CACHE_LINE_SIZE, flags, 0);
+ ODP_CACHE_LINE_SIZE, 0, flags, 0);
if (blk_idx < 0) {
ODP_ERR("_odp_ishm_reserve failed.");
return NULL;
@@ -784,23 +784,3 @@ void _odp_ishm_pool_init(void)
for (i = 0; i < MAX_NB_POOL; i++)
pool_blk_idx[i] = -1;
}
-
-_odp_ishm_pool_t *_odp_ishm_pool_lookup(const char *pool_name)
-{
- int block_idx;
- int store_idx;
-
- /* search for a _ishm block with the given name */
- block_idx = _odp_ishm_lookup_by_name(pool_name);
- if (block_idx < 0)
- return NULL;
-
- /* a block with that name exists: make sure it is within
- * the registered pools */
- for (store_idx = 0; store_idx < MAX_NB_POOL; store_idx++) {
- if (pool_blk_idx[store_idx] == block_idx)
- return _odp_ishm_address(block_idx);
- }
-
- return NULL;
-}
diff --git a/platform/linux-generic/odp_libconfig.c b/platform/linux-generic/odp_libconfig.c
index e0e995501..014409e2b 100644
--- a/platform/linux-generic/odp_libconfig.c
+++ b/platform/linux-generic/odp_libconfig.c
@@ -21,10 +21,12 @@ int _odp_libconfig_init_global(void)
const char *filename;
const char *vers;
const char *vers_rt;
- const char *ipml;
- const char *ipml_rt;
- config_t *config = &odp_global_data.libconfig_default;
- config_t *config_rt = &odp_global_data.libconfig_runtime;
+ const char *impl;
+ const char *impl_rt;
+ config_t *config = &odp_global_ro.libconfig_default;
+ config_t *config_rt = &odp_global_ro.libconfig_runtime;
+ const char *impl_field = "odp_implementation";
+ const char *vers_field = "config_file_version";
config_init(config);
config_init(config_rt);
@@ -40,32 +42,45 @@ int _odp_libconfig_init_global(void)
if (filename == NULL)
return 0;
+ ODP_PRINT("ODP CONFIG FILE: %s\n", filename);
+
if (!config_read_file(config_rt, filename)) {
- ODP_ERR("Failed to read config file: %s(%d): %s\n",
- config_error_file(config_rt),
- config_error_line(config_rt),
- config_error_text(config_rt));
+ ODP_PRINT(" ERROR: failed to read config file: %s(%d): %s\n\n",
+ config_error_file(config_rt),
+ config_error_line(config_rt),
+ config_error_text(config_rt));
goto fail;
}
/* Check runtime configuration's implementation name and version */
- if (!config_lookup_string(config, "odp_implementation", &ipml) ||
- !config_lookup_string(config_rt, "odp_implementation", &ipml_rt)) {
- ODP_ERR("Configuration missing 'odp_implementation' field\n");
+ if (!config_lookup_string(config, impl_field, &impl) ||
+ !config_lookup_string(config_rt, impl_field, &impl_rt)) {
+ ODP_PRINT(" ERROR: missing mandatory field: %s\n\n",
+ impl_field);
+ goto fail;
+ }
+ if (!config_lookup_string(config, vers_field, &vers) ||
+ !config_lookup_string(config_rt, vers_field, &vers_rt)) {
+ ODP_PRINT(" ERROR: missing mandatory field: %s\n\n",
+ vers_field);
goto fail;
}
- if (!config_lookup_string(config, "config_file_version", &vers) ||
- !config_lookup_string(config_rt, "config_file_version", &vers_rt)) {
- ODP_ERR("Configuration missing 'config_file_version' field\n");
+ if (strcmp(impl, impl_rt)) {
+ ODP_PRINT(" ERROR: ODP implementation name mismatch:\n"
+ " Expected: \"%s\"\n"
+ " Found: \"%s\"\n\n", impl, impl_rt);
goto fail;
}
- if (strcmp(vers, vers_rt) || strcmp(ipml, ipml_rt)) {
- ODP_ERR("Runtime configuration mismatch\n");
+ if (strcmp(vers, vers_rt)) {
+ ODP_PRINT(" ERROR: config file version number mismatch:\n"
+ " Expected: \"%s\"\n"
+ " Found: \"%s\"\n\n", vers, vers_rt);
goto fail;
}
return 0;
fail:
+ ODP_ERR("Config file failure\n");
config_destroy(config);
config_destroy(config_rt);
return -1;
@@ -73,8 +88,8 @@ fail:
int _odp_libconfig_term_global(void)
{
- config_destroy(&odp_global_data.libconfig_default);
- config_destroy(&odp_global_data.libconfig_runtime);
+ config_destroy(&odp_global_ro.libconfig_default);
+ config_destroy(&odp_global_ro.libconfig_runtime);
return 0;
}
@@ -84,16 +99,55 @@ int _odp_libconfig_lookup_int(const char *path, int *value)
int ret_def = CONFIG_FALSE;
int ret_rt = CONFIG_FALSE;
- ret_def = config_lookup_int(&odp_global_data.libconfig_default, path,
+ ret_def = config_lookup_int(&odp_global_ro.libconfig_default, path,
value);
/* Runtime option overrides default value */
- ret_rt = config_lookup_int(&odp_global_data.libconfig_runtime, path,
+ ret_rt = config_lookup_int(&odp_global_ro.libconfig_runtime, path,
value);
return (ret_def == CONFIG_TRUE || ret_rt == CONFIG_TRUE) ? 1 : 0;
}
+int _odp_libconfig_lookup_array(const char *path, int value[], int max_num)
+{
+ const config_t *config;
+ config_setting_t *setting;
+ int num, i, j;
+ int num_out = 0;
+
+ for (j = 0; j < 2; j++) {
+ if (j == 0)
+ config = &odp_global_ro.libconfig_default;
+ else
+ config = &odp_global_ro.libconfig_runtime;
+
+ setting = config_lookup(config, path);
+
+ /* The runtime config may omit the array, whereas the default
+ * config always defines it. When the array is defined, it
+ * must be correctly formatted. */
+ if (setting == NULL)
+ continue;
+
+ if (config_setting_is_array(setting) == CONFIG_FALSE)
+ return 0;
+
+ num = config_setting_length(setting);
+
+ if (num <= 0 || num > max_num)
+ return 0;
+
+ for (i = 0; i < num; i++)
+ value[i] = config_setting_get_int_elem(setting, i);
+
+ num_out = num;
+ }
+
+ /* Number of elements copied */
+ return num_out;
+}
+
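A hypothetical caller of the new array lookup; the option path is
illustrative. A return value of 0 means the array was absent from both
configs or was malformed:

    int val[8];
    int num = _odp_libconfig_lookup_array("example.int_array", val, 8);

    if (num == 0)
        ODP_ERR("array missing or malformed\n");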
static int lookup_int(config_t *cfg,
const char *base_path,
const char *local_path,
@@ -121,11 +175,11 @@ int _odp_libconfig_lookup_ext_int(const char *base_path,
const char *name,
int *value)
{
- if (lookup_int(&odp_global_data.libconfig_runtime,
+ if (lookup_int(&odp_global_ro.libconfig_runtime,
base_path, local_path, name, value))
return 1;
- if (lookup_int(&odp_global_data.libconfig_default,
+ if (lookup_int(&odp_global_ro.libconfig_default,
base_path, local_path, name, value))
return 1;
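The ext variant checks the runtime config before the built-in defaults, so
runtime values override them. The shm options read in odp_ishm.c, for
example, resolve against config entries like these (values illustrative):

    shm: {
        single_va_size_kb = 262144
        huge_page_limit_kb = 64
    }

    /* C side, as used in _odp_ishm_init_global() above: */
    int val_kb;

    if (!_odp_libconfig_lookup_ext_int("shm", NULL, "single_va_size_kb",
                                       &val_kb))
        ODP_ERR("option not found in either config\n");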
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index 0df32d1c8..f4c99ce15 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -22,6 +22,7 @@
#include <protocols/eth.h>
#include <protocols/ip.h>
+#include <protocols/sctp.h>
#include <protocols/tcp.h>
#include <protocols/udp.h>
@@ -2017,6 +2018,31 @@ static uint16_t packet_sum_ones_comp16(odp_packet_hdr_t *pkt_hdr,
return sum;
}
+static uint32_t packet_sum_crc32c(odp_packet_hdr_t *pkt_hdr,
+ uint32_t offset,
+ uint32_t len,
+ uint32_t init_val)
+{
+ uint32_t sum = init_val;
+
+ if (offset + len > pkt_hdr->frame_len)
+ return sum;
+
+ while (len > 0) {
+ uint32_t seglen = 0; /* GCC: avoid maybe-uninitialized warning */
+ void *mapaddr = packet_map(pkt_hdr, offset, &seglen, NULL);
+
+ if (seglen > len)
+ seglen = len;
+
+ sum = odp_hash_crc32c(mapaddr, seglen, sum);
+ len -= seglen;
+ offset += seglen;
+ }
+
+ return sum;
+}
+
/** Parser helper function for Ethernet packets */
static inline uint16_t parse_eth(packet_parser_t *prs, const uint8_t **parseptr,
uint32_t *offset, uint32_t frame_len)
@@ -2299,6 +2325,34 @@ static inline void parse_udp(packet_parser_t *prs, const uint8_t **parseptr,
*parseptr += sizeof(_odp_udphdr_t);
}
+/**
+ * Parser helper function for SCTP
+ */
+static inline void parse_sctp(packet_parser_t *prs, const uint8_t **parseptr,
+ uint16_t sctp_len,
+ odp_proto_chksums_t chksums,
+ uint32_t *l4_part_sum)
+{
+ if (odp_unlikely(sctp_len < sizeof(_odp_sctphdr_t))) {
+ prs->flags.sctp_err = 1;
+ return;
+ }
+
+ if (chksums.chksum.sctp &&
+ !prs->input_flags.ipfrag) {
+ const _odp_sctphdr_t *sctp =
+ (const _odp_sctphdr_t *)*parseptr;
+ uint32_t crc = ~0;
+ uint32_t zero = 0;
+
+ crc = odp_hash_crc32c(sctp, sizeof(*sctp) - 4, crc);
+ crc = odp_hash_crc32c(&zero, 4, crc);
+ *l4_part_sum = crc;
+ }
+
+ *parseptr += sizeof(_odp_sctphdr_t);
+}
+
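parse_sctp() stores only a partial checksum (the header with a zeroed
checksum field); packet_l4_chksum() later continues it over the payload.
This works because odp_hash_crc32c() chains through its init_val argument,
exactly as packet_sum_crc32c() above relies on. A sketch (hdr/payload
buffers and lengths are illustrative):

    uint32_t part = odp_hash_crc32c(hdr, hdr_len, ~0u);
    uint32_t full = ~odp_hash_crc32c(payload, payload_len, part);

    /* full equals ~odp_hash_crc32c() computed in one pass over the
     * concatenation of hdr and payload */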
static inline
int packet_parse_common_l3_l4(packet_parser_t *prs, const uint8_t *parseptr,
uint32_t offset,
@@ -2389,6 +2443,8 @@ int packet_parse_common_l3_l4(packet_parser_t *prs, const uint8_t *parseptr,
case _ODP_IPPROTO_SCTP:
prs->input_flags.sctp = 1;
+ parse_sctp(prs, &parseptr, frame_len - prs->l4_offset, chksums,
+ l4_part_sum);
break;
case _ODP_IPPROTO_NO_NEXT:
@@ -2587,6 +2643,30 @@ int _odp_packet_udp_chksum_insert(odp_packet_t pkt)
return _odp_packet_tcp_udp_chksum_insert(pkt, _ODP_IPPROTO_UDP);
}
+/**
+ * Calculate and fill in SCTP checksum
+ *
+ * @param pkt ODP packet
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int _odp_packet_sctp_chksum_insert(odp_packet_t pkt)
+{
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
+ uint32_t sum;
+
+ if (pkt_hdr->p.l4_offset == ODP_PACKET_OFFSET_INVALID)
+ return -1;
+
+ sum = 0;
+ odp_packet_copy_from_mem(pkt, pkt_hdr->p.l4_offset + 8, 4, &sum);
+ sum = ~packet_sum_crc32c(pkt_hdr, pkt_hdr->p.l4_offset,
+ pkt_hdr->frame_len - pkt_hdr->p.l4_offset,
+ ~0);
+ return odp_packet_copy_from_mem(pkt, pkt_hdr->p.l4_offset + 8, 4, &sum);
+}
+
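The receive-side verification lives in packet_l4_chksum() below. For
symmetry, a standalone check mirroring the insert logic could look like
this (sctp_chksum_ok() is hypothetical; it restores the stored checksum
after recomputing):

    static int sctp_chksum_ok(odp_packet_t pkt)
    {
        odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
        uint32_t l4 = pkt_hdr->p.l4_offset;
        uint32_t stored, sum = 0;

        if (l4 == ODP_PACKET_OFFSET_INVALID)
            return 0;

        /* the checksum field sits 8 bytes into the SCTP common header */
        odp_packet_copy_to_mem(pkt, l4 + 8, 4, &stored);
        odp_packet_copy_from_mem(pkt, l4 + 8, 4, &sum); /* zero it */
        sum = ~packet_sum_crc32c(pkt_hdr, l4,
                                 pkt_hdr->frame_len - l4, ~0);
        odp_packet_copy_from_mem(pkt, l4 + 8, 4, &stored); /* restore */

        return sum == stored;
    }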
static int packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
odp_proto_chksums_t chksums,
uint32_t l4_part_sum)
@@ -2627,6 +2707,29 @@ static int packet_l4_chksum(odp_packet_hdr_t *pkt_hdr,
}
}
+ if (chksums.chksum.sctp &&
+ pkt_hdr->p.input_flags.sctp &&
+ !pkt_hdr->p.input_flags.ipfrag) {
+ uint32_t sum = ~packet_sum_crc32c(pkt_hdr,
+ pkt_hdr->p.l4_offset +
+ _ODP_SCTPHDR_LEN,
+ pkt_hdr->frame_len -
+ pkt_hdr->p.l4_offset -
+ _ODP_SCTPHDR_LEN,
+ l4_part_sum);
+ _odp_sctphdr_t *sctp = packet_map(pkt_hdr,
+ pkt_hdr->p.l4_offset,
+ NULL, NULL);
+
+ pkt_hdr->p.input_flags.l4_chksum_done = 1;
+ if (sum != sctp->chksum) {
+ pkt_hdr->p.flags.l4_chksum_err = 1;
+ pkt_hdr->p.flags.sctp_err = 1;
+ ODP_DBG("SCTP chksum fail (%x/%x)!\n", sum,
+ sctp->chksum);
+ }
+ }
+
return pkt_hdr->p.flags.all_flags != 0;
}
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index cb8086afd..4954d5f04 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -28,6 +28,7 @@
#include <odp/api/time.h>
#include <odp/api/plat/time_inlines.h>
#include <odp_pcapng.h>
+#include <odp/api/plat/queue_inlines.h>
#include <string.h>
#include <inttypes.h>
@@ -64,9 +65,10 @@ int odp_pktio_init_global(void)
odp_shm_t shm;
int pktio_if;
- shm = odp_shm_reserve("odp_pktio_entries",
+ shm = odp_shm_reserve("_odp_pktio_entries",
sizeof(pktio_table_t),
- sizeof(pktio_entry_t), 0);
+ sizeof(pktio_entry_t),
+ 0);
if (shm == ODP_SHM_INVALID)
return -1;
@@ -137,7 +139,6 @@ static void init_in_queues(pktio_entry_t *entry)
for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
- entry->s.in_queue[i].queue_int = NULL;
entry->s.in_queue[i].pktin = PKTIN_INVALID;
}
}
@@ -327,7 +328,6 @@ static void destroy_in_queues(pktio_entry_t *entry, int num)
if (entry->s.in_queue[i].queue != ODP_QUEUE_INVALID) {
odp_queue_destroy(entry->s.in_queue[i].queue);
entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
- entry->s.in_queue[i].queue_int = NULL;
}
}
}
@@ -613,9 +613,18 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
odp_packet_t packets[num];
odp_packet_hdr_t *pkt_hdr;
odp_buffer_hdr_t *buf_hdr;
- int i;
- int pkts;
- int num_rx = 0;
+ int i, pkts, num_rx, num_ev, num_dst;
+ odp_queue_t cur_queue;
+ odp_event_t ev[num];
+ odp_queue_t dst[num];
+ int dst_idx[num];
+
+ num_rx = 0;
+ num_dst = 0;
+ num_ev = 0;
+
+ /* Some compilers need this dummy initialization */
+ cur_queue = ODP_QUEUE_INVALID;
pkts = entry->s.ops->recv(entry, pktin_index, packets, num);
@@ -624,60 +633,96 @@ static inline int pktin_recv_buf(pktio_entry_t *entry, int pktin_index,
pkt_hdr = packet_hdr(pkt);
buf_hdr = packet_to_buf_hdr(pkt);
- if (pkt_hdr->p.input_flags.dst_queue) {
- int ret;
+ if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
+ /* Sort events for enqueue multi operation(s) */
+ if (odp_unlikely(num_dst == 0)) {
+ num_dst = 1;
+ cur_queue = pkt_hdr->dst_queue;
+ dst[0] = cur_queue;
+ dst_idx[0] = 0;
+ }
- ret = queue_fn->enq(pkt_hdr->dst_queue, buf_hdr);
- if (ret < 0)
- odp_packet_free(pkt);
+ ev[num_ev] = odp_packet_to_event(pkt);
+
+ if (cur_queue != pkt_hdr->dst_queue) {
+ cur_queue = pkt_hdr->dst_queue;
+ dst[num_dst] = cur_queue;
+ dst_idx[num_dst] = num_ev;
+ num_dst++;
+ }
+
+ num_ev++;
continue;
}
buffer_hdrs[num_rx++] = buf_hdr;
}
+
+ /* Optimization for the common case */
+ if (odp_likely(num_dst == 0))
+ return num_rx;
+
+ for (i = 0; i < num_dst; i++) {
+ int num_enq, ret;
+ int idx = dst_idx[i];
+
+ if (i == (num_dst - 1))
+ num_enq = num_ev - idx;
+ else
+ num_enq = dst_idx[i + 1] - idx;
+
+ ret = odp_queue_enq_multi(dst[i], &ev[idx], num_enq);
+
+ if (ret < 0)
+ ret = 0;
+
+ if (ret < num_enq)
+ odp_event_free_multi(&ev[idx + ret], num_enq - ret);
+ }
+
return num_rx;
}
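
pktin_recv_buf() now batches classifier-directed packets: dst[] records each change of destination queue and dst_idx[] the first event index of that run, so consecutive packets headed to the same queue become one odp_queue_enq_multi() call instead of one enqueue per packet (note that only consecutive packets merge; interleaved destinations create one run each). The grouping loop, condensed from the hunks above:

    /* Enqueue each run of same-destination events in one call; free
     * whatever the destination queue could not absorb. */
    for (i = 0; i < num_dst; i++) {
        int num_enq, ret;
        int idx = dst_idx[i];

        /* Run length: up to the next run start, or to the end */
        num_enq = (i == num_dst - 1) ? num_ev - idx
                                     : dst_idx[i + 1] - idx;

        ret = odp_queue_enq_multi(dst[i], &ev[idx], num_enq);
        if (ret < 0)
            ret = 0;
        if (ret < num_enq)
            odp_event_free_multi(&ev[idx + ret], num_enq - ret);
    }
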
-static int pktout_enqueue(void *q_int, odp_buffer_hdr_t *buf_hdr)
+static int pktout_enqueue(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr)
{
odp_packet_t pkt = packet_from_buf_hdr(buf_hdr);
int len = 1;
int nbr;
- if (sched_fn->ord_enq_multi(q_int, (void **)buf_hdr, len, &nbr))
+ if (sched_fn->ord_enq_multi(queue, (void **)buf_hdr, len, &nbr))
return (nbr == len ? 0 : -1);
- nbr = odp_pktout_send(queue_fn->get_pktout(q_int), &pkt, len);
+ nbr = odp_pktout_send(queue_fn->get_pktout(queue), &pkt, len);
return (nbr == len ? 0 : -1);
}
-static int pktout_enq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[], int num)
+static int pktout_enq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
+ int num)
{
odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
int nbr;
int i;
- if (sched_fn->ord_enq_multi(q_int, (void **)buf_hdr, num, &nbr))
+ if (sched_fn->ord_enq_multi(queue, (void **)buf_hdr, num, &nbr))
return nbr;
for (i = 0; i < num; ++i)
pkt_tbl[i] = packet_from_buf_hdr(buf_hdr[i]);
- nbr = odp_pktout_send(queue_fn->get_pktout(q_int), pkt_tbl, num);
+ nbr = odp_pktout_send(queue_fn->get_pktout(queue), pkt_tbl, num);
return nbr;
}
-static odp_buffer_hdr_t *pktin_dequeue(void *q_int)
+static odp_buffer_hdr_t *pktin_dequeue(odp_queue_t queue)
{
odp_buffer_hdr_t *buf_hdr;
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts;
- odp_pktin_queue_t pktin_queue = queue_fn->get_pktin(q_int);
+ odp_pktin_queue_t pktin_queue = queue_fn->get_pktin(queue);
odp_pktio_t pktio = pktin_queue.pktio;
int pktin_index = pktin_queue.index;
pktio_entry_t *entry = get_pktio_entry(pktio);
- buf_hdr = queue_fn->deq(q_int);
- if (buf_hdr != NULL)
+ if (queue_fn->orig_deq_multi(queue, &buf_hdr, 1) == 1)
return buf_hdr;
pkts = pktin_recv_buf(entry, pktin_index, hdr_tbl, QUEUE_MULTI_MAX);
@@ -689,7 +734,8 @@ static odp_buffer_hdr_t *pktin_dequeue(void *q_int)
int num_enq;
int num = pkts - 1;
- num_enq = queue_fn->enq_multi(q_int, &hdr_tbl[1], num);
+ num_enq = odp_queue_enq_multi(queue,
+ (odp_event_t *)&hdr_tbl[1], num);
if (odp_unlikely(num_enq < num)) {
if (odp_unlikely(num_enq < 0))
@@ -705,17 +751,18 @@ static odp_buffer_hdr_t *pktin_dequeue(void *q_int)
return buf_hdr;
}
-static int pktin_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[], int num)
+static int pktin_deq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr[],
+ int num)
{
int nbr;
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts, i, j;
- odp_pktin_queue_t pktin_queue = queue_fn->get_pktin(q_int);
+ odp_pktin_queue_t pktin_queue = queue_fn->get_pktin(queue);
odp_pktio_t pktio = pktin_queue.pktio;
int pktin_index = pktin_queue.index;
pktio_entry_t *entry = get_pktio_entry(pktio);
- nbr = queue_fn->deq_multi(q_int, buf_hdr, num);
+ nbr = queue_fn->orig_deq_multi(queue, buf_hdr, num);
if (odp_unlikely(nbr > num))
ODP_ABORT("queue_deq_multi req: %d, returned %d\n", num, nbr);
@@ -740,7 +787,7 @@ static int pktin_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[], int num)
if (j) {
int num_enq;
- num_enq = queue_fn->enq_multi(q_int, hdr_tbl, j);
+ num_enq = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl, j);
if (odp_unlikely(num_enq < j)) {
if (odp_unlikely(num_enq < 0))
@@ -765,7 +812,7 @@ int sched_cb_pktin_poll_one(int pktio_index,
odp_packet_hdr_t *pkt_hdr;
odp_buffer_hdr_t *buf_hdr;
odp_packet_t packets[QUEUE_MULTI_MAX];
- void *q_int;
+ odp_queue_t queue;
if (odp_unlikely(entry->s.state != PKTIO_STATE_STARTED)) {
if (entry->s.state < PKTIO_STATE_ACTIVE ||
@@ -785,9 +832,14 @@ int sched_cb_pktin_poll_one(int pktio_index,
pkt = packets[i];
pkt_hdr = packet_hdr(pkt);
if (odp_unlikely(pkt_hdr->p.input_flags.dst_queue)) {
- q_int = pkt_hdr->dst_queue;
+ int num_enq;
+
+ queue = pkt_hdr->dst_queue;
buf_hdr = packet_to_buf_hdr(pkt);
- if (queue_fn->enq_multi(q_int, &buf_hdr, 1) < 0) {
+ num_enq = odp_queue_enq_multi(queue,
+ (odp_event_t *)&buf_hdr,
+ 1);
+ if (num_enq < 0) {
/* Queue full? */
odp_packet_free(pkt);
__atomic_fetch_add(&entry->s.stats.in_discards,
@@ -819,53 +871,6 @@ int sched_cb_pktin_poll(int pktio_index, int pktin_index,
return pktin_recv_buf(entry, pktin_index, hdr_tbl, num);
}
-int sched_cb_pktin_poll_old(int pktio_index, int num_queue, int index[])
-{
- odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- int num, idx;
- pktio_entry_t *entry = pktio_entry_by_index(pktio_index);
- int state = entry->s.state;
-
- if (odp_unlikely(state != PKTIO_STATE_STARTED)) {
- if (state < PKTIO_STATE_ACTIVE ||
- state == PKTIO_STATE_STOP_PENDING)
- return -1;
-
- ODP_DBG("interface not started\n");
- return 0;
- }
-
- for (idx = 0; idx < num_queue; idx++) {
- void *q_int;
- int num_enq;
-
- num = pktin_recv_buf(entry, index[idx], hdr_tbl,
- QUEUE_MULTI_MAX);
-
- if (num == 0)
- continue;
-
- if (num < 0) {
- ODP_ERR("Packet recv error\n");
- return -1;
- }
-
- q_int = entry->s.in_queue[index[idx]].queue_int;
- num_enq = queue_fn->enq_multi(q_int, hdr_tbl, num);
-
- if (odp_unlikely(num_enq < num)) {
- if (odp_unlikely(num_enq < 0))
- num_enq = 0;
-
- ODP_DBG("Interface %s dropped %i packets\n",
- entry->s.name, num - num_enq);
- buffer_free_multi(&hdr_tbl[num_enq], num - num_enq);
- }
- }
-
- return 0;
-}
-
void sched_cb_pktio_stop_finalize(int pktio_index)
{
int state;
@@ -1272,9 +1277,9 @@ int odp_pktio_term_global(void)
pktio_if);
}
- ret = odp_shm_free(odp_shm_lookup("odp_pktio_entries"));
+ ret = odp_shm_free(odp_shm_lookup("_odp_pktio_entries"));
if (ret != 0)
- ODP_ERR("shm free failed for odp_pktio_entries");
+ ODP_ERR("shm free failed for _odp_pktio_entries");
return ret;
}
@@ -1371,35 +1376,6 @@ int odp_pktio_stats_reset(odp_pktio_t pktio)
return ret;
}
-static int abort_pktin_enqueue(void *q_int ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr ODP_UNUSED)
-{
- ODP_ABORT("attempted enqueue to a pktin queue");
- return -1;
-}
-
-static int abort_pktin_enq_multi(void *q_int ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr[] ODP_UNUSED,
- int num ODP_UNUSED)
-{
- ODP_ABORT("attempted enqueue to a pktin queue");
- return 0;
-}
-
-static odp_buffer_hdr_t *abort_pktout_dequeue(void *q_int ODP_UNUSED)
-{
- ODP_ABORT("attempted dequeue from a pktout queue");
- return NULL;
-}
-
-static int abort_pktout_deq_multi(void *q_int ODP_UNUSED,
- odp_buffer_hdr_t *buf_hdr[] ODP_UNUSED,
- int num ODP_UNUSED)
-{
- ODP_ABORT("attempted dequeue from a pktout queue");
- return 0;
-}
-
int odp_pktin_queue_config(odp_pktio_t pktio,
const odp_pktin_queue_param_t *param)
{
@@ -1410,7 +1386,6 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
unsigned i;
int rc;
odp_queue_t queue;
- void *q_int;
odp_pktin_queue_param_t default_param;
if (param == NULL) {
@@ -1498,23 +1473,19 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
return -1;
}
- q_int = queue_fn->from_ext(queue);
-
if (mode == ODP_PKTIN_MODE_QUEUE) {
- queue_fn->set_pktin(q_int, pktio, i);
- queue_fn->set_enq_deq_fn(q_int,
- abort_pktin_enqueue,
- abort_pktin_enq_multi,
+ queue_fn->set_pktin(queue, pktio, i);
+ queue_fn->set_enq_deq_fn(queue,
+ NULL,
+ NULL,
pktin_dequeue,
pktin_deq_multi);
}
entry->s.in_queue[i].queue = queue;
- entry->s.in_queue[i].queue_int = q_int;
} else {
entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
- entry->s.in_queue[i].queue_int = NULL;
}
entry->s.in_queue[i].pktin.index = i;
@@ -1606,7 +1577,6 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
for (i = 0; i < num_queues; i++) {
odp_queue_t queue;
odp_queue_param_t queue_param;
- void *q_int;
char name[ODP_QUEUE_NAME_LEN];
int pktio_id = odp_pktio_index(pktio);
@@ -1626,15 +1596,14 @@ int odp_pktout_queue_config(odp_pktio_t pktio,
return -1;
}
- q_int = queue_fn->from_ext(queue);
- queue_fn->set_pktout(q_int, pktio, i);
+ queue_fn->set_pktout(queue, pktio, i);
/* Override default enqueue / dequeue functions */
- queue_fn->set_enq_deq_fn(q_int,
+ queue_fn->set_enq_deq_fn(queue,
pktout_enqueue,
pktout_enq_multi,
- abort_pktout_dequeue,
- abort_pktout_deq_multi);
+ NULL,
+ NULL);
entry->s.out_queue[i].queue = queue;
}
diff --git a/platform/linux-generic/odp_pcapng.c b/platform/linux-generic/odp_pcapng.c
index b8d29e5a8..21fa7b2c5 100644
--- a/platform/linux-generic/odp_pcapng.c
+++ b/platform/linux-generic/odp_pcapng.c
@@ -74,7 +74,7 @@ static void get_pcapng_fifo_name(char *pcapng_entry, size_t len,
char *pktio_name, int qidx)
{
snprintf(pcapng_entry, len, "%d-%s-flow-%d",
- odp_global_data.main_pid, pktio_name, qidx);
+ odp_global_ro.main_pid, pktio_name, qidx);
pcapng_entry[len - 1] = 0;
}
@@ -120,13 +120,13 @@ static void *inotify_update(void *arg)
while (1) {
offset = 0;
FD_ZERO(&rfds);
- FD_SET(odp_global_data.inotify_pcapng_fd, &rfds);
+ FD_SET(odp_global_rw->inotify_pcapng_fd, &rfds);
time.tv_sec = 2;
time.tv_usec = 0;
- select(odp_global_data.inotify_pcapng_fd + 1, &rfds, NULL,
+ select(odp_global_rw->inotify_pcapng_fd + 1, &rfds, NULL,
NULL, &time);
- if (FD_ISSET(odp_global_data.inotify_pcapng_fd, &rfds)) {
- rdlen = read(odp_global_data.inotify_pcapng_fd,
+ if (FD_ISSET(odp_global_rw->inotify_pcapng_fd, &rfds)) {
+ rdlen = read(odp_global_rw->inotify_pcapng_fd,
buffer, INOTIFY_BUF_LEN);
while (offset < rdlen) {
int qidx;
@@ -219,23 +219,23 @@ int pcapng_prepare(pktio_entry_t *entry)
}
/* already running from a previous pktio */
- if (odp_global_data.inotify_pcapng_is_running == 1)
+ if (odp_global_rw->inotify_pcapng_is_running == 1)
return 0;
- odp_global_data.inotify_pcapng_fd = -1;
- odp_global_data.inotify_watch_fd = -1;
+ odp_global_rw->inotify_pcapng_fd = -1;
+ odp_global_rw->inotify_watch_fd = -1;
- odp_global_data.inotify_pcapng_fd = inotify_init();
- if (odp_global_data.inotify_pcapng_fd == -1) {
+ odp_global_rw->inotify_pcapng_fd = inotify_init();
+ if (odp_global_rw->inotify_pcapng_fd == -1) {
ODP_ERR("can't init inotify. pcap disabled\n");
goto out_destroy;
}
- odp_global_data.inotify_watch_fd =
- inotify_add_watch(odp_global_data.inotify_pcapng_fd,
+ odp_global_rw->inotify_watch_fd =
+ inotify_add_watch(odp_global_rw->inotify_pcapng_fd,
PCAPNG_WATCH_DIR, IN_CLOSE | IN_OPEN);
- if (odp_global_data.inotify_watch_fd == -1) {
+ if (odp_global_rw->inotify_watch_fd == -1) {
ODP_ERR("can't register inotify for %s. pcap disabled\n",
strerror(errno));
goto out_destroy;
@@ -243,12 +243,12 @@ int pcapng_prepare(pktio_entry_t *entry)
/* create a thread to poll inotify triggers */
pthread_attr_init(&attr);
- ret = pthread_create(&odp_global_data.inotify_thread, &attr,
+ ret = pthread_create(&odp_global_rw->inotify_thread, &attr,
inotify_update, entry);
if (ret)
ODP_ERR("can't start inotify thread. pcap disabled\n");
else
- odp_global_data.inotify_pcapng_is_running = 1;
+ odp_global_rw->inotify_pcapng_is_running = 1;
return ret;
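
For context, the pcapng capture support watches PCAPNG_WATCH_DIR with inotify so the poll thread notices a reader opening or closing one of the per-queue FIFOs; the rename from odp_global_data to odp_global_ro/odp_global_rw splits that state by writability. The core inotify setup, reduced to a standalone sketch using the standard Linux API:

    /* Standalone sketch of the inotify setup used by pcapng_prepare()
     * (error handling trimmed). */
    #include <sys/inotify.h>
    #include <unistd.h>

    static int watch_fifo_dir(const char *dir)
    {
        int fd = inotify_init();

        if (fd == -1)
            return -1;

        /* Fires when any file in 'dir' is opened or closed */
        if (inotify_add_watch(fd, dir, IN_OPEN | IN_CLOSE) == -1) {
            close(fd);
            return -1;
        }

        /* select()/read() this fd to receive struct inotify_event */
        return fd;
    }
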
@@ -265,24 +265,24 @@ void pcapng_destroy(pktio_entry_t *entry)
unsigned int max_queue =
MAX(entry->s.num_in_queue, entry->s.num_out_queue);
- if (odp_global_data.inotify_pcapng_is_running == 1) {
- ret = pthread_cancel(odp_global_data.inotify_thread);
+ if (odp_global_rw->inotify_pcapng_is_running == 1) {
+ ret = pthread_cancel(odp_global_rw->inotify_thread);
if (ret)
ODP_ERR("can't cancel inotify thread %s\n",
strerror(errno));
}
/* fd's will be -1 in case of any failure */
- ret = inotify_rm_watch(odp_global_data.inotify_pcapng_fd,
- odp_global_data.inotify_watch_fd);
+ ret = inotify_rm_watch(odp_global_rw->inotify_pcapng_fd,
+ odp_global_rw->inotify_watch_fd);
if (ret)
ODP_ERR("can't deregister inotify %s\n", strerror(errno));
- if (odp_global_data.inotify_pcapng_fd != -1)
- close(odp_global_data.inotify_pcapng_fd);
+ if (odp_global_rw->inotify_pcapng_fd != -1)
+ close(odp_global_rw->inotify_pcapng_fd);
- if (odp_global_data.inotify_watch_fd != -1)
- close(odp_global_data.inotify_watch_fd);
+ if (odp_global_rw->inotify_watch_fd != -1)
+ close(odp_global_rw->inotify_watch_fd);
for (i = 0; i < max_queue; i++) {
char pcapng_name[128];
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 01ee9234b..7f4bb795a 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -16,9 +16,14 @@
#include <odp_pool_internal.h>
#include <odp_init_internal.h>
#include <odp_packet_internal.h>
+#include <odp_packet_dpdk.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
#include <odp_ring_internal.h>
+#include <odp_global_data.h>
+#include <odp_libconfig_internal.h>
+#include <odp_shm_internal.h>
+#include <odp_timer_internal.h>
#include <string.h>
#include <stdio.h>
@@ -30,8 +35,9 @@
#define UNLOCK(a) odp_ticketlock_unlock(a)
#define LOCK_INIT(a) odp_ticketlock_init(a)
-#define CACHE_BURST 32
-#define RING_SIZE_MIN (2 * CACHE_BURST)
+#define CACHE_BURST 32
+#define RING_SIZE_MIN (2 * CACHE_BURST)
+#define POOL_MAX_NUM_MIN RING_SIZE_MIN
/* Make sure packet buffers don't cross huge page boundaries starting from this
* page size. 2MB is typically the smallest used huge page size. */
@@ -40,6 +46,10 @@
/* Define a practical limit for contiguous memory allocations */
#define MAX_SIZE (10 * 1024 * 1024)
+/* Minimum supported buffer alignment. Requests for values below this will be
+ * rounded up to this value. */
+#define BUFFER_ALIGN_MIN ODP_CACHE_LINE_SIZE
+
ODP_STATIC_ASSERT(CONFIG_POOL_CACHE_SIZE > (2 * CACHE_BURST),
"cache_burst_size_too_large_compared_to_cache_size");
@@ -70,7 +80,7 @@ const _odp_pool_inline_offset_t ODP_ALIGNED_CACHE _odp_pool_inline = {
static inline odp_pool_t pool_index_to_handle(uint32_t pool_idx)
{
- return _odp_cast_scalar(odp_pool_t, pool_idx);
+ return _odp_cast_scalar(odp_pool_t, pool_idx + 1);
}
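
Biasing the handle by one presumably keeps the zero value free for the invalid pool handle, so a zero-initialized handle can never alias pool 0. The inverse mapping (a sketch; the in-tree decoder may differ) simply subtracts the bias:

    /* Sketch of the inverse of pool_index_to_handle() under the new
     * encoding: handle 0 is reserved, real pools start at 1. */
    static inline uint32_t pool_index_from_handle(odp_pool_t pool_hdl)
    {
        return (uint32_t)(uintptr_t)pool_hdl - 1;
    }
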
static inline pool_t *pool_from_buf(odp_buffer_t buf)
@@ -80,6 +90,32 @@ static inline pool_t *pool_from_buf(odp_buffer_t buf)
return buf_hdr->pool_ptr;
}
+static int read_config_file(pool_table_t *pool_tbl)
+{
+ const char *str;
+ int val = 0;
+
+ ODP_PRINT("Pool config:\n");
+
+ str = "pool.pkt.max_num";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ if (val > CONFIG_POOL_MAX_NUM || val < POOL_MAX_NUM_MIN) {
+ ODP_ERR("Bad value %s = %u\n", str, val);
+ return -1;
+ }
+
+ pool_tbl->config.pkt_max_num = val;
+ ODP_PRINT(" %s: %i\n", str, val);
+
+ ODP_PRINT("\n");
+
+ return 0;
+}
+
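+
The new pool.pkt.max_num option comes from the libconfig-based runtime configuration (see the config/odp-linux-generic.conf changes in this patch set) and must fall within [POOL_MAX_NUM_MIN, CONFIG_POOL_MAX_NUM]. A hedged example of the corresponding entry; the value shipped in the default file may differ:

    pool: {
        pkt: {
            # Maximum number of packets in a packet pool
            max_num = 262144
        }
    }
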
int odp_pool_init_global(void)
{
uint32_t i;
@@ -87,7 +123,8 @@ int odp_pool_init_global(void)
shm = odp_shm_reserve("_odp_pool_table",
sizeof(pool_table_t),
- ODP_CACHE_LINE_SIZE, 0);
+ ODP_CACHE_LINE_SIZE,
+ 0);
pool_tbl = odp_shm_addr(shm);
@@ -97,6 +134,11 @@ int odp_pool_init_global(void)
memset(pool_tbl, 0, sizeof(pool_table_t));
pool_tbl->shm = shm;
+ if (read_config_file(pool_tbl)) {
+ odp_shm_free(shm);
+ return -1;
+ }
+
for (i = 0; i < ODP_CONFIG_POOLS; i++) {
pool_t *pool = pool_entry(i);
@@ -186,7 +228,7 @@ int odp_pool_term_local(void)
return 0;
}
-static pool_t *reserve_pool(void)
+static pool_t *reserve_pool(uint32_t shmflags)
{
int i;
pool_t *pool;
@@ -199,11 +241,11 @@ static pool_t *reserve_pool(void)
if (pool->reserved == 0) {
pool->reserved = 1;
UNLOCK(&pool->lock);
- sprintf(ring_name, "pool_ring_%d", i);
+ sprintf(ring_name, "_odp_pool_ring_%d", i);
pool->ring_shm =
odp_shm_reserve(ring_name,
sizeof(pool_ring_t),
- ODP_CACHE_LINE_SIZE, 0);
+ ODP_CACHE_LINE_SIZE, shmflags);
if (odp_unlikely(pool->ring_shm == ODP_SHM_INVALID)) {
ODP_ERR("Unable to alloc pool ring %d\n", i);
LOCK(&pool->lock);
@@ -245,7 +287,10 @@ static void init_buffers(pool_t *pool)
type = pool->params.type;
for (i = 0; i < pool->num + skipped_blocks ; i++) {
- addr = &pool->base_addr[i * pool->block_size];
+ int skip = 0;
+
+ addr = &pool->base_addr[(i * pool->block_size) +
+ pool->block_offset];
buf_hdr = addr;
pkt_hdr = addr;
/* Skip packet buffers which cross huge page boundaries. Some
@@ -262,7 +307,7 @@ static void init_buffers(pool_t *pool)
~(page_size - 1));
if (last_page != first_page) {
skipped_blocks++;
- continue;
+ skip = 1;
}
}
if (pool->uarea_size)
@@ -307,8 +352,10 @@ static void init_buffers(pool_t *pool)
pool->tailroom];
/* Store buffer index into the global pool */
- ring_enq(ring, mask, i);
+ if (!skip)
+ ring_enq(ring, mask, i);
}
+ pool->skipped_blocks = skipped_blocks;
}
static bool shm_is_from_huge_pages(odp_shm_t shm)
@@ -337,6 +384,7 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
uint32_t max_len;
uint32_t ring_size;
uint32_t num_extra = 0;
+ uint32_t extra_shm_flags = 0;
int name_len;
const char *postfix = "_uarea";
char uarea_name[ODP_POOL_NAME_LEN + sizeof(postfix)];
@@ -346,8 +394,8 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
if (params->type == ODP_POOL_BUFFER)
align = params->buf.align;
- if (align < ODP_CONFIG_BUFFER_ALIGN_MIN)
- align = ODP_CONFIG_BUFFER_ALIGN_MIN;
+ if (align < BUFFER_ALIGN_MIN)
+ align = BUFFER_ALIGN_MIN;
/* Validate requested buffer alignment */
if (align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
@@ -415,7 +463,7 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
if (uarea_size)
uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
- pool = reserve_pool();
+ pool = reserve_pool(shmflags);
if (pool == NULL) {
ODP_ERR("No more free pools");
@@ -436,11 +484,39 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
pool->params = *params;
- hdr_size = sizeof(odp_packet_hdr_t);
- hdr_size = ROUNDUP_CACHE_LINE(hdr_size);
+ if (params->type == ODP_POOL_PACKET) {
+ hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
+ block_size = ROUNDUP_CACHE_LINE(hdr_size + align + headroom +
+ seg_len + tailroom);
+ } else {
+ /* Header size is rounded up to cache line size, so the
+ * following data can be cache line aligned without extra
+ * padding. */
+ uint32_t align_pad = (align > ODP_CACHE_LINE_SIZE) ?
+ align - ODP_CACHE_LINE_SIZE : 0;
+
+ hdr_size = (params->type == ODP_POOL_BUFFER) ?
+ ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t)) :
+ ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
+
+ block_size = ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len);
+ }
- block_size = ROUNDUP_CACHE_LINE(hdr_size + align + headroom + seg_len +
- tailroom);
+ /* Calculate extra space required for storing DPDK objects and mbuf
+ * headers. NOP if zero-copy is disabled. */
+ pool->block_offset = 0;
+ if (params->type == ODP_POOL_PACKET) {
+ uint32_t dpdk_obj_size;
+
+ dpdk_obj_size = _odp_dpdk_pool_obj_size(pool, block_size);
+ if (!dpdk_obj_size) {
+ ODP_ERR("Calculating DPDK mempool obj size failed\n");
+ return ODP_POOL_INVALID;
+ }
+ if (dpdk_obj_size != block_size)
+ extra_shm_flags |= _ODP_ISHM_USE_HP;
+ block_size = dpdk_obj_size;
+ }
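
With this split, only packet pools keep headroom/tailroom (and possibly DPDK mempool padding) in the block, while buffer and timeout pools pad only when the requested alignment exceeds a cache line. Worked numbers under assumed sizes:

    /* Assumed example, 64 B cache lines, buffer pool with align = 256:
     *   align_pad  = 256 - 64 = 192
     *   block_size = ROUNDUP_CACHE_LINE(hdr_size + 192 + seg_len)
     * With align <= 64 the pad is 0: the cache-line-rounded header
     * already provides the alignment, so small buffers shrink. */
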
/* Allocate extra memory for skipping packet buffers which cross huge
* page boundaries. */
@@ -451,10 +527,11 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
FIRST_HP_SIZE - 1) / FIRST_HP_SIZE);
}
- if (num <= RING_SIZE_MIN)
+ /* Ring size must be larger than the number of items stored */
+ if (num + 1 <= RING_SIZE_MIN)
ring_size = RING_SIZE_MIN;
else
- ring_size = ROUNDUP_POWER2_U32(num);
+ ring_size = ROUNDUP_POWER2_U32(num + 1);
pool->ring_mask = ring_size - 1;
pool->num = num;
@@ -471,8 +548,8 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
pool->ext_desc = NULL;
pool->ext_destroy = NULL;
- shm = odp_shm_reserve(pool->name, pool->shm_size,
- ODP_PAGE_SIZE, shmflags);
+ shm = _odp_shm_reserve(pool->name, pool->shm_size,
+ ODP_PAGE_SIZE, shmflags, extra_shm_flags);
pool->shm = shm;
@@ -503,6 +580,12 @@ static odp_pool_t pool_create(const char *name, odp_pool_param_t *params,
ring_init(&pool->ring->hdr);
init_buffers(pool);
+ /* Create zero-copy DPDK memory pool. NOP if zero-copy is disabled. */
+ if (params->type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) {
+ ODP_ERR("Creating DPDK packet pool failed\n");
+ goto error;
+ }
+
return pool->pool_hdl;
error:
@@ -528,57 +611,74 @@ static int check_params(odp_pool_param_t *params)
switch (params->type) {
case ODP_POOL_BUFFER:
if (params->buf.num > capa.buf.max_num) {
- ODP_DBG("buf.num too large %u\n", params->buf.num);
+ ODP_ERR("buf.num too large %u\n", params->buf.num);
return -1;
}
if (params->buf.size > capa.buf.max_size) {
- ODP_DBG("buf.size too large %u\n", params->buf.size);
+ ODP_ERR("buf.size too large %u\n", params->buf.size);
return -1;
}
if (params->buf.align > capa.buf.max_align) {
- ODP_DBG("buf.align too large %u\n", params->buf.align);
+ ODP_ERR("buf.align too large %u\n", params->buf.align);
return -1;
}
break;
case ODP_POOL_PACKET:
+ if (params->pkt.num > capa.pkt.max_num) {
+ ODP_ERR("pkt.num too large %u\n", params->pkt.num);
+ return -1;
+ }
+
+ if (params->pkt.max_num > capa.pkt.max_num) {
+ ODP_ERR("pkt.max_num too large %u\n",
+ params->pkt.max_num);
+ return -1;
+ }
+
if (params->pkt.len > capa.pkt.max_len) {
- ODP_DBG("pkt.len too large %u\n", params->pkt.len);
+ ODP_ERR("pkt.len too large %u\n", params->pkt.len);
return -1;
}
if (params->pkt.max_len > capa.pkt.max_len) {
- ODP_DBG("pkt.max_len too large %u\n",
+ ODP_ERR("pkt.max_len too large %u\n",
params->pkt.max_len);
return -1;
}
if (params->pkt.seg_len > capa.pkt.max_seg_len) {
- ODP_DBG("pkt.seg_len too large %u\n",
+ ODP_ERR("pkt.seg_len too large %u\n",
params->pkt.seg_len);
return -1;
}
if (params->pkt.uarea_size > capa.pkt.max_uarea_size) {
- ODP_DBG("pkt.uarea_size too large %u\n",
+ ODP_ERR("pkt.uarea_size too large %u\n",
params->pkt.uarea_size);
return -1;
}
+ if (params->pkt.headroom > capa.pkt.max_headroom) {
+ ODP_ERR("pkt.headroom too large %u\n",
+ params->pkt.headroom);
+ return -1;
+ }
+
break;
case ODP_POOL_TIMEOUT:
if (params->tmo.num > capa.tmo.max_num) {
- ODP_DBG("tmo.num too large %u\n", params->tmo.num);
+ ODP_ERR("tmo.num too large %u\n", params->tmo.num);
return -1;
}
break;
default:
- ODP_DBG("bad pool type %i\n", params->type);
+ ODP_ERR("bad pool type %i\n", params->type);
return -1;
}
@@ -594,6 +694,8 @@ odp_pool_t odp_pool_create(const char *name, odp_pool_param_t *params)
if (params->type == ODP_POOL_PACKET)
shm_flags = ODP_SHM_PROC;
+ if (odp_global_ro.shm_single_va)
+ shm_flags |= ODP_SHM_SINGLE_VA;
return pool_create(name, params, shm_flags);
}
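
None of this shm flag plumbing changes the application-facing flow; a packet pool is still created through the public API, for example (illustrative sizes):

    /* Minimal packet pool creation; returns ODP_POOL_INVALID on failure. */
    odp_pool_param_t param;
    odp_pool_t pool;

    odp_pool_param_init(&param);
    param.type        = ODP_POOL_PACKET;
    param.pkt.num     = 1024;  /* packets in the pool */
    param.pkt.len     = 1518;  /* guaranteed packet length */
    param.pkt.seg_len = 0;     /* let the implementation pick */

    pool = odp_pool_create("pkt_pool", &param);
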
@@ -904,7 +1006,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Packet pools */
capa->pkt.max_pools = ODP_CONFIG_POOLS;
capa->pkt.max_len = CONFIG_PACKET_MAX_LEN;
- capa->pkt.max_num = CONFIG_POOL_MAX_NUM;
+ capa->pkt.max_num = pool_tbl->config.pkt_max_num;
capa->pkt.min_headroom = CONFIG_PACKET_HEADROOM;
capa->pkt.max_headroom = CONFIG_PACKET_HEADROOM;
capa->pkt.min_tailroom = CONFIG_PACKET_TAILROOM;
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 89eed3c0d..37a2fad12 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -15,6 +15,7 @@
#include <odp_buffer_internal.h>
#include <odp_pool_internal.h>
#include <odp_init_internal.h>
+#include <odp_timer_internal.h>
#include <odp/api/shared_memory.h>
#include <odp/api/schedule.h>
#include <odp_schedule_if.h>
@@ -27,8 +28,7 @@
#include <odp/api/traffic_mngr.h>
#include <odp_libconfig_internal.h>
#include <odp/api/plat/queue_inline_types.h>
-
-#define NUM_INTERNAL_QUEUES 64
+#include <odp_global_data.h>
#include <odp/api/plat/ticketlock_inlines.h>
#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->s.lock))
@@ -38,7 +38,7 @@
#include <string.h>
#include <inttypes.h>
-#define MIN_QUEUE_SIZE 8
+#define MIN_QUEUE_SIZE 32
#define MAX_QUEUE_SIZE (1 * 1024 * 1024)
static int queue_init(queue_entry_t *queue, const char *name,
@@ -47,12 +47,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue_global_t *queue_glb;
extern _odp_queue_inline_offset_t _odp_queue_inline_offset;
-static inline queue_entry_t *qentry_from_handle(odp_queue_t handle)
-{
- return (queue_entry_t *)(uintptr_t)handle;
-}
-
-static int queue_capa(odp_queue_capability_t *capa, int sched)
+static int queue_capa(odp_queue_capability_t *capa, int sched ODP_UNUSED)
{
memset(capa, 0, sizeof(odp_queue_capability_t));
@@ -62,6 +57,7 @@ static int queue_capa(odp_queue_capability_t *capa, int sched)
capa->plain.max_size = queue_glb->config.max_queue_size;
capa->plain.lockfree.max_num = queue_glb->queue_lf_num;
capa->plain.lockfree.max_size = queue_glb->queue_lf_size;
+#if ODP_DEPRECATED_API
capa->sched.max_num = capa->max_queues;
capa->sched.max_size = queue_glb->config.max_queue_size;
@@ -70,6 +66,7 @@ static int queue_capa(odp_queue_capability_t *capa, int sched)
capa->max_sched_groups = sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
}
+#endif
return 0;
}
@@ -139,13 +136,13 @@ static int queue_init_global(void)
shm = odp_shm_reserve("_odp_queue_gbl",
sizeof(queue_global_t),
- sizeof(queue_entry_t), 0);
+ sizeof(queue_entry_t),
+ 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
queue_glb = odp_shm_addr(shm);
- if (queue_glb == NULL)
- return -1;
-
memset(queue_glb, 0, sizeof(queue_global_t));
for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
@@ -166,7 +163,8 @@ static int queue_init_global(void)
(uint64_t)queue_glb->config.max_queue_size;
shm = odp_shm_reserve("_odp_queue_rings", mem_size,
- ODP_CACHE_LINE_SIZE, 0);
+ ODP_CACHE_LINE_SIZE,
+ 0);
if (shm == ODP_SHM_INVALID) {
odp_shm_free(queue_glb->queue_gbl_shm);
@@ -272,21 +270,31 @@ static odp_queue_t queue_create(const char *name,
uint32_t i;
queue_entry_t *queue;
void *queue_lf;
- odp_queue_t handle = ODP_QUEUE_INVALID;
- odp_queue_type_t type = ODP_QUEUE_TYPE_PLAIN;
+ odp_queue_type_t type;
odp_queue_param_t default_param;
+ odp_queue_t handle = ODP_QUEUE_INVALID;
if (param == NULL) {
odp_queue_param_init(&default_param);
param = &default_param;
}
+ type = param->type;
+
+ if (type == ODP_QUEUE_TYPE_SCHED) {
+ if (param->sched.prio < odp_schedule_min_prio() ||
+ param->sched.prio > odp_schedule_max_prio()) {
+ ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
+ return ODP_QUEUE_INVALID;
+ }
+ }
+
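
The added range check tracks the scheduler priority rework elsewhere in this patch set (odp_schedule_min_prio()/max_prio()/default_prio() replacing the fixed ODP_SCHED_PRIO_* levels; see the schedule.h changes). A conforming caller queries the range at runtime, e.g.:

    /* Usage sketch: pick a valid priority from the runtime range
     * instead of the deprecated ODP_SCHED_PRIO_* constants. */
    odp_queue_param_t qp;

    odp_queue_param_init(&qp);
    qp.type       = ODP_QUEUE_TYPE_SCHED;
    qp.sched.prio = odp_schedule_default_prio();  /* always in range */
    qp.sched.sync = ODP_SCHED_SYNC_PARALLEL;

    odp_queue_t q = odp_queue_create("work", &qp);
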
if (param->nonblocking == ODP_BLOCKING) {
if (param->size > queue_glb->config.max_queue_size)
return ODP_QUEUE_INVALID;
} else if (param->nonblocking == ODP_NONBLOCKING_LF) {
/* Only plain type lock-free queues supported */
- if (param->type != ODP_QUEUE_TYPE_PLAIN)
+ if (type != ODP_QUEUE_TYPE_PLAIN)
return ODP_QUEUE_INVALID;
if (param->size > queue_glb->queue_lf_size)
return ODP_QUEUE_INVALID;
@@ -310,9 +318,9 @@ static odp_queue_t queue_create(const char *name,
if (!queue->s.spsc &&
param->nonblocking == ODP_NONBLOCKING_LF) {
- queue_lf_func_t *lf_func;
+ queue_lf_func_t *lf_fn;
- lf_func = &queue_glb->queue_lf_func;
+ lf_fn = &queue_glb->queue_lf_func;
queue_lf = queue_lf_create(queue);
@@ -322,14 +330,13 @@ static odp_queue_t queue_create(const char *name,
}
queue->s.queue_lf = queue_lf;
- queue->s.enqueue = lf_func->enq;
- queue->s.enqueue_multi = lf_func->enq_multi;
- queue->s.dequeue = lf_func->deq;
- queue->s.dequeue_multi = lf_func->deq_multi;
+ queue->s.enqueue = lf_fn->enq;
+ queue->s.enqueue_multi = lf_fn->enq_multi;
+ queue->s.dequeue = lf_fn->deq;
+ queue->s.dequeue_multi = lf_fn->deq_multi;
+ queue->s.orig_dequeue_multi = lf_fn->deq_multi;
}
- type = queue->s.type;
-
if (type == ODP_QUEUE_TYPE_SCHED)
queue->s.status = QUEUE_STATUS_NOTSCHED;
else
@@ -357,19 +364,6 @@ static odp_queue_t queue_create(const char *name,
return handle;
}
-void sched_queue_destroy_finalize(uint32_t queue_index)
-{
- queue_entry_t *queue = qentry_from_index(queue_index);
-
- LOCK(queue);
-
- if (queue->s.status == QUEUE_STATUS_DESTROYED) {
- queue->s.status = QUEUE_STATUS_FREE;
- sched_fn->destroy_queue(queue_index);
- }
- UNLOCK(queue);
-}
-
void sched_queue_set_status(uint32_t queue_index, int status)
{
queue_entry_t *queue = qentry_from_index(queue_index);
@@ -404,8 +398,10 @@ static int queue_destroy(odp_queue_t handle)
if (queue->s.spsc)
empty = ring_spsc_is_empty(&queue->s.ring_spsc);
- else
+ else if (queue->s.type == ODP_QUEUE_TYPE_SCHED)
empty = ring_st_is_empty(&queue->s.ring_st);
+ else
+ empty = ring_mpmc_is_empty(&queue->s.ring_mpmc);
if (!empty) {
UNLOCK(queue);
@@ -489,137 +485,80 @@ static inline void buffer_index_to_buf(odp_buffer_hdr_t *buf_hdr[],
}
}
-static inline int enq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
- int num)
+static inline int _plain_queue_enq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
- int sched = 0;
- int ret;
queue_entry_t *queue;
- int num_enq;
- ring_st_t *ring_st;
+ int ret, num_enq;
+ ring_mpmc_t *ring_mpmc;
uint32_t buf_idx[num];
- queue = q_int;
- ring_st = &queue->s.ring_st;
+ queue = qentry_from_handle(handle);
+ ring_mpmc = &queue->s.ring_mpmc;
- if (sched_fn->ord_enq_multi(q_int, (void **)buf_hdr, num, &ret))
+ if (sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
return ret;
buffer_index_from_buf(buf_idx, buf_hdr, num);
- LOCK(queue);
-
- if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
- UNLOCK(queue);
- ODP_ERR("Bad queue status\n");
- return -1;
- }
-
- num_enq = ring_st_enq_multi(ring_st, buf_idx, num);
-
- if (odp_unlikely(num_enq == 0)) {
- UNLOCK(queue);
- return 0;
- }
-
- if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
- queue->s.status = QUEUE_STATUS_SCHED;
- sched = 1;
- }
-
- UNLOCK(queue);
-
- /* Add queue to scheduling */
- if (sched && sched_fn->sched_queue(queue->s.index))
- ODP_ABORT("schedule_queue failed\n");
+ num_enq = ring_mpmc_enq_multi(ring_mpmc, queue->s.ring_data,
+ queue->s.ring_mask, buf_idx, num);
return num_enq;
}
-static int queue_int_enq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
- int num)
-{
- return enq_multi(q_int, buf_hdr, num);
-}
-
-static int queue_int_enq(void *q_int, odp_buffer_hdr_t *buf_hdr)
+static inline int _plain_queue_deq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
- int ret;
-
- ret = enq_multi(q_int, &buf_hdr, 1);
+ int num_deq;
+ queue_entry_t *queue;
+ ring_mpmc_t *ring_mpmc;
+ uint32_t buf_idx[num];
- if (ret == 1)
- return 0;
- else
- return -1;
-}
+ queue = qentry_from_handle(handle);
+ ring_mpmc = &queue->s.ring_mpmc;
-static int queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
-{
- queue_entry_t *queue = qentry_from_handle(handle);
+ num_deq = ring_mpmc_deq_multi(ring_mpmc, queue->s.ring_data,
+ queue->s.ring_mask, buf_idx, num);
- if (odp_unlikely(num == 0))
+ if (num_deq == 0)
return 0;
- if (num > QUEUE_MULTI_MAX)
- num = QUEUE_MULTI_MAX;
+ buffer_index_to_buf(buf_hdr, buf_idx, num_deq);
- return queue->s.enqueue_multi(queue,
- (odp_buffer_hdr_t **)(uintptr_t)ev, num);
+ return num_deq;
}
-static int queue_enq(odp_queue_t handle, odp_event_t ev)
+static int plain_queue_enq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
- queue_entry_t *queue = qentry_from_handle(handle);
-
- return queue->s.enqueue(queue,
- (odp_buffer_hdr_t *)(uintptr_t)ev);
+ return _plain_queue_enq_multi(handle, buf_hdr, num);
}
-static inline int plain_queue_deq(queue_entry_t *queue,
- odp_buffer_hdr_t *buf_hdr[], int num)
+static int plain_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
{
- int num_deq;
- ring_st_t *ring_st;
- uint32_t buf_idx[num];
-
- ring_st = &queue->s.ring_st;
-
- LOCK(queue);
-
- if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
- /* Bad queue, or queue has been destroyed. */
- UNLOCK(queue);
- return -1;
- }
-
- num_deq = ring_st_deq_multi(ring_st, buf_idx, num);
+ int ret;
- UNLOCK(queue);
+ ret = _plain_queue_enq_multi(handle, &buf_hdr, 1);
- if (num_deq == 0)
+ if (ret == 1)
return 0;
-
- buffer_index_to_buf(buf_hdr, buf_idx, num_deq);
-
- return num_deq;
+ else
+ return -1;
}
-static int queue_int_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
- int num)
+static int plain_queue_deq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
- queue_entry_t *queue = q_int;
-
- return plain_queue_deq(queue, buf_hdr, num);
+ return _plain_queue_deq_multi(handle, buf_hdr, num);
}
-static odp_buffer_hdr_t *queue_int_deq(void *q_int)
+static odp_buffer_hdr_t *plain_queue_deq(odp_queue_t handle)
{
- queue_entry_t *queue = q_int;
odp_buffer_hdr_t *buf_hdr = NULL;
int ret;
- ret = plain_queue_deq(queue, &buf_hdr, 1);
+ ret = _plain_queue_deq_multi(handle, &buf_hdr, 1);
if (ret == 1)
return buf_hdr;
@@ -627,87 +566,46 @@ static odp_buffer_hdr_t *queue_int_deq(void *q_int)
return NULL;
}
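
Plain queues thus move from a ticketlock-protected single-threaded ring (ring_st) to a multi-producer/multi-consumer ring (ring_mpmc) and no longer take the queue lock on the data path. Both ring flavors move buffer references as 32-bit pool indexes rather than pointers, which halves the ring storage and lets a slot be a single atomic word. The transport pattern, condensed from the enqueue/dequeue pairs above:

    /* Producer side: headers -> indexes -> ring */
    buffer_index_from_buf(buf_idx, buf_hdr, num);
    num_enq = ring_mpmc_enq_multi(ring_mpmc, queue->s.ring_data,
                                  queue->s.ring_mask, buf_idx, num);

    /* Consumer side: ring -> indexes -> headers */
    num_deq = ring_mpmc_deq_multi(ring_mpmc, queue->s.ring_data,
                                  queue->s.ring_mask, buf_idx, num);
    buffer_index_to_buf(buf_hdr, buf_idx, num_deq);
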
-static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
+static int error_enqueue(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
{
- queue_entry_t *queue = qentry_from_handle(handle);
+ (void)buf_hdr;
- if (num > QUEUE_MULTI_MAX)
- num = QUEUE_MULTI_MAX;
+ ODP_ERR("Enqueue not supported (0x%" PRIx64 ")\n",
+ odp_queue_to_u64(handle));
- return queue->s.dequeue_multi(queue,
- (odp_buffer_hdr_t **)ev, num);
+ return -1;
}
-static odp_event_t queue_deq(odp_queue_t handle)
+static int error_enqueue_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
- queue_entry_t *queue = qentry_from_handle(handle);
+ (void)buf_hdr;
+ (void)num;
+
+ ODP_ERR("Enqueue multi not supported (0x%" PRIx64 ")\n",
+ odp_queue_to_u64(handle));
- return (odp_event_t)queue->s.dequeue(queue);
+ return -1;
}
-static int queue_init(queue_entry_t *queue, const char *name,
- const odp_queue_param_t *param)
+static odp_buffer_hdr_t *error_dequeue(odp_queue_t handle)
{
- uint64_t offset;
- uint32_t queue_size;
- int spsc;
+ ODP_ERR("Dequeue not supported (0x%" PRIx64 ")\n",
+ odp_queue_to_u64(handle));
- if (name == NULL) {
- queue->s.name[0] = 0;
- } else {
- strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
- queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
- }
- memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
- if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
- return -1;
-
- if (param->type == ODP_QUEUE_TYPE_SCHED)
- queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;
-
- queue->s.type = queue->s.param.type;
-
- queue->s.pktin = PKTIN_INVALID;
- queue->s.pktout = PKTOUT_INVALID;
-
- /* Use default size for all small queues to quarantee performance
- * level. */
- queue_size = queue_glb->config.default_queue_size;
- if (param->size > queue_glb->config.default_queue_size)
- queue_size = param->size;
-
- /* Round up if not already a power of two */
- queue_size = ROUNDUP_POWER2_U32(queue_size);
-
- if (queue_size > queue_glb->config.max_queue_size) {
- ODP_ERR("Too large queue size %u\n", queue_size);
- return -1;
- }
-
- offset = queue->s.index * (uint64_t)queue_glb->config.max_queue_size;
-
- /* Single-producer / single-consumer plain queue has simple and
- * lock-free implementation */
- spsc = (param->type == ODP_QUEUE_TYPE_PLAIN) &&
- (param->enq_mode == ODP_QUEUE_OP_MT_UNSAFE) &&
- (param->deq_mode == ODP_QUEUE_OP_MT_UNSAFE);
-
- queue->s.spsc = spsc;
- queue->s.queue_lf = NULL;
+ return NULL;
+}
- if (spsc) {
- queue_spsc_init(queue, queue_size);
- } else {
- queue->s.enqueue = queue_int_enq;
- queue->s.dequeue = queue_int_deq;
- queue->s.enqueue_multi = queue_int_enq_multi;
- queue->s.dequeue_multi = queue_int_deq_multi;
+static int error_dequeue_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
+{
+ (void)buf_hdr;
+ (void)num;
- ring_st_init(&queue->s.ring_st, &queue_glb->ring_data[offset],
- queue_size);
- }
+ ODP_ERR("Dequeue multi not supported (0x%" PRIx64 ")\n",
+ odp_queue_to_u64(handle));
- return 0;
+ return -1;
}
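
These error_* stubs replace the abort_pktin_*/abort_pktout_* handlers deleted from odp_packet_io.c: a queue now starts with all four operations pointing at loggers that fail the call, and queue_init()/set_enq_deq_fn() install real handlers only for the directions a queue supports. From an application's point of view (a hedged sketch of the new behavior; pkt and pktin_queue assumed in scope):

    /* Enqueuing to a pktin-mode queue used to ODP_ABORT() the process;
     * it now logs "Enqueue not supported (0x...)" and returns -1. */
    odp_event_t ev = odp_packet_to_event(pkt);

    if (odp_queue_enq(pktin_queue, ev))
        odp_event_free(ev);   /* caller keeps ownership on failure */
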
static void queue_param_init(odp_queue_param_t *params)
@@ -717,7 +615,7 @@ static void queue_param_init(odp_queue_param_t *params)
params->enq_mode = ODP_QUEUE_OP_MT;
params->deq_mode = ODP_QUEUE_OP_MT;
params->nonblocking = ODP_BLOCKING;
- params->sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ params->sched.prio = odp_schedule_default_prio();
params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
params->sched.group = ODP_SCHED_GROUP_ALL;
}
@@ -736,7 +634,7 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
queue_id = queue_to_index(handle);
if (odp_unlikely(queue_id >= ODP_CONFIG_QUEUES)) {
- ODP_ERR("Invalid queue handle:%" PRIu64 "\n",
+ ODP_ERR("Invalid queue handle: 0x%" PRIx64 "\n",
odp_queue_to_u64(handle));
return -1;
}
@@ -761,45 +659,87 @@ static int queue_info(odp_queue_t handle, odp_queue_info_t *info)
return 0;
}
+static inline int _sched_queue_enq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
+{
+ int sched = 0;
+ int ret;
+ queue_entry_t *queue;
+ int num_enq;
+ ring_st_t *ring_st;
+ uint32_t buf_idx[num];
+
+ queue = qentry_from_handle(handle);
+ ring_st = &queue->s.ring_st;
+
+ if (sched_fn->ord_enq_multi(handle, (void **)buf_hdr, num, &ret))
+ return ret;
+
+ buffer_index_from_buf(buf_idx, buf_hdr, num);
+
+ LOCK(queue);
+
+ num_enq = ring_st_enq_multi(ring_st, queue->s.ring_data,
+ queue->s.ring_mask, buf_idx, num);
+
+ if (odp_unlikely(num_enq == 0)) {
+ UNLOCK(queue);
+ return 0;
+ }
+
+ if (queue->s.status == QUEUE_STATUS_NOTSCHED) {
+ queue->s.status = QUEUE_STATUS_SCHED;
+ sched = 1;
+ }
+
+ UNLOCK(queue);
+
+ /* Add queue to scheduling */
+ if (sched && sched_fn->sched_queue(queue->s.index))
+ ODP_ABORT("schedule_queue failed\n");
+
+ return num_enq;
+}
+
int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
int update_status)
{
- int num_deq;
+ int num_deq, status;
ring_st_t *ring_st;
queue_entry_t *queue = qentry_from_index(queue_index);
- int status_sync = sched_fn->status_sync;
uint32_t buf_idx[max_num];
ring_st = &queue->s.ring_st;
LOCK(queue);
- if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
+ status = queue->s.status;
+
+ if (odp_unlikely(status < QUEUE_STATUS_READY)) {
/* Bad queue, or queue has been destroyed.
- * Scheduler finalizes queue destroy after this. */
+ * Inform scheduler about a destroyed queue. */
+ if (queue->s.status == QUEUE_STATUS_DESTROYED) {
+ queue->s.status = QUEUE_STATUS_FREE;
+ sched_fn->destroy_queue(queue_index);
+ }
+
UNLOCK(queue);
return -1;
}
- num_deq = ring_st_deq_multi(ring_st, buf_idx, max_num);
+ num_deq = ring_st_deq_multi(ring_st, queue->s.ring_data,
+ queue->s.ring_mask, buf_idx, max_num);
if (num_deq == 0) {
/* Already empty queue */
- if (update_status && queue->s.status == QUEUE_STATUS_SCHED) {
+ if (update_status && status == QUEUE_STATUS_SCHED)
queue->s.status = QUEUE_STATUS_NOTSCHED;
- if (status_sync)
- sched_fn->unsched_queue(queue->s.index);
- }
-
UNLOCK(queue);
return 0;
}
- if (status_sync && queue->s.type == ODP_QUEUE_TYPE_SCHED)
- sched_fn->save_context(queue->s.index);
-
UNLOCK(queue);
buffer_index_to_buf((odp_buffer_hdr_t **)ev, buf_idx, num_deq);
@@ -807,6 +747,24 @@ int sched_queue_deq(uint32_t queue_index, odp_event_t ev[], int max_num,
return num_deq;
}
+static int sched_queue_enq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
+{
+ return _sched_queue_enq_multi(handle, buf_hdr, num);
+}
+
+static int sched_queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
+{
+ int ret;
+
+ ret = _sched_queue_enq_multi(handle, &buf_hdr, 1);
+
+ if (ret == 1)
+ return 0;
+ else
+ return -1;
+}
+
int sched_queue_empty(uint32_t queue_index)
{
queue_entry_t *queue = qentry_from_index(queue_index);
@@ -833,48 +791,137 @@ int sched_queue_empty(uint32_t queue_index)
return ret;
}
+static int queue_init(queue_entry_t *queue, const char *name,
+ const odp_queue_param_t *param)
+{
+ uint64_t offset;
+ uint32_t queue_size;
+ odp_queue_type_t queue_type;
+ int spsc;
+
+ queue_type = param->type;
+
+ if (name == NULL) {
+ queue->s.name[0] = 0;
+ } else {
+ strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
+ queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
+ }
+ memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
+ if (queue->s.param.sched.lock_count > sched_fn->max_ordered_locks())
+ return -1;
+
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;
+
+ queue->s.type = queue_type;
+ odp_atomic_init_u64(&queue->s.num_timers, 0);
+
+ queue->s.pktin = PKTIN_INVALID;
+ queue->s.pktout = PKTOUT_INVALID;
+
+ queue_size = param->size;
+ if (queue_size == 0)
+ queue_size = queue_glb->config.default_queue_size;
+
+ if (queue_size < MIN_QUEUE_SIZE)
+ queue_size = MIN_QUEUE_SIZE;
+
+ /* Round up if not already a power of two */
+ queue_size = ROUNDUP_POWER2_U32(queue_size);
+
+ if (queue_size > queue_glb->config.max_queue_size) {
+ ODP_ERR("Too large queue size %u\n", queue_size);
+ return -1;
+ }
+
+ offset = queue->s.index * (uint64_t)queue_glb->config.max_queue_size;
+
+ /* A single-producer / single-consumer plain queue has a simple,
+ * lock-free implementation */
+ spsc = (queue_type == ODP_QUEUE_TYPE_PLAIN) &&
+ (param->enq_mode == ODP_QUEUE_OP_MT_UNSAFE) &&
+ (param->deq_mode == ODP_QUEUE_OP_MT_UNSAFE);
+
+ queue->s.spsc = spsc;
+ queue->s.queue_lf = NULL;
+
+ /* Default to error functions */
+ queue->s.enqueue = error_enqueue;
+ queue->s.enqueue_multi = error_enqueue_multi;
+ queue->s.dequeue = error_dequeue;
+ queue->s.dequeue_multi = error_dequeue_multi;
+ queue->s.orig_dequeue_multi = error_dequeue_multi;
+
+ if (spsc) {
+ queue_spsc_init(queue, queue_size);
+ } else {
+ if (queue_type == ODP_QUEUE_TYPE_PLAIN) {
+ queue->s.enqueue = plain_queue_enq;
+ queue->s.enqueue_multi = plain_queue_enq_multi;
+ queue->s.dequeue = plain_queue_deq;
+ queue->s.dequeue_multi = plain_queue_deq_multi;
+ queue->s.orig_dequeue_multi = plain_queue_deq_multi;
+
+ queue->s.ring_data = &queue_glb->ring_data[offset];
+ queue->s.ring_mask = queue_size - 1;
+ ring_mpmc_init(&queue->s.ring_mpmc);
+
+ } else {
+ queue->s.enqueue = sched_queue_enq;
+ queue->s.enqueue_multi = sched_queue_enq_multi;
+
+ queue->s.ring_data = &queue_glb->ring_data[offset];
+ queue->s.ring_mask = queue_size - 1;
+ ring_st_init(&queue->s.ring_st);
+ }
+ }
+
+ return 0;
+}
+
static uint64_t queue_to_u64(odp_queue_t hdl)
{
return _odp_pri(hdl);
}
-static odp_pktout_queue_t queue_get_pktout(void *q_int)
+static odp_pktout_queue_t queue_get_pktout(odp_queue_t handle)
{
- queue_entry_t *qentry = q_int;
+ queue_entry_t *qentry = qentry_from_handle(handle);
return qentry->s.pktout;
}
-static void queue_set_pktout(void *q_int, odp_pktio_t pktio, int index)
+static void queue_set_pktout(odp_queue_t handle, odp_pktio_t pktio, int index)
{
- queue_entry_t *qentry = q_int;
+ queue_entry_t *qentry = qentry_from_handle(handle);
qentry->s.pktout.pktio = pktio;
qentry->s.pktout.index = index;
}
-static odp_pktin_queue_t queue_get_pktin(void *q_int)
+static odp_pktin_queue_t queue_get_pktin(odp_queue_t handle)
{
- queue_entry_t *qentry = q_int;
+ queue_entry_t *qentry = qentry_from_handle(handle);
return qentry->s.pktin;
}
-static void queue_set_pktin(void *q_int, odp_pktio_t pktio, int index)
+static void queue_set_pktin(odp_queue_t handle, odp_pktio_t pktio, int index)
{
- queue_entry_t *qentry = q_int;
+ queue_entry_t *qentry = qentry_from_handle(handle);
qentry->s.pktin.pktio = pktio;
qentry->s.pktin.index = index;
}
-static void queue_set_enq_deq_func(void *q_int,
+static void queue_set_enq_deq_func(odp_queue_t handle,
queue_enq_fn_t enq,
queue_enq_multi_fn_t enq_multi,
queue_deq_fn_t deq,
queue_deq_multi_fn_t deq_multi)
{
- queue_entry_t *qentry = q_int;
+ queue_entry_t *qentry = qentry_from_handle(handle);
if (enq)
qentry->s.enqueue = enq;
@@ -889,29 +936,91 @@ static void queue_set_enq_deq_func(void *q_int,
qentry->s.dequeue_multi = deq_multi;
}
-static void *queue_from_ext(odp_queue_t handle)
+static int queue_orig_multi(odp_queue_t handle,
+ odp_buffer_hdr_t **buf_hdr, int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->s.orig_dequeue_multi(handle, buf_hdr, num);
+}
+
+static int queue_api_enq_multi(odp_queue_t handle,
+ const odp_event_t ev[], int num)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ if (odp_unlikely(num == 0))
+ return 0;
+
+ if (num > QUEUE_MULTI_MAX)
+ num = QUEUE_MULTI_MAX;
+
+ return queue->s.enqueue_multi(handle,
+ (odp_buffer_hdr_t **)(uintptr_t)ev, num);
+}
+
+static void queue_timer_add(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ odp_atomic_inc_u64(&queue->s.num_timers);
+}
+
+static void queue_timer_rem(odp_queue_t handle)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ odp_atomic_dec_u64(&queue->s.num_timers);
+}
+
+static int queue_api_enq(odp_queue_t handle, odp_event_t ev)
+{
+ queue_entry_t *queue = qentry_from_handle(handle);
+
+ return queue->s.enqueue(handle,
+ (odp_buffer_hdr_t *)(uintptr_t)ev);
+}
+
+static int queue_api_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
{
- return qentry_from_handle(handle);
+ queue_entry_t *queue = qentry_from_handle(handle);
+ int ret;
+
+ if (num > QUEUE_MULTI_MAX)
+ num = QUEUE_MULTI_MAX;
+
+ ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->s.num_timers))
+ timer_run(ret ? 2 : 1);
+
+ return ret;
}
-static odp_queue_t queue_to_ext(void *q_int)
+static odp_event_t queue_api_deq(odp_queue_t handle)
{
- queue_entry_t *qentry = q_int;
+ queue_entry_t *queue = qentry_from_handle(handle);
+ odp_event_t ev = (odp_event_t)queue->s.dequeue(handle);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->s.num_timers))
+ timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
- return qentry->s.handle;
+ return ev;
}
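
The dequeue wrappers double as a driver for inline timers: the per-queue num_timers counter (maintained by queue_timer_add()/rem()) gates the call so timer-free queues pay only an atomic load, and the argument to timer_run() (2 after a successful dequeue, 1 otherwise) presumably lets the timer scan throttle itself when the caller is busy. In practice a plain completion queue can then be serviced by simple polling:

    /* Usage sketch: polling a plain queue both drains events and
     * advances inline timers targeting it. handle_timeout() is a
     * hypothetical application function. */
    odp_event_t ev;

    while ((ev = odp_queue_deq(tmo_queue)) != ODP_EVENT_INVALID) {
        if (odp_event_type(ev) == ODP_EVENT_TIMEOUT)
            handle_timeout(odp_timeout_from_event(ev));
        else
            odp_event_free(ev);
    }
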
/* API functions */
-queue_api_t queue_basic_api = {
+_odp_queue_api_fn_t queue_basic_api = {
.queue_create = queue_create,
.queue_destroy = queue_destroy,
.queue_lookup = queue_lookup,
.queue_capability = queue_capability,
.queue_context_set = queue_context_set,
- .queue_enq = queue_enq,
- .queue_enq_multi = queue_enq_multi,
- .queue_deq = queue_deq,
- .queue_deq_multi = queue_deq_multi,
+ .queue_enq = queue_api_enq,
+ .queue_enq_multi = queue_api_enq_multi,
+ .queue_deq = queue_api_deq,
+ .queue_deq_multi = queue_api_deq_multi,
.queue_type = queue_type,
.queue_sched_type = queue_sched_type,
.queue_sched_prio = queue_sched_prio,
@@ -928,15 +1037,12 @@ queue_fn_t queue_basic_fn = {
.term_global = queue_term_global,
.init_local = queue_init_local,
.term_local = queue_term_local,
- .from_ext = queue_from_ext,
- .to_ext = queue_to_ext,
- .enq = queue_int_enq,
- .enq_multi = queue_int_enq_multi,
- .deq = queue_int_deq,
- .deq_multi = queue_int_deq_multi,
.get_pktout = queue_get_pktout,
.set_pktout = queue_set_pktout,
.get_pktin = queue_get_pktin,
.set_pktin = queue_set_pktin,
- .set_enq_deq_fn = queue_set_enq_deq_func
+ .set_enq_deq_fn = queue_set_enq_deq_func,
+ .orig_deq_multi = queue_orig_multi,
+ .timer_add = queue_timer_add,
+ .timer_rem = queue_timer_rem
};
diff --git a/platform/linux-generic/odp_queue_if.c b/platform/linux-generic/odp_queue_if.c
index f3984542a..5ff8bbac5 100644
--- a/platform/linux-generic/odp_queue_if.c
+++ b/platform/linux-generic/odp_queue_if.c
@@ -19,101 +19,81 @@
#include <odp/visibility_begin.h>
_odp_queue_inline_offset_t ODP_ALIGNED_CACHE _odp_queue_inline_offset;
+const _odp_queue_api_fn_t *_odp_queue_api;
#include <odp/visibility_end.h>
-extern const queue_api_t queue_scalable_api;
+extern const _odp_queue_api_fn_t queue_scalable_api;
extern const queue_fn_t queue_scalable_fn;
-extern const queue_api_t queue_basic_api;
+extern const _odp_queue_api_fn_t queue_basic_api;
extern const queue_fn_t queue_basic_fn;
-const queue_api_t *queue_api;
const queue_fn_t *queue_fn;
odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
{
- return queue_api->queue_create(name, param);
+ return _odp_queue_api->queue_create(name, param);
}
int odp_queue_destroy(odp_queue_t queue)
{
- return queue_api->queue_destroy(queue);
+ return _odp_queue_api->queue_destroy(queue);
}
odp_queue_t odp_queue_lookup(const char *name)
{
- return queue_api->queue_lookup(name);
+ return _odp_queue_api->queue_lookup(name);
}
int odp_queue_capability(odp_queue_capability_t *capa)
{
- return queue_api->queue_capability(capa);
+ return _odp_queue_api->queue_capability(capa);
}
int odp_queue_context_set(odp_queue_t queue, void *context, uint32_t len)
{
- return queue_api->queue_context_set(queue, context, len);
-}
-
-int odp_queue_enq(odp_queue_t queue, odp_event_t ev)
-{
- return queue_api->queue_enq(queue, ev);
-}
-
-int odp_queue_enq_multi(odp_queue_t queue, const odp_event_t events[], int num)
-{
- return queue_api->queue_enq_multi(queue, events, num);
-}
-
-odp_event_t odp_queue_deq(odp_queue_t queue)
-{
- return queue_api->queue_deq(queue);
-}
-
-int odp_queue_deq_multi(odp_queue_t queue, odp_event_t events[], int num)
-{
- return queue_api->queue_deq_multi(queue, events, num);
+ return _odp_queue_api->queue_context_set(queue, context, len);
}
odp_queue_type_t odp_queue_type(odp_queue_t queue)
{
- return queue_api->queue_type(queue);
+ return _odp_queue_api->queue_type(queue);
}
odp_schedule_sync_t odp_queue_sched_type(odp_queue_t queue)
{
- return queue_api->queue_sched_type(queue);
+ return _odp_queue_api->queue_sched_type(queue);
}
odp_schedule_prio_t odp_queue_sched_prio(odp_queue_t queue)
{
- return queue_api->queue_sched_prio(queue);
+ return _odp_queue_api->queue_sched_prio(queue);
}
odp_schedule_group_t odp_queue_sched_group(odp_queue_t queue)
{
- return queue_api->queue_sched_group(queue);
+ return _odp_queue_api->queue_sched_group(queue);
}
uint32_t odp_queue_lock_count(odp_queue_t queue)
{
- return queue_api->queue_lock_count(queue);
+ return _odp_queue_api->queue_lock_count(queue);
}
uint64_t odp_queue_to_u64(odp_queue_t hdl)
{
- return queue_api->queue_to_u64(hdl);
+ return _odp_queue_api->queue_to_u64(hdl);
}
void odp_queue_param_init(odp_queue_param_t *param)
{
- return queue_api->queue_param_init(param);
+ return _odp_queue_api->queue_param_init(param);
}
int odp_queue_info(odp_queue_t queue, odp_queue_info_t *info)
{
- return queue_api->queue_info(queue, info);
+ return _odp_queue_api->queue_info(queue, info);
}
int _odp_queue_init_global(void)
@@ -123,14 +103,12 @@ int _odp_queue_init_global(void)
if (sched == NULL || !strcmp(sched, "default"))
sched = ODP_SCHEDULE_DEFAULT;
- if (!strcmp(sched, "basic") ||
- !strcmp(sched, "sp") ||
- !strcmp(sched, "iquery")) {
+ if (!strcmp(sched, "basic") || !strcmp(sched, "sp")) {
queue_fn = &queue_basic_fn;
- queue_api = &queue_basic_api;
+ _odp_queue_api = &queue_basic_api;
} else if (!strcmp(sched, "scalable")) {
queue_fn = &queue_scalable_fn;
- queue_api = &queue_scalable_api;
+ _odp_queue_api = &queue_scalable_api;
} else {
ODP_ABORT("Unknown scheduler specified via ODP_SCHEDULER\n");
return -1;
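
odp_queue_enq()/deq() and their multi variants vanish from this dispatcher because they moved behind _odp_queue_api, a function table exported inside the visibility section so the new plat/queue_inlines.h (included from odp_packet_io.c above) can reach it from inlined application code; only the less hot operations keep out-of-line wrappers here. Conceptually the inline side looks like this (a sketch; the real header also handles the non-inlined ABI-compat build):

    /* Sketch of the inlined fast path over the exported fn table. */
    extern const _odp_queue_api_fn_t *_odp_queue_api;

    static inline int odp_queue_enq(odp_queue_t queue, odp_event_t ev)
    {
        return _odp_queue_api->queue_enq(queue, ev);
    }
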
diff --git a/platform/linux-generic/odp_queue_lf.c b/platform/linux-generic/odp_queue_lf.c
index d12a994be..3e156a086 100644
--- a/platform/linux-generic/odp_queue_lf.c
+++ b/platform/linux-generic/odp_queue_lf.c
@@ -162,7 +162,7 @@ static inline int next_idx(int idx)
return next;
}
-static int queue_lf_enq(void *q_int, odp_buffer_hdr_t *buf_hdr)
+static int queue_lf_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
{
queue_entry_t *queue;
queue_lf_t *queue_lf;
@@ -172,7 +172,7 @@ static int queue_lf_enq(void *q_int, odp_buffer_hdr_t *buf_hdr)
ring_lf_node_t new_val;
ring_lf_node_t *node;
- queue = q_int;
+ queue = qentry_from_handle(handle);
queue_lf = queue->s.queue_lf;
new_val.s.ptr = (uintptr_t)buf_hdr;
@@ -209,18 +209,18 @@ static int queue_lf_enq(void *q_int, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-static int queue_lf_enq_multi(void *q_int, odp_buffer_hdr_t **buf_hdr,
+static int queue_lf_enq_multi(odp_queue_t handle, odp_buffer_hdr_t **buf_hdr,
int num)
{
(void)num;
- if (queue_lf_enq(q_int, buf_hdr[0]) == 0)
+ if (queue_lf_enq(handle, buf_hdr[0]) == 0)
return 1;
return 0;
}
-static odp_buffer_hdr_t *queue_lf_deq(void *q_int)
+static odp_buffer_hdr_t *queue_lf_deq(odp_queue_t handle)
{
queue_entry_t *queue;
queue_lf_t *queue_lf;
@@ -231,7 +231,7 @@ static odp_buffer_hdr_t *queue_lf_deq(void *q_int)
uint64_t lowest, counter;
odp_buffer_hdr_t *buf_hdr;
- queue = q_int;
+ queue = qentry_from_handle(handle);
queue_lf = queue->s.queue_lf;
new_val.s.counter = 0;
new_val.s.ptr = 0;
@@ -287,14 +287,14 @@ static odp_buffer_hdr_t *queue_lf_deq(void *q_int)
return NULL;
}
-static int queue_lf_deq_multi(void *q_int, odp_buffer_hdr_t **buf_hdr,
+static int queue_lf_deq_multi(odp_queue_t handle, odp_buffer_hdr_t **buf_hdr,
int num)
{
odp_buffer_hdr_t *buf;
(void)num;
- buf = queue_lf_deq(q_int);
+ buf = queue_lf_deq(handle);
if (buf == NULL)
return 0;
@@ -318,8 +318,11 @@ uint32_t queue_lf_init_global(uint32_t *queue_lf_size,
if (!lockfree)
return 0;
- shm = odp_shm_reserve("odp_queues_lf", sizeof(queue_lf_global_t),
- ODP_CACHE_LINE_SIZE, 0);
+ shm = odp_shm_reserve("_odp_queues_lf", sizeof(queue_lf_global_t),
+ ODP_CACHE_LINE_SIZE,
+ 0);
+ if (shm == ODP_SHM_INVALID)
+ return 0;
queue_lf_glb = odp_shm_addr(shm);
memset(queue_lf_glb, 0, sizeof(queue_lf_global_t));
diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c
index 13e93bc19..88abe8c78 100644
--- a/platform/linux-generic/odp_queue_scalable.c
+++ b/platform/linux-generic/odp_queue_scalable.c
@@ -24,15 +24,15 @@
#include <odp_pool_internal.h>
#include <odp_queue_scalable_internal.h>
#include <odp_schedule_if.h>
-#include <odp_ishm_internal.h>
+#include <odp_timer_internal.h>
+#include <odp_shm_internal.h>
#include <odp_ishmpool_internal.h>
#include <odp/api/plat/queue_inline_types.h>
+#include <odp_global_data.h>
#include <string.h>
#include <inttypes.h>
-#define NUM_INTERNAL_QUEUES 64
-
#define MIN(a, b) \
({ \
__typeof__(a) tmp_a = (a); \
@@ -54,12 +54,11 @@ typedef struct queue_table_t {
static queue_table_t *queue_tbl;
static _odp_ishm_pool_t *queue_shm_pool;
-static void *queue_from_ext(odp_queue_t handle);
-static int _queue_enq(void *handle, odp_buffer_hdr_t *buf_hdr);
-static odp_buffer_hdr_t *_queue_deq(void *handle);
-static int _queue_enq_multi(void *handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr);
+static odp_buffer_hdr_t *_queue_deq(odp_queue_t handle);
+static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
int num);
-static int _queue_deq_multi(void *handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
int num);
static queue_entry_t *get_qentry(uint32_t queue_id)
@@ -111,7 +110,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
sched_elem = &queue->s.sched_elem;
ring_size = param->size > 0 ?
- ROUNDUP_POWER2_U32(param->size) : CONFIG_QUEUE_SIZE;
+ ROUNDUP_POWER2_U32(param->size) : CONFIG_SCAL_QUEUE_SIZE;
strncpy(queue->s.name, name ? name : "", ODP_QUEUE_NAME_LEN - 1);
queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
@@ -125,10 +124,13 @@ static int queue_init(queue_entry_t *queue, const char *name,
ring[ring_idx] = NULL;
queue->s.type = queue->s.param.type;
+ odp_atomic_init_u64(&queue->s.num_timers, 0);
+
queue->s.enqueue = _queue_enq;
queue->s.dequeue = _queue_deq;
queue->s.enqueue_multi = _queue_enq_multi;
queue->s.dequeue_multi = _queue_deq_multi;
+ queue->s.orig_dequeue_multi = _queue_deq_multi;
queue->s.pktin = PKTIN_INVALID;
sched_elem->node.next = NULL;
@@ -163,6 +165,8 @@ static int queue_init(queue_entry_t *queue, const char *name,
/* Queue initialized successfully, add it to the sched group */
if (queue->s.type == ODP_QUEUE_TYPE_SCHED) {
+ int prio = odp_schedule_max_prio() - param->sched.prio;
+
if (queue->s.param.sched.sync == ODP_SCHED_SYNC_ORDERED) {
sched_elem->rwin =
rwin_alloc(queue_shm_pool,
@@ -173,9 +177,9 @@ static int queue_init(queue_entry_t *queue, const char *name,
}
}
sched_elem->sched_grp = param->sched.group;
- sched_elem->sched_prio = param->sched.prio;
+ sched_elem->sched_prio = prio;
sched_elem->schedq =
- sched_queue_add(param->sched.group, param->sched.prio);
+ sched_queue_add(param->sched.group, prio);
ODP_ASSERT(sched_elem->schedq != NULL);
}
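
The subtraction reconciles two priority conventions: the public API counts upward (odp_schedule_max_prio() is the most urgent) while the scalable scheduler's internal schedq levels presumably keep 0 as the most urgent. With an assumed API range of 0..7:

    /* API prio 7 (highest) -> internal 7 - 7 = 0 (served first)
     * API prio 0 (lowest)  -> internal 7 - 0 = 7 (served last) */
    int prio = odp_schedule_max_prio() - param->sched.prio;
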
@@ -203,33 +207,29 @@ static int queue_init_global(void)
_odp_queue_inline_offset.context = offsetof(queue_entry_t,
s.param.context);
- /* Attach to the pool if it exists */
- queue_shm_pool = _odp_ishm_pool_lookup("queue_shm_pool");
+ /* Create shared memory pool to allocate shared memory for the
+ * queues. Use the default queue size.
+ */
+ /* Add size of the array holding the queues */
+ pool_size = sizeof(queue_table_t);
+ /* Add storage required for queues */
+ pool_size += (CONFIG_SCAL_QUEUE_SIZE *
+ sizeof(odp_buffer_hdr_t *)) * ODP_CONFIG_QUEUES;
+
+ /* Add the reorder window size */
+ pool_size += sizeof(reorder_window_t) * ODP_CONFIG_QUEUES;
+ /* Choose min_alloc and max_alloc such that the buddy allocator
+ * is selected.
+ */
+ min_alloc = 0;
+ max_alloc = CONFIG_SCAL_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *);
+ queue_shm_pool = _odp_ishm_pool_create("queue_shm_pool",
+ pool_size,
+ min_alloc, max_alloc, 0);
if (queue_shm_pool == NULL) {
- /* Create shared memory pool to allocate shared memory for the
- * queues. Use the default queue size.
- */
- /* Add size of the array holding the queues */
- pool_size = sizeof(queue_table_t);
- /* Add storage required for queues */
- pool_size += (CONFIG_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *)) *
- ODP_CONFIG_QUEUES;
- /* Add the reorder window size */
- pool_size += sizeof(reorder_window_t) * ODP_CONFIG_QUEUES;
- /* Choose min_alloc and max_alloc such that buddy allocator is
- * is selected.
- */
- min_alloc = 0;
- max_alloc = CONFIG_QUEUE_SIZE * sizeof(odp_buffer_hdr_t *);
- queue_shm_pool = _odp_ishm_pool_create("queue_shm_pool",
- pool_size,
- min_alloc, max_alloc,
- _ODP_ISHM_SINGLE_VA);
- if (queue_shm_pool == NULL) {
- ODP_ERR("Failed to allocate shared memory pool for"
- " queues\n");
- goto queue_shm_pool_create_failed;
- }
+ ODP_ERR("Failed to allocate shared memory pool for"
+ " queues\n");
+ goto queue_shm_pool_create_failed;
}
queue_tbl = (queue_table_t *)
@@ -313,13 +313,15 @@ static int queue_capability(odp_queue_capability_t *capa)
/* Reserve some queues for internal use */
capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+#if ODP_DEPRECATED_API
capa->max_ordered_locks = sched_fn->max_ordered_locks();
capa->max_sched_groups = sched_fn->num_grps();
capa->sched_prios = odp_schedule_num_prio();
- capa->plain.max_num = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
- capa->plain.max_size = 0;
capa->sched.max_num = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
capa->sched.max_size = 0;
+#endif
+ capa->plain.max_num = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ capa->plain.max_size = 0;
return 0;
}
@@ -356,15 +358,26 @@ static odp_queue_t queue_create(const char *name,
const odp_queue_param_t *param)
{
int queue_idx;
- odp_queue_t handle = ODP_QUEUE_INVALID;
queue_entry_t *queue;
+ odp_queue_type_t type;
odp_queue_param_t default_param;
+ odp_queue_t handle = ODP_QUEUE_INVALID;
if (param == NULL) {
odp_queue_param_init(&default_param);
param = &default_param;
}
+ type = param->type;
+
+ if (type == ODP_QUEUE_TYPE_SCHED) {
+ if (param->sched.prio < odp_schedule_min_prio() ||
+ param->sched.prio > odp_schedule_max_prio()) {
+ ODP_ERR("Bad queue priority: %i\n", param->sched.prio);
+ return ODP_QUEUE_INVALID;
+ }
+ }
+
for (queue_idx = 0; queue_idx < ODP_CONFIG_QUEUES; queue_idx++) {
queue = &queue_tbl->queue[queue_idx];
@@ -610,7 +623,7 @@ int _odp_queue_enq_sp(sched_elem_t *q,
return actual;
}
-static int _queue_enq_multi(void *handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
int num)
{
int actual;
@@ -646,7 +659,7 @@ static int _queue_enq_multi(void *handle, odp_buffer_hdr_t *buf_hdr[],
return actual;
}
-static int _queue_enq(void *handle, odp_buffer_hdr_t *buf_hdr)
+static int _queue_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
{
return odp_likely(
_queue_enq_multi(handle, &buf_hdr, 1) == 1) ? 0 : -1;
@@ -666,7 +679,7 @@ static int queue_enq_multi(odp_queue_t handle, const odp_event_t ev[], int num)
for (i = 0; i < num; i++)
buf_hdr[i] = buf_hdl_to_hdr(odp_buffer_from_event(ev[i]));
- return queue->s.enqueue_multi(qentry_to_int(queue), buf_hdr, num);
+ return queue->s.enqueue_multi(handle, buf_hdr, num);
}
static int queue_enq(odp_queue_t handle, odp_event_t ev)
@@ -677,7 +690,7 @@ static int queue_enq(odp_queue_t handle, odp_event_t ev)
queue = qentry_from_ext(handle);
buf_hdr = buf_hdl_to_hdr(odp_buffer_from_event(ev));
- return queue->s.enqueue(qentry_to_int(queue), buf_hdr);
+ return queue->s.enqueue(handle, buf_hdr);
}
/* Single-consumer dequeue. */
@@ -809,7 +822,7 @@ inline int _odp_queue_deq_mc(sched_elem_t *q, odp_event_t *evp, int num)
return ret;
}
-static int _queue_deq_multi(void *handle, odp_buffer_hdr_t *buf_hdr[],
+static int _queue_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
int num)
{
sched_elem_t *q;
@@ -820,7 +833,7 @@ static int _queue_deq_multi(void *handle, odp_buffer_hdr_t *buf_hdr[],
return _odp_queue_deq(q, buf_hdr, num);
}
-static odp_buffer_hdr_t *_queue_deq(void *handle)
+static odp_buffer_hdr_t *_queue_deq(odp_queue_t handle)
{
sched_elem_t *q;
odp_buffer_hdr_t *buf_hdr;
@@ -837,20 +850,32 @@ static odp_buffer_hdr_t *_queue_deq(void *handle)
static int queue_deq_multi(odp_queue_t handle, odp_event_t ev[], int num)
{
queue_entry_t *queue;
+ int ret;
if (num > QUEUE_MULTI_MAX)
num = QUEUE_MULTI_MAX;
queue = qentry_from_ext(handle);
- return queue->s.dequeue_multi(qentry_to_int(queue), (odp_buffer_hdr_t **)ev, num);
+
+ ret = queue->s.dequeue_multi(handle, (odp_buffer_hdr_t **)ev, num);
+
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->s.num_timers))
+ timer_run(ret ? 2 : 1);
+
+ return ret;
}
static odp_event_t queue_deq(odp_queue_t handle)
{
- queue_entry_t *queue;
+ queue_entry_t *queue = qentry_from_ext(handle);
+ odp_event_t ev = (odp_event_t)queue->s.dequeue(handle);
- queue = qentry_from_ext(handle);
- return (odp_event_t)queue->s.dequeue(qentry_to_int(queue));
+ if (odp_global_rw->inline_timers &&
+ odp_atomic_load_u64(&queue->s.num_timers))
+ timer_run(ev != ODP_EVENT_INVALID ? 2 : 1);
+
+ return ev;
}
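/*
 * Sketch of the inline timer hook above, with the exact timer_run()
 * contract (defined in odp_timer_internal.h) treated as an assumption
 * here: when inline timers are enabled and the queue has timers
 * attached (num_timers > 0, maintained by queue_timer_add()/rem()
 * below), every plain queue dequeue also advances timer pool
 * processing, weighting the poll by 2 when events were returned and
 * by 1 when the queue was empty.
 */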
static void queue_param_init(odp_queue_param_t *params)
@@ -860,7 +885,7 @@ static void queue_param_init(odp_queue_param_t *params)
params->enq_mode = ODP_QUEUE_OP_MT;
params->deq_mode = ODP_QUEUE_OP_MT;
params->nonblocking = ODP_BLOCKING;
- params->sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ params->sched.prio = odp_schedule_default_prio();
params->sched.sync = ODP_SCHED_SYNC_PARALLEL;
params->sched.group = ODP_SCHED_GROUP_ALL;
}
@@ -909,29 +934,29 @@ static uint64_t queue_to_u64(odp_queue_t hdl)
return _odp_pri(hdl);
}
-static odp_pktout_queue_t queue_get_pktout(void *handle)
+static odp_pktout_queue_t queue_get_pktout(odp_queue_t handle)
{
return qentry_from_int(handle)->s.pktout;
}
-static void queue_set_pktout(void *handle, odp_pktio_t pktio, int index)
+static void queue_set_pktout(odp_queue_t handle, odp_pktio_t pktio, int index)
{
qentry_from_int(handle)->s.pktout.pktio = pktio;
qentry_from_int(handle)->s.pktout.index = index;
}
-static odp_pktin_queue_t queue_get_pktin(void *handle)
+static odp_pktin_queue_t queue_get_pktin(odp_queue_t handle)
{
return qentry_from_int(handle)->s.pktin;
}
-static void queue_set_pktin(void *handle, odp_pktio_t pktio, int index)
+static void queue_set_pktin(odp_queue_t handle, odp_pktio_t pktio, int index)
{
qentry_from_int(handle)->s.pktin.pktio = pktio;
qentry_from_int(handle)->s.pktin.index = index;
}
-static void queue_set_enq_deq_func(void *handle,
+static void queue_set_enq_deq_func(odp_queue_t handle,
queue_enq_fn_t enq,
queue_enq_multi_fn_t enq_multi,
queue_deq_fn_t deq,
@@ -950,18 +975,29 @@ static void queue_set_enq_deq_func(void *handle,
qentry_from_int(handle)->s.dequeue_multi = deq_multi;
}
-static void *queue_from_ext(odp_queue_t handle)
+static int queue_orig_multi(odp_queue_t handle,
+ odp_buffer_hdr_t **buf_hdr, int num)
+{
+ return qentry_from_int(handle)->s.orig_dequeue_multi(handle,
+ buf_hdr, num);
+}
+
+static void queue_timer_add(odp_queue_t handle)
{
- return (void *)handle;
+ queue_entry_t *queue = qentry_from_ext(handle);
+
+ odp_atomic_inc_u64(&queue->s.num_timers);
}
-static odp_queue_t queue_to_ext(void *handle)
+static void queue_timer_rem(odp_queue_t handle)
{
- return (odp_queue_t)handle;
+ queue_entry_t *queue = qentry_from_ext(handle);
+
+ odp_atomic_dec_u64(&queue->s.num_timers);
}
/* API functions */
-queue_api_t queue_scalable_api = {
+_odp_queue_api_fn_t queue_scalable_api = {
.queue_create = queue_create,
.queue_destroy = queue_destroy,
.queue_lookup = queue_lookup,
@@ -987,15 +1023,12 @@ queue_fn_t queue_scalable_fn = {
.term_global = queue_term_global,
.init_local = queue_init_local,
.term_local = queue_term_local,
- .from_ext = queue_from_ext,
- .to_ext = queue_to_ext,
- .enq = _queue_enq,
- .enq_multi = _queue_enq_multi,
- .deq = _queue_deq,
- .deq_multi = _queue_deq_multi,
.get_pktout = queue_get_pktout,
.set_pktout = queue_set_pktout,
.get_pktin = queue_get_pktin,
.set_pktin = queue_set_pktin,
- .set_enq_deq_fn = queue_set_enq_deq_func
+ .set_enq_deq_fn = queue_set_enq_deq_func,
+ .orig_deq_multi = queue_orig_multi,
+ .timer_add = queue_timer_add,
+ .timer_rem = queue_timer_rem
};
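/*
 * Design note on the trimmed function table: internal callers such as
 * the schedulers now pass odp_queue_t handles directly and enqueue via
 * odp_queue_enq_multi() (see the odp_schedule_basic.c changes below),
 * so the from_ext/to_ext conversions and the raw enq/deq members are
 * no longer needed here.
 */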
diff --git a/platform/linux-generic/odp_queue_spsc.c b/platform/linux-generic/odp_queue_spsc.c
index 3e42b0383..002561a49 100644
--- a/platform/linux-generic/odp_queue_spsc.c
+++ b/platform/linux-generic/odp_queue_spsc.c
@@ -32,14 +32,14 @@ static inline void buffer_index_to_buf(odp_buffer_hdr_t *buf_hdr[],
}
}
-static inline int spsc_enq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
- int num)
+static inline int spsc_enq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
queue_entry_t *queue;
ring_spsc_t *ring_spsc;
uint32_t buf_idx[num];
- queue = q_int;
+ queue = qentry_from_handle(handle);
ring_spsc = &queue->s.ring_spsc;
buffer_index_from_buf(buf_idx, buf_hdr, num);
@@ -49,18 +49,19 @@ static inline int spsc_enq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
return -1;
}
- return ring_spsc_enq_multi(ring_spsc, buf_idx, num);
+ return ring_spsc_enq_multi(ring_spsc, queue->s.ring_data,
+ queue->s.ring_mask, buf_idx, num);
}
-static inline int spsc_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
- int num)
+static inline int spsc_deq_multi(odp_queue_t handle,
+ odp_buffer_hdr_t *buf_hdr[], int num)
{
queue_entry_t *queue;
int num_deq;
ring_spsc_t *ring_spsc;
uint32_t buf_idx[num];
- queue = q_int;
+ queue = qentry_from_handle(handle);
ring_spsc = &queue->s.ring_spsc;
if (odp_unlikely(queue->s.status < QUEUE_STATUS_READY)) {
@@ -68,7 +69,8 @@ static inline int spsc_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
return -1;
}
- num_deq = ring_spsc_deq_multi(ring_spsc, buf_idx, num);
+ num_deq = ring_spsc_deq_multi(ring_spsc, queue->s.ring_data,
+ queue->s.ring_mask, buf_idx, num);
if (num_deq == 0)
return 0;
@@ -78,17 +80,17 @@ static inline int spsc_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
return num_deq;
}
-static int queue_spsc_enq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
+static int queue_spsc_enq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
int num)
{
- return spsc_enq_multi(q_int, buf_hdr, num);
+ return spsc_enq_multi(handle, buf_hdr, num);
}
-static int queue_spsc_enq(void *q_int, odp_buffer_hdr_t *buf_hdr)
+static int queue_spsc_enq(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr)
{
int ret;
- ret = spsc_enq_multi(q_int, &buf_hdr, 1);
+ ret = spsc_enq_multi(handle, &buf_hdr, 1);
if (ret == 1)
return 0;
@@ -96,18 +98,18 @@ static int queue_spsc_enq(void *q_int, odp_buffer_hdr_t *buf_hdr)
return -1;
}
-static int queue_spsc_deq_multi(void *q_int, odp_buffer_hdr_t *buf_hdr[],
+static int queue_spsc_deq_multi(odp_queue_t handle, odp_buffer_hdr_t *buf_hdr[],
int num)
{
- return spsc_deq_multi(q_int, buf_hdr, num);
+ return spsc_deq_multi(handle, buf_hdr, num);
}
-static odp_buffer_hdr_t *queue_spsc_deq(void *q_int)
+static odp_buffer_hdr_t *queue_spsc_deq(odp_queue_t handle)
{
odp_buffer_hdr_t *buf_hdr = NULL;
int ret;
- ret = spsc_deq_multi(q_int, &buf_hdr, 1);
+ ret = spsc_deq_multi(handle, &buf_hdr, 1);
if (ret == 1)
return buf_hdr;
@@ -123,9 +125,11 @@ void queue_spsc_init(queue_entry_t *queue, uint32_t queue_size)
queue->s.dequeue = queue_spsc_deq;
queue->s.enqueue_multi = queue_spsc_enq_multi;
queue->s.dequeue_multi = queue_spsc_deq_multi;
+ queue->s.orig_dequeue_multi = queue_spsc_deq_multi;
offset = queue->s.index * (uint64_t)queue_glb->config.max_queue_size;
- ring_spsc_init(&queue->s.ring_spsc, &queue_glb->ring_data[offset],
- queue_size);
+ queue->s.ring_data = &queue_glb->ring_data[offset];
+ queue->s.ring_mask = queue_size - 1;
+ ring_spsc_init(&queue->s.ring_spsc);
}
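/*
 * A minimal sketch of the ring indexing implied above, assuming
 * ring_spsc keeps free-running head/tail counters:
 *
 *   queue->s.ring_mask = queue_size - 1;        // size must be 2^n
 *   data = queue->s.ring_data[head & queue->s.ring_mask];
 *
 * Moving the data pointer and mask out of ring_spsc_t and into the
 * queue entry lets every queue slice the shared queue_glb->ring_data
 * array instead of owning its own storage.
 */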
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index df63da72a..6176c951b 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -28,17 +28,14 @@
#include <odp_timer_internal.h>
#include <odp_queue_basic_internal.h>
#include <odp_libconfig_internal.h>
+#include <odp/api/plat/queue_inlines.h>
+
+/* No synchronization context */
+#define NO_SYNC_CONTEXT ODP_SCHED_SYNC_PARALLEL
/* Number of priority levels */
#define NUM_PRIO 8
-ODP_STATIC_ASSERT(ODP_SCHED_PRIO_LOWEST == (NUM_PRIO - 1),
- "lowest_prio_does_not_match_with_num_prios");
-
-ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
- (ODP_SCHED_PRIO_NORMAL < (NUM_PRIO - 1)),
- "normal_prio_is_not_between_highest_and_lowest");
-
/* Number of scheduling groups */
#define NUM_SCHED_GRPS 32
@@ -53,10 +50,12 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
 /* A thread polls a non-preferred sched queue once per this many polls
  * of the preferred queue. */
-#define PREFER_RATIO 64
+#define MAX_PREFER_WEIGHT 127
+#define MIN_PREFER_WEIGHT 1
+#define MAX_PREFER_RATIO (MAX_PREFER_WEIGHT + 1)
/* Spread weight table */
-#define SPREAD_TBL_SIZE ((MAX_SPREAD - 1) * PREFER_RATIO)
+#define SPREAD_TBL_SIZE ((MAX_SPREAD - 1) * MAX_PREFER_RATIO)
/* Maximum number of packet IO interfaces */
#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
@@ -64,17 +63,11 @@ ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
/* Maximum pktin index. Needs to fit into 8 bits. */
#define MAX_PKTIN_INDEX 255
-/* Not a valid index */
-#define NULL_INDEX ((uint32_t)-1)
-
/* Maximum priority queue ring size. A ring must be large enough to store all
* queues in the worst case (all queues are scheduled, have the same priority
* and no spreading). */
#define MAX_RING_SIZE ODP_CONFIG_QUEUES
-/* Priority queue empty, not a valid queue index. */
-#define PRIO_QUEUE_EMPTY NULL_INDEX
-
/* For best performance, the number of queues should be a power of two. */
ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES),
"Number_of_queues_is_not_power_of_two");
@@ -83,6 +76,10 @@ ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES),
ODP_STATIC_ASSERT(CHECK_IS_POWER2(MAX_RING_SIZE),
"Ring_size_is_not_power_of_two");
+/* Thread ID is saved into uint16_t variable */
+ODP_STATIC_ASSERT(ODP_THREAD_COUNT_MAX < (64 * 1024),
+ "Max_64k_threads_supported");
+
/* Mask of queues per priority */
typedef uint8_t pri_mask_t;
@@ -92,10 +89,9 @@ ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= MAX_SPREAD,
/* Start of named groups in group mask arrays */
#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
-/* Default burst size. Scheduler rounds up number of requested events up to
- * this value. */
-#define BURST_SIZE_MAX CONFIG_BURST_SIZE
-#define BURST_SIZE_MIN 1
+/* Limits for burst size configuration */
+#define BURST_MAX 255
+#define STASH_SIZE CONFIG_BURST_SIZE
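/*
 * Reading of the two limits above: STASH_SIZE bounds the per-thread
 * event stash, so each burst_size_default[] value must fit within it,
 * while BURST_MAX (255, so the value fits a uint8_t) bounds
 * burst_size_max[], which caps direct-to-application dequeues that
 * bypass the stash.
 */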
/* Ordered stash size */
#define MAX_ORDERED_STASH 512
@@ -103,7 +99,7 @@ ODP_STATIC_ASSERT((8 * sizeof(pri_mask_t)) >= MAX_SPREAD,
/* Storage for stashed enqueue operation arguments */
typedef struct {
odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- queue_entry_t *queue_entry;
+ odp_queue_t queue;
int num;
} ordered_stash_t;
@@ -117,19 +113,24 @@ ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
"Lock_called_values_do_not_fit_in_uint32");
/* Scheduler local data */
-typedef struct {
- int thr;
- uint16_t stash_num;
- uint16_t stash_index;
+typedef struct ODP_ALIGNED_CACHE {
+ uint16_t thr;
+ uint8_t pause;
+ uint8_t sync_ctx;
uint16_t grp_round;
uint16_t spread_round;
- uint32_t stash_qi;
- odp_queue_t stash_queue;
- odp_event_t stash_ev[BURST_SIZE_MAX];
+
+ struct {
+ uint16_t num_ev;
+ uint16_t ev_index;
+ uint32_t qi;
+ odp_queue_t queue;
+ ring_t *ring;
+ odp_event_t ev[STASH_SIZE];
+ } stash;
uint32_t grp_epoch;
uint16_t num_grp;
- uint16_t pause;
uint8_t grp[NUM_SCHED_GRPS];
uint8_t spread_tbl[SPREAD_TBL_SIZE];
uint8_t grp_weight[GRP_WEIGHT_TBL_SIZE];
@@ -171,35 +172,23 @@ typedef struct ODP_ALIGNED_CACHE {
} order_context_t;
typedef struct {
- pri_mask_t pri_mask[NUM_PRIO];
- odp_spinlock_t mask_lock;
-
- prio_queue_t prio_q[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
-
- odp_shm_t shm;
-
struct {
+ uint8_t burst_default[NUM_PRIO];
+ uint8_t burst_max[NUM_PRIO];
uint8_t num_spread;
- uint8_t burst_hi;
- uint8_t burst_low;
+ uint8_t prefer_ratio;
} config;
- uint32_t pri_count[NUM_PRIO][MAX_SPREAD];
-
- odp_thrmask_t mask_all;
- odp_spinlock_t grp_lock;
- odp_atomic_u32_t grp_epoch;
- uint32_t ring_mask;
uint16_t max_spread;
-
- struct {
- char name[ODP_SCHED_GROUP_NAME_LEN];
- odp_thrmask_t mask;
- int allocated;
- } sched_grp[NUM_SCHED_GRPS];
+ uint32_t ring_mask;
+ pri_mask_t pri_mask[NUM_PRIO];
+ odp_spinlock_t mask_lock;
+ odp_atomic_u32_t grp_epoch;
+ odp_shm_t shm;
struct {
uint8_t grp;
+ /* Inverted prio value (max = 0) vs API (min = 0)*/
uint8_t prio;
uint8_t spread;
uint8_t sync;
@@ -209,6 +198,20 @@ typedef struct {
uint8_t pktin_index;
} queue[ODP_CONFIG_QUEUES];
+ /* Scheduler priority queues */
+ prio_queue_t prio_q[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
+
+ uint32_t pri_count[NUM_PRIO][MAX_SPREAD];
+
+ odp_thrmask_t mask_all;
+ odp_spinlock_t grp_lock;
+
+ struct {
+ char name[ODP_SCHED_GROUP_NAME_LEN];
+ odp_thrmask_t mask;
+ int allocated;
+ } sched_grp[NUM_SCHED_GRPS];
+
struct {
int num_pktin;
} pktio[NUM_PKTIO];
@@ -216,6 +219,9 @@ typedef struct {
order_context_t order[ODP_CONFIG_QUEUES];
+ /* Scheduler interface config options (not used in fast path) */
+ schedule_config_t config_if;
+
} sched_global_t;
/* Check that queue[] variables are large enough */
@@ -233,12 +239,11 @@ static sched_global_t *sched;
/* Thread local scheduler context */
static __thread sched_local_t sched_local;
-/* Function prototypes */
-static inline void schedule_release_context(void);
-
static int read_config_file(sched_global_t *sched)
{
const char *str;
+ int i;
+ int burst_val[NUM_PRIO];
int val = 0;
ODP_PRINT("Scheduler config:\n");
@@ -250,40 +255,98 @@ static int read_config_file(sched_global_t *sched)
}
if (val > MAX_SPREAD || val < MIN_SPREAD) {
- ODP_ERR("Bad value %s = %u\n", str, val);
+ ODP_ERR("Bad value %s = %u [min: %u, max: %u]\n", str, val,
+ MIN_SPREAD, MAX_SPREAD);
return -1;
}
sched->config.num_spread = val;
ODP_PRINT(" %s: %i\n", str, val);
- str = "sched_basic.burst_size_hi";
+ str = "sched_basic.prio_spread_weight";
if (!_odp_libconfig_lookup_int(str, &val)) {
ODP_ERR("Config option '%s' not found.\n", str);
return -1;
}
- if (val > BURST_SIZE_MAX || val < BURST_SIZE_MIN) {
- ODP_ERR("Bad value %s = %u\n", str, val);
+ if (val > MAX_PREFER_WEIGHT || val < MIN_PREFER_WEIGHT) {
+ ODP_ERR("Bad value %s = %u [min: %u, max: %u]\n", str, val,
+ MIN_PREFER_WEIGHT, MAX_PREFER_WEIGHT);
return -1;
}
- sched->config.burst_hi = val;
+ sched->config.prefer_ratio = val + 1;
ODP_PRINT(" %s: %i\n", str, val);
- str = "sched_basic.burst_size_low";
+ str = "sched_basic.burst_size_default";
+ if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) !=
+ NUM_PRIO) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ ODP_PRINT(" %s[] =", str);
+ for (i = 0; i < NUM_PRIO; i++) {
+ val = burst_val[i];
+ sched->config.burst_default[i] = val;
+ ODP_PRINT(" %3i", val);
+
+ if (val > STASH_SIZE || val < 1) {
+ ODP_ERR("Bad value %i\n", val);
+ return -1;
+ }
+ }
+ ODP_PRINT("\n");
+
+ str = "sched_basic.burst_size_max";
+ if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) !=
+ NUM_PRIO) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+	ODP_PRINT("  %s[] =", str);
+ for (i = 0; i < NUM_PRIO; i++) {
+ val = burst_val[i];
+ sched->config.burst_max[i] = val;
+ ODP_PRINT(" %3i", val);
+
+ if (val > BURST_MAX || val < 1) {
+ ODP_ERR("Bad value %i\n", val);
+ return -1;
+ }
+ }
+
+ ODP_PRINT("\n");
+
+ str = "sched_basic.group_enable.all";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", str);
+ return -1;
+ }
+
+ sched->config_if.group_enable.all = val;
+ ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.group_enable.worker";
if (!_odp_libconfig_lookup_int(str, &val)) {
ODP_ERR("Config option '%s' not found.\n", str);
return -1;
}
- if (val > BURST_SIZE_MAX || val < BURST_SIZE_MIN) {
- ODP_ERR("Bad value %s = %u\n", str, val);
+ sched->config_if.group_enable.worker = val;
+ ODP_PRINT(" %s: %i\n", str, val);
+
+ str = "sched_basic.group_enable.control";
+ if (!_odp_libconfig_lookup_int(str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", str);
return -1;
}
- sched->config.burst_low = val;
- ODP_PRINT(" %s: %i\n\n", str, val);
+ sched->config_if.group_enable.control = val;
+ ODP_PRINT(" %s: %i\n", str, val);
+
+ ODP_PRINT("\n");
return 0;
}
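/*
 * A sketch of the matching odp-linux-generic.conf entries read above.
 * The option names come from the code; the values and the prio_spread
 * line are illustrative only:
 *
 *   sched_basic: {
 *       prio_spread        = 4
 *       prio_spread_weight = 63
 *       burst_size_default = [ 32, 32, 32, 32, 32, 16, 8, 4 ]
 *       burst_size_max     = [ 255, 255, 255, 255, 255, 64, 32, 16 ]
 *       group_enable: {
 *           all     = 1
 *           worker  = 1
 *           control = 1
 *       }
 *   }
 */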
@@ -296,23 +359,23 @@ static inline uint8_t prio_spread_index(uint32_t index)
static void sched_local_init(void)
{
int i;
- uint8_t spread;
+ uint8_t spread, prefer_ratio;
uint8_t num_spread = sched->config.num_spread;
uint8_t offset = 1;
memset(&sched_local, 0, sizeof(sched_local_t));
sched_local.thr = odp_thread_id();
- sched_local.stash_queue = ODP_QUEUE_INVALID;
- sched_local.stash_qi = PRIO_QUEUE_EMPTY;
- sched_local.ordered.src_queue = NULL_INDEX;
+ sched_local.sync_ctx = NO_SYNC_CONTEXT;
+ sched_local.stash.queue = ODP_QUEUE_INVALID;
spread = prio_spread_index(sched_local.thr);
+ prefer_ratio = sched->config.prefer_ratio;
for (i = 0; i < SPREAD_TBL_SIZE; i++) {
sched_local.spread_tbl[i] = spread;
- if (num_spread > 1 && (i % PREFER_RATIO) == 0) {
+ if (num_spread > 1 && (i % prefer_ratio) == 0) {
sched_local.spread_tbl[i] = prio_spread_index(spread +
offset);
offset++;
@@ -326,20 +389,20 @@ static int schedule_init_global(void)
{
odp_shm_t shm;
int i, j, grp;
+ int prefer_ratio;
ODP_DBG("Schedule init ... ");
- shm = odp_shm_reserve("odp_scheduler",
+ shm = odp_shm_reserve("_odp_scheduler",
sizeof(sched_global_t),
- ODP_CACHE_LINE_SIZE, 0);
-
- sched = odp_shm_addr(shm);
-
- if (sched == NULL) {
+ ODP_CACHE_LINE_SIZE,
+ 0);
+ if (shm == ODP_SHM_INVALID) {
ODP_ERR("Schedule init: Shm reserve failed.\n");
return -1;
}
+ sched = odp_shm_addr(shm);
memset(sched, 0, sizeof(sched_global_t));
if (read_config_file(sched)) {
@@ -347,8 +410,10 @@ static int schedule_init_global(void)
return -1;
}
+ prefer_ratio = sched->config.prefer_ratio;
+
/* When num_spread == 1, only spread_tbl[0] is used. */
- sched->max_spread = (sched->config.num_spread - 1) * PREFER_RATIO;
+ sched->max_spread = (sched->config.num_spread - 1) * prefer_ratio;
sched->shm = shm;
odp_spinlock_init(&sched->mask_lock);
@@ -356,15 +421,9 @@ static int schedule_init_global(void)
for (i = 0; i < NUM_PRIO; i++) {
for (j = 0; j < MAX_SPREAD; j++) {
prio_queue_t *prio_q;
- int k;
prio_q = &sched->prio_q[grp][i][j];
ring_init(&prio_q->ring);
-
- for (k = 0; k < MAX_RING_SIZE; k++) {
- prio_q->queue_index[k] =
- PRIO_QUEUE_EMPTY;
- }
}
}
}
@@ -392,11 +451,6 @@ static int schedule_init_global(void)
return 0;
}
-static inline void queue_destroy_finalize(uint32_t qi)
-{
- sched_queue_destroy_finalize(qi);
-}
-
static int schedule_term_global(void)
{
int ret = 0;
@@ -410,16 +464,12 @@ static int schedule_term_global(void)
ring_t *ring = &sched->prio_q[grp][i][j].ring;
uint32_t qi;
- while ((qi = ring_deq(ring, ring_mask)) !=
- RING_EMPTY) {
+ while (ring_deq(ring, ring_mask, &qi)) {
odp_event_t events[1];
int num;
num = sched_queue_deq(qi, events, 1, 1);
- if (num < 0)
- queue_destroy_finalize(qi);
-
if (num > 0)
ODP_ERR("Queue not empty\n");
}
@@ -442,17 +492,6 @@ static int schedule_init_local(void)
return 0;
}
-static int schedule_term_local(void)
-{
- if (sched_local.stash_num) {
- ODP_ERR("Locally pre-scheduled events exist.\n");
- return -1;
- }
-
- schedule_release_context();
- return 0;
-}
-
static inline void grp_update_mask(int grp, const odp_thrmask_t *new_mask)
{
odp_thrmask_copy(&sched->sched_grp[grp].mask, new_mask);
@@ -492,6 +531,31 @@ static uint32_t schedule_max_ordered_locks(void)
return CONFIG_QUEUE_MAX_ORD_LOCKS;
}
+static int schedule_min_prio(void)
+{
+ return 0;
+}
+
+static int schedule_max_prio(void)
+{
+ return NUM_PRIO - 1;
+}
+
+static int schedule_default_prio(void)
+{
+ return schedule_max_prio() / 2;
+}
+
+static int schedule_num_prio(void)
+{
+ return NUM_PRIO;
+}
+
+static inline int prio_level_from_api(int api_prio)
+{
+ return schedule_max_prio() - api_prio;
+}
+
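/*
 * Worked example of prio_level_from_api() with NUM_PRIO = 8:
 *
 *   API prio (min..max):  0  1  2  3  4  5  6  7
 *   internal level:       7  6  5  4  3  2  1  0
 *
 * schedule_default_prio() = 7 / 2 = 3 in API terms, i.e. internal
 * level 4.
 */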
static void pri_set(int id, int prio)
{
odp_spinlock_lock(&sched->mask_lock);
@@ -531,7 +595,12 @@ static int schedule_init_queue(uint32_t queue_index,
{
uint32_t ring_size;
int i;
- int prio = sched_param->prio;
+ int prio = prio_level_from_api(sched_param->prio);
+
+ if (_odp_schedule_configured == 0) {
+ ODP_ERR("Scheduler has not been configured\n");
+ return -1;
+ }
pri_set_queue(queue_index, prio);
sched->queue[queue_index].grp = sched_param->group;
@@ -557,14 +626,9 @@ static int schedule_init_queue(uint32_t queue_index,
return 0;
}
-static inline int queue_is_atomic(uint32_t queue_index)
+static inline uint8_t sched_sync_type(uint32_t queue_index)
{
- return sched->queue[queue_index].sync == ODP_SCHED_SYNC_ATOMIC;
-}
-
-static inline int queue_is_ordered(uint32_t queue_index)
-{
- return sched->queue[queue_index].sync == ODP_SCHED_SYNC_ORDERED;
+ return sched->queue[queue_index].sync;
}
static void schedule_destroy_queue(uint32_t queue_index)
@@ -576,7 +640,7 @@ static void schedule_destroy_queue(uint32_t queue_index)
sched->queue[queue_index].prio = 0;
sched->queue[queue_index].spread = 0;
- if (queue_is_ordered(queue_index) &&
+ if ((sched_sync_type(queue_index) == ODP_SCHED_SYNC_ORDERED) &&
odp_atomic_load_u64(&sched->order[queue_index].ctx) !=
odp_atomic_load_u64(&sched->order[queue_index].next_ctx))
ODP_ERR("queue reorder incomplete\n");
@@ -615,21 +679,23 @@ static void schedule_pktio_start(int pktio_index, int num_pktin,
}
}
-static void schedule_release_atomic(void)
+static inline void release_atomic(void)
{
- uint32_t qi = sched_local.stash_qi;
+ uint32_t qi = sched_local.stash.qi;
+ ring_t *ring = sched_local.stash.ring;
- if (qi != PRIO_QUEUE_EMPTY && sched_local.stash_num == 0) {
- int grp = sched->queue[qi].grp;
- int prio = sched->queue[qi].prio;
- int spread = sched->queue[qi].spread;
- ring_t *ring = &sched->prio_q[grp][prio][spread].ring;
+ /* Release current atomic queue */
+ ring_enq(ring, sched->ring_mask, qi);
- /* Release current atomic queue */
- ring_enq(ring, sched->ring_mask, qi);
+ /* We don't hold sync context anymore */
+ sched_local.sync_ctx = NO_SYNC_CONTEXT;
+}
- sched_local.stash_qi = PRIO_QUEUE_EMPTY;
- }
+static void schedule_release_atomic(void)
+{
+ if (sched_local.sync_ctx == ODP_SCHED_SYNC_ATOMIC &&
+ sched_local.stash.num_ev == 0)
+ release_atomic();
}
static inline int ordered_own_turn(uint32_t queue_index)
@@ -661,15 +727,16 @@ static inline void ordered_stash_release(void)
int i;
for (i = 0; i < sched_local.ordered.stash_num; i++) {
- queue_entry_t *queue_entry;
+ odp_queue_t queue;
odp_buffer_hdr_t **buf_hdr;
int num, num_enq;
- queue_entry = sched_local.ordered.stash[i].queue_entry;
+ queue = sched_local.ordered.stash[i].queue;
buf_hdr = sched_local.ordered.stash[i].buf_hdr;
num = sched_local.ordered.stash[i].num;
- num_enq = queue_fn->enq_multi(queue_entry, buf_hdr, num);
+ num_enq = odp_queue_enq_multi(queue,
+ (odp_event_t *)buf_hdr, num);
/* Drop packets that were not enqueued */
if (odp_unlikely(num_enq < num)) {
@@ -700,9 +767,11 @@ static inline void release_ordered(void)
}
sched_local.ordered.lock_called.all = 0;
- sched_local.ordered.src_queue = NULL_INDEX;
sched_local.ordered.in_order = 0;
+ /* We don't hold sync context anymore */
+ sched_local.sync_ctx = NO_SYNC_CONTEXT;
+
ordered_stash_release();
/* Next thread can continue processing */
@@ -711,32 +780,49 @@ static inline void release_ordered(void)
static void schedule_release_ordered(void)
{
- uint32_t queue_index;
-
- queue_index = sched_local.ordered.src_queue;
-
- if (odp_unlikely((queue_index == NULL_INDEX) || sched_local.stash_num))
+ if (odp_unlikely((sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED) ||
+ sched_local.stash.num_ev))
return;
release_ordered();
}
-static inline void schedule_release_context(void)
+static int schedule_term_local(void)
{
- if (sched_local.ordered.src_queue != NULL_INDEX)
- release_ordered();
- else
+ if (sched_local.stash.num_ev) {
+ ODP_ERR("Locally pre-scheduled events exist.\n");
+ return -1;
+ }
+
+ if (sched_local.sync_ctx == ODP_SCHED_SYNC_ATOMIC)
schedule_release_atomic();
+ else if (sched_local.sync_ctx == ODP_SCHED_SYNC_ORDERED)
+ schedule_release_ordered();
+
+ return 0;
+}
+
+static void schedule_config_init(odp_schedule_config_t *config)
+{
+ config->num_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ config->queue_size = queue_glb->config.max_queue_size;
+}
+
+static int schedule_config(const odp_schedule_config_t *config)
+{
+ (void)config;
+
+ return 0;
}
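/*
 * Resulting application-side call order, as a hedged sketch (the
 * public wrappers are added in odp_schedule_if.c below):
 *
 *   odp_schedule_config_t cfg;
 *
 *   odp_schedule_config_init(&cfg);
 *   if (odp_schedule_config(&cfg))     - or pass NULL for defaults
 *       exit(EXIT_FAILURE);
 *   - only now may ODP_QUEUE_TYPE_SCHED queues be created;
 *     schedule_init_queue() above rejects earlier attempts
 */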
static inline int copy_from_stash(odp_event_t out_ev[], unsigned int max)
{
int i = 0;
- while (sched_local.stash_num && max) {
- out_ev[i] = sched_local.stash_ev[sched_local.stash_index];
- sched_local.stash_index++;
- sched_local.stash_num--;
+ while (sched_local.stash.num_ev && max) {
+ out_ev[i] = sched_local.stash.ev[sched_local.stash.ev_index];
+ sched_local.stash.ev_index++;
+ sched_local.stash.num_ev--;
max--;
i++;
}
@@ -744,17 +830,26 @@ static inline int copy_from_stash(odp_event_t out_ev[], unsigned int max)
return i;
}
-static int schedule_ord_enq_multi(void *q_int, void *buf_hdr[],
+static int schedule_ord_enq_multi(odp_queue_t dst_queue, void *buf_hdr[],
int num, int *ret)
{
int i;
- uint32_t stash_num = sched_local.ordered.stash_num;
- queue_entry_t *dst_queue = q_int;
- uint32_t src_queue = sched_local.ordered.src_queue;
+ uint32_t stash_num;
+ queue_entry_t *dst_qentry;
+ uint32_t src_queue;
- if ((src_queue == NULL_INDEX) || sched_local.ordered.in_order)
+ /* This check is done for every queue enqueue operation, also for plain
+ * queues. Return fast when not holding a scheduling context. */
+ if (odp_likely(sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED))
return 0;
+ if (sched_local.ordered.in_order)
+ return 0;
+
+ src_queue = sched_local.ordered.src_queue;
+ stash_num = sched_local.ordered.stash_num;
+ dst_qentry = qentry_from_handle(dst_queue);
+
if (ordered_own_turn(src_queue)) {
/* Own turn, so can do enqueue directly. */
sched_local.ordered.in_order = 1;
@@ -763,7 +858,7 @@ static int schedule_ord_enq_multi(void *q_int, void *buf_hdr[],
}
/* Pktout may drop packets, so the operation cannot be stashed. */
- if (dst_queue->s.pktout.pktio != ODP_PKTIO_INVALID ||
+ if (dst_qentry->s.pktout.pktio != ODP_PKTIO_INVALID ||
odp_unlikely(stash_num >= MAX_ORDERED_STASH)) {
/* If the local stash is full, wait until it is our turn and
* then release the stash and do enqueue directly. */
@@ -775,7 +870,7 @@ static int schedule_ord_enq_multi(void *q_int, void *buf_hdr[],
return 0;
}
- sched_local.ordered.stash[stash_num].queue_entry = dst_queue;
+ sched_local.ordered.stash[stash_num].queue = dst_queue;
sched_local.ordered.stash[stash_num].num = num;
for (i = 0; i < num; i++)
sched_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
@@ -798,7 +893,7 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
odp_buffer_hdr_t **hdr_tbl;
int ret;
void *q_int;
- odp_buffer_hdr_t *b_hdr[BURST_SIZE_MAX];
+ odp_buffer_hdr_t *b_hdr[CONFIG_BURST_SIZE];
hdr_tbl = (odp_buffer_hdr_t **)ev_tbl;
@@ -806,8 +901,8 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
hdr_tbl = b_hdr;
/* Limit burst to max queue enqueue size */
- if (max_num > BURST_SIZE_MAX)
- max_num = BURST_SIZE_MAX;
+ if (max_num > CONFIG_BURST_SIZE)
+ max_num = CONFIG_BURST_SIZE;
}
pktio_index = sched->queue[qi].pktio_index;
@@ -839,7 +934,7 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
q_int = qentry_from_index(qi);
- ret = queue_fn->enq_multi(q_int, b_hdr, num);
+ ret = odp_queue_enq_multi(q_int, (odp_event_t *)b_hdr, num);
/* Drop packets that were not enqueued */
if (odp_unlikely(ret < num)) {
@@ -862,7 +957,7 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
int ret;
int id;
uint32_t qi;
- unsigned int max_burst;
+ uint16_t burst_def;
int num_spread = sched->config.num_spread;
uint32_t ring_mask = sched->ring_mask;
@@ -872,22 +967,20 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
if (sched->pri_mask[prio] == 0)
continue;
- max_burst = sched->config.burst_hi;
- if (prio > ODP_SCHED_PRIO_DEFAULT)
- max_burst = sched->config.burst_low;
+ burst_def = sched->config.burst_default[prio];
/* Select the first ring based on weights */
id = first;
for (i = 0; i < num_spread;) {
int num;
- int ordered;
+ uint8_t sync_ctx, ordered;
odp_queue_t handle;
ring_t *ring;
int pktin;
- unsigned int max_deq = max_burst;
+ uint16_t max_deq = burst_def;
int stashed = 1;
- odp_event_t *ev_tbl = sched_local.stash_ev;
+ odp_event_t *ev_tbl = sched_local.stash.ev;
if (id >= num_spread)
id = 0;
@@ -902,36 +995,40 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
/* Get queue index from the priority queue */
ring = &sched->prio_q[grp][prio][id].ring;
- qi = ring_deq(ring, ring_mask);
- /* Priority queue empty */
- if (qi == RING_EMPTY) {
+ if (ring_deq(ring, ring_mask, &qi) == 0) {
+ /* Priority queue empty */
i++;
id++;
continue;
}
- ordered = queue_is_ordered(qi);
+ sync_ctx = sched_sync_type(qi);
+ ordered = (sync_ctx == ODP_SCHED_SYNC_ORDERED);
- /* When application's array is larger than max burst
+		/* When the application's array is larger than the default burst
* size, output all events directly there. Also, ordered
* queues are not stashed locally to improve
* parallelism. Ordered context can only be released
* when the local cache is empty. */
- if (max_num > max_burst || ordered) {
+ if (max_num > burst_def || ordered) {
+ uint16_t burst_max;
+
+ burst_max = sched->config.burst_max[prio];
stashed = 0;
ev_tbl = out_ev;
max_deq = max_num;
+ if (max_num > burst_max)
+ max_deq = burst_max;
}
pktin = queue_is_pktin(qi);
num = sched_queue_deq(qi, ev_tbl, max_deq, !pktin);
- if (num < 0) {
+ if (odp_unlikely(num < 0)) {
/* Destroyed queue. Continue scheduling the same
* priority queue. */
- sched_queue_destroy_finalize(qi);
continue;
}
@@ -979,10 +1076,13 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
/* Continue scheduling ordered queues */
ring_enq(ring, ring_mask, qi);
+ sched_local.sync_ctx = sync_ctx;
- } else if (queue_is_atomic(qi)) {
+ } else if (sync_ctx == ODP_SCHED_SYNC_ATOMIC) {
/* Hold queue during atomic access */
- sched_local.stash_qi = qi;
+ sched_local.stash.qi = qi;
+ sched_local.stash.ring = ring;
+ sched_local.sync_ctx = sync_ctx;
} else {
/* Continue scheduling the queue */
ring_enq(ring, ring_mask, qi);
@@ -991,12 +1091,12 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
handle = queue_from_index(qi);
if (stashed) {
- sched_local.stash_num = num;
- sched_local.stash_index = 0;
- sched_local.stash_queue = handle;
+ sched_local.stash.num_ev = num;
+ sched_local.stash.ev_index = 0;
+ sched_local.stash.queue = handle;
ret = copy_from_stash(out_ev, max_num);
} else {
- sched_local.stash_num = 0;
+ sched_local.stash.num_ev = 0;
ret = num;
}
@@ -1023,16 +1123,20 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
uint16_t spread_round, grp_round;
uint32_t epoch;
- if (sched_local.stash_num) {
+ if (sched_local.stash.num_ev) {
ret = copy_from_stash(out_ev, max_num);
if (out_queue)
- *out_queue = sched_local.stash_queue;
+ *out_queue = sched_local.stash.queue;
return ret;
}
- schedule_release_context();
+ /* Release schedule context */
+ if (sched_local.sync_ctx == ODP_SCHED_SYNC_ATOMIC)
+ release_atomic();
+ else if (sched_local.sync_ctx == ODP_SCHED_SYNC_ORDERED)
+ release_ordered();
if (odp_unlikely(sched_local.pause))
return 0;
@@ -1077,6 +1181,14 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
return 0;
}
+static inline int schedule_run(odp_queue_t *out_queue, odp_event_t out_ev[],
+ unsigned int max_num)
+{
+ timer_run(1);
+
+ return do_schedule(out_queue, out_ev, max_num);
+}
+
static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
odp_event_t out_ev[], unsigned int max_num)
{
@@ -1085,12 +1197,13 @@ static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
int ret;
while (1) {
- timer_run();
ret = do_schedule(out_queue, out_ev, max_num);
-
- if (ret)
+ if (ret) {
+ timer_run(2);
break;
+ }
+ timer_run(1);
if (wait == ODP_SCHED_WAIT)
continue;
@@ -1129,16 +1242,30 @@ static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
return schedule_loop(out_queue, wait, events, num);
}
-static inline void order_lock(void)
+static int schedule_multi_no_wait(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
{
- uint32_t queue_index;
+ return schedule_run(out_queue, events, num);
+}
- queue_index = sched_local.ordered.src_queue;
+static int schedule_multi_wait(odp_queue_t *out_queue, odp_event_t events[],
+ int num)
+{
+ int ret;
+
+ do {
+ ret = schedule_run(out_queue, events, num);
+ } while (ret == 0);
+
+ return ret;
+}
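/*
 * Contracts of the two new entry points, as implemented above:
 * schedule_multi_no_wait() makes a single scheduling pass (preceded
 * by one timer_run(1) tick) and may return 0, while
 * schedule_multi_wait() repeats that pass until at least one event
 * is available and therefore never returns 0.
 */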
- if (queue_index == NULL_INDEX)
+static inline void order_lock(void)
+{
+ if (sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED)
return;
- wait_for_order(queue_index);
+ wait_for_order(sched_local.ordered.src_queue);
}
static void order_unlock(void)
@@ -1150,10 +1277,12 @@ static void schedule_order_lock(uint32_t lock_index)
odp_atomic_u64_t *ord_lock;
uint32_t queue_index;
+ if (sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED)
+ return;
+
queue_index = sched_local.ordered.src_queue;
- ODP_ASSERT(queue_index != NULL_INDEX &&
- lock_index <= sched->queue[queue_index].order_lock_count &&
+ ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count &&
!sched_local.ordered.lock_called.u8[lock_index]);
ord_lock = &sched->order[queue_index].lock[lock_index];
@@ -1177,10 +1306,12 @@ static void schedule_order_unlock(uint32_t lock_index)
odp_atomic_u64_t *ord_lock;
uint32_t queue_index;
+ if (sched_local.sync_ctx != ODP_SCHED_SYNC_ORDERED)
+ return;
+
queue_index = sched_local.ordered.src_queue;
- ODP_ASSERT(queue_index != NULL_INDEX &&
- lock_index <= sched->queue[queue_index].order_lock_count);
+ ODP_ASSERT(lock_index <= sched->queue[queue_index].order_lock_count);
ord_lock = &sched->order[queue_index].lock[lock_index];
@@ -1221,11 +1352,6 @@ static uint64_t schedule_wait_time(uint64_t ns)
return ns;
}
-static int schedule_num_prio(void)
-{
- return NUM_PRIO;
-}
-
static odp_schedule_group_t schedule_group_create(const char *name,
const odp_thrmask_t *mask)
{
@@ -1439,9 +1565,27 @@ static int schedule_num_grps(void)
return NUM_SCHED_GRPS;
}
+static void schedule_get_config(schedule_config_t *config)
+{
+	*config = sched->config_if;
+}
+
+static int schedule_capability(odp_schedule_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_schedule_capability_t));
+
+ capa->max_ordered_locks = schedule_max_ordered_locks();
+ capa->max_groups = schedule_num_grps();
+ capa->max_prios = schedule_num_prio();
+ capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ capa->max_queue_size = queue_glb->config.max_queue_size;
+ capa->max_flow_id = BUF_HDR_MAX_FLOW_ID;
+
+ return 0;
+}
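/*
 * Usage sketch for the capability call above; num_queues stands in
 * for an application-chosen value:
 *
 *   odp_schedule_capability_t capa;
 *
 *   if (odp_schedule_capability(&capa) == 0 &&
 *       num_queues <= capa.max_queues)
 *       ... safe to create num_queues scheduled queues ...
 */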
+
/* Fill in scheduler interface */
const schedule_fn_t schedule_basic_fn = {
- .status_sync = 0,
.pktio_start = schedule_pktio_start,
.thr_add = schedule_thr_add,
.thr_rem = schedule_thr_rem,
@@ -1457,20 +1601,27 @@ const schedule_fn_t schedule_basic_fn = {
.order_lock = order_lock,
.order_unlock = order_unlock,
.max_ordered_locks = schedule_max_ordered_locks,
- .unsched_queue = NULL,
- .save_context = NULL
+ .get_config = schedule_get_config
};
/* Fill in scheduler API calls */
const schedule_api_t schedule_basic_api = {
.schedule_wait_time = schedule_wait_time,
+ .schedule_capability = schedule_capability,
+ .schedule_config_init = schedule_config_init,
+ .schedule_config = schedule_config,
.schedule = schedule,
.schedule_multi = schedule_multi,
+ .schedule_multi_wait = schedule_multi_wait,
+ .schedule_multi_no_wait = schedule_multi_no_wait,
.schedule_pause = schedule_pause,
.schedule_resume = schedule_resume,
.schedule_release_atomic = schedule_release_atomic,
.schedule_release_ordered = schedule_release_ordered,
.schedule_prefetch = schedule_prefetch,
+ .schedule_min_prio = schedule_min_prio,
+ .schedule_max_prio = schedule_max_prio,
+ .schedule_default_prio = schedule_default_prio,
.schedule_num_prio = schedule_num_prio,
.schedule_group_create = schedule_group_create,
.schedule_group_destroy = schedule_group_destroy,
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index df1ee2c26..ba903e581 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -19,31 +19,78 @@ extern const schedule_api_t schedule_sp_api;
extern const schedule_fn_t schedule_basic_fn;
extern const schedule_api_t schedule_basic_api;
-extern const schedule_fn_t schedule_iquery_fn;
-extern const schedule_api_t schedule_iquery_api;
-
extern const schedule_fn_t schedule_scalable_fn;
extern const schedule_api_t schedule_scalable_api;
const schedule_fn_t *sched_fn;
const schedule_api_t *sched_api;
+int _odp_schedule_configured;
uint64_t odp_schedule_wait_time(uint64_t ns)
{
return sched_api->schedule_wait_time(ns);
}
+int odp_schedule_capability(odp_schedule_capability_t *capa)
+{
+ return sched_api->schedule_capability(capa);
+}
+
+void odp_schedule_config_init(odp_schedule_config_t *config)
+{
+ memset(config, 0, sizeof(*config));
+
+ sched_api->schedule_config_init(config);
+}
+
+int odp_schedule_config(const odp_schedule_config_t *config)
+{
+ int ret;
+ odp_schedule_config_t defconfig;
+
+ if (_odp_schedule_configured) {
+ ODP_ERR("Scheduler has been configured already\n");
+ return -1;
+ }
+
+ if (!config) {
+ odp_schedule_config_init(&defconfig);
+ config = &defconfig;
+ }
+
+ ret = sched_api->schedule_config(config);
+
+ if (ret >= 0)
+ _odp_schedule_configured = 1;
+
+ return ret;
+}
+
odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait)
{
+ ODP_ASSERT(_odp_schedule_configured);
+
return sched_api->schedule(from, wait);
}
int odp_schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t events[],
int num)
{
+ ODP_ASSERT(_odp_schedule_configured);
+
return sched_api->schedule_multi(from, wait, events, num);
}
+int odp_schedule_multi_wait(odp_queue_t *from, odp_event_t events[], int num)
+{
+ return sched_api->schedule_multi_wait(from, events, num);
+}
+
+int odp_schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[], int num)
+{
+ return sched_api->schedule_multi_no_wait(from, events, num);
+}
+
void odp_schedule_pause(void)
{
return sched_api->schedule_pause();
@@ -69,6 +116,21 @@ void odp_schedule_prefetch(int num)
return sched_api->schedule_prefetch(num);
}
+int odp_schedule_min_prio(void)
+{
+ return sched_api->schedule_min_prio();
+}
+
+int odp_schedule_max_prio(void)
+{
+ return sched_api->schedule_max_prio();
+}
+
+int odp_schedule_default_prio(void)
+{
+ return sched_api->schedule_default_prio();
+}
+
int odp_schedule_num_prio(void)
{
return sched_api->schedule_num_prio();
@@ -154,9 +216,6 @@ int _odp_schedule_init_global(void)
} else if (!strcmp(sched, "sp")) {
sched_fn = &schedule_sp_fn;
sched_api = &schedule_sp_api;
- } else if (!strcmp(sched, "iquery")) {
- sched_fn = &schedule_iquery_fn;
- sched_api = &schedule_iquery_api;
} else if (!strcmp(sched, "scalable")) {
sched_fn = &schedule_scalable_fn;
sched_api = &schedule_scalable_api;
diff --git a/platform/linux-generic/odp_schedule_iquery.c b/platform/linux-generic/odp_schedule_iquery.c
deleted file mode 100644
index 2501a3f68..000000000
--- a/platform/linux-generic/odp_schedule_iquery.c
+++ /dev/null
@@ -1,1585 +0,0 @@
-/* Copyright (c) 2016-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include "config.h"
-
-#include <odp/api/schedule.h>
-#include <odp_schedule_if.h>
-#include <odp/api/align.h>
-#include <odp/api/queue.h>
-#include <odp/api/shared_memory.h>
-#include <odp_debug_internal.h>
-#include <odp_ring_internal.h>
-#include <odp_buffer_internal.h>
-#include <odp_bitmap_internal.h>
-#include <odp/api/thread.h>
-#include <odp/api/plat/thread_inlines.h>
-#include <odp/api/time.h>
-#include <odp/api/rwlock.h>
-#include <odp/api/hints.h>
-#include <odp/api/cpu.h>
-#include <odp/api/thrmask.h>
-#include <odp/api/packet_io.h>
-#include <odp_config_internal.h>
-#include <odp_timer_internal.h>
-#include <odp_queue_basic_internal.h>
-
-/* Number of priority levels */
-#define NUM_SCHED_PRIO 8
-
-ODP_STATIC_ASSERT(ODP_SCHED_PRIO_LOWEST == (NUM_SCHED_PRIO - 1),
- "lowest_prio_does_not_match_with_num_prios");
-
-ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
- (ODP_SCHED_PRIO_NORMAL < (NUM_SCHED_PRIO - 1)),
- "normal_prio_is_not_between_highest_and_lowest");
-
-/* Number of scheduling groups */
-#define NUM_SCHED_GRPS 256
-
-/* Start of named groups in group mask arrays */
-#define SCHED_GROUP_NAMED (ODP_SCHED_GROUP_CONTROL + 1)
-
-/* Instantiate a WAPL bitmap to be used as queue index bitmap */
-typedef WAPL_BITMAP(ODP_CONFIG_QUEUES) queue_index_bitmap_t;
-
-typedef struct {
- odp_rwlock_t lock;
- queue_index_bitmap_t queues; /* queues in this priority level */
-} sched_prio_t;
-
-typedef struct {
- odp_rwlock_t lock;
- bool allocated;
- odp_thrmask_t threads; /* threads subscribe to this group */
- queue_index_bitmap_t queues; /* queues in this group */
- char name[ODP_SCHED_GROUP_NAME_LEN];
-} sched_group_t;
-
-/* Packet input poll command queues */
-#define PKTIO_CMD_QUEUES 4
-
-/* Maximum number of packet input queues per command */
-#define MAX_PKTIN 16
-
-/* Maximum number of packet IO interfaces */
-#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
-
-/* Maximum number of pktio poll commands */
-#define NUM_PKTIO_CMD (MAX_PKTIN * NUM_PKTIO)
-
-/* Not a valid index */
-#define NULL_INDEX ((uint32_t)-1)
-/* Pktio command is free */
-#define PKTIO_CMD_FREE ((uint32_t)-1)
-
-/* Packet IO poll queue ring size. In worst case, all pktios
- * have all pktins enabled and one poll command is created per
- * pktin queue. The ring size must be larger than or equal to
- * NUM_PKTIO_CMD / PKTIO_CMD_QUEUES, so that it can hold all
- * poll commands in the worst case.
- */
-#define PKTIO_RING_SIZE (NUM_PKTIO_CMD / PKTIO_CMD_QUEUES)
-
-/* Mask for wrapping around pktio poll command index */
-#define PKTIO_RING_MASK (PKTIO_RING_SIZE - 1)
-
-/* Maximum number of dequeues */
-#define MAX_DEQ CONFIG_BURST_SIZE
-
-/* Instantiate a RING data structure as pktio command queue */
-typedef struct ODP_ALIGNED_CACHE {
- /* Ring header */
- ring_t ring;
-
- /* Ring data: pktio poll command indexes */
- uint32_t cmd_index[PKTIO_RING_SIZE];
-} pktio_cmd_queue_t;
-
-/* Packet IO poll command */
-typedef struct {
- int pktio;
- int count;
- int pktin[MAX_PKTIN];
- uint32_t index;
-} pktio_cmd_t;
-
-/* Collect the pktio poll resources */
-typedef struct {
- odp_rwlock_t lock;
- /* count active commands per pktio interface */
- int actives[NUM_PKTIO];
- pktio_cmd_t commands[NUM_PKTIO_CMD];
- pktio_cmd_queue_t queues[PKTIO_CMD_QUEUES];
-} pktio_poll_t;
-
-/* Forward declaration */
-typedef struct sched_thread_local sched_thread_local_t;
-
-/* Order context of a queue */
-typedef struct ODP_ALIGNED_CACHE {
- /* Current ordered context id */
- odp_atomic_u64_t ODP_ALIGNED_CACHE ctx;
-
- /* Next unallocated context id */
- odp_atomic_u64_t next_ctx;
-
- /* Array of ordered locks */
- odp_atomic_u64_t lock[CONFIG_QUEUE_MAX_ORD_LOCKS];
-
-} order_context_t;
-
-typedef struct {
- odp_shm_t selfie;
-
- /* Schedule priorities */
- sched_prio_t prios[NUM_SCHED_PRIO];
-
- /* Schedule groups */
- sched_group_t groups[NUM_SCHED_GRPS];
-
- /* Cache queue parameters for easy reference */
- odp_schedule_param_t queues[ODP_CONFIG_QUEUES];
-
- /* Poll pktio inputs in spare time */
- pktio_poll_t pktio_poll;
-
- /* Queues send or unwind their availability indications
- * for scheduling, the bool value also serves as a focal
- * point for atomic competition. */
- bool availables[ODP_CONFIG_QUEUES];
-
- /* Quick reference to per thread context */
- sched_thread_local_t *threads[ODP_THREAD_COUNT_MAX];
-
- order_context_t order[ODP_CONFIG_QUEUES];
-} sched_global_t;
-
-/* Per thread events cache */
-typedef struct {
- int count;
- odp_queue_t queue;
- odp_event_t stash[MAX_DEQ], *top;
-} event_cache_t;
-
-/* Ordered stash size */
-#define MAX_ORDERED_STASH 512
-
-/* Storage for stashed enqueue operation arguments */
-typedef struct {
- odp_buffer_hdr_t *buf_hdr[QUEUE_MULTI_MAX];
- queue_entry_t *queue_entry;
- int num;
-} ordered_stash_t;
-
-/* Ordered lock states */
-typedef union {
- uint8_t u8[CONFIG_QUEUE_MAX_ORD_LOCKS];
- uint32_t all;
-} lock_called_t;
-
-ODP_STATIC_ASSERT(sizeof(lock_called_t) == sizeof(uint32_t),
- "Lock_called_values_do_not_fit_in_uint32");
-
-/* Instantiate a sparse bitmap to store thread's interested
- * queue indexes per priority.
- */
-typedef SPARSE_BITMAP(ODP_CONFIG_QUEUES) queue_index_sparse_t;
-
-struct sched_thread_local {
- int thread;
- bool pause;
-
- /* Cache events only for atomic queue */
- event_cache_t cache;
-
- /* Saved atomic context */
- bool *atomic;
-
- /* Record the pktio polls have done */
- uint16_t pktin_polls;
-
- /* Interested queue indexes to be checked by thread
- * at each priority level for scheduling, and a round
- * robin iterator to improve fairness between queues
- * in the same priority level.
- */
- odp_rwlock_t lock;
- queue_index_sparse_t indexes[NUM_SCHED_PRIO];
- sparse_bitmap_iterator_t iterators[NUM_SCHED_PRIO];
-
- struct {
- /* Source queue index */
- uint32_t src_queue;
- uint64_t ctx; /**< Ordered context id */
- int stash_num; /**< Number of stashed enqueue operations */
- uint8_t in_order; /**< Order status */
- lock_called_t lock_called; /**< States of ordered locks */
- /** Storage for stashed enqueue operations */
- ordered_stash_t stash[MAX_ORDERED_STASH];
- } ordered;
-};
-
-/* Global scheduler context */
-static sched_global_t *sched;
-
-/* Thread local scheduler context */
-static __thread sched_thread_local_t thread_local;
-
-static int schedule_init_global(void)
-{
- odp_shm_t shm;
- int i, k, prio, group;
-
- ODP_DBG("Schedule[iquery] init ... ");
-
- shm = odp_shm_reserve("odp_scheduler_iquery",
- sizeof(sched_global_t),
- ODP_CACHE_LINE_SIZE, 0);
-
- sched = odp_shm_addr(shm);
-
- if (sched == NULL) {
- ODP_ERR("Schedule[iquery] "
- "init: shm reserve.\n");
- return -1;
- }
-
- memset(sched, 0, sizeof(sched_global_t));
-
- sched->selfie = shm;
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++)
- odp_rwlock_init(&sched->prios[prio].lock);
-
- for (group = 0; group < NUM_SCHED_GRPS; group++) {
- sched->groups[group].allocated = false;
- odp_rwlock_init(&sched->groups[group].lock);
- }
-
- odp_rwlock_init(&sched->pktio_poll.lock);
-
- for (i = 0; i < PKTIO_CMD_QUEUES; i++) {
- pktio_cmd_queue_t *queue =
- &sched->pktio_poll.queues[i];
-
- ring_init(&queue->ring);
-
- for (k = 0; k < PKTIO_RING_SIZE; k++)
- queue->cmd_index[k] = RING_EMPTY;
- }
-
- for (i = 0; i < NUM_PKTIO_CMD; i++)
- sched->pktio_poll.commands[i].index = PKTIO_CMD_FREE;
-
- ODP_DBG("done\n");
- return 0;
-}
-
-static int schedule_term_global(void)
-{
- uint32_t i;
- odp_shm_t shm = sched->selfie;
-
- for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
- int count = 0;
- odp_event_t events[1];
-
- if (sched->availables[i])
- count = sched_queue_deq(i, events, 1, 1);
-
- if (count < 0)
- sched_queue_destroy_finalize(i);
- else if (count > 0)
- ODP_ERR("Queue (%d) not empty\n", i);
- }
-
- memset(sched, 0, sizeof(sched_global_t));
-
- if (odp_shm_free(shm) < 0) {
- ODP_ERR("Schedule[iquery] "
- "term: shm release.\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * These APIs are used to manipulate thread's interests.
- */
-static void thread_set_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio);
-
-static void thread_clear_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio);
-
-static void thread_set_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *set);
-
-static void thread_clear_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *clear);
-
-static void sched_thread_local_reset(void)
-{
- int prio;
- queue_index_sparse_t *index;
- sparse_bitmap_iterator_t *iterator;
-
- memset(&thread_local, 0, sizeof(sched_thread_local_t));
-
- thread_local.thread = odp_thread_id();
- thread_local.cache.queue = ODP_QUEUE_INVALID;
- thread_local.ordered.src_queue = NULL_INDEX;
-
- odp_rwlock_init(&thread_local.lock);
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- index = &thread_local.indexes[prio];
- iterator = &thread_local.iterators[prio];
-
- sparse_bitmap_zero(index);
- sparse_bitmap_iterator(iterator, index);
- }
-}
-
-static int schedule_init_local(void)
-{
- int group;
- sched_group_t *G;
- queue_index_bitmap_t collect;
-
- wapl_bitmap_zero(&collect);
- sched_thread_local_reset();
-
- /* Collect all queue indexes of the schedule groups
- * which this thread has subscribed
- */
- for (group = 0; group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
- odp_rwlock_read_lock(&G->lock);
-
- if ((group < SCHED_GROUP_NAMED || G->allocated) &&
- odp_thrmask_isset(&G->threads, thread_local.thread))
- wapl_bitmap_or(&collect, &collect, &G->queues);
-
- odp_rwlock_read_unlock(&G->lock);
- }
-
- /* Distribute the above collected queue indexes into
- * thread local interests per priority level.
- */
- thread_set_interests(&thread_local, &collect);
-
- /* "Night gathers, and now my watch begins..." */
- sched->threads[thread_local.thread] = &thread_local;
- return 0;
-}
-
-static inline void schedule_release_context(void);
-
-static int schedule_term_local(void)
-{
- int group;
- sched_group_t *G;
-
- if (thread_local.cache.count) {
- ODP_ERR("Locally pre-scheduled events exist.\n");
- return -1;
- }
-
- schedule_release_context();
-
- /* Unsubscribe all named schedule groups */
- for (group = SCHED_GROUP_NAMED;
- group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated && odp_thrmask_isset(
- &G->threads, thread_local.thread))
- odp_thrmask_clr(&G->threads, thread_local.thread);
-
- odp_rwlock_write_unlock(&G->lock);
- }
-
- /* "...for this night and all the nights to come." */
- sched->threads[thread_local.thread] = NULL;
- sched_thread_local_reset();
- return 0;
-}
-
-static int init_sched_queue(uint32_t queue_index,
- const odp_schedule_param_t *sched_param)
-{
- int prio, group, thread, i;
- sched_prio_t *P;
- sched_group_t *G;
- sched_thread_local_t *local;
-
- prio = sched_param->prio;
- group = sched_param->group;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- /* Named schedule group must be created prior
- * to queue creation to this group.
- */
- if (group >= SCHED_GROUP_NAMED && !G->allocated) {
- odp_rwlock_write_unlock(&G->lock);
- return -1;
- }
-
- /* Record the queue in its priority level globally */
- P = &sched->prios[prio];
-
- odp_rwlock_write_lock(&P->lock);
- wapl_bitmap_set(&P->queues, queue_index);
- odp_rwlock_write_unlock(&P->lock);
-
- /* Record the queue in its schedule group */
- wapl_bitmap_set(&G->queues, queue_index);
-
- /* Cache queue parameters for easy reference */
- memcpy(&sched->queues[queue_index],
- sched_param, sizeof(odp_schedule_param_t));
-
- odp_atomic_init_u64(&sched->order[queue_index].ctx, 0);
- odp_atomic_init_u64(&sched->order[queue_index].next_ctx, 0);
-
- for (i = 0; i < CONFIG_QUEUE_MAX_ORD_LOCKS; i++)
- odp_atomic_init_u64(&sched->order[queue_index].lock[i], 0);
-
- /* Update all threads in this schedule group to
- * start check this queue index upon scheduling.
- */
- thread = odp_thrmask_first(&G->threads);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_set_interest(local, queue_index, prio);
- thread = odp_thrmask_next(&G->threads, thread);
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return 0;
-}
-
-/*
- * Must be called with schedule group's rwlock held.
- * This is also being used in destroy_schedule_group()
- * to destroy all orphan queues while destroying a whole
- * schedule group.
- */
-static void __destroy_sched_queue(
- sched_group_t *G, uint32_t queue_index)
-{
- int prio, thread;
- sched_prio_t *P;
- sched_thread_local_t *local;
-
- prio = sched->queues[queue_index].prio;
-
- /* Forget the queue in its schedule group */
- wapl_bitmap_clear(&G->queues, queue_index);
-
- /* Forget queue schedule parameters */
- memset(&sched->queues[queue_index],
- 0, sizeof(odp_schedule_param_t));
-
- /* Update all threads in this schedule group to
- * stop check this queue index upon scheduling.
- */
- thread = odp_thrmask_first(&G->threads);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_clear_interest(local, queue_index, prio);
- thread = odp_thrmask_next(&G->threads, thread);
- }
-
- /* Forget the queue in its priority level globally */
- P = &sched->prios[prio];
-
- odp_rwlock_write_lock(&P->lock);
- wapl_bitmap_clear(&P->queues, queue_index);
- odp_rwlock_write_unlock(&P->lock);
-}
-
-static void destroy_sched_queue(uint32_t queue_index)
-{
- int group;
- sched_group_t *G;
-
- group = sched->queues[queue_index].group;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
-	/* The named schedule group may have been destroyed
-	 * earlier, leaving these queues orphaned.
-	 */
- if (group >= SCHED_GROUP_NAMED && !G->allocated) {
- odp_rwlock_write_unlock(&G->lock);
- return;
- }
-
- __destroy_sched_queue(G, queue_index);
- odp_rwlock_write_unlock(&G->lock);
-
- if (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ORDERED &&
- odp_atomic_load_u64(&sched->order[queue_index].ctx) !=
- odp_atomic_load_u64(&sched->order[queue_index].next_ctx))
- ODP_ERR("queue reorder incomplete\n");
-}
-
-static int pktio_cmd_queue_hash(int pktio, int pktin)
-{
- return (pktio ^ pktin) % PKTIO_CMD_QUEUES;
-}
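A minimal standalone sketch (not part of this patch) of how the XOR-and-modulo hash above spreads (pktio, pktin) pairs over the command queues; the value of PKTIO_CMD_QUEUES is assumed to be 4 purely for illustration:

#include <stdio.h>

#define PKTIO_CMD_QUEUES 4	/* illustrative value */

static int cmd_queue_hash(int pktio, int pktin)
{
	return (pktio ^ pktin) % PKTIO_CMD_QUEUES;
}

int main(void)
{
	int pktio, pktin;

	/* Neighboring pktin queues of one interface land in
	 * different command queues, limiting contention. */
	for (pktio = 0; pktio < 2; pktio++)
		for (pktin = 0; pktin < 4; pktin++)
			printf("pktio %d pktin %d -> cmd queue %d\n",
			       pktio, pktin, cmd_queue_hash(pktio, pktin));
	return 0;
}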
-
-static inline pktio_cmd_t *alloc_pktio_cmd(void)
-{
- int i;
- pktio_cmd_t *cmd = NULL;
-
- odp_rwlock_write_lock(&sched->pktio_poll.lock);
-
- /* Find next free command */
- for (i = 0; i < NUM_PKTIO_CMD; i++) {
- if (sched->pktio_poll.commands[i].index
- == PKTIO_CMD_FREE) {
- cmd = &sched->pktio_poll.commands[i];
- cmd->index = i;
- break;
- }
- }
-
- odp_rwlock_write_unlock(&sched->pktio_poll.lock);
- return cmd;
-}
-
-static inline void free_pktio_cmd(pktio_cmd_t *cmd)
-{
- odp_rwlock_write_lock(&sched->pktio_poll.lock);
-
- cmd->index = PKTIO_CMD_FREE;
-
- odp_rwlock_write_unlock(&sched->pktio_poll.lock);
-}
-
-static void schedule_pktio_start(int pktio,
- int count,
- int pktin[],
- odp_queue_t odpq[] ODP_UNUSED)
-{
- int i, index;
- pktio_cmd_t *cmd;
-
- if (count > MAX_PKTIN)
- ODP_ABORT("Too many input queues for scheduler\n");
-
-	/* Record the active command count per pktio interface */
- sched->pktio_poll.actives[pktio] = count;
-
- /* Create a pktio poll command per pktin */
- for (i = 0; i < count; i++) {
- cmd = alloc_pktio_cmd();
-
- if (cmd == NULL)
- ODP_ABORT("Scheduler out of pktio commands\n");
-
- index = pktio_cmd_queue_hash(pktio, pktin[i]);
-
- cmd->pktio = pktio;
- cmd->count = 1;
- cmd->pktin[0] = pktin[i];
- ring_enq(&sched->pktio_poll.queues[index].ring,
- PKTIO_RING_MASK, cmd->index);
- }
-}
-
-static int schedule_pktio_stop(int pktio, int pktin ODP_UNUSED)
-{
- int remains;
-
- odp_rwlock_write_lock(&sched->pktio_poll.lock);
-
- sched->pktio_poll.actives[pktio]--;
- remains = sched->pktio_poll.actives[pktio];
-
- odp_rwlock_write_unlock(&sched->pktio_poll.lock);
- return remains;
-}
-
-#define DO_SCHED_LOCK() odp_rwlock_read_lock(&thread_local.lock)
-#define DO_SCHED_UNLOCK() odp_rwlock_read_unlock(&thread_local.lock)
-
-static inline bool do_schedule_prio(int prio);
-
-static inline int pop_cache_events(odp_event_t ev[], unsigned int max)
-{
- int k = 0;
- event_cache_t *cache;
-
- cache = &thread_local.cache;
- while (cache->count && max) {
- ev[k] = *cache->top++;
- k++;
- max--;
- cache->count--;
- }
-
- return k;
-}
-
-static inline void assign_queue_handle(odp_queue_t *handle)
-{
- if (handle)
- *handle = thread_local.cache.queue;
-}
-
-static inline void pktio_poll_input(void)
-{
- int i, hash;
- uint32_t index;
-
- ring_t *ring;
- pktio_cmd_t *cmd;
-
-	/*
-	 * Each thread starts its search for a poll command
-	 * at the hash(threadID) queue to mitigate contention.
-	 * If that queue is empty, it moves on to the others.
-	 *
-	 * Most of the time the search stops at the first
-	 * command found, to optimize multi-threaded performance.
-	 * A small fraction of polls does a full iteration to
-	 * avoid packet input starvation when there are fewer
-	 * threads than command queues.
-	 */
- hash = thread_local.thread % PKTIO_CMD_QUEUES;
-
- for (i = 0; i < PKTIO_CMD_QUEUES; i++,
- hash = (hash + 1) % PKTIO_CMD_QUEUES) {
- ring = &sched->pktio_poll.queues[hash].ring;
- index = ring_deq(ring, PKTIO_RING_MASK);
-
- if (odp_unlikely(index == RING_EMPTY))
- continue;
-
- cmd = &sched->pktio_poll.commands[index];
-
- /* Poll packet input */
- if (odp_unlikely(sched_cb_pktin_poll_old(cmd->pktio,
- cmd->count,
- cmd->pktin))) {
-			/* Pktio stopped or closed. Remove the poll
-			 * command and call stop_finalize when all
-			 * commands of the pktio have been removed.
-			 */
- if (schedule_pktio_stop(cmd->pktio,
- cmd->pktin[0]) == 0)
- sched_cb_pktio_stop_finalize(cmd->pktio);
-
- free_pktio_cmd(cmd);
- } else {
- /* Continue scheduling the pktio */
- ring_enq(ring, PKTIO_RING_MASK, index);
-
- /* Do not iterate through all pktin poll
- * command queues every time.
- */
- if (odp_likely(thread_local.pktin_polls & 0xF))
- break;
- }
- }
-
- thread_local.pktin_polls++;
-}
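A small sketch (illustrative; it only assumes a free-running poll counter like pktin_polls above) showing why the "& 0xF" test breaks out early on 15 of every 16 polls and forces a full queue walk on the 16th:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t polls;

	for (polls = 0; polls < 48; polls++) {
		if ((polls & 0xF) == 0)
			printf("poll %u: full iteration over all queues\n",
			       polls);
		/* otherwise the search would break at the first
		 * non-empty command queue */
	}
	return 0;
}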
-
-/*
- * Schedule queues
- */
-static int do_schedule(odp_queue_t *out_queue,
- odp_event_t out_ev[], unsigned int max_num)
-{
- int prio, count;
-
- /* Consume locally cached events */
- count = pop_cache_events(out_ev, max_num);
- if (count > 0) {
- assign_queue_handle(out_queue);
- return count;
- }
-
- schedule_release_context();
-
- if (odp_unlikely(thread_local.pause))
- return count;
-
- DO_SCHED_LOCK();
- /* Schedule events */
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
-		/* Round-robin iterate over the queue indexes
-		 * of interest at this priority level, competing
-		 * for and consuming available queues
-		 */
- if (!do_schedule_prio(prio))
- continue;
-
- count = pop_cache_events(out_ev, max_num);
- assign_queue_handle(out_queue);
- DO_SCHED_UNLOCK();
- return count;
- }
-
- DO_SCHED_UNLOCK();
-
- /* Poll packet input when there are no events */
- pktio_poll_input();
- return 0;
-}
-
-static int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
- odp_event_t out_ev[], unsigned int max_num)
-{
- int count, first = 1;
- odp_time_t next, wtime;
-
- while (1) {
- timer_run();
-
- count = do_schedule(out_queue, out_ev, max_num);
-
- if (count)
- break;
-
- if (wait == ODP_SCHED_WAIT)
- continue;
-
- if (wait == ODP_SCHED_NO_WAIT)
- break;
-
- if (first) {
- wtime = odp_time_local_from_ns(wait);
- next = odp_time_sum(odp_time_local(), wtime);
- first = 0;
- continue;
- }
-
- if (odp_time_cmp(next, odp_time_local()) < 0)
- break;
- }
-
- return count;
-}
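The loop above turns the relative wait into an absolute deadline on the first empty poll and retries until it passes. A sketch of the same pattern using POSIX clock_gettime() instead of the ODP time API (illustrative only):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
}

int main(void)
{
	uint64_t wait_ns = 5000000;	/* 5 ms */
	uint64_t deadline = now_ns() + wait_ns;
	unsigned long polls = 0;

	while (now_ns() < deadline)
		polls++;	/* a do_schedule() attempt would go here */

	printf("polled %lu times in %llu ns\n", polls,
	       (unsigned long long)wait_ns);
	return 0;
}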
-
-static odp_event_t schedule(odp_queue_t *out_queue, uint64_t wait)
-{
- odp_event_t ev;
-
- ev = ODP_EVENT_INVALID;
-
- schedule_loop(out_queue, wait, &ev, 1);
-
- return ev;
-}
-
-static int schedule_multi(odp_queue_t *out_queue, uint64_t wait,
- odp_event_t events[], int num)
-{
- return schedule_loop(out_queue, wait, events, num);
-}
-
-static void schedule_pause(void)
-{
- thread_local.pause = 1;
-}
-
-static void schedule_resume(void)
-{
- thread_local.pause = 0;
-}
-
-static uint64_t schedule_wait_time(uint64_t ns)
-{
- return ns;
-}
-
-static int number_of_priorites(void)
-{
- return NUM_SCHED_PRIO;
-}
-
-/*
- * Create a named schedule group with a pre-defined
- * set of subscriber threads.
- *
- * Scheduled queues belonging to this group must be
- * created after the group itself; upon creation the
- * group holds no queues.
- */
-static odp_schedule_group_t schedule_group_create(
- const char *name, const odp_thrmask_t *mask)
-{
- int group;
- sched_group_t *G;
-
- for (group = SCHED_GROUP_NAMED;
- group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
-
- odp_rwlock_write_lock(&G->lock);
- if (!G->allocated) {
- strncpy(G->name, name ? name : "",
- ODP_SCHED_GROUP_NAME_LEN - 1);
- odp_thrmask_copy(&G->threads, mask);
- wapl_bitmap_zero(&G->queues);
-
- G->allocated = true;
- odp_rwlock_write_unlock(&G->lock);
- return (odp_schedule_group_t)group;
- }
- odp_rwlock_write_unlock(&G->lock);
- }
-
- return ODP_SCHED_GROUP_INVALID;
-}
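Application-side usage sketch for the group API implemented here (assumes ODP is already initialized and the worker thread IDs are known; the helper name and group name are ours, not part of the patch):

#include <odp_api.h>

static odp_schedule_group_t create_worker_group(int thr_a, int thr_b)
{
	odp_thrmask_t mask;

	odp_thrmask_zero(&mask);
	odp_thrmask_set(&mask, thr_a);
	odp_thrmask_set(&mask, thr_b);

	/* Queues for this group must be created after this call,
	 * matching the check in init_sched_queue() above. */
	return odp_schedule_group_create("wrk_group", &mask);
}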
-
-static inline void __destroy_group_queues(sched_group_t *group)
-{
- unsigned int index;
- queue_index_bitmap_t queues;
- wapl_bitmap_iterator_t it;
-
- /* Constructor */
- wapl_bitmap_zero(&queues);
- wapl_bitmap_copy(&queues, &group->queues);
- wapl_bitmap_iterator(&it, &queues);
-
- /* Walk through the queue index bitmap */
- for (it.start(&it); it.has_next(&it);) {
- index = it.next(&it);
- __destroy_sched_queue(group, index);
- }
-}
-
-/*
- * Destroy a named schedule group.
- */
-static int schedule_group_destroy(odp_schedule_group_t group)
-{
- int done = -1;
- sched_group_t *G;
-
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return -1;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated) {
- /* Destroy all queues in this schedule group
- * and leave no orphan queues.
- */
- __destroy_group_queues(G);
-
- done = 0;
- G->allocated = false;
- wapl_bitmap_zero(&G->queues);
- odp_thrmask_zero(&G->threads);
- memset(G->name, 0, ODP_SCHED_GROUP_NAME_LEN);
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return done;
-}
-
-static odp_schedule_group_t schedule_group_lookup(const char *name)
-{
- int group;
- sched_group_t *G;
-
- for (group = SCHED_GROUP_NAMED;
- group < NUM_SCHED_GRPS; group++) {
- G = &sched->groups[group];
-
- odp_rwlock_read_lock(&G->lock);
- if (strcmp(name, G->name) == 0) {
- odp_rwlock_read_unlock(&G->lock);
- return (odp_schedule_group_t)group;
- }
- odp_rwlock_read_unlock(&G->lock);
- }
-
- return ODP_SCHED_GROUP_INVALID;
-}
-
-static int schedule_group_join(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
-{
- int done = -1, thread;
- sched_group_t *G;
- sched_thread_local_t *local;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated) {
-		/* Make newly joined threads start checking
-		 * queue indexes in this schedule group
-		 */
- thread = odp_thrmask_first(mask);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_set_interests(local, &G->queues);
-
- odp_thrmask_set(&G->threads, thread);
- thread = odp_thrmask_next(mask, thread);
- }
- done = 0;
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return done;
-}
-
-static int schedule_group_leave(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
-{
- int done = -1, thread;
- sched_group_t *G;
- sched_thread_local_t *local;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_write_lock(&G->lock);
-
- if (G->allocated) {
-		/* Make leaving threads stop checking
-		 * queue indexes in this schedule group
-		 */
- thread = odp_thrmask_first(mask);
- while (thread >= 0) {
- local = sched->threads[thread];
- thread_clear_interests(local, &G->queues);
-
- odp_thrmask_clr(&G->threads, thread);
- thread = odp_thrmask_next(mask, thread);
- }
- done = 0;
- }
-
- odp_rwlock_write_unlock(&G->lock);
- return done;
-}
-
-static int schedule_group_thrmask(odp_schedule_group_t group,
- odp_thrmask_t *thrmask)
-{
- int done = -1;
- sched_group_t *G;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_read_lock(&G->lock);
-
- if (G->allocated && thrmask != NULL) {
- done = 0;
- odp_thrmask_copy(thrmask, &G->threads);
- }
-
- odp_rwlock_read_unlock(&G->lock);
- return done;
-}
-
-static int schedule_group_info(odp_schedule_group_t group,
- odp_schedule_group_info_t *info)
-{
- int done = -1;
- sched_group_t *G;
-
- /* Named schedule group only */
- if (group < SCHED_GROUP_NAMED ||
- group >= NUM_SCHED_GRPS)
- return done;
-
- G = &sched->groups[group];
- odp_rwlock_read_lock(&G->lock);
-
- if (G->allocated && info != NULL) {
- done = 0;
- info->name = G->name;
- odp_thrmask_copy(&info->thrmask, &G->threads);
- }
-
- odp_rwlock_read_unlock(&G->lock);
- return done;
-}
-
-/* This function is a no-op */
-static void schedule_prefetch(int num ODP_UNUSED)
-{
-}
-
-/*
- * Limited to joining and leaving the pre-defined schedule
- * groups, before thread-local initialization and after
- * thread-local termination.
- */
-static int group_add_thread(odp_schedule_group_t group, int thread)
-{
- sched_group_t *G;
-
- if (group < 0 || group >= SCHED_GROUP_NAMED)
- return -1;
-
- G = &sched->groups[group];
-
- odp_rwlock_write_lock(&G->lock);
- odp_thrmask_set(&G->threads, thread);
- odp_rwlock_write_unlock(&G->lock);
- return 0;
-}
-
-static int group_remove_thread(odp_schedule_group_t group, int thread)
-{
- sched_group_t *G;
-
- if (group < 0 || group >= SCHED_GROUP_NAMED)
- return -1;
-
- G = &sched->groups[group];
-
- odp_rwlock_write_lock(&G->lock);
- odp_thrmask_clr(&G->threads, thread);
- odp_rwlock_write_unlock(&G->lock);
- return 0;
-}
-
-static int number_of_groups(void)
-{
- return NUM_SCHED_GRPS;
-}
-
-static int schedule_sched_queue(uint32_t queue_index)
-{
- /* Set available indications globally */
- sched->availables[queue_index] = true;
- return 0;
-}
-
-static int schedule_unsched_queue(uint32_t queue_index)
-{
- /* Clear available indications globally */
- sched->availables[queue_index] = false;
- return 0;
-}
-
-static void schedule_release_atomic(void)
-{
- unsigned int queue_index;
-
- if ((thread_local.atomic != NULL) &&
- (thread_local.cache.count == 0)) {
- queue_index = thread_local.atomic - sched->availables;
- thread_local.atomic = NULL;
- sched->availables[queue_index] = true;
- }
-}
-
-static inline int ordered_own_turn(uint32_t queue_index)
-{
- uint64_t ctx;
-
- ctx = odp_atomic_load_acq_u64(&sched->order[queue_index].ctx);
-
- return ctx == thread_local.ordered.ctx;
-}
-
-static inline void wait_for_order(uint32_t queue_index)
-{
- /* Busy loop to synchronize ordered processing */
- while (1) {
- if (ordered_own_turn(queue_index))
- break;
- odp_cpu_pause();
- }
-}
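The busy loop above is a sequence-number ticket wait. A generic sketch of the same idea with C11 atomics (illustrative only; wait_for_turn is our name, and odp_cpu_pause() is replaced by a comment):

#include <stdatomic.h>

static void wait_for_turn(atomic_ulong *seq, unsigned long ticket)
{
	/* The acquire load pairs with the release increment done
	 * by the previous context owner. */
	while (atomic_load_explicit(seq, memory_order_acquire) != ticket)
		;	/* a CPU pause/relax hint would go here */
}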
-
-/**
- * Perform stashed enqueue operations
- *
- * Should be called only when already in order.
- */
-static inline void ordered_stash_release(void)
-{
- int i;
-
- for (i = 0; i < thread_local.ordered.stash_num; i++) {
- queue_entry_t *queue_entry;
- odp_buffer_hdr_t **buf_hdr;
- int num, num_enq;
-
- queue_entry = thread_local.ordered.stash[i].queue_entry;
- buf_hdr = thread_local.ordered.stash[i].buf_hdr;
- num = thread_local.ordered.stash[i].num;
-
- num_enq = queue_fn->enq_multi(queue_entry, buf_hdr, num);
-
- if (odp_unlikely(num_enq < num)) {
- if (odp_unlikely(num_enq < 0))
- num_enq = 0;
-
- ODP_DBG("Dropped %i packets\n", num - num_enq);
- buffer_free_multi(&buf_hdr[num_enq], num - num_enq);
- }
- }
- thread_local.ordered.stash_num = 0;
-}
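The drop handling above follows a common partial-enqueue pattern: clamp a negative return value to zero, then free whatever did not fit. A generic sketch with a hypothetical try_enqueue():

#include <stdio.h>

/* Hypothetical queue that accepts at most 3 items per call */
static int try_enqueue(int num)
{
	return num > 3 ? 3 : num;
}

static void enqueue_or_drop(int num)
{
	int num_enq = try_enqueue(num);

	if (num_enq < 0)
		num_enq = 0;

	if (num_enq < num)
		printf("dropped %d of %d\n", num - num_enq, num);
}

int main(void)
{
	enqueue_or_drop(5);	/* prints: dropped 2 of 5 */
	return 0;
}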
-
-static inline void release_ordered(void)
-{
- uint32_t qi;
- uint32_t i;
-
- qi = thread_local.ordered.src_queue;
-
- wait_for_order(qi);
-
- /* Release all ordered locks */
- for (i = 0; i < sched->queues[qi].lock_count; i++) {
- if (!thread_local.ordered.lock_called.u8[i])
- odp_atomic_store_rel_u64(&sched->order[qi].lock[i],
- thread_local.ordered.ctx + 1);
- }
-
- thread_local.ordered.lock_called.all = 0;
- thread_local.ordered.src_queue = NULL_INDEX;
- thread_local.ordered.in_order = 0;
-
- ordered_stash_release();
-
- /* Next thread can continue processing */
- odp_atomic_add_rel_u64(&sched->order[qi].ctx, 1);
-}
-
-static void schedule_release_ordered(void)
-{
- uint32_t queue_index;
-
- queue_index = thread_local.ordered.src_queue;
-
- if (odp_unlikely((queue_index == NULL_INDEX) ||
- thread_local.cache.count))
- return;
-
- release_ordered();
-}
-
-static inline void schedule_release_context(void)
-{
- if (thread_local.ordered.src_queue != NULL_INDEX)
- release_ordered();
- else
- schedule_release_atomic();
-}
-
-static int schedule_ord_enq_multi(void *q_int, void *buf_hdr[],
- int num, int *ret)
-{
- int i;
- uint32_t stash_num = thread_local.ordered.stash_num;
- queue_entry_t *dst_queue = q_int;
- uint32_t src_queue = thread_local.ordered.src_queue;
-
- if ((src_queue == NULL_INDEX) || thread_local.ordered.in_order)
- return 0;
-
- if (ordered_own_turn(src_queue)) {
- /* Own turn, so can do enqueue directly. */
- thread_local.ordered.in_order = 1;
- ordered_stash_release();
- return 0;
- }
-
- /* Pktout may drop packets, so the operation cannot be stashed. */
- if (dst_queue->s.pktout.pktio != ODP_PKTIO_INVALID ||
- odp_unlikely(stash_num >= MAX_ORDERED_STASH)) {
- /* If the local stash is full, wait until it is our turn and
- * then release the stash and do enqueue directly. */
- wait_for_order(src_queue);
-
- thread_local.ordered.in_order = 1;
-
- ordered_stash_release();
- return 0;
- }
-
- thread_local.ordered.stash[stash_num].queue_entry = dst_queue;
- thread_local.ordered.stash[stash_num].num = num;
- for (i = 0; i < num; i++)
- thread_local.ordered.stash[stash_num].buf_hdr[i] = buf_hdr[i];
-
- thread_local.ordered.stash_num++;
-
- *ret = num;
- return 1;
-}
-
-static void order_lock(void)
-{
- uint32_t queue_index;
-
- queue_index = thread_local.ordered.src_queue;
-
- if (queue_index == NULL_INDEX)
- return;
-
- wait_for_order(queue_index);
-}
-
-static void order_unlock(void)
-{
-}
-
-static void schedule_order_lock(uint32_t lock_index)
-{
- odp_atomic_u64_t *ord_lock;
- uint32_t queue_index;
-
- queue_index = thread_local.ordered.src_queue;
-
- ODP_ASSERT(queue_index != NULL_INDEX &&
- lock_index <= sched->queues[queue_index].lock_count &&
- !thread_local.ordered.lock_called.u8[lock_index]);
-
- ord_lock = &sched->order[queue_index].lock[lock_index];
-
- /* Busy loop to synchronize ordered processing */
- while (1) {
- uint64_t lock_seq;
-
- lock_seq = odp_atomic_load_acq_u64(ord_lock);
-
- if (lock_seq == thread_local.ordered.ctx) {
- thread_local.ordered.lock_called.u8[lock_index] = 1;
- return;
- }
- odp_cpu_pause();
- }
-}
-
-static void schedule_order_unlock(uint32_t lock_index)
-{
- odp_atomic_u64_t *ord_lock;
- uint32_t queue_index;
-
- queue_index = thread_local.ordered.src_queue;
-
- ODP_ASSERT(queue_index != NULL_INDEX &&
- lock_index <= sched->queues[queue_index].lock_count);
-
- ord_lock = &sched->order[queue_index].lock[lock_index];
-
- ODP_ASSERT(thread_local.ordered.ctx == odp_atomic_load_u64(ord_lock));
-
- odp_atomic_store_rel_u64(ord_lock, thread_local.ordered.ctx + 1);
-}
-
-static void schedule_order_unlock_lock(uint32_t unlock_index,
- uint32_t lock_index)
-{
- schedule_order_unlock(unlock_index);
- schedule_order_lock(lock_index);
-}
-
-static uint32_t schedule_max_ordered_locks(void)
-{
- return CONFIG_QUEUE_MAX_ORD_LOCKS;
-}
-
-static void schedule_order_lock_start(uint32_t lock_index)
-{
- (void)lock_index;
-}
-
-static void schedule_order_lock_wait(uint32_t lock_index)
-{
- schedule_order_lock(lock_index);
-}
-
-static inline bool is_atomic_queue(unsigned int queue_index)
-{
- return (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ATOMIC);
-}
-
-static inline bool is_ordered_queue(unsigned int queue_index)
-{
- return (sched->queues[queue_index].sync == ODP_SCHED_SYNC_ORDERED);
-}
-
-static void schedule_save_context(uint32_t queue_index)
-{
- if (is_atomic_queue(queue_index)) {
- thread_local.atomic = &sched->availables[queue_index];
- } else if (is_ordered_queue(queue_index)) {
- uint64_t ctx;
- odp_atomic_u64_t *next_ctx;
-
- next_ctx = &sched->order[queue_index].next_ctx;
- ctx = odp_atomic_fetch_inc_u64(next_ctx);
-
- thread_local.ordered.ctx = ctx;
- thread_local.ordered.src_queue = queue_index;
- }
-}
-
-/* Fill in scheduler interface */
-const schedule_fn_t schedule_iquery_fn = {
- .status_sync = 1,
- .pktio_start = schedule_pktio_start,
- .thr_add = group_add_thread,
- .thr_rem = group_remove_thread,
- .num_grps = number_of_groups,
- .init_queue = init_sched_queue,
- .destroy_queue = destroy_sched_queue,
- .sched_queue = schedule_sched_queue,
- .ord_enq_multi = schedule_ord_enq_multi,
- .init_global = schedule_init_global,
- .term_global = schedule_term_global,
- .init_local = schedule_init_local,
- .term_local = schedule_term_local,
- .order_lock = order_lock,
- .order_unlock = order_unlock,
- .max_ordered_locks = schedule_max_ordered_locks,
- .unsched_queue = schedule_unsched_queue,
- .save_context = schedule_save_context
-};
-
-/* Fill in scheduler API calls */
-const schedule_api_t schedule_iquery_api = {
- .schedule_wait_time = schedule_wait_time,
- .schedule = schedule,
- .schedule_multi = schedule_multi,
- .schedule_pause = schedule_pause,
- .schedule_resume = schedule_resume,
- .schedule_release_atomic = schedule_release_atomic,
- .schedule_release_ordered = schedule_release_ordered,
- .schedule_prefetch = schedule_prefetch,
- .schedule_num_prio = number_of_priorites,
- .schedule_group_create = schedule_group_create,
- .schedule_group_destroy = schedule_group_destroy,
- .schedule_group_lookup = schedule_group_lookup,
- .schedule_group_join = schedule_group_join,
- .schedule_group_leave = schedule_group_leave,
- .schedule_group_thrmask = schedule_group_thrmask,
- .schedule_group_info = schedule_group_info,
- .schedule_order_lock = schedule_order_lock,
- .schedule_order_unlock = schedule_order_unlock,
- .schedule_order_unlock_lock = schedule_order_unlock_lock,
- .schedule_order_lock_start = schedule_order_lock_start,
- .schedule_order_lock_wait = schedule_order_lock_wait
-};
-
-static void thread_set_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio)
-{
- queue_index_sparse_t *index;
-
- if (thread == NULL)
- return;
-
- if (prio >= NUM_SCHED_PRIO)
- return;
-
- index = &thread->indexes[prio];
-
- odp_rwlock_write_lock(&thread->lock);
- sparse_bitmap_set(index, queue_index);
- odp_rwlock_write_unlock(&thread->lock);
-}
-
-static void thread_clear_interest(sched_thread_local_t *thread,
- unsigned int queue_index, int prio)
-{
- queue_index_sparse_t *index;
-
- if (thread == NULL)
- return;
-
- if (prio >= NUM_SCHED_PRIO)
- return;
-
- index = &thread->indexes[prio];
-
- odp_rwlock_write_lock(&thread->lock);
- sparse_bitmap_clear(index, queue_index);
- odp_rwlock_write_unlock(&thread->lock);
-}
-
-static void thread_set_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *set)
-{
- int prio;
- sched_prio_t *P;
- unsigned int queue_index;
- queue_index_bitmap_t subset;
- wapl_bitmap_iterator_t it;
-
- if (thread == NULL || set == NULL)
- return;
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- P = &sched->prios[prio];
- odp_rwlock_read_lock(&P->lock);
-
- /* The collection of queue indexes in 'set'
- * may belong to several priority levels.
- */
- wapl_bitmap_zero(&subset);
- wapl_bitmap_and(&subset, &P->queues, set);
-
- odp_rwlock_read_unlock(&P->lock);
-
- /* Add the subset to local indexes */
- wapl_bitmap_iterator(&it, &subset);
- for (it.start(&it); it.has_next(&it);) {
- queue_index = it.next(&it);
- thread_set_interest(thread, queue_index, prio);
- }
- }
-}
-
-static void thread_clear_interests(sched_thread_local_t *thread,
- queue_index_bitmap_t *clear)
-{
- int prio;
- sched_prio_t *P;
- unsigned int queue_index;
- queue_index_bitmap_t subset;
- wapl_bitmap_iterator_t it;
-
- if (thread == NULL || clear == NULL)
- return;
-
- for (prio = 0; prio < NUM_SCHED_PRIO; prio++) {
- P = &sched->prios[prio];
- odp_rwlock_read_lock(&P->lock);
-
- /* The collection of queue indexes in 'clear'
- * may belong to several priority levels.
- */
- wapl_bitmap_zero(&subset);
- wapl_bitmap_and(&subset, &P->queues, clear);
-
- odp_rwlock_read_unlock(&P->lock);
-
- /* Remove the subset from local indexes */
- wapl_bitmap_iterator(&it, &subset);
- for (it.start(&it); it.has_next(&it);) {
- queue_index = it.next(&it);
- thread_clear_interest(thread, queue_index, prio);
- }
- }
-}
-
-static inline bool compete_atomic_queue(unsigned int queue_index)
-{
- bool expected = sched->availables[queue_index];
-
- if (expected && is_atomic_queue(queue_index)) {
- expected = __atomic_compare_exchange_n(
- &sched->availables[queue_index],
- &expected, false, 0,
- __ATOMIC_RELEASE, __ATOMIC_RELAXED);
- }
-
- return expected;
-}
-
-static inline int consume_queue(int prio, unsigned int queue_index)
-{
- int count;
- unsigned int max = MAX_DEQ;
- event_cache_t *cache = &thread_local.cache;
-
-	/* Lower priorities use a smaller batch size to limit
-	 * head-of-line blocking latency.
-	 */
- if (odp_unlikely(MAX_DEQ > 1 && prio > ODP_SCHED_PRIO_DEFAULT))
- max = MAX_DEQ / 2;
-
- /* For ordered queues we want consecutive events to
- * be dispatched to separate threads, so do not cache
- * them locally.
- */
- if (is_ordered_queue(queue_index))
- max = 1;
-
- count = sched_queue_deq(queue_index, cache->stash, max, 1);
-
- if (count < 0) {
- DO_SCHED_UNLOCK();
- sched_queue_destroy_finalize(queue_index);
- DO_SCHED_LOCK();
- return 0;
- }
-
- if (count == 0)
- return 0;
-
- cache->top = &cache->stash[0];
- cache->count = count;
- cache->queue = queue_from_index(queue_index);
- return count;
-}
-
-static inline bool do_schedule_prio(int prio)
-{
- int nbits, next, end;
- unsigned int queue_index;
- sparse_bitmap_iterator_t *it;
-
- it = &thread_local.iterators[prio];
- nbits = (int)*it->_base.last;
-
- /* No interests at all! */
- if (nbits <= 0)
- return false;
-
-	/* On the critical path we cannot afford iterator
-	 * calls, so iterate manually with internal knowledge
-	 */
- it->_start = (it->_start + 1) % nbits;
- end = it->_start + nbits;
-
- for (next = it->_start; next < end; next++) {
- queue_index = it->_base.il[next % nbits];
-
- if (!compete_atomic_queue(queue_index))
- continue;
-
- if (!consume_queue(prio, queue_index))
- continue;
-
- return true;
- }
-
- return false;
-}
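A standalone sketch of the manual round-robin walk in do_schedule_prio(): the start position advances by one on each call, and the circular scan still visits every index of interest (the index values are made up for illustration):

#include <stdio.h>

int main(void)
{
	int il[] = {3, 7, 12, 20};	/* example queue index list */
	int nbits = 4;
	int start = 0, round, next;

	for (round = 0; round < 3; round++) {
		start = (start + 1) % nbits;
		printf("round %d:", round);
		for (next = start; next < start + nbits; next++)
			printf(" %d", il[next % nbits]);
		printf("\n");
	}
	return 0;
}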
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 826747ee1..4e9dd7717 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -22,7 +22,7 @@
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
-#include <odp_ishm_internal.h>
+#include <odp_shm_internal.h>
#include <odp_ishmpool_internal.h>
#include <odp_align_internal.h>
@@ -46,34 +46,27 @@
#define FLAG_PKTIN 0x80
-static _odp_ishm_pool_t *sched_shm_pool;
-
-ODP_STATIC_ASSERT(ODP_SCHED_PRIO_LOWEST == (ODP_SCHED_PRIO_NUM - 2),
- "lowest_prio_does_not_match_with_num_prios");
-
-ODP_STATIC_ASSERT((ODP_SCHED_PRIO_NORMAL > 0) &&
- (ODP_SCHED_PRIO_NORMAL < (ODP_SCHED_PRIO_NUM - 2)),
- "normal_prio_is_not_between_highest_and_lowest");
-
ODP_STATIC_ASSERT(CHECK_IS_POWER2(ODP_CONFIG_QUEUES),
"Number_of_queues_is_not_power_of_two");
-/*
- * Scheduler group related variables.
- */
-/* Currently used scheduler groups */
-static sched_group_mask_t sg_free;
-static sched_group_t *sg_vec[MAX_SCHED_GROUP];
-/* Group lock for MT-safe APIs */
-static odp_spinlock_t sched_grp_lock;
-
#define SCHED_GROUP_JOIN 0
#define SCHED_GROUP_LEAVE 1
-/*
- * Per thread state
- */
-static sched_scalable_thread_state_t thread_state[MAXTHREADS];
+typedef struct {
+ odp_shm_t shm;
+ _odp_ishm_pool_t *sched_shm_pool;
+ /** Currently used scheduler groups */
+ sched_group_mask_t sg_free;
+ sched_group_t *sg_vec[MAX_SCHED_GROUP];
+ /** Group lock for MT-safe APIs */
+ odp_spinlock_t sched_grp_lock;
+ /** Per thread state */
+ sched_scalable_thread_state_t thread_state[MAXTHREADS];
+ uint16_t poll_count[ODP_CONFIG_PKTIO_ENTRIES];
+} sched_global_t;
+
+static sched_global_t *global;
+
__thread sched_scalable_thread_state_t *sched_ts;
static int thread_state_init(int tidx)
@@ -82,7 +75,7 @@ static int thread_state_init(int tidx)
uint32_t i;
ODP_ASSERT(tidx < MAXTHREADS);
- ts = &thread_state[tidx];
+ ts = &global->thread_state[tidx];
ts->atomq = NULL;
ts->src_schedq = NULL;
ts->rctx = NULL;
@@ -523,9 +516,9 @@ static void signal_threads_add(sched_group_t *sg, uint32_t sgi, uint32_t prio)
/* Notify the thread about membership in this
* group/priority.
*/
- atom_bitset_set(&thread_state[thr].sg_wanted[prio],
+ atom_bitset_set(&global->thread_state[thr].sg_wanted[prio],
sgi, __ATOMIC_RELEASE);
- __atomic_store_n(&thread_state[thr].sg_sem, 1,
+ __atomic_store_n(&global->thread_state[thr].sg_sem, 1,
__ATOMIC_RELEASE);
}
}
@@ -537,11 +530,11 @@ sched_queue_t *sched_queue_add(odp_schedule_group_t grp, uint32_t prio)
uint32_t x;
ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP);
- ODP_ASSERT((sg_free & (1ULL << grp)) == 0);
+ ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0);
ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);
sgi = grp;
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
/* Use xcount to spread queues over the xfactor schedq's
* per priority.
@@ -563,11 +556,11 @@ static uint32_t sched_pktin_add(odp_schedule_group_t grp, uint32_t prio)
sched_group_t *sg;
ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP);
- ODP_ASSERT((sg_free & (1ULL << grp)) == 0);
+ ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0);
ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);
sgi = grp;
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
(void)sched_queue_add(grp, ODP_SCHED_PRIO_PKTIN);
return (ODP_SCHED_PRIO_PKTIN - prio) * sg->xfactor;
@@ -584,9 +577,9 @@ static void signal_threads_rem(sched_group_t *sg, uint32_t sgi, uint32_t prio)
/* Notify the thread about membership in this
* group/priority.
*/
- atom_bitset_clr(&thread_state[thr].sg_wanted[prio],
+ atom_bitset_clr(&global->thread_state[thr].sg_wanted[prio],
sgi, __ATOMIC_RELEASE);
- __atomic_store_n(&thread_state[thr].sg_sem, 1,
+ __atomic_store_n(&global->thread_state[thr].sg_sem, 1,
__ATOMIC_RELEASE);
}
}
@@ -598,11 +591,11 @@ void sched_queue_rem(odp_schedule_group_t grp, uint32_t prio)
uint32_t x;
ODP_ASSERT(grp >= 0 && grp < (odp_schedule_group_t)MAX_SCHED_GROUP);
- ODP_ASSERT((sg_free & (1ULL << grp)) == 0);
+ ODP_ASSERT((global->sg_free & (1ULL << grp)) == 0);
ODP_ASSERT(prio < ODP_SCHED_PRIO_NUM);
sgi = grp;
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
x = __atomic_sub_fetch(&sg->xcount[prio], 1, __ATOMIC_RELAXED);
if (x == 0) {
@@ -631,7 +624,7 @@ static void update_sg_add(sched_scalable_thread_state_t *ts,
added = bitset_andn(sg_wanted, ts->sg_actual[p]);
while (!bitset_is_null(added)) {
sgi = bitset_ffs(added) - 1;
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
for (x = 0; x < sg->xfactor; x++) {
/* Include our thread index to shift
* (rotate) the order of schedq's
@@ -657,7 +650,7 @@ static void update_sg_rem(sched_scalable_thread_state_t *ts,
removed = bitset_andn(ts->sg_actual[p], sg_wanted);
while (!bitset_is_null(removed)) {
sgi = bitset_ffs(removed) - 1;
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
for (x = 0; x < sg->xfactor; x++) {
remove_schedq_from_list(ts,
&sg->schedq[p *
@@ -710,8 +703,6 @@ static inline void _schedule_release_ordered(sched_scalable_thread_state_t *ts)
ts->rctx = NULL;
}
-static uint16_t poll_count[ODP_CONFIG_PKTIO_ENTRIES];
-
static void pktio_start(int pktio_idx,
int num_in_queue,
int in_queue_idx[],
@@ -725,7 +716,8 @@ static void pktio_start(int pktio_idx,
for (i = 0; i < num_in_queue; i++) {
rxq = in_queue_idx[i];
ODP_ASSERT(rxq < PKTIO_MAX_QUEUES);
- __atomic_fetch_add(&poll_count[pktio_idx], 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global->poll_count[pktio_idx], 1,
+ __ATOMIC_RELAXED);
qentry = qentry_from_ext(odpq[i]);
elem = &qentry->s.sched_elem;
elem->cons_type |= FLAG_PKTIN; /* Set pktin queue flag */
@@ -742,7 +734,7 @@ static void pktio_stop(sched_elem_t *elem)
{
elem->cons_type &= ~FLAG_PKTIN; /* Clear pktin queue flag */
sched_pktin_rem(elem->sched_grp);
- if (__atomic_sub_fetch(&poll_count[elem->pktio_idx],
+ if (__atomic_sub_fetch(&global->poll_count[elem->pktio_idx],
1, __ATOMIC_RELAXED) == 0) {
/* Call stop_finalize when all queues
* of the pktio have been removed */
@@ -891,7 +883,7 @@ static int _schedule(odp_queue_t *from, odp_event_t ev[], int num_evts)
ts = sched_ts;
atomq = ts->atomq;
- timer_run();
+ timer_run(1);
/* Once an atomic queue has been scheduled to a thread, it will stay
* on that thread until empty or 'rotated' by WRR
@@ -1349,6 +1341,18 @@ static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
return ev;
}
+static int schedule_multi_wait(odp_queue_t *from, odp_event_t events[],
+ int max_num)
+{
+ return schedule_multi(from, ODP_SCHED_WAIT, events, max_num);
+}
+
+static int schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[],
+ int max_num)
+{
+ return schedule_multi(from, ODP_SCHED_NO_WAIT, events, max_num);
+}
+
static void schedule_pause(void)
{
sched_ts->pause = true;
@@ -1369,6 +1373,21 @@ static int schedule_num_prio(void)
return ODP_SCHED_PRIO_NUM - 1; /* Discount the pktin priority level */
}
+static int schedule_min_prio(void)
+{
+ return 0;
+}
+
+static int schedule_max_prio(void)
+{
+ return schedule_num_prio() - 1;
+}
+
+static int schedule_default_prio(void)
+{
+ return schedule_max_prio() / 2;
+}
+
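With the inverted internal mapping, portable applications should query the priority range rather than hard-code values. A usage sketch of the three API calls these hooks back (assumes ODP is initialized):

#include <odp_api.h>
#include <stdio.h>

static void print_prio_range(void)
{
	printf("prio: min %d, default %d, max %d\n",
	       odp_schedule_min_prio(),
	       odp_schedule_default_prio(),
	       odp_schedule_max_prio());
}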
static int schedule_group_update(sched_group_t *sg,
uint32_t sgi,
const odp_thrmask_t *mask,
@@ -1389,22 +1408,21 @@ static int schedule_group_update(sched_group_t *sg,
atom_bitset_clr(&sg->thr_wanted, thr, __ATOMIC_RELAXED);
for (p = 0; p < ODP_SCHED_PRIO_NUM; p++) {
if (sg->xcount[p] != 0) {
+ sched_scalable_thread_state_t *state;
+
+ state = &global->thread_state[thr];
+
/* This priority level has ODP queues
* Notify the thread about membership in
* this group/priority
*/
if (join_leave == SCHED_GROUP_JOIN)
- atom_bitset_set(
- &thread_state[thr].sg_wanted[p],
- sgi,
- __ATOMIC_RELEASE);
+ atom_bitset_set(&state->sg_wanted[p],
+ sgi, __ATOMIC_RELEASE);
else
- atom_bitset_clr(
- &thread_state[thr].sg_wanted[p],
- sgi,
- __ATOMIC_RELEASE);
- __atomic_store_n(&thread_state[thr].sg_sem,
- 1,
+ atom_bitset_clr(&state->sg_wanted[p],
+ sgi, __ATOMIC_RELEASE);
+ __atomic_store_n(&state->sg_sem, 1,
__ATOMIC_RELEASE);
}
}
@@ -1447,10 +1465,10 @@ static odp_schedule_group_t schedule_group_create(const char *name,
if (mask == NULL)
ODP_ABORT("mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
/* Allocate a scheduler group */
- free = atom_bitset_load(&sg_free, __ATOMIC_RELAXED);
+ free = atom_bitset_load(&global->sg_free, __ATOMIC_RELAXED);
do {
/* All sched_groups in use */
if (bitset_is_null(free))
@@ -1460,7 +1478,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
/* All sched_groups in use */
if (sgi >= MAX_SCHED_GROUP)
goto no_free_sched_group;
- } while (!atom_bitset_cmpxchg(&sg_free,
+ } while (!atom_bitset_cmpxchg(&global->sg_free,
&free,
bitset_clr(free, sgi),
true,
@@ -1477,12 +1495,13 @@ static odp_schedule_group_t schedule_group_create(const char *name,
size = sizeof(sched_group_t) +
(ODP_SCHED_PRIO_NUM * xfactor - 1) * sizeof(sched_queue_t);
- sg = (sched_group_t *)shm_pool_alloc_align(sched_shm_pool, size);
+ sg = (sched_group_t *)shm_pool_alloc_align(global->sched_shm_pool,
+ size);
if (sg == NULL)
goto shm_pool_alloc_failed;
strncpy(sg->name, name ? name : "", ODP_SCHED_GROUP_NAME_LEN - 1);
- sg_vec[sgi] = sg;
+ global->sg_vec[sgi] = sg;
memset(sg->thr_actual, 0, sizeof(sg->thr_actual));
sg->thr_wanted = bitset_null();
sg->xfactor = xfactor;
@@ -1494,16 +1513,16 @@ static odp_schedule_group_t schedule_group_create(const char *name,
if (odp_thrmask_count(mask) != 0)
schedule_group_update(sg, sgi, mask, SCHED_GROUP_JOIN);
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return (odp_schedule_group_t)(sgi);
shm_pool_alloc_failed:
/* Free the allocated group index */
- atom_bitset_set(&sg_free, sgi, __ATOMIC_RELAXED);
+ atom_bitset_set(&global->sg_free, sgi, __ATOMIC_RELAXED);
no_free_sched_group:
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return ODP_SCHED_GROUP_INVALID;
}
@@ -1529,15 +1548,15 @@ static int schedule_group_destroy(odp_schedule_group_t group)
sched_ts->sg_sem = 0;
update_sg_membership(sched_ts);
}
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group;
- if (bitset_is_set(sg_free, sgi)) {
+ if (bitset_is_set(global->sg_free, sgi)) {
ret = -1;
goto group_not_found;
}
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
/* First ensure all threads have processed group_join/group_leave
* requests.
*/
@@ -1570,18 +1589,18 @@ static int schedule_group_destroy(odp_schedule_group_t group)
}
}
- _odp_ishm_pool_free(sched_shm_pool, sg);
- sg_vec[sgi] = NULL;
- atom_bitset_set(&sg_free, sgi, __ATOMIC_RELEASE);
+ _odp_ishm_pool_free(global->sched_shm_pool, sg);
+ global->sg_vec[sgi] = NULL;
+ atom_bitset_set(&global->sg_free, sgi, __ATOMIC_RELEASE);
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
thrd_q_present_in_group:
group_not_found:
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group:
@@ -1599,19 +1618,19 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
group = ODP_SCHED_GROUP_INVALID;
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
/* Scan through the schedule group array */
for (sgi = 0; sgi < MAX_SCHED_GROUP; sgi++) {
- if ((sg_vec[sgi] != NULL) &&
- (strncmp(name, sg_vec[sgi]->name,
+ if ((global->sg_vec[sgi] != NULL) &&
+ (strncmp(name, global->sg_vec[sgi]->name,
ODP_SCHED_GROUP_NAME_LEN) == 0)) {
group = (odp_schedule_group_t)sgi;
break;
}
}
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return group;
}
@@ -1630,18 +1649,18 @@ static int schedule_group_join(odp_schedule_group_t group,
if (mask == NULL)
ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group;
- if (bitset_is_set(sg_free, sgi)) {
- odp_spinlock_unlock(&sched_grp_lock);
+ if (bitset_is_set(global->sg_free, sgi)) {
+ odp_spinlock_unlock(&global->sched_grp_lock);
return -1;
}
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
ret = schedule_group_update(sg, sgi, mask, SCHED_GROUP_JOIN);
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
}
@@ -1662,24 +1681,24 @@ static int schedule_group_leave(odp_schedule_group_t group,
if (mask == NULL)
ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group;
- if (bitset_is_set(sg_free, sgi)) {
+ if (bitset_is_set(global->sg_free, sgi)) {
ret = -1;
goto group_not_found;
}
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
ret = schedule_group_update(sg, sgi, mask, SCHED_GROUP_LEAVE);
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
group_not_found:
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group:
return ret;
@@ -1701,23 +1720,23 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
if (mask == NULL)
ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group;
- if (bitset_is_set(sg_free, sgi)) {
+ if (bitset_is_set(global->sg_free, sgi)) {
ret = -1;
goto group_not_found;
}
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
ret = _schedule_group_thrmask(sg, mask);
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
group_not_found:
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group:
return ret;
@@ -1739,26 +1758,26 @@ static int schedule_group_info(odp_schedule_group_t group,
if (info == NULL)
ODP_ABORT("name or mask is NULL\n");
- odp_spinlock_lock(&sched_grp_lock);
+ odp_spinlock_lock(&global->sched_grp_lock);
sgi = (uint32_t)group;
- if (bitset_is_set(sg_free, sgi)) {
+ if (bitset_is_set(global->sg_free, sgi)) {
ret = -1;
goto group_not_found;
}
- sg = sg_vec[sgi];
+ sg = global->sg_vec[sgi];
ret = _schedule_group_thrmask(sg, &info->thrmask);
info->name = sg->name;
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
return ret;
group_not_found:
- odp_spinlock_unlock(&sched_grp_lock);
+ odp_spinlock_unlock(&global->sched_grp_lock);
invalid_group:
return ret;
@@ -1770,52 +1789,63 @@ static int schedule_init_global(void)
odp_schedule_group_t tmp_all;
odp_schedule_group_t tmp_wrkr;
odp_schedule_group_t tmp_ctrl;
+ odp_shm_t shm;
+ _odp_ishm_pool_t *pool;
uint32_t bits;
uint32_t pool_size;
uint64_t min_alloc;
uint64_t max_alloc;
- /* Attach to the pool if it exists */
- sched_shm_pool = _odp_ishm_pool_lookup("sched_shm_pool");
- if (sched_shm_pool == NULL) {
- /* Add storage required for sched groups. Assume worst case
- * xfactor of MAXTHREADS.
- */
- pool_size = (sizeof(sched_group_t) +
- (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) *
- sizeof(sched_queue_t)) * MAX_SCHED_GROUP;
- /* Choose min_alloc and max_alloc such that slab allocator
- * is selected.
- */
- min_alloc = sizeof(sched_group_t) +
- (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) *
- sizeof(sched_queue_t);
- max_alloc = min_alloc;
- sched_shm_pool = _odp_ishm_pool_create("sched_shm_pool",
- pool_size,
- min_alloc, max_alloc,
- _ODP_ISHM_SINGLE_VA);
- if (sched_shm_pool == NULL) {
- ODP_ERR("Failed to allocate shared memory pool "
- "for sched\n");
- goto failed_sched_shm_pool_create;
- }
+ shm = odp_shm_reserve("_odp_sched_scalable",
+ sizeof(sched_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ ODP_ERR("Schedule init: Shm reserve failed.\n");
+ return -1;
}
- odp_spinlock_init(&sched_grp_lock);
+ memset(global, 0, sizeof(sched_global_t));
+ global->shm = shm;
+
+ /* Add storage required for sched groups. Assume worst case
+ * xfactor of MAXTHREADS.
+ */
+ pool_size = (sizeof(sched_group_t) +
+ (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) *
+ sizeof(sched_queue_t)) * MAX_SCHED_GROUP;
+ /* Choose min_alloc and max_alloc such that slab allocator
+ * is selected.
+ */
+ min_alloc = sizeof(sched_group_t) +
+ (ODP_SCHED_PRIO_NUM * MAXTHREADS - 1) *
+ sizeof(sched_queue_t);
+ max_alloc = min_alloc;
+ pool = _odp_ishm_pool_create("sched_shm_pool", pool_size,
+ min_alloc, max_alloc,
+ _ODP_ISHM_SINGLE_VA);
+ if (pool == NULL) {
+ ODP_ERR("Failed to allocate shared memory pool "
+ "for sched\n");
+ goto failed_sched_shm_pool_create;
+ }
+ global->sched_shm_pool = pool;
+
+ odp_spinlock_init(&global->sched_grp_lock);
bits = MAX_SCHED_GROUP;
- if (MAX_SCHED_GROUP == sizeof(sg_free) * CHAR_BIT)
- sg_free = ~0;
+ if (MAX_SCHED_GROUP == sizeof(global->sg_free) * CHAR_BIT)
+ global->sg_free = ~0;
else
- sg_free = (1 << bits) - 1;
+ global->sg_free = (1 << bits) - 1;
for (uint32_t i = 0; i < MAX_SCHED_GROUP; i++)
- sg_vec[i] = NULL;
+ global->sg_vec[i] = NULL;
for (uint32_t i = 0; i < MAXTHREADS; i++) {
- thread_state[i].sg_sem = 0;
+ global->thread_state[i].sg_sem = 0;
for (uint32_t j = 0; j < ODP_SCHED_PRIO_NUM; j++)
- thread_state[i].sg_wanted[j] = bitset_null();
+ global->thread_state[i].sg_wanted[j] = bitset_null();
}
/* Create sched groups for default GROUP_ALL, GROUP_WORKER and
@@ -1871,7 +1901,12 @@ static int schedule_term_global(void)
if (odp_schedule_group_destroy(ODP_SCHED_GROUP_CONTROL) != 0)
ODP_ERR("Failed to destroy ODP_SCHED_GROUP_CONTROL\n");
- _odp_ishm_pool_destroy(sched_shm_pool);
+ _odp_ishm_pool_destroy(global->sched_shm_pool);
+
+ if (odp_shm_free(global->shm)) {
+ ODP_ERR("Shm free failed for scalable scheduler");
+ return -1;
+ }
return 0;
}
@@ -1959,6 +1994,19 @@ static int schedule_term_local(void)
return rc;
}
+static void schedule_config_init(odp_schedule_config_t *config)
+{
+ config->num_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ config->queue_size = 0; /* FIXME ? */
+}
+
+static int schedule_config(const odp_schedule_config_t *config)
+{
+ (void)config;
+
+ return 0;
+}
+
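Application-side sketch of the configuration step these hooks implement (assumes global and local init are already done; per the API spec, passing NULL to odp_schedule_config() should also select the defaults):

#include <odp_api.h>

static int configure_scheduler(void)
{
	odp_schedule_config_t config;

	odp_schedule_config_init(&config);
	/* config.num_queues or config.queue_size could be
	 * lowered here before committing. */
	return odp_schedule_config(&config);
}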
static int num_grps(void)
{
return MAX_SCHED_GROUP;
@@ -2007,7 +2055,7 @@ static int sched_queue(uint32_t queue_index)
return 0;
}
-static int ord_enq_multi(void *handle, void *buf_hdr[], int num,
+static int ord_enq_multi(odp_queue_t handle, void *buf_hdr[], int num,
int *ret)
{
@@ -2072,6 +2120,19 @@ static uint32_t schedule_max_ordered_locks(void)
return CONFIG_QUEUE_MAX_ORD_LOCKS;
}
+static int schedule_capability(odp_schedule_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_schedule_capability_t));
+
+ capa->max_ordered_locks = schedule_max_ordered_locks();
+ capa->max_groups = num_grps();
+ capa->max_prios = schedule_num_prio();
+ capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ capa->max_queue_size = 0;
+
+ return 0;
+}
+
const schedule_fn_t schedule_scalable_fn = {
.pktio_start = pktio_start,
.thr_add = thr_add,
@@ -2092,13 +2153,21 @@ const schedule_fn_t schedule_scalable_fn = {
const schedule_api_t schedule_scalable_api = {
.schedule_wait_time = schedule_wait_time,
+ .schedule_capability = schedule_capability,
+ .schedule_config_init = schedule_config_init,
+ .schedule_config = schedule_config,
.schedule = schedule,
.schedule_multi = schedule_multi,
+ .schedule_multi_wait = schedule_multi_wait,
+ .schedule_multi_no_wait = schedule_multi_no_wait,
.schedule_pause = schedule_pause,
.schedule_resume = schedule_resume,
.schedule_release_atomic = schedule_release_atomic,
.schedule_release_ordered = schedule_release_ordered,
.schedule_prefetch = schedule_prefetch,
+ .schedule_min_prio = schedule_min_prio,
+ .schedule_max_prio = schedule_max_prio,
+ .schedule_default_prio = schedule_default_prio,
.schedule_num_prio = schedule_num_prio,
.schedule_group_create = schedule_group_create,
.schedule_group_destroy = schedule_group_destroy,
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index ae292051b..e7b378950 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -26,11 +26,12 @@
#define NUM_QUEUE ODP_CONFIG_QUEUES
#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
#define NUM_ORDERED_LOCKS 1
-#define NUM_PRIO 3
#define NUM_STATIC_GROUP 3
#define NUM_GROUP (NUM_STATIC_GROUP + 9)
#define NUM_PKTIN 32
-#define LOWEST_QUEUE_PRIO (NUM_PRIO - 2)
+#define NUM_PRIO 3
+#define MAX_API_PRIO (NUM_PRIO - 2)
+/* Lowest internal priority */
#define PKTIN_PRIO (NUM_PRIO - 1)
#define CMD_QUEUE 0
#define CMD_PKTIO 1
@@ -62,6 +63,7 @@ struct sched_cmd_s {
int init;
int num_pktin;
int pktin_idx[NUM_PKTIN];
+ odp_queue_t queue[NUM_PKTIN];
};
typedef struct ODP_ALIGNED_CACHE sched_cmd_t {
@@ -223,12 +225,21 @@ static int init_local(void)
static int term_global(void)
{
+ odp_event_t event;
int qi, ret = 0;
for (qi = 0; qi < NUM_QUEUE; qi++) {
+ int report = 1;
+
if (sched_global->queue_cmd[qi].s.init) {
- /* todo: dequeue until empty ? */
- sched_queue_destroy_finalize(qi);
+ while (sched_queue_deq(qi, &event, 1, 1) > 0) {
+ if (report) {
+ ODP_ERR("Queue not empty\n");
+ report = 0;
+ }
+ odp_event_free(event);
+ }
+
}
}
@@ -246,6 +257,19 @@ static int term_local(void)
return 0;
}
+static void schedule_config_init(odp_schedule_config_t *config)
+{
+ config->num_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ config->queue_size = queue_glb->config.max_queue_size;
+}
+
+static int schedule_config(const odp_schedule_config_t *config)
+{
+ (void)config;
+
+ return 0;
+}
+
static uint32_t max_ordered_locks(void)
{
return NUM_ORDERED_LOCKS;
@@ -351,14 +375,19 @@ static int init_queue(uint32_t qi, const odp_schedule_param_t *sched_param)
odp_schedule_group_t group = sched_param->group;
int prio = 0;
+ if (_odp_schedule_configured == 0) {
+ ODP_ERR("Scheduler has not been configured\n");
+ return -1;
+ }
+
if (group < 0 || group >= NUM_GROUP)
return -1;
if (!sched_group->s.group[group].allocated)
return -1;
- if (sched_param->prio > 0)
- prio = LOWEST_QUEUE_PRIO;
+ /* Inverted prio value (max = 0) vs API */
+ prio = MAX_API_PRIO - sched_param->prio;
sched_global->queue_cmd[qi].s.prio = prio;
sched_global->queue_cmd[qi].s.group = group;
@@ -392,9 +421,8 @@ static inline sched_cmd_t *rem_head(int group, int prio)
int pktio;
prio_queue = &sched_global->prio_queue[group][prio];
- ring_idx = ring_deq(&prio_queue->ring, RING_MASK);
- if (ring_idx == RING_EMPTY)
+ if (ring_deq(&prio_queue->ring, RING_MASK, &ring_idx) == 0)
return NULL;
pktio = index_from_ring_idx(&index, ring_idx);
@@ -415,10 +443,10 @@ static int sched_queue(uint32_t qi)
return 0;
}
-static int ord_enq_multi(void *q_int, void *buf_hdr[], int num,
+static int ord_enq_multi(odp_queue_t queue, void *buf_hdr[], int num,
int *ret)
{
- (void)q_int;
+ (void)queue;
(void)buf_hdr;
(void)num;
(void)ret;
@@ -430,7 +458,7 @@ static int ord_enq_multi(void *q_int, void *buf_hdr[], int num,
static void pktio_start(int pktio_index,
int num,
int pktin_idx[],
- odp_queue_t odpq[] ODP_UNUSED)
+ odp_queue_t queue[])
{
int i;
sched_cmd_t *cmd;
@@ -444,8 +472,10 @@ static void pktio_start(int pktio_index,
ODP_ABORT("Supports only %i pktin queues per interface\n",
NUM_PKTIN);
- for (i = 0; i < num; i++)
+ for (i = 0; i < num; i++) {
cmd->s.pktin_idx[i] = pktin_idx[i];
+ cmd->s.queue[i] = queue[i];
+ }
cmd->s.num_pktin = num;
@@ -499,6 +529,26 @@ static uint64_t schedule_wait_time(uint64_t ns)
return ns;
}
+static inline void enqueue_packets(odp_queue_t queue,
+ odp_buffer_hdr_t *hdr_tbl[], int num_pkt)
+{
+ int num_enq, num_drop;
+
+ num_enq = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl,
+ num_pkt);
+
+ if (num_enq < 0)
+ num_enq = 0;
+
+ if (num_enq < num_pkt) {
+ num_drop = num_pkt - num_enq;
+
+ ODP_DBG("Dropped %i packets\n", num_drop);
+ odp_packet_free_multi((odp_packet_t *)&hdr_tbl[num_enq],
+ num_drop);
+ }
+}
+
static int schedule_multi(odp_queue_t *from, uint64_t wait,
odp_event_t events[], int max_events ODP_UNUSED)
{
@@ -521,17 +571,36 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
uint32_t qi;
int num;
- timer_run();
-
cmd = sched_cmd();
if (cmd && cmd->s.type == CMD_PKTIO) {
- if (sched_cb_pktin_poll_old(cmd->s.index,
- cmd->s.num_pktin,
- cmd->s.pktin_idx)) {
- /* Pktio stopped or closed. */
- sched_cb_pktio_stop_finalize(cmd->s.index);
- } else {
+ odp_buffer_hdr_t *hdr_tbl[CONFIG_BURST_SIZE];
+ int i;
+ int num_pkt = 0;
+ int max_num = CONFIG_BURST_SIZE;
+ int pktio_idx = cmd->s.index;
+ int num_pktin = cmd->s.num_pktin;
+ int *pktin_idx = cmd->s.pktin_idx;
+ odp_queue_t *queue = cmd->s.queue;
+
+ for (i = 0; i < num_pktin; i++) {
+ num_pkt = sched_cb_pktin_poll(pktio_idx,
+ pktin_idx[i],
+ hdr_tbl, max_num);
+
+ if (num_pkt < 0) {
+ /* Pktio stopped or closed. */
+ sched_cb_pktio_stop_finalize(pktio_idx);
+ break;
+ }
+
+ if (num_pkt == 0)
+ continue;
+
+ enqueue_packets(queue[i], hdr_tbl, num_pkt);
+ }
+
+ if (num_pkt >= 0) {
/* Continue polling pktio. */
add_tail(cmd);
}
@@ -541,6 +610,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
}
if (cmd == NULL) {
+ timer_run(1);
/* All priority queues are empty */
if (wait == ODP_SCHED_NO_WAIT)
return 0;
@@ -564,28 +634,23 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
qi = cmd->s.index;
num = sched_queue_deq(qi, events, 1, 1);
- if (num > 0) {
- sched_local.cmd = cmd;
+ if (num <= 0) {
+ timer_run(1);
+ /* Destroyed or empty queue. Remove empty queue from
+			 * scheduling. A dequeue operation on an already
+			 * empty queue moves it to NOTSCHED state and
+ * sched_queue() will be called on next enqueue. */
+ continue;
+ }
- if (from)
- *from = queue_from_index(qi);
+ timer_run(2);
- return num;
- }
+ sched_local.cmd = cmd;
- if (num < 0) {
- /* Destroyed queue */
- sched_queue_destroy_finalize(qi);
- continue;
- }
+ if (from)
+ *from = queue_from_index(qi);
- if (num == 0) {
- /* Remove empty queue from scheduling. A dequeue
- * operation to on an already empty queue moves
- * it to NOTSCHED state and sched_queue() will
- * be called on next enqueue. */
- continue;
- }
+ return num;
}
}
@@ -599,6 +664,18 @@ static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
return ODP_EVENT_INVALID;
}
+static int schedule_multi_wait(odp_queue_t *from, odp_event_t events[],
+ int max_num)
+{
+ return schedule_multi(from, ODP_SCHED_WAIT, events, max_num);
+}
+
+static int schedule_multi_no_wait(odp_queue_t *from, odp_event_t events[],
+ int max_num)
+{
+ return schedule_multi(from, ODP_SCHED_NO_WAIT, events, max_num);
+}
+
static void schedule_pause(void)
{
sched_local.pause = 1;
@@ -622,6 +699,21 @@ static void schedule_prefetch(int num)
(void)num;
}
+static int schedule_min_prio(void)
+{
+ return 0;
+}
+
+static int schedule_max_prio(void)
+{
+ return MAX_API_PRIO;
+}
+
+static int schedule_default_prio(void)
+{
+ return schedule_max_prio() / 2;
+}
+
static int schedule_num_prio(void)
{
/* Lowest priority is used for pktin polling and is internal
@@ -851,9 +943,21 @@ static void order_unlock(void)
{
}
+static int schedule_capability(odp_schedule_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_schedule_capability_t));
+
+ capa->max_ordered_locks = max_ordered_locks();
+ capa->max_groups = num_grps();
+ capa->max_prios = schedule_num_prio();
+ capa->max_queues = ODP_CONFIG_QUEUES - NUM_INTERNAL_QUEUES;
+ capa->max_queue_size = queue_glb->config.max_queue_size;
+
+ return 0;
+}
+
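A matching application-side sketch: query the capability before configuring, so the requested queue counts and sizes stay within the limits reported above (assumes ODP is initialized):

#include <odp_api.h>
#include <stdio.h>

static void print_sched_capa(void)
{
	odp_schedule_capability_t capa;

	if (odp_schedule_capability(&capa) == 0)
		printf("queues %u, groups %u, prios %u, queue size %u\n",
		       capa.max_queues, capa.max_groups,
		       capa.max_prios, capa.max_queue_size);
}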
/* Fill in scheduler interface */
const schedule_fn_t schedule_sp_fn = {
- .status_sync = 0,
.pktio_start = pktio_start,
.thr_add = thr_add,
.thr_rem = thr_rem,
@@ -868,21 +972,27 @@ const schedule_fn_t schedule_sp_fn = {
.term_local = term_local,
.order_lock = order_lock,
.order_unlock = order_unlock,
- .max_ordered_locks = max_ordered_locks,
- .unsched_queue = NULL,
- .save_context = NULL
+ .max_ordered_locks = max_ordered_locks
};
/* Fill in scheduler API calls */
const schedule_api_t schedule_sp_api = {
.schedule_wait_time = schedule_wait_time,
+ .schedule_capability = schedule_capability,
+ .schedule_config_init = schedule_config_init,
+ .schedule_config = schedule_config,
.schedule = schedule,
.schedule_multi = schedule_multi,
+ .schedule_multi_wait = schedule_multi_wait,
+ .schedule_multi_no_wait = schedule_multi_no_wait,
.schedule_pause = schedule_pause,
.schedule_resume = schedule_resume,
.schedule_release_atomic = schedule_release_atomic,
.schedule_release_ordered = schedule_release_ordered,
.schedule_prefetch = schedule_prefetch,
+ .schedule_min_prio = schedule_min_prio,
+ .schedule_max_prio = schedule_max_prio,
+ .schedule_default_prio = schedule_default_prio,
.schedule_num_prio = schedule_num_prio,
.schedule_group_create = schedule_group_create,
.schedule_group_destroy = schedule_group_destroy,
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index 7bd323c93..b1bbdeb7b 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -11,7 +11,7 @@
#include <odp/api/std_types.h>
#include <odp/api/shared_memory.h>
#include <odp/api/plat/strong_types.h>
-#include <odp_ishm_internal.h>
+#include <odp_shm_internal.h>
#include <odp_init_internal.h>
#include <odp_global_data.h>
#include <string.h>
@@ -44,12 +44,28 @@ static uint32_t get_ishm_flags(uint32_t flags)
return f;
}
+odp_shm_t _odp_shm_reserve(const char *name, uint64_t size, uint32_t align,
+ uint32_t flags, uint32_t extra_flags)
+{
+ int block_index;
+ uint32_t flgs = 0; /* internal ishm flags */
+
+ flgs = get_ishm_flags(flags);
+ flgs |= extra_flags;
+
+ block_index = _odp_ishm_reserve(name, size, -1, align, 0, flgs, flags);
+ if (block_index >= 0)
+ return to_handle(block_index);
+ else
+ return ODP_SHM_INVALID;
+}
+
int odp_shm_capability(odp_shm_capability_t *capa)
{
memset(capa, 0, sizeof(odp_shm_capability_t));
capa->max_blocks = ODP_CONFIG_SHM_BLOCKS;
- capa->max_size = odp_global_data.shm_max_size;
+ capa->max_size = odp_global_ro.shm_max_size;
capa->max_align = 0;
return 0;
@@ -58,16 +74,7 @@ int odp_shm_capability(odp_shm_capability_t *capa)
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
uint32_t flags)
{
- int block_index;
- int flgs = 0; /* internal ishm flags */
-
- flgs = get_ishm_flags(flags);
-
- block_index = _odp_ishm_reserve(name, size, -1, align, flgs, flags);
- if (block_index >= 0)
- return to_handle(block_index);
- else
- return ODP_SHM_INVALID;
+ return _odp_shm_reserve(name, size, align, flags, 0);
}
odp_shm_t odp_shm_import(const char *remote_name,
@@ -115,7 +122,7 @@ int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info)
void odp_shm_print_all(void)
{
- _odp_ishm_status("Memory allocation status:");
+ _odp_ishm_status("ODP shared memory allocation status:");
}
void odp_shm_print(odp_shm_t shm)
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index bca02ba14..a7a78d27f 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -47,7 +47,7 @@
*/
static int sysconf_cpu_count(void)
{
- return odp_global_data.num_cpus_installed;
+ return odp_global_ro.num_cpus_installed;
}
#if defined __x86_64__ || defined __i386__ || defined __OCTEON__ || \
@@ -96,6 +96,8 @@ static uint64_t default_huge_page_size(void)
FILE *file;
file = fopen("/proc/meminfo", "rt");
+ if (!file)
+ return 0;
while (fgets(str, sizeof(str), file) != NULL) {
if (sscanf(str, "Hugepagesize: %8lu kB", &sz) == 1) {
@@ -339,35 +341,34 @@ int odp_system_info_init(void)
int i;
FILE *file;
- memset(&odp_global_data.system_info, 0, sizeof(system_info_t));
+ memset(&odp_global_ro.system_info, 0, sizeof(system_info_t));
- odp_global_data.system_info.page_size = ODP_PAGE_SIZE;
+ odp_global_ro.system_info.page_size = ODP_PAGE_SIZE;
/* By default, read max frequency from a cpufreq file */
for (i = 0; i < CONFIG_NUM_CPU; i++) {
uint64_t cpu_hz_max = read_cpufreq("cpuinfo_max_freq", i);
if (cpu_hz_max)
- odp_global_data.system_info.cpu_hz_max[i] = cpu_hz_max;
+ odp_global_ro.system_info.cpu_hz_max[i] = cpu_hz_max;
}
file = fopen("/proc/cpuinfo", "rt");
- if (file == NULL) {
- ODP_ERR("Failed to open /proc/cpuinfo\n");
- return -1;
+ if (file != NULL) {
+ /* Read CPU model, and set max cpu frequency
+ * if not set from cpufreq. */
+ cpuinfo_parser(file, &odp_global_ro.system_info);
+ fclose(file);
+ } else {
+ _odp_dummy_cpuinfo(&odp_global_ro.system_info);
}
- /* Read CPU model, and set max cpu frequency if not set from cpufreq. */
- cpuinfo_parser(file, &odp_global_data.system_info);
-
- fclose(file);
-
- if (systemcpu(&odp_global_data.system_info)) {
+ if (systemcpu(&odp_global_ro.system_info)) {
ODP_ERR("systemcpu failed\n");
return -1;
}
- system_hp(&odp_global_data.hugepage_info);
+ system_hp(&odp_global_ro.hugepage_info);
return 0;
}
@@ -377,7 +378,7 @@ int odp_system_info_init(void)
*/
int odp_system_info_term(void)
{
- free(odp_global_data.hugepage_info.default_huge_page_dir);
+ free(odp_global_ro.hugepage_info.default_huge_page_dir);
return 0;
}
@@ -417,14 +418,14 @@ uint64_t odp_cpu_hz_max(void)
uint64_t odp_cpu_hz_max_id(int id)
{
if (id >= 0 && id < CONFIG_NUM_CPU)
- return odp_global_data.system_info.cpu_hz_max[id];
+ return odp_global_ro.system_info.cpu_hz_max[id];
else
return 0;
}
uint64_t odp_sys_huge_page_size(void)
{
- return odp_global_data.hugepage_info.default_huge_page_size;
+ return odp_global_ro.hugepage_info.default_huge_page_size;
}
static int pagesz_compare(const void *pagesz1, const void *pagesz2)
@@ -442,8 +443,9 @@ int odp_sys_huge_page_size_all(uint64_t size[], int num)
/* See: kernel.org: hugetlbpage.txt */
dir = opendir("/sys/kernel/mm/hugepages");
if (!dir) {
- ODP_ERR("Failed to open huge page directory\n");
- return -1;
+ ODP_PRINT("Failed to open /sys/kernel/mm/hugepages: %s\n",
+ strerror(errno));
+ return 0;
}
while ((entry = readdir(dir)) != NULL) {
@@ -465,7 +467,7 @@ int odp_sys_huge_page_size_all(uint64_t size[], int num)
uint64_t odp_sys_page_size(void)
{
- return odp_global_data.system_info.page_size;
+ return odp_global_ro.system_info.page_size;
}
const char *odp_cpu_model_str(void)
@@ -476,19 +478,19 @@ const char *odp_cpu_model_str(void)
const char *odp_cpu_model_str_id(int id)
{
if (id >= 0 && id < CONFIG_NUM_CPU)
- return odp_global_data.system_info.model_str[id];
+ return odp_global_ro.system_info.model_str[id];
else
return NULL;
}
int odp_sys_cache_line_size(void)
{
- return odp_global_data.system_info.cache_line_size;
+ return odp_global_ro.system_info.cache_line_size;
}
int odp_cpu_count(void)
{
- return odp_global_data.system_info.cpu_count;
+ return odp_global_ro.system_info.cpu_count;
}
void odp_sys_info_print(void)
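Note: odp_sys_huge_page_size_all() now reports zero sizes (instead of failing with -1) when /sys/kernel/mm/hugepages cannot be opened, so a system without huge pages is treated as a valid configuration. A tolerant caller might look like this (sketch; the array size is arbitrary):

#include <inttypes.h>
#include <stdio.h>
#include <odp_api.h>

static void print_huge_page_sizes(void)
{
	uint64_t size[8];
	int num = odp_sys_huge_page_size_all(size, 8);

	if (num == 0) {
		printf("No huge pages configured\n");
		return;
	}

	/* num may exceed the array size if more sizes exist */
	for (int i = 0; i < num && i < 8; i++)
		printf("Huge page size: %" PRIu64 " bytes\n", size[i]);
}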
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index a5f62ec7e..b30174dde 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -54,7 +54,7 @@ int odp_thread_init_global(void)
{
odp_shm_t shm;
- shm = odp_shm_reserve("odp_thread_globals",
+ shm = odp_shm_reserve("_odp_thread_globals",
sizeof(thread_globals_t),
ODP_CACHE_LINE_SIZE, 0);
@@ -73,9 +73,9 @@ int odp_thread_term_global(void)
{
int ret;
- ret = odp_shm_free(odp_shm_lookup("odp_thread_globals"));
+ ret = odp_shm_free(odp_shm_lookup("_odp_thread_globals"));
if (ret < 0)
- ODP_ERR("shm free failed for odp_thread_globals");
+ ODP_ERR("shm free failed for _odp_thread_globals");
return ret;
}
@@ -136,6 +136,20 @@ int odp_thread_init_local(odp_thread_type_t type)
{
int id;
int cpu;
+ int group_all, group_worker, group_control;
+
+ group_all = 1;
+ group_worker = 1;
+ group_control = 1;
+
+ if (sched_fn->get_config) {
+ schedule_config_t schedule_config;
+
+ sched_fn->get_config(&schedule_config);
+ group_all = schedule_config.group_enable.all;
+ group_worker = schedule_config.group_enable.worker;
+ group_control = schedule_config.group_enable.control;
+ }
odp_spinlock_lock(&thread_globals->lock);
id = alloc_id(type);
@@ -159,11 +173,13 @@ int odp_thread_init_local(odp_thread_type_t type)
_odp_this_thread = &thread_globals->thr[id];
- sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
+ if (group_all)
+ sched_fn->thr_add(ODP_SCHED_GROUP_ALL, id);
- if (type == ODP_THREAD_WORKER)
+ if (type == ODP_THREAD_WORKER && group_worker)
sched_fn->thr_add(ODP_SCHED_GROUP_WORKER, id);
- else if (type == ODP_THREAD_CONTROL)
+
+ if (type == ODP_THREAD_CONTROL && group_control)
sched_fn->thr_add(ODP_SCHED_GROUP_CONTROL, id);
return 0;
@@ -172,14 +188,30 @@ int odp_thread_init_local(odp_thread_type_t type)
int odp_thread_term_local(void)
{
int num;
+ int group_all, group_worker, group_control;
int id = _odp_this_thread->thr;
odp_thread_type_t type = _odp_this_thread->type;
- sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+ group_all = 1;
+ group_worker = 1;
+ group_control = 1;
- if (type == ODP_THREAD_WORKER)
+ if (sched_fn->get_config) {
+ schedule_config_t schedule_config;
+
+ sched_fn->get_config(&schedule_config);
+ group_all = schedule_config.group_enable.all;
+ group_worker = schedule_config.group_enable.worker;
+ group_control = schedule_config.group_enable.control;
+ }
+
+ if (group_all)
+ sched_fn->thr_rem(ODP_SCHED_GROUP_ALL, id);
+
+ if (type == ODP_THREAD_WORKER && group_worker)
sched_fn->thr_rem(ODP_SCHED_GROUP_WORKER, id);
- else if (type == ODP_THREAD_CONTROL)
+
+ if (type == ODP_THREAD_CONTROL && group_control)
sched_fn->thr_rem(ODP_SCHED_GROUP_CONTROL, id);
odp_spinlock_lock(&thread_globals->lock);
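Note: thread init/term now consult the scheduler's configuration (when it implements get_config()) before joining or leaving the predefined groups, so a scheduler that disables ODP_SCHED_GROUP_ALL/_WORKER/_CONTROL skips that bookkeeping. The gating pattern, factored into a helper for clarity (hypothetical helper; sched_fn and schedule_config_t are the internal types used above):

/* Defaults to all predefined groups enabled when the scheduler
 * does not provide a get_config() hook. */
static void predef_group_flags(int *all, int *worker, int *control)
{
	*all = 1;
	*worker = 1;
	*control = 1;

	if (sched_fn->get_config) {
		schedule_config_t config;

		sched_fn->get_config(&config);
		*all = config.group_enable.all;
		*worker = config.group_enable.worker;
		*control = config.group_enable.control;
	}
}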
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 95cf1b4d6..6446b1c91 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -55,7 +55,11 @@
#include <odp/api/time.h>
#include <odp/api/plat/time_inlines.h>
#include <odp/api/timer.h>
+#include <odp_libconfig_internal.h>
+#include <odp_queue_if.h>
#include <odp_timer_internal.h>
+#include <odp/api/plat/queue_inlines.h>
+#include <odp_global_data.h>
/* Inlined API functions */
#include <odp/api/plat/event_inlines.h>
@@ -66,22 +70,15 @@
* for checking the freshness of received timeouts */
#define TMO_INACTIVE ((uint64_t)0x8000000000000000)
-odp_bool_t inline_timers = false;
-
/******************************************************************************
* Mutual exclusion in the absence of CAS16
*****************************************************************************/
#ifndef ODP_ATOMIC_U128
#define NUM_LOCKS 1024
-static _odp_atomic_flag_t locks[NUM_LOCKS]; /* Multiple locks per cache line! */
-#define IDX2LOCK(idx) (&locks[(idx) % NUM_LOCKS])
+#define IDX2LOCK(idx) (&timer_global->locks[(idx) % NUM_LOCKS])
#endif
-/* Max timer resolution in nanoseconds */
-static uint64_t highest_res_ns = 500;
-static uint64_t min_res_ns = INT64_MAX;
-
/******************************************************************************
* Translation between timeout buffer and timeout header
*****************************************************************************/
@@ -209,13 +206,23 @@ typedef struct timer_pool_s {
typedef struct timer_global_t {
odp_ticketlock_t lock;
+ odp_shm_t shm;
+ /* Max timer resolution in nanoseconds */
+ uint64_t highest_res_ns;
+ uint64_t min_res_ns;
+ odp_time_t time_per_ratelimit_period;
int num_timer_pools;
uint8_t timer_pool_used[MAX_TIMER_POOLS];
timer_pool_t *timer_pool[MAX_TIMER_POOLS];
-
+#ifndef ODP_ATOMIC_U128
+ /* Multiple locks per cache line! */
+ _odp_atomic_flag_t ODP_ALIGNED_CACHE locks[NUM_LOCKS];
+#endif
+ odp_bool_t use_inline_timers;
+ int inline_poll_interval;
} timer_global_t;
-static timer_global_t timer_global;
+static timer_global_t *timer_global;
static inline timer_pool_t *timer_pool_from_hdl(odp_timer_pool_t hdl)
{
@@ -231,9 +238,9 @@ static inline timer_pool_t *handle_to_tp(odp_timer_t hdl)
{
uint32_t tp_idx = _odp_typeval(hdl) >> INDEX_BITS;
if (odp_likely(tp_idx < MAX_TIMER_POOLS)) {
- timer_pool_t *tp = timer_global.timer_pool[tp_idx];
+ timer_pool_t *tp = timer_global->timer_pool[tp_idx];
if (odp_likely(tp != NULL))
- return timer_global.timer_pool[tp_idx];
+ return timer_global->timer_pool[tp_idx];
}
ODP_ABORT("Invalid timer handle %#x\n", hdl);
}
@@ -241,7 +248,7 @@ static inline timer_pool_t *handle_to_tp(odp_timer_t hdl)
static inline uint32_t handle_to_idx(odp_timer_t hdl,
timer_pool_t *tp)
{
- uint32_t idx = _odp_typeval(hdl) & ((1U << INDEX_BITS) - 1U);
+ uint32_t idx = (_odp_typeval(hdl) & ((1U << INDEX_BITS) - 1U)) - 1;
__builtin_prefetch(&tp->tick_buf[idx], 0, 0);
if (odp_likely(idx < odp_atomic_load_u32(&tp->high_wm)))
return idx;
@@ -251,8 +258,9 @@ static inline uint32_t handle_to_idx(odp_timer_t hdl,
static inline odp_timer_t tp_idx_to_handle(timer_pool_t *tp,
uint32_t idx)
{
- ODP_ASSERT(idx < (1U << INDEX_BITS));
- return _odp_cast_scalar(odp_timer_t, (tp->tp_idx << INDEX_BITS) | idx);
+ ODP_ASSERT((idx + 1) < (1U << INDEX_BITS));
+ return _odp_cast_scalar(odp_timer_t, (tp->tp_idx << INDEX_BITS) |
+ (idx + 1));
}
/* Forward declarations */
@@ -264,36 +272,47 @@ static odp_timer_pool_t timer_pool_new(const char *name,
{
uint32_t i, tp_idx;
size_t sz0, sz1, sz2;
+ uint64_t tp_size;
+ uint32_t flags = ODP_SHM_SW_ONLY;
- odp_ticketlock_lock(&timer_global.lock);
+ if (odp_global_ro.shm_single_va)
+ flags |= ODP_SHM_SINGLE_VA;
- if (timer_global.num_timer_pools >= MAX_TIMER_POOLS) {
- odp_ticketlock_unlock(&timer_global.lock);
+ odp_ticketlock_lock(&timer_global->lock);
+
+ if (timer_global->num_timer_pools >= MAX_TIMER_POOLS) {
+ odp_ticketlock_unlock(&timer_global->lock);
+ ODP_DBG("No more free timer pools\n");
return ODP_TIMER_POOL_INVALID;
}
for (i = 0; i < MAX_TIMER_POOLS; i++) {
- if (timer_global.timer_pool_used[i] == 0) {
- timer_global.timer_pool_used[i] = 1;
+ if (timer_global->timer_pool_used[i] == 0) {
+ timer_global->timer_pool_used[i] = 1;
break;
}
}
tp_idx = i;
- timer_global.num_timer_pools++;
+ timer_global->num_timer_pools++;
- odp_ticketlock_unlock(&timer_global.lock);
+ odp_ticketlock_unlock(&timer_global->lock);
sz0 = ROUNDUP_CACHE_LINE(sizeof(timer_pool_t));
sz1 = ROUNDUP_CACHE_LINE(sizeof(tick_buf_t) * param->num_timers);
sz2 = ROUNDUP_CACHE_LINE(sizeof(_odp_timer_t) *
param->num_timers);
- odp_shm_t shm = odp_shm_reserve(name, sz0 + sz1 + sz2,
- ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ tp_size = sz0 + sz1 + sz2;
+
+ odp_shm_t shm = odp_shm_reserve(name, tp_size, ODP_CACHE_LINE_SIZE,
+ flags);
if (odp_unlikely(shm == ODP_SHM_INVALID))
ODP_ABORT("%s: timer pool shm-alloc(%zuKB) failed\n",
name, (sz0 + sz1 + sz2) / 1024);
timer_pool_t *tp = (timer_pool_t *)odp_shm_addr(shm);
+
+ memset(tp, 0, tp_size);
+
tp->prev_scan = odp_time_global();
tp->time_per_tick = odp_time_global_from_ns(param->res_ns);
odp_atomic_init_u64(&tp->cur_tick, 0);
@@ -331,10 +350,14 @@ static odp_timer_pool_t timer_pool_new(const char *name,
}
tp->tp_idx = tp_idx;
odp_spinlock_init(&tp->lock);
- odp_ticketlock_lock(&timer_global.lock);
- timer_global.timer_pool[tp_idx] = tp;
- odp_ticketlock_unlock(&timer_global.lock);
- if (!inline_timers) {
+ odp_ticketlock_lock(&timer_global->lock);
+ timer_global->timer_pool[tp_idx] = tp;
+
+ if (timer_global->num_timer_pools == 1)
+ odp_global_rw->inline_timers = timer_global->use_inline_timers;
+
+ odp_ticketlock_unlock(&timer_global->lock);
+ if (!odp_global_rw->inline_timers) {
if (tp->param.clk_src == ODP_CLOCK_CPU)
itimer_init(tp);
}
@@ -368,7 +391,7 @@ static void odp_timer_pool_del(timer_pool_t *tp)
odp_spinlock_lock(&tp->lock);
- if (!inline_timers) {
+ if (!odp_global_rw->inline_timers) {
/* Stop POSIX itimer signals */
if (tp->param.clk_src == ODP_CLOCK_CPU)
itimer_fini(tp);
@@ -385,12 +408,17 @@ static void odp_timer_pool_del(timer_pool_t *tp)
odp_spinlock_unlock(&tp->lock);
- odp_ticketlock_lock(&timer_global.lock);
+ odp_ticketlock_lock(&timer_global->lock);
shm = tp->shm;
- timer_global.timer_pool[tp->tp_idx] = NULL;
- timer_global.timer_pool_used[tp->tp_idx] = 0;
- timer_global.num_timer_pools--;
- odp_ticketlock_unlock(&timer_global.lock);
+ timer_global->timer_pool[tp->tp_idx] = NULL;
+ timer_global->timer_pool_used[tp->tp_idx] = 0;
+ timer_global->num_timer_pools--;
+
+ /* Disable inline timer polling */
+ if (timer_global->num_timer_pools == 0)
+ odp_global_rw->inline_timers = false;
+
+ odp_ticketlock_unlock(&timer_global->lock);
rc = odp_shm_free(shm);
@@ -421,6 +449,8 @@ static inline odp_timer_t timer_alloc(timer_pool_t *tp,
tp->num_alloc,
_ODP_MEMMODEL_RLS);
hdl = tp_idx_to_handle(tp, idx);
+ /* Add timer to queue */
+ queue_fn->timer_add(queue);
} else {
__odp_errno = ENFILE; /* Reusing file table overflow */
hdl = ODP_TIMER_INVALID;
@@ -440,6 +470,9 @@ static inline odp_buffer_t timer_free(timer_pool_t *tp, uint32_t idx)
* grab any timeout buffer */
odp_buffer_t old_buf = timer_set_unused(tp, idx);
+ /* Remove timer from queue */
+ queue_fn->timer_rem(tim->queue);
+
/* Destroy timer */
timer_fini(tim, &tp->tick_buf[idx]);
@@ -813,7 +846,7 @@ static unsigned process_timer_pools(void)
unsigned nexp = 0;
for (size_t i = 0; i < MAX_TIMER_POOLS; i++) {
- tp = timer_global.timer_pool[i];
+ tp = timer_global->timer_pool[i];
if (tp == NULL)
continue;
@@ -850,29 +883,27 @@ static unsigned process_timer_pools(void)
return nexp;
}
-static odp_time_t time_per_ratelimit_period;
-
-unsigned _timer_run(void)
+unsigned int _timer_run(int dec)
{
static __thread odp_time_t last_timer_run;
- static __thread unsigned timer_run_cnt =
- CONFIG_TIMER_RUN_RATELIMIT_ROUNDS;
+ static __thread int timer_run_cnt = 1;
odp_time_t now;
- if (timer_global.num_timer_pools == 0)
+ if (timer_global->num_timer_pools == 0)
return 0;
/* Rate limit how often this thread checks the timer pools. */
- if (CONFIG_TIMER_RUN_RATELIMIT_ROUNDS > 1) {
- if (--timer_run_cnt)
+ if (timer_global->inline_poll_interval > 1) {
+ timer_run_cnt -= dec;
+ if (timer_run_cnt > 0)
return 0;
- timer_run_cnt = CONFIG_TIMER_RUN_RATELIMIT_ROUNDS;
+ timer_run_cnt = timer_global->inline_poll_interval;
}
now = odp_time_global();
if (odp_time_cmp(odp_time_diff(now, last_timer_run),
- time_per_ratelimit_period) == -1)
+ timer_global->time_per_ratelimit_period) == -1)
return 0;
last_timer_run = now;
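Note: inline timer polling is now throttled twice: a per-thread round counter decremented by the caller-supplied weight dec lets only every Nth call through (N = timer.inline_poll_interval), and a wall-clock check derived from the finest pool resolution limits how often the pools are actually scanned. The counting half in isolation (sketch mirroring the counter logic in _timer_run() above):

/* With interval = 10 and dec = 1, only every 10th call proceeds;
 * a caller passing a larger dec reaches the poll proportionally
 * sooner. */
static inline int poll_due(int *cnt, int dec, int interval)
{
	*cnt -= dec;
	if (*cnt > 0)
		return 0;
	*cnt = interval;
	return 1;
}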
@@ -975,7 +1006,7 @@ static int timer_res_init(void)
/* Timer resolution start from 1ms */
res = ODP_TIME_MSEC_IN_NS;
/* Set initial value of timer_res */
- highest_res_ns = res;
+ timer_global->highest_res_ns = res;
sigemptyset(&sigset);
/* Add SIGUSR1 to sigset */
sigaddset(&sigset, SIGUSR1);
@@ -1008,13 +1039,13 @@ static int timer_res_init(void)
}
}
/* Set timer_res */
- highest_res_ns = res;
+ timer_global->highest_res_ns = res;
/* Test the next timer resolution candidate */
res /= 10;
}
timer_res_init_done:
- highest_res_ns *= TIMER_RES_ROUNDUP_FACTOR;
+ timer_global->highest_res_ns *= TIMER_RES_ROUNDUP_FACTOR;
if (timer_delete(timerid) != 0)
ODP_ABORT("timer_delete() returned error %s\n",
strerror(errno));
@@ -1089,7 +1120,7 @@ int odp_timer_capability(odp_timer_clk_src_t clk_src,
capa->max_pools_combined = MAX_TIMER_POOLS;
capa->max_pools = MAX_TIMER_POOLS;
capa->max_timers = 0;
- capa->highest_res_ns = highest_res_ns;
+ capa->highest_res_ns = timer_global->highest_res_ns;
} else {
ODP_ERR("ODP timer system doesn't support external clock source currently\n");
ret = -1;
@@ -1100,15 +1131,20 @@ int odp_timer_capability(odp_timer_clk_src_t clk_src,
odp_timer_pool_t odp_timer_pool_create(const char *name,
const odp_timer_pool_param_t *param)
{
- if (param->res_ns < highest_res_ns) {
+ if (odp_global_ro.init_param.not_used.feat.timer) {
+ ODP_ERR("Trying to use disabled ODP feature.\n");
+ return ODP_TIMER_POOL_INVALID;
+ }
+
+ if (param->res_ns < timer_global->highest_res_ns) {
__odp_errno = EINVAL;
return ODP_TIMER_POOL_INVALID;
}
- if (min_res_ns > param->res_ns) {
- min_res_ns = param->res_ns;
- time_per_ratelimit_period =
- odp_time_global_from_ns(min_res_ns / 2);
+ if (timer_global->min_res_ns > param->res_ns) {
+ timer_global->min_res_ns = param->res_ns;
+ timer_global->time_per_ratelimit_period =
+ odp_time_global_from_ns(timer_global->min_res_ns / 2);
}
return timer_pool_new(name, param);
@@ -1311,26 +1347,56 @@ void odp_timeout_free(odp_timeout_t tmo)
int odp_timer_init_global(const odp_init_t *params)
{
- memset(&timer_global, 0, sizeof(timer_global_t));
- odp_ticketlock_init(&timer_global.lock);
+ odp_shm_t shm;
+ const char *conf_str;
+ int val = 0;
+
+ shm = odp_shm_reserve("_odp_timer", sizeof(timer_global_t),
+ ODP_CACHE_LINE_SIZE, 0);
+
+ timer_global = odp_shm_addr(shm);
+
+ if (timer_global == NULL) {
+ ODP_ERR("Shm reserve failed for odp_timer\n");
+ return -1;
+ }
+
+ memset(timer_global, 0, sizeof(timer_global_t));
+ odp_ticketlock_init(&timer_global->lock);
+ timer_global->shm = shm;
+ timer_global->highest_res_ns = 500;
+ timer_global->min_res_ns = INT64_MAX;
#ifndef ODP_ATOMIC_U128
uint32_t i;
for (i = 0; i < NUM_LOCKS; i++)
- _odp_atomic_flag_clear(&locks[i]);
+ _odp_atomic_flag_clear(&timer_global->locks[i]);
#else
ODP_DBG("Using lock-less timer implementation\n");
#endif
+ conf_str = "timer.inline";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", conf_str);
+ odp_shm_free(shm);
+ return -1;
+ }
+ timer_global->use_inline_timers = val;
- if (params)
- inline_timers =
- !params->not_used.feat.schedule &&
- !params->not_used.feat.timer;
+ conf_str = "timer.inline_poll_interval";
+ if (!_odp_libconfig_lookup_int(conf_str, &val)) {
+ ODP_ERR("Config option '%s' not found.\n", conf_str);
+ odp_shm_free(shm);
+ return -1;
+ }
+ timer_global->inline_poll_interval = val;
- time_per_ratelimit_period =
- odp_time_global_from_ns(min_res_ns / 2);
+ if (params && params->not_used.feat.timer)
+ timer_global->use_inline_timers = false;
- if (!inline_timers) {
+ timer_global->time_per_ratelimit_period =
+ odp_time_global_from_ns(timer_global->min_res_ns / 2);
+
+ if (!timer_global->use_inline_timers) {
timer_res_init();
block_sigalarm();
}
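Note: the two options read above are expected under a timer section of the libconfig-based odp-linux-generic.conf. A sketch of the corresponding entries (values illustrative, not defaults taken from this patch):

timer: {
	# Poll timer pools from ODP threads instead of using a
	# POSIX per-pool itimer (0 = disabled, 1 = enabled)
	inline = 0

	# Number of rounds between inline timer pool polls
	inline_poll_interval = 10
}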
@@ -1340,5 +1406,10 @@ int odp_timer_init_global(const odp_init_t *params)
int odp_timer_term_global(void)
{
+ if (odp_shm_free(timer_global->shm)) {
+ ODP_ERR("Shm free failed for odp_timer\n");
+ return -1;
+ }
+
return 0;
}
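Note: timer handles are now biased by one: the low INDEX_BITS of a handle hold idx + 1, so the all-zero bit pattern can never name a real timer and stays reserved for the invalid handle. The round trip implied by tp_idx_to_handle() and handle_to_idx() above:

/* encode: handle = (tp_idx << INDEX_BITS) | (idx + 1)
 * decode: idx    = (handle & ((1u << INDEX_BITS) - 1u)) - 1
 *
 * e.g. with INDEX_BITS == 24 (illustrative), pool 2 / timer 0
 * encodes to 0x02000001; a raw zero handle decodes to no valid
 * timer and aborts in handle_to_tp(). */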
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index af403b4b6..23982dce0 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -46,7 +46,7 @@ static const pkt_desc_t EMPTY_PKT_DESC = { .word = 0 };
#define MAX_PRIORITIES ODP_TM_MAX_PRIORITIES
#define NUM_SHAPER_COLORS ODP_NUM_SHAPER_COLORS
-static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
+static const tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
[0] = {
[ODP_TM_SHAPER_GREEN] = { 0, DECR_BOTH },
[ODP_TM_SHAPER_YELLOW] = { 0, DECR_BOTH },
@@ -81,20 +81,66 @@ static tm_prop_t basic_prop_tbl[MAX_PRIORITIES][NUM_SHAPER_COLORS] = {
[ODP_TM_SHAPER_RED] = { 7, DELAY_PKT } }
};
-/* Profile tables. */
-static dynamic_tbl_t odp_tm_profile_tbls[ODP_TM_NUM_PROFILES];
-
-/* TM systems table. */
-static tm_system_t *odp_tm_systems[ODP_TM_MAX_NUM_SYSTEMS];
-
-static tm_system_group_t *tm_group_list;
-
-static odp_ticketlock_t tm_create_lock;
-static odp_ticketlock_t tm_profile_lock;
-static odp_barrier_t tm_first_enq;
-
-static int g_main_thread_cpu = -1;
-static int g_tm_cpu_num;
+#define MAX_SHAPER_PROFILES 128
+#define MAX_SCHED_PROFILES 128
+#define MAX_THRESHOLD_PROFILES 128
+#define MAX_WRED_PROFILES 128
+
+typedef struct {
+ struct {
+ tm_shaper_params_t profile[MAX_SHAPER_PROFILES];
+ odp_ticketlock_t lock;
+ } shaper;
+ struct {
+ tm_sched_params_t profile[MAX_SCHED_PROFILES];
+ odp_ticketlock_t lock;
+ } sched;
+ struct {
+ tm_queue_thresholds_t profile[MAX_THRESHOLD_PROFILES];
+ odp_ticketlock_t lock;
+ } threshold;
+ struct {
+ tm_wred_params_t profile[MAX_WRED_PROFILES];
+ odp_ticketlock_t lock;
+ } wred;
+} profile_tbl_t;
+
+typedef struct {
+ tm_system_t system[ODP_TM_MAX_NUM_SYSTEMS];
+
+ struct {
+ tm_system_group_t group[ODP_TM_MAX_NUM_SYSTEMS];
+ odp_ticketlock_t lock;
+ } system_group;
+ struct {
+ tm_queue_obj_t obj[ODP_TM_MAX_TM_QUEUES];
+ odp_ticketlock_t lock;
+ } queue_obj;
+ struct {
+ tm_node_obj_t obj[ODP_TM_MAX_NUM_TM_NODES];
+ odp_ticketlock_t lock;
+ } node_obj;
+
+ profile_tbl_t profile_tbl;
+
+ odp_ticketlock_t create_lock;
+ odp_ticketlock_t profile_lock;
+ odp_barrier_t first_enq;
+
+ int main_thread_cpu;
+ int cpu_num;
+
+ /* Service threads */
+ uint64_t busy_wait_counter;
+ odp_bool_t main_loop_running;
+ odp_atomic_u64_t atomic_request_cnt;
+ odp_atomic_u64_t currently_serving_cnt;
+ odp_atomic_u64_t atomic_done_cnt;
+
+ odp_shm_t shm;
+} tm_global_t;
+
+static tm_global_t *tm_glb;
/* Forward function declarations. */
static void tm_queue_cnts_decrement(tm_system_t *tm_system,
@@ -108,20 +154,30 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
tm_shaper_obj_t *timer_shaper,
pkt_desc_t *demoted_pkt_desc);
-static int queue_tm_reenq(void *queue, odp_buffer_hdr_t *buf_hdr)
+static inline tm_queue_obj_t *tm_qobj_from_index(uint32_t queue_id)
+{
+ return &tm_glb->queue_obj.obj[queue_id];
+}
+
+static inline tm_node_obj_t *tm_nobj_from_index(uint32_t node_id)
{
- odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE((uint8_t *)queue -
- offsetof(tm_queue_obj_t,
- tm_qentry));
+ return &tm_glb->node_obj.obj[node_id];
+}
+
+static int queue_tm_reenq(odp_queue_t queue, odp_buffer_hdr_t *buf_hdr)
+{
+ odp_tm_queue_t tm_queue = MAKE_ODP_TM_QUEUE(odp_queue_context(queue));
odp_packet_t pkt = packet_from_buf_hdr(buf_hdr);
return odp_tm_enq(tm_queue, pkt);
}
-static int queue_tm_reenq_multi(void *queue ODP_UNUSED,
- odp_buffer_hdr_t *buf[] ODP_UNUSED,
- int num ODP_UNUSED)
+static int queue_tm_reenq_multi(odp_queue_t queue, odp_buffer_hdr_t *buf[],
+ int num)
{
+ (void)queue;
+ (void)buf;
+ (void)num;
ODP_ABORT("Invalid call to queue_tm_reenq_multi()\n");
return 0;
}
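Note: TM queues are now ordinary odp_queue_t handles whose context pointer carries the owning tm_queue_obj_t, so queue_tm_reenq() recovers the odp_tm_queue_t via odp_queue_context() instead of doing pointer arithmetic on an embedded queue entry. The producer side of that mapping, at queue creation time, looks roughly like this (sketch; queue_obj as in odp_tm_queue_create() later in this patch):

odp_queue_t queue = odp_queue_create(NULL, NULL);

if (queue != ODP_QUEUE_INVALID)
	/* Store the TM queue object so that queue_tm_reenq()
	 * can recover it with odp_queue_context(). */
	odp_queue_context_set(queue, queue_obj,
			      sizeof(tm_queue_obj_t));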
@@ -218,74 +274,133 @@ static odp_bool_t tm_random_drop(tm_random_data_t *tm_random_data,
return drop;
}
-static void *alloc_entry_in_dynamic_tbl(dynamic_tbl_t *dynamic_tbl,
- uint32_t record_size,
- uint32_t *dynamic_idx_ptr)
+static void *alloc_entry_in_tbl(profile_tbl_t *profile_tbl,
+ profile_kind_t profile_kind,
+ uint32_t *idx)
{
- uint32_t num_allocd, new_num_allocd, idx;
- void **new_array_ptrs, *new_record;
+ uint32_t i;
- num_allocd = dynamic_tbl->num_allocd;
- if (num_allocd <= dynamic_tbl->num_used) {
- /* Need to alloc or realloc the array of ptrs. */
- if (num_allocd <= 32)
- new_num_allocd = 64;
- else
- new_num_allocd = 4 * num_allocd;
-
- new_array_ptrs = malloc(new_num_allocd * sizeof(void *));
- memset(new_array_ptrs, 0, new_num_allocd * sizeof(void *));
+ switch (profile_kind) {
+ case TM_SHAPER_PROFILE: {
+ tm_shaper_params_t *profile = NULL;
- if (dynamic_tbl->num_used != 0)
- memcpy(new_array_ptrs, dynamic_tbl->array_ptrs,
- dynamic_tbl->num_used * sizeof(void *));
+ odp_ticketlock_lock(&profile_tbl->shaper.lock);
+ for (i = 0; i < MAX_SHAPER_PROFILES; i++) {
+ if (profile_tbl->shaper.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
- if (dynamic_tbl->array_ptrs)
- free(dynamic_tbl->array_ptrs);
+ profile = &profile_tbl->shaper.profile[i];
+ memset(profile, 0, sizeof(tm_shaper_params_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->shaper.lock);
+ return profile;
+ }
+ case TM_SCHED_PROFILE: {
+ tm_sched_params_t *profile = NULL;
+
+ odp_ticketlock_lock(&profile_tbl->sched.lock);
+ for (i = 0; i < MAX_SCHED_PROFILES; i++) {
+ if (profile_tbl->sched.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
+
+ profile = &profile_tbl->sched.profile[i];
+ memset(profile, 0, sizeof(tm_sched_params_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->sched.lock);
+ return profile;
+ }
+ case TM_THRESHOLD_PROFILE: {
+ tm_queue_thresholds_t *profile = NULL;
+
+ odp_ticketlock_lock(&profile_tbl->threshold.lock);
+ for (i = 0; i < MAX_THRESHOLD_PROFILES; i++) {
+ if (profile_tbl->threshold.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
+
+ profile = &profile_tbl->threshold.profile[i];
+ memset(profile, 0, sizeof(tm_queue_thresholds_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->threshold.lock);
+ return profile;
+ }
+ case TM_WRED_PROFILE: {
+ tm_wred_params_t *profile = NULL;
+
+ odp_ticketlock_lock(&profile_tbl->wred.lock);
+ for (i = 0; i < MAX_WRED_PROFILES; i++) {
+ if (profile_tbl->wred.profile[i].status !=
+ TM_STATUS_FREE)
+ continue;
+
+ profile = &profile_tbl->wred.profile[i];
+ memset(profile, 0, sizeof(tm_wred_params_t));
+ profile->status = TM_STATUS_RESERVED;
+ *idx = i;
+ break;
+ }
+ odp_ticketlock_unlock(&profile_tbl->wred.lock);
+ return profile;
+ }
+ default:
+ ODP_ERR("Invalid TM profile\n");
+ return NULL;
- dynamic_tbl->num_allocd = new_num_allocd;
- dynamic_tbl->array_ptrs = new_array_ptrs;
}
+}
- idx = dynamic_tbl->num_used;
- new_record = malloc(record_size);
- memset(new_record, 0, record_size);
+static void free_tbl_entry(profile_tbl_t *profile_tbl,
+ profile_kind_t profile_kind,
+ uint32_t idx)
+{
+ switch (profile_kind) {
+ case TM_SHAPER_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->shaper.lock);
+ profile_tbl->shaper.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->shaper.lock);
+ return;
- dynamic_tbl->array_ptrs[idx] = new_record;
- dynamic_tbl->num_used++;
- if (dynamic_idx_ptr)
- *dynamic_idx_ptr = idx;
+ case TM_SCHED_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->sched.lock);
+ profile_tbl->sched.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->sched.lock);
+ return;
- return new_record;
-}
+ case TM_THRESHOLD_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->threshold.lock);
+ profile_tbl->threshold.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->threshold.lock);
+ return;
-static void free_dynamic_tbl_entry(dynamic_tbl_t *dynamic_tbl,
- uint32_t record_size ODP_UNUSED,
- uint32_t dynamic_idx)
-{
- void *record;
+ case TM_WRED_PROFILE:
+ odp_ticketlock_lock(&profile_tbl->wred.lock);
+ profile_tbl->wred.profile[idx].status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&profile_tbl->wred.lock);
+ return;
+
+ default:
+ ODP_ERR("Invalid TM profile\n");
+ return;
- record = dynamic_tbl->array_ptrs[dynamic_idx];
- if (record) {
- free(record);
- dynamic_tbl->array_ptrs[dynamic_idx] = NULL;
- dynamic_tbl->num_freed++;
- if (dynamic_tbl->num_freed == dynamic_tbl->num_used) {
- free(dynamic_tbl->array_ptrs);
- memset(dynamic_tbl, 0, sizeof(dynamic_tbl_t));
- }
}
}
-static input_work_queue_t *input_work_queue_create(void)
+static void input_work_queue_init(input_work_queue_t *input_work_queue)
{
- input_work_queue_t *input_work_queue;
-
- input_work_queue = malloc(sizeof(input_work_queue_t));
memset(input_work_queue, 0, sizeof(input_work_queue_t));
odp_atomic_init_u64(&input_work_queue->queue_cnt, 0);
odp_ticketlock_init(&input_work_queue->lock);
- return input_work_queue;
}
static void input_work_queue_destroy(input_work_queue_t *input_work_queue)
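Note: all TM bookkeeping that was previously malloc'd on demand (profile tables, systems, groups, queue and node objects) now lives in fixed-size arrays inside the shm-backed tm_global_t, each slot carrying a status word and each table guarded by a ticketlock. The allocation idiom repeated above, shown generically (sketch; slot_t stands in for the per-kind profile types):

/* Scan for a FREE slot under the table lock, claim it, return it;
 * NULL means the fixed-size table is exhausted. */
static slot_t *slot_alloc(slot_t tbl[], int num, odp_ticketlock_t *lock)
{
	slot_t *slot = NULL;

	odp_ticketlock_lock(lock);
	for (int i = 0; i < num; i++) {
		if (tbl[i].status != TM_STATUS_FREE)
			continue;
		slot = &tbl[i];
		memset(slot, 0, sizeof(slot_t));
		slot->status = TM_STATUS_RESERVED;
		break;
	}
	odp_ticketlock_unlock(lock);
	return slot;
}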
@@ -295,7 +410,7 @@ static void input_work_queue_destroy(input_work_queue_t *input_work_queue)
* stopped new tm_enq() (et al) calls from succeeding.
*/
odp_ticketlock_lock(&input_work_queue->lock);
- free(input_work_queue);
+ memset(input_work_queue, 0, sizeof(input_work_queue_t));
}
static int input_work_queue_append(tm_system_t *tm_system,
@@ -305,7 +420,7 @@ static int input_work_queue_append(tm_system_t *tm_system,
input_work_item_t *entry_ptr;
uint32_t queue_cnt, tail_idx;
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
queue_cnt = odp_atomic_load_u64(&input_work_queue->queue_cnt);
if (INPUT_WORK_RING_SIZE <= queue_cnt) {
input_work_queue->enqueue_fail_cnt++;
@@ -363,11 +478,11 @@ static tm_system_t *tm_system_alloc(void)
/* Find an open slot in the odp_tm_systems array. */
for (tm_idx = 0; tm_idx < ODP_TM_MAX_NUM_SYSTEMS; tm_idx++) {
- if (!odp_tm_systems[tm_idx]) {
- tm_system = malloc(sizeof(tm_system_t));
+ if (tm_glb->system[tm_idx].status == TM_STATUS_FREE) {
+ tm_system = &tm_glb->system[tm_idx];
memset(tm_system, 0, sizeof(tm_system_t));
- odp_tm_systems[tm_idx] = tm_system;
tm_system->tm_idx = tm_idx;
+ tm_system->status = TM_STATUS_RESERVED;
return tm_system;
}
}
@@ -377,47 +492,38 @@ static tm_system_t *tm_system_alloc(void)
static void tm_system_free(tm_system_t *tm_system)
{
- if (tm_system->root_node)
- free(tm_system->root_node);
-
- if (tm_system->queue_num_tbl)
- free(tm_system->queue_num_tbl);
-
- odp_tm_systems[tm_system->tm_idx] = NULL;
- free(tm_system);
+ tm_glb->system[tm_system->tm_idx].status = TM_STATUS_FREE;
}
static void *tm_common_profile_create(const char *name,
profile_kind_t profile_kind,
- uint32_t object_size,
tm_handle_t *profile_handle_ptr,
_odp_int_name_t *name_tbl_id_ptr)
{
_odp_int_name_kind_t handle_kind;
_odp_int_name_t name_tbl_id;
- dynamic_tbl_t *dynamic_tbl;
tm_handle_t profile_handle;
- uint32_t dynamic_tbl_idx;
+ uint32_t idx;
void *object_ptr;
- /* Note that alloc_entry_in_dynamic_tbl will zero out all of the memory
- * that it allocates, so an additional memset here is unnnecessary. */
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
- object_ptr = alloc_entry_in_dynamic_tbl(dynamic_tbl, object_size,
- &dynamic_tbl_idx);
- if (!object_ptr)
+ /* Note that alloc_entry_in_tbl will zero out all of the memory that it
+ * allocates, so an additional memset here is unnecessary. */
+ object_ptr = alloc_entry_in_tbl(&tm_glb->profile_tbl, profile_kind,
+ &idx);
+ if (!object_ptr) {
+ ODP_ERR("No free profiles left\n");
return NULL;
+ }
handle_kind = PROFILE_TO_HANDLE_KIND[profile_kind];
- profile_handle = MAKE_PROFILE_HANDLE(profile_kind, dynamic_tbl_idx);
+ profile_handle = MAKE_PROFILE_HANDLE(profile_kind, idx);
name_tbl_id = ODP_INVALID_NAME;
if ((name != NULL) && (name[0] != '\0')) {
name_tbl_id = _odp_int_name_tbl_add(name, handle_kind,
profile_handle);
if (name_tbl_id == ODP_INVALID_NAME) {
- free_dynamic_tbl_entry(dynamic_tbl, object_size,
- dynamic_tbl_idx);
+ free_tbl_entry(&tm_glb->profile_tbl, profile_kind, idx);
return NULL;
}
}
@@ -429,20 +535,18 @@ static void *tm_common_profile_create(const char *name,
}
static int tm_common_profile_destroy(tm_handle_t profile_handle,
- uint32_t object_size,
_odp_int_name_t name_tbl_id)
{
profile_kind_t profile_kind;
- dynamic_tbl_t *dynamic_tbl;
- uint32_t dynamic_tbl_idx;
+ uint32_t idx;
if (name_tbl_id != ODP_INVALID_NAME)
_odp_int_name_tbl_delete(name_tbl_id);
- profile_kind = GET_PROFILE_KIND(profile_handle);
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
- dynamic_tbl_idx = GET_TBL_IDX(profile_handle);
- free_dynamic_tbl_entry(dynamic_tbl, object_size, dynamic_tbl_idx);
+ profile_kind = GET_PROFILE_KIND(profile_handle);
+ idx = GET_TBL_IDX(profile_handle);
+ free_tbl_entry(&tm_glb->profile_tbl, profile_kind, idx);
+
return 0;
}
@@ -450,16 +554,31 @@ static void *tm_get_profile_params(tm_handle_t profile_handle,
profile_kind_t expected_profile_kind)
{
profile_kind_t profile_kind;
- dynamic_tbl_t *dynamic_tbl;
- uint32_t dynamic_tbl_idx;
+ uint32_t idx;
profile_kind = GET_PROFILE_KIND(profile_handle);
if (profile_kind != expected_profile_kind)
return NULL;
- dynamic_tbl = &odp_tm_profile_tbls[profile_kind];
- dynamic_tbl_idx = GET_TBL_IDX(profile_handle);
- return dynamic_tbl->array_ptrs[dynamic_tbl_idx];
+ idx = GET_TBL_IDX(profile_handle);
+
+ switch (profile_kind) {
+ case TM_SHAPER_PROFILE:
+ return &tm_glb->profile_tbl.shaper.profile[idx];
+
+ case TM_SCHED_PROFILE:
+ return &tm_glb->profile_tbl.sched.profile[idx];
+
+ case TM_THRESHOLD_PROFILE:
+ return &tm_glb->profile_tbl.threshold.profile[idx];
+
+ case TM_WRED_PROFILE:
+ return &tm_glb->profile_tbl.wred.profile[idx];
+
+ default:
+ ODP_ERR("Invalid TM profile\n");
+ return NULL;
+ }
}
static uint64_t tm_bps_to_rate(uint64_t bps)
@@ -1315,7 +1434,7 @@ static odp_bool_t tm_propagate_pkt_desc(tm_system_t *tm_system,
if (!shaper_change)
return false;
- schedulers_obj = tm_node_obj->schedulers_obj;
+ schedulers_obj = &tm_node_obj->schedulers_obj;
prev_sched_pkt = schedulers_obj->out_pkt_desc;
sched_was_empty = prev_sched_pkt.queue_num == 0;
sched_change = false;
@@ -1409,7 +1528,7 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
if ((!blocked_scheduler) && (!timer_shaper))
return false;
- if (tm_node_obj->schedulers_obj == blocked_scheduler)
+ if (&tm_node_obj->schedulers_obj == blocked_scheduler)
return false;
/* See if this first shaper_obj is delaying the demoted_pkt_desc */
@@ -1435,7 +1554,7 @@ static odp_bool_t tm_demote_pkt_desc(tm_system_t *tm_system,
if ((!demoted_pkt_desc) && (!shaper_change))
return false;
- schedulers_obj = tm_node_obj->schedulers_obj;
+ schedulers_obj = &tm_node_obj->schedulers_obj;
prev_sched_pkt = schedulers_obj->out_pkt_desc;
sched_was_empty = prev_sched_pkt.queue_num == 0;
sched_change = false;
@@ -1552,7 +1671,7 @@ static odp_bool_t tm_consume_pkt_desc(tm_system_t *tm_system,
tm_node_obj = shaper_obj->next_tm_node;
while (!tm_node_obj->is_root_node) { /* not at egress */
- schedulers_obj = tm_node_obj->schedulers_obj;
+ schedulers_obj = &tm_node_obj->schedulers_obj;
prev_sched_pkt = schedulers_obj->out_pkt_desc;
sent_priority = schedulers_obj->highest_priority;
@@ -1636,7 +1755,7 @@ static odp_bool_t tm_consume_sent_pkt(tm_system_t *tm_system,
pkt_len = sent_pkt_desc->pkt_len;
tm_queue_obj->pkts_consumed_cnt++;
- tm_queue_cnts_decrement(tm_system, tm_queue_obj->tm_wred_node,
+ tm_queue_cnts_decrement(tm_system, &tm_queue_obj->tm_wred_node,
tm_queue_obj->priority, pkt_len);
/* Get the next pkt in the tm_queue, if there is one. */
@@ -1902,7 +2021,7 @@ static int tm_enqueue(tm_system_t *tm_system,
pkt_color = odp_packet_color(pkt);
drop_eligible = odp_packet_drop_eligible(pkt);
- initial_tm_wred_node = tm_queue_obj->tm_wred_node;
+ initial_tm_wred_node = &tm_queue_obj->tm_wred_node;
if (drop_eligible) {
drop = random_early_discard(tm_system, tm_queue_obj,
initial_tm_wred_node, pkt_color);
@@ -2260,31 +2379,24 @@ static int tm_process_expired_timers(tm_system_t *tm_system,
return work_done;
}
-static volatile uint64_t busy_wait_counter;
-
-static odp_bool_t main_loop_running;
-static odp_atomic_u64_t atomic_request_cnt;
-static odp_atomic_u64_t currently_serving_cnt;
-static odp_atomic_u64_t atomic_done_cnt;
-
static void busy_wait(uint32_t iterations)
{
uint32_t cnt;
for (cnt = 1; cnt <= iterations; cnt++)
- busy_wait_counter++;
+ tm_glb->busy_wait_counter++;
}
static void signal_request(void)
{
- uint64_t my_request_num, serving_cnt;
+ uint64_t request_num, serving;
- my_request_num = odp_atomic_fetch_inc_u64(&atomic_request_cnt) + 1;
+ request_num = odp_atomic_fetch_inc_u64(&tm_glb->atomic_request_cnt) + 1;
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt);
- while (serving_cnt != my_request_num) {
+ serving = odp_atomic_load_u64(&tm_glb->currently_serving_cnt);
+ while (serving != request_num) {
busy_wait(100);
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt);
+ serving = odp_atomic_load_u64(&tm_glb->currently_serving_cnt);
}
}
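Note: the three counters moved into tm_glb implement a ticket handshake between threads that need to mutate TM state and the TM service thread: a requester takes ticket N by incrementing atomic_request_cnt, spins until currently_serving_cnt reaches N, performs its update, then bumps atomic_done_cnt; the service thread admits one requester at a time from check_for_request(). In outline:

/* requester (signal_request / signal_request_done):
 *	n = ++atomic_request_cnt;
 *	while (currently_serving_cnt != n)
 *		busy_wait();
 *	... mutate TM state ...
 *	++atomic_done_cnt;
 *
 * service thread (check_for_request):
 *	if (currently_serving_cnt != atomic_request_cnt) {
 *		++currently_serving_cnt;	// admit next ticket
 *		while (atomic_done_cnt != admitted_ticket)
 *			busy_wait();
 *	}
 */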
@@ -2292,26 +2404,26 @@ static void check_for_request(void)
{
uint64_t request_num, serving_cnt, done_cnt;
- request_num = odp_atomic_load_u64(&atomic_request_cnt);
- serving_cnt = odp_atomic_load_u64(&currently_serving_cnt);
+ request_num = odp_atomic_load_u64(&tm_glb->atomic_request_cnt);
+ serving_cnt = odp_atomic_load_u64(&tm_glb->currently_serving_cnt);
if (serving_cnt == request_num)
return;
/* Signal the other requesting thread to proceed and then
* wait for their done indication */
- odp_atomic_inc_u64(&currently_serving_cnt);
+ odp_atomic_inc_u64(&tm_glb->currently_serving_cnt);
busy_wait(100);
- done_cnt = odp_atomic_load_u64(&atomic_done_cnt);
+ done_cnt = odp_atomic_load_u64(&tm_glb->atomic_done_cnt);
while (done_cnt != request_num) {
busy_wait(100);
- done_cnt = odp_atomic_load_u64(&atomic_done_cnt);
+ done_cnt = odp_atomic_load_u64(&tm_glb->atomic_done_cnt);
}
}
static void signal_request_done(void)
{
- odp_atomic_inc_u64(&atomic_done_cnt);
+ odp_atomic_inc_u64(&tm_glb->atomic_done_cnt);
}
static int thread_affinity_get(odp_cpumask_t *odp_cpu_mask)
@@ -2346,18 +2458,18 @@ static void *tm_system_thread(void *arg)
uint32_t destroying, work_queue_cnt, timer_cnt;
int rc;
- rc = odp_init_local((odp_instance_t)odp_global_data.main_pid,
+ rc = odp_init_local((odp_instance_t)odp_global_ro.main_pid,
ODP_THREAD_WORKER);
ODP_ASSERT(rc == 0);
tm_group = arg;
tm_system = tm_group->first_tm_system;
_odp_int_timer_wheel = tm_system->_odp_int_timer_wheel;
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
/* Wait here until we have seen the first enqueue operation. */
odp_barrier_wait(&tm_group->tm_group_barrier);
- main_loop_running = true;
+ tm_glb->main_loop_running = true;
destroying = odp_atomic_load_u64(&tm_system->destroying);
@@ -2407,7 +2519,7 @@ static void *tm_system_thread(void *arg)
/* Advance to the next tm_system in the tm_system_group. */
tm_system = tm_system->next;
_odp_int_timer_wheel = tm_system->_odp_int_timer_wheel;
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
}
odp_barrier_wait(&tm_system->tm_system_destroy_barrier);
@@ -2433,19 +2545,6 @@ void odp_tm_egress_init(odp_tm_egress_t *egress)
memset(egress, 0, sizeof(odp_tm_egress_t));
}
-static tm_node_obj_t *create_dummy_root_node(void)
-{
- tm_node_obj_t *tm_node_obj;
-
- tm_node_obj = malloc(sizeof(tm_node_obj_t));
- if (!tm_node_obj)
- return NULL;
-
- memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
- tm_node_obj->is_root_node = true;
- return tm_node_obj;
-}
-
int odp_tm_capabilities(odp_tm_capabilities_t capabilities[] ODP_UNUSED,
uint32_t capabilities_size)
{
@@ -2574,7 +2673,7 @@ static int affinitize_main_thread(void)
* just record this value and return. */
cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count == 1) {
- g_main_thread_cpu = odp_cpumask_first(&odp_cpu_mask);
+ tm_glb->main_thread_cpu = odp_cpumask_first(&odp_cpu_mask);
return 0;
} else if (cpu_count == 0) {
return -1;
@@ -2586,7 +2685,7 @@ static int affinitize_main_thread(void)
CPU_SET(cpu_num, &linux_cpu_set);
rc = sched_setaffinity(0, sizeof(cpu_set_t), &linux_cpu_set);
if (rc == 0)
- g_main_thread_cpu = cpu_num;
+ tm_glb->main_thread_cpu = cpu_num;
else
ODP_DBG("%s sched_setaffinity failed with rc=%d\n",
__func__, rc);
@@ -2599,32 +2698,32 @@ static uint32_t tm_thread_cpu_select(void)
int cpu_count, cpu;
odp_cpumask_default_worker(&odp_cpu_mask, 0);
- if ((g_main_thread_cpu != -1) &&
- odp_cpumask_isset(&odp_cpu_mask, g_main_thread_cpu))
- odp_cpumask_clr(&odp_cpu_mask, g_main_thread_cpu);
+ if ((tm_glb->main_thread_cpu != -1) &&
+ odp_cpumask_isset(&odp_cpu_mask, tm_glb->main_thread_cpu))
+ odp_cpumask_clr(&odp_cpu_mask, tm_glb->main_thread_cpu);
cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count < 1) {
odp_cpumask_all_available(&odp_cpu_mask);
- if ((g_main_thread_cpu != -1) &&
- odp_cpumask_isset(&odp_cpu_mask, g_main_thread_cpu))
+ if ((tm_glb->main_thread_cpu != -1) &&
+ odp_cpumask_isset(&odp_cpu_mask, tm_glb->main_thread_cpu))
cpu_count = odp_cpumask_count(&odp_cpu_mask);
if (cpu_count < 1)
odp_cpumask_all_available(&odp_cpu_mask);
}
- if (g_tm_cpu_num == 0) {
+ if (tm_glb->cpu_num == 0) {
cpu = odp_cpumask_first(&odp_cpu_mask);
} else {
- cpu = odp_cpumask_next(&odp_cpu_mask, g_tm_cpu_num);
+ cpu = odp_cpumask_next(&odp_cpu_mask, tm_glb->cpu_num);
if (cpu == -1) {
- g_tm_cpu_num = 0;
+ tm_glb->cpu_num = 0;
cpu = odp_cpumask_first(&odp_cpu_mask);
}
}
- g_tm_cpu_num++;
+ tm_glb->cpu_num++;
return cpu;
}
@@ -2648,35 +2747,9 @@ static int tm_thread_create(tm_system_group_t *tm_group)
return rc;
}
-
-static _odp_tm_group_t _odp_tm_group_create(const char *name ODP_UNUSED)
-{
- tm_system_group_t *tm_group, *first_tm_group, *second_tm_group;
-
- tm_group = malloc(sizeof(tm_system_group_t));
- memset(tm_group, 0, sizeof(tm_system_group_t));
- odp_barrier_init(&tm_group->tm_group_barrier, 2);
-
- /* Add this group to the tm_group_list linked list. */
- if (tm_group_list == NULL) {
- tm_group_list = tm_group;
- tm_group->next = tm_group;
- tm_group->prev = tm_group;
- } else {
- first_tm_group = tm_group_list;
- second_tm_group = first_tm_group->next;
- first_tm_group->next = tm_group;
- second_tm_group->prev = tm_group;
- tm_group->next = second_tm_group;
- tm_group->prev = first_tm_group;
- }
-
- return MAKE_ODP_TM_SYSTEM_GROUP(tm_group);
-}
-
static void _odp_tm_group_destroy(_odp_tm_group_t odp_tm_group)
{
- tm_system_group_t *tm_group, *prev_tm_group, *next_tm_group;
+ tm_system_group_t *tm_group;
int rc;
tm_group = GET_TM_GROUP(odp_tm_group);
@@ -2686,26 +2759,12 @@ static void _odp_tm_group_destroy(_odp_tm_group_t odp_tm_group)
rc = pthread_join(tm_group->thread, NULL);
ODP_ASSERT(rc == 0);
pthread_attr_destroy(&tm_group->attr);
- if (g_tm_cpu_num > 0)
- g_tm_cpu_num--;
-
- /* Remove this group from the tm_group_list linked list. Special case
- * when this is the last tm_group in the linked list. */
- prev_tm_group = tm_group->prev;
- next_tm_group = tm_group->next;
- if (prev_tm_group == next_tm_group) {
- ODP_ASSERT(tm_group_list == tm_group);
- tm_group_list = NULL;
- } else {
- prev_tm_group->next = next_tm_group;
- next_tm_group->prev = prev_tm_group;
- if (tm_group_list == tm_group)
- tm_group_list = next_tm_group;
- }
+ if (tm_glb->cpu_num > 0)
+ tm_glb->cpu_num--;
- tm_group->prev = NULL;
- tm_group->next = NULL;
- free(tm_group);
+ odp_ticketlock_lock(&tm_glb->system_group.lock);
+ tm_group->status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&tm_glb->system_group.lock);
}
static int _odp_tm_group_add(_odp_tm_group_t odp_tm_group, odp_tm_t odp_tm)
@@ -2777,12 +2836,21 @@ static int _odp_tm_group_remove(_odp_tm_group_t odp_tm_group, odp_tm_t odp_tm)
return 0;
}
+static void _odp_tm_init_tm_group(tm_system_group_t *tm_group)
+{
+ memset(tm_group, 0, sizeof(tm_system_group_t));
+
+ tm_group->status = TM_STATUS_RESERVED;
+ odp_barrier_init(&tm_group->tm_group_barrier, 2);
+}
+
static int tm_group_attach(odp_tm_t odp_tm)
{
tm_system_group_t *tm_group, *min_tm_group;
_odp_tm_group_t odp_tm_group;
odp_cpumask_t all_cpus, worker_cpus;
uint32_t total_cpus, avail_cpus;
+ uint32_t i;
/* If this platform has a small number of cpu's then allocate one
* tm_group and assign all tm_system's to this tm_group. Otherwise in
@@ -2796,34 +2864,37 @@ static int tm_group_attach(odp_tm_t odp_tm)
avail_cpus = odp_cpumask_count(&worker_cpus);
if (total_cpus < 24) {
- tm_group = tm_group_list;
- odp_tm_group = MAKE_ODP_TM_SYSTEM_GROUP(tm_group);
- if (tm_group == NULL)
- odp_tm_group = _odp_tm_group_create("");
+ tm_group = &tm_glb->system_group.group[0];
- _odp_tm_group_add(odp_tm_group, odp_tm);
- return 0;
- }
+ odp_ticketlock_lock(&tm_glb->system_group.lock);
+ if (tm_group->status == TM_STATUS_FREE)
+ _odp_tm_init_tm_group(tm_group);
+ odp_ticketlock_unlock(&tm_glb->system_group.lock);
- /* Manycore case. */
- if ((tm_group_list == NULL) || (avail_cpus > 1)) {
- odp_tm_group = _odp_tm_group_create("");
+ odp_tm_group = MAKE_ODP_TM_SYSTEM_GROUP(tm_group);
_odp_tm_group_add(odp_tm_group, odp_tm);
return 0;
}
/* Pick a tm_group according to the smallest number of tm_systems. */
- tm_group = tm_group_list;
min_tm_group = NULL;
- while (tm_group != NULL) {
+ odp_ticketlock_lock(&tm_glb->system_group.lock);
+ for (i = 0; i < ODP_TM_MAX_NUM_SYSTEMS && i < avail_cpus; i++) {
+ tm_group = &tm_glb->system_group.group[i];
+
+ if (tm_group->status == TM_STATUS_FREE) {
+ _odp_tm_init_tm_group(tm_group);
+ min_tm_group = tm_group;
+ break;
+ }
+
if (min_tm_group == NULL)
min_tm_group = tm_group;
else if (tm_group->num_tm_systems <
min_tm_group->num_tm_systems)
min_tm_group = tm_group;
-
- tm_group = tm_group->next;
}
+ odp_ticketlock_unlock(&tm_glb->system_group.lock);
if (min_tm_group == NULL)
return -1;
@@ -2842,10 +2913,15 @@ odp_tm_t odp_tm_create(const char *name,
odp_bool_t create_fail;
odp_tm_t odp_tm;
odp_pktout_queue_t pktout;
- uint32_t malloc_len, max_num_queues, max_queued_pkts, max_timers;
+ uint32_t max_num_queues, max_queued_pkts, max_timers;
uint32_t max_tm_queues, max_sorted_lists;
int rc;
+ if (odp_global_ro.init_param.not_used.feat.tm) {
+ ODP_ERR("TM has been disabled\n");
+ return ODP_TM_INVALID;
+ }
+
/* If we are using pktio output (usual case) get the first associated
* pktout_queue for this pktio and fail if there isn't one.
*/
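Note: with the feat.tm check above, an application that declared the traffic manager unused at init time now gets a clean ODP_TM_INVALID from odp_tm_create(). Declaring unused features uses the standard init API (sketch):

odp_instance_t instance;
odp_init_t init;

odp_init_param_init(&init);
init.not_used.feat.tm = 1;	/* promise: TM will not be used */

if (odp_init_global(&instance, &init, NULL))
	exit(EXIT_FAILURE);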
@@ -2854,10 +2930,10 @@ odp_tm_t odp_tm_create(const char *name,
return ODP_TM_INVALID;
/* Allocate tm_system_t record. */
- odp_ticketlock_lock(&tm_create_lock);
+ odp_ticketlock_lock(&tm_glb->create_lock);
tm_system = tm_system_alloc();
if (!tm_system) {
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return ODP_TM_INVALID;
}
@@ -2865,7 +2941,7 @@ odp_tm_t odp_tm_create(const char *name,
name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_HANDLE, odp_tm);
if (name_tbl_id == ODP_INVALID_NAME) {
tm_system_free(tm_system);
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return ODP_TM_INVALID;
}
@@ -2881,10 +2957,8 @@ odp_tm_t odp_tm_create(const char *name,
tm_system_capabilities_set(&tm_system->capabilities,
&tm_system->requirements);
- malloc_len = max_tm_queues * sizeof(tm_queue_obj_t *);
- tm_system->queue_num_tbl = malloc(malloc_len);
- memset(tm_system->queue_num_tbl, 0, malloc_len);
tm_system->next_queue_num = 1;
+ tm_system->root_node.is_root_node = true;
tm_init_random_data(&tm_system->tm_random_data);
@@ -2920,15 +2994,7 @@ odp_tm_t odp_tm_create(const char *name,
== _ODP_INT_TIMER_WHEEL_INVALID;
}
- if (create_fail == 0) {
- tm_system->root_node = create_dummy_root_node();
- create_fail |= tm_system->root_node == NULL;
- }
-
- if (create_fail == 0) {
- tm_system->input_work_queue = input_work_queue_create();
- create_fail |= !tm_system->input_work_queue;
- }
+ input_work_queue_init(&tm_system->input_work_queue);
if (create_fail == 0) {
/* Pass any odp_groups or hints to tm_group_attach here. */
@@ -2939,8 +3005,6 @@ odp_tm_t odp_tm_create(const char *name,
if (create_fail) {
_odp_int_name_tbl_delete(name_tbl_id);
- if (tm_system->input_work_queue)
- input_work_queue_destroy(tm_system->input_work_queue);
if (tm_system->_odp_int_sorted_pool
!= _ODP_INT_SORTED_POOL_INVALID)
@@ -2958,11 +3022,11 @@ odp_tm_t odp_tm_create(const char *name,
tm_system->_odp_int_timer_wheel);
tm_system_free(tm_system);
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return ODP_TM_INVALID;
}
- odp_ticketlock_unlock(&tm_create_lock);
+ odp_ticketlock_unlock(&tm_glb->create_lock);
return odp_tm;
}
@@ -3008,7 +3072,7 @@ int odp_tm_destroy(odp_tm_t odp_tm)
* allocated by this group. */
_odp_tm_group_remove(tm_system->odp_tm_group, odp_tm);
- input_work_queue_destroy(tm_system->input_work_queue);
+ input_work_queue_destroy(&tm_system->input_work_queue);
_odp_sorted_pool_destroy(tm_system->_odp_int_sorted_pool);
_odp_queue_pool_destroy(tm_system->_odp_int_queue_pool);
_odp_timer_wheel_destroy(tm_system->_odp_int_timer_wheel);
@@ -3153,7 +3217,6 @@ odp_tm_shaper_t odp_tm_shaper_create(const char *name,
_odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_SHAPER_PROFILE,
- sizeof(tm_shaper_params_t),
&shaper_handle, &name_tbl_id);
if (!profile_obj)
return ODP_TM_INVALID;
@@ -3179,7 +3242,6 @@ int odp_tm_shaper_destroy(odp_tm_shaper_t shaper_profile)
return -1;
return tm_common_profile_destroy(shaper_profile,
- sizeof(tm_shaper_params_t),
profile_obj->name_tbl_id);
}
@@ -3211,7 +3273,7 @@ int odp_tm_shaper_params_update(odp_tm_shaper_t shaper_profile,
if (!profile_obj)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
tm_shaper_params_cvt_to(params, profile_obj);
return 0;
}
@@ -3280,7 +3342,6 @@ odp_tm_sched_t odp_tm_sched_create(const char *name,
odp_tm_sched_t sched_handle;
profile_obj = tm_common_profile_create(name, TM_SCHED_PROFILE,
- sizeof(tm_sched_params_t),
&sched_handle, &name_tbl_id);
if (!profile_obj)
return ODP_TM_INVALID;
@@ -3306,7 +3367,6 @@ int odp_tm_sched_destroy(odp_tm_sched_t sched_profile)
return -1;
return tm_common_profile_destroy(sched_profile,
- sizeof(tm_sched_params_t),
profile_obj->name_tbl_id);
}
@@ -3338,7 +3398,7 @@ int odp_tm_sched_params_update(odp_tm_sched_t sched_profile,
if (!profile_obj)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
tm_sched_params_cvt_to(params, profile_obj);
return 0;
}
@@ -3372,7 +3432,6 @@ odp_tm_threshold_t odp_tm_threshold_create(const char *name,
_odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_THRESHOLD_PROFILE,
- sizeof(tm_queue_thresholds_t),
&threshold_handle, &name_tbl_id);
if (!profile_obj)
return ODP_TM_INVALID;
@@ -3401,7 +3460,6 @@ int odp_tm_threshold_destroy(odp_tm_threshold_t threshold_profile)
return -1;
return tm_common_profile_destroy(threshold_profile,
- sizeof(odp_tm_threshold_params_t),
threshold_params->name_tbl_id);
}
@@ -3438,7 +3496,7 @@ int odp_tm_thresholds_params_update(odp_tm_threshold_t threshold_profile,
if (!profile_obj)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
profile_obj->max_pkts =
params->enable_max_pkts ? params->max_pkts : 0;
profile_obj->max_bytes =
@@ -3499,7 +3557,6 @@ odp_tm_wred_t odp_tm_wred_create(const char *name, odp_tm_wred_params_t *params)
_odp_int_name_t name_tbl_id;
profile_obj = tm_common_profile_create(name, TM_WRED_PROFILE,
- sizeof(tm_wred_params_t),
&wred_handle, &name_tbl_id);
if (!profile_obj)
@@ -3526,7 +3583,6 @@ int odp_tm_wred_destroy(odp_tm_wred_t wred_profile)
return -1;
return tm_common_profile_destroy(wred_profile,
- sizeof(tm_wred_params_t),
ODP_INVALID_NAME);
}
@@ -3558,7 +3614,7 @@ int odp_tm_wred_params_update(odp_tm_wred_t wred_profile,
if (!wred_params)
return -1;
- if (!main_loop_running) {
+ if (!tm_glb->main_loop_running) {
tm_wred_params_cvt_to(params, wred_params);
return 0;
}
@@ -3593,63 +3649,61 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
tm_schedulers_obj_t *schedulers_obj;
_odp_int_name_t name_tbl_id;
tm_wred_node_t *tm_wred_node;
- tm_node_obj_t *tm_node_obj;
+ tm_node_obj_t *tm_node_obj = NULL;
odp_tm_node_t odp_tm_node;
odp_tm_wred_t wred_profile;
tm_system_t *tm_system;
- uint32_t level, num_priorities, priority, schedulers_obj_len, color;
+ uint32_t level, num_priorities, priority, color;
+ uint32_t i;
/* Allocate a tm_node_obj_t record. */
tm_system = GET_TM_SYSTEM(odp_tm);
- tm_node_obj = malloc(sizeof(tm_node_obj_t));
- if (!tm_node_obj)
- return ODP_TM_INVALID;
- tm_wred_node = malloc(sizeof(tm_wred_node_t));
- if (!tm_wred_node) {
- free(tm_node_obj);
- return ODP_TM_INVALID;
- }
+ odp_ticketlock_lock(&tm_glb->node_obj.lock);
- level = params->level;
- requirements = &tm_system->requirements.per_level[level];
- num_priorities = requirements->max_priority + 1;
- schedulers_obj_len = sizeof(tm_schedulers_obj_t)
- + (sizeof(tm_sched_state_t) * num_priorities);
- schedulers_obj = malloc(schedulers_obj_len);
- if (!schedulers_obj) {
- free(tm_wred_node);
- free(tm_node_obj);
- return ODP_TM_INVALID;
- }
+ for (i = 0; i < ODP_TM_MAX_NUM_TM_NODES; i++) {
+ tm_node_obj_t *cur_node_obj = tm_nobj_from_index(i);
- memset(schedulers_obj, 0, schedulers_obj_len);
- odp_tm_node = MAKE_ODP_TM_NODE(tm_node_obj);
- name_tbl_id = ODP_INVALID_NAME;
- if ((name) && (name[0] != '\0')) {
- name_tbl_id = _odp_int_name_tbl_add(name, ODP_TM_NODE_HANDLE,
- odp_tm_node);
- if (name_tbl_id == ODP_INVALID_NAME) {
- free(schedulers_obj);
- free(tm_wred_node);
- free(tm_node_obj);
- return ODP_TM_INVALID;
+ if (cur_node_obj->status != TM_STATUS_FREE)
+ continue;
+
+ level = params->level;
+ requirements = &tm_system->requirements.per_level[level];
+ num_priorities = requirements->max_priority + 1;
+
+ odp_tm_node = MAKE_ODP_TM_NODE(cur_node_obj);
+ name_tbl_id = ODP_INVALID_NAME;
+ if ((name) && (name[0] != '\0')) {
+ name_tbl_id = _odp_int_name_tbl_add(name,
+ ODP_TM_NODE_HANDLE,
+ odp_tm_node);
+ if (name_tbl_id == ODP_INVALID_NAME)
+ break;
}
+ tm_node_obj = cur_node_obj;
+
+ memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
+ tm_node_obj->status = TM_STATUS_RESERVED;
+
+ break;
}
- memset(tm_node_obj, 0, sizeof(tm_node_obj_t));
- memset(tm_wred_node, 0, sizeof(tm_wred_node_t));
- memset(schedulers_obj, 0, schedulers_obj_len);
+ odp_ticketlock_unlock(&tm_glb->node_obj.lock);
+
+ if (!tm_node_obj)
+ return ODP_TM_INVALID;
+
tm_node_obj->user_context = params->user_context;
tm_node_obj->name_tbl_id = name_tbl_id;
tm_node_obj->max_fanin = params->max_fanin;
tm_node_obj->is_root_node = false;
tm_node_obj->level = params->level;
tm_node_obj->tm_idx = tm_system->tm_idx;
- tm_node_obj->tm_wred_node = tm_wred_node;
- tm_node_obj->schedulers_obj = schedulers_obj;
+
+ tm_wred_node = &tm_node_obj->tm_wred_node;
odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+ schedulers_obj = &tm_node_obj->schedulers_obj;
schedulers_obj->num_priorities = num_priorities;
for (priority = 0; priority < num_priorities; priority++) {
sorted_list = _odp_sorted_list_create(
@@ -3677,7 +3731,7 @@ odp_tm_node_t odp_tm_node_create(odp_tm_t odp_tm,
tm_node_obj->magic_num = TM_NODE_MAGIC_NUM;
tm_node_obj->shaper_obj.enclosing_entity = tm_node_obj;
tm_node_obj->shaper_obj.in_tm_node_obj = 1;
- tm_node_obj->schedulers_obj->enclosing_entity = tm_node_obj;
+ tm_node_obj->schedulers_obj.enclosing_entity = tm_node_obj;
odp_ticketlock_unlock(&tm_system->tm_system_lock);
return odp_tm_node;
@@ -3702,7 +3756,7 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node)
if (!tm_node_obj)
return -1;
- tm_system = odp_tm_systems[tm_node_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_node_obj->tm_idx];
if (!tm_system)
return -1;
@@ -3720,16 +3774,14 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node)
if (shaper_obj->shaper_params != NULL)
return -1;
- tm_wred_node = tm_node_obj->tm_wred_node;
- if (tm_wred_node != NULL) {
- if (tm_wred_node->threshold_params != NULL)
- return -1;
+ tm_wred_node = &tm_node_obj->tm_wred_node;
+ if (tm_wred_node->threshold_params != NULL)
+ return -1;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_params = tm_wred_node->wred_params[color];
- if (wred_params != NULL)
- return -1;
- }
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
+ wred_params = tm_wred_node->wred_params[color];
+ if (wred_params != NULL)
+ return -1;
}
/* Now that all of the checks are done, time to do some freeing. */
@@ -3737,25 +3789,22 @@ int odp_tm_node_destroy(odp_tm_node_t tm_node)
if (tm_node_obj->name_tbl_id != ODP_INVALID_NAME)
_odp_int_name_tbl_delete(tm_node_obj->name_tbl_id);
- if (tm_node_obj->tm_wred_node != NULL)
- free(tm_node_obj->tm_wred_node);
-
- schedulers_obj = tm_node_obj->schedulers_obj;
- if (schedulers_obj != NULL) {
- num_priorities = schedulers_obj->num_priorities;
- for (priority = 0; priority < num_priorities; priority++) {
- sched_state = &schedulers_obj->sched_states[priority];
- sorted_list = sched_state->sorted_list;
- sorted_pool = tm_system->_odp_int_sorted_pool;
- rc = _odp_sorted_list_destroy(sorted_pool,
- sorted_list);
- if (rc != 0)
- return rc;
- }
+ schedulers_obj = &tm_node_obj->schedulers_obj;
+ num_priorities = schedulers_obj->num_priorities;
+ for (priority = 0; priority < num_priorities; priority++) {
+ sched_state = &schedulers_obj->sched_states[priority];
+ sorted_list = sched_state->sorted_list;
+ sorted_pool = tm_system->_odp_int_sorted_pool;
+ rc = _odp_sorted_list_destroy(sorted_pool,
+ sorted_list);
+ if (rc != 0)
+ return rc;
}
- free(schedulers_obj);
- free(tm_node_obj);
+ odp_ticketlock_lock(&tm_glb->node_obj.lock);
+ tm_node_obj->status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&tm_glb->node_obj.lock);
+
odp_ticketlock_unlock(&tm_system->tm_system_lock);
return 0;
}
@@ -3770,14 +3819,14 @@ int odp_tm_node_shaper_config(odp_tm_node_t tm_node,
if (!tm_node_obj)
return -1;
- tm_system = odp_tm_systems[tm_node_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_node_obj->tm_idx];
if (!tm_system)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
tm_shaper_config_set(tm_system, shaper_profile,
&tm_node_obj->shaper_obj);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -3796,10 +3845,10 @@ int odp_tm_node_sched_config(odp_tm_node_t tm_node,
if (!child_tm_node_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
child_shaper_obj = &child_tm_node_obj->shaper_obj;
tm_sched_config_set(child_shaper_obj, sched_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -3809,12 +3858,12 @@ int odp_tm_node_threshold_config(odp_tm_node_t tm_node,
tm_node_obj_t *tm_node_obj;
tm_node_obj = GET_TM_NODE_OBJ(tm_node);
- if ((!tm_node_obj) || (!tm_node_obj->tm_wred_node))
+ if (!tm_node_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
- tm_threshold_config_set(tm_node_obj->tm_wred_node, thresholds_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
+ tm_threshold_config_set(&tm_node_obj->tm_wred_node, thresholds_profile);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -3831,9 +3880,9 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node,
if (!tm_node_obj)
return -1;
- wred_node = tm_node_obj->tm_wred_node;
+ wred_node = &tm_node_obj->tm_wred_node;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
rc = 0;
if (pkt_color == ODP_PACKET_ALL_COLORS) {
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
@@ -3844,7 +3893,7 @@ int odp_tm_node_wred_config(odp_tm_node_t tm_node,
rc = -1;
}
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return rc;
}
@@ -3890,96 +3939,99 @@ odp_tm_queue_t odp_tm_queue_create(odp_tm_t odp_tm,
odp_tm_queue_params_t *params)
{
_odp_int_pkt_queue_t _odp_int_pkt_queue;
- tm_queue_obj_t *tm_queue_obj;
- tm_wred_node_t *tm_wred_node;
- odp_tm_queue_t odp_tm_queue;
+ tm_queue_obj_t *queue_obj;
+ odp_tm_queue_t odp_tm_queue = ODP_TM_INVALID;
odp_queue_t queue;
odp_tm_wred_t wred_profile;
tm_system_t *tm_system;
uint32_t color;
+ uint32_t i;
/* Allocate a tm_queue_obj_t record. */
tm_system = GET_TM_SYSTEM(odp_tm);
- tm_queue_obj = malloc(sizeof(tm_queue_obj_t));
- if (!tm_queue_obj)
- return ODP_TM_INVALID;
- tm_wred_node = malloc(sizeof(tm_wred_node_t));
- if (!tm_wred_node) {
- free(tm_queue_obj);
- return ODP_TM_INVALID;
- }
+ odp_ticketlock_lock(&tm_glb->queue_obj.lock);
- _odp_int_pkt_queue = _odp_pkt_queue_create(
- tm_system->_odp_int_queue_pool);
- if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID) {
- free(tm_wred_node);
- free(tm_queue_obj);
- return ODP_TM_INVALID;
- }
+ for (i = 0; i < ODP_TM_MAX_TM_QUEUES; i++) {
+ _odp_int_queue_pool_t int_queue_pool;
- odp_tm_queue = MAKE_ODP_TM_QUEUE(tm_queue_obj);
- memset(tm_queue_obj, 0, sizeof(tm_queue_obj_t));
- memset(tm_wred_node, 0, sizeof(tm_wred_node_t));
- tm_queue_obj->user_context = params->user_context;
- tm_queue_obj->priority = params->priority;
- tm_queue_obj->tm_idx = tm_system->tm_idx;
- tm_queue_obj->queue_num = tm_system->next_queue_num++;
- tm_queue_obj->tm_wred_node = tm_wred_node;
- tm_queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue;
- tm_queue_obj->pkt = ODP_PACKET_INVALID;
- odp_ticketlock_init(&tm_wred_node->tm_wred_node_lock);
+ queue_obj = tm_qobj_from_index(i);
- queue = odp_queue_create(NULL, NULL);
- if (queue == ODP_QUEUE_INVALID) {
- free(tm_wred_node);
- free(tm_queue_obj);
- return ODP_TM_INVALID;
- }
- tm_queue_obj->tm_qentry = queue_fn->from_ext(queue);
- queue_fn->set_enq_deq_fn(tm_queue_obj->tm_qentry,
- queue_tm_reenq, queue_tm_reenq_multi,
- NULL, NULL);
+ if (queue_obj->status != TM_STATUS_FREE)
+ continue;
- tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = tm_queue_obj;
- odp_ticketlock_lock(&tm_system->tm_system_lock);
- if (params->shaper_profile != ODP_TM_INVALID)
- tm_shaper_config_set(tm_system, params->shaper_profile,
- &tm_queue_obj->shaper_obj);
+ int_queue_pool = tm_system->_odp_int_queue_pool;
+ _odp_int_pkt_queue = _odp_pkt_queue_create(int_queue_pool);
+ if (_odp_int_pkt_queue == _ODP_INT_PKT_QUEUE_INVALID)
+ continue;
- if (params->threshold_profile != ODP_TM_INVALID)
- tm_threshold_config_set(tm_wred_node,
- params->threshold_profile);
+ odp_tm_queue = MAKE_ODP_TM_QUEUE(queue_obj);
+ memset(queue_obj, 0, sizeof(tm_queue_obj_t));
+ queue_obj->user_context = params->user_context;
+ queue_obj->priority = params->priority;
+ queue_obj->tm_idx = tm_system->tm_idx;
+ queue_obj->queue_num = tm_system->next_queue_num++;
+ queue_obj->_odp_int_pkt_queue = _odp_int_pkt_queue;
+ queue_obj->pkt = ODP_PACKET_INVALID;
+ odp_ticketlock_init(&queue_obj->tm_wred_node.tm_wred_node_lock);
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_profile = params->wred_profile[color];
- if (wred_profile != ODP_TM_INVALID)
- tm_wred_config_set(tm_wred_node, color, wred_profile);
+ queue = odp_queue_create(NULL, NULL);
+ if (queue == ODP_QUEUE_INVALID) {
+ odp_tm_queue = ODP_TM_INVALID;
+ continue;
+ }
+
+ queue_obj->queue = queue;
+ odp_queue_context_set(queue, queue_obj, sizeof(tm_queue_obj_t));
+ queue_fn->set_enq_deq_fn(queue, queue_tm_reenq,
+ queue_tm_reenq_multi, NULL, NULL);
+
+ tm_system->queue_num_tbl[queue_obj->queue_num - 1] = queue_obj;
+
+ odp_ticketlock_lock(&tm_system->tm_system_lock);
+
+ if (params->shaper_profile != ODP_TM_INVALID)
+ tm_shaper_config_set(tm_system, params->shaper_profile,
+ &queue_obj->shaper_obj);
+
+ if (params->threshold_profile != ODP_TM_INVALID)
+ tm_threshold_config_set(&queue_obj->tm_wred_node,
+ params->threshold_profile);
+
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
+ wred_profile = params->wred_profile[color];
+ if (wred_profile != ODP_TM_INVALID)
+ tm_wred_config_set(&queue_obj->tm_wred_node,
+ color, wred_profile);
+ }
+
+ queue_obj->magic_num = TM_QUEUE_MAGIC_NUM;
+ queue_obj->shaper_obj.enclosing_entity = queue_obj;
+ queue_obj->shaper_obj.in_tm_node_obj = 0;
+
+ odp_ticketlock_unlock(&tm_system->tm_system_lock);
+
+ queue_obj->status = TM_STATUS_RESERVED;
+ break;
}
- tm_queue_obj->magic_num = TM_QUEUE_MAGIC_NUM;
- tm_queue_obj->shaper_obj.enclosing_entity = tm_queue_obj;
- tm_queue_obj->shaper_obj.in_tm_node_obj = 0;
+ odp_ticketlock_unlock(&tm_glb->queue_obj.lock);
- odp_ticketlock_unlock(&tm_system->tm_system_lock);
return odp_tm_queue;
}
int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
{
- tm_wred_params_t *wred_params;
tm_shaper_obj_t *shaper_obj;
tm_queue_obj_t *tm_queue_obj;
- tm_wred_node_t *tm_wred_node;
tm_system_t *tm_system;
- uint32_t color;
/* First look up the tm_queue. */
tm_queue_obj = GET_TM_QUEUE_OBJ(tm_queue);
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
@@ -3990,33 +4042,16 @@ int odp_tm_queue_destroy(odp_tm_queue_t tm_queue)
(tm_queue_obj->pkt != ODP_PACKET_INVALID))
return -1;
- /* Check that there is no shaper profile, threshold profile or wred
- * profile currently associated with this tm_queue. */
- if (shaper_obj->shaper_params != NULL)
- return -1;
-
- tm_wred_node = tm_queue_obj->tm_wred_node;
- if (tm_wred_node != NULL) {
- if (tm_wred_node->threshold_params != NULL)
- return -1;
-
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_params = tm_wred_node->wred_params[color];
- if (wred_params != NULL)
- return -1;
- }
- }
-
/* Now that all of the checks are done, time to do some freeing. */
odp_ticketlock_lock(&tm_system->tm_system_lock);
tm_system->queue_num_tbl[tm_queue_obj->queue_num - 1] = NULL;
- odp_queue_destroy(queue_fn->to_ext(tm_queue_obj->tm_qentry));
+ odp_queue_destroy(tm_queue_obj->queue);
+
+ odp_ticketlock_lock(&tm_glb->queue_obj.lock);
+ tm_queue_obj->status = TM_STATUS_FREE;
+ odp_ticketlock_unlock(&tm_glb->queue_obj.lock);
- /* First delete any associated tm_wred_node and then the tm_queue_obj
- * itself */
- free(tm_queue_obj->tm_wred_node);
- free(tm_queue_obj);
odp_ticketlock_unlock(&tm_system->tm_system_lock);
return 0;
}
@@ -4054,14 +4089,14 @@ int odp_tm_queue_shaper_config(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
tm_shaper_config_set(tm_system, shaper_profile,
&tm_queue_obj->shaper_obj);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4081,10 +4116,10 @@ int odp_tm_queue_sched_config(odp_tm_node_t tm_node,
if (!child_tm_queue_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
child_shaper_obj = &child_tm_queue_obj->shaper_obj;
tm_sched_config_set(child_shaper_obj, sched_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4098,10 +4133,10 @@ int odp_tm_queue_threshold_config(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
- ret = tm_threshold_config_set(tm_queue_obj->tm_wred_node,
+ odp_ticketlock_lock(&tm_glb->profile_lock);
+ ret = tm_threshold_config_set(&tm_queue_obj->tm_wred_node,
thresholds_profile);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return ret;
}
@@ -4118,9 +4153,9 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- wred_node = tm_queue_obj->tm_wred_node;
+ wred_node = &tm_queue_obj->tm_wred_node;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
rc = 0;
if (pkt_color == ODP_PACKET_ALL_COLORS) {
for (color = 0; color < ODP_NUM_PACKET_COLORS; color++)
@@ -4131,7 +4166,7 @@ int odp_tm_queue_wred_config(odp_tm_queue_t tm_queue,
rc = -1;
}
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return rc;
}
@@ -4196,13 +4231,15 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
if ((!src_tm_node_obj) || src_tm_node_obj->is_root_node)
return -1;
- tm_system = odp_tm_systems[src_tm_node_obj->tm_idx];
+ tm_system = &tm_glb->system[src_tm_node_obj->tm_idx];
if (!tm_system)
return -1;
- src_tm_wred_node = src_tm_node_obj->tm_wred_node;
+ src_tm_wred_node = &src_tm_node_obj->tm_wred_node;
if (dst_tm_node == ODP_TM_ROOT) {
- src_tm_node_obj->shaper_obj.next_tm_node = tm_system->root_node;
+ tm_node_obj_t *root_node = &tm_system->root_node;
+
+ src_tm_node_obj->shaper_obj.next_tm_node = root_node;
src_tm_wred_node->next_tm_wred_node = NULL;
return 0;
}
@@ -4211,7 +4248,7 @@ int odp_tm_node_connect(odp_tm_node_t src_tm_node, odp_tm_node_t dst_tm_node)
if ((!dst_tm_node_obj) || dst_tm_node_obj->is_root_node)
return -1;
- dst_tm_wred_node = dst_tm_node_obj->tm_wred_node;
+ dst_tm_wred_node = &dst_tm_node_obj->tm_wred_node;
if (src_tm_node_obj->tm_idx != dst_tm_node_obj->tm_idx)
return -1;
@@ -4242,9 +4279,8 @@ int odp_tm_node_disconnect(odp_tm_node_t src_tm_node)
dst_tm_node_obj->current_tm_node_fanin--;
}
- src_tm_wred_node = src_tm_node_obj->tm_wred_node;
- if (src_tm_wred_node != NULL)
- src_tm_wred_node->next_tm_wred_node = NULL;
+ src_tm_wred_node = &src_tm_node_obj->tm_wred_node;
+ src_tm_wred_node->next_tm_wred_node = NULL;
src_tm_node_obj->shaper_obj.next_tm_node = NULL;
return 0;
@@ -4265,13 +4301,13 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
if (!src_tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[src_tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[src_tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
- src_tm_wred_node = src_tm_queue_obj->tm_wred_node;
+ src_tm_wred_node = &src_tm_queue_obj->tm_wred_node;
if (dst_tm_node == ODP_TM_ROOT) {
- root_node = tm_system->root_node;
+ root_node = &tm_system->root_node;
src_tm_queue_obj->shaper_obj.next_tm_node = root_node;
src_tm_wred_node->next_tm_wred_node = NULL;
return 0;
@@ -4281,7 +4317,7 @@ int odp_tm_queue_connect(odp_tm_queue_t tm_queue, odp_tm_node_t dst_tm_node)
if ((!dst_tm_node_obj) || dst_tm_node_obj->is_root_node)
return -1;
- dst_tm_wred_node = dst_tm_node_obj->tm_wred_node;
+ dst_tm_wred_node = &dst_tm_node_obj->tm_wred_node;
if (src_tm_queue_obj->tm_idx != dst_tm_node_obj->tm_idx)
return -1;
@@ -4313,9 +4349,8 @@ int odp_tm_queue_disconnect(odp_tm_queue_t tm_queue)
dst_tm_node_obj->current_tm_queue_fanin--;
}
- src_tm_wred_node = src_tm_queue_obj->tm_wred_node;
- if (src_tm_wred_node != NULL)
- src_tm_wred_node->next_tm_wred_node = NULL;
+ src_tm_wred_node = &src_tm_queue_obj->tm_wred_node;
+ src_tm_wred_node->next_tm_wred_node = NULL;
src_tm_queue_obj->shaper_obj.next_tm_node = NULL;
return 0;
@@ -4330,7 +4365,7 @@ int odp_tm_enq(odp_tm_queue_t tm_queue, odp_packet_t pkt)
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
@@ -4351,7 +4386,7 @@ int odp_tm_enq_with_cnt(odp_tm_queue_t tm_queue, odp_packet_t pkt)
if (!tm_queue_obj)
return -1;
- tm_system = odp_tm_systems[tm_queue_obj->tm_idx];
+ tm_system = &tm_glb->system[tm_queue_obj->tm_idx];
if (!tm_system)
return -1;
@@ -4401,19 +4436,17 @@ int odp_tm_node_info(odp_tm_node_t tm_node, odp_tm_node_info_t *info)
if (shaper_params != NULL)
info->shaper_profile = shaper_params->shaper_profile;
- tm_wred_node = tm_node_obj->tm_wred_node;
- if (tm_wred_node != NULL) {
- threshold_params = tm_wred_node->threshold_params;
- if (threshold_params != NULL)
- info->threshold_profile =
- threshold_params->thresholds_profile;
+ tm_wred_node = &tm_node_obj->tm_wred_node;
+ threshold_params = tm_wred_node->threshold_params;
+ if (threshold_params != NULL)
+ info->threshold_profile =
+ threshold_params->thresholds_profile;
- for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
- wred_params = tm_wred_node->wred_params[color];
- if (wred_params != NULL)
- info->wred_profile[color] =
- wred_params->wred_profile;
- }
+ for (color = 0; color < ODP_NUM_PACKET_COLORS; color++) {
+ wred_params = tm_wred_node->wred_params[color];
+ if (wred_params != NULL)
+ info->wred_profile[color] =
+ wred_params->wred_profile;
}
return 0;
@@ -4517,7 +4550,7 @@ int odp_tm_queue_info(odp_tm_queue_t tm_queue, odp_tm_queue_info_t *info)
if (shaper_params != NULL)
info->shaper_profile = shaper_params->shaper_profile;
- tm_wred_node = tm_queue_obj->tm_wred_node;
+ tm_wred_node = &tm_queue_obj->tm_wred_node;
if (tm_wred_node != NULL) {
threshold_params = tm_wred_node->threshold_params;
if (threshold_params != NULL)
@@ -4576,9 +4609,7 @@ int odp_tm_queue_query(odp_tm_queue_t tm_queue,
if (!tm_queue_obj)
return -1;
- tm_wred_node = tm_queue_obj->tm_wred_node;
- if (!tm_wred_node)
- return -1;
+ tm_wred_node = &tm_queue_obj->tm_wred_node;
/* **TBD** Where do we get the queue_info from? */
queue_info.threshold_params = tm_wred_node->threshold_params;
@@ -4621,11 +4652,11 @@ int odp_tm_priority_threshold_config(odp_tm_t odp_tm,
if (thresholds_profile == ODP_TM_INVALID)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
tm_system->priority_info[priority].threshold_params =
tm_get_profile_params(thresholds_profile,
TM_THRESHOLD_PROFILE);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4638,10 +4669,10 @@ int odp_tm_total_threshold_config(odp_tm_t odp_tm,
if (thresholds_profile == ODP_TM_INVALID)
return -1;
- odp_ticketlock_lock(&tm_profile_lock);
+ odp_ticketlock_lock(&tm_glb->profile_lock);
tm_system->total_info.threshold_params = tm_get_profile_params(
thresholds_profile, TM_THRESHOLD_PROFILE);
- odp_ticketlock_unlock(&tm_profile_lock);
+ odp_ticketlock_unlock(&tm_glb->profile_lock);
return 0;
}
@@ -4653,7 +4684,7 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
uint32_t queue_num, max_queue_num;
tm_system = GET_TM_SYSTEM(odp_tm);
- input_work_queue = tm_system->input_work_queue;
+ input_work_queue = &tm_system->input_work_queue;
ODP_PRINT("odp_tm_stats_print - tm_system=0x%" PRIX64 " tm_idx=%u\n",
odp_tm, tm_system->tm_idx);
@@ -4690,17 +4721,48 @@ void odp_tm_stats_print(odp_tm_t odp_tm)
int odp_tm_init_global(void)
{
- odp_ticketlock_init(&tm_create_lock);
- odp_ticketlock_init(&tm_profile_lock);
- odp_barrier_init(&tm_first_enq, 2);
+ odp_shm_t shm;
+
+ if (odp_global_ro.init_param.not_used.feat.tm) {
+ ODP_DBG("TM disabled\n");
+ return 0;
+ }
- odp_atomic_init_u64(&atomic_request_cnt, 0);
- odp_atomic_init_u64(&currently_serving_cnt, 0);
- odp_atomic_init_u64(&atomic_done_cnt, 0);
+ shm = odp_shm_reserve("_odp_traffic_mng", sizeof(tm_global_t), 0, 0);
+ if (shm == ODP_SHM_INVALID)
+ return -1;
+
+ tm_glb = odp_shm_addr(shm);
+ memset(tm_glb, 0, sizeof(tm_global_t));
+
+ tm_glb->shm = shm;
+ tm_glb->main_thread_cpu = -1;
+
+ odp_ticketlock_init(&tm_glb->queue_obj.lock);
+ odp_ticketlock_init(&tm_glb->node_obj.lock);
+ odp_ticketlock_init(&tm_glb->system_group.lock);
+ odp_ticketlock_init(&tm_glb->create_lock);
+ odp_ticketlock_init(&tm_glb->profile_lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.sched.lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.shaper.lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.threshold.lock);
+ odp_ticketlock_init(&tm_glb->profile_tbl.wred.lock);
+ odp_barrier_init(&tm_glb->first_enq, 2);
+
+ odp_atomic_init_u64(&tm_glb->atomic_request_cnt, 0);
+ odp_atomic_init_u64(&tm_glb->currently_serving_cnt, 0);
+ odp_atomic_init_u64(&tm_glb->atomic_done_cnt, 0);
return 0;
}
int odp_tm_term_global(void)
{
+ if (odp_global_ro.init_param.not_used.feat.tm)
+ return 0;
+
+ if (odp_shm_free(tm_glb->shm)) {
+ ODP_ERR("shm free failed\n");
+ return -1;
+ }
return 0;
}
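
Editor's note: the init/term pair above replaces file-scope statics with one shared-memory block, the pattern this patch applies across modules (pktio/ring.c below does the same). A minimal sketch of that lifecycle, assuming only the odp_shm_*/odp_ticketlock_* calls used in the hunk above; my_global_t and the function names are illustrative, not part of the patch:

    #include <string.h>
    #include <odp_api.h>

    typedef struct {
            odp_shm_t shm;         /* handle kept so term can free the block */
            odp_ticketlock_t lock; /* protects the module state that follows */
            /* ... module state ... */
    } my_global_t;

    static my_global_t *my_glb;

    static int my_init_global(void)
    {
            odp_shm_t shm = odp_shm_reserve("_my_module_global",
                                            sizeof(my_global_t), 0, 0);

            if (shm == ODP_SHM_INVALID)
                    return -1;

            my_glb = odp_shm_addr(shm);
            memset(my_glb, 0, sizeof(my_global_t));
            my_glb->shm = shm;     /* stored for the matching odp_shm_free() */
            odp_ticketlock_init(&my_glb->lock);
            return 0;
    }

    static int my_term_global(void)
    {
            return odp_shm_free(my_glb->shm) ? -1 : 0;
    }

Keeping the shm handle inside the block itself, as odp_tm_init_global() does with tm_glb->shm, means term needs no extra lookup to release the reservation.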
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 4e320aa08..1edd2488e 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -68,8 +68,6 @@
#if ODP_DPDK_ZERO_COPY
ODP_STATIC_ASSERT(CONFIG_PACKET_HEADROOM == RTE_PKTMBUF_HEADROOM,
"ODP and DPDK headroom sizes not matching!");
-ODP_STATIC_ASSERT(PKT_EXTRA_LEN >= sizeof(struct rte_mbuf),
- "DPDK rte_mbuf won't fit in odp_packet_hdr_t.extra!");
#endif
/* DPDK poll mode drivers requiring minimum RX burst size DPDK_MIN_RX_BURST */
@@ -81,6 +79,8 @@ ODP_STATIC_ASSERT(PKT_EXTRA_LEN >= sizeof(struct rte_mbuf),
#define DPDK_MBUF_BUF_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define DPDK_MEMPOOL_CACHE_SIZE 64
+#define MBUF_OFFSET (ROUNDUP_CACHE_LINE(sizeof(struct rte_mbuf)))
+
ODP_STATIC_ASSERT((DPDK_NB_MBUF % DPDK_MEMPOOL_CACHE_SIZE == 0) &&
(DPDK_MEMPOOL_CACHE_SIZE <= RTE_MEMPOOL_CACHE_MAX_SIZE) &&
(DPDK_MEMPOOL_CACHE_SIZE <= DPDK_MBUF_BUF_SIZE * 10 / 15)
@@ -139,9 +139,6 @@ static inline pkt_dpdk_t *pkt_priv(pktio_entry_t *pktio_entry)
static int disable_pktio; /**< !0: this pktio disabled, 0: enabled */
-/* Has dpdk_pktio_init() been called */
-static odp_bool_t dpdk_initialized;
-
#ifndef RTE_BUILD_SHARED_LIB
#define MEMPOOL_OPS(hdl) \
extern void mp_hdlr_init_##hdl(void)
@@ -168,6 +165,10 @@ void refer_constructors(void)
}
#endif
+static int dpdk_pktio_init(void);
+
+static int pool_alloc(struct rte_mempool *mp);
+
static int lookup_opt(const char *opt_name, const char *drv_name, int *val)
{
const char *base = "pktio_dpdk";
@@ -273,34 +274,39 @@ static inline void mbuf_update(struct rte_mbuf *mbuf, odp_packet_hdr_t *pkt_hdr,
}
/**
- * Initialize mbuf
- *
- * Called once per ODP packet.
+ * Initialize packet mbuf. Modified version of standard rte_pktmbuf_init()
+ * function.
*/
-static void mbuf_init(struct rte_mempool *mp, struct rte_mbuf *mbuf,
- odp_packet_hdr_t *pkt_hdr)
+static void pktmbuf_init(struct rte_mempool *mp, void *opaque_arg ODP_UNUSED,
+ void *_m, unsigned i ODP_UNUSED)
{
- void *buf_addr = pkt_hdr->buf_hdr.base_data - RTE_PKTMBUF_HEADROOM;
-
- memset(mbuf, 0, sizeof(struct rte_mbuf));
-
- mbuf->priv_size = 0;
- mbuf->buf_addr = buf_addr;
- mbuf->buf_physaddr = rte_mem_virt2phy(buf_addr);
- if (odp_unlikely(mbuf->buf_physaddr == RTE_BAD_PHYS_ADDR ||
- mbuf->buf_physaddr == 0))
- ODP_ABORT("Failed to map virt addr to phy");
-
- mbuf->buf_len = (uint16_t)rte_pktmbuf_data_room_size(mp);
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->pool = mp;
- mbuf->refcnt = 1;
- mbuf->nb_segs = 1;
- mbuf->port = 0xff;
-
- /* Store ODP packet handle inside rte_mbuf */
- mbuf->userdata = packet_handle(pkt_hdr);
- pkt_hdr->extra_type = PKT_EXTRA_TYPE_DPDK;
+ struct rte_mbuf *m = _m;
+ uint32_t mbuf_size, buf_len;
+ odp_packet_hdr_t *pkt_hdr;
+ void *buf_addr;
+
+ pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)((uint8_t *)m + MBUF_OFFSET);
+ buf_addr = pkt_hdr->buf_hdr.base_data - RTE_PKTMBUF_HEADROOM;
+
+ mbuf_size = sizeof(struct rte_mbuf);
+ buf_len = rte_pktmbuf_data_room_size(mp);
+
+ memset(m, 0, mbuf_size);
+ m->priv_size = 0;
+ m->buf_addr = buf_addr;
+ m->buf_iova = rte_mem_virt2iova(buf_addr);
+ m->buf_len = (uint16_t)buf_len;
+ m->data_off = RTE_PKTMBUF_HEADROOM;
+
+ if (odp_unlikely(m->buf_iova == RTE_BAD_IOVA || m->buf_iova == 0))
+ ODP_ABORT("Failed to map virt addr to iova\n");
+
+ /* Init some constant fields */
+ m->pool = mp;
+ m->nb_segs = 1;
+ m->port = MBUF_INVALID_PORT;
+ rte_mbuf_refcnt_set(m, 1);
+ m->next = NULL;
}
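
Editor's note: with zero copy, the rte_mbuf sits a cache-line-rounded MBUF_OFFSET bytes before the ODP packet header inside each pool block, so either struct is recoverable from the other by plain pointer arithmetic, as pool_enqueue()/pool_dequeue_bulk() and prefetch_pkt() below do inline. A sketch of the two conversions; the helper names are illustrative and the types come from this file's existing includes:

    /* MBUF_OFFSET is ROUNDUP_CACHE_LINE(sizeof(struct rte_mbuf)),
     * as defined earlier in this file. */
    static inline odp_packet_hdr_t *mbuf_to_pkt_hdr(struct rte_mbuf *mbuf)
    {
            return (odp_packet_hdr_t *)(uintptr_t)
                    ((uint8_t *)mbuf + MBUF_OFFSET);
    }

    static inline struct rte_mbuf *pkt_hdr_to_mbuf(odp_packet_hdr_t *pkt_hdr)
    {
            return (struct rte_mbuf *)(uintptr_t)
                    ((uint8_t *)pkt_hdr - MBUF_OFFSET);
    }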
/**
@@ -309,60 +315,94 @@ static void mbuf_init(struct rte_mempool *mp, struct rte_mbuf *mbuf,
static struct rte_mempool *mbuf_pool_create(const char *name,
pool_t *pool_entry)
{
- struct rte_mempool *mp;
+ struct rte_mempool *mp = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
- unsigned elt_size;
- unsigned num;
- uint16_t data_room_size;
+ struct rte_mempool_objsz sz;
+ unsigned int elt_size = pool_entry->dpdk_elt_size;
+ unsigned int num = pool_entry->num;
+ uint32_t total_size;
if (!(pool_entry->mem_from_huge_pages)) {
ODP_ERR("DPDK requires memory is allocated from huge pages\n");
- return NULL;
+ goto fail;
}
- num = pool_entry->num;
- data_room_size = pool_entry->seg_len + CONFIG_PACKET_HEADROOM;
- elt_size = sizeof(struct rte_mbuf) + (unsigned)data_room_size;
- mbp_priv.mbuf_data_room_size = data_room_size;
- mbp_priv.mbuf_priv_size = 0;
+ if (pool_entry->seg_len < RTE_MBUF_DEFAULT_BUF_SIZE) {
+ ODP_ERR("Some NICs need at least %dB buffers to not segment "
+ "standard ethernet frames. Increase pool seg_len.\n",
+ RTE_MBUF_DEFAULT_BUF_SIZE);
+ goto fail;
+ }
- mp = rte_mempool_create_empty(name, num, elt_size, cache_size(num),
+ total_size = rte_mempool_calc_obj_size(elt_size, MEMPOOL_F_NO_SPREAD,
+ &sz);
+ if (total_size != pool_entry->block_size) {
+ ODP_ERR("DPDK pool block size not matching to ODP pool: "
+ "%" PRIu32 "/%" PRIu32 "\n", total_size,
+ pool_entry->block_size);
+ goto fail;
+ }
+
+ /* Skipped buffers have to be taken into account to populate pool
+ * properly. */
+ mp = rte_mempool_create_empty(name, num + pool_entry->skipped_blocks,
+ elt_size, cache_size(num),
sizeof(struct rte_pktmbuf_pool_private),
- rte_socket_id(), 0);
+ rte_socket_id(), MEMPOOL_F_NO_SPREAD);
if (mp == NULL) {
ODP_ERR("Failed to create empty DPDK packet pool\n");
- return NULL;
+ goto fail;
}
if (rte_mempool_set_ops_byname(mp, "odp_pool", pool_entry)) {
ODP_ERR("Failed setting mempool operations\n");
- return NULL;
+ goto fail;
}
+ mbp_priv.mbuf_data_room_size = pool_entry->seg_len;
+ mbp_priv.mbuf_priv_size = 0;
rte_pktmbuf_pool_init(mp, &mbp_priv);
- if (rte_mempool_ops_alloc(mp)) {
+ if (pool_alloc(mp)) {
ODP_ERR("Failed allocating mempool\n");
- return NULL;
+ goto fail;
}
+ num = rte_mempool_populate_iova(mp, (char *)pool_entry->base_addr,
+ RTE_BAD_IOVA, pool_entry->shm_size,
+ NULL, NULL);
+ if (num <= 0) {
+ ODP_ERR("Failed to populate mempool: %d\n", num);
+ goto fail;
+ }
+
+ rte_mempool_obj_iter(mp, pktmbuf_init, NULL);
+
return mp;
+
+fail:
+ if (mp)
+ rte_mempool_free(mp);
+ return NULL;
}
/* DPDK external memory pool operations */
-static int pool_enqueue(struct rte_mempool *mp ODP_UNUSED,
+static int pool_enqueue(struct rte_mempool *mp,
void * const *obj_table, unsigned num)
{
odp_packet_t pkt_tbl[num];
+ pool_t *pool_entry = (pool_t *)mp->pool_config;
unsigned i;
- if (odp_unlikely(num == 0))
+ if (odp_unlikely(num == 0 || !pool_entry->pool_in_use))
return 0;
- for (i = 0; i < num; i++)
- pkt_tbl[i] = (odp_packet_t)((struct rte_mbuf *)
- obj_table[i])->userdata;
+ for (i = 0; i < num; i++) {
+ odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)
+ ((uint8_t *)obj_table[i] + MBUF_OFFSET);
+ pkt_tbl[i] = packet_handle(pkt_hdr);
+ }
odp_packet_free_multi(pkt_tbl, num);
@@ -388,13 +428,10 @@ static int pool_dequeue_bulk(struct rte_mempool *mp, void **obj_table,
}
for (i = 0; i < pkts; i++) {
- odp_packet_t pkt = packet_tbl[i];
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- struct rte_mbuf *mbuf = (struct rte_mbuf *)
- (uintptr_t)pkt_hdr->extra;
- if (pkt_hdr->extra_type != PKT_EXTRA_TYPE_DPDK)
- mbuf_init(mp, mbuf, pkt_hdr);
- obj_table[i] = mbuf;
+ odp_packet_hdr_t *pkt_hdr = packet_hdr(packet_tbl[i]);
+
+ obj_table[i] = (struct rte_mbuf *)(uintptr_t)
+ ((uint8_t *)pkt_hdr - MBUF_OFFSET);
}
return 0;
@@ -439,38 +476,65 @@ static void pool_destroy(void *pool)
{
struct rte_mempool *mp = (struct rte_mempool *)pool;
- if (mp != NULL)
+ if (mp != NULL) {
+ pool_t *pool_entry = (pool_t *)mp->pool_config;
+
+ pool_entry->pool_in_use = 0;
rte_mempool_free(mp);
+ }
}
-static struct rte_mempool *pool_create(pool_t *pool)
+int _odp_dpdk_pool_create(pool_t *pool)
{
struct rte_mempool *pkt_pool;
char pool_name[RTE_MEMPOOL_NAMESIZE];
- odp_ticketlock_lock(&pool->lock);
+ if (!ODP_DPDK_ZERO_COPY)
+ return 0;
- if (pool->ext_desc != NULL) {
- odp_ticketlock_unlock(&pool->lock);
- return (struct rte_mempool *)pool->ext_desc;
- }
+ pool->pool_in_use = 0;
snprintf(pool_name, sizeof(pool_name),
- "dpdk_pktpool_%" PRIu32 "", pool->pool_idx);
+ "dpdk_pktpool_%" PRIu32 "_%" PRIu32 "", odp_global_ro.main_pid,
+ pool->pool_idx);
pkt_pool = mbuf_pool_create(pool_name, pool);
if (pkt_pool == NULL) {
- odp_ticketlock_unlock(&pool->lock);
ODP_ERR("Creating external DPDK pool failed\n");
- return NULL;
+ return -1;
}
pool->ext_desc = pkt_pool;
pool->ext_destroy = pool_destroy;
+ pool->pool_in_use = 1;
+
+ return 0;
+}
+
+uint32_t _odp_dpdk_pool_obj_size(pool_t *pool, uint32_t block_size)
+{
+ struct rte_mempool_objsz sz;
+ uint32_t total_size;
+
+ if (!ODP_DPDK_ZERO_COPY)
+ return block_size;
+
+ if (odp_global_rw->dpdk_initialized == 0) {
+ if (dpdk_pktio_init()) {
+ ODP_ERR("Initializing DPDK failed\n");
+ return 0;
+ }
+ odp_global_rw->dpdk_initialized = 1;
+ }
+
+ block_size += MBUF_OFFSET;
+ total_size = rte_mempool_calc_obj_size(block_size, MEMPOOL_F_NO_SPREAD,
+ &sz);
- odp_ticketlock_unlock(&pool->lock);
+ pool->dpdk_elt_size = sz.elt_size;
+ pool->block_offset = sz.header_size + MBUF_OFFSET;
- return pkt_pool;
+ return total_size;
}
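
Editor's note: read together with MBUF_OFFSET above, the object size math gives each pool block a three-part layout: the DPDK mempool object header (sz.header_size), then the cache-line-padded rte_mbuf, and only then the odp_packet_hdr_t and packet data. pool->block_offset records where that ODP portion starts so the ODP-side allocator can skip the DPDK prefix. This is a reading of the code above, not something stated in DPDK documentation.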
static struct rte_mempool_ops ops_stack = {
@@ -530,10 +594,11 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
if (pktio_cls_enabled(pktio_entry)) {
packet_parse_reset(&parsed_hdr);
packet_set_len(&parsed_hdr, pkt_len);
- if (dpdk_packet_parse_common(&parsed_hdr.p, data,
- pkt_len, pkt_len, mbuf,
- ODP_PROTO_LAYER_ALL,
- pktin_cfg)) {
+ if (_odp_dpdk_packet_parse_common(&parsed_hdr.p, data,
+ pkt_len, pkt_len,
+ mbuf,
+ ODP_PROTO_LAYER_ALL,
+ pktin_cfg)) {
odp_packet_free(pkt_table[i]);
rte_pktmbuf_free(mbuf);
continue;
@@ -557,8 +622,9 @@ static inline int mbuf_to_pkt(pktio_entry_t *pktio_entry,
if (pktio_cls_enabled(pktio_entry))
copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
else if (parse_layer != ODP_PROTO_LAYER_NONE)
- if (dpdk_packet_parse_layer(pkt_hdr, mbuf, parse_layer,
- pktin_cfg)) {
+ if (_odp_dpdk_packet_parse_layer(pkt_hdr, mbuf,
+ parse_layer,
+ pktin_cfg)) {
odp_packet_free(pkt);
rte_pktmbuf_free(mbuf);
continue;
@@ -744,26 +810,49 @@ fail:
return i;
}
+static inline void prefetch_pkt(struct rte_mbuf *mbuf)
+{
+ odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)
+ ((uint8_t *)mbuf + MBUF_OFFSET);
+ void *data = rte_pktmbuf_mtod(mbuf, char *);
+
+ odp_prefetch(pkt_hdr);
+ odp_prefetch(&pkt_hdr->p);
+ odp_prefetch(data);
+}
+
static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
odp_packet_t pkt_table[],
struct rte_mbuf *mbuf_table[],
uint16_t mbuf_num, odp_time_t *ts)
{
- odp_packet_t pkt;
odp_packet_hdr_t *pkt_hdr;
uint16_t pkt_len;
struct rte_mbuf *mbuf;
void *data;
- int i;
- int nb_pkts = 0;
- odp_pool_t pool = pkt_priv(pktio_entry)->pool;
- odp_pktin_config_opt_t pktin_cfg = pktio_entry->s.config.pktin;
- odp_proto_layer_t parse_layer = pktio_entry->s.config.parser.layer;
- odp_pktio_t input = pktio_entry->s.handle;
+ int i, nb_pkts;
+ odp_pool_t pool;
+ odp_pktin_config_opt_t pktin_cfg;
+ odp_proto_layer_t parse_layer;
+ odp_pktio_t input;
+
+ prefetch_pkt(mbuf_table[0]);
+
+ nb_pkts = 0;
+ pool = pkt_priv(pktio_entry)->pool;
+ pktin_cfg = pktio_entry->s.config.pktin;
+ parse_layer = pktio_entry->s.config.parser.layer;
+ input = pktio_entry->s.handle;
+
+ if (odp_likely(mbuf_num > 1))
+ prefetch_pkt(mbuf_table[1]);
for (i = 0; i < mbuf_num; i++) {
odp_packet_hdr_t parsed_hdr;
+ if (odp_likely((i + 2) < mbuf_num))
+ prefetch_pkt(mbuf_table[i + 2]);
+
mbuf = mbuf_table[i];
if (odp_unlikely(mbuf->nb_segs != 1)) {
ODP_ERR("Segmented buffers not supported\n");
@@ -772,20 +861,19 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
}
data = rte_pktmbuf_mtod(mbuf, char *);
- odp_prefetch(data);
-
pkt_len = rte_pktmbuf_pkt_len(mbuf);
- pkt = (odp_packet_t)mbuf->userdata;
- pkt_hdr = packet_hdr(pkt);
+ pkt_hdr = (odp_packet_hdr_t *)(uintptr_t)((uint8_t *)mbuf +
+ MBUF_OFFSET);
if (pktio_cls_enabled(pktio_entry)) {
packet_parse_reset(&parsed_hdr);
packet_set_len(&parsed_hdr, pkt_len);
- if (dpdk_packet_parse_common(&parsed_hdr.p, data,
- pkt_len, pkt_len, mbuf,
- ODP_PROTO_LAYER_ALL,
- pktin_cfg)) {
+ if (_odp_dpdk_packet_parse_common(&parsed_hdr.p, data,
+ pkt_len, pkt_len,
+ mbuf,
+ ODP_PROTO_LAYER_ALL,
+ pktin_cfg)) {
rte_pktmbuf_free(mbuf);
continue;
}
@@ -809,8 +897,9 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
if (pktio_cls_enabled(pktio_entry))
copy_packet_cls_metadata(&parsed_hdr, pkt_hdr);
else if (parse_layer != ODP_PROTO_LAYER_NONE)
- if (dpdk_packet_parse_layer(pkt_hdr, mbuf, parse_layer,
- pktin_cfg)) {
+ if (_odp_dpdk_packet_parse_layer(pkt_hdr, mbuf,
+ parse_layer,
+ pktin_cfg)) {
rte_pktmbuf_free(mbuf);
continue;
}
@@ -820,7 +909,7 @@ static inline int mbuf_to_pkt_zero(pktio_entry_t *pktio_entry,
packet_set_ts(pkt_hdr, ts);
- pkt_table[nb_pkts++] = pkt;
+ pkt_table[nb_pkts++] = packet_handle(pkt_hdr);
}
return nb_pkts;
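
Editor's note: prefetch_pkt() above implements a two-deep software pipeline: packets 0 and 1 are prefetched before the loop, and each iteration prefetches packet i + 2 while packet i is parsed, hiding the header and data cache misses behind useful work. The same structure in isolation; prefetch_one() and process_one() are placeholders for the prefetch and parse steps above:

    static void prefetch_one(void *p);  /* header + data prefetch, as above */
    static void process_one(void *p);   /* per-packet parse/convert step */

    static void process_burst(void *tbl[], int num)
    {
            int i;

            /* Prime the pipeline two entries deep */
            if (num > 0)
                    prefetch_one(tbl[0]);
            if (num > 1)
                    prefetch_one(tbl[1]);

            for (i = 0; i < num; i++) {
                    /* Issue the prefetch two iterations ahead */
                    if (i + 2 < num)
                            prefetch_one(tbl[i + 2]);
                    process_one(tbl[i]);
            }
    }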
@@ -841,45 +930,25 @@ static inline int pkt_to_mbuf_zero(pktio_entry_t *pktio_entry,
for (i = 0; i < num; i++) {
odp_packet_t pkt = pkt_table[i];
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
- struct rte_mbuf *mbuf = (struct rte_mbuf *)
- (uintptr_t)pkt_hdr->extra;
+ struct rte_mbuf *mbuf = (struct rte_mbuf *)(uintptr_t)
+ ((uint8_t *)pkt_hdr - MBUF_OFFSET);
uint16_t pkt_len = odp_packet_len(pkt);
if (odp_unlikely(pkt_len > pkt_dpdk->mtu))
goto fail;
- if (odp_likely(pkt_hdr->buf_hdr.segcount == 1 &&
- pkt_hdr->extra_type == PKT_EXTRA_TYPE_DPDK)) {
+ if (odp_likely(pkt_hdr->buf_hdr.segcount == 1)) {
mbuf_update(mbuf, pkt_hdr, pkt_len);
if (odp_unlikely(pktio_entry->s.chksum_insert_ena))
pkt_set_ol_tx(pktout_cfg, pktout_capa, pkt_hdr,
mbuf, odp_packet_data(pkt));
} else {
- pool_t *pool_entry = pkt_hdr->buf_hdr.pool_ptr;
-
- if (odp_unlikely(pool_entry->ext_desc == NULL)) {
- if (pool_create(pool_entry) == NULL)
- ODP_ABORT("Creating DPDK pool failed");
- }
-
- if (pkt_hdr->buf_hdr.segcount != 1 ||
- !pool_entry->mem_from_huge_pages) {
- /* Fall back to packet copy */
- if (odp_unlikely(pkt_to_mbuf(pktio_entry, &mbuf,
- &pkt, 1) != 1))
- goto fail;
- (*copy_count)++;
-
- } else {
- mbuf_init((struct rte_mempool *)
- pool_entry->ext_desc, mbuf, pkt_hdr);
- mbuf_update(mbuf, pkt_hdr, pkt_len);
- if (pktio_entry->s.chksum_insert_ena)
- pkt_set_ol_tx(pktout_cfg, pktout_capa,
- pkt_hdr, mbuf,
- odp_packet_data(pkt));
- }
+ /* Fall back to packet copy */
+ if (odp_unlikely(pkt_to_mbuf(pktio_entry, &mbuf,
+ &pkt, 1) != 1))
+ goto fail;
+ (*copy_count)++;
}
mbuf_table[i] = mbuf;
}
@@ -1002,8 +1071,6 @@ static int dpdk_vdev_promisc_mode_set(uint16_t port_id, int enable)
static void rss_conf_to_hash_proto(struct rte_eth_rss_conf *rss_conf,
const odp_pktin_hash_proto_t *hash_proto)
{
- memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
-
if (hash_proto->proto.ipv4_udp)
rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
if (hash_proto->proto.ipv4_tcp)
@@ -1024,47 +1091,36 @@ static void rss_conf_to_hash_proto(struct rte_eth_rss_conf *rss_conf,
rss_conf->rss_key = NULL;
}
-static int dpdk_setup_port(pktio_entry_t *pktio_entry)
+static int dpdk_setup_eth_dev(pktio_entry_t *pktio_entry,
+ const struct rte_eth_dev_info *dev_info)
{
int ret;
pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
struct rte_eth_rss_conf rss_conf;
- uint16_t hw_ip_checksum = 0;
+ struct rte_eth_conf eth_conf;
+ uint64_t rss_hf_capa = dev_info->flow_type_rss_offloads;
+
+ memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
- /* Always set some hash functions to enable DPDK RSS hash calculation */
- if (pkt_dpdk->hash.all_bits == 0) {
- memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+ /* Always set some hash functions to enable DPDK RSS hash calculation.
+ * Hash capability has been checked in pktin config. */
+ if (pkt_dpdk->hash.all_bits == 0)
rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
- } else {
+ else
rss_conf_to_hash_proto(&rss_conf, &pkt_dpdk->hash);
- }
- if (pktio_entry->s.config.pktin.bit.ipv4_chksum ||
- pktio_entry->s.config.pktin.bit.udp_chksum ||
- pktio_entry->s.config.pktin.bit.tcp_chksum)
- hw_ip_checksum = 1;
-
- struct rte_eth_conf port_conf = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
- .split_hdr_size = 0,
- .header_split = 0,
- .hw_ip_checksum = hw_ip_checksum,
- .hw_vlan_filter = 0,
- .hw_strip_crc = 0,
- .enable_scatter = 0,
- },
- .rx_adv_conf = {
- .rss_conf = rss_conf,
- },
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
- };
+ /* Filter out unsupported flags */
+ rss_conf.rss_hf &= rss_hf_capa;
+
+ memset(&eth_conf, 0, sizeof(eth_conf));
+
+ eth_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ eth_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
+ eth_conf.rx_adv_conf.rss_conf = rss_conf;
ret = rte_eth_dev_configure(pkt_dpdk->port_id,
pktio_entry->s.num_in_queue,
- pktio_entry->s.num_out_queue, &port_conf);
+ pktio_entry->s.num_out_queue, &eth_conf);
if (ret < 0) {
ODP_ERR("Failed to setup device: err=%d, port=%" PRIu8 "\n",
ret, pkt_dpdk->port_id);
@@ -1154,15 +1210,17 @@ static int dpdk_pktio_init(void)
cmdline = "";
/* masklen includes the terminating null as well */
- cmd_len = strlen("odpdpdk -c --socket-mem ") + masklen +
- strlen(mem_str) + strlen(cmdline) + strlen(" ");
+ cmd_len = snprintf(NULL, 0, "odpdpdk --file-prefix %" PRIu32 "_ "
+ "--proc-type auto -c %s --socket-mem %s %s ",
+ odp_global_ro.main_pid, mask_str, mem_str, cmdline);
char full_cmd[cmd_len];
/* First argument is the facility log; simply bind it to odpdpdk for now. */
cmd_len = snprintf(full_cmd, cmd_len,
- "odpdpdk -c %s --socket-mem %s %s", mask_str,
- mem_str, cmdline);
+ "odpdpdk --file-prefix %" PRIu32 "_ "
+ "--proc-type auto -c %s --socket-mem %s %s ",
+ odp_global_ro.main_pid, mask_str, mem_str, cmdline);
for (i = 0, dpdk_argc = 1; i < cmd_len; ++i) {
if (isspace(full_cmd[i]))
@@ -1235,15 +1293,27 @@ static void dpdk_mempool_free(struct rte_mempool *mp, void *arg ODP_UNUSED)
rte_mempool_free(mp);
}
+/* RTE_ETH_FOREACH_DEV was introduced in v17.8, but causes a build error in
+ * v18.2 (only a warning, but our build system treats warnings as errors). */
+#if (RTE_VERSION >= RTE_VERSION_NUM(18, 2, 0, 0)) && \
+ (RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0))
+ #define ETH_FOREACH_DEV(p) \
+ for (p = rte_eth_find_next(0); \
+ (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
+ p = rte_eth_find_next(p + 1))
+#elif RTE_VERSION >= RTE_VERSION_NUM(17, 8, 0, 0)
+ #define ETH_FOREACH_DEV(p) RTE_ETH_FOREACH_DEV(p)
+#endif
+
static int dpdk_pktio_term(void)
{
- if (!dpdk_initialized)
+ uint16_t port_id;
+
+ if (!odp_global_rw->dpdk_initialized)
return 0;
#if RTE_VERSION >= RTE_VERSION_NUM(17, 8, 0, 0)
- uint16_t port_id;
-
- RTE_ETH_FOREACH_DEV(port_id) {
+ ETH_FOREACH_DEV(port_id) {
rte_eth_dev_close(port_id);
}
#endif
@@ -1254,12 +1324,70 @@ static int dpdk_pktio_term(void)
return 0;
}
+static int check_hash_proto(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *p)
+{
+ struct rte_eth_dev_info dev_info;
+ uint64_t rss_hf_capa;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t port_id = pkt_dpdk->port_id;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+ rss_hf_capa = dev_info.flow_type_rss_offloads;
+
+ if (p->hash_proto.proto.ipv4 &&
+ ((rss_hf_capa & ETH_RSS_IPV4) == 0)) {
+ ODP_ERR("hash_proto.ipv4 not supported\n");
+ return -1;
+ }
+
+ if (p->hash_proto.proto.ipv4_udp &&
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV4_UDP) == 0)) {
+ ODP_ERR("hash_proto.ipv4_udp not supported. "
+ "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
+ return -1;
+ }
+
+ if (p->hash_proto.proto.ipv4_tcp &&
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV4_TCP) == 0)) {
+ ODP_ERR("hash_proto.ipv4_tcp not supported. "
+ "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
+ return -1;
+ }
+
+ if (p->hash_proto.proto.ipv6 &&
+ ((rss_hf_capa & ETH_RSS_IPV6) == 0)) {
+ ODP_ERR("hash_proto.ipv6 not supported. "
+ "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
+ return -1;
+ }
+
+ if (p->hash_proto.proto.ipv6_udp &&
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV6_UDP) == 0)) {
+ ODP_ERR("hash_proto.ipv6_udp not supported. "
+ "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
+ return -1;
+ }
+
+ if (p->hash_proto.proto.ipv6_tcp &&
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV6_TCP) == 0)) {
+ ODP_ERR("hash_proto.ipv6_tcp not supported. "
+ "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
+ return -1;
+ }
+
+ return 0;
+}
+
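
Editor's note: check_hash_proto() repeats the same requested-bit versus rss_hf_capa test six times; the same logic can be written table-driven. A hypothetical compaction, not the ODP API: struct hash_req and check_hash_capa() are stand-ins, and capa_flag[] is expected to hold ETH_RSS_IPV4, ETH_RSS_NONFRAG_IPV4_UDP, ETH_RSS_NONFRAG_IPV4_TCP, ETH_RSS_IPV6, ETH_RSS_NONFRAG_IPV6_UDP, ETH_RSS_NONFRAG_IPV6_TCP in that order:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the six single-bit request fields of the hash proto */
    struct hash_req {
            unsigned ipv4:1, ipv4_udp:1, ipv4_tcp:1;
            unsigned ipv6:1, ipv6_udp:1, ipv6_tcp:1;
    };

    static int check_hash_capa(const struct hash_req *req,
                               uint64_t rss_hf_capa,
                               const uint64_t capa_flag[6])
    {
            const unsigned requested[6] = {
                    req->ipv4, req->ipv4_udp, req->ipv4_tcp,
                    req->ipv6, req->ipv6_udp, req->ipv6_tcp
            };
            static const char * const name[6] = {
                    "ipv4", "ipv4_udp", "ipv4_tcp",
                    "ipv6", "ipv6_udp", "ipv6_tcp"
            };
            int i;

            for (i = 0; i < 6; i++) {
                    if (requested[i] && (rss_hf_capa & capa_flag[i]) == 0) {
                            fprintf(stderr, "hash_proto.%s not supported\n",
                                    name[i]);
                            return -1;
                    }
            }
            return 0;
    }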
static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
const odp_pktin_queue_param_t *p)
{
odp_pktin_mode_t mode = pktio_entry->s.param.in_mode;
uint8_t lockless;
+ if (p->hash_enable && check_hash_proto(pktio_entry, p))
+ return -1;
+
/**
* Scheduler synchronizes input queue polls. Only a single thread
* at a time polls a queue. */
@@ -1410,9 +1538,9 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
/* Initialize DPDK here instead of odp_init_global() to enable running
* 'make check' without root privileges */
- if (dpdk_initialized == 0) {
+ if (odp_global_rw->dpdk_initialized == 0) {
dpdk_pktio_init();
- dpdk_initialized = 1;
+ odp_global_rw->dpdk_initialized = 1;
}
/* Init pktio entry */
@@ -1421,7 +1549,12 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
pkt_dpdk->pool = pool;
pkt_dpdk->port_id = atoi(netdev);
+ /* rte_eth_dev_count() was removed in v18.05 */
+#if RTE_VERSION < RTE_VERSION_NUM(18, 5, 0, 0)
if (rte_eth_dev_count() == 0) {
+#else
+ if (rte_eth_dev_count_avail() == 0) {
+#endif
ODP_ERR("No DPDK ports found\n");
return -1;
}
@@ -1459,10 +1592,7 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
pkt_dpdk->min_rx_burst = 0;
if (ODP_DPDK_ZERO_COPY) {
- if (pool_entry->ext_desc != NULL)
- pkt_pool = (struct rte_mempool *)pool_entry->ext_desc;
- else
- pkt_pool = pool_create(pool_entry);
+ pkt_pool = (struct rte_mempool *)pool_entry->ext_desc;
} else {
snprintf(pool_name, sizeof(pool_name), "pktpool_%s", netdev);
/* Check if the pool exists already */
@@ -1501,91 +1631,122 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
return 0;
}
-static int dpdk_start(pktio_entry_t *pktio_entry)
+static int dpdk_setup_eth_tx(pktio_entry_t *pktio_entry,
+ const pkt_dpdk_t *pkt_dpdk,
+ const struct rte_eth_dev_info *dev_info)
{
- struct rte_eth_dev_info dev_info;
- struct rte_eth_rxconf *rxconf;
- pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
- uint16_t port_id = pkt_dpdk->port_id;
+ struct rte_eth_txconf txconf;
+ uint64_t tx_offloads;
+ uint32_t i;
int ret;
- unsigned i;
+ uint16_t port_id = pkt_dpdk->port_id;
- /* DPDK doesn't support nb_rx_q/nb_tx_q being 0 */
- if (!pktio_entry->s.num_in_queue)
- pktio_entry->s.num_in_queue = 1;
- if (!pktio_entry->s.num_out_queue)
- pktio_entry->s.num_out_queue = 1;
+ txconf = dev_info->default_txconf;
+
+ tx_offloads = 0;
+ if (pktio_entry->s.config.pktout.bit.ipv4_chksum_ena)
+ tx_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
+
+ if (pktio_entry->s.config.pktout.bit.udp_chksum_ena)
+ tx_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
+
+ if (pktio_entry->s.config.pktout.bit.tcp_chksum_ena)
+ tx_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (pktio_entry->s.config.pktout.bit.sctp_chksum_ena)
+ tx_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
+
+ txconf.offloads = tx_offloads;
+
+ if (tx_offloads)
+ pktio_entry->s.chksum_insert_ena = 1;
- /* init port */
- if (dpdk_setup_port(pktio_entry)) {
- ODP_ERR("Failed to configure device\n");
- return -1;
- }
- /* Init TX queues */
for (i = 0; i < pktio_entry->s.num_out_queue; i++) {
- const struct rte_eth_txconf *txconf = NULL;
- int ip_ena = pktio_entry->s.config.pktout.bit.ipv4_chksum_ena;
- int udp_ena = pktio_entry->s.config.pktout.bit.udp_chksum_ena;
- int tcp_ena = pktio_entry->s.config.pktout.bit.tcp_chksum_ena;
- int sctp_ena = pktio_entry->s.config.pktout.bit.sctp_chksum_ena;
- int chksum_ena = ip_ena | udp_ena | tcp_ena | sctp_ena;
-
- if (chksum_ena) {
- /* Enable UDP, TCP, STCP checksum offload */
- uint32_t txq_flags = 0;
-
- if (udp_ena == 0)
- txq_flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
-
- if (tcp_ena == 0)
- txq_flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
-
- if (sctp_ena == 0)
- txq_flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
-
- /* When IP checksum is requested alone, enable UDP
- * offload. DPDK IP checksum offload is enabled only
- * when one of the L4 checksum offloads is requested.*/
- if ((udp_ena == 0) && (tcp_ena == 0) && (sctp_ena == 0))
- txq_flags = ETH_TXQ_FLAGS_NOXSUMTCP |
- ETH_TXQ_FLAGS_NOXSUMSCTP;
-
- txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOREFCOUNT |
- ETH_TXQ_FLAGS_NOMULTMEMP |
- ETH_TXQ_FLAGS_NOVLANOFFL;
-
- rte_eth_dev_info_get(port_id, &dev_info);
- dev_info.default_txconf.txq_flags = txq_flags;
- txconf = &dev_info.default_txconf;
- pktio_entry->s.chksum_insert_ena = 1;
- }
ret = rte_eth_tx_queue_setup(port_id, i,
pkt_dpdk->opt.num_tx_desc,
rte_eth_dev_socket_id(port_id),
- txconf);
+ &txconf);
if (ret < 0) {
ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n",
ret, port_id);
return -1;
}
}
- /* Init RX queues */
- rte_eth_dev_info_get(port_id, &dev_info);
- rxconf = &dev_info.default_rxconf;
- rxconf->rx_drop_en = pkt_dpdk->opt.rx_drop_en;
+
+ return 0;
+}
+
+static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry,
+ const pkt_dpdk_t *pkt_dpdk,
+ const struct rte_eth_dev_info *dev_info)
+{
+ struct rte_eth_rxconf rxconf;
+ uint64_t rx_offloads;
+ uint32_t i;
+ int ret;
+ uint16_t port_id = pkt_dpdk->port_id;
+
+ rxconf = dev_info->default_rxconf;
+
+ rxconf.rx_drop_en = pkt_dpdk->opt.rx_drop_en;
+
+ rx_offloads = 0;
+ if (pktio_entry->s.config.pktin.bit.ipv4_chksum)
+ rx_offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+
+ if (pktio_entry->s.config.pktin.bit.udp_chksum)
+ rx_offloads |= DEV_RX_OFFLOAD_UDP_CKSUM;
+
+ if (pktio_entry->s.config.pktin.bit.tcp_chksum)
+ rx_offloads |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
+ rxconf.offloads = rx_offloads;
+
for (i = 0; i < pktio_entry->s.num_in_queue; i++) {
ret = rte_eth_rx_queue_setup(port_id, i,
pkt_dpdk->opt.num_rx_desc,
rte_eth_dev_socket_id(port_id),
- rxconf, pkt_dpdk->pkt_pool);
+ &rxconf, pkt_dpdk->pkt_pool);
if (ret < 0) {
ODP_ERR("Queue setup failed: err=%d, port=%" PRIu8 "\n",
ret, port_id);
return -1;
}
}
+
+ return 0;
+}
+
+static int dpdk_start(pktio_entry_t *pktio_entry)
+{
+ struct rte_eth_dev_info dev_info;
+ pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
+ uint16_t port_id = pkt_dpdk->port_id;
+ int ret;
+
+ /* DPDK doesn't support nb_rx_q/nb_tx_q being 0 */
+ if (!pktio_entry->s.num_in_queue)
+ pktio_entry->s.num_in_queue = 1;
+ if (!pktio_entry->s.num_out_queue)
+ pktio_entry->s.num_out_queue = 1;
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ /* Setup device */
+ if (dpdk_setup_eth_dev(pktio_entry, &dev_info)) {
+ ODP_ERR("Failed to configure device\n");
+ return -1;
+ }
+
+ /* Setup TX queues */
+ if (dpdk_setup_eth_tx(pktio_entry, pkt_dpdk, &dev_info))
+ return -1;
+
+ /* Setup RX queues */
+ if (dpdk_setup_eth_rx(pktio_entry, pkt_dpdk, &dev_info))
+ return -1;
+
/* Start device */
ret = rte_eth_dev_start(port_id);
if (ret < 0) {
@@ -1852,4 +2013,27 @@ const pktio_if_ops_t dpdk_pktio_ops = {
.output_queues_config = dpdk_output_queues_config
};
+#else
+
+#include <stdint.h>
+
+#include <odp/api/hints.h>
+
+#include <odp_packet_dpdk.h>
+#include <odp_pool_internal.h>
+
+/*
+ * Dummy pool functions, used when the DPDK pktio is disabled at build time
+ */
+
+uint32_t _odp_dpdk_pool_obj_size(pool_t *pool ODP_UNUSED, uint32_t block_size)
+{
+ return block_size;
+}
+
+int _odp_dpdk_pool_create(pool_t *pool ODP_UNUSED)
+{
+ return 0;
+}
+
#endif /* ODP_PKTIO_DPDK */
diff --git a/platform/linux-generic/pktio/dpdk_parse.c b/platform/linux-generic/pktio/dpdk_parse.c
index e9de0756a..5f2b31d08 100644
--- a/platform/linux-generic/pktio/dpdk_parse.c
+++ b/platform/linux-generic/pktio/dpdk_parse.c
@@ -457,10 +457,10 @@ int dpdk_packet_parse_common_l3_l4(packet_parser_t *prs,
/**
* DPDK packet parser
*/
-int dpdk_packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
- uint32_t frame_len, uint32_t seg_len,
- struct rte_mbuf *mbuf, int layer,
- odp_pktin_config_opt_t pktin_cfg)
+int _odp_dpdk_packet_parse_common(packet_parser_t *prs, const uint8_t *ptr,
+ uint32_t frame_len, uint32_t seg_len,
+ struct rte_mbuf *mbuf, int layer,
+ odp_pktin_config_opt_t pktin_cfg)
{
uint32_t offset;
uint16_t ethtype;
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c
index 3b54962f4..cd438ad3d 100644
--- a/platform/linux-generic/pktio/ipc.c
+++ b/platform/linux-generic/pktio/ipc.c
@@ -11,7 +11,7 @@
#include <odp_packet_io_internal.h>
#include <odp/api/system_info.h>
#include <odp_shm_internal.h>
-#include <odp_ishm_internal.h>
+#include <odp_shm_internal.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -233,7 +233,7 @@ static void _ipc_export_pool(struct pktio_info *pinfo,
snprintf(pinfo->slave.pool_name, ODP_POOL_NAME_LEN, "%s",
_ipc_odp_buffer_pool_shm_name(pool_hdl));
- pinfo->slave.pid = odp_global_data.main_pid;
+ pinfo->slave.pid = odp_global_ro.main_pid;
pinfo->slave.block_size = pool->block_size;
pinfo->slave.base_addr = pool->base_addr;
}
@@ -423,7 +423,7 @@ static int ipc_pktio_open(odp_pktio_t id ODP_UNUSED,
snprintf(name, sizeof(name), "%s_info", dev);
shm = odp_shm_reserve(name, sizeof(struct pktio_info),
ODP_CACHE_LINE_SIZE,
- _ODP_ISHM_EXPORT | _ODP_ISHM_LOCK);
+ ODP_SHM_EXPORT | ODP_SHM_SINGLE_VA);
if (ODP_SHM_INVALID == shm) {
_ring_destroy("ipc_rx_cache");
ODP_ERR("can not create shm %s\n", name);
@@ -826,9 +826,13 @@ static int ipc_close(pktio_entry_t *pktio_entry)
static int ipc_pktio_init_global(void)
{
- _ring_tailq_init();
- ODP_PRINT("PKTIO: initialized ipc interface.\n");
- return 0;
+ ODP_DBG("PKTIO: initializing ipc interface.\n");
+ return _ring_tailq_init();
+}
+
+static int ipc_pktio_term_global(void)
+{
+ return _ring_tailq_term();
}
const pktio_if_ops_t ipc_pktio_ops = {
@@ -836,7 +840,7 @@ const pktio_if_ops_t ipc_pktio_ops = {
.print = NULL,
.init_global = ipc_pktio_init_global,
.init_local = NULL,
- .term = NULL,
+ .term = ipc_pktio_term_global,
.open = ipc_pktio_open,
.close = ipc_close,
.recv = ipc_pktio_recv,
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 4c0cba3f9..206f43f6c 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -18,7 +18,7 @@
#include <odp/api/hints.h>
#include <odp/api/plat/byteorder_inlines.h>
#include <odp_queue_if.h>
-#include <odp/api/plat/packet_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
@@ -94,7 +94,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
{
int nbr, i;
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- void *queue;
+ odp_queue_t queue;
odp_packet_hdr_t *pkt_hdr;
odp_packet_t pkt;
odp_time_t ts_val;
@@ -107,8 +107,8 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
odp_ticketlock_lock(&pktio_entry->s.rxl);
- queue = queue_fn->from_ext(pkt_priv(pktio_entry)->loopq);
- nbr = queue_fn->deq_multi(queue, hdr_tbl, num);
+ queue = pkt_priv(pktio_entry)->loopq;
+ nbr = odp_queue_deq_multi(queue, (odp_event_t *)hdr_tbl, num);
if (pktio_entry->s.config.pktin.bit.ts_all ||
pktio_entry->s.config.pktin.bit.ts_ptp) {
@@ -236,7 +236,8 @@ static inline void loopback_fix_checksums(odp_packet_t pkt,
uint8_t l4_proto;
void *l3_hdr;
uint32_t l3_len;
- odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt;
+ odp_bool_t ipv4_chksum_pkt, udp_chksum_pkt, tcp_chksum_pkt,
+ sctp_chksum_pkt;
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
l3_hdr = odp_packet_l3_ptr(pkt, &l3_len);
@@ -260,6 +261,11 @@ static inline void loopback_fix_checksums(odp_packet_t pkt,
l4_proto == _ODP_IPPROTO_TCP,
pkt_hdr->p.flags.l4_chksum_set,
pkt_hdr->p.flags.l4_chksum);
+ sctp_chksum_pkt = OL_TX_CHKSUM_PKT(pktout_cfg->bit.sctp_chksum,
+ pktout_capa->bit.sctp_chksum,
+ l4_proto == _ODP_IPPROTO_SCTP,
+ pkt_hdr->p.flags.l4_chksum_set,
+ pkt_hdr->p.flags.l4_chksum);
if (ipv4_chksum_pkt)
_odp_packet_ipv4_chksum_insert(pkt);
@@ -269,13 +275,16 @@ static inline void loopback_fix_checksums(odp_packet_t pkt,
if (udp_chksum_pkt)
_odp_packet_udp_chksum_insert(pkt);
+
+ if (sctp_chksum_pkt)
+ _odp_packet_sctp_chksum_insert(pkt);
}
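
Editor's note: the OL_TX_CHKSUM_PKT() uses above (IPv4/UDP/TCP/SCTP) share one decision: insert a checksum only when the interface advertises the capability, the protocol matches, and either the per-packet override flag or the interface-level config requests it. A plausible expansion of the macro's semantics, written as a function; this is an assumption from the call sites, not the macro's actual definition:

    /* Assumed semantics of OL_TX_CHKSUM_PKT(); parameter names mirror the
     * argument order at the call sites above. */
    static inline int ol_tx_chksum_pkt(int cfg_ena, int capa, int proto_match,
                                       int override_set, int override)
    {
            return capa && proto_match &&
                   (override_set ? override : cfg_ena);
    }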
static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
const odp_packet_t pkt_tbl[], int num)
{
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- void *queue;
+ odp_queue_t queue;
int i;
int ret;
int nb_tx = 0;
@@ -325,8 +334,8 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
odp_ticketlock_lock(&pktio_entry->s.txl);
- queue = queue_fn->from_ext(pkt_priv(pktio_entry)->loopq);
- ret = queue_fn->enq_multi(queue, hdr_tbl, nb_tx);
+ queue = pkt_priv(pktio_entry)->loopq;
+ ret = odp_queue_enq_multi(queue, (odp_event_t *)hdr_tbl, nb_tx);
if (ret > 0) {
pktio_entry->s.stats.out_ucast_pkts += ret;
@@ -376,9 +385,11 @@ static int loopback_init_capability(pktio_entry_t *pktio_entry)
capa->config.pktin.bit.ipv4_chksum = 1;
capa->config.pktin.bit.tcp_chksum = 1;
capa->config.pktin.bit.udp_chksum = 1;
+ capa->config.pktin.bit.sctp_chksum = 1;
capa->config.pktout.bit.ipv4_chksum = 1;
capa->config.pktout.bit.tcp_chksum = 1;
capa->config.pktout.bit.udp_chksum = 1;
+ capa->config.pktout.bit.sctp_chksum = 1;
capa->config.inbound_ipsec = 1;
capa->config.outbound_ipsec = 1;
@@ -388,6 +399,8 @@ static int loopback_init_capability(pktio_entry_t *pktio_entry)
capa->config.pktout.bit.udp_chksum;
capa->config.pktout.bit.tcp_chksum_ena =
capa->config.pktout.bit.tcp_chksum;
+ capa->config.pktout.bit.sctp_chksum_ena =
+ capa->config.pktout.bit.sctp_chksum;
return 0;
}
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 0da2b7afd..8fb231534 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -836,12 +836,14 @@ static inline int netmap_recv_desc(pktio_entry_t *pktio_entry,
if (odp_likely(ring->slot[slot_id].len <= mtu)) {
slot_tbl[num_rx].buf = buf;
slot_tbl[num_rx].len = ring->slot[slot_id].len;
- ODP_DBG("dropped oversized packet\n");
num_rx++;
+ } else {
+ ODP_DBG("Dropped oversized packet: %" PRIu16 " "
+ "B\n", ring->slot[slot_id].len);
}
ring->cur = nm_ring_next(ring, slot_id);
+ ring->head = ring->cur;
}
- ring->head = ring->cur;
ring_id++;
}
desc->cur_rx_ring = ring_id;
diff --git a/platform/linux-generic/pktio/pcap.c b/platform/linux-generic/pktio/pcap.c
index c6a817fc1..9f7834033 100644
--- a/platform/linux-generic/pktio/pcap.c
+++ b/platform/linux-generic/pktio/pcap.c
@@ -56,7 +56,6 @@ typedef struct {
void *tx; /**< tx pcap handle */
void *tx_dump; /**< tx pcap dumper handle */
odp_pool_t pool; /**< rx pool */
- unsigned char *buf; /**< per-pktio temp buffer */
int loops; /**< number of times to loop rx pcap */
int loop_cnt; /**< number of loops completed */
odp_bool_t promisc; /**< promiscuous mode state */
@@ -141,12 +140,6 @@ static int _pcapif_init_tx(pkt_pcap_t *pcap)
pcap->tx = tx;
}
- pcap->buf = malloc(PKTIO_PCAP_MTU);
- if (!pcap->buf) {
- ODP_ERR("failed to malloc temp buffer\n");
- return -1;
- }
-
pcap->tx_dump = pcap_dump_open(tx, pcap->fname_tx);
if (!pcap->tx_dump) {
ODP_ERR("failed to open dump file %s (%s)\n",
@@ -198,7 +191,6 @@ static int pcapif_close(pktio_entry_t *pktio_entry)
if (pcap->rx)
pcap_close(pcap->rx);
- free(pcap->buf);
free(pcap->fname_rx);
free(pcap->fname_tx);
@@ -298,6 +290,7 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
static int _pcapif_dump_pkt(pkt_pcap_t *pcap, odp_packet_t pkt)
{
struct pcap_pkthdr hdr;
+ uint8_t tx_buf[PKTIO_PCAP_MTU];
if (!pcap->tx_dump)
return 0;
@@ -306,10 +299,10 @@ static int _pcapif_dump_pkt(pkt_pcap_t *pcap, odp_packet_t pkt)
hdr.len = hdr.caplen;
(void)gettimeofday(&hdr.ts, NULL);
- if (odp_packet_copy_to_mem(pkt, 0, hdr.len, pcap->buf) != 0)
+ if (odp_packet_copy_to_mem(pkt, 0, hdr.len, tx_buf) != 0)
return -1;
- pcap_dump(pcap->tx_dump, &hdr, pcap->buf);
+ pcap_dump(pcap->tx_dump, &hdr, tx_buf);
(void)pcap_dump_flush(pcap->tx_dump);
return 0;
diff --git a/platform/linux-generic/pktio/ring.c b/platform/linux-generic/pktio/ring.c
index bb0d67806..2cf0231cd 100644
--- a/platform/linux-generic/pktio/ring.c
+++ b/platform/linux-generic/pktio/ring.c
@@ -79,13 +79,21 @@
#include <inttypes.h>
#include <odp_packet_io_ring_internal.h>
#include <odp_errno_define.h>
+#include <odp_global_data.h>
#include <odp/api/plat/cpu_inlines.h>
-static TAILQ_HEAD(, _ring) odp_ring_list;
-
#define RING_VAL_IS_POWER_2(x) ((((x) - 1) & (x)) == 0)
+typedef struct {
+ TAILQ_HEAD(, _ring) ring_list;
+ /* Rings tailq lock */
+ odp_rwlock_t qlock;
+ odp_shm_t shm;
+} global_data_t;
+
+static global_data_t *global;
+
/*
* the enqueue of pointers on the ring.
*/
@@ -148,13 +156,37 @@ static TAILQ_HEAD(, _ring) odp_ring_list;
} \
} while (0)
-static odp_rwlock_t qlock; /* rings tailq lock */
-/* init tailq_ring */
-void _ring_tailq_init(void)
+/* Initialize tailq_ring */
+int _ring_tailq_init(void)
+{
+ odp_shm_t shm;
+
+ /* Allocate globally shared memory */
+ shm = odp_shm_reserve("_odp_ring_global", sizeof(global_data_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ if (ODP_SHM_INVALID == shm) {
+ ODP_ERR("Shm reserve failed for pktio ring\n");
+ return -1;
+ }
+
+ global = odp_shm_addr(shm);
+ memset(global, 0, sizeof(global_data_t));
+ global->shm = shm;
+
+ TAILQ_INIT(&global->ring_list);
+ odp_rwlock_init(&global->qlock);
+
+ return 0;
+}
+
+/* Terminate tailq_ring */
+int _ring_tailq_term(void)
{
- TAILQ_INIT(&odp_ring_list);
- odp_rwlock_init(&qlock);
+ if (odp_shm_free(global->shm)) {
+ ODP_ERR("Shm free failed for pktio ring\n");
+ return -1;
+ }
+ return 0;
}
/* create the ring */
@@ -171,6 +203,8 @@ _ring_create(const char *name, unsigned count, unsigned flags)
shm_flag = ODP_SHM_PROC | ODP_SHM_EXPORT;
else
shm_flag = 0;
+ if (odp_global_ro.shm_single_va)
+ shm_flag |= ODP_SHM_SINGLE_VA;
/* count must be a power of 2 */
if (!RING_VAL_IS_POWER_2(count) || (count > _RING_SZ_MASK)) {
@@ -184,7 +218,7 @@ _ring_create(const char *name, unsigned count, unsigned flags)
snprintf(ring_name, sizeof(ring_name), "%s", name);
ring_size = count * sizeof(void *) + sizeof(_ring_t);
- odp_rwlock_write_lock(&qlock);
+ odp_rwlock_write_lock(&global->qlock);
/* reserve a memory zone for this ring.*/
shm = odp_shm_reserve(ring_name, ring_size, ODP_CACHE_LINE_SIZE,
shm_flag);
@@ -205,13 +239,13 @@ _ring_create(const char *name, unsigned count, unsigned flags)
r->cons.tail = 0;
if (!(flags & _RING_NO_LIST))
- TAILQ_INSERT_TAIL(&odp_ring_list, r, next);
+ TAILQ_INSERT_TAIL(&global->ring_list, r, next);
} else {
__odp_errno = ENOMEM;
ODP_ERR("Cannot reserve memory\n");
}
- odp_rwlock_write_unlock(&qlock);
+ odp_rwlock_write_unlock(&global->qlock);
return r;
}
@@ -222,10 +256,10 @@ int _ring_destroy(const char *name)
if (shm != ODP_SHM_INVALID) {
_ring_t *r = odp_shm_addr(shm);
- odp_rwlock_write_lock(&qlock);
+ odp_rwlock_write_lock(&global->qlock);
if (!(r->flags & _RING_NO_LIST))
- TAILQ_REMOVE(&odp_ring_list, r, next);
- odp_rwlock_write_unlock(&qlock);
+ TAILQ_REMOVE(&global->ring_list, r, next);
+ odp_rwlock_write_unlock(&global->qlock);
return odp_shm_free(shm);
}
@@ -439,13 +473,13 @@ void _ring_list_dump(void)
{
const _ring_t *mp = NULL;
- odp_rwlock_read_lock(&qlock);
+ odp_rwlock_read_lock(&global->qlock);
- TAILQ_FOREACH(mp, &odp_ring_list, next) {
+ TAILQ_FOREACH(mp, &global->ring_list, next) {
_ring_dump(mp);
}
- odp_rwlock_read_unlock(&qlock);
+ odp_rwlock_read_unlock(&global->qlock);
}
/* search a ring from its name */
@@ -453,12 +487,12 @@ _ring_t *_ring_lookup(const char *name)
{
_ring_t *r;
- odp_rwlock_read_lock(&qlock);
- TAILQ_FOREACH(r, &odp_ring_list, next) {
+ odp_rwlock_read_lock(&global->qlock);
+ TAILQ_FOREACH(r, &global->ring_list, next) {
if (strncmp(name, r->name, _RING_NAMESIZE) == 0)
break;
}
- odp_rwlock_read_unlock(&qlock);
+ odp_rwlock_read_unlock(&global->qlock);
return r;
}
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 459c65525..861152288 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -38,6 +38,7 @@
#include <odp_classification_inlines.h>
#include <odp_classification_internal.h>
#include <odp/api/hints.h>
+#include <odp_global_data.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
@@ -102,9 +103,8 @@ static int set_pkt_sock_fanout_mmap(pkt_sock_mmap_t *const pkt_sock,
union frame_map {
struct {
struct tpacket2_hdr ODP_ALIGNED(TPACKET_ALIGNMENT) tp_h;
- struct sockaddr_ll
- ODP_ALIGNED(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)))
- s_ll;
+ struct sockaddr_ll ODP_ALIGNED(TPACKET_ALIGN(sizeof(struct
+ tpacket2_hdr))) s_ll;
} *v2;
void *raw;
@@ -462,6 +462,8 @@ static int mmap_setup_ring(int sock, struct ring *ring, int type,
odp_pool_t pool_hdl, int fanout)
{
int ret = 0;
+ int flags = 0;
+ odp_shm_t shm;
ring->sock = sock;
ring->type = type;
@@ -477,10 +479,21 @@ static int mmap_setup_ring(int sock, struct ring *ring, int type,
}
ring->rd_len = ring->rd_num * sizeof(*ring->rd);
- ring->rd = malloc(ring->rd_len);
+
+ if (odp_global_ro.shm_single_va)
+ flags |= ODP_SHM_SINGLE_VA;
+
+ shm = odp_shm_reserve(NULL, ring->rd_len, ODP_CACHE_LINE_SIZE, flags);
+
+ if (shm == ODP_SHM_INVALID) {
+ ODP_ERR("Reserving shm failed\n");
+ return -1;
+ }
+ ring->shm = shm;
+
+ ring->rd = odp_shm_addr(shm);
if (!ring->rd) {
- __odp_errno = errno;
- ODP_ERR("malloc(): %s\n", strerror(errno));
+ ODP_ERR("Reading shm addr failed\n");
return -1;
}
@@ -533,8 +546,10 @@ static int mmap_sock(pkt_sock_mmap_t *pkt_sock)
static int mmap_unmap_sock(pkt_sock_mmap_t *pkt_sock)
{
- free(pkt_sock->rx_ring.rd);
- free(pkt_sock->tx_ring.rd);
+ if (pkt_sock->rx_ring.shm != ODP_SHM_INVALID)
+ odp_shm_free(pkt_sock->rx_ring.shm);
+ if (pkt_sock->tx_ring.shm != ODP_SHM_INVALID)
+ odp_shm_free(pkt_sock->tx_ring.shm);
return munmap(pkt_sock->mmap_base, pkt_sock->mmap_len);
}
@@ -605,6 +620,8 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
pkt_sock->frame_offset = 0;
pkt_sock->pool = pool;
+ pkt_sock->rx_ring.shm = ODP_SHM_INVALID;
+ pkt_sock->tx_ring.shm = ODP_SHM_INVALID;
pkt_sock->sockfd = mmap_pkt_socket();
if (pkt_sock->sockfd == -1)
goto error;
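
Replacing malloc() with odp_shm_reserve() here serves the same goal: the packet ring descriptors must stay reachable when pktio is driven from another process. When the global read-only config enables it, ODP_SHM_SINGLE_VA additionally requests the same virtual address in every thread and process, so raw pointers into the block remain valid everywhere. A hedged sketch of the lifecycle, with len standing in for the real descriptor size:

odp_shm_t shm = ODP_SHM_INVALID;     /* sentinel until reserved */
uint32_t flags = 0;

if (odp_global_ro.shm_single_va)
	flags |= ODP_SHM_SINGLE_VA;  /* same VA in all processes */

shm = odp_shm_reserve(NULL, len, ODP_CACHE_LINE_SIZE, flags);
if (shm == ODP_SHM_INVALID)
	return -1;

rd = odp_shm_addr(shm);
/* ... use the ring descriptors ... */
if (shm != ODP_SHM_INVALID)
	odp_shm_free(shm);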
diff --git a/platform/linux-generic/test/Makefile.am b/platform/linux-generic/test/Makefile.am
index 99934099a..03a1474da 100644
--- a/platform/linux-generic/test/Makefile.am
+++ b/platform/linux-generic/test/Makefile.am
@@ -1,7 +1,7 @@
include $(top_srcdir)/test/Makefile.inc
TESTS_ENVIRONMENT += TEST_DIR=${top_builddir}/test/validation
-SUBDIRS = performance
+SUBDIRS =
if test_vald
TESTS = validation/api/pktio/pktio_run.sh \
@@ -9,10 +9,10 @@ TESTS = validation/api/pktio/pktio_run.sh \
validation/api/shmem/shmem_linux$(EXEEXT)
SUBDIRS += validation/api/pktio\
- validation/api/shmem\
- mmap_vlan_ins\
- pktio_ipc\
- ring
+ validation/api/shmem\
+ mmap_vlan_ins\
+ pktio_ipc\
+ ring
if HAVE_PCAP
TESTS += validation/api/pktio/pktio_run_pcap.sh
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
new file mode 100644
index 000000000..6cae241da
--- /dev/null
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -0,0 +1,8 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.6"
+
+timer: {
+ # Enable inline timer implementation
+ inline = 1
+}
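
With inline timers the implementation expires timers on the worker cores themselves rather than in a background thread, so timeout events surface while workers poll the scheduler. The linux-generic build picks such overrides up from the file named by the ODP_CONFIG_FILE environment variable. A sketch of the receive side, which is unchanged from the application's point of view:

odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);

if (odp_event_type(ev) == ODP_EVENT_TIMEOUT) {
	odp_timeout_t tmo = odp_timeout_from_event(ev);

	/* handle the expired timer, then free the timeout event */
	odp_timeout_free(tmo);
}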
diff --git a/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c b/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c
index cf3d6d932..75ea0a7af 100644
--- a/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c
+++ b/platform/linux-generic/test/mmap_vlan_ins/mmap_vlan_ins.c
@@ -19,14 +19,15 @@
#define MAX_PKT_BURST 32
#define MAX_WORKERS 1
-static int g_ret;
-
-struct {
+typedef struct {
odp_pktio_t if0, if1;
odp_pktin_queue_t if0in, if1in;
odp_pktout_queue_t if0out, if1out;
odph_ethaddr_t src, dst;
-} global;
+ int g_ret;
+} global_data_t;
+
+static global_data_t *global;
static odp_pktio_t create_pktio(const char *name, odp_pool_t pool,
odp_pktin_queue_t *pktin,
@@ -80,12 +81,12 @@ static int run_worker(void *arg ODP_UNUSED)
int total_pkts = 0;
uint64_t wait_time = odp_pktin_wait_time(2 * ODP_TIME_SEC_IN_NS);
- if (odp_pktio_start(global.if0)) {
+ if (odp_pktio_start(global->if0)) {
printf("unable to start input interface\n");
exit(1);
}
printf("started input interface\n");
- if (odp_pktio_start(global.if1)) {
+ if (odp_pktio_start(global->if1)) {
printf("unable to start output interface\n");
exit(1);
}
@@ -93,7 +94,7 @@ static int run_worker(void *arg ODP_UNUSED)
printf("started all\n");
while (1) {
- pkts = odp_pktin_recv_tmo(global.if0in, pkt_tbl, MAX_PKT_BURST,
+ pkts = odp_pktin_recv_tmo(global->if0in, pkt_tbl, MAX_PKT_BURST,
wait_time);
if (odp_unlikely(pkts <= 0)) {
printf("recv tmo!\n");
@@ -109,10 +110,10 @@ static int run_worker(void *arg ODP_UNUSED)
return 0;
}
eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- eth->src = global.src;
- eth->dst = global.dst;
+ eth->src = global->src;
+ eth->dst = global->dst;
}
- sent = odp_pktout_send(global.if1out, pkt_tbl, pkts);
+ sent = odp_pktout_send(global->if1out, pkt_tbl, pkts);
if (sent < 0)
sent = 0;
total_pkts += sent;
@@ -124,7 +125,7 @@ static int run_worker(void *arg ODP_UNUSED)
printf("Total send packets: %d\n", total_pkts);
if (total_pkts < 10)
- g_ret = -1;
+ global->g_ret = -1;
return 0;
}
@@ -134,12 +135,20 @@ int main(int argc, char **argv)
odp_pool_t pool;
odp_pool_param_t params;
odp_cpumask_t cpumask;
+ odph_helper_options_t helper_options;
odph_odpthread_t thd[MAX_WORKERS];
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
+ odp_shm_t shm;
+ int ret;
- /* let helper collect its own arguments (e.g. --odph_proc) */
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ printf("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
if (argc < 3) {
printf("Too few arguments (%i).\n"
@@ -147,7 +156,10 @@ int main(int argc, char **argv)
exit(0);
}
- if (odp_init_global(&instance, NULL, NULL)) {
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (odp_init_global(&instance, &init_param, NULL)) {
printf("Error: ODP global init failed.\n");
exit(1);
}
@@ -157,6 +169,17 @@ int main(int argc, char **argv)
exit(1);
}
+ /* Reserve shared memory for the application's global data */
+ shm = odp_shm_reserve("_appl_global_data", sizeof(global_data_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ global = odp_shm_addr(shm);
+ if (global == NULL) {
+ printf("Error: shared mem alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memset(global, 0, sizeof(global_data_t));
+
/* Create packet pool */
odp_pool_param_init(&params);
params.pkt.seg_len = POOL_SEG_LEN;
@@ -171,8 +194,10 @@ int main(int argc, char **argv)
exit(1);
}
- global.if0 = create_pktio(argv[1], pool, &global.if0in, &global.if0out);
- global.if1 = create_pktio(argv[2], pool, &global.if1in, &global.if1out);
+ global->if0 = create_pktio(argv[1], pool, &global->if0in,
+ &global->if0out);
+ global->if1 = create_pktio(argv[2], pool, &global->if1in,
+ &global->if1out);
odp_cpumask_default_worker(&cpumask, MAX_WORKERS);
@@ -185,11 +210,27 @@ int main(int argc, char **argv)
odph_odpthreads_create(thd, &cpumask, &thr_params);
odph_odpthreads_join(thd);
+ ret = global->g_ret;
+
+ if (odp_pktio_stop(global->if0) || odp_pktio_close(global->if0)) {
+ printf("Error: failed to close interface %s\n", argv[1]);
+ exit(EXIT_FAILURE);
+ }
+ if (odp_pktio_stop(global->if1) || odp_pktio_close(global->if1)) {
+ printf("Error: failed to close interface %s\n", argv[2]);
+ exit(EXIT_FAILURE);
+ }
+
if (odp_pool_destroy(pool)) {
printf("Error: pool destroy\n");
exit(EXIT_FAILURE);
}
+ if (odp_shm_free(shm)) {
+ printf("Error: shm free global data\n");
+ exit(EXIT_FAILURE);
+ }
+
if (odp_term_local()) {
printf("Error: term local\n");
exit(EXIT_FAILURE);
@@ -200,5 +241,5 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
- return g_ret;
+ return ret;
}
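
This file also shows the second recurring change: command-line options collected by the helper now feed odp_init_global(), so --odph_proc can switch workers from threads to processes. The distilled sequence, as repeated in the tests below (error handling shortened):

odph_helper_options_t helper_options;
odp_init_t init_param;
odp_instance_t instance;

argc = odph_parse_options(argc, argv);
if (odph_options(&helper_options))
	exit(EXIT_FAILURE);

odp_init_param_init(&init_param);
init_param.mem_model = helper_options.mem_model; /* thread or process */

if (odp_init_global(&instance, &init_param, NULL))
	exit(EXIT_FAILURE);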
diff --git a/platform/linux-generic/test/performance/.gitignore b/platform/linux-generic/test/performance/.gitignore
deleted file mode 100644
index 7e563b8b3..000000000
--- a/platform/linux-generic/test/performance/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-*.log
-*.trs
diff --git a/platform/linux-generic/test/performance/Makefile.am b/platform/linux-generic/test/performance/Makefile.am
deleted file mode 100644
index 0b5da671f..000000000
--- a/platform/linux-generic/test/performance/Makefile.am
+++ /dev/null
@@ -1,13 +0,0 @@
-include $(top_srcdir)/test/Makefile.inc
-
-TESTS_ENVIRONMENT += TEST_DIR=${builddir}
-
-TESTSCRIPTS = odp_scheduling_run_proc.sh
-
-TEST_EXTENSIONS = .sh
-
-if test_perf_proc
-TESTS = $(TESTSCRIPTS)
-endif
-
-dist_check_SCRIPTS = $(TESTSCRIPTS)
diff --git a/platform/linux-generic/test/performance/odp_scheduling_run_proc.sh b/platform/linux-generic/test/performance/odp_scheduling_run_proc.sh
deleted file mode 100755
index c16bcb86c..000000000
--- a/platform/linux-generic/test/performance/odp_scheduling_run_proc.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2016-2018, Linaro Limited
-# All rights reserved.
-#
-# SPDX-License-Identifier: BSD-3-Clause
-#
-# Script that passes command line arguments to odp_scheduling test when
-# launched by 'make check'
-
-TEST_DIR="${TEST_DIR:-$(dirname $0)}"
-PERFORMANCE="$TEST_DIR/../../../../test/performance"
-ret=0
-ALL=0
-
-run()
-{
- echo odp_scheduling_run starts requesting $1 worker threads
- echo =====================================================
-
- $PERFORMANCE/odp_scheduling${EXEEXT} --odph_proc -c $1 || ret=1
-}
-
-run 1
-run 5
-run 8
-run 11
-run $ALL
-
-exit $ret
diff --git a/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c b/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c
index e704d7a9d..4f77306de 100644
--- a/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c
+++ b/platform/linux-generic/test/pktio_ipc/pktio_ipc1.c
@@ -96,6 +96,9 @@ static int pktio_run_loop(odp_pool_t pool)
ret = odp_pktio_start(ipc_pktio);
if (!ret)
break;
+
+ /* Reduce polling frequency to once per 50ms */
+ odp_time_wait_ns(50 * ODP_TIME_MSEC_IN_NS);
}
/* packets loop */
diff --git a/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c b/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c
index 89ebea6d0..e6270f89d 100644
--- a/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c
+++ b/platform/linux-generic/test/pktio_ipc/pktio_ipc2.c
@@ -97,6 +97,9 @@ static int ipc_second_process(int master_pid)
ret = odp_pktio_start(ipc_pktio);
if (!ret)
break;
+
+ /* Reduce polling frequency to once per 50ms */
+ odp_time_wait_ns(50 * ODP_TIME_MSEC_IN_NS);
}
for (;;) {
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
new file mode 100644
index 000000000..fc8974944
--- /dev/null
+++ b/platform/linux-generic/test/process-mode.conf
@@ -0,0 +1,9 @@
+# Mandatory fields
+odp_implementation = "linux-generic"
+config_file_version = "0.1.6"
+
+# Shared memory options
+shm: {
+ # Increase the amount of single VA memory to 1 GiB (1048576 kB)
+ single_va_size_kb = 1048576
+}
diff --git a/platform/linux-generic/test/ring/ring_stress.c b/platform/linux-generic/test/ring/ring_stress.c
index c9bd53084..ddabffad0 100644
--- a/platform/linux-generic/test/ring/ring_stress.c
+++ b/platform/linux-generic/test/ring/ring_stress.c
@@ -34,12 +34,6 @@
#define NUM_BULK_OP ((RING_SIZE / PIECE_BULK) * 100)
/*
- * Since cunit framework cannot work with multi-threading, ask workers
- * to save their results for delayed assertion after thread collection.
- */
-static int worker_results[MAX_WORKERS];
-
-/*
* Note: make sure that both enqueue and dequeue
* operations start at the same time to avoid data corruption.
* It's because the atomic lock protects only indexes, but if order of
@@ -54,18 +48,42 @@ typedef enum {
STRESS_N_M_PRODUCER_CONSUMER
} stress_case_t;
+#define GLOBAL_SHM_NAME "RingGlobalShm"
+
/* worker function declarations */
static int stress_worker(void *_data);
/* global name for later look up in workers' context */
static const char *ring_name = "stress_ring";
-/* barrier to run threads at the same time */
-static odp_barrier_t barrier;
+typedef struct {
+ odp_shm_t shm;
+ /* Barrier to run threads at the same time */
+ odp_barrier_t barrier;
+ /*
+ * Since cunit framework cannot work with multi-threading, ask workers
+ * to save their results for delayed assertion after thread collection.
+ */
+ int worker_results[MAX_WORKERS];
+} global_shared_mem_t;
+
+static global_shared_mem_t *global_mem;
int ring_test_stress_start(void)
{
_ring_t *r_stress = NULL;
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve(GLOBAL_SHM_NAME, sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY);
+ if (shm == ODP_SHM_INVALID) {
+ fprintf(stderr, "Unable reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+ global_mem->shm = shm;
/* multiple thread usage scenario, thread or process sharable */
r_stress = _ring_create(ring_name, RING_SIZE, _RING_SHM_PROC);
@@ -79,7 +97,11 @@ int ring_test_stress_start(void)
int ring_test_stress_end(void)
{
- _ring_destroy(ring_name);
+ if (odp_shm_free(global_mem->shm)) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return -1;
+ }
+
return 0;
}
@@ -90,7 +112,8 @@ void ring_test_stress_1_1_producer_consumer(void)
pthrd_arg worker_param;
/* reset results for delayed assertion */
- memset(worker_results, 0, sizeof(worker_results));
+ memset(global_mem->worker_results, 0,
+ sizeof(global_mem->worker_results));
/* request 2 threads to run 1:1 stress */
worker_param.numthrds = odp_cpumask_default_worker(&cpus, 2);
@@ -103,7 +126,7 @@ void ring_test_stress_1_1_producer_consumer(void)
return;
}
- odp_barrier_init(&barrier, 2);
+ odp_barrier_init(&global_mem->barrier, 2);
/* kick the workers */
odp_cunit_thread_create(stress_worker, &worker_param);
@@ -113,7 +136,7 @@ void ring_test_stress_1_1_producer_consumer(void)
/* delayed assertion due to cunit limitation */
for (i = 0; i < worker_param.numthrds; i++)
- CU_ASSERT(0 == worker_results[i]);
+ CU_ASSERT(0 == global_mem->worker_results[i]);
}
void ring_test_stress_N_M_producer_consumer(void)
@@ -123,7 +146,8 @@ void ring_test_stress_N_M_producer_consumer(void)
pthrd_arg worker_param;
/* reset results for delayed assertion */
- memset(worker_results, 0, sizeof(worker_results));
+ memset(global_mem->worker_results, 0,
+ sizeof(global_mem->worker_results));
/* request MAX_WORKERS threads to run N:M stress */
worker_param.numthrds =
@@ -141,7 +165,7 @@ void ring_test_stress_N_M_producer_consumer(void)
if (worker_param.numthrds & 0x1)
worker_param.numthrds -= 1;
- odp_barrier_init(&barrier, worker_param.numthrds);
+ odp_barrier_init(&global_mem->barrier, worker_param.numthrds);
/* kick the workers */
odp_cunit_thread_create(stress_worker, &worker_param);
@@ -151,7 +175,7 @@ void ring_test_stress_N_M_producer_consumer(void)
/* delayed assertion due to cunit limitation */
for (i = 0; i < worker_param.numthrds; i++)
- CU_ASSERT(0 == worker_results[i]);
+ CU_ASSERT(0 == global_mem->worker_results[i]);
}
void ring_test_stress_1_N_producer_consumer(void)
@@ -214,7 +238,8 @@ static int stress_worker(void *_data)
int worker_id = odp_thread_id();
/* save the worker result for delayed assertion */
- result = &worker_results[(worker_id % worker_param->numthrds)];
+ result = &global_mem->worker_results[(worker_id %
+ worker_param->numthrds)];
/* verify ring lookup in worker context */
r_stress = _ring_lookup(ring_name);
@@ -223,7 +248,7 @@ static int stress_worker(void *_data)
return (*result = -1);
}
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&global_mem->barrier);
switch (worker_param->testcase) {
case STRESS_1_1_PRODUCER_CONSUMER:
@@ -242,7 +267,7 @@ static int stress_worker(void *_data)
break;
}
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&global_mem->barrier);
return 0;
}
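
The delayed-assertion pattern referenced in the comment is worth a sketch: CUnit assertions are only safe on the thread running the test case, so workers just record a verdict in the shm-backed array and the main thread asserts after joining. num_threads and run_case() below are hypothetical stand-ins:

static int worker_sketch(void *arg ODP_UNUSED)
{
	int idx = odp_thread_id() % num_threads;   /* hypothetical */
	int *result = &global_mem->worker_results[idx];

	*result = run_case() ? -1 : 0; /* record, never CU_ASSERT here */
	return 0;
}

/* main thread, after the workers have been joined: */
for (i = 0; i < num_threads; i++)
	CU_ASSERT(global_mem->worker_results[i] == 0);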
diff --git a/platform/linux-generic/test/validation/api/shmem/shmem_linux.c b/platform/linux-generic/test/validation/api/shmem/shmem_linux.c
index 2bc7f1bdc..6b09c1dea 100644
--- a/platform/linux-generic/test/validation/api/shmem/shmem_linux.c
+++ b/platform/linux-generic/test/validation/api/shmem/shmem_linux.c
@@ -110,7 +110,8 @@
static int read_shmem_attribues(uint64_t ext_odp_pid, const char *blockname,
char *filename, uint64_t *len,
uint32_t *flags, uint64_t *user_len,
- uint32_t *user_flags, uint32_t *align)
+ uint32_t *user_flags, uint32_t *align,
+ uint64_t *offset)
{
char shm_attr_filename[PATH_MAX];
FILE *export_file;
@@ -151,6 +152,9 @@ static int read_shmem_attribues(uint64_t ext_odp_pid, const char *blockname,
if (fscanf(export_file, "align: %" PRIu32 " ", align) != 1)
goto export_file_read_err;
+ if (fscanf(export_file, "offset: %" PRIu64 " ", offset) != 1)
+ goto export_file_read_err;
+
fclose(export_file);
return 0;
@@ -209,6 +213,7 @@ int main(int argc __attribute__((unused)), char *argv[])
int fifo_fd = -1;
char shm_filename[PATH_MAX];/* shared mem device name, under /dev/shm */
uint64_t len;
+ uint64_t offset;
uint32_t flags;
uint64_t user_len;
uint32_t user_flags;
@@ -260,8 +265,9 @@ int main(int argc __attribute__((unused)), char *argv[])
/* read the shared memory attributes (includes the shm filename): */
if (read_shmem_attribues(odp_app1, SHM_NAME,
shm_filename, &len, &flags,
- &user_len, &user_flags, &align) != 0) {
- printf("erorr read_shmem_attribues\n");
+ &user_len, &user_flags, &align,
+ &offset) != 0) {
+ printf("error read_shmem_attribues\n");
test_failure(fifo_name, fifo_fd, odp_app1);
}
@@ -281,9 +287,10 @@ int main(int argc __attribute__((unused)), char *argv[])
*/
size = sizeof(test_shared_linux_data_t);
- addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, 0);
+ addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, offset);
if (addr == MAP_FAILED) {
- fprintf(stderr, "shmem_linux: map failed!\n");
+ fprintf(stderr, "shmem_linux: mmap failed: %s\n",
+ strerror(errno));
test_failure(fifo_name, fifo_fd, odp_app1);
}
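
The new offset attribute exists because an exported block does not necessarily start at the beginning of its backing file. Since mmap() rejects offsets that are not page aligned (EINVAL), a defensive reader could check before mapping; a small sketch, assuming <unistd.h>:

long page = sysconf(_SC_PAGESIZE);

if (offset % (uint64_t)page != 0)
	return -1;   /* mmap() would fail with EINVAL anyway */

addr = mmap(NULL, size, PROT_READ, MAP_SHARED, shm_fd, (off_t)offset);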
diff --git a/scripts/Dockerfile b/scripts/Dockerfile
deleted file mode 100644
index b4c03ca48..000000000
--- a/scripts/Dockerfile
+++ /dev/null
@@ -1,28 +0,0 @@
-FROM drydockaarch64/u16:v5.10.1
-
-RUN if $(sudo update-alternatives --list gcc); \
- then sudo update-alternatives --remove-all gcc; \
- fi
-
-RUN sudo apt-get update && sudo apt-get install -yy \
- autoconf \
- automake \
- ccache \
- clang-4.0 \
- gcc-4.8 \
- graphviz \
- kmod \
- libconfig-dev \
- libcunit1-dev \
- libnuma-dev \
- libpcap-dev \
- libssl-dev \
- libtool \
- linux-headers-`uname -r` \
- mscgen \
- ruby-dev \
- xsltproc
-
-RUN sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-4.8 10
-RUN sudo ln -s /usr/bin/clang-4.0 /usr/bin/clang
-RUN sudo ln -s /usr/bin/clang++-4.0 /usr/bin/clang++
diff --git a/scripts/build-pktio-dpdk b/scripts/build-pktio-dpdk
deleted file mode 100755
index b0c0a4d0e..000000000
--- a/scripts/build-pktio-dpdk
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/bin/bash
-
-TARGET=${TARGET:-"x86_64-native-linuxapp-gcc"}
-
-export ROOT_DIR=$(readlink -e $(dirname $0) | sed 's|/scripts||')
-pushd ${ROOT_DIR}
-
-echo '#include "pcap.h"' | cpp -H -o /dev/null 2>&1
-if [ "$?" != "0" ]; then
- echo "Error: pcap is not installed. You may need to install libpcap-dev"
-fi
-
-echo '#include "numa.h"' | cpp -H -o /dev/null 2>&1
-if [ "$?" != "0" ]; then
- echo "Error: NUMA library is not installed. You need to install libnuma-dev"
- exit 1
-fi
-
-git -c advice.detachedHead=false clone -q --depth=1 --single-branch --branch=17.11 http://dpdk.org/git/dpdk-stable dpdk
-pushd dpdk
-git log --oneline --decorate
-
-#Make and edit DPDK configuration
-make config T=${TARGET} O=${TARGET}
-pushd ${TARGET}
-#To use I/O without DPDK supported NIC's enable pcap pmd:
-sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config
-popd
-
-#Build DPDK
-make build O=${TARGET} EXTRA_CFLAGS="-fPIC"
-make install O=${TARGET} DESTDIR=${TARGET}
-popd
-
-#Build ODP
-./bootstrap;
-./configure --enable-test-vald --enable-test-perf --enable-test-cpp \
- --enable-debug --enable-debug-print \
- --with-dpdk-path=`pwd`/dpdk/${TARGET}/usr/local
-make
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d5ffd1ddd..a0d189722 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2487,7 +2487,7 @@ sub process {
# Check the patch for a signoff:
if ($line =~ /^\s*signed-off-by:/i) {
$signoff++;
- $in_commit_log = 0;
+ #$in_commit_log = 0;
}
# Check if MAINTAINERS is being updated. If so, there's probably no need to
@@ -2497,7 +2497,7 @@ sub process {
}
# Check signature styles
- if (!$in_header_lines &&
+ if (!$in_header_lines && $in_commit_log &&
$line =~ /^(\s*)([a-z0-9_-]+by:|$signature_tags)(\s*)(.*)/i) {
my $space_before = $1;
my $sign_off = $2;
diff --git a/scripts/ci/build.sh b/scripts/ci/build.sh
new file mode 100755
index 000000000..21922a493
--- /dev/null
+++ b/scripts/ci/build.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+set -e
+
+cd "$(dirname "$0")"/../..
+./bootstrap
+./configure \
+ --host=${TARGET_ARCH} --build=x86_64-linux-gnu \
+ --enable-dpdk \
+ --prefix=/opt/odp \
+ ${CONF}
+
+make -j $(nproc)
+
+make install
+
+pushd ${HOME}
+${CC} ${CFLAGS} ${OLDPWD}/example/hello/odp_hello.c -o odp_hello_inst_dynamic `PKG_CONFIG_PATH=/opt/odp/lib/pkgconfig:${PKG_CONFIG_PATH} pkg-config --cflags --libs libodp-linux`
+if [ -z "$TARGET_ARCH" ]
+then
+ LD_LIBRARY_PATH="/opt/odp/lib:$LD_LIBRARY_PATH" ./odp_hello_inst_dynamic
+fi
+popd
diff --git a/scripts/ci/build_arm64.sh b/scripts/ci/build_arm64.sh
new file mode 100755
index 000000000..647dd29cf
--- /dev/null
+++ b/scripts/ci/build_arm64.sh
@@ -0,0 +1,14 @@
+#!/bin/bash
+set -e
+
+export TARGET_ARCH=aarch64-linux-gnu
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CC="clang --target=${TARGET_ARCH}"
+ export CXX="clang++ --target=${TARGET_ARCH}"
+else
+ export CC="${TARGET_ARCH}-gcc"
+ export CXX="${TARGET_ARCH}-g++"
+fi
+export CPPFLAGS="-I/usr/include/${TARGET_ARCH}/dpdk"
+
+exec "$(dirname "$0")"/build.sh
diff --git a/scripts/ci/build_armhf.sh b/scripts/ci/build_armhf.sh
new file mode 100755
index 000000000..837561f83
--- /dev/null
+++ b/scripts/ci/build_armhf.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e
+
+export TARGET_ARCH=arm-linux-gnueabihf
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CC="clang --target=${TARGET_ARCH}"
+ export CXX="clang++ --target=${TARGET_ARCH}"
+else
+ export CC="${TARGET_ARCH}-gcc"
+ export CXX="${TARGET_ARCH}-g++"
+fi
+export CPPFLAGS="-I/usr/include/${TARGET_ARCH}/dpdk"
+export CFLAGS="-march=armv7-a"
+export CXXFLAGS="-march=armv7-a"
+
+exec "$(dirname "$0")"/build.sh
diff --git a/scripts/ci/build_i386.sh b/scripts/ci/build_i386.sh
new file mode 100755
index 000000000..17b6bf668
--- /dev/null
+++ b/scripts/ci/build_i386.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+export TARGET_ARCH=i686-linux-gnu
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CC="clang --target=${TARGET_ARCH}"
+ export CXX="clang++ --target=${TARGET_ARCH}"
+else
+ export CFLAGS="-m32"
+ export CXXFLAGS="-m32"
+ export LDFLAGS="-m32"
+fi
+export CPPFLAGS="-I/usr/include/i386-linux-gnu/dpdk"
+
+exec "$(dirname "$0")"/build.sh
diff --git a/scripts/ci/build_powerpc.sh b/scripts/ci/build_powerpc.sh
new file mode 100755
index 000000000..a213ee1d3
--- /dev/null
+++ b/scripts/ci/build_powerpc.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+set -e
+
+export TARGET_ARCH=powerpc-linux-gnu
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CC="clang --target=${TARGET_ARCH}"
+ export CXX="clang++ --target=${TARGET_ARCH}"
+else
+ export CC="${TARGET_ARCH}-gcc"
+ export CXX="${TARGET_ARCH}-g++"
+fi
+# No DPDK on PowerPC
+export CONF="${CONF} --disable-dpdk"
+
+exec "$(dirname "$0")"/build.sh
diff --git a/scripts/ci/build_x86_64.sh b/scripts/ci/build_x86_64.sh
new file mode 100755
index 000000000..01182fd90
--- /dev/null
+++ b/scripts/ci/build_x86_64.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+set -e
+
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CXX="clang++"
+fi
+
+exec "$(dirname "$0")"/build.sh
diff --git a/scripts/ci/check.sh b/scripts/ci/check.sh
new file mode 100755
index 000000000..85cee498c
--- /dev/null
+++ b/scripts/ci/check.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+set -e
+
+echo 1000 | tee /proc/sys/vm/nr_hugepages
+mkdir -p /mnt/huge
+mount -t hugetlbfs nodev /mnt/huge
+
+"`dirname "$0"`"/build_x86_64.sh
+
+cd "$(dirname "$0")"/../..
+
+# Ignore possible failures here because these tests depend on measurements
+# and systems might differ in performance.
+export CI="true"
+make check
+
+umount /mnt/huge
diff --git a/scripts/ci/check_inline_timer.sh b/scripts/ci/check_inline_timer.sh
new file mode 100755
index 000000000..d2eff7145
--- /dev/null
+++ b/scripts/ci/check_inline_timer.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+set -e
+
+"`dirname "$0"`"/build_x86_64.sh
+
+cd "$(dirname "$0")"/../..
+
+echo 1000 | tee /proc/sys/vm/nr_hugepages
+mkdir -p /mnt/huge
+mount -t hugetlbfs nodev /mnt/huge
+
+ODP_SCHEDULER=basic ./test/validation/api/timer/timer_main
+ODP_SCHEDULER=sp ./test/validation/api/timer/timer_main
+ODP_SCHEDULER=scalable ./test/validation/api/timer/timer_main
+
+umount /mnt/huge
diff --git a/scripts/ci/coverage.sh b/scripts/ci/coverage.sh
new file mode 100755
index 000000000..9640b7114
--- /dev/null
+++ b/scripts/ci/coverage.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+set -e
+
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CXX="clang++"
+fi
+
+cd "$(dirname "$0")"/../..
+./bootstrap
+./configure \
+ CFLAGS="-O0 -coverage $CLFAGS" CXXFLAGS="-O0 -coverage $CXXFLAGS" LDFLAGS="--coverage $LDFLAGS" \
+ --enable-debug=full --enable-helper-linux --enable-dpdk --disable-test-perf --disable-test-perf-proc
+export CCACHE_DISABLE=1
+make -j $(nproc)
+
+echo 1000 | tee /proc/sys/vm/nr_hugepages
+mkdir -p /mnt/huge
+mount -t hugetlbfs nodev /mnt/huge
+
+# Ignore possible failures here because these tests depend on measurements
+# and systems might differ in performance.
+export CI="true"
+
+ODP_SCHEDULER=basic make check
+ODP_SCHEDULER=sp make check
+ODP_SCHEDULER=scalable make check
+
+bash <(curl -s https://codecov.io/bash) -X coveragepy
+
+umount /mnt/huge
diff --git a/scripts/ci/distcheck.sh b/scripts/ci/distcheck.sh
new file mode 100755
index 000000000..9d45536f4
--- /dev/null
+++ b/scripts/ci/distcheck.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+set -e
+
+if [ "${CC#clang}" != "${CC}" ] ; then
+ export CXX="clang++"
+fi
+
+cd "$(dirname "$0")"/../..
+./bootstrap
+./configure ${CONF}
+
+# Ignore possible failures here because these tests depend on measurements
+# and systems might differ in performance.
+export CI="true"
+
+# Additional configure flags for distcheck
+export DISTCHECK_CONFIGURE_FLAGS="${CONF}"
+
+make distcheck
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index 6b9ded4a1..7f345fba3 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -79,7 +79,18 @@ int odp_cunit_thread_exit(pthrd_arg *arg)
static int tests_global_init(odp_instance_t *inst)
{
- if (0 != odp_init_global(inst, NULL, NULL)) {
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
@@ -87,6 +98,10 @@ static int tests_global_init(odp_instance_t *inst)
fprintf(stderr, "error: odp_init_local() failed.\n");
return -1;
}
+ if (0 != odp_schedule_config(NULL)) {
+ fprintf(stderr, "error: odp_schedule_config(NULL) failed.\n");
+ return -1;
+ }
return 0;
}
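
odp_schedule_config() is the new mandatory step between local init and the first scheduled queue; passing NULL keeps the default configuration. The resulting init sequence, sketched:

odp_instance_t inst;
odp_init_t init_param;

odp_init_param_init(&init_param);

if (odp_init_global(&inst, &init_param, NULL) ||
    odp_init_local(inst, ODP_THREAD_CONTROL))
	return -1;

/* Must run before any ODP_QUEUE_TYPE_SCHED queue is created */
if (odp_schedule_config(NULL))
	return -1;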
diff --git a/test/common/test_packet_parser.h b/test/common/test_packet_parser.h
index 2d02b98c3..0f17e8568 100644
--- a/test/common/test_packet_parser.h
+++ b/test/common/test_packet_parser.h
@@ -168,6 +168,32 @@ static const uint8_t test_packet_ipv4_sctp[] = {
0x79, 0x74, 0x65, 0x73, 0x2E
};
+/* IPv6 SCTP
+ * - chunk type: payload data
+ */
+static const uint8_t test_packet_ipv6_sctp[] = {
+ 0x00, 0x00, 0x09, 0x00, 0x05, 0x00, 0x00, 0x00,
+ 0x09, 0x00, 0x04, 0x00, 0x86, 0xDD, 0x60, 0x30,
+ 0x00, 0x00, 0x00, 0x63, 0x84, 0xFF, 0xFE, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
+ 0x09, 0xFF, 0xFE, 0x00, 0x04, 0x00, 0x35, 0x55,
+ 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x77, 0x77,
+ 0x77, 0x77, 0x88, 0x88, 0x88, 0x88, 0x04, 0xD2,
+ 0x16, 0x2E, 0xDE, 0xAD, 0xBE, 0xEF, 0x31, 0x44,
+ 0xE3, 0xFE, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00,
+ 0x00, 0x0A, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69,
+ 0x73, 0x20, 0x6D, 0x79, 0x20, 0x64, 0x75, 0x6D,
+ 0x6D, 0x79, 0x20, 0x70, 0x61, 0x79, 0x6C, 0x6F,
+ 0x61, 0x64, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6E,
+ 0x67, 0x2E, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6C,
+ 0x65, 0x6E, 0x67, 0x74, 0x68, 0x20, 0x6F, 0x66,
+ 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74,
+ 0x72, 0x69, 0x6E, 0x67, 0x20, 0x69, 0x73, 0x20,
+ 0x37, 0x31, 0x20, 0x62, 0x79, 0x74, 0x65, 0x73,
+ 0x2E
+};
+
static const uint8_t test_packet_ipv4_ipsec_ah[] = {
/* ETH */
0xf1, 0xf1, 0xf1, 0xf1, 0xf1, 0xf1,
diff --git a/test/m4/configure.m4 b/test/m4/configure.m4
index dd07839ff..b2b6cf0e0 100644
--- a/test/m4/configure.m4
+++ b/test/m4/configure.m4
@@ -1,3 +1,13 @@
+##########################################################################
+# Build and install test applications
+##########################################################################
+AC_ARG_WITH([tests],
+ [AS_HELP_STRING([--without-tests],
+ [don't build and install test applications])],
+ [],
+ [with_tests=yes])
+AM_CONDITIONAL([WITH_TESTS], [test x$with_tests != xno])
+
m4_include([test/m4/miscellaneous.m4])
m4_include([test/m4/performance.m4])
m4_include([test/m4/validation.m4])
@@ -12,6 +22,7 @@ AC_CONFIG_FILES([test/Makefile
test/validation/api/buffer/Makefile
test/validation/api/chksum/Makefile
test/validation/api/classification/Makefile
+ test/validation/api/comp/Makefile
test/validation/api/cpumask/Makefile
test/validation/api/crypto/Makefile
test/validation/api/errno/Makefile
diff --git a/test/performance/odp_bench_packet.c b/test/performance/odp_bench_packet.c
index b4f1905a9..9b779e98c 100644
--- a/test/performance/odp_bench_packet.c
+++ b/test/performance/odp_bench_packet.c
@@ -130,6 +130,8 @@ typedef struct {
bench_info_t *bench;
/** Number of benchmark functions */
int num_bench;
+ /** Break worker loop if set to 1 */
+ int exit_thread;
struct {
/** Test packet length */
uint32_t len;
@@ -166,14 +168,10 @@ typedef struct {
/** Global pointer to args */
static args_t *gbl_args;
-/** Global barrier to synchronize main and worker */
-static odp_barrier_t barrier;
-/** Break worker loop if set to 1 */
-static int exit_thread;
static void sig_handler(int signo ODP_UNUSED)
{
- exit_thread = 1;
+ gbl_args->exit_thread = 1;
}
/**
@@ -188,7 +186,7 @@ static void run_indef(args_t *args, int idx)
printf("Running %s() indefinitely\n", desc);
- while (!exit_thread) {
+ while (!gbl_args->exit_thread) {
int ret;
if (args->bench[idx].init != NULL)
@@ -1317,9 +1315,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "b:i:h";
- /* Let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->bench_idx = 0; /* Run all benchmarks */
appl_args->burst_size = TEST_DEF_BURST;
@@ -1513,6 +1508,7 @@ bench_info_t test_suite[] = {
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t worker_thread;
int cpu;
odp_shm_t shm;
@@ -1521,11 +1517,22 @@ int main(int argc, char *argv[])
odp_pool_capability_t capa;
odp_pool_param_t params;
odp_instance_t instance;
+ odp_init_t init_param;
uint32_t pkt_num;
uint8_t ret;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
LOG_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1623,8 +1630,6 @@ int main(int argc, char *argv[])
memset(&worker_thread, 0, sizeof(odph_odpthread_t));
- odp_barrier_init(&barrier, 1 + 1);
-
signal(SIGINT, sig_handler);
/* Create worker threads */
diff --git a/test/performance/odp_cpu_bench.c b/test/performance/odp_cpu_bench.c
index 949825e99..852ed3087 100644
--- a/test/performance/odp_cpu_bench.c
+++ b/test/performance/odp_cpu_bench.c
@@ -95,13 +95,13 @@ typedef struct {
odp_queue_t queue[MAX_GROUPS][QUEUES_PER_GROUP];
/* Test lookup table */
lookup_entry_t *lookup_tbl;
+ /* Break worker loop if set to 1 */
+ int exit_threads;
} args_t;
/* Global pointer to args */
static args_t *gbl_args;
-static volatile int exit_threads; /* Break workers loop if set to 1 */
-
static const uint8_t test_udp_packet[] = {
0x00, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x45, 0x00,
@@ -177,7 +177,7 @@ static const uint8_t test_udp_packet[] = {
static void sig_handler(int signo ODP_UNUSED)
{
- exit_threads = 1;
+ gbl_args->exit_threads = 1;
}
static inline void init_packet(odp_packet_t pkt, uint32_t seq, uint16_t group)
@@ -280,7 +280,7 @@ static int run_thread(void *arg)
c1 = odp_cpu_cycles();
t1 = odp_time_local();
- while (!exit_threads) {
+ while (!gbl_args->exit_threads) {
odp_event_t event_tbl[MAX_EVENT_BURST];
odp_queue_t dst_queue;
int num_events;
@@ -382,9 +382,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+a:+c:+l:+t:h";
- /* Let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->accuracy = 1; /* Get and print pps stats second */
appl_args->cpu_count = 1;
appl_args->lookup_tbl_size = DEF_LOOKUP_TBL_SIZE;
@@ -474,9 +471,10 @@ static int print_stats(int num_workers, stats_t **thr_stats, int duration,
pkts_prev = pkts;
elapsed += accuracy;
- } while (!exit_threads && (loop_forever || (elapsed < duration)));
+ } while (!gbl_args->exit_threads &&
+ (loop_forever || (elapsed < duration)));
- exit_threads = 1;
+ gbl_args->exit_threads = 1;
odp_barrier_wait(&gbl_args->term_barrier);
pkts = 0;
@@ -523,11 +521,12 @@ static void gbl_args_init(args_t *args)
int main(int argc, char *argv[])
{
stats_t *stats[MAX_WORKERS];
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
odp_cpumask_t cpumask;
odp_pool_capability_t pool_capa;
odp_pool_t pool;
- odp_queue_capability_t queue_capa;
+ odp_schedule_config_t schedule_config;
odp_shm_t shm;
odp_shm_t lookup_tbl_shm;
odp_pool_param_t params;
@@ -545,6 +544,13 @@ int main(int argc, char *argv[])
int cpu;
int ret = 0;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
odp_init_param_init(&init);
/* List features not to be used (may optimize performance) */
@@ -554,6 +560,8 @@ int main(int argc, char *argv[])
init.not_used.feat.timer = 1;
init.not_used.feat.tm = 1;
+ init.mem_model = helper_options.mem_model;
+
/* Signal handler has to be registered before global init in case ODP
* implementation creates internal threads/processes. */
signal(SIGINT, sig_handler);
@@ -606,27 +614,24 @@ int main(int argc, char *argv[])
printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
printf("cpu mask: %s\n", cpumaskstr);
- /* Create application queues */
- if (odp_queue_capability(&queue_capa)) {
- LOG_ERR("Error: odp_queue_capability() failed\n");
- exit(EXIT_FAILURE);
- }
+ odp_schedule_config_init(&schedule_config);
+ odp_schedule_config(&schedule_config);
/* Make sure a single queue can store all the packets in a group */
pkts_per_group = QUEUES_PER_GROUP * PKTS_PER_QUEUE;
- if (queue_capa.sched.max_size &&
- queue_capa.sched.max_size < pkts_per_group)
- pkts_per_group = queue_capa.sched.max_size;
+ if (schedule_config.queue_size &&
+ schedule_config.queue_size < pkts_per_group)
+ pkts_per_group = schedule_config.queue_size;
/* Divide queues evenly into groups */
- if (queue_capa.sched.max_num < QUEUES_PER_GROUP) {
+ if (schedule_config.num_queues < QUEUES_PER_GROUP) {
LOG_ERR("Error: min %d queues required\n", QUEUES_PER_GROUP);
return -1;
}
- num_queues = num_workers > queue_capa.sched.max_num ?
- queue_capa.sched.max_num : num_workers;
+ num_queues = num_workers > schedule_config.num_queues ?
+ schedule_config.num_queues : num_workers;
num_groups = (num_queues + QUEUES_PER_GROUP - 1) / QUEUES_PER_GROUP;
- if (num_groups * QUEUES_PER_GROUP > queue_capa.sched.max_num)
+ if (num_groups * QUEUES_PER_GROUP > schedule_config.num_queues)
num_groups--;
num_queues = num_groups * QUEUES_PER_GROUP;
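
Scheduled queue limits now come from the configured scheduler rather than odp_queue_capability(). The tests treat a zero field as "no limit expressed", which the guards above rely on; sketched below, with wanted_size/wanted_queues as stand-ins:

odp_schedule_config_t config;

odp_schedule_config_init(&config);
odp_schedule_config(&config);

if (config.queue_size && config.queue_size < wanted_size)
	wanted_size = config.queue_size;

if (wanted_queues > config.num_queues)
	wanted_queues = config.num_queues;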
diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c
index 61da80e8a..665268be0 100644
--- a/test/performance/odp_crypto.c
+++ b/test/performance/odp_crypto.c
@@ -1033,20 +1033,32 @@ int main(int argc, char *argv[])
odp_cpumask_t cpumask;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
int num_workers = 1;
+ odph_helper_options_t helper_options;
odph_odpthread_t thr[num_workers];
odp_instance_t instance;
+ odp_init_t init_param;
odp_pool_capability_t pool_capa;
odp_crypto_capability_t crypto_capa;
uint32_t max_seg_len;
unsigned i;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ app_err("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
memset(&cargs, 0, sizeof(cargs));
/* Parse and store the application arguments */
parse_args(argc, argv, &cargs);
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
app_err("ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1089,6 +1101,7 @@ int main(int argc, char *argv[])
odp_queue_param_init(&qparam);
if (cargs.schedule) {
+ odp_schedule_config(NULL);
qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
@@ -1196,9 +1209,6 @@ static void parse_args(int argc, char *argv[], crypto_args_t *cargs)
static const char *shortopts = "+a:c:df:hi:m:nl:spr";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
cargs->in_place = 0;
cargs->in_flight = 1;
cargs->debug_packets = 0;
diff --git a/test/performance/odp_ipsec.c b/test/performance/odp_ipsec.c
index 551c021af..e388916c4 100644
--- a/test/performance/odp_ipsec.c
+++ b/test/performance/odp_ipsec.c
@@ -277,6 +277,10 @@ static ipsec_alg_config_t algs_config[] = {
.data = test_key16,
.length = sizeof(test_key16)
},
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
.auth_alg = ODP_AUTH_ALG_NULL
},
},
@@ -288,6 +292,10 @@ static ipsec_alg_config_t algs_config[] = {
.data = test_key16,
.length = sizeof(test_key16)
},
+ .cipher_key_extra = {
+ .data = test_salt,
+ .length = 4,
+ },
.auth_alg = ODP_AUTH_ALG_SHA1_HMAC,
.auth_key = {
.data = test_key20,
@@ -337,7 +345,7 @@ static ipsec_alg_config_t algs_config[] = {
.data = test_key16,
.length = sizeof(test_key16)
},
- .cipher_key_extra = {
+ .auth_key_extra = {
.data = test_salt,
.length = 4,
},
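
The last hunk is a fix rather than an addition: AES-GCM is a cipher, so its 4-byte salt rides in cipher_key_extra, while AES-GMAC is an authentication algorithm whose salt belongs in auth_key_extra. An initializer fragment showing the two cases side by side (not a complete config entry):

/* AES-GCM: the cipher carries the salt */
.cipher_alg = ODP_CIPHER_ALG_AES_GCM,
.cipher_key_extra = { .data = test_salt, .length = 4 },

/* AES-GMAC: the salt goes with the auth key instead */
.cipher_alg = ODP_CIPHER_ALG_NULL,
.auth_alg = ODP_AUTH_ALG_AES_GMAC,
.auth_key_extra = { .data = test_salt, .length = 4 },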
@@ -928,9 +936,6 @@ static void parse_args(int argc, char *argv[], ipsec_args_t *cargs)
static const char *shortopts = "+a:c:df:hi:m:nl:sptu";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
cargs->in_place = 0;
cargs->in_flight = 1;
cargs->debug_packets = 0;
@@ -1012,20 +1017,32 @@ int main(int argc, char *argv[])
odp_cpumask_t cpumask;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
int num_workers = 1;
+ odph_helper_options_t helper_options;
odph_odpthread_t thr[num_workers];
odp_instance_t instance;
+ odp_init_t init_param;
odp_pool_capability_t capa;
odp_ipsec_config_t config;
uint32_t max_seg_len;
unsigned int i;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ app_err("Reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
memset(&cargs, 0, sizeof(cargs));
/* Parse and store the application arguments */
parse_args(argc, argv, &cargs);
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
app_err("ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1071,6 +1088,7 @@ int main(int argc, char *argv[])
odp_queue_param_init(&qparam);
if (cargs.schedule) {
+ odp_schedule_config(NULL);
qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 163363925..78e3920f7 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -95,13 +95,6 @@ typedef struct {
int verbose; /* Verbose output */
} appl_args_t;
-static int exit_threads; /* Break workers loop if set to 1 */
-
-static void sig_handler(int signo ODP_UNUSED)
-{
- exit_threads = 1;
-}
-
/* Statistics */
typedef union ODP_ALIGNED_CACHE {
struct {
@@ -175,12 +168,19 @@ typedef struct {
* Table index is pktio_index of the API. This is used by the sched
* mode. */
uint8_t dst_port_from_idx[MAX_PKTIO_INDEXES];
+ /* Break worker loop if set to 1 */
+ int exit_threads;
} args_t;
/* Global pointer to args */
static args_t *gbl_args;
+static void sig_handler(int signo ODP_UNUSED)
+{
+ gbl_args->exit_threads = 1;
+}
+
/*
* Drop packets which input parsing marked as containing errors.
*
@@ -336,7 +336,7 @@ static int run_worker_sched_mode(void *arg)
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
- while (!exit_threads) {
+ while (!gbl_args->exit_threads) {
odp_event_t ev_tbl[MAX_PKT_BURST];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
int sent;
@@ -453,7 +453,7 @@ static int run_worker_plain_queue_mode(void *arg)
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
- while (!exit_threads) {
+ while (!gbl_args->exit_threads) {
int sent;
unsigned tx_drops;
odp_event_t event[MAX_PKT_BURST];
@@ -579,7 +579,7 @@ static int run_worker_direct_mode(void *arg)
odp_barrier_wait(&gbl_args->init_barrier);
/* Loop packets */
- while (!exit_threads) {
+ while (!gbl_args->exit_threads) {
int sent;
unsigned tx_drops;
@@ -867,7 +867,8 @@ static int print_speed_stats(int num_workers, stats_t **thr_stats,
pkts_prev = pkts;
}
elapsed += timeout;
- } while (!exit_threads && (loop_forever || (elapsed < duration)));
+ } while (!gbl_args->exit_threads && (loop_forever ||
+ (elapsed < duration)));
if (stats_enabled)
printf("TEST RESULT: %" PRIu64 " maximum packets per second.\n",
@@ -1183,9 +1184,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:+t:+a:i:m:o:r:d:s:e:k:g:vh";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
appl_args->cpu_count = 1; /* use one worker by default */
@@ -1427,6 +1425,7 @@ static void create_groups(int num, odp_schedule_group_t *group)
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
odp_pool_t pool;
int i;
@@ -1448,6 +1447,13 @@ int main(int argc, char *argv[])
odp_pool_capability_t pool_capa;
uint32_t pkt_len, pkt_num;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
odp_init_param_init(&init);
/* List features not to be used (may optimize performance) */
@@ -1457,6 +1463,8 @@ int main(int argc, char *argv[])
init.not_used.feat.timer = 1;
init.not_used.feat.tm = 1;
+ init.mem_model = helper_options.mem_model;
+
/* Signal handler has to be registered before global init in case ODP
* implementation creates internal threads/processes. */
signal(SIGINT, sig_handler);
@@ -1560,6 +1568,8 @@ int main(int argc, char *argv[])
bind_workers();
+ odp_schedule_config(NULL);
+
/* Default */
if (num_groups == 0) {
group[0] = ODP_SCHED_GROUP_ALL;
@@ -1682,7 +1692,7 @@ int main(int argc, char *argv[])
}
}
- exit_threads = 1;
+ gbl_args->exit_threads = 1;
if (gbl_args->appl.in_mode != DIRECT_RECV)
odp_barrier_wait(&gbl_args->term_barrier);
diff --git a/test/performance/odp_l2fwd_run.sh b/test/performance/odp_l2fwd_run.sh
index 6166c8b27..5745d3279 100755
--- a/test/performance/odp_l2fwd_run.sh
+++ b/test/performance/odp_l2fwd_run.sh
@@ -88,7 +88,7 @@ run_l2fwd()
$STDBUF odp_l2fwd${EXEEXT} -i $IF1,$IF2 -m 0 -t 30 -c 2 | tee $LOG
ret=$?
- kill ${GEN_PID}
+ kill -2 ${GEN_PID}
if [ ! -f $LOG ]; then
echo "FAIL: $LOG not found"
diff --git a/test/performance/odp_pktio_ordered.c b/test/performance/odp_pktio_ordered.c
index e884d38ab..15229aeba 100644
--- a/test/performance/odp_pktio_ordered.c
+++ b/test/performance/odp_pktio_ordered.c
@@ -148,8 +148,6 @@ typedef struct {
char *if_str; /**< Storage for interface names */
} appl_args_t;
-static int exit_threads; /**< Break workers loop if set to 1 */
-
/**
* Queue context
*/
@@ -250,14 +248,15 @@ typedef struct {
int num_rx_queue;
int num_tx_queue;
} pktios[MAX_PKTIOS];
+ /** Global barrier to synchronize main and workers */
+ odp_barrier_t barrier;
+ /** Break worker loop if set to 1 */
+ int exit_threads;
} args_t;
/** Global pointer to args */
static args_t *gbl_args;
-/** Global barrier to synchronize main and workers */
-static odp_barrier_t barrier;
-
/**
* Lookup the destination port for a given packet
*
@@ -537,10 +536,10 @@ static int run_worker(void *arg)
gbl_args->pktios[i].num_tx_queue];
}
}
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&gbl_args->barrier);
/* Loop packets */
- while (!exit_threads) {
+ while (!gbl_args->exit_threads) {
pkts = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT, ev_tbl,
MAX_PKT_BURST);
if (pkts <= 0)
@@ -732,7 +731,7 @@ static int print_speed_stats(int num_workers, stats_t *thr_stats,
timeout = 1;
}
/* Wait for all threads to be ready*/
- odp_barrier_wait(&barrier);
+ odp_barrier_wait(&gbl_args->barrier);
do {
pkts = 0;
@@ -867,9 +866,6 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
static const char *shortopts = "+c:+t:+a:i:m:d:r:f:e:h";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = DEF_STATS_INT;
appl_args->cpu_count = 1; /* use one worker by default */
@@ -1061,12 +1057,15 @@ int main(int argc, char *argv[])
{
odp_cpumask_t cpumask;
odp_instance_t instance;
+ odp_init_t init_param;
odp_pool_t pool;
odp_pool_param_t params;
odp_shm_t shm;
- odp_queue_capability_t queue_capa;
+ odp_schedule_capability_t schedule_capa;
+ odp_schedule_config_t schedule_config;
odp_pool_capability_t pool_capa;
odph_ethaddr_t new_addr;
+ odph_helper_options_t helper_options;
odph_odpthread_t thread_tbl[MAX_WORKERS];
stats_t *stats;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
@@ -1078,8 +1077,18 @@ int main(int argc, char *argv[])
int in_mode;
uint32_t queue_size, pool_size;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
LOG_ERR("Error: ODP global init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1090,9 +1099,9 @@ int main(int argc, char *argv[])
exit(EXIT_FAILURE);
}
- if (odp_queue_capability(&queue_capa)) {
- LOG_ERR("Error: Queue capa failed\n");
- exit(EXIT_FAILURE);
+ if (odp_schedule_capability(&schedule_capa)) {
+ printf("Error: Schedule capa failed.\n");
+ return -1;
}
if (odp_pool_capability(&pool_capa)) {
@@ -1121,9 +1130,11 @@ int main(int argc, char *argv[])
/* Parse and store the application arguments */
parse_args(argc, argv, &gbl_args->appl);
+ odp_schedule_config(NULL);
+
if (gbl_args->appl.in_mode == SCHED_ORDERED) {
/* At least one ordered lock required */
- if (queue_capa.max_ordered_locks < 1) {
+ if (schedule_capa.max_ordered_locks < 1) {
LOG_ERR("Error: Ordered locks not available.\n");
exit(EXIT_FAILURE);
}
@@ -1150,9 +1161,9 @@ int main(int argc, char *argv[])
pool_size = pool_capa.pkt.max_num;
queue_size = MAX_NUM_PKT;
- if (queue_capa.sched.max_size &&
- queue_capa.sched.max_size < MAX_NUM_PKT)
- queue_size = queue_capa.sched.max_size;
+ if (schedule_config.queue_size &&
+ schedule_config.queue_size < MAX_NUM_PKT)
+ queue_size = schedule_config.queue_size;
/* Pool should not be larger than queue, otherwise queue enqueues at
* packet input may fail. */
@@ -1272,7 +1283,7 @@ int main(int argc, char *argv[])
stats = gbl_args->stats;
- odp_barrier_init(&barrier, num_workers + 1);
+ odp_barrier_init(&gbl_args->barrier, num_workers + 1);
/* Create worker threads */
cpu = odp_cpumask_first(&cpumask);
@@ -1315,7 +1326,7 @@ int main(int argc, char *argv[])
for (i = 0; i < if_count; i++)
odp_pktio_stop(gbl_args->pktios[i].pktio);
- exit_threads = 1;
+ gbl_args->exit_threads = 1;
/* Master thread waits for other threads to exit */
for (i = 0; i < num_workers; ++i)
diff --git a/test/performance/odp_pktio_ordered_run.sh b/test/performance/odp_pktio_ordered_run.sh
index d7f238120..295b8803b 100755
--- a/test/performance/odp_pktio_ordered_run.sh
+++ b/test/performance/odp_pktio_ordered_run.sh
@@ -31,8 +31,7 @@ fi
$STDBUF ${TEST_DIR}/odp_pktio_ordered${EXEEXT} \
-i pcap:in=${PCAP_IN}:loops=$LOOPS,pcap:out=${PCAP_OUT} \
-t $DURATION | tee $LOG
-
-ret=${PIPESTATUS[0]}
+ret=$?
if [ $ret -ne 0 ]; then
echo "FAIL: no odp_pktio_ordered${EXEEXT}"
diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 7ddf82503..2ed2c3529 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -125,12 +125,18 @@ typedef struct {
odp_barrier_t tx_barrier;
odp_pktio_t pktio_tx;
odp_pktio_t pktio_rx;
+ /* Pool from which transmitted packets are allocated */
+ odp_pool_t transmit_pkt_pool;
pkt_rx_stats_t *rx_stats;
pkt_tx_stats_t *tx_stats;
uint8_t src_mac[ODPH_ETHADDR_LEN];
uint8_t dst_mac[ODPH_ETHADDR_LEN];
uint32_t rx_stats_size;
uint32_t tx_stats_size;
+ /* Indicate to the receivers to shut down */
+ odp_atomic_u32_t shutdown;
+ /* Sequence number of IP packets */
+ odp_atomic_u32_t ODP_ALIGNED_CACHE ip_seq;
} test_globals_t;
/* Status of max rate search */
@@ -152,15 +158,6 @@ typedef struct {
odp_u32be_t magic; /* Packet header magic number */
} pkt_head_t;
-/* Pool from which transmitted packets are allocated */
-static odp_pool_t transmit_pkt_pool = ODP_POOL_INVALID;
-
-/* Sequence number of IP packets */
-static odp_atomic_u32_t ip_seq;
-
-/* Indicate to the receivers to shutdown */
-static odp_atomic_u32_t shutdown;
-
/* Application global data */
static test_globals_t *gbl_args;
@@ -180,7 +177,7 @@ static odp_packet_t pktio_create_packet(uint32_t seq)
payload_len = sizeof(pkt_hdr) + gbl_args->args.pkt_len;
- pkt = odp_packet_alloc(transmit_pkt_pool,
+ pkt = odp_packet_alloc(gbl_args->transmit_pkt_pool,
payload_len + ODPH_UDPHDR_LEN +
ODPH_IPV4HDR_LEN + ODPH_ETHHDR_LEN);
@@ -261,7 +258,7 @@ static int alloc_packets(odp_packet_t *pkt_tbl, int num_pkts)
int n;
uint16_t seq;
- seq = odp_atomic_fetch_add_u32(&ip_seq, num_pkts);
+ seq = odp_atomic_fetch_add_u32(&gbl_args->ip_seq, num_pkts);
for (n = 0; n < num_pkts; ++n) {
pkt_tbl[n] = pktio_create_packet(seq + n);
if (pkt_tbl[n] == ODP_PACKET_INVALID)
@@ -453,7 +450,7 @@ static int run_thread_rx(void *arg)
}
odp_event_free(ev[i]);
}
- if (n_ev == 0 && odp_atomic_load_u32(&shutdown))
+ if (n_ev == 0 && odp_atomic_load_u32(&gbl_args->shutdown))
break;
}
@@ -618,7 +615,7 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
thr_params.thr_type = ODP_THREAD_WORKER;
thr_params.instance = gbl_args->instance;
- odp_atomic_store_u32(&shutdown, 0);
+ odp_atomic_store_u32(&gbl_args->shutdown, 0);
memset(thd_tbl, 0, sizeof(thd_tbl));
memset(gbl_args->rx_stats, 0, gbl_args->rx_stats_size);
@@ -652,7 +649,7 @@ static int run_test_single(odp_cpumask_t *thd_mask_tx,
odp_time_wait_ns(SHUTDOWN_DELAY_NS);
/* indicate to the receivers to exit */
- odp_atomic_store_u32(&shutdown, 1);
+ odp_atomic_store_u32(&gbl_args->shutdown, 1);
/* wait for receivers */
odph_odpthreads_join(&thd_tbl[0]);
@@ -751,16 +748,20 @@ static int test_init(void)
params.pkt.num = PKT_BUF_NUM;
params.type = ODP_POOL_PACKET;
- transmit_pkt_pool = odp_pool_create("pkt_pool_transmit", &params);
- if (transmit_pkt_pool == ODP_POOL_INVALID)
+ gbl_args->transmit_pkt_pool = odp_pool_create("pkt_pool_transmit",
+ &params);
+ if (gbl_args->transmit_pkt_pool == ODP_POOL_INVALID)
LOG_ABORT("Failed to create transmit pool\n");
- odp_atomic_init_u32(&ip_seq, 0);
- odp_atomic_init_u32(&shutdown, 0);
+ odp_atomic_init_u32(&gbl_args->ip_seq, 0);
+ odp_atomic_init_u32(&gbl_args->shutdown, 0);
iface = gbl_args->args.ifaces[0];
schedule = gbl_args->args.schedule;
+ if (schedule)
+ odp_schedule_config(NULL);
+
/* create pktios and associate input/output queues */
gbl_args->pktio_tx = create_pktio(iface, schedule);
if (gbl_args->args.num_ifaces > 1) {
@@ -892,7 +893,7 @@ static int test_term(void)
}
}
- if (odp_pool_destroy(transmit_pkt_pool) != 0) {
+ if (odp_pool_destroy(gbl_args->transmit_pkt_pool) != 0) {
LOG_ERR("Failed to destroy transmit pool\n");
ret = -1;
}
@@ -959,9 +960,6 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const char *shortopts = "+c:t:b:pR:l:r:i:d:vh";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
args->cpu_count = 2;
args->num_tx_workers = 0; /* defaults to cpu_count+1/2 */
args->tx_batch_len = BATCH_LEN_MAX;
@@ -1042,9 +1040,21 @@ int main(int argc, char **argv)
int ret;
odp_shm_t shm;
int max_thrs;
+ odph_helper_options_t helper_options;
odp_instance_t instance;
+ odp_init_t init_param;
+
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (odp_init_global(&instance, NULL, NULL) != 0)
+ if (odp_init_global(&instance, &init_param, NULL) != 0)
LOG_ABORT("Failed global init.\n");
if (odp_init_local(instance, ODP_THREAD_CONTROL) != 0)
diff --git a/test/performance/odp_pool_perf.c b/test/performance/odp_pool_perf.c
index 4a77f3271..d771e53e5 100644
--- a/test/performance/odp_pool_perf.c
+++ b/test/performance/odp_pool_perf.c
@@ -128,7 +128,7 @@ static int set_num_cpu(test_global_t *global)
ret = odp_cpumask_default_worker(&global->cpumask, num_cpu);
if (num_cpu && ret != num_cpu) {
- printf("Error: Too many workers. Max supported %i\n.", ret);
+ printf("Error: Too many workers. Max supported %i.\n", ret);
return -1;
}
diff --git a/test/performance/odp_queue_perf.c b/test/performance/odp_queue_perf.c
index d5ff254db..80d0f1548 100644
--- a/test/performance/odp_queue_perf.c
+++ b/test/performance/odp_queue_perf.c
@@ -12,16 +12,44 @@
#include <getopt.h>
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define MAX_QUEUES (32 * 1024)
typedef struct test_options_t {
uint32_t num_queue;
uint32_t num_event;
uint32_t num_round;
+ uint32_t max_burst;
odp_nonblocking_t nonblock;
int single;
+ int num_cpu;
} test_options_t;
+typedef struct test_stat_t {
+ uint64_t rounds;
+ uint64_t events;
+ uint64_t nsec;
+ uint64_t cycles;
+ uint64_t deq_retry;
+
+} test_stat_t;
+
+typedef struct test_global_t {
+ odp_barrier_t barrier;
+ test_options_t options;
+ odp_instance_t instance;
+ odp_shm_t shm;
+ odp_pool_t pool;
+ odp_queue_t queue[MAX_QUEUES];
+ odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ test_stat_t stat[ODP_THREAD_COUNT_MAX];
+
+} test_global_t;
+
+static test_global_t test_global;
+
static void print_usage(void)
{
printf("\n"
@@ -29,8 +57,10 @@ static void print_usage(void)
"\n"
"Usage: odp_queue_perf [options]\n"
"\n"
- " -q, --num_queue Number of queues\n"
- " -e, --num_event Number of events per queue\n"
+ " -c, --num_cpu Number of worker threads. Default: 1\n"
+ " -q, --num_queue Number of queues. Default: 1\n"
+ " -e, --num_event Number of events per queue. Default: 1\n"
+ " -b, --burst_size Maximum number of events per operation. Default: 1\n"
" -r, --num_round Number of rounds\n"
" -l, --lockfree Lockfree queues\n"
" -w, --waitfree Waitfree queues\n"
@@ -46,20 +76,24 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
int ret = 0;
static const struct option longopts[] = {
- {"num_queue", required_argument, NULL, 'q'},
- {"num_event", required_argument, NULL, 'e'},
- {"num_round", required_argument, NULL, 'r'},
- {"lockfree", no_argument, NULL, 'l'},
- {"waitfree", no_argument, NULL, 'w'},
- {"single", no_argument, NULL, 's'},
- {"help", no_argument, NULL, 'h'},
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"num_queue", required_argument, NULL, 'q'},
+ {"num_event", required_argument, NULL, 'e'},
+ {"burst_size", required_argument, NULL, 'b'},
+ {"num_round", required_argument, NULL, 'r'},
+ {"lockfree", no_argument, NULL, 'l'},
+ {"waitfree", no_argument, NULL, 'w'},
+ {"single", no_argument, NULL, 's'},
+ {"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+q:e:r:lwsh";
+ static const char *shortopts = "+c:q:e:b:r:lwsh";
+ test_options->num_cpu = 1;
test_options->num_queue = 1;
test_options->num_event = 1;
+ test_options->max_burst = 1;
test_options->num_round = 1000;
test_options->nonblock = ODP_BLOCKING;
test_options->single = 0;
@@ -71,12 +105,18 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
break;
switch (opt) {
+ case 'c':
+ test_options->num_cpu = atoi(optarg);
+ break;
case 'q':
test_options->num_queue = atoi(optarg);
break;
case 'e':
test_options->num_event = atoi(optarg);
break;
+ case 'b':
+ test_options->max_burst = atoi(optarg);
+ break;
case 'r':
test_options->num_round = atoi(optarg);
break;
@@ -98,27 +138,31 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
}
}
+ if (test_options->num_queue > MAX_QUEUES) {
+ printf("Too many queues %u. Test maximum %u.\n",
+ test_options->num_queue, MAX_QUEUES);
+ return -1;
+ }
+
return ret;
}
-static int test_queue(test_options_t *test_options)
+static int create_queues(test_global_t *global)
{
odp_pool_capability_t pool_capa;
odp_queue_capability_t queue_capa;
odp_pool_param_t pool_param;
odp_queue_param_t queue_param;
odp_pool_t pool;
- odp_event_t ev;
- uint32_t i, j, rounds;
- uint64_t c1, c2, diff, ops, nsec;
- odp_time_t t1, t2;
- uint64_t num_retry = 0;
+ uint32_t i, j, max_size;
+ test_options_t *test_options = &global->options;
odp_nonblocking_t nonblock = test_options->nonblock;
uint32_t num_queue = test_options->num_queue;
uint32_t num_event = test_options->num_event;
uint32_t num_round = test_options->num_round;
uint32_t tot_event = num_queue * num_event;
- odp_queue_t queue[num_queue];
+ int ret = 0;
+ odp_queue_t *queue = global->queue;
odp_event_t event[tot_event];
printf("\nTesting %s queues\n",
@@ -127,7 +171,8 @@ static int test_queue(test_options_t *test_options)
(nonblock == ODP_NONBLOCKING_WF ? "WAITFREE" : "???")));
printf(" num rounds %u\n", num_round);
printf(" num queues %u\n", num_queue);
- printf(" num events per queue %u\n\n", num_event);
+ printf(" num events per queue %u\n", num_event);
+ printf(" max burst size %u\n", test_options->max_burst);
for (i = 0; i < num_queue; i++)
queue[i] = ODP_QUEUE_INVALID;
@@ -152,15 +197,15 @@ static int test_queue(test_options_t *test_options)
return -1;
}
- if (num_event > queue_capa.plain.max_size) {
- printf("Max queue size supported %u\n",
- queue_capa.plain.max_size);
+ max_size = queue_capa.plain.max_size;
+ if (max_size && num_event > max_size) {
+ printf("Max queue size supported %u\n", max_size);
return -1;
}
} else if (nonblock == ODP_NONBLOCKING_LF) {
if (queue_capa.plain.lockfree.max_num == 0) {
printf("Lockfree queues not supported\n");
- return 0;
+ return -1;
}
if (num_queue > queue_capa.plain.lockfree.max_num) {
@@ -169,15 +214,16 @@ static int test_queue(test_options_t *test_options)
return -1;
}
- if (num_event > queue_capa.plain.lockfree.max_size) {
+ max_size = queue_capa.plain.lockfree.max_size;
+ if (max_size && num_event > max_size) {
printf("Max lockfree queue size supported %u\n",
- queue_capa.plain.lockfree.max_size);
+ max_size);
return -1;
}
} else if (nonblock == ODP_NONBLOCKING_WF) {
if (queue_capa.plain.waitfree.max_num == 0) {
printf("Waitfree queues not supported\n");
- return 0;
+ return -1;
}
if (num_queue > queue_capa.plain.waitfree.max_num) {
@@ -186,9 +232,10 @@ static int test_queue(test_options_t *test_options)
return -1;
}
- if (num_event > queue_capa.plain.waitfree.max_size) {
+ max_size = queue_capa.plain.waitfree.max_size;
+ if (max_size && num_event > max_size) {
printf("Max waitfree queue size supported %u\n",
- queue_capa.plain.waitfree.max_size);
+ max_size);
return -1;
}
} else {
@@ -212,6 +259,8 @@ static int test_queue(test_options_t *test_options)
return -1;
}
+ global->pool = pool;
+
odp_queue_param_init(&queue_param);
queue_param.type = ODP_QUEUE_TYPE_PLAIN;
queue_param.nonblocking = nonblock;
@@ -227,7 +276,7 @@ static int test_queue(test_options_t *test_options)
if (queue[i] == ODP_QUEUE_INVALID) {
printf("Error: Queue create failed %u.\n", i);
- goto error;
+ return -1;
}
}
@@ -236,7 +285,8 @@ static int test_queue(test_options_t *test_options)
if (event[i] == ODP_EVENT_INVALID) {
printf("Error: Event alloc failed %u.\n", i);
- goto error;
+ ret = -1;
+ goto free_events;
}
}
@@ -246,98 +296,239 @@ static int test_queue(test_options_t *test_options)
if (odp_queue_enq(queue[i], event[id])) {
printf("Error: Queue enq failed %u/%u\n", i, j);
- goto error;
+ ret = -1;
+ goto free_events;
}
event[id] = ODP_EVENT_INVALID;
}
}
+free_events:
+ /* Free events that were not stored into queues */
+ for (i = 0; i < tot_event; i++) {
+ if (event[i] != ODP_EVENT_INVALID)
+ odp_event_free(event[i]);
+ }
+
+ return ret;
+}
+
+static int destroy_queues(test_global_t *global)
+{
+ odp_event_t ev;
+ uint32_t i, j;
+ int ret = 0;
+ test_options_t *test_options = &global->options;
+ uint32_t num_queue = test_options->num_queue;
+ uint32_t num_event = test_options->num_event;
+ odp_queue_t *queue = global->queue;
+ odp_pool_t pool = global->pool;
+
+ for (i = 0; i < num_queue; i++) {
+ if (queue[i] == ODP_QUEUE_INVALID) {
+ printf("Error: Invalid queue handle (i: %u).\n", i);
+ break;
+ }
+
+ for (j = 0; j < num_event; j++) {
+ ev = odp_queue_deq(queue[i]);
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ }
+
+ if (odp_queue_destroy(queue[i])) {
+ printf("Error: Queue destroy failed %u.\n", i);
+ ret = -1;
+ break;
+ }
+ }
+
+ if (pool != ODP_POOL_INVALID && odp_pool_destroy(pool)) {
+ printf("Error: Pool destroy failed.\n");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int run_test(void *arg)
+{
+ uint64_t c1, c2, cycles, nsec;
+ odp_time_t t1, t2;
+ uint32_t rounds;
+ int num_ev;
+ test_stat_t *stat;
+ test_global_t *global = arg;
+ test_options_t *test_options = &global->options;
+ odp_queue_t queue;
+ uint64_t num_retry = 0;
+ uint64_t events = 0;
+ uint32_t num_queue = test_options->num_queue;
+ uint32_t num_round = test_options->num_round;
+ int thr = odp_thread_id();
+ int ret = 0;
+ uint32_t i = 0;
+ uint32_t max_burst = test_options->max_burst;
+ odp_event_t ev[max_burst];
+
+ stat = &global->stat[thr];
+
+ /* Start all workers at the same time */
+ odp_barrier_wait(&global->barrier);
+
t1 = odp_time_local();
c1 = odp_cpu_cycles();
for (rounds = 0; rounds < num_round; rounds++) {
- int retry = 0;
+ do {
+ queue = global->queue[i++];
- for (i = 0; i < num_queue; i++) {
- ev = odp_queue_deq(queue[i]);
+ if (i == num_queue)
+ i = 0;
- if (ev == ODP_EVENT_INVALID) {
- if (retry < 5) {
- retry++;
- num_retry++;
- continue;
- }
+ num_ev = odp_queue_deq_multi(queue, ev, max_burst);
- printf("Error: Queue deq failed %u\n", i);
- goto error;
- }
+ if (odp_unlikely(num_ev <= 0))
+ num_retry++;
- retry = 0;
+ } while (num_ev <= 0);
- if (odp_queue_enq(queue[i], ev)) {
- printf("Error: Queue enq failed %u\n", i);
- goto error;
- }
+ if (odp_queue_enq_multi(queue, ev, num_ev) != num_ev) {
+ printf("Error: Queue enq failed %u\n", i);
+ ret = -1;
+ goto error;
}
+
+ events += num_ev;
}
c2 = odp_cpu_cycles();
t2 = odp_time_local();
- nsec = odp_time_diff_ns(t2, t1);
- diff = odp_cpu_cycles_diff(c2, c1);
- ops = num_round * num_queue;
+ nsec = odp_time_diff_ns(t2, t1);
+ cycles = odp_cpu_cycles_diff(c2, c1);
- printf("RESULT:\n");
- printf(" num deq + enq operations: %" PRIu64 "\n", ops);
- printf(" duration (nsec): %" PRIu64 "\n", nsec);
- printf(" num cycles: %" PRIu64 "\n", diff);
- printf(" cycles per deq + enq: %.3f\n", (double)diff / ops);
- printf(" num retries: %" PRIu64 "\n\n", num_retry);
+ stat->rounds = rounds;
+ stat->events = events;
+ stat->nsec = nsec;
+ stat->cycles = cycles;
+ stat->deq_retry = num_retry;
error:
+ return ret;
+}
- for (i = 0; i < num_queue; i++) {
- for (j = 0; j < num_event; j++) {
- ev = odp_queue_deq(queue[i]);
-
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
- }
+static int start_workers(test_global_t *global)
+{
+ odph_odpthread_params_t thr_params;
+ odp_cpumask_t cpumask;
+ int ret;
+ test_options_t *test_options = &global->options;
+ int num_cpu = test_options->num_cpu;
+
+ memset(&thr_params, 0, sizeof(thr_params));
+ thr_params.thr_type = ODP_THREAD_WORKER;
+ thr_params.instance = global->instance;
+ thr_params.start = run_test;
+ thr_params.arg = global;
+
+ ret = odp_cpumask_default_worker(&cpumask, num_cpu);
+
+ if (num_cpu && ret != num_cpu) {
+ printf("Error: Too many workers. Max supported %i\n.", ret);
+ return -1;
}
- for (i = 0; i < tot_event; i++) {
- if (event[i] != ODP_EVENT_INVALID)
- odp_event_free(event[i]);
+ /* Zero: all available workers */
+ if (num_cpu == 0) {
+ num_cpu = ret;
+ test_options->num_cpu = num_cpu;
}
- for (i = 0; i < num_queue; i++) {
- if (queue[i] == ODP_QUEUE_INVALID)
- break;
+ printf(" num workers %u\n\n", num_cpu);
- if (odp_queue_destroy(queue[i])) {
- printf("Error: Queue destroy failed %u.\n", i);
- break;
- }
- }
+ odp_barrier_init(&global->barrier, num_cpu);
- if (odp_pool_destroy(pool)) {
- printf("Error: Pool destroy failed.\n");
+ if (odph_odpthreads_create(global->thread_tbl, &cpumask, &thr_params)
+ != num_cpu)
return -1;
- }
return 0;
}
+static void print_stat(test_global_t *global)
+{
+ int i, num;
+ double rounds_ave, events_ave, nsec_ave, cycles_ave, retry_ave;
+ test_options_t *test_options = &global->options;
+ int num_cpu = test_options->num_cpu;
+ uint64_t rounds_sum = 0;
+ uint64_t events_sum = 0;
+ uint64_t nsec_sum = 0;
+ uint64_t cycles_sum = 0;
+ uint64_t retry_sum = 0;
+
+ /* Averages */
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ rounds_sum += global->stat[i].rounds;
+ events_sum += global->stat[i].events;
+ nsec_sum += global->stat[i].nsec;
+ cycles_sum += global->stat[i].cycles;
+ retry_sum += global->stat[i].deq_retry;
+ }
+
+ if (rounds_sum == 0) {
+ printf("No results.\n");
+ return;
+ }
+
+ rounds_ave = rounds_sum / (double)num_cpu;
+ events_ave = events_sum / (double)num_cpu;
+ nsec_ave = nsec_sum / (double)num_cpu;
+ cycles_ave = cycles_sum / (double)num_cpu;
+ retry_ave = retry_sum / (double)num_cpu;
+ num = 0;
+
+ printf("RESULTS - per thread (Million events per sec):\n");
+ printf("----------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (1000.0 * global->stat[i].events) /
+ global->stat[i].nsec);
+ num++;
+ }
+ }
+ printf("\n\n");
+
+ printf("RESULTS - per thread average (%i threads):\n", num_cpu);
+ printf("------------------------------------------\n");
+ printf(" duration: %.3f msec\n", nsec_ave / 1000000);
+ printf(" num cycles: %.3f M\n", cycles_ave / 1000000);
+ printf(" evenst per dequeue: %.3f\n",
+ events_ave / rounds_ave);
+ printf(" cycles per event: %.3f\n",
+ cycles_ave / events_ave);
+ printf(" deq retries per sec: %.3f k\n",
+ (1000000.0 * retry_ave) / nsec_ave);
+ printf(" events per sec: %.3f M\n\n",
+ (1000.0 * events_ave) / nsec_ave);
+
+ printf("TOTAL events per sec: %.3f M\n\n",
+ (1000.0 * events_sum) / nsec_ave);
+}
+
int main(int argc, char **argv)
{
odp_instance_t instance;
odp_init_t init;
- test_options_t test_options;
-
- if (parse_options(argc, argv, &test_options))
- return -1;
+ test_global_t *global;
/* List features not to be used */
odp_init_param_init(&init);
@@ -360,8 +551,34 @@ int main(int argc, char **argv)
return -1;
}
- if (test_queue(&test_options))
- printf("Error: Queue test failed.\n");
+ global = &test_global;
+ memset(global, 0, sizeof(test_global_t));
+
+ if (parse_options(argc, argv, &global->options))
+ return -1;
+
+ global->instance = instance;
+
+ if (create_queues(global)) {
+ printf("Error: Create queues failed.\n");
+ goto destroy;
+ }
+
+ if (start_workers(global)) {
+ printf("Error: Test start failed.\n");
+ return -1;
+ }
+
+ /* Wait for workers to exit */
+ odph_odpthreads_join(global->thread_tbl);
+
+ print_stat(global);
+
+destroy:
+ if (destroy_queues(global)) {
+ printf("Error: Destroy queues failed.\n");
+ return -1;
+ }
if (odp_term_local()) {
printf("Error: term local failed.\n");
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index 64a219835..b5be1a163 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -549,9 +549,6 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const char *shortopts = "+c:s:l:t:m:n:o:p:rh";
- /* Let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
args->cpu_count = 1;
args->sync_type = ODP_SCHED_SYNC_PARALLEL;
args->sample_per_prio = SAMPLE_EVENT_PER_PRIO;
@@ -637,26 +634,40 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
int main(int argc, char *argv[])
{
odp_instance_t instance;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
odph_odpthread_t *thread_tbl;
odph_odpthread_params_t thr_params;
odp_cpumask_t cpumask;
odp_pool_t pool;
+ odp_pool_capability_t pool_capa;
odp_pool_param_t params;
odp_shm_t shm;
test_globals_t *globals;
test_args_t args;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ uint32_t pool_size;
int i, j;
int ret = 0;
int num_workers = 0;
printf("\nODP scheduling latency benchmark starts\n\n");
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
memset(&args, 0, sizeof(args));
parse_args(argc, argv, &args);
/* ODP global init */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
LOG_ERR("ODP global init failed.\n");
return -1;
}
@@ -703,13 +714,24 @@ int main(int argc, char *argv[])
memset(globals, 0, sizeof(test_globals_t));
memcpy(&globals->args, &args, sizeof(test_args_t));
+ odp_schedule_config(NULL);
+
/*
* Create event pool
*/
+ if (odp_pool_capability(&pool_capa)) {
+ LOG_ERR("pool capa failed\n");
+ return -1;
+ }
+
+ pool_size = EVENT_POOL_SIZE;
+ if (pool_capa.buf.max_num && pool_capa.buf.max_num < EVENT_POOL_SIZE)
+ pool_size = pool_capa.buf.max_num;
+
odp_pool_param_init(&params);
params.buf.size = sizeof(test_event_t);
params.buf.align = 0;
- params.buf.num = EVENT_POOL_SIZE;
+ params.buf.num = pool_size;
params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("event_pool", &params);
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index e76725cc0..c301263ef 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -14,12 +14,20 @@
#include <odp_api.h>
#include <odp/helper/odph_api.h>
+#define MAX_QUEUES (256 * 1024)
+
typedef struct test_options_t {
uint32_t num_cpu;
+ uint32_t num_queue;
+ uint32_t num_dummy;
uint32_t num_event;
uint32_t num_round;
uint32_t max_burst;
int queue_type;
+ int forward;
+ uint32_t queue_size;
+ uint32_t tot_queue;
+ uint32_t tot_event;
} test_options_t;
@@ -35,10 +43,11 @@ typedef struct test_stat_t {
typedef struct test_global_t {
test_options_t test_options;
+ odp_schedule_config_t schedule_config;
odp_barrier_t barrier;
odp_pool_t pool;
odp_cpumask_t cpumask;
- odp_queue_t queue[ODP_THREAD_COUNT_MAX];
+ odp_queue_t queue[MAX_QUEUES];
odph_odpthread_t thread_tbl[ODP_THREAD_COUNT_MAX];
test_stat_t stat[ODP_THREAD_COUNT_MAX];
@@ -53,11 +62,14 @@ static void print_usage(void)
"\n"
"Usage: odp_sched_perf [options]\n"
"\n"
- " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default 1.\n"
- " -e, --num_event Number of events per queue\n"
+ " -c, --num_cpu Number of CPUs (worker threads). 0: all available CPUs. Default: 1.\n"
+ " -q, --num_queue Number of queues. Default: 1.\n"
+ " -d, --num_dummy Number of empty queues. Default: 0.\n"
+ " -e, --num_event Number of events per queue. Default: 100.\n"
" -r, --num_round Number of rounds\n"
- " -b, --burst Maximum number of events per operation\n"
- " -t, --type Queue type. 0: parallel, 1: atomic, 2: ordered. Default 0.\n"
+ " -b, --burst Maximum number of events per operation. Default: 100.\n"
+ " -t, --type Queue type. 0: parallel, 1: atomic, 2: ordered. Default: 0.\n"
+ " -f, --forward 0: Keep event in the original queue, 1: Forward event to the next queue. Default: 0.\n"
" -h, --help This help\n"
"\n");
}
@@ -70,21 +82,27 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
static const struct option longopts[] = {
{"num_cpu", required_argument, NULL, 'c'},
+ {"num_queue", required_argument, NULL, 'q'},
+ {"num_dummy", required_argument, NULL, 'd'},
{"num_event", required_argument, NULL, 'e'},
{"num_round", required_argument, NULL, 'r'},
{"burst", required_argument, NULL, 'b'},
{"type", required_argument, NULL, 't'},
+ {"forward", required_argument, NULL, 'f'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:e:r:b:t:h";
+ static const char *shortopts = "+c:q:d:e:r:b:t:f:h";
test_options->num_cpu = 1;
+ test_options->num_queue = 1;
+ test_options->num_dummy = 0;
test_options->num_event = 100;
test_options->num_round = 100000;
test_options->max_burst = 100;
test_options->queue_type = 0;
+ test_options->forward = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -96,6 +114,12 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 'c':
test_options->num_cpu = atoi(optarg);
break;
+ case 'q':
+ test_options->num_queue = atoi(optarg);
+ break;
+ case 'd':
+ test_options->num_dummy = atoi(optarg);
+ break;
case 'e':
test_options->num_event = atoi(optarg);
break;
@@ -108,6 +132,9 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 't':
test_options->queue_type = atoi(optarg);
break;
+ case 'f':
+ test_options->forward = atoi(optarg);
+ break;
case 'h':
/* fall through */
default:
@@ -117,6 +144,23 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
}
}
+ if ((test_options->num_queue + test_options->num_dummy) > MAX_QUEUES) {
+ printf("Error: Too many queues. Max supported %i\n.",
+ MAX_QUEUES);
+ ret = -1;
+ }
+
+ test_options->tot_queue = test_options->num_queue +
+ test_options->num_dummy;
+ test_options->tot_event = test_options->num_queue *
+ test_options->num_event;
+
+ test_options->queue_size = test_options->num_event;
+
+ /* When forwarding, all events may end up in a single queue. */
+ if (test_options->forward)
+ test_options->queue_size = test_options->tot_event;
+
return ret;
}
@@ -157,18 +201,28 @@ static int create_pool(test_global_t *global)
odp_pool_param_t pool_param;
odp_pool_t pool;
test_options_t *test_options = &global->test_options;
+ uint32_t num_cpu = test_options->num_cpu;
+ uint32_t num_queue = test_options->num_queue;
+ uint32_t num_dummy = test_options->num_dummy;
uint32_t num_event = test_options->num_event;
uint32_t num_round = test_options->num_round;
uint32_t max_burst = test_options->max_burst;
- int num_cpu = test_options->num_cpu;
- uint32_t tot_event = num_event * num_cpu;
+ uint32_t tot_queue = test_options->tot_queue;
+ uint32_t tot_event = test_options->tot_event;
+ uint32_t queue_size = test_options->queue_size;
+ int forward = test_options->forward;
printf("\nScheduler performance test\n");
- printf(" num cpu %i\n", num_cpu);
- printf(" num rounds %u\n", num_round);
- printf(" num events %u\n", tot_event);
+ printf(" num cpu %u\n", num_cpu);
+ printf(" num queues %u\n", num_queue);
+ printf(" num empty queues %u\n", num_dummy);
+ printf(" total queues %u\n", tot_queue);
printf(" events per queue %u\n", num_event);
- printf(" max burst %u\n", max_burst);
+ printf(" queue size %u\n", queue_size);
+ printf(" max burst size %u\n", max_burst);
+ printf(" total events %u\n", tot_event);
+ printf(" num rounds %u\n", num_round);
+ printf(" forward events %i\n", forward ? 1 : 0);
if (odp_pool_capability(&pool_capa)) {
printf("Error: Pool capa failed.\n");
@@ -198,7 +252,6 @@ static int create_pool(test_global_t *global)
static int create_queues(test_global_t *global)
{
- odp_queue_capability_t queue_capa;
odp_queue_param_t queue_param;
odp_queue_t queue;
odp_buffer_t buf;
@@ -207,7 +260,9 @@ static int create_queues(test_global_t *global)
uint32_t i, j;
test_options_t *test_options = &global->test_options;
uint32_t num_event = test_options->num_event;
- uint32_t num_queue = test_options->num_cpu;
+ uint32_t queue_size = test_options->queue_size;
+ uint32_t num_queue = test_options->num_queue;
+ uint32_t tot_queue = test_options->tot_queue;
int type = test_options->queue_type;
odp_pool_t pool = global->pool;
@@ -222,49 +277,56 @@ static int create_queues(test_global_t *global)
sync = ODP_SCHED_SYNC_ORDERED;
}
- printf(" num queues %u\n", num_queue);
printf(" queue type %s\n\n", type_str);
- if (odp_queue_capability(&queue_capa)) {
- printf("Error: Queue capa failed.\n");
+ if (tot_queue > global->schedule_config.num_queues) {
+ printf("Max queues supported %u\n",
+ global->schedule_config.num_queues);
return -1;
}
- if (num_queue > queue_capa.sched.max_num) {
- printf("Max queues supported %u\n", queue_capa.sched.max_num);
+ if (global->schedule_config.queue_size &&
+ queue_size > global->schedule_config.queue_size) {
+ printf("Max queue size %u\n",
+ global->schedule_config.queue_size);
return -1;
}
- if (queue_capa.sched.max_size &&
- num_event > queue_capa.sched.max_size) {
- printf("Max events per queue %u\n", queue_capa.sched.max_size);
- return -1;
- }
-
- for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
- global->queue[i] = ODP_QUEUE_INVALID;
-
odp_queue_param_init(&queue_param);
queue_param.type = ODP_QUEUE_TYPE_SCHED;
queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
queue_param.sched.sync = sync;
queue_param.sched.group = ODP_SCHED_GROUP_ALL;
- queue_param.size = num_event;
+ queue_param.size = queue_size;
- for (i = 0; i < num_queue; i++) {
+ for (i = 0; i < tot_queue; i++) {
queue = odp_queue_create(NULL, &queue_param);
+ global->queue[i] = queue;
+
if (queue == ODP_QUEUE_INVALID) {
printf("Error: Queue create failed %u\n", i);
return -1;
}
-
- global->queue[i] = queue;
}
+ /* Store events into queues. Dummy queues are left empty. */
for (i = 0; i < num_queue; i++) {
queue = global->queue[i];
+ if (test_options->forward) {
+ uint32_t next = i + 1;
+
+ if (next == num_queue)
+ next = 0;
+
+ if (odp_queue_context_set(queue, &global->queue[next],
+ sizeof(odp_queue_t))) {
+ printf("Error: Context set failed %u\n", i);
+ return -1;
+ }
+ }
+
for (j = 0; j < num_event; j++) {
buf = odp_buffer_alloc(pool);
@@ -288,13 +350,15 @@ static int destroy_queues(test_global_t *global)
uint32_t i;
odp_event_t ev;
uint64_t wait;
+ test_options_t *test_options = &global->test_options;
+ uint32_t tot_queue = test_options->tot_queue;
wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID)
odp_event_free(ev);
- for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ for (i = 0; i < tot_queue; i++) {
if (global->queue[i] != ODP_QUEUE_INVALID) {
if (odp_queue_destroy(global->queue[i])) {
printf("Error: Queue destroy failed %u\n", i);
@@ -314,10 +378,12 @@ static int test_sched(void *arg)
uint64_t events, enqueues;
odp_time_t t1, t2;
odp_queue_t queue;
+ odp_queue_t *next;
test_global_t *global = arg;
test_options_t *test_options = &global->test_options;
uint32_t num_round = test_options->num_round;
uint32_t max_burst = test_options->max_burst;
+ int forward = test_options->forward;
odp_event_t ev[max_burst];
thr = odp_thread_id();
@@ -343,6 +409,11 @@ static int test_sched(void *arg)
events += num;
i = 0;
+ if (odp_unlikely(forward)) {
+ next = odp_queue_context(queue);
+ queue = *next;
+ }
+
while (num) {
num_enq = odp_queue_enq_multi(queue, &ev[i],
num);
@@ -395,6 +466,11 @@ static int test_sched(void *arg)
if (ev[0] == ODP_EVENT_INVALID)
break;
+ if (odp_unlikely(forward)) {
+ next = odp_queue_context(queue);
+ queue = *next;
+ }
+
odp_queue_enq(queue, ev[0]);
}
@@ -485,6 +561,9 @@ static void print_stat(test_global_t *global)
(1000.0 * rounds_ave) / nsec_ave);
printf(" events per sec: %.3f M\n\n",
(1000.0 * events_ave) / nsec_ave);
+
+ printf("TOTAL events per sec: %.3f M\n\n",
+ (1000.0 * events_sum) / nsec_ave);
}
int main(int argc, char **argv)
@@ -520,6 +599,9 @@ int main(int argc, char **argv)
return -1;
}
+ odp_schedule_config_init(&global->schedule_config);
+ odp_schedule_config(&global->schedule_config);
+
if (set_num_cpu(global))
return -1;
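
Forward mode links the scheduled queues into a ring through their queue contexts: each context stores the next queue's handle, and the worker enqueues dequeued events there instead of back to the source queue. The two halves of the mechanism, condensed from this file:

	/* Setup: each queue's context points at the next queue */
	for (i = 0; i < num_queue; i++) {
		uint32_t next = (i + 1 == num_queue) ? 0 : i + 1;

		if (odp_queue_context_set(global->queue[i],
					  &global->queue[next],
					  sizeof(odp_queue_t)))
			return -1;
	}

	/* Hot path: follow the ring before enqueueing the burst back */
	if (odp_unlikely(forward)) {
		odp_queue_t *next = odp_queue_context(queue);

		queue = *next;
	}
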
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
index d81994cd9..393ea3521 100644
--- a/test/performance/odp_sched_pktio.c
+++ b/test/performance/odp_sched_pktio.c
@@ -19,19 +19,28 @@
#define MAX_PKTIOS 32
#define MAX_PKTIO_NAME 31
#define MAX_PKTIO_QUEUES MAX_WORKERS
+#define MAX_PIPE_STAGES 64
+#define MAX_PIPE_QUEUES 1024
#define MAX_PKT_LEN 1514
-#define MAX_PKT_NUM (16 * 1024)
+#define MAX_PKT_NUM (128 * 1024)
#define MIN_PKT_SEG_LEN 64
-#define BURST_SIZE 32
#define CHECK_PERIOD 10000
#define TEST_PASSED_LIMIT 5000
#define TIMEOUT_OFFSET_NS 1000000
+#define SCHED_MODE_PARAL 1
+#define SCHED_MODE_ATOMIC 2
+#define SCHED_MODE_ORDER 3
typedef struct test_options_t {
long int timeout_us;
+ int sched_mode;
int num_worker;
int num_pktio;
int num_pktio_queue;
+ int burst_size;
+ int pipe_stages;
+ int pipe_queues;
+ uint32_t pipe_queue_size;
uint8_t collect_stat;
char pktio_name[MAX_PKTIOS][MAX_PKTIO_NAME + 1];
@@ -45,16 +54,29 @@ typedef struct {
typedef struct ODP_ALIGNED_CACHE {
uint64_t rx_pkt;
uint64_t tx_pkt;
+ uint64_t pipe_pkt;
+ uint64_t tx_drop;
+ uint64_t pipe_drop;
uint64_t tmo;
} worker_stat_t;
-typedef struct queue_context_t {
+typedef struct pktin_queue_context_t {
+ /* Queue context must start with stage and idx */
+ uint16_t stage;
+ uint16_t queue_idx;
+
+ uint8_t dst_pktio;
+ uint8_t dst_queue;
+ uint8_t src_pktio;
+ uint8_t src_queue;
odp_pktout_queue_t dst_pktout;
- uint8_t dst_pktio;
- uint8_t dst_queue;
- uint8_t src_pktio;
- uint8_t src_queue;
-} queue_context_t;
+} pktin_queue_context_t;
+
+typedef struct pipe_queue_context_t {
+ /* Queue context must start with stage and idx. */
+ uint16_t stage;
+ uint16_t queue_idx;
+} pipe_queue_context_t;
typedef struct {
volatile int stop_workers;
@@ -79,7 +101,7 @@ typedef struct {
odph_ethaddr_t my_addr;
odp_queue_t input_queue[MAX_PKTIO_QUEUES];
odp_pktout_queue_t pktout[MAX_PKTIO_QUEUES];
- queue_context_t queue_context[MAX_PKTIO_QUEUES];
+ pktin_queue_context_t queue_context[MAX_PKTIO_QUEUES];
} pktio[MAX_PKTIOS];
@@ -91,12 +113,22 @@ typedef struct {
} timer;
+ struct {
+ odp_queue_t queue[MAX_PIPE_QUEUES];
+ } pipe_queue[MAX_PKTIOS][MAX_PIPE_STAGES];
+
+ struct {
+ pipe_queue_context_t ctx;
+ } pipe_queue_ctx[MAX_PIPE_STAGES][MAX_PIPE_QUEUES];
+
worker_arg_t worker_arg[MAX_WORKERS];
worker_stat_t worker_stat[MAX_WORKERS];
uint64_t rx_pkt_sum;
uint64_t tx_pkt_sum;
+ odp_schedule_config_t schedule_config;
+
} test_global_t;
static test_global_t *test_global;
@@ -125,17 +157,42 @@ static inline void fill_eth_addr(odp_packet_t pkt[], int num,
}
}
-static int worker_thread(void *arg)
+static inline void send_packets(test_global_t *test_global,
+ odp_packet_t pkt[], int num_pkt,
+ int output, odp_pktout_queue_t pktout,
+ int worker_id)
+{
+ int sent, drop;
+
+ fill_eth_addr(pkt, num_pkt, test_global, output);
+
+ sent = odp_pktout_send(pktout, pkt, num_pkt);
+
+ if (odp_unlikely(sent < 0))
+ sent = 0;
+
+ drop = num_pkt - sent;
+
+ if (odp_unlikely(drop))
+ odp_packet_free_multi(&pkt[sent], drop);
+
+ if (odp_unlikely(test_global->opt.collect_stat)) {
+ test_global->worker_stat[worker_id].tx_pkt += sent;
+ test_global->worker_stat[worker_id].tx_drop += drop;
+ }
+}
+
+static int worker_thread_direct(void *arg)
{
- odp_event_t ev[BURST_SIZE];
- int num_pkt, sent, drop, out;
+ int num_pkt, out;
odp_pktout_queue_t pktout;
odp_queue_t queue;
- queue_context_t *queue_context;
+ pktin_queue_context_t *queue_context;
worker_arg_t *worker_arg = arg;
test_global_t *test_global = worker_arg->test_global_ptr;
int worker_id = worker_arg->worker_id;
uint32_t polls = 0;
+ int burst_size = test_global->opt.burst_size;
printf("Worker %i started\n", worker_id);
@@ -143,10 +200,11 @@ static int worker_thread(void *arg)
odp_barrier_wait(&test_global->worker_start);
while (1) {
- odp_packet_t pkt[BURST_SIZE];
+ odp_event_t ev[burst_size];
+ odp_packet_t pkt[burst_size];
num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
- ev, BURST_SIZE);
+ ev, burst_size);
polls++;
@@ -174,22 +232,168 @@ static int worker_thread(void *arg)
pktout = queue_context->dst_pktout;
out = queue_context->dst_pktio;
- fill_eth_addr(pkt, num_pkt, test_global, out);
+ send_packets(test_global, pkt, num_pkt, out, pktout, worker_id);
- sent = odp_pktout_send(pktout, pkt, num_pkt);
+ if (odp_unlikely(test_global->opt.collect_stat))
+ test_global->worker_stat[worker_id].rx_pkt += num_pkt;
+ }
- if (odp_unlikely(sent < 0))
- sent = 0;
+ printf("Worker %i stopped\n", worker_id);
- drop = num_pkt - sent;
+ return 0;
+}
- if (odp_unlikely(drop))
- odp_packet_free_multi(&pkt[sent], drop);
+static inline void enqueue_events(odp_queue_t dst_queue, odp_event_t ev[],
+ int num, int worker_id)
+{
+ int sent, drop;
- if (odp_unlikely(test_global->opt.collect_stat)) {
- test_global->worker_stat[worker_id].rx_pkt += num_pkt;
- test_global->worker_stat[worker_id].tx_pkt += sent;
+ sent = odp_queue_enq_multi(dst_queue, ev, num);
+
+ if (odp_unlikely(sent < 0))
+ sent = 0;
+
+ drop = num - sent;
+
+ if (odp_unlikely(drop))
+ odp_event_free_multi(&ev[sent], drop);
+
+ if (odp_unlikely(test_global->opt.collect_stat))
+ test_global->worker_stat[worker_id].pipe_drop += drop;
+}
+
+static inline odp_queue_t next_queue(test_global_t *test_global, int input,
+ uint16_t stage, uint16_t queue_idx)
+{
+ return test_global->pipe_queue[input][stage].queue[queue_idx];
+}
+
+static int worker_thread_pipeline(void *arg)
+{
+ int i, num_pkt, input, output, output_queue;
+ odp_queue_t queue, dst_queue;
+ odp_pktout_queue_t pktout;
+ pipe_queue_context_t *pipe_context;
+ uint16_t stage, queue_idx;
+ worker_arg_t *worker_arg = arg;
+ test_global_t *test_global = worker_arg->test_global_ptr;
+ int worker_id = worker_arg->worker_id;
+ int pipe_stages = test_global->opt.pipe_stages;
+ int pipe_queues = test_global->opt.pipe_queues;
+ int num_pktio = test_global->opt.num_pktio;
+ int num_pktio_queue = test_global->opt.num_pktio_queue;
+ uint32_t polls = 0;
+ int burst_size = test_global->opt.burst_size;
+
+ printf("Worker %i started\n", worker_id);
+
+ /* Wait for other workers to start */
+ odp_barrier_wait(&test_global->worker_start);
+
+ while (1) {
+ odp_event_t ev[burst_size];
+ odp_packet_t pkt[burst_size];
+
+ num_pkt = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
+ ev, burst_size);
+
+ polls++;
+
+ if (polls == CHECK_PERIOD) {
+ polls = 0;
+ if (test_global->stop_workers)
+ break;
}
+
+ if (num_pkt <= 0)
+ continue;
+
+ pipe_context = odp_queue_context(queue);
+ stage = pipe_context->stage;
+ queue_idx = pipe_context->queue_idx;
+
+ /* A queue is connected to a single input interface. All
+ * packets from a queue are from the same interface. */
+ input = odp_packet_input_index(odp_packet_from_event(ev[0]));
+
+ if (DEBUG_PRINT)
+ printf("worker %i: stage %u, idx %u, %i packets\n",
+ worker_id, stage, queue_idx, num_pkt);
+
+ if (stage == 0) {
+ if (odp_unlikely(test_global->opt.collect_stat))
+ test_global->worker_stat[worker_id].rx_pkt +=
+ num_pkt;
+
+ /* The first stage (packet input). Forward packet flows
+ * into first pipeline queues. */
+ if (pipe_queues > num_pktio_queue) {
+ /* More pipeline queues than input queues.
+ * Use flow hash to spread flows into pipeline
+ * queues. */
+ odp_packet_t p;
+ worker_stat_t *stat;
+ uint32_t hash;
+ uint16_t idx;
+ int drop = 0;
+
+ stat = &test_global->worker_stat[worker_id];
+
+ for (i = 0; i < num_pkt; i++) {
+ p = odp_packet_from_event(ev[i]);
+ hash = odp_packet_flow_hash(p);
+ idx = queue_idx;
+
+ if (odp_packet_has_flow_hash(p))
+ idx = hash % pipe_queues;
+
+ dst_queue = next_queue(test_global,
+ input, stage,
+ idx);
+
+ if (odp_queue_enq(dst_queue, ev[i])) {
+ odp_event_free(ev[i]);
+ drop++;
+ }
+ }
+
+ if (odp_unlikely(test_global->opt.collect_stat))
+ stat->pipe_drop += drop;
+ } else {
+ queue_idx = queue_idx % pipe_queues;
+ dst_queue = next_queue(test_global, input,
+ stage, queue_idx);
+
+ enqueue_events(dst_queue, ev, num_pkt,
+ worker_id);
+ }
+ continue;
+ }
+
+ if (stage < pipe_stages) {
+ /* Middle stages */
+ dst_queue = next_queue(test_global, input, stage,
+ queue_idx);
+ enqueue_events(dst_queue, ev, num_pkt, worker_id);
+
+ if (odp_unlikely(test_global->opt.collect_stat))
+ test_global->worker_stat[worker_id].pipe_pkt +=
+ num_pkt;
+
+ continue;
+ }
+
+ /* The last stage, send packets out */
+ odp_packet_from_event_multi(pkt, ev, num_pkt);
+
+ /* With a single interface, loop packets back. Otherwise forward
+ * to the next interface. */
+ output = (input + 1) % num_pktio;
+ output_queue = queue_idx % num_pktio_queue;
+ pktout = test_global->pktio[output].pktout[output_queue];
+
+ send_packets(test_global, pkt, num_pkt, output, pktout,
+ worker_id);
}
printf("Worker %i stopped\n", worker_id);
@@ -199,17 +403,17 @@ static int worker_thread(void *arg)
static int worker_thread_timers(void *arg)
{
- odp_event_t ev[BURST_SIZE];
- int num, num_pkt, sent, drop, out, tmos, i, src_pktio, src_queue;
+ int num, num_pkt, out, tmos, i, src_pktio, src_queue;
odp_pktout_queue_t pktout;
odp_queue_t queue;
- queue_context_t *queue_context;
+ pktin_queue_context_t *queue_context;
odp_timer_t timer;
odp_timer_set_t ret;
worker_arg_t *worker_arg = arg;
test_global_t *test_global = worker_arg->test_global_ptr;
int worker_id = worker_arg->worker_id;
uint32_t polls = 0;
+ int burst_size = test_global->opt.burst_size;
uint64_t tick = test_global->timer.timeout_tick;
printf("Worker (timers) %i started\n", worker_id);
@@ -218,10 +422,11 @@ static int worker_thread_timers(void *arg)
odp_barrier_wait(&test_global->worker_start);
while (1) {
- odp_packet_t pkt[BURST_SIZE];
+ odp_event_t ev[burst_size];
+ odp_packet_t pkt[burst_size];
num = odp_schedule_multi(&queue, ODP_SCHED_NO_WAIT,
- ev, BURST_SIZE);
+ ev, burst_size);
polls++;
@@ -299,22 +504,10 @@ static int worker_thread_timers(void *arg)
pktout = queue_context->dst_pktout;
out = queue_context->dst_pktio;
- fill_eth_addr(pkt, num_pkt, test_global, out);
-
- sent = odp_pktout_send(pktout, pkt, num_pkt);
+ send_packets(test_global, pkt, num_pkt, out, pktout, worker_id);
- if (odp_unlikely(sent < 0))
- sent = 0;
-
- drop = num_pkt - sent;
-
- if (odp_unlikely(drop))
- odp_packet_free_multi(&pkt[sent], drop);
-
- if (odp_unlikely(test_global->opt.collect_stat)) {
+ if (odp_unlikely(test_global->opt.collect_stat))
test_global->worker_stat[worker_id].rx_pkt += num_pkt;
- test_global->worker_stat[worker_id].tx_pkt += sent;
- }
}
printf("Worker %i stopped\n", worker_id);
@@ -343,12 +536,17 @@ static void print_usage(const char *progname)
"Usage: %s [options]\n"
"\n"
"OPTIONS:\n"
- " -i, --interface <name> Packet IO interfaces (comma-separated, no spaces)\n"
- " -c, --num_cpu <number> Worker thread count. Default: 1\n"
- " -q, --num_queue <number> Number of pktio queues. Default: Worker thread count\n"
- " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n"
- " -s, --stat Collect statistics.\n"
- " -h, --help Display help and exit.\n\n",
+ " -i, --interface <name> Packet IO interfaces (comma-separated, no spaces)\n"
+ " -c, --num_cpu <number> Worker thread count. Default: 1\n"
+ " -q, --num_queue <number> Number of pktio queues. Default: Worker thread count\n"
+ " -b, --burst <number> Maximum number of events requested from scheduler. Default: 32\n"
+ " -t, --timeout <number> Flow inactivity timeout (in usec) per packet. Default: 0 (don't use timers)\n"
+ " --pipe-stages <number> Number of pipeline stages per interface\n"
+ " --pipe-queues <number> Number of queues per pipeline stage\n"
+ " --pipe-queue-size <num> Number of events a pipeline queue must be able to store. Default 256.\n"
+ " -m, --sched_mode <mode> Scheduler synchronization mode for all queues. 1: parallel, 2: atomic, 3: ordered. Default: 2\n"
+ " -s, --stat Collect statistics.\n"
+ " -h, --help Display help and exit.\n\n",
NO_PATH(progname));
}
@@ -356,26 +554,31 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
{
int i, opt, long_index;
char *name, *str;
- int len, str_len;
+ int len, str_len, sched_mode;
const struct option longopts[] = {
- {"interface", required_argument, NULL, 'i'},
- {"num_cpu", required_argument, NULL, 'c'},
- {"num_queue", required_argument, NULL, 'q'},
- {"timeout", required_argument, NULL, 't'},
- {"stat", no_argument, NULL, 's'},
- {"help", no_argument, NULL, 'h'},
+ {"interface", required_argument, NULL, 'i'},
+ {"num_cpu", required_argument, NULL, 'c'},
+ {"num_queue", required_argument, NULL, 'q'},
+ {"burst", required_argument, NULL, 'b'},
+ {"timeout", required_argument, NULL, 't'},
+ {"sched_mode", required_argument, NULL, 'm'},
+ {"pipe-stages", required_argument, NULL, 0},
+ {"pipe-queues", required_argument, NULL, 1},
+ {"pipe-queue-size", required_argument, NULL, 2},
+ {"stat", no_argument, NULL, 's'},
+ {"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- const char *shortopts = "+i:c:q:t:sh";
+ const char *shortopts = "+i:c:q:b:t:m:sh";
int ret = 0;
memset(test_options, 0, sizeof(test_options_t));
+ test_options->sched_mode = SCHED_MODE_ATOMIC;
test_options->num_worker = 1;
test_options->num_pktio_queue = 0;
-
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
+ test_options->burst_size = 32;
+ test_options->pipe_queue_size = 256;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -384,6 +587,15 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
break; /* No more options */
switch (opt) {
+ case 0:
+ test_options->pipe_stages = atoi(optarg);
+ break;
+ case 1:
+ test_options->pipe_queues = atoi(optarg);
+ break;
+ case 2:
+ test_options->pipe_queue_size = atoi(optarg);
+ break;
case 'i':
i = 0;
str = optarg;
@@ -421,9 +633,15 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
case 'q':
test_options->num_pktio_queue = atoi(optarg);
break;
+ case 'b':
+ test_options->burst_size = atoi(optarg);
+ break;
case 't':
test_options->timeout_us = atol(optarg);
break;
+ case 'm':
+ test_options->sched_mode = atoi(optarg);
+ break;
case 's':
test_options->collect_stat = 1;
break;
@@ -437,12 +655,54 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
}
}
+ if (test_options->timeout_us && test_options->pipe_stages) {
+ printf("Error: Cannot run timeout and pipeline tests simultaneously\n");
+ ret = -1;
+ }
+
+ if (test_options->pipe_stages > MAX_PIPE_STAGES) {
+ printf("Error: Too many pipeline stages\n");
+ ret = -1;
+ }
+
+ if (test_options->pipe_queues > MAX_PIPE_QUEUES) {
+ printf("Error: Too many queues per pipeline stage\n");
+ ret = -1;
+ }
+
+ if (test_options->num_pktio == 0) {
+ printf("Error: At least one pktio interface needed.\n");
+ ret = -1;
+ }
+
+ sched_mode = test_options->sched_mode;
+ if (sched_mode != SCHED_MODE_PARAL &&
+ sched_mode != SCHED_MODE_ATOMIC &&
+ sched_mode != SCHED_MODE_ORDER) {
+ printf("Error: Bad scheduler mode: %i\n", sched_mode);
+ ret = -1;
+ }
+
if (test_options->num_pktio_queue == 0)
test_options->num_pktio_queue = test_options->num_worker;
return ret;
}
+static odp_schedule_sync_t sched_sync_mode(test_global_t *test_global)
+{
+ switch (test_global->opt.sched_mode) {
+ case SCHED_MODE_PARAL:
+ return ODP_SCHED_SYNC_PARALLEL;
+ case SCHED_MODE_ATOMIC:
+ return ODP_SCHED_SYNC_ATOMIC;
+ case SCHED_MODE_ORDER:
+ return ODP_SCHED_SYNC_ORDERED;
+ default:
+ return -1;
+ }
+}
+
static int config_setup(test_global_t *test_global)
{
int i, cpu;
@@ -465,10 +725,8 @@ static int config_setup(test_global_t *test_global)
cpu = odp_cpumask_next(cpumask, cpu);
}
- if (test_global->opt.num_pktio == 0) {
- printf("Error: At least one pktio interface needed.\n");
- return -1;
- }
+ odp_schedule_config_init(&test_global->schedule_config);
+ odp_schedule_config(&test_global->schedule_config);
if (odp_pool_capability(&pool_capa)) {
printf("Error: Pool capability failed.\n");
@@ -481,8 +739,10 @@ static int config_setup(test_global_t *test_global)
if (pool_capa.pkt.max_len && pkt_len > pool_capa.pkt.max_len)
pkt_len = pool_capa.pkt.max_len;
- if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num)
+ if (pool_capa.pkt.max_num && pkt_num > pool_capa.pkt.max_num) {
pkt_num = pool_capa.pkt.max_num;
+ printf("Warning: Pool size rounded down to %u\n", pkt_num);
+ }
test_global->pkt_len = pkt_len;
test_global->pkt_num = pkt_num;
@@ -522,6 +782,7 @@ static void print_config(test_global_t *test_global)
" queues per interface: %i\n",
test_global->opt.num_pktio_queue);
+ printf(" burst size: %u\n", test_global->opt.burst_size);
printf(" collect statistics: %u\n", test_global->opt.collect_stat);
printf(" timeout usec: %li\n", test_global->opt.timeout_us);
@@ -531,34 +792,40 @@ static void print_config(test_global_t *test_global)
static void print_stat(test_global_t *test_global, uint64_t nsec)
{
int i;
- uint64_t rx, tx, drop, tmo;
+ uint64_t rx, tx, pipe, drop, tmo;
uint64_t rx_sum = 0;
uint64_t tx_sum = 0;
+ uint64_t pipe_sum = 0;
uint64_t tmo_sum = 0;
double sec = 0.0;
printf("\nTest statistics\n");
- printf(" worker rx_pkt tx_pkt dropped tmo\n");
+ printf(" worker rx_pkt tx_pkt pipe dropped tmo\n");
for (i = 0; i < test_global->opt.num_worker; i++) {
rx = test_global->worker_stat[i].rx_pkt;
tx = test_global->worker_stat[i].tx_pkt;
+ pipe = test_global->worker_stat[i].pipe_pkt;
tmo = test_global->worker_stat[i].tmo;
rx_sum += rx;
tx_sum += tx;
+ pipe_sum += pipe;
tmo_sum += tmo;
+ drop = test_global->worker_stat[i].tx_drop +
+ test_global->worker_stat[i].pipe_drop;
printf(" %6i %16" PRIu64 " %16" PRIu64 " %16" PRIu64 " %16"
- PRIu64 "\n", i, rx, tx, rx - tx, tmo);
+ PRIu64 " %16" PRIu64 "\n", i, rx, tx, pipe, drop, tmo);
}
test_global->rx_pkt_sum = rx_sum;
test_global->tx_pkt_sum = tx_sum;
drop = rx_sum - tx_sum;
- printf(" -------------------------------------------------------------------\n");
+ printf(" ------------------------------------------------------------------------------------\n");
printf(" total %16" PRIu64 " %16" PRIu64 " %16" PRIu64 " %16"
- PRIu64 "\n\n", rx_sum, tx_sum, drop, tmo_sum);
+ PRIu64 " %16" PRIu64 "\n\n", rx_sum, tx_sum, pipe_sum, drop,
+ tmo_sum);
sec = nsec / 1000000000.0;
printf(" Total test time: %.2f sec\n", sec);
@@ -606,7 +873,7 @@ static int open_pktios(test_global_t *test_global)
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
- sched_sync = ODP_SCHED_SYNC_ATOMIC;
+ sched_sync = sched_sync_mode(test_global);
for (i = 0; i < num_pktio; i++)
test_global->pktio[i].pktio = ODP_PKTIO_INVALID;
@@ -684,7 +951,7 @@ static int open_pktios(test_global_t *test_global)
for (j = 0; j < num_queue; j++) {
odp_queue_t queue;
void *ctx;
- uint32_t len = sizeof(queue_context_t);
+ uint32_t len = sizeof(pktin_queue_context_t);
queue = test_global->pktio[i].input_queue[j];
ctx = &test_global->pktio[i].queue_context[j];
@@ -700,6 +967,9 @@ static int open_pktios(test_global_t *test_global)
pktout_param.num_queues = num_queue;
pktout_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ if (test_global->opt.pipe_stages)
+ pktout_param.op_mode = ODP_PKTIO_OP_MT;
+
if (odp_pktout_queue_config(pktio, &pktout_param)) {
printf("Error (%s): Pktout config failed.\n", name);
return -1;
@@ -722,7 +992,7 @@ static void link_pktios(test_global_t *test_global)
int i, num_pktio, input, output;
int num_queue;
odp_pktout_queue_t pktout;
- queue_context_t *ctx;
+ pktin_queue_context_t *ctx;
num_pktio = test_global->opt.num_pktio;
num_queue = test_global->opt.num_pktio_queue;
@@ -738,6 +1008,8 @@ static void link_pktios(test_global_t *test_global)
for (i = 0; i < num_queue; i++) {
ctx = &test_global->pktio[input].queue_context[i];
pktout = test_global->pktio[output].pktout[i];
+ ctx->stage = 0;
+ ctx->queue_idx = i;
ctx->dst_pktout = pktout;
ctx->dst_pktio = output;
ctx->dst_queue = i;
@@ -837,6 +1109,105 @@ static int close_pktios(test_global_t *test_global)
return ret;
}
+static int create_pipeline_queues(test_global_t *test_global)
+{
+ int i, j, k, num_pktio, stages, queues, ctx_size;
+ pipe_queue_context_t *ctx;
+ odp_queue_param_t queue_param;
+ odp_schedule_sync_t sched_sync;
+ int ret = 0;
+
+ num_pktio = test_global->opt.num_pktio;
+ stages = test_global->opt.pipe_stages;
+ queues = test_global->opt.pipe_queues;
+ sched_sync = sched_sync_mode(test_global);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ queue_param.sched.sync = sched_sync;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ queue_param.size = test_global->opt.pipe_queue_size;
+ if (test_global->schedule_config.queue_size &&
+ queue_param.size > test_global->schedule_config.queue_size) {
+ printf("Error: Pipeline queue max size is %u\n",
+ test_global->schedule_config.queue_size);
+ return -1;
+ }
+
+ ctx_size = sizeof(pipe_queue_context_t);
+
+ for (i = 0; i < stages; i++) {
+ for (j = 0; j < queues; j++) {
+ ctx = &test_global->pipe_queue_ctx[i][j].ctx;
+
+ /* packet input is stage 0 */
+ ctx->stage = i + 1;
+ ctx->queue_idx = j;
+ }
+ }
+
+ for (k = 0; k < num_pktio; k++) {
+ for (i = 0; i < stages; i++) {
+ for (j = 0; j < queues; j++) {
+ odp_queue_t q;
+
+ q = odp_queue_create(NULL, &queue_param);
+ test_global->pipe_queue[k][i].queue[j] = q;
+
+ if (q == ODP_QUEUE_INVALID) {
+ printf("Error: Queue create failed [%i] %i/%i\n",
+ k, i, j);
+ ret = -1;
+ break;
+ }
+
+ ctx = &test_global->pipe_queue_ctx[i][j].ctx;
+
+ if (odp_queue_context_set(q, ctx, ctx_size)) {
+ printf("Error: Queue ctx set failed [%i] %i/%i\n",
+ k, i, j);
+ ret = -1;
+ break;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void destroy_pipeline_queues(test_global_t *test_global)
+{
+ int i, j, k, num_pktio, stages, queues;
+ odp_queue_t queue;
+
+ num_pktio = test_global->opt.num_pktio;
+ stages = test_global->opt.pipe_stages;
+ queues = test_global->opt.pipe_queues;
+
+ for (k = 0; k < num_pktio; k++) {
+ for (i = 0; i < stages; i++) {
+ for (j = 0; j < queues; j++) {
+ queue = test_global->pipe_queue[k][i].queue[j];
+
+ if (queue == ODP_QUEUE_INVALID) {
+ printf("Error: Bad queue handle [%i] %i/%i\n",
+ k, i, j);
+ return;
+ }
+
+ if (odp_queue_destroy(queue)) {
+ printf("Error: Queue destroy failed [%i] %i/%i\n",
+ k, i, j);
+ return;
+ }
+ }
+ }
+ }
+}
+
static int create_timers(test_global_t *test_global)
{
int num_timer, num_pktio, num_queue, i, j;
@@ -1021,8 +1392,10 @@ static void start_workers(odph_odpthread_t thread[],
if (test_global->opt.timeout_us)
param.start = worker_thread_timers;
+ else if (test_global->opt.pipe_stages)
+ param.start = worker_thread_pipeline;
else
- param.start = worker_thread;
+ param.start = worker_thread_direct;
param.thr_type = ODP_THREAD_WORKER;
param.instance = test_global->instance;
@@ -1054,10 +1427,18 @@ int main(int argc, char *argv[])
odp_init_t init;
odp_shm_t shm;
odp_time_t t1, t2;
+ odph_helper_options_t helper_options;
odph_odpthread_t thread[MAX_WORKERS];
test_options_t test_options;
int ret = 0;
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ printf("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
signal(SIGINT, sig_handler);
if (parse_options(argc, argv, &test_options))
@@ -1074,6 +1455,8 @@ int main(int argc, char *argv[])
if (test_options.timeout_us)
init.not_used.feat.timer = 0;
+ init.mem_model = helper_options.mem_model;
+
/* Init ODP before calling anything else */
if (odp_init_global(&instance, &init, NULL)) {
printf("Error: Global init failed.\n");
@@ -1115,6 +1498,9 @@ int main(int argc, char *argv[])
link_pktios(test_global);
+ if (create_pipeline_queues(test_global))
+ goto quit;
+
if (create_timers(test_global))
goto quit;
@@ -1145,6 +1531,7 @@ quit:
stop_pktios(test_global);
empty_queues();
close_pktios(test_global);
+ destroy_pipeline_queues(test_global);
destroy_timers(test_global);
if (test_global->opt.collect_stat) {
@@ -1160,17 +1547,17 @@ quit:
if (odp_shm_free(shm)) {
printf("Error: shm free failed.\n");
- return -1;
+ ret = -1;
}
if (odp_term_local()) {
printf("Error: term local failed.\n");
- return -1;
+ ret = -1;
}
if (odp_term_global(instance)) {
printf("Error: term global failed.\n");
- return -1;
+ ret = -1;
}
return ret;
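
The transmit helper factored out of all three worker variants enforces one rule: whatever odp_pktout_send() does not accept is freed and counted as a drop. Condensed version (send_burst and the stat pointer are simplified stand-ins for the patch's send_packets() and its per-worker stat lookup):

	static inline void send_burst(odp_pktout_queue_t pktout,
				      odp_packet_t pkt[], int num_pkt,
				      worker_stat_t *stat)
	{
		int sent = odp_pktout_send(pktout, pkt, num_pkt);

		if (odp_unlikely(sent < 0))
			sent = 0;

		/* Free packets the interface did not accept */
		if (odp_unlikely(num_pkt - sent))
			odp_packet_free_multi(&pkt[sent], num_pkt - sent);

		stat->tx_pkt += sent;
		stat->tx_drop += num_pkt - sent;
	}
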
diff --git a/test/performance/odp_sched_pktio_run.sh b/test/performance/odp_sched_pktio_run.sh
index db14fb598..1c8d2945b 100755
--- a/test/performance/odp_sched_pktio_run.sh
+++ b/test/performance/odp_sched_pktio_run.sh
@@ -72,7 +72,7 @@ run_sched_pktio()
# Run test for 5 sec
sleep 5
- kill ${GEN_PID}
+ kill -2 ${GEN_PID}
wait ${GEN_PID}
# Kill with SIGINT to output statistics
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
index 3b75f635e..afe5b73b1 100644
--- a/test/performance/odp_scheduling.c
+++ b/test/performance/odp_scheduling.c
@@ -30,7 +30,7 @@
/* GNU lib C */
#include <getopt.h>
-#define NUM_MSG (512 * 1024) /**< Number of msg in pool */
+#define MAX_BUF (512 * 1024) /**< Maximum pool size */
#define MAX_ALLOCS 32 /**< Alloc burst size */
#define QUEUES_PER_PRIO 64 /**< Queue per priority */
#define NUM_PRIOS 2 /**< Number of tested priorities */
@@ -762,9 +762,6 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const char *shortopts = "+c:fh";
- /* let helper collect its own arguments (e.g. --odph_proc) */
- argc = odph_parse_options(argc, argv);
-
args->cpu_count = 1; /* use one worker by default */
while (1) {
@@ -798,6 +795,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
*/
int main(int argc, char *argv[])
{
+ odph_helper_options_t helper_options;
odph_odpthread_t *thread_tbl;
test_args_t args;
int num_workers;
@@ -811,17 +809,30 @@ int main(int argc, char *argv[])
odp_pool_param_t params;
int ret = 0;
odp_instance_t instance;
+ odp_init_t init_param;
odph_odpthread_params_t thr_params;
odp_queue_capability_t capa;
- uint32_t num_queues;
+ odp_pool_capability_t pool_capa;
+ odp_schedule_config_t schedule_config;
+ uint32_t num_queues, num_buf;
printf("\nODP example starts\n\n");
+ /* Let helper collect its own arguments (e.g. --odph_proc) */
+ argc = odph_parse_options(argc, argv);
+ if (odph_options(&helper_options)) {
+ LOG_ERR("Error: reading ODP helper options failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
memset(&args, 0, sizeof(args));
parse_args(argc, argv, &args);
/* ODP global init */
- if (odp_init_global(&instance, NULL, NULL)) {
+ if (odp_init_global(&instance, &init_param, NULL)) {
LOG_ERR("ODP global init failed.\n");
return -1;
}
@@ -869,11 +880,19 @@ int main(int argc, char *argv[])
/*
* Create message pool
*/
+ if (odp_pool_capability(&pool_capa)) {
+ LOG_ERR("Pool capabilities failed.\n");
+ return -1;
+ }
+
+ num_buf = MAX_BUF;
+ if (pool_capa.buf.max_num && pool_capa.buf.max_num < MAX_BUF)
+ num_buf = pool_capa.buf.max_num;
odp_pool_param_init(&params);
params.buf.size = sizeof(test_message_t);
params.buf.align = 0;
- params.buf.num = NUM_MSG;
+ params.buf.num = num_buf;
params.type = ODP_POOL_BUFFER;
pool = odp_pool_create("msg_pool", &params);
@@ -890,10 +909,15 @@ int main(int argc, char *argv[])
return -1;
}
+ odp_schedule_config_init(&schedule_config);
+ odp_schedule_config(&schedule_config);
+
globals->queues_per_prio = QUEUES_PER_PRIO;
num_queues = globals->queues_per_prio * NUM_PRIOS;
- if (num_queues > capa.sched.max_num)
- globals->queues_per_prio = capa.sched.max_num / NUM_PRIOS;
+ if (schedule_config.num_queues &&
+ num_queues > schedule_config.num_queues)
+ globals->queues_per_prio = schedule_config.num_queues /
+ NUM_PRIOS;
/* One plain queue is also used */
num_queues = (globals->queues_per_prio * NUM_PRIOS) + 1;
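
The recurring API change in this patch: limits for scheduled queues now come from odp_schedule_config(), called once after global init and before any scheduled queue is created, instead of odp_queue_capability(). Zero-valued fields mean no fixed limit. Minimal sketch of the clamp done above:

	odp_schedule_config_t schedule_config;

	odp_schedule_config_init(&schedule_config);

	if (odp_schedule_config(&schedule_config))
		return -1;

	/* Zero num_queues means no implementation-defined limit */
	if (schedule_config.num_queues &&
	    num_queues > schedule_config.num_queues)
		num_queues = schedule_config.num_queues;
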
diff --git a/test/performance/odp_scheduling_run.sh b/test/performance/odp_scheduling_run.sh
index ae3d1c8f0..577922767 100755
--- a/test/performance/odp_scheduling_run.sh
+++ b/test/performance/odp_scheduling_run.sh
@@ -17,7 +17,12 @@ run()
echo odp_scheduling_run starts requesting $1 worker threads
echo ===============================================
- $TEST_DIR/odp_scheduling${EXEEXT} -c $1 || ret=1
+ $TEST_DIR/odp_scheduling${EXEEXT} -c $1
+ ret=$?
+
+ if [ $ret -ne 0 ]; then
+ echo odp_scheduling FAILED
+ exit $ret
+ fi
}
run 1
@@ -26,4 +31,4 @@ run 8
run 11
run $ALL
-exit $ret
+exit 0
diff --git a/test/validation/api/Makefile.am b/test/validation/api/Makefile.am
index 92ae85f8e..257764648 100644
--- a/test/validation/api/Makefile.am
+++ b/test/validation/api/Makefile.am
@@ -3,6 +3,7 @@ ODP_MODULES = atomic \
buffer \
chksum \
classification \
+ comp \
cpumask \
crypto \
errno \
@@ -36,6 +37,7 @@ TESTS = \
buffer/buffer_main$(EXEEXT) \
chksum/chksum_main$(EXEEXT) \
classification/classification_main$(EXEEXT) \
+ comp/comp_main$(EXEEXT) \
cpumask/cpumask_main$(EXEEXT) \
crypto/crypto_main$(EXEEXT) \
errno/errno_main$(EXEEXT) \
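
The atomic test changes that follow share the motivation of the mem_model plumbing earlier in the patch: file-scope statics are per-process, so under --odph_proc each worker would see its own copy. Shared state, the atomic counters included, therefore moves into the shm-backed global struct and is initialized in place. Sketch of the pattern (assumes the test's global_shared_mem_t):

	odp_shm_t shm;
	global_shared_mem_t *global_mem;

	shm = odp_shm_reserve("test_globals", sizeof(global_shared_mem_t),
			      ODP_CACHE_LINE_SIZE, 0);
	if (shm == ODP_SHM_INVALID)
		return -1;

	global_mem = odp_shm_addr(shm);
	memset(global_mem, 0, sizeof(global_shared_mem_t));

	/* Atomics live inside the shared block, initialized in place */
	odp_atomic_init_u32(&global_mem->a32u, 0);
	odp_atomic_init_u64(&global_mem->a64u, 0);
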
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index 71af2d124..9530df9ca 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -8,6 +8,7 @@
#include <malloc.h>
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
@@ -30,26 +31,22 @@
#define CHECK_MAX_MIN (1 << 0)
#define CHECK_XCHG (1 << 2)
-static odp_atomic_u32_t a32u;
-static odp_atomic_u64_t a64u;
-static odp_atomic_u32_t a32u_min;
-static odp_atomic_u32_t a32u_max;
-static odp_atomic_u64_t a64u_min;
-static odp_atomic_u64_t a64u_max;
-static odp_atomic_u32_t a32u_xchg;
-static odp_atomic_u64_t a64u_xchg;
-
typedef __volatile uint32_t volatile_u32_t;
typedef __volatile uint64_t volatile_u64_t;
typedef struct {
- /* Global variables */
+ odp_atomic_u64_t a64u;
+ odp_atomic_u64_t a64u_min;
+ odp_atomic_u64_t a64u_max;
+ odp_atomic_u64_t a64u_xchg;
+ odp_atomic_u32_t a32u;
+ odp_atomic_u32_t a32u_min;
+ odp_atomic_u32_t a32u_max;
+ odp_atomic_u32_t a32u_xchg;
+
uint32_t g_num_threads;
uint32_t g_iterations;
uint32_t g_verbose;
- uint32_t g_max_num_cores;
-
- volatile_u32_t global_lock_owner;
} global_shared_mem_t;
/* Per-thread memory */
@@ -101,7 +98,7 @@ static void test_atomic_inc_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_inc_u32(&a32u);
+ odp_atomic_inc_u32(&global_mem->a32u);
}
static void test_atomic_inc_64(void)
@@ -109,7 +106,7 @@ static void test_atomic_inc_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_inc_u64(&a64u);
+ odp_atomic_inc_u64(&global_mem->a64u);
}
static void test_atomic_dec_32(void)
@@ -117,7 +114,7 @@ static void test_atomic_dec_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_dec_u32(&a32u);
+ odp_atomic_dec_u32(&global_mem->a32u);
}
static void test_atomic_dec_64(void)
@@ -125,7 +122,7 @@ static void test_atomic_dec_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_dec_u64(&a64u);
+ odp_atomic_dec_u64(&global_mem->a64u);
}
static void test_atomic_fetch_inc_32(void)
@@ -133,7 +130,7 @@ static void test_atomic_fetch_inc_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u32(&a32u);
+ odp_atomic_fetch_inc_u32(&global_mem->a32u);
}
static void test_atomic_fetch_inc_64(void)
@@ -141,7 +138,7 @@ static void test_atomic_fetch_inc_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u64(&a64u);
+ odp_atomic_fetch_inc_u64(&global_mem->a64u);
}
static void test_atomic_fetch_dec_32(void)
@@ -149,7 +146,7 @@ static void test_atomic_fetch_dec_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u32(&a32u);
+ odp_atomic_fetch_dec_u32(&global_mem->a32u);
}
static void test_atomic_fetch_dec_64(void)
@@ -157,7 +154,7 @@ static void test_atomic_fetch_dec_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u64(&a64u);
+ odp_atomic_fetch_dec_u64(&global_mem->a64u);
}
static void test_atomic_add_32(void)
@@ -165,7 +162,7 @@ static void test_atomic_add_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
+ odp_atomic_add_u32(&global_mem->a32u, ADD_SUB_CNT);
}
static void test_atomic_add_64(void)
@@ -173,7 +170,7 @@ static void test_atomic_add_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
+ odp_atomic_add_u64(&global_mem->a64u, ADD_SUB_CNT);
}
static void test_atomic_sub_32(void)
@@ -181,7 +178,7 @@ static void test_atomic_sub_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
+ odp_atomic_sub_u32(&global_mem->a32u, ADD_SUB_CNT);
}
static void test_atomic_sub_64(void)
@@ -189,7 +186,7 @@ static void test_atomic_sub_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
+ odp_atomic_sub_u64(&global_mem->a64u, ADD_SUB_CNT);
}
static void test_atomic_fetch_add_32(void)
@@ -197,7 +194,7 @@ static void test_atomic_fetch_add_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
+ odp_atomic_fetch_add_u32(&global_mem->a32u, ADD_SUB_CNT);
}
static void test_atomic_fetch_add_64(void)
@@ -205,7 +202,7 @@ static void test_atomic_fetch_add_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
+ odp_atomic_fetch_add_u64(&global_mem->a64u, ADD_SUB_CNT);
}
static void test_atomic_fetch_sub_32(void)
@@ -213,7 +210,7 @@ static void test_atomic_fetch_sub_32(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
+ odp_atomic_fetch_sub_u32(&global_mem->a32u, ADD_SUB_CNT);
}
static void test_atomic_fetch_sub_64(void)
@@ -221,7 +218,7 @@ static void test_atomic_fetch_sub_64(void)
int i;
for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
+ odp_atomic_fetch_sub_u64(&global_mem->a64u, ADD_SUB_CNT);
}
static void test_atomic_min_32(void)
@@ -230,8 +227,8 @@ static void test_atomic_min_32(void)
uint32_t tmp;
for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_dec_u32(&a32u);
- odp_atomic_min_u32(&a32u_min, tmp);
+ tmp = odp_atomic_fetch_dec_u32(&global_mem->a32u);
+ odp_atomic_min_u32(&global_mem->a32u_min, tmp);
}
}
@@ -241,8 +238,8 @@ static void test_atomic_min_64(void)
uint64_t tmp;
for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_dec_u64(&a64u);
- odp_atomic_min_u64(&a64u_min, tmp);
+ tmp = odp_atomic_fetch_dec_u64(&global_mem->a64u);
+ odp_atomic_min_u64(&global_mem->a64u_min, tmp);
}
}
@@ -252,8 +249,8 @@ static void test_atomic_max_32(void)
uint32_t tmp;
for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_inc_u32(&a32u);
- odp_atomic_max_u32(&a32u_max, tmp);
+ tmp = odp_atomic_fetch_inc_u32(&global_mem->a32u);
+ odp_atomic_max_u32(&global_mem->a32u_max, tmp);
}
}
@@ -263,8 +260,8 @@ static void test_atomic_max_64(void)
uint64_t tmp;
for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_fetch_inc_u64(&a64u);
- odp_atomic_max_u64(&a64u_max, tmp);
+ tmp = odp_atomic_fetch_inc_u64(&global_mem->a64u);
+ odp_atomic_max_u64(&global_mem->a64u_max, tmp);
}
}
@@ -272,11 +269,12 @@ static void test_atomic_cas_inc_32(void)
{
int i;
uint32_t old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u32(&a32u);
+ old = odp_atomic_load_u32(a32u);
- while (odp_atomic_cas_u32(&a32u, &old, old + 1) == 0)
+ while (odp_atomic_cas_u32(a32u, &old, old + 1) == 0)
;
}
}
@@ -285,11 +283,12 @@ static void test_atomic_cas_dec_32(void)
{
int i;
uint32_t old;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u32(&a32u);
+ old = odp_atomic_load_u32(a32u);
- while (odp_atomic_cas_u32(&a32u, &old, old - 1) == 0)
+ while (odp_atomic_cas_u32(a32u, &old, old - 1) == 0)
;
}
}
@@ -298,11 +297,12 @@ static void test_atomic_cas_inc_64(void)
{
int i;
uint64_t old;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u64(&a64u);
+ old = odp_atomic_load_u64(a64u);
- while (odp_atomic_cas_u64(&a64u, &old, old + 1) == 0)
+ while (odp_atomic_cas_u64(a64u, &old, old + 1) == 0)
;
}
}
@@ -311,11 +311,12 @@ static void test_atomic_cas_dec_64(void)
{
int i;
uint64_t old;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
for (i = 0; i < CNT; i++) {
- old = odp_atomic_load_u64(&a64u);
+ old = odp_atomic_load_u64(a64u);
- while (odp_atomic_cas_u64(&a64u, &old, old - 1) == 0)
+ while (odp_atomic_cas_u64(a64u, &old, old - 1) == 0)
;
}
}
@@ -324,66 +325,74 @@ static void test_atomic_xchg_32(void)
{
uint32_t old, new;
int i;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+ odp_atomic_u32_t *a32u_xchg = &global_mem->a32u_xchg;
for (i = 0; i < CNT; i++) {
- new = odp_atomic_fetch_inc_u32(&a32u);
- old = odp_atomic_xchg_u32(&a32u_xchg, new);
+ new = odp_atomic_fetch_inc_u32(a32u);
+ old = odp_atomic_xchg_u32(a32u_xchg, new);
if (old & 0x1)
- odp_atomic_xchg_u32(&a32u_xchg, 0);
+ odp_atomic_xchg_u32(a32u_xchg, 0);
else
- odp_atomic_xchg_u32(&a32u_xchg, 1);
+ odp_atomic_xchg_u32(a32u_xchg, 1);
}
- odp_atomic_sub_u32(&a32u, CNT);
- odp_atomic_xchg_u32(&a32u_xchg, U32_MAGIC);
+ odp_atomic_sub_u32(a32u, CNT);
+ odp_atomic_xchg_u32(a32u_xchg, U32_MAGIC);
}
static void test_atomic_xchg_64(void)
{
uint64_t old, new;
int i;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+ odp_atomic_u64_t *a64u_xchg = &global_mem->a64u_xchg;
for (i = 0; i < CNT; i++) {
- new = odp_atomic_fetch_inc_u64(&a64u);
- old = odp_atomic_xchg_u64(&a64u_xchg, new);
+ new = odp_atomic_fetch_inc_u64(a64u);
+ old = odp_atomic_xchg_u64(a64u_xchg, new);
if (old & 0x1)
- odp_atomic_xchg_u64(&a64u_xchg, 0);
+ odp_atomic_xchg_u64(a64u_xchg, 0);
else
- odp_atomic_xchg_u64(&a64u_xchg, 1);
+ odp_atomic_xchg_u64(a64u_xchg, 1);
}
- odp_atomic_sub_u64(&a64u, CNT);
- odp_atomic_xchg_u64(&a64u_xchg, U64_MAGIC);
+ odp_atomic_sub_u64(a64u, CNT);
+ odp_atomic_xchg_u64(a64u_xchg, U64_MAGIC);
}
static void test_atomic_non_relaxed_32(void)
{
int i;
uint32_t tmp;
+ odp_atomic_u32_t *a32u = &global_mem->a32u;
+ odp_atomic_u32_t *a32u_min = &global_mem->a32u_min;
+ odp_atomic_u32_t *a32u_max = &global_mem->a32u_max;
+ odp_atomic_u32_t *a32u_xchg = &global_mem->a32u_xchg;
for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_load_acq_u32(&a32u);
- odp_atomic_store_rel_u32(&a32u, tmp);
+ tmp = odp_atomic_load_acq_u32(a32u);
+ odp_atomic_store_rel_u32(a32u, tmp);
- tmp = odp_atomic_load_acq_u32(&a32u_max);
- odp_atomic_add_rel_u32(&a32u_max, 1);
+ tmp = odp_atomic_load_acq_u32(a32u_max);
+ odp_atomic_add_rel_u32(a32u_max, 1);
- tmp = odp_atomic_load_acq_u32(&a32u_min);
- odp_atomic_sub_rel_u32(&a32u_min, 1);
+ tmp = odp_atomic_load_acq_u32(a32u_min);
+ odp_atomic_sub_rel_u32(a32u_min, 1);
- tmp = odp_atomic_load_u32(&a32u_xchg);
- while (odp_atomic_cas_acq_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ tmp = odp_atomic_load_u32(a32u_xchg);
+ while (odp_atomic_cas_acq_u32(a32u_xchg, &tmp, tmp + 1) == 0)
;
- tmp = odp_atomic_load_u32(&a32u_xchg);
- while (odp_atomic_cas_rel_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ tmp = odp_atomic_load_u32(a32u_xchg);
+ while (odp_atomic_cas_rel_u32(a32u_xchg, &tmp, tmp + 1) == 0)
;
- tmp = odp_atomic_load_u32(&a32u_xchg);
+ tmp = odp_atomic_load_u32(a32u_xchg);
/* finally set value for validation */
- while (odp_atomic_cas_acq_rel_u32(&a32u_xchg, &tmp, U32_MAGIC)
+ while (odp_atomic_cas_acq_rel_u32(a32u_xchg, &tmp, U32_MAGIC)
== 0)
;
}
@@ -393,28 +402,32 @@ static void test_atomic_non_relaxed_64(void)
{
int i;
uint64_t tmp;
+ odp_atomic_u64_t *a64u = &global_mem->a64u;
+ odp_atomic_u64_t *a64u_min = &global_mem->a64u_min;
+ odp_atomic_u64_t *a64u_max = &global_mem->a64u_max;
+ odp_atomic_u64_t *a64u_xchg = &global_mem->a64u_xchg;
for (i = 0; i < CNT; i++) {
- tmp = odp_atomic_load_acq_u64(&a64u);
- odp_atomic_store_rel_u64(&a64u, tmp);
+ tmp = odp_atomic_load_acq_u64(a64u);
+ odp_atomic_store_rel_u64(a64u, tmp);
- tmp = odp_atomic_load_acq_u64(&a64u_max);
- odp_atomic_add_rel_u64(&a64u_max, 1);
+ tmp = odp_atomic_load_acq_u64(a64u_max);
+ odp_atomic_add_rel_u64(a64u_max, 1);
- tmp = odp_atomic_load_acq_u64(&a64u_min);
- odp_atomic_sub_rel_u64(&a64u_min, 1);
+ tmp = odp_atomic_load_acq_u64(a64u_min);
+ odp_atomic_sub_rel_u64(a64u_min, 1);
- tmp = odp_atomic_load_u64(&a64u_xchg);
- while (odp_atomic_cas_acq_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ tmp = odp_atomic_load_u64(a64u_xchg);
+ while (odp_atomic_cas_acq_u64(a64u_xchg, &tmp, tmp + 1) == 0)
;
- tmp = odp_atomic_load_u64(&a64u_xchg);
- while (odp_atomic_cas_rel_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ tmp = odp_atomic_load_u64(a64u_xchg);
+ while (odp_atomic_cas_rel_u64(a64u_xchg, &tmp, tmp + 1) == 0)
;
- tmp = odp_atomic_load_u64(&a64u_xchg);
+ tmp = odp_atomic_load_u64(a64u_xchg);
/* finally set value for validation */
- while (odp_atomic_cas_acq_rel_u64(&a64u_xchg, &tmp, U64_MAGIC)
+ while (odp_atomic_cas_acq_rel_u64(a64u_xchg, &tmp, U64_MAGIC)
== 0)
;
}
@@ -494,44 +507,46 @@ static void test_atomic_cas_inc_dec_64(void)
static void test_atomic_init(void)
{
- odp_atomic_init_u32(&a32u, 0);
- odp_atomic_init_u64(&a64u, 0);
- odp_atomic_init_u32(&a32u_min, 0);
- odp_atomic_init_u32(&a32u_max, 0);
- odp_atomic_init_u64(&a64u_min, 0);
- odp_atomic_init_u64(&a64u_max, 0);
- odp_atomic_init_u32(&a32u_xchg, 0);
- odp_atomic_init_u64(&a64u_xchg, 0);
+ odp_atomic_init_u32(&global_mem->a32u, 0);
+ odp_atomic_init_u64(&global_mem->a64u, 0);
+ odp_atomic_init_u32(&global_mem->a32u_min, 0);
+ odp_atomic_init_u32(&global_mem->a32u_max, 0);
+ odp_atomic_init_u64(&global_mem->a64u_min, 0);
+ odp_atomic_init_u64(&global_mem->a64u_max, 0);
+ odp_atomic_init_u32(&global_mem->a32u_xchg, 0);
+ odp_atomic_init_u64(&global_mem->a64u_xchg, 0);
}
static void test_atomic_store(void)
{
- odp_atomic_store_u32(&a32u, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u, U64_INIT_VAL);
- odp_atomic_store_u32(&a32u_min, U32_INIT_VAL);
- odp_atomic_store_u32(&a32u_max, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u_min, U64_INIT_VAL);
- odp_atomic_store_u64(&a64u_max, U64_INIT_VAL);
- odp_atomic_store_u32(&a32u_xchg, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u_xchg, U64_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u, U32_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u, U64_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u_min, U32_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u_max, U32_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u_min, U64_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u_max, U64_INIT_VAL);
+ odp_atomic_store_u32(&global_mem->a32u_xchg, U32_INIT_VAL);
+ odp_atomic_store_u64(&global_mem->a64u_xchg, U64_INIT_VAL);
}
static void test_atomic_validate(int check)
{
- CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
- CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
+ CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&global_mem->a32u));
+ CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&global_mem->a64u));
if (check & CHECK_MAX_MIN) {
- CU_ASSERT(odp_atomic_load_u32(&a32u_max) >
- odp_atomic_load_u32(&a32u_min));
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_max) >
+ odp_atomic_load_u32(&global_mem->a32u_min));
- CU_ASSERT(odp_atomic_load_u64(&a64u_max) >
- odp_atomic_load_u64(&a64u_min));
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_max) >
+ odp_atomic_load_u64(&global_mem->a64u_min));
}
if (check & CHECK_XCHG) {
- CU_ASSERT(odp_atomic_load_u32(&a32u_xchg) == U32_MAGIC);
- CU_ASSERT(odp_atomic_load_u64(&a64u_xchg) == U64_MAGIC);
+ CU_ASSERT(odp_atomic_load_u32(&global_mem->a32u_xchg) ==
+ U32_MAGIC);
+ CU_ASSERT(odp_atomic_load_u64(&global_mem->a64u_xchg) ==
+ U64_MAGIC);
}
}
@@ -540,8 +555,18 @@ static int atomic_init(odp_instance_t *inst)
uint32_t workers_count, max_threads;
int ret = 0;
odp_cpumask_t mask;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (0 != odp_init_global(inst, NULL, NULL)) {
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
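
Moving the test globals into global_shared_mem_t only pays off if that struct lives in memory every worker thread can reach. A minimal sketch of the reservation step, assuming the usual odp_shm_* flow; the shm name and helper function are illustrative, not part of this patch:

#include <string.h>
#include <odp_api.h>

/* Sketch: reserve one shared instance of the test globals so all
 * worker threads update the same atomics. The name
 * "atomic_test_globals" is hypothetical. */
static global_shared_mem_t *global_mem;

static int reserve_global_mem(void)
{
	odp_shm_t shm;

	shm = odp_shm_reserve("atomic_test_globals",
			      sizeof(global_shared_mem_t),
			      ODP_CACHE_LINE_SIZE, 0);
	if (shm == ODP_SHM_INVALID)
		return -1;

	/* All threads of the instance share this one object */
	global_mem = odp_shm_addr(shm);
	memset(global_mem, 0, sizeof(*global_mem));

	return 0;
}
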
diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index a3be4be46..806f2486a 100644
--- a/test/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -8,6 +8,7 @@
#include <malloc.h>
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
@@ -329,8 +330,18 @@ static int barrier_init(odp_instance_t *inst)
uint32_t workers_count, max_threads;
int ret = 0;
odp_cpumask_t mask;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
- if (0 != odp_init_global(inst, NULL, NULL)) {
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
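
atomic_init() and barrier_init() now share the same boot sequence. Consolidated into one self-contained sketch, assuming only the calls visible in this patch; the function name is illustrative:

#include <stdio.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>

/* Sketch: read the helper options first, then seed
 * odp_init_global() with the memory model the helper was given. */
static int init_with_helper_mem_model(odp_instance_t *inst)
{
	odph_helper_options_t helper_options;
	odp_init_t init_param;

	if (odph_options(&helper_options)) {
		fprintf(stderr, "error: odph_options() failed.\n");
		return -1;
	}

	odp_init_param_init(&init_param);
	init_param.mem_model = helper_options.mem_model;

	if (odp_init_global(inst, &init_param, NULL)) {
		fprintf(stderr, "error: odp_init_global() failed.\n");
		return -1;
	}

	return 0;
}
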
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index 7b54ef157..9b5d8ec0e 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -201,7 +201,7 @@ odp_queue_t queue_create(const char *queuename, bool sched)
if (sched) {
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.prio = odp_schedule_max_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
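
The compile-time ODP_SCHED_PRIO_* constants give way to run-time accessors throughout the classifier tests. A standalone sketch of the new queue setup; the function name is illustrative:

#include <odp_api.h>

/* Sketch: create a scheduled queue at the highest priority the
 * implementation reports rather than a fixed constant.
 * odp_schedule_min_prio() and odp_schedule_default_prio() replace
 * the former LOWEST and NORMAL/DEFAULT values the same way. */
static odp_queue_t sched_queue_create_max_prio(const char *name)
{
	odp_queue_param_t qparam;

	odp_queue_param_init(&qparam);
	qparam.type = ODP_QUEUE_TYPE_SCHED;
	qparam.sched.prio = odp_schedule_max_prio();
	qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
	qparam.sched.group = ODP_SCHED_GROUP_ALL;

	return odp_queue_create(name, &qparam);
}
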
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index c4efd6091..55a86f5bc 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -184,14 +184,14 @@ static void classification_test_pktin_classifier_flag(void)
odp_pktio_close(pktio);
}
-static void classification_test_pmr_term_tcp_dport(void)
+static void _classification_test_pmr_term_tcp_dport(int num_pkt)
{
odp_packet_t pkt;
odph_tcphdr_t *tcp;
- uint32_t seqno;
+ uint32_t seqno[num_pkt];
uint16_t val;
uint16_t mask;
- int retval;
+ int retval, i, sent_queue, recv_queue, sent_default, recv_default;
odp_pktio_t pktio;
odp_queue_t queue;
odp_queue_t retqueue;
@@ -209,7 +209,6 @@ static void classification_test_pmr_term_tcp_dport(void)
odph_ethhdr_t *eth;
val = CLS_DEFAULT_DPORT;
mask = 0xffff;
- seqno = 0;
pktio = create_pktio(ODP_QUEUE_TYPE_SCHED, pkt_pool, true);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
@@ -243,50 +242,108 @@ static void classification_test_pmr_term_tcp_dport(void)
pmr = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos);
CU_ASSERT(pmr != ODP_PMR_INVALID);
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+ for (i = 0; i < num_pkt; i++) {
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno[i] = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno[i] != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
- enqueue_pktio_interface(pkt, pktio);
+ enqueue_pktio_interface(pkt, pktio);
+ }
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- pool_recv = odp_packet_pool(pkt);
- CU_ASSERT(pool == pool_recv);
- CU_ASSERT(retqueue == queue);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ for (i = 0; i < num_pkt; i++) {
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ pool_recv = odp_packet_pool(pkt);
+ CU_ASSERT(pool == pool_recv);
+ CU_ASSERT(retqueue == queue);
+ CU_ASSERT(seqno[i] == cls_pkt_get_seq(pkt));
- odp_packet_free(pkt);
+ odp_packet_free(pkt);
+ }
/* Other packets are delivered to default queue */
- pkt = create_packet(default_pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
- odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+ for (i = 0; i < num_pkt; i++) {
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno[i] = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno[i] != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+
+ enqueue_pktio_interface(pkt, pktio);
+ }
- tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+ for (i = 0; i < num_pkt; i++) {
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno[i] == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
- enqueue_pktio_interface(pkt, pktio);
+ odp_packet_free(pkt);
+ }
- pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- CU_ASSERT(retqueue == default_queue);
- recvpool = odp_packet_pool(pkt);
- CU_ASSERT(recvpool == default_pool);
+ sent_queue = 0;
+ sent_default = 0;
+
+ /* Both queues simultaneously */
+ for (i = 0; i < 2 * num_pkt; i++) {
+ pkt = create_packet(default_pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+
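+ /* 2-of-5 split: 40% match the dport PMR, 60% fall to the default CoS */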
+ if ((i % 5) < 2) {
+ sent_queue++;
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
+ } else {
+ sent_default++;
+ tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
+ }
+
+ enqueue_pktio_interface(pkt, pktio);
+ }
+
+ recv_queue = 0;
+ recv_default = 0;
+
+ for (i = 0; i < 2 * num_pkt; i++) {
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(retqueue == queue || retqueue == default_queue);
+
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+
+ if (retqueue == queue) {
+ recv_queue++;
+ CU_ASSERT(tcp->dst_port ==
+ odp_cpu_to_be_16(CLS_DEFAULT_DPORT));
+ } else if (retqueue == default_queue) {
+ recv_default++;
+ CU_ASSERT(tcp->dst_port ==
+ odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1));
+ }
+ odp_packet_free(pkt);
+ }
+
+ CU_ASSERT(sent_queue == recv_queue);
+ CU_ASSERT(sent_default == recv_default);
- odp_packet_free(pkt);
odp_cos_destroy(cos);
odp_cos_destroy(default_cos);
odp_cls_pmr_destroy(pmr);
@@ -1941,6 +1998,16 @@ static void classification_test_pmr_term_ipv6saddr(void)
odp_pktio_close(pktio);
}
+static void classification_test_pmr_term_tcp_dport(void)
+{
+ _classification_test_pmr_term_tcp_dport(2);
+}
+
+static void classification_test_pmr_term_tcp_dport_multi(void)
+{
+ _classification_test_pmr_term_tcp_dport(SHM_PKT_NUM_BUFS / 4);
+}
+
odp_testinfo_t classification_suite_pmr[] = {
ODP_TEST_INFO(classification_test_pmr_term_tcp_dport),
ODP_TEST_INFO(classification_test_pmr_term_tcp_sport),
@@ -1959,5 +2026,6 @@ odp_testinfo_t classification_suite_pmr[] = {
ODP_TEST_INFO(classification_test_pmr_term_eth_type_0),
ODP_TEST_INFO(classification_test_pmr_term_eth_type_x),
ODP_TEST_INFO(classification_test_pktin_classifier_flag),
+ ODP_TEST_INFO(classification_test_pmr_term_tcp_dport_multi),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index 3b9e02761..4f7221403 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -18,6 +18,7 @@ static odp_pool_t pool_list[CLS_ENTRIES];
static odp_pool_t pool_default;
static odp_pktio_t pktio_loop;
static odp_cls_testcase_u tc;
+static int global_num_l2_qos;
#define NUM_COS_PMR_CHAIN 2
#define NUM_COS_DEFAULT 1
@@ -151,16 +152,16 @@ void configure_cls_pmr_chain(void)
uint32_t addr;
uint32_t mask;
odp_pmr_param_t pmr_param;
- odp_queue_capability_t queue_capa;
+ odp_schedule_capability_t schedule_capa;
- CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+ CU_ASSERT_FATAL(odp_schedule_capability(&schedule_capa) == 0);
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- qparam.sched.lock_count = queue_capa.max_ordered_locks;
+ qparam.sched.lock_count = schedule_capa.max_ordered_locks;
sprintf(queuename, "%s", "SrcQueue");
queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename, &qparam);
@@ -182,7 +183,7 @@ void configure_cls_pmr_chain(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DstQueue");
@@ -292,7 +293,7 @@ void configure_pktio_default_cos(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qparam.sched.prio = odp_schedule_default_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DefaultQueue");
@@ -355,7 +356,7 @@ void configure_pktio_error_cos(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_LOWEST;
+ qparam.sched.prio = odp_schedule_min_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "ErrorCos");
@@ -457,12 +458,17 @@ void configure_cos_with_l2_priority(void)
for (i = 0; i < CLS_L2_QOS_MAX; i++)
qos_tbl[i] = 0;
+ if (odp_schedule_num_prio() < num_qos)
+ num_qos = odp_schedule_num_prio();
+
+ global_num_l2_qos = num_qos;
+
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < num_qos; i++) {
- qparam.sched.prio = ODP_SCHED_PRIO_LOWEST - i;
+ qparam.sched.prio = odp_schedule_min_prio() + i;
sprintf(queuename, "%s_%d", "L2_Queue", i);
queue_tbl[i] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_tbl[i] != ODP_QUEUE_INVALID);
@@ -506,7 +512,7 @@ void test_cos_with_l2_priority(void)
pkt_info.udp = true;
pkt_info.vlan = true;
- for (i = 0; i < CLS_L2_QOS_MAX; i++) {
+ for (i = 0; i < global_num_l2_qos; i++) {
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -538,7 +544,7 @@ void configure_pmr_cos(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.prio = odp_schedule_max_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "PMR_CoS");
@@ -613,7 +619,7 @@ void configure_pktio_pmr_composite(void)
odp_queue_param_init(&qparam);
qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
+ qparam.sched.prio = odp_schedule_max_prio();
qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "cos_pmr_composite_queue");
diff --git a/test/validation/api/comp/.gitignore b/test/validation/api/comp/.gitignore
new file mode 100644
index 000000000..97aea05ab
--- /dev/null
+++ b/test/validation/api/comp/.gitignore
@@ -0,0 +1 @@
+comp_main
diff --git a/test/validation/api/comp/Makefile.am b/test/validation/api/comp/Makefile.am
new file mode 100644
index 000000000..2e5d3a26b
--- /dev/null
+++ b/test/validation/api/comp/Makefile.am
@@ -0,0 +1,7 @@
+include ../Makefile.inc
+
+test_PROGRAMS = comp_main
+
+comp_main_SOURCES = \
+ comp.c \
+ test_vectors.h
diff --git a/test/validation/api/comp/comp.c b/test/validation/api/comp/comp.c
new file mode 100644
index 000000000..e84764f71
--- /dev/null
+++ b/test/validation/api/comp/comp.c
@@ -0,0 +1,591 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "config.h"
+
+#include <odp_api.h>
+#include <odp_cunit_common.h>
+#include "test_vectors.h"
+
+#define TEST_NUM_PKT 64
+#define TEST_PKT_LEN (8 * 1024)
+
+#define SEGMENTED_TEST_PKT_LEN (16 * 1024)
+#define SEGMENTED_TEST_PATTERN 0xAA
+
+#define COMP_PACKET_POOL "packet_pool"
+#define COMP_OUT_QUEUE "comp-out"
+
+struct suite_context_s {
+ odp_comp_op_mode_t op_mode;
+ odp_pool_t pool;
+ odp_queue_t queue;
+};
+
+static struct suite_context_s suite_context;
+
+/**
+ * Check if given compression and hash algorithms are supported
+ *
+ * @param comp Compression algorithm
+ * @param hash Hash algorithm
+ *
+ * @retval ODP_TEST_ACTIVE when both algorithms are supported
+ * @retval ODP_TEST_INACTIVE when either algorithm is not supported
+ */
+static int check_comp_alg_support(odp_comp_alg_t comp,
+ odp_comp_hash_alg_t hash)
+{
+ odp_comp_capability_t capability;
+
+ if (odp_comp_capability(&capability))
+ return ODP_TEST_INACTIVE;
+
+ if (suite_context.op_mode == ODP_COMP_OP_MODE_SYNC &&
+ capability.sync == ODP_SUPPORT_NO)
+ return ODP_TEST_INACTIVE;
+ if (suite_context.op_mode == ODP_COMP_OP_MODE_ASYNC &&
+ capability.async == ODP_SUPPORT_NO)
+ return ODP_TEST_INACTIVE;
+
+ /* Compression algorithms */
+ switch (comp) {
+ case ODP_COMP_ALG_NULL:
+ if (!capability.comp_algos.bit.null)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_ALG_DEFLATE:
+ if (!capability.comp_algos.bit.deflate)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_ALG_ZLIB:
+ if (!capability.comp_algos.bit.zlib)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_ALG_LZS:
+ if (!capability.comp_algos.bit.lzs)
+ return ODP_TEST_INACTIVE;
+ break;
+ default:
+ fprintf(stderr, "Unsupported compression algorithm\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ /* Hash algorithms */
+ switch (hash) {
+ case ODP_COMP_HASH_ALG_NONE:
+ if (!capability.hash_algos.bit.none)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_HASH_ALG_SHA1:
+ if (!capability.hash_algos.bit.sha1)
+ return ODP_TEST_INACTIVE;
+ break;
+ case ODP_COMP_HASH_ALG_SHA256:
+ if (!capability.hash_algos.bit.sha256)
+ return ODP_TEST_INACTIVE;
+ break;
+ default:
+ fprintf(stderr, "Unsupported hash algorithm\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static odp_packet_t run_comp_op(odp_comp_op_t op,
+ odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ odp_packet_t inpkt,
+ unsigned int outtext_len)
+{
+ odp_comp_session_t session;
+ odp_comp_capability_t capa;
+ odp_comp_alg_capability_t comp_capa;
+ odp_comp_hash_alg_capability_t hash_capa;
+ odp_comp_session_param_t ses_params;
+ odp_comp_packet_op_param_t op_params;
+ odp_packet_t outpkt;
+ odp_comp_packet_result_t comp_result;
+ int rc;
+
+ rc = odp_comp_capability(&capa);
+ CU_ASSERT_FATAL(!rc);
+
+ if (comp_alg == ODP_COMP_ALG_NULL &&
+ !(capa.comp_algos.bit.null))
+ rc = -1;
+ if (comp_alg == ODP_COMP_ALG_DEFLATE &&
+ !(capa.comp_algos.bit.deflate))
+ rc = -1;
+ if (comp_alg == ODP_COMP_ALG_ZLIB &&
+ !(capa.comp_algos.bit.zlib))
+ rc = -1;
+ if (comp_alg == ODP_COMP_ALG_LZS &&
+ !(capa.comp_algos.bit.lzs))
+ rc = -1;
+
+ CU_ASSERT(!rc);
+
+ if (hash_alg == ODP_COMP_HASH_ALG_NONE &&
+ !(capa.hash_algos.bit.none))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA1 &&
+ !(capa.hash_algos.bit.sha1))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA256 &&
+ !(capa.hash_algos.bit.sha256))
+ rc = -1;
+
+ CU_ASSERT(!rc);
+
+ rc = odp_comp_alg_capability(comp_alg, &comp_capa);
+ CU_ASSERT(!rc);
+
+ rc = odp_comp_hash_alg_capability(hash_alg, &hash_capa);
+ CU_ASSERT(!rc);
+
+ if (hash_alg == ODP_COMP_HASH_ALG_NONE &&
+ !(comp_capa.hash_algo.bit.none))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA1 &&
+ !(comp_capa.hash_algo.bit.sha1))
+ rc = -1;
+ if (hash_alg == ODP_COMP_HASH_ALG_SHA256 &&
+ !(comp_capa.hash_algo.bit.sha256))
+ rc = -1;
+
+ CU_ASSERT(!rc);
+
+ /* Create a compression session */
+ odp_comp_session_param_init(&ses_params);
+ ses_params.op = op;
+ ses_params.comp_algo = comp_alg;
+ ses_params.hash_algo = hash_alg;
+ ses_params.compl_queue = suite_context.queue;
+ ses_params.mode = suite_context.op_mode;
+
+ session = odp_comp_session_create(&ses_params);
+ CU_ASSERT_FATAL(session != ODP_COMP_SESSION_INVALID);
+ CU_ASSERT(odp_comp_session_to_u64(session) !=
+ odp_comp_session_to_u64(ODP_COMP_SESSION_INVALID));
+
+ /* Allocate output packet for the operation result */
+ outpkt = odp_packet_alloc(suite_context.pool, outtext_len);
+ CU_ASSERT(outpkt != ODP_PACKET_INVALID);
+
+ op_params.out_data_range.offset = 0;
+ op_params.out_data_range.length = outtext_len;
+ op_params.in_data_range.offset = 0;
+ op_params.in_data_range.length = odp_packet_len(inpkt);
+ op_params.session = session;
+
+ if (suite_context.op_mode == ODP_COMP_OP_MODE_SYNC) {
+ rc = odp_comp_op(&inpkt, &outpkt, 1, &op_params);
+ CU_ASSERT(rc >= 0);
+ if (rc < 0)
+ goto cleanup;
+ } else {
+ odp_event_t event;
+ odp_packet_t packet;
+
+ rc = odp_comp_op_enq(&inpkt, &outpkt, 1, &op_params);
+ CU_ASSERT(rc == 1);
+ if (rc <= 0)
+ goto cleanup;
+ /* Poll completion queue for results */
+ do {
+ event = odp_queue_deq(suite_context.queue);
+ } while (event == ODP_EVENT_INVALID);
+ CU_ASSERT(ODP_EVENT_PACKET == odp_event_type(event));
+ CU_ASSERT(ODP_EVENT_PACKET_COMP ==
+ odp_event_subtype(event));
+
+ packet = odp_comp_packet_from_event(event);
+ CU_ASSERT(packet != ODP_PACKET_INVALID);
+ CU_ASSERT(packet == outpkt);
+ }
+
+ rc = odp_comp_result(&comp_result, outpkt);
+ CU_ASSERT(!rc);
+ CU_ASSERT(comp_result.status == ODP_COMP_STATUS_SUCCESS);
+ CU_ASSERT(comp_result.output_data_range.offset == 0);
+ odp_packet_trunc_tail(&outpkt,
+ odp_packet_len(outpkt) -
+ comp_result.output_data_range.length,
+ NULL, NULL);
+
+cleanup:
+
+ rc = odp_comp_session_destroy(session);
+ CU_ASSERT(!rc);
+
+ if (rc < 0) {
+ odp_packet_free(outpkt);
+ return ODP_PACKET_INVALID;
+ }
+
+ return outpkt;
+}
+
+static void packet_cmp(odp_packet_t pkt,
+ const uint8_t *text,
+ unsigned int text_len)
+{
+ odp_packet_seg_t seg;
+ uint32_t cmp_offset = 0, outlen = 0;
+ uint32_t compare_len = 0;
+ uint8_t *outdata;
+
+ seg = odp_packet_first_seg(pkt);
+ do {
+ outdata = odp_packet_seg_data(pkt, seg);
+ outlen = odp_packet_seg_data_len(pkt, seg);
+ compare_len = outlen < (text_len - cmp_offset) ?
+ outlen : (text_len - cmp_offset);
+
+ CU_ASSERT(!memcmp(outdata,
+ text + cmp_offset, compare_len));
+ cmp_offset += compare_len;
+ seg = odp_packet_next_seg(pkt, seg);
+ } while (seg != ODP_PACKET_SEG_INVALID && cmp_offset < text_len);
+}
+
+static void comp_decomp_alg_test(odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len)
+{
+ odp_packet_t decomp_outpkt, comp_outpkt, inpkt;
+ int rc;
+
+ /* Allocate compression input packet */
+ inpkt = odp_packet_alloc(suite_context.pool, plaintext_len);
+ CU_ASSERT(inpkt != ODP_PACKET_INVALID);
+
+ /* copy test data into pkt memory */
+ rc = odp_packet_copy_from_mem(inpkt, 0,
+ plaintext_len, plaintext);
+ CU_ASSERT_FATAL(!rc);
+
+ comp_outpkt = run_comp_op(ODP_COMP_OP_COMPRESS,
+ comp_alg, hash_alg,
+ inpkt,
+ plaintext_len);
+ if (comp_outpkt == ODP_PACKET_INVALID)
+ goto clean_in;
+
+ decomp_outpkt = run_comp_op(ODP_COMP_OP_DECOMPRESS,
+ comp_alg, hash_alg,
+ comp_outpkt,
+ plaintext_len);
+ if (decomp_outpkt == ODP_PACKET_INVALID)
+ goto cleanup;
+
+ packet_cmp(decomp_outpkt, plaintext, plaintext_len);
+
+ odp_packet_free(decomp_outpkt);
+
+cleanup:
+ odp_packet_free(comp_outpkt);
+clean_in:
+ odp_packet_free(inpkt);
+}
+
+static void comp_alg_test(odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len)
+{
+ odp_packet_t comp_outpkt, inpkt;
+ int rc;
+
+ /* Allocate compression input packet */
+ inpkt = odp_packet_alloc(suite_context.pool, plaintext_len);
+ CU_ASSERT(inpkt != ODP_PACKET_INVALID);
+
+ /* copy test data into pkt memory */
+ rc = odp_packet_copy_from_mem(inpkt, 0,
+ plaintext_len, plaintext);
+ CU_ASSERT_FATAL(!rc);
+
+ comp_outpkt = run_comp_op(ODP_COMP_OP_COMPRESS,
+ comp_alg, hash_alg,
+ inpkt,
+ plaintext_len);
+ if (comp_outpkt == ODP_PACKET_INVALID)
+ goto clean_in;
+
+ odp_packet_free(comp_outpkt);
+clean_in:
+ odp_packet_free(inpkt);
+}
+
+static void decomp_alg_test(odp_comp_alg_t comp_alg,
+ odp_comp_hash_alg_t hash_alg,
+ const uint8_t *comptext,
+ unsigned int comptext_len,
+ const uint8_t *plaintext,
+ unsigned int plaintext_len)
+{
+ odp_packet_t decomp_outpkt, inpkt;
+ int rc;
+
+ /* Allocate decompression input packet */
+ inpkt = odp_packet_alloc(suite_context.pool, comptext_len);
+ CU_ASSERT(inpkt != ODP_PACKET_INVALID);
+
+ /* copy test data into pkt memory */
+ rc = odp_packet_copy_from_mem(inpkt, 0,
+ comptext_len, comptext);
+ CU_ASSERT_FATAL(!rc);
+
+ decomp_outpkt = run_comp_op(ODP_COMP_OP_DECOMPRESS,
+ comp_alg, hash_alg,
+ inpkt,
+ plaintext_len);
+ if (decomp_outpkt == ODP_PACKET_INVALID)
+ goto cleanup;
+
+ packet_cmp(decomp_outpkt, plaintext, plaintext_len);
+
+ odp_packet_free(decomp_outpkt);
+cleanup:
+ odp_packet_free(inpkt);
+}
+
+static int comp_check_deflate_none(void)
+{
+ return check_comp_alg_support(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE);
+}
+
+/* Compress content using deflate algorithm */
+static void comp_test_compress_alg_deflate_none(void)
+{
+ comp_alg_test(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Decompress content using deflate algorithm */
+static void comp_test_decompress_alg_deflate_none(void)
+{
+ decomp_alg_test(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE,
+ compressed_text_def, COMP_DEFLATE_SIZE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+static int comp_check_zlib_none(void)
+{
+ return check_comp_alg_support(ODP_COMP_ALG_ZLIB,
+ ODP_COMP_HASH_ALG_NONE);
+}
+
+/* Compress content using zlib algorithm */
+static void comp_test_compress_alg_zlib_none(void)
+{
+ comp_alg_test(ODP_COMP_ALG_ZLIB, ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Decompress content using zlib algorithm */
+static void comp_test_decompress_alg_zlib_none(void)
+{
+ decomp_alg_test(ODP_COMP_ALG_ZLIB, ODP_COMP_HASH_ALG_NONE,
+ compressed_text_zlib, COMP_ZLIB_SIZE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Compress/Decompress content using deflate algorithm */
+static void comp_test_comp_decomp_alg_deflate_none(void)
+{
+ comp_decomp_alg_test(ODP_COMP_ALG_DEFLATE,
+ ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+/* Compress/Decompress content using zlib algorithm */
+static void comp_test_comp_decomp_alg_zlib_none(void)
+{
+ comp_decomp_alg_test(ODP_COMP_ALG_ZLIB,
+ ODP_COMP_HASH_ALG_NONE,
+ plaintext, PLAIN_TEXT_SIZE);
+}
+
+static int comp_suite_sync_init(void)
+{
+ suite_context.pool = odp_pool_lookup(COMP_PACKET_POOL);
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+
+ suite_context.queue = ODP_QUEUE_INVALID;
+ suite_context.op_mode = ODP_COMP_OP_MODE_SYNC;
+ return 0;
+}
+
+static int comp_suite_async_init(void)
+{
+ suite_context.pool = odp_pool_lookup(COMP_PACKET_POOL);
+ if (suite_context.pool == ODP_POOL_INVALID)
+ return -1;
+ suite_context.queue = odp_queue_lookup(COMP_OUT_QUEUE);
+ if (suite_context.queue == ODP_QUEUE_INVALID)
+ return -1;
+
+ suite_context.op_mode = ODP_COMP_OP_MODE_ASYNC;
+ return 0;
+}
+
+static odp_testinfo_t comp_suite[] = {
+ ODP_TEST_INFO_CONDITIONAL(comp_test_compress_alg_deflate_none,
+ comp_check_deflate_none),
+ ODP_TEST_INFO_CONDITIONAL(comp_test_compress_alg_zlib_none,
+ comp_check_zlib_none),
+ ODP_TEST_INFO_CONDITIONAL(comp_test_decompress_alg_deflate_none,
+ comp_check_deflate_none),
+ ODP_TEST_INFO_CONDITIONAL(comp_test_decompress_alg_zlib_none,
+ comp_check_zlib_none),
+ ODP_TEST_INFO_CONDITIONAL(comp_test_comp_decomp_alg_deflate_none,
+ comp_check_deflate_none),
+ ODP_TEST_INFO_CONDITIONAL(comp_test_comp_decomp_alg_zlib_none,
+ comp_check_zlib_none),
+ ODP_TEST_INFO_NULL,
+};
+
+static int comp_suite_term(void)
+{
+ int i;
+ int first = 1;
+
+ for (i = 0; comp_suite[i].name; i++) {
+ if (comp_suite[i].check_active &&
+ comp_suite[i].check_active() == ODP_TEST_INACTIVE) {
+ if (first) {
+ first = 0;
+ printf("\n\n Inactive tests:\n");
+ }
+ printf(" %s\n", comp_suite[i].name);
+ }
+ }
+ return 0;
+}
+
+/* Suite names */
+#define ODP_COMP_SYNC_TEST "Comp/decomp sync test"
+#define ODP_COMP_ASYNC_TEST "Comp/decomp async test"
+
+static odp_suiteinfo_t comp_suites[] = {
+ {ODP_COMP_SYNC_TEST, comp_suite_sync_init,
+ comp_suite_term, comp_suite},
+ {ODP_COMP_ASYNC_TEST, comp_suite_async_init,
+ comp_suite_term, comp_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+static int comp_init(odp_instance_t *inst)
+{
+ odp_pool_param_t params;
+ odp_pool_t pool;
+ odp_queue_t out_queue;
+ odp_pool_capability_t pool_capa;
+
+ if (0 != odp_init_global(inst, NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ fprintf(stderr, "error: odp_pool_capability() failed.\n");
+ return -1;
+ }
+
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = TEST_PKT_LEN;
+ params.pkt.len = TEST_PKT_LEN;
+ params.pkt.num = TEST_NUM_PKT;
+ params.type = ODP_POOL_PACKET;
+
+ if (pool_capa.pkt.max_seg_len &&
+ TEST_PKT_LEN > pool_capa.pkt.max_seg_len) {
+ fprintf(stderr, "Warning: small packet segment length\n");
+ params.pkt.seg_len = pool_capa.pkt.max_seg_len;
+ }
+
+ pool = odp_pool_create(COMP_PACKET_POOL, &params);
+ if (ODP_POOL_INVALID == pool) {
+ fprintf(stderr, "Packet pool creation failed.\n");
+ return -1;
+ }
+
+ /* Queue to store compression/decompression events */
+ out_queue = odp_queue_create(COMP_OUT_QUEUE, NULL);
+ if (ODP_QUEUE_INVALID == out_queue) {
+ fprintf(stderr, "Comp outq creation failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int comp_term(odp_instance_t inst)
+{
+ odp_pool_t pool;
+ odp_queue_t out_queue;
+
+ out_queue = odp_queue_lookup(COMP_OUT_QUEUE);
+ if (ODP_QUEUE_INVALID != out_queue) {
+ if (odp_queue_destroy(out_queue))
+ fprintf(stderr, "Comp outq destroy failed.\n");
+ } else {
+ fprintf(stderr, "Comp outq not found.\n");
+ }
+
+ pool = odp_pool_lookup(COMP_PACKET_POOL);
+ if (ODP_POOL_INVALID != pool) {
+ if (odp_pool_destroy(pool))
+ fprintf(stderr, "Packet pool destroy failed.\n");
+ } else {
+ fprintf(stderr, "Packet pool not found.\n");
+ }
+
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[])
+{
+ int ret;
+
+ /* parse common options: */
+ if (odp_cunit_parse_options(argc, argv))
+ return -1;
+
+ odp_cunit_register_global_init(comp_init);
+ odp_cunit_register_global_term(comp_term);
+
+ ret = odp_cunit_register(comp_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
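
For reference, the whole synchronous path above collapses into a few calls. A minimal sketch assembled only from the API used in comp.c, with error paths trimmed; the function name is illustrative and the capability checks are assumed to have passed:

#include <odp_api.h>

/* Sketch: one synchronous DEFLATE compression of a whole packet. */
static odp_packet_t compress_packet(odp_pool_t pool, odp_packet_t in)
{
	odp_comp_session_param_t sp;
	odp_comp_packet_op_param_t op;
	odp_comp_session_t ses;
	odp_packet_t out;

	odp_comp_session_param_init(&sp);
	sp.op = ODP_COMP_OP_COMPRESS;
	sp.comp_algo = ODP_COMP_ALG_DEFLATE;
	sp.hash_algo = ODP_COMP_HASH_ALG_NONE;
	sp.mode = ODP_COMP_OP_MODE_SYNC;

	ses = odp_comp_session_create(&sp);
	if (ses == ODP_COMP_SESSION_INVALID)
		return ODP_PACKET_INVALID;

	out = odp_packet_alloc(pool, odp_packet_len(in));
	if (out == ODP_PACKET_INVALID) {
		odp_comp_session_destroy(ses);
		return ODP_PACKET_INVALID;
	}

	op.session = ses;
	op.in_data_range.offset = 0;
	op.in_data_range.length = odp_packet_len(in);
	op.out_data_range.offset = 0;
	op.out_data_range.length = odp_packet_len(out);

	/* Sync mode: result is available when the call returns */
	if (odp_comp_op(&in, &out, 1, &op) < 0) {
		odp_packet_free(out);
		out = ODP_PACKET_INVALID;
	}

	odp_comp_session_destroy(ses);
	return out;
}
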
diff --git a/test/validation/api/comp/test_vectors.h b/test/validation/api/comp/test_vectors.h
new file mode 100644
index 000000000..36d98b30d
--- /dev/null
+++ b/test/validation/api/comp/test_vectors.h
@@ -0,0 +1,1997 @@
+/* Copyright (c) 2018, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_COMP_VECTORS_H_
+#define _ODP_TEST_COMP_VECTORS_H_
+
+#define PLAIN_TEXT_SIZE 8192
+
+static uint8_t plaintext[PLAIN_TEXT_SIZE] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x45, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x98, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0xdc, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x94, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x1f, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0x58, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x41, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x1e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0xa9, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x08, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x23, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe7, 0xcd, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x0f, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xf9, 0x9c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xc2, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0xc4, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0xfb, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x50, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xba, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0xe4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x30, 0x71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0xd9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x61, 0x6c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0x89,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb1, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0xa8, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x84, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0xbd, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x8c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xd0, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x76, 0x9a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x24, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xc4, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0xf8, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0xf5, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x8d, 0x7b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x1a, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0xdd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xc8, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0xc2, 0x9a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xad, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x82, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xc6, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0xb9, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0xd3, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xd7, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x58, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x42, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xd4, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x9a, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x8d, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x89, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x7f, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xf9, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x78, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x40, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xde, 0xa1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x85, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xed, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xf0, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xb7, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xc7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x65, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x15, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x8c, 0x39,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xaf, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xb6, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xb6, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb1, 0x57, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0xe4, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x7e, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x0b, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x84, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x82, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x8f, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x4a, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x31, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x47, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbd, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x12, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x3f, 0x1e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x47, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xc5, 0xd9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2b, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x7b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x3e, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0xb1, 0xf2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0xd3, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x81, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xee, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0xca, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x11, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0xe0, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xd9, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x5e, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xa8, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x7e, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0e, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0xc5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0xd6, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0xc3, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, 0x4f, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x84, 0xf1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0x24, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7d, 0x30, 0xda,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x37, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x1e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xac, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x8f, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x5a, 0xc7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x82, 0xa1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x55, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0xe7, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x12, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x99, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0xca, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xea, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x5d, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x1b, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xc5, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x4b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x28, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x6a, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0xfe, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x91, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x6a, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa2, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x8d, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x21, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x17, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0xb7, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0xff, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x50,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3, 0x12, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0xac, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0xe3, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x0a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x6b, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x8d, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x4a, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x2e, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x8a, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xaf, 0xc6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x85, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0xc6, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x06, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xf6, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf1, 0xd2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x47, 0x3e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xfe, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0xb9, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x57, 0x4c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0xb6, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x9d, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0xf3, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0xf8, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x9d, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x4e, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x4d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x9e, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0xd4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0x67, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xe7, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x4c, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x8c, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x3d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0x8c, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x2e, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x2e, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x36, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x85, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x48, 0x59,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x2f, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x18,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x5e, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xab, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0xf6, 0x47,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x80, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xae, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0xcc, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x46, 0x93,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x39, 0x49,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0xd2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x14, 0x2c, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x5d, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x4f, 0x79,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0xdf, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x81, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xf8, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x63, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0xd9, 0x6a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xac, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x6e, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xcf, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0x00, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x57, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0x79, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x25, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xb3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x71, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x1b, 0x46,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb3, 0xab, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x84, 0x63, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x41, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0xe4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0xb2, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x95, 0x42,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x3e, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0xd8, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xea,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0xf4, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x46,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x8d, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x36, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x42, 0xbe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf5, 0x8d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0x5b, 0xbe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x2c, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x6f, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xd2, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x8a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd0, 0x6c, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xf5, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x09, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x37,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xb4, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x71, 0x2a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0xd3, 0xf2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xaf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xb9, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x9a, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0xf2, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x72, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0xb7, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xb6, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x29, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x44, 0x70, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x94, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x80, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x6a, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x59, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0xe5, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x51, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xad, 0xbd, 0xf4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x97, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xac, 0x71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x08,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xbd, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0xb8, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x2b, 0x21,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x25, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf1, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x7b, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x46, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbb, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x4c, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x45, 0x17,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdf, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xda,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0b, 0x9c, 0x17,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x60,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x41, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x4d, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x83, 0x27,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0x66, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x17, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x7d, 0xac,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0x07, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x43, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb2, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, 0x12, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x8d, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x34, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x6f, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xc0, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0xd0, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0x77, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0xe1, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x10, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x1a, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x83, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x4a, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x69, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x61,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x7f, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x25, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x23, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x45, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xd7, 0xbb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9d, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0xac, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xe6, 0x38,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x38, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0xd6, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xae, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xaa, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x82, 0xb9, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x62, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xb2, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x28, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xfd, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0x5e, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7a, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x7a, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x83,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb4, 0x02, 0x41,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xec, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x72, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x37, 0xa1, 0xd2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x29, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x3a, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0x2f, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x6b, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x60, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x6e, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xef, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x3f, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x18,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x1d, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd3, 0xaa, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xb9, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x00, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xc3, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x04, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xb2, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x54, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe4, 0xdb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0x2d, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0x3e, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x91, 0xa1, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x3c, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0xce, 0xab,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xaa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd9, 0xc4, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0xbe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xcb, 0x22,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0xd6, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xe8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x41, 0x15,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x81,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0x65, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xa6, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0xb6, 0x93,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0x53, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0xba,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x9f, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0x60, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xf4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x65, 0xe1, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x84, 0x7c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x1f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xec, 0x79,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xc9, 0xc7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xce, 0x27, 0xa3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x98,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0xd2, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x56, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0xf0, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x86, 0xc6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xad, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x2c, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x51, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0xbd, 0x88,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x9d, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0xaa, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x51, 0x29,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x8c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x8d, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xd5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0xab, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x79,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8c, 0xfb, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0xa1, 0x6d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x0d, 0xcb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x10, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x11, 0xec,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x72, 0x90, 0x20,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x98,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xec, 0x32, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x3d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x2b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x91, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x42, 0xfd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x4f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0x7b, 0x8f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x99, 0xac, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xde, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0xb2, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0xd3, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xe5, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xd4, 0x1d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xf2, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xfb, 0x9e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbe, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x45, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbf, 0x0c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x03, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0xa4, 0x3d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x14, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x95, 0x71,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, 0xfe, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7d, 0xf5, 0x9d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x7a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, 0xb7, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xa2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0xa0, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xa0, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xaf, 0x01, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xb1, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x43, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc9, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x6d, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x36,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x67, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x38, 0x51,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x75, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x89, 0x3c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x83, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x1a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x73, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x97,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x43, 0x40,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x3d, 0x68,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x41,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0xe0, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8b, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0xbc, 0x2b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x98,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa1, 0xd4, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x2e, 0x23,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xaf, 0xd3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x92, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xe8, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0x61,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x17, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x35, 0x2c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0xe7, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x62, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0xed, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x9b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xfb, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xd7, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf7, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x7c, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x16, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xef, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x53, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0x01, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x85, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdd, 0xbd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0xcd, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0x1a, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x88, 0xfc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0xcf, 0x1b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x2d, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x2b, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0xe4, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xc9, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x46,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x62, 0xe5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xb6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xac, 0xe1, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x5d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0x19, 0xbf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x68, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0xdc, 0x06,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x96,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x04, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0xf3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x56, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2b, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x26, 0x46, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x8d, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0xcb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x15, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb2, 0x5a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x26, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3a, 0x57, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0xf5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xfa, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x84,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0xad, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xab, 0x26,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0xc8, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x39, 0x09,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc7, 0xd7, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x9c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9e, 0x48, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xb9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0xfa, 0x0f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x72, 0x32,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0xf8, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0xca, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0xcf, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xdc, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xb7, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xed, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x0e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xf7, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xe7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9b, 0x92, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x87, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0e, 0x6f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x5b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd5, 0x35, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xed, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0xe4, 0x66,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x35,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xc7, 0x87,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xde,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf2, 0x97, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xeb, 0x39,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xb5, 0x8d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x53, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0xb1, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa7, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfa, 0xde, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x80,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x8f, 0x54,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x85,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xdc, 0xe5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x42, 0x1c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0x93,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x6d, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0xa2, 0x74,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2a, 0x17, 0xb2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa5, 0x34,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x09, 0x28,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa3, 0x25,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfb, 0x8f, 0xc6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb7, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x6e, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x54, 0x4d, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9a, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xb3, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x14,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x32, 0x31, 0x77,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x23, 0x4b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4e, 0x4e, 0xc4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xce, 0xe6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdb, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0xff,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x66, 0xb7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0xba,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0x7f, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x21, 0x40, 0x45,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6b, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x28, 0x62, 0xf5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0x82,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0xf0, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xcf, 0xb1, 0xe1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x37, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x35, 0x6b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf9, 0xc8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0xc6, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x3f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0xee, 0xb4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd7, 0x56,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x7f, 0x72,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb0, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0xd4, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xaf, 0x4e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4f, 0x87,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd1, 0xe0, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xe0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0xd2, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x2f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4a, 0xc7, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe9, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0x2e, 0x13,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5a, 0x68, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x31, 0xae,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x24, 0x8e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7b, 0xa8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x9c, 0x0a, 0x10,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xed, 0x2a, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0xfa,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5b, 0x56, 0x69,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0xdc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8d, 0x90,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0x8b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xa5, 0x2d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xcb, 0x67,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x52, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0xaa, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x4a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xf1, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc4, 0x16, 0xcf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x06, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x95, 0xf7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0x80, 0x45,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x97, 0xd8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x26, 0x91,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x48,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x86, 0xfb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x55,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xd2, 0x75, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdb, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x61, 0x03, 0xc3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x99,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x56, 0x97, 0x63,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x20, 0xa4,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x43, 0xc2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0x08, 0xe9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x9e, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xca, 0x95,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4d, 0x02, 0xe2,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0xe9, 0xb0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0xcd,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xd9, 0xc5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x94,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe1, 0x17, 0x24,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x09, 0xca,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x33, 0x13, 0x0b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x55, 0x6a, 0x76,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x57, 0x19,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3b, 0x26, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1d, 0xeb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0xf1, 0x65,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x4c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x2f, 0x33, 0xcc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xa2, 0xf6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xa9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0x67, 0x6e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x81, 0xa5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x42, 0x7f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa0, 0xfe,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xb3, 0x8c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x62,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0xc0, 0xed,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x47, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x69, 0xd9, 0x30,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x5f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x52, 0xef,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb6, 0xdb,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x98, 0xa4, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0x3b,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0xa9, 0x5c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x9f, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0xd0, 0xf5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x22, 0xc1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x47, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0xe3,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xc8, 0xef, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfd, 0xb1,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa2, 0xfc, 0xd7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x16,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x9d, 0x75,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x90, 0xee,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf4, 0x62, 0x87,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x49, 0xd0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x9f,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b, 0x79, 0xa0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x48, 0xce,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0xaa, 0x37,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2d, 0xbc,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, 0xcc, 0xdf,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x5e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x25, 0x1f, 0x43,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcb, 0x8d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x5c, 0x1a, 0x3a,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8f, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xed, 0x70,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6d, 0x6c,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x45, 0xd4, 0x86,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x46, 0x92,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x27, 0xa8, 0xc9,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86, 0x7d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x0f, 0x44,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x78,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xa9, 0x3e, 0x64,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x5b, 0xc0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x71, 0x2e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x09, 0x41,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2c, 0x58,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x44, 0x0d,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa4, 0xa6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x46, 0xd6,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6a, 0xa7,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xee, 0x76, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xba, 0xad,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xac, 0x12,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x39, 0xf0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x7c, 0x83,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, 0x57,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x67, 0xac, 0xb8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xea, 0x48
+};
+
+/** Length of the pre-compressed data using the deflate algorithm */
+#define COMP_DEFLATE_SIZE 3769
+
+/** Pre-compressed data using the deflate algorithm */
+static uint8_t compressed_text_def[COMP_DEFLATE_SIZE] = {
+ 0x35, 0x99, 0x7b, 0x5c, 0xcf, 0x77, 0xfb, 0xc7,
+ 0xdf, 0xe5, 0x18, 0x8b, 0xb0, 0x8a, 0xd8, 0x72,
+ 0x9e, 0xb3, 0x64, 0x0c, 0x91, 0x39, 0xe6, 0x90,
+ 0xd3, 0x44, 0x6e, 0xa7, 0x9b, 0x8c, 0x26, 0x87,
+ 0x85, 0x9b, 0xcd, 0x34, 0xe5, 0x7c, 0x33, 0x8d,
+ 0xed, 0x76, 0x3e, 0xe6, 0x9c, 0x8a, 0x08, 0x35,
+ 0x16, 0x33, 0x67, 0x99, 0x15, 0xe5, 0xd0, 0xb4,
+ 0xe6, 0xd4, 0x9c, 0x8a, 0xe4, 0x14, 0x39, 0xfc,
+ 0x7e, 0x8f, 0xf7, 0xf3, 0xb5, 0xfd, 0xb1, 0xd7,
+ 0x5e, 0xeb, 0xfb, 0xfd, 0x7c, 0xde, 0x87, 0xeb,
+ 0x7a, 0x5d, 0xaf, 0xeb, 0xfa, 0x1a, 0xf3, 0xff,
+ 0xff, 0x04, 0x77, 0xf9, 0xce, 0xd8, 0x7f, 0x4e,
+ 0xd5, 0xb2, 0x30, 0x76, 0x55, 0x5b, 0xf8, 0x94,
+ 0x6e, 0x16, 0xfa, 0x65, 0xed, 0x81, 0xbf, 0x1b,
+ 0x6a, 0xa1, 0xc7, 0xb2, 0xbb, 0xf0, 0xdc, 0x41,
+ 0x16, 0xea, 0x7b, 0x7e, 0x0f, 0x3f, 0x1f, 0x66,
+ 0xe1, 0xd0, 0xa0, 0xaa, 0xf0, 0xd8, 0x2b, 0x16,
+ 0x0a, 0x3a, 0xec, 0x85, 0x17, 0x7d, 0x68, 0xe1,
+ 0xd6, 0xce, 0x9b, 0xf0, 0xae, 0x37, 0x2c, 0x84,
+ 0x99, 0x7f, 0xc3, 0x8f, 0x05, 0x59, 0x08, 0x28,
+ 0x5d, 0x1d, 0x5e, 0xf8, 0x91, 0x85, 0xaa, 0xb5,
+ 0xdc, 0xe0, 0x77, 0xef, 0x59, 0xb8, 0x73, 0x7e,
+ 0x29, 0x3c, 0xb2, 0x93, 0x85, 0x69, 0xe5, 0xcb,
+ 0xc1, 0x07, 0xd7, 0xb1, 0xd0, 0xe4, 0xe5, 0x3a,
+ 0xf8, 0xa8, 0xc9, 0x16, 0x9a, 0x1f, 0xcb, 0x82,
+ 0x6f, 0x64, 0x3d, 0x67, 0x4f, 0xec, 0x80, 0xaf,
+ 0x29, 0x65, 0x61, 0x4c, 0x51, 0x08, 0xdc, 0x7b,
+ 0x98, 0x05, 0xe7, 0xbe, 0x6b, 0xe1, 0x49, 0x7c,
+ 0xbe, 0xd9, 0xa1, 0xc6, 0xf0, 0x41, 0xb7, 0x2d,
+ 0x6c, 0xf1, 0x9a, 0xa4, 0xf7, 0x65, 0x5a, 0xa8,
+ 0x33, 0x72, 0x3c, 0x7c, 0xd8, 0x42, 0x0b, 0x25,
+ 0xf6, 0xb6, 0x81, 0xbb, 0x6f, 0xe1, 0x6b, 0xd1,
+ 0xac, 0xd3, 0xdc, 0x1b, 0x6c, 0x61, 0xf8, 0xdc,
+ 0x51, 0xf0, 0x8c, 0x68, 0x0b, 0xb1, 0x87, 0xf3,
+ 0xe0, 0x09, 0x8b, 0x2d, 0x9c, 0x4f, 0xeb, 0x0f,
+ 0x3f, 0x75, 0xdd, 0xc2, 0xda, 0x69, 0x6b, 0xe0,
+ 0xfb, 0x37, 0x58, 0x08, 0xa8, 0xfd, 0x04, 0x5e,
+ 0x61, 0xbe, 0x85, 0x72, 0x27, 0x5a, 0xc2, 0x67,
+ 0x7f, 0x60, 0xe1, 0x3f, 0x85, 0xcb, 0xe1, 0x1d,
+ 0xf8, 0x7b, 0x8d, 0x67, 0xe1, 0xf0, 0x76, 0x87,
+ 0x2d, 0x64, 0x45, 0xce, 0x80, 0x2f, 0xc8, 0xb7,
+ 0x10, 0xea, 0x51, 0x53, 0xeb, 0xfb, 0xd3, 0x82,
+ 0xef, 0x99, 0x92, 0xf0, 0x4d, 0x19, 0x3c, 0xe6,
+ 0x98, 0xde, 0x7f, 0xa3, 0xd0, 0xc2, 0xeb, 0x5d,
+ 0x65, 0xe1, 0xc1, 0xc4, 0x8b, 0xef, 0xec, 0xe2,
+ 0x70, 0x07, 0xee, 0x71, 0xe6, 0xa9, 0xda, 0xf0,
+ 0x95, 0x0d, 0x2c, 0xdc, 0x3f, 0xa8, 0xfd, 0x66,
+ 0xf5, 0xb0, 0x30, 0x2e, 0xfd, 0x3e, 0x7c, 0xc5,
+ 0x57, 0x16, 0x96, 0x5c, 0xd1, 0xf3, 0x5a, 0x6d,
+ 0xb5, 0x30, 0x74, 0xd0, 0x00, 0x78, 0x83, 0xde,
+ 0x16, 0x72, 0x3b, 0xf2, 0x1e, 0xb3, 0x87, 0xf8,
+ 0xf2, 0xc9, 0xe0, 0xff, 0x9b, 0x22, 0xd6, 0xe9,
+ 0xbd, 0xe6, 0x2a, 0x3c, 0xfe, 0x37, 0x0b, 0x6d,
+ 0x23, 0x27, 0xc0, 0x03, 0x96, 0xf0, 0xfd, 0x85,
+ 0xda, 0x5f, 0x65, 0xe2, 0xf2, 0x5a, 0xb8, 0xee,
+ 0x63, 0x28, 0xef, 0x73, 0x7c, 0xf9, 0x0e, 0xee,
+ 0x41, 0xbc, 0xbf, 0xfd, 0x7a, 0x19, 0xbc, 0xd3,
+ 0xcf, 0xbc, 0xe6, 0xd3, 0x63, 0xf0, 0x57, 0x75,
+ 0x2d, 0xc4, 0x64, 0x6f, 0x82, 0xb7, 0x39, 0xce,
+ 0xdf, 0xe7, 0xfd, 0x0d, 0xaf, 0xbf, 0xcd, 0xc2,
+ 0x85, 0x3c, 0xc5, 0xe3, 0xdf, 0xed, 0x2d, 0x94,
+ 0xc8, 0xd7, 0x7a, 0xdb, 0xfe, 0x6a, 0x21, 0x2c,
+ 0x49, 0xf1, 0xb5, 0xec, 0xb4, 0x85, 0xa9, 0xa3,
+ 0x5b, 0xc1, 0xaf, 0x96, 0xb7, 0x70, 0xc4, 0xd5,
+ 0x01, 0x3e, 0x92, 0xf8, 0x58, 0xb8, 0xb8, 0x35,
+ 0xfc, 0x25, 0xf1, 0x3f, 0x34, 0xfe, 0x00, 0xfc,
+ 0x67, 0xd6, 0x13, 0x9d, 0xf8, 0x31, 0x7c, 0x35,
+ 0x79, 0x5b, 0x3e, 0x51, 0xf1, 0xbc, 0xfc, 0x53,
+ 0x0b, 0x7b, 0x03, 0xdf, 0x83, 0x3f, 0x68, 0x61,
+ 0xe1, 0xf1, 0x6d, 0xbd, 0x7f, 0x5f, 0x5f, 0xd6,
+ 0x37, 0x93, 0x7b, 0x35, 0x8f, 0x88, 0xff, 0x17,
+ 0x65, 0x95, 0x1f, 0xe6, 0x17, 0xfb, 0xef, 0x7b,
+ 0x73, 0xd3, 0xa0, 0x9b, 0xb8, 0xb7, 0x36, 0xb3,
+ 0x67, 0xc1, 0x73, 0xca, 0x58, 0x48, 0x59, 0xc2,
+ 0xba, 0x4d, 0xd9, 0x50, 0x0b, 0xe7, 0x7a, 0x68,
+ 0xfd, 0x69, 0xc4, 0x5d, 0xb7, 0x66, 0x8b, 0xe0,
+ 0x7e, 0x8e, 0x16, 0x3e, 0xf7, 0x3b, 0x08, 0x3f,
+ 0xbc, 0xc2, 0x82, 0x67, 0x45, 0xe9, 0x47, 0x2d,
+ 0xde, 0xff, 0x61, 0x7b, 0x74, 0xc2, 0x44, 0x13,
+ 0xef, 0xd5, 0xfc, 0x78, 0x8f, 0x99, 0xf1, 0x90,
+ 0xef, 0x9f, 0x24, 0xef, 0xcc, 0xc9, 0x37, 0x16,
+ 0xde, 0x6f, 0xf8, 0x02, 0x3e, 0x85, 0xb8, 0x1e,
+ 0xec, 0xbb, 0x42, 0x7f, 0x9f, 0x6d, 0x61, 0xf8,
+ 0xde, 0x02, 0x78, 0x4f, 0xe2, 0x68, 0x7a, 0xba,
+ 0xd6, 0x3b, 0xaa, 0xa9, 0x05, 0x9f, 0x59, 0xe8,
+ 0x8e, 0x09, 0xfd, 0x8b, 0xe7, 0x3f, 0xe4, 0x1e,
+ 0x4d, 0x6d, 0xe2, 0xab, 0x42, 0x8a, 0xe2, 0x63,
+ 0x03, 0xfb, 0x73, 0xaa, 0xa0, 0xf8, 0xcd, 0xfa,
+ 0x97, 0x85, 0x98, 0xeb, 0x49, 0xf0, 0x8c, 0x21,
+ 0x16, 0xe2, 0x32, 0x15, 0x7f, 0x05, 0x15, 0x2c,
+ 0x54, 0x1d, 0xfe, 0x5f, 0xb8, 0x4b, 0x0d, 0x0b,
+ 0xf1, 0xd1, 0xca, 0x17, 0x1f, 0x9e, 0xd7, 0x7c,
+ 0x26, 0x3a, 0x68, 0xce, 0xdf, 0xb1, 0x70, 0xab,
+ 0x5c, 0x36, 0xbc, 0xef, 0x49, 0x0b, 0xdd, 0x2e,
+ 0x3f, 0x87, 0xfb, 0x91, 0x8f, 0xae, 0xc7, 0xc9,
+ 0x2b, 0x33, 0x94, 0x73, 0xfe, 0xb9, 0xcf, 0x6b,
+ 0xf8, 0x97, 0xf0, 0x9a, 0x73, 0x1f, 0xc3, 0xab,
+ 0xc0, 0x0f, 0xfd, 0xa3, 0x17, 0x6b, 0x03, 0x2d,
+ 0x7c, 0xeb, 0xf5, 0x07, 0xfc, 0x19, 0xfb, 0x2c,
+ 0xfb, 0xc9, 0x4f, 0xf0, 0x1b, 0x9c, 0xbb, 0x47,
+ 0x9c, 0xd6, 0x5b, 0x8d, 0xef, 0x87, 0x2f, 0x51,
+ 0x3c, 0xd4, 0x42, 0x3f, 0x0a, 0x07, 0x13, 0xb7,
+ 0xa6, 0x3e, 0xeb, 0x2f, 0xac, 0x3c, 0x1c, 0xbe,
+ 0x95, 0x38, 0xac, 0x3a, 0x5b, 0xf9, 0x51, 0x89,
+ 0xfc, 0x3e, 0x30, 0x80, 0xba, 0x61, 0x52, 0x58,
+ 0x77, 0xef, 0x3b, 0x5a, 0xef, 0xdd, 0x74, 0x0b,
+ 0xab, 0x2a, 0x9e, 0x85, 0x7b, 0xb3, 0xcf, 0x56,
+ 0xab, 0xeb, 0xc3, 0xaf, 0x53, 0x67, 0xa6, 0xa7,
+ 0xfc, 0x08, 0xef, 0xc5, 0x73, 0xda, 0xdd, 0xdf,
+ 0x05, 0xff, 0xd8, 0xc3, 0x42, 0xf2, 0x30, 0x57,
+ 0xf8, 0xbf, 0x27, 0xf2, 0xfc, 0xaa, 0x33, 0xe1,
+ 0x5f, 0x71, 0xff, 0xaf, 0x4e, 0x46, 0xc1, 0xcf,
+ 0xf5, 0xb4, 0x30, 0xbe, 0x9e, 0xd6, 0x5b, 0x82,
+ 0xf8, 0x89, 0x0b, 0x91, 0x7e, 0xcc, 0xe7, 0x7c,
+ 0x6b, 0xbc, 0x75, 0x81, 0x37, 0x7c, 0x65, 0x21,
+ 0xe6, 0x07, 0xee, 0xd9, 0x78, 0x80, 0x03, 0x42,
+ 0x88, 0x13, 0xb3, 0x39, 0xc6, 0xc2, 0x91, 0x48,
+ 0x67, 0x78, 0x68, 0x2e, 0xfb, 0xad, 0x41, 0xdc,
+ 0x9a, 0x29, 0xb7, 0x2c, 0xf8, 0xb8, 0x53, 0xc7,
+ 0x4c, 0x71, 0x27, 0x0b, 0x43, 0x93, 0x74, 0x9e,
+ 0xe9, 0xec, 0xb3, 0xe5, 0xbb, 0xa9, 0xf0, 0x65,
+ 0x7d, 0x2d, 0xec, 0xab, 0xb8, 0x07, 0x1e, 0xcf,
+ 0xb9, 0xdc, 0x8c, 0xa3, 0xae, 0x99, 0x7c, 0xce,
+ 0xed, 0xf6, 0xad, 0x95, 0xf0, 0x0d, 0xe4, 0x45,
+ 0x9f, 0x71, 0xec, 0xc3, 0x78, 0x13, 0xa7, 0xae,
+ 0xaa, 0x8b, 0xa6, 0x7b, 0x33, 0x0b, 0x6f, 0x7a,
+ 0x70, 0x2e, 0x66, 0x36, 0xba, 0xd1, 0xbb, 0x89,
+ 0xf2, 0x65, 0x27, 0xbc, 0xf4, 0x22, 0xdd, 0x77,
+ 0x68, 0x23, 0x0b, 0x19, 0xf1, 0xa7, 0xe0, 0x09,
+ 0xe8, 0xe5, 0xa2, 0x79, 0xd7, 0xe1, 0xf5, 0x39,
+ 0xef, 0x80, 0x53, 0xab, 0xe1, 0xdd, 0x2a, 0x59,
+ 0x58, 0x53, 0x52, 0xfb, 0x2b, 0xc3, 0x7d, 0x25,
+ 0x3f, 0x27, 0xce, 0xcc, 0x25, 0xea, 0x51, 0xb9,
+ 0xc7, 0x17, 0xe1, 0x95, 0x77, 0x5b, 0x88, 0xf6,
+ 0xf3, 0x85, 0x77, 0xce, 0xb7, 0x10, 0xf7, 0x36,
+ 0x0d, 0x3e, 0x84, 0xfa, 0xf8, 0xe4, 0x60, 0x3f,
+ 0xf8, 0xf7, 0xb1, 0x16, 0xfc, 0x03, 0xfd, 0xe1,
+ 0x57, 0xd6, 0x5b, 0x68, 0x9c, 0xc8, 0xbe, 0xcc,
+ 0x5a, 0x74, 0xd9, 0x69, 0xbd, 0xf2, 0xb3, 0x23,
+ 0xfb, 0xce, 0x79, 0x42, 0x1e, 0x99, 0x92, 0xac,
+ 0xe7, 0x44, 0xa1, 0xfc, 0x43, 0x73, 0xea, 0x40,
+ 0xfc, 0xfa, 0xfd, 0xf0, 0xf3, 0xc4, 0xc7, 0x96,
+ 0xde, 0xfa, 0xfc, 0xdc, 0x5e, 0x16, 0xc2, 0x37,
+ 0x48, 0xdf, 0x1a, 0x13, 0x0f, 0xbb, 0x82, 0xc9,
+ 0x5b, 0x93, 0xf1, 0xb9, 0x85, 0x69, 0x77, 0xa4,
+ 0xbf, 0x7e, 0x9c, 0x6b, 0xb6, 0xbf, 0xea, 0xb5,
+ 0x37, 0x79, 0x5b, 0x6d, 0xb1, 0xf4, 0x27, 0xb7,
+ 0x9d, 0x85, 0x1e, 0x8b, 0x15, 0x1f, 0x27, 0x88,
+ 0x77, 0xaf, 0x26, 0x5f, 0xc2, 0x9f, 0xa3, 0x8b,
+ 0xd5, 0x9b, 0x50, 0x47, 0x4c, 0x2d, 0xd6, 0x3b,
+ 0xaf, 0xe5, 0x25, 0xf8, 0xf8, 0x79, 0x16, 0x8a,
+ 0xe6, 0x11, 0x67, 0x26, 0x81, 0xf8, 0x2a, 0xd5,
+ 0x4d, 0xf1, 0x58, 0x1c, 0x3d, 0x7b, 0xda, 0x34,
+ 0x4c, 0xef, 0xf3, 0xb1, 0x50, 0xb6, 0x6c, 0x65,
+ 0xf8, 0xc1, 0x7a, 0x3c, 0x7f, 0x78, 0x75, 0xf8,
+ 0x21, 0xea, 0xc9, 0xfc, 0x58, 0x6f, 0xf8, 0xf1,
+ 0x38, 0x0b, 0xbe, 0xcf, 0xfd, 0xe0, 0x25, 0xa8,
+ 0xcf, 0x8f, 0x23, 0x52, 0xf5, 0x3c, 0xf4, 0x38,
+ 0x73, 0xb7, 0x78, 0xf0, 0x17, 0x16, 0x9a, 0xff,
+ 0x26, 0x3f, 0x97, 0x54, 0x64, 0x61, 0x75, 0xd7,
+ 0xff, 0xc1, 0xfb, 0xe2, 0x07, 0xb7, 0xb4, 0xee,
+ 0x0e, 0xbf, 0xc5, 0xbd, 0xbf, 0xdf, 0x88, 0x38,
+ 0x30, 0xe9, 0xc1, 0x3c, 0x6f, 0x98, 0xfc, 0xc8,
+ 0xc7, 0xdc, 0xf7, 0x8b, 0x3e, 0xd3, 0xe1, 0xc3,
+ 0xa9, 0xbb, 0x51, 0x7f, 0x3d, 0x82, 0x17, 0x70,
+ 0x0e, 0x2e, 0xb3, 0xd0, 0x55, 0x13, 0x4d, 0x9c,
+ 0x3d, 0x2f, 0x3c, 0xaf, 0xf5, 0xb2, 0x8e, 0x65,
+ 0xa3, 0x54, 0x5f, 0x1c, 0xd0, 0xd5, 0x23, 0x99,
+ 0xaa, 0x7f, 0xfb, 0xa9, 0xb7, 0xc9, 0x71, 0x09,
+ 0xf0, 0xce, 0x9c, 0xe7, 0xd7, 0x13, 0xc5, 0x5f,
+ 0x6d, 0xb6, 0xd0, 0x3d, 0x55, 0xf1, 0x36, 0x96,
+ 0xf7, 0xfe, 0x6d, 0xb8, 0x67, 0x53, 0x0b, 0x1d,
+ 0x49, 0x0b, 0xc4, 0x27, 0x1a, 0x0f, 0xde, 0xfb,
+ 0xc7, 0xf4, 0xce, 0xfa, 0x3c, 0x79, 0x1e, 0x52,
+ 0x47, 0x7a, 0x32, 0x73, 0x9f, 0x05, 0xff, 0x49,
+ 0x3e, 0xe2, 0x3c, 0xb7, 0x5f, 0xd5, 0xae, 0xf0,
+ 0x3a, 0x9c, 0xe3, 0xbe, 0x58, 0xed, 0xa7, 0xdb,
+ 0x38, 0x0b, 0x73, 0x47, 0xa9, 0x1e, 0xf5, 0xc7,
+ 0x87, 0xb7, 0xe9, 0x80, 0x6e, 0x98, 0x65, 0xf8,
+ 0xcd, 0xa2, 0x04, 0xf9, 0x91, 0x66, 0xf8, 0xca,
+ 0xd5, 0xcb, 0x3b, 0xc2, 0x97, 0x12, 0xd7, 0xde,
+ 0xbe, 0xd2, 0x83, 0x40, 0xe2, 0xaa, 0xf3, 0x55,
+ 0xe9, 0xe9, 0x43, 0xea, 0xd4, 0xda, 0xa7, 0xdc,
+ 0xab, 0x49, 0x66, 0x1d, 0xf7, 0x22, 0xb5, 0xfe,
+ 0x1c, 0xe2, 0xae, 0x4e, 0xcb, 0xb7, 0xf0, 0x54,
+ 0xea, 0x70, 0xe9, 0x8e, 0x47, 0xe0, 0xcf, 0x22,
+ 0xf9, 0xfc, 0x10, 0xf1, 0x9b, 0xf8, 0xa1, 0xe1,
+ 0x8d, 0xa2, 0xb4, 0x5e, 0xf2, 0x64, 0xc4, 0x97,
+ 0x4b, 0xe1, 0x31, 0xe4, 0xe7, 0xc5, 0x8b, 0x5a,
+ 0x4f, 0x02, 0xf7, 0x92, 0x36, 0x5e, 0xf9, 0x30,
+ 0x8f, 0xf3, 0x7c, 0xf5, 0x4c, 0xfe, 0x34, 0x20,
+ 0xcd, 0xc2, 0x55, 0x27, 0x7c, 0x92, 0x69, 0xf1,
+ 0x89, 0x85, 0xbb, 0xfb, 0x5d, 0xe0, 0x19, 0xc9,
+ 0x16, 0xc6, 0x4c, 0x52, 0xbd, 0x9b, 0xcd, 0x3e,
+ 0x3e, 0x4f, 0x57, 0x3d, 0x5f, 0x15, 0x6f, 0x21,
+ 0x53, 0x76, 0xc2, 0x44, 0xa3, 0x17, 0x0b, 0xd6,
+ 0xc8, 0x4f, 0x4e, 0x45, 0x6f, 0x46, 0x17, 0xc8,
+ 0x5f, 0x85, 0x92, 0x0f, 0x83, 0x27, 0xab, 0x7f,
+ 0x59, 0x04, 0x6f, 0x9f, 0x24, 0xff, 0x13, 0x84,
+ 0x2e, 0x44, 0x24, 0xaa, 0xff, 0xa9, 0x8f, 0xbe,
+ 0x75, 0x0e, 0xdd, 0x09, 0xcf, 0xa6, 0x6f, 0x09,
+ 0x5b, 0x46, 0x1e, 0x9b, 0x6d, 0xd4, 0x81, 0x85,
+ 0x11, 0xaa, 0x9f, 0xbd, 0x89, 0xef, 0xc0, 0x10,
+ 0xad, 0xff, 0x5f, 0xf8, 0xfa, 0xf4, 0x1c, 0xc5,
+ 0x47, 0xbf, 0x11, 0x16, 0x76, 0x1d, 0x7e, 0x0a,
+ 0x8f, 0x23, 0xde, 0xe6, 0xaf, 0xf4, 0x84, 0x2f,
+ 0xa7, 0x5f, 0x89, 0x88, 0x53, 0x7f, 0x91, 0x5b,
+ 0xda, 0x82, 0xfb, 0x61, 0xf2, 0xca, 0xdc, 0x26,
+ 0x6f, 0xe6, 0xfd, 0xa4, 0xfd, 0x3d, 0x9e, 0x66,
+ 0x61, 0x71, 0x43, 0xe9, 0xd3, 0x7b, 0xe8, 0xd0,
+ 0x98, 0x3a, 0xf2, 0x2f, 0x8f, 0xc9, 0xbb, 0xb0,
+ 0x19, 0xac, 0xcb, 0x1c, 0xe5, 0x5e, 0xc2, 0xba,
+ 0xfe, 0x00, 0xff, 0x19, 0x1d, 0xa8, 0xe9, 0x2f,
+ 0xff, 0xf8, 0x5a, 0xf5, 0xa5, 0x8b, 0x3b, 0x7c,
+ 0x0c, 0xe7, 0xff, 0xc7, 0x5f, 0xd2, 0x9f, 0x91,
+ 0xf8, 0x86, 0xb2, 0xeb, 0xf4, 0xf7, 0x51, 0xec,
+ 0x27, 0xbe, 0xc3, 0x40, 0x78, 0x10, 0x7e, 0x29,
+ 0xb9, 0x97, 0xf2, 0x6b, 0x0e, 0xf9, 0xba, 0x7f,
+ 0xcc, 0xef, 0xf0, 0xb1, 0xac, 0xbb, 0xa9, 0xbb,
+ 0xf4, 0xb2, 0x0d, 0x71, 0xf7, 0xee, 0x5b, 0x74,
+ 0xc8, 0xc4, 0x13, 0x8f, 0x1f, 0x95, 0xd2, 0x7d,
+ 0xb9, 0x91, 0x47, 0xff, 0xeb, 0x24, 0x3f, 0x10,
+ 0x87, 0x1f, 0x2d, 0x95, 0xa0, 0xf3, 0xf4, 0x24,
+ 0xde, 0x7f, 0xaa, 0xa8, 0x7c, 0x9a, 0x40, 0xbf,
+ 0x56, 0x21, 0x12, 0x5f, 0x6f, 0x3e, 0x26, 0x9e,
+ 0x1b, 0x7f, 0xa9, 0xfd, 0x46, 0x72, 0x6f, 0x8f,
+ 0x8e, 0xaa, 0x1f, 0xe8, 0x43, 0x3c, 0x2f, 0x4c,
+ 0x2b, 0x06, 0xcf, 0x60, 0xfd, 0x89, 0x5f, 0x6d,
+ 0xd1, 0xfe, 0xf6, 0x58, 0x68, 0x71, 0x43, 0xfd,
+ 0xcc, 0xaf, 0xc4, 0xc3, 0x69, 0x97, 0x8a, 0xf0,
+ 0xdb, 0x3c, 0xa7, 0xb6, 0x87, 0xf2, 0x75, 0x0e,
+ 0xeb, 0x0a, 0xee, 0xe1, 0x05, 0xbf, 0x1a, 0x61,
+ 0x21, 0x6f, 0x2c, 0x79, 0x6e, 0x56, 0x8c, 0xb4,
+ 0x50, 0x31, 0x7c, 0x23, 0x3c, 0x97, 0xba, 0xd3,
+ 0xa5, 0x0e, 0x7d, 0x87, 0x69, 0x4d, 0xbf, 0xe6,
+ 0x58, 0x4b, 0x7a, 0x79, 0x15, 0xbd, 0xcd, 0xe9,
+ 0xa2, 0x7e, 0xba, 0x0c, 0x7a, 0x58, 0x78, 0x85,
+ 0x75, 0x9b, 0xf5, 0xc4, 0xc5, 0x57, 0x71, 0xd2,
+ 0x53, 0xa7, 0xd1, 0x16, 0x2e, 0xfc, 0xad, 0xf3,
+ 0xdb, 0xc6, 0xf7, 0x56, 0xb4, 0x52, 0xfe, 0xfd,
+ 0x8a, 0xef, 0x78, 0x7a, 0xf9, 0x32, 0xdc, 0x13,
+ 0x3f, 0xb2, 0x7c, 0xb7, 0xf2, 0x31, 0x86, 0x3e,
+ 0x65, 0xf6, 0x41, 0xad, 0x27, 0x85, 0xfe, 0x69,
+ 0x7c, 0x10, 0x7d, 0x83, 0xe9, 0x9e, 0x62, 0x61,
+ 0x77, 0x82, 0xf8, 0x52, 0xea, 0xce, 0xf9, 0x7a,
+ 0x8a, 0x5f, 0x37, 0x7c, 0xc8, 0x17, 0x6f, 0xa4,
+ 0x3f, 0x87, 0xf0, 0x6d, 0x71, 0xc3, 0xe9, 0x5b,
+ 0xcc, 0x37, 0x7c, 0x7e, 0xfb, 0x37, 0xe8, 0xae,
+ 0x29, 0x98, 0x63, 0x61, 0xbf, 0x63, 0x07, 0x78,
+ 0x34, 0xfa, 0x91, 0x92, 0xab, 0xbf, 0xaf, 0x66,
+ 0x1d, 0x09, 0x93, 0xa5, 0xa7, 0xc7, 0x4e, 0x58,
+ 0xf8, 0x64, 0x93, 0xfc, 0x44, 0x03, 0xde, 0x7b,
+ 0xae, 0x3e, 0x71, 0x61, 0x4a, 0xe3, 0x63, 0x52,
+ 0xdb, 0xd0, 0x07, 0x9a, 0x91, 0xf8, 0xfa, 0xb3,
+ 0x4d, 0x15, 0xff, 0xc7, 0xf1, 0xe3, 0x11, 0xe3,
+ 0x54, 0x5f, 0x86, 0x53, 0xaf, 0x27, 0x8e, 0xd0,
+ 0x79, 0x17, 0xc3, 0x67, 0xd5, 0x9b, 0x28, 0x3f,
+ 0xff, 0x07, 0xfe, 0xc2, 0xff, 0x91, 0xfc, 0xe7,
+ 0x95, 0xea, 0x16, 0x42, 0xda, 0xeb, 0x3e, 0xab,
+ 0x50, 0x67, 0xf3, 0x3e, 0x50, 0x3c, 0x5f, 0x44,
+ 0x37, 0xd2, 0x63, 0xa4, 0x1f, 0xab, 0xd9, 0xaf,
+ 0xff, 0x41, 0xf9, 0xc1, 0xe9, 0x3c, 0xff, 0x3b,
+ 0x23, 0x3f, 0x66, 0xc8, 0xaf, 0x9a, 0xc7, 0xf5,
+ 0xf7, 0x81, 0xe8, 0xc5, 0x9a, 0xe2, 0xfa, 0x7e,
+ 0x06, 0xe7, 0x5c, 0x39, 0x41, 0x7e, 0xe1, 0x02,
+ 0x79, 0xfd, 0x36, 0x40, 0xf7, 0x75, 0xfb, 0x9a,
+ 0x85, 0xcc, 0xc6, 0xf8, 0x32, 0x73, 0x9e, 0xfb,
+ 0xea, 0xe2, 0x3b, 0x17, 0xbe, 0x85, 0xfa, 0xfd,
+ 0xc3, 0x26, 0xf5, 0x1b, 0xa7, 0xd0, 0x31, 0x87,
+ 0xb6, 0xf2, 0xc7, 0xef, 0xf0, 0xf7, 0x67, 0x7f,
+ 0x47, 0x37, 0x4d, 0x03, 0xf2, 0x3c, 0xf3, 0xc4,
+ 0x7f, 0xe0, 0xae, 0xe8, 0xbe, 0xc3, 0x39, 0xe5,
+ 0x4b, 0x27, 0x9e, 0xd3, 0xf4, 0x72, 0x3e, 0xfc,
+ 0x21, 0x3a, 0xea, 0xda, 0x41, 0xfe, 0xdb, 0x91,
+ 0x7a, 0xb0, 0x60, 0xb4, 0xf2, 0x7f, 0x24, 0xfe,
+ 0x39, 0x6c, 0xbb, 0xfa, 0xcf, 0x4a, 0x63, 0x2c,
+ 0x04, 0x25, 0xca, 0x2f, 0x6c, 0x18, 0x6b, 0x61,
+ 0x6c, 0x7f, 0xf9, 0xb9, 0xd7, 0x87, 0x2c, 0x4c,
+ 0xde, 0x48, 0x9d, 0x36, 0xb3, 0x4a, 0xb0, 0xad,
+ 0x11, 0xea, 0x87, 0x26, 0xa1, 0x9b, 0xa3, 0x6f,
+ 0xc8, 0x8f, 0x6c, 0x27, 0xbf, 0x7d, 0xe7, 0xca,
+ 0xff, 0xc4, 0x12, 0x0f, 0xdd, 0x73, 0xe5, 0x27,
+ 0x52, 0x39, 0xef, 0x49, 0x67, 0xd5, 0xaf, 0xf4,
+ 0x44, 0xef, 0x7f, 0xff, 0x48, 0xf9, 0xdd, 0x66,
+ 0x95, 0x85, 0xa9, 0x17, 0x35, 0xdf, 0xd9, 0x81,
+ 0xbf, 0xea, 0x33, 0x50, 0xeb, 0x9f, 0x46, 0x3c,
+ 0xde, 0xcf, 0x97, 0x5f, 0x9e, 0x89, 0x3f, 0xfe,
+ 0x7c, 0xbe, 0xfc, 0xf1, 0x3b, 0xd6, 0x3d, 0x6b,
+ 0x57, 0x1a, 0xfc, 0xc1, 0x03, 0xb6, 0xdd, 0x08,
+ 0xdf, 0x61, 0xde, 0x10, 0x17, 0x6f, 0xfb, 0xc9,
+ 0x2f, 0x1f, 0xa7, 0x7e, 0xac, 0x3d, 0x2c, 0xbf,
+ 0x1d, 0xcc, 0x7d, 0xfe, 0xb2, 0x5e, 0xfa, 0xe2,
+ 0xcc, 0x7d, 0xdd, 0x8b, 0x51, 0x7c, 0x2d, 0x46,
+ 0x1f, 0x67, 0xf6, 0x93, 0xfe, 0xf5, 0x66, 0x3e,
+ 0xe4, 0x1d, 0x89, 0xef, 0x32, 0x87, 0xd1, 0x83,
+ 0x97, 0xb1, 0xdc, 0xbb, 0x09, 0x63, 0xdf, 0x8b,
+ 0x8b, 0x98, 0x0b, 0x98, 0x10, 0xfa, 0x8a, 0xd3,
+ 0x9b, 0xd4, 0x8f, 0x0e, 0xc1, 0x2f, 0x6e, 0x75,
+ 0x3e, 0x07, 0x6f, 0xcb, 0x3d, 0x3b, 0xba, 0x68,
+ 0x1e, 0xf7, 0x94, 0x39, 0x4d, 0x42, 0x05, 0xce,
+ 0xc1, 0x68, 0x2c, 0x35, 0x79, 0x69, 0x75, 0xfe,
+ 0xc3, 0x8d, 0xf3, 0xca, 0xf5, 0x96, 0x5f, 0x7e,
+ 0x82, 0xbf, 0x75, 0x98, 0xdb, 0x10, 0xde, 0x8b,
+ 0xf9, 0x4f, 0xfe, 0x0f, 0xd2, 0x7b, 0x83, 0xce,
+ 0xb8, 0x74, 0x54, 0x7f, 0xf2, 0x5d, 0x1f, 0xb6,
+ 0x3d, 0x43, 0xeb, 0x4d, 0xa5, 0x7e, 0xae, 0x8e,
+ 0x43, 0xb7, 0x4c, 0x5f, 0xd6, 0x3d, 0x24, 0x5b,
+ 0xe7, 0xe1, 0xce, 0xfe, 0x37, 0x26, 0x68, 0xbe,
+ 0xf5, 0x3d, 0xe7, 0x92, 0x91, 0xae, 0x7a, 0xb5,
+ 0x0a, 0xbd, 0x2d, 0x93, 0xa3, 0x7c, 0x18, 0x49,
+ 0x5c, 0x14, 0xcb, 0x50, 0x3e, 0x5f, 0x40, 0x4f,
+ 0x93, 0x0b, 0xd4, 0x3f, 0xed, 0xa0, 0xcf, 0x76,
+ 0x2e, 0x52, 0xbe, 0x1d, 0x61, 0x1e, 0xb1, 0xb6,
+ 0x0b, 0x71, 0x69, 0x7e, 0x21, 0x4e, 0x63, 0x8b,
+ 0xe9, 0x3c, 0xca, 0xb1, 0xfe, 0x4b, 0x5b, 0xdb,
+ 0xe9, 0x7d, 0xac, 0xc3, 0xe1, 0xfd, 0xaf, 0xb5,
+ 0x3d, 0xe2, 0x29, 0x67, 0xb9, 0xea, 0x75, 0x01,
+ 0xf7, 0x79, 0xf9, 0xad, 0xe2, 0xe9, 0x39, 0xf5,
+ 0xe8, 0xdb, 0x67, 0xc4, 0x91, 0xf1, 0xfd, 0xc6,
+ 0xc2, 0xc9, 0xa4, 0xa5, 0x70, 0x37, 0x74, 0xee,
+ 0xfb, 0x28, 0xf9, 0x87, 0x1a, 0xe8, 0x74, 0x93,
+ 0x28, 0xe9, 0x4d, 0x63, 0xe2, 0x27, 0xde, 0x41,
+ 0x7a, 0xed, 0xc8, 0x7d, 0x9d, 0xda, 0xab, 0xfe,
+ 0xe3, 0x20, 0x73, 0xa7, 0x51, 0x9d, 0x34, 0x1f,
+ 0x3b, 0x4b, 0xde, 0x2c, 0x9a, 0x20, 0xbd, 0xf0,
+ 0x64, 0xbd, 0xa1, 0xc1, 0xe8, 0x8c, 0x59, 0x49,
+ 0x3d, 0xc9, 0x6e, 0x25, 0xbf, 0xfb, 0x1e, 0x7d,
+ 0xf4, 0xc0, 0xa9, 0x8a, 0xcf, 0x85, 0xe8, 0x8e,
+ 0xc7, 0x1c, 0xf9, 0x9d, 0x86, 0xe8, 0x4a, 0x8d,
+ 0x29, 0xea, 0xd7, 0xab, 0xe2, 0x1f, 0x1c, 0x3a,
+ 0x7d, 0x0a, 0x2f, 0x45, 0xfd, 0x70, 0x6e, 0xc7,
+ 0x73, 0xcd, 0x55, 0x74, 0xfc, 0xcd, 0x75, 0xcd,
+ 0x5b, 0xbf, 0xc3, 0x5f, 0xb8, 0x25, 0x2b, 0x3e,
+ 0x8e, 0x11, 0x3f, 0x9b, 0x32, 0xf8, 0x9e, 0xd9,
+ 0x4a, 0x3f, 0x7f, 0xab, 0x89, 0xd6, 0x9f, 0xca,
+ 0xfe, 0x2e, 0xc6, 0xcb, 0x5f, 0xfd, 0xe8, 0x05,
+ 0xbf, 0xab, 0xf9, 0xe1, 0x2a, 0xea, 0x67, 0x4f,
+ 0x77, 0xc5, 0x4b, 0x0b, 0xf2, 0x71, 0xe4, 0x1d,
+ 0xcd, 0x17, 0x2e, 0xa1, 0x7b, 0x03, 0x82, 0xd2,
+ 0xe0, 0x17, 0xa8, 0x67, 0xe3, 0xf3, 0x74, 0x9f,
+ 0xcd, 0x99, 0x7b, 0xfd, 0x59, 0xf4, 0x4f, 0x3d,
+ 0xa1, 0x3e, 0x24, 0x5f, 0x09, 0x80, 0xbf, 0x40,
+ 0x17, 0xf3, 0xc2, 0xa4, 0xe7, 0xd9, 0xf8, 0xcd,
+ 0x4a, 0x6e, 0xf2, 0xff, 0x8f, 0xa8, 0x33, 0x39,
+ 0xfd, 0x55, 0x6f, 0xaa, 0xf3, 0xb9, 0xd3, 0x0e,
+ 0xc4, 0x91, 0xb9, 0x49, 0x5c, 0xc6, 0xce, 0x53,
+ 0x3f, 0xff, 0x27, 0xf3, 0x92, 0xad, 0xe7, 0xb5,
+ 0xdf, 0x5e, 0xf4, 0x3b, 0xb3, 0x3c, 0xe4, 0x1f,
+ 0xfe, 0xcb, 0xb9, 0x56, 0x4b, 0x55, 0x7d, 0xef,
+ 0x4f, 0x7d, 0xf5, 0x68, 0xac, 0xfc, 0x7d, 0x48,
+ 0x1d, 0x7a, 0xd0, 0x50, 0xf7, 0x39, 0x06, 0xdf,
+ 0x5e, 0xfb, 0xb6, 0xe6, 0xb3, 0xfe, 0xf8, 0x11,
+ 0x9f, 0xb3, 0x9a, 0x9f, 0x4f, 0xc7, 0xdf, 0x7f,
+ 0x18, 0x94, 0x03, 0x8f, 0x4e, 0xb4, 0x10, 0xa7,
+ 0xb9, 0xad, 0x29, 0x42, 0x5f, 0x42, 0xaa, 0x68,
+ 0xfd, 0x5f, 0x60, 0x9c, 0x9f, 0x64, 0xe9, 0x79,
+ 0x83, 0x88, 0xbf, 0xae, 0xc5, 0xe5, 0x4f, 0x4a,
+ 0x32, 0x37, 0xf2, 0x1b, 0xb8, 0x1d, 0xde, 0x10,
+ 0xbf, 0x51, 0xb7, 0xeb, 0xb7, 0xf0, 0x72, 0x70,
+ 0xe7, 0x48, 0x74, 0xde, 0x5c, 0x44, 0x57, 0x1e,
+ 0xb8, 0x2a, 0x5e, 0x13, 0xd8, 0xaf, 0x67, 0x5d,
+ 0x7c, 0xad, 0x19, 0xef, 0x65, 0xa1, 0x4d, 0xa0,
+ 0xf4, 0xde, 0xe7, 0x99, 0x85, 0xa3, 0xaf, 0xd4,
+ 0xcf, 0x05, 0x70, 0x7f, 0x0d, 0x76, 0xa9, 0x5f,
+ 0x8e, 0xe5, 0x1c, 0x0e, 0x9d, 0xd1, 0xfd, 0xf5,
+ 0xc6, 0xc7, 0x15, 0xb6, 0xd6, 0x79, 0x3e, 0x87,
+ 0x9f, 0xbe, 0x82, 0xee, 0x1a, 0x37, 0xe6, 0xfe,
+ 0x1b, 0xba, 0xa9, 0x7e, 0x4c, 0x61, 0x7f, 0x15,
+ 0x5e, 0xa9, 0xbf, 0x2a, 0xcd, 0x7d, 0x5f, 0x9b,
+ 0xac, 0xfe, 0xb7, 0x38, 0xf9, 0x39, 0xa2, 0x50,
+ 0xfe, 0xb5, 0x26, 0xf3, 0xaa, 0x32, 0x29, 0xd2,
+ 0x9f, 0x1d, 0xc4, 0x4f, 0xaf, 0x54, 0xe6, 0x84,
+ 0xa6, 0x99, 0x8b, 0x85, 0x03, 0xdb, 0xe5, 0xaf,
+ 0x87, 0x10, 0x77, 0xc5, 0xb2, 0xd4, 0x9f, 0x45,
+ 0x91, 0x67, 0xce, 0x49, 0x4d, 0xf4, 0x3c, 0xe2,
+ 0xc2, 0x39, 0x4f, 0xf1, 0xeb, 0xc7, 0x5c, 0x78,
+ 0xd8, 0x0b, 0x7d, 0x5e, 0xf3, 0xbe, 0xb5, 0x3f,
+ 0xca, 0x6f, 0x2c, 0xc0, 0xb7, 0x7e, 0x5d, 0x4e,
+ 0xf7, 0x3d, 0x10, 0x5f, 0x7b, 0xa9, 0x85, 0xfe,
+ 0x1e, 0x8a, 0x1f, 0xf2, 0xcf, 0x53, 0x7e, 0xad,
+ 0xe3, 0x9c, 0xe6, 0xdf, 0x96, 0x7e, 0xdd, 0xc7,
+ 0x2f, 0x94, 0x3f, 0xbd, 0x00, 0xbe, 0x8a, 0xba,
+ 0x59, 0xb0, 0x52, 0xf7, 0xf7, 0x80, 0x39, 0xf2,
+ 0xba, 0x03, 0xf4, 0x75, 0xa6, 0xff, 0x52, 0xd6,
+ 0xb7, 0x57, 0xbf, 0xff, 0xec, 0xc0, 0x47, 0xbe,
+ 0xca, 0xd6, 0x7d, 0x0e, 0x8e, 0xb0, 0x70, 0x75,
+ 0x89, 0xf2, 0x63, 0x0f, 0xe7, 0x7a, 0x20, 0x4b,
+ 0xf1, 0x75, 0x0d, 0x3d, 0xee, 0xdb, 0xb1, 0x1a,
+ 0xfc, 0x18, 0x75, 0xfd, 0xcd, 0x84, 0x7c, 0xf8,
+ 0x30, 0xce, 0xcd, 0x69, 0xb3, 0xfc, 0xc4, 0x60,
+ 0xf2, 0xb6, 0x81, 0xbb, 0xea, 0xeb, 0x36, 0xf2,
+ 0xe5, 0xa6, 0x13, 0x3a, 0x6a, 0xb6, 0xa0, 0xd3,
+ 0x45, 0x4b, 0x54, 0x5f, 0x93, 0xe8, 0x0b, 0x2b,
+ 0x4d, 0x94, 0x3e, 0xf9, 0x11, 0x77, 0x01, 0xbd,
+ 0x98, 0xdb, 0x98, 0x35, 0x9f, 0x59, 0x68, 0xb6,
+ 0x0f, 0x9f, 0x60, 0x46, 0xbd, 0x6f, 0xc1, 0xbb,
+ 0x19, 0xf7, 0x68, 0x6a, 0x31, 0xef, 0xeb, 0xdd,
+ 0x1b, 0xdf, 0x68, 0x7e, 0x47, 0x2f, 0xa7, 0x5d,
+ 0x93, 0xbe, 0x0d, 0x61, 0x6e, 0x34, 0x75, 0x8c,
+ 0xce, 0x77, 0x12, 0xfe, 0x23, 0x31, 0x5c, 0xfd,
+ 0x55, 0x2f, 0xe6, 0x91, 0x35, 0x3e, 0xed, 0x02,
+ 0x1f, 0xc7, 0x7d, 0xd7, 0x0b, 0x7a, 0xa6, 0xcf,
+ 0xb3, 0x8e, 0x26, 0xf9, 0x7a, 0x7f, 0x9d, 0x34,
+ 0x0b, 0xa9, 0x7b, 0x35, 0xcf, 0xfd, 0x04, 0x1d,
+ 0x8f, 0x68, 0xa1, 0x7a, 0xfe, 0xf2, 0x8c, 0x85,
+ 0xac, 0x53, 0xba, 0x8f, 0x20, 0xf6, 0xb7, 0xee,
+ 0xa1, 0xe6, 0x51, 0x57, 0x88, 0xc3, 0x2a, 0xe1,
+ 0xca, 0x97, 0x3d, 0xbc, 0xef, 0xc3, 0x0c, 0x17,
+ 0xf8, 0x04, 0x7c, 0x6d, 0x8f, 0x78, 0xfd, 0x9e,
+ 0xd0, 0x87, 0x7b, 0xbe, 0x70, 0x5d, 0xfd, 0x7b,
+ 0xd8, 0x75, 0x0b, 0x53, 0x2e, 0xea, 0xf7, 0x0a,
+ 0x4f, 0xce, 0xbf, 0xc7, 0x69, 0x74, 0xc5, 0xdc,
+ 0x43, 0xef, 0x67, 0x34, 0x41, 0x37, 0xcc, 0x51,
+ 0xea, 0xd4, 0xe0, 0x2f, 0xf0, 0xc5, 0xa6, 0x19,
+ 0xfd, 0xb2, 0x73, 0x6d, 0xcd, 0xe3, 0x67, 0x30,
+ 0x2f, 0x5a, 0x57, 0x46, 0xef, 0x6f, 0x49, 0x3d,
+ 0xcd, 0x6b, 0xa0, 0x7a, 0x95, 0x42, 0x9c, 0x0c,
+ 0x19, 0x88, 0x8f, 0x33, 0xc9, 0xc4, 0xa5, 0x63,
+ 0xe4, 0x52, 0xf8, 0x35, 0xe2, 0xea, 0xc0, 0x36,
+ 0xd5, 0xc3, 0x6c, 0xf4, 0xb2, 0xdd, 0x39, 0xf9,
+ 0xf1, 0xcf, 0xc8, 0x83, 0xc4, 0x18, 0xf5, 0x6b,
+ 0x81, 0xe8, 0xad, 0xe3, 0x63, 0x2f, 0x78, 0x06,
+ 0x7a, 0x7c, 0xc2, 0x4d, 0x7e, 0xcf, 0x3f, 0xca,
+ 0x42, 0xed, 0x92, 0xf2, 0xf7, 0xcb, 0xf1, 0x65,
+ 0x67, 0x22, 0x74, 0x3f, 0x2b, 0x99, 0xe3, 0x1e,
+ 0xa8, 0xab, 0x7e, 0xaf, 0x22, 0xf9, 0x12, 0x31,
+ 0x5f, 0x7e, 0xc9, 0x8b, 0xdf, 0x95, 0x2e, 0x4e,
+ 0x55, 0x7d, 0xbd, 0xc6, 0xef, 0x09, 0x23, 0x8b,
+ 0x69, 0xfe, 0x76, 0x9d, 0x38, 0x1b, 0xb8, 0x52,
+ 0xf9, 0xfa, 0x86, 0xfc, 0x72, 0xab, 0xae, 0xf5,
+ 0x75, 0xe2, 0x5c, 0xcf, 0x94, 0x56, 0x3f, 0x33,
+ 0x89, 0xfe, 0xfb, 0xdd, 0x06, 0xd5, 0xe3, 0x14,
+ 0x7c, 0x48, 0x2f, 0x47, 0xe9, 0xd5, 0x01, 0xf4,
+ 0x3a, 0xfa, 0xde, 0x1e, 0xf8, 0x22, 0x74, 0xae,
+ 0x54, 0x26, 0x73, 0xfd, 0x7f, 0xe6, 0x5d, 0x37,
+ 0xdc, 0xa5, 0x37, 0x4e, 0xdc, 0x53, 0xf3, 0x4a,
+ 0x8a, 0xbf, 0xed, 0xe8, 0xc8, 0x80, 0x10, 0xde,
+ 0x63, 0x02, 0xab, 0x58, 0xf0, 0xa9, 0x2b, 0x7f,
+ 0xf9, 0x01, 0xba, 0xf7, 0xf0, 0xb1, 0xea, 0x61,
+ 0x3e, 0x73, 0xd2, 0xa6, 0xcd, 0x75, 0x3e, 0x13,
+ 0xf1, 0x29, 0xd5, 0x37, 0xeb, 0xf7, 0x03, 0x47,
+ 0xe2, 0xbe, 0x7b, 0xb0, 0xfa, 0x8f, 0x59, 0xe8,
+ 0xd8, 0xcd, 0x8e, 0xea, 0x1f, 0xa3, 0x88, 0xa3,
+ 0xf0, 0x7d, 0xf8, 0x4c, 0x53, 0x88, 0x5e, 0xde,
+ 0x3a, 0x2a, 0x23, 0xe8, 0x87, 0xbe, 0x8d, 0xcd,
+ 0xf4, 0x82, 0xdf, 0xa2, 0xee, 0x54, 0xf8, 0x4c,
+ 0xfd, 0x54, 0x22, 0xfd, 0xc8, 0xaa, 0xad, 0xc4,
+ 0x89, 0x39, 0x48, 0x9d, 0xed, 0xb0, 0x53, 0xf5,
+ 0x79, 0x23, 0xf9, 0x5b, 0x39, 0x4d, 0xf9, 0x53,
+ 0x93, 0x7d, 0x44, 0xfb, 0x69, 0x5e, 0xdd, 0x93,
+ 0x3e, 0xfa, 0xcc, 0xa3, 0x29, 0xf0, 0x37, 0xcc,
+ 0xf7, 0x36, 0xbf, 0x46, 0xa7, 0x4d, 0x71, 0xe6,
+ 0x5d, 0x1e, 0xeb, 0x55, 0x5f, 0x97, 0x72, 0x7f,
+ 0x4f, 0x83, 0xa4, 0x77, 0xdd, 0xd3, 0x2c, 0xbc,
+ 0x1d, 0xa3, 0xfa, 0xe5, 0x4a, 0x5e, 0xf6, 0x9c,
+ 0x1e, 0x05, 0xef, 0x86, 0x2f, 0x5d, 0x11, 0xc3,
+ 0x9c, 0xc9, 0x34, 0x66, 0x3f, 0x77, 0x7f, 0xd3,
+ 0x3c, 0xdf, 0x15, 0x3f, 0x59, 0xc7, 0x53, 0xeb,
+ 0x39, 0x87, 0x4e, 0x0e, 0xf5, 0xd0, 0xef, 0xb9,
+ 0x4b, 0xa8, 0xa3, 0xbb, 0xf3, 0x42, 0xe1, 0x13,
+ 0xf8, 0xdd, 0xb7, 0x4b, 0x86, 0xf6, 0xdb, 0x95,
+ 0xb8, 0xfa, 0x28, 0x5a, 0xfd, 0xef, 0x7c, 0xf6,
+ 0x9b, 0x53, 0x5e, 0xf3, 0xb6, 0xf6, 0xdc, 0xc7,
+ 0x4e, 0x5f, 0xcd, 0x17, 0x23, 0x99, 0x93, 0x2f,
+ 0x1a, 0x72, 0x14, 0x3e, 0x89, 0xfb, 0x5e, 0xe4,
+ 0xa4, 0x7e, 0xb8, 0x11, 0xf5, 0x75, 0x6a, 0x67,
+ 0xd5, 0xc3, 0xad, 0xe8, 0x46, 0x72, 0x57, 0xd5,
+ 0xd7, 0x10, 0xfa, 0xf9, 0x87, 0xd3, 0x54, 0xdf,
+ 0x0f, 0x91, 0xdf, 0xe1, 0x71, 0xf2, 0x17, 0xad,
+ 0xf3, 0x2d, 0x38, 0x86, 0xcd, 0xd1, 0xfe, 0x78,
+ 0x5f, 0x70, 0x9c, 0xf2, 0xf7, 0x7e, 0xb7, 0xff,
+ 0x03
+};
+
+/** Length of the pre-compressed data using zlib algorithm */
+#define COMP_ZLIB_SIZE 3771
+
+/** Pre-compressed data using zlib algorithm */
+static uint8_t compressed_text_zlib[COMP_ZLIB_SIZE] = {
+ 0x78, 0x9c, 0x35, 0x99, 0x77, 0x5c, 0xd6, 0xe5,
+ 0x1a, 0xc6, 0x1f, 0x70, 0xa2, 0xe1, 0x0c, 0x50,
+ 0xb4, 0x70, 0xe7, 0x44, 0xc5, 0x34, 0x27, 0xe6,
+ 0x04, 0x15, 0x51, 0x73, 0x1e, 0x51, 0x8e, 0x60,
+ 0x42, 0xe2, 0x08, 0xf5, 0x68, 0x99, 0x24, 0xb8,
+ 0xd3, 0x24, 0xad, 0xe3, 0x9e, 0xb8, 0x11, 0x50,
+ 0x12, 0x15, 0xd2, 0xd0, 0xcc, 0x2d, 0x66, 0x28,
+ 0xe0, 0x20, 0x89, 0x5c, 0xe4, 0xc4, 0x81, 0x0b,
+ 0xc5, 0x71, 0xce, 0xe7, 0xf9, 0x5e, 0xf5, 0x47,
+ 0x57, 0x57, 0xbc, 0xef, 0xef, 0xf7, 0x8c, 0xfb,
+ 0xbe, 0xee, 0xeb, 0xbe, 0x5f, 0x63, 0xfe, 0xff,
+ 0x4f, 0x68, 0xb7, 0x6f, 0x8d, 0xfd, 0xe7, 0x78,
+ 0x6d, 0x0b, 0xa3, 0x57, 0xb4, 0x87, 0x4f, 0xf2,
+ 0xb5, 0xd0, 0x2f, 0xe7, 0x47, 0xf8, 0xdb, 0x61,
+ 0x16, 0x7a, 0x2e, 0xb9, 0x05, 0xbf, 0x37, 0xc4,
+ 0x42, 0x03, 0x8f, 0xef, 0xe0, 0x67, 0x22, 0x2c,
+ 0xec, 0x1f, 0x52, 0x0d, 0x1e, 0x7f, 0xd1, 0x42,
+ 0x41, 0xa7, 0x5d, 0xf0, 0xa2, 0xf7, 0x2d, 0x5c,
+ 0xdf, 0x7e, 0x0d, 0xde, 0xfd, 0xaa, 0x85, 0x08,
+ 0xf3, 0x6f, 0xf8, 0xe1, 0x60, 0x0b, 0x03, 0x4a,
+ 0xd7, 0x80, 0x17, 0x7e, 0x60, 0xa1, 0x5a, 0x6d,
+ 0x57, 0xf8, 0xad, 0xdb, 0x16, 0x6e, 0x9e, 0x59,
+ 0x0c, 0x8f, 0xee, 0x62, 0x61, 0x4a, 0xf9, 0x72,
+ 0xf0, 0xa1, 0x75, 0x2d, 0x78, 0xbe, 0x58, 0x03,
+ 0x1f, 0x39, 0xd1, 0x42, 0x8b, 0xc3, 0x39, 0xf0,
+ 0xf5, 0xac, 0xe7, 0xd4, 0xd1, 0x6d, 0xf0, 0x55,
+ 0xa5, 0x2c, 0x84, 0x14, 0x85, 0xc1, 0xbd, 0x86,
+ 0x5b, 0x70, 0xee, 0xbb, 0x1a, 0x9e, 0xc2, 0xe7,
+ 0x9b, 0xef, 0x6f, 0x02, 0x1f, 0x72, 0xc3, 0xc2,
+ 0xa6, 0x66, 0x13, 0xf4, 0xbe, 0x6c, 0x0b, 0x75,
+ 0x83, 0xc6, 0xc2, 0x87, 0xcf, 0xb7, 0x50, 0x62,
+ 0x57, 0x5b, 0xb8, 0xdb, 0x26, 0xbe, 0x16, 0xcb,
+ 0x3a, 0xcd, 0xed, 0xa1, 0x16, 0x02, 0x67, 0x8f,
+ 0x84, 0x67, 0xc5, 0x5a, 0x88, 0x3f, 0x90, 0x0f,
+ 0x4f, 0x5a, 0x68, 0xe1, 0xcc, 0xd9, 0xfe, 0xf0,
+ 0xe3, 0x57, 0x2c, 0xac, 0x9e, 0xb2, 0x0a, 0xbe,
+ 0x67, 0x1d, 0xe7, 0x53, 0xe7, 0x31, 0xbc, 0xe2,
+ 0x5c, 0x0b, 0xe5, 0x8e, 0xb6, 0x82, 0xcf, 0x7c,
+ 0xcf, 0xc2, 0x7f, 0x0a, 0x97, 0xc2, 0x3b, 0xf1,
+ 0xf7, 0x9a, 0x4f, 0x23, 0xe1, 0x1d, 0x0e, 0x58,
+ 0xc8, 0x89, 0x9e, 0x06, 0x9f, 0xf7, 0xd0, 0x42,
+ 0xb8, 0x7b, 0x2d, 0xad, 0xef, 0x4f, 0x0b, 0xde,
+ 0x27, 0x4b, 0xc2, 0x37, 0x64, 0xf1, 0x98, 0xc3,
+ 0x7a, 0xff, 0xd5, 0x42, 0x0b, 0xaf, 0x76, 0x94,
+ 0x85, 0x87, 0x12, 0x2f, 0xde, 0x33, 0x8b, 0xc3,
+ 0x1d, 0xb8, 0xc7, 0xe9, 0xc7, 0xeb, 0xc0, 0x97,
+ 0x37, 0xb4, 0x70, 0x67, 0x9f, 0xf6, 0x9b, 0xd3,
+ 0xd3, 0xc2, 0x98, 0xcc, 0x3b, 0xf0, 0x65, 0x5f,
+ 0x58, 0x58, 0x74, 0x51, 0xcf, 0x6b, 0xbd, 0xd9,
+ 0xc2, 0xb0, 0x21, 0x03, 0xe1, 0x0d, 0xfd, 0x2d,
+ 0xdc, 0xeb, 0xcc, 0x7b, 0xcc, 0x8f, 0xc4, 0x57,
+ 0xbb, 0x2c, 0xfe, 0xbf, 0x29, 0x62, 0x9d, 0x5e,
+ 0xab, 0x2e, 0xc1, 0x13, 0x7f, 0xb3, 0xd0, 0x3e,
+ 0x7a, 0x1c, 0x7c, 0xc0, 0x22, 0xbe, 0x3f, 0x5f,
+ 0xfb, 0xab, 0x42, 0x5c, 0x5e, 0x8e, 0xd4, 0x7d,
+ 0x0c, 0xe3, 0x7d, 0x8e, 0x2f, 0xde, 0xc2, 0xdd,
+ 0x89, 0xf7, 0x37, 0x5f, 0x2e, 0x81, 0x77, 0xf9,
+ 0x99, 0xd7, 0x7c, 0x7c, 0x18, 0xfe, 0xb2, 0x9e,
+ 0x85, 0xb8, 0xdc, 0x0d, 0xf0, 0xb6, 0x47, 0xf8,
+ 0xfb, 0x9c, 0xbf, 0xe1, 0x0d, 0xb6, 0x58, 0x38,
+ 0x97, 0xaf, 0x78, 0xfc, 0xbb, 0xa3, 0x85, 0x12,
+ 0x0f, 0xb5, 0xde, 0xf6, 0xbf, 0x5a, 0x88, 0x48,
+ 0x51, 0x7c, 0x2d, 0x39, 0x61, 0x61, 0xf2, 0xa8,
+ 0xd6, 0xf0, 0x4b, 0xe5, 0x2d, 0x1c, 0x74, 0x71,
+ 0x80, 0x07, 0x11, 0x1f, 0xf3, 0x17, 0xb6, 0x81,
+ 0xbf, 0x20, 0xfe, 0x87, 0x25, 0xee, 0x85, 0xff,
+ 0xcc, 0x7a, 0x62, 0x93, 0x3f, 0x84, 0xaf, 0x24,
+ 0x6f, 0xcb, 0x27, 0x2b, 0x9e, 0x97, 0x7e, 0x6c,
+ 0x61, 0xd7, 0xe0, 0x77, 0xe0, 0x77, 0x5b, 0x5a,
+ 0x78, 0x74, 0x43, 0xef, 0xdf, 0xdd, 0x97, 0xf5,
+ 0x4d, 0xe7, 0x5e, 0xcd, 0x03, 0xe2, 0xff, 0x79,
+ 0x59, 0xe5, 0x87, 0xf9, 0xc5, 0xfe, 0xfb, 0xf6,
+ 0xec, 0xb3, 0xd0, 0x0d, 0xdc, 0x5b, 0xdb, 0x99,
+ 0x33, 0xe0, 0x79, 0x65, 0x2c, 0xa4, 0x2d, 0x62,
+ 0xdd, 0xa6, 0x6c, 0xb8, 0x85, 0xd3, 0x3d, 0xb5,
+ 0xfe, 0xb3, 0xc4, 0x9d, 0x6f, 0xf3, 0x05, 0x70,
+ 0x1f, 0x47, 0x0b, 0x9f, 0xfa, 0xec, 0x83, 0x1f,
+ 0x58, 0x66, 0xc1, 0xa3, 0x92, 0xf4, 0xa3, 0x36,
+ 0xef, 0x7f, 0xbf, 0x23, 0x3a, 0x61, 0x62, 0x89,
+ 0xf7, 0xea, 0x3e, 0xbc, 0xc7, 0x4c, 0xbb, 0xcf,
+ 0xf7, 0x8f, 0x91, 0x77, 0xe6, 0xd8, 0x6b, 0x0b,
+ 0xef, 0x36, 0x7a, 0x0e, 0x9f, 0x44, 0x5c, 0x0f,
+ 0xf5, 0x5e, 0xa6, 0xbf, 0xcf, 0xb4, 0x10, 0xb8,
+ 0xab, 0x00, 0xde, 0x8b, 0x38, 0x9a, 0x9a, 0xa9,
+ 0xf5, 0x8e, 0x6c, 0x6a, 0xa1, 0xdd, 0x0c, 0x74,
+ 0xc7, 0x84, 0xff, 0xc5, 0xf3, 0xef, 0x73, 0x8f,
+ 0xa6, 0x0e, 0xf1, 0x55, 0x31, 0x4d, 0xf1, 0xb1,
+ 0x8e, 0xfd, 0x39, 0x55, 0x54, 0xfc, 0xe6, 0xfc,
+ 0xcb, 0x42, 0xdc, 0x95, 0x14, 0x78, 0x56, 0x80,
+ 0x85, 0x84, 0x6c, 0xc5, 0x5f, 0x41, 0x45, 0x0b,
+ 0xd5, 0x02, 0xbf, 0x81, 0x57, 0xa8, 0x69, 0x21,
+ 0x31, 0x56, 0xf9, 0xd2, 0x8e, 0xe7, 0xb5, 0x98,
+ 0x8e, 0x0e, 0x9a, 0x33, 0x37, 0x2d, 0x5c, 0x2f,
+ 0x97, 0x0b, 0xef, 0x7b, 0xcc, 0x82, 0xef, 0x85,
+ 0x67, 0x70, 0x1f, 0xf2, 0xd1, 0xe5, 0x08, 0x79,
+ 0x65, 0x86, 0x71, 0xce, 0x3f, 0xf7, 0x79, 0x05,
+ 0xff, 0x1c, 0x5e, 0x6b, 0xf6, 0x23, 0x78, 0x55,
+ 0xf8, 0xfe, 0x7f, 0xf4, 0x62, 0xf5, 0x60, 0x0b,
+ 0x5f, 0x37, 0xfb, 0x03, 0xfe, 0x94, 0x7d, 0x96,
+ 0xfd, 0xe8, 0x27, 0xf8, 0x55, 0xce, 0xdd, 0x3d,
+ 0x41, 0xeb, 0xad, 0xce, 0xf7, 0x23, 0x17, 0x29,
+ 0x1e, 0x6a, 0xa3, 0x1f, 0x85, 0x43, 0x89, 0x5b,
+ 0xd3, 0x80, 0xf5, 0x17, 0x56, 0x09, 0x84, 0x6f,
+ 0x26, 0x0e, 0xab, 0xcd, 0x54, 0x7e, 0x54, 0x26,
+ 0xbf, 0xf7, 0x0e, 0xa4, 0x6e, 0x98, 0x34, 0xd6,
+ 0xed, 0x7f, 0x53, 0xeb, 0xbd, 0x95, 0x69, 0x61,
+ 0x45, 0xa5, 0x53, 0x70, 0x2f, 0xf6, 0xd9, 0x7a,
+ 0x65, 0x03, 0xf8, 0x15, 0xea, 0xcc, 0xd4, 0xb4,
+ 0x1f, 0xe0, 0xbd, 0x79, 0x4e, 0x87, 0x3b, 0x3b,
+ 0xe0, 0x1f, 0xba, 0x5b, 0x48, 0x1d, 0xee, 0x02,
+ 0xff, 0xf7, 0x78, 0x9e, 0x5f, 0x6d, 0x3a, 0xfc,
+ 0x0b, 0xee, 0xff, 0xe5, 0xb1, 0x18, 0xf8, 0xe9,
+ 0x5e, 0x16, 0xc6, 0xd6, 0xd7, 0x7a, 0x4b, 0x10,
+ 0x3f, 0x09, 0x61, 0xd2, 0x8f, 0xb9, 0x9c, 0x6f,
+ 0xcd, 0x37, 0x15, 0xe0, 0x8d, 0x5e, 0x5a, 0x88,
+ 0xfb, 0x9e, 0x7b, 0x36, 0xee, 0xe0, 0xc0, 0x30,
+ 0xe2, 0xc4, 0x6c, 0x8c, 0xb3, 0x70, 0x30, 0xda,
+ 0x19, 0x1e, 0x7e, 0x8f, 0xfd, 0xd6, 0x24, 0x6e,
+ 0xcd, 0xa4, 0xeb, 0x16, 0xda, 0xb9, 0x51, 0xc7,
+ 0x4c, 0x71, 0x27, 0x0b, 0xc3, 0x52, 0x74, 0x9e,
+ 0x99, 0xec, 0xb3, 0xd5, 0xdb, 0xc9, 0xf0, 0x25,
+ 0xe4, 0xe3, 0xee, 0x4a, 0xaa, 0xb7, 0x89, 0x9c,
+ 0xcb, 0xb5, 0x04, 0xea, 0x9a, 0x79, 0xc8, 0xb9,
+ 0xdd, 0xb8, 0xbe, 0x1c, 0xbe, 0x8e, 0xbc, 0xe8,
+ 0x33, 0x86, 0x7d, 0x18, 0x2f, 0xe2, 0xd4, 0x45,
+ 0x75, 0xd1, 0xf4, 0x68, 0x6e, 0xe1, 0x75, 0x4f,
+ 0xce, 0xc5, 0xcc, 0x44, 0x37, 0xfc, 0x3d, 0x95,
+ 0x2f, 0xdb, 0xe1, 0xa5, 0x17, 0xe8, 0xbe, 0xc3,
+ 0x1b, 0x5b, 0xc8, 0x4a, 0x3c, 0x0e, 0x4f, 0x42,
+ 0x2f, 0x17, 0xcc, 0xb9, 0x02, 0x6f, 0xc0, 0x79,
+ 0x0f, 0x38, 0xbe, 0x12, 0xee, 0x5b, 0xd9, 0xc2,
+ 0xaa, 0x92, 0xda, 0x5f, 0x19, 0xee, 0x2b, 0xf5,
+ 0x19, 0x71, 0x66, 0xce, 0x53, 0x8f, 0xca, 0x3d,
+ 0xca, 0x80, 0x57, 0xd9, 0x69, 0x21, 0xd6, 0xc7,
+ 0x1b, 0xde, 0x95, 0xfa, 0x93, 0xf0, 0x46, 0xfa,
+ 0x12, 0x40, 0x7d, 0x7c, 0xbc, 0xaf, 0x1f, 0xfc,
+ 0xbb, 0x78, 0x0b, 0x7e, 0x83, 0xfd, 0xe0, 0x17,
+ 0xd7, 0x5a, 0x68, 0x92, 0xcc, 0xbe, 0xcc, 0x6a,
+ 0x74, 0xd9, 0x69, 0xad, 0xf2, 0xb3, 0x33, 0xfb,
+ 0xce, 0x7b, 0x4c, 0x1e, 0x99, 0x92, 0xac, 0xe7,
+ 0x68, 0xa1, 0xfc, 0x43, 0x0b, 0xea, 0x40, 0xe2,
+ 0xda, 0x3d, 0xf0, 0x33, 0xc4, 0xc7, 0x26, 0x7f,
+ 0x7d, 0x7e, 0x76, 0x6f, 0x0b, 0x91, 0xeb, 0xa4,
+ 0x6f, 0x4d, 0x88, 0x87, 0x1d, 0xa1, 0xe4, 0xad,
+ 0xc9, 0xfa, 0xd4, 0xc2, 0x94, 0x9b, 0xd2, 0x5f,
+ 0x1f, 0xce, 0x35, 0xd7, 0x4f, 0xf5, 0xda, 0x8b,
+ 0xbc, 0xad, 0xbe, 0x50, 0xfa, 0x73, 0xaf, 0x83,
+ 0x85, 0x9e, 0x0b, 0x15, 0x1f, 0x47, 0x89, 0xf7,
+ 0x66, 0x9e, 0x9f, 0xc3, 0x9f, 0xa1, 0x8b, 0x35,
+ 0x3c, 0xa9, 0x23, 0xa6, 0x36, 0xeb, 0x9d, 0xd3,
+ 0xea, 0x3c, 0x7c, 0xec, 0x1c, 0x0b, 0x45, 0x73,
+ 0x88, 0x33, 0x93, 0x44, 0x7c, 0x95, 0xf2, 0x55,
+ 0x3c, 0x16, 0x47, 0xcf, 0x9e, 0x34, 0x8d, 0xd0,
+ 0xfb, 0xda, 0x59, 0x28, 0x5b, 0xb6, 0x0a, 0x7c,
+ 0x5f, 0x7d, 0x9e, 0x1f, 0x58, 0x03, 0xbe, 0x9f,
+ 0x7a, 0x32, 0x37, 0xde, 0x0b, 0x7e, 0x24, 0xc1,
+ 0x82, 0xf7, 0x33, 0x1f, 0x78, 0x09, 0xea, 0xf3,
+ 0xa3, 0xa8, 0x74, 0x3d, 0x0f, 0x3d, 0xce, 0xde,
+ 0x29, 0x1e, 0xfa, 0x99, 0x85, 0x16, 0xbf, 0xc9,
+ 0xcf, 0xa5, 0x14, 0x59, 0x58, 0xd9, 0xfd, 0xbf,
+ 0xf0, 0xbe, 0xf8, 0xc1, 0x4d, 0x6d, 0x7a, 0xc0,
+ 0xaf, 0x73, 0xef, 0xef, 0x36, 0x26, 0x0e, 0x4c,
+ 0x66, 0x28, 0xcf, 0x1b, 0x2e, 0x3f, 0xf2, 0x21,
+ 0xf7, 0xfd, 0xbc, 0xcf, 0x54, 0x78, 0x20, 0x75,
+ 0x37, 0xe6, 0xaf, 0x07, 0xf0, 0x02, 0xce, 0xa1,
+ 0xc2, 0x0c, 0x74, 0xd5, 0xc4, 0x12, 0x67, 0xcf,
+ 0x0a, 0xcf, 0x68, 0xbd, 0xac, 0x63, 0xc9, 0x48,
+ 0xd5, 0x17, 0x07, 0x74, 0xf5, 0x60, 0xb6, 0xea,
+ 0xdf, 0x1e, 0xea, 0x6d, 0x6a, 0x42, 0x12, 0xbc,
+ 0x2b, 0xe7, 0xf9, 0xe5, 0x78, 0xf1, 0x97, 0x1b,
+ 0x2d, 0xf4, 0x48, 0x57, 0xbc, 0x8d, 0xe6, 0xbd,
+ 0x7f, 0x1b, 0xee, 0xd9, 0xd4, 0x46, 0x47, 0xce,
+ 0x0e, 0xc6, 0x27, 0x1a, 0x77, 0xde, 0xfb, 0xc7,
+ 0xd4, 0xae, 0xfa, 0x3c, 0x79, 0x1e, 0x56, 0x57,
+ 0x7a, 0x32, 0x7d, 0xb7, 0x05, 0xbf, 0x09, 0xed,
+ 0xc4, 0x79, 0x6e, 0xbf, 0x6a, 0xdd, 0xe1, 0x75,
+ 0x39, 0xc7, 0xdd, 0xf1, 0xda, 0x8f, 0xef, 0x18,
+ 0x0b, 0xb3, 0x47, 0xaa, 0x1e, 0xf5, 0xc7, 0x87,
+ 0xb7, 0xed, 0x84, 0x6e, 0x98, 0x25, 0xf8, 0xcd,
+ 0xa2, 0x24, 0xf9, 0x91, 0xe6, 0xf8, 0xca, 0x95,
+ 0x4b, 0x3b, 0xc3, 0x17, 0x13, 0xd7, 0x5e, 0xde,
+ 0xd2, 0x83, 0xc1, 0xc4, 0x55, 0xd7, 0x4b, 0xd2,
+ 0xd3, 0xfb, 0xd4, 0xa9, 0xd5, 0x4f, 0xb8, 0x57,
+ 0x93, 0xca, 0x3a, 0x6e, 0x47, 0x6b, 0xfd, 0x79,
+ 0xc4, 0x5d, 0xdd, 0x56, 0x6f, 0xe0, 0xe9, 0xd4,
+ 0xe1, 0xd2, 0x9d, 0x0f, 0xc2, 0x9f, 0x46, 0xf3,
+ 0xf9, 0x00, 0xf1, 0x6b, 0xf8, 0xa1, 0xc0, 0xc6,
+ 0x31, 0x5a, 0x2f, 0x79, 0x32, 0xe2, 0x73, 0xf9,
+ 0xf1, 0x38, 0xf2, 0x33, 0x23, 0x43, 0xeb, 0x49,
+ 0xe2, 0x5e, 0xce, 0x8e, 0x55, 0x3e, 0xcc, 0xe1,
+ 0x3c, 0x5f, 0x3e, 0x95, 0x3f, 0x1d, 0x40, 0xde,
+ 0x5f, 0x72, 0xc2, 0x27, 0x99, 0x96, 0x1f, 0x59,
+ 0xb8, 0xb5, 0x47, 0xfa, 0x9b, 0x95, 0x6a, 0x21,
+ 0x64, 0x82, 0xea, 0xdd, 0x4c, 0xf6, 0xf1, 0x69,
+ 0xa6, 0xea, 0xf9, 0x8a, 0x44, 0x0b, 0xd9, 0xb2,
+ 0x13, 0x26, 0x16, 0xbd, 0x98, 0xb7, 0x4a, 0x7e,
+ 0x72, 0x32, 0x7a, 0x33, 0xaa, 0x40, 0xfe, 0x2a,
+ 0x9c, 0x7c, 0x18, 0x3a, 0x51, 0xfd, 0xcb, 0x02,
+ 0x78, 0xc7, 0x14, 0xf9, 0x9f, 0x60, 0x74, 0x21,
+ 0x2a, 0x59, 0xfd, 0x4f, 0x03, 0xf4, 0xad, 0x6b,
+ 0xf8, 0x76, 0x78, 0x2e, 0x7d, 0x4b, 0xc4, 0x12,
+ 0xf2, 0xd8, 0x6c, 0xa1, 0x0e, 0xcc, 0x8f, 0x52,
+ 0xfd, 0xf4, 0x27, 0xbe, 0x07, 0x87, 0x69, 0xfd,
+ 0xff, 0xc2, 0xd7, 0x67, 0xe6, 0x29, 0x3e, 0xfa,
+ 0x8d, 0xb0, 0xb0, 0xe3, 0xc0, 0x13, 0x78, 0x02,
+ 0xf1, 0x36, 0x77, 0xb9, 0x07, 0x7c, 0x29, 0xfd,
+ 0x4a, 0x54, 0x82, 0xfa, 0x8b, 0x7b, 0xa5, 0x2d,
+ 0xb8, 0x1d, 0x20, 0xaf, 0xcc, 0x0d, 0xf2, 0x66,
+ 0xce, 0x4f, 0xda, 0xdf, 0xa3, 0x29, 0x16, 0x16,
+ 0x36, 0x92, 0x3e, 0xbd, 0x83, 0x0e, 0x85, 0xd4,
+ 0x95, 0x7f, 0x79, 0x44, 0xde, 0x45, 0x4c, 0x63,
+ 0x5d, 0xe6, 0x10, 0xf7, 0x12, 0xd1, 0xfd, 0x7b,
+ 0xf8, 0xcf, 0xe8, 0x40, 0x2d, 0x3f, 0xf9, 0xc7,
+ 0x57, 0xaa, 0x2f, 0xdd, 0xdc, 0xe0, 0x21, 0x9c,
+ 0xff, 0x1f, 0x7f, 0x49, 0x7f, 0x82, 0xf0, 0x0d,
+ 0x65, 0xd7, 0xe8, 0xef, 0x23, 0xd9, 0x4f, 0x62,
+ 0xa7, 0x41, 0xf0, 0x60, 0xfc, 0x52, 0x6a, 0x6f,
+ 0xe5, 0xd7, 0x2c, 0xf2, 0x75, 0x4f, 0xc8, 0xef,
+ 0xf0, 0xd1, 0xac, 0xbb, 0xa9, 0x9b, 0xf4, 0xb2,
+ 0x2d, 0x71, 0xf7, 0xf6, 0x6b, 0x74, 0xc8, 0x24,
+ 0x12, 0x8f, 0x1f, 0x94, 0xd2, 0x7d, 0xb9, 0x92,
+ 0x47, 0xff, 0xed, 0x22, 0x3f, 0x90, 0x80, 0x1f,
+ 0x2d, 0x95, 0xa4, 0xf3, 0xf4, 0x20, 0xde, 0x7f,
+ 0xaa, 0xa4, 0x7c, 0x1a, 0x47, 0xbf, 0x56, 0x31,
+ 0x1a, 0x5f, 0x6f, 0x3e, 0x24, 0x9e, 0x9b, 0x7c,
+ 0xae, 0xfd, 0x46, 0x73, 0x6f, 0x0f, 0x0e, 0xa9,
+ 0x1f, 0xe8, 0x43, 0x3c, 0xcf, 0x3f, 0x5b, 0x0c,
+ 0x9e, 0xc5, 0xfa, 0x93, 0xbf, 0xd8, 0xa4, 0xfd,
+ 0x51, 0x87, 0x5b, 0x5e, 0x55, 0x3f, 0xf3, 0x2b,
+ 0xf1, 0x70, 0xa2, 0x42, 0x25, 0xf8, 0x0d, 0x9e,
+ 0x53, 0xc7, 0x5d, 0xf9, 0x3a, 0x8b, 0x75, 0x85,
+ 0xf6, 0x6c, 0x06, 0xbf, 0x14, 0x65, 0x21, 0x7f,
+ 0x34, 0x79, 0x6e, 0x96, 0x05, 0x59, 0xa8, 0x14,
+ 0xb9, 0x1e, 0x7e, 0x8f, 0xba, 0xd3, 0xad, 0x2e,
+ 0x7d, 0x87, 0x69, 0x43, 0xbf, 0xe6, 0x58, 0x5b,
+ 0x7a, 0x79, 0x09, 0xbd, 0xcd, 0xeb, 0xa6, 0x7e,
+ 0xba, 0x0c, 0x7a, 0x58, 0x78, 0x91, 0x75, 0x9b,
+ 0xb5, 0xc4, 0xc5, 0x17, 0x09, 0xd2, 0x53, 0xa7,
+ 0x51, 0x16, 0xce, 0xfd, 0xad, 0xf3, 0xdb, 0xc2,
+ 0xf7, 0x96, 0xb5, 0x56, 0xfe, 0xfd, 0x8a, 0xef,
+ 0x78, 0x72, 0xe1, 0x02, 0xdc, 0x03, 0x3f, 0xb2,
+ 0x74, 0xa7, 0xf2, 0x31, 0x8e, 0x3e, 0x65, 0xe6,
+ 0x3e, 0xad, 0x27, 0x8d, 0xfe, 0x69, 0x6c, 0x30,
+ 0x7d, 0x83, 0xe9, 0x91, 0x66, 0x61, 0x67, 0x92,
+ 0xf8, 0x62, 0xea, 0xce, 0x99, 0xfa, 0x8a, 0x5f,
+ 0x57, 0x7c, 0xc8, 0x67, 0xaf, 0xa5, 0x3f, 0xfb,
+ 0xf1, 0x6d, 0x09, 0x81, 0xf4, 0x2d, 0xe6, 0x2b,
+ 0x3e, 0xbf, 0xf5, 0x2b, 0x74, 0xd7, 0x14, 0xcc,
+ 0xb2, 0xb0, 0xc7, 0xb1, 0x13, 0x3c, 0x16, 0xfd,
+ 0x48, 0xbb, 0xa7, 0xbf, 0xaf, 0x64, 0x1d, 0x49,
+ 0x13, 0xa5, 0xa7, 0x87, 0x8f, 0x5a, 0xf8, 0x68,
+ 0x83, 0xfc, 0x44, 0x43, 0xde, 0x7b, 0xba, 0x01,
+ 0x71, 0x61, 0x4a, 0xe3, 0x63, 0xd2, 0xdb, 0xd2,
+ 0x07, 0x9a, 0x20, 0x7c, 0xfd, 0xa9, 0xa6, 0x8a,
+ 0xff, 0x23, 0xf8, 0xf1, 0xa8, 0x31, 0xaa, 0x2f,
+ 0x81, 0xd4, 0xeb, 0xf1, 0x23, 0x74, 0xde, 0xc5,
+ 0xf0, 0x59, 0xf5, 0xc7, 0xcb, 0xcf, 0xff, 0x81,
+ 0xbf, 0xf0, 0x7b, 0x20, 0xff, 0x79, 0xb1, 0x86,
+ 0x85, 0xb0, 0x8e, 0xba, 0xcf, 0xaa, 0xd4, 0xd9,
+ 0xfc, 0xf7, 0x14, 0xcf, 0x19, 0xe8, 0x46, 0x66,
+ 0x9c, 0xf4, 0x63, 0x25, 0xfb, 0xf5, 0xdb, 0x27,
+ 0x3f, 0x38, 0x95, 0xe7, 0x7f, 0x6b, 0xe4, 0xc7,
+ 0x0c, 0xf9, 0x55, 0xeb, 0x88, 0xfe, 0x3e, 0x08,
+ 0xbd, 0x58, 0x55, 0x5c, 0xdf, 0xcf, 0xe2, 0x9c,
+ 0xab, 0x24, 0xc9, 0x2f, 0x9c, 0x23, 0xaf, 0xdf,
+ 0x0c, 0xd0, 0x7d, 0xdd, 0xb8, 0x6c, 0x21, 0xbb,
+ 0x09, 0xbe, 0xcc, 0x9c, 0xe1, 0xbe, 0xba, 0x79,
+ 0xcf, 0x86, 0x6f, 0xa2, 0x7e, 0x7f, 0xbf, 0x41,
+ 0xfd, 0xc6, 0x71, 0x74, 0xcc, 0xa1, 0xbd, 0xfc,
+ 0xf1, 0x5b, 0xfc, 0xfd, 0xa9, 0xdf, 0xd1, 0x4d,
+ 0xd3, 0x90, 0x3c, 0xcf, 0x3e, 0xfa, 0x1f, 0xb8,
+ 0x0b, 0xba, 0xef, 0x70, 0x5a, 0xf9, 0xd2, 0x85,
+ 0xe7, 0x34, 0xbd, 0x80, 0x6f, 0x33, 0xf7, 0xd1,
+ 0x51, 0x97, 0x4e, 0xf2, 0xdf, 0x8e, 0xd4, 0x83,
+ 0x79, 0xa3, 0x94, 0xff, 0x41, 0xf8, 0xe7, 0x88,
+ 0xad, 0xea, 0x3f, 0x2b, 0x87, 0x58, 0x08, 0x4e,
+ 0x96, 0x5f, 0x58, 0x37, 0xda, 0xc2, 0xe8, 0xfe,
+ 0xf2, 0x73, 0xaf, 0xf6, 0x5b, 0x98, 0xb8, 0x9e,
+ 0x3a, 0x6d, 0x66, 0x94, 0x60, 0x5b, 0x23, 0xd4,
+ 0x0f, 0x4d, 0x40, 0x37, 0x47, 0x5d, 0x95, 0x1f,
+ 0xd9, 0x4a, 0x7e, 0x7b, 0xcf, 0x96, 0xff, 0x89,
+ 0x27, 0x1e, 0x7a, 0xdc, 0x93, 0x9f, 0x48, 0xe7,
+ 0xbc, 0x27, 0x9c, 0x52, 0xbf, 0xd2, 0x0b, 0xbd,
+ 0xff, 0xfd, 0x03, 0xe5, 0x77, 0xdb, 0x15, 0x16,
+ 0x26, 0x67, 0x68, 0xbe, 0xb3, 0x0d, 0x7f, 0xd5,
+ 0x67, 0x90, 0xd6, 0x3f, 0x85, 0x78, 0xbc, 0xf3,
+ 0x50, 0x7e, 0x79, 0x3a, 0xfe, 0xf8, 0xd3, 0xb9,
+ 0xf2, 0xc7, 0x6f, 0x59, 0xf7, 0x8c, 0x1d, 0xf2,
+ 0xaf, 0x77, 0xef, 0xb2, 0xed, 0xc6, 0xf8, 0x0e,
+ 0xf3, 0x9a, 0xb8, 0x78, 0xd3, 0x4f, 0x7e, 0xf9,
+ 0x08, 0xf5, 0x63, 0xf5, 0x01, 0xf9, 0xed, 0x50,
+ 0xee, 0xf3, 0x97, 0xb5, 0xd2, 0x17, 0x67, 0xee,
+ 0xeb, 0x76, 0x9c, 0xe2, 0x6b, 0x21, 0xfa, 0x38,
+ 0xbd, 0x9f, 0xf4, 0xcf, 0x9f, 0xf9, 0x90, 0x57,
+ 0x34, 0xbe, 0xcb, 0x1c, 0x40, 0x0f, 0x5e, 0xc4,
+ 0x73, 0xef, 0x26, 0x82, 0x7d, 0x2f, 0x2c, 0x62,
+ 0x2e, 0x60, 0xc2, 0xe8, 0x2b, 0x4e, 0x6c, 0x50,
+ 0x3f, 0x1a, 0x80, 0x5f, 0xdc, 0xec, 0x7c, 0x1a,
+ 0xde, 0x9e, 0x7b, 0x76, 0xac, 0xa0, 0x79, 0xdc,
+ 0x13, 0xe6, 0x34, 0x49, 0x15, 0x39, 0x07, 0xa3,
+ 0xb1, 0xd4, 0xc4, 0xc5, 0x35, 0xf8, 0x0f, 0x57,
+ 0xce, 0xeb, 0x9e, 0x97, 0xfc, 0xf2, 0x63, 0xfc,
+ 0xad, 0xc3, 0xec, 0x46, 0xf0, 0xde, 0xcc, 0x7f,
+ 0x1e, 0x7e, 0x2f, 0xbd, 0x37, 0xe8, 0x4c, 0x85,
+ 0xce, 0xea, 0x4f, 0xbe, 0xed, 0xc3, 0xb6, 0xa7,
+ 0x69, 0xbd, 0xe9, 0xd4, 0xcf, 0x95, 0x09, 0xe8,
+ 0x96, 0xe9, 0xcb, 0xba, 0x03, 0x72, 0x75, 0x1e,
+ 0x6e, 0xec, 0x7f, 0x7d, 0x92, 0xe6, 0x5b, 0xdf,
+ 0x71, 0x2e, 0x59, 0x99, 0xaa, 0x57, 0x2b, 0xd0,
+ 0xdb, 0x32, 0x79, 0xca, 0x87, 0x20, 0xe2, 0xa2,
+ 0x58, 0x96, 0xf2, 0xf9, 0x1c, 0x7a, 0x9a, 0x5a,
+ 0xa0, 0xfe, 0x69, 0x1b, 0x7d, 0xb6, 0x73, 0x91,
+ 0xf2, 0xed, 0x20, 0xf3, 0x88, 0xd5, 0xdd, 0x88,
+ 0x4b, 0xf3, 0x0b, 0x71, 0x1a, 0x5f, 0x4c, 0xe7,
+ 0x51, 0x8e, 0xf5, 0x9f, 0xdf, 0xdc, 0x41, 0xef,
+ 0x63, 0x1d, 0x0e, 0xef, 0x7e, 0xa9, 0xed, 0x11,
+ 0x4f, 0x79, 0x4b, 0x55, 0xaf, 0x0b, 0xb8, 0xcf,
+ 0x0b, 0x6f, 0x14, 0x4f, 0xcf, 0xa8, 0x47, 0x5f,
+ 0x3f, 0x25, 0x8e, 0x8c, 0xf7, 0x57, 0x16, 0x8e,
+ 0xa5, 0xc8, 0x2f, 0xb9, 0xa2, 0x73, 0xdf, 0xc5,
+ 0xc8, 0x3f, 0xd4, 0x44, 0xa7, 0x3d, 0x63, 0xa4,
+ 0x37, 0x4d, 0x88, 0x9f, 0x44, 0x07, 0xe9, 0xb5,
+ 0x23, 0xf7, 0x75, 0x7c, 0x97, 0xfa, 0x8f, 0x7d,
+ 0xcc, 0x9d, 0x46, 0x76, 0xd1, 0x7c, 0xec, 0x14,
+ 0x79, 0xb3, 0x60, 0x9c, 0xf4, 0xc2, 0x83, 0xf5,
+ 0x86, 0x87, 0xa2, 0x33, 0x66, 0x39, 0xf5, 0x24,
+ 0xb7, 0xb5, 0xfc, 0xee, 0x3b, 0xf4, 0xd1, 0x83,
+ 0x26, 0x2b, 0x3e, 0xe7, 0xa3, 0x3b, 0xee, 0xb3,
+ 0xe4, 0x77, 0x1a, 0xa1, 0x2b, 0x35, 0x27, 0xa9,
+ 0x5f, 0xaf, 0x86, 0x7f, 0x70, 0xe8, 0xf2, 0x31,
+ 0xbc, 0x14, 0xf5, 0xc3, 0xb9, 0x03, 0xcf, 0x35,
+ 0x97, 0xd0, 0xf1, 0xd7, 0x57, 0x34, 0x6f, 0xfd,
+ 0x16, 0x7f, 0xe1, 0x9a, 0xaa, 0xf8, 0x38, 0x4c,
+ 0xfc, 0x6c, 0xc8, 0xe2, 0x7b, 0x66, 0x33, 0xfd,
+ 0xfc, 0x75, 0x4f, 0xad, 0x3f, 0x9d, 0xfd, 0x65,
+ 0x24, 0xca, 0x5f, 0xfd, 0x40, 0x1d, 0xcd, 0xb8,
+ 0xa5, 0xf9, 0xe1, 0x0a, 0xea, 0x67, 0x2f, 0x37,
+ 0xc5, 0x4b, 0x4b, 0xf2, 0x31, 0xe8, 0xa6, 0xe6,
+ 0x0b, 0xe7, 0xd1, 0xbd, 0x81, 0xc1, 0xca, 0xc7,
+ 0x73, 0xd4, 0xb3, 0xb1, 0xf9, 0xba, 0xcf, 0x16,
+ 0xcc, 0xbd, 0xfe, 0x2c, 0xfa, 0xa7, 0x9e, 0x50,
+ 0x1f, 0x52, 0x2f, 0x0e, 0x80, 0x3f, 0x47, 0x17,
+ 0xf3, 0x23, 0xa4, 0xe7, 0xb9, 0xf8, 0xcd, 0xca,
+ 0xae, 0xf2, 0xff, 0x0f, 0xa8, 0x33, 0x79, 0xfd,
+ 0x55, 0x6f, 0x6a, 0xf0, 0xb9, 0x13, 0x0e, 0xc4,
+ 0x91, 0xb9, 0x46, 0x5c, 0xc6, 0xcf, 0x51, 0x3f,
+ 0xff, 0x27, 0xf3, 0x92, 0xcd, 0x67, 0xb4, 0xdf,
+ 0xde, 0xf4, 0x3b, 0x33, 0xdc, 0xe5, 0x1f, 0xbe,
+ 0xe1, 0x5c, 0xab, 0xa7, 0xab, 0xbe, 0xf7, 0xa7,
+ 0xbe, 0xba, 0x37, 0x51, 0xfe, 0xde, 0xa7, 0x0e,
+ 0xdd, 0x6d, 0xa4, 0xfb, 0x0c, 0xc1, 0xb7, 0xd7,
+ 0xb9, 0xa1, 0xf9, 0xac, 0x1f, 0x7e, 0xa4, 0xdd,
+ 0x29, 0xcd, 0xcf, 0xa7, 0xe2, 0xef, 0xdf, 0x0f,
+ 0xce, 0x83, 0xc7, 0x26, 0x5b, 0x48, 0xd0, 0xdc,
+ 0xd6, 0x14, 0xa1, 0x2f, 0x61, 0x55, 0xb5, 0xfe,
+ 0xcf, 0x30, 0xce, 0x8f, 0x73, 0xf4, 0xbc, 0x21,
+ 0xc4, 0x5f, 0xf7, 0xe2, 0xf2, 0x27, 0x25, 0x99,
+ 0x1b, 0xf9, 0x0c, 0xda, 0x0a, 0x6f, 0x84, 0xdf,
+ 0xa8, 0xd7, 0xfd, 0x6b, 0x78, 0x39, 0xb8, 0x73,
+ 0x34, 0x3a, 0x6f, 0x32, 0xd0, 0x95, 0xbb, 0x2e,
+ 0x8a, 0xd7, 0x24, 0xf6, 0xeb, 0x51, 0x0f, 0x5f,
+ 0x6b, 0xc6, 0x72, 0x9f, 0x6d, 0x07, 0x4b, 0xef,
+ 0xdb, 0x3d, 0xb5, 0x70, 0xe8, 0xa5, 0xfa, 0xb9,
+ 0x01, 0xdc, 0x5f, 0xc3, 0x1d, 0xea, 0x97, 0xe3,
+ 0x39, 0x87, 0xfd, 0x27, 0x75, 0x7f, 0xfe, 0xf8,
+ 0xb8, 0xc2, 0x36, 0x3a, 0xcf, 0x67, 0xf0, 0x13,
+ 0x17, 0xd1, 0x5d, 0xe3, 0xca, 0xdc, 0x7f, 0x9d,
+ 0xaf, 0xea, 0xc7, 0x24, 0xf6, 0x57, 0xf1, 0xa5,
+ 0xfa, 0xab, 0xd2, 0xdc, 0xf7, 0xe5, 0x89, 0xea,
+ 0x7f, 0x8b, 0x93, 0x9f, 0x23, 0x0a, 0xe5, 0x5f,
+ 0x6b, 0x31, 0xaf, 0x2a, 0x93, 0x26, 0xfd, 0xd9,
+ 0x46, 0xfc, 0xf4, 0x4e, 0x67, 0x4e, 0x68, 0x9a,
+ 0xe3, 0xeb, 0xf7, 0x6e, 0x95, 0xbf, 0x0e, 0x20,
+ 0xee, 0x8a, 0xe5, 0xa8, 0x3f, 0x8b, 0x21, 0xcf,
+ 0x9c, 0x53, 0x3c, 0xf5, 0x3c, 0xe2, 0xc2, 0x39,
+ 0x5f, 0xf1, 0xeb, 0xc3, 0x5c, 0x78, 0xf8, 0x73,
+ 0x7d, 0x5e, 0xf3, 0xbe, 0xd5, 0x3f, 0xc8, 0x6f,
+ 0xcc, 0xc3, 0xb7, 0x7e, 0x59, 0x4e, 0xf7, 0x3d,
+ 0x08, 0x5f, 0x7b, 0xbe, 0xa5, 0xfe, 0x1e, 0x8e,
+ 0x1f, 0xf2, 0xcb, 0x57, 0x7e, 0xad, 0xe1, 0x9c,
+ 0xe6, 0xde, 0x90, 0x7e, 0xdd, 0xc1, 0x2f, 0x94,
+ 0x3f, 0x31, 0x0f, 0xbe, 0x82, 0xba, 0x59, 0xb0,
+ 0x5c, 0xf7, 0x77, 0x97, 0x39, 0xf2, 0x9a, 0xbd,
+ 0xf4, 0x75, 0xa6, 0x3f, 0x3a, 0xe4, 0xbc, 0x4b,
+ 0xbf, 0xff, 0x6c, 0xc3, 0x47, 0xbe, 0xcc, 0xd5,
+ 0x7d, 0x0e, 0x8d, 0xb2, 0x70, 0x69, 0x91, 0xf2,
+ 0xe3, 0x47, 0xce, 0x75, 0x6f, 0x8e, 0xe2, 0xeb,
+ 0x32, 0x7a, 0xdc, 0xb7, 0x73, 0x75, 0xf8, 0x61,
+ 0xea, 0xfa, 0xeb, 0x71, 0xf2, 0x07, 0xc3, 0x39,
+ 0x37, 0xa7, 0x8d, 0xf2, 0x13, 0x43, 0xc9, 0xdb,
+ 0x86, 0x6e, 0xaa, 0xaf, 0x5b, 0xc8, 0x97, 0x6b,
+ 0x4e, 0xe8, 0xa8, 0xd9, 0x84, 0x4e, 0x17, 0x2d,
+ 0x52, 0x7d, 0x4d, 0xa1, 0x2f, 0xac, 0x3c, 0x5e,
+ 0xfa, 0xe4, 0x43, 0xdc, 0x0d, 0xe8, 0xcd, 0xdc,
+ 0xc6, 0xac, 0xfa, 0xc4, 0x42, 0xf3, 0xdd, 0xf8,
+ 0x04, 0x33, 0xf2, 0x5d, 0x0b, 0x5e, 0xcd, 0xb9,
+ 0x47, 0x53, 0x9b, 0x79, 0x9f, 0xbf, 0x3f, 0xbe,
+ 0xd1, 0xfc, 0x8e, 0x5e, 0x4e, 0xb9, 0x2c, 0x7d,
+ 0x0b, 0x60, 0x6e, 0x34, 0x39, 0x44, 0xe7, 0x3b,
+ 0x01, 0xff, 0x91, 0x1c, 0xa9, 0xfe, 0xaa, 0x37,
+ 0xf3, 0xc8, 0x9a, 0x1f, 0x77, 0x83, 0x8f, 0xe1,
+ 0xbe, 0xeb, 0x07, 0x3f, 0xd5, 0xe7, 0x59, 0x87,
+ 0xe7, 0x43, 0xbd, 0xbf, 0x2e, 0xba, 0x93, 0xbe,
+ 0x4b, 0xf3, 0xdc, 0x8f, 0xd0, 0xf1, 0xa8, 0x96,
+ 0xaa, 0xe7, 0x2f, 0x4e, 0x5a, 0xc8, 0x39, 0xae,
+ 0xfb, 0x08, 0x66, 0x7f, 0x6b, 0xee, 0x6b, 0x1e,
+ 0x75, 0x91, 0x38, 0xac, 0x1a, 0xa9, 0x7c, 0xf9,
+ 0x91, 0xf7, 0xbd, 0x9f, 0xa5, 0x7e, 0x72, 0x1c,
+ 0xbe, 0xb6, 0x67, 0xa2, 0x7e, 0x4f, 0xe8, 0xc3,
+ 0x3d, 0x9f, 0xbb, 0xa2, 0xfe, 0x3d, 0xe2, 0x8a,
+ 0x85, 0x49, 0x19, 0xfa, 0xbd, 0xc2, 0x83, 0xf3,
+ 0xef, 0x79, 0x02, 0x5d, 0x31, 0xb7, 0xd1, 0xfb,
+ 0x69, 0x9e, 0xe8, 0x86, 0x39, 0x44, 0x9d, 0x1a,
+ 0xfa, 0x19, 0xbe, 0xd8, 0x34, 0xa7, 0x5f, 0x76,
+ 0xae, 0xa3, 0x79, 0xfc, 0x34, 0xe6, 0x45, 0x6b,
+ 0xca, 0xe8, 0xfd, 0xad, 0xa8, 0xa7, 0xf9, 0x0d,
+ 0x55, 0xaf, 0xd2, 0x88, 0x93, 0x80, 0x41, 0xf8,
+ 0x38, 0x93, 0x4a, 0x5c, 0x3a, 0x46, 0xab, 0xbe,
+ 0x5d, 0x26, 0xae, 0xf6, 0x6e, 0x51, 0x3d, 0xcc,
+ 0x45, 0x2f, 0x3b, 0x9c, 0x96, 0x1f, 0xff, 0x84,
+ 0x3c, 0x48, 0x8e, 0x53, 0xbf, 0x36, 0x18, 0xbd,
+ 0x75, 0x7c, 0xa4, 0xfe, 0x29, 0x0b, 0x3d, 0x3e,
+ 0xea, 0x2a, 0xbf, 0xe7, 0x17, 0x63, 0xa1, 0x4e,
+ 0x49, 0xf9, 0xfb, 0xa5, 0xf8, 0xb2, 0x93, 0x51,
+ 0xba, 0x9f, 0xe5, 0xcc, 0x71, 0xf7, 0xd6, 0x53,
+ 0xbf, 0x57, 0x89, 0x7c, 0x89, 0x9a, 0x2b, 0xbf,
+ 0xd4, 0x8c, 0xdf, 0x95, 0x32, 0x26, 0xab, 0xbe,
+ 0x5e, 0xe6, 0xf7, 0x84, 0xa0, 0x62, 0x9a, 0xbf,
+ 0x5d, 0x21, 0xce, 0x06, 0x2d, 0x57, 0xbe, 0xbe,
+ 0x26, 0xbf, 0x5c, 0x6b, 0x68, 0x7d, 0x5d, 0x38,
+ 0xd7, 0x93, 0xa5, 0xd5, 0xcf, 0x4c, 0xa0, 0xff,
+ 0x7e, 0xbb, 0x4e, 0xf5, 0x38, 0x0d, 0x1f, 0xd2,
+ 0xdb, 0x51, 0x7a, 0xb5, 0x17, 0xbd, 0x8e, 0xbd,
+ 0xad, 0x79, 0xee, 0x02, 0x74, 0xae, 0x54, 0x36,
+ 0x73, 0xfd, 0x7f, 0xe6, 0x5d, 0x57, 0xdd, 0xa4,
+ 0x37, 0x4e, 0xdc, 0x53, 0x8b, 0xca, 0x8a, 0xbf,
+ 0xad, 0xe8, 0xc8, 0xc0, 0xb0, 0x29, 0x3a, 0x9f,
+ 0xaa, 0x16, 0xda, 0xd5, 0x93, 0xbf, 0x7c, 0x0f,
+ 0xdd, 0xbb, 0xff, 0x48, 0xf5, 0xf0, 0x21, 0x73,
+ 0xd2, 0xa6, 0x2d, 0x74, 0x3e, 0xe3, 0xf1, 0x29,
+ 0x35, 0x36, 0xea, 0xf7, 0x03, 0x47, 0xe2, 0xbe,
+ 0x47, 0xa8, 0xfa, 0x8f, 0x19, 0xe8, 0xd8, 0xb5,
+ 0xce, 0xea, 0x1f, 0x63, 0x88, 0xa3, 0xc8, 0xdd,
+ 0xf8, 0x4c, 0x53, 0x88, 0x5e, 0x5e, 0x3f, 0x24,
+ 0x23, 0xe8, 0x83, 0xbe, 0x8d, 0xce, 0xd6, 0xfd,
+ 0x5c, 0xa7, 0xee, 0x54, 0xfc, 0x44, 0xfd, 0x54,
+ 0x32, 0xfd, 0xc8, 0x8a, 0xcd, 0xc4, 0x89, 0xd9,
+ 0x47, 0x9d, 0xed, 0xb4, 0x5d, 0xf5, 0x79, 0x3d,
+ 0xf9, 0x5b, 0xe5, 0xac, 0xf2, 0xa7, 0x16, 0xfb,
+ 0x88, 0xf5, 0xd1, 0xbc, 0xba, 0x17, 0x7d, 0xf4,
+ 0xc9, 0x07, 0x93, 0xe0, 0xaf, 0x99, 0xef, 0x6d,
+ 0x7c, 0x85, 0x4e, 0x9b, 0xe2, 0xcc, 0xbb, 0xdc,
+ 0xd7, 0xaa, 0xbe, 0x2e, 0xe6, 0xfe, 0x9e, 0x04,
+ 0x4b, 0xef, 0x7a, 0x90, 0x7f, 0x6f, 0x42, 0x54,
+ 0xbf, 0x5c, 0xc8, 0xcb, 0x5e, 0x53, 0x63, 0xe0,
+ 0xbe, 0xf8, 0xd2, 0x65, 0x71, 0xcc, 0x99, 0x4c,
+ 0x13, 0xf6, 0x73, 0xeb, 0x37, 0xcd, 0xf3, 0x5d,
+ 0xf0, 0x93, 0x75, 0x3d, 0xb4, 0x9e, 0xd3, 0xe8,
+ 0xe4, 0x30, 0x77, 0xfd, 0x9e, 0xbb, 0x88, 0x3a,
+ 0xba, 0x33, 0x9f, 0xdf, 0x3d, 0xcc, 0x38, 0x7e,
+ 0xf7, 0xed, 0x96, 0xa5, 0xfd, 0x76, 0x27, 0xae,
+ 0x3e, 0x88, 0x55, 0xff, 0x3b, 0x97, 0xfd, 0xe6,
+ 0x95, 0xd7, 0xbc, 0xad, 0x23, 0xf7, 0xb1, 0xdd,
+ 0x5b, 0xf3, 0xc5, 0x68, 0xe6, 0xe4, 0x0b, 0x02,
+ 0x0e, 0xc1, 0x27, 0x70, 0xdf, 0x0b, 0x9c, 0xd4,
+ 0x0f, 0x37, 0xa6, 0xbe, 0x4e, 0xee, 0xaa, 0x7a,
+ 0xb8, 0x19, 0xdd, 0x48, 0xed, 0xae, 0xfa, 0x1a,
+ 0x46, 0x3f, 0x7f, 0x7f, 0x8a, 0xea, 0xfb, 0x7e,
+ 0xf2, 0x3b, 0x32, 0x41, 0xfe, 0xa2, 0x0d, 0x3a,
+ 0xec, 0x18, 0x31, 0x4b, 0xfb, 0xe3, 0x7d, 0xa1,
+ 0x09, 0xca, 0xdf, 0x3b, 0xbe, 0xff, 0x03, 0x4e,
+ 0x9a, 0x03, 0x4d
+};
+
+#endif
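A note on the vectors above: compressed_text_zlib begins with 0x78 0x9c, the standard zlib stream header for the default window size and compression level, and COMP_ZLIB_SIZE must match the array's element count exactly. A minimal sketch of how such a vector could be regenerated offline with zlib's compress2(); the plain_text input buffer is a placeholder, not a symbol from this patch:

/* Sketch: regenerate a zlib test vector from an uncompressed buffer.
 * Build with -lz; "plain_text" stands in for the uncompressed test
 * data defined earlier in test_vectors.h. */
#include <stdio.h>
#include <zlib.h>

static unsigned char plain_text[8192]; /* placeholder input */

int main(void)
{
	static unsigned char out[16384];
	uLongf out_len = sizeof(out);
	uLong i;

	if (compress2(out, &out_len, plain_text, sizeof(plain_text),
		      Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;

	/* Emit the array body in the same 8-bytes-per-line format */
	for (i = 0; i < out_len; i++) {
		if ((i % 8) == 0)
			printf("\t");
		printf("0x%02x,%s", out[i], ((i % 8) == 7) ? "\n" : " ");
	}
	printf("\n/* COMP_ZLIB_SIZE would be %lu */\n",
	       (unsigned long)out_len);
	return 0;
}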
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index 5b395c2a6..b2f599ec2 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -7,6 +7,7 @@
#include "config.h"
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include "test_vectors.h"
@@ -695,8 +696,11 @@ static int check_alg_support(odp_cipher_alg_t cipher, odp_auth_alg_t auth)
{
odp_crypto_capability_t capability;
- if (odp_crypto_capability(&capability))
+ memset(&capability, 0, sizeof(odp_crypto_capability_t));
+ if (odp_crypto_capability(&capability)) {
+ fprintf(stderr, "odp_crypto_capability() failed\n");
return ODP_TEST_INACTIVE;
+ }
if (suite_context.packet) {
if (suite_context.op_mode == ODP_CRYPTO_SYNC &&
@@ -2029,8 +2033,19 @@ static int crypto_init(odp_instance_t *inst)
odp_pool_t pool;
odp_queue_t out_queue;
odp_pool_capability_t pool_capa;
+ odp_crypto_capability_t crypto_capa;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
- if (0 != odp_init_global(inst, NULL, NULL)) {
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
@@ -2040,6 +2055,11 @@ static int crypto_init(odp_instance_t *inst)
return -1;
}
+ if (odp_crypto_capability(&crypto_capa)) {
+ fprintf(stderr, "error: odp_crypto_capability() failed.\n");
+ return -1;
+ }
+
if (odp_pool_capability(&pool_capa) < 0) {
fprintf(stderr, "error: odp_pool_capability() failed.\n");
return -1;
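crypto_init() above now follows the same bootstrapping pattern used throughout this patch: query the helper options first, then carry the selected memory model into odp_init_global(). Condensed into a single helper for reference (a sketch, not a drop-in replacement for any file here):

/* Sketch: propagate the helper-selected memory model (thread vs.
 * process) into ODP global init, mirroring the pattern above. */
#include <stdio.h>
#include <odp_api.h>
#include <odp/helper/odph_api.h>

static int init_with_helper_options(odp_instance_t *inst)
{
	odph_helper_options_t helper_options;
	odp_init_t init_param;

	if (odph_options(&helper_options)) {
		fprintf(stderr, "error: odph_options() failed.\n");
		return -1;
	}

	odp_init_param_init(&init_param);
	init_param.mem_model = helper_options.mem_model;

	return odp_init_global(inst, &init_param, NULL);
}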
diff --git a/test/validation/api/init/init_main_ok.c b/test/validation/api/init/init_main_ok.c
index a97db8e79..651989aeb 100644
--- a/test/validation/api/init/init_main_ok.c
+++ b/test/validation/api/init/init_main_ok.c
@@ -14,8 +14,12 @@ static void init_test_odp_init_global(void)
{
int status;
odp_instance_t instance;
+ odp_init_t init_data;
- status = odp_init_global(&instance, NULL, NULL);
+ odp_init_param_init(&init_data);
+ init_data.mem_model = ODP_MEM_MODEL_THREAD;
+
+ status = odp_init_global(&instance, &init_data, NULL);
CU_ASSERT_FATAL(status == 0);
status = odp_term_global(instance);
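The explicit ODP_MEM_MODEL_THREAD above exercises the new mem_model field of odp_init_t: the thread model (the default) shares all ODP memory within a single process, while ODP_MEM_MODEL_PROCESS targets multi-process deployments. Requesting the process model instead would look like this (a fragment-level sketch; error handling trimmed):

/* Sketch: selecting the process memory model at global init */
odp_instance_t instance;
odp_init_t init_data;

odp_init_param_init(&init_data);
init_data.mem_model = ODP_MEM_MODEL_PROCESS; /* default: ODP_MEM_MODEL_THREAD */

if (odp_init_global(&instance, &init_data, NULL) != 0)
	return -1;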
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index be47d109b..f82c561a0 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -9,6 +9,7 @@
#include <odp_api.h>
#include <odp_cunit_common.h>
#include <unistd.h>
+#include <odp/helper/odph_api.h>
#include "ipsec.h"
@@ -344,7 +345,8 @@ void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
const odp_crypto_key_t *cipher_key,
odp_auth_alg_t auth_alg,
const odp_crypto_key_t *auth_key,
- const odp_crypto_key_t *extra_key)
+ const odp_crypto_key_t *cipher_key_extra,
+ const odp_crypto_key_t *auth_key_extra)
{
odp_ipsec_sa_param_init(param);
param->dir = in ? ODP_IPSEC_DIR_INBOUND :
@@ -377,8 +379,11 @@ void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
if (auth_key)
param->crypto.auth_key = *auth_key;
- if (extra_key)
- param->crypto.cipher_key_extra = *extra_key;
+ if (cipher_key_extra)
+ param->crypto.cipher_key_extra = *cipher_key_extra;
+
+ if (auth_key_extra)
+ param->crypto.auth_key_extra = *auth_key_extra;
}
void ipsec_sa_destroy(odp_ipsec_sa_t sa)
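Splitting the old extra_key parameter lets each call site put the salt where it belongs: algorithms that key the cipher (AES-CTR, AES-GCM, ChaCha20-Poly1305) pass it as cipher_key_extra, while AES-GMAC, an authentication algorithm, passes it as auth_key_extra — see the updated call sites later in this patch. A usage sketch with placeholder key and salt buffers (not vectors from this patch):

/* Sketch: salt placement after the parameter split */
odp_ipsec_sa_param_t param;

/* AES-GCM ESP: salt travels with the cipher key */
ipsec_sa_param_fill(&param, true, false, 123, NULL,
		    ODP_CIPHER_ALG_AES_GCM, &gcm_key,
		    ODP_AUTH_ALG_AES_GCM, NULL,
		    &gcm_salt, NULL);

/* AES-GMAC AH: salt travels with the auth key */
ipsec_sa_param_fill(&param, true, true, 124, NULL,
		    ODP_CIPHER_ALG_NULL, NULL,
		    ODP_AUTH_ALG_AES_GMAC, &gmac_key,
		    NULL, &gmac_salt);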
@@ -438,11 +443,14 @@ odp_packet_t ipsec_packet(const ipsec_test_packet *itp)
/*
* Compare packets ignoring everything before the L3 header
*/
-static void ipsec_check_packet(const ipsec_test_packet *itp, odp_packet_t pkt)
+static void ipsec_check_packet(const ipsec_test_packet *itp, odp_packet_t pkt,
+ odp_bool_t is_outbound)
{
uint32_t len = (ODP_PACKET_INVALID == pkt) ? 1 : odp_packet_len(pkt);
uint32_t l3, l4;
uint8_t data[len];
+ const odph_ipv4hdr_t *itp_ip;
+ odph_ipv4hdr_t *ip;
if (NULL == itp)
return;
@@ -472,6 +480,38 @@ static void ipsec_check_packet(const ipsec_test_packet *itp, odp_packet_t pkt)
if (l4 - l3 != itp->l4_offset - itp->l3_offset)
return;
+ ip = (odph_ipv4hdr_t *) &data[l3];
+ itp_ip = (const odph_ipv4hdr_t *) &itp->data[itp->l3_offset];
+ if (ODPH_IPV4HDR_VER(ip->ver_ihl) == ODPH_IPV4 &&
+ is_outbound &&
+ ip->id != itp_ip->id) {
+ /*
+ * IP ID value chosen by the implementation differs
+ * from the IP ID value in our test vector. This requires
+ * special handling in outbound checks.
+ */
+ /*
+ * Let's change the IP ID and header checksum to the same
+ * values as in the test vector to facilitate packet comparison.
+ */
+ CU_ASSERT(odph_ipv4_csum_valid(pkt));
+ ip->id = itp_ip->id;
+ ip->chksum = itp_ip->chksum;
+
+ if (ip->proto == ODPH_IPPROTO_AH) {
+ /*
+ * ID field is included in the authentication so
+ * we cannot check ICV against our test vector.
+ * Check packet data before the first possible
+ * location of the AH ICV field.
+ */
+ CU_ASSERT_EQUAL(0, memcmp(data + l3,
+ itp->data + itp->l3_offset,
+ ODPH_IPV4HDR_LEN + 12));
+ return;
+ }
+ }
+
CU_ASSERT_EQUAL(0, memcmp(data + l3,
itp->data + itp->l3_offset,
len - l3));
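Why ODPH_IPV4HDR_LEN + 12 in the AH branch above: per RFC 4302 the AH fixed header preceding the variable-length ICV is 12 bytes, so that sum is the longest prefix that can be compared without touching authentication data:

	next header (1) + payload len (1) + reserved (2) + SPI (4) + sequence number (4) = 12
	ODPH_IPV4HDR_LEN (20) + 12 = 32 bytes compared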
@@ -714,7 +754,8 @@ void ipsec_check_in_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
odp_ipsec_sa_context(sa));
}
ipsec_check_packet(part->out[i].pkt_out,
- pkto[i]);
+ pkto[i],
+ false);
if (part->out[i].pkt_out != NULL &&
part->out[i].l3_type != _ODP_PROTO_L3_TYPE_UNDEF)
CU_ASSERT_EQUAL(part->out[i].l3_type,
@@ -759,7 +800,8 @@ void ipsec_check_out_one(const ipsec_test_part *part, odp_ipsec_sa_t sa)
odp_ipsec_sa_context(sa));
}
ipsec_check_packet(part->out[i].pkt_out,
- pkto[i]);
+ pkto[i],
+ true);
odp_packet_free(pkto[i]);
}
}
@@ -868,8 +910,18 @@ int ipsec_init(odp_instance_t *inst)
odp_queue_t out_queue;
odp_pool_capability_t pool_capa;
odp_pktio_t pktio;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
- if (0 != odp_init_global(inst, NULL, NULL)) {
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
diff --git a/test/validation/api/ipsec/ipsec.h b/test/validation/api/ipsec/ipsec.h
index b2d6df698..2a6713d10 100644
--- a/test/validation/api/ipsec/ipsec.h
+++ b/test/validation/api/ipsec/ipsec.h
@@ -68,7 +68,8 @@ void ipsec_sa_param_fill(odp_ipsec_sa_param_t *param,
const odp_crypto_key_t *cipher_key,
odp_auth_alg_t auth_alg,
const odp_crypto_key_t *auth_key,
- const odp_crypto_key_t *extra_key);
+ const odp_crypto_key_t *cipher_key_extra,
+ const odp_crypto_key_t *auth_key_extra);
void ipsec_sa_destroy(odp_ipsec_sa_t sa);
odp_packet_t ipsec_packet(const ipsec_test_packet *itp);
diff --git a/test/validation/api/ipsec/ipsec_test_in.c b/test/validation/api/ipsec/ipsec_test_in.c
index 9c1112004..515331483 100644
--- a/test/validation/api/ipsec/ipsec_test_in.c
+++ b/test/validation/api/ipsec/ipsec_test_in.c
@@ -19,7 +19,7 @@ static void test_in_ipv4_ah_sha256(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -52,7 +52,7 @@ static void test_in_ipv4_ah_sha256_tun_ipv4(void)
true, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -85,7 +85,7 @@ static void test_in_ipv4_ah_sha256_tun_ipv6(void)
true, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -117,7 +117,7 @@ static void test_in_ipv4_ah_sha256_tun_ipv4_notun(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -150,7 +150,7 @@ static void test_in_ipv4_esp_null_sha256(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -182,7 +182,7 @@ static void test_in_ipv4_esp_aes_cbc_null(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -214,7 +214,7 @@ static void test_in_ipv4_esp_aes_cbc_sha256(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -246,7 +246,7 @@ static void test_in_ipv4_esp_aes_ctr_null(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_CTR, &key_a5_128,
ODP_AUTH_ALG_NULL, NULL,
- &key_mcgrew_gcm_salt_3);
+ &key_mcgrew_gcm_salt_3, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -278,7 +278,7 @@ static void test_in_ipv4_ah_sha256_lookup(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -311,7 +311,7 @@ static void test_in_ipv4_esp_null_sha256_lookup(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -345,7 +345,7 @@ static void test_in_ipv4_esp_null_sha256_tun_ipv4(void)
true, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -378,7 +378,7 @@ static void test_in_ipv4_esp_null_sha256_tun_ipv6(void)
true, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -410,7 +410,7 @@ static void test_in_ipv4_esp_udp_null_sha256(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.opt.udp_encap = 1;
sa = odp_ipsec_sa_create(&param);
@@ -443,7 +443,7 @@ static void test_in_ipv4_esp_udp_null_sha256_lookup(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.opt.udp_encap = 1;
sa = odp_ipsec_sa_create(&param);
@@ -477,7 +477,7 @@ static void test_in_ipv4_ah_sha256_noreplay(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.inbound.antireplay_ws = 0;
sa = odp_ipsec_sa_create(&param);
@@ -525,7 +525,7 @@ static void test_in_ipv4_ah_sha256_replay(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.inbound.antireplay_ws = 32;
sa = odp_ipsec_sa_create(&param);
@@ -583,7 +583,7 @@ static void test_in_ipv4_esp_null_sha256_noreplay(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.inbound.antireplay_ws = 0;
sa = odp_ipsec_sa_create(&param);
@@ -631,7 +631,7 @@ static void test_in_ipv4_esp_null_sha256_replay(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.inbound.antireplay_ws = 32;
sa = odp_ipsec_sa_create(&param);
@@ -694,7 +694,7 @@ static void test_in_ipv4_ah_esp_pkt(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -729,7 +729,7 @@ static void test_in_ipv4_esp_ah_pkt(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -759,7 +759,7 @@ static void test_in_ipv4_ah_esp_pkt_lookup(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -790,7 +790,7 @@ static void test_in_ipv4_esp_ah_pkt_lookup(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -821,7 +821,7 @@ static void test_in_ipv4_ah_sha256_bad1(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -851,7 +851,7 @@ static void test_in_ipv4_ah_sha256_bad2(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -881,7 +881,7 @@ static void test_in_ipv4_esp_null_sha256_bad1(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -911,7 +911,7 @@ static void test_in_ipv4_rfc3602_5_esp(void)
true, false, 0x4321, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_rfc3602,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -943,7 +943,7 @@ static void test_in_ipv4_rfc3602_6_esp(void)
true, false, 0x4321, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_rfc3602,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -976,7 +976,7 @@ static void test_in_ipv4_rfc3602_7_esp(void)
true, false, 0x8765, &tunnel,
ODP_CIPHER_ALG_AES_CBC, &key_rfc3602_2,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1009,7 +1009,7 @@ static void test_in_ipv4_rfc3602_8_esp(void)
true, false, 0x8765, &tunnel,
ODP_CIPHER_ALG_AES_CBC, &key_rfc3602_2,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1042,7 +1042,7 @@ static void test_in_ipv4_mcgrew_gcm_2_esp(void)
true, false, 0xa5f8, &tunnel,
ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_2,
ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_2);
+ &key_mcgrew_gcm_salt_2, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1075,7 +1075,7 @@ static void test_in_ipv4_mcgrew_gcm_3_esp(void)
true, false, 0x4a2cbfe3, &tunnel,
ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_3,
ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_3);
+ &key_mcgrew_gcm_salt_3, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1108,7 +1108,7 @@ static void test_in_ipv4_mcgrew_gcm_4_esp(void)
true, false, 0x00000000, &tunnel,
ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_4,
ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_4);
+ &key_mcgrew_gcm_salt_4, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1146,7 +1146,7 @@ static void test_in_ipv4_mcgrew_gcm_12_esp(void)
true, false, 0x335467ae, &tunnel,
ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_12,
ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_12);
+ &key_mcgrew_gcm_salt_12, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1178,7 +1178,7 @@ static void test_in_ipv4_mcgrew_gcm_12_esp_notun(void)
true, false, 0x335467ae, NULL,
ODP_CIPHER_ALG_AES_GCM, &key_mcgrew_gcm_12,
ODP_AUTH_ALG_AES_GCM, NULL,
- &key_mcgrew_gcm_salt_12);
+ &key_mcgrew_gcm_salt_12, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1211,7 +1211,7 @@ static void test_in_ipv4_mcgrew_gcm_15_esp(void)
true, false, 0x00004321, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_mcgrew_gcm_15,
- &key_mcgrew_gcm_salt_15);
+ NULL, &key_mcgrew_gcm_salt_15);
sa = odp_ipsec_sa_create(&param);
@@ -1244,7 +1244,7 @@ static void test_in_ipv4_rfc7634_chacha(void)
true, false, 0x01020304, &tunnel,
ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634,
ODP_AUTH_ALG_CHACHA20_POLY1305, NULL,
- &key_rfc7634_salt);
+ &key_rfc7634_salt, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1276,7 +1276,7 @@ static void test_in_ipv4_ah_aes_gmac_128(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
- &key_mcgrew_gcm_salt_2);
+ NULL, &key_mcgrew_gcm_salt_2);
sa = odp_ipsec_sa_create(&param);
@@ -1308,7 +1308,7 @@ static void test_in_ipv4_esp_null_aes_gmac_128(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
- &key_mcgrew_gcm_salt_2);
+ NULL, &key_mcgrew_gcm_salt_2);
sa = odp_ipsec_sa_create(&param);
@@ -1340,7 +1340,7 @@ static void test_in_ipv6_ah_sha256(void)
true, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1373,7 +1373,7 @@ static void test_in_ipv6_ah_sha256_tun_ipv4(void)
true, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1406,7 +1406,7 @@ static void test_in_ipv6_ah_sha256_tun_ipv6(void)
true, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1438,7 +1438,7 @@ static void test_in_ipv6_esp_null_sha256(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1471,7 +1471,7 @@ static void test_in_ipv6_esp_null_sha256_tun_ipv4(void)
true, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1504,7 +1504,7 @@ static void test_in_ipv6_esp_null_sha256_tun_ipv6(void)
true, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1536,7 +1536,7 @@ static void test_in_ipv6_esp_udp_null_sha256(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.opt.udp_encap = 1;
sa = odp_ipsec_sa_create(&param);
@@ -1569,7 +1569,7 @@ static void test_in_ipv6_esp_udp_null_sha256_lookup(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.opt.udp_encap = 1;
sa = odp_ipsec_sa_create(&param);
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index 59c631b58..ee3fd43fb 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -19,7 +19,7 @@ static void test_out_ipv4_ah_sha256(void)
false, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -62,7 +62,7 @@ static void test_out_ipv4_ah_sha256_tun_ipv4(void)
false, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -106,7 +106,7 @@ static void test_out_ipv4_ah_sha256_tun_ipv6(void)
false, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -136,7 +136,7 @@ static void test_out_ipv4_esp_null_sha256(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -174,7 +174,7 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv4(void)
false, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -219,7 +219,7 @@ static void test_out_ipv4_esp_null_sha256_tun_ipv6(void)
false, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -251,7 +251,7 @@ static void test_out_ipv4_esp_aes_cbc_null(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -261,7 +261,7 @@ static void test_out_ipv4_esp_aes_cbc_null(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
ODP_AUTH_ALG_NULL, NULL,
- NULL);
+ NULL, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -294,7 +294,7 @@ static void test_out_ipv4_esp_udp_null_sha256(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.opt.udp_encap = 1;
sa = odp_ipsec_sa_create(&param);
@@ -326,7 +326,7 @@ static void test_out_ipv4_esp_aes_cbc_sha256(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -336,7 +336,7 @@ static void test_out_ipv4_esp_aes_cbc_sha256(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_CBC, &key_a5_128,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -370,7 +370,7 @@ static void test_out_ipv4_esp_aes_ctr_null(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_AES_CTR, &key_a5_128,
ODP_AUTH_ALG_NULL, NULL,
- &key_mcgrew_gcm_salt_3);
+ &key_mcgrew_gcm_salt_3, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -380,7 +380,7 @@ static void test_out_ipv4_esp_aes_ctr_null(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_CTR, &key_a5_128,
ODP_AUTH_ALG_NULL, NULL,
- &key_mcgrew_gcm_salt_3);
+ &key_mcgrew_gcm_salt_3, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -414,7 +414,7 @@ static void test_out_ipv4_esp_aes_gcm128(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_AES_GCM, &key_a5_128,
ODP_AUTH_ALG_AES_GCM, &key_a5_128,
- &key_mcgrew_gcm_salt_2);
+ &key_mcgrew_gcm_salt_2, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -424,7 +424,7 @@ static void test_out_ipv4_esp_aes_gcm128(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_AES_GCM, &key_a5_128,
ODP_AUTH_ALG_AES_GCM, &key_a5_128,
- &key_mcgrew_gcm_salt_2);
+ &key_mcgrew_gcm_salt_2, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -457,7 +457,7 @@ static void test_out_ipv4_ah_aes_gmac_128(void)
false, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
- &key_mcgrew_gcm_salt_2);
+ NULL, &key_mcgrew_gcm_salt_2);
sa = odp_ipsec_sa_create(&param);
@@ -487,7 +487,7 @@ static void test_out_ipv4_esp_null_aes_gmac_128(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_AES_GMAC, &key_a5_128,
- &key_mcgrew_gcm_salt_2);
+ NULL, &key_mcgrew_gcm_salt_2);
sa = odp_ipsec_sa_create(&param);
@@ -518,7 +518,7 @@ static void test_out_ipv4_esp_chacha20_poly1305(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634,
ODP_AUTH_ALG_CHACHA20_POLY1305, NULL,
- &key_rfc7634_salt);
+ &key_rfc7634_salt, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -528,7 +528,7 @@ static void test_out_ipv4_esp_chacha20_poly1305(void)
true, false, 123, NULL,
ODP_CIPHER_ALG_CHACHA20_POLY1305, &key_rfc7634,
ODP_AUTH_ALG_CHACHA20_POLY1305, NULL,
- &key_rfc7634_salt);
+ &key_rfc7634_salt, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -561,7 +561,7 @@ static void test_out_ipv4_ah_sha256_frag_check(void)
false, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
param.outbound.mtu = 100;
@@ -608,7 +608,7 @@ static void test_out_ipv4_ah_sha256_frag_check_2(void)
false, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
param.outbound.mtu = 100;
@@ -654,7 +654,7 @@ static void test_out_ipv4_esp_null_sha256_frag_check(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
param.outbound.mtu = 100;
@@ -702,7 +702,7 @@ static void test_out_ipv4_esp_null_sha256_frag_check_2(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.outbound.frag_mode = ODP_IPSEC_FRAG_CHECK;
param.outbound.mtu = 100;
@@ -749,7 +749,7 @@ static void test_out_ipv6_ah_sha256(void)
false, true, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -787,7 +787,7 @@ static void test_out_ipv6_ah_sha256_tun_ipv4(void)
false, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -831,7 +831,7 @@ static void test_out_ipv6_ah_sha256_tun_ipv6(void)
false, true, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -861,7 +861,7 @@ static void test_out_ipv6_esp_null_sha256(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -899,7 +899,7 @@ static void test_out_ipv6_esp_null_sha256_tun_ipv4(void)
false, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -944,7 +944,7 @@ static void test_out_ipv6_esp_null_sha256_tun_ipv6(void)
false, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -975,7 +975,7 @@ static void test_out_ipv6_esp_udp_null_sha256(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
param.opt.udp_encap = 1;
sa = odp_ipsec_sa_create(&param);
@@ -1020,7 +1020,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv4(void)
false, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1030,7 +1030,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv4(void)
true, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -1102,7 +1102,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv6(void)
false, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
@@ -1112,7 +1112,7 @@ static void test_out_dummy_esp_null_sha256_tun_ipv6(void)
true, false, 123, &tunnel,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa2 = odp_ipsec_sa_create(&param);
@@ -1164,7 +1164,7 @@ static void test_out_ipv4_udp_esp_null_sha256(void)
false, false, 123, NULL,
ODP_CIPHER_ALG_NULL, NULL,
ODP_AUTH_ALG_SHA256_HMAC, &key_5a_256,
- NULL);
+ NULL, NULL);
sa = odp_ipsec_sa_create(&param);
diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index 67e1e3c8c..b8502f011 100644
--- a/test/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -8,6 +8,7 @@
#include <malloc.h>
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
@@ -1163,8 +1164,18 @@ static int lock_init(odp_instance_t *inst)
uint32_t workers_count, max_threads;
int ret = 0;
odp_cpumask_t mask;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
- if (0 != odp_init_global(inst, NULL, NULL)) {
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
fprintf(stderr, "error: odp_init_global() failed.\n");
return -1;
}
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
index 0c86b0510..0f019f6c9 100644
--- a/test/validation/api/packet/packet.c
+++ b/test/validation/api/packet/packet.c
@@ -31,7 +31,7 @@ static uint32_t packet_len;
static uint32_t segmented_packet_len;
static odp_bool_t segmentation_supported = true;
-odp_packet_t test_packet, segmented_test_packet;
+odp_packet_t test_packet, segmented_test_packet, test_reset_packet;
static struct udata_struct {
uint64_t u64;
@@ -213,11 +213,23 @@ static int packet_suite_init(void)
test_packet = odp_packet_alloc(packet_pool, packet_len);
+ if (test_packet == ODP_PACKET_INVALID) {
+ printf("test_packet alloc failed\n");
+ return -1;
+ }
+
for (i = 0; i < packet_len; i++) {
odp_packet_copy_from_mem(test_packet, i, 1, &data);
data++;
}
+ test_reset_packet = odp_packet_alloc(packet_pool, packet_len);
+
+ if (test_reset_packet == ODP_PACKET_INVALID) {
+ printf("test_reset_packet alloc failed\n");
+ return -1;
+ }
+
/* Try to allocate PACKET_POOL_NUM_SEG largest possible packets to see
* if segmentation is supported */
do {
@@ -278,6 +290,7 @@ static int packet_suite_init(void)
static int packet_suite_term(void)
{
odp_packet_free(test_packet);
+ odp_packet_free(test_reset_packet);
odp_packet_free(segmented_test_packet);
if (odp_pool_destroy(packet_pool_double_uarea) != 0 ||
@@ -661,6 +674,70 @@ static void packet_test_length(void)
CU_ASSERT(buf_len >= packet_len + headroom + tailroom);
}
+static void packet_test_reset(void)
+{
+ uint32_t len, headroom;
+ uintptr_t ptr_len;
+ void *data, *new_data, *tail, *new_tail;
+ odp_packet_t pkt = test_reset_packet;
+
+ len = odp_packet_len(pkt);
+ headroom = odp_packet_headroom(pkt);
+ CU_ASSERT(len > 1);
+
+ if (headroom) {
+ data = odp_packet_data(pkt);
+ new_data = odp_packet_push_head(pkt, 1);
+ CU_ASSERT(odp_packet_len(pkt) == len + 1);
+ CU_ASSERT((uintptr_t)new_data == ((uintptr_t)data - 1));
+ CU_ASSERT(odp_packet_headroom(pkt) == headroom - 1);
+ ptr_len = (uintptr_t)odp_packet_data(pkt) -
+ (uintptr_t)odp_packet_head(pkt);
+ CU_ASSERT(ptr_len == (headroom - 1));
+ CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+ CU_ASSERT(odp_packet_len(pkt) == len);
+ CU_ASSERT(odp_packet_headroom(pkt) == headroom);
+ ptr_len = (uintptr_t)odp_packet_data(pkt) -
+ (uintptr_t)odp_packet_head(pkt);
+ CU_ASSERT(ptr_len == headroom);
+ }
+
+ data = odp_packet_data(pkt);
+ new_data = odp_packet_pull_head(pkt, 1);
+ CU_ASSERT(odp_packet_len(pkt) == len - 1);
+ CU_ASSERT((uintptr_t)new_data == ((uintptr_t)data + 1));
+ CU_ASSERT(odp_packet_headroom(pkt) == headroom + 1);
+ ptr_len = (uintptr_t)odp_packet_data(pkt) -
+ (uintptr_t)odp_packet_head(pkt);
+ CU_ASSERT(ptr_len == (headroom + 1));
+ CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+ CU_ASSERT(odp_packet_len(pkt) == len);
+ CU_ASSERT(odp_packet_headroom(pkt) == headroom);
+ ptr_len = (uintptr_t)odp_packet_data(pkt) -
+ (uintptr_t)odp_packet_head(pkt);
+ CU_ASSERT(ptr_len == headroom);
+
+ tail = odp_packet_tail(pkt);
+ new_tail = odp_packet_pull_tail(pkt, 1);
+ CU_ASSERT(odp_packet_len(pkt) == len - 1);
+ CU_ASSERT((uintptr_t)new_tail == ((uintptr_t)tail - 1));
+ CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+ CU_ASSERT(odp_packet_len(pkt) == len);
+
+ CU_ASSERT(odp_packet_has_udp(pkt) == 0);
+ odp_packet_has_udp_set(pkt, 1);
+ CU_ASSERT(odp_packet_has_udp(pkt) != 0);
+ CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+ CU_ASSERT(odp_packet_has_udp(pkt) == 0);
+
+ CU_ASSERT(odp_packet_reset(pkt, len - 1) == 0);
+ CU_ASSERT(odp_packet_len(pkt) == (len - 1));
+
+ len = len - len / 2;
+ CU_ASSERT(odp_packet_reset(pkt, len) == 0);
+ CU_ASSERT(odp_packet_len(pkt) == len);
+}
+
static void packet_test_prefetch(void)
{
odp_packet_prefetch(test_packet, 0, odp_packet_len(test_packet));
@@ -3423,6 +3500,7 @@ odp_testinfo_t packet_suite[] = {
ODP_TEST_INFO(packet_test_debug),
ODP_TEST_INFO(packet_test_segments),
ODP_TEST_INFO(packet_test_length),
+ ODP_TEST_INFO(packet_test_reset),
ODP_TEST_INFO(packet_test_prefetch),
ODP_TEST_INFO(packet_test_headroom),
ODP_TEST_INFO(packet_test_tailroom),
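
A sketch of the odp_packet_reset() contract that the new packet_test_reset case exercises, assuming a valid packet pool 'pool': reset restores the requested length and the pool's default headroom, and clears parse metadata flags.

#include <assert.h>
#include <odp_api.h>

static void reset_contract(odp_pool_t pool, uint32_t len)
{
	odp_packet_t pkt = odp_packet_alloc(pool, len);
	uint32_t headroom;

	assert(pkt != ODP_PACKET_INVALID);
	headroom = odp_packet_headroom(pkt);

	odp_packet_pull_head(pkt, 1);     /* move data pointer forward */
	odp_packet_has_udp_set(pkt, 1);   /* dirty a parse flag */

	/* Reset returns the packet to a freshly-allocated state */
	assert(odp_packet_reset(pkt, len) == 0);
	assert(odp_packet_len(pkt) == len);
	assert(odp_packet_headroom(pkt) == headroom);
	assert(!odp_packet_has_udp(pkt));

	odp_packet_free(pkt);
}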
diff --git a/test/validation/api/pktio/parser.c b/test/validation/api/pktio/parser.c
index 76df47b1c..db37c1c5b 100644
--- a/test/validation/api/pktio/parser.c
+++ b/test/validation/api/pktio/parser.c
@@ -301,6 +301,7 @@ static void parser_test_ipv4_icmp(void)
CU_ASSERT(!odp_packet_has_ipv6(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
CU_ASSERT(!odp_packet_has_udp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -318,6 +319,7 @@ static void parser_test_ipv4_tcp(void)
CU_ASSERT(!odp_packet_has_ipv6(pkt));
CU_ASSERT(!odp_packet_has_udp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -335,6 +337,7 @@ static void parser_test_ipv4_udp(void)
CU_ASSERT(!odp_packet_has_ipv6(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -353,6 +356,7 @@ static void parser_test_vlan_ipv4_udp(void)
CU_ASSERT(!odp_packet_has_ipv6(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -372,6 +376,25 @@ static void parser_test_vlan_qinq_ipv4_udp(void)
CU_ASSERT(!odp_packet_has_ipv6(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+ odp_packet_free(pkt);
+}
+
+static void parser_test_ipv4_sctp(void)
+{
+ odp_packet_t pkt;
+
+ pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv4_sctp,
+ sizeof(test_packet_ipv4_sctp));
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_has_eth(pkt));
+ CU_ASSERT(odp_packet_has_ipv4(pkt));
+ CU_ASSERT(odp_packet_has_sctp(pkt));
+
+ CU_ASSERT(!odp_packet_has_ipv6(pkt));
+ CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_udp(pkt));
odp_packet_free(pkt);
}
@@ -390,6 +413,7 @@ static void parser_test_ipv6_icmp(void)
CU_ASSERT(!odp_packet_has_ipv4(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
CU_ASSERT(!odp_packet_has_udp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -407,6 +431,7 @@ static void parser_test_ipv6_tcp(void)
CU_ASSERT(!odp_packet_has_ipv4(pkt));
CU_ASSERT(!odp_packet_has_udp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -424,6 +449,7 @@ static void parser_test_ipv6_udp(void)
CU_ASSERT(!odp_packet_has_ipv4(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
odp_packet_free(pkt);
}
@@ -442,6 +468,25 @@ static void parser_test_vlan_ipv6_udp(void)
CU_ASSERT(!odp_packet_has_ipv4(pkt));
CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_sctp(pkt));
+
+ odp_packet_free(pkt);
+}
+
+static void parser_test_ipv6_sctp(void)
+{
+ odp_packet_t pkt;
+
+ pkt = loopback_packet(pktio_a, pktio_b, test_packet_ipv6_sctp,
+ sizeof(test_packet_ipv6_sctp));
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(odp_packet_has_eth(pkt));
+ CU_ASSERT(odp_packet_has_ipv6(pkt));
+ CU_ASSERT(odp_packet_has_sctp(pkt));
+
+ CU_ASSERT(!odp_packet_has_ipv4(pkt));
+ CU_ASSERT(!odp_packet_has_tcp(pkt));
+ CU_ASSERT(!odp_packet_has_udp(pkt));
odp_packet_free(pkt);
}
@@ -555,9 +600,11 @@ odp_testinfo_t parser_suite[] = {
ODP_TEST_INFO(parser_test_ipv4_udp),
ODP_TEST_INFO_CONDITIONAL(parser_test_vlan_ipv4_udp, loop_pktio),
ODP_TEST_INFO_CONDITIONAL(parser_test_vlan_qinq_ipv4_udp, loop_pktio),
+ ODP_TEST_INFO(parser_test_ipv4_sctp),
ODP_TEST_INFO(parser_test_ipv6_icmp),
ODP_TEST_INFO(parser_test_ipv6_tcp),
ODP_TEST_INFO(parser_test_ipv6_udp),
ODP_TEST_INFO_CONDITIONAL(parser_test_vlan_ipv6_udp, loop_pktio),
+ ODP_TEST_INFO(parser_test_ipv6_sctp),
ODP_TEST_INFO_NULL
};
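
The new SCTP parser cases rely on reference frames (test_packet_ipv4_sctp, test_packet_ipv6_sctp) defined elsewhere in the test sources. A hypothetical sketch of the relevant protocol plumbing, using the odph header definitions this series adds: an IPv4 frame carries SCTP when the IP protocol field is 132.

#include <odp_api.h>
#include <odp/helper/odph_api.h>

/* Hypothetical sketch: mark an Ethernet/IPv4 frame as carrying SCTP
 * so a conforming parser should set the SCTP flag. 'buf' is assumed
 * to hold a valid Ethernet + IPv4 header. */
static void tag_ipv4_sctp(uint8_t *buf)
{
	odph_ipv4hdr_t *ip = (odph_ipv4hdr_t *)(buf + ODPH_ETHHDR_LEN);

	ip->proto = ODPH_IPPROTO_SCTP;   /* 132, added by this series */
	ip->chksum = 0;                  /* recomputed before transmit */
}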
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 04232f84f..a4f7cecd2 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -164,7 +164,7 @@ static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t ds
CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
}
-static uint32_t pktio_pkt_set_seq(odp_packet_t pkt)
+static uint32_t pktio_pkt_set_seq(odp_packet_t pkt, size_t l4_hdr_len)
{
static uint32_t tstseq;
size_t off;
@@ -180,7 +180,7 @@ static uint32_t pktio_pkt_set_seq(odp_packet_t pkt)
head.magic = TEST_SEQ_MAGIC;
head.seq = tstseq;
- off += ODPH_UDPHDR_LEN;
+ off += l4_hdr_len;
if (odp_packet_copy_from_mem(pkt, off, sizeof(head), &head) != 0)
return TEST_SEQ_INVALID;
@@ -194,7 +194,7 @@ static uint32_t pktio_pkt_set_seq(odp_packet_t pkt)
return head.seq;
}
-static uint32_t pktio_pkt_seq(odp_packet_t pkt)
+static uint32_t pktio_pkt_seq_hdr(odp_packet_t pkt, size_t l4_hdr_len)
{
size_t off;
uint32_t seq = TEST_SEQ_INVALID;
@@ -212,7 +212,7 @@ static uint32_t pktio_pkt_seq(odp_packet_t pkt)
return TEST_SEQ_INVALID;
}
- off += ODPH_UDPHDR_LEN;
+ off += l4_hdr_len;
if (odp_packet_copy_to_mem(pkt, off, sizeof(head), &head) != 0) {
fprintf(stderr, "error: header copy failed\n");
return TEST_SEQ_INVALID;
@@ -250,11 +250,15 @@ static uint32_t pktio_pkt_seq(odp_packet_t pkt)
return seq;
}
-static uint32_t pktio_init_packet(odp_packet_t pkt)
+static uint32_t pktio_pkt_seq(odp_packet_t pkt)
+{
+ return pktio_pkt_seq_hdr(pkt, ODPH_UDPHDR_LEN);
+}
+
+static void pktio_init_packet_eth_ipv4(odp_packet_t pkt, uint8_t proto)
{
odph_ethhdr_t *eth;
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
char *buf;
uint16_t seq;
uint8_t src_mac[ODP_PKTIO_MACADDR_MAXSIZE] = PKTIO_SRC_MAC;
@@ -278,11 +282,22 @@ static uint32_t pktio_init_packet(odp_packet_t pkt)
ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
ip->tot_len = odp_cpu_to_be_16(pkt_len - ODPH_ETHHDR_LEN);
ip->ttl = 128;
- ip->proto = ODPH_IPPROTO_UDP;
+ ip->proto = proto;
seq = odp_atomic_fetch_inc_u32(&ip_seq);
ip->id = odp_cpu_to_be_16(seq);
ip->chksum = 0;
odph_ipv4_csum_update(pkt);
+}
+
+static uint32_t pktio_init_packet_udp(odp_packet_t pkt)
+{
+ odph_udphdr_t *udp;
+ char *buf;
+ int pkt_len = odp_packet_len(pkt);
+
+ buf = odp_packet_data(pkt);
+
+ pktio_init_packet_eth_ipv4(pkt, ODPH_IPPROTO_UDP);
/* UDP */
odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
@@ -293,40 +308,76 @@ static uint32_t pktio_init_packet(odp_packet_t pkt)
ODPH_ETHHDR_LEN - ODPH_IPV4HDR_LEN);
udp->chksum = 0;
- return pktio_pkt_set_seq(pkt);
+ return pktio_pkt_set_seq(pkt, ODPH_UDPHDR_LEN);
+}
+
+static uint32_t pktio_init_packet_sctp(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp;
+ char *buf;
+
+ buf = odp_packet_data(pkt);
+
+ pktio_init_packet_eth_ipv4(pkt, ODPH_IPPROTO_SCTP);
+
+ /* SCTP */
+ odp_packet_l4_offset_set(pkt, ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+ sctp = (odph_sctphdr_t *)(buf + ODPH_ETHHDR_LEN + ODPH_IPV4HDR_LEN);
+ sctp->src_port = odp_cpu_to_be_16(12049);
+ sctp->dst_port = odp_cpu_to_be_16(12050);
+ sctp->tag = 0;
+ sctp->chksum = 0;
+
+ return pktio_pkt_set_seq(pkt, ODPH_SCTPHDR_LEN);
}
static int pktio_zero_checksums(odp_packet_t pkt)
{
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
uint32_t len;
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, &len);
- if (ip->proto != ODPH_IPPROTO_UDP) {
+ ip->chksum = 0;
+
+ if (ip->proto == ODPH_IPPROTO_UDP) {
+ odph_udphdr_t *udp;
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, &len);
+ udp->chksum = 0;
+ } else if (ip->proto == ODPH_IPPROTO_SCTP) {
+ odph_sctphdr_t *sctp;
+
+ sctp = (odph_sctphdr_t *)odp_packet_l4_ptr(pkt, &len);
+ sctp->chksum = 0;
+ } else {
CU_FAIL("unexpected L4 protocol");
return -1;
}
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, &len);
-
- ip->chksum = 0;
- udp->chksum = 0;
-
return 0;
}
static int pktio_fixup_checksums(odp_packet_t pkt)
{
- odph_udphdr_t *udp;
+ odph_ipv4hdr_t *ip;
pktio_zero_checksums(pkt);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
-
odph_ipv4_csum_update(pkt);
- udp->chksum = odph_ipv4_udp_chksum(pkt);
+
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ if (ip->proto == ODPH_IPPROTO_UDP) {
+ odph_udphdr_t *udp;
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ udp->chksum = odph_ipv4_udp_chksum(pkt);
+ } else if (ip->proto == ODPH_IPPROTO_SCTP) {
+ odph_sctp_chksum_set(pkt);
+ } else {
+ CU_FAIL("unexpected L4 protocol");
+ return -1;
+ }
return 0;
}
@@ -419,12 +470,12 @@ static int flush_input_queue(odp_pktio_t pktio, odp_pktin_mode_t imode)
return 0;
}
-static int create_packets_cs(odp_packet_t pkt_tbl[],
- uint32_t pkt_seq[],
- int num,
- odp_pktio_t pktio_src,
- odp_pktio_t pktio_dst,
- odp_bool_t fix_cs)
+static int create_packets_udp(odp_packet_t pkt_tbl[],
+ uint32_t pkt_seq[],
+ int num,
+ odp_pktio_t pktio_src,
+ odp_pktio_t pktio_dst,
+ odp_bool_t fix_cs)
{
int i, ret;
@@ -433,7 +484,7 @@ static int create_packets_cs(odp_packet_t pkt_tbl[],
if (pkt_tbl[i] == ODP_PACKET_INVALID)
break;
- pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
+ pkt_seq[i] = pktio_init_packet_udp(pkt_tbl[i]);
if (pkt_seq[i] == TEST_SEQ_INVALID) {
odp_packet_free(pkt_tbl[i]);
break;
@@ -454,11 +505,42 @@ static int create_packets_cs(odp_packet_t pkt_tbl[],
return i;
}
+static int create_packets_sctp(odp_packet_t pkt_tbl[],
+ uint32_t pkt_seq[],
+ int num,
+ odp_pktio_t pktio_src,
+ odp_pktio_t pktio_dst)
+{
+ int i, ret;
+
+ for (i = 0; i < num; i++) {
+ pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt_tbl[i] == ODP_PACKET_INVALID)
+ break;
+
+ pkt_seq[i] = pktio_init_packet_sctp(pkt_tbl[i]);
+ if (pkt_seq[i] == TEST_SEQ_INVALID) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
+
+ ret = pktio_zero_checksums(pkt_tbl[i]);
+ if (ret != 0) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+ }
+
+ return i;
+}
+
static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
odp_pktio_t pktio_src, odp_pktio_t pktio_dst)
{
- return create_packets_cs(pkt_tbl, pkt_seq, num, pktio_src, pktio_dst,
- true);
+ return create_packets_udp(pkt_tbl, pkt_seq, num, pktio_src, pktio_dst,
+ true);
}
static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
@@ -506,9 +588,9 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
return num_pkts;
}
-static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
- uint32_t seq_tbl[], int num, txrx_mode_e mode,
- uint64_t ns)
+static int wait_for_packets_hdr(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+ uint32_t seq_tbl[], int num, txrx_mode_e mode,
+ uint64_t ns, size_t l4_hdr_len)
{
odp_time_t wait_time, end;
int num_rx = 0;
@@ -525,7 +607,8 @@ static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
break;
for (i = 0; i < n; ++i) {
- if (pktio_pkt_seq(pkt_tmp[i]) == seq_tbl[num_rx])
+ if (pktio_pkt_seq_hdr(pkt_tmp[i], l4_hdr_len) ==
+ seq_tbl[num_rx])
pkt_tbl[num_rx++] = pkt_tmp[i];
else
odp_packet_free(pkt_tmp[i]);
@@ -535,6 +618,14 @@ static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
return num_rx;
}
+static int wait_for_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
+ uint32_t seq_tbl[], int num, txrx_mode_e mode,
+ uint64_t ns)
+{
+ return wait_for_packets_hdr(pktio_rx, pkt_tbl, seq_tbl, num, mode, ns,
+ ODPH_UDPHDR_LEN);
+}
+
static int recv_packets_tmo(odp_pktio_t pktio, odp_packet_t pkt_tbl[],
uint32_t seq_tbl[], int num, recv_tmo_mode_e mode,
uint64_t tmo, uint64_t ns, int no_pkt)
@@ -1992,9 +2083,16 @@ static void pktio_test_chksum(void (*config_fn)(odp_pktio_t, odp_pktio_t),
_pktio_wait_linkup(pktio[i]);
}
- ret = create_packets_cs(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
- pktio_rx, false);
- CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+ ret = create_packets_udp(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx, false);
+ CU_ASSERT(ret == TX_BATCH_LEN);
+ if (ret != TX_BATCH_LEN) {
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+ return;
+ }
ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
CU_ASSERT_FATAL(ret > 0);
@@ -2019,6 +2117,75 @@ static void pktio_test_chksum(void (*config_fn)(odp_pktio_t, odp_pktio_t),
}
}
+static void pktio_test_chksum_sctp(void (*config_fn)(odp_pktio_t, odp_pktio_t),
+ void (*prep_fn)(odp_packet_t pkt),
+ void (*test_fn)(odp_packet_t pkt))
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {ODP_PKTIO_INVALID};
+ pktio_info_t pktio_rx_info;
+ odp_pktout_queue_t pktout_queue;
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ int ret;
+ int i, num_rx;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ pktio_rx_info.id = pktio_rx;
+ pktio_rx_info.inq = ODP_QUEUE_INVALID;
+ pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ config_fn(pktio_tx, pktio_rx);
+
+ for (i = 0; i < num_ifaces; ++i) {
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ _pktio_wait_linkup(pktio[i]);
+ }
+
+ ret = create_packets_sctp(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
+ pktio_rx);
+ CU_ASSERT(ret == TX_BATCH_LEN);
+ if (ret != TX_BATCH_LEN) {
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+ return;
+ }
+
+ ret = odp_pktout_queue(pktio_tx, &pktout_queue, 1);
+ CU_ASSERT_FATAL(ret > 0);
+
+ for (i = 0; i < TX_BATCH_LEN; i++)
+ if (prep_fn)
+ prep_fn(pkt_tbl[i]);
+
+ send_packets(pktout_queue, pkt_tbl, TX_BATCH_LEN);
+ num_rx = wait_for_packets_hdr(&pktio_rx_info, pkt_tbl, pkt_seq,
+ TX_BATCH_LEN, TXRX_MODE_MULTI,
+ ODP_TIME_SEC_IN_NS, ODPH_SCTPHDR_LEN);
+ CU_ASSERT(num_rx == TX_BATCH_LEN);
+ for (i = 0; i < num_rx; i++) {
+ test_fn(pkt_tbl[i]);
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
static int pktio_check_chksum_in_ipv4(void)
{
odp_pktio_t pktio;
@@ -2134,6 +2301,65 @@ static void pktio_test_chksum_in_udp(void)
pktio_test_chksum_in_udp_test);
}
+static int pktio_check_chksum_in_sctp(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int idx = (num_ifaces == 1) ? 0 : 1;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[idx], pool[idx], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktin.bit.sctp_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_chksum_in_sctp_config(odp_pktio_t pktio_tx ODP_UNUSED,
+ odp_pktio_t pktio_rx)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktin.bit.sctp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktin.bit.sctp_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_rx, &config) == 0);
+}
+
+static void pktio_test_chksum_in_sctp_prep(odp_packet_t pkt)
+{
+ odp_packet_has_ipv4_set(pkt, 1);
+ odp_packet_has_sctp_set(pkt, 1);
+ odph_ipv4_csum_update(pkt);
+ odph_sctp_chksum_set(pkt);
+}
+
+static void pktio_test_chksum_in_sctp_test(odp_packet_t pkt)
+{
+ CU_ASSERT(odp_packet_l4_chksum_status(pkt) == ODP_PACKET_CHKSUM_OK);
+}
+
+static void pktio_test_chksum_in_sctp(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_in_sctp_config,
+ pktio_test_chksum_in_sctp_prep,
+ pktio_test_chksum_in_sctp_test);
+}
+
static int pktio_check_chksum_out_ipv4(void)
{
odp_pktio_t pktio;
@@ -2332,8 +2558,10 @@ static void pktio_test_chksum_out_udp_ovr_test(odp_packet_t pkt)
odph_udphdr_t *udp = odp_packet_l4_ptr(pkt, NULL);
CU_ASSERT(udp != NULL);
- if (udp != NULL)
+ if (udp != NULL) {
CU_ASSERT(udp->chksum != 0);
+ CU_ASSERT(!odph_udp_chksum_verify(pkt));
+ }
}
static void pktio_test_chksum_out_udp_ovr(void)
@@ -2367,6 +2595,126 @@ static void pktio_test_chksum_out_udp_pktio(void)
pktio_test_chksum_out_udp_test);
}
+static int pktio_check_chksum_out_sctp(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 ||
+ !capa.config.pktout.bit.sctp_chksum_ena ||
+ !capa.config.pktout.bit.sctp_chksum)
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_chksum_out_sctp_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.sctp_chksum_ena = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+static void pktio_test_chksum_out_sctp_test(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(sctp != NULL);
+ if (sctp != NULL) {
+ CU_ASSERT(sctp->chksum != 0);
+ CU_ASSERT(!odph_sctp_chksum_verify(pkt));
+ }
+}
+
+static void pktio_test_chksum_out_sctp_no_ovr_prep(odp_packet_t pkt)
+{
+ odph_ipv4_csum_update(pkt);
+ odp_packet_l4_chksum_insert(pkt, false);
+}
+
+static void pktio_test_chksum_out_sctp_no_ovr_test(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(sctp != NULL);
+ if (sctp != NULL)
+ CU_ASSERT(sctp->chksum == 0);
+}
+
+static void pktio_test_chksum_out_sctp_no_ovr(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_out_sctp_config,
+ pktio_test_chksum_out_sctp_no_ovr_prep,
+ pktio_test_chksum_out_sctp_no_ovr_test);
+}
+
+static void pktio_test_chksum_out_sctp_ovr_prep(odp_packet_t pkt)
+{
+ odp_packet_l4_chksum_insert(pkt, true);
+}
+
+static void pktio_test_chksum_out_sctp_ovr_test(odp_packet_t pkt)
+{
+ odph_sctphdr_t *sctp = odp_packet_l4_ptr(pkt, NULL);
+
+ CU_ASSERT(sctp != NULL);
+ if (sctp != NULL) {
+ CU_ASSERT(sctp->chksum != 0);
+ CU_ASSERT(!odph_sctp_chksum_verify(pkt));
+ }
+}
+
+static void pktio_test_chksum_out_sctp_ovr(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_out_sctp_config,
+ pktio_test_chksum_out_sctp_ovr_prep,
+ pktio_test_chksum_out_sctp_ovr_test);
+}
+
+static void pktio_test_chksum_out_sctp_pktio_config(odp_pktio_t pktio_tx,
+ odp_pktio_t pktio_rx
+ ODP_UNUSED)
+{
+ odp_pktio_capability_t capa;
+ odp_pktio_config_t config;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &capa) == 0);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum_ena);
+ CU_ASSERT_FATAL(capa.config.pktout.bit.sctp_chksum);
+
+ odp_pktio_config_init(&config);
+ config.pktout.bit.sctp_chksum_ena = 1;
+ config.pktout.bit.sctp_chksum = 1;
+ CU_ASSERT_FATAL(odp_pktio_config(pktio_tx, &config) == 0);
+}
+
+static void pktio_test_chksum_out_sctp_pktio(void)
+{
+ pktio_test_chksum_sctp(pktio_test_chksum_out_sctp_pktio_config,
+ NULL,
+ pktio_test_chksum_out_sctp_test);
+}
+
static int create_pool(const char *iface, int num)
{
char pool_name[ODP_POOL_NAME_LEN];
@@ -2512,6 +2860,8 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
pktio_check_chksum_in_ipv4),
ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_in_udp,
pktio_check_chksum_in_udp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_in_sctp,
+ pktio_check_chksum_in_sctp),
ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_ipv4_no_ovr,
pktio_check_chksum_out_ipv4),
ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_ipv4_pktio,
@@ -2524,6 +2874,12 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
pktio_check_chksum_out_udp),
ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_udp_ovr,
pktio_check_chksum_out_udp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_sctp_no_ovr,
+ pktio_check_chksum_out_sctp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_sctp_pktio,
+ pktio_check_chksum_out_sctp),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_chksum_out_sctp_ovr,
+ pktio_check_chksum_out_sctp),
ODP_TEST_INFO_NULL
};
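
The checksum tests above lean on the new software SCTP checksum helpers. A sketch of the round-trip, assuming the odph_sctp_chksum_set()/odph_sctp_chksum_verify() helpers added in this series and a packet with valid L3/L4 offsets and an SCTP header:

#include <odp_api.h>
#include <odp/helper/odph_api.h>

static int sctp_chksum_roundtrip(odp_packet_t pkt)
{
	/* Compute and write the CRC32c checksum into the SCTP header */
	if (odph_sctp_chksum_set(pkt))
		return -1;

	/* Returns 0 when the stored checksum matches the payload */
	return odph_sctp_chksum_verify(pkt);
}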
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 2795e857f..0dbdd0ddf 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -11,6 +11,16 @@
#define PKT_LEN 400
#define PKT_NUM 500
+#define MAX_NUM_DEFAULT (10 * 1024 * 1024)
+
+typedef struct {
+ odp_barrier_t init_barrier;
+ odp_atomic_u32_t index;
+ uint32_t nb_threads;
+ odp_pool_t pool;
+} global_shared_mem_t;
+
+static global_shared_mem_t *global_mem;
static const int default_buffer_size = 1500;
static const int default_buffer_num = 1000;
@@ -32,7 +42,7 @@ static void pool_test_create_destroy_buffer(void)
odp_pool_param_init(&param);
- param.type = ODP_POOL_BUFFER,
+ param.type = ODP_POOL_BUFFER;
param.buf.size = default_buffer_size;
param.buf.align = ODP_CACHE_LINE_SIZE;
param.buf.num = default_buffer_num;
@@ -106,7 +116,7 @@ static void pool_test_alloc_packet(void)
odp_pool_param_init(&param);
- param.type = ODP_POOL_PACKET,
+ param.type = ODP_POOL_PACKET;
param.pkt.num = PKT_NUM;
param.pkt.len = PKT_LEN;
@@ -145,7 +155,7 @@ static void pool_test_alloc_packet_subparam(void)
odp_pool_param_init(&param);
- param.type = ODP_POOL_PACKET,
+ param.type = ODP_POOL_PACKET;
param.pkt.num = PKT_NUM;
param.pkt.len = PKT_LEN;
param.pkt.num_subparam = num_sub;
@@ -270,6 +280,252 @@ static void pool_test_info_data_range(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void pool_test_buf_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num, num, i;
+ odp_shm_t shm;
+ odp_buffer_t *buf;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ max_num = MAX_NUM_DEFAULT;
+ if (capa.buf.max_num)
+ max_num = capa.buf.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.num = max_num;
+ param.buf.size = 10;
+
+ pool = odp_pool_create("test_buf_max_num", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm",
+ max_num * sizeof(odp_buffer_t),
+ sizeof(odp_buffer_t), 0);
+
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ buf = odp_shm_addr(shm);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ buf[num] = odp_buffer_alloc(pool);
+
+ if (buf[num] != ODP_BUFFER_INVALID)
+ num++;
+ }
+
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_buffer_free(buf[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_pkt_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num, num, i;
+ odp_shm_t shm;
+ odp_packet_t *pkt;
+ uint32_t len = 10;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ max_num = MAX_NUM_DEFAULT;
+ if (capa.pkt.max_num)
+ max_num = capa.pkt.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_PACKET;
+ param.pkt.num = max_num;
+ param.pkt.max_num = max_num;
+ param.pkt.len = len;
+ param.pkt.max_len = len;
+ param.pkt.headroom = 0;
+
+ pool = odp_pool_create("test_packet_max_num", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm",
+ max_num * sizeof(odp_packet_t),
+ sizeof(odp_packet_t), 0);
+
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ pkt = odp_shm_addr(shm);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ pkt[num] = odp_packet_alloc(pool, len);
+
+ if (pkt[num] != ODP_PACKET_INVALID)
+ num++;
+ }
+
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_packet_free(pkt[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void pool_test_tmo_max_num(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ odp_pool_capability_t capa;
+ uint32_t max_num, num, i;
+ odp_shm_t shm;
+ odp_timeout_t *tmo;
+
+ CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
+
+ max_num = MAX_NUM_DEFAULT;
+ if (capa.tmo.max_num)
+ max_num = capa.tmo.max_num;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = max_num;
+
+ pool = odp_pool_create("test_tmo_max_num", &param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ shm = odp_shm_reserve("test_max_num_shm",
+ max_num * sizeof(odp_timeout_t),
+ sizeof(odp_timeout_t), 0);
+
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+
+ tmo = odp_shm_addr(shm);
+
+ num = 0;
+ for (i = 0; i < max_num; i++) {
+ tmo[num] = odp_timeout_alloc(pool);
+
+ if (tmo[num] != ODP_TIMEOUT_INVALID)
+ num++;
+ }
+
+ CU_ASSERT(num == max_num);
+
+ for (i = 0; i < num; i++)
+ odp_timeout_free(tmo[i]);
+
+ CU_ASSERT(odp_shm_free(shm) == 0);
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+static void buffer_alloc_loop(odp_pool_t pool, int num, int buffer_size)
+{
+ int allocs;
+
+ /* Allocate, modify, and free buffers */
+ for (allocs = 0; allocs < num;) {
+ odp_buffer_t buf;
+ uint8_t *data;
+ int i;
+
+ buf = odp_buffer_alloc(pool);
+ if (buf == ODP_BUFFER_INVALID)
+ continue;
+
+ data = odp_buffer_addr(buf);
+
+ for (i = 0; i < buffer_size; i++)
+ data[i] = i;
+
+ odp_buffer_free(buf);
+ allocs++;
+ }
+}
+
+static int run_pool_test_create_after_fork(void *arg ODP_UNUSED)
+{
+ int thr_index;
+
+ thr_index = odp_atomic_fetch_inc_u32(&global_mem->index);
+
+ /* Thread 0 allocates the shared pool */
+ if (thr_index == 0) {
+ odp_pool_t pool;
+ odp_pool_param_t param;
+
+ odp_pool_param_init(&param);
+
+ param.type = ODP_POOL_BUFFER;
+ param.buf.size = default_buffer_size;
+ param.buf.align = ODP_CACHE_LINE_SIZE;
+ param.buf.num = default_buffer_num;
+
+ pool = odp_pool_create(NULL, &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+ global_mem->pool = pool;
+ }
+
+ odp_barrier_wait(&global_mem->init_barrier);
+
+ buffer_alloc_loop(global_mem->pool, default_buffer_num,
+ default_buffer_size);
+
+ return CU_get_number_of_failures();
+}
+
+static void pool_test_create_after_fork(void)
+{
+ odp_shm_t shm;
+ odp_cpumask_t unused;
+ pthrd_arg thrdarg;
+
+ /* No single VA required since reserve is done before fork */
+ shm = odp_shm_reserve(NULL, sizeof(global_shared_mem_t), 0, 0);
+ CU_ASSERT_FATAL(shm != ODP_SHM_INVALID);
+ global_mem = odp_shm_addr(shm);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(global_mem);
+
+ thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+ if (thrdarg.numthrds > MAX_WORKERS)
+ thrdarg.numthrds = MAX_WORKERS;
+
+ global_mem->nb_threads = thrdarg.numthrds;
+ global_mem->pool = ODP_POOL_INVALID;
+ odp_barrier_init(&global_mem->init_barrier, thrdarg.numthrds + 1);
+ odp_atomic_init_u32(&global_mem->index, 0);
+
+ /* Fork here */
+ odp_cunit_thread_create(run_pool_test_create_after_fork, &thrdarg);
+
+ /* Wait until thread 0 has created the test pool */
+ odp_barrier_wait(&global_mem->init_barrier);
+
+ buffer_alloc_loop(global_mem->pool, default_buffer_num,
+ default_buffer_size);
+
+ /* Wait for all thread endings */
+ CU_ASSERT(odp_cunit_thread_exit(&thrdarg) >= 0);
+
+ CU_ASSERT(!odp_pool_destroy(global_mem->pool));
+
+ CU_ASSERT(!odp_shm_free(shm));
+}
+
odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO(pool_test_create_destroy_buffer),
ODP_TEST_INFO(pool_test_create_destroy_packet),
@@ -279,6 +535,10 @@ odp_testinfo_t pool_suite[] = {
ODP_TEST_INFO(pool_test_info_packet),
ODP_TEST_INFO(pool_test_lookup_info_print),
ODP_TEST_INFO(pool_test_info_data_range),
+ ODP_TEST_INFO(pool_test_buf_max_num),
+ ODP_TEST_INFO(pool_test_pkt_max_num),
+ ODP_TEST_INFO(pool_test_tmo_max_num),
+ ODP_TEST_INFO(pool_test_create_after_fork),
ODP_TEST_INFO_NULL,
};
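
The three max_num tests share one capability-clamp pattern; a sketch of it, assuming a zero capability field means "no stated limit" so the test falls back to a large default:

#include <odp_api.h>

#define MAX_NUM_DEFAULT (10 * 1024 * 1024)

static uint32_t effective_buf_max_num(void)
{
	odp_pool_capability_t capa;

	if (odp_pool_capability(&capa))
		return 0;

	/* Zero means the implementation states no limit */
	return capa.buf.max_num ? capa.buf.max_num : MAX_NUM_DEFAULT;
}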
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
index 0bd1a06a1..aab95bab2 100644
--- a/test/validation/api/queue/queue.c
+++ b/test/validation/api/queue/queue.c
@@ -127,20 +127,15 @@ static void queue_test_capa(void)
odp_queue_param_t qparams;
char name[ODP_QUEUE_NAME_LEN];
odp_queue_t queue[MAX_QUEUES];
- uint32_t num_queues, min, i, j;
+ uint32_t num_queues, min, i;
memset(&capa, 0, sizeof(odp_queue_capability_t));
CU_ASSERT(odp_queue_capability(&capa) == 0);
CU_ASSERT(capa.max_queues != 0);
- CU_ASSERT(capa.max_sched_groups != 0);
- CU_ASSERT(capa.sched_prios != 0);
CU_ASSERT(capa.plain.max_num != 0);
- CU_ASSERT(capa.sched.max_num != 0);
min = capa.plain.max_num;
- if (min > capa.sched.max_num)
- min = capa.sched.max_num;
CU_ASSERT(capa.max_queues >= min);
@@ -152,33 +147,26 @@ static void queue_test_capa(void)
odp_queue_param_init(&qparams);
CU_ASSERT(qparams.nonblocking == ODP_BLOCKING);
- for (j = 0; j < 2; j++) {
- if (j == 0) {
- num_queues = capa.plain.max_num;
- } else {
- num_queues = capa.sched.max_num;
- qparams.type = ODP_QUEUE_TYPE_SCHED;
- }
-
- if (num_queues > MAX_QUEUES)
- num_queues = MAX_QUEUES;
+ num_queues = capa.plain.max_num;
- for (i = 0; i < num_queues; i++) {
- generate_name(name, i);
- queue[i] = odp_queue_create(name, &qparams);
+ if (num_queues > MAX_QUEUES)
+ num_queues = MAX_QUEUES;
- if (queue[i] == ODP_QUEUE_INVALID) {
- CU_FAIL("Queue create failed");
- num_queues = i;
- break;
- }
+ for (i = 0; i < num_queues; i++) {
+ generate_name(name, i);
+ queue[i] = odp_queue_create(name, &qparams);
- CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
+ if (queue[i] == ODP_QUEUE_INVALID) {
+ CU_FAIL("Queue create failed");
+ num_queues = i;
+ break;
}
- for (i = 0; i < num_queues; i++)
- CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
+ CU_ASSERT(odp_queue_lookup(name) != ODP_QUEUE_INVALID);
}
+
+ for (i = 0; i < num_queues; i++)
+ CU_ASSERT(odp_queue_destroy(queue[i]) == 0);
}
static void queue_test_mode(void)
@@ -605,10 +593,23 @@ static void queue_test_param(void)
odp_queue_param_t qparams;
odp_buffer_t enbuf;
- /* Schedule type queue */
+ /* Defaults */
odp_queue_param_init(&qparams);
+ CU_ASSERT(qparams.type == ODP_QUEUE_TYPE_PLAIN);
+ CU_ASSERT(qparams.enq_mode == ODP_QUEUE_OP_MT);
+ CU_ASSERT(qparams.deq_mode == ODP_QUEUE_OP_MT);
+ CU_ASSERT(qparams.sched.prio == odp_schedule_default_prio());
+ CU_ASSERT(qparams.sched.sync == ODP_SCHED_SYNC_PARALLEL);
+ CU_ASSERT(qparams.sched.group == ODP_SCHED_GROUP_ALL);
+ CU_ASSERT(qparams.sched.lock_count == 0);
+ CU_ASSERT(qparams.nonblocking == ODP_BLOCKING);
+ CU_ASSERT(qparams.context == NULL);
+ CU_ASSERT(qparams.context_len == 0);
+ CU_ASSERT(qparams.size == 0);
+
+ /* Schedule type queue */
qparams.type = ODP_QUEUE_TYPE_SCHED;
- qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
+ qparams.sched.prio = odp_schedule_min_prio();
qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparams.sched.group = ODP_SCHED_GROUP_WORKER;
@@ -618,7 +619,7 @@ static void queue_test_param(void)
odp_queue_to_u64(ODP_QUEUE_INVALID));
CU_ASSERT(queue == odp_queue_lookup("test_queue"));
CU_ASSERT(ODP_QUEUE_TYPE_SCHED == odp_queue_type(queue));
- CU_ASSERT(ODP_SCHED_PRIO_LOWEST == odp_queue_sched_prio(queue));
+ CU_ASSERT(odp_schedule_min_prio() == odp_queue_sched_prio(queue));
CU_ASSERT(ODP_SCHED_SYNC_PARALLEL == odp_queue_sched_type(queue));
CU_ASSERT(ODP_SCHED_GROUP_WORKER == odp_queue_sched_group(queue));
@@ -702,6 +703,7 @@ static void queue_test_info(void)
odp_queue_info_t info;
odp_queue_param_t param;
odp_queue_capability_t capability;
+ odp_schedule_capability_t sched_capa;
char q_plain_ctx[] = "test_q_plain context data";
char q_order_ctx[] = "test_q_order context data";
uint32_t lock_count;
@@ -716,13 +718,14 @@ static void queue_test_info(void)
memset(&capability, 0, sizeof(odp_queue_capability_t));
CU_ASSERT(odp_queue_capability(&capability) == 0);
+ CU_ASSERT(odp_schedule_capability(&sched_capa) == 0);
/* Create a scheduled ordered queue with explicitly set params */
odp_queue_param_init(&param);
param.type = ODP_QUEUE_TYPE_SCHED;
- param.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ param.sched.prio = odp_schedule_default_prio();
param.sched.sync = ODP_SCHED_SYNC_ORDERED;
param.sched.group = ODP_SCHED_GROUP_ALL;
- param.sched.lock_count = capability.max_ordered_locks;
+ param.sched.lock_count = sched_capa.max_ordered_locks;
if (param.sched.lock_count == 0)
printf("\n Ordered locks NOT supported\n");
param.context = q_order_ctx;
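
The pattern behind these changes: the fixed ODP_SCHED_PRIO_* constants give way to runtime priority queries. A minimal sketch of creating a scheduled queue at minimum priority with the new API:

#include <odp_api.h>

static odp_queue_t create_low_prio_queue(const char *name)
{
	odp_queue_param_t param;

	odp_queue_param_init(&param);
	param.type = ODP_QUEUE_TYPE_SCHED;
	param.sched.prio = odp_schedule_min_prio(); /* was ODP_SCHED_PRIO_LOWEST */
	param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
	param.sched.group = ODP_SCHED_GROUP_ALL;

	return odp_queue_create(name, &param);
}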
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 2e44d3248..bdcd7b2dc 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -8,6 +8,7 @@
#include <odp_api.h>
#include "odp_cunit_common.h"
+#include <odp/helper/odph_api.h>
#define MAX_WORKERS_THREADS 32
#define MAX_ORDERED_LOCKS 2
@@ -20,6 +21,12 @@
#define NUM_BUFS_PAUSE 1000
#define NUM_BUFS_BEFORE_PAUSE 10
#define NUM_GROUPS 2
+#define MAX_QUEUES (64 * 1024)
+
+#define TEST_QUEUE_SIZE_NUM_EV 50
+
+#define MAX_FLOWS 16
+#define FLOW_TEST_NUM_EV (10 * MAX_FLOWS)
#define GLOBALS_SHM_NAME "test_globals"
#define MSG_POOL_NAME "msg_pool"
@@ -47,7 +54,7 @@
#define CHAOS_PTR_TO_NDX(p) ((uint64_t)(uint32_t)(uintptr_t)p)
#define CHAOS_NDX_TO_PTR(n) ((void *)(uintptr_t)n)
-#define ODP_WAIT_TOLERANCE (60 * ODP_TIME_MSEC_IN_NS)
+#define ODP_WAIT_TOLERANCE (150 * ODP_TIME_MSEC_IN_NS)
/* Test global variables */
typedef struct {
@@ -59,6 +66,7 @@ typedef struct {
odp_pool_t pool;
odp_pool_t queue_ctx_pool;
uint32_t max_sched_queue_size;
+ uint64_t num_flows;
odp_ticketlock_t lock;
odp_spinlock_t atomic_lock;
struct {
@@ -139,6 +147,21 @@ static void release_context(odp_schedule_sync_t sync)
odp_schedule_release_ordered();
}
+static void scheduler_test_capa(void)
+{
+ odp_schedule_capability_t capa;
+ odp_queue_capability_t queue_capa;
+
+ memset(&capa, 0, sizeof(odp_schedule_capability_t));
+ CU_ASSERT_FATAL(odp_schedule_capability(&capa) == 0);
+ CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+
+ CU_ASSERT(capa.max_groups != 0);
+ CU_ASSERT(capa.max_prios != 0);
+ CU_ASSERT(capa.max_queues != 0);
+ CU_ASSERT(queue_capa.max_queues >= capa.max_queues);
+}
+
static void scheduler_test_wait_time(void)
{
int i;
@@ -156,7 +179,7 @@ static void scheduler_test_wait_time(void)
odp_queue_param_init(&qp);
qp.type = ODP_QUEUE_TYPE_SCHED;
qp.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- qp.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qp.sched.prio = odp_schedule_default_prio();
qp.sched.group = ODP_SCHED_GROUP_ALL;
queue = odp_queue_create("dummy_queue", &qp);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
@@ -185,20 +208,43 @@ static void scheduler_test_wait_time(void)
upper_limit = odp_time_local_from_ns(5 * ODP_TIME_SEC_IN_NS +
ODP_WAIT_TOLERANCE);
- CU_ASSERT(odp_time_cmp(diff, lower_limit) >= 0);
- CU_ASSERT(odp_time_cmp(diff, upper_limit) <= 0);
+ if (odp_time_cmp(diff, lower_limit) < 0) {
+ fprintf(stderr, "Below lower limit: "
+ "diff is %" PRIu64 ", lower_limit %" PRIu64 "\n",
+ odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
+ CU_FAIL("Below lower limit\n");
+ }
+
+ if (odp_time_cmp(diff, upper_limit) > 0) {
+ fprintf(stderr, "Exceed upper limit: "
+ "diff is %" PRIu64 ", upper_limit %" PRIu64 "\n",
+ odp_time_to_ns(diff), odp_time_to_ns(upper_limit));
+ CU_FAIL("Exceed upper limit\n");
+ }
CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
}
static void scheduler_test_num_prio(void)
{
- int prio;
+ int num_prio, min_prio, max_prio, default_prio;
- prio = odp_schedule_num_prio();
+ num_prio = odp_schedule_num_prio();
+ CU_ASSERT(num_prio > 0);
- CU_ASSERT(prio > 0);
- CU_ASSERT(prio == odp_schedule_num_prio());
+ min_prio = odp_schedule_min_prio();
+ max_prio = odp_schedule_max_prio();
+ default_prio = odp_schedule_default_prio();
+
+ CU_ASSERT(min_prio <= max_prio);
+ CU_ASSERT(min_prio <= default_prio);
+ CU_ASSERT(default_prio <= max_prio);
+ CU_ASSERT(num_prio == (max_prio - min_prio + 1));
+
+ CU_ASSERT(min_prio == ODP_SCHED_PRIO_LOWEST);
+ CU_ASSERT(max_prio == ODP_SCHED_PRIO_HIGHEST);
+ CU_ASSERT(default_prio == ODP_SCHED_PRIO_DEFAULT);
+ CU_ASSERT(default_prio == ODP_SCHED_PRIO_NORMAL);
}
static void scheduler_test_queue_destroy(void)
@@ -228,7 +274,7 @@ static void scheduler_test_queue_destroy(void)
for (i = 0; i < 3; i++) {
qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.prio = odp_schedule_default_prio();
qp.sched.sync = sync[i];
qp.sched.group = ODP_SCHED_GROUP_ALL;
@@ -266,6 +312,201 @@ static void scheduler_test_queue_destroy(void)
CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
}
+static void scheduler_test_wait(void)
+{
+ odp_pool_t p;
+ odp_pool_param_t pool_param;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue, from;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ uint32_t *u32;
+ uint32_t i, j, num_enq, retry;
+ int ret;
+ uint32_t num_ev = 50;
+ uint32_t num_retry = 1000;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.size = 10;
+ pool_param.buf.num = num_ev;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ p = odp_pool_create("sched_test_wait", &pool_param);
+
+ CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ queue = odp_queue_create("sched_test_wait", &queue_param);
+
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (i = 0; i < 4; i++) {
+ num_enq = 0;
+
+ for (j = 0; j < num_ev; j++) {
+ buf = odp_buffer_alloc(p);
+
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ u32 = odp_buffer_addr(buf);
+ u32[0] = MAGIC;
+
+ ev = odp_buffer_to_event(buf);
+ if (!(CU_ASSERT(odp_queue_enq(queue, ev) == 0))) {
+ odp_buffer_free(buf);
+ continue;
+ }
+
+ num_enq++;
+ }
+
+ CU_ASSERT(num_enq == num_ev);
+
+ for (j = 0; j < num_enq; j++) {
+ if (i == 0) {
+ ev = odp_schedule(&from, ODP_SCHED_WAIT);
+ } else if (i == 1) {
+ ret = odp_schedule_multi_wait(&from, &ev, 1);
+ CU_ASSERT_FATAL(ret == 1);
+ } else if (i == 2) {
+ retry = 0;
+ do {
+ ev = odp_schedule(&from,
+ ODP_SCHED_NO_WAIT);
+ retry++;
+ } while (ev == ODP_EVENT_INVALID &&
+ retry < num_retry);
+ } else {
+ retry = 0;
+ do {
+ ret = odp_schedule_multi_no_wait(&from,
+ &ev,
+ 1);
+ retry++;
+ } while (ret == 0 && retry < num_retry);
+ CU_ASSERT_FATAL(ret == 1);
+ }
+
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(from == queue);
+
+ buf = odp_buffer_from_event(ev);
+ u32 = odp_buffer_addr(buf);
+
+ CU_ASSERT(u32[0] == MAGIC);
+
+ odp_buffer_free(buf);
+ }
+ }
+
+ /* Make sure that scheduler is empty */
+ retry = 0;
+ do {
+ ret = odp_schedule_multi_no_wait(NULL, &ev, 1);
+ CU_ASSERT(ret == 0 || ret == 1);
+
+ if (ret)
+ odp_event_free(ev);
+ else
+ retry++;
+ } while (ret || retry < num_retry);
+
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+ CU_ASSERT_FATAL(odp_pool_destroy(p) == 0);
+}
+
+static void scheduler_test_queue_size(void)
+{
+ odp_queue_capability_t queue_capa;
+ odp_schedule_config_t default_config;
+ odp_pool_t pool;
+ odp_pool_param_t pool_param;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue, from;
+ odp_event_t ev;
+ odp_buffer_t buf;
+ uint32_t i, j, queue_size, num;
+ int ret;
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+ ODP_SCHED_SYNC_ATOMIC,
+ ODP_SCHED_SYNC_ORDERED};
+
+ CU_ASSERT_FATAL(odp_queue_capability(&queue_capa) == 0);
+ queue_size = TEST_QUEUE_SIZE_NUM_EV;
+ odp_schedule_config_init(&default_config);
+ if (default_config.queue_size &&
+ queue_size > default_config.queue_size)
+ queue_size = default_config.queue_size;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.size = 100;
+ pool_param.buf.align = 0;
+ pool_param.buf.num = TEST_QUEUE_SIZE_NUM_EV;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("test_queue_size", &pool_param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < 3; i++) {
+ /* Ensure that scheduler is empty */
+ for (j = 0; j < 10;) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ else
+ j++;
+ }
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = sync[i];
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.size = queue_size;
+
+ queue = odp_queue_create("test_queue_size", &queue_param);
+
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (j = 0; j < queue_size; j++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT(ret == 0);
+
+ if (ret)
+ odp_event_free(ev);
+ }
+
+ num = 0;
+ for (j = 0; j < 100 * TEST_QUEUE_SIZE_NUM_EV; j++) {
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ CU_ASSERT(from == queue);
+ odp_event_free(ev);
+ num++;
+ }
+
+ CU_ASSERT(num == queue_size);
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+ }
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
static void scheduler_test_groups(void)
{
odp_pool_t p;
@@ -385,7 +626,7 @@ static void scheduler_test_groups(void)
odp_queue_param_init(&qp);
qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.prio = odp_schedule_default_prio();
qp.sched.sync = sync[i];
qp.sched.group = mygrp1;
@@ -603,7 +844,7 @@ static void chaos_run(unsigned int qtype)
pool = odp_pool_create("sched_chaos_pool", &params);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
qp.type = ODP_QUEUE_TYPE_SCHED;
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.prio = odp_schedule_default_prio();
qp.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
@@ -1425,6 +1666,8 @@ static int create_queues(test_globals_t *globals)
{
int i, j, prios, rc;
odp_queue_capability_t capa;
+ odp_schedule_capability_t sched_capa;
+ odp_schedule_config_t default_config;
odp_pool_t queue_ctx_pool;
odp_pool_param_t params;
odp_buffer_t queue_ctx_buf;
@@ -1441,17 +1684,24 @@ static int create_queues(test_globals_t *globals)
return -1;
}
+ if (odp_schedule_capability(&sched_capa) < 0) {
+ printf("Queue capability query failed\n");
+ return -1;
+ }
+
/* Limit to test maximum */
- if (capa.max_ordered_locks > MAX_ORDERED_LOCKS) {
- capa.max_ordered_locks = MAX_ORDERED_LOCKS;
+ if (sched_capa.max_ordered_locks > MAX_ORDERED_LOCKS) {
+ sched_capa.max_ordered_locks = MAX_ORDERED_LOCKS;
printf("Testing only %u ordered locks\n",
- capa.max_ordered_locks);
+ sched_capa.max_ordered_locks);
}
globals->max_sched_queue_size = BUFS_PER_QUEUE_EXCL;
- if (capa.sched.max_size && capa.sched.max_size < BUFS_PER_QUEUE_EXCL) {
- printf("Max sched queue size %u\n", capa.sched.max_size);
- globals->max_sched_queue_size = capa.sched.max_size;
+ odp_schedule_config_init(&default_config);
+ if (default_config.queue_size &&
+ globals->max_sched_queue_size > default_config.queue_size) {
+ printf("Max sched queue size %u\n", default_config.queue_size);
+ globals->max_sched_queue_size = default_config.queue_size;
}
prios = odp_schedule_num_prio();
@@ -1461,7 +1711,7 @@ static int create_queues(test_globals_t *globals)
queues_per_prio = QUEUES_PER_PRIO;
num_sched = (prios * queues_per_prio * sched_types) + CHAOS_NUM_QUEUES;
num_plain = (prios * queues_per_prio);
- while ((num_sched > capa.sched.max_num ||
+ while ((num_sched > default_config.num_queues ||
num_plain > capa.plain.max_num ||
num_sched + num_plain > capa.max_queues) && queues_per_prio) {
queues_per_prio--;
@@ -1546,7 +1796,7 @@ static int create_queues(test_globals_t *globals)
snprintf(name, sizeof(name), "sched_%d_%d_o", i, j);
p.sched.sync = ODP_SCHED_SYNC_ORDERED;
- p.sched.lock_count = capa.max_ordered_locks;
+ p.sched.lock_count = sched_capa.max_ordered_locks;
p.size = 0;
q = odp_queue_create(name, &p);
@@ -1555,12 +1805,12 @@ static int create_queues(test_globals_t *globals)
return -1;
}
if (odp_queue_lock_count(q) !=
- capa.max_ordered_locks) {
+ sched_capa.max_ordered_locks) {
printf("Queue %" PRIu64 " created with "
"%d locks instead of expected %d\n",
odp_queue_to_u64(q),
odp_queue_lock_count(q),
- capa.max_ordered_locks);
+ sched_capa.max_ordered_locks);
return -1;
}
@@ -1577,7 +1827,7 @@ static int create_queues(test_globals_t *globals)
qctx->sequence = 0;
for (ndx = 0;
- ndx < capa.max_ordered_locks;
+ ndx < sched_capa.max_ordered_locks;
ndx++) {
qctx->lock_sequence[ndx] = 0;
}
@@ -1601,6 +1851,32 @@ static int scheduler_suite_init(void)
odp_pool_t pool;
thread_args_t *args;
odp_pool_param_t params;
+ uint64_t num_flows;
+ odp_schedule_capability_t sched_capa;
+ odp_schedule_config_t sched_config;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ printf("odp_schedule_capability() failed\n");
+ return -1;
+ }
+
+ num_flows = 0;
+ odp_schedule_config_init(&sched_config);
+
+ /* Enable flow aware scheduling */
+ if (sched_capa.max_flow_id > 0) {
+ num_flows = MAX_FLOWS;
+ if ((MAX_FLOWS - 1) > sched_capa.max_flow_id)
+ num_flows = sched_capa.max_flow_id + 1;
+
+ sched_config.max_flow_id = num_flows - 1;
+ }
+
+ /* Configure the scheduler. All test cases share the config. */
+ if (odp_schedule_config(&sched_config)) {
+ printf("odp_schedule_config() failed.\n");
+ return -1;
+ }
odp_pool_param_init(&params);
params.buf.size = BUF_SIZE;
@@ -1627,6 +1903,8 @@ static int scheduler_suite_init(void)
memset(globals, 0, sizeof(test_globals_t));
+ globals->num_flows = num_flows;
+
globals->num_workers = odp_cpumask_default_worker(&mask, 0);
if (globals->num_workers > MAX_WORKERS)
globals->num_workers = MAX_WORKERS;
@@ -1730,13 +2008,139 @@ static int scheduler_suite_term(void)
return 0;
}
+static int check_flow_aware_support(void)
+{
+ if (globals->num_flows == 0) {
+ printf("\nTest: scheduler_test_flow_aware: SKIPPED\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void scheduler_test_flow_aware(void)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_schedule_config_t sched_config;
+ odp_pool_param_t pool_param;
+ odp_pool_t pool;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue, from;
+ uint32_t j, queue_size, num, num_flows, flow_id;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ int i, ret;
+ uint32_t flow_stat[MAX_FLOWS];
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
+ ODP_SCHED_SYNC_ATOMIC,
+ ODP_SCHED_SYNC_ORDERED};
+
+ /* Test should be skipped when no flows */
+ CU_ASSERT_FATAL(globals->num_flows);
+ CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
+
+ num_flows = globals->num_flows;
+
+ queue_size = FLOW_TEST_NUM_EV;
+ odp_schedule_config_init(&sched_config);
+ if (sched_config.queue_size &&
+ queue_size > sched_config.queue_size)
+ queue_size = sched_config.queue_size;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.buf.size = 100;
+ pool_param.buf.align = 0;
+ pool_param.buf.num = FLOW_TEST_NUM_EV;
+ pool_param.type = ODP_POOL_BUFFER;
+
+ pool = odp_pool_create("test_flow_aware", &pool_param);
+
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < 3; i++) {
+ memset(flow_stat, 0, sizeof(flow_stat));
+ flow_id = 0;
+
+ odp_queue_param_init(&queue_param);
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+ queue_param.sched.prio = odp_schedule_default_prio();
+ queue_param.sched.sync = sync[i];
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.size = queue_size;
+
+ queue = odp_queue_create("test_flow_aware", &queue_param);
+
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ for (j = 0; j < queue_size; j++) {
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+
+ ev = odp_buffer_to_event(buf);
+
+ odp_event_flow_id_set(ev, flow_id);
+ CU_ASSERT(odp_event_flow_id(ev) == flow_id);
+
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT(ret == 0);
+
+ if (ret) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ flow_stat[flow_id]++;
+
+ flow_id++;
+ if (flow_id == num_flows)
+ flow_id = 0;
+ }
+
+ num = 0;
+ for (j = 0; j < 100 * FLOW_TEST_NUM_EV; j++) {
+ ev = odp_schedule(&from, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ CU_ASSERT(from == queue);
+
+ flow_id = odp_event_flow_id(ev);
+ flow_stat[flow_id]--;
+
+ odp_event_free(ev);
+ num++;
+ }
+
+ CU_ASSERT(num == queue_size);
+
+ for (j = 0; j < num_flows; j++) {
+ CU_ASSERT(flow_stat[j] == 0);
+ if (flow_stat[j])
+ printf("flow id %" PRIu32 ", missing %" PRIi32
+ " events\n", j, flow_stat[j]);
+ }
+
+ drain_queues();
+ CU_ASSERT_FATAL(odp_queue_destroy(queue) == 0);
+ }
+
+ CU_ASSERT(odp_pool_destroy(pool) == 0);
+}
+
+/* Default scheduler config */
odp_testinfo_t scheduler_suite[] = {
+ ODP_TEST_INFO(scheduler_test_capa),
ODP_TEST_INFO(scheduler_test_wait_time),
ODP_TEST_INFO(scheduler_test_num_prio),
ODP_TEST_INFO(scheduler_test_queue_destroy),
+ ODP_TEST_INFO(scheduler_test_wait),
+ ODP_TEST_INFO(scheduler_test_queue_size),
ODP_TEST_INFO(scheduler_test_groups),
ODP_TEST_INFO(scheduler_test_pause_resume),
ODP_TEST_INFO(scheduler_test_ordered_lock),
+ ODP_TEST_INFO_CONDITIONAL(scheduler_test_flow_aware,
+ check_flow_aware_support),
ODP_TEST_INFO(scheduler_test_parallel),
ODP_TEST_INFO(scheduler_test_atomic),
ODP_TEST_INFO(scheduler_test_ordered),
@@ -1777,6 +2181,32 @@ odp_suiteinfo_t scheduler_suites[] = {
ODP_SUITE_INFO_NULL,
};
+static int global_init(odp_instance_t *inst)
+{
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
int main(int argc, char *argv[])
{
int ret;
@@ -1785,6 +2215,7 @@ int main(int argc, char *argv[])
if (odp_cunit_parse_options(argc, argv))
return -1;
+ odp_cunit_register_global_init(global_init);
ret = odp_cunit_register(scheduler_suites);
if (ret == 0)
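
The suite init above introduces the new scheduler configuration step. A condensed sketch of that sequence, assuming the odp_schedule_capability()/odp_schedule_config() API from this series: query capability, optionally enable flow aware scheduling, then commit the configuration once before any scheduled queue is used.

#include <odp_api.h>

static int configure_scheduler(uint32_t wanted_flows)
{
	odp_schedule_capability_t capa;
	odp_schedule_config_t config;

	if (odp_schedule_capability(&capa))
		return -1;

	odp_schedule_config_init(&config);

	/* Enable flow aware scheduling when supported and requested */
	if (capa.max_flow_id > 0 && wanted_flows > 0) {
		uint32_t flows = wanted_flows;

		if (flows - 1 > capa.max_flow_id)
			flows = capa.max_flow_id + 1;

		config.max_flow_id = flows - 1;
	}

	/* All subsequent scheduled queues share this configuration */
	return odp_schedule_config(&config);
}

With flow aware scheduling enabled, events are tagged before enqueue with odp_event_flow_id_set(ev, flow_id) and the tag is read back after dispatch with odp_event_flow_id(ev), as the flow aware test above demonstrates.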
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index 5a262daf4..91235851e 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -468,7 +468,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
size = sizeof(shared_test_data_small_t);
shm = odp_shm_reserve(glob_data->name[thr_index], size,
0, ODP_SHM_SINGLE_VA);
- CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_small = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(pattern_small);
@@ -480,7 +480,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
size = sizeof(shared_test_data_medium_t);
shm = odp_shm_reserve(glob_data->name[thr_index], size,
0, ODP_SHM_SINGLE_VA);
- CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_medium = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(pattern_medium);
@@ -492,7 +492,7 @@ static int run_test_singleva_after_fork(void *arg ODP_UNUSED)
size = sizeof(shared_test_data_big_t);
shm = odp_shm_reserve(glob_data->name[thr_index], size,
0, ODP_SHM_SINGLE_VA);
- CU_ASSERT(ODP_SHM_INVALID != shm);
+ CU_ASSERT_FATAL(ODP_SHM_INVALID != shm);
glob_data->shm[thr_index] = shm;
pattern_big = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(pattern_big);
@@ -550,7 +550,7 @@ static void shmem_test_singleva_after_fork(void)
glob_data = odp_shm_addr(shm);
CU_ASSERT_PTR_NOT_NULL(glob_data);
- thrdarg.numthrds = odp_cpumask_default_worker(&unused, 0);
+ thrdarg.numthrds = odp_cpumask_default_worker(&unused, 3);
if (thrdarg.numthrds > MAX_WORKERS)
thrdarg.numthrds = MAX_WORKERS;
@@ -578,21 +578,21 @@ static void shmem_test_singleva_after_fork(void)
case 0:
pattern_small =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_small);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_small);
for (i = 0; i < SMALL_MEM; i++)
CU_ASSERT(pattern_small->data[i] == i);
break;
case 1:
pattern_medium =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_medium);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_medium);
for (i = 0; i < MEDIUM_MEM; i++)
CU_ASSERT(pattern_medium->data[i] == (i << 2));
break;
case 2:
pattern_big =
odp_shm_addr(glob_data->shm[thr_index]);
- CU_ASSERT_PTR_NOT_NULL(pattern_big);
+ CU_ASSERT_PTR_NOT_NULL_FATAL(pattern_big);
for (i = 0; i < BIG_MEM; i++)
CU_ASSERT(pattern_big->data[i] == (i >> 2));
break;
diff --git a/test/validation/api/system/system.c b/test/validation/api/system/system.c
index 75fd26f31..f016a2ded 100644
--- a/test/validation/api/system/system.c
+++ b/test/validation/api/system/system.c
@@ -216,7 +216,11 @@ static void system_test_odp_sys_huge_page_size(void)
uint64_t page;
page = odp_sys_huge_page_size();
- CU_ASSERT(0 < page);
+ if (page == 0)
+ /* Not an error, but log it so the condition is visible */
+ LOG_ERR("Huge pages do not seem to be supported\n");
+ else
+ CU_ASSERT(page % ODP_PAGE_SIZE == 0);
}
static void system_test_odp_sys_huge_page_size_all(void)
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
index 2fab1a7dd..7b40cda62 100644
--- a/test/validation/api/thread/thread.c
+++ b/test/validation/api/thread/thread.c
@@ -7,13 +7,80 @@
#include "config.h"
#include <odp_api.h>
+#include <odp/helper/odph_api.h>
#include <odp_cunit_common.h>
#include <mask_common.h>
#include <test_debug.h>
-/* Test thread entry and exit synchronization barriers */
-odp_barrier_t bar_entry;
-odp_barrier_t bar_exit;
+#define GLOBAL_SHM_NAME "GlobalThreadTest"
+
+typedef struct {
+ /* Test thread entry and exit synchronization barriers */
+ odp_barrier_t bar_entry;
+ odp_barrier_t bar_exit;
+} global_shared_mem_t;
+
+static global_shared_mem_t *global_mem;
+
+static int thread_global_init(odp_instance_t *inst)
+{
+ odp_shm_t global_shm;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t),
+ ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ if (global_shm == ODP_SHM_INVALID) {
+ fprintf(stderr, "Unable reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ return 0;
+}
+
+static int thread_global_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
static void thread_test_odp_cpu_id(void)
{
@@ -36,12 +103,12 @@ static void thread_test_odp_thread_count(void)
static int thread_func(void *arg TEST_UNUSED)
{
/* indicate that thread has started */
- odp_barrier_wait(&bar_entry);
+ odp_barrier_wait(&global_mem->bar_entry);
CU_ASSERT(odp_thread_type() == ODP_THREAD_WORKER);
/* wait for indication that we can exit */
- odp_barrier_wait(&bar_exit);
+ odp_barrier_wait(&global_mem->bar_exit);
return CU_get_number_of_failures();
}
@@ -54,8 +121,8 @@ static void thread_test_odp_thrmask_worker(void)
CU_ASSERT_FATAL(odp_thread_type() == ODP_THREAD_CONTROL);
- odp_barrier_init(&bar_entry, args.numthrds + 1);
- odp_barrier_init(&bar_exit, args.numthrds + 1);
+ odp_barrier_init(&global_mem->bar_entry, args.numthrds + 1);
+ odp_barrier_init(&global_mem->bar_exit, args.numthrds + 1);
/* should start out with 0 worker threads */
ret = odp_thrmask_worker(&mask);
@@ -70,7 +137,7 @@ static void thread_test_odp_thrmask_worker(void)
return;
/* wait for thread(s) to start */
- odp_barrier_wait(&bar_entry);
+ odp_barrier_wait(&global_mem->bar_entry);
ret = odp_thrmask_worker(&mask);
CU_ASSERT(ret == odp_thrmask_count(&mask));
@@ -78,7 +145,7 @@ static void thread_test_odp_thrmask_worker(void)
CU_ASSERT(ret <= odp_thread_count_max());
/* allow thread(s) to exit */
- odp_barrier_wait(&bar_exit);
+ odp_barrier_wait(&global_mem->bar_exit);
odp_cunit_thread_exit(&args);
}
@@ -132,6 +199,9 @@ int main(int argc, char *argv[])
if (odp_cunit_parse_options(argc, argv))
return -1;
+ odp_cunit_register_global_init(thread_global_init);
+ odp_cunit_register_global_term(thread_global_term);
+
ret = odp_cunit_register(thread_suites);
if (ret == 0)
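The thread.c changes above follow a pattern repeated across these validation tests: state touched by worker threads moves from file-scope globals into a named SHM block reserved during global init, so the same state remains shared when the helper launches workers as processes (where plain globals become per-process copies). A condensed sketch of the pattern, restricted to the calls that appear in the hunks; the struct and SHM name are illustrative:

    #include <string.h>
    #include <odp_api.h>

    typedef struct {
        odp_barrier_t bar_entry;
        odp_barrier_t bar_exit;
    } shared_state_t;

    static shared_state_t *shared;

    /* Reserve a named, SW-only SHM block and zero it; workers later
     * reach the same memory via this pointer or odp_shm_lookup(). */
    static int reserve_shared_state(void)
    {
        odp_shm_t shm = odp_shm_reserve("example_shm",
                                        sizeof(shared_state_t),
                                        ODP_CACHE_LINE_SIZE,
                                        ODP_SHM_SW_ONLY);

        if (shm == ODP_SHM_INVALID)
            return -1;

        shared = odp_shm_addr(shm);
        memset(shared, 0, sizeof(shared_state_t));
        return 0;
    }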
diff --git a/test/validation/api/time/time.c b/test/validation/api/time/time.c
index e24012285..38c0906ba 100644
--- a/test/validation/api/time/time.c
+++ b/test/validation/api/time/time.c
@@ -14,7 +14,7 @@
#define BUSY_LOOP_CNT_LONG 6000000000 /* used for t > 4 sec */
#define MIN_TIME_RATE 32000
#define MAX_TIME_RATE 15000000000
-#define DELAY_TOLERANCE 20000000 /* deviation for delay */
+#define DELAY_TOLERANCE 40000000 /* deviation for delay */
#define WAIT_SECONDS 3
static uint64_t local_res;
@@ -413,7 +413,7 @@ static void time_test_wait_ns(void)
if (odp_time_cmp(diff, upper_limit) > 0) {
fprintf(stderr, "Exceed upper limit: "
"diff is %" PRIu64 ", upper_limit %" PRIu64 "\n",
- odp_time_to_ns(diff), odp_time_to_ns(lower_limit));
+ odp_time_to_ns(diff), odp_time_to_ns(upper_limit));
CU_FAIL("Exceed upper limit\n");
}
}
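The second time.c hunk is a pure message fix: the upper-limit branch was reporting lower_limit. For context, a minimal sketch of the tolerance check that DELAY_TOLERANCE feeds into, with wait_ns standing in for the requested delay (illustrative, not the test's exact code):

    /* Accept a measured delay within +/- DELAY_TOLERANCE ns of wait_ns. */
    odp_time_t t1 = odp_time_local();

    odp_time_wait_ns(wait_ns);

    odp_time_t diff = odp_time_diff(odp_time_local(), t1);
    odp_time_t lower_limit = odp_time_local_from_ns(wait_ns - DELAY_TOLERANCE);
    odp_time_t upper_limit = odp_time_local_from_ns(wait_ns + DELAY_TOLERANCE);

    if (odp_time_cmp(diff, lower_limit) < 0 ||
        odp_time_cmp(diff, upper_limit) > 0)
        fprintf(stderr, "delay outside tolerance\n");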
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index e0f068823..aaffd92d1 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -17,6 +17,8 @@
#include "odp_cunit_common.h"
#include "test_debug.h"
+#define GLOBAL_SHM_NAME "GlobalTimerTest"
+
#define MAX(a, b) (((a) > (b)) ? (a) : (b))
/* Timeout range in milliseconds (ms) */
@@ -32,25 +34,6 @@
#define USER_PTR ((void *)0xdead)
#define TICK_INVALID (~(uint64_t)0)
-/* Barrier for thread synchronisation */
-static odp_barrier_t test_barrier;
-
-/* Timeout pool handle used by all threads */
-static odp_pool_t tbp;
-
-/* Timer pool handle used by all threads */
-static odp_timer_pool_t tp;
-
-/* Count of timeouts delivered too late */
-static odp_atomic_u32_t ndelivtoolate;
-
-/* Sum of all allocated timers from all threads. Thread-local
- * caches may make this number lower than the capacity of the pool */
-static odp_atomic_u32_t timers_allocated;
-
-/* Timer resolution in nsec */
-static uint64_t resolution_ns;
-
/* Timer helper structure */
struct test_timer {
odp_timer_t tim; /* Timer handle */
@@ -59,6 +42,85 @@ struct test_timer {
uint64_t tick; /* Expiration tick or TICK_INVALID */
};
+typedef struct {
+ /* Timeout pool handle used by all threads */
+ odp_pool_t tbp;
+ /* Timer pool handle used by all threads */
+ odp_timer_pool_t tp;
+ /* Barrier for thread synchronization */
+ odp_barrier_t test_barrier;
+ /* Count of timeouts delivered too late */
+ odp_atomic_u32_t ndelivtoolate;
+ /* Sum of all allocated timers from all threads. Thread-local
+ * caches may make this number lower than the capacity of the pool */
+ odp_atomic_u32_t timers_allocated;
+} global_shared_mem_t;
+
+static global_shared_mem_t *global_mem;
+
+static int timer_global_init(odp_instance_t *inst)
+{
+ odp_shm_t global_shm;
+ odp_init_t init_param;
+ odph_helper_options_t helper_options;
+
+ if (odph_options(&helper_options)) {
+ fprintf(stderr, "error: odph_options() failed.\n");
+ return -1;
+ }
+
+ odp_init_param_init(&init_param);
+ init_param.mem_model = helper_options.mem_model;
+
+ if (0 != odp_init_global(inst, &init_param, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t),
+ ODP_CACHE_LINE_SIZE, ODP_SHM_SW_ONLY);
+ if (global_shm == ODP_SHM_INVALID) {
+ fprintf(stderr, "Unable reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ /* Configure scheduler */
+ odp_schedule_config(NULL);
+
+ return 0;
+}
+
+static int timer_global_term(odp_instance_t inst)
+{
+ odp_shm_t shm;
+
+ shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ if (0 != odp_shm_free(shm)) {
+ fprintf(stderr, "error: odp_shm_free() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ fprintf(stderr, "error: odp_term_local() failed.\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ fprintf(stderr, "error: odp_term_global() failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void timer_test_timeout_pool_alloc(void)
{
odp_pool_t pool;
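One detail in timer_global_init() above: it calls odp_schedule_config(NULL) before any scheduled queues are used, which accepts the default scheduler configuration. If a test needed non-default settings, the init-then-override form would look roughly like this (a sketch, assuming the standard odp_schedule_config_t API):

    odp_schedule_config_t sched_config;

    odp_schedule_config_init(&sched_config);
    /* Override defaults here, e.g. queue or group limits, before use. */
    odp_schedule_config(&sched_config);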
@@ -272,7 +334,6 @@ static void timer_test_queue_type(odp_queue_type_t queue_type)
odp_queue_param_init(&queue_param);
if (queue_type == ODP_QUEUE_TYPE_SCHED) {
queue_param.type = ODP_QUEUE_TYPE_SCHED;
- queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
queue_param.sched.group = ODP_SCHED_GROUP_ALL;
}
@@ -334,8 +395,8 @@ static void timer_test_queue_type(odp_queue_type_t queue_type)
tim = odp_timeout_timer(tmo);
tick = odp_timeout_tick(tmo);
- CU_ASSERT(diff_period > (period_ns - (4 * res_ns)));
- CU_ASSERT(diff_period < (period_ns + (4 * res_ns)));
+ CU_ASSERT(diff_period > (period_ns - (5 * res_ns)));
+ CU_ASSERT(diff_period < (period_ns + (5 * res_ns)));
LOG_DBG("timeout tick %" PRIu64 ", "
"timeout period %" PRIu64 "\n",
@@ -540,7 +601,7 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
CU_FAIL("odp_timeout_tick() too small tick");
}
- if (tick > odp_timer_current_tick(tp))
+ if (tick > odp_timer_current_tick(global_mem->tp))
CU_FAIL("Timeout delivered early");
if (tick < prev_tick) {
@@ -548,7 +609,7 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
" prev_tick %" PRIu64"\n",
tick, prev_tick);
/* We don't report late timeouts using CU_FAIL */
- odp_atomic_inc_u32(&ndelivtoolate);
+ odp_atomic_inc_u32(&global_mem->ndelivtoolate);
}
}
@@ -579,6 +640,8 @@ static int worker_entrypoint(void *arg TEST_UNUSED)
struct timespec ts;
uint32_t nstale;
odp_timer_set_t timer_rc;
+ odp_timer_pool_t tp = global_mem->tp;
+ odp_pool_t tbp = global_mem->tbp;
queue = odp_queue_create("timer_queue", NULL);
if (queue == ODP_QUEUE_INVALID)
@@ -609,9 +672,9 @@ static int worker_entrypoint(void *arg TEST_UNUSED)
allocated = i;
if (allocated == 0)
CU_FAIL_FATAL("unable to alloc a timer");
- odp_atomic_fetch_add_u32(&timers_allocated, allocated);
+ odp_atomic_fetch_add_u32(&global_mem->timers_allocated, allocated);
- odp_barrier_wait(&test_barrier);
+ odp_barrier_wait(&global_mem->test_barrier);
/* Initial set all timers with a random expiration time */
nset = 0;
@@ -771,8 +834,13 @@ static void timer_test_odp_timer_all(void)
odp_cpumask_t unused;
odp_timer_pool_info_t tpinfo;
uint64_t ns, tick, ns2;
+ uint64_t resolution_ns;
+ uint32_t timers_allocated;
pthrd_arg thrdarg;
odp_timer_capability_t timer_capa;
+ odp_pool_t tbp;
+ odp_timer_pool_t tp;
+ uint32_t num_timers;
/* Reserve at least one core for running other processes so the timer
* test hopefully can run undisturbed and thus get better timing
@@ -787,29 +855,33 @@ static void timer_test_odp_timer_all(void)
if (num_workers < 1)
num_workers = 1;
+ num_timers = num_workers * NTIMERS;
+ CU_ASSERT_FATAL(!odp_timer_capability(ODP_CLOCK_CPU, &timer_capa));
+ if (timer_capa.max_timers && timer_capa.max_timers < num_timers)
+ num_timers = timer_capa.max_timers;
+
/* Create timeout pools */
odp_pool_param_init(&params);
params.type = ODP_POOL_TIMEOUT;
- params.tmo.num = (NTIMERS + 1) * num_workers;
+ params.tmo.num = num_timers + num_workers;
- tbp = odp_pool_create("tmo_pool", &params);
- if (tbp == ODP_POOL_INVALID)
+ global_mem->tbp = odp_pool_create("tmo_pool", &params);
+ if (global_mem->tbp == ODP_POOL_INVALID)
CU_FAIL_FATAL("Timeout pool create failed");
+ tbp = global_mem->tbp;
/* Create a timer pool */
- if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa))
- CU_FAIL("Error: get timer capacity failed.\n");
-
resolution_ns = MAX(RES, timer_capa.highest_res_ns);
tparam.res_ns = resolution_ns;
tparam.min_tmo = MIN_TMO;
tparam.max_tmo = MAX_TMO;
- tparam.num_timers = num_workers * NTIMERS;
+ tparam.num_timers = num_timers;
tparam.priv = 0;
tparam.clk_src = ODP_CLOCK_CPU;
- tp = odp_timer_pool_create(NAME, &tparam);
- if (tp == ODP_TIMER_POOL_INVALID)
+ global_mem->tp = odp_timer_pool_create(NAME, &tparam);
+ if (global_mem->tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
+ tp = global_mem->tp;
/* Start all created timer pools */
odp_timer_pool_start();
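The sizing rework in the hunk above replaces the old fixed num_workers * NTIMERS request with a count clamped to the implementation's advertised capability, and sizes the timeout pool from the clamped value. The clamp itself, condensed from the hunk (max_timers == 0 means the implementation states no limit):

    odp_timer_capability_t timer_capa;
    uint32_t num_timers = num_workers * NTIMERS;

    if (odp_timer_capability(ODP_CLOCK_CPU, &timer_capa))
        return -1;

    if (timer_capa.max_timers && timer_capa.max_timers < num_timers)
        num_timers = timer_capa.max_timers;

    /* One timeout event per timer plus one spare per worker. */
    params.tmo.num = num_timers + num_workers;
    tparam.num_timers = num_timers;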
@@ -827,9 +899,13 @@ static void timer_test_odp_timer_all(void)
LOG_DBG("Resolution: %" PRIu64 "\n", tparam.res_ns);
LOG_DBG("Min timeout: %" PRIu64 "\n", tparam.min_tmo);
LOG_DBG("Max timeout: %" PRIu64 "\n", tparam.max_tmo);
- LOG_DBG("Num timers..: %u\n", tparam.num_timers);
- LOG_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS,
+ LOG_DBG("Num timers: %u\n", tparam.num_timers);
+ LOG_DBG("Tmo range: %u ms (%" PRIu64 " ticks)\n", RANGE_MS,
odp_timer_ns_to_tick(tp, 1000000ULL * RANGE_MS));
+ LOG_DBG("Max timers: %" PRIu32 "\n", timer_capa.max_timers);
+ LOG_DBG("Max timer pools: %" PRIu32 "\n", timer_capa.max_pools);
+ LOG_DBG("Max timer pools combined: %" PRIu32 "\n",
+ timer_capa.max_pools_combined);
tick = odp_timer_ns_to_tick(tp, 0);
CU_ASSERT(tick == 0);
@@ -854,13 +930,13 @@ static void timer_test_odp_timer_all(void)
}
/* Initialize barrier used by worker threads for synchronization */
- odp_barrier_init(&test_barrier, num_workers);
+ odp_barrier_init(&global_mem->test_barrier, num_workers);
/* Initialize the shared timeout counter */
- odp_atomic_init_u32(&ndelivtoolate, 0);
+ odp_atomic_init_u32(&global_mem->ndelivtoolate, 0);
/* Initialize the number of finally allocated elements */
- odp_atomic_init_u32(&timers_allocated, 0);
+ odp_atomic_init_u32(&global_mem->timers_allocated, 0);
/* Create and start worker threads */
thrdarg.testcase = 0;
@@ -870,14 +946,15 @@ static void timer_test_odp_timer_all(void)
/* Wait for worker threads to exit */
odp_cunit_thread_exit(&thrdarg);
LOG_DBG("Number of timeouts delivered/received too late: %" PRIu32 "\n",
- odp_atomic_load_u32(&ndelivtoolate));
+ odp_atomic_load_u32(&global_mem->ndelivtoolate));
/* Check some statistics after the test */
if (odp_timer_pool_info(tp, &tpinfo) != 0)
CU_FAIL("odp_timer_pool_info");
- CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS);
+ CU_ASSERT(tpinfo.param.num_timers == num_timers);
CU_ASSERT(tpinfo.cur_timers == 0);
- CU_ASSERT(tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated));
+ timers_allocated = odp_atomic_load_u32(&global_mem->timers_allocated);
+ CU_ASSERT(tpinfo.hwm_timers == timers_allocated);
/* Destroy timer pool, all timers must have been freed */
odp_timer_pool_destroy(tp);
@@ -911,6 +988,9 @@ int main(int argc, char *argv[])
if (odp_cunit_parse_options(argc, argv))
return -1;
+ odp_cunit_register_global_init(timer_global_init);
+ odp_cunit_register_global_term(timer_global_term);
+
int ret = odp_cunit_register(timer_suites);
if (ret == 0)