author     Matias Elo <matias.elo@nokia.com>   2022-10-28 16:14:01 +0300
committer  GitHub <noreply@github.com>         2022-10-28 16:14:01 +0300
commit     bdfef681d7849339946bd63151fa2875d9ee801d (patch)
tree       ceec932bbc3f678f68ed91953dc83f3852f95df0
parent     78066161560f2aa0ea829b1c435ab83809651162 (diff)
parent     196c01565be4017e1d4d29df1912014b71adc105 (diff)
Merge ODP v1.38.0.0 (v1.38.0.0_DPDK_19.11)
Merge ODP linux-generic v1.38.0.0 into linux-dpdk.
-rw-r--r--  .github/workflows/ci-pipeline-arm64.yml | 24
-rw-r--r--  .github/workflows/ci-pipeline.yml | 64
-rw-r--r--  .github/workflows/coverity.yml | 2
-rw-r--r--  .github/workflows/gh-pages.yml | 2
-rw-r--r--  CHANGELOG | 40
-rw-r--r--  DEPENDENCIES | 3
-rw-r--r--  Makefile.inc | 4
-rw-r--r--  config/odp-linux-dpdk.conf | 12
-rw-r--r--  config/odp-linux-generic.conf | 12
-rw-r--r--  configure.ac | 30
-rw-r--r--  example/Makefile.inc | 6
-rw-r--r--  helper/Makefile.am | 4
-rw-r--r--  helper/cli.c | 531
-rw-r--r--  helper/include/odp/helper/ip.h | 8
-rw-r--r--  helper/include/odp/helper/threads.h | 7
-rw-r--r--  helper/threads.c | 48
-rw-r--r--  include/odp/api/spec/buffer.h | 14
-rw-r--r--  include/odp/api/spec/crypto.h | 26
-rw-r--r--  include/odp/api/spec/packet.h | 63
-rw-r--r--  include/odp/api/spec/pool_types.h | 46
-rw-r--r--  include/odp/api/spec/stash.h | 140
-rw-r--r--  include/odp/api/spec/stash_types.h | 12
-rw-r--r--  include/odp/api/spec/thread.h | 12
-rw-r--r--  include/odp/api/spec/timer.h | 13
-rw-r--r--  m4/odp_dpdk.m4 | 4
-rw-r--r--  m4/odp_pthread.m4 | 10
-rw-r--r--  platform/Makefile.inc | 6
-rw-r--r--  platform/linux-dpdk/Makefile.am | 2
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h | 10
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/packet_inlines.h | 84
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h | 1
-rw-r--r--  platform/linux-dpdk/include/odp/api/plat/timer_inlines.h | 6
-rw-r--r--  platform/linux-dpdk/include/odp_buffer_internal.h | 3
-rw-r--r--  platform/linux-dpdk/include/odp_event_vector_internal.h | 10
-rw-r--r--  platform/linux-dpdk/include/odp_pool_internal.h | 2
-rw-r--r--  platform/linux-dpdk/include/odp_timer_internal.h | 3
-rw-r--r--  platform/linux-dpdk/m4/odp_libconfig.m4 | 2
-rw-r--r--  platform/linux-dpdk/odp_buffer.c | 7
-rw-r--r--  platform/linux-dpdk/odp_packet.c | 74
-rw-r--r--  platform/linux-dpdk/odp_pool.c | 58
-rw-r--r--  platform/linux-dpdk/odp_timer.c | 3
-rw-r--r--  platform/linux-dpdk/test/alternate-timer.conf | 2
-rw-r--r--  platform/linux-dpdk/test/crypto.conf | 2
-rw-r--r--  platform/linux-dpdk/test/sched-basic.conf | 2
-rw-r--r--  platform/linux-generic/Makefile.am | 2
-rw-r--r--  platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h | 8
-rw-r--r--  platform/linux-generic/arch/aarch64/odp_crypto_armv8.c | 98
-rw-r--r--  platform/linux-generic/include/odp/api/plat/byteorder_inlines.h | 6
-rw-r--r--  platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h | 12
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inline_types.h | 10
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_inlines.h | 86
-rw-r--r--  platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h | 25
-rw-r--r--  platform/linux-generic/include/odp/api/plat/pool_inline_types.h | 6
-rw-r--r--  platform/linux-generic/include/odp/api/plat/timer_inline_types.h | 1
-rw-r--r--  platform/linux-generic/include/odp/api/plat/timer_inlines.h | 6
-rw-r--r--  platform/linux-generic/include/odp_atomic_internal.h | 25
-rw-r--r--  platform/linux-generic/include/odp_buffer_internal.h | 3
-rw-r--r--  platform/linux-generic/include/odp_debug_internal.h | 7
-rw-r--r--  platform/linux-generic/include/odp_event_vector_internal.h | 8
-rw-r--r--  platform/linux-generic/include/odp_global_data.h | 2
-rw-r--r--  platform/linux-generic/include/odp_llqueue.h | 16
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h | 4
-rw-r--r--  platform/linux-generic/include/odp_print_internal.h | 22
-rw-r--r--  platform/linux-generic/include/odp_ring_internal.h | 45
-rw-r--r--  platform/linux-generic/include/odp_timer_internal.h | 3
-rw-r--r--  platform/linux-generic/m4/odp_libconfig.m4 | 2
-rw-r--r--  platform/linux-generic/odp_buffer.c | 7
-rw-r--r--  platform/linux-generic/odp_classification.c | 52
-rw-r--r--  platform/linux-generic/odp_crypto_null.c | 100
-rw-r--r--  platform/linux-generic/odp_crypto_openssl.c | 100
-rw-r--r--  platform/linux-generic/odp_ipsec.c | 9
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 2
-rw-r--r--  platform/linux-generic/odp_packet.c | 76
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 2
-rw-r--r--  platform/linux-generic/odp_packet_vector.c | 5
-rw-r--r--  platform/linux-generic/odp_pool.c | 114
-rw-r--r--  platform/linux-generic/odp_print.c | 47
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 174
-rw-r--r--  platform/linux-generic/odp_stash.c | 142
-rw-r--r--  platform/linux-generic/odp_timer.c | 66
-rw-r--r--  platform/linux-generic/test/inline-timer.conf | 2
-rw-r--r--  platform/linux-generic/test/packet_align.conf | 2
-rw-r--r--  platform/linux-generic/test/process-mode.conf | 2
-rw-r--r--  platform/linux-generic/test/sched-basic.conf | 2
-rw-r--r--  platform/linux-generic/test/validation/api/shmem/Makefile.am | 3
-rwxr-xr-x  scripts/ci/build.sh | 5
-rwxr-xr-x  scripts/ci/build_armhf.sh | 5
-rwxr-xr-x  scripts/ci/build_static_x86_64.sh | 35
-rwxr-xr-x  scripts/ci/build_x86_64.sh | 1
-rwxr-xr-x  scripts/ci/doxygen.sh | 17
-rw-r--r--  test/Makefile.inc | 6
-rw-r--r--  test/common/odp_cunit_common.c | 3
-rw-r--r--  test/common/odp_cunit_common.h | 6
-rw-r--r--  test/miscellaneous/.gitignore | 1
-rw-r--r--  test/miscellaneous/Makefile.am | 7
-rw-r--r--  test/miscellaneous/odp_api_headers.c | 14
-rw-r--r--  test/performance/.gitignore | 1
-rw-r--r--  test/performance/Makefile.am | 2
-rw-r--r--  test/performance/odp_crypto.c | 6
-rw-r--r--  test/performance/odp_ipsecfwd.c | 1454
-rw-r--r--  test/validation/api/atomic/atomic.c | 2
-rw-r--r--  test/validation/api/barrier/barrier.c | 4
-rw-r--r--  test/validation/api/buffer/buffer.c | 56
-rw-r--r--  test/validation/api/classification/odp_classification_basic.c | 50
-rw-r--r--  test/validation/api/classification/odp_classification_common.c | 24
-rw-r--r--  test/validation/api/classification/odp_classification_test_pmr.c | 105
-rw-r--r--  test/validation/api/classification/odp_classification_testsuites.h | 2
-rw-r--r--  test/validation/api/crypto/odp_crypto_test_inp.c | 5
-rw-r--r--  test/validation/api/crypto/test_vectors.h | 267
-rw-r--r--  test/validation/api/crypto/test_vectors_len.h | 10
-rw-r--r--  test/validation/api/ipsec/ipsec.c | 1
-rw-r--r--  test/validation/api/lock/lock.c | 22
-rw-r--r--  test/validation/api/packet/packet.c | 499
-rw-r--r--  test/validation/api/pktio/pktio.c | 2
-rw-r--r--  test/validation/api/pool/pool.c | 12
-rw-r--r--  test/validation/api/queue/queue.c | 4
-rw-r--r--  test/validation/api/scheduler/scheduler.c | 12
-rw-r--r--  test/validation/api/shmem/shmem.c | 8
-rw-r--r--  test/validation/api/stash/stash.c | 423
-rw-r--r--  test/validation/api/thread/thread.c | 40
-rw-r--r--  test/validation/api/timer/timer.c | 250
121 files changed, 4886 insertions, 1193 deletions
diff --git a/.github/workflows/ci-pipeline-arm64.yml b/.github/workflows/ci-pipeline-arm64.yml
index 79d3b8833..6cc5e38a7 100644
--- a/.github/workflows/ci-pipeline-arm64.yml
+++ b/.github/workflows/ci-pipeline-arm64.yml
@@ -28,7 +28,7 @@ jobs:
conf: '--enable-lto --enable-abi-compat'
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -45,7 +45,7 @@ jobs:
os: ['ubuntu_18.04', 'rocky_linux_8']
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${{matrix.os}}-${ARCH}-native /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -64,7 +64,7 @@ jobs:
conf: ['', '--enable-lto']
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="gcc-${{matrix.cc_ver}}" -e CXX="g++-${{matrix.cc_ver}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -76,7 +76,7 @@ jobs:
runs-on: [self-hosted, ARM64]
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/out_of_tree.sh
- name: Failure log
@@ -92,7 +92,7 @@ jobs:
conf: ['--enable-user-guides', '--enable-user-guides --enable-abi-compat']
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
# Ignore distcheck failure (caused by the first 'make check' run unmounting huge pages)
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/distcheck.sh || true
@@ -113,7 +113,7 @@ jobs:
'--without-openssl --without-pcap']
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
-e CXX=g++-10 -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
- name: Failure log
@@ -130,7 +130,7 @@ jobs:
cflags: ['-march=armv8.2-a -O2', '-march=armv8-a+lse -O2']
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
-e CXX=g++-10 -e CFLAGS="${{matrix.cflags}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
- name: Failure log
@@ -146,7 +146,7 @@ jobs:
os: ['ubuntu_18.04', 'ubuntu_22.04-openssl']
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${{matrix.os}}-${ARCH}-native /odp/scripts/ci/check.sh
- name: Failure log
@@ -158,7 +158,7 @@ jobs:
runs-on: [self-hosted, ARM64]
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" -e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/sched-basic.conf $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
- name: Failure log
@@ -170,7 +170,7 @@ jobs:
runs-on: [self-hosted, ARM64]
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" -e ODP_SCHEDULER=sp $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
- name: Failure log
@@ -184,7 +184,7 @@ jobs:
OS: ubuntu_20.04
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native-dpdk_20.11 /odp/scripts/ci/check.sh
- name: Failure log
@@ -198,7 +198,7 @@ jobs:
OS: ubuntu_20.04
steps:
- uses: AutoModality/action-clean@v1.1.0
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native-dpdk_21.11 /odp/scripts/ci/check.sh
- name: Failure log
diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml
index 703ae1adf..a88d0feb2 100644
--- a/.github/workflows/ci-pipeline.yml
+++ b/.github/workflows/ci-pipeline.yml
@@ -11,7 +11,7 @@ jobs:
Checkpatch:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
with:
fetch-depth: 0
@@ -41,7 +41,7 @@ jobs:
Documentation:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install dependencies
run: |
@@ -75,13 +75,31 @@ jobs:
- cc: clang
conf: '--enable-lto --enable-abi-compat'
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
if: ${{ failure() }}
run: find . -name config.log -exec cat {} \;
+ Build_static_u22:
+ runs-on: ubuntu-20.04
+ env:
+ OS: ubuntu_22.04
+ CONF: "--disable-shared --without-openssl --without-pcap"
+ strategy:
+ fail-fast: false
+ matrix:
+ cc_ver: [9, 10, 11, 12]
+ conf: ['', '--enable-lto']
+ steps:
+ - uses: actions/checkout@v3
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="gcc-${{matrix.cc_ver}}" -e CXX="g++-${{matrix.cc_ver}}"
+ -e CONF="${CONF} ${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-static /odp/scripts/ci/build_static_${ARCH}.sh
+ - name: Failure log
+ if: ${{ failure() }}
+ run: find . -name config.log -exec cat {} \;
+
Build_arm64:
runs-on: ubuntu-20.04
env:
@@ -91,7 +109,7 @@ jobs:
matrix:
cc: [gcc, clang]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Minimal
run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
@@ -134,7 +152,7 @@ jobs:
matrix:
conf: ['', '--enable-abi-compat']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -151,7 +169,7 @@ jobs:
cc: [gcc, clang]
conf: ['', '--enable-abi-compat']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -167,7 +185,7 @@ jobs:
os: ['centos_7', 'rocky_linux_8']
conf: ['--enable-abi-compat']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${{matrix.os}}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -184,7 +202,7 @@ jobs:
cc_ver: [9]
conf: ['', '--enable-abi-compat']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="gcc-${{matrix.cc_ver}}" -e CXX="g++-${{matrix.cc_ver}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -201,7 +219,7 @@ jobs:
cc_ver: [10, 11, 12]
conf: ['', '--enable-abi-compat']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="gcc-${{matrix.cc_ver}}" -e CXX="g++-${{matrix.cc_ver}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -211,7 +229,7 @@ jobs:
Build_out-of-tree:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/out_of_tree.sh
- name: Failure log
@@ -223,7 +241,7 @@ jobs:
env:
CONF: "--with-platform=linux-generic"
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ODP_LIB_NAME="libodp-linux"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- name: Failure log
@@ -233,14 +251,14 @@ jobs:
Run_coverage:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/coverage.sh
- name: Failure log
if: ${{ failure() }}
run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
- name: Upload to Codecov
- uses: codecov/codecov-action@v2
+ uses: codecov/codecov-action@v3
Run_distcheck:
runs-on: ubuntu-20.04
@@ -249,7 +267,7 @@ jobs:
matrix:
conf: ['--enable-user-guides', '--enable-user-guides --enable-abi-compat']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
# Ignore distcheck failure (caused by the first 'make check' run unmounting huge pages)
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/distcheck.sh || true
@@ -268,7 +286,7 @@ jobs:
'--disable-host-optimization', '--disable-host-optimization --enable-abi-compat',
'--without-openssl --without-pcap']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
-e CXX=g++-10 -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
- name: Failure log
@@ -283,7 +301,7 @@ jobs:
cc: [gcc, clang]
os: ['ubuntu_20.04', 'ubuntu_22.04-openssl']
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${{matrix.os}}-${ARCH} /odp/scripts/ci/check.sh
- name: Failure log
@@ -293,7 +311,7 @@ jobs:
Run_sched_config:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" -e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/sched-basic.conf $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
- name: Failure log
@@ -303,7 +321,7 @@ jobs:
Run_scheduler_sp:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" -e ODP_SCHEDULER=sp $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
- name: Failure log
@@ -313,7 +331,7 @@ jobs:
Run_process_mode:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" -e ODPH_PROC_MODE=1 $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
- name: Failure log
@@ -323,7 +341,7 @@ jobs:
Run_alternate_timer:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/alternate-timer.conf
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
@@ -336,7 +354,7 @@ jobs:
env:
OS: ubuntu_20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-dpdk_20.11 /odp/scripts/ci/check.sh
- name: Failure log
@@ -348,7 +366,7 @@ jobs:
env:
OS: ubuntu_20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-dpdk_21.11 /odp/scripts/ci/check.sh
- name: Failure log
@@ -364,7 +382,7 @@ jobs:
matrix:
driver: [crypto_aesni_mb, crypto_aesni_gcm]
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
-e CONF="${CONF}" -e ODP_PLATFORM_PARAMS="--vdev=${{matrix.driver}}"
-e ODP_CONFIG_FILE=/odp/platform/linux-dpdk/test/crypto.conf
diff --git a/.github/workflows/coverity.yml b/.github/workflows/coverity.yml
index d62fff0a7..82bbe0c65 100644
--- a/.github/workflows/coverity.yml
+++ b/.github/workflows/coverity.yml
@@ -14,7 +14,7 @@ jobs:
Coverity-analysis:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g
-e CC="${CC}" -e GITHUB_SHA="${GITHUB_SHA}"
-e COVERITY_TOKEN="${{ secrets.COVERITY_TOKEN }}"
diff --git a/.github/workflows/gh-pages.yml b/.github/workflows/gh-pages.yml
index aaf1af72a..18f269579 100644
--- a/.github/workflows/gh-pages.yml
+++ b/.github/workflows/gh-pages.yml
@@ -9,7 +9,7 @@ jobs:
Documentation:
runs-on: ubuntu-20.04
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install dependencies
run: |
sudo apt update
diff --git a/CHANGELOG b/CHANGELOG
index 84d7effb5..eb80ee6cc 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,43 @@
+== OpenDataPlane (1.38.0.0)
+
+=== Backward incompatible API changes
+==== Pool
+* Change `odp_pool_capability_t.pkt.max_uarea_size` specification to state
+that the value of zero means user area is not supported.
+* Specify that the default value of `odp_pool_param_t.pkt.uarea_size` is zero
+and implementation may round up the given value.
+
+=== Backward compatible API changes
+==== Buffer
+* Add `odp_buffer_user_area()` function which returns pointer to the user area
+configured with pool create parameters.
+
+==== Crypto
+* Add experimental ZUC-256 support.
+
+==== Packet
+* Add `odp_packet_vector_user_area()` function which returns pointer to the user
+area configured with pool create parameters.
+* Add new user flag metadata to packets (`odp_packet_user_flag()`,
+`odp_packet_user_flag_set()`) and packet vectors
+(`odp_packet_vector_user_flag()`, `odp_packet_vector_user_flag_set()`).
+
+==== Pool
+* Add user area size capability and parameter into buffer, timeout, and vector
+event pools.
+
+==== Stash
+* Add batch variants of all put/get functions and capabilities for maximum
+supported batch sizes.
+
+==== Thread
+* Clarify `odp_thread_id()` specification to state that thread IDs are assigned
+sequentially starting from 0 in the order threads call `odp_init_local()`.
+
+==== Timer
+* Add `odp_timeout_user_area()` function which returns pointer to the user area
+configured with pool create parameters.
+
== OpenDataPlane (1.37.2.0)
=== Backward compatible API changes
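
Editor's note: as an illustration of the buffer user area addition listed in the
1.38.0.0 CHANGELOG above, a minimal usage sketch (not part of the patch) is shown
below. It assumes 'pool' is an existing buffer pool created with a non-zero user
area size; odp_buffer_alloc(), odp_buffer_free() and ODP_BUFFER_INVALID are
standard ODP API.

    /* Sketch: write application metadata into the per-buffer user area */
    odp_buffer_t buf = odp_buffer_alloc(pool);

    if (buf != ODP_BUFFER_INVALID) {
            uint8_t *uarea = odp_buffer_user_area(buf);

            if (uarea != NULL)
                    uarea[0] = 1;   /* application-defined metadata */

            odp_buffer_free(buf);
    }
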
diff --git a/DEPENDENCIES b/DEPENDENCIES
index 6d5433094..0a1bea32b 100644
--- a/DEPENDENCIES
+++ b/DEPENDENCIES
@@ -305,6 +305,9 @@ Prerequisites for building the OpenDataPlane (ODP) API
e.g. with some packet length, packet segment length and pool size
combinations that would otherwise conform to reported capabilities.
+ Note that, currently, AF_XDP socket packet I/O cannot be instantiated if
+ DPDK zero-copy is enabled.
+
3.6.1 AF_XDP socket packet I/O requirements
AF_XDP socket packet I/O implementation requires libxdp and libbpf libraries.
diff --git a/Makefile.inc b/Makefile.inc
index 421e11361..e3c100e96 100644
--- a/Makefile.inc
+++ b/Makefile.inc
@@ -1,3 +1,7 @@
+AM_CFLAGS = $(ODP_CFLAGS)
+AM_CXXFLAGS = $(ODP_CXXFLAGS)
+AM_LDFLAGS = $(ODP_LDFLAGS)
+
ODP_INCLUDES = \
-I$(top_builddir)/include \
-I$(top_srcdir)/include
diff --git a/config/odp-linux-dpdk.conf b/config/odp-linux-dpdk.conf
index 3779bd539..d28f728f2 100644
--- a/config/odp-linux-dpdk.conf
+++ b/config/odp-linux-dpdk.conf
@@ -16,7 +16,7 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.17"
+config_file_version = "0.1.18"
# System options
system: {
@@ -169,6 +169,16 @@ sched_basic: {
burst_size_default = [ 32, 32, 32, 32, 32, 16, 8, 4]
burst_size_max = [255, 255, 255, 255, 255, 16, 16, 8]
+ # Burst size configuration per priority for each scheduled queue type.
+ # Overrides default values set in 'burst_size_default' and
+ # 'burst_size_max' if != 0.
+ burst_size_parallel = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_max_parallel = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_atomic = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_max_atomic = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_ordered = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_max_ordered = [0, 0, 0, 0, 0, 0, 0, 0]
+
# Automatically updated schedule groups
#
# DEPRECATED: use odp_schedule_config() API instead
diff --git a/config/odp-linux-generic.conf b/config/odp-linux-generic.conf
index 9c5e85242..f8accd07f 100644
--- a/config/odp-linux-generic.conf
+++ b/config/odp-linux-generic.conf
@@ -16,7 +16,7 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.21"
+config_file_version = "0.1.22"
# System options
system: {
@@ -211,6 +211,16 @@ sched_basic: {
burst_size_default = [ 32, 32, 32, 32, 32, 16, 8, 4]
burst_size_max = [255, 255, 255, 255, 255, 16, 16, 8]
+ # Burst size configuration per priority for each scheduled queue type.
+ # Overrides default values set in 'burst_size_default' and
+ # 'burst_size_max' if != 0.
+ burst_size_parallel = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_max_parallel = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_atomic = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_max_atomic = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_ordered = [0, 0, 0, 0, 0, 0, 0, 0]
+ burst_size_max_ordered = [0, 0, 0, 0, 0, 0, 0, 0]
+
# Automatically updated schedule groups
#
# DEPRECATED: use odp_schedule_config() API instead
diff --git a/configure.ac b/configure.ac
index c92023866..6bbb0ae03 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,8 +3,8 @@ AC_PREREQ([2.5])
# ODP API version
##########################################################################
m4_define([odp_version_generation], [1])
-m4_define([odp_version_major], [37])
-m4_define([odp_version_minor], [2])
+m4_define([odp_version_major], [38])
+m4_define([odp_version_minor], [0])
m4_define([odp_version_patch], [0])
m4_define([odp_version_api],
@@ -42,12 +42,6 @@ AC_SUBST(ODPH_VERSION_MINOR)
ODPH_VERSION=odph_version
AC_SUBST(ODPH_VERSION)
-##########################################################################
-# Test if user has set CFLAGS. Automake initializes CFLAGS to "-g -O2"
-# by default.
-##########################################################################
-AS_IF([test "$ac_cv_env_CFLAGS_set" = ""], [user_cflags=0], [user_cflags=1])
-
# Initialize automake
AM_INIT_AUTOMAKE([1.9 tar-pax subdir-objects foreign nostdinc -Wall -Werror])
AC_CONFIG_SRCDIR([include/odp/api/spec/init.h])
@@ -255,9 +249,9 @@ AC_ARG_ENABLE([lto],
# binutils), but object files are larger.
ODP_LTO_FLAGS="-flto -ffat-lto-objects"
fi])
-AC_SUBST(ODP_LTO_FLAGS)
ODP_CFLAGS="$ODP_CFLAGS $ODP_LTO_FLAGS"
+ODP_LDFLAGS="$ODP_LDFLAGS $ODP_LTO_FLAGS"
##########################################################################
# Build examples/tests dynamically
@@ -418,10 +412,6 @@ DX_INIT_DOXYGEN($PACKAGE_NAME,
##########################################################################
# Default include setup
##########################################################################
-CFLAGS="$ODP_CFLAGS $CFLAGS"
-CXXFLAGS="$ODP_CXXFLAGS $CXXFLAGS"
-LDFLAGS="$ODP_LTO_FLAGS $LDFLAGS"
-
AC_CONFIG_FILES([Makefile])
AC_CONFIG_FILES([include/Makefile
include/odp/api/spec/version.h
@@ -433,10 +423,10 @@ AC_CONFIG_FILES([helper/Makefile
##########################################################################
# distribute the changed variables among the Makefiles
-AC_SUBST([LIBS])
-AC_SUBST([CPPFLAGS])
-AC_SUBST([CFLAGS])
-AC_SUBST([LDFLAGS])
+AC_SUBST([ODP_CFLAGS])
+AC_SUBST([ODP_CXXFLAGS])
+AC_SUBST([ODP_LDFLAGS])
+
AC_SUBST([EXEEXT])
CC_VERSION=$($CC --version | head -n 1)
@@ -467,10 +457,10 @@ AC_MSG_RESULT([
cc: ${CC}
cc version: ${CC_VERSION}
cppflags: ${CPPFLAGS}
- cflags: ${CFLAGS}
- cxxflags: ${CXXFLAGS}
+ cflags: ${ODP_CFLAGS} ${CFLAGS}
+ cxxflags: ${ODP_CXXFLAGS} ${CXXFLAGS}
ld: ${LD}
- ldflags: ${LDFLAGS}
+ ldflags: ${ODP_LDFLAGS} ${LDFLAGS}
libs: ${LIBS}
dependency libs: ${PLAT_DEP_LIBS}
defs: ${DEFS}
diff --git a/example/Makefile.inc b/example/Makefile.inc
index dba5ff166..c7851bc71 100644
--- a/example/Makefile.inc
+++ b/example/Makefile.inc
@@ -8,16 +8,14 @@ TESTS_ENVIRONMENT = EXEEXT=${EXEEXT}
LDADD = $(LIB)/libodphelper.la $(LIB)/lib$(ODP_LIB_NAME).la
-AM_CFLAGS = \
+AM_CFLAGS += \
-I$(srcdir) \
-I$(top_srcdir)/example \
$(ODP_INCLUDES) \
$(HELPER_INCLUDES)
if STATIC_APPS
-AM_LDFLAGS = -L$(LIB) -static
-else
-AM_LDFLAGS =
+AM_LDFLAGS += -static
endif
AM_LDFLAGS += $(PLAT_DEP_LIBS)
diff --git a/helper/Makefile.am b/helper/Makefile.am
index 8e410703c..edcde4f5b 100644
--- a/helper/Makefile.am
+++ b/helper/Makefile.am
@@ -11,9 +11,9 @@ AM_CPPFLAGS = \
$(ODP_INCLUDES) \
$(HELPER_INCLUDES) \
$(LIBCLI_CPPFLAGS)
-AM_CFLAGS = $(PTHREAD_CFLAGS)
+AM_CFLAGS += $(PTHREAD_CFLAGS)
-AM_LDFLAGS = -version-number '$(ODPHELPER_LIBSO_VERSION)'
+AM_LDFLAGS += -version-number '$(ODPHELPER_LIBSO_VERSION)'
helperincludedir = $(includedir)/odp/helper/
helperinclude_HEADERS = \
diff --git a/helper/cli.c b/helper/cli.c
index 0503da230..fef42ec61 100644
--- a/helper/cli.c
+++ b/helper/cli.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2021, Nokia
+/* Copyright (c) 2021-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,6 +17,7 @@
#include <poll.h>
#include <stdio.h>
#include <strings.h>
+#include <inttypes.h>
/* Socketpair socket roles. */
enum {
@@ -195,9 +196,120 @@ static int check_num_args(struct cli_def *cli, int argc, int req_argc)
return 0;
}
-static int cmd_call_odp_cls_print_all(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[] ODP_UNUSED, int argc)
+/* Not shared, used only in the server thread. */
+static struct cli_def *cli;
+static char *cli_log_fn_buf;
+
+ODP_PRINTF_FORMAT(2, 0)
+static int cli_log_va(odp_log_level_t level, const char *fmt, va_list in_args)
+{
+ (void)level;
+
+ va_list args;
+ char *str = NULL, *p, *last;
+ int len;
+
+ /*
+ * This function should be just a simple call to cli_vabufprint().
+ * Unfortunately libcli (at least versions 1.9.7 - 1.10.4) has a few
+ * bugs. cli_print() prints a newline at the end even if the string
+ * doesn't end in a newline. cli_*bufprint() on the other hand just
+ * throws away everything after the last newline.
+ *
+ * The following code ensures that each cli_*print() ends in a newline.
+ * If the string does not end in a newline, we keep the part of the
+ * string after the last newline and use it the next time we're called.
+ */
+ va_copy(args, in_args);
+ len = vsnprintf(NULL, 0, fmt, args);
+ va_end(args);
+
+ if (len < 0) {
+ ODPH_ERR("vsnprintf failed\n");
+ goto out;
+ }
+
+ len++;
+ str = malloc(len);
+
+ if (!str) {
+ ODPH_ERR("malloc failed\n");
+ return -1;
+ }
+
+ va_copy(args, in_args);
+ len = vsnprintf(str, len, fmt, args);
+ va_end(args);
+
+ if (len < 0) {
+ ODPH_ERR("vsnprintf failed\n");
+ goto out;
+ }
+
+ p = str;
+ last = strrchr(p, '\n');
+
+ if (last) {
+ *last++ = 0;
+ if (cli_log_fn_buf) {
+ cli_bufprint(cli, "%s%s\n", cli_log_fn_buf, p);
+ free(cli_log_fn_buf);
+ cli_log_fn_buf = NULL;
+ } else {
+ cli_bufprint(cli, "%s\n", p);
+ }
+ p = last;
+ }
+
+ if (*p) {
+ if (cli_log_fn_buf) {
+ char *buffer_new =
+ malloc(strlen(cli_log_fn_buf) + strlen(p) + 1);
+
+ if (!buffer_new) {
+ ODPH_ERR("malloc failed\n");
+ goto out;
+ }
+
+ strcpy(buffer_new, cli_log_fn_buf);
+ strcat(buffer_new, p);
+ free(cli_log_fn_buf);
+ cli_log_fn_buf = buffer_new;
+ } else {
+ cli_log_fn_buf = malloc(strlen(p) + 1);
+
+ if (!cli_log_fn_buf) {
+ ODPH_ERR("malloc failed\n");
+ goto out;
+ }
+
+ strcpy(cli_log_fn_buf, p);
+ }
+ }
+
+out:
+ free(str);
+
+ return len;
+}
+
+ODP_PRINTF_FORMAT(2, 3)
+static int cli_log(odp_log_level_t level, const char *fmt, ...)
+{
+ (void)level;
+
+ int r;
+ va_list args;
+
+ va_start(args, fmt);
+ r = cli_log_va(level, fmt, args);
+ va_end(args);
+
+ return r;
+}
+
+static int cmd_odp_cls_print_all(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
{
if (check_num_args(cli, argc, 0))
return CLI_ERROR;
@@ -207,9 +319,8 @@ static int cmd_call_odp_cls_print_all(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_ipsec_print(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[] ODP_UNUSED, int argc)
+static int cmd_odp_ipsec_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
{
if (check_num_args(cli, argc, 0))
return CLI_ERROR;
@@ -219,9 +330,8 @@ static int cmd_call_odp_ipsec_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_shm_print_all(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[] ODP_UNUSED, int argc)
+static int cmd_odp_shm_print_all(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
{
if (check_num_args(cli, argc, 0))
return CLI_ERROR;
@@ -231,9 +341,8 @@ static int cmd_call_odp_shm_print_all(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_sys_config_print(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[] ODP_UNUSED, int argc)
+static int cmd_odp_sys_config_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
{
if (check_num_args(cli, argc, 0))
return CLI_ERROR;
@@ -243,9 +352,8 @@ static int cmd_call_odp_sys_config_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_sys_info_print(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[] ODP_UNUSED, int argc)
+static int cmd_odp_sys_info_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
{
if (check_num_args(cli, argc, 0))
return CLI_ERROR;
@@ -255,9 +363,8 @@ static int cmd_call_odp_sys_info_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_pktio_print(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[], int argc)
+static int cmd_odp_pktio_print(struct cli_def *cli, const char *command ODP_UNUSED, char *argv[],
+ int argc)
{
if (check_num_args(cli, argc, 1))
return CLI_ERROR;
@@ -274,9 +381,38 @@ static int cmd_call_odp_pktio_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_pool_print(struct cli_def *cli,
- const char *command ODP_UNUSED, char *argv[],
- int argc)
+static int cmd_odp_pktio_extra_stats_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[], int argc)
+{
+ if (check_num_args(cli, argc, 1))
+ return CLI_ERROR;
+
+ odp_pktio_t hdl = odp_pktio_lookup(argv[0]);
+
+ if (hdl == ODP_PKTIO_INVALID) {
+ cli_error(cli, "%% Name not found.");
+ return CLI_ERROR;
+ }
+
+ odp_pktio_extra_stats_print(hdl);
+
+ return CLI_OK;
+}
+
+static int cmd_odp_pool_print_all(struct cli_def *cli,
+ const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
+{
+ if (check_num_args(cli, argc, 0))
+ return CLI_ERROR;
+
+ odp_pool_print_all();
+
+ return CLI_OK;
+}
+
+static int cmd_odp_pool_print(struct cli_def *cli, const char *command ODP_UNUSED, char *argv[],
+ int argc)
{
if (check_num_args(cli, argc, 1))
return CLI_ERROR;
@@ -293,9 +429,8 @@ static int cmd_call_odp_pool_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_queue_print(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[], int argc)
+static int cmd_odp_queue_print(struct cli_def *cli, const char *command ODP_UNUSED, char *argv[],
+ int argc)
{
if (check_num_args(cli, argc, 1))
return CLI_ERROR;
@@ -312,9 +447,8 @@ static int cmd_call_odp_queue_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_queue_print_all(struct cli_def *cli,
- const char *command ODP_UNUSED,
- char *argv[] ODP_UNUSED, int argc)
+static int cmd_odp_queue_print_all(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
{
if (check_num_args(cli, argc, 0))
return CLI_ERROR;
@@ -324,9 +458,19 @@ static int cmd_call_odp_queue_print_all(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_call_odp_shm_print(struct cli_def *cli,
- const char *command ODP_UNUSED, char *argv[],
- int argc)
+static int cmd_odp_schedule_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[] ODP_UNUSED, int argc)
+{
+ if (check_num_args(cli, argc, 0))
+ return CLI_ERROR;
+
+ odp_schedule_print();
+
+ return CLI_OK;
+}
+
+static int cmd_odp_shm_print(struct cli_def *cli, const char *command ODP_UNUSED, char *argv[],
+ int argc)
{
if (check_num_args(cli, argc, 1))
return CLI_ERROR;
@@ -343,175 +487,248 @@ static int cmd_call_odp_shm_print(struct cli_def *cli,
return CLI_OK;
}
-static int cmd_user_cmd(struct cli_def *cli ODP_UNUSED, const char *command,
- char *argv[], int argc)
+static int cmd_odp_pktio_stats_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[], int argc)
{
- cli_shm_t *shm = shm_lookup();
+ if (check_num_args(cli, argc, 1))
+ return CLI_ERROR;
- if (!shm) {
- ODPH_ERR("Error: shm %s not found\n", shm_name);
+ odp_pktio_t hdl = odp_pktio_lookup(argv[0]);
+
+ if (hdl == ODP_PKTIO_INVALID) {
+ cli_error(cli, "%% Name not found.");
return CLI_ERROR;
}
- for (uint32_t i = 0; i < shm->num_user_commands; i++) {
- if (!strcasecmp(command, shm->user_cmd[i].name)) {
- shm->user_cmd[i].fn(argc, argv);
- break;
- }
+ odp_pktio_stats_t stats;
+
+ if (odp_pktio_stats(hdl, &stats) < 0) {
+ cli_error(cli, "%% Unable to query stats.");
+ return CLI_ERROR;
}
+ cli_log(ODP_LOG_PRINT, "Pktio statistics\n----------------\n");
+ cli_log(ODP_LOG_PRINT, " in_octets: %" PRIu64 "\n", stats.in_octets);
+ cli_log(ODP_LOG_PRINT, " in_packets: %" PRIu64 "\n", stats.in_packets);
+ cli_log(ODP_LOG_PRINT, " in_ucast_pkts: %" PRIu64 "\n", stats.in_ucast_pkts);
+ cli_log(ODP_LOG_PRINT, " in_mcast_pkts: %" PRIu64 "\n", stats.in_mcast_pkts);
+ cli_log(ODP_LOG_PRINT, " in_bcast_pkts: %" PRIu64 "\n", stats.in_bcast_pkts);
+ cli_log(ODP_LOG_PRINT, " in_discards: %" PRIu64 "\n", stats.in_discards);
+ cli_log(ODP_LOG_PRINT, " in_errors: %" PRIu64 "\n", stats.in_errors);
+ cli_log(ODP_LOG_PRINT, " out_octets: %" PRIu64 "\n", stats.out_octets);
+ cli_log(ODP_LOG_PRINT, " out_packets: %" PRIu64 "\n", stats.out_packets);
+ cli_log(ODP_LOG_PRINT, " out_ucast_pkts: %" PRIu64 "\n", stats.out_ucast_pkts);
+ cli_log(ODP_LOG_PRINT, " out_mcast_pkts: %" PRIu64 "\n", stats.out_mcast_pkts);
+ cli_log(ODP_LOG_PRINT, " out_bcast_pkts: %" PRIu64 "\n", stats.out_bcast_pkts);
+ cli_log(ODP_LOG_PRINT, " out_discards: %" PRIu64 "\n", stats.out_discards);
+ cli_log(ODP_LOG_PRINT, " out_errors: %" PRIu64 "\n\n", stats.out_errors);
+
return CLI_OK;
}
-static struct cli_def *create_cli(cli_shm_t *shm)
+static void cli_log_pktin_queue_stats(odp_pktin_queue_stats_t *stats)
{
- struct cli_command *c;
- struct cli_def *cli;
+ cli_log(ODP_LOG_PRINT, " octets: %" PRIu64 "\n", stats->octets);
+ cli_log(ODP_LOG_PRINT, " packets: %" PRIu64 "\n", stats->packets);
+ cli_log(ODP_LOG_PRINT, " discards: %" PRIu64 "\n", stats->discards);
+ cli_log(ODP_LOG_PRINT, " errors: %" PRIu64 "\n", stats->errors);
+}
- cli = cli_init();
- cli_set_banner(cli, NULL);
- cli_set_hostname(cli, shm->cli_param.hostname);
+static void cli_log_pktout_queue_stats(odp_pktout_queue_stats_t *stats)
+{
+ cli_log(ODP_LOG_PRINT, " octets: %" PRIu64 "\n", stats->octets);
+ cli_log(ODP_LOG_PRINT, " packets: %" PRIu64 "\n", stats->packets);
+ cli_log(ODP_LOG_PRINT, " discards: %" PRIu64 "\n", stats->discards);
+ cli_log(ODP_LOG_PRINT, " errors: %" PRIu64 "\n", stats->errors);
+}
- c = cli_register_command(cli, NULL, "call", NULL,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC,
- "Call ODP API function.");
- cli_register_command(cli, c, "odp_cls_print_all",
- cmd_call_odp_cls_print_all,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, NULL);
- cli_register_command(cli, c, "odp_ipsec_print",
- cmd_call_odp_ipsec_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, NULL);
- cli_register_command(cli, c, "odp_pktio_print",
- cmd_call_odp_pktio_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, "<name>");
- cli_register_command(cli, c, "odp_pool_print",
- cmd_call_odp_pool_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, "<name>");
- cli_register_command(cli, c, "odp_queue_print",
- cmd_call_odp_queue_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, "<name>");
- cli_register_command(cli, c, "odp_queue_print_all",
- cmd_call_odp_queue_print_all,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, NULL);
- cli_register_command(cli, c, "odp_shm_print_all",
- cmd_call_odp_shm_print_all,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, NULL);
- cli_register_command(cli, c, "odp_shm_print",
- cmd_call_odp_shm_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, "<name>");
- cli_register_command(cli, c, "odp_sys_config_print",
- cmd_call_odp_sys_config_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, NULL);
- cli_register_command(cli, c, "odp_sys_info_print",
- cmd_call_odp_sys_info_print,
- PRIVILEGE_UNPRIVILEGED, MODE_EXEC, NULL);
+static int cmd_odp_pktio_queue_stats_print(struct cli_def *cli, const char *command ODP_UNUSED,
+ char *argv[], int argc)
+{
+ if (check_num_args(cli, argc, 1))
+ return CLI_ERROR;
- for (uint32_t i = 0; i < shm->num_user_commands; i++) {
- cli_register_command(cli, NULL, shm->user_cmd[i].name,
- cmd_user_cmd, PRIVILEGE_UNPRIVILEGED,
- MODE_EXEC, shm->user_cmd[i].help);
+ odp_pktio_t hdl = odp_pktio_lookup(argv[0]);
+
+ if (hdl == ODP_PKTIO_INVALID) {
+ cli_error(cli, "%% Name not found.");
+ return CLI_ERROR;
}
- return cli;
-}
+ int in_q_cnt = odp_pktin_queue(hdl, NULL, 0);
-/* Not shared, used only in the server thread. */
-static struct cli_def *cli;
-static char *cli_log_fn_buf;
+ if (in_q_cnt > 0) {
+ odp_pktin_queue_t in_qs[in_q_cnt];
+ odp_pktin_queue_stats_t in_stats;
-ODP_PRINTF_FORMAT(2, 0)
-static int cli_log_va(odp_log_level_t level, const char *fmt, va_list in_args)
-{
- (void)level;
+ in_q_cnt = odp_pktin_queue(hdl, in_qs, in_q_cnt);
- va_list args;
- char *str, *p, *last;
- int len;
+ cli_log(ODP_LOG_PRINT, "Pktin queue statistics\n----------------------\n");
- /*
- * This function should be just a simple call to cli_vabufprint().
- * Unfortunately libcli (at least versions 1.9.7 - 1.10.4) has a few
- * bugs. cli_print() prints a newline at the end even if the string
- * doesn't end in a newline. cli_*bufprint() on the other hand just
- * throws away everything after the last newline.
- *
- * The following code ensures that each cli_*print() ends in a newline.
- * If the string does not end in a newline, we keep the part of the
- * string after the last newline and use it the next time we're called.
- */
- va_copy(args, in_args);
- len = vsnprintf(NULL, 0, fmt, args) + 1;
- va_end(args);
- str = malloc(len);
+ for (int i = 0; i < in_q_cnt; i++) {
+ cli_log(ODP_LOG_PRINT, "Pktin queue: %d:\n", i);
- if (!str) {
- ODPH_ERR("malloc failed\n");
- return -1;
+ if (odp_pktin_queue_stats(in_qs[i], &in_stats) < 0) {
+ cli_log(ODP_LOG_PRINT,
+ " (Unable to read statistics, skipping)\n");
+ continue;
+ }
+
+ cli_log_pktin_queue_stats(&in_stats);
+ }
}
- va_copy(args, in_args);
- vsnprintf(str, len, fmt, args);
- va_end(args);
- p = str;
- last = strrchr(p, '\n');
+ int out_q_cnt = odp_pktout_queue(hdl, NULL, 0);
- if (last) {
- *last++ = 0;
- if (cli_log_fn_buf) {
- cli_bufprint(cli, "%s%s\n", cli_log_fn_buf, p);
- free(cli_log_fn_buf);
- cli_log_fn_buf = NULL;
- } else {
- cli_bufprint(cli, "%s\n", p);
+ if (out_q_cnt > 0) {
+ odp_pktout_queue_t out_qs[out_q_cnt];
+ odp_pktout_queue_stats_t out_stats;
+
+ out_q_cnt = odp_pktout_queue(hdl, out_qs, out_q_cnt);
+
+ cli_log(ODP_LOG_PRINT, "Pktout queue statistics\n-----------------------\n");
+
+ for (int i = 0; i < out_q_cnt; i++) {
+ cli_log(ODP_LOG_PRINT, "Pktout queue: %d:\n", i);
+
+ if (odp_pktout_queue_stats(out_qs[i], &out_stats) < 0) {
+ cli_log(ODP_LOG_PRINT,
+ " (Unable to read statistics, skipping)\n");
+ continue;
+ }
+
+ cli_log_pktout_queue_stats(&out_stats);
}
- p = last;
}
- if (*p) {
- if (cli_log_fn_buf) {
- char *buffer_new =
- malloc(strlen(cli_log_fn_buf) + strlen(p) + 1);
+ cli_log(ODP_LOG_PRINT, "\n");
- if (!buffer_new) {
- ODPH_ERR("malloc failed\n");
- goto out;
+ return CLI_OK;
+}
+
+static int cmd_odp_pktio_event_queue_stats_print(struct cli_def *cli,
+ const char *command ODP_UNUSED, char *argv[],
+ int argc)
+{
+ if (check_num_args(cli, argc, 1))
+ return CLI_ERROR;
+
+ odp_pktio_t hdl = odp_pktio_lookup(argv[0]);
+
+ if (hdl == ODP_PKTIO_INVALID) {
+ cli_error(cli, "%% Name not found.");
+ return CLI_ERROR;
+ }
+
+ int in_q_cnt = odp_pktin_event_queue(hdl, NULL, 0);
+
+ if (in_q_cnt > 0) {
+ odp_queue_t in_qs[in_q_cnt];
+ odp_pktin_queue_stats_t in_stats;
+
+ in_q_cnt = odp_pktin_event_queue(hdl, in_qs, in_q_cnt);
+
+ cli_log(ODP_LOG_PRINT,
+ "Pktin event queue statistics\n----------------------------\n");
+
+ for (int i = 0; i < in_q_cnt; i++) {
+ cli_log(ODP_LOG_PRINT, "Pktin event queue: %d:\n", i);
+
+ if (odp_pktin_event_queue_stats(hdl, in_qs[i], &in_stats) < 0) {
+ cli_log(ODP_LOG_PRINT,
+ " (Unable to read statistics, skipping)\n");
+ continue;
}
- strcpy(buffer_new, cli_log_fn_buf);
- strcat(buffer_new, p);
- free(cli_log_fn_buf);
- cli_log_fn_buf = buffer_new;
- } else {
- cli_log_fn_buf = malloc(strlen(p) + 1);
+ cli_log_pktin_queue_stats(&in_stats);
+ }
+ }
- if (!cli_log_fn_buf) {
- ODPH_ERR("malloc failed\n");
- goto out;
+ int out_q_cnt = odp_pktout_event_queue(hdl, NULL, 0);
+
+ if (out_q_cnt > 0) {
+ odp_queue_t out_qs[out_q_cnt];
+ odp_pktout_queue_stats_t out_stats;
+
+ out_q_cnt = odp_pktout_event_queue(hdl, out_qs, out_q_cnt);
+
+ cli_log(ODP_LOG_PRINT,
+ "Pktout event queue statistics\n-----------------------------\n");
+
+ for (int i = 0; i < out_q_cnt; i++) {
+ cli_log(ODP_LOG_PRINT, "Pktout event queue: %d:\n", i);
+
+ if (odp_pktout_event_queue_stats(hdl, out_qs[i], &out_stats) < 0) {
+ cli_log(ODP_LOG_PRINT,
+ " (Unable to read statistics, skipping)\n");
+ continue;
}
- strcpy(cli_log_fn_buf, p);
+ cli_log_pktout_queue_stats(&out_stats);
}
}
-out:
- free(str);
+ cli_log(ODP_LOG_PRINT, "\n");
- return len;
+ return CLI_OK;
}
-ODP_PRINTF_FORMAT(2, 3)
-static int cli_log(odp_log_level_t level, const char *fmt, ...)
+static int cmd_user_cmd(struct cli_def *cli ODP_UNUSED, const char *command,
+ char *argv[], int argc)
{
- (void)level;
+ cli_shm_t *shm = shm_lookup();
- int r;
- va_list args;
+ if (!shm) {
+ ODPH_ERR("Error: shm %s not found\n", shm_name);
+ return CLI_ERROR;
+ }
- va_start(args, fmt);
- r = cli_log_va(level, fmt, args);
- va_end(args);
+ for (uint32_t i = 0; i < shm->num_user_commands; i++) {
+ if (!strcasecmp(command, shm->user_cmd[i].name)) {
+ shm->user_cmd[i].fn(argc, argv);
+ break;
+ }
+ }
- return r;
+ return CLI_OK;
+}
+
+static struct cli_def *create_cli(cli_shm_t *shm)
+{
+ struct cli_def *cli;
+
+ cli = cli_init();
+ cli_set_banner(cli, NULL);
+ cli_set_hostname(cli, shm->cli_param.hostname);
+
+#define CMD(name, help) \
+ cli_register_command(cli, NULL, #name, cmd_ ## name, \
+ PRIVILEGE_UNPRIVILEGED, MODE_EXEC, help)
+
+ CMD(odp_cls_print_all, NULL);
+ CMD(odp_ipsec_print, NULL);
+ CMD(odp_pktio_event_queue_stats_print, "<name>");
+ CMD(odp_pktio_extra_stats_print, "<name>");
+ CMD(odp_pktio_print, "<name>");
+ CMD(odp_pktio_queue_stats_print, "<name>");
+ CMD(odp_pktio_stats_print, "<name>");
+ CMD(odp_pool_print_all, NULL);
+ CMD(odp_pool_print, "<name>");
+ CMD(odp_queue_print_all, NULL);
+ CMD(odp_queue_print, "<name>");
+ CMD(odp_schedule_print, NULL);
+ CMD(odp_shm_print_all, NULL);
+ CMD(odp_shm_print, "<name>");
+ CMD(odp_sys_config_print, NULL);
+ CMD(odp_sys_info_print, NULL);
+
+ for (uint32_t i = 0; i < shm->num_user_commands; i++) {
+ cli_register_command(cli, NULL, shm->user_cmd[i].name,
+ cmd_user_cmd, PRIVILEGE_UNPRIVILEGED,
+ MODE_EXEC, shm->user_cmd[i].help);
+ }
+
+ return cli;
}
ODP_PRINTF_FORMAT(1, 2)
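
Editor's note: the CMD() helper introduced above stringifies the command name and
pairs it with the matching cmd_* handler, so a line such as
CMD(odp_pool_print, "<name>") expands roughly to:

    /* Expansion of CMD(odp_pool_print, "<name>") per the macro definition above */
    cli_register_command(cli, NULL, "odp_pool_print", cmd_odp_pool_print,
                         PRIVILEGE_UNPRIVILEGED, MODE_EXEC, "<name>");
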
diff --git a/helper/include/odp/helper/ip.h b/helper/include/odp/helper/ip.h
index 3fac438f1..0faf47f82 100644
--- a/helper/include/odp/helper/ip.h
+++ b/helper/include/odp/helper/ip.h
@@ -111,7 +111,7 @@ static inline int odph_ipv4_csum(odp_packet_t pkt,
odph_ipv4hdr_t *ip,
odp_u16sum_t *chksum)
{
- unsigned nleft = ODPH_IPV4HDR_IHL(ip->ver_ihl) * 4;
+ uint32_t nleft = (uint32_t)(ODPH_IPV4HDR_IHL(ip->ver_ihl) * 4);
uint16_t buf[nleft / 2];
int res;
@@ -119,13 +119,13 @@ static inline int odph_ipv4_csum(odp_packet_t pkt,
return -1;
ip->chksum = 0;
memcpy(buf, ip, sizeof(*ip));
- res = odp_packet_copy_to_mem(pkt, offset + sizeof(*ip),
- nleft - sizeof(*ip),
+ res = odp_packet_copy_to_mem(pkt, offset + (uint32_t)sizeof(*ip),
+ nleft - (uint32_t)sizeof(*ip),
buf + sizeof(*ip) / 2);
if (odp_unlikely(res < 0))
return res;
- *chksum = ~odp_chksum_ones_comp16(buf, nleft);
+ *chksum = (odp_u16sum_t)~odp_chksum_ones_comp16(buf, nleft);
return 0;
}
diff --git a/helper/include/odp/helper/threads.h b/helper/include/odp/helper/threads.h
index 53ce8cd07..00f47fd76 100644
--- a/helper/include/odp/helper/threads.h
+++ b/helper/include/odp/helper/threads.h
@@ -83,8 +83,11 @@ typedef struct {
/** Helper internal thread start arguments. Used both in process and thread
* mode */
typedef struct {
- /** Atomic variable to sync status */
- odp_atomic_u32_t status;
+ /** Thread status */
+ uint32_t status;
+
+ /** Thread initialization status */
+ odp_atomic_u32_t *init_status;
/** Process or thread */
odp_mem_model_t mem_model;
diff --git a/helper/threads.c b/helper/threads.c
index 90dd45f50..1b5df8965 100644
--- a/helper/threads.c
+++ b/helper/threads.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,6 +17,7 @@
#include <sys/syscall.h>
#include <errno.h>
#include <limits.h>
+#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>
@@ -28,9 +29,8 @@
/* Thread status codes */
#define NOT_STARTED 0
-#define SYNC_INIT 1
-#define INIT_DONE 2
-#define STARTED 3
+#define INIT_DONE 1
+#define STARTED 2
static odph_helper_options_t helper_options;
@@ -65,8 +65,8 @@ static void *run_thread(void *arg)
"pthread" : "process",
(int)getpid());
- if (odp_atomic_load_u32(&start_args->status) == SYNC_INIT)
- odp_atomic_store_rel_u32(&start_args->status, INIT_DONE);
+ if (start_args->init_status)
+ odp_atomic_store_rel_u32(start_args->init_status, INIT_DONE);
status = thr_params->start(thr_params->arg);
ret = odp_term_local();
@@ -281,6 +281,7 @@ int odph_thread_create(odph_thread_t thread[],
int i, num_cpu, cpu;
const odp_cpumask_t *cpumask = param->cpumask;
int use_pthread = 1;
+ odp_atomic_u32_t *init_status = NULL;
if (param->thread_model == 1)
use_pthread = 0;
@@ -303,6 +304,16 @@ int odph_thread_create(odph_thread_t thread[],
memset(thread, 0, num * sizeof(odph_thread_t));
+ if (param->sync) {
+ init_status = mmap(NULL, sizeof(odp_atomic_u32_t), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+
+ if (init_status == MAP_FAILED) {
+ ODPH_ERR("mmap() failed: %s\n", strerror(errno));
+ return -1;
+ }
+ }
+
cpu = odp_cpumask_first(cpumask);
for (i = 0; i < num; i++) {
odph_thread_start_args_t *start_args = &thread[i].start_args;
@@ -314,11 +325,10 @@ int odph_thread_create(odph_thread_t thread[],
start_args->thr_params = thr_param[i];
start_args->instance = param->instance;
-
- if (param->sync)
- odp_atomic_init_u32(&start_args->status, SYNC_INIT);
- else
- odp_atomic_init_u32(&start_args->status, NOT_STARTED);
+ start_args->status = NOT_STARTED;
+ start_args->init_status = init_status;
+ if (init_status)
+ odp_atomic_init_u32(init_status, NOT_STARTED);
if (use_pthread) {
if (create_pthread(&thread[i], cpu, start_args->thr_params.stack_size))
@@ -329,12 +339,11 @@ int odph_thread_create(odph_thread_t thread[],
}
/* Wait newly created thread to update status */
- if (param->sync) {
+ if (init_status) {
odp_time_t t1, t2;
uint64_t diff_ns;
uint32_t status;
int timeout = 0;
- odp_atomic_u32_t *atomic = &start_args->status;
uint64_t timeout_ns = param->sync_timeout;
if (!timeout_ns)
@@ -347,7 +356,7 @@ int odph_thread_create(odph_thread_t thread[],
t2 = odp_time_local();
diff_ns = odp_time_diff_ns(t2, t1);
timeout = diff_ns > timeout_ns;
- status = odp_atomic_load_acq_u32(atomic);
+ status = odp_atomic_load_acq_u32(init_status);
} while (status != INIT_DONE && timeout == 0);
@@ -357,11 +366,16 @@ int odph_thread_create(odph_thread_t thread[],
}
}
- odp_atomic_store_u32(&start_args->status, STARTED);
+ start_args->status = STARTED;
cpu = odp_cpumask_next(cpumask, cpu);
}
+ if (init_status) {
+ if (munmap(init_status, sizeof(odp_atomic_u32_t)))
+ ODPH_ERR("munmap() failed: %s\n", strerror(errno));
+ }
+
return i;
}
@@ -373,7 +387,7 @@ int odph_thread_join(odph_thread_t thread[], int num)
for (i = 0; i < num; i++) {
start_args = &thread[i].start_args;
- if (odp_atomic_load_u32(&start_args->status) != STARTED) {
+ if (start_args->status != STARTED) {
ODPH_DBG("Thread (i:%i) not started.\n", i);
break;
}
@@ -386,7 +400,7 @@ int odph_thread_join(odph_thread_t thread[], int num)
break;
}
- odp_atomic_store_u32(&start_args->status, NOT_STARTED);
+ start_args->status = NOT_STARTED;
}
return i;
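
Editor's note: the helper change above moves the start-up synchronization status
into a single odp_atomic_u32_t placed in MAP_SHARED | MAP_ANONYMOUS memory, so the
same handshake works when worker "threads" are forked processes. A standalone
sketch of that pattern (plain POSIX plus ODP atomics, not the helper code itself):

    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <unistd.h>
    #include <odp_api.h>

    /* Sketch: synchronize a forked child with its parent through one atomic
     * placed in shared anonymous memory. */
    static int sync_with_child(void)
    {
            odp_atomic_u32_t *status = mmap(NULL, sizeof(odp_atomic_u32_t),
                                            PROT_READ | PROT_WRITE,
                                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);

            if (status == MAP_FAILED)
                    return -1;

            odp_atomic_init_u32(status, 0);

            if (fork() == 0) {
                    /* child: report that local init is done */
                    odp_atomic_store_rel_u32(status, 1);
                    _exit(0);
            }

            /* parent: wait for the child to signal readiness */
            while (odp_atomic_load_acq_u32(status) != 1)
                    ;

            wait(NULL);
            munmap(status, sizeof(odp_atomic_u32_t));
            return 0;
    }
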
diff --git a/include/odp/api/spec/buffer.h b/include/odp/api/spec/buffer.h
index c11a3c7b4..b739e549f 100644
--- a/include/odp/api/spec/buffer.h
+++ b/include/odp/api/spec/buffer.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -78,6 +79,19 @@ void *odp_buffer_addr(odp_buffer_t buf);
uint32_t odp_buffer_size(odp_buffer_t buf);
/**
+ * Buffer user area
+ *
+ * Returns pointer to the user area associated with the buffer. Size of the area is fixed
+ * and defined in buffer pool parameters.
+ *
+ * @param buf Buffer handle
+ *
+ * @return Pointer to the user area of the buffer
+ * @retval NULL The buffer does not have user area
+ */
+void *odp_buffer_user_area(odp_buffer_t buf);
+
+/**
* Check that buffer is valid
*
* This function can be used for debugging purposes to check if a buffer handle represents
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index b45731e1b..4f2961f3c 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -154,14 +154,23 @@ typedef enum {
*/
ODP_CIPHER_ALG_AES_EEA2,
- /** Confidentiality 128-EEA3 algorithm
+ /** ZUC based confidentiality algorithm
*
- * ZUC-based 128-EEA3 algorithm.
+ * 128-EEA3/128-NEA3 algorithm when key length is 128 bits.
*
* IV (128 bit) should be formatted according to the ETSI/SAGE
* 128-EEA3 & 128-EIA3 specification:
* COUNT || BEARER || DIRECTION || 0...0 ||
* COUNT || BEARER || DIRECTION || 0...0 ||
+ *
+ * 256-bit key length support is experimental and subject to
+ * change. The following variants may be supported:
+ *
+ * - ZUC-256 with 25 byte IV (of which 184 bits are variable)
+ * as specified in "The ZUC-256 Stream Cipher".
+ * - ZUC-256 with 16 byte IV as specified in
+ * "An Addendum to the ZUC-256 Stream Cipher",
+ * https://eprint.iacr.org/2021/1439
*/
ODP_CIPHER_ALG_ZUC_EEA3,
@@ -304,15 +313,24 @@ typedef enum {
*/
ODP_AUTH_ALG_AES_EIA2,
- /** Integrity 128-EIA3 algorithm
+ /** ZUC-based integrity algorithm.
*
- * ZUC-based 128-EIA3 algorithm.
+ * 128-EIA3/128-NIA3 algorithm when key length is 128 bits.
*
* IV (128 bit) should be formatted according to the ETSI/SAGE
* 128-EA3 & 128-EIA2 specification:
* COUNT || BEARER ||
* DIRECTION XOR COUNT0 || COUNT1 .. COUNT31 ||
* BEARER || 0...0 || DIRECTION || 0...0
+ *
+ * 256-bit key length support is experimental and subject to
+ * change. The following variants may be supported:
+ *
+ * - ZUC-256 with 25 byte IV (of which 184 bits are variable) and
+ * 32/64/128 bit MAC as specified in "The ZUC-256 Stream Cipher".
+ * - ZUC-256 with 16 byte IV and 32/64/128 bit MAC as specified in
+ * "An Addendum to the ZUC-256 Stream Cipher",
+ * https://eprint.iacr.org/2021/1439
*/
ODP_AUTH_ALG_ZUC_EIA3,
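
Editor's note: since the ZUC-256 variants described above are experimental, an
application would normally probe for 256-bit key support at runtime instead of
assuming it. A hedged sketch using the existing cipher capability query (the
key_len field is part of the standard odp_crypto_cipher_capability_t):

    /* Sketch: check whether ZUC with a 256-bit (32 byte) key is supported */
    odp_crypto_cipher_capability_t capa[16];
    int num = odp_crypto_cipher_capability(ODP_CIPHER_ALG_ZUC_EEA3, capa, 16);
    int zuc256_supported = 0;

    for (int i = 0; i < num && i < 16; i++) {
            if (capa[i].key_len == 32)
                    zuc256_supported = 1;
    }
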
diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h
index c745d29ad..a8d4caa8c 100644
--- a/include/odp/api/spec/packet.h
+++ b/include/odp/api/spec/packet.h
@@ -1492,6 +1492,31 @@ void *odp_packet_user_area(odp_packet_t pkt);
uint32_t odp_packet_user_area_size(odp_packet_t pkt);
/**
+ * Check user flag
+ *
+ * Implementation clears user flag during new packet creation (e.g. alloc and packet input)
+ * and reset. User may set the flag with odp_packet_user_flag_set(). Implementation never
+ * sets the flag, only clears it. The flag may be useful e.g. to mark when the user area
+ * content is valid.
+ *
+ * @param pkt Packet handle
+ *
+ * @retval 0 User flag is clear
+ * @retval !0 User flag is set
+ */
+int odp_packet_user_flag(odp_packet_t pkt);
+
+/**
+ * Set user flag
+ *
+ * Set (or clear) the user flag.
+ *
+ * @param pkt Packet handle
+ * @param val New value for the flag. Zero clears the flag, other values set the flag.
+ */
+void odp_packet_user_flag_set(odp_packet_t pkt, int val);
+
+/**
* Layer 2 start pointer
*
* Returns pointer to the start of layer 2. Optionally, outputs number of data
@@ -2213,6 +2238,44 @@ uint32_t odp_packet_vector_size(odp_packet_vector_t pktv);
void odp_packet_vector_size_set(odp_packet_vector_t pktv, uint32_t size);
/**
+ * Packet vector user area
+ *
+ * Returns pointer to the user area associated with the packet vector. Size of the area is fixed
+ * and defined in vector pool parameters.
+ *
+ * @param pktv Packet vector handle
+ *
+ * @return Pointer to the user area of the packet vector
+ * @retval NULL The packet vector does not have user area
+ */
+void *odp_packet_vector_user_area(odp_packet_vector_t pktv);
+
+/**
+ * Check user flag
+ *
+ * Implementation clears user flag during new packet vector creation (e.g. alloc and packet input)
+ * and reset. User may set the flag with odp_packet_vector_user_flag_set(). Implementation never
+ * sets the flag, only clears it. The flag may be useful e.g. to mark when the user area
+ * content is valid.
+ *
+ * @param pktv Packet vector handle
+ *
+ * @retval 0 User flag is clear
+ * @retval !0 User flag is set
+ */
+int odp_packet_vector_user_flag(odp_packet_vector_t pktv);
+
+/**
+ * Set user flag
+ *
+ * Set (or clear) the user flag.
+ *
+ * @param pktv Packet vector handle
+ * @param val New value for the flag. Zero clears the flag, other values set the flag.
+ */
+void odp_packet_vector_user_flag_set(odp_packet_vector_t pktv, int val);
+
+/**
* Check that packet vector is valid
*
* This function can be used for debugging purposes to check if a packet vector handle represents
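A short sketch of the new user flag and packet vector user area accessors; pkt and pktv are assumed to come from a packet pool and a vector pool created with a non-zero uarea_size:

#include <odp_api.h>

/* Sketch: mark the packet user area content valid via the new user flag. */
static void packet_mark_ctx(odp_packet_t pkt, uint32_t flow_id)
{
        uint32_t *ctx = odp_packet_user_area(pkt);

        if (ctx == NULL)
                return;

        *ctx = flow_id;
        odp_packet_user_flag_set(pkt, 1);   /* cleared again by alloc/reset */
}

/* Sketch: same idea for packet vectors and their new user area. */
static void vector_mark_ctx(odp_packet_vector_t pktv, uint32_t flow_id)
{
        uint32_t *ctx = odp_packet_vector_user_area(pktv);

        if (ctx == NULL)
                return;

        *ctx = flow_id;
        odp_packet_vector_user_flag_set(pktv, 1);
}

A later pipeline stage would then trust the context only when odp_packet_user_flag(pkt) or odp_packet_vector_user_flag(pktv) returns non-zero.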
diff --git a/include/odp/api/spec/pool_types.h b/include/odp/api/spec/pool_types.h
index e8026c642..9f433fba5 100644
--- a/include/odp/api/spec/pool_types.h
+++ b/include/odp/api/spec/pool_types.h
@@ -174,6 +174,9 @@ typedef struct odp_pool_capability_t {
* memory size for the pool. */
uint32_t max_num;
+ /** Maximum user area size in bytes */
+ uint32_t max_uarea_size;
+
/** Minimum size of thread local cache */
uint32_t min_cache_size;
@@ -250,10 +253,7 @@ typedef struct odp_pool_capability_t {
* memory size for the pool. */
uint32_t max_seg_len;
- /** Maximum user area size in bytes
- *
- * The value of zero means that limited only by the available
- * memory size for the pool. */
+ /** Maximum user area size in bytes */
uint32_t max_uarea_size;
/** Maximum number of subparameters
@@ -283,6 +283,9 @@ typedef struct odp_pool_capability_t {
* memory size for the pool. */
uint32_t max_num;
+ /** Maximum user area size in bytes */
+ uint32_t max_uarea_size;
+
/** Minimum size of thread local cache */
uint32_t min_cache_size;
@@ -304,9 +307,12 @@ typedef struct odp_pool_capability_t {
* memory size for the pool. */
uint32_t max_num;
- /** Maximum number of general types, such as odp_packet_t, in a vector. */
+ /** Maximum number of handles (such as odp_packet_t) in a vector. */
uint32_t max_size;
+ /** Maximum user area size in bytes */
+ uint32_t max_uarea_size;
+
/** Minimum size of thread local cache */
uint32_t min_cache_size;
@@ -344,10 +350,10 @@ typedef enum odp_pool_type_t {
/** Timeout pool */
ODP_POOL_TIMEOUT = ODP_EVENT_TIMEOUT,
- /** Vector pool
+ /** Vector event pool
*
- * The pool to hold a vector of general type such as odp_packet_t.
- * Each vector holds an array of generic types of the same type.
+ * Each vector event holds an array of handles. All handles of a vector
+ * are the same type (such as odp_packet_t).
* @see ODP_EVENT_PACKET_VECTOR
*/
ODP_POOL_VECTOR,
@@ -380,6 +386,12 @@ typedef struct odp_pool_param_t {
*/
uint32_t align;
+ /** Minimum user area size in bytes. The maximum value is defined by
+ * pool capability buf.max_uarea_size. Specify as 0 if no user
+ * area is needed. The default value is 0.
+ */
+ uint32_t uarea_size;
+
/** Maximum number of buffers cached locally per thread
*
* A non-zero value allows implementation to cache buffers
@@ -454,9 +466,9 @@ typedef struct odp_pool_param_t {
*/
uint32_t seg_len;
- /** User area size in bytes. The maximum value is defined by
+ /** Minimum user area size in bytes. The maximum value is defined by
* pool capability pkt.max_uarea_size. Specify as 0 if no user
- * area is needed.
+ * area is needed. The default value is 0.
*/
uint32_t uarea_size;
@@ -503,6 +515,12 @@ typedef struct odp_pool_param_t {
/** Number of timeouts in the pool */
uint32_t num;
+ /** Minimum user area size in bytes. The maximum value is defined by
+ * pool capability tmo.max_uarea_size. Specify as 0 if no user
+ * area is needed. The default value is 0.
+ */
+ uint32_t uarea_size;
+
/** Maximum number of timeouts cached locally per thread
*
* See buf.cache_size documentation for details.
@@ -515,9 +533,15 @@ typedef struct odp_pool_param_t {
/** Number of vectors in the pool */
uint32_t num;
- /** Maximum number of general types, such as odp_packet_t, in a vector. */
+ /** Maximum number of handles (such as odp_packet_t) in a vector. */
uint32_t max_size;
+ /** Minimum user area size in bytes. The maximum value is defined by
+ * pool capability vector.max_uarea_size. Specify as 0 if no user
+ * area is needed. The default value is 0.
+ */
+ uint32_t uarea_size;
+
/** Maximum number of vectors cached locally per thread
*
* See buf.cache_size documentation for details.
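The same pattern applies to all of the new per-pool user area limits: query the capability, clamp or reject the requested size, then set the (minimum) uarea_size parameter. A rough sketch for a vector pool, with arbitrary sizes:

#include <odp_api.h>

/* Sketch: vector pool creation checked against the new capabilities. */
static odp_pool_t vector_pool_with_uarea(void)
{
        odp_pool_capability_t capa;
        odp_pool_param_t param;
        uint32_t uarea = 8;

        if (odp_pool_capability(&capa))
                return ODP_POOL_INVALID;

        if (uarea > capa.vector.max_uarea_size)
                uarea = 0;   /* fall back to no user area */

        odp_pool_param_init(&param);
        param.type              = ODP_POOL_VECTOR;
        param.vector.num        = 128;
        param.vector.max_size   = capa.vector.max_size < 32 ?
                                  capa.vector.max_size : 32;
        param.vector.uarea_size = uarea;   /* minimum size, 0 = none */

        return odp_pool_create("vec_uarea_pool", &param);
}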
diff --git a/include/odp/api/spec/stash.h b/include/odp/api/spec/stash.h
index 38e388dd7..f5929c45a 100644
--- a/include/odp/api/spec/stash.h
+++ b/include/odp/api/spec/stash.h
@@ -132,6 +132,25 @@ uint64_t odp_stash_to_u64(odp_stash_t stash);
int32_t odp_stash_put(odp_stash_t stash, const void *obj, int32_t num);
/**
+ * Put batch of object handles into a stash
+ *
+ * Otherwise like odp_stash_put(), except that this function stores either all
+ * 'num' object handles or none. odp_stash_capability_t.max_put_batch defines
+ * the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param obj Points to an array of object handles to be stored.
+ * Object handle size is specified by 'obj_size' in stash
+ * creation parameters. The array must be 'obj_size' aligned
+ * in memory.
+ * @param num Number of object handles to store
+ *
+ * @return Number of object handles actually stored (0 or num)
+ * @retval <0 on failure
+ */
+int32_t odp_stash_put_batch(odp_stash_t stash, const void *obj, int32_t num);
+
+/**
* Put 32-bit integers into a stash
*
* Otherwise like odp_stash_put(), except that this function operates on 32-bit
@@ -148,6 +167,23 @@ int32_t odp_stash_put(odp_stash_t stash, const void *obj, int32_t num);
int32_t odp_stash_put_u32(odp_stash_t stash, const uint32_t val[], int32_t num);
/**
+ * Put batch of 32-bit integers into a stash
+ *
+ * Otherwise like odp_stash_put_u32(), except that this function stores either
+ * all 'num' integers or none. odp_stash_capability_t.max_put_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param val Points to an array of 32-bit integers to be stored. The array
+ * must be 32-bit aligned in memory.
+ * @param num Number of integers to store
+ *
+ * @return Number of integers actually stored (0 or num)
+ * @retval <0 on failure
+ */
+int32_t odp_stash_put_u32_batch(odp_stash_t stash, const uint32_t val[], int32_t num);
+
+/**
* Put 64-bit integers into a stash
*
* Otherwise like odp_stash_put(), except that this function operates on 64-bit
@@ -164,6 +200,23 @@ int32_t odp_stash_put_u32(odp_stash_t stash, const uint32_t val[], int32_t num);
int32_t odp_stash_put_u64(odp_stash_t stash, const uint64_t val[], int32_t num);
/**
+ * Put batch of 64-bit integers into a stash
+ *
+ * Otherwise like odp_stash_put_u64(), except that this function stores either
+ * all 'num' integers or none. odp_stash_capability_t.max_put_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param val Points to an array of 64-bit integers to be stored. The array
+ * must be 64-bit aligned in memory.
+ * @param num Number of integers to store
+ *
+ * @return Number of integers actually stored (0 or num)
+ * @retval <0 on failure
+ */
+int32_t odp_stash_put_u64_batch(odp_stash_t stash, const uint64_t val[], int32_t num);
+
+/**
* Put pointers into a stash
*
* Otherwise like odp_stash_put(), except that this function operates on
@@ -181,6 +234,23 @@ int32_t odp_stash_put_u64(odp_stash_t stash, const uint64_t val[], int32_t num);
int32_t odp_stash_put_ptr(odp_stash_t stash, const uintptr_t ptr[], int32_t num);
/**
+ * Put batch of pointers into a stash
+ *
+ * Otherwise like odp_stash_put_ptr(), except that this function stores either
+ * all 'num' pointers or none. odp_stash_capability_t.max_put_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param ptr Points to an array of pointers to be stored. The array must be
+ * pointer size aligned in memory.
+ * @param num Number of pointers to store
+ *
+ * @return Number of pointers actually stored (0 or num)
+ * @retval <0 on failure
+ */
+int32_t odp_stash_put_ptr_batch(odp_stash_t stash, const uintptr_t ptr[], int32_t num);
+
+/**
* Get object handles from a stash
*
* Get previously stored object handles from the stash. Application specifies
@@ -199,6 +269,25 @@ int32_t odp_stash_put_ptr(odp_stash_t stash, const uintptr_t ptr[], int32_t num)
int32_t odp_stash_get(odp_stash_t stash, void *obj, int32_t num);
/**
+ * Get batch of object handles from a stash
+ *
+ * Otherwise like odp_stash_get(), except that this function outputs either
+ * all 'num' object handles or none. odp_stash_capability_t.max_get_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param[out] obj Points to an array of object handles for output.
+ * Object handle size is specified by 'obj_size' in stash
+ * creation parameters. The array must be 'obj_size' aligned
+ * in memory.
+ * @param num Number of object handles to get from the stash
+ *
+ * @return Number of object handles actually output (0 or num) to 'obj' array
+ * @retval <0 on failure
+ */
+int32_t odp_stash_get_batch(odp_stash_t stash, void *obj, int32_t num);
+
+/**
* Get 32-bit integers from a stash
*
* Otherwise like odp_stash_get(), except that this function operates on 32-bit
@@ -215,6 +304,23 @@ int32_t odp_stash_get(odp_stash_t stash, void *obj, int32_t num);
int32_t odp_stash_get_u32(odp_stash_t stash, uint32_t val[], int32_t num);
/**
+ * Get batch of 32-bit integers from a stash
+ *
+ * Otherwise like odp_stash_get_u32(), except that this function outputs either
+ * all 'num' integers or none. odp_stash_capability_t.max_get_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param[out] val Points to an array of 32-bit integers for output. The
+ * array must be 32-bit aligned in memory.
+ * @param num Number of integers to get from the stash
+ *
+ * @return Number of integers actually output (0 or num) to 'val' array
+ * @retval <0 on failure
+ */
+int32_t odp_stash_get_u32_batch(odp_stash_t stash, uint32_t val[], int32_t num);
+
+/**
* Get 64-bit integers from a stash
*
* Otherwise like odp_stash_get(), except that this function operates on 64-bit
@@ -231,6 +337,23 @@ int32_t odp_stash_get_u32(odp_stash_t stash, uint32_t val[], int32_t num);
int32_t odp_stash_get_u64(odp_stash_t stash, uint64_t val[], int32_t num);
/**
+ * Get batch of 64-bit integers from a stash
+ *
+ * Otherwise like odp_stash_get_u64(), except that this function outputs either
+ * all 'num' integers or none. odp_stash_capability_t.max_get_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param[out] val Points to an array of 64-bit integers for output. The
+ * array must be 64-bit aligned in memory.
+ * @param num Number of integers to get from the stash
+ *
+ * @return Number of integers actually output (0 or num) to 'val' array
+ * @retval <0 on failure
+ */
+int32_t odp_stash_get_u64_batch(odp_stash_t stash, uint64_t val[], int32_t num);
+
+/**
* Get pointers from a stash
*
* Otherwise like odp_stash_get(), except that this function operates on
@@ -248,6 +371,23 @@ int32_t odp_stash_get_u64(odp_stash_t stash, uint64_t val[], int32_t num);
int32_t odp_stash_get_ptr(odp_stash_t stash, uintptr_t ptr[], int32_t num);
/**
+ * Get batch of pointers from a stash
+ *
+ * Otherwise like odp_stash_get_ptr(), except that this function outputs either
+ * all 'num' pointers or none. odp_stash_capability_t.max_get_batch
+ * defines the maximum supported batch size.
+ *
+ * @param stash Stash handle
+ * @param[out] ptr Points to an array of pointers for output. The array must
+ * be pointer size aligned in memory.
+ * @param num Number of pointers to get from the stash
+ *
+ * @return Number of pointers actually output (0 or num) to 'ptr' array
+ * @retval <0 on failure
+ */
+int32_t odp_stash_get_ptr_batch(odp_stash_t stash, uintptr_t ptr[], int32_t num);
+
+/**
* Flush object handles from the thread local cache
*
* Flushes all object handles from the thread local cache into the stash, so
diff --git a/include/odp/api/spec/stash_types.h b/include/odp/api/spec/stash_types.h
index dc966cb71..5f3e608bb 100644
--- a/include/odp/api/spec/stash_types.h
+++ b/include/odp/api/spec/stash_types.h
@@ -172,6 +172,18 @@ typedef struct odp_stash_capability_t {
/** Maximum size of thread local cache */
uint32_t max_cache_size;
+ /** Maximum number of object handles in batch get operations
+ *
+ * A batch size of at least one object is always supported.
+ */
+ uint32_t max_get_batch;
+
+ /** Maximum number of object handles in batch put operations
+ *
+ * A batch size of at least one object is always supported.
+ */
+ uint32_t max_put_batch;
+
/** Supported statistics counters */
odp_stash_stats_opt_t stats;
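A hedged sketch of the batch put/get contract: either the whole batch is transferred or nothing is, so the return value only needs to be compared against 'num'. Batch size is bounded by the new capability fields; stash creation and error reporting are omitted, and the stash is assumed to have been created as a default-type stash of 32-bit objects.

#include <odp_api.h>

/* Sketch: all-or-nothing transfer of 32-bit keys through a stash. */
static int move_keys(odp_stash_t stash, uint32_t keys[], int32_t num)
{
        odp_stash_capability_t capa;

        /* Second argument is the stash type used at creation time */
        if (odp_stash_capability(&capa, ODP_STASH_TYPE_DEFAULT))
                return -1;

        if ((uint32_t)num > capa.max_put_batch ||
            (uint32_t)num > capa.max_get_batch)
                return -1;   /* batch too large for this implementation */

        /* Stores all 'num' integers or none */
        if (odp_stash_put_u32_batch(stash, keys, num) != num)
                return -1;

        /* Outputs all 'num' integers or none */
        if (odp_stash_get_u32_batch(stash, keys, num) != num)
                return -1;

        return 0;
}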
diff --git a/include/odp/api/spec/thread.h b/include/odp/api/spec/thread.h
index 52bd82a09..0d85b3432 100644
--- a/include/odp/api/spec/thread.h
+++ b/include/odp/api/spec/thread.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -33,10 +34,13 @@ extern "C" {
/**
* Get thread identifier
*
- * Returns the thread identifier of the current thread. Thread ids range from 0
- * to odp_thread_count_max() - 1. The ODP thread id is assigned by
- * odp_init_local() and freed by odp_term_local(). Thread id is unique within
- * the ODP instance.
+ * Returns the ODP thread identifier of current thread. Thread IDs range from 0
+ * to odp_thread_count_max() - 1 and are unique within an ODP instance.
+ *
+ * Thread IDs are assigned by odp_init_local() and freed by odp_term_local().
+ * IDs are assigned sequentially starting from 0 in the same order threads call
+ * odp_init_local(). Thread IDs freed by odp_term_local() may be reused by
+ * following odp_init_local() calls.
*
* @return Thread identifier of the current thread
*/
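Because IDs are dense within [0, odp_thread_count_max()), they can index per-thread state directly; a small sketch under that assumption (allocation of the table is not shown, and the extern below is hypothetical):

#include <odp_api.h>

/* Sketch: per-thread statistics indexed by the ODP thread ID. */
typedef struct {
        uint64_t packets;
} thread_stats_t;

/* Assumed to be sized to odp_thread_count_max() entries elsewhere */
extern thread_stats_t *per_thread_stats;

static void count_packet(void)
{
        /* Valid only between odp_init_local() and odp_term_local() */
        per_thread_stats[odp_thread_id()].packets++;
}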
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index b51db7dbc..edfa423e7 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -505,6 +505,19 @@ uint64_t odp_timeout_tick(odp_timeout_t tmo);
void *odp_timeout_user_ptr(odp_timeout_t tmo);
/**
+ * Timeout user area
+ *
+ * Returns pointer to the user area associated with the timeout. Size of the area is fixed
+ * and defined in timeout pool parameters.
+ *
+ * @param tmo Timeout handle
+ *
+ * @return Pointer to the user area of the timeout
+ * @retval NULL The timeout does not have user area
+ */
+void *odp_timeout_user_area(odp_timeout_t tmo);
+
+/**
* Timeout alloc
*
* Allocates timeout from pool. Pool must be created with ODP_POOL_TIMEOUT type.
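A brief sketch of the new timeout user area accessor, assuming tmo_pool was created with a non-zero tmo.uarea_size (see the pool parameter additions above):

#include <odp_api.h>

/* Sketch: stash per-timeout context in the timeout user area. */
static odp_timeout_t timeout_with_context(odp_pool_t tmo_pool, uint32_t flow_id)
{
        odp_timeout_t tmo = odp_timeout_alloc(tmo_pool);
        uint32_t *ctx;

        if (tmo == ODP_TIMEOUT_INVALID)
                return ODP_TIMEOUT_INVALID;

        /* NULL when the pool has no user area */
        ctx = odp_timeout_user_area(tmo);
        if (ctx)
                *ctx = flow_id;

        return tmo;
}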
diff --git a/m4/odp_dpdk.m4 b/m4/odp_dpdk.m4
index 1d3f33d54..0f9f1e873 100644
--- a/m4/odp_dpdk.m4
+++ b/m4/odp_dpdk.m4
@@ -143,9 +143,9 @@ CPPFLAGS=$OLD_CPPFLAGS
AC_DEFUN([_ODP_DPDK_LEGACY_SYSTEM], [dnl
DPDK_CFLAGS="-isystem /usr/include/dpdk"
DPDK_LDFLAGS=""
- DPDK_LIB_PATH="`$CC $CFLAGS $LDFLAGS --print-file-name=libdpdk.so`"
+ DPDK_LIB_PATH="`$CC $AM_CFLAGS $CFLAGS $AM_LDFLAGS $LDFLAGS --print-file-name=libdpdk.so`"
if test "$DPDK_LIB_PATH" = "libdpdk.so" ; then
- DPDK_LIB_PATH="`$CC $CFLAGS $LDFLAGS --print-file-name=libdpdk.a`"
+ DPDK_LIB_PATH="`$CC $AM_CFLAGS $CFLAGS $AM_LDFLAGS $LDFLAGS --print-file-name=libdpdk.a`"
AS_IF([test "$DPDK_LIB_PATH" = "libdpdk.a"],
[AC_MSG_FAILURE([Could not locate system DPDK library directory])])
else
diff --git a/m4/odp_pthread.m4 b/m4/odp_pthread.m4
index ad65f4d1a..ad1ecdff4 100644
--- a/m4/odp_pthread.m4
+++ b/m4/odp_pthread.m4
@@ -9,11 +9,15 @@
AC_DEFUN([ODP_PTHREAD], [
AC_MSG_CHECKING([for pthread support in -pthread])
AC_LANG_PUSH([C])
- PTHEAD_CFLAGS="-pthread"
- CFLAGS="$CFLAGS $PTHEAD_CFLAGS"
+ saved_cflags="$CFLAGS"
+ saved_ldflags="$LDFLAGS"
+ PTHREAD_CFLAGS="-pthread"
+ CFLAGS="$AM_CFLAGS $CFLAGS $PTHREAD_CFLAGS"
PTHREAD_LIBS="-pthread"
- LDFLAGS="$LDFLAGS $PTHREAD_LIBS"
+ LDFLAGS="$AM_LDFLAGS $LDFLAGS $PTHREAD_LIBS"
AC_TRY_LINK_FUNC([pthread_create], [pthread=yes])
+ CFLAGS="$saved_cflags"
+ LDFLAGS="$saved_ldflags"
if test x"$pthread" != "xyes"; then
AC_MSG_FAILURE([pthread is not supported])
fi
diff --git a/platform/Makefile.inc b/platform/Makefile.inc
index c820727c2..ed161d83d 100644
--- a/platform/Makefile.inc
+++ b/platform/Makefile.inc
@@ -14,7 +14,7 @@ endif
VPATH = $(srcdir) $(builddir)
lib_LTLIBRARIES =
-AM_LDFLAGS = -version-number '$(ODP_LIBSO_VERSION)'
+AM_LDFLAGS += -version-number '$(ODP_LIBSO_VERSION)'
if ODP_ABI_COMPAT
AM_LDFLAGS += -export-symbols-regex '^(odp_|_deprecated_odp_)'
@@ -22,7 +22,7 @@ else
AM_LDFLAGS += -export-symbols-regex '^(odp_|_odp_|_deprecated_odp_)'
endif
-AM_CFLAGS = "-DODP_VERSION_BUILD=$(VERSION)"
+AM_CFLAGS += "-DODP_VERSION_BUILD=$(VERSION)"
AM_CFLAGS += $(VISIBILITY_CFLAGS)
-AM_CFLAGS += @PTHREAD_CFLAGS@
+AM_CFLAGS += $(PTHREAD_CFLAGS)
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 4fa7b7255..a676380dd 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -129,6 +129,7 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_name_table_internal.h \
include/odp_packet_io_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_parse_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_print_internal.h \
include/odp_errno_define.h \
include/odp_event_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_dpdk.h \
@@ -204,6 +205,7 @@ __LIB__libodp_dpdk_la_SOURCES = \
../linux-generic/pktio/loop.c \
../linux-generic/pktio/null.c \
../linux-generic/odp_pkt_queue.c \
+ ../linux-generic/odp_print.c \
odp_pool.c \
odp_queue_basic.c \
odp_queue_eventdev.c \
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
index e0169579a..26ada3655 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
@@ -26,6 +26,9 @@ extern "C" {
#define _odp_pkt_get(pkt, cast, field) \
(*(cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+#define _odp_pkt_get_ptr(pkt, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+
/* Packet header field offsets for inline functions */
typedef struct _odp_packet_inline_offset_t {
uint16_t mb;
@@ -113,12 +116,13 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 7;
+ uint32_t reserved1: 6;
/*
* Init flags
*/
uint32_t user_ptr_set: 1; /* User has set a non-NULL value */
+ uint32_t user_flag: 1;
/*
* Packet output flags
@@ -148,8 +152,8 @@ typedef union {
/* Flag groups */
struct {
- uint32_t reserved2: 7;
- uint32_t other: 18; /* All other flags */
+ uint32_t reserved2: 6;
+ uint32_t other: 19; /* All other flags */
uint32_t error: 7; /* All error flags */
} all;
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
index 5b853abb2..9856d1dd7 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inlines.h
@@ -66,11 +66,14 @@ extern "C" {
#define odp_packet_tailroom __odp_packet_tailroom
#define odp_packet_pool __odp_packet_pool
#define odp_packet_input __odp_packet_input
+ #define odp_packet_input_set __odp_packet_input_set
#define odp_packet_input_index __odp_packet_input_index
#define odp_packet_num_segs __odp_packet_num_segs
#define odp_packet_user_ptr __odp_packet_user_ptr
#define odp_packet_user_area __odp_packet_user_area
#define odp_packet_user_area_size __odp_packet_user_area_size
+ #define odp_packet_user_flag __odp_packet_user_flag
+ #define odp_packet_user_flag_set __odp_packet_user_flag_set
#define odp_packet_l2_offset __odp_packet_l2_offset
#define odp_packet_l3_offset __odp_packet_l3_offset
#define odp_packet_l4_offset __odp_packet_l4_offset
@@ -105,6 +108,12 @@ extern "C" {
#define odp_packet_color __odp_packet_color
#define odp_packet_drop_eligible __odp_packet_drop_eligible
#define odp_packet_shaper_len_adjust __odp_packet_shaper_len_adjust
+ #define odp_packet_buf_data_len __odp_packet_buf_data_len
+ #define odp_packet_buf_size __odp_packet_buf_size
+ #define odp_packet_buf_head __odp_packet_buf_head
+ #define odp_packet_buf_data_offset __odp_packet_buf_data_offset
+ #define odp_packet_buf_data_set __odp_packet_buf_data_set
+ #define odp_packet_buf_from_head __odp_packet_buf_from_head
#else
#undef _ODP_INLINE
#define _ODP_INLINE
@@ -198,6 +207,13 @@ _ODP_INLINE odp_pktio_t odp_packet_input(odp_packet_t pkt)
return _odp_pkt_get(pkt, odp_pktio_t, input);
}
+_ODP_INLINE void odp_packet_input_set(odp_packet_t pkt, odp_pktio_t pktio)
+{
+ odp_pktio_t *pktio_ptr = _odp_pkt_get_ptr(pkt, odp_pktio_t, input);
+
+ *pktio_ptr = pktio;
+}
+
_ODP_INLINE int odp_packet_input_index(odp_packet_t pkt)
{
odp_pktio_t pktio = odp_packet_input(pkt);
@@ -234,6 +250,22 @@ _ODP_INLINE uint32_t odp_packet_user_area_size(odp_packet_t pkt)
return _odp_pool_get(pool, uint32_t, uarea_size);
}
+_ODP_INLINE int odp_packet_user_flag(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.user_flag;
+}
+
+_ODP_INLINE void odp_packet_user_flag_set(odp_packet_t pkt, int val)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->user_flag = !!val;
+}
+
_ODP_INLINE uint32_t odp_packet_l2_offset(odp_packet_t pkt)
{
return _odp_pkt_get(pkt, uint16_t, l2_offset);
@@ -516,6 +548,58 @@ _ODP_INLINE int8_t odp_packet_shaper_len_adjust(odp_packet_t pkt)
return (int8_t)flags.shaper_len_adj;
}
+_ODP_INLINE uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
+{
+ return odp_packet_seg_data_len(ODP_PACKET_INVALID, (odp_packet_seg_t)pkt_buf);
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get((odp_packet_buf_t)pkt_buf, odp_pool_t, pool);
+
+ return _odp_pool_get(pool, uint32_t, seg_len);
+}
+
+_ODP_INLINE void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return NULL;
+
+ return (uint8_t *)(uintptr_t)pkt_buf + head_offset;
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
+{
+ void *data = odp_packet_seg_data(ODP_PACKET_INVALID, (odp_packet_seg_t)pkt_buf);
+ void *head = odp_packet_buf_head(pkt_buf);
+
+ return (uint32_t)((uintptr_t)data - (uintptr_t)head);
+}
+
+_ODP_INLINE void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset,
+ uint32_t data_len)
+{
+ struct rte_mbuf *mb = (struct rte_mbuf *)pkt_buf;
+
+ mb->data_off = (uint16_t)data_offset;
+ mb->data_len = (uint16_t)data_len;
+}
+
+_ODP_INLINE odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool, void *head)
+{
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return ODP_PACKET_BUF_INVALID;
+
+ return (odp_packet_buf_t)((uintptr_t)head - head_offset);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
index e3397c4df..ec6804c72 100644
--- a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
+++ b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
@@ -25,6 +25,7 @@ typedef struct _odp_timeout_inline_offset_t {
uint16_t expiration;
uint16_t timer;
uint16_t user_ptr;
+ uint16_t uarea_addr;
} _odp_timeout_inline_offset_t;
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
index 270a6769b..48154a26f 100644
--- a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
@@ -24,6 +24,7 @@ extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset;
#define odp_timeout_timer __odp_timeout_timer
#define odp_timeout_tick __odp_timeout_tick
#define odp_timeout_user_ptr __odp_timeout_user_ptr
+ #define odp_timeout_user_area __odp_timeout_user_area
#define odp_timeout_from_event __odp_timeout_from_event
#define odp_timeout_to_event __odp_timeout_to_event
#else
@@ -45,6 +46,11 @@ _ODP_INLINE void *odp_timeout_user_ptr(odp_timeout_t tmo)
return _odp_timeout_hdr_field(tmo, void *, user_ptr);
}
+_ODP_INLINE void *odp_timeout_user_area(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, void *, uarea_addr);
+}
+
_ODP_INLINE odp_timeout_t odp_timeout_from_event(odp_event_t ev)
{
return (odp_timeout_t)ev;
diff --git a/platform/linux-dpdk/include/odp_buffer_internal.h b/platform/linux-dpdk/include/odp_buffer_internal.h
index e7b1d215d..dfffdc2be 100644
--- a/platform/linux-dpdk/include/odp_buffer_internal.h
+++ b/platform/linux-dpdk/include/odp_buffer_internal.h
@@ -52,6 +52,9 @@ typedef struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
/* Common event header */
_odp_event_hdr_t event_hdr;
+ /* User area pointer */
+ void *uarea_addr;
+
} odp_buffer_hdr_t;
/*
diff --git a/platform/linux-dpdk/include/odp_event_vector_internal.h b/platform/linux-dpdk/include/odp_event_vector_internal.h
index c866d9036..5fa8c31c6 100644
--- a/platform/linux-dpdk/include/odp_event_vector_internal.h
+++ b/platform/linux-dpdk/include/odp_event_vector_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2020-2021, Nokia
+/* Copyright (c) 2020-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,6 +17,8 @@
#include <odp/api/debug.h>
#include <odp/api/packet.h>
+#include <odp/api/plat/event_vector_inline_types.h>
+
#include <odp_event_internal.h>
#include <stdint.h>
@@ -28,9 +30,15 @@ typedef struct ODP_ALIGNED_CACHE odp_event_vector_hdr_t {
/* Common event header */
_odp_event_hdr_t event_hdr;
+ /* User area pointer */
+ void *uarea_addr;
+
/* Event vector size */
uint32_t size;
+ /* Flags */
+ _odp_event_vector_flags_t flags;
+
/* Vector of packet handles */
odp_packet_t packet[];
diff --git a/platform/linux-dpdk/include/odp_pool_internal.h b/platform/linux-dpdk/include/odp_pool_internal.h
index 1d1ab02e2..a5ffcf517 100644
--- a/platform/linux-dpdk/include/odp_pool_internal.h
+++ b/platform/linux-dpdk/include/odp_pool_internal.h
@@ -73,7 +73,7 @@ typedef struct ODP_ALIGNED_CACHE {
uint8_t memset_mark;
struct rte_mempool *rte_mempool;
uint32_t seg_len;
- uint32_t hdr_size;
+ uint32_t ext_head_offset;
uint32_t num;
uint32_t num_populated;
odp_pool_type_t type_2;
diff --git a/platform/linux-dpdk/include/odp_timer_internal.h b/platform/linux-dpdk/include/odp_timer_internal.h
index 2e8487689..f504a20aa 100644
--- a/platform/linux-dpdk/include/odp_timer_internal.h
+++ b/platform/linux-dpdk/include/odp_timer_internal.h
@@ -35,6 +35,9 @@ typedef struct ODP_ALIGNED_CACHE odp_timeout_hdr_t {
/* User ptr inherited from parent timer */
const void *user_ptr;
+ /* User area pointer */
+ void *uarea_addr;
+
/* Parent timer */
odp_timer_t timer;
diff --git a/platform/linux-dpdk/m4/odp_libconfig.m4 b/platform/linux-dpdk/m4/odp_libconfig.m4
index b4bfdd719..84d331b8a 100644
--- a/platform/linux-dpdk/m4/odp_libconfig.m4
+++ b/platform/linux-dpdk/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [17])
+m4_define([_odp_config_version_minor], [18])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-dpdk/odp_buffer.c b/platform/linux-dpdk/odp_buffer.c
index 8a2cbb949..758782563 100644
--- a/platform/linux-dpdk/odp_buffer.c
+++ b/platform/linux-dpdk/odp_buffer.c
@@ -40,6 +40,13 @@ int odp_buffer_is_valid(odp_buffer_t buf)
return 1;
}
+void *odp_buffer_user_area(odp_buffer_t buf)
+{
+ odp_buffer_hdr_t *hdr = _odp_buf_hdr(buf);
+
+ return hdr->uarea_addr;
+}
+
void odp_buffer_print(odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c
index 6e740da77..99dbe07f6 100644
--- a/platform/linux-dpdk/odp_packet.c
+++ b/platform/linux-dpdk/odp_packet.c
@@ -626,13 +626,6 @@ void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ptr)
pkt_hdr->p.flags.user_ptr_set = 1;
}
-void odp_packet_input_set(odp_packet_t pkt, odp_pktio_t pktio)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- pkt_hdr->input = pktio;
-}
-
int odp_packet_l2_offset_set(odp_packet_t pkt, uint32_t offset)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
@@ -1979,73 +1972,6 @@ int odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
return -ENOTSUP;
}
-static inline odp_packet_hdr_t *packet_buf_to_hdr(odp_packet_buf_t pkt_buf)
-{
- return (odp_packet_hdr_t *)(uintptr_t)pkt_buf;
-}
-
-void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
-
- if (odp_unlikely(pool->pool_ext == 0)) {
- ODP_ERR("Not an external memory pool\n");
- return NULL;
- }
-
- return (uint8_t *)pkt_hdr + pool->hdr_size;
-}
-
-uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
-
- return pool->seg_len;
-}
-
-uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
-{
- void *data = odp_packet_seg_data(ODP_PACKET_INVALID,
- (odp_packet_seg_t)pkt_buf);
- void *head = odp_packet_buf_head(pkt_buf);
-
- return (uintptr_t)data - (uintptr_t)head;
-}
-
-uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
-{
- return odp_packet_seg_data_len(ODP_PACKET_INVALID,
- (odp_packet_seg_t)pkt_buf);
-}
-
-void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset,
- uint32_t data_len)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
-
- pkt_hdr->event_hdr.mb.data_off = data_offset;
- pkt_hdr->event_hdr.mb.data_len = data_len;
-}
-
-odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool_hdl, void *head)
-{
- pool_t *pool = _odp_pool_entry(pool_hdl);
-
- if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
- ODP_ERR("Not a packet pool\n");
- return ODP_PACKET_BUF_INVALID;
- }
-
- if (odp_unlikely(pool->pool_ext == 0)) {
- ODP_ERR("Not an external memory pool\n");
- return ODP_PACKET_BUF_INVALID;
- }
-
- return (odp_packet_buf_t)((uintptr_t)head - pool->hdr_size);
-}
-
uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[],
uint32_t num)
{
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 49d6185a2..e400540d9 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -69,7 +69,9 @@ pool_global_t *_odp_pool_glb;
/* Fill in pool header field offsets for inline functions */
const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
.index = offsetof(pool_t, pool_idx),
- .uarea_size = offsetof(pool_t, params.pkt.uarea_size)
+ .seg_len = offsetof(pool_t, seg_len),
+ .uarea_size = offsetof(pool_t, params.pkt.uarea_size),
+ .ext_head_offset = offsetof(pool_t, ext_head_offset)
};
#include <odp/visibility_end.h>
@@ -250,6 +252,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
+ capa->buf.max_uarea_size = MAX_UAREA_SIZE;
capa->buf.min_cache_size = 0;
capa->buf.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
capa->buf.stats.all = supported_stats.all;
@@ -273,6 +276,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Timeout pools */
capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
+ capa->tmo.max_uarea_size = MAX_UAREA_SIZE;
capa->tmo.min_cache_size = 0;
capa->tmo.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
capa->tmo.stats.all = supported_stats.all;
@@ -280,6 +284,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Vector pools */
capa->vector.max_pools = max_pools;
capa->vector.max_num = CONFIG_POOL_MAX_NUM;
+ capa->vector.max_uarea_size = MAX_UAREA_SIZE;
capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
capa->vector.min_cache_size = 0;
capa->vector.max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
@@ -309,10 +314,12 @@ odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
struct mbuf_ctor_arg *mb_ctor_arg;
struct rte_mbuf *mb = raw_mbuf;
_odp_event_hdr_t *event_hdr;
+ void *uarea;
- /* The rte_mbuf is at the begninning in all cases */
+ /* The rte_mbuf is at the beginning in all cases */
mb_ctor_arg = (struct mbuf_ctor_arg *)opaque_arg;
mb = (struct rte_mbuf *)raw_mbuf;
+ uarea = mb_ctor_arg->pool->uarea_base_addr + (i * mb_ctor_arg->pool->uarea_size);
RTE_ASSERT(mp->elt_size >= sizeof(struct rte_mbuf));
@@ -359,20 +366,32 @@ odp_dpdk_mbuf_ctor(struct rte_mempool *mp,
event_hdr->type = mb_ctor_arg->type;
event_hdr->event_type = mb_ctor_arg->event_type;
+ /* Initialize buffer metadata */
+ if (mb_ctor_arg->type == ODP_POOL_BUFFER) {
+ odp_buffer_hdr_t *buf_hdr = (void *)raw_mbuf;
+
+ buf_hdr->uarea_addr = uarea;
+ }
+
/* Initialize packet metadata */
if (mb_ctor_arg->type == ODP_POOL_PACKET) {
- odp_packet_hdr_t *pkt_hdr = (odp_packet_hdr_t *)raw_mbuf;
+ odp_packet_hdr_t *pkt_hdr = (void *)raw_mbuf;
- pkt_hdr->uarea_addr = mb_ctor_arg->pool->uarea_base_addr +
- i * mb_ctor_arg->pool->uarea_size;
+ pkt_hdr->uarea_addr = uarea;
}
/* Initialize event vector metadata */
if (mb_ctor_arg->type == ODP_POOL_VECTOR) {
- odp_event_vector_hdr_t *vect_hdr;
+ odp_event_vector_hdr_t *vect_hdr = (void *)raw_mbuf;
+
+ vect_hdr->uarea_addr = uarea;
+ }
+
+ /* Initialize timeout metadata */
+ if (mb_ctor_arg->type == ODP_POOL_TIMEOUT) {
+ odp_timeout_hdr_t *tmo_hdr = (void *)raw_mbuf;
- vect_hdr = (odp_event_vector_hdr_t *)raw_mbuf;
- vect_hdr->size = 0;
+ tmo_hdr->uarea_addr = uarea;
}
}
@@ -424,6 +443,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->buf.uarea_size > capa.buf.max_uarea_size) {
+ ODP_ERR("buf.uarea_size too large %u\n", params->buf.uarea_size);
+ return -1;
+ }
+
if (params->stats.all & ~capa.buf.stats.all) {
ODP_ERR("Unsupported pool statistics counter\n");
return -1;
@@ -506,6 +530,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->tmo.uarea_size > capa.tmo.max_uarea_size) {
+ ODP_ERR("tmo.uarea_size too large %u\n", params->tmo.uarea_size);
+ return -1;
+ }
+
if (params->stats.all & ~capa.tmo.stats.all) {
ODP_ERR("Unsupported pool statistics counter\n");
return -1;
@@ -539,6 +568,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->vector.uarea_size > capa.vector.max_uarea_size) {
+ ODP_ERR("vector.uarea_size too large %u\n", params->vector.uarea_size);
+ return -1;
+ }
+
if (params->stats.all & ~capa.vector.stats.all) {
ODP_ERR("Unsupported pool statistics counter\n");
return -1;
@@ -684,6 +718,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
buf_align = params->buf.align;
blk_size = params->buf.size;
cache_size = params->buf.cache_size;
+ uarea_size = params->buf.uarea_size;
/* Set correct alignment based on input request */
if (buf_align == 0)
@@ -754,6 +789,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
mbp_ctor_arg.mbuf_data_room_size = 0;
num = params->tmo.num;
cache_size = params->tmo.cache_size;
+ uarea_size = params->tmo.uarea_size;
event_type = ODP_EVENT_TIMEOUT;
ODP_DBG("type: tmo, name: %s, num: %u\n", pool_name, num);
@@ -764,6 +800,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
mbp_ctor_arg.mbuf_data_room_size = 0;
num = params->vector.num;
cache_size = params->vector.cache_size;
+ uarea_size = params->vector.uarea_size;
event_type = ODP_EVENT_PACKET_VECTOR;
ODP_DBG("type: vector, name: %s, num: %u\n", pool_name, num);
@@ -1300,7 +1337,7 @@ odp_pool_t odp_pool_ext_create(const char *name,
}
pool->ext_param = *params;
- pool->hdr_size = hdr_size;
+ pool->ext_head_offset = hdr_size;
pool->num = num;
pool->num_populated = 0;
pool->params.pkt.uarea_size = params->pkt.uarea_size;
@@ -1430,8 +1467,7 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size,
mb_ctor_arg.type = params->type;
mb_ctor_arg.event_type = pool->type;
mb_ctor_arg.pool = pool;
- odp_dpdk_mbuf_ctor(mp, (void *)&mb_ctor_arg, (void *)mb,
- mp->populated_size);
+ odp_dpdk_mbuf_ctor(mp, (void *)&mb_ctor_arg, (void *)mb, num_populated);
pool->num_populated++;
}
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
index f09a2f72c..7e59775d3 100644
--- a/platform/linux-dpdk/odp_timer.c
+++ b/platform/linux-dpdk/odp_timer.c
@@ -183,7 +183,8 @@ const _odp_timeout_inline_offset_t
_odp_timeout_inline_offset ODP_ALIGNED_CACHE = {
.expiration = offsetof(odp_timeout_hdr_t, expiration),
.timer = offsetof(odp_timeout_hdr_t, timer),
- .user_ptr = offsetof(odp_timeout_hdr_t, user_ptr)
+ .user_ptr = offsetof(odp_timeout_hdr_t, user_ptr),
+ .uarea_addr = offsetof(odp_timeout_hdr_t, uarea_addr)
};
#include <odp/visibility_end.h>
diff --git a/platform/linux-dpdk/test/alternate-timer.conf b/platform/linux-dpdk/test/alternate-timer.conf
index bb884013c..0b326f259 100644
--- a/platform/linux-dpdk/test/alternate-timer.conf
+++ b/platform/linux-dpdk/test/alternate-timer.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.17"
+config_file_version = "0.1.18"
timer: {
# Enable alternate DPDK timer implementation
diff --git a/platform/linux-dpdk/test/crypto.conf b/platform/linux-dpdk/test/crypto.conf
index 413e7a043..f3d642963 100644
--- a/platform/linux-dpdk/test/crypto.conf
+++ b/platform/linux-dpdk/test/crypto.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.17"
+config_file_version = "0.1.18"
system: {
# One crypto queue pair is required per thread for lockless operation
diff --git a/platform/linux-dpdk/test/sched-basic.conf b/platform/linux-dpdk/test/sched-basic.conf
index 7093cd810..181136c33 100644
--- a/platform/linux-dpdk/test/sched-basic.conf
+++ b/platform/linux-dpdk/test/sched-basic.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-dpdk"
-config_file_version = "0.1.17"
+config_file_version = "0.1.18"
# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 56a37544c..708526558 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -135,6 +135,7 @@ noinst_HEADERS = \
include/odp_packet_internal.h \
include/odp_packet_io_internal.h \
include/odp_parse_internal.h \
+ include/odp_print_internal.h \
include/odp_socket_common.h \
include/odp_packet_io_stats_common.h \
include/odp_packet_io_stats.h \
@@ -213,6 +214,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_packet_io.c \
odp_parse.c \
odp_pkt_queue.c \
+ odp_print.c \
odp_pool.c \
odp_pool_mem_src_ops.c \
odp_queue_basic.c \
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h b/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h
index f530afd4f..e8f33f09e 100644
--- a/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/atomic_inlines.h
@@ -162,7 +162,7 @@ static inline void _odp_atomic_add_u32(odp_atomic_u32_t *atom, uint32_t val)
static inline void _odp_atomic_sub_u32(odp_atomic_u32_t *atom, uint32_t val)
{
- int32_t neg_val = -val;
+ int32_t neg_val = (int32_t)-val;
__asm__ volatile("stadd %w[neg_val], %[atom]"
: [atom] "+Q" (atom->v)
@@ -188,7 +188,7 @@ static inline void _odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
static inline void _odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
{
- int64_t neg_val = -val;
+ int64_t neg_val = (int64_t)-val;
__asm__ volatile("stadd %[neg_val], %[atom]"
: [atom] "+Q" (atom->v)
@@ -215,7 +215,7 @@ static inline void _odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
static inline void _odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
{
- int32_t neg_val = -val;
+ int32_t neg_val = (int32_t)-val;
__asm__ volatile("staddl %w[neg_val], %[atom]"
: [atom] "+Q" (atom->v)
@@ -233,7 +233,7 @@ static inline void _odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
static inline void _odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
- int64_t neg_val = -val;
+ int64_t neg_val = (int64_t)-val;
__asm__ volatile("staddl %[neg_val], %[atom]"
: [atom] "+Q" (atom->v)
diff --git a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
index 4531ebc28..32341dd02 100644
--- a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
+++ b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
@@ -904,53 +904,71 @@ int odp_crypto_result(odp_crypto_packet_result_t *result,
return 0;
}
-static
-int crypto_int(odp_packet_t pkt_in,
- odp_packet_t *pkt_out,
- const odp_crypto_packet_op_param_t *param)
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
{
- odp_crypto_generic_session_t *session;
- odp_bool_t allocated = false;
- odp_packet_t out_pkt = *pkt_out;
-
- session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
+ int md_copy;
+ int rc;
- /* Resolve output buffer */
- if (odp_unlikely(ODP_PACKET_INVALID == out_pkt) &&
- ODP_POOL_INVALID != session->p.output_pool) {
- out_pkt = odp_packet_alloc(session->p.output_pool,
- odp_packet_len(pkt_in));
- allocated = true;
+ md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+ odp_packet_pool(src));
+ if (odp_unlikely(md_copy < 0)) {
+ ODP_ERR("Unable to copy packet metadata\n");
+ return -1;
}
- if (odp_unlikely(ODP_PACKET_INVALID == out_pkt)) {
- ODP_DBG("Alloc failed.\n");
+ rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
+ if (odp_unlikely(rc < 0)) {
+ ODP_ERR("Unable to copy packet data\n");
return -1;
}
- if (odp_unlikely(pkt_in != out_pkt)) {
- int ret;
- int md_copy;
+ _odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
+ return 0;
+}
- md_copy = _odp_packet_copy_md_possible(session->p.output_pool,
- odp_packet_pool(pkt_in));
- if (odp_unlikely(md_copy < 0)) {
- ODP_ERR("Unable to copy packet metadata\n");
- goto err;
- }
+static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out)
+{
+ int rc;
- ret = odp_packet_copy_from_pkt(out_pkt,
- 0,
- pkt_in,
- 0,
- odp_packet_len(pkt_in));
- if (odp_unlikely(ret < 0))
- goto err;
+ if (odp_likely(pkt_in == pkt_out))
+ return pkt_out;
- _odp_packet_copy_md(packet_hdr(out_pkt), packet_hdr(pkt_in), md_copy);
- odp_packet_free(pkt_in);
- pkt_in = ODP_PACKET_INVALID;
+ if (pkt_out == ODP_PACKET_INVALID) {
+ odp_pool_t pool = session->p.output_pool;
+
+ ODP_ASSERT(pool != ODP_POOL_INVALID);
+ if (pool == odp_packet_pool(pkt_in)) {
+ pkt_out = pkt_in;
+ } else {
+ pkt_out = odp_packet_copy(pkt_in, pool);
+ if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+ odp_packet_free(pkt_in);
+ }
+ return pkt_out;
}
+ rc = copy_data_and_metadata(pkt_out, pkt_in);
+ if (odp_unlikely(rc < 0))
+ return ODP_PACKET_INVALID;
+
+ odp_packet_free(pkt_in);
+ return pkt_out;
+}
+
+static
+int crypto_int(odp_packet_t pkt_in,
+ odp_packet_t *pkt_out,
+ const odp_crypto_packet_op_param_t *param)
+{
+ odp_crypto_generic_session_t *session;
+ odp_packet_t out_pkt;
+
+ session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
+
+ out_pkt = get_output_packet(session, pkt_in, *pkt_out);
+ if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
+ return -1;
/* Invoke the crypto function */
session->func(out_pkt, param, session);
@@ -961,14 +979,6 @@ int crypto_int(odp_packet_t pkt_in,
*pkt_out = out_pkt;
return 0;
-
-err:
- if (allocated) {
- odp_packet_free(out_pkt);
- *pkt_out = ODP_PACKET_INVALID;
- }
-
- return -1;
}
int odp_crypto_op(const odp_packet_t pkt_in[],
diff --git a/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h b/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h
index d466a51ad..31d2f1db9 100644
--- a/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/byteorder_inlines.h
@@ -56,7 +56,11 @@ extern "C" {
* Don't use this function directly, instead see odp_byteorder.h
*/
#if GCC_VERSION < 40800
-#define __odp_builtin_bswap16(u16) ((((u16)&0x00ff) << 8) | \
+/*
+ * We have to explicitly cast back to uint16_t because clang promotes the
+ * left side of << operator to int.
+ */
+#define __odp_builtin_bswap16(u16) ((uint16_t)(((u16)&0x00ff) << 8) | \
(((u16)&0xff00) >> 8))
#else
#define __odp_builtin_bswap16(u16) __builtin_bswap16(u16)
diff --git a/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
index 547620df6..723e1a3d1 100644
--- a/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/event_vector_inline_types.h
@@ -15,6 +15,15 @@ extern "C" {
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
+typedef union {
+ uint32_t all_flags;
+
+ struct {
+ uint32_t user_flag : 1;
+ };
+
+} _odp_event_vector_flags_t;
+
/* Event vector field accessors */
#define _odp_event_vect_get(vect, cast, field) \
(*(cast *)(uintptr_t)((uint8_t *)vect + _odp_event_vector_inline.field))
@@ -26,6 +35,9 @@ typedef struct _odp_event_vector_inline_offset_t {
uint16_t packet;
uint16_t pool;
uint16_t size;
+ uint16_t uarea_addr;
+ uint16_t flags;
+
} _odp_event_vector_inline_offset_t;
/** @endcond */
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
index c5293fc86..b00173aca 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
@@ -27,6 +27,9 @@ extern "C" {
#define _odp_pkt_get(pkt, cast, field) \
(*(cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+#define _odp_pkt_get_ptr(pkt, cast, field) \
+ ((cast *)(uintptr_t)((uint8_t *)pkt + _odp_packet_inline.field))
+
/* Packet header field offsets for inline functions */
typedef struct _odp_packet_inline_offset_t {
uint16_t seg_data;
@@ -115,12 +118,13 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 7;
+ uint32_t reserved1: 6;
/*
* Init flags
*/
uint32_t user_ptr_set: 1; /* User has set a non-NULL value */
+ uint32_t user_flag: 1;
/*
* Packet output flags
@@ -150,8 +154,8 @@ typedef union {
/* Flag groups */
struct {
- uint32_t reserved2: 7;
- uint32_t other: 18; /* All other flags */
+ uint32_t reserved2: 6;
+ uint32_t other: 19; /* All other flags */
uint32_t error: 7; /* All error flags */
} all;
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
index 950ede8d7..da4eabe2e 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inlines.h
@@ -44,11 +44,14 @@
#define odp_packet_tailroom __odp_packet_tailroom
#define odp_packet_pool __odp_packet_pool
#define odp_packet_input __odp_packet_input
+ #define odp_packet_input_set __odp_packet_input_set
#define odp_packet_input_index __odp_packet_input_index
#define odp_packet_num_segs __odp_packet_num_segs
#define odp_packet_user_ptr __odp_packet_user_ptr
#define odp_packet_user_area __odp_packet_user_area
#define odp_packet_user_area_size __odp_packet_user_area_size
+ #define odp_packet_user_flag __odp_packet_user_flag
+ #define odp_packet_user_flag_set __odp_packet_user_flag_set
#define odp_packet_l2_offset __odp_packet_l2_offset
#define odp_packet_l3_offset __odp_packet_l3_offset
#define odp_packet_l4_offset __odp_packet_l4_offset
@@ -76,6 +79,13 @@
#define odp_packet_color __odp_packet_color
#define odp_packet_drop_eligible __odp_packet_drop_eligible
#define odp_packet_shaper_len_adjust __odp_packet_shaper_len_adjust
+ #define odp_packet_buf_data_len __odp_packet_buf_data_len
+ #define odp_packet_buf_size __odp_packet_buf_size
+ #define odp_packet_buf_head __odp_packet_buf_head
+ #define odp_packet_buf_data_offset __odp_packet_buf_data_offset
+ #define odp_packet_buf_data_set __odp_packet_buf_data_set
+ #define odp_packet_buf_from_head __odp_packet_buf_from_head
+
#else
#undef _ODP_INLINE
#define _ODP_INLINE
@@ -135,6 +145,13 @@ _ODP_INLINE odp_pktio_t odp_packet_input(odp_packet_t pkt)
return _odp_pkt_get(pkt, odp_pktio_t, input);
}
+_ODP_INLINE void odp_packet_input_set(odp_packet_t pkt, odp_pktio_t pktio)
+{
+ odp_pktio_t *pktio_ptr = _odp_pkt_get_ptr(pkt, odp_pktio_t, input);
+
+ *pktio_ptr = pktio;
+}
+
_ODP_INLINE int odp_packet_input_index(odp_packet_t pkt)
{
odp_pktio_t pktio = odp_packet_input(pkt);
@@ -171,6 +188,22 @@ _ODP_INLINE uint32_t odp_packet_user_area_size(odp_packet_t pkt)
return _odp_pool_get(pool, uint32_t, uarea_size);
}
+_ODP_INLINE int odp_packet_user_flag(odp_packet_t pkt)
+{
+ _odp_packet_flags_t flags;
+
+ flags.all_flags = _odp_pkt_get(pkt, uint32_t, flags);
+
+ return flags.user_flag;
+}
+
+_ODP_INLINE void odp_packet_user_flag_set(odp_packet_t pkt, int val)
+{
+ _odp_packet_flags_t *flags = _odp_pkt_get_ptr(pkt, _odp_packet_flags_t, flags);
+
+ flags->user_flag = !!val;
+}
+
_ODP_INLINE uint32_t odp_packet_l2_offset(odp_packet_t pkt)
{
return _odp_pkt_get(pkt, uint16_t, l2_offset);
@@ -400,6 +433,59 @@ _ODP_INLINE int8_t odp_packet_shaper_len_adjust(odp_packet_t pkt)
return (int8_t)flags.shaper_len_adj;
}
+_ODP_INLINE uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
+{
+ return _odp_pkt_get(pkt_buf, uint32_t, seg_len);
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
+
+ return _odp_pool_get(pool, uint32_t, ext_pkt_buf_size) -
+ _odp_pool_get(pool, uint32_t, ext_head_offset);
+}
+
+_ODP_INLINE void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
+{
+ odp_pool_t pool = _odp_pkt_get(pkt_buf, odp_pool_t, pool);
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return NULL;
+
+ return (uint8_t *)(uintptr_t)pkt_buf + head_offset;
+}
+
+_ODP_INLINE uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
+{
+ return (uint32_t)((uintptr_t)_odp_pkt_get(pkt_buf, void *, seg_data) -
+ (uintptr_t)odp_packet_buf_head(pkt_buf));
+}
+
+_ODP_INLINE void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset,
+ uint32_t data_len)
+{
+ uint8_t *head = (uint8_t *)odp_packet_buf_head(pkt_buf);
+ uint32_t *seg_len = _odp_pkt_get_ptr(pkt_buf, uint32_t, seg_len);
+ void **seg_data = _odp_pkt_get_ptr(pkt_buf, void *, seg_data);
+
+ *seg_len = data_len;
+ *seg_data = head + data_offset;
+}
+
+_ODP_INLINE odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool, void *head)
+{
+ const uint32_t head_offset = _odp_pool_get(pool, uint32_t, ext_head_offset);
+
+ /* Check that pool is external */
+ if (odp_unlikely(!head_offset))
+ return ODP_PACKET_BUF_INVALID;
+
+ return (odp_packet_buf_t)((uintptr_t)head - head_offset);
+}
+
/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
index c8da1b77a..76604dc4f 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_vector_inlines.h
@@ -33,6 +33,9 @@
#define odp_packet_vector_pool __odp_packet_vector_pool
#define odp_packet_vector_size __odp_packet_vector_size
#define odp_packet_vector_size_set __odp_packet_vector_size_set
+ #define odp_packet_vector_user_area __odp_packet_vector_user_area
+ #define odp_packet_vector_user_flag __odp_packet_vector_user_flag
+ #define odp_packet_vector_user_flag_set __odp_packet_vector_user_flag_set
#else
#undef _ODP_INLINE
#define _ODP_INLINE
@@ -74,6 +77,28 @@ _ODP_INLINE void odp_packet_vector_size_set(odp_packet_vector_t pktv, uint32_t s
*vector_size = size;
}
+_ODP_INLINE void *odp_packet_vector_user_area(odp_packet_vector_t pktv)
+{
+ return _odp_event_vect_get(pktv, void *, uarea_addr);
+}
+
+_ODP_INLINE int odp_packet_vector_user_flag(odp_packet_vector_t pktv)
+{
+ _odp_event_vector_flags_t flags;
+
+ flags.all_flags = _odp_event_vect_get(pktv, uint32_t, flags);
+
+ return flags.user_flag;
+}
+
+_ODP_INLINE void odp_packet_vector_user_flag_set(odp_packet_vector_t pktv, int val)
+{
+ _odp_event_vector_flags_t *flags = _odp_event_vect_get_ptr(pktv, _odp_event_vector_flags_t,
+ flags);
+
+ flags->user_flag = !!val;
+}
+
/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
index 0c356dbf2..9deec89a1 100644
--- a/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/pool_inline_types.h
@@ -26,7 +26,13 @@ typedef struct _odp_pool_inline_offset_t {
/** @internal field offset */
uint16_t index;
/** @internal field offset */
+ uint16_t seg_len;
+ /** @internal field offset */
uint16_t uarea_size;
+ /** @internal field offset */
+ uint16_t ext_head_offset;
+ /** @internal field offset */
+ uint16_t ext_pkt_buf_size;
} _odp_pool_inline_offset_t;
diff --git a/platform/linux-generic/include/odp/api/plat/timer_inline_types.h b/platform/linux-generic/include/odp/api/plat/timer_inline_types.h
index e3397c4df..ec6804c72 100644
--- a/platform/linux-generic/include/odp/api/plat/timer_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/timer_inline_types.h
@@ -25,6 +25,7 @@ typedef struct _odp_timeout_inline_offset_t {
uint16_t expiration;
uint16_t timer;
uint16_t user_ptr;
+ uint16_t uarea_addr;
} _odp_timeout_inline_offset_t;
diff --git a/platform/linux-generic/include/odp/api/plat/timer_inlines.h b/platform/linux-generic/include/odp/api/plat/timer_inlines.h
index 406aefdf6..7642376d0 100644
--- a/platform/linux-generic/include/odp/api/plat/timer_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/timer_inlines.h
@@ -24,6 +24,7 @@ extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset;
#define odp_timeout_timer __odp_timeout_timer
#define odp_timeout_tick __odp_timeout_tick
#define odp_timeout_user_ptr __odp_timeout_user_ptr
+ #define odp_timeout_user_area __odp_timeout_user_area
#define odp_timer_tick_to_ns __odp_timer_tick_to_ns
#define odp_timer_ns_to_tick __odp_timer_ns_to_tick
#define odp_timeout_from_event __odp_timeout_from_event
@@ -47,6 +48,11 @@ _ODP_INLINE void *odp_timeout_user_ptr(odp_timeout_t tmo)
return _odp_timeout_hdr_field(tmo, void *, user_ptr);
}
+_ODP_INLINE void *odp_timeout_user_area(odp_timeout_t tmo)
+{
+ return _odp_timeout_hdr_field(tmo, void *, uarea_addr);
+}
+
_ODP_INLINE uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tp, uint64_t ticks)
{
(void)tp;
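A corresponding sketch for the new timeout user area accessor, assuming a timeout pool created with tmo.uarea_size > 0 and an application-defined struct tmo_ctx:

/* ev: a timeout event received e.g. from the scheduler */
odp_timeout_t tmo = odp_timeout_from_event(ev);
struct tmo_ctx *ctx = odp_timeout_user_area(tmo);

/* the classic user pointer inherited from the timer remains available */
const void *ptr = odp_timeout_user_ptr(tmo);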
diff --git a/platform/linux-generic/include/odp_atomic_internal.h b/platform/linux-generic/include/odp_atomic_internal.h
index da79b3723..6de8cd485 100644
--- a/platform/linux-generic/include/odp_atomic_internal.h
+++ b/platform/linux-generic/include/odp_atomic_internal.h
@@ -159,31 +159,6 @@ static inline void _odp_atomic_u128_xchg_mm(_odp_atomic_u128_t *ptr,
{
__atomic_exchange(&ptr->v, val, old, mm);
}
-
-/**
- * Atomic compare and exchange (swap) of 16-byte atomic variable
- * "Strong" semantics, will not fail spuriously.
- *
- * @param ptr Pointer to a 16-byte atomic variable
- * @param exp Pointer to expected value (updated on failure)
- * @param val Pointer to new value to write
- * @param succ Memory model associated with a successful compare-and-swap
- * operation
- * @param fail Memory model associated with a failed compare-and-swap
- * operation
- *
- * @retval 1 exchange successul
- * @retval 0 exchange failed and '*exp' updated with current value
- */
-static inline int _odp_atomic_u128_cmp_xchg_mm(_odp_atomic_u128_t *ptr,
- _odp_u128_t *exp,
- _odp_u128_t *val,
- _odp_memmodel_t succ,
- _odp_memmodel_t fail)
-{
- return __atomic_compare_exchange(&ptr->v, exp, val,
- false/*strong*/, succ, fail);
-}
#endif
/**
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index 8625fc5dd..1cececb99 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -36,6 +36,9 @@ typedef struct ODP_ALIGNED_CACHE odp_buffer_hdr_t {
/* Common event header */
_odp_event_hdr_t event_hdr;
+ /* User area pointer */
+ void *uarea_addr;
+
/* Data */
uint8_t data[];
} odp_buffer_hdr_t;
diff --git a/platform/linux-generic/include/odp_debug_internal.h b/platform/linux-generic/include/odp_debug_internal.h
index c5c1890c3..22dca3701 100644
--- a/platform/linux-generic/include/odp_debug_internal.h
+++ b/platform/linux-generic/include/odp_debug_internal.h
@@ -37,15 +37,8 @@ extern "C" {
* level 0 to N. */
#define CONFIG_DEBUG_LEVEL 0
-ODP_PRINTF_FORMAT(1, 2)
-static inline void check_printf_format(const char *fmt, ...)
-{
- (void)fmt;
-}
-
#define _ODP_LOG_FN(level, fmt, ...) \
do { \
- check_printf_format(fmt, ##__VA_ARGS__); \
if (_odp_this_thread && _odp_this_thread->log_fn) \
_odp_this_thread->log_fn(level, fmt, ##__VA_ARGS__); \
else \
diff --git a/platform/linux-generic/include/odp_event_vector_internal.h b/platform/linux-generic/include/odp_event_vector_internal.h
index 33b26d711..55e33b913 100644
--- a/platform/linux-generic/include/odp_event_vector_internal.h
+++ b/platform/linux-generic/include/odp_event_vector_internal.h
@@ -17,6 +17,8 @@
#include <odp/api/debug.h>
#include <odp/api/packet.h>
+#include <odp/api/plat/event_vector_inline_types.h>
+
#include <odp_event_internal.h>
#include <stdint.h>
@@ -28,9 +30,15 @@ typedef struct ODP_ALIGNED_CACHE odp_event_vector_hdr_t {
/* Common event header */
_odp_event_hdr_t event_hdr;
+ /* User area pointer */
+ void *uarea_addr;
+
/* Event vector size */
uint32_t size;
+ /* Flags */
+ _odp_event_vector_flags_t flags;
+
/* Vector of packet handles */
odp_packet_t packet[];
diff --git a/platform/linux-generic/include/odp_global_data.h b/platform/linux-generic/include/odp_global_data.h
index 06e269b4d..462b8d639 100644
--- a/platform/linux-generic/include/odp_global_data.h
+++ b/platform/linux-generic/include/odp_global_data.h
@@ -61,7 +61,7 @@ typedef struct odp_global_data_ro_t {
pid_t main_pid;
pid_t fdserver_pid;
char uid[UID_MAXLEN];
- odp_log_func_t log_fn;
+ odp_log_func_t ODP_PRINTF_FORMAT(2, 3) log_fn;
odp_abort_func_t abort_fn;
system_info_t system_info;
hugepage_info_t hugepage_info;
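These two hunks belong together: once the log function pointer type carries the printf format attribute, calls made through _ODP_LOG_FN are format-checked by the compiler directly, so the dummy check_printf_format() helper removed above is redundant. A simplified sketch of the mechanism (GCC/Clang attribute, hypothetical log_func_t type):

typedef int (*log_func_t)(int level, const char *fmt, ...);

struct global_ro {
	/* every call made through log_fn gets -Wformat checking */
	log_func_t ODP_PRINTF_FORMAT(2, 3) log_fn;
};

/* ro.log_fn(level, "%u pkts\n", "oops");  -> compile-time format warning */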
diff --git a/platform/linux-generic/include/odp_llqueue.h b/platform/linux-generic/include/odp_llqueue.h
index 68325624a..6340d111a 100644
--- a/platform/linux-generic/include/odp_llqueue.h
+++ b/platform/linux-generic/include/odp_llqueue.h
@@ -41,6 +41,7 @@ static odp_bool_t llq_on_queue(struct llnode *node);
*****************************************************************************/
#define SENTINEL ((void *)~(uintptr_t)0)
+#define MAX_SPIN_COUNT 1000
#ifdef CONFIG_LLDSCD
/* Implement queue operations using double-word LL/SC */
@@ -114,6 +115,7 @@ static inline struct llnode *llq_dequeue(struct llqueue *llq)
(void)__atomic_load_n(&head->next, __ATOMIC_RELAXED);
do {
+restart_loop:
old.ui = lld(&llq->u.ui, __ATOMIC_RELAXED);
if (odp_unlikely(old.st.head == NULL)) {
/* Empty list */
@@ -125,13 +127,18 @@ static inline struct llnode *llq_dequeue(struct llqueue *llq)
} else {
/* Multi-element list, dequeue head */
struct llnode *next;
+ int spin_count = 0;
+
/* Wait until llq_enqueue() has written true next
* pointer
*/
while ((next = __atomic_load_n(&old.st.head->next,
__ATOMIC_RELAXED)) ==
- SENTINEL)
+ SENTINEL) {
odp_cpu_pause();
+ if (++spin_count >= MAX_SPIN_COUNT)
+ goto restart_loop;
+ }
neu.st.head = next;
neu.st.tail = old.st.tail;
}
@@ -146,6 +153,7 @@ static inline odp_bool_t llq_dequeue_cond(struct llqueue *llq,
union llht old, neu;
do {
+restart_loop:
old.ui = lld(&llq->u.ui, __ATOMIC_ACQUIRE);
if (odp_unlikely(old.st.head == NULL || old.st.head != exp)) {
/* Empty list or wrong head */
@@ -157,13 +165,17 @@ static inline odp_bool_t llq_dequeue_cond(struct llqueue *llq,
} else {
/* Multi-element list, dequeue head */
struct llnode *next;
+ int spin_count = 0;
/* Wait until llq_enqueue() has written true next
* pointer */
while ((next = __atomic_load_n(&old.st.head->next,
__ATOMIC_RELAXED)) ==
- SENTINEL)
+ SENTINEL) {
odp_cpu_pause();
+ if (++spin_count >= MAX_SPIN_COUNT)
+ goto restart_loop;
+ }
neu.st.head = next;
neu.st.tail = old.st.tail;
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 201dbebf9..1c5b51c3d 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -18,6 +18,7 @@
extern "C" {
#endif
+#include <odp/api/atomic.h>
#include <odp/api/shared_memory.h>
#include <odp/api/ticketlock.h>
#include <odp/api/align.h>
@@ -32,7 +33,7 @@ extern "C" {
typedef struct ODP_ALIGNED_CACHE pool_cache_t {
/* Number of buffers in cache */
- uint32_t cache_num;
+ odp_atomic_u32_t cache_num;
/* Cached buffers */
_odp_event_hdr_t *event_hdr[CONFIG_POOL_CACHE_MAX_SIZE];
@@ -91,6 +92,7 @@ typedef struct pool_t {
uint8_t *uarea_base_addr;
odp_pool_type_t type_2;
odp_pool_ext_param_t ext_param;
+ uint32_t ext_head_offset;
uint32_t skipped_blocks;
uint8_t mem_from_huge_pages;
const struct _odp_pool_mem_src_ops_t *mem_src_ops;
diff --git a/platform/linux-generic/include/odp_print_internal.h b/platform/linux-generic/include/odp_print_internal.h
new file mode 100644
index 000000000..949a1cc70
--- /dev/null
+++ b/platform/linux-generic/include/odp_print_internal.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PRINT_INTERNAL_H_
+#define ODP_PRINT_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+
+int _odp_snprint(char *str, size_t size, const char *format, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index dcd190d07..296a87116 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2016-2018, Linaro Limited
- * Copyright (c) 2019-2021, Nokia
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -80,6 +80,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#undef _RING_INIT
#undef _RING_DEQ
#undef _RING_DEQ_MULTI
+#undef _RING_DEQ_BATCH
#undef _RING_ENQ
#undef _RING_ENQ_MULTI
#undef _RING_LEN
@@ -94,6 +95,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#define _RING_INIT ring_u32_init
#define _RING_DEQ ring_u32_deq
#define _RING_DEQ_MULTI ring_u32_deq_multi
+ #define _RING_DEQ_BATCH ring_u32_deq_batch
#define _RING_ENQ ring_u32_enq
#define _RING_ENQ_MULTI ring_u32_enq_multi
#define _RING_LEN ring_u32_len
@@ -104,6 +106,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#define _RING_INIT ring_u64_init
#define _RING_DEQ ring_u64_deq
#define _RING_DEQ_MULTI ring_u64_deq_multi
+ #define _RING_DEQ_BATCH ring_u64_deq_batch
#define _RING_ENQ ring_u64_enq
#define _RING_ENQ_MULTI ring_u64_enq_multi
#define _RING_LEN ring_u64_len
@@ -114,6 +117,7 @@ static inline int cas_mo_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
#define _RING_INIT ring_ptr_init
#define _RING_DEQ ring_ptr_deq
#define _RING_DEQ_MULTI ring_ptr_deq_multi
+ #define _RING_DEQ_BATCH ring_ptr_deq_batch
#define _RING_ENQ ring_ptr_enq
#define _RING_ENQ_MULTI ring_ptr_enq_multi
#define _RING_LEN ring_ptr_len
@@ -208,6 +212,45 @@ static inline uint32_t _RING_DEQ_MULTI(_ring_gen_t *ring, uint32_t mask,
return num;
}
+/* Dequeue a batch of data (either 0 or exactly 'num' objects) from the ring head.
+ * 'num' must be smaller than the ring size. */
+static inline uint32_t _RING_DEQ_BATCH(_ring_gen_t *ring, uint32_t mask,
+ _ring_data_t data[], uint32_t num)
+{
+ uint32_t head, tail, new_head, i;
+
+ /* Load/CAS acquire of r_head ensures that w_tail load happens after
+ * r_head load, and thus head value is always behind or equal to tail
+ * value. */
+ head = odp_atomic_load_acq_u32(&ring->r.r_head);
+
+ /* Move reader head. This thread owns data at the new head. */
+ do {
+ tail = odp_atomic_load_acq_u32(&ring->r.w_tail);
+
+ /* Not enough data available */
+ if ((tail - head) < num)
+ return 0;
+
+ new_head = head + num;
+
+ } while (odp_unlikely(cas_mo_u32(&ring->r.r_head, &head, new_head,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) == 0));
+
+ /* Read data. */
+ for (i = 0; i < num; i++)
+ data[i] = ring->data[(head + 1 + i) & mask];
+
+ /* Wait until other readers have updated the tail */
+ while (odp_unlikely(odp_atomic_load_u32(&ring->r.r_tail) != head))
+ odp_cpu_pause();
+
+ /* Update the tail. Writers acquire it. */
+ odp_atomic_store_rel_u32(&ring->r.r_tail, new_head);
+
+ return num;
+}
+
/* Enqueue data into the ring tail */
static inline void _RING_ENQ(_ring_gen_t *ring, uint32_t mask,
_ring_data_t data)
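The semantic difference between the existing _deq_multi and the new _deq_batch variants, sketched with the u32 instantiation (internal helpers; ring and mask assumed initialized):

uint32_t obj[8];
uint32_t n;

/* may return anything between 0 and 8 objects */
n = ring_u32_deq_multi(ring, mask, obj, 8);

/* returns exactly 8 objects, or 0 if that many are not available -
 * a partial batch is never dequeued */
n = ring_u32_deq_batch(ring, mask, obj, 8);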
diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h
index b83591446..01ee4a0f3 100644
--- a/platform/linux-generic/include/odp_timer_internal.h
+++ b/platform/linux-generic/include/odp_timer_internal.h
@@ -35,6 +35,9 @@ typedef struct ODP_ALIGNED_CACHE odp_timeout_hdr_t {
/* User ptr inherited from parent timer */
const void *user_ptr;
+ /* User area pointer */
+ void *uarea_addr;
+
/* Parent timer */
odp_timer_t timer;
diff --git a/platform/linux-generic/m4/odp_libconfig.m4 b/platform/linux-generic/m4/odp_libconfig.m4
index 886cc07e8..03dbc929d 100644
--- a/platform/linux-generic/m4/odp_libconfig.m4
+++ b/platform/linux-generic/m4/odp_libconfig.m4
@@ -3,7 +3,7 @@
##########################################################################
m4_define([_odp_config_version_generation], [0])
m4_define([_odp_config_version_major], [1])
-m4_define([_odp_config_version_minor], [21])
+m4_define([_odp_config_version_minor], [22])
m4_define([_odp_config_version],
[_odp_config_version_generation._odp_config_version_major._odp_config_version_minor])
diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c
index 609b0b206..278aa8147 100644
--- a/platform/linux-generic/odp_buffer.c
+++ b/platform/linux-generic/odp_buffer.c
@@ -23,6 +23,13 @@ uint32_t odp_buffer_size(odp_buffer_t buf)
return pool->seg_len;
}
+void *odp_buffer_user_area(odp_buffer_t buf)
+{
+ odp_buffer_hdr_t *hdr = _odp_buf_hdr(buf);
+
+ return hdr->uarea_addr;
+}
+
void odp_buffer_print(odp_buffer_t buf)
{
odp_buffer_hdr_t *hdr;
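Usage sketch for the new buffer user area, assuming a buffer pool created with buf.uarea_size set and an illustrative application type struct buf_meta:

odp_buffer_t buf = odp_buffer_alloc(pool);
struct buf_meta *meta = odp_buffer_user_area(buf);

meta->owner = my_thread_id;  /* application-private metadata per buffer */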
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 6bb4fa3c2..ed0f6723d 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2021, Nokia
+ * Copyright (c) 2019-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -181,6 +181,7 @@ int odp_cls_capability(odp_cls_capability_t *capability)
capability->supported_terms.bit.dip_addr = 1;
capability->supported_terms.bit.sip6_addr = 1;
capability->supported_terms.bit.dip6_addr = 1;
+ capability->supported_terms.bit.ipsec_spi = 1;
capability->supported_terms.bit.custom_frame = 1;
capability->supported_terms.bit.custom_l3 = 1;
capability->random_early_detection = ODP_SUPPORT_NO;
@@ -188,6 +189,12 @@ int odp_cls_capability(odp_cls_capability_t *capability)
capability->threshold_red.all_bits = 0;
capability->threshold_bp.all_bits = 0;
capability->max_hash_queues = CLS_COS_QUEUE_MAX;
+ capability->hash_protocols.proto.ipv4_udp = 1;
+ capability->hash_protocols.proto.ipv4_tcp = 1;
+ capability->hash_protocols.proto.ipv4 = 1;
+ capability->hash_protocols.proto.ipv6_udp = 1;
+ capability->hash_protocols.proto.ipv6_tcp = 1;
+ capability->hash_protocols.proto.ipv6 = 1;
capability->max_mark = MAX_MARK;
capability->stats.cos.counter.discards = 1;
capability->stats.cos.counter.packets = 1;
@@ -290,14 +297,17 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_
cos->num_queue = param.num_queue;
if (param.num_queue > 1) {
- odp_queue_param_init(&cos->queue_param);
+ cos->queue_param = param.queue_param;
cos->queue_group = true;
cos->queue = ODP_QUEUE_INVALID;
_odp_cls_update_hash_proto(cos,
param.hash_proto);
tbl_index = i * CLS_COS_QUEUE_MAX;
for (j = 0; j < param.num_queue; j++) {
- queue = odp_queue_create(NULL, &cos->queue_param);
+ char name[ODP_QUEUE_NAME_LEN];
+
+ snprintf(name, sizeof(name), "_odp_cos_hq_%u_%u", i, j);
+ queue = odp_queue_create(name, &cos->queue_param);
if (queue == ODP_QUEUE_INVALID) {
/* unwind the queues */
_cls_queue_unwind(tbl_index, j);
@@ -309,6 +319,7 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_
}
} else {
+ cos->queue_group = false;
cos->queue = param.queue;
}
@@ -398,6 +409,9 @@ int odp_cos_destroy(odp_cos_t cos_id)
return -1;
}
+ if (cos->queue_group)
+ _cls_queue_unwind(cos->index * CLS_COS_QUEUE_MAX, cos->num_queue);
+
cos->valid = 0;
return 0;
}
@@ -1155,17 +1169,12 @@ static inline int verify_pmr_ipsec_spi(const uint8_t *pkt_addr,
pkt_addr += pkt_hdr->p.l4_offset;
- if (pkt_hdr->p.input_flags.ipsec_ah) {
- const _odp_ahhdr_t *ahhdr = (const _odp_ahhdr_t *)pkt_addr;
-
- spi = odp_be_to_cpu_32(ahhdr->spi);
- } else if (pkt_hdr->p.input_flags.ipsec_esp) {
- const _odp_esphdr_t *esphdr = (const _odp_esphdr_t *)pkt_addr;
-
- spi = odp_be_to_cpu_32(esphdr->spi);
- } else {
+ if (pkt_hdr->p.input_flags.ipsec_ah)
+ spi = ((const _odp_ahhdr_t *)pkt_addr)->spi;
+ else if (pkt_hdr->p.input_flags.ipsec_esp)
+ spi = ((const _odp_esphdr_t *)pkt_addr)->spi;
+ else
return 0;
- }
if (term_value->match.value == (spi & term_value->match.mask))
return 1;
@@ -1959,11 +1968,9 @@ void print_queue_ident(odp_queue_t q)
odp_queue_info_t info;
if (!odp_queue_info(q, &info) && strlen(info.name))
- ODP_PRINT("%s", info.name);
+ ODP_PRINT(" %s\n", info.name);
else
- ODP_PRINT("%" PRIx64, odp_queue_to_u64(q));
-
- ODP_PRINT("\n");
+ ODP_PRINT(" %" PRIx64 "\n", odp_queue_to_u64(q));
}
static
@@ -1978,13 +1985,20 @@ void print_hex(const void *vp, int len)
static
void cls_print_cos(cos_t *cos)
{
+ uint32_t tbl_index = cos->index * CLS_COS_QUEUE_MAX;
uint32_t num_rule = odp_atomic_load_u32(&cos->num_rule);
bool first = true;
ODP_PRINT("cos: ");
print_cos_ident(cos);
- ODP_PRINT(" queue: ");
- print_queue_ident(cos->queue);
+ ODP_PRINT(" queues:\n");
+
+ if (!cos->queue_group) {
+ print_queue_ident(cos->queue);
+ } else {
+ for (uint32_t i = 0; i < cos->num_queue; i++)
+ print_queue_ident(queue_grp_tbl->queue[tbl_index + i]);
+ }
for (uint32_t j = 0; j < num_rule; j++) {
pmr_t *pmr = cos->pmr[j];
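With the change above, the queue parameters given in odp_cls_cos_param_t are applied to the generated hash queues instead of defaults, and the queues now get descriptive names. A hedged setup sketch (pkt_pool and the scheduling settings are assumptions):

odp_cls_cos_param_t cos_param;

odp_cls_cos_param_init(&cos_param);
cos_param.num_queue = 4;                   /* distribute flows over 4 queues */
cos_param.hash_proto.proto.ipv4_udp = 1;
cos_param.queue_param.type = ODP_QUEUE_TYPE_SCHED;
cos_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
cos_param.pool = pkt_pool;

odp_cos_t cos = odp_cls_cos_create("hash_cos", &cos_param);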
diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c
index 8eb2332a1..981c9239c 100644
--- a/platform/linux-generic/odp_crypto_null.c
+++ b/platform/linux-generic/odp_crypto_null.c
@@ -486,54 +486,72 @@ int odp_crypto_result(odp_crypto_packet_result_t *result,
return 0;
}
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
+{
+ int md_copy;
+ int rc;
+
+ md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+ odp_packet_pool(src));
+ if (odp_unlikely(md_copy < 0)) {
+ ODP_ERR("Unable to copy packet metadata\n");
+ return -1;
+ }
+
+ rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
+ if (odp_unlikely(rc < 0)) {
+ ODP_ERR("Unable to copy packet data\n");
+ return -1;
+ }
+
+ _odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
+ return 0;
+}
+
+static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out)
+{
+ int rc;
+
+ if (odp_likely(pkt_in == pkt_out))
+ return pkt_out;
+
+ if (pkt_out == ODP_PACKET_INVALID) {
+ odp_pool_t pool = session->p.output_pool;
+
+ ODP_ASSERT(pool != ODP_POOL_INVALID);
+ if (pool == odp_packet_pool(pkt_in)) {
+ pkt_out = pkt_in;
+ } else {
+ pkt_out = odp_packet_copy(pkt_in, pool);
+ if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+ odp_packet_free(pkt_in);
+ }
+ return pkt_out;
+ }
+ rc = copy_data_and_metadata(pkt_out, pkt_in);
+ if (odp_unlikely(rc < 0))
+ return ODP_PACKET_INVALID;
+
+ odp_packet_free(pkt_in);
+ return pkt_out;
+}
+
static
int crypto_int(odp_packet_t pkt_in,
odp_packet_t *pkt_out,
const odp_crypto_packet_op_param_t *param)
{
odp_crypto_generic_session_t *session;
- odp_bool_t allocated = false;
- odp_packet_t out_pkt = *pkt_out;
+ odp_packet_t out_pkt;
odp_crypto_packet_result_t *op_result;
session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
- /* Resolve output buffer */
- if (ODP_PACKET_INVALID == out_pkt &&
- ODP_POOL_INVALID != session->p.output_pool) {
- out_pkt = odp_packet_alloc(session->p.output_pool,
- odp_packet_len(pkt_in));
- allocated = true;
- }
-
- if (odp_unlikely(ODP_PACKET_INVALID == out_pkt)) {
- ODP_DBG("Alloc failed.\n");
+ out_pkt = get_output_packet(session, pkt_in, *pkt_out);
+ if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
return -1;
- }
-
- if (pkt_in != out_pkt) {
- int ret;
- int md_copy;
-
- md_copy = _odp_packet_copy_md_possible(session->p.output_pool,
- odp_packet_pool(pkt_in));
- if (odp_unlikely(md_copy < 0)) {
- ODP_ERR("Unable to copy packet metadata\n");
- goto err;
- }
-
- ret = odp_packet_copy_from_pkt(out_pkt,
- 0,
- pkt_in,
- 0,
- odp_packet_len(pkt_in));
- if (odp_unlikely(ret < 0))
- goto err;
-
- _odp_packet_copy_md(packet_hdr(out_pkt), packet_hdr(pkt_in), md_copy);
- odp_packet_free(pkt_in);
- pkt_in = ODP_PACKET_INVALID;
- }
/* Fill in result */
packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
@@ -548,14 +566,6 @@ int crypto_int(odp_packet_t pkt_in,
*pkt_out = out_pkt;
return 0;
-
-err:
- if (allocated) {
- odp_packet_free(out_pkt);
- *pkt_out = ODP_PACKET_INVALID;
- }
-
- return -1;
}
int odp_crypto_op(const odp_packet_t pkt_in[],
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c
index 9402c805b..9b5ea4612 100644
--- a/platform/linux-generic/odp_crypto_openssl.c
+++ b/platform/linux-generic/odp_crypto_openssl.c
@@ -2815,6 +2815,58 @@ int odp_crypto_result(odp_crypto_packet_result_t *result,
return 0;
}
+static int copy_data_and_metadata(odp_packet_t dst, odp_packet_t src)
+{
+ int md_copy;
+ int rc;
+
+ md_copy = _odp_packet_copy_md_possible(odp_packet_pool(dst),
+ odp_packet_pool(src));
+ if (odp_unlikely(md_copy < 0)) {
+ ODP_ERR("Unable to copy packet metadata\n");
+ return -1;
+ }
+
+ rc = odp_packet_copy_from_pkt(dst, 0, src, 0, odp_packet_len(src));
+ if (odp_unlikely(rc < 0)) {
+ ODP_ERR("Unable to copy packet data\n");
+ return -1;
+ }
+
+ _odp_packet_copy_md(packet_hdr(dst), packet_hdr(src), md_copy);
+ return 0;
+}
+
+static odp_packet_t get_output_packet(const odp_crypto_generic_session_t *session,
+ odp_packet_t pkt_in,
+ odp_packet_t pkt_out)
+{
+ int rc;
+
+ if (odp_likely(pkt_in == pkt_out))
+ return pkt_out;
+
+ if (pkt_out == ODP_PACKET_INVALID) {
+ odp_pool_t pool = session->p.output_pool;
+
+ ODP_ASSERT(pool != ODP_POOL_INVALID);
+ if (pool == odp_packet_pool(pkt_in)) {
+ pkt_out = pkt_in;
+ } else {
+ pkt_out = odp_packet_copy(pkt_in, pool);
+ if (odp_likely(pkt_out != ODP_PACKET_INVALID))
+ odp_packet_free(pkt_in);
+ }
+ return pkt_out;
+ }
+ rc = copy_data_and_metadata(pkt_out, pkt_in);
+ if (odp_unlikely(rc < 0))
+ return ODP_PACKET_INVALID;
+
+ odp_packet_free(pkt_in);
+ return pkt_out;
+}
+
static
int crypto_int(odp_packet_t pkt_in,
odp_packet_t *pkt_out,
@@ -2823,48 +2875,14 @@ int crypto_int(odp_packet_t pkt_in,
odp_crypto_alg_err_t rc_cipher = ODP_CRYPTO_ALG_ERR_NONE;
odp_crypto_alg_err_t rc_auth = ODP_CRYPTO_ALG_ERR_NONE;
odp_crypto_generic_session_t *session;
- odp_bool_t allocated = false;
- odp_packet_t out_pkt = *pkt_out;
+ odp_packet_t out_pkt;
odp_crypto_packet_result_t *op_result;
session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
- /* Resolve output buffer */
- if (ODP_PACKET_INVALID == out_pkt &&
- ODP_POOL_INVALID != session->p.output_pool) {
- out_pkt = odp_packet_alloc(session->p.output_pool,
- odp_packet_len(pkt_in));
- allocated = true;
- }
-
- if (odp_unlikely(ODP_PACKET_INVALID == out_pkt)) {
- ODP_DBG("Alloc failed.\n");
+ out_pkt = get_output_packet(session, pkt_in, *pkt_out);
+ if (odp_unlikely(out_pkt == ODP_PACKET_INVALID))
return -1;
- }
-
- if (pkt_in != out_pkt) {
- int ret;
- int md_copy;
-
- md_copy = _odp_packet_copy_md_possible(session->p.output_pool,
- odp_packet_pool(pkt_in));
- if (odp_unlikely(md_copy < 0)) {
- ODP_ERR("Unable to copy packet metadata\n");
- goto err;
- }
-
- ret = odp_packet_copy_from_pkt(out_pkt,
- 0,
- pkt_in,
- 0,
- odp_packet_len(pkt_in));
- if (odp_unlikely(ret < 0))
- goto err;
-
- _odp_packet_copy_md(packet_hdr(out_pkt), packet_hdr(pkt_in), md_copy);
- odp_packet_free(pkt_in);
- pkt_in = ODP_PACKET_INVALID;
- }
crypto_init(session);
@@ -2892,14 +2910,6 @@ int crypto_int(odp_packet_t pkt_in,
*pkt_out = out_pkt;
return 0;
-
-err:
- if (allocated) {
- odp_packet_free(out_pkt);
- *pkt_out = ODP_PACKET_INVALID;
- }
-
- return -1;
}
int odp_crypto_op(const odp_packet_t pkt_in[],
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 04b4b6aeb..98fd2ac53 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -73,12 +73,13 @@ static void wait_for_order(ordering_mode_t mode)
*/
static int set_ipsec_crypto_capa(odp_ipsec_capability_t *capa)
{
- int rc;
odp_crypto_capability_t crypto_capa;
- rc = odp_crypto_capability(&crypto_capa);
- if (rc < 0)
- return rc;
+ crypto_capa.ciphers.all_bits = 0;
+ crypto_capa.auths.all_bits = 0;
+
+ if (odp_crypto_capability(&crypto_capa))
+ return -1;
#define CHECK_CIPHER(field, alg) do { \
if (crypto_capa.ciphers.bit.field && \
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 64d7b6fdf..e689089d3 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -168,6 +168,8 @@ int _odp_ipsec_sad_init_global(void)
if (odp_global_ro.disable.ipsec)
return 0;
+ crypto_capa.max_sessions = 0;
+
if (odp_crypto_capability(&crypto_capa)) {
ODP_ERR("odp_crypto_capability() failed\n");
return -1;
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index d96eb8748..775836b66 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -1039,13 +1039,6 @@ void odp_packet_user_ptr_set(odp_packet_t pkt, const void *ptr)
pkt_hdr->p.flags.user_ptr_set = 1;
}
-void odp_packet_input_set(odp_packet_t pkt, odp_pktio_t pktio)
-{
- odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
-
- pkt_hdr->input = pktio;
-}
-
int odp_packet_l2_offset_set(odp_packet_t pkt, uint32_t offset)
{
odp_packet_hdr_t *pkt_hdr = packet_hdr(pkt);
@@ -2432,75 +2425,6 @@ odp_packet_reass_partial_state(odp_packet_t pkt, odp_packet_t frags[],
return -ENOTSUP;
}
-static inline odp_packet_hdr_t *packet_buf_to_hdr(odp_packet_buf_t pkt_buf)
-{
- return (odp_packet_hdr_t *)(uintptr_t)pkt_buf;
-}
-
-void *odp_packet_buf_head(odp_packet_buf_t pkt_buf)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
- uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
-
- if (odp_unlikely(pool->pool_ext == 0)) {
- ODP_ERR("Not an external memory pool\n");
- return NULL;
- }
-
- return (uint8_t *)pkt_hdr + head_offset;
-}
-
-uint32_t odp_packet_buf_size(odp_packet_buf_t pkt_buf)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- pool_t *pool = _odp_pool_entry(pkt_hdr->event_hdr.pool);
- uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
-
- return pool->ext_param.pkt.buf_size - head_offset;
-}
-
-uint32_t odp_packet_buf_data_offset(odp_packet_buf_t pkt_buf)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
-
- return (uintptr_t)pkt_hdr->seg_data - (uintptr_t)odp_packet_buf_head(pkt_buf);
-}
-
-uint32_t odp_packet_buf_data_len(odp_packet_buf_t pkt_buf)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
-
- return pkt_hdr->seg_len;
-}
-
-void odp_packet_buf_data_set(odp_packet_buf_t pkt_buf, uint32_t data_offset, uint32_t data_len)
-{
- odp_packet_hdr_t *pkt_hdr = packet_buf_to_hdr(pkt_buf);
- uint8_t *head = odp_packet_buf_head(pkt_buf);
-
- pkt_hdr->seg_len = data_len;
- pkt_hdr->seg_data = head + data_offset;
-}
-
-odp_packet_buf_t odp_packet_buf_from_head(odp_pool_t pool_hdl, void *head)
-{
- pool_t *pool = _odp_pool_entry(pool_hdl);
- uint32_t head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
-
- if (odp_unlikely(pool->type != ODP_POOL_PACKET)) {
- ODP_ERR("Not a packet pool\n");
- return ODP_PACKET_BUF_INVALID;
- }
-
- if (odp_unlikely(pool->pool_ext == 0)) {
- ODP_ERR("Not an external memory pool\n");
- return ODP_PACKET_BUF_INVALID;
- }
-
- return (odp_packet_buf_t)((uintptr_t)head - head_offset);
-}
-
uint32_t odp_packet_disassemble(odp_packet_t pkt, odp_packet_buf_t pkt_buf[], uint32_t num)
{
uint32_t i;
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index d98449a4b..c9f43ab9c 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -2154,7 +2154,7 @@ void odp_pktio_extra_stats_print(odp_pktio_t pktio)
return;
}
- printf("Pktio extra statistics\n----------------------\n");
+ ODP_PRINT("Pktio extra statistics\n----------------------\n");
for (i = 0; i < num_stats; i++)
ODP_PRINT(" %s=%" PRIu64 "\n", stats_info[i].name, extra_stats[i]);
ODP_PRINT("\n");
diff --git a/platform/linux-generic/odp_packet_vector.c b/platform/linux-generic/odp_packet_vector.c
index e0b99183f..b3edbf84b 100644
--- a/platform/linux-generic/odp_packet_vector.c
+++ b/platform/linux-generic/odp_packet_vector.c
@@ -24,7 +24,9 @@
const _odp_event_vector_inline_offset_t _odp_event_vector_inline ODP_ALIGNED_CACHE = {
.packet = offsetof(odp_event_vector_hdr_t, packet),
.pool = offsetof(odp_event_vector_hdr_t, event_hdr.pool),
- .size = offsetof(odp_event_vector_hdr_t, size)
+ .size = offsetof(odp_event_vector_hdr_t, size),
+ .uarea_addr = offsetof(odp_event_vector_hdr_t, uarea_addr),
+ .flags = offsetof(odp_event_vector_hdr_t, flags)
};
#include <odp/visibility_end.h>
@@ -59,6 +61,7 @@ void odp_packet_vector_free(odp_packet_vector_t pktv)
odp_event_vector_hdr_t *pktv_hdr = _odp_packet_vector_hdr(pktv);
pktv_hdr->size = 0;
+ pktv_hdr->flags.all_flags = 0;
_odp_event_free(odp_packet_vector_to_event(pktv));
}
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index 90cdd6590..a92cc615d 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -5,12 +5,16 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/align.h>
+#include <odp/api/atomic.h>
#include <odp/api/pool.h>
#include <odp/api/shared_memory.h>
-#include <odp/api/align.h>
-#include <odp/api/ticketlock.h>
#include <odp/api/system_info.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp/api/plat/pool_inline_types.h>
#include <odp/api/plat/thread_inlines.h>
+#include <odp/api/plat/ticketlock_inlines.h>
#include <odp_pool_internal.h>
#include <odp_init_internal.h>
@@ -31,8 +35,6 @@
#include <stddef.h>
#include <inttypes.h>
-#include <odp/api/plat/pool_inline_types.h>
-#include <odp/api/plat/ticketlock_inlines.h>
#define LOCK(a) odp_ticketlock_lock(a)
#define UNLOCK(a) odp_ticketlock_unlock(a)
#define LOCK_INIT(a) odp_ticketlock_init(a)
@@ -73,7 +75,9 @@ static __thread pool_local_t local;
/* Fill in pool header field offsets for inline functions */
const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
.index = offsetof(pool_t, pool_idx),
- .uarea_size = offsetof(pool_t, param_uarea_size)
+ .uarea_size = offsetof(pool_t, param_uarea_size),
+ .ext_head_offset = offsetof(pool_t, ext_head_offset),
+ .ext_pkt_buf_size = offsetof(pool_t, ext_param.pkt.buf_size)
};
#include <odp/visibility_end.h>
@@ -81,12 +85,13 @@ const _odp_pool_inline_offset_t _odp_pool_inline ODP_ALIGNED_CACHE = {
static inline void cache_init(pool_cache_t *cache)
{
memset(cache, 0, sizeof(pool_cache_t));
+ odp_atomic_init_u32(&cache->cache_num, 0);
}
static inline uint32_t cache_pop(pool_cache_t *cache,
_odp_event_hdr_t *event_hdr[], int max_num)
{
- uint32_t cache_num = cache->cache_num;
+ uint32_t cache_num = odp_atomic_load_u32(&cache->cache_num);
uint32_t num_ch = max_num;
uint32_t cache_begin;
uint32_t i;
@@ -100,7 +105,7 @@ static inline uint32_t cache_pop(pool_cache_t *cache,
for (i = 0; i < num_ch; i++)
event_hdr[i] = cache->event_hdr[cache_begin + i];
- cache->cache_num = cache_num - num_ch;
+ odp_atomic_store_u32(&cache->cache_num, cache_num - num_ch);
return num_ch;
}
@@ -108,13 +113,13 @@ static inline uint32_t cache_pop(pool_cache_t *cache,
static inline void cache_push(pool_cache_t *cache, _odp_event_hdr_t *event_hdr[],
uint32_t num)
{
- uint32_t cache_num = cache->cache_num;
+ uint32_t cache_num = odp_atomic_load_u32(&cache->cache_num);
uint32_t i;
for (i = 0; i < num; i++)
cache->event_hdr[cache_num + i] = event_hdr[i];
- cache->cache_num = cache_num + num;
+ odp_atomic_store_u32(&cache->cache_num, cache_num + num);
}
static void cache_flush(pool_cache_t *cache, pool_t *pool)
@@ -152,8 +157,7 @@ static inline int cache_available(pool_t *pool, odp_pool_stats_t *stats)
}
for (int i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
- /* TODO: thread specific counters should be atomics */
- uint32_t cur = pool->local_cache[i].cache_num;
+ uint32_t cur = odp_atomic_load_u32(&pool->local_cache[i].cache_num);
if (per_thread && i >= first && i <= last)
stats->thread.cache_available[out_idx++] = cur;
@@ -438,14 +442,26 @@ static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
}
static void init_event_hdr(pool_t *pool, _odp_event_hdr_t *event_hdr, uint32_t event_index,
- uint32_t hdr_len, uint8_t *data_ptr, void *uarea)
+ uint8_t *data_ptr, void *uarea)
{
+ uint32_t hdr_len;
odp_pool_type_t type = pool->type;
+ if (type == ODP_POOL_BUFFER)
+ hdr_len = sizeof(odp_buffer_hdr_t);
+ else if (type == ODP_POOL_PACKET)
+ hdr_len = sizeof(odp_packet_hdr_t);
+ else if (type == ODP_POOL_VECTOR)
+ hdr_len = sizeof(odp_event_vector_hdr_t);
+ else if (type == ODP_POOL_TIMEOUT)
+ hdr_len = sizeof(odp_timeout_hdr_t);
+ else
+ hdr_len = sizeof(_odp_event_hdr_t);
+
+ /* Zero all event and type specific header fields */
memset(event_hdr, 0, hdr_len);
/* Initialize common event metadata */
- event_hdr->index.u32 = 0;
event_hdr->index.pool = pool->pool_idx;
event_hdr->index.event = event_index;
event_hdr->type = type;
@@ -458,6 +474,12 @@ static void init_event_hdr(pool_t *pool, _odp_event_hdr_t *event_hdr, uint32_t e
event_hdr->buf_end = data_ptr + pool->seg_len + pool->tailroom;
}
+ if (type == ODP_POOL_BUFFER) {
+ odp_buffer_hdr_t *buf_hdr = (void *)event_hdr;
+
+ buf_hdr->uarea_addr = uarea;
+ }
+
/* Initialize segmentation metadata */
if (type == ODP_POOL_PACKET) {
odp_packet_hdr_t *pkt_hdr = (void *)event_hdr;
@@ -476,8 +498,15 @@ static void init_event_hdr(pool_t *pool, _odp_event_hdr_t *event_hdr, uint32_t e
if (type == ODP_POOL_VECTOR) {
odp_event_vector_hdr_t *vect_hdr = (void *)event_hdr;
- vect_hdr->size = 0;
event_hdr->event_type = ODP_EVENT_PACKET_VECTOR;
+ vect_hdr->uarea_addr = uarea;
+ }
+
+ /* Initialize timeout metadata */
+ if (type == ODP_POOL_TIMEOUT) {
+ odp_timeout_hdr_t *tmo_hdr = (void *)event_hdr;
+
+ tmo_hdr->uarea_addr = uarea;
}
}
@@ -492,7 +521,7 @@ static void init_buffers(pool_t *pool)
void *uarea = NULL;
uint8_t *data = NULL;
uint8_t *data_ptr = NULL;
- uint32_t offset, hdr_len;
+ uint32_t offset;
ring_ptr_t *ring;
uint32_t mask;
odp_pool_type_t type;
@@ -550,16 +579,10 @@ static void init_buffers(pool_t *pool)
while (((uintptr_t)&data[offset]) % pool->align != 0)
offset++;
- hdr_len = (uintptr_t)data - (uintptr_t)event_hdr;
data_ptr = &data[offset];
- } else {
- if (type == ODP_POOL_TIMEOUT)
- hdr_len = sizeof(odp_timeout_hdr_t);
- else
- hdr_len = sizeof(odp_event_vector_hdr_t);
}
- init_event_hdr(pool, event_hdr, i, hdr_len, data_ptr, uarea);
+ init_event_hdr(pool, event_hdr, i, data_ptr, uarea);
/* Store buffer into the global pool */
if (!skip)
@@ -727,6 +750,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
case ODP_POOL_BUFFER:
num = params->buf.num;
seg_len = params->buf.size;
+ uarea_size = params->buf.uarea_size;
cache_size = params->buf.cache_size;
break;
@@ -778,11 +802,13 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
case ODP_POOL_TIMEOUT:
num = params->tmo.num;
+ uarea_size = params->tmo.uarea_size;
cache_size = params->tmo.cache_size;
break;
case ODP_POOL_VECTOR:
num = params->vector.num;
+ uarea_size = params->vector.uarea_size;
cache_size = params->vector.cache_size;
seg_len = params->vector.max_size * sizeof(odp_packet_t);
break;
@@ -969,6 +995,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->buf.uarea_size > capa.buf.max_uarea_size) {
+ ODP_ERR("buf.uarea_size too large %u\n", params->buf.uarea_size);
+ return -1;
+ }
+
if (params->stats.all & ~capa.buf.stats.all) {
ODP_ERR("Unsupported pool statistics counter\n");
return -1;
@@ -1036,6 +1067,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->tmo.uarea_size > capa.tmo.max_uarea_size) {
+ ODP_ERR("tmo.uarea_size too large %u\n", params->tmo.uarea_size);
+ return -1;
+ }
+
if (params->stats.all & ~capa.tmo.stats.all) {
ODP_ERR("Unsupported pool statistics counter\n");
return -1;
@@ -1067,6 +1103,11 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
+ if (params->vector.uarea_size > capa.vector.max_uarea_size) {
+ ODP_ERR("vector.uarea_size too large %u\n", params->vector.uarea_size);
+ return -1;
+ }
+
if (params->stats.all & ~capa.vector.stats.all) {
ODP_ERR("Unsupported pool statistics counter\n");
return -1;
@@ -1278,7 +1319,7 @@ static inline void event_free_to_pool(pool_t *pool,
/* Make room into local cache if needed. Do at least burst size
* transfer. */
- cache_num = cache->cache_num;
+ cache_num = odp_atomic_load_u32(&cache->cache_num);
if (odp_unlikely((int)(cache_size - cache_num) < num)) {
int burst = pool->burst_size;
@@ -1418,6 +1459,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
+ capa->buf.max_uarea_size = MAX_UAREA_SIZE;
capa->buf.min_cache_size = 0;
capa->buf.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
capa->buf.stats.all = supported_stats.all;
@@ -1441,6 +1483,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Timeout pools */
capa->tmo.max_pools = max_pools;
capa->tmo.max_num = CONFIG_POOL_MAX_NUM;
+ capa->tmo.max_uarea_size = MAX_UAREA_SIZE;
capa->tmo.min_cache_size = 0;
capa->tmo.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
capa->tmo.stats.all = supported_stats.all;
@@ -1449,6 +1492,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->vector.max_pools = max_pools;
capa->vector.max_num = CONFIG_POOL_MAX_NUM;
capa->vector.max_size = CONFIG_PACKET_VECTOR_MAX_SIZE;
+ capa->vector.max_uarea_size = MAX_UAREA_SIZE;
capa->vector.min_cache_size = 0;
capa->vector.max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
capa->vector.stats.all = supported_stats.all;
@@ -1844,6 +1888,9 @@ odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *par
pool->seg_len = buf_size - head_offset - headroom - pool->tailroom;
pool->max_seg_len = headroom + pool->seg_len + pool->tailroom;
pool->max_len = PKT_MAX_SEGS * pool->seg_len;
+ pool->ext_head_offset = head_offset;
+ pool->base_addr = (uint8_t *)(uintptr_t)UINT64_MAX;
+ pool->max_addr = 0;
ring_ptr_init(&pool->ring->hdr);
@@ -1868,8 +1915,7 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
ring_ptr_t *ring;
uint32_t i, ring_mask, buf_index, head_offset;
uint32_t num_populated;
- uint8_t *data_ptr;
- uint32_t hdr_size = sizeof(odp_packet_hdr_t);
+ uint8_t *data_ptr, *min_addr, *max_addr;
void *uarea = NULL;
if (pool_hdl == ODP_POOL_INVALID) {
@@ -1884,6 +1930,9 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
return -1;
}
+ min_addr = pool->base_addr;
+ max_addr = pool->max_addr;
+
if (buf_size != pool->ext_param.pkt.buf_size) {
ODP_ERR("Bad buffer size\n");
return -1;
@@ -1909,11 +1958,17 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
ring = &pool->ring->hdr;
ring_mask = pool->ring_mask;
buf_index = pool->num_populated;
- head_offset = sizeof(odp_packet_hdr_t) + pool->ext_param.pkt.app_header_size;
+ head_offset = pool->ext_head_offset;
for (i = 0; i < num; i++) {
event_hdr = buf[i];
+ if ((uint8_t *)event_hdr < min_addr)
+ min_addr = (uint8_t *)event_hdr;
+
+ if ((uint8_t *)event_hdr > max_addr)
+ max_addr = (uint8_t *)event_hdr;
+
if ((uintptr_t)event_hdr & (ODP_CACHE_LINE_SIZE - 1)) {
ODP_ERR("Bad packet buffer align: buf[%u]\n", i);
return -1;
@@ -1928,7 +1983,7 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
uarea = &pool->uarea_base_addr[buf_index * pool->uarea_size];
data_ptr = (uint8_t *)event_hdr + head_offset + pool->headroom;
- init_event_hdr(pool, event_hdr, buf_index, hdr_size, data_ptr, uarea);
+ init_event_hdr(pool, event_hdr, buf_index, data_ptr, uarea);
pool->ring->event_hdr_by_index[buf_index] = event_hdr;
buf_index++;
@@ -1936,6 +1991,11 @@ int odp_pool_ext_populate(odp_pool_t pool_hdl, void *buf[], uint32_t buf_size, u
}
pool->num_populated += num;
+ pool->base_addr = min_addr;
+ pool->max_addr = max_addr;
+
+ if (flags & ODP_POOL_POPULATE_DONE)
+ pool->max_addr = max_addr + buf_size - 1;
return 0;
}
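The external pool populate path above now tracks the pool's minimum and maximum buffer addresses. A rough sketch of how an application might feed buffers to such a pool (mem, num and buf_size are assumptions; each buffer must be cache line aligned and buf_size must match ext_param.pkt.buf_size):

void *buf[num];
uint32_t i;

for (i = 0; i < num; i++)
	buf[i] = mem + i * buf_size;  /* mem: uint8_t pointer to application memory */

/* the last populate call carries the DONE flag, letting the implementation
 * finalize the pool's base/max address range */
odp_pool_ext_populate(pool, buf, buf_size, num, ODP_POOL_POPULATE_DONE);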
diff --git a/platform/linux-generic/odp_print.c b/platform/linux-generic/odp_print.c
new file mode 100644
index 000000000..30a06c2f4
--- /dev/null
+++ b/platform/linux-generic/odp_print.c
@@ -0,0 +1,47 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/hints.h>
+#include <odp_print_internal.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+/* Helps with snprintf() return value checking
+ *
+ * Otherwise like snprintf(), but always returns the number of characters
+ * written (excluding the terminating null), or zero on error. The output
+ * string is always null terminated. */
+ODP_PRINTF_FORMAT(3, 4)
+int _odp_snprint(char *str, size_t size, const char *format, ...)
+{
+ va_list args;
+ int len;
+
+ /* No space to print new characters */
+ if (size < 1)
+ return 0;
+
+ if (size < 2) {
+ str[0] = 0;
+ return 0;
+ }
+
+ va_start(args, format);
+ len = vsnprintf(str, size, format, args);
+ va_end(args);
+
+ /* Error. Ensure that the string is null terminated */
+ if (len < 0) {
+ str[0] = 0;
+ return 0;
+ }
+
+ /* Output was truncated. Return the number of characters actually written. */
+ if (len >= (int)size)
+ return (int)size - 1;
+
+ return len;
+}
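Typical use of the helper, as in the scheduler print code later in this patch: accumulate a line into a fixed buffer without checking each return value for negative or truncated results (num and count[] are illustrative):

char str[256];
int pos = 0;
int i;

pos += _odp_snprint(&str[pos], sizeof(str) - pos, "counts:");
for (i = 0; i < num; i++)
	pos += _odp_snprint(&str[pos], sizeof(str) - pos, " %u", count[i]);

ODP_PRINT("%s\n", str);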
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 81065a8d5..112c392ec 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -37,6 +37,7 @@
#include <odp_global_data.h>
#include <odp_event_internal.h>
#include <odp_macros_internal.h>
+#include <odp_print_internal.h>
#include <string.h>
@@ -52,6 +53,14 @@
/* Spread balancing frequency. Balance every BALANCE_ROUNDS_M1 + 1 scheduling rounds. */
#define BALANCE_ROUNDS_M1 0xfffff
+/* Number of scheduled queue synchronization types */
+#define NUM_SCHED_SYNC 3
+
+/* Queue types used as array indices */
+ODP_STATIC_ASSERT(ODP_SCHED_SYNC_PARALLEL == 0, "ODP_SCHED_SYNC_PARALLEL_value_changed");
+ODP_STATIC_ASSERT(ODP_SCHED_SYNC_ATOMIC == 1, "ODP_SCHED_SYNC_ATOMIC_value_changed");
+ODP_STATIC_ASSERT(ODP_SCHED_SYNC_ORDERED == 2, "ODP_SCHED_SYNC_ORDERED_value_changed");
+
/* Load of a queue */
#define QUEUE_LOAD 256
@@ -223,8 +232,8 @@ typedef struct ODP_ALIGNED_CACHE {
typedef struct {
struct {
- uint8_t burst_default[NUM_PRIO];
- uint8_t burst_max[NUM_PRIO];
+ uint8_t burst_default[NUM_SCHED_SYNC][NUM_PRIO];
+ uint8_t burst_max[NUM_SCHED_SYNC][NUM_PRIO];
uint8_t num_spread;
uint8_t prefer_ratio;
} config;
@@ -300,11 +309,46 @@ static sched_global_t *sched;
/* Thread local scheduler context */
static __thread sched_local_t sched_local;
+static int read_burst_size_conf(uint8_t out_tbl[], const char *conf_str,
+ int min_val, int max_val, int print)
+{
+ int burst_val[NUM_PRIO];
+ const int max_len = 256;
+ const int n = max_len - 1;
+ char line[max_len];
+ int len = 0;
+
+ if (_odp_libconfig_lookup_array(conf_str, burst_val, NUM_PRIO) !=
+ NUM_PRIO) {
+ ODP_ERR("Config option '%s' not found.\n", conf_str);
+ return -1;
+ }
+
+ char str[strlen(conf_str) + 4];
+
+ snprintf(str, sizeof(str), "%s[]:", conf_str);
+ len += snprintf(&line[len], n - len, " %-38s", str);
+
+ for (int i = 0; i < NUM_PRIO; i++) {
+ int val = burst_val[i];
+
+ if (val > max_val || val < min_val) {
+ ODP_ERR("Bad value for %s: %i\n", conf_str, val);
+ return -1;
+ }
+ len += snprintf(&line[len], n - len, " %3i", val);
+ if (val > 0)
+ out_tbl[i] = val;
+ }
+ if (print)
+ ODP_PRINT("%s\n", line);
+
+ return 0;
+}
+
static int read_config_file(sched_global_t *sched)
{
const char *str;
- int i;
- int burst_val[NUM_PRIO];
int val = 0;
ODP_PRINT("Scheduler config:\n");
@@ -355,46 +399,48 @@ static int read_config_file(sched_global_t *sched)
if (val == 0 || sched->config.num_spread == 1)
sched->load_balance = 0;
+ /* Initialize default values for all queue types */
str = "sched_basic.burst_size_default";
- if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) !=
- NUM_PRIO) {
- ODP_ERR("Config option '%s' not found.\n", str);
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC], str, 1,
+ STASH_SIZE, 1) ||
+ read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_PARALLEL], str, 1,
+ STASH_SIZE, 0) ||
+ read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ORDERED], str, 1,
+ STASH_SIZE, 0))
return -1;
- }
- ODP_PRINT(" %s[] =", str);
- for (i = 0; i < NUM_PRIO; i++) {
- val = burst_val[i];
- sched->config.burst_default[i] = val;
- ODP_PRINT(" %3i", val);
+ str = "sched_basic.burst_size_max";
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ATOMIC], str, 1,
+ BURST_MAX, 1) ||
+ read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_PARALLEL], str, 1,
+ BURST_MAX, 0) ||
+ read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ORDERED], str, 1,
+ BURST_MAX, 0))
+ return -1;
- if (val > STASH_SIZE || val < 1) {
- ODP_ERR("Bad value %i\n", val);
- return -1;
- }
- }
- ODP_PRINT("\n");
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC],
+ "sched_basic.burst_size_atomic", 0, STASH_SIZE, 1))
+ return -1;
- str = "sched_basic.burst_size_max";
- if (_odp_libconfig_lookup_array(str, burst_val, NUM_PRIO) !=
- NUM_PRIO) {
- ODP_ERR("Config option '%s' not found.\n", str);
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ATOMIC],
+ "sched_basic.burst_size_max_atomic", 0, BURST_MAX, 1))
+ return -1;
+
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_PARALLEL],
+ "sched_basic.burst_size_parallel", 0, STASH_SIZE, 1))
return -1;
- }
- ODP_PRINT(" %s[] = ", str);
- for (i = 0; i < NUM_PRIO; i++) {
- val = burst_val[i];
- sched->config.burst_max[i] = val;
- ODP_PRINT(" %3i", val);
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_PARALLEL],
+ "sched_basic.burst_size_max_parallel", 0, BURST_MAX, 1))
+ return -1;
- if (val > BURST_MAX || val < 1) {
- ODP_ERR("Bad value %i\n", val);
- return -1;
- }
- }
+ if (read_burst_size_conf(sched->config.burst_default[ODP_SCHED_SYNC_ORDERED],
+ "sched_basic.burst_size_ordered", 0, STASH_SIZE, 1))
+ return -1;
- ODP_PRINT("\n");
+ if (read_burst_size_conf(sched->config.burst_max[ODP_SCHED_SYNC_ORDERED],
+ "sched_basic.burst_size_max_ordered", 0, BURST_MAX, 1))
+ return -1;
str = "sched_basic.group_enable.all";
if (!_odp_libconfig_lookup_int(str, &val)) {
@@ -1245,7 +1291,14 @@ static inline int schedule_grp_prio(odp_queue_t *out_queue, odp_event_t out_ev[]
uint32_t qi;
int num_spread = sched->config.num_spread;
uint32_t ring_mask = sched->ring_mask;
- uint16_t burst_def = sched->config.burst_default[prio];
+ const uint32_t burst_def_sync[NUM_SCHED_SYNC] = {
+ sched->config.burst_default[ODP_SCHED_SYNC_PARALLEL][prio],
+ sched->config.burst_default[ODP_SCHED_SYNC_ATOMIC][prio],
+ sched->config.burst_default[ODP_SCHED_SYNC_ORDERED][prio]};
+ const uint32_t burst_max_sync[NUM_SCHED_SYNC] = {
+ sched->config.burst_max[ODP_SCHED_SYNC_PARALLEL][prio],
+ sched->config.burst_max[ODP_SCHED_SYNC_ATOMIC][prio],
+ sched->config.burst_max[ODP_SCHED_SYNC_ORDERED][prio]};
/* Select the first spread based on weights */
spr = first_spr;
@@ -1256,7 +1309,7 @@ static inline int schedule_grp_prio(odp_queue_t *out_queue, odp_event_t out_ev[]
odp_queue_t handle;
ring_u32_t *ring;
int pktin;
- uint16_t max_deq = burst_def;
+ uint32_t max_deq;
int stashed = 1;
odp_event_t *ev_tbl = sched_local.stash.ev;
@@ -1282,16 +1335,16 @@ static inline int schedule_grp_prio(odp_queue_t *out_queue, odp_event_t out_ev[]
sync_ctx = sched_sync_type(qi);
ordered = (sync_ctx == ODP_SCHED_SYNC_ORDERED);
+ max_deq = burst_def_sync[sync_ctx];
/* When application's array is larger than default burst
* size, output all events directly there. Also, ordered
* queues are not stashed locally to improve
* parallelism. Ordered context can only be released
* when the local cache is empty. */
- if (max_num > burst_def || ordered) {
- uint16_t burst_max;
+ if (max_num > max_deq || ordered) {
+ const uint32_t burst_max = burst_max_sync[sync_ctx];
- burst_max = sched->config.burst_max[prio];
stashed = 0;
ev_tbl = out_ev;
max_deq = max_num;
@@ -2035,12 +2088,14 @@ static int schedule_capability(odp_schedule_capability_t *capa)
static void schedule_print(void)
{
- int spr, prio, grp;
+ int spr, prio, grp, pos;
uint32_t num_queues, num_active;
ring_u32_t *ring;
odp_schedule_capability_t capa;
int num_spread = sched->config.num_spread;
const int col_width = 24;
+ const int size = 512;
+ char str[size];
(void)schedule_capability(&capa);
@@ -2053,42 +2108,42 @@ static void schedule_print(void)
ODP_PRINT(" prefer ratio: %u\n", sched->config.prefer_ratio);
ODP_PRINT("\n");
- ODP_PRINT(" Number of active event queues:\n");
- ODP_PRINT(" spread\n");
- ODP_PRINT(" ");
+ pos = 0;
+ pos += _odp_snprint(&str[pos], size - pos, " Number of active event queues:\n");
+ pos += _odp_snprint(&str[pos], size - pos, " spread\n");
+ pos += _odp_snprint(&str[pos], size - pos, " ");
for (spr = 0; spr < num_spread; spr++)
- ODP_PRINT(" %7i", spr);
+ pos += _odp_snprint(&str[pos], size - pos, " %7i", spr);
- ODP_PRINT("\n");
+ ODP_PRINT("%s\n", str);
for (prio = 0; prio < NUM_PRIO; prio++) {
- ODP_PRINT(" prio %i", prio);
-
for (grp = 0; grp < NUM_SCHED_GRPS; grp++)
if (sched->prio_q_mask[grp][prio])
break;
- if (grp == NUM_SCHED_GRPS) {
- ODP_PRINT(":-\n");
+ if (grp == NUM_SCHED_GRPS)
continue;
- }
- ODP_PRINT("\n");
+ ODP_PRINT(" prio: %i\n", prio);
for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
if (sched->sched_grp[grp].allocated == 0)
continue;
- ODP_PRINT(" group %i:", grp);
+ pos = 0;
+ pos += _odp_snprint(&str[pos], size - pos, " group %i:", grp);
for (spr = 0; spr < num_spread; spr++) {
num_queues = sched->prio_q_count[grp][prio][spr];
ring = &sched->prio_q[grp][prio][spr].ring;
num_active = ring_u32_len(ring);
- ODP_PRINT(" %3u/%3u", num_active, num_queues);
+ pos += _odp_snprint(&str[pos], size - pos, " %3u/%3u",
+ num_active, num_queues);
}
- ODP_PRINT("\n");
+
+ ODP_PRINT("%s\n", str);
}
}
@@ -2099,12 +2154,15 @@ static void schedule_print(void)
if (sched->sched_grp[grp].allocated == 0)
continue;
- ODP_PRINT(" group %i: %-*s", grp, col_width, sched->sched_grp[grp].name);
+ pos = 0;
+ pos += _odp_snprint(&str[pos], size - pos, " group %i: %-*s", grp, col_width,
+ sched->sched_grp[grp].name);
for (spr = 0; spr < num_spread; spr++)
- ODP_PRINT(" %u", sched->sched_grp[grp].spread_thrs[spr]);
+ pos += _odp_snprint(&str[pos], size - pos, " %u",
+ sched->sched_grp[grp].spread_thrs[spr]);
- ODP_PRINT("\n");
+ ODP_PRINT("%s\n", str);
}
ODP_PRINT("\n");
diff --git a/platform/linux-generic/odp_stash.c b/platform/linux-generic/odp_stash.c
index 1bbbc8d8b..8fe9c1096 100644
--- a/platform/linux-generic/odp_stash.c
+++ b/platform/linux-generic/odp_stash.c
@@ -1,12 +1,14 @@
-/* Copyright (c) 2020-2021, Nokia
+/* Copyright (c) 2020-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/ticketlock.h>
#include <odp/api/shared_memory.h>
#include <odp/api/stash.h>
+#include <odp/api/std_types.h>
+#include <odp/api/ticketlock.h>
+
#include <odp/api/plat/strong_types.h>
#include <odp_config_internal.h>
@@ -118,6 +120,8 @@ int odp_stash_capability(odp_stash_capability_t *capa, odp_stash_type_t type)
capa->max_stashes = CONFIG_MAX_STASHES - CONFIG_INTERNAL_STASHES;
capa->max_num_obj = MAX_RING_SIZE;
capa->max_obj_size = sizeof(uint64_t);
+ capa->max_get_batch = MIN_RING_SIZE;
+ capa->max_put_batch = MIN_RING_SIZE;
capa->stats.bit.count = 1;
return 0;
@@ -311,7 +315,7 @@ odp_stash_t odp_stash_lookup(const char *name)
return ODP_STASH_INVALID;
}
-int32_t odp_stash_put(odp_stash_t st, const void *obj, int32_t num)
+static inline int32_t stash_put(odp_stash_t st, const void *obj, int32_t num)
{
stash_t *stash;
uint32_t obj_size;
@@ -367,7 +371,19 @@ int32_t odp_stash_put(odp_stash_t st, const void *obj, int32_t num)
return -1;
}
-int32_t odp_stash_put_u32(odp_stash_t st, const uint32_t val[], int32_t num)
+int32_t odp_stash_put(odp_stash_t st, const void *obj, int32_t num)
+{
+ return stash_put(st, obj, num);
+}
+
+int32_t odp_stash_put_batch(odp_stash_t st, const void *obj, int32_t num)
+{
+ /* Always returns 'num' on success, or -1 on failure. */
+ return stash_put(st, obj, num);
+}
+
+static inline int32_t stash_put_u32(odp_stash_t st, const uint32_t val[],
+ int32_t num)
{
stash_t *stash = (stash_t *)(uintptr_t)st;
@@ -381,7 +397,20 @@ int32_t odp_stash_put_u32(odp_stash_t st, const uint32_t val[], int32_t num)
return num;
}
-int32_t odp_stash_put_u64(odp_stash_t st, const uint64_t val[], int32_t num)
+int32_t odp_stash_put_u32(odp_stash_t st, const uint32_t val[], int32_t num)
+{
+ return stash_put_u32(st, val, num);
+}
+
+int32_t odp_stash_put_u32_batch(odp_stash_t st, const uint32_t val[],
+ int32_t num)
+{
+ /* Always returns 'num' on success, or -1 on failure. */
+ return stash_put_u32(st, val, num);
+}
+
+static inline int32_t stash_put_u64(odp_stash_t st, const uint64_t val[],
+ int32_t num)
{
stash_t *stash = (stash_t *)(uintptr_t)st;
@@ -395,7 +424,20 @@ int32_t odp_stash_put_u64(odp_stash_t st, const uint64_t val[], int32_t num)
return num;
}
-int32_t odp_stash_put_ptr(odp_stash_t st, const uintptr_t ptr[], int32_t num)
+int32_t odp_stash_put_u64(odp_stash_t st, const uint64_t val[], int32_t num)
+{
+ return stash_put_u64(st, val, num);
+}
+
+int32_t odp_stash_put_u64_batch(odp_stash_t st, const uint64_t val[],
+ int32_t num)
+{
+ /* Always returns 'num' on success, or -1 on failure. */
+ return stash_put_u64(st, val, num);
+}
+
+static inline int32_t stash_put_ptr(odp_stash_t st, const uintptr_t ptr[],
+ int32_t num)
{
stash_t *stash = (stash_t *)(uintptr_t)st;
@@ -416,7 +458,19 @@ int32_t odp_stash_put_ptr(odp_stash_t st, const uintptr_t ptr[], int32_t num)
return num;
}
-int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
+int32_t odp_stash_put_ptr(odp_stash_t st, const uintptr_t ptr[], int32_t num)
+{
+ return stash_put_ptr(st, ptr, num);
+}
+
+int32_t odp_stash_put_ptr_batch(odp_stash_t st, const uintptr_t ptr[],
+ int32_t num)
+{
+ /* Always returns 'num' on success, or -1 on failure. */
+ return stash_put_ptr(st, ptr, num);
+}
+
+static inline int32_t stash_get(odp_stash_t st, void *obj, int32_t num, odp_bool_t batch)
{
stash_t *stash;
uint32_t obj_size;
@@ -432,13 +486,19 @@ int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
if (obj_size == sizeof(uint64_t)) {
ring_u64_t *ring_u64 = &stash->ring_u64.hdr;
- return ring_u64_deq_multi(ring_u64, stash->ring_mask, obj, num);
+ if (batch)
+ return ring_u64_deq_batch(ring_u64, stash->ring_mask, obj, num);
+ else
+ return ring_u64_deq_multi(ring_u64, stash->ring_mask, obj, num);
}
if (obj_size == sizeof(uint32_t)) {
ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
- return ring_u32_deq_multi(ring_u32, stash->ring_mask, obj, num);
+ if (batch)
+ return ring_u32_deq_batch(ring_u32, stash->ring_mask, obj, num);
+ else
+ return ring_u32_deq_multi(ring_u32, stash->ring_mask, obj, num);
}
if (obj_size == sizeof(uint16_t)) {
@@ -446,8 +506,10 @@ int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
uint32_t u32[num];
- num_deq = ring_u32_deq_multi(ring_u32, stash->ring_mask,
- u32, num);
+ if (batch)
+ num_deq = ring_u32_deq_batch(ring_u32, stash->ring_mask, u32, num);
+ else
+ num_deq = ring_u32_deq_multi(ring_u32, stash->ring_mask, u32, num);
for (i = 0; i < num_deq; i++)
u16_ptr[i] = u32[i];
@@ -460,8 +522,10 @@ int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
ring_u32_t *ring_u32 = &stash->ring_u32.hdr;
uint32_t u32[num];
- num_deq = ring_u32_deq_multi(ring_u32, stash->ring_mask,
- u32, num);
+ if (batch)
+ num_deq = ring_u32_deq_batch(ring_u32, stash->ring_mask, u32, num);
+ else
+ num_deq = ring_u32_deq_multi(ring_u32, stash->ring_mask, u32, num);
for (i = 0; i < num_deq; i++)
u8_ptr[i] = u32[i];
@@ -472,6 +536,16 @@ int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
return -1;
}
+int32_t odp_stash_get(odp_stash_t st, void *obj, int32_t num)
+{
+ return stash_get(st, obj, num, 0);
+}
+
+int32_t odp_stash_get_batch(odp_stash_t st, void *obj, int32_t num)
+{
+ return stash_get(st, obj, num, 1);
+}
+
int32_t odp_stash_get_u32(odp_stash_t st, uint32_t val[], int32_t num)
{
stash_t *stash = (stash_t *)(uintptr_t)st;
@@ -485,6 +559,18 @@ int32_t odp_stash_get_u32(odp_stash_t st, uint32_t val[], int32_t num)
num);
}
+int32_t odp_stash_get_u32_batch(odp_stash_t st, uint32_t val[], int32_t num)
+{
+ stash_t *stash = (stash_t *)(uintptr_t)st;
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ ODP_ASSERT(stash->obj_size == sizeof(uint32_t));
+
+ return ring_u32_deq_batch(&stash->ring_u32.hdr, stash->ring_mask, val, num);
+}
+
int32_t odp_stash_get_u64(odp_stash_t st, uint64_t val[], int32_t num)
{
stash_t *stash = (stash_t *)(uintptr_t)st;
@@ -498,6 +584,18 @@ int32_t odp_stash_get_u64(odp_stash_t st, uint64_t val[], int32_t num)
num);
}
+int32_t odp_stash_get_u64_batch(odp_stash_t st, uint64_t val[], int32_t num)
+{
+ stash_t *stash = (stash_t *)(uintptr_t)st;
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ ODP_ASSERT(stash->obj_size == sizeof(uint64_t));
+
+ return ring_u64_deq_batch(&stash->ring_u64.hdr, stash->ring_mask, val, num);
+}
+
int32_t odp_stash_get_ptr(odp_stash_t st, uintptr_t ptr[], int32_t num)
{
stash_t *stash = (stash_t *)(uintptr_t)st;
@@ -518,6 +616,24 @@ int32_t odp_stash_get_ptr(odp_stash_t st, uintptr_t ptr[], int32_t num)
return -1;
}
+int32_t odp_stash_get_ptr_batch(odp_stash_t st, uintptr_t ptr[], int32_t num)
+{
+ stash_t *stash = (stash_t *)(uintptr_t)st;
+
+ if (odp_unlikely(st == ODP_STASH_INVALID))
+ return -1;
+
+ ODP_ASSERT(stash->obj_size == sizeof(uintptr_t));
+
+ if (sizeof(uintptr_t) == sizeof(uint32_t))
+ return ring_u32_deq_batch(&stash->ring_u32.hdr, stash->ring_mask,
+ (uint32_t *)(uintptr_t)ptr, num);
+ else if (sizeof(uintptr_t) == sizeof(uint64_t))
+ return ring_u64_deq_batch(&stash->ring_u64.hdr, stash->ring_mask,
+ (uint64_t *)(uintptr_t)ptr, num);
+ return -1;
+}
+
int odp_stash_flush_cache(odp_stash_t st)
{
if (odp_unlikely(st == ODP_STASH_INVALID))
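
The hunks above add batch variants alongside the existing multi-object stash calls: where odp_stash_get() may return anywhere from 0 to 'num' objects, the batch form is all-or-nothing (the matching put comment notes it always returns 'num' or -1). A minimal usage sketch, assuming 'st' is a valid stash created for pointer-sized objects; the burst size and the fallback policy are illustrative only:

    #include <odp_api.h>

    #define BURST 8

    /* Illustrative sketch: try an all-or-nothing batch get first, then fall
     * back to the regular get, which may return fewer than BURST objects. */
    static int32_t drain_burst(odp_stash_t st)
    {
            uintptr_t obj[BURST];
            int32_t num;

            num = odp_stash_get_ptr_batch(st, obj, BURST);
            if (num <= 0)
                    num = odp_stash_get_ptr(st, obj, BURST);

            return num;
    }
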
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index c122df537..c0ca12ae0 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -112,29 +112,37 @@ const _odp_timeout_inline_offset_t
_odp_timeout_inline_offset ODP_ALIGNED_CACHE = {
.expiration = offsetof(odp_timeout_hdr_t, expiration),
.timer = offsetof(odp_timeout_hdr_t, timer),
- .user_ptr = offsetof(odp_timeout_hdr_t, user_ptr)
+ .user_ptr = offsetof(odp_timeout_hdr_t, user_ptr),
+ .uarea_addr = offsetof(odp_timeout_hdr_t, uarea_addr),
};
#include <odp/visibility_end.h>
-typedef struct
+typedef union
#if USE_128BIT_ATOMICS
ODP_ALIGNED(16) /* 16-byte atomic operations need properly aligned addresses */
#endif
tick_buf_s {
- /* Expiration tick or TMO_xxx */
- odp_atomic_u64_t exp_tck;
- union {
- /* ODP_EVENT_INVALID if timer not active */
- odp_event_t tmo_event;
-
- /* Ensures that tick_buf_t is 128 bits */
- uint64_t tmo_u64;
- };
+#if USE_128BIT_ATOMICS
+ odp_atomic_u128_t tb_atomic_u128;
+
+ odp_u128_t tb_u128;
+#endif
+
+ struct {
+ /* Expiration tick or TMO_xxx */
+ odp_atomic_u64_t exp_tck;
+ union {
+ /* ODP_EVENT_INVALID if timer not active */
+ odp_event_t tmo_event;
+ /* Ensures that tick_buf_t is 128 bits */
+ uint64_t tmo_u64;
+ };
+ };
} tick_buf_t;
-#ifndef ODP_ATOMIC_U64_LOCK
+#if USE_128BIT_ATOMICS
ODP_STATIC_ASSERT(sizeof(tick_buf_t) == 16, "sizeof(tick_buf_t) == 16");
#endif
@@ -683,11 +691,11 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_event_t *tmo_event,
new.tmo_u64 = 0;
old.tmo_u64 = 0;
- do {
- /* Relaxed and non-atomic read of current values */
- old.exp_tck.v = tb->exp_tck.v;
- old.tmo_event = tb->tmo_event;
+ /* Relaxed and non-atomic read of current values */
+ old.exp_tck.v = tb->exp_tck.v;
+ old.tmo_event = tb->tmo_event;
+ do {
/* Check if there actually is a timeout event
* present */
if (old.tmo_event == ODP_EVENT_INVALID) {
@@ -702,9 +710,8 @@ static bool timer_reset(uint32_t idx, uint64_t abs_tck, odp_event_t *tmo_event,
/* Atomic CAS will fail if we experienced torn reads,
* retry update sequence until CAS succeeds */
- } while (!_odp_atomic_u128_cmp_xchg_mm((_odp_atomic_u128_t *)tb,
- (_odp_u128_t *)&old, (_odp_u128_t *)&new,
- _ODP_MEMMODEL_RLS, _ODP_MEMMODEL_RLX));
+ } while (!odp_atomic_cas_rel_u128(&tb->tb_atomic_u128,
+ &old.tb_u128, new.tb_u128));
#else
/* Take a related lock */
while (_odp_atomic_flag_tas(IDX2LOCK(tp, idx)))
@@ -830,11 +837,11 @@ static odp_event_t timer_cancel(timer_pool_t *tp, uint32_t idx)
new.tmo_u64 = 0;
old.tmo_u64 = 0;
- do {
- /* Relaxed and non-atomic read of current values */
- old.exp_tck.v = tb->exp_tck.v;
- old.tmo_event = tb->tmo_event;
+ /* Relaxed and non-atomic read of current values */
+ old.exp_tck.v = tb->exp_tck.v;
+ old.tmo_event = tb->tmo_event;
+ do {
/* Check if it is not expired already */
if (old.exp_tck.v & TMO_INACTIVE) {
old.tmo_event = ODP_EVENT_INVALID;
@@ -847,11 +854,9 @@ static odp_event_t timer_cancel(timer_pool_t *tp, uint32_t idx)
/* Atomic CAS will fail if we experienced torn reads,
* retry update sequence until CAS succeeds */
- } while (!_odp_atomic_u128_cmp_xchg_mm((_odp_atomic_u128_t *)tb,
- (_odp_u128_t *)&old,
- (_odp_u128_t *)&new,
- _ODP_MEMMODEL_RLS,
- _ODP_MEMMODEL_RLX));
+ } while (!odp_atomic_cas_rel_u128(&tb->tb_atomic_u128, &old.tb_u128,
+ new.tb_u128));
+
old_event = old.tmo_event;
#else
/* Take a related lock */
@@ -905,9 +910,8 @@ static inline void timer_expire(timer_pool_t *tp, uint32_t idx, uint64_t tick)
new.exp_tck.v = exp_tck | TMO_INACTIVE;
new.tmo_event = ODP_EVENT_INVALID;
- int succ = _odp_atomic_u128_cmp_xchg_mm((_odp_atomic_u128_t *)tb,
- (_odp_u128_t *)&old, (_odp_u128_t *)&new,
- _ODP_MEMMODEL_RLS, _ODP_MEMMODEL_RLX);
+ int succ = odp_atomic_cas_rel_u128(&tb->tb_atomic_u128,
+ &old.tb_u128, new.tb_u128);
if (succ)
tmo_event = old.tmo_event;
/* Else CAS failed, something changed => skip timer
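
The timer changes above replace the internal _odp_atomic_u128_cmp_xchg_mm() helper with the public odp_atomic_cas_rel_u128() API and hoist the relaxed reads out of the retry loop: on a failed CAS the current value is written back into 'old', so re-reading inside the loop is unnecessary. A stripped-down sketch of that pattern, with illustrative names (the patch itself operates on the tb_atomic_u128/tb_u128 union members):

    #include <odp_api.h>

    /* Retry-until-CAS-succeeds update of a 128-bit atomic. */
    static void update_u128(odp_atomic_u128_t *atom, odp_u128_t new_val)
    {
            /* Single relaxed load before the loop. */
            odp_u128_t old = odp_atomic_load_u128(atom);

            /* On failure the CAS refreshes 'old' with the current value;
             * recompute 'new_val' from 'old' here if the update depends on it. */
            while (!odp_atomic_cas_rel_u128(atom, &old, new_val))
                    ;
    }
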
diff --git a/platform/linux-generic/test/inline-timer.conf b/platform/linux-generic/test/inline-timer.conf
index c7379e38a..261aa0141 100644
--- a/platform/linux-generic/test/inline-timer.conf
+++ b/platform/linux-generic/test/inline-timer.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.21"
+config_file_version = "0.1.22"
timer: {
# Enable inline timer implementation
diff --git a/platform/linux-generic/test/packet_align.conf b/platform/linux-generic/test/packet_align.conf
index 433899017..8d2d00e63 100644
--- a/platform/linux-generic/test/packet_align.conf
+++ b/platform/linux-generic/test/packet_align.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.21"
+config_file_version = "0.1.22"
pool: {
pkt: {
diff --git a/platform/linux-generic/test/process-mode.conf b/platform/linux-generic/test/process-mode.conf
index a36c9fc3d..1e0e7cc95 100644
--- a/platform/linux-generic/test/process-mode.conf
+++ b/platform/linux-generic/test/process-mode.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.21"
+config_file_version = "0.1.22"
# Shared memory options
shm: {
diff --git a/platform/linux-generic/test/sched-basic.conf b/platform/linux-generic/test/sched-basic.conf
index 16654595b..e63ffa2f3 100644
--- a/platform/linux-generic/test/sched-basic.conf
+++ b/platform/linux-generic/test/sched-basic.conf
@@ -1,6 +1,6 @@
# Mandatory fields
odp_implementation = "linux-generic"
-config_file_version = "0.1.21"
+config_file_version = "0.1.22"
# Test scheduler with an odd spread value and without dynamic load balance
sched_basic: {
diff --git a/platform/linux-generic/test/validation/api/shmem/Makefile.am b/platform/linux-generic/test/validation/api/shmem/Makefile.am
index 07d311d2f..309eceb92 100644
--- a/platform/linux-generic/test/validation/api/shmem/Makefile.am
+++ b/platform/linux-generic/test/validation/api/shmem/Makefile.am
@@ -5,8 +5,7 @@ test_PROGRAMS = shmem_linux shmem_odp1 shmem_odp2
#shmem_linux is stand alone, pure linux (no ODP):
shmem_linux_SOURCES = shmem_linux.c shmem_linux.h shmem_common.h
-shmem_linux_CFLAGS = $(AM_CFLAGS) -I$(top_builddir)/include
-shmem_linux_LDFLAGS = $(AM_LDFLAGS) -lrt
+shmem_linux_LDFLAGS =
shmem_linux_LDADD =
#shmem_odp1 and shmem_odp2 are the 2 ODP processes:
diff --git a/scripts/ci/build.sh b/scripts/ci/build.sh
index d9f50225b..ce132faeb 100755
--- a/scripts/ci/build.sh
+++ b/scripts/ci/build.sh
@@ -28,17 +28,16 @@ ODP_LIB_NAME=libodp-dpdk
fi
# Additional warning checks
-EXTRA_CHECKS="-Werror -Wall -Wextra -Wconversion -Wfloat-equal -Wpacked"
+EXTRA_CHECKS="-Werror -Wall -Wextra -Wfloat-equal -Wpacked"
# Ignore clang warning about large atomic operations causing significant performance penalty
if [ "${CC#clang}" != "${CC}" ] ; then
EXTRA_CHECKS="${EXTRA_CHECKS} -Wno-unknown-warning-option -Wno-atomic-alignment"
fi
# Ignore warnings from aarch64 DPDK internals
if [ "${TARGET_ARCH}" == "aarch64-linux-gnu" ] ; then
- EXTRA_CHECKS="${EXTRA_CHECKS} -Wno-conversion -Wno-packed"
+ EXTRA_CHECKS="${EXTRA_CHECKS} -Wno-packed"
fi
-CC="${CC:-${TARGET_ARCH}-gcc}"
${CC} ${CFLAGS} ${EXTRA_CHECKS} ${OLDPWD}/example/sysinfo/odp_sysinfo.c -o odp_sysinfo_inst_dynamic \
`PKG_CONFIG_PATH=/opt/odp/lib/pkgconfig:${PKG_CONFIG_PATH} ${PKG_CONFIG} --cflags --libs ${ODP_LIB_NAME}`
diff --git a/scripts/ci/build_armhf.sh b/scripts/ci/build_armhf.sh
index 36f54b047..4ae0f19b1 100755
--- a/scripts/ci/build_armhf.sh
+++ b/scripts/ci/build_armhf.sh
@@ -9,9 +9,12 @@ else
export CC="${TARGET_ARCH}-gcc"
export CXX="${TARGET_ARCH}-g++"
fi
-export CFLAGS="-march=armv7-a -mfpu=neon"
+export CFLAGS="-march=armv7-a"
export CXXFLAGS="-march=armv7-a"
+# No DPDK on ARMv7
+export CONF="${CONF} --disable-dpdk"
+
# Use target libraries
export PKG_CONFIG_PATH=
export PKG_CONFIG_LIBDIR=/usr/lib/${TARGET_ARCH}/pkgconfig
diff --git a/scripts/ci/build_static_x86_64.sh b/scripts/ci/build_static_x86_64.sh
new file mode 100755
index 000000000..f32873afc
--- /dev/null
+++ b/scripts/ci/build_static_x86_64.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+set -e
+
+CONFIG_OPT="--prefix=/opt/odp ${CONF}"
+
+cd "$(dirname "$0")"/../..
+./bootstrap
+echo "./configure $CONFIG_OPT"
+./configure $CONFIG_OPT
+
+make clean
+
+make -j $(nproc)
+
+make install
+
+# Build and run sysinfo with installed libs
+pushd ${HOME}
+
+# Default ODP library name
+if [ -z "$ODP_LIB_NAME" ] ; then
+ODP_LIB_NAME=libodp-dpdk
+fi
+
+${CC} ${CFLAGS} ${OLDPWD}/example/sysinfo/odp_sysinfo.c -static -o odp_sysinfo_inst_static `PKG_CONFIG_PATH=/opt/odp/lib/pkgconfig:${PKG_CONFIG_PATH} pkg-config --cflags --libs --static ${ODP_LIB_NAME}`
+
+echo 1500 | tee /proc/sys/vm/nr_hugepages
+mkdir -p /mnt/huge
+mount -t hugetlbfs nodev /mnt/huge
+
+./odp_sysinfo_inst_static
+
+umount /mnt/huge
+
+popd
diff --git a/scripts/ci/build_x86_64.sh b/scripts/ci/build_x86_64.sh
index 115c0bf26..cf94b88e5 100755
--- a/scripts/ci/build_x86_64.sh
+++ b/scripts/ci/build_x86_64.sh
@@ -1,7 +1,6 @@
#!/bin/bash
set -e
-export TARGET_ARCH=x86_64-linux-gnu
if [ "${CC#clang}" != "${CC}" ] ; then
export CXX="clang++"
sed -i 's/ODP_CHECK_CFLAG(\[\-Wcast-align\])/#ODP_CHECK_CFLAG(\[\-Wcast-align\])/g' /odp/configure.ac
diff --git a/scripts/ci/doxygen.sh b/scripts/ci/doxygen.sh
deleted file mode 100755
index e8972b00b..000000000
--- a/scripts/ci/doxygen.sh
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/bin/bash
-set -e
-
-export TARGET_ARCH=x86_64-linux-gnu
-if [ "${CC#clang}" != "${CC}" ] ; then
- export CXX="clang++"
-fi
-
-exec "$(dirname "$0")"/build.sh
-make doxygen-doc 2>&1 |tee doxygen.log
-fgrep -rq warning ./doxygen.log
-if [ $? -eq 0 ]; then
- exit -1
-else
- exit 0
-fi
-
diff --git a/test/Makefile.inc b/test/Makefile.inc
index 525a196a5..f5235f471 100644
--- a/test/Makefile.inc
+++ b/test/Makefile.inc
@@ -23,12 +23,10 @@ AM_CPPFLAGS = \
$(HELPER_INCLUDES) \
-I$(top_srcdir)/test/common
-AM_CFLAGS = $(CUNIT_CFLAGS)
+AM_CFLAGS += $(CUNIT_CFLAGS)
if STATIC_APPS
-AM_LDFLAGS = -L$(LIB) -static
-else
-AM_LDFLAGS =
+AM_LDFLAGS += -static
endif
AM_LDFLAGS += $(PLAT_DEP_LIBS)
diff --git a/test/common/odp_cunit_common.c b/test/common/odp_cunit_common.c
index 501e5fc98..5ce7fd791 100644
--- a/test/common/odp_cunit_common.c
+++ b/test/common/odp_cunit_common.c
@@ -182,7 +182,7 @@ static int run_thread(void *arg)
return rc;
}
-int odp_cunit_thread_create(int num, int func_ptr(void *), void *const arg[], int priv)
+int odp_cunit_thread_create(int num, int func_ptr(void *), void *const arg[], int priv, int sync)
{
int i, ret;
odp_cpumask_t cpumask;
@@ -228,6 +228,7 @@ int odp_cunit_thread_create(int num, int func_ptr(void *), void *const arg[], in
thr_common.instance = instance;
thr_common.cpumask = &cpumask;
thr_common.share_param = !priv;
+ thr_common.sync = sync;
/* Create and start additional threads */
ret = odph_thread_create(thread_tbl, &thr_common, thr_param, num);
diff --git a/test/common/odp_cunit_common.h b/test/common/odp_cunit_common.h
index 5959163d3..242747f0c 100644
--- a/test/common/odp_cunit_common.h
+++ b/test/common/odp_cunit_common.h
@@ -78,9 +78,11 @@ int odp_cunit_run(void);
*
* Thread arguments table (arg[]) can be set to NULL, when there are no arguments.
* When 'priv' is 0, the same argument pointer (arg[0]) is passed to all threads. Otherwise,
- * a pointer is passed (from arg[]) to each thread. Returns 0 on success.
+ * a pointer is passed (from arg[]) to each thread. When 'sync' is 1, thread
+ * creation is synchronized (odph_thread_common_param_t.sync). Returns 0 on success.
*/
-int odp_cunit_thread_create(int num, int func_ptr(void *arg), void *const arg[], int priv);
+int odp_cunit_thread_create(int num, int func_ptr(void *arg), void *const arg[],
+ int priv, int sync);
/* Wait for previously created threads to exit */
int odp_cunit_thread_join(int num);
diff --git a/test/miscellaneous/.gitignore b/test/miscellaneous/.gitignore
index 6e555c58e..6069e336d 100644
--- a/test/miscellaneous/.gitignore
+++ b/test/miscellaneous/.gitignore
@@ -1,3 +1,4 @@
odp_api_from_cpp
+odp_api_headers
*.trs
*.log
diff --git a/test/miscellaneous/Makefile.am b/test/miscellaneous/Makefile.am
index 3ea5d5e84..e8bcf48de 100644
--- a/test/miscellaneous/Makefile.am
+++ b/test/miscellaneous/Makefile.am
@@ -6,3 +6,10 @@ TESTS = odp_api_from_cpp
endif
odp_api_from_cpp_SOURCES = odp_api_from_cpp.cpp
+
+noinst_PROGRAMS = odp_api_headers
+odp_api_headers_CFLAGS = $(AM_CFLAGS)
+if ODP_ABI_COMPAT
+odp_api_headers_CFLAGS += -Wconversion
+endif
+odp_api_headers_SOURCES = odp_api_headers.c
diff --git a/test/miscellaneous/odp_api_headers.c b/test/miscellaneous/odp_api_headers.c
new file mode 100644
index 000000000..f81a9c58b
--- /dev/null
+++ b/test/miscellaneous/odp_api_headers.c
@@ -0,0 +1,14 @@
+/* Copyright (c) 2022, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+int main(void)
+{
+ return 0;
+}
diff --git a/test/performance/.gitignore b/test/performance/.gitignore
index 6a891a146..22abea846 100644
--- a/test/performance/.gitignore
+++ b/test/performance/.gitignore
@@ -8,6 +8,7 @@ odp_crc
odp_crypto
odp_dma_perf
odp_ipsec
+odp_ipsecfwd
odp_l2fwd
odp_lock_perf
odp_mem_perf
diff --git a/test/performance/Makefile.am b/test/performance/Makefile.am
index 1323bca4a..c22980e11 100644
--- a/test/performance/Makefile.am
+++ b/test/performance/Makefile.am
@@ -16,6 +16,7 @@ COMPILE_ONLY = odp_cpu_bench \
odp_crypto \
odp_dma_perf \
odp_ipsec \
+ odp_ipsecfwd \
odp_l2fwd \
odp_packet_gen \
odp_pktio_ordered \
@@ -56,6 +57,7 @@ odp_crc_SOURCES = odp_crc.c
odp_crypto_SOURCES = odp_crypto.c
odp_dma_perf_SOURCES = odp_dma_perf.c
odp_ipsec_SOURCES = odp_ipsec.c
+odp_ipsecfwd_SOURCES = odp_ipsecfwd.c
odp_lock_perf_SOURCES = odp_lock_perf.c
odp_mem_perf_SOURCES = odp_mem_perf.c
odp_packet_gen_SOURCES = odp_packet_gen.c
diff --git a/test/performance/odp_crypto.c b/test/performance/odp_crypto.c
index 45e770d42..46eb7141e 100644
--- a/test/performance/odp_crypto.c
+++ b/test/performance/odp_crypto.c
@@ -942,6 +942,9 @@ static int check_cipher_params(const odp_crypto_capability_t *crypto_capa,
return 1;
num = odp_crypto_cipher_capability(param->cipher_alg, NULL, 0);
+ if (num <= 0)
+ return 1;
+
odp_crypto_cipher_capability_t cipher_capa[num];
rc = odp_crypto_cipher_capability(param->cipher_alg, cipher_capa, num);
@@ -984,6 +987,9 @@ static int check_auth_params(const odp_crypto_capability_t *crypto_capa,
return 1;
num = odp_crypto_auth_capability(param->auth_alg, NULL, 0);
+ if (num <= 0)
+ return 1;
+
odp_crypto_auth_capability_t auth_capa[num];
rc = odp_crypto_auth_capability(param->auth_alg, auth_capa, num);
diff --git a/test/performance/odp_ipsecfwd.c b/test/performance/odp_ipsecfwd.c
new file mode 100644
index 000000000..b917a976e
--- /dev/null
+++ b/test/performance/odp_ipsecfwd.c
@@ -0,0 +1,1454 @@
+/* Copyright (c) 2022, Nokia
+ *
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE
+#endif
+
+#include <stdlib.h>
+#include <signal.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <string.h>
+
+#include <errno.h>
+
+#include <odp_api.h>
+#include <odp/helper/odph_api.h>
+
+#define PROG_NAME "odp_ipsecfwd"
+#define SHORT_PROG_NAME "ipsfwd"
+#define DELIMITER ","
+
+#define MIN(a, b) (((a) <= (b)) ? (a) : (b))
+
+#define MAX_IFS 2U
+#define MAX_SAS 4000U
+#define MAX_FWDS 64U
+#define MAX_SPIS (UINT16_MAX + 1U)
+#define MAX_WORKERS (ODP_THREAD_COUNT_MAX - 1)
+#define MAX_QUEUES 64U
+#define MAX_SA_QUEUES 1024U
+#define PKT_SIZE 1024U
+#define PKT_CNT 32768U
+#define MAX_BURST 32U
+#define ORDERED 0U
+
+#define ALG_ENTRY(_alg_name, _type) \
+ { \
+ .idx = (_alg_name), \
+ .type = (_type), \
+ .name = #_alg_name \
+ }
+
+enum {
+ CIPHER_TYPE,
+ COMB_CIPHER_TYPE,
+ AUTH_TYPE,
+ COMB_AUTH_TYPE
+};
+
+typedef enum {
+ PRS_OK,
+ PRS_NOK,
+ PRS_TERM
+} parse_result_t;
+
+enum {
+ DIR_IN = 0,
+ DIR_OUT
+};
+
+typedef struct pktio_s pktio_t;
+
+typedef struct pktio_s {
+ union {
+ odp_pktout_queue_t out_dir_qs[MAX_QUEUES];
+ odp_queue_t out_ev_qs[MAX_QUEUES];
+ };
+
+ odph_ethaddr_t src_mac;
+ char *name;
+ odp_pktio_t handle;
+ odp_bool_t (*send_fn)(const pktio_t *pktio, uint8_t index, odp_packet_t pkt);
+ uint32_t num_tx_qs;
+} pktio_t;
+
+typedef struct {
+ odph_ethaddr_t dst_mac;
+ const pktio_t *pktio;
+ odph_iplookup_prefix_t prefix;
+} fwd_entry_t;
+
+typedef struct {
+ uint64_t ipsec_in_pkts;
+ uint64_t ipsec_out_pkts;
+ uint64_t ipsec_in_errs;
+ uint64_t ipsec_out_errs;
+ uint64_t status_errs;
+ uint64_t fwd_pkts;
+ uint64_t discards;
+} stats_t;
+
+typedef struct prog_config_s prog_config_t;
+
+typedef struct ODP_ALIGNED_CACHE {
+ stats_t stats;
+ prog_config_t *prog_config;
+} thread_config_t;
+
+typedef struct prog_config_s {
+ odph_thread_t thread_tbl[MAX_WORKERS];
+ thread_config_t thread_config[MAX_WORKERS];
+ odp_ipsec_sa_t sas[MAX_SAS];
+ fwd_entry_t fwd_entries[MAX_FWDS];
+ odp_queue_t sa_qs[MAX_SA_QUEUES];
+ pktio_t pktios[MAX_IFS];
+ char *sa_conf_file;
+ char *fwd_conf_file;
+ odp_instance_t odp_instance;
+ odp_queue_t compl_q;
+ odp_pool_t pktio_pool;
+ odph_table_t fwd_tbl;
+ odp_barrier_t init_barrier;
+ odp_barrier_t term_barrier;
+ uint32_t num_input_qs;
+ uint32_t num_sa_qs;
+ uint32_t num_output_qs;
+ uint32_t num_pkts;
+ uint32_t pkt_len;
+ uint32_t num_ifs;
+ uint32_t num_sas;
+ uint32_t num_fwds;
+ int num_thrs;
+ uint8_t mode;
+} prog_config_t;
+
+typedef struct {
+ const char *name;
+ int idx;
+ int type;
+} exposed_alg_t;
+
+static exposed_alg_t exposed_algs[] = {
+ ALG_ENTRY(ODP_CIPHER_ALG_NULL, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_DES, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_3DES_CBC, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_CBC, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_CTR, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_ECB, CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_GCM, COMB_CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_AES_CCM, COMB_CIPHER_TYPE),
+ ALG_ENTRY(ODP_CIPHER_ALG_CHACHA20_POLY1305, COMB_CIPHER_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_NULL, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_MD5_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA1_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA224_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA256_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA384_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_SHA512_HMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_GCM, COMB_AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_GMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_CCM, COMB_AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_CMAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_AES_XCBC_MAC, AUTH_TYPE),
+ ALG_ENTRY(ODP_AUTH_ALG_CHACHA20_POLY1305, COMB_AUTH_TYPE)
+};
+
+/* SPIs for in and out directions */
+static odp_ipsec_sa_t *spi_to_sa_map[2U][MAX_SPIS];
+static odp_atomic_u32_t is_running;
+static const int ipsec_out_mark;
+
+static void init_config(prog_config_t *config)
+{
+ memset(config, 0, sizeof(*config));
+ config->compl_q = ODP_QUEUE_INVALID;
+ config->pktio_pool = ODP_POOL_INVALID;
+ config->num_input_qs = 1;
+ config->num_sa_qs = 1;
+ config->num_output_qs = 1;
+ config->num_thrs = 1;
+}
+
+static void terminate(int signal ODP_UNUSED)
+{
+ odp_atomic_store_u32(&is_running, 0U);
+}
+
+static void parse_interfaces(prog_config_t *config, const char *optarg)
+{
+ char *tmp_str = strdup(optarg), *tmp;
+
+ if (tmp_str == NULL)
+ return;
+
+ tmp = strtok(tmp_str, DELIMITER);
+
+ while (tmp && config->num_ifs < MAX_IFS) {
+ config->pktios[config->num_ifs].name = strdup(tmp);
+
+ if (config->pktios[config->num_ifs].name != NULL)
+ ++config->num_ifs;
+
+ tmp = strtok(NULL, DELIMITER);
+ }
+
+ free(tmp_str);
+}
+
+static void print_supported_algos(const odp_ipsec_capability_t *ipsec_capa)
+{
+ int c_cnt, a_cnt;
+ const size_t len = sizeof(exposed_algs) / sizeof(exposed_algs[0]);
+
+ printf(" Cipher algorithms:\n");
+
+ for (size_t i = 0U; i < len; ++i) {
+ if ((exposed_algs[i].type == CIPHER_TYPE ||
+ exposed_algs[i].type == COMB_CIPHER_TYPE) &&
+ (ipsec_capa->ciphers.all_bits & (1 << exposed_algs[i].idx)) > 0U) {
+ c_cnt = odp_ipsec_cipher_capability(exposed_algs[i].idx, NULL, 0);
+
+ if (c_cnt < 0)
+ continue;
+
+ printf(" %d: %s",
+ exposed_algs[i].idx, exposed_algs[i].name);
+ printf(exposed_algs[i].type == COMB_CIPHER_TYPE ? " (combined)" : "");
+
+ odp_ipsec_cipher_capability_t capa[c_cnt];
+
+ (void)odp_ipsec_cipher_capability(exposed_algs[i].idx, capa, c_cnt);
+
+ for (int j = 0; j < c_cnt; ++j)
+ printf(j == 0 ? " (key lengths: %u" : ", %u", capa[j].key_len);
+
+ printf(")\n");
+ }
+ }
+
+ printf(" Authentication algorithms:\n");
+
+ for (size_t i = 0U; i < len; ++i) {
+ if ((exposed_algs[i].type == AUTH_TYPE ||
+ exposed_algs[i].type == COMB_AUTH_TYPE) &&
+ (ipsec_capa->auths.all_bits & (1 << exposed_algs[i].idx)) > 0U) {
+ a_cnt = odp_ipsec_auth_capability(exposed_algs[i].idx, NULL, 0);
+
+ if (a_cnt < 0)
+ continue;
+
+ printf(" %d: %s",
+ exposed_algs[i].idx, exposed_algs[i].name);
+ printf(exposed_algs[i].type == COMB_AUTH_TYPE ? " (combined)" : "");
+
+ odp_ipsec_auth_capability_t capa[a_cnt];
+
+ (void)odp_ipsec_auth_capability(exposed_algs[i].idx, capa, a_cnt);
+
+ for (int j = 0; j < a_cnt; ++j)
+ printf(j == 0 ? " (key/icv lengths: %u/%u" : ", %u/%u",
+ capa[j].key_len, capa[j].icv_len);
+
+ printf(")\n");
+ }
+ }
+}
+
+static void print_usage(void)
+{
+ odp_pool_capability_t pool_capa;
+ odp_ipsec_capability_t ipsec_capa;
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("Error querying pool capabilities\n");
+ return;
+ }
+
+ if (odp_ipsec_capability(&ipsec_capa) < 0) {
+ ODPH_ERR("Error querying IPsec capabilities\n");
+ return;
+ }
+
+ printf("\n"
+ "Simple IPsec performance tester. Forward and process plain and ipsec packets.\n"
+ "\n"
+ "Examples:\n"
+ " %s -i ens9f1 -s /etc/odp/sa.conf -f /etc/odp/fwd.conf\n"
+ "\n"
+ " With sa.conf containing, for example:\n"
+ " 0 222 192.168.1.10 192.168.1.16 4 jWnZr4t7w!zwC*F- 0 2"
+ " n2r5u7x!A%%D*G-KaPdSg 0 12\n"
+ "\n"
+ " With fwd.conf containing, for example:\n"
+ " 192.168.1.0/24 ens9f1 aa:bb:cc:dd:11:22\n"
+ "\n"
+ "Usage: %s [options]\n"
+ "\n"
+ " -i, --interfaces Ethernet interfaces for packet I/O, comma-separated,\n"
+ " no spaces.\n"
+ " -n, --num_pkts Number of packet buffers allocated for packet I/O pool.\n"
+ " %u by default.\n"
+ " -l, --pkt_len Maximum size of packet buffers in packet I/O pool. %u by\n"
+ " default.\n"
+ " -c, --count Worker thread count, 1 by default.\n"
+ " -m, --mode Queueing mode.\n"
+ " 0: ordered (default)\n"
+ " 1: parallel\n"
+ " -s, --sa SA configuration file. Individual SA configuration is\n"
+ " expected to be within a single line, values whitespace\n"
+ " separated:\n"
+ "\n"
+ " <line in file> Dir SPI TunSrcIPv4 TunDstIPv4"
+ " CipherAlgoIdx CipherKey CipherKeyExtra AuthAlgIdx AuthKey AuthKeyExtra ICVLen\n"
+ "\n"
+ " With combined algorithms, authentication data is ignored.\n"
+ " Traffic is mapped to SAs based on UDP port: the port is\n"
+ " used as the SPI. Non-zero Dir value declares an outbound\n"
+ " SA whereas zero Dir value declares an inbound SA.\n"
+ "\n"
+ " Supported cipher and authentication algorithms:\n",
+ PROG_NAME, PROG_NAME, MIN(pool_capa.pkt.max_num, PKT_CNT),
+ MIN(pool_capa.pkt.max_len, PKT_SIZE));
+ print_supported_algos(&ipsec_capa);
+ printf(" -f, --fwd_table Forwarding configuration file. Individual forwarding\n"
+ " configuration is expected to be within a single line,\n"
+ " values whitespace separated:\n"
+ "\n"
+ " <line in file> IPv4Prefix/MaskLen NetIf DstMac\n"
+ "\n"
+ " IPv4Prefix and MaskLen define a matchable prefix and NetIf\n"
+ " and DstMac define the outgoing interface and destination\n"
+ " MAC address for a match. NetIf should be one of the\n"
+ " interfaces passed with \"--interfaces\" option\n"
+ " -I, --num_input_qs Input queue count. 1 by default.\n"
+ " -S, --num_sa_qs SA queue count. 1 by default.\n"
+ " -O, --num_output_qs Output queue count. 1 by default.\n"
+ " -h, --help This help.\n"
+ "\n");
+}
+
+static odp_bool_t setup_ipsec(prog_config_t *config)
+{
+ odp_queue_param_t q_param;
+ odp_ipsec_config_t ipsec_config;
+ char q_name[ODP_QUEUE_NAME_LEN];
+
+ snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_status");
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ q_param.sched.prio = odp_schedule_default_prio();
+ q_param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q_param.sched.group = ODP_SCHED_GROUP_ALL;
+ config->compl_q = odp_queue_create(q_name, &q_param);
+
+ if (config->compl_q == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Error creating IPsec completion queue\n");
+ return false;
+ }
+
+ odp_ipsec_config_init(&ipsec_config);
+ ipsec_config.inbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ ipsec_config.outbound_mode = ODP_IPSEC_OP_MODE_ASYNC;
+ ipsec_config.inbound.default_queue = config->compl_q;
+ /* For tunnel-to-tunnel traffic, parse up to L4 so the UDP port can be checked for SA lookup. */
+ ipsec_config.inbound.parse_level = ODP_PROTO_LAYER_L4;
+
+ if (odp_ipsec_config(&ipsec_config) < 0) {
+ ODPH_ERR("Error configuring IPsec\n");
+ return false;
+ }
+
+ return true;
+}
+
+static odp_bool_t create_sa_dest_queues(odp_ipsec_capability_t *ipsec_capa,
+ prog_config_t *config)
+{
+ odp_queue_param_t q_param;
+ const uint32_t max_sa_qs = MIN(MAX_SA_QUEUES, ipsec_capa->max_queues);
+
+ if (config->num_sa_qs == 0U || config->num_sa_qs > max_sa_qs) {
+ ODPH_ERR("Invalid number of SA queues: %u (min: 1, max: %u)\n", config->num_sa_qs,
+ max_sa_qs);
+ config->num_sa_qs = 0U;
+ return false;
+ }
+
+ for (uint32_t i = 0U; i < config->num_sa_qs; ++i) {
+ char q_name[ODP_QUEUE_NAME_LEN];
+
+ snprintf(q_name, sizeof(q_name), SHORT_PROG_NAME "_sa_compl_%u", i);
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ q_param.sched.prio = odp_schedule_max_prio();
+ q_param.sched.sync = config->mode == ORDERED ? ODP_SCHED_SYNC_ORDERED :
+ ODP_SCHED_SYNC_PARALLEL;
+ q_param.sched.group = ODP_SCHED_GROUP_ALL;
+ config->sa_qs[i] = odp_queue_create(q_name, &q_param);
+
+ if (config->sa_qs[i] == ODP_QUEUE_INVALID) {
+ ODPH_ERR("Error creating SA destination queue (created count: %u)\n", i);
+ config->num_sa_qs = i;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void create_sa_entry(uint32_t dir, uint32_t spi, const char *src_ip_str,
+ const char *dst_ip_str, int cipher_idx, uint8_t *cipher_key,
+ uint8_t *cipher_key_extra, int auth_idx, uint8_t *auth_key,
+ uint8_t *auth_key_extra, uint32_t icv_len, uint32_t ar_ws,
+ uint32_t max_num_sa, prog_config_t *config)
+{
+ uint32_t src_ip, dst_ip;
+ odp_ipsec_sa_param_t sa_param;
+ odp_ipsec_crypto_param_t crypto_param;
+ odp_ipsec_sa_t sa;
+
+ if (config->num_sas == max_num_sa) {
+ ODPH_ERR("Maximum number of SAs parsed (%u), ignoring rest\n", max_num_sa);
+ return;
+ }
+
+ if (odph_ipv4_addr_parse(&src_ip, src_ip_str) < 0 ||
+ odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) {
+ ODPH_ERR("Error parsing IP addresses for SA %u\n", spi);
+ return;
+ }
+
+ if (spi > UINT16_MAX) {
+ ODPH_ERR("Unsupported SPI value for SA %u (> %u)\n", spi, UINT16_MAX);
+ return;
+ }
+
+ if (spi_to_sa_map[dir][spi] != NULL) {
+ ODPH_ERR("Non-unique SPIs not supported for SA %u\n", spi);
+ return;
+ }
+
+ src_ip = odp_cpu_to_be_32(src_ip);
+ dst_ip = odp_cpu_to_be_32(dst_ip);
+ odp_ipsec_sa_param_init(&sa_param);
+ sa_param.proto = ODP_IPSEC_ESP;
+ sa_param.mode = ODP_IPSEC_MODE_TUNNEL;
+ sa_param.spi = spi;
+ sa_param.dest_queue = config->sa_qs[config->num_sas % config->num_sa_qs];
+
+ if (dir > 0U) {
+ sa_param.dir = ODP_IPSEC_DIR_OUTBOUND;
+ sa_param.outbound.tunnel.ipv4.src_addr = &src_ip;
+ sa_param.outbound.tunnel.ipv4.dst_addr = &dst_ip;
+ } else {
+ sa_param.dir = ODP_IPSEC_DIR_INBOUND;
+ sa_param.inbound.lookup_mode = ODP_IPSEC_LOOKUP_DISABLED;
+ sa_param.inbound.antireplay_ws = ar_ws;
+ }
+
+ crypto_param.cipher_alg = cipher_idx;
+ crypto_param.cipher_key.data = cipher_key;
+ crypto_param.cipher_key.length = strlen((const char *)cipher_key);
+ crypto_param.cipher_key_extra.data = cipher_key_extra;
+ crypto_param.cipher_key_extra.length = strlen((const char *)cipher_key_extra);
+ crypto_param.auth_alg = auth_idx;
+ crypto_param.auth_key.data = auth_key;
+ crypto_param.auth_key.length = strlen((const char *)auth_key);
+ crypto_param.auth_key_extra.data = auth_key_extra;
+ crypto_param.auth_key_extra.length = strlen((const char *)auth_key_extra);
+ crypto_param.icv_len = icv_len;
+ sa_param.crypto = crypto_param;
+ sa = odp_ipsec_sa_create(&sa_param);
+
+ if (sa == ODP_IPSEC_SA_INVALID) {
+ ODPH_ERR("Error creating SA handle for SA %u\n", spi);
+ return;
+ }
+
+ config->sas[config->num_sas] = sa;
+ spi_to_sa_map[dir][spi] = &config->sas[config->num_sas];
+ ++config->num_sas;
+}
+
+static void parse_sas(prog_config_t *config)
+{
+ odp_ipsec_capability_t ipsec_capa;
+ FILE *file;
+ int cipher_idx, auth_idx;
+ uint32_t ar_ws, max_num_sa, dir, spi, icv_len;
+ char src_ip[16U] = { 0 }, dst_ip[16U] = { 0 };
+ uint8_t cipher_key[65U] = { 0U }, cipher_key_extra[5U] = { 0U }, auth_key[65U] = { 0U },
+ auth_key_extra[5U] = { 0U };
+
+ if (config->sa_conf_file == NULL)
+ return;
+
+ if (odp_ipsec_capability(&ipsec_capa) < 0) {
+ ODPH_ERR("Error querying IPsec capabilities\n");
+ return;
+ }
+
+ if (!setup_ipsec(config))
+ return;
+
+ if (!create_sa_dest_queues(&ipsec_capa, config))
+ return;
+
+ file = fopen(config->sa_conf_file, "r");
+
+ if (file == NULL) {
+ ODPH_ERR("Error opening SA configuration file: %s\n", strerror(errno));
+ return;
+ }
+
+ ar_ws = MIN(32U, ipsec_capa.max_antireplay_ws);
+ max_num_sa = MIN(MAX_SAS, ipsec_capa.max_num_sa);
+
+ while (fscanf(file, "%u%u%s%s%d%s%s%d%s%s%u", &dir, &spi, src_ip, dst_ip,
+ &cipher_idx, cipher_key, cipher_key_extra, &auth_idx, auth_key,
+ auth_key_extra, &icv_len) == 11)
+ create_sa_entry(!!dir, spi, src_ip, dst_ip, cipher_idx, cipher_key,
+ cipher_key_extra, auth_idx, auth_key, auth_key_extra, icv_len,
+ ar_ws, max_num_sa, config);
+
+ (void)fclose(file);
+}
+
+static const pktio_t *get_pktio(const char *iface, const prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ if (strcmp(iface, config->pktios[i].name) == 0)
+ return &config->pktios[i];
+ }
+
+ return NULL;
+}
+
+static void create_fwd_table_entry(const char *dst_ip_str, const char *iface,
+ const char *dst_mac_str, uint8_t mask, prog_config_t *config)
+{
+ fwd_entry_t *entry;
+ odph_ethaddr_t dst_mac;
+ uint32_t dst_ip;
+ odph_iplookup_prefix_t prefix;
+
+ if (config->num_fwds == MAX_FWDS) {
+ ODPH_ERR("Maximum number of forwarding entries parsed (%u), ignoring rest\n",
+ MAX_FWDS);
+ return;
+ }
+
+ entry = &config->fwd_entries[config->num_fwds];
+
+ if (odph_eth_addr_parse(&dst_mac, dst_mac_str) < 0 ||
+ odph_ipv4_addr_parse(&dst_ip, dst_ip_str) < 0) {
+ ODPH_ERR("Error parsing MAC and IP addresses for forwarding entry\n");
+ return;
+ }
+
+ entry->pktio = get_pktio(iface, config);
+
+ if (entry->pktio == NULL) {
+ ODPH_ERR("Invalid interface in forwarding entry: %s\n", iface);
+ return;
+ }
+
+ entry->dst_mac = dst_mac;
+ prefix.ip = dst_ip;
+ prefix.cidr = mask;
+ entry->prefix = prefix;
+ ++config->num_fwds;
+}
+
+static void parse_fwd_table(prog_config_t *config)
+{
+ FILE *file;
+ char dst_ip[16U] = { 0 }, iface[64U] = { 0 }, dst_mac[18U] = { 0 };
+ uint32_t mask;
+
+ if (config->fwd_conf_file == NULL) {
+ ODPH_ERR("Invalid forwarding configuration file\n");
+ return;
+ }
+
+ file = fopen(config->fwd_conf_file, "r");
+
+ if (file == NULL) {
+ ODPH_ERR("Error opening forwarding configuration file: %s\n", strerror(errno));
+ return;
+ }
+
+ while (fscanf(file, " %[^/]/%u%s%s", dst_ip, &mask, iface, dst_mac) == 4)
+ create_fwd_table_entry(dst_ip, iface, dst_mac, mask, config);
+
+ (void)fclose(file);
+}
+
+static parse_result_t check_options(prog_config_t *config)
+{
+ odp_pool_capability_t pool_capa;
+
+ if (odp_pool_capability(&pool_capa) < 0) {
+ ODPH_ERR("Error querying pool capabilities\n");
+ return PRS_NOK;
+ }
+
+ if (config->num_ifs == 0U) {
+ ODPH_ERR("Invalid number of interfaces: %u (min: 1, max: %u)\n", config->num_ifs,
+ MAX_IFS);
+ return PRS_NOK;
+ }
+
+ if (config->sa_conf_file != NULL && config->num_sas == 0U) {
+ ODPH_ERR("Invalid SA configuration\n");
+ return PRS_NOK;
+ }
+
+ if (config->num_fwds == 0U) {
+ ODPH_ERR("Invalid number of forwarding entries: %u (min: 1, max: %u)\n",
+ config->num_fwds, MAX_FWDS);
+ return PRS_NOK;
+ }
+
+ if (config->num_pkts > pool_capa.pkt.max_num) {
+ ODPH_ERR("Invalid pool packet count: %u (max: %u)\n", config->num_pkts,
+ pool_capa.pkt.max_num);
+ return PRS_NOK;
+ }
+
+ if (config->num_pkts == 0U)
+ config->num_pkts = MIN(pool_capa.pkt.max_num, PKT_CNT);
+
+ if (config->pkt_len > pool_capa.pkt.max_len) {
+ ODPH_ERR("Invalid pool packet length: %u (max: %u)\n", config->pkt_len,
+ pool_capa.pkt.max_len);
+ return PRS_NOK;
+ }
+
+ if (config->pkt_len == 0U)
+ config->pkt_len = MIN(pool_capa.pkt.max_len, PKT_SIZE);
+
+ if (config->num_thrs <= 0 || config->num_thrs > MAX_WORKERS) {
+ ODPH_ERR("Invalid thread count: %d (min: 1, max: %d)\n", config->num_thrs,
+ MAX_WORKERS);
+ return PRS_NOK;
+ }
+
+ return PRS_OK;
+}
+
+static parse_result_t parse_options(int argc, char **argv, prog_config_t *config)
+{
+ int opt, long_index;
+
+ static const struct option longopts[] = {
+ { "interfaces", required_argument, NULL, 'i'},
+ { "num_pkts", required_argument, NULL, 'n'},
+ { "pkt_len", required_argument, NULL, 'l'},
+ { "count", required_argument, NULL, 'c' },
+ { "mode", required_argument, NULL, 'm' },
+ { "sa", required_argument, NULL, 's'},
+ { "fwd_table", required_argument, NULL, 'f' },
+ { "num_input_qs", required_argument, NULL, 'I' },
+ { "num_sa_qs", required_argument, NULL, 'S' },
+ { "num_output_qs", required_argument, NULL, 'O' },
+ { "help", no_argument, NULL, 'h' },
+ { NULL, 0, NULL, 0 }
+ };
+
+ static const char *shortopts = "i:n:l:c:m:s:f:I:S:O:h";
+
+ while (true) {
+ opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
+
+ if (opt == -1)
+ break;
+
+ switch (opt) {
+ case 'i':
+ parse_interfaces(config, optarg);
+ break;
+ case 'n':
+ config->num_pkts = atoi(optarg);
+ break;
+ case 'l':
+ config->pkt_len = atoi(optarg);
+ break;
+ case 'c':
+ config->num_thrs = atoi(optarg);
+ break;
+ case 'm':
+ config->mode = !!atoi(optarg);
+ break;
+ case 's':
+ config->sa_conf_file = strdup(optarg);
+ break;
+ case 'f':
+ config->fwd_conf_file = strdup(optarg);
+ break;
+ case 'I':
+ config->num_input_qs = atoi(optarg);
+ break;
+ case 'S':
+ config->num_sa_qs = atoi(optarg);
+ break;
+ case 'O':
+ config->num_output_qs = atoi(optarg);
+ break;
+ case 'h':
+ print_usage();
+ return PRS_TERM;
+ case '?':
+ default:
+ print_usage();
+ return PRS_NOK;
+ }
+ }
+
+ parse_sas(config);
+ parse_fwd_table(config);
+
+ return check_options(config);
+}
+
+static parse_result_t setup_program(int argc, char **argv, prog_config_t *config)
+{
+ struct sigaction action = { .sa_handler = terminate };
+
+ if (sigemptyset(&action.sa_mask) == -1 || sigaddset(&action.sa_mask, SIGINT) == -1 ||
+ sigaddset(&action.sa_mask, SIGTERM) == -1 ||
+ sigaddset(&action.sa_mask, SIGHUP) == -1 || sigaction(SIGINT, &action, NULL) == -1 ||
+ sigaction(SIGTERM, &action, NULL) == -1 || sigaction(SIGHUP, &action, NULL) == -1) {
+ ODPH_ERR("Error installing signal handler\n");
+ return PRS_NOK;
+ }
+
+ return parse_options(argc, argv, config);
+}
+
+static odp_bool_t send(const pktio_t *pktio, uint8_t index, odp_packet_t pkt)
+{
+ return odp_pktout_send(pktio->out_dir_qs[index], &pkt, 1) == 1;
+}
+
+static odp_bool_t enqueue(const pktio_t *pktio, uint8_t index, odp_packet_t pkt)
+{
+ return odp_queue_enq(pktio->out_ev_qs[index], odp_packet_to_event(pkt)) == 0;
+}
+
+static odp_bool_t setup_pktios(prog_config_t *config)
+{
+ odp_pool_param_t pool_param;
+ pktio_t *pktio;
+ odp_pktio_param_t pktio_param;
+ odp_pktin_queue_param_t pktin_param;
+ odp_pktio_capability_t capa;
+ odp_pktout_queue_param_t pktout_param;
+ odp_pktio_config_t pktio_config;
+ uint32_t max_output_qs;
+
+ odp_pool_param_init(&pool_param);
+ pool_param.pkt.seg_len = config->pkt_len;
+ pool_param.pkt.len = config->pkt_len;
+ pool_param.pkt.num = config->num_pkts;
+ pool_param.type = ODP_POOL_PACKET;
+ config->pktio_pool = odp_pool_create(PROG_NAME, &pool_param);
+
+ if (config->pktio_pool == ODP_POOL_INVALID) {
+ ODPH_ERR("Error creating packet I/O pool\n");
+ return false;
+ }
+
+ for (uint32_t i = 0U; i < config->num_ifs; ++i) {
+ pktio = &config->pktios[i];
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = config->mode == ORDERED ? ODP_PKTOUT_MODE_QUEUE :
+ ODP_PKTOUT_MODE_DIRECT;
+ pktio->handle = odp_pktio_open(pktio->name, config->pktio_pool, &pktio_param);
+
+ if (pktio->handle == ODP_PKTIO_INVALID) {
+ ODPH_ERR("Error opening packet I/O (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (odp_pktio_capability(pktio->handle, &capa) < 0) {
+ ODPH_ERR("Error querying packet I/O capabilities (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (config->num_input_qs == 0U || config->num_input_qs > capa.max_input_queues) {
+ ODPH_ERR("Invalid number of input queues for packet I/O: %u (min: 1, max: "
+ "%u) (%s)\n", config->num_input_qs, capa.max_input_queues,
+ pktio->name);
+ return false;
+ }
+
+ max_output_qs = MIN(MAX_QUEUES, capa.max_output_queues);
+
+ if (config->num_output_qs == 0U || config->num_output_qs > max_output_qs) {
+ ODPH_ERR("Invalid number of output queues for packet I/O: %u (min: 1, "
+ "max: %u) (%s)\n", config->num_output_qs, max_output_qs,
+ pktio->name);
+ return false;
+ }
+
+ odp_pktin_queue_param_init(&pktin_param);
+
+ if (config->mode == ORDERED)
+ pktin_param.queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+
+ if (config->num_input_qs > 1U) {
+ pktin_param.hash_enable = true;
+ pktin_param.hash_proto.proto.ipv4_udp = 1U;
+ pktin_param.num_queues = config->num_input_qs;
+ }
+
+ if (odp_pktin_queue_config(pktio->handle, &pktin_param) < 0) {
+ ODPH_ERR("Error configuring packet I/O input queues (%s)\n", pktio->name);
+ return false;
+ }
+
+ pktio->send_fn = config->mode == ORDERED ? enqueue : send;
+ pktio->num_tx_qs = config->num_output_qs;
+ odp_pktout_queue_param_init(&pktout_param);
+ pktout_param.num_queues = pktio->num_tx_qs;
+ pktout_param.op_mode = config->num_thrs > (int)pktio->num_tx_qs ?
+ ODP_PKTIO_OP_MT : ODP_PKTIO_OP_MT_UNSAFE;
+
+ if (odp_pktout_queue_config(pktio->handle, &pktout_param) < 0) {
+ ODPH_ERR("Error configuring packet I/O output queues (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (config->mode == ORDERED) {
+ if (odp_pktout_event_queue(pktio->handle, pktio->out_ev_qs,
+ pktio->num_tx_qs) != (int)pktio->num_tx_qs) {
+ ODPH_ERR("Error querying packet I/O output event queue (%s)\n",
+ pktio->name);
+ return false;
+ }
+ } else {
+ if (odp_pktout_queue(pktio->handle, pktio->out_dir_qs, pktio->num_tx_qs)
+ != (int)pktio->num_tx_qs) {
+ ODPH_ERR("Error querying packet I/O output queue (%s)\n",
+ pktio->name);
+ return false;
+ }
+ }
+
+ odp_pktio_config_init(&pktio_config);
+
+ if (odp_pktio_config(pktio->handle, &pktio_config) < 0) {
+ ODPH_ERR("Error configuring packet I/O extra options (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (odp_pktio_mac_addr(pktio->handle, &pktio->src_mac, sizeof(pktio->src_mac))
+ != sizeof(pktio->src_mac)) {
+ ODPH_ERR("Error getting packet I/O MAC address (%s)\n", pktio->name);
+ return false;
+ }
+
+ if (odp_pktio_start(pktio->handle) < 0) {
+ ODPH_ERR("Error starting packet I/O (%s)\n", pktio->name);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static odp_bool_t setup_fwd_table(prog_config_t *config)
+{
+ fwd_entry_t *fwd_e;
+
+ config->fwd_tbl = odph_iplookup_table_create(SHORT_PROG_NAME "_fwd_tbl", 0U, 0U,
+ sizeof(fwd_entry_t *));
+
+ if (config->fwd_tbl == NULL) {
+ ODPH_ERR("Error creating forwarding table\n");
+ return false;
+ }
+
+ for (uint32_t i = 0U; i < config->num_fwds; ++i) {
+ fwd_e = &config->fwd_entries[i];
+
+ if (odph_iplookup_table_put_value(config->fwd_tbl, &fwd_e->prefix, &fwd_e) < 0) {
+ ODPH_ERR("Error populating forwarding table\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static inline odp_ipsec_sa_t *get_in_sa(odp_packet_t pkt)
+{
+ odph_esphdr_t esp;
+ uint32_t spi;
+
+ if (!odp_packet_has_ipsec(pkt))
+ return NULL;
+
+ if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_ESPHDR_LEN, &esp) < 0)
+ return NULL;
+
+ spi = odp_be_to_cpu_32(esp.spi);
+
+ return spi <= UINT16_MAX ? spi_to_sa_map[DIR_IN][spi] : NULL;
+}
+
+static inline int process_ipsec_in(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
+{
+ odp_ipsec_in_param_t param;
+ int left, sent = 0, ret;
+
+ memset(&param, 0, sizeof(param));
+ /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
+ for (int i = 0; i < num; ++i)
+ odp_packet_user_ptr_set(pkts[i], NULL);
+
+ while (sent < num) {
+ left = num - sent;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_in_enq(&pkts[sent], left, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline odp_ipsec_sa_t *get_out_sa(odp_packet_t pkt)
+{
+ odph_udphdr_t udp;
+ uint16_t dst_port;
+
+ if (!odp_packet_has_udp(pkt))
+ return NULL;
+
+ if (odp_packet_copy_to_mem(pkt, odp_packet_l4_offset(pkt), ODPH_UDPHDR_LEN, &udp) < 0)
+ return NULL;
+
+ dst_port = odp_be_to_cpu_16(udp.dst_port);
+
+ return dst_port ? spi_to_sa_map[DIR_OUT][dst_port] : NULL;
+}
+
+static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sas[], int num)
+{
+ odp_ipsec_out_param_t param;
+ int left, sent = 0, ret;
+
+ memset(&param, 0, sizeof(param));
+ /* IPsec in/out need to be identified somehow, so use user_ptr for this. */
+ for (int i = 0; i < num; ++i)
+ odp_packet_user_ptr_set(pkts[i], &ipsec_out_mark);
+
+ while (sent < num) {
+ left = num - sent;
+ param.num_sa = left;
+ param.sa = &sas[sent];
+ ret = odp_ipsec_out_enq(&pkts[sent], left, &param);
+
+ if (odp_unlikely(ret <= 0))
+ break;
+
+ sent += ret;
+ }
+
+ return sent;
+}
+
+static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd_tbl,
+ uint8_t *hash)
+{
+ const uint32_t l3_off = odp_packet_l3_offset(pkt);
+ odph_ipv4hdr_t ipv4;
+ uint32_t dst_ip, src_ip;
+ fwd_entry_t *fwd;
+ odph_ethhdr_t eth;
+
+ if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0)
+ return NULL;
+
+ dst_ip = odp_be_to_cpu_32(ipv4.dst_addr);
+
+ if (odph_iplookup_table_get_value(fwd_tbl, &dst_ip, &fwd, 0U) < 0 || fwd == NULL)
+ return NULL;
+
+ if (l3_off != ODPH_ETHHDR_LEN) {
+ if (l3_off > ODPH_ETHHDR_LEN) {
+ if (odp_packet_pull_head(pkt, l3_off - ODPH_ETHHDR_LEN) == NULL)
+ return NULL;
+ } else {
+ if (odp_packet_push_head(pkt, ODPH_ETHHDR_LEN - l3_off) == NULL)
+ return NULL;
+ }
+ }
+
+ eth.dst = fwd->dst_mac;
+ eth.src = fwd->pktio->src_mac;
+ eth.type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
+
+ if (odp_packet_copy_from_mem(pkt, 0U, ODPH_ETHHDR_LEN, &eth) < 0)
+ return NULL;
+
+ src_ip = odp_be_to_cpu_32(ipv4.src_addr);
+ *hash = src_ip ^ dst_ip;
+
+ return fwd->pktio;
+}
+
+static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_t fwd_tbl)
+{
+ odp_packet_t pkt;
+ uint8_t hash = 0U;
+ const pktio_t *pktio;
+ uint32_t num_procd = 0U;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ pktio = lookup_and_apply(pkt, fwd_tbl, &hash);
+
+ if (pktio == NULL) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(!pktio->send_fn(pktio, hash % pktio->num_tx_qs, pkt))) {
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ ++num_procd;
+ }
+
+ return num_procd;
+}
+
+static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_fwd = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ sa = get_out_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_out(pkts_ips, sas, num_pkts_ips);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ num_procd = num_procd < 0 ? 0 : num_procd;
+ stats->ipsec_out_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+ }
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+static inline void process_packets_in(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST];
+ odp_ipsec_sa_t *sa, sas[MAX_BURST];
+ int num_pkts_ips = 0, num_pkts_out = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ ++stats->discards;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ sa = get_in_sa(pkt);
+
+ if (sa != NULL) {
+ sas[num_pkts_ips] = *sa;
+ pkts_ips[num_pkts_ips] = pkt;
+ ++num_pkts_ips;
+ } else {
+ pkts_out[num_pkts_out++] = pkt;
+ }
+ }
+
+ if (num_pkts_ips > 0) {
+ num_procd = process_ipsec_in(pkts_ips, sas, num_pkts_ips);
+
+ if (odp_unlikely(num_procd < num_pkts_ips)) {
+ num_procd = num_procd < 0 ? 0 : num_procd;
+ stats->ipsec_in_errs += num_pkts_ips - num_procd;
+ odp_packet_free_multi(&pkts_ips[num_procd], num_pkts_ips - num_procd);
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats);
+}
+
+static inline odp_bool_t is_ipsec_in(odp_packet_t pkt)
+{
+ return odp_packet_user_ptr(pkt) == NULL;
+}
+
+static inline void complete_ipsec_ops(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+ stats_t *stats)
+{
+ odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST];
+ odp_bool_t is_in;
+ odp_ipsec_packet_result_t result;
+ int num_pkts_out = 0, num_pkts_fwd = 0, num_procd;
+
+ for (int i = 0; i < num; ++i) {
+ pkt = pkts[i];
+ is_in = is_ipsec_in(pkt);
+
+ if (odp_unlikely(odp_ipsec_result(&result, pkt) < 0)) {
+ is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (odp_unlikely(result.status.all != ODP_IPSEC_OK)) {
+ is_in ? ++stats->ipsec_in_errs : ++stats->ipsec_out_errs;
+ odp_packet_free(pkt);
+ continue;
+ }
+
+ if (is_in) {
+ ++stats->ipsec_in_pkts;
+ pkts_out[num_pkts_out++] = pkt;
+ } else {
+ ++stats->ipsec_out_pkts;
+ pkts_fwd[num_pkts_fwd++] = pkt;
+ }
+ }
+
+ if (num_pkts_out > 0)
+ process_packets_out(pkts_out, num_pkts_out, fwd_tbl, stats);
+
+ if (num_pkts_fwd > 0) {
+ num_procd = forward_packets(pkts_fwd, num_pkts_fwd, fwd_tbl);
+ stats->discards += num_pkts_fwd - num_procd;
+ stats->fwd_pkts += num_procd;
+ }
+}
+
+static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats)
+{
+ odp_ipsec_status_t status;
+
+ if (odp_unlikely(odp_ipsec_status(&status, ev) < 0 || status.result < 0))
+ ++stats->status_errs;
+
+ odp_event_free(ev);
+}
+
+static void drain_events(void)
+{
+ odp_event_t ev;
+
+ while (true) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ break;
+
+ odp_event_free(ev);
+ }
+}
+
+static int process_packets(void *args)
+{
+ thread_config_t *config = args;
+ odp_event_t evs[MAX_BURST], ev;
+ int cnt;
+ odp_event_type_t type;
+ odp_event_subtype_t subtype;
+ odp_packet_t pkt, pkts_in[MAX_BURST], pkts_ips[MAX_BURST];
+ odph_table_t fwd_tbl = config->prog_config->fwd_tbl;
+ stats_t *stats = &config->stats;
+
+ odp_barrier_wait(&config->prog_config->init_barrier);
+
+ while (odp_atomic_load_u32(&is_running)) {
+ int num_pkts_in = 0, num_pkts_ips = 0;
+ /* TODO: Add possibility to configure scheduler and ipsec enq/deq burst sizes. */
+ cnt = odp_schedule_multi_no_wait(NULL, evs, MAX_BURST);
+
+ if (cnt == 0)
+ continue;
+
+ for (int i = 0; i < cnt; ++i) {
+ ev = evs[i];
+ type = odp_event_types(ev, &subtype);
+ pkt = odp_packet_from_event(ev);
+
+ if (type == ODP_EVENT_PACKET) {
+ if (subtype == ODP_EVENT_PACKET_BASIC) {
+ pkts_in[num_pkts_in++] = pkt;
+ } else if (subtype == ODP_EVENT_PACKET_IPSEC) {
+ pkts_ips[num_pkts_ips++] = pkt;
+ } else {
+ ++stats->discards;
+ odp_event_free(ev);
+ }
+ } else if (type == ODP_EVENT_IPSEC_STATUS) {
+ check_ipsec_status_ev(ev, stats);
+ } else {
+ ++stats->discards;
+ odp_event_free(ev);
+ }
+ }
+
+ if (num_pkts_in > 0)
+ process_packets_in(pkts_in, num_pkts_in, fwd_tbl, stats);
+
+ if (num_pkts_ips > 0)
+ complete_ipsec_ops(pkts_ips, num_pkts_ips, fwd_tbl, stats);
+ }
+
+ odp_barrier_wait(&config->prog_config->term_barrier);
+ drain_events();
+
+ return 0;
+}
+
+static odp_bool_t setup_workers(prog_config_t *config)
+{
+ odph_thread_common_param_t thr_common;
+ odph_thread_param_t thr_param[config->num_thrs];
+ odp_cpumask_t cpumask;
+ int num_workers;
+
+ num_workers = odp_cpumask_default_worker(&cpumask, config->num_thrs);
+ odph_thread_common_param_init(&thr_common);
+ thr_common.instance = config->odp_instance;
+ thr_common.cpumask = &cpumask;
+
+ for (int i = 0; i < config->num_thrs; ++i) {
+ odph_thread_param_init(&thr_param[i]);
+ thr_param[i].start = process_packets;
+ thr_param[i].thr_type = ODP_THREAD_WORKER;
+ config->thread_config[i].prog_config = config;
+ thr_param[i].arg = &config->thread_config[i];
+ }
+
+ num_workers = odph_thread_create(config->thread_tbl, &thr_common, thr_param, num_workers);
+
+ if (num_workers != config->num_thrs) {
+ ODPH_ERR("Error configuring worker threads\n");
+ return false;
+ }
+
+ return true;
+}
+
+static odp_bool_t setup_test(prog_config_t *config)
+{
+ odp_barrier_init(&config->init_barrier, config->num_thrs + 1);
+ odp_barrier_init(&config->term_barrier, config->num_thrs + 1);
+
+ if (!setup_pktios(config))
+ return false;
+
+ if (!setup_fwd_table(config))
+ return false;
+
+ if (!setup_workers(config))
+ return false;
+
+ odp_barrier_wait(&config->init_barrier);
+
+ return true;
+}
+
+static void stop_test(prog_config_t *config)
+{
+ for (uint32_t i = 0U; i < config->num_ifs; ++i)
+ if (config->pktios[i].handle != ODP_PKTIO_INVALID)
+ (void)odp_pktio_stop(config->pktios[i].handle);
+
+ odp_barrier_wait(&config->term_barrier);
+ (void)odph_thread_join(config->thread_tbl, config->num_thrs);
+}
+
+static void wait_sas_disabled(uint32_t num_sas)
+{
+ uint32_t num_sas_dis = 0U;
+ odp_event_t ev;
+ odp_ipsec_status_t status;
+
+ while (num_sas_dis < num_sas) {
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ if (odp_event_type(ev) != ODP_EVENT_IPSEC_STATUS) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ if (odp_ipsec_status(&status, ev) < 0) {
+ odp_event_free(ev);
+ continue;
+ }
+
+ if (status.id == ODP_IPSEC_STATUS_SA_DISABLE)
+ ++num_sas_dis;
+
+ odp_event_free(ev);
+ }
+}
+
+static void teardown_test(const prog_config_t *config)
+{
+ (void)odph_iplookup_table_destroy(config->fwd_tbl);
+
+ for (uint32_t i = 0U; i < config->num_ifs; ++i)
+ if (config->pktios[i].handle != ODP_PKTIO_INVALID) {
+ (void)odp_pktio_close(config->pktios[i].handle);
+ free(config->pktios[i].name);
+ }
+
+ if (config->pktio_pool != ODP_POOL_INVALID)
+ (void)odp_pool_destroy(config->pktio_pool);
+
+ for (uint32_t i = 0U; i < config->num_sas; ++i)
+ (void)odp_ipsec_sa_disable(config->sas[i]);
+
+ /* Drain SA status events. */
+ wait_sas_disabled(config->num_sas);
+
+ for (uint32_t i = 0U; i < config->num_sas; ++i)
+ (void)odp_ipsec_sa_destroy(config->sas[i]);
+
+ for (uint32_t i = 0U; i < config->num_sa_qs; ++i)
+ (void)odp_queue_destroy(config->sa_qs[i]);
+
+ if (config->compl_q != ODP_QUEUE_INVALID)
+ (void)odp_queue_destroy(config->compl_q);
+
+ free(config->sa_conf_file);
+ free(config->fwd_conf_file);
+}
+
+static void print_stats(const prog_config_t *config)
+{
+ const stats_t *stats;
+
+ printf("\nProgram finished:\n");
+
+ for (int i = 0; i < config->num_thrs; ++i) {
+ stats = &config->thread_config[i].stats;
+
+ printf("\n Worker %d:\n"
+ " IPsec in packets: %" PRIu64 "\n"
+ " IPsec out packets: %" PRIu64 "\n"
+ " IPsec in packet errors: %" PRIu64 "\n"
+ " IPsec out packet errors: %" PRIu64 "\n"
+ " IPsec status errors: %" PRIu64 "\n"
+ " Packets forwarded: %" PRIu64 "\n"
+ " Packets dropped: %" PRIu64 "\n", i, stats->ipsec_in_pkts,
+ stats->ipsec_out_pkts, stats->ipsec_in_errs, stats->ipsec_out_errs,
+ stats->status_errs, stats->fwd_pkts, stats->discards);
+ }
+}
+
+int main(int argc, char **argv)
+{
+ odp_instance_t odp_instance;
+ parse_result_t parse_res;
+ prog_config_t config;
+ int ret = EXIT_SUCCESS;
+
+ if (odp_init_global(&odp_instance, NULL, NULL) < 0) {
+ ODPH_ERR("ODP global init failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_init_local(odp_instance, ODP_THREAD_CONTROL) < 0) {
+ ODPH_ERR("ODP local init failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ init_config(&config);
+
+ if (odp_schedule_config(NULL) < 0) {
+ ODPH_ERR("Error configuring scheduler\n");
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ parse_res = setup_program(argc, argv, &config);
+
+ if (parse_res == PRS_NOK) {
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ if (parse_res == PRS_TERM) {
+ ret = EXIT_SUCCESS;
+ goto out_test;
+ }
+
+ config.odp_instance = odp_instance;
+ odp_atomic_init_u32(&is_running, 1U);
+
+ if (!setup_test(&config)) {
+ ret = EXIT_FAILURE;
+ goto out_test;
+ }
+
+ while (odp_atomic_load_u32(&is_running))
+ odp_cpu_pause();
+
+ stop_test(&config);
+ print_stats(&config);
+
+out_test:
+ teardown_test(&config);
+
+ if (odp_term_local() < 0) {
+ ODPH_ERR("ODP local terminate failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (odp_term_global(odp_instance) < 0) {
+ ODPH_ERR("ODP global terminate failed, exiting\n");
+ exit(EXIT_FAILURE);
+ }
+
+ return ret;
+}
diff --git a/test/validation/api/atomic/atomic.c b/test/validation/api/atomic/atomic.c
index 5a1fdf11b..d4329bc6b 100644
--- a/test/validation/api/atomic/atomic.c
+++ b/test/validation/api/atomic/atomic.c
@@ -1276,7 +1276,7 @@ static void test_atomic_functional(int test_fn(void *), void validate_fn(void))
test_atomic_init();
test_atomic_store();
- odp_cunit_thread_create(num, test_fn, NULL, 0);
+ odp_cunit_thread_create(num, test_fn, NULL, 0, 0);
odp_cunit_thread_join(num);
validate_fn();
}
diff --git a/test/validation/api/barrier/barrier.c b/test/validation/api/barrier/barrier.c
index 710947997..ce52fd2d7 100644
--- a/test/validation/api/barrier/barrier.c
+++ b/test/validation/api/barrier/barrier.c
@@ -404,7 +404,7 @@ static void barrier_test_no_barrier_functional(void)
int num = global_mem->g_num_threads;
barrier_test_init();
- odp_cunit_thread_create(num, no_barrier_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, no_barrier_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -413,7 +413,7 @@ static void barrier_test_barrier_functional(void)
int num = global_mem->g_num_threads;
barrier_test_init();
- odp_cunit_thread_create(num, barrier_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, barrier_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
index 19f39e1d3..c3484e14a 100644
--- a/test/validation/api/buffer/buffer.c
+++ b/test/validation/api/buffer/buffer.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019, Nokia
+ * Copyright (c) 2019-2022, Nokia
* Copyright (c) 2022, Marvell
* All rights reserved.
*
@@ -7,6 +7,7 @@
*/
#include <odp_api.h>
+#include <odp/helper/odph_debug.h>
#include "odp_cunit_common.h"
#define BUF_ALIGN ODP_CACHE_LINE_SIZE
@@ -491,6 +492,58 @@ static void buffer_test_pool_max_pools_max_cache(void)
test_pool_max_pools(&param);
}
+static void buffer_test_user_area(void)
+{
+ odp_pool_t pool;
+ odp_pool_param_t param;
+ uint32_t i, num;
+ void *addr;
+ void *prev = NULL;
+ uint32_t num_alloc = 0;
+ uint32_t size = 1024;
+ const uint32_t max_size = pool_capa.buf.max_uarea_size;
+
+ if (max_size == 0) {
+ ODPH_DBG("Buffer user area not supported\n");
+ return;
+ }
+
+ if (size > max_size)
+ size = max_size;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.uarea_size = size;
+
+ num = param.buf.num;
+
+ odp_buffer_t buffer[num];
+
+ pool = odp_pool_create("test_user_area", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ buffer[i] = odp_buffer_alloc(pool);
+
+ if (buffer[i] == ODP_BUFFER_INVALID)
+ break;
+ num_alloc++;
+
+ addr = odp_buffer_user_area(buffer[i]);
+ CU_ASSERT_FATAL(addr != NULL);
+ CU_ASSERT(prev != addr);
+
+ prev = addr;
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+
+ if (num_alloc)
+ odp_buffer_free_multi(buffer, num_alloc);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
odp_testinfo_t buffer_suite[] = {
ODP_TEST_INFO(buffer_test_pool_alloc_free),
ODP_TEST_INFO(buffer_test_pool_alloc_free_min_cache),
@@ -507,6 +560,7 @@ odp_testinfo_t buffer_suite[] = {
ODP_TEST_INFO(buffer_test_pool_max_pools),
ODP_TEST_INFO(buffer_test_pool_max_pools_min_cache),
ODP_TEST_INFO(buffer_test_pool_max_pools_max_cache),
+ ODP_TEST_INFO(buffer_test_user_area),
ODP_TEST_INFO_NULL,
};
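
Note: the new buffer_test_user_area case exercises per-buffer user areas, which are available only when the pool capability reports a non-zero buf.max_uarea_size. A condensed sketch of the same allocation pattern follows; the pool name and sizes are illustrative and error handling is omitted.

/* Sketch: allocate a buffer pool with a per-buffer user area. */
odp_pool_capability_t capa;
odp_pool_param_t param;
odp_pool_t pool;
odp_buffer_t buf;
uint32_t uarea = 64;

if (odp_pool_capability(&capa) == 0 && capa.buf.max_uarea_size > 0) {
	if (uarea > capa.buf.max_uarea_size)
		uarea = capa.buf.max_uarea_size;

	odp_pool_param_init(&param);
	param.type = ODP_POOL_BUFFER;
	param.buf.num = 16;
	param.buf.size = 256;
	param.buf.uarea_size = uarea;

	pool = odp_pool_create("uarea_sketch", &param);
	buf = odp_buffer_alloc(pool);

	if (buf != ODP_BUFFER_INVALID) {
		/* Each buffer carries its own user area of at least 'uarea' bytes */
		memset(odp_buffer_user_area(buf), 0, uarea);
		odp_buffer_free(buf);
	}
	odp_pool_destroy(pool);
}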
diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c
index 4fecba1e2..f914ea2ec 100644
--- a/test/validation/api/classification/odp_classification_basic.c
+++ b/test/validation/api/classification/odp_classification_basic.c
@@ -18,7 +18,10 @@ static void test_defaults(uint8_t fill)
memset(&cos_param, fill, sizeof(cos_param));
odp_cls_cos_param_init(&cos_param);
- CU_ASSERT_EQUAL(cos_param.num_queue, 1);
+
+ CU_ASSERT(cos_param.action == ODP_COS_ACTION_ENQUEUE);
+ CU_ASSERT(cos_param.num_queue == 1);
+ CU_ASSERT_EQUAL(cos_param.stats_enable, false);
CU_ASSERT_EQUAL(cos_param.red.enable, false);
CU_ASSERT_EQUAL(cos_param.bp.enable, false);
CU_ASSERT_EQUAL(cos_param.vector.enable, false);
@@ -392,6 +395,49 @@ static void classification_test_pmr_composite_create(void)
odp_pktio_close(pktio);
}
+static void classification_test_create_cos_with_hash_queues(void)
+{
+ odp_pool_t pool;
+ odp_cls_capability_t capa;
+ int ret;
+ odp_queue_param_t q_param;
+ odp_cls_cos_param_t cls_param;
+ odp_cos_t cos;
+
+ pool = pool_create("cls_basic_pool");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ ret = odp_cls_capability(&capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT_FATAL(capa.hash_protocols.all_bits != 0);
+
+ odp_queue_param_init(&q_param);
+ q_param.type = ODP_QUEUE_TYPE_SCHED;
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.num_queue = capa.max_hash_queues;
+ cls_param.queue_param = q_param;
+ cls_param.hash_proto.all_bits = capa.hash_protocols.all_bits;
+ cls_param.pool = pool;
+
+ cos = odp_cls_cos_create(NULL, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ ret = odp_cos_destroy(cos);
+ CU_ASSERT(ret == 0);
+
+ odp_pool_destroy(pool);
+}
+
+static int check_capa_cos_hashing(void)
+{
+ odp_cls_capability_t capa;
+
+ if (odp_cls_capability(&capa) < 0)
+ return ODP_TEST_INACTIVE;
+
+ return capa.max_hash_queues > 1 ? ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+}
+
odp_testinfo_t classification_suite_basic[] = {
ODP_TEST_INFO(classification_test_default_values),
ODP_TEST_INFO(classification_test_create_cos),
@@ -403,5 +449,7 @@ odp_testinfo_t classification_suite_basic[] = {
ODP_TEST_INFO(classification_test_cos_set_drop),
ODP_TEST_INFO(classification_test_cos_set_pool),
ODP_TEST_INFO(classification_test_pmr_composite_create),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_create_cos_with_hash_queues,
+ check_capa_cos_hashing),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index 8eac41a1e..4c24099a5 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -26,6 +26,7 @@ static uint8_t IPV6_DST_ADDR[ODPH_IPV6ADDR_LEN] = {
};
#define ODP_GTPU_UDP_PORT 2152
+#define AH_HDR_LEN 24
odp_pktio_t create_pktio(odp_queue_type_t q_type, odp_pool_t pool,
odp_bool_t cls_enable)
@@ -306,6 +307,8 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
odph_ipv6hdr_t *ipv6;
odph_gtphdr_t *gtpu;
odph_igmphdr_t *igmp;
+ odph_ahhdr_t *ah;
+ odph_esphdr_t *esp;
uint8_t *hlen = 0;
uint16_t payload_len;
uint32_t addr = 0;
@@ -359,6 +362,14 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
next_hdr = ODPH_IPPROTO_IGMP;
l4_hdr_len = ODP_IGMP_HLEN;
break;
+ case CLS_PKT_L4_AH:
+ next_hdr = ODPH_IPPROTO_AH;
+ l4_hdr_len = AH_HDR_LEN;
+ break;
+ case CLS_PKT_L4_ESP:
+ next_hdr = ODPH_IPPROTO_ESP;
+ l4_hdr_len = ODPH_ESPHDR_LEN;
+ break;
default:
ODPH_ASSERT(0);
}
@@ -444,9 +455,11 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
udp = (odph_udphdr_t *)(buf + l4_offset);
sctp = (odph_sctphdr_t *)(buf + l4_offset);
icmp = (odph_icmphdr_t *)(buf + l4_offset);
+ igmp = (odph_igmphdr_t *)(buf + l4_offset);
+ ah = (odph_ahhdr_t *)(buf + l4_offset);
+ esp = (odph_esphdr_t *)(buf + l4_offset);
if (pkt_info.l4_type == CLS_PKT_L4_IGMP) {
- igmp = (odph_igmphdr_t *)odp_packet_l4_ptr(pkt, NULL);
igmp->group = odp_cpu_to_be_32(CLS_MAGIC_VAL);
igmp->type = 0x12;
igmp->code = 0;
@@ -496,6 +509,15 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
ODPH_ERR("odph_udp_tcp_chksum failed\n");
return ODP_PACKET_INVALID;
}
+ } else if (pkt_info.l4_type == CLS_PKT_L4_AH) {
+ ah->next_header = ODPH_IPV4;
+ ah->ah_len = AH_HDR_LEN / 4 - 2;
+ ah->pad = 0;
+ ah->spi = 256;
+ ah->seq_no = 1;
+ } else if (pkt_info.l4_type == CLS_PKT_L4_ESP) {
+ esp->spi = 256;
+ esp->seq_no = 1;
} else {
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
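
Note on the AH constants above: per RFC 4302 the AH Payload Length field encodes the header length in 32-bit words minus 2, so for the 24-byte AH used here the field value is 24 / 4 - 2 = 4. A hypothetical helper (not in the patch) that captures this encoding:

/* Hypothetical helper, shown only to illustrate the ah_len encoding above. */
static inline uint8_t ah_len_field(uint32_t ah_hdr_bytes)
{
	/* RFC 4302: AH length in 32-bit words, minus 2. 24 bytes -> 4. */
	return (uint8_t)(ah_hdr_bytes / 4 - 2);
}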
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index e69f077a2..9a2e32f4b 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -1873,6 +1873,98 @@ static void classification_test_pmr_term_custom_l3(void)
test_pmr_term_custom(1);
}
+static void test_pmr_term_ipsec_spi_ah(odp_bool_t is_ipv6)
+{
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+ odp_packet_t pkt;
+ odph_ahhdr_t *ah;
+
+ val = odp_cpu_to_be_32(0x11223344);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPSEC_SPI;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_AH;
+ pkt_info.ipv6 = is_ipv6;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ah = (odph_ahhdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ ah->spi = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ ah = (odph_ahhdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ ah->spi = val + 1;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_ipsec_spi_ah_ipv4(void)
+{
+ test_pmr_term_ipsec_spi_ah(TEST_IPV4);
+}
+
+static void test_pmr_term_ipsec_spi_esp(odp_bool_t is_ipv6)
+{
+ uint32_t val;
+ uint32_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+ odp_packet_t pkt;
+ odph_esphdr_t *esp;
+
+ val = odp_cpu_to_be_32(0x11223344);
+ mask = odp_cpu_to_be_32(0xffffffff);
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IPSEC_SPI;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.l4_type = CLS_PKT_L4_ESP;
+ pkt_info.ipv6 = is_ipv6;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ esp = (odph_esphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ esp->spi = val;
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ esp = (odph_esphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ esp->spi = val + 1;
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_ipsec_spi_esp_ipv4(void)
+{
+ test_pmr_term_ipsec_spi_esp(TEST_IPV4);
+}
+
+static void classification_test_pmr_term_ipsec_spi_ah_ipv6(void)
+{
+ test_pmr_term_ipsec_spi_ah(TEST_IPV6);
+}
+
+static void classification_test_pmr_term_ipsec_spi_esp_ipv6(void)
+{
+ test_pmr_term_ipsec_spi_esp(TEST_IPV6);
+}
+
static int check_capa_tcp_dport(void)
{
return cls_capa.supported_terms.bit.tcp_dport;
@@ -1968,6 +2060,11 @@ static int check_capa_custom_l3(void)
return cls_capa.supported_terms.bit.custom_l3;
}
+static int check_capa_ipsec_spi(void)
+{
+ return cls_capa.supported_terms.bit.ipsec_spi;
+}
+
static int check_capa_pmr_series(void)
{
uint64_t support;
@@ -2088,6 +2185,14 @@ odp_testinfo_t classification_suite_pmr[] = {
check_capa_custom_frame),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_custom_l3,
check_capa_custom_l3),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipsec_spi_ah_ipv4,
+ check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipsec_spi_esp_ipv4,
+ check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipsec_spi_ah_ipv6,
+ check_capa_ipsec_spi),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipsec_spi_esp_ipv6,
+ check_capa_ipsec_spi),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_serial,
check_capa_pmr_series),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_parallel,
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 592f37cd6..8fa43099d 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -19,6 +19,8 @@ typedef enum cls_packet_l4_info {
CLS_PKT_L4_ICMP,
CLS_PKT_L4_GTP,
CLS_PKT_L4_IGMP,
+ CLS_PKT_L4_AH,
+ CLS_PKT_L4_ESP
} cls_packet_l4_info;
typedef struct cls_packet_info {
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index 0112bf27a..150a470ec 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -1342,12 +1342,11 @@ static void test_auth_hashes_in_auth_range(void)
odp_auth_alg_t auth = auth_algs[n];
int num;
- if (check_alg_support(ODP_CIPHER_ALG_NULL, auth)
- == ODP_TEST_INACTIVE)
+ if (check_alg_support(ODP_CIPHER_ALG_NULL, auth) == ODP_TEST_INACTIVE)
continue;
num = odp_crypto_auth_capability(auth, NULL, 0);
- CU_ASSERT(num > 0);
+ CU_ASSERT_FATAL(num > 0);
odp_crypto_auth_capability_t capa[num];
diff --git a/test/validation/api/crypto/test_vectors.h b/test/validation/api/crypto/test_vectors.h
index c07be12f7..b9a9c2f06 100644
--- a/test/validation/api/crypto/test_vectors.h
+++ b/test/validation/api/crypto/test_vectors.h
@@ -2493,7 +2493,7 @@ static crypto_test_reference_t aes_eia2_reference[] = {
*/
static crypto_test_reference_t zuc_eea3_reference[] = {
{
- .cipher_key_length = ZUC_EIA3_KEY_LEN,
+ .cipher_key_length = ZUC_EEA3_KEY_LEN,
.cipher_key = { 0xe5, 0xbd, 0x3e, 0xa0, 0xeb, 0x55, 0xad, 0xe8,
0x66, 0xc6, 0xac, 0x58, 0xbd, 0x54, 0x30, 0x2a},
.cipher_iv_length = ZUC_EEA3_IV_LEN,
@@ -2527,7 +2527,132 @@ static crypto_test_reference_t zuc_eea3_reference[] = {
0xdf, 0x5a, 0x47, 0x3a, 0x57, 0xa4, 0xa0, 0x0d,
0x98, 0x5e, 0xba, 0xd8, 0x80, 0xd6, 0xf2, 0x38,
0x64, 0xa0, 0x7b, 0x01 }
- }
+ },
+
+ /* Privately generated test data */
+ {
+ .cipher_key_length = ZUC_EEA3_256_KEY_LEN,
+ .cipher_key = { 0xf7, 0xb4, 0x04, 0x5a, 0x81, 0x5c, 0x1b, 0x01,
+ 0x82, 0xf9, 0xf4, 0x26, 0x80, 0xd4, 0x56, 0x26,
+ 0xd5, 0xf7, 0x4b, 0x68, 0x48, 0x6b, 0x92, 0x6a,
+ 0x34, 0x1f, 0x86, 0x66, 0x60, 0x0a, 0xfc, 0x57},
+ .cipher_iv_length = ZUC_EEA3_256_IV_LEN,
+ .cipher_iv = { 0x8e, 0x5d, 0xbc, 0x3f, 0xb9, 0xae, 0x66, 0xa3,
+ 0xb9, 0x5c, 0x12, 0x14, 0xdb, 0xc5, 0xbc, 0x18,
+ 0x48, 0x12, 0x09, 0x06, 0x25, 0x33, 0x2e, 0x12,
+ 0x12 },
+ .length = 1024,
+ .is_length_in_bits = true,
+ .plaintext = { 0x36, 0xdb, 0x63, 0x68, 0xb5, 0x1f, 0x4e, 0x92,
+ 0x46, 0x1f, 0xde, 0xdb, 0xc2, 0xec, 0xfa, 0x7e,
+ 0x49, 0x85, 0x77, 0xaa, 0x46, 0x98, 0x30, 0x2d,
+ 0x3b, 0xc4, 0x11, 0x24, 0x98, 0x20, 0xa9, 0xce,
+ 0xfb, 0x0d, 0x36, 0xb0, 0x2c, 0x85, 0x42, 0x72,
+ 0xa4, 0x21, 0x4e, 0x66, 0x0d, 0x48, 0xe4, 0x57,
+ 0xce, 0x5b, 0x01, 0x14, 0xf3, 0x31, 0x42, 0x2e,
+ 0xf5, 0x53, 0x52, 0x8d, 0x73, 0xfc, 0x5c, 0x6e,
+ 0x09, 0x92, 0x1e, 0x35, 0x17, 0x60, 0xa8, 0xbb,
+ 0x81, 0xf6, 0x21, 0x8f, 0x3e, 0x05, 0xe6, 0x0c,
+ 0x60, 0xe7, 0x21, 0x53, 0x18, 0x63, 0x81, 0x0d,
+ 0xb6, 0xd4, 0x9a, 0x29, 0xd0, 0xf6, 0x97, 0xd9,
+ 0x89, 0xb5, 0x0e, 0xa0, 0x15, 0xb6, 0x5c, 0x97,
+ 0xac, 0x7d, 0x26, 0xeb, 0x83, 0x0c, 0xf7, 0xe3,
+ 0xf3, 0x18, 0x37, 0x0b, 0x7b, 0xb8, 0x18, 0x31,
+ 0x8c, 0xb2, 0x5a, 0x5c, 0xa9, 0xf1, 0x35, 0x32 },
+ .ciphertext = { 0xa6, 0xe5, 0x71, 0x58, 0x5c, 0xcf, 0x5d, 0x0d,
+ 0x59, 0xb5, 0x51, 0xab, 0xf5, 0xfa, 0x31, 0xf9,
+ 0x8d, 0x4f, 0xf0, 0x3c, 0x7d, 0x61, 0x8d, 0x7a,
+ 0x6b, 0xcb, 0x2c, 0x79, 0xca, 0x99, 0x06, 0x6f,
+ 0xff, 0x5d, 0x12, 0x5f, 0x0e, 0x7a, 0x33, 0x6b,
+ 0x51, 0xbc, 0x58, 0x53, 0xff, 0xbd, 0x85, 0xc9,
+ 0xac, 0x5f, 0x33, 0xc2, 0xa2, 0xf1, 0x17, 0x7a,
+ 0xd9, 0x3f, 0x81, 0x82, 0x2f, 0x0a, 0xb0, 0xaf,
+ 0xb9, 0x19, 0x3b, 0xfa, 0xcd, 0xa4, 0x06, 0x81,
+ 0x2a, 0x7a, 0xbf, 0x2c, 0x07, 0xde, 0xc1, 0xa4,
+ 0x8c, 0x15, 0x85, 0x81, 0xa6, 0xd3, 0x73, 0x1c,
+ 0x29, 0x0b, 0xee, 0x3c, 0x57, 0xfa, 0x82, 0xad,
+ 0x6f, 0xe0, 0xa1, 0x54, 0x8d, 0xa4, 0x92, 0x29,
+ 0xf4, 0xfa, 0x6d, 0x01, 0xe3, 0x6c, 0xb9, 0x76,
+ 0x80, 0x53, 0xbb, 0x27, 0xb8, 0x18, 0x47, 0x6c,
+ 0xae, 0xb5, 0x44, 0x60, 0x43, 0x9d, 0xa7, 0x3f }
+ },
+ /* Privately generated test data */
+ {
+ .cipher_key_length = ZUC_EEA3_256_KEY_LEN,
+ .cipher_key = { 0x1d, 0x0f, 0x0e, 0x75, 0x86, 0xb3, 0xfc, 0x65,
+ 0x94, 0xbf, 0xaa, 0xa8, 0xf5, 0xd0, 0x0f, 0xe8,
+ 0x14, 0x7a, 0x96, 0x61, 0x15, 0x49, 0x79, 0x71,
+ 0x13, 0x82, 0xb4, 0xae, 0x34, 0x04, 0x75, 0x51 },
+ .cipher_iv_length = ZUC_EEA3_256_IV_LEN,
+ .cipher_iv = { 0x98, 0xcc, 0x89, 0x9f, 0xaf, 0x6d, 0x64, 0xb6,
+ 0xb1, 0xe8, 0x21, 0x72, 0xee, 0xb6, 0xcc, 0xe3,
+ 0xcf, 0x32, 0x28, 0x21, 0x21, 0x0d, 0x1e, 0x1c,
+ 0x34 },
+ .length = 1928,
+ .is_length_in_bits = true,
+ .plaintext = { 0xa4, 0xcb, 0x6e, 0x76, 0x99, 0xfb, 0x0c, 0xab,
+ 0x6d, 0x57, 0xb1, 0x69, 0xc0, 0x47, 0x80, 0x63,
+ 0x00, 0xe1, 0xf9, 0x51, 0x10, 0xbe, 0xc0, 0x0f,
+ 0x99, 0x62, 0x2d, 0x71, 0xca, 0x75, 0xa0, 0x6e,
+ 0x41, 0x0e, 0xe4, 0xda, 0x09, 0xf1, 0x86, 0x76,
+ 0x48, 0x37, 0xe0, 0x08, 0x7e, 0x60, 0x6c, 0x7f,
+ 0x41, 0x65, 0xd0, 0x51, 0x24, 0x91, 0x61, 0xbd,
+ 0xf3, 0x8e, 0x2e, 0xbd, 0x04, 0xce, 0x2b, 0x45,
+ 0xdc, 0x0f, 0x1f, 0xe5, 0x00, 0xa5, 0x5c, 0x48,
+ 0xdd, 0x3c, 0x51, 0x5b, 0x9c, 0xbd, 0xda, 0xde,
+ 0x22, 0xab, 0x2f, 0x46, 0x3c, 0x90, 0x03, 0x2f,
+ 0x1f, 0x31, 0xec, 0x23, 0xff, 0x17, 0x68, 0xdb,
+ 0x26, 0x87, 0xc1, 0x27, 0x2d, 0x1d, 0x6f, 0x0a,
+ 0x59, 0xc0, 0x65, 0xf5, 0x7d, 0x40, 0xd3, 0xa0,
+ 0xeb, 0x03, 0xe6, 0x27, 0x93, 0xea, 0x56, 0xb2,
+ 0x1b, 0x42, 0xd5, 0x1b, 0x59, 0x3d, 0xf6, 0x7f,
+ 0xc5, 0xb7, 0xa6, 0xf2, 0xd4, 0x16, 0xfc, 0x2d,
+ 0xd6, 0x61, 0x23, 0x54, 0xa1, 0xf6, 0xf4, 0x8c,
+ 0xf9, 0xda, 0xb3, 0x8d, 0xc4, 0x09, 0x3f, 0xe0,
+ 0x4b, 0x15, 0xfb, 0xa4, 0x52, 0xf1, 0x24, 0x17,
+ 0xa9, 0xca, 0x09, 0x7d, 0xe0, 0x05, 0xab, 0xb7,
+ 0x67, 0xce, 0x0b, 0x08, 0xc4, 0xff, 0x95, 0xbe,
+ 0xd9, 0x48, 0x4b, 0x9e, 0x52, 0x8a, 0x7e, 0x9d,
+ 0x9f, 0x79, 0x42, 0xf2, 0x6a, 0x66, 0x09, 0x13,
+ 0x30, 0x13, 0x91, 0x11, 0x18, 0x3c, 0xc8, 0x7f,
+ 0x0a, 0xd3, 0x88, 0xce, 0xd2, 0x1d, 0x8c, 0xab,
+ 0x65, 0xd7, 0x49, 0xb7, 0x62, 0xc7, 0x55, 0x01,
+ 0x40, 0x97, 0xf3, 0xab, 0xfd, 0xfd, 0xbe, 0x2d,
+ 0x10, 0x4f, 0x3e, 0x28, 0x8b, 0x06, 0xa8, 0x95,
+ 0xd9, 0x30, 0x64, 0xab, 0x4d, 0xf0, 0x57, 0xb2,
+ 0xc8 },
+ .ciphertext = { 0xd0, 0xf9, 0xff, 0xce, 0x03, 0x81, 0x14, 0x9c,
+ 0xd5, 0xf2, 0xbf, 0xe5, 0xff, 0xc8, 0x15, 0x4a,
+ 0x9c, 0x06, 0x2b, 0x17, 0x99, 0xe3, 0x48, 0x70,
+ 0x37, 0x01, 0x5e, 0x24, 0x80, 0x9a, 0x46, 0x4e,
+ 0xa8, 0xc0, 0x59, 0xd7, 0x03, 0x74, 0x28, 0x91,
+ 0x79, 0xb4, 0xb5, 0xd6, 0x52, 0x92, 0x04, 0x77,
+ 0x5b, 0x4f, 0x34, 0xd1, 0xbe, 0xaa, 0x74, 0xd9,
+ 0x01, 0x40, 0x24, 0xc7, 0x8c, 0x62, 0x2a, 0x51,
+ 0x5a, 0x58, 0x0e, 0xc8, 0x70, 0x12, 0x06, 0x1c,
+ 0x62, 0x7f, 0xf5, 0x23, 0xcb, 0x3c, 0xc1, 0xbe,
+ 0x8b, 0x7f, 0x9d, 0x12, 0xb8, 0x26, 0xc8, 0xa3,
+ 0x77, 0x7e, 0x83, 0xda, 0x83, 0xe1, 0x9f, 0xef,
+ 0x33, 0x62, 0x17, 0xa7, 0x74, 0x68, 0x34, 0x5e,
+ 0x16, 0xcc, 0xbc, 0x6c, 0x33, 0x2f, 0x73, 0xf0,
+ 0xfc, 0xe5, 0x2c, 0x2d, 0xfb, 0x81, 0xbe, 0x1e,
+ 0x6e, 0x4f, 0xf4, 0x14, 0x37, 0x7c, 0x97, 0xac,
+ 0xa9, 0xac, 0x68, 0x95, 0xf3, 0x55, 0xb3, 0xfb,
+ 0xf6, 0x64, 0xd9, 0x1b, 0xe1, 0x54, 0x79, 0x6e,
+ 0xfa, 0x21, 0xa4, 0x19, 0x9f, 0xb4, 0x4b, 0xb7,
+ 0xef, 0x52, 0xd8, 0x44, 0x75, 0x99, 0x07, 0x6d,
+ 0xa9, 0xcf, 0x32, 0xc5, 0xc1, 0x31, 0x0c, 0xa8,
+ 0x86, 0x40, 0x75, 0xeb, 0x12, 0xcf, 0x26, 0x5c,
+ 0x5f, 0xa3, 0x3c, 0xb6, 0x12, 0x45, 0xf3, 0x0a,
+ 0x38, 0x09, 0xa8, 0x36, 0x32, 0x4a, 0x2f, 0xad,
+ 0x50, 0x11, 0x38, 0xba, 0x8f, 0xdd, 0xd1, 0x58,
+ 0xd7, 0x3d, 0x3a, 0x40, 0x7c, 0x3f, 0xa7, 0x98,
+ 0xf3, 0x12, 0x7f, 0x9f, 0x89, 0xcf, 0x48, 0x58,
+ 0x01, 0xeb, 0x98, 0x7c, 0x59, 0x11, 0x9f, 0x57,
+ 0x74, 0x5f, 0x70, 0x72, 0x74, 0xa4, 0x82, 0x3c,
+ 0x36, 0xe6, 0x31, 0x9e, 0xba, 0x7b, 0x53, 0xfc,
+ 0x56 }
+ },
};
static crypto_test_reference_t zuc_eia3_reference[] = {
@@ -2562,7 +2687,143 @@ static crypto_test_reference_t zuc_eia3_reference[] = {
0x00, 0x00, 0x00, 0x00 },
.digest_length = ZUC_EIA3_DIGEST_LEN,
.digest = { 0x24, 0xa8, 0x42, 0xb3 }
- }
+ },
+ /* Privately generated test data */
+ {
+ .auth_key_length = ZUC_EIA3_256_KEY_LEN,
+ .auth_key = { 0xe3, 0x8e, 0xaf, 0x08, 0xde, 0x8c, 0x08, 0x41,
+ 0x7f, 0x2b, 0x97, 0x20, 0x10, 0x87, 0xc7, 0xf7,
+ 0xbe, 0x3c, 0xd2, 0x68, 0x80, 0x10, 0x1e, 0x71,
+ 0xfd, 0xb2, 0xbb, 0xad, 0x25, 0x0f, 0x06, 0x08 },
+ .auth_iv_length = ZUC_EIA3_256_IV_LEN,
+ .auth_iv = { 0xf5, 0x8d, 0x08, 0x26, 0x94, 0x14, 0xc7, 0x4d,
+ 0xf5, 0x7c, 0x9c, 0xaa, 0x45, 0x53, 0xfd, 0x85,
+ 0x23, 0x0b, 0x00, 0x0e, 0x26, 0x2b, 0x0f, 0x01,
+ 0x26 },
+ .length = 360,
+ .is_length_in_bits = true,
+ .plaintext = { 0x08, 0xba, 0x8d, 0xf1, 0xf8, 0x62, 0xa6, 0xaf,
+ 0xf9, 0x03, 0x88, 0x9c, 0xa3, 0x68, 0x6b, 0x87,
+ 0xb6, 0x92, 0xd1, 0x47, 0x3e, 0x54, 0xaf, 0x46,
+ 0x07, 0x8f, 0x89, 0xea, 0x26, 0x9d, 0x0e, 0x2f,
+ 0x57, 0x9b, 0x20, 0x4f, 0xfe, 0xc7, 0xfe, 0xf7,
+ 0xca, 0x86, 0x93, 0x6d, 0xee },
+ .ciphertext = { 0x08, 0xba, 0x8d, 0xf1, 0xf8, 0x62, 0xa6, 0xaf,
+ 0xf9, 0x03, 0x88, 0x9c, 0xa3, 0x68, 0x6b, 0x87,
+ 0xb6, 0x92, 0xd1, 0x47, 0x3e, 0x54, 0xaf, 0x46,
+ 0x07, 0x8f, 0x89, 0xea, 0x26, 0x9d, 0x0e, 0x2f,
+ 0x57, 0x9b, 0x20, 0x4f, 0xfe, 0xc7, 0xfe, 0xf7,
+ 0xca, 0x86, 0x93, 0x6d, 0xee },
+ .digest_length = ZUC_EIA3_DIGEST_LEN,
+ .digest = {0x58, 0x19, 0xab, 0xa5}
+ },
+ /* Privately generated test data */
+ {
+ .auth_key_length = ZUC_EIA3_256_KEY_LEN,
+ .auth_key = { 0x6a, 0x7e, 0x4c, 0x7e, 0x51, 0x25, 0xb3, 0x48,
+ 0x84, 0x53, 0x3a, 0x94, 0xfb, 0x31, 0x99, 0x90,
+ 0x32, 0x57, 0x44, 0xee, 0x9b, 0xbc, 0xe9, 0xe5,
+ 0x25, 0xcf, 0x08, 0xf5, 0xe9, 0xe2, 0x5e, 0x53 },
+ .auth_iv_length = ZUC_EIA3_256_IV_LEN,
+ .auth_iv = { 0x60, 0xaa, 0xd2, 0xb2, 0xd0, 0x85, 0xfa, 0x54,
+ 0xd8, 0x35, 0xe8, 0xd4, 0x66, 0x82, 0x64, 0x98,
+ 0xd9, 0x2a, 0x08, 0x1d, 0x35, 0x19, 0x17, 0x01,
+ 0x1a },
+ .length = 2872,
+ .is_length_in_bits = true,
+ .plaintext = { 0xc6, 0x69, 0x73, 0x51, 0xff, 0x4a, 0xec, 0x29,
+ 0xcd, 0xba, 0xab, 0xf2, 0xfb, 0xe3, 0x46, 0x7c,
+ 0xc2, 0x54, 0xf8, 0x1b, 0xe8, 0xe7, 0x8d, 0x76,
+ 0x5a, 0x2e, 0x63, 0x33, 0x9f, 0xc9, 0x9a, 0x66,
+ 0x32, 0x0d, 0xb7, 0x31, 0x58, 0xa3, 0x5a, 0x25,
+ 0x5d, 0x05, 0x17, 0x58, 0xe9, 0x5e, 0xd4, 0xab,
+ 0xb2, 0xcd, 0xc6, 0x9b, 0xb4, 0x54, 0x11, 0x0e,
+ 0x82, 0x74, 0x41, 0x21, 0x3d, 0xdc, 0x87, 0x70,
+ 0xe9, 0x3e, 0xa1, 0x41, 0xe1, 0xfc, 0x67, 0x3e,
+ 0x01, 0x7e, 0x97, 0xea, 0xdc, 0x6b, 0x96, 0x8f,
+ 0x38, 0x5c, 0x2a, 0xec, 0xb0, 0x3b, 0xfb, 0x32,
+ 0xaf, 0x3c, 0x54, 0xec, 0x18, 0xdb, 0x5c, 0x02,
+ 0x1a, 0xfe, 0x43, 0xfb, 0xfa, 0xaa, 0x3a, 0xfb,
+ 0x29, 0xd1, 0xe6, 0x05, 0x3c, 0x7c, 0x94, 0x75,
+ 0xd8, 0xbe, 0x61, 0x89, 0xf9, 0x5c, 0xbb, 0xa8,
+ 0x99, 0x0f, 0x95, 0xb1, 0xeb, 0xf1, 0xb3, 0x05,
+ 0xef, 0xf7, 0x00, 0xe9, 0xa1, 0x3a, 0xe5, 0xca,
+ 0x0b, 0xcb, 0xd0, 0x48, 0x47, 0x64, 0xbd, 0x1f,
+ 0x23, 0x1e, 0xa8, 0x1c, 0x7b, 0x64, 0xc5, 0x14,
+ 0x73, 0x5a, 0xc5, 0x5e, 0x4b, 0x79, 0x63, 0x3b,
+ 0x70, 0x64, 0x24, 0x11, 0x9e, 0x09, 0xdc, 0xaa,
+ 0xd4, 0xac, 0xf2, 0x1b, 0x10, 0xaf, 0x3b, 0x33,
+ 0xcd, 0xe3, 0x50, 0x48, 0x47, 0x15, 0x5c, 0xbb,
+ 0x6f, 0x22, 0x19, 0xba, 0x9b, 0x7d, 0xf5, 0x0b,
+ 0xe1, 0x1a, 0x1c, 0x7f, 0x23, 0xf8, 0x29, 0xf8,
+ 0xa4, 0x1b, 0x13, 0xb5, 0xca, 0x4e, 0xe8, 0x98,
+ 0x32, 0x38, 0xe0, 0x79, 0x4d, 0x3d, 0x34, 0xbc,
+ 0x5f, 0x4e, 0x77, 0xfa, 0xcb, 0x6c, 0x05, 0xac,
+ 0x86, 0x21, 0x2b, 0xaa, 0x1a, 0x55, 0xa2, 0xbe,
+ 0x70, 0xb5, 0x73, 0x3b, 0x04, 0x5c, 0xd3, 0x36,
+ 0x94, 0xb3, 0xaf, 0xe2, 0xf0, 0xe4, 0x9e, 0x4f,
+ 0x32, 0x15, 0x49, 0xfd, 0x82, 0x4e, 0xa9, 0x08,
+ 0x70, 0xd4, 0xb2, 0x8a, 0x29, 0x54, 0x48, 0x9a,
+ 0x0a, 0xbc, 0xd5, 0x0e, 0x18, 0xa8, 0x44, 0xac,
+ 0x5b, 0xf3, 0x8e, 0x4c, 0xd7, 0x2d, 0x9b, 0x09,
+ 0x42, 0xe5, 0x06, 0xc4, 0x33, 0xaf, 0xcd, 0xa3,
+ 0x84, 0x7f, 0x2d, 0xad, 0xd4, 0x76, 0x47, 0xde,
+ 0x32, 0x1c, 0xec, 0x4a, 0xc4, 0x30, 0xf6, 0x20,
+ 0x23, 0x85, 0x6c, 0xfb, 0xb2, 0x07, 0x04, 0xf4,
+ 0xec, 0x0b, 0xb9, 0x20, 0xba, 0x86, 0xc3, 0x3e,
+ 0x05, 0xf1, 0xec, 0xd9, 0x67, 0x33, 0xb7, 0x99,
+ 0x50, 0xa3, 0xe3, 0x14, 0xd3, 0xd9, 0x34, 0xf7,
+ 0x5e, 0xa0, 0xf2, 0x10, 0xa8, 0xf6, 0x05, 0x94,
+ 0x01, 0xbe, 0xb4, 0xbc, 0x44, 0x78, 0xfa, 0x49,
+ 0x69, 0xe6, 0x23, 0xd0, 0x1a, 0xda, 0x69 },
+ .ciphertext = { 0xc6, 0x69, 0x73, 0x51, 0xff, 0x4a, 0xec, 0x29,
+ 0xcd, 0xba, 0xab, 0xf2, 0xfb, 0xe3, 0x46, 0x7c,
+ 0xc2, 0x54, 0xf8, 0x1b, 0xe8, 0xe7, 0x8d, 0x76,
+ 0x5a, 0x2e, 0x63, 0x33, 0x9f, 0xc9, 0x9a, 0x66,
+ 0x32, 0x0d, 0xb7, 0x31, 0x58, 0xa3, 0x5a, 0x25,
+ 0x5d, 0x05, 0x17, 0x58, 0xe9, 0x5e, 0xd4, 0xab,
+ 0xb2, 0xcd, 0xc6, 0x9b, 0xb4, 0x54, 0x11, 0x0e,
+ 0x82, 0x74, 0x41, 0x21, 0x3d, 0xdc, 0x87, 0x70,
+ 0xe9, 0x3e, 0xa1, 0x41, 0xe1, 0xfc, 0x67, 0x3e,
+ 0x01, 0x7e, 0x97, 0xea, 0xdc, 0x6b, 0x96, 0x8f,
+ 0x38, 0x5c, 0x2a, 0xec, 0xb0, 0x3b, 0xfb, 0x32,
+ 0xaf, 0x3c, 0x54, 0xec, 0x18, 0xdb, 0x5c, 0x02,
+ 0x1a, 0xfe, 0x43, 0xfb, 0xfa, 0xaa, 0x3a, 0xfb,
+ 0x29, 0xd1, 0xe6, 0x05, 0x3c, 0x7c, 0x94, 0x75,
+ 0xd8, 0xbe, 0x61, 0x89, 0xf9, 0x5c, 0xbb, 0xa8,
+ 0x99, 0x0f, 0x95, 0xb1, 0xeb, 0xf1, 0xb3, 0x05,
+ 0xef, 0xf7, 0x00, 0xe9, 0xa1, 0x3a, 0xe5, 0xca,
+ 0x0b, 0xcb, 0xd0, 0x48, 0x47, 0x64, 0xbd, 0x1f,
+ 0x23, 0x1e, 0xa8, 0x1c, 0x7b, 0x64, 0xc5, 0x14,
+ 0x73, 0x5a, 0xc5, 0x5e, 0x4b, 0x79, 0x63, 0x3b,
+ 0x70, 0x64, 0x24, 0x11, 0x9e, 0x09, 0xdc, 0xaa,
+ 0xd4, 0xac, 0xf2, 0x1b, 0x10, 0xaf, 0x3b, 0x33,
+ 0xcd, 0xe3, 0x50, 0x48, 0x47, 0x15, 0x5c, 0xbb,
+ 0x6f, 0x22, 0x19, 0xba, 0x9b, 0x7d, 0xf5, 0x0b,
+ 0xe1, 0x1a, 0x1c, 0x7f, 0x23, 0xf8, 0x29, 0xf8,
+ 0xa4, 0x1b, 0x13, 0xb5, 0xca, 0x4e, 0xe8, 0x98,
+ 0x32, 0x38, 0xe0, 0x79, 0x4d, 0x3d, 0x34, 0xbc,
+ 0x5f, 0x4e, 0x77, 0xfa, 0xcb, 0x6c, 0x05, 0xac,
+ 0x86, 0x21, 0x2b, 0xaa, 0x1a, 0x55, 0xa2, 0xbe,
+ 0x70, 0xb5, 0x73, 0x3b, 0x04, 0x5c, 0xd3, 0x36,
+ 0x94, 0xb3, 0xaf, 0xe2, 0xf0, 0xe4, 0x9e, 0x4f,
+ 0x32, 0x15, 0x49, 0xfd, 0x82, 0x4e, 0xa9, 0x08,
+ 0x70, 0xd4, 0xb2, 0x8a, 0x29, 0x54, 0x48, 0x9a,
+ 0x0a, 0xbc, 0xd5, 0x0e, 0x18, 0xa8, 0x44, 0xac,
+ 0x5b, 0xf3, 0x8e, 0x4c, 0xd7, 0x2d, 0x9b, 0x09,
+ 0x42, 0xe5, 0x06, 0xc4, 0x33, 0xaf, 0xcd, 0xa3,
+ 0x84, 0x7f, 0x2d, 0xad, 0xd4, 0x76, 0x47, 0xde,
+ 0x32, 0x1c, 0xec, 0x4a, 0xc4, 0x30, 0xf6, 0x20,
+ 0x23, 0x85, 0x6c, 0xfb, 0xb2, 0x07, 0x04, 0xf4,
+ 0xec, 0x0b, 0xb9, 0x20, 0xba, 0x86, 0xc3, 0x3e,
+ 0x05, 0xf1, 0xec, 0xd9, 0x67, 0x33, 0xb7, 0x99,
+ 0x50, 0xa3, 0xe3, 0x14, 0xd3, 0xd9, 0x34, 0xf7,
+ 0x5e, 0xa0, 0xf2, 0x10, 0xa8, 0xf6, 0x05, 0x94,
+ 0x01, 0xbe, 0xb4, 0xbc, 0x44, 0x78, 0xfa, 0x49,
+ 0x69, 0xe6, 0x23, 0xd0, 0x1a, 0xda, 0x69 },
+ .digest_length = ZUC_EIA3_DIGEST_LEN,
+ .digest = {0xd1, 0x1e, 0x33, 0xf6}
+ },
};
/*
diff --git a/test/validation/api/crypto/test_vectors_len.h b/test/validation/api/crypto/test_vectors_len.h
index 9edf2999e..3818b57a0 100644
--- a/test/validation/api/crypto/test_vectors_len.h
+++ b/test/validation/api/crypto/test_vectors_len.h
@@ -8,7 +8,7 @@
/* Maximum */
#define MAX_KEY_LEN 64
-#define MAX_IV_LEN 16
+#define MAX_IV_LEN 32
#define MAX_DATA_LEN 1000
#define MAX_AAD_LEN 12
#define MAX_DIGEST_LEN 64
@@ -101,6 +101,10 @@
#define ZUC_EEA3_KEY_LEN 16
#define ZUC_EEA3_IV_LEN 16
+/* ZUC_EEA3_256 */
+#define ZUC_EEA3_256_KEY_LEN 32
+#define ZUC_EEA3_256_IV_LEN 25
+
/* KASUMI_F9 */
#define KASUMI_F9_KEY_LEN 16
#define KASUMI_F9_IV_LEN 9
@@ -121,6 +125,10 @@
#define ZUC_EIA3_IV_LEN 16
#define ZUC_EIA3_DIGEST_LEN 4
+/* ZUC_EIA3_256 */
+#define ZUC_EIA3_256_KEY_LEN 32
+#define ZUC_EIA3_256_IV_LEN 25
+
/* MD5 */
#define MD5_DIGEST_LEN 16
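
Note: the MAX_IV_LEN bump from 16 to 32 above is driven by the new ZUC-256 vectors, whose 25-byte IVs no longer fit the old 16-byte shared IV buffer. A compile-time check of that relationship could look like the sketch below (using C11 _Static_assert; the test suite itself does not add such an assert):

/* Sketch: every per-algorithm IV length must fit the shared IV buffer. */
_Static_assert(ZUC_EEA3_256_IV_LEN <= MAX_IV_LEN, "MAX_IV_LEN too small for ZUC-256 EEA3");
_Static_assert(ZUC_EIA3_256_IV_LEN <= MAX_IV_LEN, "MAX_IV_LEN too small for ZUC-256 EIA3");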
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index d12234a5a..c98afae4e 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -614,6 +614,7 @@ static int recv_pkts_inline(const ipsec_test_part *part,
num_pkts = 1;
break;
case ODP_PACKET_REASS_INCOMPLETE:
+ reass_state.num_frags = 0;
CU_ASSERT(0 ==
odp_packet_reass_partial_state(pkt, frags, &reass_state));
num_pkts = reass_state.num_frags;
diff --git a/test/validation/api/lock/lock.c b/test/validation/api/lock/lock.c
index c5e07c776..bf9318e76 100644
--- a/test/validation/api/lock/lock.c
+++ b/test/validation/api/lock/lock.c
@@ -1007,7 +1007,7 @@ static void lock_test_no_lock_functional(void)
{
int num = global_mem->g_num_threads;
- odp_cunit_thread_create(num, no_lock_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, no_lock_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1021,7 +1021,7 @@ static void lock_test_spinlock_api(void)
{
int num = global_mem->g_num_threads;
- odp_cunit_thread_create(num, spinlock_api_tests, NULL, 0);
+ odp_cunit_thread_create(num, spinlock_api_tests, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1030,7 +1030,7 @@ static void lock_test_spinlock_functional(void)
int num = global_mem->g_num_threads;
odp_spinlock_init(&global_mem->global_spinlock);
- odp_cunit_thread_create(num, spinlock_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, spinlock_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1038,7 +1038,7 @@ static void lock_test_spinlock_recursive_api(void)
{
int num = global_mem->g_num_threads;
- odp_cunit_thread_create(num, spinlock_recursive_api_tests, NULL, 0);
+ odp_cunit_thread_create(num, spinlock_recursive_api_tests, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1047,7 +1047,7 @@ static void lock_test_spinlock_recursive_functional(void)
int num = global_mem->g_num_threads;
odp_spinlock_recursive_init(&global_mem->global_recursive_spinlock);
- odp_cunit_thread_create(num, spinlock_recursive_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, spinlock_recursive_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1068,7 +1068,7 @@ static void lock_test_ticketlock_api(void)
{
int num = global_mem->g_num_threads;
- odp_cunit_thread_create(num, ticketlock_api_tests, NULL, 0);
+ odp_cunit_thread_create(num, ticketlock_api_tests, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1077,7 +1077,7 @@ static void lock_test_ticketlock_functional(void)
int num = global_mem->g_num_threads;
odp_ticketlock_init(&global_mem->global_ticketlock);
- odp_cunit_thread_create(num, ticketlock_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, ticketlock_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1092,7 +1092,7 @@ static void lock_test_rwlock_api(void)
{
int num = global_mem->g_num_threads;
- odp_cunit_thread_create(num, rwlock_api_tests, NULL, 0);
+ odp_cunit_thread_create(num, rwlock_api_tests, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1101,7 +1101,7 @@ static void lock_test_rwlock_functional(void)
int num = global_mem->g_num_threads;
odp_rwlock_init(&global_mem->global_rwlock);
- odp_cunit_thread_create(num, rwlock_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, rwlock_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1115,7 +1115,7 @@ static void lock_test_rwlock_recursive_api(void)
{
int num = global_mem->g_num_threads;
- odp_cunit_thread_create(num, rwlock_recursive_api_tests, NULL, 0);
+ odp_cunit_thread_create(num, rwlock_recursive_api_tests, NULL, 0, 0);
odp_cunit_thread_join(num);
}
@@ -1124,7 +1124,7 @@ static void lock_test_rwlock_recursive_functional(void)
int num = global_mem->g_num_threads;
odp_rwlock_recursive_init(&global_mem->global_recursive_rwlock);
- odp_cunit_thread_create(num, rwlock_recursive_functional_test, NULL, 0);
+ odp_cunit_thread_create(num, rwlock_recursive_functional_test, NULL, 0, 0);
odp_cunit_thread_join(num);
}
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
index 1ddff1ae1..d81fba3de 100644
--- a/test/validation/api/packet/packet.c
+++ b/test/validation/api/packet/packet.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2022, Nokia
* Copyright (c) 2020, Marvell
* All rights reserved.
*
@@ -189,7 +189,7 @@ static int packet_suite_init(void)
odp_pool_param_t params;
odp_packet_t pkt_tbl[PACKET_POOL_NUM_SEG];
struct udata_struct *udat;
- uint32_t udat_size;
+ uint32_t uarea_size;
uint8_t data = 0;
uint32_t i;
uint32_t num = PACKET_POOL_NUM;
@@ -201,6 +201,10 @@ static int packet_suite_init(void)
printf("pool_capability failed\n");
return -1;
}
+
+ if (pool_capa.pkt.max_uarea_size == 0)
+ printf("Warning: Packet user area not supported\n");
+
if (pool_capa.pkt.max_segs_per_pkt == 0)
pool_capa.pkt.max_segs_per_pkt = 10;
@@ -232,6 +236,10 @@ static int packet_suite_init(void)
params.pkt.num = num;
params.pkt.uarea_size = sizeof(struct udata_struct);
+ if (params.pkt.uarea_size > pool_capa.pkt.max_uarea_size)
+ params.pkt.uarea_size = pool_capa.pkt.max_uarea_size;
+
+ uarea_size = params.pkt.uarea_size;
memcpy(&default_param, &params, sizeof(odp_pool_param_t));
default_pool = odp_pool_create("default_pool", &params);
@@ -288,23 +296,22 @@ static int packet_suite_init(void)
}
udat = odp_packet_user_area(test_packet);
- udat_size = odp_packet_user_area_size(test_packet);
- if (!udat || udat_size != sizeof(struct udata_struct)) {
- printf("packet_user_area failed: 1\n");
+ if (odp_packet_user_area_size(test_packet) < uarea_size) {
+ printf("Bad packet user area size %u\n", odp_packet_user_area_size(test_packet));
return -1;
}
odp_pool_print(default_pool);
- memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
+ memcpy(udat, &test_packet_udata, uarea_size);
udat = odp_packet_user_area(segmented_test_packet);
- udat_size = odp_packet_user_area_size(segmented_test_packet);
- if (udat == NULL || udat_size != sizeof(struct udata_struct)) {
- printf("packet_user_area failed: 2\n");
+ if (odp_packet_user_area_size(segmented_test_packet) < uarea_size) {
+ printf("Bad segmented packet user area size %u\n",
+ odp_packet_user_area_size(segmented_test_packet));
return -1;
}
- memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
+ memcpy(udat, &test_packet_udata, uarea_size);
return 0;
}
@@ -320,6 +327,58 @@ static int packet_suite_term(void)
return 0;
}
+static void packet_set_inflags(odp_packet_t pkt, int val)
+{
+ odp_packet_has_l2_set(pkt, val);
+ odp_packet_has_l3_set(pkt, val);
+ odp_packet_has_l4_set(pkt, val);
+ odp_packet_has_eth_set(pkt, val);
+ odp_packet_has_eth_bcast_set(pkt, val);
+ odp_packet_has_eth_mcast_set(pkt, val);
+ odp_packet_has_jumbo_set(pkt, val);
+ odp_packet_has_vlan_set(pkt, val);
+ odp_packet_has_vlan_qinq_set(pkt, val);
+ odp_packet_has_arp_set(pkt, val);
+ odp_packet_has_ipv4_set(pkt, val);
+ odp_packet_has_ipv6_set(pkt, val);
+ odp_packet_has_ip_bcast_set(pkt, val);
+ odp_packet_has_ip_mcast_set(pkt, val);
+ odp_packet_has_ipfrag_set(pkt, val);
+ odp_packet_has_ipopt_set(pkt, val);
+ odp_packet_has_ipsec_set(pkt, val);
+ odp_packet_has_udp_set(pkt, val);
+ odp_packet_has_tcp_set(pkt, val);
+ odp_packet_has_sctp_set(pkt, val);
+ odp_packet_has_icmp_set(pkt, val);
+ odp_packet_user_flag_set(pkt, val);
+}
+
+static void packet_check_inflags(odp_packet_t pkt, int val)
+{
+ CU_ASSERT(odp_packet_has_l2(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_l3(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_l4(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_eth(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_eth_bcast(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_eth_mcast(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_jumbo(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_vlan(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_vlan_qinq(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_arp(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ipv4(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ipv6(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ip_bcast(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ip_mcast(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ipfrag(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ipopt(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_ipsec(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_udp(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_tcp(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_sctp(pkt) == !!val);
+ CU_ASSERT(odp_packet_has_icmp(pkt) == !!val);
+ CU_ASSERT(odp_packet_user_flag(pkt) == !!val);
+}
+
static void packet_test_alloc_free(void)
{
odp_pool_t pool;
@@ -356,6 +415,9 @@ static void packet_test_alloc_free(void)
/* User pointer should be NULL after alloc */
CU_ASSERT(odp_packet_user_ptr(packet) == NULL);
+ /* Packet flags should be zero */
+ packet_check_inflags(packet, 0);
+
/* Pool should have only one packet */
CU_ASSERT_FATAL(odp_packet_alloc(pool, packet_len)
== ODP_PACKET_INVALID);
@@ -873,11 +935,10 @@ static void packet_test_reset(void)
CU_ASSERT(odp_packet_reset(pkt, len) == 0);
CU_ASSERT(odp_packet_len(pkt) == len);
- CU_ASSERT(odp_packet_has_udp(pkt) == 0);
- odp_packet_has_udp_set(pkt, 1);
- CU_ASSERT(odp_packet_has_udp(pkt) != 0);
+ packet_set_inflags(pkt, 1);
+ packet_check_inflags(pkt, 1);
CU_ASSERT(odp_packet_reset(pkt, len) == 0);
- CU_ASSERT(odp_packet_has_udp(pkt) == 0);
+ packet_check_inflags(pkt, 0);
CU_ASSERT(odp_packet_reset(pkt, len - 1) == 0);
CU_ASSERT(odp_packet_len(pkt) == (len - 1));
@@ -906,10 +967,11 @@ static void packet_test_debug(void)
static void packet_test_context(void)
{
- odp_packet_t pkt = test_packet;
- char ptr_test_value = 2;
void *prev_ptr;
struct udata_struct *udat;
+ uint32_t uarea_size;
+ odp_packet_t pkt = test_packet;
+ char ptr_test_value = 2;
prev_ptr = odp_packet_user_ptr(pkt);
odp_packet_user_ptr_set(pkt, &ptr_test_value);
@@ -917,11 +979,15 @@ static void packet_test_context(void)
odp_packet_user_ptr_set(pkt, prev_ptr);
udat = odp_packet_user_area(pkt);
- CU_ASSERT_PTR_NOT_NULL(udat);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- sizeof(struct udata_struct));
- CU_ASSERT(memcmp(udat, &test_packet_udata, sizeof(struct udata_struct))
- == 0);
+ uarea_size = odp_packet_user_area_size(pkt);
+ CU_ASSERT(uarea_size >= default_param.pkt.uarea_size);
+
+ if (uarea_size) {
+ CU_ASSERT(udat != NULL);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, default_param.pkt.uarea_size) == 0);
+ } else {
+ CU_ASSERT(udat == NULL);
+ }
odp_packet_user_ptr_set(pkt, NULL);
CU_ASSERT(odp_packet_user_ptr(pkt) == NULL);
@@ -1296,37 +1362,46 @@ static void packet_test_segment_last(void)
#define TEST_INFLAG(packet, flag) \
do { \
- odp_packet_has_##flag##_set(packet, 0); \
- CU_ASSERT(odp_packet_has_##flag(packet) == 0); \
- odp_packet_has_##flag##_set(packet, 1); \
- CU_ASSERT(odp_packet_has_##flag(packet) != 0); \
+ odp_packet_##flag##_set(packet, 0); \
+ CU_ASSERT(odp_packet_##flag(packet) == 0); \
+ odp_packet_##flag##_set(packet, 1); \
+ CU_ASSERT(odp_packet_##flag(packet) != 0); \
} while (0)
static void packet_test_in_flags(void)
{
odp_packet_t pkt = test_packet;
- TEST_INFLAG(pkt, l2);
- TEST_INFLAG(pkt, l3);
- TEST_INFLAG(pkt, l4);
- TEST_INFLAG(pkt, eth);
- TEST_INFLAG(pkt, eth_bcast);
- TEST_INFLAG(pkt, eth_mcast);
- TEST_INFLAG(pkt, jumbo);
- TEST_INFLAG(pkt, vlan);
- TEST_INFLAG(pkt, vlan_qinq);
- TEST_INFLAG(pkt, arp);
- TEST_INFLAG(pkt, ipv4);
- TEST_INFLAG(pkt, ipv6);
- TEST_INFLAG(pkt, ip_bcast);
- TEST_INFLAG(pkt, ip_mcast);
- TEST_INFLAG(pkt, ipfrag);
- TEST_INFLAG(pkt, ipopt);
- TEST_INFLAG(pkt, ipsec);
- TEST_INFLAG(pkt, udp);
- TEST_INFLAG(pkt, tcp);
- TEST_INFLAG(pkt, sctp);
- TEST_INFLAG(pkt, icmp);
+ packet_set_inflags(pkt, 0);
+ packet_check_inflags(pkt, 0);
+ packet_set_inflags(pkt, 1);
+ packet_check_inflags(pkt, 1);
+
+ TEST_INFLAG(pkt, has_l2);
+ TEST_INFLAG(pkt, has_l3);
+ TEST_INFLAG(pkt, has_l4);
+ TEST_INFLAG(pkt, has_eth);
+ TEST_INFLAG(pkt, has_eth_bcast);
+ TEST_INFLAG(pkt, has_eth_mcast);
+ TEST_INFLAG(pkt, has_jumbo);
+ TEST_INFLAG(pkt, has_vlan);
+ TEST_INFLAG(pkt, has_vlan_qinq);
+ TEST_INFLAG(pkt, has_arp);
+ TEST_INFLAG(pkt, has_ipv4);
+ TEST_INFLAG(pkt, has_ipv6);
+ TEST_INFLAG(pkt, has_ip_bcast);
+ TEST_INFLAG(pkt, has_ip_mcast);
+ TEST_INFLAG(pkt, has_ipfrag);
+ TEST_INFLAG(pkt, has_ipopt);
+ TEST_INFLAG(pkt, has_ipsec);
+ TEST_INFLAG(pkt, has_udp);
+ TEST_INFLAG(pkt, has_tcp);
+ TEST_INFLAG(pkt, has_sctp);
+ TEST_INFLAG(pkt, has_icmp);
+ TEST_INFLAG(pkt, user_flag);
+
+ packet_set_inflags(pkt, 0);
+ packet_check_inflags(pkt, 0);
}
static void packet_test_error_flags(void)
@@ -1364,9 +1439,10 @@ static void packet_test_add_rem_data(void)
odp_packet_t pkt, new_pkt;
uint32_t pkt_len, offset, add_len;
void *usr_ptr;
- struct udata_struct *udat, *new_udat;
+ struct udata_struct *udat;
int ret;
uint32_t min_seg_len;
+ uint32_t uarea_size = default_param.pkt.uarea_size;
min_seg_len = pool_capa.pkt.min_seg_len;
@@ -1375,10 +1451,14 @@ static void packet_test_add_rem_data(void)
pkt_len = odp_packet_len(pkt);
usr_ptr = odp_packet_user_ptr(pkt);
- udat = odp_packet_user_area(pkt);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- sizeof(struct udata_struct));
- memcpy(udat, &test_packet_udata, sizeof(struct udata_struct));
+
+ if (uarea_size) {
+ udat = odp_packet_user_area(pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT_FATAL(odp_packet_user_area_size(pkt) >= uarea_size);
+ memcpy(udat, &test_packet_udata, uarea_size);
+ }
offset = pkt_len / 2;
@@ -1401,13 +1481,14 @@ static void packet_test_add_rem_data(void)
/* Verify that user metadata is preserved */
CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
- /* Verify that user metadata has been preserved */
- new_udat = odp_packet_user_area(new_pkt);
- CU_ASSERT_PTR_NOT_NULL(new_udat);
- CU_ASSERT(odp_packet_user_area_size(new_pkt) ==
- sizeof(struct udata_struct));
- CU_ASSERT(memcmp(new_udat, &test_packet_udata,
- sizeof(struct udata_struct)) == 0);
+ if (uarea_size) {
+ /* Verify that user metadata has been preserved */
+ udat = odp_packet_user_area(new_pkt);
+
+ CU_ASSERT_FATAL(udat != NULL);
+ CU_ASSERT(odp_packet_user_area_size(new_pkt) >= uarea_size);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, uarea_size) == 0);
+ }
pkt = new_pkt;
@@ -1422,13 +1503,14 @@ static void packet_test_add_rem_data(void)
CU_ASSERT(odp_packet_len(new_pkt) == pkt_len - add_len);
CU_ASSERT(odp_packet_user_ptr(new_pkt) == usr_ptr);
- /* Verify that user metadata has been preserved */
- new_udat = odp_packet_user_area(new_pkt);
- CU_ASSERT_PTR_NOT_NULL(new_udat);
- CU_ASSERT(odp_packet_user_area_size(new_pkt) ==
- sizeof(struct udata_struct));
- CU_ASSERT(memcmp(new_udat, &test_packet_udata,
- sizeof(struct udata_struct)) == 0);
+ if (uarea_size) {
+ /* Verify that user metadata has been preserved */
+ udat = odp_packet_user_area(new_pkt);
+
+ CU_ASSERT(udat != NULL);
+ CU_ASSERT(odp_packet_user_area_size(new_pkt) >= uarea_size);
+ CU_ASSERT(memcmp(udat, &test_packet_udata, uarea_size) == 0);
+ }
pkt = new_pkt;
@@ -1436,44 +1518,42 @@ free_packet:
odp_packet_free(pkt);
}
-#define COMPARE_HAS_INFLAG(p1, p2, flag) \
- CU_ASSERT(odp_packet_has_##flag(p1) == odp_packet_has_##flag(p2))
-
#define COMPARE_INFLAG(p1, p2, flag) \
CU_ASSERT(odp_packet_##flag(p1) == odp_packet_##flag(p2))
-static void _packet_compare_inflags(odp_packet_t pkt1, odp_packet_t pkt2)
+static void packet_compare_inflags(odp_packet_t pkt1, odp_packet_t pkt2)
{
- COMPARE_HAS_INFLAG(pkt1, pkt2, l2);
- COMPARE_HAS_INFLAG(pkt1, pkt2, l3);
- COMPARE_HAS_INFLAG(pkt1, pkt2, l4);
- COMPARE_HAS_INFLAG(pkt1, pkt2, eth);
- COMPARE_HAS_INFLAG(pkt1, pkt2, eth_bcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, eth_mcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, jumbo);
- COMPARE_HAS_INFLAG(pkt1, pkt2, vlan);
- COMPARE_HAS_INFLAG(pkt1, pkt2, vlan_qinq);
- COMPARE_HAS_INFLAG(pkt1, pkt2, arp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipv4);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipv6);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ip_bcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ip_mcast);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipfrag);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipopt);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ipsec);
- COMPARE_HAS_INFLAG(pkt1, pkt2, udp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, tcp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, sctp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, icmp);
- COMPARE_HAS_INFLAG(pkt1, pkt2, flow_hash);
- COMPARE_HAS_INFLAG(pkt1, pkt2, ts);
+ COMPARE_INFLAG(pkt1, pkt2, has_l2);
+ COMPARE_INFLAG(pkt1, pkt2, has_l3);
+ COMPARE_INFLAG(pkt1, pkt2, has_l4);
+ COMPARE_INFLAG(pkt1, pkt2, has_eth);
+ COMPARE_INFLAG(pkt1, pkt2, has_eth_bcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_eth_mcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_jumbo);
+ COMPARE_INFLAG(pkt1, pkt2, has_vlan);
+ COMPARE_INFLAG(pkt1, pkt2, has_vlan_qinq);
+ COMPARE_INFLAG(pkt1, pkt2, has_arp);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipv4);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipv6);
+ COMPARE_INFLAG(pkt1, pkt2, has_ip_bcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_ip_mcast);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipfrag);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipopt);
+ COMPARE_INFLAG(pkt1, pkt2, has_ipsec);
+ COMPARE_INFLAG(pkt1, pkt2, has_udp);
+ COMPARE_INFLAG(pkt1, pkt2, has_tcp);
+ COMPARE_INFLAG(pkt1, pkt2, has_sctp);
+ COMPARE_INFLAG(pkt1, pkt2, has_icmp);
+ COMPARE_INFLAG(pkt1, pkt2, user_flag);
+ COMPARE_INFLAG(pkt1, pkt2, has_flow_hash);
+ COMPARE_INFLAG(pkt1, pkt2, has_ts);
COMPARE_INFLAG(pkt1, pkt2, color);
COMPARE_INFLAG(pkt1, pkt2, drop_eligible);
COMPARE_INFLAG(pkt1, pkt2, shaper_len_adjust);
}
-static void _packet_compare_udata(odp_packet_t pkt1, odp_packet_t pkt2)
+static void packet_compare_udata(odp_packet_t pkt1, odp_packet_t pkt2)
{
uint32_t usize1 = odp_packet_user_area_size(pkt1);
uint32_t usize2 = odp_packet_user_area_size(pkt2);
@@ -1545,53 +1625,16 @@ static void packet_test_meta_data_copy(void)
pkt = odp_packet_alloc(pool, packet_len);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_has_l2(pkt) == 0);
- CU_ASSERT(odp_packet_has_l3(pkt) == 0);
- CU_ASSERT(odp_packet_has_l4(pkt) == 0);
- CU_ASSERT(odp_packet_has_eth(pkt) == 0);
- CU_ASSERT(odp_packet_has_eth_bcast(pkt) == 0);
- CU_ASSERT(odp_packet_has_eth_mcast(pkt) == 0);
- CU_ASSERT(odp_packet_has_jumbo(pkt) == 0);
- CU_ASSERT(odp_packet_has_vlan(pkt) == 0);
- CU_ASSERT(odp_packet_has_vlan_qinq(pkt) == 0);
- CU_ASSERT(odp_packet_has_arp(pkt) == 0);
- CU_ASSERT(odp_packet_has_ipv4(pkt) == 0);
- CU_ASSERT(odp_packet_has_ipv6(pkt) == 0);
- CU_ASSERT(odp_packet_has_ip_bcast(pkt) == 0);
- CU_ASSERT(odp_packet_has_ip_mcast(pkt) == 0);
- CU_ASSERT(odp_packet_has_ipfrag(pkt) == 0);
- CU_ASSERT(odp_packet_has_ipopt(pkt) == 0);
- CU_ASSERT(odp_packet_has_ipsec(pkt) == 0);
- CU_ASSERT(odp_packet_has_udp(pkt) == 0);
- CU_ASSERT(odp_packet_has_tcp(pkt) == 0);
- CU_ASSERT(odp_packet_has_sctp(pkt) == 0);
- CU_ASSERT(odp_packet_has_icmp(pkt) == 0);
+
+ packet_check_inflags(pkt, 0);
+
CU_ASSERT(odp_packet_input(pkt) == ODP_PKTIO_INVALID);
CU_ASSERT(odp_packet_l3_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
CU_ASSERT(odp_packet_l4_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
CU_ASSERT(odp_packet_payload_offset(pkt) == ODP_PACKET_OFFSET_INVALID);
- odp_packet_has_l2_set(pkt, 1);
- odp_packet_has_l3_set(pkt, 1);
- odp_packet_has_l4_set(pkt, 1);
- odp_packet_has_eth_set(pkt, 1);
- odp_packet_has_eth_bcast_set(pkt, 1);
- odp_packet_has_eth_mcast_set(pkt, 1);
- odp_packet_has_jumbo_set(pkt, 1);
- odp_packet_has_vlan_set(pkt, 1);
- odp_packet_has_vlan_qinq_set(pkt, 1);
- odp_packet_has_arp_set(pkt, 1);
- odp_packet_has_ipv4_set(pkt, 1);
- odp_packet_has_ipv6_set(pkt, 1);
- odp_packet_has_ip_bcast_set(pkt, 1);
- odp_packet_has_ip_mcast_set(pkt, 1);
- odp_packet_has_ipfrag_set(pkt, 1);
- odp_packet_has_ipopt_set(pkt, 1);
- odp_packet_has_ipsec_set(pkt, 1);
- odp_packet_has_udp_set(pkt, 1);
- odp_packet_has_tcp_set(pkt, 1);
- odp_packet_has_sctp_set(pkt, 1);
- odp_packet_has_icmp_set(pkt, 1);
+ packet_set_inflags(pkt, 1);
+ packet_check_inflags(pkt, 1);
odp_packet_input_set(pkt, pktio);
odp_packet_user_ptr_set(pkt, (void *)(uintptr_t)0xdeadbeef);
@@ -1609,7 +1652,7 @@ static void packet_test_meta_data_copy(void)
copy = odp_packet_copy(pkt, pool);
CU_ASSERT_FATAL(copy != ODP_PACKET_INVALID);
- _packet_compare_inflags(pkt, copy);
+ packet_compare_inflags(pkt, copy);
CU_ASSERT(odp_packet_input(copy) == pktio);
CU_ASSERT(odp_packet_user_ptr(copy) == (void *)(uintptr_t)0xdeadbeef);
CU_ASSERT(odp_packet_l2_offset(copy) == 20);
@@ -1633,69 +1676,73 @@ static void packet_test_meta_data_copy(void)
static void packet_test_copy(void)
{
odp_packet_t pkt;
- odp_packet_t pkt_copy, pkt_part;
+ odp_packet_t pkt_part;
odp_pool_param_t param;
- odp_pool_t pool, pool_double_uarea, pool_no_uarea;
- uint32_t i, plen, src_offset, dst_offset;
- uint32_t seg_len = 0;
+ odp_pool_t pool, pool_min_uarea, pool_large_uarea;
void *pkt_data;
+ uint32_t i, plen, src_offset, dst_offset, uarea_size;
+ uint32_t seg_len = 0;
memcpy(&param, &default_param, sizeof(odp_pool_param_t));
param.pkt.uarea_size = 0;
- pool_no_uarea = odp_pool_create("no_uarea", &param);
- CU_ASSERT_FATAL(pool_no_uarea != ODP_POOL_INVALID);
+ pool_min_uarea = odp_pool_create("min_uarea", &param);
+ CU_ASSERT_FATAL(pool_min_uarea != ODP_POOL_INVALID);
+
+ uarea_size = 2 * sizeof(struct udata_struct);
+ if (uarea_size > pool_capa.pkt.max_uarea_size)
+ uarea_size = pool_capa.pkt.max_uarea_size;
+
+ param.pkt.uarea_size = uarea_size;
+
+ pool_large_uarea = odp_pool_create("large_uarea", &param);
+ CU_ASSERT_FATAL(pool_large_uarea != ODP_POOL_INVALID);
+
+ /* Pool with minimal user area */
+ pkt = odp_packet_copy(test_packet, pool_min_uarea);
+ if (pkt != ODP_PACKET_INVALID) {
+ /* Pool provides enough user area even though zero was requested */
+ CU_ASSERT(odp_packet_user_area_size(pkt) >= sizeof(struct udata_struct));
- param.pkt.uarea_size = 2 * sizeof(struct udata_struct);
- pool_double_uarea = odp_pool_create("double_uarea", &param);
- CU_ASSERT_FATAL(pool_double_uarea != ODP_POOL_INVALID);
+ packet_compare_inflags(pkt, test_packet);
+ packet_compare_udata(pkt, test_packet);
+ packet_compare_data(pkt, test_packet);
- pkt = odp_packet_copy(test_packet, pool_no_uarea);
- CU_ASSERT(pkt == ODP_PACKET_INVALID);
- if (pkt != ODP_PACKET_INVALID)
odp_packet_free(pkt);
+ }
+ /* The same pool */
pkt = odp_packet_copy(test_packet, odp_packet_pool(test_packet));
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt != test_packet);
+ CU_ASSERT(odp_packet_pool(pkt) == odp_packet_pool(test_packet));
+ CU_ASSERT(odp_packet_user_area(pkt) != odp_packet_user_area(test_packet));
+ CU_ASSERT(odp_packet_user_area_size(pkt) == odp_packet_user_area_size(test_packet));
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(test_packet));
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(test_packet));
+
+ packet_compare_inflags(pkt, test_packet);
+ packet_compare_udata(pkt, test_packet);
packet_compare_data(pkt, test_packet);
- pool = odp_packet_pool(pkt);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- pkt_copy = odp_packet_copy(pkt, pool);
- CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
-
- CU_ASSERT(pkt != pkt_copy);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_copy));
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
-
- _packet_compare_inflags(pkt, pkt_copy);
- packet_compare_data(pkt, pkt_copy);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- odp_packet_user_area_size(test_packet));
- _packet_compare_udata(pkt, pkt_copy);
- odp_packet_free(pkt_copy);
+
odp_packet_free(pkt);
- pkt = odp_packet_copy(test_packet, pool_double_uarea);
+ /* Pool with larger user area */
+ pkt = odp_packet_copy(test_packet, pool_large_uarea);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(pkt != test_packet);
+ CU_ASSERT(odp_packet_pool(pkt) == pool_large_uarea);
+ CU_ASSERT(odp_packet_user_area(pkt) != odp_packet_user_area(test_packet));
+ CU_ASSERT(odp_packet_user_area_size(pkt) >= uarea_size);
+ CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(test_packet));
+ CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(test_packet));
+
+ packet_compare_inflags(pkt, test_packet);
+ packet_compare_udata(pkt, test_packet);
packet_compare_data(pkt, test_packet);
- pool = odp_packet_pool(pkt);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- pkt_copy = odp_packet_copy(pkt, pool);
- CU_ASSERT_FATAL(pkt_copy != ODP_PACKET_INVALID);
-
- CU_ASSERT(pkt != pkt_copy);
- CU_ASSERT(odp_packet_data(pkt) != odp_packet_data(pkt_copy));
- CU_ASSERT(odp_packet_len(pkt) == odp_packet_len(pkt_copy));
-
- _packet_compare_inflags(pkt, pkt_copy);
- packet_compare_data(pkt, pkt_copy);
- CU_ASSERT(odp_packet_user_area_size(pkt) ==
- 2 * odp_packet_user_area_size(test_packet));
- _packet_compare_udata(pkt, pkt_copy);
- _packet_compare_udata(pkt, test_packet);
- odp_packet_free(pkt_copy);
/* Now test copy_part */
+ pool = pool_large_uarea;
pkt_part = odp_packet_copy_part(pkt, 0, odp_packet_len(pkt) + 1, pool);
CU_ASSERT(pkt_part == ODP_PACKET_INVALID);
pkt_part = odp_packet_copy_part(pkt, odp_packet_len(pkt), 1, pool);
@@ -1743,8 +1790,8 @@ static void packet_test_copy(void)
odp_packet_free(pkt_part);
odp_packet_free(pkt);
- CU_ASSERT(odp_pool_destroy(pool_no_uarea) == 0);
- CU_ASSERT(odp_pool_destroy(pool_double_uarea) == 0);
+ CU_ASSERT(odp_pool_destroy(pool_min_uarea) == 0);
+ CU_ASSERT(odp_pool_destroy(pool_large_uarea) == 0);
}
static void packet_test_copydata(void)
@@ -3049,6 +3096,19 @@ static void packet_vector_test_alloc_free(void)
CU_ASSERT(odp_packet_vector_to_u64(pktv) !=
odp_packet_vector_to_u64(ODP_PACKET_VECTOR_INVALID));
+ /* User flag should be initially zero */
+ CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+ odp_packet_vector_user_flag_set(pktv, 1);
+ CU_ASSERT(odp_packet_vector_user_flag(pktv) != 0);
+ odp_packet_vector_user_flag_set(pktv, 0);
+ CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+
+ /* Free with flag still set, alloc should clear it. */
+ odp_packet_vector_user_flag_set(pktv, 1);
+ odp_packet_vector_free(pktv);
+ pktv = odp_packet_vector_alloc(pool);
+ CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
+
/* Since it was only one buffer pool, more vector packets can't be
* allocated.
*/
@@ -3111,6 +3171,60 @@ static void packet_vector_basic_test(void)
CU_ASSERT(odp_packet_vector_valid(pktv_default) == 1);
}
+static void packet_vector_test_user_area(void)
+{
+ odp_pool_param_t param;
+ odp_pool_t pool;
+ uint32_t i;
+ void *addr;
+ uint32_t num = 10;
+ void *prev = NULL;
+ uint32_t num_alloc = 0;
+ uint32_t size = 1024;
+ const uint32_t max_size = pool_capa.vector.max_uarea_size;
+
+ if (max_size == 0) {
+ ODPH_DBG("Packet vector user area not supported\n");
+ return;
+ }
+
+ if (size > max_size)
+ size = max_size;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_VECTOR;
+ param.vector.num = num;
+ param.vector.max_size = pool_capa.vector.max_size;
+ param.vector.uarea_size = size;
+
+ odp_packet_vector_t pktv[num];
+
+ pool = odp_pool_create("test_user_area", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ pktv[i] = odp_packet_vector_alloc(pool);
+
+ if (pktv[i] == ODP_PACKET_VECTOR_INVALID)
+ break;
+ num_alloc++;
+
+ addr = odp_packet_vector_user_area(pktv[i]);
+ CU_ASSERT_FATAL(addr != NULL);
+ CU_ASSERT(prev != addr);
+
+ prev = addr;
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+
+ for (i = 0; i < num_alloc; i++)
+ odp_packet_vector_free(pktv[i]);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
static int packet_vector_suite_init(void)
{
uint32_t num_pkt = PKT_VEC_PACKET_NUM;
@@ -3278,27 +3392,33 @@ static void packet_test_user_area(void)
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
pkt = odp_packet_alloc(pool, param.pkt.len);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(odp_packet_user_area(pkt) == NULL);
- CU_ASSERT(odp_packet_user_area_size(pkt) == 0);
+ CU_ASSERT(odp_packet_user_area_size(pkt) <= pool_capa.pkt.max_uarea_size);
+ if (odp_packet_user_area_size(pkt)) {
+ /* CU_ASSERT needs these extra braces */
+ CU_ASSERT(odp_packet_user_area(pkt) != NULL);
+ } else {
+ CU_ASSERT(odp_packet_user_area(pkt) == NULL);
+ }
+
odp_packet_free(pkt);
CU_ASSERT(odp_pool_destroy(pool) == 0);
+ if (pool_capa.pkt.max_uarea_size == 0)
+ return;
+
param.pkt.uarea_size = 1;
pool = odp_pool_create("one_uarea", &param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
pkt = odp_packet_alloc(pool, param.pkt.len);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
CU_ASSERT_FATAL(odp_packet_user_area(pkt) != NULL);
- CU_ASSERT(odp_packet_user_area_size(pkt) == 1);
+ CU_ASSERT(odp_packet_user_area_size(pkt) >= 1);
*(char *)odp_packet_user_area(pkt) = 0;
CU_ASSERT_FATAL(odp_packet_is_valid(pkt) == 1);
odp_packet_free(pkt);
CU_ASSERT(odp_pool_destroy(pool) == 0);
- if (pool_capa.pkt.max_uarea_size)
- param.pkt.uarea_size = pool_capa.pkt.max_uarea_size;
- else
- param.pkt.uarea_size = 512;
+ param.pkt.uarea_size = pool_capa.pkt.max_uarea_size;
pool = odp_pool_create("max_uarea", &param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
pkt = odp_packet_alloc(pool, param.pkt.len);
@@ -4309,6 +4429,7 @@ odp_testinfo_t packet_vector_parse_suite[] = {
ODP_TEST_INFO(packet_vector_basic_test),
ODP_TEST_INFO(packet_vector_test_alloc_free),
ODP_TEST_INFO(packet_vector_test_tbl),
+ ODP_TEST_INFO(packet_vector_test_user_area),
ODP_TEST_INFO(packet_vector_test_event_conversion),
ODP_TEST_INFO_NULL,
};
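
Note: several packet.c changes above replace exact user-area size checks with '>=' comparisons, since the requested pkt.uarea_size appears to act as a minimum that an implementation may round up. A small sketch written against that relaxed contract (pool name and sizes are illustrative; error handling is omitted):

/* Sketch: treat the requested user area size as a minimum. */
odp_pool_param_t param;
odp_pool_t pool;
odp_packet_t pkt;

odp_pool_param_init(&param);
param.type = ODP_POOL_PACKET;
param.pkt.num = 32;
param.pkt.len = 256;
param.pkt.uarea_size = 16;	/* request: treated as a minimum */

pool = odp_pool_create("uarea_min_sketch", &param);
pkt = odp_packet_alloc(pool, 256);

if (pkt != ODP_PACKET_INVALID) {
	/* The area may be larger than requested, so rely only on the first 16 bytes. */
	if (odp_packet_user_area_size(pkt) >= 16)
		memset(odp_packet_user_area(pkt), 0, 16);
	odp_packet_free(pkt);
}
odp_pool_destroy(pool);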
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 84fad32b6..2eb1a5a28 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -695,6 +695,7 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
pktv = odp_packet_vector_from_event(evt_tbl[i]);
pktv_len = odp_packet_vector_tbl(pktv, &pkts);
+ CU_ASSERT(odp_packet_vector_user_flag(pktv) == 0);
/* Make sure too many packets are not received */
if (num_pkts + pktv_len > num) {
@@ -996,6 +997,7 @@ static void pktio_txrx_multi(pktio_info_t *pktio_info_a,
CU_ASSERT(odp_packet_has_udp(pkt));
}
+ CU_ASSERT(odp_packet_user_flag(pkt) == 0);
CU_ASSERT(odp_packet_user_ptr(pkt) == NULL);
CU_ASSERT(odp_packet_cls_mark(pkt) == 0);
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index c5f57d17f..e8bf38c9d 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -51,17 +51,21 @@ static void test_param_init(uint8_t fill)
memset(&param, fill, sizeof(param));
odp_pool_param_init(&param);
+ CU_ASSERT(param.buf.uarea_size == 0);
CU_ASSERT(param.buf.cache_size >= global_pool_capa.buf.min_cache_size &&
param.buf.cache_size <= global_pool_capa.buf.max_cache_size);
CU_ASSERT(param.pkt.max_num == 0);
CU_ASSERT(param.pkt.num_subparam == 0);
+ CU_ASSERT(param.pkt.uarea_size == 0);
CU_ASSERT(param.pkt.cache_size >= global_pool_capa.pkt.min_cache_size &&
param.pkt.cache_size <= global_pool_capa.pkt.max_cache_size);
+ CU_ASSERT(param.tmo.uarea_size == 0);
CU_ASSERT(param.tmo.cache_size >= global_pool_capa.tmo.min_cache_size &&
param.tmo.cache_size <= global_pool_capa.tmo.max_cache_size);
+ CU_ASSERT(param.vector.uarea_size == 0);
CU_ASSERT(param.vector.cache_size >= global_pool_capa.vector.min_cache_size &&
param.vector.cache_size <= global_pool_capa.vector.max_cache_size);
}
@@ -915,7 +919,7 @@ static void pool_test_create_after_fork(void)
odp_atomic_init_u32(&global_mem->index, 0);
/* Fork here */
- odp_cunit_thread_create(num, run_pool_test_create_after_fork, NULL, 0);
+ odp_cunit_thread_create(num, run_pool_test_create_after_fork, NULL, 0, 0);
/* Wait until thread 0 has created the test pool */
odp_barrier_wait(&global_mem->init_barrier);
@@ -1027,7 +1031,7 @@ static void pool_test_create_max_pkt_pools(void)
CU_ASSERT(num_shm == shm_capa.max_blocks);
/* Create maximum number of packet pools */
- if (global_pool_capa.pkt.max_uarea_size && global_pool_capa.pkt.max_uarea_size < uarea_size)
+ if (uarea_size > global_pool_capa.pkt.max_uarea_size)
uarea_size = global_pool_capa.pkt.max_uarea_size;
odp_pool_param_init(&param);
@@ -1656,6 +1660,8 @@ static void packet_pool_ext_alloc(int len_test)
if (pkt[i] == ODP_PACKET_INVALID)
break;
+ CU_ASSERT(odp_packet_is_valid(pkt[i]) == 1);
+ CU_ASSERT(odp_event_is_valid(odp_packet_to_event(pkt[i])) == 1);
CU_ASSERT(odp_packet_len(pkt[i]) == pkt_len);
CU_ASSERT(odp_packet_headroom(pkt[i]) >= headroom);
buf_index = find_buf(pkt[i], buf, num_buf, head_offset);
@@ -1683,7 +1689,7 @@ static void packet_pool_ext_alloc(int len_test)
if (uarea_size) {
CU_ASSERT(odp_packet_user_area(pkt[i]) != NULL);
- CU_ASSERT(odp_packet_user_area_size(pkt[i]) == uarea_size);
+ CU_ASSERT(odp_packet_user_area_size(pkt[i]) >= uarea_size);
}
/* Check that application header content has not changed */
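
The pool changes tighten defaults and limits around user areas: odp_pool_param_init() now zeroes uarea_size for every pool type, the max-pools test clamps its requested size against pkt.max_uarea_size unconditionally, and external packet pools may report a user area larger than requested. A short sketch of the clamping pattern the updated test uses (fragment, values illustrative):

    odp_pool_capability_t capa;
    odp_pool_param_t param;
    uint32_t uarea_size = 512;

    if (odp_pool_capability(&capa) == 0 && uarea_size > capa.pkt.max_uarea_size)
            uarea_size = capa.pkt.max_uarea_size;

    odp_pool_param_init(&param);
    param.type = ODP_POOL_PACKET;
    param.pkt.num = 64;
    param.pkt.len = 1024;
    param.pkt.uarea_size = uarea_size;
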
diff --git a/test/validation/api/queue/queue.c b/test/validation/api/queue/queue.c
index 8f1278f61..f661da075 100644
--- a/test/validation/api/queue/queue.c
+++ b/test/validation/api/queue/queue.c
@@ -547,7 +547,7 @@ static void test_pair(odp_nonblocking_t nonblocking,
/* Create one worker thread */
arg = globals;
- odp_cunit_thread_create(1, queue_pair_work_loop, &arg, 0);
+ odp_cunit_thread_create(1, queue_pair_work_loop, &arg, 0, 0);
/* Run this thread as the second thread */
CU_ASSERT(queue_pair_work_loop(globals) == 0);
@@ -1018,7 +1018,7 @@ static void multithread_test(odp_nonblocking_t nonblocking)
CU_ASSERT(alloc_and_enqueue(queue, pool, num) == num);
arg = globals;
- odp_cunit_thread_create(num_workers, queue_test_worker, &arg, 0);
+ odp_cunit_thread_create(num_workers, queue_test_worker, &arg, 0, 0);
/* Wait for worker threads to terminate */
odp_cunit_thread_join(num_workers);
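
Throughout the validation tests, odp_cunit_thread_create() calls gain a fifth argument. Judging from the thread.c hunk later in this patch (a per-thread argument array plus an ordered thread ID check), the fourth argument appears to select per-thread arguments and the new fifth one synchronized startup; existing callers pass 0, 0 to keep the previous behaviour. A hedged sketch of the two call styles, with worker_fn and the argument names purely illustrative:

    /* Shared argument, unsynchronized start (previous behaviour) */
    odp_cunit_thread_create(num_workers, worker_fn, &shared_arg, 0, 0);

    /* Per-thread arguments and synchronized start, as used by thread.c */
    void *args[num_workers];

    for (int i = 0; i < num_workers; i++)
            args[i] = &per_thread_arg[i];

    odp_cunit_thread_create(num_workers, worker_fn, args, 1, 1);
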
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 878e99e07..cd9ad1ac1 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -1422,7 +1422,7 @@ static void chaos_run(unsigned int qtype)
num_thr = globals->num_workers - 1;
arg_ptr = args;
if (num_thr > 0)
- odp_cunit_thread_create(num_thr, chaos_thread, &arg_ptr, 0);
+ odp_cunit_thread_create(num_thr, chaos_thread, &arg_ptr, 0, 0);
chaos_thread(args);
@@ -1865,7 +1865,7 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
num = globals->num_workers - 1;
arg_ptr = args;
if (num > 0)
- odp_cunit_thread_create(num, schedule_common_, &arg_ptr, 0);
+ odp_cunit_thread_create(num, schedule_common_, &arg_ptr, 0, 0);
schedule_common_(args);
@@ -2426,7 +2426,7 @@ static void scheduler_test_order_wait_2_threads(void)
odp_atomic_init_u32(&globals->order_wait.helper_ready, 0);
odp_atomic_init_u32(&globals->order_wait.helper_active, 0);
- ret = odp_cunit_thread_create(num, order_wait_helper, NULL, 0);
+ ret = odp_cunit_thread_create(num, order_wait_helper, NULL, 0, 0);
CU_ASSERT_FATAL(ret == num);
/* Send an event to the helper thread */
@@ -2677,7 +2677,7 @@ static void scheduler_test_sched_and_plain(odp_schedule_sync_t sync)
num = globals->num_workers - 1;
arg_ptr = args;
if (num > 0)
- odp_cunit_thread_create(num, sched_and_plain_thread, &arg_ptr, 0);
+ odp_cunit_thread_create(num, sched_and_plain_thread, &arg_ptr, 0, 0);
sched_and_plain_thread(args);
@@ -2980,7 +2980,7 @@ static void scheduler_fifo_mt(odp_schedule_sync_t sync, int multi)
arg[i] = i;
if (num_thr > 1)
- odp_cunit_thread_create(num_thr - 1, scheduler_fifo_test, (void **)&arg[1], 1);
+ odp_cunit_thread_create(num_thr - 1, scheduler_fifo_test, (void **)&arg[1], 1, 0);
/* Main thread runs as thread 0 */
scheduler_fifo_test(0);
@@ -3126,7 +3126,7 @@ static void scheduler_test_atomicity(void)
num = globals->num_workers - 1;
arg_ptr = args;
if (num > 0)
- odp_cunit_thread_create(num, atomicity_test_run, &arg_ptr, 0);
+ odp_cunit_thread_create(num, atomicity_test_run, &arg_ptr, 0, 0);
atomicity_test_run(args);
diff --git a/test/validation/api/shmem/shmem.c b/test/validation/api/shmem/shmem.c
index f68021310..a10e7d1d9 100644
--- a/test/validation/api/shmem/shmem.c
+++ b/test/validation/api/shmem/shmem.c
@@ -207,7 +207,7 @@ static void shmem_test_multi_thread(void)
num = MAX_WORKERS;
odp_barrier_init(&shared_test_data->test_barrier1, num);
- odp_cunit_thread_create(num, run_test_basic_thread, NULL, 0);
+ odp_cunit_thread_create(num, run_test_basic_thread, NULL, 0, 0);
CU_ASSERT(odp_cunit_thread_join(num) >= 0);
odp_shm_print(shm);
@@ -561,7 +561,7 @@ static void shmem_test_reserve_after_fork(void)
odp_barrier_init(&glob_data->test_barrier2, num + 1);
odp_atomic_store_u32(&glob_data->index, 0);
- odp_cunit_thread_create(num, run_test_reserve_after_fork, NULL, 0);
+ odp_cunit_thread_create(num, run_test_reserve_after_fork, NULL, 0, 0);
/* wait until all threads have made their shm_reserve: */
odp_barrier_wait(&glob_data->test_barrier1);
@@ -756,7 +756,7 @@ static void shmem_test_singleva_after_fork(void)
odp_barrier_init(&glob_data->test_barrier4, num + 1);
odp_atomic_store_u32(&glob_data->index, 0);
- odp_cunit_thread_create(num, run_test_singleva_after_fork, NULL, 0);
+ odp_cunit_thread_create(num, run_test_singleva_after_fork, NULL, 0, 0);
/* wait until all threads have made their shm_reserve: */
odp_barrier_wait(&glob_data->test_barrier1);
@@ -994,7 +994,7 @@ static void shmem_test_stress(void)
glob_data->stress[i].state = STRESS_FREE;
/* create threads */
- odp_cunit_thread_create(num, run_test_stress, NULL, 0);
+ odp_cunit_thread_create(num, run_test_stress, NULL, 0, 0);
/* wait for all thread endings: */
CU_ASSERT(odp_cunit_thread_join(num) >= 0);
diff --git a/test/validation/api/stash/stash.c b/test/validation/api/stash/stash.c
index f1de7ec00..f4ddfa2e1 100644
--- a/test/validation/api/stash/stash.c
+++ b/test/validation/api/stash/stash.c
@@ -24,6 +24,7 @@
#define CACHE_SIZE 8
#define BURST 32
+#define BATCH 16
#define MAX_RETRY 1024
#define RETRY_MSEC 100
@@ -118,6 +119,8 @@ static void stash_capability(void)
CU_ASSERT(capa.max_stashes > 0);
CU_ASSERT(capa.max_num_obj > 0);
CU_ASSERT(capa.max_obj_size >= sizeof(uint32_t));
+ CU_ASSERT(capa.max_get_batch >= 1);
+ CU_ASSERT(capa.max_put_batch >= 1);
memset(&capa, 0, sizeof(odp_stash_capability_t));
ret = odp_stash_capability(&capa, ODP_STASH_TYPE_FIFO);
@@ -126,6 +129,8 @@ static void stash_capability(void)
if (ret == 0 && capa.max_stashes) {
CU_ASSERT(capa.max_num_obj > 0);
CU_ASSERT(capa.max_obj_size >= sizeof(uint32_t));
+ CU_ASSERT(capa.max_get_batch >= 1);
+ CU_ASSERT(capa.max_put_batch >= 1);
}
}
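
The capability hunk above adds the new batch limits to the stash capability checks. An application sizing its transfers would clamp against both limits, much as the reworked test functions below do (fragment, batch size illustrative):

    odp_stash_capability_t capa;
    uint32_t batch = 16;

    if (odp_stash_capability(&capa, ODP_STASH_TYPE_DEFAULT) == 0) {
            if (batch > capa.max_put_batch)
                    batch = capa.max_put_batch;
            if (batch > capa.max_get_batch)
                    batch = capa.max_get_batch;
    }
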
@@ -444,13 +449,24 @@ static void stash_stats_u32(void)
CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
}
-static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
+static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op, int batch)
{
odp_stash_t stash;
odp_stash_param_t param;
int32_t i, ret, retry, num_left;
- int32_t num;
+ int32_t num, max_burst;
void *input, *output;
+
+ if (batch) {
+ CU_ASSERT_FATAL(global.capa_default.max_get_batch >= 1);
+ CU_ASSERT_FATAL(global.capa_default.max_put_batch >= 1);
+
+ if (burst > (int32_t)global.capa_default.max_get_batch)
+ burst = global.capa_default.max_get_batch;
+ if (burst > (int32_t)global.capa_default.max_put_batch)
+ burst = global.capa_default.max_put_batch;
+ }
+
uint64_t input_u64[burst];
uint64_t output_u64[burst + 2];
uint32_t input_u32[burst];
@@ -499,19 +515,38 @@ static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
retry = MAX_RETRY;
num_left = num;
- while (num_left) {
- if (op == STASH_GEN)
- ret = odp_stash_put(stash, input, burst);
- else if (op == STASH_U32)
- ret = odp_stash_put_u32(stash, input_u32, burst);
- else if (op == STASH_U64)
- ret = odp_stash_put_u64(stash, input_u64, burst);
- else if (op == STASH_PTR)
- ret = odp_stash_put_ptr(stash, input, burst);
- else
+ max_burst = burst;
+ while (num_left > 0) {
+ if (op == STASH_GEN) {
+ if (batch)
+ ret = odp_stash_put_batch(stash, input, burst);
+ else
+ ret = odp_stash_put(stash, input, burst);
+ } else if (op == STASH_U32) {
+ if (batch)
+ ret = odp_stash_put_u32_batch(stash, input_u32, burst);
+ else
+ ret = odp_stash_put_u32(stash, input_u32, burst);
+ } else if (op == STASH_U64) {
+ if (batch)
+ ret = odp_stash_put_u64_batch(stash, input_u64, burst);
+ else
+ ret = odp_stash_put_u64(stash, input_u64, burst);
+ } else if (op == STASH_PTR) {
+ if (batch)
+ ret = odp_stash_put_ptr_batch(stash, input, burst);
+ else
+ ret = odp_stash_put_ptr(stash, input, burst);
+ } else {
ret = -1;
+ }
CU_ASSERT_FATAL(ret >= 0);
CU_ASSERT_FATAL(ret <= burst);
+ if (batch) {
+ CU_ASSERT(ret == 0 || ret == burst);
+ if (num_left - ret < burst)
+ burst = num_left - ret;
+ }
if (ret) {
num_left -= ret;
@@ -522,9 +557,10 @@ static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
}
}
+ burst = max_burst;
retry = MAX_RETRY;
num_left = num;
- while (num_left) {
+ while (num_left > 0) {
memset(output, 0, burst * size);
/* Init first and last array element for under-/overflow checking */
@@ -541,16 +577,29 @@ static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
output_u8[0] = MAGIC_U8;
output_u8[burst + 1] = MAGIC_U8;
}
- if (op == STASH_GEN)
- ret = odp_stash_get(stash, output, burst);
- else if (op == STASH_U32)
- ret = odp_stash_get_u32(stash, &output_u32[1], burst);
- else if (op == STASH_U64)
- ret = odp_stash_get_u64(stash, &output_u64[1], burst);
- else if (op == STASH_PTR)
- ret = odp_stash_get_ptr(stash, output, burst);
- else
+ if (op == STASH_GEN) {
+ if (batch)
+ ret = odp_stash_get_batch(stash, output, burst);
+ else
+ ret = odp_stash_get(stash, output, burst);
+ } else if (op == STASH_U32) {
+ if (batch)
+ ret = odp_stash_get_u32_batch(stash, &output_u32[1], burst);
+ else
+ ret = odp_stash_get_u32(stash, &output_u32[1], burst);
+ } else if (op == STASH_U64) {
+ if (batch)
+ ret = odp_stash_get_u64_batch(stash, &output_u64[1], burst);
+ else
+ ret = odp_stash_get_u64(stash, &output_u64[1], burst);
+ } else if (op == STASH_PTR) {
+ if (batch)
+ ret = odp_stash_get_ptr_batch(stash, output, burst);
+ else
+ ret = odp_stash_get_ptr(stash, output, burst);
+ } else {
ret = -1;
+ }
CU_ASSERT_FATAL(ret >= 0);
CU_ASSERT_FATAL(ret <= burst);
@@ -568,6 +617,12 @@ static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
CU_ASSERT_FATAL(output_u8[burst + 1] == MAGIC_U8);
}
+ if (batch) {
+ CU_ASSERT(ret == 0 || ret == burst);
+ if (num_left - ret < burst)
+ burst = num_left - ret;
+ }
+
if (ret) {
for (i = 0; i < ret; i++) {
if (size == sizeof(uint64_t)) {
@@ -597,13 +652,24 @@ static void stash_default_put(uint32_t size, int32_t burst, stash_op_t op)
CU_ASSERT_FATAL(odp_stash_destroy(stash) == 0);
}
-static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op)
+static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op, int batch)
{
odp_stash_t stash;
odp_stash_param_t param;
int32_t i, ret, retry, num_left;
- int32_t num;
+ int32_t num, max_burst;
void *input, *output;
+
+ if (batch) {
+ CU_ASSERT_FATAL(global.capa_fifo.max_get_batch >= 1);
+ CU_ASSERT_FATAL(global.capa_fifo.max_put_batch >= 1);
+
+ if (burst > (int32_t)global.capa_fifo.max_get_batch)
+ burst = global.capa_fifo.max_get_batch;
+ if (burst > (int32_t)global.capa_fifo.max_put_batch)
+ burst = global.capa_fifo.max_put_batch;
+ }
+
uint64_t input_u64[burst];
uint64_t output_u64[burst + 2];
uint32_t input_u32[burst];
@@ -645,7 +711,8 @@ static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op)
retry = MAX_RETRY;
num_left = num;
- while (num_left) {
+ max_burst = burst;
+ while (num_left > 0) {
for (i = 0; i < burst; i++) {
if (size == sizeof(uint64_t))
input_u64[i] = VAL_U64 + num_left - i;
@@ -656,20 +723,40 @@ static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op)
else
input_u8[i] = VAL_U8 + num_left - i;
}
- if (op == STASH_GEN)
- ret = odp_stash_put(stash, input, burst);
- else if (op == STASH_U32)
- ret = odp_stash_put_u32(stash, input_u32, burst);
- else if (op == STASH_U64)
- ret = odp_stash_put_u64(stash, input_u64, burst);
- else if (op == STASH_PTR)
- ret = odp_stash_put_ptr(stash, input, burst);
- else
+ if (op == STASH_GEN) {
+ if (batch)
+ ret = odp_stash_put_batch(stash, input, burst);
+ else
+ ret = odp_stash_put(stash, input, burst);
+ } else if (op == STASH_U32) {
+ if (batch)
+ ret = odp_stash_put_u32_batch(stash, input_u32, burst);
+ else
+ ret = odp_stash_put_u32(stash, input_u32, burst);
+ } else if (op == STASH_U64) {
+ if (batch)
+ ret = odp_stash_put_u64_batch(stash, input_u64, burst);
+ else
+ ret = odp_stash_put_u64(stash, input_u64, burst);
+
+ } else if (op == STASH_PTR) {
+ if (batch)
+ ret = odp_stash_put_ptr_batch(stash, input, burst);
+ else
+ ret = odp_stash_put_ptr(stash, input, burst);
+ } else {
ret = -1;
+ }
CU_ASSERT_FATAL(ret >= 0);
+ CU_ASSERT_FATAL(ret <= burst);
+
+ if (batch) {
+ CU_ASSERT(ret == 0 || ret == burst);
+ if (num_left - ret < burst)
+ burst = num_left - ret;
+ }
if (ret) {
- CU_ASSERT_FATAL(ret <= burst);
num_left -= ret;
retry = MAX_RETRY;
} else {
@@ -678,9 +765,10 @@ static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op)
}
}
+ burst = max_burst;
retry = MAX_RETRY;
num_left = num;
- while (num_left) {
+ while (num_left > 0) {
memset(output, 0, burst * size);
/* Init first and last array element for under-/overflow checking */
@@ -698,17 +786,31 @@ static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op)
output_u8[burst + 1] = MAGIC_U8;
}
- if (op == STASH_GEN)
- ret = odp_stash_get(stash, output, burst);
- else if (op == STASH_U32)
- ret = odp_stash_get_u32(stash, &output_u32[1], burst);
- else if (op == STASH_U64)
- ret = odp_stash_get_u64(stash, &output_u64[1], burst);
- else if (op == STASH_PTR)
- ret = odp_stash_get_ptr(stash, output, burst);
- else
+ if (op == STASH_GEN) {
+ if (batch)
+ ret = odp_stash_get_batch(stash, output, burst);
+ else
+ ret = odp_stash_get(stash, output, burst);
+ } else if (op == STASH_U32) {
+ if (batch)
+ ret = odp_stash_get_u32_batch(stash, &output_u32[1], burst);
+ else
+ ret = odp_stash_get_u32(stash, &output_u32[1], burst);
+ } else if (op == STASH_U64) {
+ if (batch)
+ ret = odp_stash_get_u64_batch(stash, &output_u64[1], burst);
+ else
+ ret = odp_stash_get_u64(stash, &output_u64[1], burst);
+ } else if (op == STASH_PTR) {
+ if (batch)
+ ret = odp_stash_get_ptr_batch(stash, output, burst);
+ else
+ ret = odp_stash_get_ptr(stash, output, burst);
+ } else {
ret = -1;
+ }
CU_ASSERT_FATAL(ret >= 0);
+ CU_ASSERT_FATAL(ret <= burst);
if (size == sizeof(uint64_t)) {
CU_ASSERT_FATAL(output_u64[0] == MAGIC_U64);
@@ -724,8 +826,13 @@ static void stash_fifo_put(uint32_t size, int32_t burst, stash_op_t op)
CU_ASSERT_FATAL(output_u8[burst + 1] == MAGIC_U8);
}
+ if (batch) {
+ CU_ASSERT(ret == 0 || ret == burst);
+ if (num_left - ret < burst)
+ burst = num_left - ret;
+ }
+
if (ret) {
- CU_ASSERT_FATAL(ret <= burst);
for (i = 0; i < ret; i++) {
if (size == sizeof(uint64_t)) {
uint64_t val = VAL_U64 + num_left - i;
@@ -805,142 +912,282 @@ static int check_support_fifo(void)
static void stash_default_put_u64_1(void)
{
- stash_default_put(sizeof(uint64_t), 1, STASH_GEN);
+ stash_default_put(sizeof(uint64_t), 1, STASH_GEN, 0);
}
static void stash_default_put_u64_n(void)
{
- stash_default_put(sizeof(uint64_t), BURST, STASH_GEN);
+ stash_default_put(sizeof(uint64_t), BURST, STASH_GEN, 0);
}
static void stash_default_u64_put_u64_1(void)
{
- stash_default_put(sizeof(uint64_t), 1, STASH_U64);
+ stash_default_put(sizeof(uint64_t), 1, STASH_U64, 0);
}
static void stash_default_u64_put_u64_n(void)
{
- stash_default_put(sizeof(uint64_t), BURST, STASH_U64);
+ stash_default_put(sizeof(uint64_t), BURST, STASH_U64, 0);
}
static void stash_default_put_ptr_1(void)
{
- stash_default_put(sizeof(uintptr_t), 1, STASH_PTR);
+ stash_default_put(sizeof(uintptr_t), 1, STASH_PTR, 0);
}
static void stash_default_put_ptr_n(void)
{
- stash_default_put(sizeof(uintptr_t), BURST, STASH_PTR);
+ stash_default_put(sizeof(uintptr_t), BURST, STASH_PTR, 0);
+}
+
+static void stash_default_put_u64_1_batch(void)
+{
+ stash_default_put(sizeof(uint64_t), 1, STASH_GEN, 1);
+}
+
+static void stash_default_put_u64_n_batch(void)
+{
+ stash_default_put(sizeof(uint64_t), BATCH, STASH_GEN, 1);
+}
+
+static void stash_default_u64_put_u64_1_batch(void)
+{
+ stash_default_put(sizeof(uint64_t), 1, STASH_U64, 1);
+}
+
+static void stash_default_u64_put_u64_n_batch(void)
+{
+ stash_default_put(sizeof(uint64_t), BATCH, STASH_U64, 1);
+}
+
+static void stash_default_put_ptr_1_batch(void)
+{
+ stash_default_put(sizeof(uintptr_t), 1, STASH_PTR, 1);
+}
+
+static void stash_default_put_ptr_n_batch(void)
+{
+ stash_default_put(sizeof(uintptr_t), BATCH, STASH_PTR, 1);
}
static void stash_default_put_u32_1(void)
{
- stash_default_put(sizeof(uint32_t), 1, STASH_GEN);
+ stash_default_put(sizeof(uint32_t), 1, STASH_GEN, 0);
}
static void stash_default_put_u32_n(void)
{
- stash_default_put(sizeof(uint32_t), BURST, STASH_GEN);
+ stash_default_put(sizeof(uint32_t), BURST, STASH_GEN, 0);
}
static void stash_default_u32_put_u32_1(void)
{
- stash_default_put(sizeof(uint32_t), 1, STASH_U32);
+ stash_default_put(sizeof(uint32_t), 1, STASH_U32, 0);
}
static void stash_default_u32_put_u32_n(void)
{
- stash_default_put(sizeof(uint32_t), BURST, STASH_U32);
+ stash_default_put(sizeof(uint32_t), BURST, STASH_U32, 0);
}
static void stash_default_put_u16_1(void)
{
- stash_default_put(sizeof(uint16_t), 1, STASH_GEN);
+ stash_default_put(sizeof(uint16_t), 1, STASH_GEN, 0);
}
static void stash_default_put_u16_n(void)
{
- stash_default_put(sizeof(uint16_t), BURST, STASH_GEN);
+ stash_default_put(sizeof(uint16_t), BURST, STASH_GEN, 0);
}
static void stash_default_put_u8_1(void)
{
- stash_default_put(sizeof(uint8_t), 1, STASH_GEN);
+ stash_default_put(sizeof(uint8_t), 1, STASH_GEN, 0);
}
static void stash_default_put_u8_n(void)
{
- stash_default_put(sizeof(uint8_t), BURST, STASH_GEN);
+ stash_default_put(sizeof(uint8_t), BURST, STASH_GEN, 0);
+}
+
+static void stash_default_put_u32_1_batch(void)
+{
+ stash_default_put(sizeof(uint32_t), 1, STASH_GEN, 1);
+}
+
+static void stash_default_put_u32_n_batch(void)
+{
+ stash_default_put(sizeof(uint32_t), BATCH, STASH_GEN, 1);
+}
+
+static void stash_default_u32_put_u32_1_batch(void)
+{
+ stash_default_put(sizeof(uint32_t), 1, STASH_U32, 1);
+}
+
+static void stash_default_u32_put_u32_n_batch(void)
+{
+ stash_default_put(sizeof(uint32_t), BATCH, STASH_U32, 1);
+}
+
+static void stash_default_put_u16_1_batch(void)
+{
+ stash_default_put(sizeof(uint16_t), 1, STASH_GEN, 1);
+}
+
+static void stash_default_put_u16_n_batch(void)
+{
+ stash_default_put(sizeof(uint16_t), BATCH, STASH_GEN, 1);
+}
+
+static void stash_default_put_u8_1_batch(void)
+{
+ stash_default_put(sizeof(uint8_t), 1, STASH_GEN, 1);
+}
+
+static void stash_default_put_u8_n_batch(void)
+{
+ stash_default_put(sizeof(uint8_t), BATCH, STASH_GEN, 1);
}
static void stash_fifo_put_u64_1(void)
{
- stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN);
+ stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN, 0);
}
static void stash_fifo_put_u64_n(void)
{
- stash_fifo_put(sizeof(uint64_t), BURST, STASH_GEN);
+ stash_fifo_put(sizeof(uint64_t), BURST, STASH_GEN, 0);
}
static void stash_fifo_u64_put_u64_1(void)
{
- stash_fifo_put(sizeof(uint64_t), 1, STASH_U64);
+ stash_fifo_put(sizeof(uint64_t), 1, STASH_U64, 0);
}
static void stash_fifo_u64_put_u64_n(void)
{
- stash_fifo_put(sizeof(uint64_t), BURST, STASH_U64);
+ stash_fifo_put(sizeof(uint64_t), BURST, STASH_U64, 0);
}
static void stash_fifo_put_ptr_1(void)
{
- stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR);
+ stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR, 0);
}
static void stash_fifo_put_ptr_n(void)
{
- stash_fifo_put(sizeof(uintptr_t), BURST, STASH_PTR);
+ stash_fifo_put(sizeof(uintptr_t), BURST, STASH_PTR, 0);
}
static void stash_fifo_put_u32_1(void)
{
- stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN);
+ stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN, 0);
}
static void stash_fifo_put_u32_n(void)
{
- stash_fifo_put(sizeof(uint32_t), BURST, STASH_GEN);
+ stash_fifo_put(sizeof(uint32_t), BURST, STASH_GEN, 0);
}
static void stash_fifo_u32_put_u32_1(void)
{
- stash_fifo_put(sizeof(uint32_t), 1, STASH_U32);
+ stash_fifo_put(sizeof(uint32_t), 1, STASH_U32, 0);
}
static void stash_fifo_u32_put_u32_n(void)
{
- stash_fifo_put(sizeof(uint32_t), BURST, STASH_U32);
+ stash_fifo_put(sizeof(uint32_t), BURST, STASH_U32, 0);
}
static void stash_fifo_put_u16_1(void)
{
- stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN);
+ stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN, 0);
}
static void stash_fifo_put_u16_n(void)
{
- stash_fifo_put(sizeof(uint16_t), BURST, STASH_GEN);
+ stash_fifo_put(sizeof(uint16_t), BURST, STASH_GEN, 0);
}
static void stash_fifo_put_u8_1(void)
{
- stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN);
+ stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN, 0);
}
static void stash_fifo_put_u8_n(void)
{
- stash_fifo_put(sizeof(uint8_t), BURST, STASH_GEN);
+ stash_fifo_put(sizeof(uint8_t), BURST, STASH_GEN, 0);
+}
+
+static void stash_fifo_put_u64_1_batch(void)
+{
+ stash_fifo_put(sizeof(uint64_t), 1, STASH_GEN, 1);
+}
+
+static void stash_fifo_put_u64_n_batch(void)
+{
+ stash_fifo_put(sizeof(uint64_t), BATCH, STASH_GEN, 1);
+}
+
+static void stash_fifo_u64_put_u64_1_batch(void)
+{
+ stash_fifo_put(sizeof(uint64_t), 1, STASH_U64, 1);
+}
+
+static void stash_fifo_u64_put_u64_n_batch(void)
+{
+ stash_fifo_put(sizeof(uint64_t), BATCH, STASH_U64, 1);
+}
+
+static void stash_fifo_put_ptr_1_batch(void)
+{
+ stash_fifo_put(sizeof(uintptr_t), 1, STASH_PTR, 1);
+}
+
+static void stash_fifo_put_ptr_n_batch(void)
+{
+ stash_fifo_put(sizeof(uintptr_t), BATCH, STASH_PTR, 1);
+}
+
+static void stash_fifo_put_u32_1_batch(void)
+{
+ stash_fifo_put(sizeof(uint32_t), 1, STASH_GEN, 1);
+}
+
+static void stash_fifo_put_u32_n_batch(void)
+{
+ stash_fifo_put(sizeof(uint32_t), BATCH, STASH_GEN, 1);
+}
+
+static void stash_fifo_u32_put_u32_1_batch(void)
+{
+ stash_fifo_put(sizeof(uint32_t), 1, STASH_U32, 1);
+}
+
+static void stash_fifo_u32_put_u32_n_batch(void)
+{
+ stash_fifo_put(sizeof(uint32_t), BATCH, STASH_U32, 1);
+}
+
+static void stash_fifo_put_u16_1_batch(void)
+{
+ stash_fifo_put(sizeof(uint16_t), 1, STASH_GEN, 1);
+}
+
+static void stash_fifo_put_u16_n_batch(void)
+{
+ stash_fifo_put(sizeof(uint16_t), BATCH, STASH_GEN, 1);
+}
+
+static void stash_fifo_put_u8_1_batch(void)
+{
+ stash_fifo_put(sizeof(uint8_t), 1, STASH_GEN, 1);
+}
+
+static void stash_fifo_put_u8_n_batch(void)
+{
+ stash_fifo_put(sizeof(uint8_t), BATCH, STASH_GEN, 1);
}
odp_testinfo_t stash_suite[] = {
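
The rewritten put/get loops above capture the key difference between the new batch calls and the plain ones: a batch transfer is all-or-nothing (the tests assert ret == 0 || ret == burst), while a plain put/get may move only part of the burst. A minimal usage sketch of the u32 batch pair, assuming stash is an already-created odp_stash_t:

    uint32_t val[8] = {0};  /* application values */
    int32_t ret;

    /* Stores all 8 values or none of them */
    ret = odp_stash_put_u32_batch(stash, val, 8);

    /* Reads back exactly 8 values, or 0 if fewer are available */
    if (ret == 8)
            ret = odp_stash_get_u32_batch(stash, val, 8);
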
@@ -954,6 +1201,12 @@ odp_testinfo_t stash_suite[] = {
ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_n, check_support_64),
ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_1, check_support_ptr),
ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_n, check_support_ptr),
+ ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_1_batch, check_support_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_default_put_u64_n_batch, check_support_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_1_batch, check_support_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_default_u64_put_u64_n_batch, check_support_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_1_batch, check_support_ptr),
+ ODP_TEST_INFO_CONDITIONAL(stash_default_put_ptr_n_batch, check_support_ptr),
ODP_TEST_INFO(stash_default_put_u32_1),
ODP_TEST_INFO(stash_default_put_u32_n),
ODP_TEST_INFO(stash_default_u32_put_u32_1),
@@ -962,6 +1215,14 @@ odp_testinfo_t stash_suite[] = {
ODP_TEST_INFO(stash_default_put_u16_n),
ODP_TEST_INFO(stash_default_put_u8_1),
ODP_TEST_INFO(stash_default_put_u8_n),
+ ODP_TEST_INFO(stash_default_put_u32_1_batch),
+ ODP_TEST_INFO(stash_default_put_u32_n_batch),
+ ODP_TEST_INFO(stash_default_u32_put_u32_1_batch),
+ ODP_TEST_INFO(stash_default_u32_put_u32_n_batch),
+ ODP_TEST_INFO(stash_default_put_u16_1_batch),
+ ODP_TEST_INFO(stash_default_put_u16_n_batch),
+ ODP_TEST_INFO(stash_default_put_u8_1_batch),
+ ODP_TEST_INFO(stash_default_put_u8_n_batch),
ODP_TEST_INFO_CONDITIONAL(stash_create_u64_all, check_support_64),
ODP_TEST_INFO(stash_create_u32_all),
ODP_TEST_INFO(stash_stats_u32),
@@ -979,6 +1240,20 @@ odp_testinfo_t stash_suite[] = {
ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_n, check_support_fifo),
ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_1, check_support_fifo),
ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_n, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_1_batch, check_support_fifo_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u64_n_batch, check_support_fifo_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_1_batch, check_support_fifo_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_u64_put_u64_n_batch, check_support_fifo_64),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_ptr_1_batch, check_support_fifo_ptr),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_ptr_n_batch, check_support_fifo_ptr),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u32_1_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u32_n_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_u32_put_u32_1_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_u32_put_u32_n_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_1_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u16_n_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_1_batch, check_support_fifo),
+ ODP_TEST_INFO_CONDITIONAL(stash_fifo_put_u8_n_batch, check_support_fifo),
ODP_TEST_INFO_CONDITIONAL(stash_create_fifo_u64_all,
check_support_fifo_64),
ODP_TEST_INFO_CONDITIONAL(stash_create_fifo_u32_all,
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
index f279dd16f..2dc799c2a 100644
--- a/test/validation/api/thread/thread.c
+++ b/test/validation/api/thread/thread.c
@@ -16,6 +16,9 @@ typedef struct {
/* Test thread entry and exit synchronization barriers */
odp_barrier_t bar_entry;
odp_barrier_t bar_exit;
+
+ /* Storage for thread ID assignment order test */
+ int thread_id[ODP_THREAD_COUNT_MAX];
} global_shared_mem_t;
static global_shared_mem_t *global_mem;
@@ -89,6 +92,9 @@ static void thread_test_odp_thread_id(void)
{
int id = odp_thread_id();
+ /* First thread which called odp_init_local() */
+ CU_ASSERT(id == 0);
+
CU_ASSERT(id >= 0);
CU_ASSERT(id < odp_thread_count_max());
CU_ASSERT(id < ODP_THREAD_COUNT_MAX);
@@ -107,14 +113,22 @@ static void thread_test_odp_thread_count(void)
CU_ASSERT(odp_thread_count_max() <= ODP_THREAD_COUNT_MAX);
}
-static int thread_func(void *arg ODP_UNUSED)
+static int thread_func(void *arg)
{
- /* indicate that thread has started */
+ int *id_ptr = arg;
+
+ /* Indicate that thread has started */
odp_barrier_wait(&global_mem->bar_entry);
+ /* Record thread identifier for ID assignment order check */
+ *id_ptr = odp_thread_id();
+
+ CU_ASSERT(*id_ptr > 0);
+ CU_ASSERT(*id_ptr < odp_thread_count_max());
+
CU_ASSERT(odp_thread_type() == ODP_THREAD_WORKER);
- /* wait for indication that we can exit */
+ /* Wait for indication that we can exit */
odp_barrier_wait(&global_mem->bar_exit);
return CU_get_number_of_failures();
@@ -124,10 +138,22 @@ static void thread_test_odp_thrmask_worker(void)
{
odp_thrmask_t mask;
int ret;
- int num = 1;
+ int num = odp_cpumask_default_worker(NULL, 0);
+ CU_ASSERT_FATAL(num > 0);
CU_ASSERT_FATAL(odp_thread_type() == ODP_THREAD_CONTROL);
+ /* Control and worker threads may share CPUs */
+ if (num > 1)
+ num--;
+
+ void *args[num];
+
+ for (int i = 0; i < num; i++) {
+ global_mem->thread_id[i] = -1;
+ args[i] = &global_mem->thread_id[i];
+ }
+
odp_barrier_init(&global_mem->bar_entry, num + 1);
odp_barrier_init(&global_mem->bar_exit, num + 1);
@@ -137,7 +163,7 @@ static void thread_test_odp_thrmask_worker(void)
CU_ASSERT(ret == 0);
/* start the test thread(s) */
- ret = odp_cunit_thread_create(num, thread_func, NULL, 0);
+ ret = odp_cunit_thread_create(num, thread_func, args, 1, 1);
CU_ASSERT(ret == num);
if (ret != num)
@@ -154,6 +180,10 @@ static void thread_test_odp_thrmask_worker(void)
/* allow thread(s) to exit */
odp_barrier_wait(&global_mem->bar_exit);
+ /* Thread ID 0 is used by this control thread */
+ for (int i = 0; i < num; i++)
+ CU_ASSERT(global_mem->thread_id[i] == i + 1);
+
odp_cunit_thread_join(num);
}
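
The thread.c update records the thread ID seen by each created worker and checks the assignment order: the control thread that initialized ODP holds ID 0, and with synchronized creation the workers observe IDs 1..num in creation order. A small sketch of the per-thread check (worker entry name is illustrative):

    static int worker_fn(void *arg)
    {
            int *slot = arg;        /* per-thread storage, as in the test */
            int id = odp_thread_id();

            *slot = id;

            /* Worker IDs are positive and below the thread count limit */
            return (id > 0 && id < odp_thread_count_max()) ? 0 : -1;
    }
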
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index bf0fd3baf..1c0a16349 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -33,6 +33,7 @@
#define EXTRA_TIMERS 256
#define NAME "timer_pool"
+#define MSEC ODP_TIME_MSEC_IN_NS
#define THREE_POINT_THREE_MSEC (10 * ODP_TIME_MSEC_IN_NS / 3)
#define USER_PTR ((void *)0xdead)
#define TICK_INVALID (~(uint64_t)0)
@@ -40,10 +41,19 @@
/* Test case options */
#define PRIV 1
#define EXP_RELAX 1
-#define WAIT 0
-#define CANCEL 1
-#define RESTART 1
#define FIRST_TICK 1
+#define RELATIVE ODP_TIMER_TICK_REL
+#define ABSOLUTE ODP_TIMER_TICK_ABS
+
+enum {
+ TIMEOUT = 0,
+ CANCEL
+};
+
+enum {
+ START = 0,
+ RESTART
+};
/* Timer helper structure */
struct test_timer {
@@ -441,6 +451,62 @@ static void timer_test_timeout_pool_free(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void timer_test_timeout_user_area(void)
+{
+ odp_pool_t pool;
+ odp_pool_capability_t pool_capa;
+ odp_pool_param_t param;
+ uint32_t i, max_size;
+ void *addr;
+ void *prev = NULL;
+ const uint32_t num = 10;
+ uint32_t num_alloc = 0;
+ uint32_t size = 1024;
+ odp_timeout_t tmo[num];
+
+ CU_ASSERT_FATAL(!odp_pool_capability(&pool_capa));
+ max_size = pool_capa.tmo.max_uarea_size;
+
+ if (max_size == 0) {
+ ODPH_DBG("Timeout user area not supported\n");
+ return;
+ }
+
+ if (size > max_size)
+ size = max_size;
+
+ odp_pool_param_init(&param);
+ param.type = ODP_POOL_TIMEOUT;
+ param.tmo.num = num;
+ param.tmo.uarea_size = size;
+
+ pool = odp_pool_create("test_user_area", &param);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ for (i = 0; i < num; i++) {
+ tmo[i] = odp_timeout_alloc(pool);
+
+ if (tmo[i] == ODP_TIMEOUT_INVALID)
+ break;
+
+ num_alloc++;
+
+ addr = odp_timeout_user_area(tmo[i]);
+ CU_ASSERT_FATAL(addr != NULL);
+ CU_ASSERT(prev != addr);
+
+ prev = addr;
+ memset(addr, 0, size);
+ }
+
+ CU_ASSERT(i == num);
+
+ for (i = 0; i < num_alloc; i++)
+ odp_timeout_free(tmo[i]);
+
+ CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
+}
+
static void timer_pool_create_destroy(void)
{
odp_timer_pool_param_t tparam;
@@ -729,9 +795,10 @@ static void free_schedule_context(odp_queue_type_t queue_type)
}
static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t tick_type,
- int restart, int cancel)
+ int restart, int cancel, int rounds, uint64_t tmo_ns)
{
odp_timer_capability_t capa;
+ odp_timer_res_capability_t res_capa;
odp_timer_pool_param_t tp_param;
odp_queue_param_t queue_param;
odp_pool_param_t pool_param;
@@ -743,20 +810,36 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
odp_timeout_t tmo;
odp_event_t ev;
odp_time_t t1, t2;
- uint64_t tick, nsec;
- int ret;
- uint64_t tmo_ns = ODP_TIME_SEC_IN_NS;
- uint64_t res_ns = ODP_TIME_SEC_IN_NS / 10;
+ uint64_t tick, nsec, res_ns, min_tmo;
+ int ret, i;
memset(&capa, 0, sizeof(capa));
ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT_FATAL(capa.max_tmo.max_tmo > 0);
- if (capa.max_tmo.max_tmo < tmo_ns) {
+ /* Use timeout and resolution values that are within capability limits */
+ if (capa.max_tmo.max_tmo < tmo_ns)
tmo_ns = capa.max_tmo.max_tmo;
- res_ns = capa.max_tmo.res_ns;
- }
+
+ memset(&res_capa, 0, sizeof(res_capa));
+ res_capa.max_tmo = tmo_ns;
+
+ ret = odp_timer_res_capability(ODP_CLOCK_DEFAULT, &res_capa);
+ CU_ASSERT_FATAL(ret == 0);
+ CU_ASSERT_FATAL(res_capa.res_ns > 0);
+
+ res_ns = tmo_ns / 10;
+
+ if (res_ns < res_capa.res_ns)
+ res_ns = res_capa.res_ns;
+
+ /* Test expects better resolution than 0.5x timeout */
+ CU_ASSERT_FATAL(res_ns < tmo_ns / 2);
+
+ min_tmo = tmo_ns / 4;
+ if (min_tmo < res_capa.min_tmo)
+ min_tmo = res_capa.min_tmo;
odp_pool_param_init(&pool_param);
pool_param.type = ODP_POOL_TIMEOUT;
@@ -776,7 +859,7 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
odp_timer_pool_param_init(&tp_param);
tp_param.res_ns = res_ns;
- tp_param.min_tmo = tmo_ns / 4;
+ tp_param.min_tmo = min_tmo;
tp_param.max_tmo = tmo_ns;
tp_param.num_timers = 1;
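
timer_single_shot() now takes the number of rounds and the timeout length as parameters, and derives a usable resolution from odp_timer_res_capability() instead of falling back to the coarse resolution reported for the maximum timeout. The capability query pattern is worth noting, since the achievable resolution depends on the intended max timeout (fragment mirroring the hunk above):

    odp_timer_res_capability_t res_capa;
    uint64_t tmo_ns = 500 * ODP_TIME_MSEC_IN_NS;
    uint64_t res_ns = tmo_ns / 10;

    memset(&res_capa, 0, sizeof(res_capa));
    res_capa.max_tmo = tmo_ns;      /* resolution limit depends on max timeout */

    if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &res_capa) == 0 &&
        res_ns < res_capa.res_ns)
            res_ns = res_capa.res_ns;
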
@@ -796,67 +879,73 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
if (restart)
nsec = tmo_ns / 2;
- tick = odp_timer_ns_to_tick(tp, nsec);
- if (tick_type == ODP_TIMER_TICK_ABS)
- tick += odp_timer_current_tick(tp);
-
- start_param.tick_type = tick_type;
- start_param.tick = tick;
- start_param.tmo_ev = ev;
-
- ret = odp_timer_start(timer, &start_param);
- CU_ASSERT_FATAL(ret == ODP_TIMER_SUCCESS);
-
- if (restart) {
- tick = odp_timer_ns_to_tick(tp, tmo_ns);
+ for (i = 0; i < rounds; i++) {
+ tick = odp_timer_ns_to_tick(tp, nsec);
if (tick_type == ODP_TIMER_TICK_ABS)
tick += odp_timer_current_tick(tp);
- start_param.tick = tick;
- start_param.tmo_ev = ODP_EVENT_INVALID;
+ start_param.tick_type = tick_type;
+ start_param.tick = tick;
+ start_param.tmo_ev = ev;
- ret = odp_timer_restart(timer, &start_param);
+ ret = odp_timer_start(timer, &start_param);
CU_ASSERT_FATAL(ret == ODP_TIMER_SUCCESS);
- }
- ev = ODP_EVENT_INVALID;
+ if (restart) {
+ tick = odp_timer_ns_to_tick(tp, tmo_ns);
+ if (tick_type == ODP_TIMER_TICK_ABS)
+ tick += odp_timer_current_tick(tp);
- if (cancel) {
- ret = odp_timer_cancel(timer, &ev);
- CU_ASSERT(ret == 0);
+ start_param.tick = tick;
+ start_param.tmo_ev = ODP_EVENT_INVALID;
- if (ret == 0)
- CU_ASSERT(ev != ODP_EVENT_INVALID);
- } else {
- uint64_t diff_ns;
+ ret = odp_timer_restart(timer, &start_param);
+ CU_ASSERT_FATAL(ret == ODP_TIMER_SUCCESS);
+ }
- t1 = odp_time_global();
- ev = wait_event(queue_type, queue, t1, 10 * tmo_ns);
- t2 = odp_time_global();
- diff_ns = odp_time_diff_ns(t2, t1);
+ ev = ODP_EVENT_INVALID;
- CU_ASSERT(ev != ODP_EVENT_INVALID);
- CU_ASSERT(diff_ns < 2 * tmo_ns);
- CU_ASSERT((double)diff_ns > 0.5 * tmo_ns);
- }
+ if (cancel) {
+ ret = odp_timer_cancel(timer, &ev);
+ CU_ASSERT(ret == 0);
- if (ev != ODP_EVENT_INVALID) {
- CU_ASSERT_FATAL(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
- tmo = odp_timeout_from_event(ev);
- CU_ASSERT(odp_timeout_user_ptr(tmo) == USER_PTR);
- CU_ASSERT(odp_timeout_timer(tmo) == timer);
+ if (ret == 0)
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ } else {
+ uint64_t diff_ns;
- if (!cancel) {
- if (tick_type == ODP_TIMER_TICK_ABS) {
- /* CU_ASSERT needs these extra brackets */
- CU_ASSERT(odp_timeout_tick(tmo) == tick);
- } else {
- CU_ASSERT(odp_timeout_tick(tmo) > tick);
+ t1 = odp_time_global();
+ ev = wait_event(queue_type, queue, t1, 10 * tmo_ns);
+ t2 = odp_time_global();
+ diff_ns = odp_time_diff_ns(t2, t1);
+
+ CU_ASSERT(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(diff_ns < 2 * tmo_ns);
+ CU_ASSERT((double)diff_ns > 0.5 * tmo_ns);
+ }
+
+ if (ev != ODP_EVENT_INVALID) {
+ CU_ASSERT_FATAL(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
+ tmo = odp_timeout_from_event(ev);
+ CU_ASSERT(odp_timeout_user_ptr(tmo) == USER_PTR);
+ CU_ASSERT(odp_timeout_timer(tmo) == timer);
+
+ if (!cancel) {
+ if (tick_type == ODP_TIMER_TICK_ABS) {
+ /* CU_ASSERT needs these extra brackets */
+ CU_ASSERT(odp_timeout_tick(tmo) == tick);
+ } else {
+ CU_ASSERT(odp_timeout_tick(tmo) > tick);
+ }
}
+ } else {
+ ODPH_DBG("Event missing\n");
+ break;
}
+ }
+ if (ev != ODP_EVENT_INVALID)
odp_event_free(ev);
- }
free_schedule_context(queue_type);
@@ -869,82 +958,92 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
static void timer_plain_rel_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_REL, 0, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, START, TIMEOUT, 2, 500 * MSEC);
}
static void timer_plain_abs_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_ABS, 0, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, START, TIMEOUT, 2, 500 * MSEC);
}
static void timer_plain_rel_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_REL, 0, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, START, CANCEL, 5, 1000 * MSEC);
}
static void timer_plain_abs_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_ABS, 0, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, START, CANCEL, 5, 1000 * MSEC);
}
static void timer_plain_rel_restart_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_REL, RESTART, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, RESTART, TIMEOUT, 2, 600 * MSEC);
}
static void timer_plain_abs_restart_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_ABS, RESTART, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, RESTART, TIMEOUT, 2, 600 * MSEC);
}
static void timer_plain_rel_restart_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_REL, RESTART, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, RELATIVE, RESTART, CANCEL, 5, 1000 * MSEC);
}
static void timer_plain_abs_restart_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ODP_TIMER_TICK_ABS, RESTART, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, RESTART, CANCEL, 5, 1000 * MSEC);
+}
+
+static void timer_plain_abs_wait_3sec(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_PLAIN, ABSOLUTE, START, TIMEOUT, 30, 110 * MSEC);
}
static void timer_sched_rel_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_REL, 0, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, START, TIMEOUT, 2, 500 * MSEC);
}
static void timer_sched_abs_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_ABS, 0, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, TIMEOUT, 2, 500 * MSEC);
}
static void timer_sched_rel_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_REL, 0, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, START, CANCEL, 5, 1000 * MSEC);
}
static void timer_sched_abs_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_ABS, 0, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, CANCEL, 5, 1000 * MSEC);
}
static void timer_sched_rel_restart_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_REL, RESTART, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, RESTART, TIMEOUT, 2, 600 * MSEC);
}
static void timer_sched_abs_restart_wait(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_ABS, RESTART, WAIT);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, RESTART, TIMEOUT, 2, 600 * MSEC);
}
static void timer_sched_rel_restart_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_REL, RESTART, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, RELATIVE, RESTART, CANCEL, 5, 1000 * MSEC);
}
static void timer_sched_abs_restart_cancel(void)
{
- timer_single_shot(ODP_QUEUE_TYPE_SCHED, ODP_TIMER_TICK_ABS, RESTART, CANCEL);
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, RESTART, CANCEL, 5, 1000 * MSEC);
+}
+
+static void timer_sched_abs_wait_3sec(void)
+{
+ timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, TIMEOUT, 30, 110 * MSEC);
}
static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
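
The new *_abs_wait_3sec cases reuse the same single-shot path with 30 rounds of a 110 ms absolute timeout, i.e. roughly 3.3 seconds of wall clock per test, exercising repeated absolute-tick arming over a longer period than the half-second wait cases above.
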
@@ -2200,7 +2299,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
/* Create and start worker threads */
global_mem->test_queue_type = queue_type;
- odp_cunit_thread_create(num_workers, worker_entrypoint, NULL, 0);
+ odp_cunit_thread_create(num_workers, worker_entrypoint, NULL, 0, 0);
/* Wait for worker threads to exit */
odp_cunit_thread_join(num_workers);
@@ -2503,6 +2602,7 @@ odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO(timer_test_param_init),
ODP_TEST_INFO(timer_test_timeout_pool_alloc),
ODP_TEST_INFO(timer_test_timeout_pool_free),
+ ODP_TEST_INFO(timer_test_timeout_user_area),
ODP_TEST_INFO(timer_pool_create_destroy),
ODP_TEST_INFO(timer_pool_create_max),
ODP_TEST_INFO(timer_pool_max_res),
@@ -2515,6 +2615,7 @@ odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_restart_wait, check_plain_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_plain_rel_restart_cancel, check_plain_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_restart_cancel, check_plain_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_plain_abs_wait_3sec, check_plain_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_wait, check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_wait, check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_cancel, check_sched_queue_support),
@@ -2523,6 +2624,7 @@ odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_restart_wait, check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_sched_rel_restart_cancel, check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_restart_cancel, check_sched_queue_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_sched_abs_wait_3sec, check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_plain,
check_plain_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_sched,