aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMatias Elo <matias.elo@nokia.com>2024-01-30 11:10:37 +0200
committerGitHub <noreply@github.com>2024-01-30 11:10:37 +0200
commit72c5001709bf6f846cf67c40c48fc66e89356598 (patch)
tree94381110518ad938cb9e2752634ad2f36b0d3d50
parent1dc95af0293405de4b3c91e06aedb7703413a1bd (diff)
parent219bf470229c42d77bf9cf6314b360ad9f233db1 (diff)
Merge ODP v1.43.0.0v1.43.0.0_DPDK_22.11
Merge ODP linux-generic v1.43.0.0 into linux-dpdk.
-rw-r--r--.github/workflows/ci-pipeline-arm64.yml104
-rw-r--r--.github/workflows/ci-pipeline.yml115
-rw-r--r--CHANGELOG108
-rw-r--r--configure.ac7
-rw-r--r--doc/implementers-guide/implementers-guide.adoc2
-rw-r--r--doc/users-guide/users-guide-cls.adoc30
-rw-r--r--doc/users-guide/users-guide-timer.adoc33
-rw-r--r--example/debug/odp_debug.c28
-rw-r--r--example/ipfragreass/odp_ipfragreass.c2
-rw-r--r--example/packet/odp_pktio.c3
-rw-r--r--example/sysinfo/odp_sysinfo.c10
-rw-r--r--example/timer/odp_timer_accuracy.c16
-rw-r--r--example/timer/odp_timer_simple.c25
-rw-r--r--example/timer/odp_timer_test.c14
-rw-r--r--helper/include/odp/helper/threads.h68
-rw-r--r--helper/test/debug.c2
-rw-r--r--helper/test/macros.c2
-rw-r--r--helper/test/parse.c2
-rw-r--r--helper/test/version.c2
-rw-r--r--helper/threads.c127
-rw-r--r--include/Makefile.am8
-rw-r--r--include/odp/api/abi-default/event_types.h1
-rw-r--r--include/odp/api/abi-default/schedule_types.h21
-rw-r--r--include/odp/api/abi-default/thread.h16
-rw-r--r--include/odp/api/abi-default/thread_types.h26
-rw-r--r--include/odp/api/spec/classification.h111
-rw-r--r--include/odp/api/spec/crypto.h1
-rw-r--r--include/odp/api/spec/crypto_types.h37
-rw-r--r--include/odp/api/spec/event.h14
-rw-r--r--include/odp/api/spec/hints.h2
-rw-r--r--include/odp/api/spec/init.h2
-rw-r--r--include/odp/api/spec/packet.h45
-rw-r--r--include/odp/api/spec/pool.h22
-rw-r--r--include/odp/api/spec/schedule_types.h85
-rw-r--r--include/odp/api/spec/thread.h66
-rw-r--r--include/odp/api/spec/thread_types.h7
-rw-r--r--include/odp/api/spec/time.h58
-rw-r--r--include/odp/api/spec/time_types.h12
-rw-r--r--include/odp/api/spec/timer.h127
-rw-r--r--include/odp/api/spec/timer_types.h41
-rw-r--r--include/odp/api/thread.h2
-rw-r--r--include/odp/api/thread_types.h26
-rw-r--r--include/odp/arch/arm32-linux/odp/api/abi/thread_types.h5
-rw-r--r--include/odp/arch/arm64-linux/odp/api/abi/thread_types.h5
-rw-r--r--include/odp/arch/default-linux/odp/api/abi/thread_types.h5
-rw-r--r--include/odp/arch/power64-linux/odp/api/abi/thread_types.h5
-rw-r--r--include/odp/arch/x86_32-linux/odp/api/abi/thread_types.h5
-rw-r--r--include/odp/arch/x86_64-linux/odp/api/abi/thread_types.h5
-rw-r--r--include/odp/autoheader_external.h.in3
-rw-r--r--platform/linux-dpdk/Makefile.am21
-rw-r--r--platform/linux-dpdk/README85
l---------platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h1
l---------platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h1
l---------platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h1
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/time_inlines.h27
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h8
-rw-r--r--platform/linux-dpdk/include/odp/api/plat/timer_inlines.h48
-rw-r--r--platform/linux-dpdk/include/odp_config_internal.h14
-rw-r--r--platform/linux-dpdk/include/odp_packet_io_internal.h6
-rw-r--r--platform/linux-dpdk/include/odp_pool_internal.h2
-rw-r--r--platform/linux-dpdk/m4/configure.m48
l---------platform/linux-dpdk/m4/odp_wfe.m41
-rw-r--r--platform/linux-dpdk/odp_packet_dpdk.c2
-rw-r--r--platform/linux-dpdk/odp_pool.c42
-rw-r--r--platform/linux-dpdk/odp_schedule_eventdev.c5
-rw-r--r--platform/linux-dpdk/odp_system_info.c43
-rw-r--r--platform/linux-dpdk/odp_thread.c20
-rw-r--r--platform/linux-dpdk/odp_time.c30
-rw-r--r--platform/linux-dpdk/odp_timer.c169
-rw-r--r--platform/linux-generic/Makefile.am21
-rw-r--r--platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h47
-rw-r--r--platform/linux-generic/arch/aarch64/odp_llsc.h8
-rw-r--r--platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h13
-rw-r--r--platform/linux-generic/arch/common/odp_time_cpu.c28
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/time_inlines.h1
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/wait_until.h5
-rw-r--r--platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h25
-rw-r--r--platform/linux-generic/arch/default/odp_time.c40
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/event_types.h1
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/thread.h3
-rw-r--r--platform/linux-generic/include-abi/odp/api/abi/thread_types.h5
-rw-r--r--platform/linux-generic/include/odp/api/plat/crypto_inlines.h8
-rw-r--r--platform/linux-generic/include/odp/api/plat/event_inlines.h16
-rw-r--r--platform/linux-generic/include/odp/api/plat/thread_inline_types.h2
-rw-r--r--platform/linux-generic/include/odp/api/plat/thread_inlines.h8
-rw-r--r--platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h5
-rw-r--r--platform/linux-generic/include/odp/api/plat/time_inlines.h16
-rw-r--r--platform/linux-generic/include/odp/api/plat/timer_inlines.h11
-rw-r--r--platform/linux-generic/include/odp_chksum_internal.h29
-rw-r--r--platform/linux-generic/include/odp_classification_datamodel.h36
-rw-r--r--platform/linux-generic/include/odp_config_internal.h22
-rw-r--r--platform/linux-generic/include/odp_event_internal.h2
-rw-r--r--platform/linux-generic/include/odp_packet_internal.h2
-rw-r--r--platform/linux-generic/include/odp_packet_io_internal.h6
-rw-r--r--platform/linux-generic/include/odp_pool_internal.h2
-rw-r--r--platform/linux-generic/include/odp_schedule_scalable.h8
-rw-r--r--platform/linux-generic/m4/configure.m48
-rw-r--r--platform/linux-generic/m4/odp_wfe.m414
-rw-r--r--platform/linux-generic/odp_classification.c200
-rw-r--r--platform/linux-generic/odp_dma.c10
-rw-r--r--platform/linux-generic/odp_fdserver.c3
-rw-r--r--platform/linux-generic/odp_packet_io.c32
-rw-r--r--platform/linux-generic/odp_pcapng.c4
-rw-r--r--platform/linux-generic/odp_pool.c66
-rw-r--r--platform/linux-generic/odp_schedule_basic.c13
-rw-r--r--platform/linux-generic/odp_schedule_scalable.c12
-rw-r--r--platform/linux-generic/odp_schedule_sp.c11
-rw-r--r--platform/linux-generic/odp_system_info.c41
-rw-r--r--platform/linux-generic/odp_thread.c20
-rw-r--r--platform/linux-generic/odp_timer.c141
-rw-r--r--platform/linux-generic/pktio/stats/ethtool_stats.c6
-rw-r--r--platform/linux-generic/test/pktio_ipc/ipc_common.c4
-rw-r--r--test/miscellaneous/odp_api_headers.c2
-rw-r--r--test/performance/bench_common.c115
-rw-r--r--test/performance/odp_bench_misc.c63
-rw-r--r--test/performance/odp_bench_timer.c20
-rw-r--r--test/performance/odp_dma_perf.c471
-rwxr-xr-xtest/performance/odp_dma_perf_run.sh33
-rw-r--r--test/performance/odp_ipsecfwd.c111
-rw-r--r--test/performance/odp_l2fwd.c43
-rw-r--r--test/performance/odp_sched_pktio.c23
-rw-r--r--test/performance/odp_stress.c29
-rw-r--r--test/performance/odp_timer_perf.c158
-rw-r--r--test/validation/api/buffer/buffer.c2
-rw-r--r--test/validation/api/classification/odp_classification_basic.c88
-rw-r--r--test/validation/api/classification/odp_classification_test_pmr.c27
-rw-r--r--test/validation/api/classification/odp_classification_tests.c137
-rw-r--r--test/validation/api/classification/odp_classification_testsuites.h3
-rw-r--r--test/validation/api/crypto/crypto_op_test.c4
-rw-r--r--test/validation/api/dma/dma.c11
-rw-r--r--test/validation/api/ipsec/ipsec.c4
-rw-r--r--test/validation/api/packet/packet.c19
-rw-r--r--test/validation/api/pktio/pktio.c24
-rw-r--r--test/validation/api/pool/pool.c74
-rw-r--r--test/validation/api/thread/thread.c28
-rw-r--r--test/validation/api/time/time.c83
-rw-r--r--test/validation/api/timer/timer.c392
138 files changed, 2650 insertions, 2267 deletions
diff --git a/.github/workflows/ci-pipeline-arm64.yml b/.github/workflows/ci-pipeline-arm64.yml
index 9e85a7881..5b4f7e13c 100644
--- a/.github/workflows/ci-pipeline-arm64.yml
+++ b/.github/workflows/ci-pipeline-arm64.yml
@@ -11,28 +11,52 @@ env:
OS: ubuntu_20.04
jobs:
- Build:
+ Build_gcc:
if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
runs-on: [self-hosted, ARM64]
strategy:
fail-fast: false
matrix:
- cc: [gcc, clang]
- conf: ['', 'CFLAGS=-O3', 'CFLAGS=-O1', 'CFLAGS=-O0 --enable-debug=full', 'CFLAGS=-Os', 'CFLAGS=-pedantic',
- '--enable-lto', '--enable-lto --enable-abi-compat', '--enable-pcapng-support']
- exclude:
- - cc: clang
- conf: '--enable-lto'
- - cc: clang
- conf: '--enable-lto --enable-abi-compat'
+ conf: ['',
+ 'CFLAGS=-O3',
+ 'CFLAGS=-O1',
+ 'CFLAGS=-O0 --enable-debug=full',
+ 'CFLAGS=-Os',
+ 'CFLAGS=-pedantic',
+ '--enable-lto',
+ '--enable-lto --enable-abi-compat',
+ '--enable-pcapng-support']
steps:
- uses: OpenDataPlane/action-clean-up@main
- uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=gcc
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/build_${ARCH}.sh
- if: ${{ failure() }}
uses: ./.github/actions/build-failure-log
+ Build_clang:
+ if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
+ runs-on: [self-hosted, ARM64]
+ strategy:
+ fail-fast: false
+ matrix:
+ conf: ['',
+ 'CFLAGS=-O3',
+ 'CFLAGS=-O1',
+ 'CFLAGS=-O0 --enable-debug=full',
+ 'CFLAGS=-Os',
+ 'CFLAGS=-pedantic',
+ '--enable-pcapng-support',
+ '--without-openssl --without-pcap',
+ '--enable-wfe-locks']
+ steps:
+ - uses: OpenDataPlane/action-clean-up@main
+ - uses: actions/checkout@v3
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=clang
+ -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/build_${ARCH}.sh
+ - if: ${{ failure() }}
+ uses: ./.github/actions/build-failure-log
+
Build_static_u22:
if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
runs-on: [self-hosted, ARM64]
@@ -59,7 +83,7 @@ jobs:
fail-fast: false
matrix:
cc: [gcc, clang]
- os: ['ubuntu_18.04', 'rocky_linux_8']
+ os: ['rocky_linux_8']
steps:
- uses: OpenDataPlane/action-clean-up@main
- uses: actions/checkout@v3
@@ -113,28 +137,50 @@ jobs:
- if: ${{ failure() }}
uses: ./.github/actions/run-failure-log
- Run:
+ Run_gcc:
if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
runs-on: [self-hosted, ARM64]
strategy:
fail-fast: false
matrix:
- cc: [gcc, clang]
- conf: ['', '--enable-abi-compat', '--enable-deprecated --enable-helper-deprecated --enable-debug=full',
+ conf: ['',
+ '--enable-abi-compat',
+ '--enable-deprecated --enable-helper-deprecated --enable-debug=full',
'--enable-dpdk-zero-copy --disable-static-applications',
'--disable-host-optimization --enable-event-validation=warn',
'--disable-host-optimization --enable-abi-compat',
- '--without-openssl --without-pcap']
+ '--without-openssl --without-pcap',
+ '--enable-wfe-locks']
steps:
- uses: OpenDataPlane/action-clean-up@main
- uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
- -e CXX=g++-10 -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=gcc -e ARCH="${ARCH}"
+ -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
- if: ${{ failure() }}
uses: ./.github/actions/run-failure-log
- if: ${{ success() }}
uses: ./.github/actions/dump-log
+ Run_clang:
+ if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
+ runs-on: [self-hosted, ARM64]
+ strategy:
+ fail-fast: false
+ matrix:
+ conf: ['',
+ '--enable-abi-compat',
+ '--enable-deprecated --enable-helper-deprecated --enable-debug=full',
+ '--enable-dpdk-zero-copy --disable-static-applications',
+ '--disable-host-optimization --enable-event-validation=warn',
+ '--disable-host-optimization --enable-abi-compat']
+ steps:
+ - uses: OpenDataPlane/action-clean-up@main
+ - uses: actions/checkout@v3
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=clang -e ARCH="${ARCH}"
+ -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native /odp/scripts/ci/check.sh
+ - if: ${{ failure() }}
+ uses: ./.github/actions/run-failure-log
+
Run_CFLAGS:
if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
runs-on: [self-hosted, ARM64]
@@ -157,7 +203,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- os: ['ubuntu_18.04', 'ubuntu_22.04']
+ os: ['ubuntu_22.04']
steps:
- uses: OpenDataPlane/action-clean-up@main
- uses: actions/checkout@v3
@@ -211,28 +257,6 @@ jobs:
- if: ${{ failure() }}
uses: ./.github/actions/run-failure-log
- Run_dpdk-19_11:
- if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
- runs-on: [self-hosted, ARM64]
- steps:
- - uses: OpenDataPlane/action-clean-up@main
- - uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native-dpdk_19.11 /odp/scripts/ci/check.sh
- - if: ${{ failure() }}
- uses: ./.github/actions/run-failure-log
-
- Run_dpdk-20_11:
- if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
- runs-on: [self-hosted, ARM64]
- steps:
- - uses: OpenDataPlane/action-clean-up@main
- - uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native-dpdk_20.11 /odp/scripts/ci/check.sh
- - if: ${{ failure() }}
- uses: ./.github/actions/run-failure-log
-
Run_dpdk-21_11:
if: ${{ github.repository == 'OpenDataPlane/odp-dpdk' }}
runs-on: [self-hosted, ARM64]
diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml
index 13108c1bf..d0b6a0647 100644
--- a/.github/workflows/ci-pipeline.yml
+++ b/.github/workflows/ci-pipeline.yml
@@ -36,7 +36,7 @@ jobs:
./scripts/ci-checkpatches.sh ${COMMIT_RANGE}
Documentation:
- runs-on: ubuntu-20.04
+ runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- name: Install dependencies
@@ -57,26 +57,47 @@ jobs:
make doxygen-doc 2>&1 | tee ./doxygen.log
! fgrep -rq warning ./doxygen.log
- Build:
+ Build_gcc:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
- cc: [gcc, clang]
- conf: ['', 'CFLAGS=-O3', 'CFLAGS=-O1', 'CFLAGS=-O0 --enable-debug=full', 'CFLAGS=-Os', 'CFLAGS=-pedantic',
- '--enable-lto', '--enable-lto --enable-abi-compat', '--enable-pcapng-support']
- exclude:
- - cc: clang
- conf: '--enable-lto'
- - cc: clang
- conf: '--enable-lto --enable-abi-compat'
+ conf: ['',
+ 'CFLAGS=-O3',
+ 'CFLAGS=-O1',
+ 'CFLAGS=-O0 --enable-debug=full',
+ 'CFLAGS=-Os',
+ 'CFLAGS=-pedantic',
+ '--enable-lto',
+ '--enable-lto --enable-abi-compat',
+ '--enable-pcapng-support']
steps:
- uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=gcc
-e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- if: ${{ failure() }}
uses: ./.github/actions/build-failure-log
+ Build_clang:
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ conf: ['',
+ 'CFLAGS=-O3',
+ 'CFLAGS=-O1',
+ 'CFLAGS=-O0 --enable-debug=full',
+ 'CFLAGS=-Os',
+ 'CFLAGS=-pedantic',
+ '--enable-pcapng-support',
+ '--without-openssl --without-pcap']
+ steps:
+ - uses: actions/checkout@v3
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=clang
+ -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
+ - if: ${{ failure() }}
+ uses: ./.github/actions/build-failure-log
+
Build_static_u22:
runs-on: ubuntu-20.04
env:
@@ -103,7 +124,8 @@ jobs:
fail-fast: false
matrix:
cc: [gcc, clang]
- conf: ['', '--enable-abi-compat', 'CFLAGS=-march=armv8.2-a', 'CFLAGS=-march=armv8-a+lse']
+ conf: ['', '--enable-abi-compat', 'CFLAGS=-march=armv8.2-a', 'CFLAGS=-march=armv8-a+lse',
+ '--enable-wfe-locks']
steps:
- uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
@@ -111,22 +133,6 @@ jobs:
- if: ${{ failure() }}
uses: ./.github/actions/build-failure-log
- Build_arm64_u18:
- runs-on: ubuntu-20.04
- env:
- ARCH: arm64
- OS: ubuntu_18.04
- strategy:
- fail-fast: false
- matrix:
- cc: [gcc, clang]
- steps:
- - uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/build_${ARCH}.sh
- - if: ${{ failure() }}
- uses: ./.github/actions/build-failure-log
-
Build_ppc64el:
runs-on: ubuntu-20.04
env:
@@ -175,10 +181,10 @@ jobs:
- if: ${{ failure() }}
uses: ./.github/actions/build-failure-log
- Build_gcc_u18:
+ Build_gcc_u20:
runs-on: ubuntu-20.04
env:
- OS: ubuntu_18.04
+ OS: ubuntu_20.04
strategy:
fail-fast: false
matrix:
@@ -241,33 +247,52 @@ jobs:
- if: ${{ failure() }}
uses: ./.github/actions/run-failure-log
- Run:
+ Run_gcc:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
- cc: [gcc, clang]
- conf: ['', '--enable-abi-compat', '--enable-deprecated --enable-helper-deprecated --enable-debug=full',
+ conf: ['',
+ '--enable-abi-compat',
+ '--enable-deprecated --enable-helper-deprecated --enable-debug=full',
'--disable-static-applications',
'--disable-host-optimization --enable-event-validation=warn',
'--disable-host-optimization --enable-abi-compat',
'--without-openssl --without-pcap']
steps:
- uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
- -e CXX=g++-10 -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=gcc -e ARCH="${ARCH}"
+ -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
- if: ${{ failure() }}
uses: ./.github/actions/run-failure-log
- if: ${{ success() }}
uses: ./.github/actions/dump-log
+ Run_clang:
+ runs-on: ubuntu-20.04
+ strategy:
+ fail-fast: false
+ matrix:
+ conf: ['',
+ '--enable-abi-compat',
+ '--enable-deprecated --enable-helper-deprecated --enable-debug=full',
+ '--disable-static-applications',
+ '--disable-host-optimization --enable-event-validation=warn',
+ '--disable-host-optimization --enable-abi-compat']
+ steps:
+ - uses: actions/checkout@v3
+ - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC=clang -e ARCH="${ARCH}"
+ -e CONF="${{matrix.conf}}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH} /odp/scripts/ci/check.sh
+ - if: ${{ failure() }}
+ uses: ./.github/actions/run-failure-log
+
Run_OS:
runs-on: ubuntu-20.04
strategy:
fail-fast: false
matrix:
cc: [gcc, clang]
- os: ['ubuntu_18.04', 'ubuntu_22.04']
+ os: ['ubuntu_22.04']
steps:
- uses: actions/checkout@v3
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${{matrix.cc}}" -e ARCH="${ARCH}"
@@ -322,24 +347,6 @@ jobs:
- if: ${{ failure() }}
uses: ./.github/actions/run-failure-log
- Run_dpdk-19_11:
- runs-on: ubuntu-20.04
- steps:
- - uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-dpdk_19.11 /odp/scripts/ci/check.sh
- - if: ${{ failure() }}
- uses: ./.github/actions/run-failure-log
-
- Run_dpdk-20_11:
- runs-on: ubuntu-20.04
- steps:
- - uses: actions/checkout@v3
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-dpdk_20.11 /odp/scripts/ci/check.sh
- - if: ${{ failure() }}
- uses: ./.github/actions/run-failure-log
-
Run_dpdk-21_11:
runs-on: ubuntu-20.04
steps:
diff --git a/CHANGELOG b/CHANGELOG
index 4cb2a57cb..dd3d04048 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,111 @@
+== OpenDataPlane (1.43.0.0)
+
+=== Backward incompatible API changes
+==== Classifier
+* Add new `odp_cls_capability_t.max_pmr` and
+`odp_cls_capability_t.max_pmr_per_cos` packet matching rule capabilities.
+* Rename `odp_cls_capability_t.max_pmr_terms` field to
+`odp_cls_capability_t.max_terms_per_pmr`.
+
+==== Time
+* Change local/global time specification to allow timestamp values to start from
+an implementation defined value, instead of always starting from zero.
+
+==== Timer
+* Deprecate `odp_timer_set_t` type. Use `odp_timer_retval_t` instead.
+* Deprecate `odp_timer_pool_start()` function in comments. Will be properly
+deprecated in an upcoming release. Use `odp_timer_pool_start_multi()` instead.
+* Deprecate `odp_timeout_fresh()` function.
+* Change `odp_timer_free()` specification to remove the possibility to free a
+timer that is running.
+* Change `odp_timer_pool_create()` specification to state that timer pool
+handles must not be used by other APIs, except `odp_timer_pool_to_u64()`,
+ before being started.
+
+=== Backward compatible API changes
+==== Event
+* Add `odp_event_pool()` function, which returns a handle to the pool where the
+event was allocated from.
+
+==== Hints
+* Fix a type conversion issue in `odp_unlikely()` implementation.
+
+==== Packet
+* Clarify that `ODP_PACKET_FREE_CTRL_DONT_FREE` option does not affect direct
+packet free calls.
+* Clarify that packet IO time is specific to the packet IO interface.
+
+==== Pool
+* Clarify that disabled/unused per thread statistics counters will not
+necessarily be zeroed by `odp_pool_stats()`.
+
+==== Scheduler
+* Clarify event ordering in ordered scheduling contexts.
+
+==== Thread
+* Add new functions `odp_thread_control_count_max()` and
+`odp_thread_worker_count_max()` for reading the maximum number of control and
+worker threads.
+* Add new functions `odp_thread_control_count()` and `odp_thread_worker_count()`
+for reading the current number of control and worker threads.
+
+==== Time
+* Add `odp_time_add_ns()` function for adding nanoseconds into a time value.
+* Add `odp_time_startup()` function for requesting ODP instance startup time.
+* Clarify `odp_time_sum()` specification by adding a notification that resulting
+timestamp may wrap around if large timestamp values are summed up.
+
+==== Timer
+* Add `odp_timer_pool_start_multi()` function for starting timer pools, which
+takes the to-be-started pool handles as arguments.
+* Clarify that timer ticks and related nanosecond values are specific to a timer
+pool. Also, state explicitly that those may not start from zero.
+
+=== Remove deprecated APIs
+==== Classifier
+* Remove deprecated `odp_cls_drop_t` enum.
+* Remove deprecated `odp_cos_drop_set()` function.
+* Remove deprecated `odp_cos_drop()` function.
+* Remove deprecated `odp_cos_with_l2_priority()` function.
+* Remove deprecated `odp_cos_with_l3_qos()` function.
+
+==== Crypto
+* Remove deprecated `ODP_CRYPTO_SES_CREATE_ERR_NONE`,
+`ODP_CRYPTO_SES_CREATE_ERR_ENOMEM`, `ODP_CRYPTO_SES_CREATE_ERR_INV_CIPHER`, and
+`ODP_CRYPTO_SES_CREATE_ERR_INV_AUTH` defines.
+* Remove deprecated `odp_crypto_alg_err_t.ODP_CRYPTO_ALG_ERR_KEY_SIZE` and
+`odp_crypto_alg_err_t.ODP_CRYPTO_ALG_ERR_IV_INVALID` enums.
+* Remove deprecated `odp_crypto_hw_err_t` enum.
+* Remove deprecated `odp_crypto_packet_result_t.ok` field.
+
+==== Scheduler
+* Remove deprecated `ODP_SCHED_PRIO_HIGHEST`, `ODP_SCHED_PRIO_NORMAL`,
+`ODP_SCHED_PRIO_LOWEST`, and `ODP_SCHED_PRIO_DEFAULT` defines.
+
+==== Timer
+* Remove deprecated `ODP_CLOCK_CPU` and `ODP_CLOCK_EXT` defines.
+* Remove deprecated `ODP_TIMER_TOOEARLY`, `ODP_TIMER_TOOLATE` and
+`ODP_TIMER_NOEVENT` defines.
+* Remove deprecated `odp_timer_set_abs()` function.
+* Remove deprecated `odp_timer_set_rel()` function.
+
+=== Helper (1.5.0)
+==== Backward incompatible changes
+* Remove deprecated `odph_odpthread_t` and `odph_odpthread_params_t` types.
+* Remove deprecated `odph_thread_param_t.instance` field.
+* Remove deprecated `odph_odpthreads_create()` and `odph_odpthreads_join()`
+functions.
+
+=== Implementation
+==== Ticketlock
+* Add WFE based aarch64 ticketlock implementation (`--enable-wfe-locks`) for
+power saving.
+
+=== Performance Tests
+==== dma_perf
+* Add option for using software memory copy in addition to DMA transfers.
+* Add options for using sparse packets and memory as the transfer segment type.
+
== OpenDataPlane (1.42.1.0)
=== Backward compatible API changes
diff --git a/configure.ac b/configure.ac
index 33aba2688..fc1f17403 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,8 +3,8 @@ AC_PREREQ([2.5])
# ODP API version
##########################################################################
m4_define([odp_version_generation], [1])
-m4_define([odp_version_major], [42])
-m4_define([odp_version_minor], [1])
+m4_define([odp_version_major], [43])
+m4_define([odp_version_minor], [0])
m4_define([odp_version_patch], [0])
m4_define([odp_version_api],
@@ -27,7 +27,7 @@ AC_SUBST(ODP_VERSION_API)
# Helper library version
##########################################################################
m4_define([odph_version_generation], [1])
-m4_define([odph_version_major], [4])
+m4_define([odph_version_major], [5])
m4_define([odph_version_minor], [0])
m4_define([odph_version],
@@ -120,6 +120,7 @@ AS_IF([test "$GCC" == yes],
AS_IF([test `$CC -dumpversion | cut -d '.' -f 1` -ge 10],
ODP_CHECK_CFLAG([-Wno-error=array-bounds])
ODP_CHECK_CFLAG([-Wno-error=stringop-overflow])
+ ODP_CHECK_CXXFLAG([-Wno-error=stringop-overflow])
)
)
diff --git a/doc/implementers-guide/implementers-guide.adoc b/doc/implementers-guide/implementers-guide.adoc
index 568760e3a..7b234ecea 100644
--- a/doc/implementers-guide/implementers-guide.adoc
+++ b/doc/implementers-guide/implementers-guide.adoc
@@ -716,7 +716,7 @@ reference implementation in the file
/*
* Maximum number of pools
*/
-#define ODP_CONFIG_POOLS 64
+#define CONFIG_POOLS 64
-----
Here two fundamental limits, the number of CPUs supported and the maximum
diff --git a/doc/users-guide/users-guide-cls.adoc b/doc/users-guide/users-guide-cls.adoc
index 59163a2da..41badebed 100644
--- a/doc/users-guide/users-guide-cls.adoc
+++ b/doc/users-guide/users-guide-cls.adoc
@@ -135,29 +135,13 @@ using ODP_POOL_INVALID or ODP_QUEUE_INVALID field then any packet assigned to th
=== Packet Classification
For each odp_pktio port, the API allows the assignment of a class-of-service to
-a packet using one of three methods:
-
-1. The packet may be assigned a specific class-of-service based on its Layer-2
-(802.1P/902.1Q VLAN tag) priority field. Since the standard field defines 8
-discrete priority levels, the API allows to assign an odp_cos to each of these
-priority levels with the `odp_cos_with_l2_priority()` function.
-
-2. Similarly, a class-of-service may be assigned using the Layer-3 (IP DiffServ)
-header field. The application supplies an array of odp_cos values that covers
-the entire range of the standard protocol header field, where array elements do
-not need to contain unique values. There is also a need to specify if Layer-3
-priority takes precedence over Layer-2 priority in a packet with both headers
-present.
-
-3. Additionally, the application may also program a number of pattern matching
-rules that assign a class-of-service for packets with header fields matching
-specified values. The field-matching rules take precedence over the previously
-described priority-based assignment of a class-of-service. Using these matching
-rules the application should be able for example to identify all packets
-containing VoIP traffic based on the protocol being UDP, and a specific
-destination or source port numbers, and appropriately assign these packets a
-class-of-service that maps to a higher priority queue, assuring voice packets a
-lower and bound latency.
+a packet. Application can program a number of pattern matching rules that
+assign a class-of-service for packets with header fields matching specified
+values. Using these matching rules the application should be able for example
+to identify all packets containing VoIP traffic based on the protocol being
+UDP, and a specific destination or source port numbers, and appropriately
+assign these packets a class-of-service that maps to a higher priority queue,
+assuring voice packets a lower and bound latency.
=== Packet meta data Elements
diff --git a/doc/users-guide/users-guide-timer.adoc b/doc/users-guide/users-guide-timer.adoc
index f644477a0..45507a872 100644
--- a/doc/users-guide/users-guide-timer.adoc
+++ b/doc/users-guide/users-guide-timer.adoc
@@ -61,16 +61,11 @@ expired.
=== Timer Pool Management
To facilitate implementation of the ODP timer APIs, an additional timer API is
provided. During initialization, applications are expected to create the timer
-pools they need and then call `odp_timer_pool_start()`. ODP implementations
-may or may not fail further attempts to create timer pools after this API is
-called. For best portability, applications should not attempt to create
-further timer pools after calling `odp_timer_pool_start()`. Note that no such
-restrictions exist on timeout pools, as these are just ordinary ODP pools.
-
-Following start, applications may allocate, set, cancel, and free timers
-from their associated timer pools. During termination processing, after all
-timers allocated from a timer pool have been freed, the pool itself should be
-released via a call to `odp_timer_pool_destroy()`.
+pools they need and then call `odp_timer_pool_start_multi()`. Following start,
+applications may allocate, set, cancel, and free timers from their associated
+timer pools. During termination processing, after all timers allocated from a
+timer pool have been freed, the pool itself should be released via a call to
+`odp_timer_pool_destroy()`.
=== Timeout Event Management
The purpose of ODP timers is to schedule their associated timeout events, which
@@ -118,11 +113,6 @@ while (1) {
void *userptr = odp_timeout_user_ptr(timeout);
uint64_t expiration = odp_timeout_tick(timeout);
- if (!odp_timeout_fresh(timeout)) {
- odp_timeout_free(timeout);
- continue;
- }
-
...process the timeout event
break;
@@ -130,16 +120,3 @@ while (1) {
}
}
-----
-When a worker thread receives a timeout event via `odp_schedule()`, it needs
-to determine whether the event is still relevant. A timeout event that is still
-relevant is said to be _fresh_ while one that is no longer relevant is said to
-be _stale_. Timeouts may be stale for any number of reasons, most of which are
-known only to the application itself. However, there are a few cases where the
-ODP implementation may be able to assist in this determination and for those
-cases the `odp_timeout_fresh()` API is provided.
-
-ODP defines a fresh timeout simply as one that has not been reset or
-canceled since it expired. So if `odp_timeout_fresh()` returns 0 then it is
-likely that the application should ignore this event, however if it returns 1
-then it remains an application responsibility to handle the event appropriate
-to its needs.
diff --git a/example/debug/odp_debug.c b/example/debug/odp_debug.c
index 6683e46b4..a2f322e09 100644
--- a/example/debug/odp_debug.c
+++ b/example/debug/odp_debug.c
@@ -39,7 +39,8 @@ static void print_usage(void)
" -s, --shm Create a SHM and call odp_shm_print()\n"
" -p, --pool Create various types of pools and call odp_pool_print()\n"
" -q, --queue Create various types of queues and call odp_queue_print()\n"
- " -i, --interface Create packet IO interface (loop) and call odp_pktio_print()\n"
+ " -i, --interface Create packet IO interface (loop), and call both odp_pktio_print()\n"
+ " and odp_pktio_extra_stats_print()\n"
" -I, --ipsec Call odp_ipsec_print()\n"
" -t, --timer Call timer pool, timer and timeout print functions\n"
" -a, --stash Create stash and call odp_stash_print()\n"
@@ -342,6 +343,9 @@ static int pktio_debug(void)
printf("\n");
odp_pktio_print(pktio);
+ printf("\n");
+ odp_pktio_extra_stats_print(pktio);
+
if (odp_pktio_close(pktio)) {
ODPH_ERR("Pktio close failed\n");
return -1;
@@ -380,6 +384,7 @@ static int timer_debug(void)
uint64_t tick;
uint64_t max_tmo = ODP_TIME_SEC_IN_NS;
uint64_t res = 100 * ODP_TIME_MSEC_IN_NS;
+ int started = 0;
odp_pool_param_init(&pool_param);
pool_param.type = ODP_POOL_TIMEOUT;
@@ -430,7 +435,10 @@ static int timer_debug(void)
return -1;
}
- odp_timer_pool_start();
+ if (odp_timer_pool_start_multi(&timer_pool, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
odp_queue_param_init(&queue_param);
if (timer_capa.queue_type_sched)
@@ -458,16 +466,17 @@ static int timer_debug(void)
start_param.tick = tick;
start_param.tmo_ev = event;
- if (odp_timer_start(timer, &start_param) != ODP_TIMER_SUCCESS)
+ if (odp_timer_start(timer, &start_param) == ODP_TIMER_SUCCESS)
+ started = 1;
+ else
ODPH_ERR("Timer start failed.\n");
printf("\n");
odp_timer_print(timer);
- event = odp_timer_free(timer);
-
- if (event == ODP_EVENT_INVALID) {
- ODPH_ERR("Timer free failed.\n");
+ if (started && odp_timer_cancel(timer, &event) != ODP_TIMER_SUCCESS) {
+ ODPH_ERR("Timer cancel failed\n");
+ return -1;
} else {
timeout = odp_timeout_from_event(event);
@@ -477,6 +486,11 @@ static int timer_debug(void)
odp_timeout_free(timeout);
}
+ if (odp_timer_free(timer)) {
+ ODPH_ERR("Timer free failed\n");
+ return -1;
+ }
+
odp_timer_pool_destroy(timer_pool);
if (odp_queue_destroy(queue)) {
diff --git a/example/ipfragreass/odp_ipfragreass.c b/example/ipfragreass/odp_ipfragreass.c
index db5779915..55c76d7cc 100644
--- a/example/ipfragreass/odp_ipfragreass.c
+++ b/example/ipfragreass/odp_ipfragreass.c
@@ -228,7 +228,7 @@ static int run_worker(void *arg ODP_UNUSED)
/**
* ODP fragmentation and reassembly example main function
*/
-int main(void)
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
{
odp_instance_t instance;
odp_pool_t fragment_pool;
diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c
index df4528d18..191d9fdb6 100644
--- a/example/packet/odp_pktio.c
+++ b/example/packet/odp_pktio.c
@@ -521,7 +521,10 @@ static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
odp_packet_free(pkt); /* Drop */
pkt_cnt--;
} else if (odp_unlikely(i != j++)) {
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
pkt_tbl[j-1] = pkt;
+#pragma GCC diagnostic pop
}
}
diff --git a/example/sysinfo/odp_sysinfo.c b/example/sysinfo/odp_sysinfo.c
index 0bebac4a2..151388263 100644
--- a/example/sysinfo/odp_sysinfo.c
+++ b/example/sysinfo/odp_sysinfo.c
@@ -579,7 +579,10 @@ static int timer_capability(appl_args_t *appl_args)
return -1;
}
- odp_timer_pool_start();
+ if (odp_timer_pool_start_multi(&pool, 1) != 1) {
+ ODPH_ERR("odp_timer_pool_start_multi() failed for clock source: %d\n", i);
+ return -1;
+ }
ret = odp_timer_pool_info(pool, info);
if (ret) {
@@ -984,8 +987,9 @@ int main(int argc, char **argv)
printf("\n");
printf(" CLASSIFIER\n");
printf(" supported_terms: 0x%" PRIx64 "\n", cls_capa.supported_terms.all_bits);
- printf(" max_pmr_terms: %u\n", cls_capa.max_pmr_terms);
- printf(" available_pmr_terms: %u\n", cls_capa.available_pmr_terms);
+ printf(" max_pmr: %u\n", cls_capa.max_pmr);
+ printf(" max_pmr_per_cos: %u\n", cls_capa.max_pmr_per_cos);
+ printf(" max_terms_per_pmr: %u\n", cls_capa.max_terms_per_pmr);
printf(" max_cos: %u\n", cls_capa.max_cos);
printf(" max_hash_queues: %u\n", cls_capa.max_hash_queues);
printf(" hash_protocols: 0x%x\n", cls_capa.hash_protocols.all_bits);
diff --git a/example/timer/odp_timer_accuracy.c b/example/timer/odp_timer_accuracy.c
index 9a4593a9a..23c40e66f 100644
--- a/example/timer/odp_timer_accuracy.c
+++ b/example/timer/odp_timer_accuracy.c
@@ -197,6 +197,7 @@ static int parse_options(int argc, char *argv[], test_opt_t *test_opt)
memset(test_opt, 0, sizeof(*test_opt));
+ test_opt->cpu_count = 1;
test_opt->period_ns = 200 * ODP_TIME_MSEC_IN_NS;
test_opt->res_ns = 0;
test_opt->res_hz = 0;
@@ -670,7 +671,11 @@ static int create_timers(test_global_t *test_global)
return -1;
}
- odp_timer_pool_start();
+ if (odp_timer_pool_start_multi(&timer_pool, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
+
odp_timer_pool_print(timer_pool);
/* Spend some time so that current tick would not be zero */
@@ -805,7 +810,6 @@ static int destroy_timers(test_global_t *test_global)
{
uint64_t i, alloc_timers;
odp_timer_t timer;
- odp_event_t ev;
int ret = 0;
alloc_timers = test_global->opt.alloc_timers;
@@ -816,10 +820,10 @@ static int destroy_timers(test_global_t *test_global)
if (timer == ODP_TIMER_INVALID)
break;
- ev = odp_timer_free(timer);
-
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
+ if (odp_timer_free(timer)) {
+ printf("Timer free failed: %" PRIu64 "\n", i);
+ ret = -1;
+ }
}
if (test_global->timer_pool != ODP_TIMER_POOL_INVALID)
diff --git a/example/timer/odp_timer_simple.c b/example/timer/odp_timer_simple.c
index fdf38c9d3..681f95714 100644
--- a/example/timer/odp_timer_simple.c
+++ b/example/timer/odp_timer_simple.c
@@ -82,7 +82,12 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
goto err;
}
- odp_timer_pool_start();
+ if (odp_timer_pool_start_multi(&timer_pool, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ ret += 1;
+ goto err;
+ }
+
/* Configure scheduler */
odp_schedule_config(NULL);
@@ -159,18 +164,24 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
}
/* Destroy created resources */
- rc += odp_timer_cancel(tim, &ev);
- rc += -(odp_timer_free(tim) == ODP_EVENT_INVALID);
odp_event_free(ev);
- ret += odp_queue_destroy(queue);
+ if (odp_timer_free(tim))
+ ret++;
+
+ if (odp_queue_destroy(queue))
+ ret++;
err:
odp_timer_pool_destroy(timer_pool);
err_tp:
- ret += odp_pool_destroy(timeout_pool);
- ret += odp_term_local();
+ if (odp_pool_destroy(timeout_pool))
+ ret++;
+
+ if (odp_term_local())
+ ret++;
err_local:
- ret += odp_term_global(instance);
+ if (odp_term_global(instance))
+ ret++;
err_global:
return ret;
}
diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
index 924ffe13f..a397a6ac9 100644
--- a/example/timer/odp_timer_test.c
+++ b/example/timer/odp_timer_test.c
@@ -165,13 +165,7 @@ static void test_abs_timeouts(int thr, test_globals_t *gbls)
tick = odp_timeout_tick(tmo);
ttp = odp_timeout_user_ptr(tmo);
ttp->ev = ev;
- if (!odp_timeout_fresh(tmo)) {
- /* Not the expected expiration tick, timer has
- * been reset or cancelled or freed */
- ODPH_ABORT("Unexpected timeout received (timer "
- "%" PRIu64 ", tick %" PRIu64 ")\n",
- odp_timer_to_u64(ttp->tim), tick);
- }
+
ODPH_DBG(" [%i] timeout, tick %" PRIu64 "\n", thr, tick);
uint32_t rx_num = odp_atomic_fetch_dec_u32(&gbls->remain);
@@ -456,7 +450,11 @@ int main(int argc, char *argv[])
ODPH_ERR("Timer pool create failed.\n");
goto err;
}
- odp_timer_pool_start();
+
+ if (odp_timer_pool_start_multi(&gbls->tp, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
odp_shm_print_all();
(void)odp_timer_pool_info(gbls->tp, &tpinfo);
diff --git a/helper/include/odp/helper/threads.h b/helper/include/odp/helper/threads.h
index 358543c85..c18a46e8a 100644
--- a/helper/include/odp/helper/threads.h
+++ b/helper/include/odp/helper/threads.h
@@ -21,7 +21,6 @@
extern "C" {
#endif
-#include <odp/helper/deprecated.h>
#include <odp_api.h>
#include <pthread.h>
@@ -70,13 +69,7 @@ typedef struct {
/** ODP thread type */
odp_thread_type_t thr_type;
- /** @deprecated ODP instance handle for odph_odpthreads_create(). */
- odp_instance_t ODPH_DEPRECATE(instance);
-
- /**
- * Minimum stack size in bytes. 0 = use default. Ignored by
- * odph_odpthreads_create().
- */
+ /** Minimum stack size in bytes. 0 = use default. */
uint64_t stack_size;
} odph_thread_param_t;
@@ -134,12 +127,6 @@ typedef struct {
odp_mem_model_t mem_model; /**< Process or thread */
} odph_helper_options_t;
-/** @deprecated Legacy thread table entry */
-typedef odph_thread_t ODPH_DEPRECATE(odph_odpthread_t);
-
-/** @deprecated Legacy thread parameters */
-typedef odph_thread_param_t ODPH_DEPRECATE(odph_odpthread_params_t);
-
/** Common parameters for odph_thread_create() call */
typedef struct {
/**
@@ -226,12 +213,11 @@ void odph_thread_common_param_init(odph_thread_common_param_t *param);
/**
* Create and pin threads (as Linux pthreads or processes)
*
- * This is an updated version of odph_odpthreads_create() call. It may be called
- * multiple times to create threads in steps. Each call launches 'num' threads
- * and pins those to separate CPUs based on the cpumask. Use 'thread_model'
- * parameter to select if Linux pthreads or processes are used. This selection
- * may be overridden with ODP helper options. See e.g. --odph_proc under
- * odph_options() documentation.
+ * Function may be called multiple times to create threads in steps. Each call
+ * launches 'num' threads and pins those to separate CPUs based on the cpumask.
+ * Use 'thread_model' parameter to select if Linux pthreads or processes are
+ * used. This selection may be overridden with ODP helper options. See e.g.
+ * --odph_proc under odph_options() documentation.
*
* Thread creation may be synchronized by setting 'sync' parameter. It
* serializes thread start up (odp_init_local() calls), which helps to
@@ -270,11 +256,10 @@ int odph_thread_create(odph_thread_t thread[],
/**
* Wait previously launched threads to exit
*
- * This is an updated version of odph_odpthreads_join() call. It waits for
- * threads launched with odph_thread_create() to exit. Threads may be waited to
- * exit in a different order than those were created. A function call may be
- * used to wait any number of launched threads to exit. A particular thread
- * may be waited only once.
+ * Function waits for threads launched with odph_thread_create() to exit.
+ * Threads may be waited to exit in a different order than those were created.
+ * A function call may be used to wait any number of launched threads to exit.
+ * A particular thread may be waited only once.
*
* @param thread Table of threads to exit
* @param num Number of threads to exit
@@ -287,39 +272,6 @@ int odph_thread_create(odph_thread_t thread[],
int odph_thread_join(odph_thread_t thread[], int num);
/**
- * Creates and launches odpthreads (as linux threads or processes)
- *
- * Creates, pins and launches threads to separate CPU's based on the cpumask.
- *
- * @param thread_tbl Thread table
- * @param mask CPU mask
- * @param thr_params ODP thread parameters
- *
- * @return Number of threads created
- *
- * @deprecated Use odph_thread_create() instead.
- */
-int ODPH_DEPRECATE(odph_odpthreads_create)(
- ODPH_DEPRECATE(odph_odpthread_t) *thread_tbl,
- const odp_cpumask_t *mask,
- const ODPH_DEPRECATE(odph_odpthread_params_t) *thr_params);
-
-/**
- * Waits odpthreads (as linux threads or processes) to exit.
- *
- * Returns when all odpthreads have terminated.
- *
- * @param thread_tbl Thread table
- * @return The number of joined threads or -1 on error.
- * (error occurs if any of the start_routine return non-zero or if
- * the thread join/process wait itself failed -e.g. as the result of a kill)
- *
- * @deprecated Use odph_thread_join() instead.
- */
-int ODPH_DEPRECATE(odph_odpthreads_join)(
- ODPH_DEPRECATE(odph_odpthread_t) *thread_tbl);
-
-/**
* Set CPU affinity of the current odp thread
*
* CPU affinity determines the CPU core on which the thread is
diff --git a/helper/test/debug.c b/helper/test/debug.c
index 78ffb6347..8c50edc75 100644
--- a/helper/test/debug.c
+++ b/helper/test/debug.c
@@ -10,7 +10,7 @@
#include <stdio.h>
#include <string.h>
-int main(void)
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
{
printf("\nHelper library version is: %s\n\n", odph_version_str());
diff --git a/helper/test/macros.c b/helper/test/macros.c
index e6a203e20..7ecf7bb2d 100644
--- a/helper/test/macros.c
+++ b/helper/test/macros.c
@@ -8,7 +8,7 @@
#include <stdio.h>
#include <stdlib.h>
-int main(void)
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
{
int a, b;
int ret = 0;
diff --git a/helper/test/parse.c b/helper/test/parse.c
index 6f4dcc16c..e5a6cf17e 100644
--- a/helper/test/parse.c
+++ b/helper/test/parse.c
@@ -342,7 +342,7 @@ static int test_ipv4(void)
return 0;
}
-int main(void)
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
{
int ret = 0;
diff --git a/helper/test/version.c b/helper/test/version.c
index 17c971886..6af6df490 100644
--- a/helper/test/version.c
+++ b/helper/test/version.c
@@ -8,7 +8,7 @@
#include <stdio.h>
#include <string.h>
-int main(void)
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
{
printf("\nHelper library versions is: %s\n\n", odph_version_str());
diff --git a/helper/threads.c b/helper/threads.c
index 63b4cc019..72003d0f4 100644
--- a/helper/threads.c
+++ b/helper/threads.c
@@ -42,8 +42,7 @@ static void *run_thread(void *arg)
int status;
int ret;
odp_instance_t instance;
- ODPH_DEPRECATE(odph_odpthread_params_t) *thr_params;
-
+ odph_thread_param_t *thr_params;
odph_thread_start_args_t *start_args = arg;
thr_params = &start_args->thr_params;
@@ -408,130 +407,6 @@ int odph_thread_join(odph_thread_t thread[], int num)
return i;
}
-/*
- * create an odpthread set (as linux processes or linux threads or both)
- */
-int ODPH_DEPRECATE(odph_odpthreads_create)(
- ODPH_DEPRECATE(odph_odpthread_t) *thread_tbl,
- const odp_cpumask_t *mask,
- const ODPH_DEPRECATE(odph_odpthread_params_t) *thr_params)
-{
- int i;
- int num;
- int cpu_count;
- int cpu;
-
- num = odp_cpumask_count(mask);
-
- memset(thread_tbl, 0, num * sizeof(*thread_tbl));
-
- cpu_count = odp_cpu_count();
-
- if (num < 1 || num > cpu_count) {
- ODPH_ERR("Invalid number of odpthreads:%d"
- " (%d cores available)\n",
- num, cpu_count);
- return -1;
- }
-
- cpu = odp_cpumask_first(mask);
- for (i = 0; i < num; i++) {
- odph_thread_start_args_t *start_args;
-
- start_args = &thread_tbl[i].start_args;
-
- /* Copy thread parameters */
- start_args->thr_params = *thr_params;
- start_args->instance = thr_params->ODPH_DEPRECATE(instance);
-
- if (helper_options.mem_model == ODP_MEM_MODEL_THREAD) {
- if (create_pthread(&thread_tbl[i], cpu, 0))
- break;
- } else {
- if (create_process(&thread_tbl[i], cpu, 0))
- break;
- }
-
- cpu = odp_cpumask_next(mask, cpu);
- }
- thread_tbl[num - 1].last = 1;
-
- return i;
-}
-
-/*
- * wait for the odpthreads termination (linux processes and threads)
- */
-int ODPH_DEPRECATE(odph_odpthreads_join)(
- ODPH_DEPRECATE(odph_odpthread_t) *thread_tbl)
-{
- pid_t pid;
- int i = 0;
- int terminated = 0;
- /* child process return code (!=0 is error) */
- int status = 0;
- /* "child" thread return code (!NULL is error) */
- void *thread_ret = NULL;
- int ret;
- int retval = 0;
-
- /* joins linux threads or wait for processes */
- do {
- if (thread_tbl[i].cpu == FAILED_CPU) {
- ODPH_DBG("ODP thread %d not started.\n", i);
- continue;
- }
- /* pthreads: */
- if (thread_tbl[i].start_args.mem_model ==
- ODP_MEM_MODEL_THREAD) {
- /* Wait thread to exit */
- ret = pthread_join(thread_tbl[i].thread.thread_id,
- &thread_ret);
- if (ret != 0) {
- ODPH_ERR("Failed to join thread from cpu #%d\n",
- thread_tbl[i].cpu);
- retval = -1;
- } else {
- terminated++;
- if (thread_ret != NULL) {
- ODPH_ERR("Bad exit status cpu #%d %p\n",
- thread_tbl[i].cpu, thread_ret);
- retval = -1;
- }
- }
- pthread_attr_destroy(&thread_tbl[i].thread.attr);
- } else {
- /* processes: */
- pid = waitpid(thread_tbl[i].proc.pid, &status, 0);
-
- if (pid < 0) {
- ODPH_ERR("waitpid() failed\n");
- retval = -1;
- break;
- }
-
- terminated++;
-
- /* Examine the child process' termination status */
- if (WIFEXITED(status) &&
- WEXITSTATUS(status) != EXIT_SUCCESS) {
- ODPH_ERR("Child exit status:%d (pid:%d)\n",
- WEXITSTATUS(status), (int)pid);
- retval = -1;
- }
- if (WIFSIGNALED(status)) {
- int signo = WTERMSIG(status);
-
- ODPH_ERR("Child term signo:%d - %s (pid:%d)\n",
- signo, strsignal(signo), (int)pid);
- retval = -1;
- }
- }
- } while (!thread_tbl[i++].last);
-
- return (retval < 0) ? retval : terminated;
-}
-
/* man gettid() notes:
* Glibc does not provide a wrapper for this system call;
*/
diff --git a/include/Makefile.am b/include/Makefile.am
index ea3f17add..bc28a5cba 100644
--- a/include/Makefile.am
+++ b/include/Makefile.am
@@ -64,6 +64,7 @@ odpapiinclude_HEADERS = \
odp/api/sync.h \
odp/api/system_info.h \
odp/api/thread.h \
+ odp/api/thread_types.h \
odp/api/threshold.h \
odp/api/thrmask.h \
odp/api/ticketlock.h \
@@ -193,6 +194,7 @@ odpapiabidefaultinclude_HEADERS = \
odp/api/abi-default/std_types.h \
odp/api/abi-default/sync.h \
odp/api/abi-default/thread.h \
+ odp/api/abi-default/thread_types.h \
odp/api/abi-default/thrmask.h \
odp/api/abi-default/ticketlock.h \
odp/api/abi-default/time.h \
@@ -255,6 +257,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm32-linux/odp/api/abi/std_types.h \
odp/arch/arm32-linux/odp/api/abi/sync.h \
odp/arch/arm32-linux/odp/api/abi/thread.h \
+ odp/arch/arm32-linux/odp/api/abi/thread_types.h \
odp/arch/arm32-linux/odp/api/abi/thrmask.h \
odp/arch/arm32-linux/odp/api/abi/ticketlock.h \
odp/arch/arm32-linux/odp/api/abi/time.h \
@@ -313,6 +316,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/arm64-linux/odp/api/abi/std_types.h \
odp/arch/arm64-linux/odp/api/abi/sync.h \
odp/arch/arm64-linux/odp/api/abi/thread.h \
+ odp/arch/arm64-linux/odp/api/abi/thread_types.h \
odp/arch/arm64-linux/odp/api/abi/thrmask.h \
odp/arch/arm64-linux/odp/api/abi/ticketlock.h \
odp/arch/arm64-linux/odp/api/abi/time.h \
@@ -371,6 +375,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/default-linux/odp/api/abi/std_types.h \
odp/arch/default-linux/odp/api/abi/sync.h \
odp/arch/default-linux/odp/api/abi/thread.h \
+ odp/arch/default-linux/odp/api/abi/thread_types.h \
odp/arch/default-linux/odp/api/abi/thrmask.h \
odp/arch/default-linux/odp/api/abi/ticketlock.h \
odp/arch/default-linux/odp/api/abi/time.h \
@@ -429,6 +434,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/power64-linux/odp/api/abi/std_types.h \
odp/arch/power64-linux/odp/api/abi/sync.h \
odp/arch/power64-linux/odp/api/abi/thread.h \
+ odp/arch/power64-linux/odp/api/abi/thread_types.h \
odp/arch/power64-linux/odp/api/abi/thrmask.h \
odp/arch/power64-linux/odp/api/abi/ticketlock.h \
odp/arch/power64-linux/odp/api/abi/time.h \
@@ -487,6 +493,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_32-linux/odp/api/abi/std_types.h \
odp/arch/x86_32-linux/odp/api/abi/sync.h \
odp/arch/x86_32-linux/odp/api/abi/thread.h \
+ odp/arch/x86_32-linux/odp/api/abi/thread_types.h \
odp/arch/x86_32-linux/odp/api/abi/thrmask.h \
odp/arch/x86_32-linux/odp/api/abi/ticketlock.h \
odp/arch/x86_32-linux/odp/api/abi/time.h \
@@ -545,6 +552,7 @@ odpapiabiarchinclude_HEADERS = \
odp/arch/x86_64-linux/odp/api/abi/std_types.h \
odp/arch/x86_64-linux/odp/api/abi/sync.h \
odp/arch/x86_64-linux/odp/api/abi/thread.h \
+ odp/arch/x86_64-linux/odp/api/abi/thread_types.h \
odp/arch/x86_64-linux/odp/api/abi/thrmask.h \
odp/arch/x86_64-linux/odp/api/abi/ticketlock.h \
odp/arch/x86_64-linux/odp/api/abi/time.h \
diff --git a/include/odp/api/abi-default/event_types.h b/include/odp/api/abi-default/event_types.h
index 7955e53b8..d6231a98f 100644
--- a/include/odp/api/abi-default/event_types.h
+++ b/include/odp/api/abi-default/event_types.h
@@ -11,7 +11,6 @@ extern "C" {
#endif
#include <stdint.h>
-#include <odp/api/deprecated.h>
/** @internal Dummy type for strong typing */
typedef struct { char dummy; /**< @internal Dummy */ } _odp_abi_event_t;
diff --git a/include/odp/api/abi-default/schedule_types.h b/include/odp/api/abi-default/schedule_types.h
index 45366ffa6..eeb18771e 100644
--- a/include/odp/api/abi-default/schedule_types.h
+++ b/include/odp/api/abi-default/schedule_types.h
@@ -15,7 +15,6 @@
extern "C" {
#endif
-#include <odp/api/deprecated.h>
#include <odp/api/std_types.h>
/** @addtogroup odp_scheduler
@@ -27,26 +26,6 @@ extern "C" {
#define ODP_SCHED_GROUP_NAME_LEN 32
-#if ODP_DEPRECATED_API
-#define ODP_SCHED_PRIO_HIGHEST (odp_schedule_max_prio())
-
-#define ODP_SCHED_PRIO_NORMAL (odp_schedule_default_prio())
-
-#define ODP_SCHED_PRIO_LOWEST (odp_schedule_min_prio())
-
-#define ODP_SCHED_PRIO_DEFAULT (odp_schedule_default_prio())
-#else
-/* Required to prevent Doxygen warning */
-#define ODP_SCHED_PRIO_HIGHEST
-#define ODP_SCHED_PRIO_NORMAL
-#define ODP_SCHED_PRIO_LOWEST
-#define ODP_SCHED_PRIO_DEFAULT
-#undef ODP_SCHED_PRIO_HIGHEST
-#undef ODP_SCHED_PRIO_NORMAL
-#undef ODP_SCHED_PRIO_LOWEST
-#undef ODP_SCHED_PRIO_DEFAULT
-#endif
-
typedef int odp_schedule_sync_t;
#define ODP_SCHED_SYNC_PARALLEL 0
diff --git a/include/odp/api/abi-default/thread.h b/include/odp/api/abi-default/thread.h
index 3113278d3..3b7ce41dc 100644
--- a/include/odp/api/abi-default/thread.h
+++ b/include/odp/api/abi-default/thread.h
@@ -2,12 +2,6 @@
* Copyright (c) 2015-2018 Linaro Limited
*/
-/**
- * @file
- *
- * ODP thread
- */
-
#ifndef ODP_ABI_THREAD_H_
#define ODP_ABI_THREAD_H_
@@ -15,15 +9,7 @@
extern "C" {
#endif
-/** @addtogroup odp_thread
- * @{
- */
-
-#define ODP_THREAD_COUNT_MAX 256
-
-/**
- * @}
- */
+/* Empty header required due to the inline functions */
#ifdef __cplusplus
}
diff --git a/include/odp/api/abi-default/thread_types.h b/include/odp/api/abi-default/thread_types.h
new file mode 100644
index 000000000..1511f488d
--- /dev/null
+++ b/include/odp/api/abi-default/thread_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#ifndef ODP_ABI_THREAD_TYPES_H_
+#define ODP_ABI_THREAD_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @ingroup odp_thread
+ * @{
+ */
+
+#define ODP_THREAD_COUNT_MAX 256
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index f513eea94..518b2bd3c 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -21,7 +21,6 @@ extern "C" {
#include <odp/api/pool_types.h>
#include <odp/api/std_types.h>
#include <odp/api/threshold.h>
-#include <odp/api/deprecated.h>
/** @defgroup odp_classification ODP CLASSIFICATION
* Packet input classification.
@@ -530,26 +529,36 @@ typedef struct odp_cls_stats_capability_t {
*/
typedef struct odp_cls_capability_t {
/** PMR terms supported by the classifier
- * A bit mask of one bit for each of odp_pmr_term_t
- */
+ *
+ * A bit mask of one bit for each of odp_pmr_term_t. */
odp_cls_pmr_terms_t supported_terms;
- /** Maximum number of PMR terms */
- uint32_t max_pmr_terms;
+ /** Maximum number of single-term PMRs
+ *
+ * Depending on the implementation, using several/composite terms for a
+ * single PMR may end up incurring more than one PMR element from this
+ * total capacity. */
+ uint32_t max_pmr;
+
+ /** Maximum number of PMRs per CoS */
+ uint32_t max_pmr_per_cos;
- /** Number of PMR terms available for use now */
- uint32_t available_pmr_terms;
+ /** Maximum number of terms per composite PMR */
+ uint32_t max_terms_per_pmr;
/** Maximum number of CoS supported */
uint32_t max_cos;
- /** Maximum number of CoSes that can have statistics enabled at the same
+ /** Maximum number of concurrent CoS stats
+ *
+ * Maximum number of CoSes that can have statistics enabled at the same
* time. If this value is zero, then CoS level statistics are not
* supported. */
uint32_t max_cos_stats;
/** Maximum number of queues supported per CoS
- * if the value is 1, then hashing is not supported*/
+ *
+ * If the value is 1, then hashing is not supported. */
uint32_t max_hash_queues;
/** Protocol header combination supported for Hashing */
@@ -578,21 +587,6 @@ typedef struct odp_cls_capability_t {
} odp_cls_capability_t;
-#if ODP_DEPRECATED_API
-
-/**
- * class of service packet drop policies
- *
- * @deprecated Drop policy will be removed from the API.
- */
-typedef enum {
- ODP_COS_DROP_POOL, /**< Follow buffer pool drop policy */
- ODP_COS_DROP_NEVER, /**< Never drop, ignoring buffer pool policy */
-
-} odp_cls_drop_t;
-
-#endif
-
/**
* Enumeration of actions for CoS.
*/
@@ -682,11 +676,6 @@ typedef struct odp_cls_cos_param {
*/
odp_pool_t pool;
-#if ODP_DEPRECATED_API
- /** Drop policy associated with CoS */
- odp_cls_drop_t drop_policy;
-#endif
-
/** Random Early Detection configuration */
odp_red_param_t red;
@@ -856,67 +845,6 @@ uint32_t odp_cls_cos_num_queue(odp_cos_t cos);
*/
uint32_t odp_cls_cos_queues(odp_cos_t cos, odp_queue_t queue[], uint32_t num);
-#if ODP_DEPRECATED_API
-
-/**
- * Assign packet drop policy for specific class-of-service
- *
- * @param cos CoS handle
- * @param drop_policy Desired packet drop policy for this class.
- *
- * @retval 0 on success
- * @retval <0 on failure
- *
- * @note Optional.
- */
-int odp_cos_drop_set(odp_cos_t cos, odp_cls_drop_t drop_policy);
-
-/**
-* Get the drop policy configured for a specific class-of-service instance.
-*
-* @param cos CoS handle
-*
-* @retval Drop policy configured with the given class-of-service
-*/
-odp_cls_drop_t odp_cos_drop(odp_cos_t cos);
-
-#endif
-
-/**
- * Request to override per-port class of service based on Layer-2 priority field if present.
- *
- * @deprecated Use #ODP_PMR_VLAN_PCP_0 instead.
- *
- * @param pktio_in Ingress port identifier.
- * @param num_qos Number of QoS levels, typically 8.
- * @param qos_table Values of the Layer-2 QoS header field.
- * @param cos_table Class-of-service assigned to each of the allowed
- * Layer-2 QOS levels.
- *
- * @retval 0 on success
- * @retval <0 on failure
- */
-int ODP_DEPRECATE(odp_cos_with_l2_priority)(odp_pktio_t pktio_in, uint8_t num_qos,
- uint8_t qos_table[], odp_cos_t cos_table[]);
-
-/**
- * Request to override per-port class of service based on Layer-3 priority field if present.
- *
- * @deprecated Use #ODP_PMR_IP_DSCP instead.
- *
- * @param pktio_in Ingress port identifier.
- * @param num_qos Number of allowed Layer-3 QoS levels.
- * @param qos_table Values of the Layer-3 QoS header field.
- * @param cos_table Class-of-service assigned to each of the allowed
- * Layer-3 QOS levels.
- * @param l3_preference when true, Layer-3 QoS overrides L2 QoS when present.
- *
- * @retval 0 on success
- * @retval <0 on failure
- */
-int ODP_DEPRECATE(odp_cos_with_l3_qos)(odp_pktio_t pktio_in, uint32_t num_qos, uint8_t qos_table[],
- odp_cos_t cos_table[], odp_bool_t l3_preference);
-
/**
* Get statistics for a CoS
*
@@ -988,7 +916,8 @@ void odp_cls_pmr_create_opt_init(odp_pmr_create_opt_t *opt);
* considered to match only if a packet matches with all its terms. It is implementation specific
* which term combinations are supported as composite PMRs. When creating a composite PMR,
* application should check the return value and perform appropriate fallback actions if the create
- * call returns failure.
+ * call returns failure. See odp_cls_capability_t::max_pmr and
+ * odp_cls_capability_t::max_terms_per_pmr for related capabilities.
*
* Use odp_cls_pmr_param_init() to initialize parameters into their default values.
*
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index e0e232dbe..49bc29ee4 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -14,7 +14,6 @@
#include <odp/visibility_begin.h>
#include <odp/api/crypto_types.h>
-#include <odp/api/deprecated.h>
#include <odp/api/packet_types.h>
#include <odp/api/std_types.h>
diff --git a/include/odp/api/spec/crypto_types.h b/include/odp/api/spec/crypto_types.h
index 7752f1957..31214c0e9 100644
--- a/include/odp/api/spec/crypto_types.h
+++ b/include/odp/api/spec/crypto_types.h
@@ -12,7 +12,6 @@
#define ODP_API_SPEC_CRYPTO_TYPES_H_
#include <odp/visibility_begin.h>
-#include <odp/api/deprecated.h>
#include <odp/api/packet_types.h>
#include <odp/api/pool_types.h>
#include <odp/api/std_types.h>
@@ -880,17 +879,6 @@ typedef enum {
ODP_CRYPTO_SES_ERR_PARAMS,
} odp_crypto_ses_create_err_t;
-#if ODP_DEPRECATED_API
-/** This synonym for backward compatibility has been deprecated */
-#define ODP_CRYPTO_SES_CREATE_ERR_NONE ODP_CRYPTO_SES_ERR_NONE
-/** This synonym for backward compatibility has been deprecated */
-#define ODP_CRYPTO_SES_CREATE_ERR_ENOMEM ODP_CRYPTO_SES_ERR_ENOMEM
-/** This synonym for backward compatibility has been deprecated */
-#define ODP_CRYPTO_SES_CREATE_ERR_INV_CIPHER ODP_CRYPTO_SES_ERR_CIPHER
-/** This synonym for backward compatibility has been deprecated */
-#define ODP_CRYPTO_SES_CREATE_ERR_INV_AUTH ODP_CRYPTO_SES_ERR_AUTH
-#endif
-
/**
* Crypto API algorithm return code
*/
@@ -899,49 +887,24 @@ typedef enum {
ODP_CRYPTO_ALG_ERR_NONE,
/** Invalid range or packet size */
ODP_CRYPTO_ALG_ERR_DATA_SIZE,
- /** Key size invalid for algorithm */
- ODP_DEPRECATE(ODP_CRYPTO_ALG_ERR_KEY_SIZE),
/** Computed ICV value mismatch */
ODP_CRYPTO_ALG_ERR_ICV_CHECK,
- /** IV value not specified */
- ODP_DEPRECATE(ODP_CRYPTO_ALG_ERR_IV_INVALID),
/** Other error */
ODP_CRYPTO_ALG_ERR_OTHER,
} odp_crypto_alg_err_t;
/**
- * Crypto API hardware centric return code
- */
-typedef enum {
- /** Operation completed successfully */
- ODP_DEPRECATE(ODP_CRYPTO_HW_ERR_NONE),
- /** Error detected during DMA of data */
- ODP_DEPRECATE(ODP_CRYPTO_HW_ERR_DMA),
- /** Operation failed due to pool depletion */
- ODP_DEPRECATE(ODP_CRYPTO_HW_ERR_BP_DEPLETED),
-} ODP_DEPRECATE(odp_crypto_hw_err_t);
-
-/**
* Crypto API per packet operation completion status
*/
typedef struct odp_crypto_op_status {
/** Algorithm specific return code */
odp_crypto_alg_err_t alg_err;
-
- /** Hardware specific return code */
- ODP_DEPRECATE(odp_crypto_hw_err_t) ODP_DEPRECATE(hw_err);
} odp_crypto_op_status_t;
/**
* Crypto packet API operation result
*/
typedef struct odp_crypto_packet_result_t {
- /** Request completed successfully.
- *
- * @deprecated Check the return value of odp_crypto_result() instead.
- */
- odp_bool_t ODP_DEPRECATE(ok);
-
/** Input packet passed to odp_crypo_op_enq() when the operation
* type of the session is ODP_CRYPTO_OP_TYPE_OOP. In other cases
* this field does not have a valid value.
diff --git a/include/odp/api/spec/event.h b/include/odp/api/spec/event.h
index a8737d3a5..220c955c3 100644
--- a/include/odp/api/spec/event.h
+++ b/include/odp/api/spec/event.h
@@ -19,6 +19,7 @@ extern "C" {
#include <odp/api/event_types.h>
#include <odp/api/packet_types.h>
+#include <odp/api/pool_types.h>
/** @defgroup odp_event ODP EVENT
* Generic event metadata and operations.
@@ -94,6 +95,19 @@ int odp_event_type_multi(const odp_event_t event[], int num,
odp_event_type_t *type);
/**
+ * Event pool
+ *
+ * Returns handle to the pool where the event was allocated from. If the
+ * underlying event type does not have an API for returning pool handle
+ * (e.g. ODP_EVENT_IPSEC_STATUS), ODP_POOL_INVALID is returned.
+ *
+ * @param event Event handle
+ *
+ * @return Pool handle or ODP_POOL_INVALID depending on event type
+ */
+odp_pool_t odp_event_pool(odp_event_t event);
+
+/**
* Event user area
*
* Returns pointer to the user area associated with the event. This maps to the
diff --git a/include/odp/api/spec/hints.h b/include/odp/api/spec/hints.h
index 032b483df..fa5b1b9bb 100644
--- a/include/odp/api/spec/hints.h
+++ b/include/odp/api/spec/hints.h
@@ -61,7 +61,7 @@ extern "C" {
/**
* Branch unlikely taken
*/
-#define odp_unlikely(x) __builtin_expect((x), 0)
+#define odp_unlikely(x) __builtin_expect(!!(x), 0)
/*
* __builtin_prefetch (const void *addr, rw, locality)
diff --git a/include/odp/api/spec/init.h b/include/odp/api/spec/init.h
index f0491ad5b..ee787665f 100644
--- a/include/odp/api/spec/init.h
+++ b/include/odp/api/spec/init.h
@@ -17,7 +17,7 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/hints.h>
-#include <odp/api/spec/thread_types.h>
+#include <odp/api/thread_types.h>
#include <odp/api/cpumask.h>
/** @defgroup odp_initialization ODP INITIALIZATION
diff --git a/include/odp/api/spec/packet.h b/include/odp/api/spec/packet.h
index 187aedbb9..ca22280ec 100644
--- a/include/odp/api/spec/packet.h
+++ b/include/odp/api/spec/packet.h
@@ -1847,17 +1847,18 @@ void odp_packet_flow_hash_set(odp_packet_t pkt, uint32_t flow_hash);
/**
* Packet timestamp
*
- * Returns packet timestamp value as odp_time_t type. Use time API for
- * additional operations on packet timestamp values or conversion into
- * nanoseconds. Use odp_packet_has_ts() to check if packet has a valid
- * timestamp. Packet input interface timestamp resolution can be checked with
- * odp_pktio_ts_res().
+ * Returns timestamp value recorded into the packet. Use odp_packet_has_ts() to check if the packet
+ * has a valid timestamp. Packet input uses packet IO interface specific time source and thus
+ * timestamp (or nanosecond) values from one interface cannot be mixed with values from another
+ * interface (or time source in general). Packet IO interface timestamp resolution can be checked
+ * with odp_pktio_ts_res() and current time with odp_pktio_time().
+ *
+ * Time API operations (e.g. odp_time_diff()) can be used with packet timestamp values or
+ * when converting those into nanoseconds (odp_time_to_ns()).
*
* @param pkt Packet handle
*
* @return Timestamp value
- *
- * @see odp_pktio_ts_res(), odp_packet_has_ts(), odp_time_to_ns()
*/
odp_time_t odp_packet_ts(odp_packet_t pkt);
@@ -2086,21 +2087,21 @@ int odp_packet_has_tx_compl_request(odp_packet_t pkt);
/**
* Set packet free control option
*
- * This option enables application to control which packets are freed/not freed back into pool
- * after ODP implementation has finished processing those. The option affects only packets that
- * are transmitted directly through a packet output interface (also with LSO), i.e. packets
- * transmitted through inline IPsec or TM are not affected.
- *
- * When the option is set to #ODP_PACKET_FREE_CTRL_DONT_FREE, packet output interface will not free
- * the packet after transmit and application may reuse the packet as soon as its transmit is
- * complete (see e.g. odp_packet_tx_compl_done()).
- *
- * The option must not be enabled on packets that have multiple references.
- *
- * Check packet IO interface capability free_ctrl.dont_free (odp_pktio_capability_t::dont_free) for
- * the option support. When an interface does not support the option, it ignores the value.
- *
- * The default value is #ODP_PACKET_FREE_CTRL_DISABLED.
+ * This option enables application to control if a packet is freed/not freed back into a pool
+ * after an ODP offload feature has finished processing it. The option does not have an effect on
+ * odp_packet_free() or other direct free calls. It affects only packets that are sent directly
+ * through a packet output interface queue (odp_pktout_queue_t or odp_queue_t), also when packets
+ * are LSO offloaded. Packets transmitted through inline IPsec or TM are not affected.
+ *
+ * Packet output interface frees transmitted packets by default. When the option is set to
+ * #ODP_PACKET_FREE_CTRL_DONT_FREE, packet output will not free the packet after transmit and
+ * application may reuse or free the packet as soon as its transmission is complete
+ * (see e.g. odp_packet_tx_compl_done()). Check packet IO interface capability free_ctrl.dont_free
+ * (odp_pktio_capability_t.free_ctrl) for the option support. When an interface does not support
+ * the option, it ignores the value.
+ *
+ * The option must not be enabled on packets that have multiple references. The default value is
+ * #ODP_PACKET_FREE_CTRL_DISABLED.
*
* @param pkt Packet handle
* @param ctrl Packet free control option value
diff --git a/include/odp/api/spec/pool.h b/include/odp/api/spec/pool.h
index 4fade6470..b02c0d294 100644
--- a/include/odp/api/spec/pool.h
+++ b/include/odp/api/spec/pool.h
@@ -154,17 +154,21 @@ unsigned int odp_pool_max_index(void);
int odp_pool_index(odp_pool_t pool);
/**
- * Get statistics for pool
+ * Read pool statistics
*
- * Read the statistics counters enabled using odp_pool_stats_opt_t during pool creation. The
- * inactive counters are set to zero by the implementation. Depending on the implementation, there
- * may be some delay until performed pool operations are visible in the statistics.
+ * Read statistics counters that were enabled in pool creation parameters (odp_pool_param_t.stats).
+ * The function writes all disabled counters to zero, except per thread counters
+ * (thread.cache_available[]) which have undefined values.
*
- * A single call may read statistics from one to ODP_POOL_MAX_THREAD_STATS
- * threads. Set 'stats.thread.first' and 'stats.thread.last' to select the
- * threads ('first' <= 'last'). Valid values range from 0 to odp_thread_count_max() - 1.
- * A successful call fills the output array starting always from the first element
- * 'stats.thread.cache_available[0]' (='stats.thread.first').
+ * When per thread counters are enabled, application sets 'stats.thread.first' and
+ * 'stats.thread.last' to select the threads ('first' <= 'last'). A single call may read statistics
+ * from one to #ODP_POOL_MAX_THREAD_STATS threads. Valid thread ID values range from 0 to
+ * odp_thread_count_max() - 1. A successful call fills the output array starting always from the
+ * first element 'stats.thread.cache_available[0]' (='stats.thread.first'). Unused array elements
+ * have undefined values.
+ *
+ * Depending on the implementation, there may be some delay until performed pool operations are
+ * visible in the statistics.
*
* @param pool Pool handle
* @param[in,out] stats Output buffer for counters
diff --git a/include/odp/api/spec/schedule_types.h b/include/odp/api/spec/schedule_types.h
index 5e2f5a2da..b15397b96 100644
--- a/include/odp/api/spec/schedule_types.h
+++ b/include/odp/api/spec/schedule_types.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2015-2018 Linaro Limited
+ * Copyright (c) 2023 Nokia
*/
/**
@@ -39,30 +40,6 @@ extern "C" {
*/
/**
- * @def ODP_SCHED_PRIO_HIGHEST
- * @deprecated This macro is equivalent of calling odp_schedule_max_prio(). Use
- * direct function call instead.
- */
-
-/**
- * @def ODP_SCHED_PRIO_NORMAL
- * @deprecated This macro is equivalent of calling odp_schedule_default_prio().
- * Use direct function call instead.
- */
-
-/**
- * @def ODP_SCHED_PRIO_LOWEST
- * @deprecated This macro is equivalent of calling odp_schedule_min_prio(). Use
- * direct function call instead.
- */
-
-/**
- * @def ODP_SCHED_PRIO_DEFAULT
- * @deprecated This macro is equivalent of calling odp_schedule_default_prio().
- * Use direct function call instead.
- */
-
-/**
* @typedef odp_schedule_sync_t
* Scheduler synchronization method
*/
@@ -109,31 +86,53 @@ extern "C" {
* enables the user to achieve high single flow throughput by avoiding
* SW synchronization for ordering between threads.
*
- * The source queue (dequeue) ordering is maintained when
- * events are enqueued to their destination queue(s) within the same ordered
- * queue synchronization context. A thread holds the context until it
- * requests another event from the scheduler, which implicitly releases the
- * context. User may allow the scheduler to release the context earlier than
- * that by calling odp_schedule_release_ordered(). However, this call is just
- * a hint to the implementation and the context may be held until the next
- * schedule call.
+ * When odp_schedule() returns an event, the calling thread is associated
+ * with an ordered scheduling synchronization context. The contexts arising
+ * from the same ordered queue have the same mutual ordering as the
+ * corresponding events had in the queue.
+ *
+ * When odp_schedule_multi() returns more than one event from an ordered
+ * queue, the events returned were consecutive in the queue and the calling
+ * thread is associated with single ordered scheduling synchronization
+ * context that is ordered with respect to other contexts as if just the
+ * first event was returned.
+ *
+ * When threads holding ordered scheduling synchronization contexts, which
+ * arise from the same ordered queue, enqueue events to destination queues,
+ * the order of events in each destination queue will be as follows:
*
- * Events from the same (source) queue appear in their original order
- * when dequeued from a destination queue. The destination queue can have any
- * queue type and synchronization method. Event ordering is based on the
- * received event(s), but also other (newly allocated or stored) events are
- * ordered when enqueued within the same ordered context. Events not enqueued
- * (e.g. freed or stored) within the context are considered missing from
- * reordering and are skipped at this time (but can be ordered again within
- * another context).
+ * - Events enqueued by one thread have the order in which the enqueue
+ * calls were made.
+ *
+ * - Two events enqueued by different threads have the same mutual order
+ * as the scheduling synchronization contexts of the enqueuing threads.
+ *
+ * The ordering rules above apply to all events, not just those that were
+ * scheduled from the ordered queue. For instance, newly allocated events
+ * and previously stored events are ordered in the destination queue based
+ * on the scheduling synchronization context. The ordering rules apply
+ * regardless of the type (scheduled or plain) or schedule type (atomic,
+ * ordered, or parallel) of the destination queue. If the order type of
+ * the destination queue is ODP_QUEUE_ORDER_IGNORE, then the order between
+ * events enqueued by different threads is not guaranteed.
+ *
+ * An ordered scheduling synchronization context is implicitly released when
+ * the thread holding the context requests a new event from the scheduler.
+ * User may allow the scheduler to release the context earlier than that by
+ * calling odp_schedule_release_ordered(). However, this call is just a hint
+ * to the implementation and the context may be held until the next schedule
+ * call.
+ *
+ * Enqueue calls by different threads may return in a different order than
+ * the final order of the enqueued events in the destination queue.
*
* Unnecessary event re-ordering may be avoided for those destination queues
- * that do not need to maintain the original event order by setting 'order'
+ * that do not need to maintain the specified event order by setting 'order'
* queue parameter to ODP_QUEUE_ORDER_IGNORE.
*
* When scheduler is enabled as flow-aware, the event flow id value affects
- * scheduling of the event and synchronization is maintained per flow within
- * each queue.
+ * scheduling of the event and synchronization is maintained and order is
+ * defined per flow within each queue.
*/
/**
diff --git a/include/odp/api/spec/thread.h b/include/odp/api/spec/thread.h
index d595a9563..dbb033da1 100644
--- a/include/odp/api/spec/thread.h
+++ b/include/odp/api/spec/thread.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright (c) 2013-2018 Linaro Limited
- * Copyright (c) 2022 Nokia
+ * Copyright (c) 2022-2023 Nokia
*/
/**
@@ -17,19 +17,14 @@
extern "C" {
#endif
+#include <odp/api/thread_types.h>
+
/** @defgroup odp_thread ODP THREAD
* Thread types, masks and IDs.
* @{
*/
/**
- * @def ODP_THREAD_COUNT_MAX
- * Maximum number of threads supported in build time. Use
- * odp_thread_count_max() for maximum number of threads supported in run time,
- * which depend on system configuration and may be lower than this number.
- */
-
-/**
* Get thread identifier
*
* Returns the ODP thread identifier of current thread. Thread IDs range from 0
@@ -48,25 +43,70 @@ int odp_thread_id(void);
* Thread count
*
* Returns the current ODP thread count. This is the number of active threads
- * running the ODP instance. Each odp_init_local() call increments and each
- * odp_term_local() call decrements the count. The count is always between 1 and
- * odp_thread_count_max().
+ * of any type running in the ODP instance. Each odp_init_local() call
+ * increments and each odp_term_local() call decrements the count. The count is
+ * always between 1 and odp_thread_count_max().
*
* @return Current thread count
*/
int odp_thread_count(void);
/**
+ * Control thread count
+ *
+ * Otherwise like odp_thread_count(), but returns the number of active threads
+ * of type #ODP_THREAD_CONTROL. The count is always between 0 and
+ * odp_thread_control_count_max().
+ *
+ * @return Current control thread count
+ */
+int odp_thread_control_count(void);
+
+/**
+ * Worker thread count
+ *
+ * Otherwise like odp_thread_count(), but returns the number of active threads
+ * of type #ODP_THREAD_WORKER. The count is always between 0 and
+ * odp_thread_worker_count_max().
+ *
+ * @return Current worker thread count
+ */
+int odp_thread_worker_count(void);
+
+/**
* Maximum thread count
*
- * Returns the maximum thread count, which is a constant value and set in
- * ODP initialization phase. This may be lower than ODP_THREAD_COUNT_MAX.
+ * Returns the maximum number of threads of any type. This is a constant value
+ * and set in ODP initialization phase. The value may be lower than
+ * #ODP_THREAD_COUNT_MAX.
*
* @return Maximum thread count
*/
int odp_thread_count_max(void);
/**
+ * Maximum control thread count
+ *
+ * Otherwise like odp_thread_count_max(), but returns the maximum number of
+ * control threads (#ODP_THREAD_CONTROL). The returned value is always <=
+ * odp_thread_count_max().
+ *
+ * @return Maximum control thread count
+ */
+int odp_thread_control_count_max(void);
+
+/**
+ * Maximum worker thread count
+ *
+ * Otherwise like odp_thread_count_max(), but returns the maximum number of
+ * worker threads (#ODP_THREAD_WORKER). The returned value is always <=
+ * odp_thread_count_max().
+ *
+ * @return Maximum worker thread count
+ */
+int odp_thread_worker_count_max(void);
+
+/**
* Thread type
*
* Returns the thread type of the current thread.
diff --git a/include/odp/api/spec/thread_types.h b/include/odp/api/spec/thread_types.h
index 60cf4897b..204d28cad 100644
--- a/include/odp/api/spec/thread_types.h
+++ b/include/odp/api/spec/thread_types.h
@@ -20,6 +20,13 @@ extern "C" {
*/
/**
+ * @def ODP_THREAD_COUNT_MAX
+ * Maximum number of threads supported at build time. Use odp_thread_count_max()
+ * for maximum number of threads supported at run time, which depends on system
+ * configuration and may be lower than this number.
+ */
+
+/**
* Thread type
*/
typedef enum odp_thread_type_e {
diff --git a/include/odp/api/spec/time.h b/include/odp/api/spec/time.h
index 54ab5f19a..c3571f7fa 100644
--- a/include/odp/api/spec/time.h
+++ b/include/odp/api/spec/time.h
@@ -21,20 +21,24 @@ extern "C" {
#include <odp/api/time_types.h>
/** @defgroup odp_time ODP TIME
- * Chip and CPU level wall clock time.
+ * SoC global and CPU local wall clock time
+ *
* @{
*/
/**
* Current local time
*
- * Returns current local time stamp value. The local time source provides high
- * resolution time, it is initialized to zero during ODP startup and will not
- * wrap around in at least 10 years.
- * Local time stamps are local to the calling thread and must not be shared
- * with other threads.
+ * Returns current CPU local time stamp value. The used time source is specific to the calling
+ * thread and the CPU it is running on during the call. Time stamp values from different
+ * time sources cannot be compared or otherwise mixed.
+ *
+ * Local time stamp value advances with a constant rate defined by odp_time_local_res(). The rate
+ * remains constant even during dynamic CPU frequency scaling. Local time stamp and related
+ * nanosecond values may not start from zero, but are guaranteed not to wrap around in at least
+ * 10 years from the ODP instance startup.
*
- * @return Local time stamp.
+ * @return CPU local time stamp value
*/
odp_time_t odp_time_local(void);
@@ -70,12 +74,15 @@ uint64_t odp_time_local_strict_ns(void);
/**
* Current global time
*
- * Returns current global time stamp value. The global time source provides high
- * resolution time, it is initialized to zero during ODP startup and will not
- * wrap around in at least 10 years.
- * Global time stamps can be shared between threads.
+ * Returns current SoC global time stamp value. Global time stamp values read by different threads
+ * (or CPUs) may be compared or otherwise mixed as those come from the same time source.
*
- * @return Global time stamp.
+ * Global time stamp value advances with a constant rate defined by odp_time_global_res(). The rate
+ * remains constant even during dynamic CPU frequency scaling. Global time stamp and related
+ * nanosecond values may not start from zero, but are guaranteed not to wrap around in at least
+ * 10 years from the ODP instance startup.
+ *
+ * @return SoC global time stamp value
*/
odp_time_t odp_time_global(void);
@@ -128,8 +135,25 @@ odp_time_t odp_time_diff(odp_time_t t2, odp_time_t t1);
uint64_t odp_time_diff_ns(odp_time_t t2, odp_time_t t1);
/**
+ * Add nanoseconds into time
+ *
+ * Adds 'ns' nanoseconds into the time stamp value. The resulting time may wrap around, if
+ * the sum of 'time' and 'ns' is more than 10 years from the ODP instance startup.
+ *
+ * @param time Time stamp
+ * @param ns Nanoseconds to be added
+ *
+ * @return Time stamp incremented by 'ns' nanoseconds
+ */
+odp_time_t odp_time_add_ns(odp_time_t time, uint64_t ns);
+
+/**
* Time sum
*
+ * Returns the sum of time stamp values. Time stamps must be from the same time source (global or
+ * local). The resulting time may wrap around, if the sum exceeds 10 years from the ODP instance
+ * startup.
+ *
* @param t1 Time stamp
* @param t2 Time stamp
*
@@ -209,6 +233,16 @@ void odp_time_wait_until(odp_time_t time);
void odp_time_wait_ns(uint64_t ns);
/**
+ * Get ODP instance startup time
+ *
+ * Outputs time stamp values captured at ODP instance startup. Application may use those
+ * to calculate time stamp values relative to ODP startup time.
+ *
+ * @param[out] startup Startup time structure for output
+ */
+void odp_time_startup(odp_time_startup_t *startup);
+
+/**
* @}
*/
diff --git a/include/odp/api/spec/time_types.h b/include/odp/api/spec/time_types.h
index 5f5685745..cf0393ef0 100644
--- a/include/odp/api/spec/time_types.h
+++ b/include/odp/api/spec/time_types.h
@@ -50,6 +50,18 @@ extern "C" {
*/
/**
+ * Time stamp values at ODP startup
+ */
+typedef struct odp_time_startup_t {
+ /** Global time at ODP startup */
+ odp_time_t global;
+
+ /** Global time in nanoseconds at ODP startup */
+ uint64_t global_ns;
+
+} odp_time_startup_t;
+
+/**
* @}
*/
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index c42c5cf5e..1e7a06ad4 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -17,7 +17,6 @@
extern "C" {
#endif
-#include <odp/api/deprecated.h>
#include <odp/api/timer_types.h>
#include <odp/api/event_types.h>
#include <odp/api/pool_types.h>
@@ -114,6 +113,10 @@ void odp_timer_pool_param_init(odp_timer_pool_param_t *param);
* The use of pool name is optional. Unique names are not required. Use odp_timer_pool_param_init()
* to initialize timer pool parameters into their default values.
*
+ * After creation a timer pool can be either started (see odp_timer_pool_start_multi()) or
+ * destroyed. The returned pool handle cannot be used with any other APIs, except
+ * odp_timer_pool_to_u64(), before the pool is successfully started.
+ *
* Periodic timer expiration frequency is a multiple of the timer pool base frequency
* (odp_timer_pool_param_t::base_freq_hz). Depending on implementation, the base frequency may need
* to be selected carefully with respect to the timer pool source clock frequency. Use
@@ -137,10 +140,31 @@ odp_timer_pool_t odp_timer_pool_create(const char *name, const odp_timer_pool_pa
* The purpose of this call is to coordinate the creation of multiple timer
* pools that may use the same underlying HW resources.
* This function may be called multiple times.
+ *
+ * @deprecated Use odp_timer_pool_start_multi() instead
*/
void odp_timer_pool_start(void);
/**
+ * Start timer pools
+ *
+ * Start given timer pools. After a pool has been successfully started the pool handle can be used
+ * with other APIs. Each timer pool can be started only once.
+ *
+ * Returns 'num' when all given timer pools have been successfully started. If the return value
+ * N < 'num', only the first N pools started successfully and at least some of the remaining ones
+ * failed to start. In case of a negative return value, none of the pools were started. The
+ * unstarted timer pools cannot be used anymore (can only be destroyed).
+ *
+ * @param timer_pool Array of timer pool handles
+ * @param num Number of pools to start
+ *
+ * @retval num on success
+ * @retval <num on failure
+ */
+int odp_timer_pool_start_multi(odp_timer_pool_t timer_pool[], int num);
+
+/**
* Destroy a timer pool
*
* Destroy a timer pool, freeing all resources.
@@ -174,10 +198,14 @@ uint64_t odp_timer_ns_to_tick(odp_timer_pool_t timer_pool, uint64_t ns);
* Current tick value
*
* Returns the current tick value of the timer pool. Timer tick is an implementation defined unit
- * of time. Timer tick value increments with a constant, timer pool specific frequency. Tick
- * frequency may be equal or higher than the requested timer pool resolution. The frequency can be
- * checked with odp_timer_pool_info(). Tick value increments with implementation specific step
- * sizes. The value will not wrap around in at least 10 years from the ODP instance startup.
+ * of time. Ticks and related nanosecond values are timer pool specific. Those may not start from
+ * zero, but are guaranteed not to wrap around in at least 10 years from the ODP instance startup.
+ *
+ * Timer tick value increments with a constant, timer pool specific frequency. Tick frequency may
+ * be equal or higher than the requested timer pool resolution. The frequency can be checked with
+ * odp_timer_pool_info(). Tick value increments with implementation specific step sizes.
+ *
+ * Use odp_timer_sample_ticks() to determine offset between tick values of two or more timer pools.
*
* @param timer_pool Timer pool
*
@@ -247,19 +275,21 @@ odp_timer_t odp_timer_alloc(odp_timer_pool_t timer_pool, odp_queue_t queue, cons
/**
* Free a timer
*
- * Free (destroy) a timer, reclaiming associated resources.
- * The timeout event for an active timer will be returned.
- * The timeout event for an expired timer will not be returned. It is the
- * responsibility of the application to handle this timeout when it is received.
+ * Frees a previously allocated timer. The timer must be inactive when calling this function.
+ * In other words, the application must cancel an active single shot timer (odp_timer_cancel())
+ * successfully or wait it to expire before freeing it. Similarly for an active periodic timer, the
+ * application must cancel it (odp_timer_periodic_cancel()) and receive the last event from
+ * the timer (odp_timer_periodic_ack()) before freeing it.
*
- * A periodic timer must be cancelled successfully before freeing it.
+ * The call returns failure only on non-recoverable errors. Application must not use the timer
+ * handle anymore after the call, regardless of the return value.
*
- * @param timer Timer
+ * @param timer Timer
*
- * @return Event handle of timeout event
- * @retval ODP_EVENT_INVALID on failure
+ * @retval 0 on success
+ * @retval <0 on failure
*/
-odp_event_t odp_timer_free(odp_timer_t timer);
+int odp_timer_free(odp_timer_t timer);
/**
* Start a timer
@@ -389,71 +419,6 @@ int odp_timer_periodic_ack(odp_timer_t timer, odp_event_t tmo_ev);
int odp_timer_periodic_cancel(odp_timer_t timer);
/**
- * Set (or reset) a timer with absolute expiration time
- *
- * This function sets a timer to expire at a specific time. If the timer is
- * already running (set and not yet expired), the function updates (resets) it
- * with a new expiration time and optionally with a new event. A successful
- * reset operation with a new event outputs the old event. A failed reset
- * operation does not modify the timer.
- *
- * The user provided event can be of any event type, but only ODP_EVENT_TIMEOUT
- * type events (odp_timeout_t) carry timeout specific metadata. Furthermore,
- * timer performance may have been optimized for that event type. When the timer
- * expires, the event is enqueued to the destination queue of the timer.
- *
- * @param timer Timer
- * @param abs_tick Absolute expiration time in timer ticks
- * @param[in,out] tmo_ev Pointer to an event handle. The event is enqueued
- * when the timer expires. Use NULL when resetting the
- * timer without changing the event. When resetting the
- * timer with a new event, a successful operation
- * outputs the old event here.
- *
- * @retval ODP_TIMER_SUCCESS Success
- * @retval ODP_TIMER_TOO_NEAR Failure. Expiration time is too near to
- * the current time.
- * @retval ODP_TIMER_TOO_FAR Failure. Expiration time is too far from
- * the current time.
- * @retval ODP_TIMER_FAIL Failure. Set operation: No event provided.
- * Reset operation: Too late to reset the timer.
- *
- * @see odp_timer_set_rel(), odp_timer_alloc(), odp_timer_cancel()
- *
- * @deprecated Use odp_timer_start() or odp_timer_restart() instead
- */
-int ODP_DEPRECATE(odp_timer_set_abs)(odp_timer_t timer, uint64_t abs_tick, odp_event_t *tmo_ev);
-
-/**
- * Set (or reset) a timer with relative expiration time
- *
- * Like odp_timer_set_abs(), but the expiration time is relative to the current
- * time: expiration tick = odp_timer_current_tick() + 'rel_tick'.
- *
- * @param timer Timer
- * @param rel_tick Expiration time relative to current time of
- * the timer pool in timer ticks
- * @param[in,out] tmo_ev Pointer to an event handle. The event is enqueued
- * when the timer expires. Use NULL when resetting the
- * timer without changing the event. When resetting the
- * timer with a new event, a successful operation
- * outputs the old event here.
- *
- * @retval ODP_TIMER_SUCCESS Success
- * @retval ODP_TIMER_TOO_NEAR Failure. Expiration time is too near to
- * the current time.
- * @retval ODP_TIMER_TOO_FAR Failure. Expiration time is too far from
- * the current time.
- * @retval ODP_TIMER_FAIL Failure. Set operation: No event provided.
- * Reset operation: Too late to reset the timer.
- *
- * @see odp_timer_set_abs(), odp_timer_alloc(), odp_timer_cancel()
- *
- * @deprecated Use odp_timer_start() or odp_timer_restart() instead
- */
-int ODP_DEPRECATE(odp_timer_set_rel)(odp_timer_t timer, uint64_t rel_tick, odp_event_t *tmo_ev);
-
-/**
* Cancel a timer
*
* Cancels a previously started single shot timer. A successful operation (#ODP_TIMER_SUCCESS)
@@ -513,8 +478,10 @@ odp_event_t odp_timeout_to_event(odp_timeout_t tmo);
* @param tmo Timeout handle
* @retval 1 Timeout is fresh
* @retval 0 Timeout is stale
+ *
+ * @deprecated The function will be removed in a future API version.
*/
-int odp_timeout_fresh(odp_timeout_t tmo);
+int ODP_DEPRECATE(odp_timeout_fresh)(odp_timeout_t tmo);
/**
* Return timer handle for the timeout
diff --git a/include/odp/api/spec/timer_types.h b/include/odp/api/spec/timer_types.h
index 0fd5d4f70..7db57c340 100644
--- a/include/odp/api/spec/timer_types.h
+++ b/include/odp/api/spec/timer_types.h
@@ -251,22 +251,6 @@ typedef enum {
/** The default clock source */
#define ODP_CLOCK_DEFAULT ODP_CLOCK_SRC_0
-#if ODP_DEPRECATED_API
-/**
- * For backwards compatibility, ODP_CLOCK_CPU is synonym of ODP_CLOCK_DEFAULT.
- *
- * @deprecated Use #ODP_CLOCK_DEFAULT instead.
- */
-#define ODP_CLOCK_CPU ODP_CLOCK_DEFAULT
-
-/**
- * For backwards compatibility, ODP_CLOCK_EXT is synonym of ODP_CLOCK_SRC_1.
- *
- * @deprecated Use #ODP_CLOCK_SRC_1 instead.
- */
-#define ODP_CLOCK_EXT ODP_CLOCK_SRC_1
-#endif
-
/**
* Timer expiration mode
*
@@ -553,30 +537,7 @@ typedef enum {
*
* @deprecated Use odp_timer_retval_t instead.
*/
-typedef odp_timer_retval_t odp_timer_set_t;
-
-#if ODP_DEPRECATED_API
-/**
- * For backwards compatibility, ODP_TIMER_TOOEARLY is synonym of ODP_TIMER_TOO_NEAR.
- *
- * @deprecated Use #ODP_TIMER_TOO_NEAR instead.
- */
-#define ODP_TIMER_TOOEARLY ODP_TIMER_TOO_NEAR
-
-/**
- * For backwards compatibility, ODP_TIMER_TOOLATE is synonym of ODP_TIMER_TOO_FAR.
- *
- * @deprecated Use #ODP_TIMER_TOO_FAR instead.
- */
-#define ODP_TIMER_TOOLATE ODP_TIMER_TOO_FAR
-
-/**
- * For backwards compatibility, ODP_TIMER_NOEVENT is synonym of ODP_TIMER_FAIL.
- *
- * @deprecated Use #ODP_TIMER_FAIL instead.
- */
-#define ODP_TIMER_NOEVENT ODP_TIMER_FAIL
-#endif
+typedef odp_timer_retval_t ODP_DEPRECATE(odp_timer_set_t);
/**
* Timer tick information
diff --git a/include/odp/api/thread.h b/include/odp/api/thread.h
index 2e0288886..24199a166 100644
--- a/include/odp/api/thread.h
+++ b/include/odp/api/thread.h
@@ -15,8 +15,6 @@
extern "C" {
#endif
-#include <odp/api/spec/thread_types.h>
-
#include <odp/api/abi/thread.h>
#include <odp/api/spec/thread.h>
diff --git a/include/odp/api/thread_types.h b/include/odp/api/thread_types.h
new file mode 100644
index 000000000..54ea5b714
--- /dev/null
+++ b/include/odp/api/thread_types.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+/**
+ * @file
+ *
+ * ODP thread
+ */
+
+#ifndef ODP_API_THREAD_TYPES_H_
+#define ODP_API_THREAD_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/abi/thread_types.h>
+
+#include <odp/api/spec/thread_types.h>
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/odp/arch/arm32-linux/odp/api/abi/thread_types.h b/include/odp/arch/arm32-linux/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/include/odp/arch/arm32-linux/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/include/odp/arch/arm64-linux/odp/api/abi/thread_types.h b/include/odp/arch/arm64-linux/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/include/odp/arch/arm64-linux/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/include/odp/arch/default-linux/odp/api/abi/thread_types.h b/include/odp/arch/default-linux/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/include/odp/arch/default-linux/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/include/odp/arch/power64-linux/odp/api/abi/thread_types.h b/include/odp/arch/power64-linux/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/include/odp/arch/power64-linux/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/include/odp/arch/x86_32-linux/odp/api/abi/thread_types.h b/include/odp/arch/x86_32-linux/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/include/odp/arch/x86_32-linux/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/include/odp/arch/x86_64-linux/odp/api/abi/thread_types.h b/include/odp/arch/x86_64-linux/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/include/odp/arch/x86_64-linux/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/include/odp/autoheader_external.h.in b/include/odp/autoheader_external.h.in
index 74662b2e0..ebdb9fbc6 100644
--- a/include/odp/autoheader_external.h.in
+++ b/include/odp/autoheader_external.h.in
@@ -14,4 +14,7 @@
/* Define to 1 or 2 to enable event validation */
#undef _ODP_EVENT_VALIDATION
+/* Define to 1 to enable WFE based lock implementation on aarch64 */
+#undef _ODP_WFE_LOCKS
+
#endif
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 3eb6225af..886fe8f48 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -113,6 +113,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/std_types.h \
include-abi/odp/api/abi/sync.h \
include-abi/odp/api/abi/thread.h \
+ include-abi/odp/api/abi/thread_types.h \
include-abi/odp/api/abi/thrmask.h \
include-abi/odp/api/abi/ticketlock.h \
include-abi/odp/api/abi/time.h \
@@ -296,7 +297,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/cpu_generic.h \
arch/arm/odp/api/abi/cpu_inlines.h \
arch/arm/odp/api/abi/cpu.h \
- arch/default/odp/api/abi/sync_inlines.h
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/arm/odp_atomic.h \
arch/arm/odp_cpu.h \
@@ -324,7 +327,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/cpu_generic.h \
arch/aarch64/odp/api/abi/cpu_inlines.h \
arch/aarch64/odp/api/abi/cpu.h \
- arch/aarch64/odp/api/abi/sync_inlines.h
+ arch/aarch64/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/aarch64/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/aarch64/odp_atomic.h \
arch/aarch64/odp_cpu.h \
@@ -346,7 +351,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/cpu_generic.h \
arch/default/odp/api/abi/cpu_inlines.h \
arch/default/odp/api/abi/cpu.h \
- arch/default/odp/api/abi/sync_inlines.h
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
@@ -366,7 +373,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/cpu_generic.h \
arch/default/odp/api/abi/cpu_inlines.h \
arch/powerpc/odp/api/abi/cpu.h \
- arch/default/odp/api/abi/sync_inlines.h
+ arch/default/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
@@ -388,7 +397,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/atomic_inlines.h \
arch/x86/odp/api/abi/cpu_inlines.h \
arch/x86/odp/api/abi/cpu.h \
- arch/x86/odp/api/abi/sync_inlines.h
+ arch/x86/odp/api/abi/sync_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/x86/cpu_flags.h \
arch/x86/odp_cpu.h \
diff --git a/platform/linux-dpdk/README b/platform/linux-dpdk/README
index 55e723573..421d3f958 100644
--- a/platform/linux-dpdk/README
+++ b/platform/linux-dpdk/README
@@ -1,5 +1,5 @@
Copyright (c) 2018-2019, Linaro Limited
-Copyright (c) 2019-2023, Nokia
+Copyright (c) 2019-2024, Nokia
All rights reserved.
SPDX-License-Identifier: BSD-3-Clause
@@ -19,8 +19,8 @@ Prerequisites and considerations:
- it's also possible to use odp-dpdk for evaluation purposes without a DPDK
compatible NIC, using the pcap poll mode driver
- DPDK code must be downloaded, configured and compiled, details below
-- ODP-DPDK has been compiled and tested on an x86 host with Ubuntu 18.04 LTS
- (4.15.0 kernel).
+- ODP-DPDK has been compiled and tested on an x86 host with Ubuntu 22.04 LTS
+ (5.15.0 kernel).
- DPDK only works on a selected range of network cards. The list of known and
supported devices can be found in the DPDK documentation:
https://doc.dpdk.org/guides/nics/index.html
@@ -36,44 +36,12 @@ cmds below for Ubuntu, where it has been compiled and tested.
On Ubuntu install pcap development library:
sudo apt-get install libpcap-dev
-Right now ODP-DPDK supports DPDK v19.11, v20.11, v21.11, and v22.11 (recommended version).
+Right now ODP-DPDK supports DPDK v21.11 and v22.11 (recommended version).
-Compile DPDK 19.11
-------------------
-Fetch the DPDK code:
- git clone https://dpdk.org/git/dpdk-stable --branch 19.11 --depth 1 ./<dpdk-dir>
-
-This has to be done only once:
- cd <dpdk-dir>
- make config T=x86_64-native-linuxapp-gcc O=x86_64-native-linuxapp-gcc
-
-Enable pcap pmd to use ODP-DPDK without DPDK supported NIC's:
- cd <dpdk-dir>/x86_64-native-linuxapp-gcc
- sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_PCAP=).*,\1y,' .config
-
-Enable openssl crypto pmd to use openssl with odp-dpdk:
- sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_OPENSSL=).*,\1y,' .config
-
-Optionally, add more queues to eventdev:
- sed -ri 's,(CONFIG_RTE_EVENT_MAX_QUEUES_PER_DEV=).*,\1255,' .config
-
-Now return to parent directory and build DPDK:
- cd ..
-
-The last step depends on whether ODP shared libraries will be built with this
-deployment of DPDK:
-SHARED libraries:
- make install T=x86_64-native-linuxapp-gcc DESTDIR=./install EXTRA_CFLAGS="-fPIC"
-
-STATIC libraries:
- make install T=x86_64-native-linuxapp-gcc DESTDIR=./install
-
-This only ensures building DPDK, but traffic is not tested with this build yet.
-
-Compile DPDK from v20.11 onwards
---------------------------------
+Compile DPDK
+------------
Fetch the DPDK code:
- git clone https://dpdk.org/git/dpdk-stable --branch 20.11 --depth 1 ./<dpdk-dir>
+ git clone https://dpdk.org/git/dpdk-stable --branch 22.11 --depth 1 ./<dpdk-dir>
Prepare the build directory:
cd <dpdk-dir>
@@ -98,14 +66,11 @@ path with the --with-dpdk-path option.
./bootstrap
The following step depends on whether ODP shared libraries are to be built.
-SHARED libraries (requires building DPDK with -fPIC, see above):
- ./configure --with-dpdk-path=<dpdk-dir>/install
+SHARED libraries:
+ ./configure --enable-dpdk-shared
STATIC libraries (better performance):
- ./configure --with-dpdk-path=<dpdk-dir>/install --disable-shared
-
-Starting from DPDK 20.11, --with-dpdk-path option is not used anymore:
- ./configure
+ ./configure --disable-shared
Or, if DPDK was not installed to the default location, set PKG_CONFIG_PATH:
PKG_CONFIG_PATH=<dpdk-dir>/install/lib/x86_64-linux-gnu/pkgconfig ./configure
@@ -223,7 +188,7 @@ rte_eal_[mp_]remote_launch(), but not through ODP API's. Nevertheless,
odp_local_init() makes sure for the rest of the DPDK libraries ODP threads look
like proper DPDK threads.
-Exaple how to run an ODP-DPDK L2 forwarding application:
+Example how to run an ODP-DPDK L2 forwarding application:
sudo ./odp_l2fwd -i 0,1 -c 2
@@ -298,27 +263,11 @@ Get the Intel Multi-Buffer Crypto library from
https://github.com/intel/intel-ipsec-mb and follow the README from the repo on
how to build the library.
-Building DPDK 19.11
--------------------
-Follow the instructions from "Compile DPDK" section and make sure to enable the
-necessary crypto PMDs:
-E.g.
-# Compile PMD for AESNI backed device
-sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_AESNI_MB=).*,\1y,' .config
-
-# Compile PMD for AESNI GCM device
-sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM=).*,\1y,' .config
-
-# Compile PMD for SNOW 3G device
-sed -ri 's,(CONFIG_RTE_LIBRTE_PMD_SNOW3G=).*,\1y,' .config
-
-AESNI_MULTI_BUFFER_LIB_PATH=/path-to/Intel-multi-buffer-crypto/ \
- make install T=x86_64-native-linuxapp-gcc DESTDIR=./install EXTRA_CFLAGS="-fPIC"
-
-Building DPDK 20.11
--------------------
-If libIPSec_MB has been installed outside the normal search paths,
-configure the compiler and linker options with
+Building DPDK
+-------------
+Follow the instructions from "Compile DPDK" section. If libIPSec_MB has been
+installed outside the normal search paths, configure the compiler and linker
+options with:
meson configure -Dc_args=-I/path-to/Intel-multi-buffer-crypto/include \
-Dc_link_args=-L/path-to/Intel-multi-buffer-crypto/lib
@@ -341,7 +290,7 @@ yet tested in the ODP CI.
To use eventdev one must set ODP_SCHEDULER environment variable to "eventdev"
and provide the necessary platform parameters to DPDK.
-Refer to DPDK event device driver documentation for platform defails:
+Refer to DPDK event device driver documentation for platform details:
https://doc.dpdk.org/guides/eventdevs/index.html
In case of the standard software eventdev implementation one must enable a DPDK
diff --git a/platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h b/platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h
new file mode 120000
index 000000000..65a5f4381
--- /dev/null
+++ b/platform/linux-dpdk/arch/aarch64/odp/api/abi/wait_until.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/aarch64/odp/api/abi/wait_until.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h
new file mode 120000
index 000000000..ae06629ce
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/wait_until.h \ No newline at end of file
diff --git a/platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h
new file mode 120000
index 000000000..c43ede9ad
--- /dev/null
+++ b/platform/linux-dpdk/arch/default/odp/api/abi/wait_until_generic.h
@@ -0,0 +1 @@
+../../../../../../linux-generic/arch/default/odp/api/abi/wait_until_generic.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h b/platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h
new file mode 120000
index 000000000..b665090d0
--- /dev/null
+++ b/platform/linux-dpdk/include-abi/odp/api/abi/thread_types.h
@@ -0,0 +1 @@
+../../../../../linux-generic/include-abi/odp/api/abi/thread_types.h \ No newline at end of file
diff --git a/platform/linux-dpdk/include/odp/api/plat/time_inlines.h b/platform/linux-dpdk/include/odp/api/plat/time_inlines.h
index 8e4eec59c..f3d2a6947 100644
--- a/platform/linux-dpdk/include/odp/api/plat/time_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/time_inlines.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2020-2023, Nokia
+ * Copyright (c) 2020-2024, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -21,8 +21,9 @@
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
typedef struct _odp_time_global_t {
- uint64_t start_cycles;
uint64_t freq_hz;
+ uint64_t start_cycles;
+ uint64_t start_ns;
} _odp_time_global_t;
@@ -32,7 +33,7 @@ static inline odp_time_t _odp_time_cur(void)
{
odp_time_t time;
- time.u64 = rte_get_timer_cycles() - _odp_time_glob.start_cycles;
+ time.u64 = rte_get_timer_cycles();
return time;
}
@@ -42,7 +43,7 @@ static inline odp_time_t _odp_time_cur_strict(void)
odp_time_t time;
rte_mb();
- time.u64 = rte_get_timer_cycles() - _odp_time_glob.start_cycles;
+ time.u64 = rte_get_timer_cycles();
return time;
}
@@ -104,10 +105,11 @@ static inline odp_time_t _odp_time_from_ns(uint64_t ns)
#define odp_time_cmp __odp_time_cmp
#define odp_time_diff __odp_time_diff
#define odp_time_diff_ns __odp_time_diff_ns
+ #define odp_time_add_ns __odp_time_add_ns
#define odp_time_sum __odp_time_sum
#define odp_time_wait_ns __odp_time_wait_ns
#define odp_time_wait_until __odp_time_wait_until
-
+ #define odp_time_startup __odp_time_startup
#else
#define _ODP_INLINE
#endif
@@ -206,6 +208,15 @@ _ODP_INLINE uint64_t odp_time_diff_ns(odp_time_t t2, odp_time_t t1)
return odp_time_to_ns(time);
}
+_ODP_INLINE odp_time_t odp_time_add_ns(odp_time_t time, uint64_t ns)
+{
+ odp_time_t t = _odp_time_from_ns(ns);
+
+ t.u64 += time.u64;
+
+ return t;
+}
+
_ODP_INLINE odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
{
odp_time_t time;
@@ -238,6 +249,12 @@ _ODP_INLINE void odp_time_wait_until(odp_time_t time)
_odp_time_wait_until(time);
}
+_ODP_INLINE void odp_time_startup(odp_time_startup_t *startup)
+{
+ startup->global.u64 = _odp_time_glob.start_cycles;
+ startup->global_ns = _odp_time_glob.start_ns;
+}
+
/** @endcond */
#endif
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
index 330cbe4ce..6c997e80d 100644
--- a/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
+++ b/platform/linux-dpdk/include/odp/api/plat/timer_inline_types.h
@@ -31,6 +31,14 @@ typedef struct _odp_timeout_inline_offset_t {
extern const _odp_timeout_inline_offset_t _odp_timeout_inline_offset;
+/* Timer global data */
+typedef struct _odp_timer_global_t {
+ uint64_t freq_hz;
+
+} _odp_timer_global_t;
+
+extern _odp_timer_global_t _odp_timer_glob;
+
/** @endcond */
#ifdef __cplusplus
diff --git a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
index 66327acdb..a85c7582d 100644
--- a/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
+++ b/platform/linux-dpdk/include/odp/api/plat/timer_inlines.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2022-2023, Nokia
+/* Copyright (c) 2022-2024, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,11 +8,16 @@
#define ODP_PLAT_TIMER_INLINES_H_
#include <odp/api/event.h>
+#include <odp/api/hints.h>
+#include <odp/api/time_types.h>
#include <odp/api/timer_types.h>
#include <odp/api/plat/debug_inlines.h>
#include <odp/api/plat/timer_inline_types.h>
+#include <rte_config.h>
+#include <rte_cycles.h>
+
#include <stdint.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
@@ -24,9 +29,12 @@
#define odp_timeout_tick __odp_timeout_tick
#define odp_timeout_user_ptr __odp_timeout_user_ptr
#define odp_timeout_user_area __odp_timeout_user_area
+ #define odp_timer_current_tick __odp_timer_current_tick
#define odp_timeout_from_event __odp_timeout_from_event
#define odp_timeout_from_event_multi __odp_timeout_from_event_multi
#define odp_timeout_to_event __odp_timeout_to_event
+ #define odp_timer_tick_to_ns __odp_timer_tick_to_ns
+ #define odp_timer_ns_to_tick __odp_timer_ns_to_tick
#else
#define _ODP_INLINE
#endif
@@ -51,6 +59,11 @@ _ODP_INLINE void *odp_timeout_user_area(odp_timeout_t tmo)
return _odp_timeout_hdr_field(tmo, void *, uarea_addr);
}
+_ODP_INLINE uint64_t odp_timer_current_tick(odp_timer_pool_t tp ODP_UNUSED)
+{
+ return rte_get_timer_cycles();
+}
+
_ODP_INLINE odp_timeout_t odp_timeout_from_event(odp_event_t ev)
{
_ODP_ASSERT(odp_event_type(ev) == ODP_EVENT_TIMEOUT);
@@ -72,6 +85,39 @@ _ODP_INLINE odp_event_t odp_timeout_to_event(odp_timeout_t tmo)
return (odp_event_t)tmo;
}
+_ODP_INLINE uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tp ODP_UNUSED, uint64_t ticks)
+{
+ uint64_t nsec;
+ uint64_t sec = 0;
+ const uint64_t freq_hz = _odp_timer_glob.freq_hz;
+
+ if (ticks >= freq_hz) {
+ sec = ticks / freq_hz;
+ ticks = ticks - sec * freq_hz;
+ }
+
+ nsec = (ODP_TIME_SEC_IN_NS * ticks) / freq_hz;
+
+ return (sec * ODP_TIME_SEC_IN_NS) + nsec;
+}
+
+_ODP_INLINE uint64_t odp_timer_ns_to_tick(odp_timer_pool_t tp ODP_UNUSED, uint64_t ns)
+{
+ uint64_t ticks;
+ uint64_t sec = 0;
+ const uint64_t freq_hz = _odp_timer_glob.freq_hz;
+
+ if (ns >= ODP_TIME_SEC_IN_NS) {
+ sec = ns / ODP_TIME_SEC_IN_NS;
+ ns = ns - sec * ODP_TIME_SEC_IN_NS;
+ }
+
+ ticks = sec * freq_hz;
+ ticks += (ns * freq_hz) / ODP_TIME_SEC_IN_NS;
+
+ return ticks;
+}
+
/** @endcond */
#endif
diff --git a/platform/linux-dpdk/include/odp_config_internal.h b/platform/linux-dpdk/include/odp_config_internal.h
index ec1d2c617..6593b613b 100644
--- a/platform/linux-dpdk/include/odp_config_internal.h
+++ b/platform/linux-dpdk/include/odp_config_internal.h
@@ -26,7 +26,7 @@ extern "C" {
/*
* Maximum number of packet IO resources
*/
-#define ODP_CONFIG_PKTIO_ENTRIES 64
+#define CONFIG_PKTIO_ENTRIES 64
/*
* Maximum number of DMA sessions
@@ -37,12 +37,12 @@ extern "C" {
* Pools reserved for internal usage, 1 for IPsec status events, one per packet
* I/O for TX completion and one per DMA session
*/
-#define CONFIG_INTERNAL_POOLS (1 + ODP_CONFIG_PKTIO_ENTRIES + CONFIG_MAX_DMA_SESSIONS)
+#define CONFIG_INTERNAL_POOLS (1 + CONFIG_PKTIO_ENTRIES + CONFIG_MAX_DMA_SESSIONS)
/*
* Maximum number of pools
*/
-#define ODP_CONFIG_POOLS 256
+#define CONFIG_POOLS 256
/*
* Queues reserved for ODP internal use
@@ -89,7 +89,7 @@ extern "C" {
* This defines the minimum supported buffer alignment. Requests for values
* below this will be rounded up to this value.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MIN 16
+#define CONFIG_BUFFER_ALIGN_MIN 16
/*
* Maximum buffer alignment
@@ -97,7 +97,7 @@ extern "C" {
* This defines the maximum supported buffer alignment. Requests for values
* above this will fail.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
+#define CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
/*
* Default packet tailroom
@@ -134,7 +134,7 @@ extern "C" {
#define CONFIG_PACKET_MAX_SEG_LEN (CONFIG_PACKET_SEG_SIZE - \
RTE_PKTMBUF_HEADROOM - \
CONFIG_PACKET_TAILROOM - \
- ODP_CONFIG_BUFFER_ALIGN_MIN)
+ CONFIG_BUFFER_ALIGN_MIN)
/*
* Number of shared memory blocks reserved for implementation internal use.
@@ -143,7 +143,7 @@ extern "C" {
* module global data, and one block per packet I/O is reserved for TX
* completion usage.
*/
-#define CONFIG_INTERNAL_SHM_BLOCKS (ODP_CONFIG_POOLS + 20 + ODP_CONFIG_PKTIO_ENTRIES)
+#define CONFIG_INTERNAL_SHM_BLOCKS (CONFIG_POOLS + 20 + CONFIG_PKTIO_ENTRIES)
/*
* Maximum number of shared memory blocks.
diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h
index ece33b538..bab750a22 100644
--- a/platform/linux-dpdk/include/odp_packet_io_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_io_internal.h
@@ -168,7 +168,7 @@ typedef struct {
uint32_t tx_compl_pool_size;
} config;
- pktio_entry_t entries[ODP_CONFIG_PKTIO_ENTRIES];
+ pktio_entry_t entries[CONFIG_PKTIO_ENTRIES];
lso_profile_t lso_profile[PKTIO_LSO_PROFILES];
int num_lso_profiles;
@@ -241,9 +241,9 @@ static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
if (odp_unlikely(pktio == ODP_PKTIO_INVALID))
return NULL;
- if (odp_unlikely(_odp_typeval(pktio) > ODP_CONFIG_PKTIO_ENTRIES)) {
+ if (odp_unlikely(_odp_typeval(pktio) > CONFIG_PKTIO_ENTRIES)) {
_ODP_DBG("pktio limit %" PRIuPTR "/%d exceed\n",
- _odp_typeval(pktio), ODP_CONFIG_PKTIO_ENTRIES);
+ _odp_typeval(pktio), CONFIG_PKTIO_ENTRIES);
return NULL;
}
diff --git a/platform/linux-dpdk/include/odp_pool_internal.h b/platform/linux-dpdk/include/odp_pool_internal.h
index 8512806f7..b8fd17314 100644
--- a/platform/linux-dpdk/include/odp_pool_internal.h
+++ b/platform/linux-dpdk/include/odp_pool_internal.h
@@ -92,7 +92,7 @@ typedef struct ODP_ALIGNED_CACHE {
} pool_t;
typedef struct pool_global_t {
- pool_t pool[ODP_CONFIG_POOLS];
+ pool_t pool[CONFIG_POOLS];
odp_shm_t shm;
struct {
diff --git a/platform/linux-dpdk/m4/configure.m4 b/platform/linux-dpdk/m4/configure.m4
index a7c58eb91..0fcd4a5b3 100644
--- a/platform/linux-dpdk/m4/configure.m4
+++ b/platform/linux-dpdk/m4/configure.m4
@@ -10,6 +10,7 @@ m4_include([platform/linux-dpdk/m4/odp_libconfig.m4])
m4_include([platform/linux-dpdk/m4/odp_openssl.m4])
m4_include([platform/linux-dpdk/m4/odp_pcapng.m4])
m4_include([platform/linux-dpdk/m4/odp_scheduler.m4])
+m4_include([platform/linux-dpdk/m4/odp_wfe.m4])
ODP_EVENT_VALIDATION
ODP_PTHREAD
@@ -73,9 +74,10 @@ AS_VAR_APPEND([PLAT_CFG_TEXT], ["
event_validation: ${enable_event_validation}
openssl: ${with_openssl}
openssl_rand: ${openssl_rand}
- pcap: ${have_pmd_pcap}
- pcapng: ${have_pcapng}
- default_config_path: ${default_config_path}"])
+ pcap: ${have_pmd_pcap}
+ pcapng: ${have_pcapng}
+ wfe_locks: ${use_wfe_locks}
+ default_config_path: ${default_config_path}"])
ODP_CHECK_CFLAG([-Wno-error=cast-align])
diff --git a/platform/linux-dpdk/m4/odp_wfe.m4 b/platform/linux-dpdk/m4/odp_wfe.m4
new file mode 120000
index 000000000..2526a9c83
--- /dev/null
+++ b/platform/linux-dpdk/m4/odp_wfe.m4
@@ -0,0 +1 @@
+../../linux-generic/m4/odp_wfe.m4 \ No newline at end of file
diff --git a/platform/linux-dpdk/odp_packet_dpdk.c b/platform/linux-dpdk/odp_packet_dpdk.c
index 63e00d8e5..da76db06d 100644
--- a/platform/linux-dpdk/odp_packet_dpdk.c
+++ b/platform/linux-dpdk/odp_packet_dpdk.c
@@ -186,7 +186,7 @@ const pktio_if_ops_t * const _odp_pktio_if_ops[] = {
NULL
};
-extern void *pktio_entry_ptr[ODP_CONFIG_PKTIO_ENTRIES];
+extern void *pktio_entry_ptr[CONFIG_PKTIO_ENTRIES];
static uint32_t mtu_get_pkt_dpdk(pktio_entry_t *pktio_entry);
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 541557f48..f7726f97b 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -58,14 +58,14 @@
#define POOL_NAME_FORMAT "%" PRIu64 "-%d-%s"
/* Define a practical limit for contiguous memory allocations */
-#define MAX_SIZE (CONFIG_PACKET_SEG_SIZE - ODP_CONFIG_BUFFER_ALIGN_MIN)
+#define MAX_SIZE (CONFIG_PACKET_SEG_SIZE - CONFIG_BUFFER_ALIGN_MIN)
/* Maximum packet user area size */
#define MAX_UAREA_SIZE 2048
#define ROUNDUP_DIV(a, b) (((a) + ((b) - 1)) / (b))
-ODP_STATIC_ASSERT(CONFIG_INTERNAL_POOLS < ODP_CONFIG_POOLS,
+ODP_STATIC_ASSERT(CONFIG_INTERNAL_POOLS < CONFIG_POOLS,
"Internal pool count needs to be less than total configured pool count");
/* The pool table ptr - resides in shared memory */
@@ -116,7 +116,7 @@ static pool_t *find_pool(_odp_event_hdr_t *event_hdr)
{
int i;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
struct mem_cb_arg_t args;
@@ -182,7 +182,7 @@ int _odp_pool_init_global(void)
return -1;
}
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
LOCK_INIT(&pool->lock);
@@ -251,7 +251,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
{
odp_pool_stats_opt_t supported_stats;
/* Reserve pools for internal usage */
- unsigned int max_pools = ODP_CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
+ unsigned int max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
memset(capa, 0, sizeof(odp_pool_capability_t));
@@ -262,7 +262,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Buffer pools */
capa->buf.max_pools = max_pools;
- capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
+ capa->buf.max_align = CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
capa->buf.max_uarea_size = MAX_UAREA_SIZE;
@@ -272,7 +272,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
capa->buf.stats.all = supported_stats.all;
/* Packet pools */
- capa->pkt.max_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
+ capa->pkt.max_align = CONFIG_BUFFER_ALIGN_MIN;
capa->pkt.max_pools = max_pools;
capa->pkt.max_len = CONFIG_PACKET_MAX_SEG_LEN;
capa->pkt.max_num = _odp_pool_glb->config.pkt_max_num;
@@ -586,7 +586,7 @@ static pool_t *get_unused_pool(void)
{
pool_t *pool;
- for (int i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (int i = 0; i < CONFIG_POOLS; i++) {
pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -684,7 +684,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
cache_size = params->buf.cache_size;
priv_size = get_mbuf_priv_size(sizeof(odp_buffer_hdr_t));
align = params->buf.align > 0 ? params->buf.align : ODP_CACHE_LINE_SIZE;
- align = _ODP_MAX(align, (uint32_t)ODP_CONFIG_BUFFER_ALIGN_MIN);
+ align = _ODP_MAX(align, (uint32_t)CONFIG_BUFFER_ALIGN_MIN);
data_size = _ODP_ROUNDUP_ALIGN(params->buf.size + trailer, align);
uarea_size = params->buf.uarea_size;
priv_data.event_type = ODP_EVENT_BUFFER;
@@ -693,8 +693,8 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
num = params->pkt.num;
cache_size = params->pkt.cache_size;
priv_size = get_mbuf_priv_size(sizeof(odp_packet_hdr_t));
- align = params->pkt.align > 0 ? params->pkt.align : ODP_CONFIG_BUFFER_ALIGN_MIN;
- align = _ODP_MAX(align, (uint32_t)ODP_CONFIG_BUFFER_ALIGN_MIN);
+ align = params->pkt.align > 0 ? params->pkt.align : CONFIG_BUFFER_ALIGN_MIN;
+ align = _ODP_MAX(align, (uint32_t)CONFIG_BUFFER_ALIGN_MIN);
data_size = _ODP_MAX(params->pkt.seg_len, (uint32_t)CONFIG_PACKET_SEG_LEN_MIN);
data_size = _ODP_MAX(data_size, params->pkt.len);
@@ -793,7 +793,7 @@ odp_pool_t odp_pool_lookup(const char *name)
uint32_t i;
pool_t *pool;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -877,7 +877,7 @@ void odp_pool_print_all(void)
_ODP_PRINT("-----------------\n");
_ODP_PRINT(" idx %-*s type free tot cache elt_len ext\n", col_width, "name");
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -1002,13 +1002,12 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl)
unsigned int odp_pool_max_index(void)
{
- return ODP_CONFIG_POOLS - 1;
+ return CONFIG_POOLS - 1;
}
int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
{
pool_t *pool;
- uint16_t first, last;
if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
_ODP_ERR("Invalid pool handle\n");
@@ -1020,14 +1019,9 @@ int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
}
pool = _odp_pool_entry(pool_hdl);
- first = stats->thread.first;
- last = stats->thread.last;
- memset(stats, 0, sizeof(odp_pool_stats_t));
-
- /* Restore input parameters */
- stats->thread.first = first;
- stats->thread.last = last;
+ /* Zero everything else but per thread statistics */
+ memset(stats, 0, offsetof(odp_pool_stats_t, thread));
if (pool->params.stats.bit.available)
stats->available = rte_mempool_avail_count(pool->rte_mempool);
@@ -1109,7 +1103,7 @@ int odp_pool_ext_capability(odp_pool_type_t type,
memset(capa, 0, sizeof(odp_pool_ext_capability_t));
capa->type = type;
- capa->max_pools = ODP_CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
+ capa->max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
capa->min_cache_size = 0;
capa->max_cache_size = RTE_MEMPOOL_CACHE_MAX_SIZE;
capa->stats.all = supported_stats.all;
@@ -1221,7 +1215,7 @@ odp_pool_t odp_pool_ext_create(const char *name,
}
/* Find an unused buffer pool slot and initialize it as requested */
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
uint32_t num;
struct rte_mempool *mp;
diff --git a/platform/linux-dpdk/odp_schedule_eventdev.c b/platform/linux-dpdk/odp_schedule_eventdev.c
index 07cce4643..d6f3ba2f7 100644
--- a/platform/linux-dpdk/odp_schedule_eventdev.c
+++ b/platform/linux-dpdk/odp_schedule_eventdev.c
@@ -621,7 +621,7 @@ static inline uint16_t input_cached(odp_event_t out_ev[], unsigned int max_num,
static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
odp_event_t out_ev[], unsigned int max_num)
{
- odp_time_t next, wtime;
+ odp_time_t next;
struct rte_event ev[max_num];
int first = 1;
uint16_t num_deq;
@@ -669,8 +669,7 @@ static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
return 0;
if (first) {
- wtime = odp_time_local_from_ns(wait);
- next = odp_time_sum(odp_time_local(), wtime);
+ next = odp_time_add_ns(odp_time_local(), wait);
first = 0;
continue;
}
diff --git a/platform/linux-dpdk/odp_system_info.c b/platform/linux-dpdk/odp_system_info.c
index 886f7f216..0ebb3e09f 100644
--- a/platform/linux-dpdk/odp_system_info.c
+++ b/platform/linux-dpdk/odp_system_info.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2020-2022, Nokia
+ * Copyright (c) 2020-2024, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -528,22 +528,29 @@ void odp_sys_config_print(void)
_ODP_PRINT("\n\nodp_config_internal.h values:\n"
"-----------------------------\n");
- _ODP_PRINT("CONFIG_NUM_CPU_IDS: %i\n", CONFIG_NUM_CPU_IDS);
- _ODP_PRINT("ODP_CONFIG_POOLS: %i\n", ODP_CONFIG_POOLS);
- _ODP_PRINT("CONFIG_INTERNAL_QUEUES: %i\n", CONFIG_INTERNAL_QUEUES);
- _ODP_PRINT("CONFIG_MAX_PLAIN_QUEUES: %i\n", CONFIG_MAX_PLAIN_QUEUES);
- _ODP_PRINT("CONFIG_MAX_SCHED_QUEUES: %i\n", CONFIG_MAX_SCHED_QUEUES);
- _ODP_PRINT("CONFIG_MAX_QUEUES: %i\n", CONFIG_MAX_QUEUES);
- _ODP_PRINT("CONFIG_QUEUE_MAX_ORD_LOCKS: %i\n", CONFIG_QUEUE_MAX_ORD_LOCKS);
- _ODP_PRINT("ODP_CONFIG_PKTIO_ENTRIES: %i\n", ODP_CONFIG_PKTIO_ENTRIES);
- _ODP_PRINT("ODP_CONFIG_BUFFER_ALIGN_MIN: %i\n", ODP_CONFIG_BUFFER_ALIGN_MIN);
- _ODP_PRINT("ODP_CONFIG_BUFFER_ALIGN_MAX: %i\n", ODP_CONFIG_BUFFER_ALIGN_MAX);
- _ODP_PRINT("CONFIG_PACKET_TAILROOM: %i\n", CONFIG_PACKET_TAILROOM);
- _ODP_PRINT("CONFIG_PACKET_SEG_SIZE: %i\n", CONFIG_PACKET_SEG_SIZE);
- _ODP_PRINT("CONFIG_PACKET_SEG_LEN_MIN: %i\n", CONFIG_PACKET_SEG_LEN_MIN);
- _ODP_PRINT("CONFIG_PACKET_MAX_SEG_LEN: %i\n", CONFIG_PACKET_MAX_SEG_LEN);
- _ODP_PRINT("CONFIG_SHM_BLOCKS: %i\n", CONFIG_SHM_BLOCKS);
- _ODP_PRINT("CONFIG_BURST_SIZE: %i\n", CONFIG_BURST_SIZE);
- _ODP_PRINT("CONFIG_POOL_MAX_NUM: %i\n", CONFIG_POOL_MAX_NUM);
+ _ODP_PRINT("CONFIG_NUM_CPU_IDS: %i\n", CONFIG_NUM_CPU_IDS);
+ _ODP_PRINT("CONFIG_INTERNAL_QUEUES: %i\n", CONFIG_INTERNAL_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_PLAIN_QUEUES: %i\n", CONFIG_MAX_PLAIN_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_SCHED_QUEUES: %i\n", CONFIG_MAX_SCHED_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_QUEUES: %i\n", CONFIG_MAX_QUEUES);
+ _ODP_PRINT("CONFIG_QUEUE_MAX_ORD_LOCKS: %i\n", CONFIG_QUEUE_MAX_ORD_LOCKS);
+ _ODP_PRINT("CONFIG_MAX_DMA_SESSIONS: %i\n", CONFIG_MAX_DMA_SESSIONS);
+ _ODP_PRINT("CONFIG_INTERNAL_STASHES: %i\n", CONFIG_INTERNAL_STASHES);
+ _ODP_PRINT("CONFIG_MAX_STASHES: %i\n", CONFIG_MAX_STASHES);
+ _ODP_PRINT("CONFIG_PKTIO_ENTRIES: %i\n", CONFIG_PKTIO_ENTRIES);
+ _ODP_PRINT("CONFIG_BUFFER_ALIGN_MIN: %i\n", CONFIG_BUFFER_ALIGN_MIN);
+ _ODP_PRINT("CONFIG_BUFFER_ALIGN_MAX: %i\n", CONFIG_BUFFER_ALIGN_MAX);
+ _ODP_PRINT("CONFIG_PACKET_TAILROOM: %i\n", CONFIG_PACKET_TAILROOM);
+ _ODP_PRINT("CONFIG_PACKET_SEG_SIZE: %i\n", CONFIG_PACKET_SEG_SIZE);
+ _ODP_PRINT("CONFIG_PACKET_MAX_SEG_LEN: %i\n", CONFIG_PACKET_MAX_SEG_LEN);
+ _ODP_PRINT("CONFIG_PACKET_SEG_LEN_MIN: %i\n", CONFIG_PACKET_SEG_LEN_MIN);
+ _ODP_PRINT("CONFIG_PACKET_VECTOR_MAX_SIZE: %i\n", CONFIG_PACKET_VECTOR_MAX_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_SHM_BLOCKS: %i\n", CONFIG_INTERNAL_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_SHM_BLOCKS: %i\n", CONFIG_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_BURST_SIZE: %i\n", CONFIG_BURST_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_POOLS: %i\n", CONFIG_INTERNAL_POOLS);
+ _ODP_PRINT("CONFIG_POOLS: %i\n", CONFIG_POOLS);
+ _ODP_PRINT("CONFIG_POOL_MAX_NUM: %i\n", CONFIG_POOL_MAX_NUM);
+ _ODP_PRINT("CONFIG_IPSEC_MAX_NUM_SA: %i\n", CONFIG_IPSEC_MAX_NUM_SA);
_ODP_PRINT("\n");
}
diff --git a/platform/linux-dpdk/odp_thread.c b/platform/linux-dpdk/odp_thread.c
index 9f30aabb0..10b5de5e0 100644
--- a/platform/linux-dpdk/odp_thread.c
+++ b/platform/linux-dpdk/odp_thread.c
@@ -291,11 +291,31 @@ int odp_thread_count(void)
return thread_globals->num;
}
+int odp_thread_control_count(void)
+{
+ return thread_globals->num_control;
+}
+
+int odp_thread_worker_count(void)
+{
+ return thread_globals->num_worker;
+}
+
int odp_thread_count_max(void)
{
return thread_globals->num_max;
}
+int odp_thread_control_count_max(void)
+{
+ return thread_globals->num_max;
+}
+
+int odp_thread_worker_count_max(void)
+{
+ return thread_globals->num_max;
+}
+
int odp_thrmask_worker(odp_thrmask_t *mask)
{
odp_thrmask_copy(mask, &thread_globals->worker);
diff --git a/platform/linux-dpdk/odp_time.c b/platform/linux-dpdk/odp_time.c
index 2fc25eb96..fbee02df1 100644
--- a/platform/linux-dpdk/odp_time.c
+++ b/platform/linux-dpdk/odp_time.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2021-2023, Nokia
+ * Copyright (c) 2021-2024, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -13,8 +13,11 @@
#include <rte_config.h>
#include <rte_cycles.h>
+#include <inttypes.h>
#include <string.h>
+#define YEAR_IN_SEC (365 * 24 * 3600)
+
#include <odp/visibility_begin.h>
_odp_time_global_t _odp_time_glob;
@@ -23,6 +26,9 @@ _odp_time_global_t _odp_time_glob;
int _odp_time_init_global(void)
{
+ uint64_t diff, years;
+ odp_time_t time;
+
memset(&_odp_time_glob, 0, sizeof(_odp_time_global_t));
#ifdef RTE_LIBEAL_USE_HPET
@@ -37,6 +43,28 @@ int _odp_time_init_global(void)
return -1;
}
+ time.u64 = _odp_time_glob.start_cycles;
+ _odp_time_glob.start_ns = _odp_time_to_ns(time);
+
+ /* Make sure that counters will not wrap */
+ diff = UINT64_MAX - _odp_time_glob.start_cycles;
+ years = (diff / _odp_time_glob.freq_hz) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time counter would wrap in 10 years: %" PRIu64 "\n",
+ _odp_time_glob.start_cycles);
+ return -1;
+ }
+
+ diff = UINT64_MAX - _odp_time_glob.start_ns;
+ years = (diff / ODP_TIME_SEC_IN_NS) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time in nsec would wrap in 10 years: %" PRIu64 "\n",
+ _odp_time_glob.start_ns);
+ return -1;
+ }
+
return 0;
}
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
index aa841ae0f..e01a541ad 100644
--- a/platform/linux-dpdk/odp_timer.c
+++ b/platform/linux-dpdk/odp_timer.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2019-2023, Nokia
+ * Copyright (c) 2019-2024, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -47,9 +47,6 @@
#define EXPIRED 1
#define TICKING 2
-/* One second in nanoseconds */
-#define SEC_IN_NS ((uint64_t)1000000000)
-
/* Maximum number of timer pools */
#define MAX_TIMER_POOLS 8
@@ -198,6 +195,9 @@ _odp_timeout_inline_offset ODP_ALIGNED_CACHE = {
.uarea_addr = offsetof(odp_timeout_hdr_t, uarea_addr)
};
+/* Global data for inline functions */
+_odp_timer_global_t _odp_timer_glob;
+
#include <odp/visibility_end.h>
static void timer_cb(struct rte_timer *rte_timer, void *arg ODP_UNUSED)
@@ -395,6 +395,13 @@ int _odp_timer_init_global(const odp_init_t *params)
timer_global->ops.reset = timer_reset;
}
+ _odp_timer_glob.freq_hz = rte_get_timer_hz();
+ if (_odp_timer_glob.freq_hz == 0) {
+ _ODP_ERR("Reading timer frequency failed\n");
+ odp_shm_free(shm);
+ return -1;
+ }
+
return 0;
}
@@ -750,6 +757,19 @@ void odp_timer_pool_start(void)
/* Nothing to do */
}
+int odp_timer_pool_start_multi(odp_timer_pool_t timer_pool[], int num)
+{
+ _ODP_ASSERT(timer_pool != NULL);
+ _ODP_ASSERT(num > 0);
+ if (ODP_DEBUG) {
+ for (int i = 0; i < num; i++)
+ _ODP_ASSERT(timer_pool[i] != ODP_TIMER_POOL_INVALID);
+ }
+
+ /* Nothing to do here, timer pools are started by the create call. */
+ return num;
+}
+
void odp_timer_pool_destroy(odp_timer_pool_t tp)
{
timer_pool_t *timer_pool = timer_pool_from_hdl(tp);
@@ -766,48 +786,6 @@ void odp_timer_pool_destroy(odp_timer_pool_t tp)
odp_ticketlock_unlock(&timer_global->lock);
}
-uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tp, uint64_t ticks)
-{
- uint64_t nsec;
- uint64_t freq_hz = rte_get_timer_hz();
- uint64_t sec = 0;
- (void)tp;
-
- if (ticks >= freq_hz) {
- sec = ticks / freq_hz;
- ticks = ticks - sec * freq_hz;
- }
-
- nsec = (SEC_IN_NS * ticks) / freq_hz;
-
- return (sec * SEC_IN_NS) + nsec;
-}
-
-uint64_t odp_timer_ns_to_tick(odp_timer_pool_t tp, uint64_t ns)
-{
- uint64_t ticks;
- uint64_t freq_hz = rte_get_timer_hz();
- uint64_t sec = 0;
- (void)tp;
-
- if (ns >= SEC_IN_NS) {
- sec = ns / SEC_IN_NS;
- ns = ns - sec * SEC_IN_NS;
- }
-
- ticks = sec * freq_hz;
- ticks += (ns * freq_hz) / SEC_IN_NS;
-
- return ticks;
-}
-
-uint64_t odp_timer_current_tick(odp_timer_pool_t tp)
-{
- (void)tp;
-
- return rte_get_timer_cycles();
-}
-
int odp_timer_sample_ticks(odp_timer_pool_t tp[], uint64_t tick[], uint64_t clk_count[], int num)
{
uint64_t now;
@@ -857,9 +835,10 @@ int odp_timer_pool_info(odp_timer_pool_t tp,
info->name = timer_pool->name;
info->tick_info.freq.integer = freq_hz;
- info->tick_info.nsec.integer = SEC_IN_NS / freq_hz;
- if (SEC_IN_NS % freq_hz) {
- info->tick_info.nsec.numer = SEC_IN_NS - (info->tick_info.nsec.integer * freq_hz);
+ info->tick_info.nsec.integer = ODP_TIME_SEC_IN_NS / freq_hz;
+ if (ODP_TIME_SEC_IN_NS % freq_hz) {
+ info->tick_info.nsec.numer = ODP_TIME_SEC_IN_NS - (info->tick_info.nsec.integer *
+ freq_hz);
info->tick_info.nsec.denom = freq_hz;
}
/* Leave source clock information to zero as there is no direct link
@@ -918,30 +897,18 @@ odp_timer_t odp_timer_alloc(odp_timer_pool_t tp,
return (odp_timer_t)timer;
}
-odp_event_t odp_timer_free(odp_timer_t timer_hdl)
+int odp_timer_free(odp_timer_t timer_hdl)
{
- odp_event_t ev;
timer_entry_t *timer = timer_from_hdl(timer_hdl);
timer_pool_t *timer_pool = timer->timer_pool;
uint32_t timer_idx = timer->timer_idx;
-retry:
odp_ticketlock_lock(&timer->lock);
- if (timer->state == TICKING) {
- _ODP_DBG("Freeing active timer.\n");
-
- if (timer_global->ops.stop(&timer->rte_timer)) {
- /* Another core runs timer callback function. */
- odp_ticketlock_unlock(&timer->lock);
- goto retry;
- }
-
- ev = timer->tmo_event;
- timer->tmo_event = ODP_EVENT_INVALID;
- timer->state = NOT_TICKING;
- } else {
- ev = ODP_EVENT_INVALID;
+ if (odp_unlikely(timer->state == TICKING)) {
+ odp_ticketlock_unlock(&timer->lock);
+ _ODP_ERR("Timer is active\n");
+ return -1;
}
/* Remove timer from queue */
@@ -958,7 +925,7 @@ retry:
ring_u32_enq(&timer_pool->free_timer.ring_hdr,
timer_pool->free_timer.ring_mask, timer_idx);
- return ev;
+ return 0;
}
static inline odp_timeout_hdr_t *timeout_to_hdr(odp_timeout_t tmo)
@@ -966,10 +933,9 @@ static inline odp_timeout_hdr_t *timeout_to_hdr(odp_timeout_t tmo)
return (odp_timeout_hdr_t *)(uintptr_t)tmo;
}
-static inline int timer_set(odp_timer_t timer_hdl, uint64_t tick,
- odp_event_t *event, int absolute)
+static inline int timer_set(odp_timer_t timer_hdl, uint64_t tick, odp_event_t event, int absolute)
{
- odp_event_t old_ev, tmo_event;
+ odp_event_t tmo_event;
uint64_t cur_tick, rel_tick, abs_tick;
timer_entry_t *timer = timer_from_hdl(timer_hdl);
int num_retry = 0;
@@ -998,12 +964,20 @@ retry:
odp_ticketlock_lock(&timer->lock);
- if (timer->tmo_event == ODP_EVENT_INVALID)
- if (event == NULL || (event && *event == ODP_EVENT_INVALID)) {
+ if (timer->tmo_event == ODP_EVENT_INVALID) {
+ if (odp_unlikely(event == ODP_EVENT_INVALID)) {
odp_ticketlock_unlock(&timer->lock);
/* Event missing, or timer already expired and
* enqueued the event. */
return ODP_TIMER_FAIL;
+ }
+ } else {
+ /* Check that timer was not active */
+ if (odp_unlikely(event != ODP_EVENT_INVALID)) {
+ _ODP_ERR("Timer was already active\n");
+ odp_ticketlock_unlock(&timer->lock);
+ return ODP_TIMER_FAIL;
+ }
}
if (odp_unlikely(timer_global->ops.reset(&timer->rte_timer, rel_tick,
@@ -1040,14 +1014,8 @@ retry:
return ODP_TIMER_FAIL;
}
- if (event) {
- old_ev = timer->tmo_event;
-
- if (*event != ODP_EVENT_INVALID)
- timer->tmo_event = *event;
-
- *event = old_ev;
- }
+ if (event != ODP_EVENT_INVALID)
+ timer->tmo_event = event;
tmo_event = timer->tmo_event;
timer->tick = abs_tick;
@@ -1066,34 +1034,16 @@ retry:
return ODP_TIMER_SUCCESS;
}
-int ODP_DEPRECATE(odp_timer_set_abs)(odp_timer_t timer_hdl, uint64_t abs_tick,
- odp_event_t *tmo_ev)
-{
- return timer_set(timer_hdl, abs_tick, tmo_ev, 1);
-}
-
-int ODP_DEPRECATE(odp_timer_set_rel)(odp_timer_t timer_hdl, uint64_t rel_tick,
- odp_event_t *tmo_ev)
-{
- return timer_set(timer_hdl, rel_tick, tmo_ev, 0);
-}
-
int odp_timer_start(odp_timer_t timer, const odp_timer_start_t *start_param)
{
odp_event_t tmo_ev = start_param->tmo_ev;
int abs = start_param->tick_type == ODP_TIMER_TICK_ABS;
int ret;
- ret = timer_set(timer, start_param->tick, &tmo_ev, abs);
+ ret = timer_set(timer, start_param->tick, tmo_ev, abs);
if (odp_unlikely(ret != ODP_TIMER_SUCCESS))
return ret;
- /* Check that timer was not active */
- if (odp_unlikely(tmo_ev != ODP_EVENT_INVALID)) {
- _ODP_ERR("Timer was active already\n");
- odp_event_free(tmo_ev);
- }
-
return ODP_TIMER_SUCCESS;
}
@@ -1102,7 +1052,7 @@ int odp_timer_restart(odp_timer_t timer, const odp_timer_start_t *start_param)
int abs = start_param->tick_type == ODP_TIMER_TICK_ABS;
/* Reset timer without changing the event */
- return timer_set(timer, start_param->tick, NULL, abs);
+ return timer_set(timer, start_param->tick, ODP_EVENT_INVALID, abs);
}
int odp_timer_periodic_start(odp_timer_t timer_hdl,
@@ -1154,16 +1104,10 @@ int odp_timer_periodic_start(odp_timer_t timer_hdl,
absolute = 1;
}
- ret = timer_set(timer_hdl, first_tick, &tmo_ev, absolute);
+ ret = timer_set(timer_hdl, first_tick, tmo_ev, absolute);
if (odp_unlikely(ret != ODP_TIMER_SUCCESS))
return ret;
- /* Check that timer was not active */
- if (odp_unlikely(tmo_ev != ODP_EVENT_INVALID)) {
- _ODP_ERR("Timer was active already\n");
- odp_event_free(tmo_ev);
- }
-
return ODP_TIMER_SUCCESS;
}
@@ -1182,8 +1126,10 @@ int odp_timer_periodic_ack(odp_timer_t timer_hdl, odp_event_t tmo_ev)
abs_tick = timer->periodic_ticks;
- if (odp_unlikely(abs_tick == PERIODIC_CANCELLED))
+ if (odp_unlikely(abs_tick == PERIODIC_CANCELLED)) {
+ timer->tmo_event = ODP_EVENT_INVALID;
return 2;
+ }
acc = (uint64_t)timer->periodic_ticks_frac_acc + (uint64_t)timer->periodic_ticks_frac;
@@ -1198,7 +1144,7 @@ int odp_timer_periodic_ack(odp_timer_t timer_hdl, odp_event_t tmo_ev)
abs_tick += timeout_hdr->expiration;
timeout_hdr->expiration = abs_tick;
- ret = timer_set(timer_hdl, abs_tick, NULL, 1);
+ ret = timer_set(timer_hdl, abs_tick, ODP_EVENT_INVALID, 1);
if (odp_likely(ret == ODP_TIMER_SUCCESS))
return 0;
@@ -1275,9 +1221,12 @@ int odp_timer_periodic_cancel(odp_timer_t timer_hdl)
/* Timer successfully cancelled, so send the final event manually. */
if (ret == 0 && timer->state == TICKING) {
timer->state = NOT_TICKING;
+ timer->tmo_event = ODP_EVENT_INVALID;
if (odp_unlikely(odp_queue_enq(timer->queue, event))) {
_ODP_ERR("Failed to enqueue final timeout event\n");
_odp_event_free(event);
+ odp_ticketlock_unlock(&timer->lock);
+ return -1;
}
}
@@ -1296,7 +1245,7 @@ uint64_t odp_timeout_to_u64(odp_timeout_t tmo)
return (uint64_t)(uintptr_t)tmo;
}
-int odp_timeout_fresh(odp_timeout_t tmo)
+int ODP_DEPRECATE(odp_timeout_fresh)(odp_timeout_t tmo)
{
timer_entry_t *timer;
odp_timeout_hdr_t *timeout_hdr = timeout_to_hdr(tmo);
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 92d87b441..f3707ab3a 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -115,6 +115,7 @@ odpapiabiarchinclude_HEADERS += \
include-abi/odp/api/abi/std_types.h \
include-abi/odp/api/abi/sync.h \
include-abi/odp/api/abi/thread.h \
+ include-abi/odp/api/abi/thread_types.h \
include-abi/odp/api/abi/thrmask.h \
include-abi/odp/api/abi/ticketlock.h \
include-abi/odp/api/abi/time.h \
@@ -340,7 +341,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/arm/odp/api/abi/cpu_inlines.h \
arch/arm/odp/api/abi/cpu.h \
arch/default/odp/api/abi/sync_inlines.h \
- arch/default/odp/api/abi/time_inlines.h
+ arch/default/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/arm/odp_atomic.h \
arch/arm/odp_cpu.h \
@@ -370,7 +373,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/aarch64/odp/api/abi/cpu.h \
arch/aarch64/odp/api/abi/sync_inlines.h \
arch/common/odp/api/abi/time_cpu_inlines.h \
- arch/aarch64/odp/api/abi/time_inlines.h
+ arch/aarch64/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/aarch64/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/aarch64/odp_atomic.h \
arch/aarch64/odp_cpu.h \
@@ -394,7 +399,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/cpu_inlines.h \
arch/default/odp/api/abi/cpu.h \
arch/default/odp/api/abi/sync_inlines.h \
- arch/default/odp/api/abi/time_inlines.h
+ arch/default/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
@@ -416,7 +423,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/default/odp/api/abi/cpu_inlines.h \
arch/powerpc/odp/api/abi/cpu.h \
arch/default/odp/api/abi/sync_inlines.h \
- arch/default/odp/api/abi/time_inlines.h
+ arch/default/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/default/odp_atomic.h \
arch/default/odp_cpu.h \
@@ -442,7 +451,9 @@ odpapiabiarchinclude_HEADERS += arch/default/odp/api/abi/atomic_generic.h \
arch/x86/odp/api/abi/cpu.h \
arch/x86/odp/api/abi/sync_inlines.h \
arch/common/odp/api/abi/time_cpu_inlines.h \
- arch/x86/odp/api/abi/time_inlines.h
+ arch/x86/odp/api/abi/time_inlines.h \
+ arch/default/odp/api/abi/wait_until_generic.h \
+ arch/default/odp/api/abi/wait_until.h
endif
noinst_HEADERS += arch/x86/cpu_flags.h \
arch/x86/odp_cpu.h \
diff --git a/platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h b/platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h
new file mode 100644
index 000000000..73a3d476a
--- /dev/null
+++ b/platform/linux-generic/arch/aarch64/odp/api/abi/wait_until.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 ARM Limited
+ */
+
+#ifndef ODP_API_ABI_WAIT_UNTIL_H_
+#define ODP_API_ABI_WAIT_UNTIL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/autoheader_external.h>
+
+#ifdef _ODP_WFE_LOCKS
+
+#include <stdint.h>
+
+#include <odp/api/atomic.h>
+
+static inline void
+_odp_wait_until_equal_acq_u32(odp_atomic_u32_t *addr, uint32_t expected)
+{
+ uint32_t value;
+ uint32_t *var = &addr->v;
+
+ __asm__ volatile("sevl" : : : "memory");
+ do {
+ __asm__ volatile("wfe" : : : "memory");
+ __asm__ volatile("ldaxr %w0, [%1]"
+ : "=&r" (value)
+ : "r" (var)
+ : "memory");
+ } while (expected != value);
+}
+
+#else /* !_ODP_WFE_LOCKS */
+
+/* Use generic implementation */
+#include <odp/api/abi/wait_until_generic.h>
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/aarch64/odp_llsc.h b/platform/linux-generic/arch/aarch64/odp_llsc.h
index ba7a447d4..498785bd4 100644
--- a/platform/linux-generic/arch/aarch64/odp_llsc.h
+++ b/platform/linux-generic/arch/aarch64/odp_llsc.h
@@ -150,15 +150,15 @@ static inline uint32_t scd(_odp_u128_t *var, _odp_u128_t neu, int mm)
if (mm == __ATOMIC_RELEASE)
__asm__ volatile("stlxp %w0, %1, %2, [%3]"
: "=&r" (ret)
- : "r" (((union i128)neu).i64[0]),
- "r" (((union i128)neu).i64[1]),
+ : "r" (((*(union i128 *)&neu)).i64[0]),
+ "r" (((*(union i128 *)&neu)).i64[1]),
"r" (var)
: "memory");
else if (mm == __ATOMIC_RELAXED)
__asm__ volatile("stxp %w0, %1, %2, [%3]"
: "=&r" (ret)
- : "r" (((union i128)neu).i64[0]),
- "r" (((union i128)neu).i64[1]),
+ : "r" (((*(union i128 *)&neu)).i64[0]),
+ "r" (((*(union i128 *)&neu)).i64[1]),
"r" (var)
: );
else
diff --git a/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h b/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h
index c154c5f1a..553114666 100644
--- a/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h
+++ b/platform/linux-generic/arch/common/odp/api/abi/time_cpu_inlines.h
@@ -21,8 +21,9 @@ extern "C" {
#define _ODP_TIME_GIGA_HZ 1000000000ULL
typedef struct _odp_time_global_t {
- uint64_t start_time;
uint64_t freq_hz;
+ uint64_t start_time;
+ uint64_t start_time_ns;
} _odp_time_global_t;
@@ -32,7 +33,7 @@ static inline odp_time_t _odp_time_cur(void)
{
odp_time_t time;
- time.count = _odp_time_cpu_global() - _odp_time_glob.start_time;
+ time.count = _odp_time_cpu_global();
return time;
}
@@ -40,7 +41,7 @@ static inline odp_time_t _odp_time_cur_strict(void)
{
odp_time_t time;
- time.count = _odp_time_cpu_global_strict() - _odp_time_glob.start_time;
+ time.count = _odp_time_cpu_global_strict();
return time;
}
@@ -86,6 +87,12 @@ static inline uint64_t _odp_time_res(void)
return _odp_time_glob.freq_hz;
}
+static inline void _odp_time_startup(odp_time_startup_t *startup)
+{
+ startup->global.count = _odp_time_glob.start_time;
+ startup->global_ns = _odp_time_glob.start_time_ns;
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/arch/common/odp_time_cpu.c b/platform/linux-generic/arch/common/odp_time_cpu.c
index bbfe82e21..3c392de0c 100644
--- a/platform/linux-generic/arch/common/odp_time_cpu.c
+++ b/platform/linux-generic/arch/common/odp_time_cpu.c
@@ -17,6 +17,8 @@
#include <stdint.h>
#include <string.h>
+#define YEAR_IN_SEC (365 * 24 * 3600)
+
#include <odp/visibility_begin.h>
_odp_time_global_t _odp_time_glob;
@@ -25,6 +27,8 @@ _odp_time_global_t _odp_time_glob;
int _odp_time_init_global(void)
{
+ uint64_t count, diff, years;
+ odp_time_t time;
_odp_time_global_t *global = &_odp_time_glob;
memset(global, 0, sizeof(_odp_time_global_t));
@@ -38,7 +42,29 @@ int _odp_time_init_global(void)
_ODP_PRINT("HW time counter freq: %" PRIu64 " hz\n\n", global->freq_hz);
- global->start_time = _odp_time_cpu_global();
+ count = _odp_time_cpu_global();
+ time.count = count;
+ global->start_time = count;
+ global->start_time_ns = _odp_time_to_ns(time);
+
+ /* Make sure that counters will not wrap */
+ diff = UINT64_MAX - count;
+ years = (diff / global->freq_hz) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time counter would wrap in 10 years: %" PRIu64 "\n", count);
+ return -1;
+ }
+
+ diff = UINT64_MAX - global->start_time_ns;
+ years = (diff / ODP_TIME_SEC_IN_NS) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time in nsec would wrap in 10 years: %" PRIu64 "\n",
+ global->start_time_ns);
+ return -1;
+ }
+
return 0;
}
diff --git a/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h b/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h
index b38e52dac..ed0ffdb3f 100644
--- a/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h
+++ b/platform/linux-generic/arch/default/odp/api/abi/time_inlines.h
@@ -18,6 +18,7 @@ extern "C" {
odp_time_t _odp_time_cur(void);
uint64_t _odp_time_res(void);
+void _odp_time_startup(odp_time_startup_t *startup);
static inline odp_time_t _odp_time_cur_strict(void)
{
diff --git a/platform/linux-generic/arch/default/odp/api/abi/wait_until.h b/platform/linux-generic/arch/default/odp/api/abi/wait_until.h
new file mode 100644
index 000000000..35e8d2566
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/wait_until.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 ARM Limited
+ */
+
+#include <odp/api/abi/wait_until_generic.h>
diff --git a/platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h b/platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h
new file mode 100644
index 000000000..3d3fce175
--- /dev/null
+++ b/platform/linux-generic/arch/default/odp/api/abi/wait_until_generic.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 ARM Limited
+ */
+
+#ifndef ODP_API_ABI_WAIT_UNTIL_GENERIC_H_
+#define ODP_API_ABI_WAIT_UNTIL_GENERIC_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/api/atomic.h>
+
+static inline void
+_odp_wait_until_equal_acq_u32(odp_atomic_u32_t *addr, uint32_t expected)
+{
+ while (odp_atomic_load_acq_u32(addr) != expected)
+ odp_cpu_pause();
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/default/odp_time.c b/platform/linux-generic/arch/default/odp_time.c
index 919a3ba68..664a5deae 100644
--- a/platform/linux-generic/arch/default/odp_time.c
+++ b/platform/linux-generic/arch/default/odp_time.c
@@ -16,31 +16,24 @@
#include <odp_debug_internal.h>
#include <odp_init_internal.h>
+#include <inttypes.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
+#define YEAR_IN_SEC (365 * 24 * 3600)
+
typedef struct _odp_time_global_t {
struct timespec start_time;
+ uint64_t start_time_ns;
} _odp_time_global_t;
_odp_time_global_t _odp_time_glob;
-static inline uint64_t time_diff_nsec(struct timespec *t2, struct timespec *t1)
+static inline uint64_t time_nsec(struct timespec *t)
{
- struct timespec diff;
- uint64_t nsec;
-
- diff.tv_sec = t2->tv_sec - t1->tv_sec;
- diff.tv_nsec = t2->tv_nsec - t1->tv_nsec;
-
- if (diff.tv_nsec < 0) {
- diff.tv_nsec += ODP_TIME_SEC_IN_NS;
- diff.tv_sec -= 1;
- }
-
- nsec = (diff.tv_sec * ODP_TIME_SEC_IN_NS) + diff.tv_nsec;
+ uint64_t nsec = (t->tv_sec * ODP_TIME_SEC_IN_NS) + t->tv_nsec;
return nsec;
}
@@ -52,13 +45,12 @@ odp_time_t _odp_time_cur(void)
int ret;
odp_time_t time;
struct timespec sys_time;
- struct timespec *start_time = &_odp_time_glob.start_time;
ret = clock_gettime(CLOCK_MONOTONIC_RAW, &sys_time);
if (odp_unlikely(ret != 0))
_ODP_ABORT("clock_gettime() failed\n");
- time.nsec = time_diff_nsec(&sys_time, start_time);
+ time.nsec = time_nsec(&sys_time);
return time;
}
@@ -75,11 +67,18 @@ uint64_t _odp_time_res(void)
return ODP_TIME_SEC_IN_NS / (uint64_t)tres.tv_nsec;
}
+void _odp_time_startup(odp_time_startup_t *startup)
+{
+ startup->global.nsec = _odp_time_glob.start_time_ns;
+ startup->global_ns = _odp_time_glob.start_time_ns;
+}
+
#include <odp/visibility_end.h>
int _odp_time_init_global(void)
{
struct timespec *start_time;
+ uint64_t diff, years;
int ret = 0;
_odp_time_global_t *global = &_odp_time_glob;
@@ -93,6 +92,17 @@ int _odp_time_init_global(void)
if (ret)
_ODP_ERR("clock_gettime() failed: %d\n", ret);
+ global->start_time_ns = time_nsec(start_time);
+
+ diff = UINT64_MAX - global->start_time_ns;
+ years = (diff / ODP_TIME_SEC_IN_NS) / YEAR_IN_SEC;
+
+ if (years < 10) {
+ _ODP_ERR("Time in nsec would wrap in 10 years: %" PRIu64 "\n",
+ global->start_time_ns);
+ return -1;
+ }
+
return ret;
}
diff --git a/platform/linux-generic/include-abi/odp/api/abi/event_types.h b/platform/linux-generic/include-abi/odp/api/abi/event_types.h
index 7eb539827..8ff5acd6b 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/event_types.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/event_types.h
@@ -19,7 +19,6 @@ extern "C" {
#endif
#include <odp/api/plat/strong_types.h>
-#include <odp/api/deprecated.h>
/** @ingroup odp_event
* @{
diff --git a/platform/linux-generic/include-abi/odp/api/abi/thread.h b/platform/linux-generic/include-abi/odp/api/abi/thread.h
index d5628a740..14c074b95 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/thread.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/thread.h
@@ -4,6 +4,5 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/abi-default/thread.h>
-
+/* Inlined API functions */
#include <odp/api/plat/thread_inlines.h>
diff --git a/platform/linux-generic/include-abi/odp/api/abi/thread_types.h b/platform/linux-generic/include-abi/odp/api/abi/thread_types.h
new file mode 100644
index 000000000..e695c233b
--- /dev/null
+++ b/platform/linux-generic/include-abi/odp/api/abi/thread_types.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Nokia
+ */
+
+#include <odp/api/abi-default/thread_types.h>
diff --git a/platform/linux-generic/include/odp/api/plat/crypto_inlines.h b/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
index ddf7debf4..f350edfea 100644
--- a/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/crypto_inlines.h
@@ -55,14 +55,8 @@ _ODP_INLINE int odp_crypto_result(odp_crypto_packet_result_t *result, odp_packet
ok = op_result->cipher_status.alg_err == ODP_CRYPTO_ALG_ERR_NONE &&
op_result->auth_status.alg_err == ODP_CRYPTO_ALG_ERR_NONE;
- if (result) {
+ if (result)
*result = *op_result;
-#if ODP_DEPRECATED_API
- result->ok = ok;
- result->cipher_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
- result->auth_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
-#endif
- }
return ok ? 0 : -1;
}
diff --git a/platform/linux-generic/include/odp/api/plat/event_inlines.h b/platform/linux-generic/include/odp/api/plat/event_inlines.h
index 2e7c7db5e..b68ced244 100644
--- a/platform/linux-generic/include/odp/api/plat/event_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/event_inlines.h
@@ -11,6 +11,7 @@
#include <odp/api/buffer_types.h>
#include <odp/api/event_types.h>
#include <odp/api/packet_types.h>
+#include <odp/api/pool_types.h>
#include <odp/api/timer_types.h>
#include <odp/api/plat/buffer_inline_types.h>
@@ -27,6 +28,7 @@
#define _ODP_INLINE static inline
#define odp_event_type __odp_event_type
#define odp_event_type_multi __odp_event_type_multi
+ #define odp_event_pool __odp_event_pool
#define odp_event_user_area __odp_event_user_area
#define odp_event_user_area_and_flag __odp_event_user_area_and_flag
#define odp_event_subtype __odp_event_subtype
@@ -68,6 +70,20 @@ _ODP_INLINE int odp_event_type_multi(const odp_event_t event[], int num,
return i;
}
+_ODP_INLINE odp_pool_t odp_event_pool(odp_event_t event)
+{
+ const odp_event_type_t type = __odp_event_type_get(event);
+
+ switch (type) {
+ case ODP_EVENT_BUFFER:
+ case ODP_EVENT_PACKET:
+ case ODP_EVENT_PACKET_VECTOR:
+ return _odp_event_hdr_field(event, odp_pool_t, pool);
+ default:
+ return ODP_POOL_INVALID;
+ }
+}
+
_ODP_INLINE void *odp_event_user_area(odp_event_t event)
{
const odp_event_type_t type = __odp_event_type_get(event);
diff --git a/platform/linux-generic/include/odp/api/plat/thread_inline_types.h b/platform/linux-generic/include/odp/api/plat/thread_inline_types.h
index c9a15b06b..d24263fa7 100644
--- a/platform/linux-generic/include/odp/api/plat/thread_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/thread_inline_types.h
@@ -9,7 +9,7 @@
#define ODP_PLAT_THREAD_INLINE_TYPES_H_
#include <odp/api/init.h>
-#include <odp/api/spec/thread_types.h>
+#include <odp/api/thread_types.h>
#ifdef __cplusplus
extern "C" {
diff --git a/platform/linux-generic/include/odp/api/plat/thread_inlines.h b/platform/linux-generic/include/odp/api/plat/thread_inlines.h
index 2f2e50b06..2b6957064 100644
--- a/platform/linux-generic/include/odp/api/plat/thread_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/thread_inlines.h
@@ -7,14 +7,14 @@
#ifndef ODP_PLAT_THREAD_INLINES_H_
#define ODP_PLAT_THREAD_INLINES_H_
-#include <odp/api/init.h>
-
-#include <odp/api/plat/thread_inline_types.h>
-
#ifdef __cplusplus
extern "C" {
#endif
+#include <odp/api/thread_types.h>
+
+#include <odp/api/plat/thread_inline_types.h>
+
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
#ifndef _ODP_NO_INLINE
diff --git a/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h b/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h
index eef052f8d..b596d1609 100644
--- a/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/ticketlock_inlines.h
@@ -11,7 +11,7 @@
#include <odp/api/cpu.h>
#include <odp/api/abi/ticketlock.h>
-
+#include <odp/api/abi/wait_until.h>
/** @cond _ODP_HIDE_FROM_DOXYGEN_ */
#ifndef _ODP_NO_INLINE
@@ -47,8 +47,7 @@ _ODP_INLINE void odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
/* Spin waiting for our turn. Use load-acquire so that we acquire
* all stores from the previous lock owner */
- while (ticket != odp_atomic_load_acq_u32(&ticketlock->cur_ticket))
- odp_cpu_pause();
+ _odp_wait_until_equal_acq_u32(&ticketlock->cur_ticket, ticket);
}
_ODP_INLINE int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
diff --git a/platform/linux-generic/include/odp/api/plat/time_inlines.h b/platform/linux-generic/include/odp/api/plat/time_inlines.h
index 35a35c72e..8ead06f7b 100644
--- a/platform/linux-generic/include/odp/api/plat/time_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/time_inlines.h
@@ -35,6 +35,7 @@
#define odp_time_cmp __odp_time_cmp
#define odp_time_diff __odp_time_diff
#define odp_time_diff_ns __odp_time_diff_ns
+ #define odp_time_add_ns __odp_time_add_ns
#define odp_time_sum __odp_time_sum
#define odp_time_local_from_ns __odp_time_local_from_ns
@@ -45,6 +46,7 @@
#define odp_time_wait_ns __odp_time_wait_ns
#define odp_time_wait_until __odp_time_wait_until
+ #define odp_time_startup __odp_time_startup
#else
#define _ODP_INLINE
#endif
@@ -123,6 +125,15 @@ _ODP_INLINE uint64_t odp_time_diff_ns(odp_time_t t2, odp_time_t t1)
return odp_time_to_ns(time);
}
+_ODP_INLINE odp_time_t odp_time_add_ns(odp_time_t time, uint64_t ns)
+{
+ odp_time_t t = _odp_time_from_ns(ns);
+
+ t.u64 += time.u64;
+
+ return t;
+}
+
_ODP_INLINE odp_time_t odp_time_sum(odp_time_t t1, odp_time_t t2)
{
odp_time_t time;
@@ -170,6 +181,11 @@ _ODP_INLINE void odp_time_wait_ns(uint64_t ns)
odp_time_wait_until(end_time);
}
+_ODP_INLINE void odp_time_startup(odp_time_startup_t *startup)
+{
+ _odp_time_startup(startup);
+}
+
/** @endcond */
#endif
diff --git a/platform/linux-generic/include/odp/api/plat/timer_inlines.h b/platform/linux-generic/include/odp/api/plat/timer_inlines.h
index 9ba0287e0..d2982079f 100644
--- a/platform/linux-generic/include/odp/api/plat/timer_inlines.h
+++ b/platform/linux-generic/include/odp/api/plat/timer_inlines.h
@@ -10,6 +10,8 @@
#include <odp/api/event.h>
#include <odp/api/timer_types.h>
+#include <odp/api/abi/time_inlines.h>
+
#include <odp/api/plat/debug_inlines.h>
#include <odp/api/plat/timer_inline_types.h>
@@ -24,6 +26,7 @@
#define odp_timeout_tick __odp_timeout_tick
#define odp_timeout_user_ptr __odp_timeout_user_ptr
#define odp_timeout_user_area __odp_timeout_user_area
+ #define odp_timer_current_tick __odp_timer_current_tick
#define odp_timer_tick_to_ns __odp_timer_tick_to_ns
#define odp_timer_ns_to_tick __odp_timer_ns_to_tick
#define odp_timeout_from_event __odp_timeout_from_event
@@ -53,6 +56,14 @@ _ODP_INLINE void *odp_timeout_user_area(odp_timeout_t tmo)
return _odp_timeout_hdr_field(tmo, void *, uarea_addr);
}
+_ODP_INLINE uint64_t odp_timer_current_tick(odp_timer_pool_t tpid)
+{
+ (void)tpid;
+
+ /* This is equal to odp_time_global_ns(). Cannot call inlined API function from here. */
+ return _odp_time_to_ns(_odp_time_cur());
+}
+
_ODP_INLINE uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tp, uint64_t ticks)
{
(void)tp;
diff --git a/platform/linux-generic/include/odp_chksum_internal.h b/platform/linux-generic/include/odp_chksum_internal.h
index 5a134ae2d..e589ecb94 100644
--- a/platform/linux-generic/include/odp_chksum_internal.h
+++ b/platform/linux-generic/include/odp_chksum_internal.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2020, Nokia
+/* Copyright (c) 2020, 2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -62,8 +62,23 @@ static inline uint16_t chksum_finalize(uint64_t sum)
static uint64_t chksum_partial(const void *addr, uint32_t len, uint32_t offset)
{
const uint8_t *b;
- const uint16_t *w;
- const uint32_t *d;
+#if _ODP_UNALIGNED
+ /*
+ * _ODP_UNALIGNED does not guarantee that all possible ways of
+ * accessing memory can be unaligned. Make the compiler aware
+ * of the possible unalignment so that it does not generate
+ * instructions (such as LDM of AArch32) that require higher
+ * alignment than one byte.
+ */
+ typedef uint32_t x_uint32_t ODP_ALIGNED(1);
+ typedef uint16_t x_uint16_t ODP_ALIGNED(1);
+#else
+ /* In this case we can use normal types as we align manually. */
+ typedef uint32_t x_uint32_t;
+ typedef uint16_t x_uint16_t;
+#endif
+ const x_uint16_t *w;
+ const x_uint32_t *d;
uint64_t sum = 0;
/*
@@ -77,7 +92,7 @@ static uint64_t chksum_partial(const void *addr, uint32_t len, uint32_t offset)
* We have efficient unaligned access. Just read
* dwords starting at the given address.
*/
- d = (const uint32_t *)addr;
+ d = (const x_uint32_t *)addr;
} else {
/*
* We must avoid unaligned access, so align to 4 bytes
@@ -102,7 +117,7 @@ static uint64_t chksum_partial(const void *addr, uint32_t len, uint32_t offset)
* This cast increases alignment, but it's OK, since
* we've made sure that the pointer value is aligned.
*/
- w = (const uint16_t *)(uintptr_t)b;
+ w = (const x_uint16_t *)(uintptr_t)b;
if ((uintptr_t)w & 2 && len >= 2) {
/* Align bytes by handling an odd word. */
@@ -111,7 +126,7 @@ static uint64_t chksum_partial(const void *addr, uint32_t len, uint32_t offset)
}
/* Increases alignment. */
- d = (const uint32_t *)(uintptr_t)w;
+ d = (const x_uint32_t *)(uintptr_t)w;
}
while (len >= 32) {
@@ -159,7 +174,7 @@ static uint64_t chksum_partial(const void *addr, uint32_t len, uint32_t offset)
len &= 3;
- w = (const uint16_t *)d;
+ w = (const x_uint16_t *)d;
if (len > 1) {
/* Last word. */
sum += *w++;
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index 376dbd1db..c042a5308 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -40,14 +40,6 @@ extern "C" {
#define CLS_PMRTERM_MAX 8
/* Maximum PMRs attached in PKTIO Level */
#define CLS_PMR_PER_COS_MAX 8
-/* L2 Priority Bits */
-#define CLS_COS_L2_QOS_BITS 3
-/* Max L2 QoS value */
-#define CLS_COS_MAX_L2_QOS (1 << CLS_COS_L2_QOS_BITS)
-/* L2 DSCP Bits */
-#define CLS_COS_L3_QOS_BITS 6
-/* Max L3 QoS Value */
-#define CLS_COS_MAX_L3_QOS (1 << CLS_COS_L3_QOS_BITS)
/* Max PMR Term size */
#define MAX_PMR_TERM_SIZE 16
/* Max queue per Class of service */
@@ -145,9 +137,6 @@ typedef struct ODP_ALIGNED_CACHE cos_s {
bool queue_group;
odp_cls_hash_proto_t hash_proto;
odp_pktin_vector_config_t vector; /* Packet vector config */
-#if ODP_DEPRECATED_API
- odp_cls_drop_t drop_policy; /* Associated Drop Policy */
-#endif
size_t headroom; /* Headroom for this CoS */
odp_spinlock_t lock; /* cos lock */
odp_queue_param_t queue_param;
@@ -174,28 +163,6 @@ typedef struct ODP_ALIGNED_CACHE {
} _cls_queue_grp_tbl_t;
/**
-L2 QoS and CoS Map
-
-This structure holds the mapping between L2 QoS value and
-corresponding cos_t object
-**/
-typedef struct pmr_l2_cos {
- odp_spinlock_t lock; /* pmr_l2_cos lock */
- cos_t *cos[CLS_COS_MAX_L2_QOS]; /* Array of CoS objects */
-} pmr_l2_cos_t;
-
-/**
-L3 QoS and CoS Map
-
-This structure holds the mapping between L3 QoS value and
-corresponding cos_t object
-**/
-typedef struct pmr_l3_cos {
- odp_spinlock_t lock; /* pmr_l3_cos lock */
- cos_t *cos[CLS_COS_MAX_L3_QOS]; /* Array of CoS objects */
-} pmr_l3_cos_t;
-
-/**
Linux Generic Classifier
This structure is stored in pktio_entry and holds all
@@ -204,9 +171,6 @@ the classifier configuration value.
typedef struct classifier {
cos_t *error_cos; /* Associated Error CoS */
cos_t *default_cos; /* Associated Default CoS */
- uint32_t l3_precedence; /* L3 QoS precedence */
- pmr_l2_cos_t l2_cos_table; /* L2 QoS-CoS table map */
- pmr_l3_cos_t l3_cos_table; /* L3 Qos-CoS table map */
size_t headroom; /* Pktio Headroom */
size_t skip; /* Pktio Skip Offset */
} classifier_t;
diff --git a/platform/linux-generic/include/odp_config_internal.h b/platform/linux-generic/include/odp_config_internal.h
index 279a43687..8fd8c4be7 100644
--- a/platform/linux-generic/include/odp_config_internal.h
+++ b/platform/linux-generic/include/odp_config_internal.h
@@ -20,9 +20,20 @@ extern "C" {
#define CONFIG_NUM_CPU_IDS 256
/*
+ * Maximum number of packet IO resources
+ */
+#define CONFIG_PKTIO_ENTRIES 64
+
+/*
+ * Pools reserved for internal usage, 1 for IPsec status events and one per packet
+ * I/O for TX completion
+ */
+#define CONFIG_INTERNAL_POOLS (1 + CONFIG_PKTIO_ENTRIES)
+
+/*
* Maximum number of pools.
*/
-#define ODP_CONFIG_POOLS 32
+#define CONFIG_POOLS 128
/*
* Queues reserved for ODP internal use
@@ -69,17 +80,12 @@ extern "C" {
#define CONFIG_MAX_STASHES 2048
/*
- * Maximum number of packet IO resources
- */
-#define ODP_CONFIG_PKTIO_ENTRIES 64
-
-/*
* Maximum buffer alignment
*
* This defines the maximum supported buffer alignment. Requests for values
* above this will fail.
*/
-#define ODP_CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
+#define CONFIG_BUFFER_ALIGN_MAX (4 * 1024)
/*
* Default packet headroom
@@ -138,7 +144,7 @@ extern "C" {
* are reserved for per ODP module global data and one block per packet I/O is
* reserved for TX completion usage.
*/
-#define CONFIG_INTERNAL_SHM_BLOCKS ((ODP_CONFIG_POOLS * 3) + 20 + ODP_CONFIG_PKTIO_ENTRIES)
+#define CONFIG_INTERNAL_SHM_BLOCKS ((CONFIG_POOLS * 3) + 20 + CONFIG_PKTIO_ENTRIES)
/*
* Maximum number of shared memory blocks.
diff --git a/platform/linux-generic/include/odp_event_internal.h b/platform/linux-generic/include/odp_event_internal.h
index 4bc28d708..d9957e530 100644
--- a/platform/linux-generic/include/odp_event_internal.h
+++ b/platform/linux-generic/include/odp_event_internal.h
@@ -35,7 +35,7 @@ typedef union _odp_event_index_t {
} _odp_event_index_t;
/* Check that pool index fit into bit field */
-ODP_STATIC_ASSERT(ODP_CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
+ODP_STATIC_ASSERT(CONFIG_POOLS <= (0xFF + 1), "TOO_MANY_POOLS");
/* Check that buffer index fit into bit field */
ODP_STATIC_ASSERT(CONFIG_POOL_MAX_NUM <= (0xFFFFFF + 1), "TOO_LARGE_POOL");
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index eef0239f2..41a44b83c 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -176,7 +176,7 @@ typedef struct ODP_ALIGNED_CACHE odp_packet_hdr_t {
* grow over 256 bytes. */
ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 256, "PACKET_HDR_SIZE_ERROR");
-ODP_STATIC_ASSERT(ODP_CONFIG_PKTIO_ENTRIES < UINT8_MAX, "MS_PKTIO_IDX_SIZE_ERROR");
+ODP_STATIC_ASSERT(CONFIG_PKTIO_ENTRIES < UINT8_MAX, "MS_PKTIO_IDX_SIZE_ERROR");
/**
* Return the packet header
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index 5490c3d01..6c8a2305b 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -184,7 +184,7 @@ typedef struct {
uint32_t tx_compl_pool_size;
} config;
- pktio_entry_t entries[ODP_CONFIG_PKTIO_ENTRIES];
+ pktio_entry_t entries[CONFIG_PKTIO_ENTRIES];
lso_profile_t lso_profile[PKTIO_LSO_PROFILES];
int num_lso_profiles;
@@ -257,9 +257,9 @@ static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
if (odp_unlikely(pktio == ODP_PKTIO_INVALID))
return NULL;
- if (odp_unlikely(_odp_typeval(pktio) > ODP_CONFIG_PKTIO_ENTRIES)) {
+ if (odp_unlikely(_odp_typeval(pktio) > CONFIG_PKTIO_ENTRIES)) {
_ODP_DBG("pktio limit %" PRIuPTR "/%d exceed\n",
- _odp_typeval(pktio), ODP_CONFIG_PKTIO_ENTRIES);
+ _odp_typeval(pktio), CONFIG_PKTIO_ENTRIES);
return NULL;
}
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index c8d2168f3..2c33bb4a2 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -116,7 +116,7 @@ typedef struct pool_t {
} pool_t;
typedef struct pool_global_t {
- pool_t pool[ODP_CONFIG_POOLS];
+ pool_t pool[CONFIG_POOLS];
odp_shm_t shm;
struct {
diff --git a/platform/linux-generic/include/odp_schedule_scalable.h b/platform/linux-generic/include/odp_schedule_scalable.h
index 207573f4c..28c0a9894 100644
--- a/platform/linux-generic/include/odp_schedule_scalable.h
+++ b/platform/linux-generic/include/odp_schedule_scalable.h
@@ -19,10 +19,10 @@
#include <odp_llqueue.h>
/*
- * ODP_SCHED_PRIO_HIGHEST/NORMAL/LOWEST/DEFAULT are compile time
- * constants, but not ODP_SCHED_PRIO_NUM. The current API for this
- * is odp_schedule_num_prio(). The other schedulers also define
- * this internally as NUM_PRIO.
+ * Define scalable scheduler internal maximum priority count
+ * ODP_SCHED_PRIO_NUM as it is not compile-time constant at API
+ * level. The current API for this is odp_schedule_num_prio().
+ * The other schedulers also define this internally as NUM_PRIO.
*
* One additional priority level for idle pktin queues.
* This is only for internal use and not visible to the user.
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index 556f01c19..61b65540f 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -29,6 +29,7 @@ m4_include([platform/linux-generic/m4/odp_crypto.m4])
m4_include([platform/linux-generic/m4/odp_ipsec_mb.m4])
m4_include([platform/linux-generic/m4/odp_pcapng.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
+m4_include([platform/linux-generic/m4/odp_wfe.m4])
m4_include([platform/linux-generic/m4/odp_xdp.m4])
ODP_EVENT_VALIDATION
ODP_SCHEDULER
@@ -42,9 +43,10 @@ AS_VAR_APPEND([PLAT_CFG_TEXT], ["
openssl: ${with_openssl}
openssl_rand: ${openssl_rand}
crypto: ${with_crypto}
- pcap: ${have_pcap}
- pcapng: ${have_pcapng}
- default_config_path: ${default_config_path}"])
+ pcap: ${have_pcap}
+ pcapng: ${have_pcapng}
+ wfe_locks: ${use_wfe_locks}
+ default_config_path: ${default_config_path}"])
# Ignore Clang specific errors about fields with variable sized type not at the
# end of a struct. This style is used by e.g. odp_packet_hdr_t and
diff --git a/platform/linux-generic/m4/odp_wfe.m4 b/platform/linux-generic/m4/odp_wfe.m4
new file mode 100644
index 000000000..f0f0542f7
--- /dev/null
+++ b/platform/linux-generic/m4/odp_wfe.m4
@@ -0,0 +1,14 @@
+##########################################################################
+# Enable/disable WFE based lock implementations
+##########################################################################
+use_wfe_locks=no
+AC_ARG_ENABLE([wfe-locks],
+ [AS_HELP_STRING([--enable-wfe-locks],
+ [enable WFE based lock implementations on aarch64]
+ [[default=disabled] (linux-generic)])],
+ [use_wfe_locks=$enableval])
+
+if test x$use_wfe_locks = xyes; then
+ AC_DEFINE([_ODP_WFE_LOCKS], [1],
+ [Define to 1 to enable WFE based lock implementations on aarch64])
+fi
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index eee64e78c..0e6eea3ae 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -139,9 +139,6 @@ void odp_cls_cos_param_init(odp_cls_cos_param_t *param)
param->queue = ODP_QUEUE_INVALID;
param->pool = ODP_POOL_INVALID;
-#if ODP_DEPRECATED_API
- param->drop_policy = ODP_COS_DROP_NEVER;
-#endif
param->num_queue = 1;
param->vector.enable = false;
odp_queue_param_init(&param->queue_param);
@@ -154,16 +151,10 @@ void odp_cls_pmr_param_init(odp_pmr_param_t *param)
int odp_cls_capability(odp_cls_capability_t *capability)
{
- uint32_t count = 0;
-
memset(capability, 0, sizeof(odp_cls_capability_t));
-
- for (int i = 0; i < CLS_PMR_MAX_ENTRY; i++)
- if (!pmr_tbl->pmr[i].valid)
- count++;
-
- capability->max_pmr_terms = CLS_PMR_MAX_ENTRY;
- capability->available_pmr_terms = count;
+ capability->max_pmr = CLS_PMR_MAX_ENTRY;
+ capability->max_pmr_per_cos = CLS_PMR_PER_COS_MAX;
+ capability->max_terms_per_pmr = CLS_PMRTERM_MAX;
capability->max_cos = CLS_COS_MAX_ENTRY;
capability->max_cos_stats = capability->max_cos;
capability->pmr_range_supported = false;
@@ -238,9 +229,6 @@ static inline void _cls_queue_unwind(uint32_t tbl_index, uint32_t j)
odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_in)
{
-#if ODP_DEPRECATED_API
- odp_cls_drop_t drop_policy;
-#endif
uint32_t i, j;
odp_queue_t queue;
cos_t *cos;
@@ -284,10 +272,6 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_
}
}
-#if ODP_DEPRECATED_API
- drop_policy = param.drop_policy;
-#endif
-
for (i = 0; i < CLS_COS_MAX_ENTRY; i++) {
cos = &cos_tbl->cos_entry[i];
LOCK(&cos->lock);
@@ -347,9 +331,6 @@ odp_cos_t odp_cls_cos_create(const char *name, const odp_cls_cos_param_t *param_
cos->pool = param.pool;
cos->headroom = 0;
cos->valid = 1;
-#if ODP_DEPRECATED_API
- cos->drop_policy = drop_policy;
-#endif
odp_atomic_init_u32(&cos->num_rule, 0);
cos->index = i;
cos->vector = param.vector;
@@ -548,36 +529,6 @@ uint32_t odp_cls_cos_queues(odp_cos_t cos_id, odp_queue_t queue[],
return cos->num_queue;
}
-#if ODP_DEPRECATED_API
-
-int odp_cos_drop_set(odp_cos_t cos_id, odp_cls_drop_t drop_policy)
-{
- cos_t *cos = get_cos_entry(cos_id);
-
- if (!cos) {
- _ODP_ERR("Invalid odp_cos_t handle\n");
- return -1;
- }
-
- /*Drop policy is not supported in v1.0*/
- cos->drop_policy = drop_policy;
- return 0;
-}
-
-odp_cls_drop_t odp_cos_drop(odp_cos_t cos_id)
-{
- cos_t *cos = get_cos_entry(cos_id);
-
- if (!cos) {
- _ODP_ERR("Invalid odp_cos_t handle\n");
- return -1;
- }
-
- return cos->drop_policy;
-}
-
-#endif
-
int odp_pktio_default_cos_set(odp_pktio_t pktio_in, odp_cos_t default_cos)
{
pktio_entry_t *entry;
@@ -643,62 +594,6 @@ int odp_pktio_headroom_set(odp_pktio_t pktio_in, uint32_t headroom)
return 0;
}
-int ODP_DEPRECATE(odp_cos_with_l2_priority)(odp_pktio_t pktio_in, uint8_t num_qos,
- uint8_t qos_table[], odp_cos_t cos_table[])
-{
- pmr_l2_cos_t *l2_cos;
- uint32_t i;
- cos_t *cos;
- pktio_entry_t *entry = get_pktio_entry(pktio_in);
-
- if (entry == NULL) {
- _ODP_ERR("Invalid odp_pktio_t handle\n");
- return -1;
- }
- l2_cos = &entry->cls.l2_cos_table;
-
- LOCK(&l2_cos->lock);
- /* Update the L2 QoS table*/
- for (i = 0; i < num_qos; i++) {
- cos = get_cos_entry(cos_table[i]);
- if (cos != NULL) {
- if (CLS_COS_MAX_L2_QOS > qos_table[i])
- l2_cos->cos[qos_table[i]] = cos;
- }
- }
- UNLOCK(&l2_cos->lock);
- return 0;
-}
-
-int ODP_DEPRECATE(odp_cos_with_l3_qos)(odp_pktio_t pktio_in, uint32_t num_qos, uint8_t qos_table[],
- odp_cos_t cos_table[], odp_bool_t l3_preference)
-{
- pmr_l3_cos_t *l3_cos;
- uint32_t i;
- pktio_entry_t *entry = get_pktio_entry(pktio_in);
- cos_t *cos;
-
- if (entry == NULL) {
- _ODP_ERR("Invalid odp_pktio_t handle\n");
- return -1;
- }
-
- entry->cls.l3_precedence = l3_preference;
- l3_cos = &entry->cls.l3_cos_table;
-
- LOCK(&l3_cos->lock);
- /* Update the L3 QoS table*/
- for (i = 0; i < num_qos; i++) {
- cos = get_cos_entry(cos_table[i]);
- if (cos != NULL) {
- if (CLS_COS_MAX_L3_QOS > qos_table[i])
- l3_cos->cos[qos_table[i]] = cos;
- }
- }
- UNLOCK(&l3_cos->lock);
- return 0;
-}
-
static int pmr_create_term(pmr_term_value_t *value,
const odp_pmr_param_t *param)
{
@@ -1714,17 +1609,11 @@ int _odp_pktio_classifier_init(pktio_entry_t *entry)
return 0;
}
-static
-cos_t *match_qos_cos(pktio_entry_t *entry, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr);
-
/**
Select a CoS for the given Packet based on pktio
This function will call all the PMRs associated with a pktio for
a given packet and will return the matched COS object.
-This function will check PMR, L2 and L3 QoS COS object associated
-with the PKTIO interface.
Returns the default cos if the packet does not match any PMR
Returns the error_cos if the packet has an error
@@ -1753,12 +1642,6 @@ static inline cos_t *cls_select_cos(pktio_entry_t *entry,
return cos;
}
- cos = match_qos_cos(entry, pkt_addr, pkt_hdr);
- if (cos) {
- ODP_DBG_RAW(CLS_DBG, " QoS matched -> cos: %s(%u)\n", cos->name, cos->index);
- goto done;
- }
-
ODP_DBG_RAW(CLS_DBG, " No match -> default cos\n");
cos = cls->default_cos;
@@ -1895,83 +1778,6 @@ static uint32_t packet_rss_hash(odp_packet_hdr_t *pkt_hdr,
return hash;
}
-static
-cos_t *match_qos_l3_cos(pmr_l3_cos_t *l3_cos, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr)
-{
- uint8_t dscp;
- cos_t *cos = NULL;
- const _odp_ipv4hdr_t *ipv4;
- const _odp_ipv6hdr_t *ipv6;
-
- if (hdr->p.input_flags.l3 && hdr->p.input_flags.ipv4) {
- ipv4 = (const _odp_ipv4hdr_t *)(pkt_addr + hdr->p.l3_offset);
- dscp = _ODP_IPV4HDR_DSCP(ipv4->tos);
- cos = l3_cos->cos[dscp];
- } else if (hdr->p.input_flags.l3 && hdr->p.input_flags.ipv6) {
- ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + hdr->p.l3_offset);
- dscp = _ODP_IPV6HDR_DSCP(ipv6->ver_tc_flow);
- cos = l3_cos->cos[dscp];
- }
-
- return cos;
-}
-
-static
-cos_t *match_qos_l2_cos(pmr_l2_cos_t *l2_cos, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr)
-{
- cos_t *cos = NULL;
- const _odp_ethhdr_t *eth;
- const _odp_vlanhdr_t *vlan;
- uint16_t qos;
-
- if (packet_hdr_has_l2(hdr) && hdr->p.input_flags.vlan &&
- packet_hdr_has_eth(hdr)) {
- eth = (const _odp_ethhdr_t *)(pkt_addr + hdr->p.l2_offset);
- vlan = (const _odp_vlanhdr_t *)(eth + 1);
- qos = odp_be_to_cpu_16(vlan->tci);
- qos = ((qos >> 13) & 0x07);
- cos = l2_cos->cos[qos];
- }
- return cos;
-}
-
-/*
- * Select a CoS for the given Packet based on QoS values
- * This function returns the COS object matching the L2 and L3 QoS
- * based on the l3_preference value of the pktio
-*/
-static
-cos_t *match_qos_cos(pktio_entry_t *entry, const uint8_t *pkt_addr,
- odp_packet_hdr_t *hdr)
-{
- classifier_t *cls = &entry->cls;
- pmr_l2_cos_t *l2_cos;
- pmr_l3_cos_t *l3_cos;
- cos_t *cos;
-
- l2_cos = &cls->l2_cos_table;
- l3_cos = &cls->l3_cos_table;
-
- if (cls->l3_precedence) {
- cos = match_qos_l3_cos(l3_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- cos = match_qos_l2_cos(l2_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- } else {
- cos = match_qos_l2_cos(l2_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- cos = match_qos_l3_cos(l3_cos, pkt_addr, hdr);
- if (cos)
- return cos;
- }
- return NULL;
-}
-
uint64_t odp_cos_to_u64(odp_cos_t hdl)
{
return _odp_pri(hdl);
diff --git a/platform/linux-generic/odp_dma.c b/platform/linux-generic/odp_dma.c
index 5ffeb20ce..b3439498c 100644
--- a/platform/linux-generic/odp_dma.c
+++ b/platform/linux-generic/odp_dma.c
@@ -16,6 +16,7 @@
#include <odp/api/queue.h>
#include <odp/api/plat/std_inlines.h>
+#include <odp/api/plat/strong_types.h>
#include <odp_global_data.h>
#include <odp_debug_internal.h>
@@ -758,12 +759,12 @@ odp_pool_t odp_dma_pool_create(const char *name, const odp_dma_pool_param_t *dma
uint64_t odp_dma_to_u64(odp_dma_t dma)
{
- return (uint64_t)(uintptr_t)dma;
+ return _odp_pri(dma);
}
uint64_t odp_dma_compl_to_u64(odp_dma_compl_t dma_compl)
{
- return (uint64_t)(uintptr_t)dma_compl;
+ return _odp_pri(dma_compl);
}
void odp_dma_print(odp_dma_t dma)
@@ -796,12 +797,11 @@ void odp_dma_compl_print(odp_dma_compl_t dma_compl)
_ODP_PRINT("\nDMA completion\n");
_ODP_PRINT("--------------\n");
- _ODP_PRINT(" Compl event handle: 0x%" PRIx64 "\n", (uint64_t)(uintptr_t)dma_compl);
+ _ODP_PRINT(" Compl event handle: 0x%" PRIx64 "\n", _odp_pri(dma_compl));
if (ret == 0) {
_ODP_PRINT(" Result: %s\n", result.success ? "success" : "fail");
- _ODP_PRINT(" User pointer: 0x%" PRIx64 "\n",
- (uint64_t)(uintptr_t)result.user_ptr);
+ _ODP_PRINT(" User pointer: 0x%" PRIx64 "\n", _odp_pri(result.user_ptr));
} else {
_ODP_PRINT(" No result metadata\n");
}
diff --git a/platform/linux-generic/odp_fdserver.c b/platform/linux-generic/odp_fdserver.c
index 4995efb94..e72df0669 100644
--- a/platform/linux-generic/odp_fdserver.c
+++ b/platform/linux-generic/odp_fdserver.c
@@ -37,6 +37,7 @@
*/
#include <odp_posix_extensions.h>
+#include <odp_config_internal.h>
#include <odp_global_data.h>
#include <odp_init_internal.h>
#include <odp_debug_internal.h>
@@ -72,7 +73,7 @@
#define FD_DBG 3
/* define the tables of file descriptors handled by this server: */
-#define FDSERVER_MAX_ENTRIES 256
+#define FDSERVER_MAX_ENTRIES (CONFIG_SHM_BLOCKS + CONFIG_INTERNAL_SHM_BLOCKS)
typedef struct fdentry_s {
fd_server_context_e context;
uint64_t key;
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 38c1c2b03..236813e80 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -74,7 +74,7 @@ typedef struct {
static pktio_global_t *pktio_global;
/* pktio pointer entries ( for inlines) */
-void *_odp_pktio_entry_ptr[ODP_CONFIG_PKTIO_ENTRIES];
+void *_odp_pktio_entry_ptr[CONFIG_PKTIO_ENTRIES];
static inline pktio_entry_t *pktio_entry_by_index(int index)
{
@@ -145,14 +145,12 @@ int _odp_pktio_init_global(void)
return -1;
}
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
pktio_entry = &pktio_global->entries[i];
pktio_entry->handle = _odp_cast_scalar(odp_pktio_t, i + 1);
odp_ticketlock_init(&pktio_entry->rxl);
odp_ticketlock_init(&pktio_entry->txl);
- odp_spinlock_init(&pktio_entry->cls.l2_cos_table.lock);
- odp_spinlock_init(&pktio_entry->cls.l3_cos_table.lock);
_odp_pktio_entry_ptr[i] = pktio_entry;
}
@@ -311,7 +309,7 @@ static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
if_name = strip_pktio_type(name, pktio_type);
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
pktio_entry = &pktio_global->entries[i];
if (is_free(pktio_entry)) {
lock_entry(pktio_entry);
@@ -322,7 +320,7 @@ static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
}
}
- if (i == ODP_CONFIG_PKTIO_ENTRIES) {
+ if (i == CONFIG_PKTIO_ENTRIES) {
_ODP_ERR("All pktios used already\n");
return ODP_PKTIO_INVALID;
}
@@ -809,7 +807,7 @@ odp_pktio_t odp_pktio_lookup(const char *name)
odp_spinlock_lock(&pktio_global->lock);
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
entry = pktio_entry_by_index(i);
if (!entry || is_free(entry))
continue;
@@ -1509,7 +1507,7 @@ int _odp_pktio_term_global(void)
if (pktio_global == NULL)
return 0;
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; ++i) {
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; ++i) {
pktio_entry_t *pktio_entry;
pktio_entry = &pktio_global->entries[i];
@@ -1626,12 +1624,12 @@ int odp_pktio_capability(odp_pktio_t pktio, odp_pktio_capability_t *capa)
return 0;
}
-ODP_STATIC_ASSERT(ODP_CONFIG_PKTIO_ENTRIES - 1 <= ODP_PKTIO_MAX_INDEX,
- "ODP_CONFIG_PKTIO_ENTRIES larger than ODP_PKTIO_MAX_INDEX");
+ODP_STATIC_ASSERT(CONFIG_PKTIO_ENTRIES - 1 <= ODP_PKTIO_MAX_INDEX,
+ "CONFIG_PKTIO_ENTRIES larger than ODP_PKTIO_MAX_INDEX");
unsigned int odp_pktio_max_index(void)
{
- return ODP_CONFIG_PKTIO_ENTRIES - 1;
+ return CONFIG_PKTIO_ENTRIES - 1;
}
int odp_pktio_stats(odp_pktio_t pktio,
@@ -2568,14 +2566,12 @@ int odp_pktin_recv_tmo(odp_pktin_queue_t queue, odp_packet_t packets[], int num,
/* Avoid unnecessary system calls. Record the start time
* only when needed and after the first call to recv. */
if (odp_unlikely(!started)) {
- odp_time_t t;
-
/* Avoid overflow issues for large wait times */
if (wait > MAX_WAIT_TIME)
wait = MAX_WAIT_TIME;
- t = odp_time_local_from_ns(wait * 1000);
+
started = 1;
- t1 = odp_time_sum(odp_time_local(), t);
+ t1 = odp_time_add_ns(odp_time_local(), wait * 1000);
}
/* Check every SLEEP_CHECK rounds if total wait time
@@ -2652,14 +2648,12 @@ int odp_pktin_recv_mq_tmo(const odp_pktin_queue_t queues[], uint32_t num_q, uint
return 0;
if (odp_unlikely(!started)) {
- odp_time_t t;
-
/* Avoid overflow issues for large wait times */
if (wait > MAX_WAIT_TIME)
wait = MAX_WAIT_TIME;
- t = odp_time_local_from_ns(wait * 1000);
+
started = 1;
- t1 = odp_time_sum(odp_time_local(), t);
+ t1 = odp_time_add_ns(odp_time_local(), wait * 1000);
}
/* Check every SLEEP_CHECK rounds if total wait time
diff --git a/platform/linux-generic/odp_pcapng.c b/platform/linux-generic/odp_pcapng.c
index 4423b0483..7f11f4340 100644
--- a/platform/linux-generic/odp_pcapng.c
+++ b/platform/linux-generic/odp_pcapng.c
@@ -99,7 +99,7 @@ typedef struct ODP_ALIGNED_CACHE {
int inotify_watch_fd;
int inotify_is_running;
odp_spinlock_t lock;
- pcapng_entry_t entry[ODP_CONFIG_PKTIO_ENTRIES];
+ pcapng_entry_t entry[CONFIG_PKTIO_ENTRIES];
} pcapng_global_t;
static pcapng_global_t *pcapng_gbl;
@@ -230,7 +230,7 @@ static pktio_entry_t *pktio_from_event(struct inotify_event *event)
odp_spinlock_lock(&pcapng_gbl->lock);
- for (i = 0; i < ODP_CONFIG_PKTIO_ENTRIES; i++) {
+ for (i = 0; i < CONFIG_PKTIO_ENTRIES; i++) {
pktio_entry_t *entry = pcapng_gbl->entry[i].pktio_entry;
if (entry == NULL)
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index b3b6f9c40..94461e6b1 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -61,9 +61,12 @@ ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_LEN_MIN >= 256,
ODP_STATIC_ASSERT(CONFIG_PACKET_SEG_SIZE < 0xffff,
"Segment size must be less than 64k (16 bit offsets)");
+ODP_STATIC_ASSERT(CONFIG_INTERNAL_POOLS < CONFIG_POOLS,
+ "Internal pool count needs to be less than total configured pool count");
+
/* Thread local variables */
typedef struct pool_local_t {
- pool_cache_t *cache[ODP_CONFIG_POOLS];
+ pool_cache_t *cache[CONFIG_POOLS];
int thr_id;
} pool_local_t;
@@ -145,11 +148,14 @@ static inline int cache_available(pool_t *pool, odp_pool_stats_t *stats)
uint64_t cached = 0;
const uint16_t first = stats->thread.first;
const uint16_t last = stats->thread.last;
+ const odp_bool_t cache_available = pool->params.stats.bit.cache_available;
const odp_bool_t per_thread = pool->params.stats.bit.thread_cache_available;
+ const int max_threads = odp_thread_count_max();
uint16_t out_idx = 0;
+ int i, idx_limit;
if (per_thread) {
- if (first > last || last >= odp_thread_count_max()) {
+ if (first > last || last >= max_threads) {
_ODP_ERR("Bad thread ids: first=%" PRIu16 " last=%" PRIu16 "\n",
first, last);
return -1;
@@ -161,7 +167,15 @@ static inline int cache_available(pool_t *pool, odp_pool_stats_t *stats)
}
}
- for (int i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (cache_available) {
+ i = 0;
+ idx_limit = max_threads;
+ } else {
+ i = first;
+ idx_limit = last + 1;
+ }
+
+ for (; i < idx_limit; i++) {
uint32_t cur = odp_atomic_load_u32(&pool->local_cache[i].cache_num);
if (per_thread && i >= first && i <= last)
@@ -170,7 +184,7 @@ static inline int cache_available(pool_t *pool, odp_pool_stats_t *stats)
cached += cur;
}
- if (pool->params.stats.bit.cache_available)
+ if (cache_available)
stats->cache_available = cached;
return 0;
@@ -179,8 +193,9 @@ static inline int cache_available(pool_t *pool, odp_pool_stats_t *stats)
static inline uint64_t cache_total_available(pool_t *pool)
{
uint64_t cached = 0;
+ const int max_threads = odp_thread_count_max();
- for (int i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ for (int i = 0; i < max_threads; i++)
cached += odp_atomic_load_u32(&pool->local_cache[i].cache_num);
return cached;
@@ -328,7 +343,7 @@ int _odp_pool_init_global(void)
return -1;
}
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
LOCK_INIT(&pool->lock);
@@ -357,7 +372,7 @@ int _odp_pool_term_global(void)
if (_odp_pool_glb == NULL)
return 0;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -385,7 +400,7 @@ int _odp_pool_init_local(void)
memset(&local, 0, sizeof(pool_local_t));
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool = _odp_pool_entry_from_idx(i);
local.cache[i] = &pool->local_cache[thr_id];
cache_init(local.cache[i]);
@@ -399,7 +414,7 @@ int _odp_pool_term_local(void)
{
int i;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
cache_flush(local.cache[i], pool);
@@ -416,7 +431,7 @@ static pool_t *reserve_pool(uint32_t shmflags, uint8_t pool_ext, uint32_t num)
pool_t *pool;
char ring_name[ODP_POOL_NAME_LEN];
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -756,7 +771,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
}
/* Validate requested buffer alignment */
- if (align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
+ if (align > CONFIG_BUFFER_ALIGN_MAX ||
align != _ODP_ROUNDDOWN_POWER2(align, align)) {
_ODP_ERR("Bad align requirement\n");
return ODP_POOL_INVALID;
@@ -1166,6 +1181,7 @@ odp_pool_t odp_pool_create(const char *name, const odp_pool_param_t *params)
int odp_pool_destroy(odp_pool_t pool_hdl)
{
pool_t *pool = _odp_pool_entry(pool_hdl);
+ const int max_threads = odp_thread_count_max();
int i;
if (pool == NULL)
@@ -1183,7 +1199,7 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
pool->mem_src_ops->unbind(pool->mem_src_data);
/* Make sure local caches are empty */
- for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
+ for (i = 0; i < max_threads; i++)
cache_flush(&pool->local_cache[i], pool);
if (pool->pool_ext == 0)
@@ -1205,7 +1221,7 @@ odp_pool_t odp_pool_lookup(const char *name)
uint32_t i;
pool_t *pool;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -1463,8 +1479,8 @@ int odp_pool_capability(odp_pool_capability_t *capa)
{
odp_pool_stats_opt_t supported_stats;
uint32_t max_seg_len = CONFIG_PACKET_MAX_SEG_LEN;
- /* Reserve one for internal usage */
- int max_pools = ODP_CONFIG_POOLS - 1;
+ /* Reserve pools for internal usage */
+ unsigned int max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
memset(capa, 0, sizeof(odp_pool_capability_t));
@@ -1483,7 +1499,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
/* Buffer pools */
capa->buf.max_pools = max_pools;
- capa->buf.max_align = ODP_CONFIG_BUFFER_ALIGN_MAX;
+ capa->buf.max_align = CONFIG_BUFFER_ALIGN_MAX;
capa->buf.max_size = MAX_SIZE;
capa->buf.max_num = CONFIG_POOL_MAX_NUM;
capa->buf.max_uarea_size = MAX_UAREA_SIZE;
@@ -1614,7 +1630,7 @@ void odp_pool_print_all(void)
_ODP_PRINT("-----------------\n");
_ODP_PRINT(" idx %-*s type free tot cache buf_len ext\n", col_width, "name");
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
LOCK(&pool->lock);
@@ -1666,13 +1682,12 @@ uint64_t odp_pool_to_u64(odp_pool_t hdl)
unsigned int odp_pool_max_index(void)
{
- return ODP_CONFIG_POOLS - 1;
+ return CONFIG_POOLS - 1;
}
int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
{
pool_t *pool;
- uint16_t first, last;
if (odp_unlikely(pool_hdl == ODP_POOL_INVALID)) {
_ODP_ERR("Invalid pool handle\n");
@@ -1684,14 +1699,9 @@ int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
}
pool = _odp_pool_entry(pool_hdl);
- first = stats->thread.first;
- last = stats->thread.last;
-
- memset(stats, 0, sizeof(odp_pool_stats_t));
- /* Restore input parameters */
- stats->thread.first = first;
- stats->thread.last = last;
+ /* Zero everything else but per thread statistics */
+ memset(stats, 0, offsetof(odp_pool_stats_t, thread));
if (pool->params.stats.bit.available)
stats->available = ring_ptr_len(&pool->ring->hdr);
@@ -1800,7 +1810,7 @@ static pool_t *find_pool(_odp_event_hdr_t *event_hdr)
int i;
uint8_t *ptr = (uint8_t *)event_hdr;
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ for (i = 0; i < CONFIG_POOLS; i++) {
pool_t *pool = _odp_pool_entry_from_idx(i);
if (pool->reserved == 0)
@@ -1877,7 +1887,7 @@ int odp_pool_ext_capability(odp_pool_type_t type, odp_pool_ext_capability_t *cap
memset(capa, 0, sizeof(odp_pool_ext_capability_t));
capa->type = type;
- capa->max_pools = ODP_CONFIG_POOLS - 1;
+ capa->max_pools = CONFIG_POOLS - CONFIG_INTERNAL_POOLS;
capa->min_cache_size = 0;
capa->max_cache_size = CONFIG_POOL_CACHE_MAX_SIZE;
capa->stats.all = supported_stats.all;
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index d289f1e21..7bd8cbfed 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -96,7 +96,7 @@ ODP_STATIC_ASSERT((QUEUE_LOAD * CONFIG_MAX_SCHED_QUEUES) < UINT32_MAX, "Load_val
#define RANDOM_TBL_SIZE 128
/* Maximum number of packet IO interfaces */
-#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
+#define NUM_PKTIO CONFIG_PKTIO_ENTRIES
/* Maximum pktin index. Needs to fit into 8 bits. */
#define MAX_PKTIN_INDEX 255
@@ -1628,7 +1628,7 @@ static inline int schedule_run(odp_queue_t *out_queue, odp_event_t out_ev[], uin
static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
odp_event_t out_ev[], uint32_t max_num)
{
- odp_time_t next, wtime;
+ odp_time_t next;
int first = 1;
int ret;
@@ -1647,8 +1647,7 @@ static inline int schedule_loop(odp_queue_t *out_queue, uint64_t wait,
break;
if (first) {
- wtime = odp_time_local_from_ns(wait);
- next = odp_time_sum(odp_time_local(), wtime);
+ next = odp_time_add_ns(odp_time_local(), wait);
first = 0;
continue;
}
@@ -1677,11 +1676,9 @@ static inline int schedule_loop_sleep(odp_queue_t *out_queue, uint64_t wait,
if (first) {
start = odp_time_local();
- start_sleep =
- odp_time_sum(start,
- odp_time_local_from_ns(sched->powersave.poll_time));
+ start_sleep = odp_time_add_ns(start, sched->powersave.poll_time);
if (wait != ODP_SCHED_WAIT)
- end = odp_time_sum(start, odp_time_local_from_ns(wait));
+ end = odp_time_add_ns(start, wait);
first = 0;
continue;
}
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 28245f66b..6d60c048f 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -67,7 +67,7 @@ typedef struct {
odp_spinlock_t init_lock;
/** Per thread state */
sched_scalable_thread_state_t thread_state[MAXTHREADS];
- uint16_t poll_count[ODP_CONFIG_PKTIO_ENTRIES];
+ uint16_t poll_count[CONFIG_PKTIO_ENTRIES];
/* Scheduler interface config options (not used in fast path) */
schedule_config_t config_if;
} sched_global_t;
@@ -713,7 +713,7 @@ static void pktio_start(int pktio_idx,
queue_entry_t *qentry;
sched_elem_t *elem;
- _ODP_ASSERT(pktio_idx < ODP_CONFIG_PKTIO_ENTRIES);
+ _ODP_ASSERT(pktio_idx < CONFIG_PKTIO_ENTRIES);
for (i = 0; i < num_in_queue; i++) {
rxq = in_queue_idx[i];
_ODP_ASSERT(rxq < ODP_PKTIN_MAX_QUEUES);
@@ -1221,7 +1221,6 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t ev[],
sched_scalable_thread_state_t *ts;
int n;
odp_time_t start;
- odp_time_t delta;
odp_time_t deadline;
ts = _odp_sched_ts;
@@ -1262,8 +1261,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait, odp_event_t ev[],
if (odp_likely(n > 0))
return n;
- delta = odp_time_local_from_ns(wait);
- deadline = odp_time_sum(start, delta);
+ deadline = odp_time_add_ns(start, wait);
while (odp_time_cmp(deadline, odp_time_local()) > 0) {
n = _schedule(from, ev, num);
@@ -1281,7 +1279,6 @@ static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
sched_scalable_thread_state_t *ts;
int n;
odp_time_t start;
- odp_time_t delta;
odp_time_t deadline;
ts = _odp_sched_ts;
@@ -1324,8 +1321,7 @@ static odp_event_t schedule(odp_queue_t *from, uint64_t wait)
if (odp_likely(n > 0))
return ev;
- delta = odp_time_local_from_ns(wait);
- deadline = odp_time_sum(start, delta);
+ deadline = odp_time_add_ns(start, wait);
while (odp_time_cmp(deadline, odp_time_local()) > 0) {
n = _schedule(from, &ev, num);
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index c4d18e66e..030e95171 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -38,7 +38,7 @@
#define NUM_THREAD ODP_THREAD_COUNT_MAX
#define NUM_QUEUE CONFIG_MAX_SCHED_QUEUES
-#define NUM_PKTIO ODP_CONFIG_PKTIO_ENTRIES
+#define NUM_PKTIO CONFIG_PKTIO_ENTRIES
#define NUM_ORDERED_LOCKS 1
#define NUM_STATIC_GROUP 3
#define NUM_GROUP (NUM_STATIC_GROUP + 9)
@@ -683,8 +683,7 @@ static int schedule_multi(odp_queue_t *from, uint64_t wait,
continue;
if (update_t1) {
- t1 = odp_time_sum(odp_time_local(),
- odp_time_local_from_ns(wait));
+ t1 = odp_time_add_ns(odp_time_local(), wait);
update_t1 = 0;
continue;
}
@@ -800,6 +799,11 @@ static odp_schedule_group_t schedule_group_create(const char *name,
if (!sched_group->s.group[i].allocated) {
char *grp_name = sched_group->s.group[i].name;
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#if __GNUC__ >= 13
+#pragma GCC diagnostic ignored "-Wstringop-overflow"
+#endif
if (name == NULL) {
grp_name[0] = 0;
} else {
@@ -807,6 +811,7 @@ static odp_schedule_group_t schedule_group_create(const char *name,
ODP_SCHED_GROUP_NAME_LEN - 1);
grp_name[ODP_SCHED_GROUP_NAME_LEN - 1] = 0;
}
+#pragma GCC diagnostic pop
odp_thrmask_copy(&sched_group->s.group[i].mask, thrmask);
sched_group->s.group[i].allocated = 1;
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index bb0eaa9b1..52f1000f1 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -599,18 +599,33 @@ void odp_sys_config_print(void)
_ODP_PRINT("\n\nodp_config_internal.h values:\n"
"-----------------------------\n");
- _ODP_PRINT("ODP_CONFIG_POOLS: %i\n", ODP_CONFIG_POOLS);
- _ODP_PRINT("CONFIG_MAX_PLAIN_QUEUES: %i\n", CONFIG_MAX_PLAIN_QUEUES);
- _ODP_PRINT("CONFIG_MAX_SCHED_QUEUES: %i\n", CONFIG_MAX_SCHED_QUEUES);
- _ODP_PRINT("CONFIG_QUEUE_MAX_ORD_LOCKS: %i\n", CONFIG_QUEUE_MAX_ORD_LOCKS);
- _ODP_PRINT("ODP_CONFIG_PKTIO_ENTRIES: %i\n", ODP_CONFIG_PKTIO_ENTRIES);
- _ODP_PRINT("CONFIG_PACKET_HEADROOM: %i\n", CONFIG_PACKET_HEADROOM);
- _ODP_PRINT("CONFIG_PACKET_TAILROOM: %i\n", CONFIG_PACKET_TAILROOM);
- _ODP_PRINT("CONFIG_SHM_BLOCKS: %i\n", CONFIG_SHM_BLOCKS);
- _ODP_PRINT("CONFIG_BURST_SIZE: %i\n", CONFIG_BURST_SIZE);
- _ODP_PRINT("CONFIG_POOL_MAX_NUM: %i\n", CONFIG_POOL_MAX_NUM);
- _ODP_PRINT("CONFIG_POOL_CACHE_MAX_SIZE: %i\n", CONFIG_POOL_CACHE_MAX_SIZE);
- _ODP_PRINT("CONFIG_TIMER_128BIT_ATOMICS: %i\n", CONFIG_TIMER_128BIT_ATOMICS);
- _ODP_PRINT("CONFIG_TIMER_PROFILE_INLINE: %i\n", CONFIG_TIMER_PROFILE_INLINE);
+ _ODP_PRINT("CONFIG_NUM_CPU_IDS: %i\n", CONFIG_NUM_CPU_IDS);
+ _ODP_PRINT("CONFIG_INTERNAL_QUEUES: %i\n", CONFIG_INTERNAL_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_PLAIN_QUEUES: %i\n", CONFIG_MAX_PLAIN_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_SCHED_QUEUES: %i\n", CONFIG_MAX_SCHED_QUEUES);
+ _ODP_PRINT("CONFIG_MAX_QUEUES: %i\n", CONFIG_MAX_QUEUES);
+ _ODP_PRINT("CONFIG_QUEUE_MAX_ORD_LOCKS: %i\n", CONFIG_QUEUE_MAX_ORD_LOCKS);
+ _ODP_PRINT("CONFIG_MAX_DMA_SESSIONS: %i\n", CONFIG_MAX_DMA_SESSIONS);
+ _ODP_PRINT("CONFIG_INTERNAL_STASHES: %i\n", CONFIG_INTERNAL_STASHES);
+ _ODP_PRINT("CONFIG_MAX_STASHES: %i\n", CONFIG_MAX_STASHES);
+ _ODP_PRINT("CONFIG_PKTIO_ENTRIES: %i\n", CONFIG_PKTIO_ENTRIES);
+ _ODP_PRINT("CONFIG_BUFFER_ALIGN_MAX: %i\n", CONFIG_BUFFER_ALIGN_MAX);
+ _ODP_PRINT("CONFIG_PACKET_HEADROOM: %i\n", CONFIG_PACKET_HEADROOM);
+ _ODP_PRINT("CONFIG_PACKET_TAILROOM: %i\n", CONFIG_PACKET_TAILROOM);
+ _ODP_PRINT("CONFIG_PACKET_SEG_SIZE: %i\n", CONFIG_PACKET_SEG_SIZE);
+ _ODP_PRINT("CONFIG_PACKET_MAX_SEG_LEN: %i\n", CONFIG_PACKET_MAX_SEG_LEN);
+ _ODP_PRINT("CONFIG_PACKET_SEG_LEN_MIN: %i\n", CONFIG_PACKET_SEG_LEN_MIN);
+ _ODP_PRINT("CONFIG_PACKET_VECTOR_MAX_SIZE: %i\n", CONFIG_PACKET_VECTOR_MAX_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_SHM_BLOCKS: %i\n", CONFIG_INTERNAL_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_SHM_BLOCKS: %i\n", CONFIG_SHM_BLOCKS);
+ _ODP_PRINT("CONFIG_BURST_SIZE: %i\n", CONFIG_BURST_SIZE);
+ _ODP_PRINT("CONFIG_INTERNAL_POOLS: %i\n", CONFIG_INTERNAL_POOLS);
+ _ODP_PRINT("CONFIG_POOLS: %i\n", CONFIG_POOLS);
+ _ODP_PRINT("CONFIG_POOL_MAX_NUM: %i\n", CONFIG_POOL_MAX_NUM);
+ _ODP_PRINT("CONFIG_POOL_CACHE_MAX_SIZE: %i\n", CONFIG_POOL_CACHE_MAX_SIZE);
+ _ODP_PRINT("CONFIG_POOL_STATISTICS: %i\n", CONFIG_POOL_STATISTICS);
+ _ODP_PRINT("CONFIG_IPSEC_MAX_NUM_SA: %i\n", CONFIG_IPSEC_MAX_NUM_SA);
+ _ODP_PRINT("CONFIG_TIMER_128BIT_ATOMICS: %i\n", CONFIG_TIMER_128BIT_ATOMICS);
+ _ODP_PRINT("CONFIG_TIMER_PROFILE_INLINE: %i\n", CONFIG_TIMER_PROFILE_INLINE);
_ODP_PRINT("\n");
}
diff --git a/platform/linux-generic/odp_thread.c b/platform/linux-generic/odp_thread.c
index bdfd758d7..88aec8f06 100644
--- a/platform/linux-generic/odp_thread.c
+++ b/platform/linux-generic/odp_thread.c
@@ -257,11 +257,31 @@ int odp_thread_count(void)
return thread_globals->num;
}
+int odp_thread_control_count(void)
+{
+ return thread_globals->num_control;
+}
+
+int odp_thread_worker_count(void)
+{
+ return thread_globals->num_worker;
+}
+
int odp_thread_count_max(void)
{
return thread_globals->num_max;
}
+int odp_thread_control_count_max(void)
+{
+ return thread_globals->num_max;
+}
+
+int odp_thread_worker_count_max(void)
+{
+ return thread_globals->num_max;
+}
+
int odp_thrmask_worker(odp_thrmask_t *mask)
{
odp_thrmask_copy(mask, &thread_globals->worker);
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index eaedcea8c..daf187390 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2022, Nokia
+ * Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,7 +17,6 @@
#include <odp/api/atomic.h>
#include <odp/api/cpu.h>
#include <odp/api/debug.h>
-#include <odp/api/deprecated.h>
#include <odp/api/event.h>
#include <odp/api/hints.h>
#include <odp/api/pool.h>
@@ -170,7 +169,6 @@ typedef struct {
typedef struct timer_pool_s {
/* Put frequently accessed fields in the first cache line */
uint64_t nsec_per_scan;
- odp_time_t start_time;
odp_atomic_u64_t cur_tick;/* Current tick value */
uint64_t min_rel_tck;
uint64_t max_rel_tck;
@@ -184,7 +182,12 @@ typedef struct timer_pool_s {
odp_timer_pool_param_t param;
char name[ODP_TIMER_POOL_NAME_LEN];
timer_t timerid;
- int notify_overrun;
+ /*
+ * Timer pool overrun notification (debug print). Initialize to 0
+ * (don't notify). When value is 0 and a timer is started, set to 1
+ * (notify). When notification is done, set to 2 (don't notify).
+ */
+ odp_atomic_u32_t notify_overrun;
int owner;
pthread_t thr_pthread; /* pthread_t of timer thread */
pid_t thr_pid; /* gettid() for timer thread */
@@ -576,14 +579,20 @@ static odp_event_t timer_set_unused(timer_pool_t *tp, uint32_t idx)
return old_event;
}
-static inline odp_event_t timer_free(timer_pool_t *tp, uint32_t idx)
+int odp_timer_free(odp_timer_t hdl)
{
+ timer_pool_t *tp = handle_to_tp(hdl);
+ uint32_t idx = handle_to_idx(hdl, tp);
_odp_timer_t *tim = &tp->timers[idx];
tick_buf_t *tb = &tp->tick_buf[idx];
/* Free the timer by setting timer state to unused and
* grab any timeout event */
odp_event_t old_event = timer_set_unused(tp, idx);
+ if (old_event != ODP_EVENT_INVALID) {
+ _ODP_ERR("Timer is active\n");
+ return -1;
+ }
/* Remove timer from queue */
_odp_queue_fn->timer_rem(tim->queue);
@@ -602,7 +611,7 @@ static inline odp_event_t timer_free(timer_pool_t *tp, uint32_t idx)
tp->num_alloc--;
odp_spinlock_unlock(&tp->lock);
- return old_event;
+ return 0;
}
static odp_event_t timer_cancel(timer_pool_t *tp, uint32_t idx)
@@ -779,22 +788,6 @@ static inline void timer_pool_scan(timer_pool_t *tp, uint64_t tick)
* Inline timer processing
*****************************************************************************/
-static inline uint64_t time_nsec(timer_pool_t *tp, odp_time_t now)
-{
- odp_time_t start = tp->start_time;
-
- return odp_time_diff_ns(now, start);
-}
-
-static inline uint64_t current_nsec(timer_pool_t *tp)
-{
- odp_time_t now;
-
- now = odp_time_global();
-
- return time_nsec(tp, now);
-}
-
static inline void timer_pool_scan_inline(int num, odp_time_t now)
{
timer_pool_t *tp;
@@ -819,7 +812,7 @@ static inline void timer_pool_scan_inline(int num, odp_time_t now)
continue;
}
- nsec = time_nsec(tp, now);
+ nsec = odp_time_to_ns(now);
new_tick = nsec / tp->nsec_per_scan;
old_tick = odp_atomic_load_u64(&tp->cur_tick);
diff = new_tick - old_tick;
@@ -828,14 +821,15 @@ static inline void timer_pool_scan_inline(int num, odp_time_t now)
continue;
if (odp_atomic_cas_u64(&tp->cur_tick, &old_tick, new_tick)) {
- if (tp->notify_overrun && diff > 1) {
+ if (ODP_DEBUG_PRINT && odp_atomic_load_u32(&tp->notify_overrun) == 1 &&
+ diff > 1) {
if (old_tick == 0) {
_ODP_DBG("Timer pool (%s) missed %" PRIi64 " scans in start up\n",
tp->name, diff - 1);
} else {
_ODP_DBG("Timer pool (%s) resolution too high: %" PRIi64 " scans missed\n",
tp->name, diff - 1);
- tp->notify_overrun = 0;
+ odp_atomic_store_u32(&tp->notify_overrun, 2);
}
}
timer_pool_scan(tp, nsec);
@@ -896,12 +890,12 @@ static inline void timer_run_posix(timer_pool_t *tp)
uint64_t nsec;
int overrun;
- if (tp->notify_overrun) {
+ if (ODP_DEBUG_PRINT && odp_atomic_load_u32(&tp->notify_overrun) == 1) {
overrun = timer_getoverrun(tp->timerid);
if (overrun) {
_ODP_DBG("\n\t%d ticks overrun on timer pool \"%s\", timer resolution too high\n",
overrun, tp->name);
- tp->notify_overrun = 0;
+ odp_atomic_store_u32(&tp->notify_overrun, 2);
}
}
@@ -911,7 +905,7 @@ static inline void timer_run_posix(timer_pool_t *tp)
for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / sizeof(array[0]))
__builtin_prefetch(&array[i], 0, 0);
- nsec = current_nsec(tp);
+ nsec = odp_time_global_ns();
timer_pool_scan(tp, nsec);
}
@@ -1089,6 +1083,12 @@ static void posix_timer_start(timer_pool_t *tp)
* processed. Warm up helps avoiding overrun on the first timeout. */
while (odp_atomic_load_acq_u32(&tp->thr_ready) == 0)
sched_yield();
+
+ if (ODP_DEBUG_PRINT) {
+ uint32_t old_val = 0;
+
+ odp_atomic_cas_u32(&tp->notify_overrun, &old_val, 1);
+ }
}
static odp_timer_pool_t timer_pool_new(const char *name, const odp_timer_pool_param_t *param)
@@ -1234,8 +1234,8 @@ static odp_timer_pool_t timer_pool_new(const char *name, const odp_timer_pool_pa
}
tp->num_alloc = 0;
odp_atomic_init_u32(&tp->high_wm, 0);
+ odp_atomic_init_u32(&tp->notify_overrun, 0);
tp->first_free = 0;
- tp->notify_overrun = 1;
tp->owner = -1;
if (param->priv)
@@ -1259,7 +1259,6 @@ static odp_timer_pool_t timer_pool_new(const char *name, const odp_timer_pool_pa
}
tp->tp_idx = tp_idx;
odp_spinlock_init(&tp->lock);
- tp->start_time = odp_time_global();
odp_ticketlock_lock(&timer_global->lock);
@@ -1452,23 +1451,28 @@ void odp_timer_pool_start(void)
/* Nothing to do here, timer pools are started by the create call */
}
-void odp_timer_pool_destroy(odp_timer_pool_t tpid)
+int odp_timer_pool_start_multi(odp_timer_pool_t timer_pool[], int num)
{
- odp_timer_pool_del(timer_pool_from_hdl(tpid));
+ _ODP_ASSERT(timer_pool != NULL);
+ _ODP_ASSERT(num > 0);
+ if (ODP_DEBUG) {
+ for (int i = 0; i < num; i++)
+ _ODP_ASSERT(timer_pool[i] != ODP_TIMER_POOL_INVALID);
+ }
+
+ /* Nothing to do here, timer pools are started by the create call. */
+ return num;
}
-uint64_t odp_timer_current_tick(odp_timer_pool_t tpid)
+void odp_timer_pool_destroy(odp_timer_pool_t tpid)
{
- timer_pool_t *tp = timer_pool_from_hdl(tpid);
-
- return current_nsec(tp);
+ odp_timer_pool_del(timer_pool_from_hdl(tpid));
}
int odp_timer_sample_ticks(odp_timer_pool_t timer_pool[], uint64_t tick[], uint64_t clk_count[],
int num)
{
- timer_pool_t *tp[MAX_TIMER_POOLS];
- odp_time_t now;
+ uint64_t nsec;
int i;
if (num <= 0 || num > MAX_TIMER_POOLS) {
@@ -1481,14 +1485,12 @@ int odp_timer_sample_ticks(odp_timer_pool_t timer_pool[], uint64_t tick[], uint6
_ODP_ERR("Invalid timer pool\n");
return -1;
}
-
- tp[i] = timer_pool_from_hdl(timer_pool[i]);
}
- now = odp_time_global();
+ nsec = odp_time_global_ns();
for (i = 0; i < num; i++) {
- tick[i] = time_nsec(tp[i], now);
+ tick[i] = nsec;
if (clk_count)
clk_count[i] = tick[i];
@@ -1545,52 +1547,11 @@ odp_timer_t odp_timer_alloc(odp_timer_pool_t tpid, odp_queue_t queue, const void
return timer_alloc(tp, queue, user_ptr);
}
-odp_event_t odp_timer_free(odp_timer_t hdl)
-{
- timer_pool_t *tp = handle_to_tp(hdl);
- uint32_t idx = handle_to_idx(hdl, tp);
-
- return timer_free(tp, idx);
-}
-
-int ODP_DEPRECATE(odp_timer_set_abs)(odp_timer_t hdl, uint64_t abs_tck, odp_event_t *tmo_ev)
-{
- timer_pool_t *tp = handle_to_tp(hdl);
- uint64_t cur_tick = current_nsec(tp);
- uint32_t idx = handle_to_idx(hdl, tp);
-
- if (odp_unlikely(abs_tck < cur_tick + tp->min_rel_tck))
- return ODP_TIMER_TOO_NEAR;
- if (odp_unlikely(abs_tck > cur_tick + tp->max_rel_tck))
- return ODP_TIMER_TOO_FAR;
- if (timer_reset(idx, abs_tck, tmo_ev, tp))
- return ODP_TIMER_SUCCESS;
- else
- return ODP_TIMER_FAIL;
-}
-
-int ODP_DEPRECATE(odp_timer_set_rel)(odp_timer_t hdl, uint64_t rel_tck, odp_event_t *tmo_ev)
-{
- timer_pool_t *tp = handle_to_tp(hdl);
- uint64_t cur_tick = current_nsec(tp);
- uint64_t abs_tck = cur_tick + rel_tck;
- uint32_t idx = handle_to_idx(hdl, tp);
-
- if (odp_unlikely(rel_tck < tp->min_rel_tck))
- return ODP_TIMER_TOO_NEAR;
- if (odp_unlikely(rel_tck > tp->max_rel_tck))
- return ODP_TIMER_TOO_FAR;
- if (timer_reset(idx, abs_tck, tmo_ev, tp))
- return ODP_TIMER_SUCCESS;
- else
- return ODP_TIMER_FAIL;
-}
-
int odp_timer_start(odp_timer_t timer, const odp_timer_start_t *start_param)
{
uint64_t abs_tick, rel_tick;
timer_pool_t *tp = handle_to_tp(timer);
- uint64_t cur_tick = current_nsec(tp);
+ uint64_t cur_tick = odp_time_global_ns();
uint32_t idx = handle_to_idx(timer, tp);
odp_event_t tmo_ev = start_param->tmo_ev;
@@ -1620,6 +1581,12 @@ int odp_timer_start(odp_timer_t timer, const odp_timer_start_t *start_param)
odp_event_free(tmo_ev);
}
+ if (ODP_DEBUG_PRINT) {
+ uint32_t old_val = 0;
+
+ odp_atomic_cas_u32(&tp->notify_overrun, &old_val, 1);
+ }
+
return ODP_TIMER_SUCCESS;
}
@@ -1627,7 +1594,7 @@ int odp_timer_restart(odp_timer_t timer, const odp_timer_start_t *start_param)
{
uint64_t abs_tick, rel_tick;
timer_pool_t *tp = handle_to_tp(timer);
- uint64_t cur_tick = current_nsec(tp);
+ uint64_t cur_tick = odp_time_global_ns();
uint32_t idx = handle_to_idx(timer, tp);
if (start_param->tick_type == ODP_TIMER_TICK_ABS) {
@@ -1658,7 +1625,7 @@ int odp_timer_periodic_start(odp_timer_t timer, const odp_timer_periodic_start_t
{
uint64_t abs_tick, period_ns;
timer_pool_t *tp = handle_to_tp(timer);
- uint64_t cur_tick = current_nsec(tp);
+ uint64_t cur_tick = odp_time_global_ns();
uint32_t idx = handle_to_idx(timer, tp);
odp_event_t tmo_ev = start_param->tmo_ev;
_odp_timer_t *tim = &tp->timers[idx];
@@ -1832,7 +1799,7 @@ uint64_t odp_timeout_to_u64(odp_timeout_t tmo)
return _odp_pri(tmo);
}
-int odp_timeout_fresh(odp_timeout_t tmo)
+int ODP_DEPRECATE(odp_timeout_fresh)(odp_timeout_t tmo)
{
const odp_timeout_hdr_t *hdr = timeout_hdr(tmo);
odp_timer_t hdl = hdr->timer;
diff --git a/platform/linux-generic/pktio/stats/ethtool_stats.c b/platform/linux-generic/pktio/stats/ethtool_stats.c
index deb00bf59..bbf0729f1 100644
--- a/platform/linux-generic/pktio/stats/ethtool_stats.c
+++ b/platform/linux-generic/pktio/stats/ethtool_stats.c
@@ -29,9 +29,11 @@
static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
{
- struct {
+ union {
struct ethtool_sset_info hdr;
- uint32_t buf[1]; /* overlaps with hdr.data[] */
+ /* Reserve space for hdr.data. */
+ uint8_t buf[sizeof(struct ethtool_sset_info) +
+ sizeof(((struct ethtool_sset_info *)0)->data[0])];
} sset_info;
struct ethtool_drvinfo drvinfo;
uint32_t len;
diff --git a/platform/linux-generic/test/pktio_ipc/ipc_common.c b/platform/linux-generic/test/pktio_ipc/ipc_common.c
index d30aaf473..f693feeb2 100644
--- a/platform/linux-generic/test/pktio_ipc/ipc_common.c
+++ b/platform/linux-generic/test/pktio_ipc/ipc_common.c
@@ -18,14 +18,12 @@ int ipc_odp_packet_send_or_free(odp_pktio_t pktio,
int sent = 0;
odp_time_t start_time;
odp_time_t end_time;
- odp_time_t wait;
odp_pktout_queue_t pktout;
int i;
memset(&pktout, 0, sizeof(pktout));
start_time = odp_time_local();
- wait = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
- end_time = odp_time_sum(start_time, wait);
+ end_time = odp_time_add_ns(start_time, ODP_TIME_SEC_IN_NS);
if (odp_pktout_queue(pktio, &pktout, 1) != 1) {
ODPH_ERR("no output queue\n");
diff --git a/test/miscellaneous/odp_api_headers.c b/test/miscellaneous/odp_api_headers.c
index 843de2a02..0dd6b0a2e 100644
--- a/test/miscellaneous/odp_api_headers.c
+++ b/test/miscellaneous/odp_api_headers.c
@@ -8,7 +8,7 @@
#include <odp_api.h>
#include <odp/helper/odph_api.h>
-int main(void)
+int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED)
{
odp_instance_t inst;
diff --git a/test/performance/bench_common.c b/test/performance/bench_common.c
index 5a948c9c7..f838954ab 100644
--- a/test/performance/bench_common.c
+++ b/test/performance/bench_common.c
@@ -51,36 +51,35 @@ int bench_run(void *arg)
bench_suite_t *suite = arg;
const uint64_t repeat_count = suite->repeat_count;
const odp_bool_t meas_time = suite->measure_time;
+ double result;
printf("\nAverage %s per function call\n", meas_time ? "time (nsec)" : "CPU cycles");
printf("-------------------------------------------------\n");
- /* Run each test twice. Results from the first warm-up round are ignored. */
- for (int i = 0; i < 2; i++) {
+ for (int j = 0; j < suite->num_bench; j++) {
+ int ret;
+ const char *desc;
+ const bench_info_t *bench = &suite->bench[j];
+ uint64_t max_rounds = suite->rounds;
uint64_t total = 0;
- uint64_t round = 1;
-
- for (int j = 0; j < suite->num_bench; round++) {
- int ret;
- const char *desc;
- const bench_info_t *bench = &suite->bench[j];
- uint64_t max_rounds = suite->rounds;
-
- if (bench->max_rounds && bench->max_rounds < max_rounds)
- max_rounds = bench->max_rounds;
-
- /* Run selected test indefinitely */
- if (suite->indef_idx) {
- if ((j + 1) != suite->indef_idx) {
- j++;
- continue;
- }
- bench_run_indef(&suite->bench[j], &suite->exit_worker);
- return 0;
+
+ if (bench->max_rounds && bench->max_rounds < max_rounds)
+ max_rounds = bench->max_rounds;
+
+ /* Run selected test indefinitely */
+ if (suite->indef_idx) {
+ if ((j + 1) != suite->indef_idx) {
+ j++;
+ continue;
}
+ bench_run_indef(&suite->bench[j], &suite->exit_worker);
+ return 0;
+ }
- desc = bench->desc != NULL ? bench->desc : bench->name;
+ desc = bench->desc != NULL ? bench->desc : bench->name;
+ /* The zeroeth round is a warmup round that will be ignored */
+ for (uint64_t round = 0; round <= max_rounds; round++) {
if (bench->init != NULL)
bench->init();
@@ -105,29 +104,20 @@ int bench_run(void *arg)
return -1;
}
+ if (odp_unlikely(round == 0))
+ continue;
if (meas_time)
total += odp_time_diff_ns(t2, t1);
else
total += odp_cpu_cycles_diff(c2, c1);
+ }
- if (round >= max_rounds) {
- double result;
-
- /* Each benchmark runs internally 'repeat_count' times. */
- result = ((double)total) / (max_rounds * repeat_count);
-
- /* No print or results from warm-up round */
- if (i > 0) {
- printf("[%02d] odp_%-26s: %12.2f\n", j + 1, desc, result);
+ /* Each benchmark runs internally 'repeat_count' times. */
+ result = ((double)total) / (max_rounds * repeat_count);
- if (suite->result)
- suite->result[j] = result;
- }
- j++;
- total = 0;
- round = 1;
- }
- }
+ printf("[%02d] odp_%-26s: %12.2f\n", j + 1, desc, result);
+ if (suite->result)
+ suite->result[j] = result;
}
printf("\n");
/* Print dummy result to prevent compiler to optimize it away*/
@@ -205,29 +195,31 @@ int bench_tm_run(void *arg)
printf("\nLatency (nsec) per function call min avg max\n");
printf("------------------------------------------------------------------------------\n");
- /* Run each test twice. Results from the first warm-up round are ignored. */
- for (uint32_t i = 0; i < 2; i++) {
- for (uint32_t j = 0; j < suite->num_bench; j++) {
- const bench_tm_info_t *bench = &suite->bench[j];
- uint64_t rounds = suite->rounds;
- bench_tm_result_t res;
+ for (uint32_t j = 0; j < suite->num_bench; j++) {
+ const bench_tm_info_t *bench = &suite->bench[j];
+ uint64_t rounds = suite->rounds;
+ bench_tm_result_t res;
- if (odp_atomic_load_u32(&suite->exit_worker))
- return 0;
+ /* Run only selected test case */
+ if (suite->bench_idx && (j + 1) != suite->bench_idx)
+ continue;
- /* Run only selected test case */
- if (suite->bench_idx && (j + 1) != suite->bench_idx)
- continue;
+ if (bench->cond != NULL && !bench->cond()) {
+ printf("[%02d] %-41s n/a n/a n/a\n",
+ j + 1, bench->name);
+ continue;
+ }
- if (bench->max_rounds && bench->max_rounds < rounds)
- rounds = bench->max_rounds;
+ if (bench->max_rounds && bench->max_rounds < rounds)
+ rounds = bench->max_rounds;
- if (bench->cond != NULL && !bench->cond()) {
- if (i > 0)
- printf("[%02d] %-41s n/a n/a n/a\n",
- j + 1, bench->name);
- continue;
- }
+ /*
+ * Run each test twice.
+ * Results from the first warm-up round are ignored.
+ */
+ for (uint32_t i = 0; i < 2; i++) {
+ if (odp_atomic_load_u32(&suite->exit_worker))
+ return 0;
init_result(&res);
@@ -243,12 +235,9 @@ int bench_tm_run(void *arg)
if (bench->term != NULL)
bench->term();
- /* No print or results from warm-up round */
- if (i > 0) {
- printf("[%02d] %-26s\n", j + 1, bench->name);
- print_results(&res);
- }
}
+ printf("[%02d] %-26s\n", j + 1, bench->name);
+ print_results(&res);
}
printf("\n");
diff --git a/test/performance/odp_bench_misc.c b/test/performance/odp_bench_misc.c
index e21208ff2..64318938a 100644
--- a/test/performance/odp_bench_misc.c
+++ b/test/performance/odp_bench_misc.c
@@ -47,10 +47,17 @@ typedef struct {
/* Common benchmark suite data */
bench_suite_t suite;
- /* Test case input / output data */
+ /* Time stamp 1 */
odp_time_t t1[REPEAT_COUNT];
+ /* Time stamp 2 */
odp_time_t t2[REPEAT_COUNT];
+ /* Resulting time stamp */
odp_time_t t3[REPEAT_COUNT];
+
+ odp_time_t global_short[REPEAT_COUNT];
+ odp_time_t global_long[REPEAT_COUNT];
+
+ /* Integer input / output data */
uint64_t a1[REPEAT_COUNT];
uint64_t a2[REPEAT_COUNT];
uint32_t b1[REPEAT_COUNT];
@@ -253,6 +260,19 @@ static int time_diff_ns(void)
return i;
}
+static int time_add_ns(void)
+{
+ int i;
+ odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t3 = gbl_args->t3;
+ uint64_t *a1 = gbl_args->a1;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ t3[i] = odp_time_add_ns(t1[i], a1[i]);
+
+ return i;
+}
+
static int time_sum(void)
{
int i;
@@ -266,14 +286,28 @@ static int time_sum(void)
return i;
}
-static int time_to_ns(void)
+static int time_to_ns_short(void)
{
int i;
- odp_time_t *t1 = gbl_args->t1;
+ odp_time_t *t = gbl_args->global_short;
uint64_t res = 0;
for (i = 0; i < REPEAT_COUNT; i++)
- res += odp_time_to_ns(t1[i]);
+ res += odp_time_to_ns(t[i]);
+
+ gbl_args->suite.dummy += res;
+
+ return i;
+}
+
+static int time_to_ns_long(void)
+{
+ int i;
+ odp_time_t *t = gbl_args->global_long;
+ uint64_t res = 0;
+
+ for (i = 0; i < REPEAT_COUNT; i++)
+ res += odp_time_to_ns(t[i]);
gbl_args->suite.dummy += res;
@@ -341,6 +375,20 @@ static int time_global_res(void)
return i;
}
+static int time_startup(void)
+{
+ int i;
+ uint64_t *a1 = gbl_args->a1;
+ odp_time_startup_t startup;
+
+ for (i = 0; i < REPEAT_COUNT; i++) {
+ odp_time_startup(&startup);
+ a1[i] = startup.global_ns;
+ }
+
+ return i;
+}
+
static int cpu_id(void)
{
int i;
@@ -697,13 +745,16 @@ bench_info_t test_suite[] = {
BENCH_INFO(time_diff, init_time_global, 0, "time_diff (global)"),
BENCH_INFO(time_diff, init_time_local, 0, "time_diff (local)"),
BENCH_INFO(time_diff_ns, init_time_global, 0, NULL),
+ BENCH_INFO(time_add_ns, init_time_global, 0, NULL),
BENCH_INFO(time_sum, init_time_global, 0, NULL),
- BENCH_INFO(time_to_ns, init_time_global, 0, NULL),
+ BENCH_INFO(time_to_ns_short, NULL, 0, "time_to_ns (short)"),
+ BENCH_INFO(time_to_ns_long, NULL, 0, "time_to_ns (long)"),
BENCH_INFO(time_local_from_ns, init_time_global, 0, NULL),
BENCH_INFO(time_global_from_ns, init_time_global, 0, NULL),
BENCH_INFO(time_cmp, init_time_global, 0, NULL),
BENCH_INFO(time_local_res, NULL, 0, NULL),
BENCH_INFO(time_global_res, NULL, 0, NULL),
+ BENCH_INFO(time_startup, NULL, 0, NULL),
BENCH_INFO(cpu_id, NULL, 0, NULL),
BENCH_INFO(cpu_count, NULL, 0, NULL),
BENCH_INFO(cpu_hz, NULL, 1, NULL),
@@ -884,6 +935,8 @@ int main(int argc, char *argv[])
gbl_args->t1[i] = ODP_TIME_NULL;
gbl_args->t2[i] = ODP_TIME_NULL;
gbl_args->t3[i] = ODP_TIME_NULL;
+ gbl_args->global_short[i] = odp_time_global_from_ns(ODP_TIME_MSEC_IN_NS);
+ gbl_args->global_long[i] = odp_time_global_from_ns(10 * ODP_TIME_SEC_IN_NS);
gbl_args->a1[i] = i;
gbl_args->a2[i] = i;
gbl_args->b1[i] = i;
diff --git a/test/performance/odp_bench_timer.c b/test/performance/odp_bench_timer.c
index b1c43c178..a53671460 100644
--- a/test/performance/odp_bench_timer.c
+++ b/test/performance/odp_bench_timer.c
@@ -170,18 +170,6 @@ static int timeout_from_event(void)
return i;
}
-static int timeout_fresh(void)
-{
- int i;
- odp_timeout_t timeout = gbl_args->timeout;
- uint64_t *a1 = gbl_args->a1;
-
- for (i = 0; i < REPEAT_COUNT; i++)
- a1[i] = odp_timeout_fresh(timeout);
-
- return i;
-}
-
static int timeout_timer(void)
{
int i;
@@ -274,7 +262,6 @@ bench_info_t test_suite[] = {
BENCH_INFO(timer_ns_to_tick, 0, NULL),
BENCH_INFO(timeout_to_event, 0, NULL),
BENCH_INFO(timeout_from_event, 0, NULL),
- BENCH_INFO(timeout_fresh, 0, NULL),
BENCH_INFO(timeout_timer, 0, NULL),
BENCH_INFO(timeout_tick, 0, NULL),
BENCH_INFO(timeout_user_ptr, 0, NULL),
@@ -424,9 +411,12 @@ static int create_timer(void)
return -1;
}
- gbl_args->timer_pool = tp;
+ if (odp_timer_pool_start_multi(&tp, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
- odp_timer_pool_start();
+ gbl_args->timer_pool = tp;
gbl_args->timer_nsec = TIMER_NSEC;
if (TIMER_NSEC < tp_param.min_tmo)
diff --git a/test/performance/odp_dma_perf.c b/test/performance/odp_dma_perf.c
index 7e9d489df..21c9c0558 100644
--- a/test/performance/odp_dma_perf.c
+++ b/test/performance/odp_dma_perf.c
@@ -1,8 +1,14 @@
-/* Copyright (c) 2021-2023, Nokia
- *
- * All rights reserved.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2021-2023 Nokia
+ */
+
+/**
+ * DMA performance tester
*
- * SPDX-License-Identifier: BSD-3-Clause
+ * This tester application can be used to profile the performance of an ODP DMA implementation.
+ * Tester workflow is simple and consists of issuing as many back-to-back DMA transfers as the
+ * implementation allows and then recording key performance statistics (such as function overhead,
+ * latencies etc.).
*/
#ifndef _GNU_SOURCE
@@ -22,13 +28,16 @@
#define PROG_NAME "odp_dma_perf"
enum {
- SYNC = 0U,
- ASYNC
+ SYNC_DMA = 0U,
+ ASYNC_DMA,
+ SW_COPY
};
enum {
- PACKET = 0U,
- MEMORY
+ DENSE_PACKET = 0U,
+ SPARSE_PACKET,
+ DENSE_MEMORY,
+ SPARSE_MEMORY
};
enum {
@@ -41,10 +50,10 @@ enum {
MANY
};
-#define DEF_TRS_TYPE SYNC
+#define DEF_TRS_TYPE SYNC_DMA
#define DEF_SEG_CNT 1U
#define DEF_LEN 1024U
-#define DEF_SEG_TYPE PACKET
+#define DEF_SEG_TYPE DENSE_PACKET
#define DEF_MODE POLL
#define DEF_INFLIGHT 1U
#define DEF_TIME 10U
@@ -53,11 +62,14 @@ enum {
#define MAX_SEGS 1024U
#define MAX_WORKERS 24
+#define MAX_MEMORY (256U * 1024U * 1024U)
#define GIGAS 1000000000
#define MEGAS 1000000
#define KILOS 1000
+#define DATA 0xAA
+
typedef enum {
PRS_OK,
PRS_NOK,
@@ -71,6 +83,7 @@ typedef struct {
uint64_t poll_errs;
uint64_t scheduler_timeouts;
uint64_t transfer_errs;
+ uint64_t data_errs;
uint64_t tot_tm;
uint64_t trs_tm;
uint64_t max_trs_tm;
@@ -100,7 +113,10 @@ typedef struct {
odp_bool_t is_running;
} trs_info_t;
-typedef struct ODP_ALIGNED_CACHE {
+typedef struct sd_s sd_t;
+typedef void (*ver_fn_t)(trs_info_t *info, stats_t *stats);
+
+typedef struct ODP_ALIGNED_CACHE sd_s {
struct {
trs_info_t infos[MAX_SEGS];
odp_dma_seg_t src_seg[MAX_SEGS];
@@ -126,9 +142,19 @@ typedef struct ODP_ALIGNED_CACHE {
odp_shm_t dst_shm;
void *src;
void *dst;
+ void *src_high;
+ void *dst_high;
+ void *cur_src;
+ void *cur_dst;
+ uint64_t shm_size;
+ uint8_t seg_type;
} seg;
odp_schedule_group_t grp;
+ /* Prepare single transfer. */
+ void (*prep_trs_fn)(sd_t *sd, trs_info_t *info);
+ /* Verify single transfer. */
+ ver_fn_t ver_fn;
} sd_t;
typedef struct prog_config_s prog_config_t;
@@ -170,6 +196,7 @@ typedef struct prog_config_s {
odp_dma_compl_mode_t compl_mode_mask;
odp_pool_t src_pool;
odp_pool_t dst_pool;
+ uint64_t shm_size;
uint32_t num_in_segs;
uint32_t num_out_segs;
uint32_t src_seg_len;
@@ -177,7 +204,10 @@ typedef struct prog_config_s {
uint32_t num_inflight;
uint32_t time_sec;
uint32_t num_sessions;
+ uint32_t src_cache_size;
+ uint32_t dst_cache_size;
int num_workers;
+ odp_bool_t is_verify;
uint8_t trs_type;
uint8_t seg_type;
uint8_t compl_mode;
@@ -251,15 +281,17 @@ static void print_usage(void)
"\n"
" E.g. " PROG_NAME "\n"
" " PROG_NAME " -s 10240\n"
- " " PROG_NAME " -t 0 -i 1 -o 1 -s 51200 -S 1 -f 64 -T 10\n"
+ " " PROG_NAME " -t 0 -i 1 -o 1 -s 51200 -S 2 -f 64 -T 10\n"
" " PROG_NAME " -t 1 -i 10 -o 10 -s 4096 -S 0 -m 1 -f 10 -c 4 -p 1\n"
+ " " PROG_NAME " -t 2 -i 10 -o 1 -s 1024 -S 3 -f 10 -c 4 -p 1\n"
"\n"
"Optional OPTIONS:\n"
"\n"
" -t, --trs_type Transfer type for test data. %u by default.\n"
" Types:\n"
- " 0: synchronous\n"
- " 1: asynchronous\n"
+ " 0: synchronous DMA\n"
+ " 1: asynchronous DMA\n"
+ " 2: SW memory copy\n"
" -i, --num_in_seg Number of input segments to transfer. 0 means the maximum\n"
" count supported by the implementation. %u by default.\n"
" -o, --num_out_seg Number of output segments to transfer to. 0 means the\n"
@@ -269,10 +301,16 @@ static void print_usage(void)
" segment length supported by the implementation. The actual\n"
" maximum might be limited by what type of data is\n"
" transferred (packet/memory). %u by default.\n"
- " -S, --in_seg_type Input segment data type. %u by default.\n"
+ " -S, --in_seg_type Input segment data type. Dense types can load the DMA\n"
+ " subsystem more heavily as transfer resources are\n"
+ " pre-configured. Sparse types might on the other hand\n"
+ " reflect application usage more precisely as transfer\n"
+ " resources are configured in runtime. %u by default.\n"
" Types:\n"
- " 0: packet\n"
- " 1: memory\n"
+ " 0: dense packet\n"
+ " 1: sparse packet\n"
+ " 2: dense memory\n"
+ " 3: sparse memory\n"
" -m, --compl_mode Completion mode for transfers. %u by default.\n"
" Modes:\n"
" 0: poll\n"
@@ -286,6 +324,8 @@ static void print_usage(void)
" Policies:\n"
" 0: One session shared by workers\n"
" 1: One session per worker\n"
+ " -v, --verify Verify transfers. Checks correctness of destination data\n"
+ " after successful transfers.\n"
" -h, --help This help.\n"
"\n", DEF_TRS_TYPE, DEF_SEG_CNT, DEF_SEG_CNT, DEF_LEN, DEF_SEG_TYPE, DEF_MODE,
DEF_INFLIGHT, DEF_TIME, DEF_WORKERS, DEF_POLICY);
@@ -301,12 +341,14 @@ static parse_result_t check_options(prog_config_t *config)
odp_shm_capability_t shm_capa;
uint64_t shm_size = 0U;
- if (config->trs_type != SYNC && config->trs_type != ASYNC) {
+ if (config->trs_type != SYNC_DMA && config->trs_type != ASYNC_DMA &&
+ config->trs_type != SW_COPY) {
ODPH_ERR("Invalid transfer type: %u\n", config->trs_type);
return PRS_NOK;
}
- if (config->seg_type != PACKET && config->seg_type != MEMORY) {
+ if (config->seg_type != DENSE_PACKET && config->seg_type != SPARSE_PACKET &&
+ config->seg_type != DENSE_MEMORY && config->seg_type != SPARSE_MEMORY) {
ODPH_ERR("Invalid segment type: %u\n", config->seg_type);
return PRS_NOK;
}
@@ -369,7 +411,7 @@ static parse_result_t check_options(prog_config_t *config)
return PRS_NOT_SUP;
}
- if (config->trs_type == ASYNC) {
+ if (config->trs_type == ASYNC_DMA) {
if (config->compl_mode != POLL && config->compl_mode != EVENT) {
ODPH_ERR("Invalid completion mode: %u\n", config->compl_mode);
return PRS_NOK;
@@ -440,7 +482,7 @@ static parse_result_t check_options(prog_config_t *config)
return PRS_NOT_SUP;
}
- if (config->seg_type == PACKET) {
+ if (config->seg_type == DENSE_PACKET || config->seg_type == SPARSE_PACKET) {
if (odp_pool_capability(&pool_capa) < 0) {
ODPH_ERR("Error querying pool capabilities\n");
return PRS_NOK;
@@ -464,6 +506,11 @@ static parse_result_t check_options(prog_config_t *config)
max_segs * num_sessions, pool_capa.pkt.max_num);
return PRS_NOT_SUP;
}
+
+ config->src_cache_size = ODPH_MIN(ODPH_MAX(max_in, pool_capa.pkt.min_cache_size),
+ pool_capa.pkt.max_cache_size);
+ config->dst_cache_size = ODPH_MIN(ODPH_MAX(max_out, pool_capa.pkt.min_cache_size),
+ pool_capa.pkt.max_cache_size);
} else {
/* If SHM implementation capabilities are very puny, program will have already
* failed when reserving memory for global program configuration. */
@@ -488,6 +535,12 @@ static parse_result_t check_options(prog_config_t *config)
" (max: %" PRIu64 ")\n", shm_size, shm_capa.max_size);
return PRS_NOT_SUP;
}
+
+ if (config->seg_type == SPARSE_MEMORY && shm_size < MAX_MEMORY)
+ shm_size = shm_capa.max_size != 0U ?
+ ODPH_MIN(shm_capa.max_size, MAX_MEMORY) : MAX_MEMORY;
+
+ config->shm_size = shm_size;
}
return PRS_OK;
@@ -507,10 +560,11 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config
{ "time_sec", required_argument, NULL, 'T' },
{ "worker_count", required_argument, NULL, 'c' },
{ "policy", required_argument, NULL, 'p' },
+ { "verify", no_argument, NULL, 'v' },
{ "help", no_argument, NULL, 'h' },
{ NULL, 0, NULL, 0 }
};
- static const char *shortopts = "t:i:o:s:S:m:f:T:c:p:h";
+ static const char *shortopts = "t:i:o:s:S:m:f:T:c:p:vh";
init_config(config);
@@ -551,6 +605,9 @@ static parse_result_t parse_options(int argc, char **argv, prog_config_t *config
case 'p':
config->policy = atoi(optarg);
break;
+ case 'v':
+ config->is_verify = true;
+ break;
case 'h':
print_usage();
return PRS_TERM;
@@ -582,15 +639,18 @@ static parse_result_t setup_program(int argc, char **argv, prog_config_t *config
static odp_pool_t get_src_packet_pool(void)
{
odp_pool_param_t param;
+ uint32_t num_pkts_per_worker = ODPH_MAX(prog_conf->num_inflight * prog_conf->num_in_segs,
+ prog_conf->src_cache_size);
if (prog_conf->src_pool != ODP_POOL_INVALID)
return prog_conf->src_pool;
odp_pool_param_init(&param);
param.type = ODP_POOL_PACKET;
- param.pkt.num = prog_conf->num_inflight * prog_conf->num_in_segs * prog_conf->num_sessions;
+ param.pkt.num = num_pkts_per_worker * prog_conf->num_workers;
param.pkt.len = prog_conf->src_seg_len;
param.pkt.seg_len = prog_conf->src_seg_len;
+ param.pkt.cache_size = prog_conf->src_cache_size;
prog_conf->src_pool = odp_pool_create(PROG_NAME "_src_pkts", &param);
return prog_conf->src_pool;
@@ -599,16 +659,18 @@ static odp_pool_t get_src_packet_pool(void)
static odp_pool_t get_dst_packet_pool(void)
{
odp_pool_param_t param;
+ uint32_t num_pkts_per_worker = ODPH_MAX(prog_conf->num_inflight * prog_conf->num_out_segs,
+ prog_conf->dst_cache_size);
if (prog_conf->dst_pool != ODP_POOL_INVALID)
return prog_conf->dst_pool;
odp_pool_param_init(&param);
param.type = ODP_POOL_PACKET;
- param.pkt.num = prog_conf->num_inflight * prog_conf->num_out_segs *
- prog_conf->num_sessions;
+ param.pkt.num = num_pkts_per_worker * prog_conf->num_workers;
param.pkt.len = prog_conf->dst_seg_len;
param.pkt.seg_len = prog_conf->dst_seg_len;
+ param.pkt.cache_size = prog_conf->dst_cache_size;
prog_conf->dst_pool = odp_pool_create(PROG_NAME "_dst_pkts", &param);
return prog_conf->dst_pool;
@@ -658,10 +720,16 @@ static odp_bool_t allocate_packets(sd_t *sd)
static odp_bool_t setup_packet_segments(sd_t *sd)
{
- return configure_packets(sd) && allocate_packets(sd);
+ return configure_packets(sd) &&
+ (sd->seg.seg_type == DENSE_PACKET ? allocate_packets(sd) : true);
}
-static void configure_packet_dma_transfer(sd_t *sd)
+static inline void fill_data(uint8_t *data, uint32_t len)
+{
+ memset(data, DATA, len);
+}
+
+static void configure_packet_transfer(sd_t *sd)
{
odp_dma_seg_t *start_src_seg, *start_dst_seg, *seg;
uint32_t k = 0U, z = 0U, len;
@@ -678,6 +746,9 @@ static void configure_packet_dma_transfer(sd_t *sd)
seg->packet = pkt;
seg->offset = 0U;
seg->len = sd->dma.src_seg_len;
+
+ if (seg->packet != ODP_PACKET_INVALID)
+ fill_data(odp_packet_data(seg->packet), seg->len);
}
len = sd->dma.num_in_segs * sd->dma.src_seg_len;
@@ -717,11 +788,9 @@ static void free_packets(const sd_t *sd)
static odp_bool_t allocate_memory(sd_t *sd)
{
- const uint64_t num_segs = (uint64_t)sd->dma.num_in_segs * sd->dma.num_inflight;
-
- sd->seg.src_shm = odp_shm_reserve(PROG_NAME "_src_shm", sd->dma.src_seg_len * num_segs,
+ sd->seg.src_shm = odp_shm_reserve(PROG_NAME "_src_shm", sd->seg.shm_size,
ODP_CACHE_LINE_SIZE, 0U);
- sd->seg.dst_shm = odp_shm_reserve(PROG_NAME "_dst_shm", sd->dma.dst_seg_len * num_segs,
+ sd->seg.dst_shm = odp_shm_reserve(PROG_NAME "_dst_shm", sd->seg.shm_size,
ODP_CACHE_LINE_SIZE, 0U);
if (sd->seg.src_shm == ODP_SHM_INVALID || sd->seg.dst_shm == ODP_SHM_INVALID) {
@@ -737,6 +806,11 @@ static odp_bool_t allocate_memory(sd_t *sd)
return false;
}
+ sd->seg.src_high = (uint8_t *)sd->seg.src + sd->seg.shm_size - sd->dma.src_seg_len;
+ sd->seg.dst_high = (uint8_t *)sd->seg.dst + sd->seg.shm_size - sd->dma.dst_seg_len;
+ sd->seg.cur_src = sd->seg.src;
+ sd->seg.cur_dst = sd->seg.dst;
+
return true;
}
@@ -745,7 +819,7 @@ static odp_bool_t setup_memory_segments(sd_t *sd)
return allocate_memory(sd);
}
-static void configure_address_dma_transfer(sd_t *sd)
+static void configure_address_transfer(sd_t *sd)
{
odp_dma_seg_t *start_src_seg, *start_dst_seg, *seg;
uint32_t k = 0U, z = 0U, len;
@@ -757,15 +831,20 @@ static void configure_address_dma_transfer(sd_t *sd)
for (uint32_t j = 0U; j < sd->dma.num_in_segs; ++j, ++k) {
seg = &start_src_seg[j];
- seg->addr = (uint8_t *)sd->seg.src + k * sd->dma.src_seg_len;
+ seg->addr = sd->seg.seg_type == SPARSE_MEMORY ?
+ NULL : (uint8_t *)sd->seg.src + k * sd->dma.src_seg_len;
seg->len = sd->dma.src_seg_len;
+
+ if (seg->addr != NULL)
+ fill_data(seg->addr, seg->len);
}
len = sd->dma.num_in_segs * sd->dma.src_seg_len;
for (uint32_t j = 0U; j < sd->dma.num_out_segs; ++j, ++z) {
seg = &start_dst_seg[j];
- seg->addr = (uint8_t *)sd->seg.dst + z * sd->dma.dst_seg_len;
+ seg->addr = sd->seg.seg_type == SPARSE_MEMORY ?
+ NULL : (uint8_t *)sd->seg.dst + z * sd->dma.dst_seg_len;
seg->len = ODPH_MIN(len, sd->dma.dst_seg_len);
len -= sd->dma.dst_seg_len;
}
@@ -790,7 +869,7 @@ static void free_memory(const sd_t *sd)
(void)odp_shm_free(sd->seg.dst_shm);
}
-static void run_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats)
+static void run_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats, ver_fn_t ver_fn)
{
uint64_t start_tm, end_tm, start_cc, end_cc, trs_tm, trs_cc, start_cc_diff;
odp_dma_result_t res;
@@ -820,10 +899,14 @@ static void run_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats)
stats->start_cc += start_cc_diff;
++stats->start_cnt;
- if (odp_unlikely(!res.success))
+ if (odp_unlikely(!res.success)) {
++stats->transfer_errs;
- else
+ } else {
++stats->completed;
+
+ if (ver_fn != NULL)
+ ver_fn(info, stats);
+ }
}
}
@@ -831,10 +914,16 @@ static void run_transfers_mt_unsafe(sd_t *sd, stats_t *stats)
{
const uint32_t count = sd->dma.num_inflight;
odp_dma_t handle = sd->dma.handle;
- trs_info_t *infos = sd->dma.infos;
+ trs_info_t *infos = sd->dma.infos, *info;
- for (uint32_t i = 0U; i < count; ++i)
- run_transfer(handle, &infos[i], stats);
+ for (uint32_t i = 0U; i < count; ++i) {
+ info = &infos[i];
+
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_transfer(handle, info, stats, sd->ver_fn);
+ }
}
static void run_transfers_mt_safe(sd_t *sd, stats_t *stats)
@@ -847,7 +936,10 @@ static void run_transfers_mt_safe(sd_t *sd, stats_t *stats)
info = &infos[i];
if (odp_ticketlock_trylock(&info->lock)) {
- run_transfer(handle, info, stats);
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_transfer(handle, info, stats, sd->ver_fn);
odp_ticketlock_unlock(&info->lock);
}
}
@@ -873,9 +965,10 @@ static odp_bool_t configure_poll_compl(sd_t *sd)
return true;
}
-static void poll_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats)
+static void poll_transfer(sd_t *sd, trs_info_t *info, stats_t *stats)
{
uint64_t start_cc, end_cc, trs_tm, trs_cc, wait_cc, start_tm, start_cc_diff;
+ odp_dma_t handle = sd->dma.handle;
odp_dma_result_t res;
int ret;
@@ -910,13 +1003,20 @@ static void poll_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats)
stats->trs_poll_cnt += info->trs_poll_cnt;
++stats->trs_cnt;
- if (odp_unlikely(!res.success))
+ if (odp_unlikely(!res.success)) {
++stats->transfer_errs;
- else
+ } else {
++stats->completed;
+ if (sd->ver_fn != NULL)
+ sd->ver_fn(info, stats);
+ }
+
info->is_running = false;
} else {
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
start_tm = odp_time_global_strict_ns();
start_cc = odp_cpu_cycles();
ret = odp_dma_transfer_start(handle, &info->trs_param, &info->compl_param);
@@ -941,29 +1041,46 @@ static void poll_transfer(odp_dma_t handle, trs_info_t *info, stats_t *stats)
static void poll_transfers_mt_unsafe(sd_t *sd, stats_t *stats)
{
const uint32_t count = sd->dma.num_inflight;
- odp_dma_t handle = sd->dma.handle;
trs_info_t *infos = sd->dma.infos;
for (uint32_t i = 0U; i < count; ++i)
- poll_transfer(handle, &infos[i], stats);
+ poll_transfer(sd, &infos[i], stats);
}
static void poll_transfers_mt_safe(sd_t *sd, stats_t *stats)
{
const uint32_t count = sd->dma.num_inflight;
- odp_dma_t handle = sd->dma.handle;
trs_info_t *infos = sd->dma.infos, *info;
for (uint32_t i = 0U; i < count; ++i) {
info = &infos[i];
if (odp_ticketlock_trylock(&info->lock)) {
- poll_transfer(handle, info, stats);
+ poll_transfer(sd, info, stats);
odp_ticketlock_unlock(&info->lock);
}
}
}
+static void drain_poll_transfers(sd_t *sd)
+{
+ const uint32_t count = sd->dma.num_inflight;
+ trs_info_t *infos = sd->dma.infos, *info;
+ odp_dma_t handle = sd->dma.handle;
+ int rc;
+
+ for (uint32_t i = 0U; i < count; ++i) {
+ info = &infos[i];
+
+ if (info->is_running) {
+ do {
+ rc = odp_dma_transfer_done(handle, info->compl_param.transfer_id,
+ NULL);
+ } while (rc == 0);
+ }
+ }
+}
+
static odp_bool_t configure_event_compl_session(sd_t *sd)
{
odp_thrmask_t zero;
@@ -1035,6 +1152,10 @@ static odp_bool_t start_initial_transfers(sd_t *sd)
for (uint32_t i = 0U; i < sd->dma.num_inflight; ++i) {
info = &sd->dma.infos[i];
+
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
start_tm = odp_time_global_strict_ns();
start_cc = odp_cpu_cycles();
ret = odp_dma_transfer_start(sd->dma.handle, &info->trs_param, &info->compl_param);
@@ -1085,11 +1206,18 @@ static void wait_compl_event(sd_t *sd, stats_t *stats)
stats->wait_cc += wait_cc;
++stats->wait_cnt;
- if (odp_unlikely(!res.success))
+ if (odp_unlikely(!res.success)) {
++stats->transfer_errs;
- else
+ } else {
++stats->completed;
+ if (sd->ver_fn != NULL)
+ sd->ver_fn(info, stats);
+ }
+
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
start_tm = odp_time_global_strict_ns();
start_cc = odp_cpu_cycles();
ret = odp_dma_transfer_start(sd->dma.handle, &info->trs_param, &info->compl_param);
@@ -1120,43 +1248,127 @@ static void drain_compl_events(ODP_UNUSED sd_t *sd)
}
}
-static void drain_poll_transfers(sd_t *sd)
+static void run_memcpy(trs_info_t *info, stats_t *stats, ver_fn_t ver_fn)
+{
+ uint64_t start_tm, end_tm, start_cc, end_cc, trs_tm, trs_cc, start_cc_diff;
+ const odp_dma_transfer_param_t *param = &info->trs_param;
+ uint32_t tot_len, src_len, dst_len, min_len, len, i = 0U, j = 0U, src_off = 0U,
+ dst_off = 0U, src_rem, dst_rem;
+ const odp_bool_t is_addr = param->src_format == ODP_DMA_FORMAT_ADDR;
+ uint8_t *src_data, *dst_data;
+
+ /* Test data is configured so that total source and total destination sizes always match,
+ * all source and all destination segments have the same size and in case of packets,
+ * there's always just a single segment. */
+ tot_len = param->num_src * param->src_seg->len;
+ src_len = param->src_seg->len;
+ dst_len = param->dst_seg->len;
+ min_len = ODPH_MIN(src_len, dst_len);
+ len = min_len;
+ start_tm = odp_time_local_strict_ns();
+ start_cc = odp_cpu_cycles();
+
+ while (tot_len > 0U) {
+ if (is_addr) {
+ src_data = param->src_seg[i].addr;
+ dst_data = param->dst_seg[j].addr;
+ } else {
+ src_data = odp_packet_data(param->src_seg[i].packet);
+ dst_data = odp_packet_data(param->dst_seg[j].packet);
+ }
+
+ memcpy(dst_data + dst_off, src_data + src_off, len);
+ dst_off += len;
+ src_off += len;
+ src_rem = src_len - src_off;
+ dst_rem = dst_len - dst_off;
+ tot_len -= len;
+ len = ODPH_MIN(ODPH_MAX(src_rem, dst_rem), min_len);
+
+ if (dst_rem > 0U) {
+ ++i;
+ src_off = 0U;
+ } else {
+ ++j;
+ dst_off = 0U;
+ }
+ }
+
+ end_cc = odp_cpu_cycles();
+ end_tm = odp_time_local_strict_ns();
+ trs_tm = end_tm - start_tm;
+ stats->max_trs_tm = ODPH_MAX(trs_tm, stats->max_trs_tm);
+ stats->min_trs_tm = ODPH_MIN(trs_tm, stats->min_trs_tm);
+ stats->trs_tm += trs_tm;
+ trs_cc = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_trs_cc = ODPH_MAX(trs_cc, stats->max_trs_cc);
+ stats->min_trs_cc = ODPH_MIN(trs_cc, stats->min_trs_cc);
+ stats->trs_cc += trs_cc;
+ ++stats->trs_cnt;
+ start_cc_diff = odp_cpu_cycles_diff(end_cc, start_cc);
+ stats->max_start_cc = ODPH_MAX(start_cc_diff, stats->max_start_cc);
+ stats->min_start_cc = ODPH_MIN(start_cc_diff, stats->min_start_cc);
+ stats->start_cc += start_cc_diff;
+ ++stats->start_cnt;
+ ++stats->completed;
+
+ if (ver_fn != NULL)
+ ver_fn(info, stats);
+}
+
+static void run_memcpy_mt_unsafe(sd_t *sd, stats_t *stats)
{
const uint32_t count = sd->dma.num_inflight;
trs_info_t *infos = sd->dma.infos, *info;
- odp_dma_t handle = sd->dma.handle;
- int rc;
for (uint32_t i = 0U; i < count; ++i) {
info = &infos[i];
- if (info->is_running) {
- do {
- rc = odp_dma_transfer_done(handle, info->compl_param.transfer_id,
- NULL);
- } while (rc == 0);
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_memcpy(info, stats, sd->ver_fn);
+ }
+}
+
+static void run_memcpy_mt_safe(sd_t *sd, stats_t *stats)
+{
+ const uint32_t count = sd->dma.num_inflight;
+ trs_info_t *infos = sd->dma.infos, *info;
+
+ for (uint32_t i = 0U; i < count; ++i) {
+ info = &infos[i];
+
+ if (odp_ticketlock_trylock(&info->lock)) {
+ if (sd->prep_trs_fn != NULL)
+ sd->prep_trs_fn(sd, info);
+
+ run_memcpy(info, stats, sd->ver_fn);
+ odp_ticketlock_unlock(&info->lock);
}
}
}
static void setup_api(prog_config_t *config)
{
- if (config->seg_type == PACKET) {
+ if (config->seg_type == DENSE_PACKET || config->seg_type == SPARSE_PACKET) {
config->api.setup_fn = setup_packet_segments;
- config->api.trs_fn = configure_packet_dma_transfer;
+ config->api.trs_fn = configure_packet_transfer;
config->api.free_fn = free_packets;
} else {
config->api.setup_fn = setup_memory_segments;
- config->api.trs_fn = configure_address_dma_transfer;
+ config->api.trs_fn = configure_address_transfer;
config->api.free_fn = free_memory;
}
- if (config->trs_type == SYNC) {
+ if (config->trs_type == SYNC_DMA) {
+ config->api.session_cfg_fn = NULL;
config->api.compl_fn = NULL;
+ config->api.bootstrap_fn = NULL;
config->api.wait_fn = config->num_workers == 1 || config->policy == MANY ?
run_transfers_mt_unsafe : run_transfers_mt_safe;
config->api.drain_fn = NULL;
- } else {
+ } else if (config->trs_type == ASYNC_DMA) {
if (config->compl_mode == POLL) {
config->api.session_cfg_fn = NULL;
config->api.compl_fn = configure_poll_compl;
@@ -1171,6 +1383,97 @@ static void setup_api(prog_config_t *config)
config->api.wait_fn = wait_compl_event;
config->api.drain_fn = drain_compl_events;
}
+ } else {
+ config->api.session_cfg_fn = NULL;
+ config->api.compl_fn = NULL;
+ config->api.bootstrap_fn = NULL;
+ config->api.wait_fn = config->num_workers == 1 || config->policy == MANY ?
+ run_memcpy_mt_unsafe : run_memcpy_mt_safe;
+ config->api.drain_fn = NULL;
+ }
+}
+
+static void prepare_packet_transfer(sd_t *sd, trs_info_t *info)
+{
+ odp_dma_transfer_param_t *param = &info->trs_param;
+ odp_dma_seg_t *seg;
+
+ for (uint32_t i = 0U; i < param->num_src; ++i) {
+ seg = &param->src_seg[i];
+
+ if (odp_likely(seg->packet != ODP_PACKET_INVALID))
+ odp_packet_free(seg->packet);
+
+ seg->packet = odp_packet_alloc(sd->seg.src_pool, seg->len);
+
+ if (odp_unlikely(seg->packet == ODP_PACKET_INVALID))
+ /* There should always be enough packets. */
+ ODPH_ABORT("Failed to allocate packet, aborting\n");
+
+ fill_data(odp_packet_data(seg->packet), seg->len);
+ }
+
+ for (uint32_t i = 0U; i < param->num_dst; ++i) {
+ seg = &param->dst_seg[i];
+
+ if (odp_likely(seg->packet != ODP_PACKET_INVALID))
+ odp_packet_free(seg->packet);
+
+ seg->packet = odp_packet_alloc(sd->seg.dst_pool, seg->len);
+
+ if (odp_unlikely(seg->packet == ODP_PACKET_INVALID))
+ /* There should always be enough packets. */
+ ODPH_ABORT("Failed to allocate packet, aborting\n");
+ }
+}
+
+static void prepare_address_transfer(sd_t *sd, trs_info_t *info)
+{
+ odp_dma_transfer_param_t *param = &info->trs_param;
+ uint8_t *addr = sd->seg.cur_src;
+ odp_dma_seg_t *seg;
+
+ for (uint32_t i = 0U; i < param->num_src; ++i) {
+ seg = &param->src_seg[i];
+
+ if (odp_unlikely(addr > (uint8_t *)sd->seg.src_high))
+ addr = sd->seg.src;
+
+ seg->addr = addr;
+ addr += sd->dma.src_seg_len;
+ fill_data(seg->addr, seg->len);
+ }
+
+ sd->seg.cur_src = addr + ODP_CACHE_LINE_SIZE;
+ addr = sd->seg.cur_dst;
+
+ for (uint32_t i = 0U; i < param->num_dst; ++i) {
+ if (odp_unlikely(addr > (uint8_t *)sd->seg.dst_high))
+ addr = sd->seg.dst;
+
+ param->dst_seg[i].addr = addr;
+ addr += sd->dma.dst_seg_len;
+ }
+
+ sd->seg.cur_dst = addr + ODP_CACHE_LINE_SIZE;
+}
+
+static void verify_transfer(trs_info_t *info, stats_t *stats)
+{
+ odp_dma_transfer_param_t *param = &info->trs_param;
+ odp_dma_seg_t *seg;
+ const odp_bool_t is_addr = param->dst_format == ODP_DMA_FORMAT_ADDR;
+ uint8_t *data;
+
+ for (uint32_t i = 0U; i < param->num_dst; ++i) {
+ seg = &param->dst_seg[i];
+ data = is_addr ? seg->addr : odp_packet_data(seg->packet);
+
+ for (uint32_t j = 0U; j < seg->len; ++j)
+ if (odp_unlikely(data[j] != DATA)) {
+ ++stats->data_errs;
+ return;
+ }
}
}
@@ -1206,6 +1509,13 @@ static odp_bool_t setup_session_descriptors(prog_config_t *config)
if (config->api.session_cfg_fn != NULL && !config->api.session_cfg_fn(sd))
return false;
+
+ sd->seg.shm_size = config->shm_size;
+ sd->seg.seg_type = config->seg_type;
+ sd->prep_trs_fn = config->seg_type == SPARSE_PACKET ? prepare_packet_transfer :
+ config->seg_type == SPARSE_MEMORY ?
+ prepare_address_transfer : NULL;
+ sd->ver_fn = config->is_verify ? verify_transfer : NULL;
}
return true;
@@ -1402,11 +1712,16 @@ static void print_stats(const prog_config_t *config)
" segment type: %s\n"
" inflight count: %u\n"
" session policy: %s\n\n",
- config->trs_type == SYNC ? "synchronous" : config->compl_mode == POLL ?
- "asynchronous-poll" : "asynchronous-event", config->num_in_segs,
+ config->trs_type == SYNC_DMA ? "DMA synchronous" :
+ config->trs_type == ASYNC_DMA && config->compl_mode == POLL ?
+ "DMA asynchronous-poll" :
+ config->trs_type == ASYNC_DMA && config->compl_mode == EVENT ?
+ "DMA asynchronous-event" : "SW", config->num_in_segs,
config->num_out_segs, config->src_seg_len,
- config->seg_type == PACKET ? "packet" : "memory", config->num_inflight,
- config->policy == SINGLE ? "shared" : "per-worker");
+ config->seg_type == DENSE_PACKET ? "dense packet" :
+ config->seg_type == SPARSE_PACKET ? "sparse packet" :
+ config->seg_type == DENSE_MEMORY ? "dense memory" : "sparse memory",
+ config->num_inflight, config->policy == SINGLE ? "shared" : "per-worker");
for (int i = 0; i < config->num_workers; ++i) {
stats = &config->thread_config[i].stats;
@@ -1425,7 +1740,7 @@ static void print_stats(const prog_config_t *config)
" start errors: %" PRIu64 "\n",
stats->completed, stats->start_errs);
- if (config->trs_type == ASYNC) {
+ if (config->trs_type == ASYNC_DMA) {
if (config->compl_mode == POLL)
printf(" poll errors: %" PRIu64 "\n",
stats->poll_errs);
@@ -1434,12 +1749,15 @@ static void print_stats(const prog_config_t *config)
stats->scheduler_timeouts);
}
- printf(" transfer errors: %" PRIu64 "\n"
- " run time: %" PRIu64 " ns\n",
- stats->transfer_errs, stats->tot_tm);
+ printf(" transfer errors: %" PRIu64 "\n", stats->transfer_errs);
+
+ if (config->is_verify)
+ printf(" data errors: %" PRIu64 "\n", stats->data_errs);
+
+ printf(" run time: %" PRIu64 " ns\n", stats->tot_tm);
if (config->policy == MANY) {
- printf(" DMA session:\n"
+ printf(" session:\n"
" average time per transfer: %" PRIu64 " "
"(min: %" PRIu64 ", max: %" PRIu64 ") ns\n"
" average cycles per transfer: %" PRIu64 " "
@@ -1461,11 +1779,16 @@ static void print_stats(const prog_config_t *config)
avg_start_cc = stats->start_cnt > 0U ? stats->start_cc / stats->start_cnt : 0U;
printf(" average cycles breakdown:\n");
- if (config->trs_type == SYNC) {
+ if (config->trs_type == SYNC_DMA) {
printf(" odp_dma_transfer(): %" PRIu64 " "
"(min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_start_cc,
avg_start_cc > 0U ? stats->min_start_cc : 0U,
avg_start_cc > 0U ? stats->max_start_cc : 0U);
+ } else if (config->trs_type == SW_COPY) {
+ printf(" memcpy(): %" PRIu64 " "
+ "(min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_start_cc,
+ avg_start_cc > 0U ? stats->min_start_cc : 0U,
+ avg_start_cc > 0U ? stats->max_start_cc : 0U);
} else {
printf(" odp_dma_transfer_start(): %" PRIu64 " "
"(min: %" PRIu64 ", max: %" PRIu64 ")\n", avg_start_cc,
@@ -1476,7 +1799,7 @@ static void print_stats(const prog_config_t *config)
if (config->compl_mode == POLL) {
printf(" odp_dma_transfer_done(): %" PRIu64 ""
- " (min: %" PRIu64 ", max: %" PRIu64 ", x %" PRIu64 ""
+ " (min: %" PRIu64 ", max: %" PRIu64 ", x%" PRIu64 ""
" per transfer)\n", avg_wait_cc,
avg_wait_cc > 0U ? stats->min_wait_cc : 0U,
avg_wait_cc > 0U ? stats->max_wait_cc : 0U,
diff --git a/test/performance/odp_dma_perf_run.sh b/test/performance/odp_dma_perf_run.sh
index 37bc4382f..f5d567740 100755
--- a/test/performance/odp_dma_perf_run.sh
+++ b/test/performance/odp_dma_perf_run.sh
@@ -25,24 +25,45 @@ check_result()
fi
}
-echo "odp_dma_perf: synchronous transfer"
+echo "odp_dma_perf: synchronous DMA transfer 1"
echo "===================================="
-${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 0 -i $SEGC -o $SEGC -s $SEGS -S 0 -f $INFL -T $TIME
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 0 -i $SEGC -o $SEGC -s $SEGS -S 0 -f $INFL -T $TIME -v
check_result $?
-echo "odp_dma_perf: asynchronous transfer 1"
+echo "odp_dma_perf: synchronous DMA transfer 2"
+echo "===================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 0 -i $SEGC -o $SEGC -s $SEGS -S 1 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: asynchronous DMA transfer 1"
+echo "====================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 1 -i $SEGC -o $SEGC -s $SEGS -S 2 -m 0 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: asynchronous DMA transfer 2"
+echo "====================================="
+
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 1 -i $SEGC -o $SEGC -s $SEGS -S 3 -m 1 -f $INFL -T $TIME -v
+
+check_result $?
+
+echo "odp_dma_perf: SW transfer 1"
echo "====================================="
-${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 1 -i $SEGC -o $SEGC -s $SEGS -S 1 -m 0 -f $INFL -T $TIME
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 2 -i $SEGC -o $SEGC -s $SEGS -S 0 -f $INFL -T $TIME -v
check_result $?
-echo "odp_dma_perf: asynchronous transfer 2"
+echo "odp_dma_perf: SW transfer 2"
echo "====================================="
-${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 1 -i $SEGC -o $SEGC -s $SEGS -S 1 -m 1 -f $INFL -T $TIME
+${TEST_DIR}/${BIN_NAME}${EXEEXT} -t 2 -i $SEGC -o $SEGC -s $SEGS -S 2 -f $INFL -T $TIME -v
check_result $?
diff --git a/test/performance/odp_ipsecfwd.c b/test/performance/odp_ipsecfwd.c
index f529d9a4a..5c35d67f7 100644
--- a/test/performance/odp_ipsecfwd.c
+++ b/test/performance/odp_ipsecfwd.c
@@ -33,6 +33,7 @@
#define PKT_CNT 32768U
#define MAX_BURST 32U
#define ORDERED 0U
+#define IP_ADDR_LEN 32U
#define ALG_ENTRY(_alg_name, _type) \
{ \
@@ -77,12 +78,18 @@ typedef struct pktio_s {
} pktio_t;
typedef struct {
+ uint32_t prefix;
+ uint32_t mask;
odph_ethaddr_t dst_mac;
const pktio_t *pktio;
- odph_iplookup_prefix_t prefix;
} fwd_entry_t;
typedef struct {
+ fwd_entry_t entries[MAX_FWDS];
+ uint32_t num;
+} lookup_table_t;
+
+typedef struct {
uint64_t ipsec_in_pkts;
uint64_t ipsec_out_pkts;
uint64_t ipsec_in_errs;
@@ -113,7 +120,7 @@ typedef struct {
} sa_config_t;
typedef uint32_t (*rx_fn_t)(thread_config_t *config, odp_event_t evs[], int num);
-typedef void (*ipsec_fn_t)(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats);
+typedef void (*ipsec_fn_t)(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl, stats_t *stats);
typedef void (*drain_fn_t)(prog_config_t *config);
typedef struct {
@@ -127,9 +134,9 @@ typedef struct prog_config_s {
odph_thread_t thread_tbl[MAX_WORKERS];
thread_config_t thread_config[MAX_WORKERS];
odp_ipsec_sa_t sas[MAX_SAS];
- fwd_entry_t fwd_entries[MAX_FWDS];
odp_queue_t sa_qs[MAX_SA_QUEUES];
pktio_t pktios[MAX_IFS];
+ lookup_table_t fwd_tbl;
odp_atomic_u32_t is_running;
sa_config_t default_cfg;
ops_t ops;
@@ -137,7 +144,6 @@ typedef struct prog_config_s {
odp_instance_t odp_instance;
odp_queue_t compl_q;
odp_pool_t pktio_pool;
- odph_table_t fwd_tbl;
odp_barrier_t init_barrier;
odp_barrier_t term_barrier;
uint32_t num_input_qs;
@@ -147,7 +153,6 @@ typedef struct prog_config_s {
uint32_t pkt_len;
uint32_t num_ifs;
uint32_t num_sas;
- uint32_t num_fwds;
int num_thrs;
odp_bool_t is_dir_rx;
odp_bool_t is_hashed_tx;
@@ -367,6 +372,11 @@ static void print_usage(void)
" prefix: \"192.168.1.0/24\"\n"
" if: \"ens9f1\"\n"
" dst_mac: \"00:00:05:00:07:00\"\n"
+ " },\n"
+ " {\n"
+ " prefix: \"192.1.0.0/16\"\n"
+ " if: \"ens9f0\"\n"
+ " dst_mac: \"00:00:05:00:08:00\"\n"
" }\n"
" );\n"
"\n"
@@ -389,7 +399,8 @@ static void print_usage(void)
" in 'fwd'-named list. With forwarding entries, every\n"
" parameter is always required and interfaces present in\n"
" forwarding entries should be one of the interfaces passed\n"
- " with '--interfaces' option. See example above for\n"
+ " with '--interfaces' option. The entries are looked up\n"
+ " in the order they are in the list. See example above for\n"
" potential SA and forwarding configuration.\n"
"\n"
" Supported cipher and authentication algorithms for SAs:\n",
@@ -501,21 +512,36 @@ static inline int process_ipsec_out_enq(odp_packet_t pkts[], const odp_ipsec_sa_
return sent;
}
-static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd_tbl,
+static inline const fwd_entry_t *get_fwd_entry(lookup_table_t *table, uint32_t ip)
+{
+ fwd_entry_t *entry;
+
+ for (uint32_t i = 0U; i < table->num; ++i) {
+ entry = &table->entries[i];
+
+ if ((ip & entry->mask) == entry->prefix)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, lookup_table_t *fwd_tbl,
uint8_t *q_idx)
{
const uint32_t l3_off = odp_packet_l3_offset(pkt);
odph_ipv4hdr_t ipv4;
uint32_t dst_ip, src_ip;
- fwd_entry_t *fwd = NULL;
+ const fwd_entry_t *fwd;
odph_ethhdr_t eth;
if (odp_packet_copy_to_mem(pkt, l3_off, ODPH_IPV4HDR_LEN, &ipv4) < 0)
return NULL;
dst_ip = odp_be_to_cpu_32(ipv4.dst_addr);
+ fwd = get_fwd_entry(fwd_tbl, dst_ip);
- if (odph_iplookup_table_get_value(fwd_tbl, &dst_ip, &fwd, 0U) < 0 || fwd == NULL)
+ if (fwd == NULL)
return NULL;
if (l3_off != ODPH_ETHHDR_LEN) {
@@ -543,7 +569,7 @@ static inline const pktio_t *lookup_and_apply(odp_packet_t pkt, odph_table_t fwd
return fwd->pktio;
}
-static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_t fwd_tbl)
+static inline uint32_t forward_packets(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl)
{
odp_packet_t pkt;
odp_bool_t is_hashed_tx = ifs.is_hashed_tx;
@@ -599,7 +625,7 @@ static inline uint32_t forward_packets(odp_packet_t pkts[], int num, odph_table_
return num_procd;
}
-static inline void process_packets_out_enq(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+static inline void process_packets_out_enq(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
stats_t *stats)
{
odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST];
@@ -635,7 +661,7 @@ static inline void process_packets_out_enq(odp_packet_t pkts[], int num, odph_ta
}
}
-static void process_packets_in_enq(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+static void process_packets_in_enq(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
stats_t *stats)
{
odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST];
@@ -680,7 +706,8 @@ static inline odp_bool_t is_ipsec_in(odp_packet_t pkt)
return odp_packet_user_ptr(pkt) == NULL;
}
-static void complete_ipsec_ops(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats)
+static void complete_ipsec_ops(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
{
odp_packet_t pkt, pkts_out[MAX_BURST], pkts_fwd[MAX_BURST];
odp_bool_t is_in;
@@ -784,7 +811,7 @@ static inline int process_ipsec_out(odp_packet_t pkts[], const odp_ipsec_sa_t sa
return sent;
}
-static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_t fwd_tbl,
+static inline void process_packets_out(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
stats_t *stats)
{
odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_fwd[MAX_BURST], pkts_ips_out[MAX_BURST];
@@ -840,7 +867,8 @@ static inline void process_packets_out(odp_packet_t pkts[], int num, odph_table_
}
}
-static void process_packets_in(odp_packet_t pkts[], int num, odph_table_t fwd_tbl, stats_t *stats)
+static void process_packets_in(odp_packet_t pkts[], int num, lookup_table_t *fwd_tbl,
+ stats_t *stats)
{
odp_packet_t pkt, pkts_ips[MAX_BURST], pkts_out[MAX_BURST], pkts_ips_out[MAX_BURST];
odp_ipsec_sa_t *sa, sas[MAX_BURST];
@@ -1297,9 +1325,8 @@ static void create_fwd_table_entry(config_setting_t *cfg, prog_config_t *config)
odph_ethaddr_t dst_mac;
const pktio_t *pktio = NULL;
fwd_entry_t *entry;
- odph_iplookup_prefix_t prefix;
- if (config->num_fwds == MAX_FWDS) {
+ if (config->fwd_tbl.num == MAX_FWDS) {
ODPH_ERR("Maximum number of forwarding entries parsed (%u), ignoring rest\n",
MAX_FWDS);
return;
@@ -1315,6 +1342,11 @@ static void create_fwd_table_entry(config_setting_t *cfg, prog_config_t *config)
ODPH_ERR("Syntax error in IP address for forwarding entry\n");
return;
}
+
+ if (mask > IP_ADDR_LEN) {
+ ODPH_ERR("Invalid subnet mask for forwarding entry: %u\n", mask);
+ return;
+ }
} else {
return;
}
@@ -1339,13 +1371,13 @@ static void create_fwd_table_entry(config_setting_t *cfg, prog_config_t *config)
return;
}
- entry = &config->fwd_entries[config->num_fwds];
+ mask = mask > 0U ? 0xFFFFFFFF << (IP_ADDR_LEN - mask) : 0U;
+ entry = &config->fwd_tbl.entries[config->fwd_tbl.num];
+ entry->prefix = dst_ip & mask;
+ entry->mask = mask;
entry->dst_mac = dst_mac;
entry->pktio = pktio;
- prefix.ip = dst_ip;
- prefix.cidr = mask;
- entry->prefix = prefix;
- ++config->num_fwds;
+ ++config->fwd_tbl.num;
}
static void parse_fwd_table(config_t *cfg, prog_config_t *config)
@@ -1385,9 +1417,9 @@ static parse_result_t check_options(prog_config_t *config)
return PRS_NOK;
}
- if (config->num_fwds == 0U) {
+ if (config->fwd_tbl.num == 0U) {
ODPH_ERR("Invalid number of forwarding entries: %u (min: 1, max: %u)\n",
- config->num_fwds, MAX_FWDS);
+ config->fwd_tbl.num, MAX_FWDS);
return PRS_NOK;
}
@@ -1710,30 +1742,6 @@ static odp_bool_t setup_pktios(prog_config_t *config)
return true;
}
-static odp_bool_t setup_fwd_table(prog_config_t *config)
-{
- fwd_entry_t *fwd_e;
-
- config->fwd_tbl = odph_iplookup_table_create(SHORT_PROG_NAME "_fwd_tbl", 0U, 0U,
- sizeof(fwd_entry_t *));
-
- if (config->fwd_tbl == NULL) {
- ODPH_ERR("Error creating forwarding table\n");
- return false;
- }
-
- for (uint32_t i = 0U; i < config->num_fwds; ++i) {
- fwd_e = &config->fwd_entries[i];
-
- if (odph_iplookup_table_put_value(config->fwd_tbl, &fwd_e->prefix, &fwd_e) < 0) {
- ODPH_ERR("Error populating forwarding table\n");
- return false;
- }
- }
-
- return true;
-}
-
static inline void check_ipsec_status_ev(odp_event_t ev, stats_t *stats)
{
odp_ipsec_status_t status;
@@ -1755,7 +1763,7 @@ static int process_packets(void *args)
odp_event_type_t type;
odp_event_subtype_t subtype;
odp_packet_t pkt, pkts_in[MAX_BURST], pkts_ips[MAX_BURST];
- odph_table_t fwd_tbl = config->prog_config->fwd_tbl;
+ lookup_table_t *fwd_tbl = &config->prog_config->fwd_tbl;
stats_t *stats = &config->stats;
ifs.is_hashed_tx = config->prog_config->is_hashed_tx;
@@ -1844,9 +1852,6 @@ static odp_bool_t setup_test(prog_config_t *config)
if (!setup_pktios(config))
return false;
- if (!setup_fwd_table(config))
- return false;
-
if (!setup_workers(config))
return false;
@@ -1930,8 +1935,6 @@ static void wait_sas_disabled(uint32_t num_sas)
static void teardown_test(const prog_config_t *config)
{
- (void)odph_iplookup_table_destroy(config->fwd_tbl);
-
for (uint32_t i = 0U; i < config->num_ifs; ++i) {
free(config->pktios[i].name);
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 6080fa8eb..1e9b79db0 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -928,6 +928,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
odp_pktio_op_mode_t mode_tx;
pktin_mode_t in_mode = gbl_args->appl.in_mode;
odp_pktio_info_t info;
+ uint8_t *addr;
odp_pktio_param_init(&pktio_param);
@@ -950,9 +951,6 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
return -1;
}
- printf("created pktio %" PRIu64 ", dev: %s, drv: %s\n",
- odp_pktio_to_u64(pktio), dev, info.drv_name);
-
if (gbl_args->appl.verbose)
odp_pktio_print(pktio);
@@ -968,7 +966,6 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
config.parser.layer = ODP_PROTO_LAYER_ALL;
if (gbl_args->appl.chksum) {
- printf("Checksum offload enabled\n");
config.pktout.bit.ipv4_chksum_ena = 1;
config.pktout.bit.udp_chksum_ena = 1;
config.pktout.bit.tcp_chksum_ena = 1;
@@ -1057,16 +1054,16 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (num_rx > (int)pktio_capa.max_input_queues) {
num_rx = pktio_capa.max_input_queues;
mode_rx = ODP_PKTIO_OP_MT;
- printf("Maximum number of input queues: %i\n", num_rx);
+ printf("Warning: %s: maximum number of input queues: %i\n", dev, num_rx);
}
if (num_rx < gbl_args->appl.num_workers)
- printf("Sharing %i input queues between %i workers\n",
- num_rx, gbl_args->appl.num_workers);
+ printf("Warning: %s: sharing %i input queues between %i workers\n",
+ dev, num_rx, gbl_args->appl.num_workers);
if (num_tx > (int)pktio_capa.max_output_queues) {
- printf("Sharing %i output queues between %i workers\n",
- pktio_capa.max_output_queues, num_tx);
+ printf("Warning: %s: sharing %i output queues between %i workers\n",
+ dev, pktio_capa.max_output_queues, num_tx);
num_tx = pktio_capa.max_output_queues;
mode_tx = ODP_PKTIO_OP_MT;
}
@@ -1122,8 +1119,16 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
}
}
- printf("created %i input and %i output queues on (%s)\n",
- num_rx, num_tx, dev);
+ if (odp_pktio_mac_addr(pktio, gbl_args->port_eth_addr[idx].addr,
+ ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
+ ODPH_ERR("Reading interface Ethernet address failed: %s\n", dev);
+ return -1;
+ }
+ addr = gbl_args->port_eth_addr[idx].addr;
+
+ printf(" dev: %s, drv: %s, rx_queues: %i, tx_queues: %i, mac: "
+ "%02x:%02x:%02x:%02x:%02x:%02x\n", dev, info.drv_name, num_rx, num_tx,
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
gbl_args->pktios[idx].num_rx_queue = num_rx;
gbl_args->pktios[idx].num_tx_queue = num_tx;
@@ -2094,6 +2099,9 @@ int main(int argc, char *argv[])
gbl_args->appl.num_workers = num_workers;
+ /* Print application information */
+ print_info();
+
for (i = 0; i < num_workers; i++)
gbl_args->thread_args[i].thr_idx = i;
@@ -2260,6 +2268,8 @@ int main(int argc, char *argv[])
pool = pool_tbl[0];
vec_pool = vec_pool_tbl[0];
+ printf("\nInterfaces\n----------\n");
+
for (i = 0; i < if_count; ++i) {
const char *dev = gbl_args->appl.if_names[i];
int num_rx, num_tx;
@@ -2286,14 +2296,6 @@ int main(int argc, char *argv[])
if (create_pktio(dev, i, num_rx, num_tx, pool, vec_pool, grp))
exit(EXIT_FAILURE);
- /* Save interface ethernet address */
- if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
- gbl_args->port_eth_addr[i].addr,
- ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- ODPH_ERR("Interface ethernet address unknown\n");
- exit(EXIT_FAILURE);
- }
-
/* Save destination eth address */
if (gbl_args->appl.dst_change) {
/* 02:00:00:00:00:XX */
@@ -2317,9 +2319,6 @@ int main(int argc, char *argv[])
gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
- /* Print application information */
- print_info();
-
bind_queues();
init_port_lookup_tbl();
diff --git a/test/performance/odp_sched_pktio.c b/test/performance/odp_sched_pktio.c
index 927d35cbd..3a85a91a5 100644
--- a/test/performance/odp_sched_pktio.c
+++ b/test/performance/odp_sched_pktio.c
@@ -1081,10 +1081,10 @@ static int stop_pktios(test_global_t *test_global)
return ret;
}
-static void empty_queues(void)
+static void empty_queues(uint64_t wait_ns)
{
odp_event_t ev;
- uint64_t wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS / 2);
+ uint64_t wait_time = odp_schedule_wait_time(wait_ns);
/* Drop all events from all queues */
while (1) {
@@ -1284,12 +1284,15 @@ static int create_timers(test_global_t *test_global)
return -1;
}
+ if (odp_timer_pool_start_multi(&timer_pool, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
+
test_global->timer.timer_pool = timer_pool;
tick = odp_timer_ns_to_tick(timer_pool, timeout_ns);
test_global->timer.timeout_tick = tick;
- odp_timer_pool_start();
-
for (i = 0; i < num_pktio; i++) {
for (j = 0; j < num_queue; j++) {
queue = test_global->pktio[i].input_queue[j];
@@ -1365,7 +1368,6 @@ static int start_timers(test_global_t *test_global)
static void destroy_timers(test_global_t *test_global)
{
int i, j;
- odp_event_t event;
odp_timer_t timer;
int num_pktio = test_global->opt.num_pktio;
int num_queue = test_global->opt.num_pktio_queue;
@@ -1375,6 +1377,9 @@ static void destroy_timers(test_global_t *test_global)
if (timer_pool == ODP_TIMER_POOL_INVALID)
return;
+ /* Wait any remaining timers to expire */
+ empty_queues(2000 * test_global->opt.timeout_us);
+
for (i = 0; i < num_pktio; i++) {
for (j = 0; j < num_queue; j++) {
timer = test_global->timer.timer[i][j];
@@ -1382,10 +1387,8 @@ static void destroy_timers(test_global_t *test_global)
if (timer == ODP_TIMER_INVALID)
break;
- event = odp_timer_free(timer);
-
- if (event != ODP_EVENT_INVALID)
- odp_event_free(event);
+ if (odp_timer_free(timer))
+ printf("Timer free failed: %i, %i\n", i, j);
}
}
@@ -1552,7 +1555,7 @@ int main(int argc, char *argv[])
quit:
stop_pktios(test_global);
- empty_queues();
+ empty_queues(ODP_TIME_SEC_IN_NS / 2);
close_pktios(test_global);
destroy_pipeline_queues(test_global);
destroy_timers(test_global);
diff --git a/test/performance/odp_stress.c b/test/performance/odp_stress.c
index d5e3142f6..84bc4fe6c 100644
--- a/test/performance/odp_stress.c
+++ b/test/performance/odp_stress.c
@@ -61,6 +61,7 @@ typedef struct test_global_t {
test_stat_t stat[ODP_THREAD_COUNT_MAX];
thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
test_stat_sum_t stat_sum;
+ odp_atomic_u64_t tot_rounds;
} test_global_t;
@@ -219,19 +220,21 @@ static int worker_thread(void *arg)
test_global_t *global = thread_arg->global;
test_options_t *test_options = &global->test_options;
int mode = test_options->mode;
+ int group_mode = test_options->group_mode;
uint64_t mem_size = test_options->mem_size;
uint64_t copy_size = mem_size / 2;
uint64_t rounds = 0;
int ret = 0;
uint32_t done = 0;
uint64_t wait = ODP_SCHED_WAIT;
+ uint64_t tot_rounds = test_options->rounds * test_options->num_cpu;
thr = odp_thread_id();
max_nsec = 2 * test_options->rounds * test_options->period_ns;
max_time = odp_time_local_from_ns(max_nsec);
printf("Thread %i starting on CPU %i\n", thr, odp_cpu_id());
- if (test_options->group_mode == 0) {
+ if (group_mode == 0) {
/* Timeout events are load balanced. Using this
* period to poll exit status. */
wait = odp_schedule_wait_time(100 * ODP_TIME_MSEC_IN_NS);
@@ -280,7 +283,15 @@ static int worker_thread(void *arg)
rounds++;
- if (rounds < test_options->rounds) {
+ if (group_mode) {
+ if (rounds >= test_options->rounds)
+ done = 1;
+ } else {
+ if (odp_atomic_fetch_inc_u64(&global->tot_rounds) >= (tot_rounds - 1))
+ done = 1;
+ }
+
+ if (done == 0) {
tmo = odp_timeout_from_event(ev);
timer = odp_timeout_timer(tmo);
start_param.tmo_ev = ev;
@@ -291,8 +302,6 @@ static int worker_thread(void *arg)
ODPH_ERR("Timer start failed (%" PRIu64 ")\n", rounds);
done = 1;
}
- } else {
- done = 1;
}
/* Do work */
@@ -423,7 +432,10 @@ static int create_timers(test_global_t *global)
return -1;
}
- odp_timer_pool_start();
+ if (odp_timer_pool_start_multi(&tp, 1) != 1) {
+ ODPH_ERR("Timer pool start failed\n");
+ return -1;
+ }
global->period_ticks = odp_timer_ns_to_tick(tp, period_ns);
@@ -530,7 +542,6 @@ static int start_timers(test_global_t *global)
static void destroy_timers(test_global_t *global)
{
uint32_t i;
- odp_event_t ev;
test_options_t *test_options = &global->test_options;
uint32_t num_cpu = test_options->num_cpu;
@@ -540,9 +551,8 @@ static void destroy_timers(test_global_t *global)
if (timer == ODP_TIMER_INVALID)
continue;
- ev = odp_timer_free(timer);
- if (ev != ODP_EVENT_INVALID)
- odp_event_free(ev);
+ if (odp_timer_free(timer))
+ ODPH_ERR("Timer free failed (%u)\n", i);
}
if (global->timer_pool != ODP_TIMER_POOL_INVALID)
@@ -744,6 +754,7 @@ int main(int argc, char **argv)
memset(global, 0, sizeof(test_global_t));
odp_atomic_init_u32(&global->exit_test, 0);
+ odp_atomic_init_u64(&global->tot_rounds, 0);
global->timer_pool = ODP_TIMER_POOL_INVALID;
global->tmo_pool = ODP_POOL_INVALID;
diff --git a/test/performance/odp_timer_perf.c b/test/performance/odp_timer_perf.c
index 279c5600e..8632fcb73 100644
--- a/test/performance/odp_timer_perf.c
+++ b/test/performance/odp_timer_perf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2019-2022, Nokia
+/* Copyright (c) 2019-2023, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -45,7 +45,8 @@ typedef struct test_stat_t {
uint64_t rounds;
uint64_t events;
uint64_t nsec;
- uint64_t cycles;
+ uint64_t cycles_0;
+ uint64_t cycles_1;
uint64_t cancels;
uint64_t sets;
@@ -59,7 +60,8 @@ typedef struct test_stat_sum_t {
uint64_t rounds;
uint64_t events;
uint64_t nsec;
- uint64_t cycles;
+ uint64_t cycles_0;
+ uint64_t cycles_1;
uint64_t cancels;
uint64_t sets;
@@ -133,7 +135,8 @@ static void print_usage(void)
" -m, --mode Select test mode. Default: 0\n"
" 0: Measure odp_schedule() overhead when using timers\n"
" 1: Measure timer set + cancel performance\n"
- " 2: Measure odp_schedule() overhead while continuously restarting expiring timers\n"
+ " 2: Measure schedule and timer start overhead while continuously\n"
+ " restarting expiring timers\n"
" -R, --rounds Number of test rounds in timer set + cancel test.\n"
" Default: 100000\n"
" -h, --help This help\n"
@@ -391,6 +394,11 @@ static int create_timer_pools(test_global_t *global)
return -1;
}
+ if (odp_timer_pool_start_multi(&tp, 1) != 1) {
+ ODPH_ERR("Timer pool start failed (%u)\n", i);
+ return -1;
+ }
+
pool = odp_pool_create(tp_name, &pool_param);
global->pool[i] = pool;
if (pool == ODP_POOL_INVALID) {
@@ -410,8 +418,6 @@ static int create_timer_pools(test_global_t *global)
global->timer_pool[i].start_tick = odp_timer_ns_to_tick(tp, START_NS);
}
- odp_timer_pool_start();
-
printf(" start %" PRIu64 " tick\n", global->timer_pool[0].start_tick);
printf(" period %" PRIu64 " ticks\n", global->timer_pool[0].period_tick);
printf("\n");
@@ -511,7 +517,6 @@ static int destroy_timer_pool(test_global_t *global)
odp_pool_t pool;
odp_queue_t queue;
odp_timer_t timer;
- odp_event_t ev;
uint32_t i, j;
test_options_t *test_options = &global->test_options;
uint32_t num_timer = test_options->num_timer;
@@ -524,13 +529,8 @@ static int destroy_timer_pool(test_global_t *global)
if (timer == ODP_TIMER_INVALID)
break;
- ev = odp_timer_free(timer);
-
- if (ev != ODP_EVENT_INVALID) {
- if (test_options->mode == MODE_SCHED_OVERH)
- printf("Event from timer free %i/%i\n", i, j);
- odp_event_free(ev);
- }
+ if (odp_timer_free(timer))
+ printf("Timer free failed: %i/%i\n", i, j);
}
queue = global->queue[i];
@@ -637,7 +637,7 @@ static int sched_mode_worker(void *arg)
/* Update stats*/
global->stat[thr].events = events;
- global->stat[thr].cycles = cycles;
+ global->stat[thr].cycles_0 = cycles;
global->stat[thr].rounds = rounds;
global->stat[thr].nsec = nsec;
global->stat[thr].before = before;
@@ -646,15 +646,17 @@ static int sched_mode_worker(void *arg)
return ret;
}
-static void cancel_timers(test_global_t *global, uint32_t worker_idx)
+static int cancel_timers(test_global_t *global, uint32_t worker_idx)
{
uint32_t i, j;
+ int r;
odp_timer_t timer;
odp_event_t ev;
test_options_t *test_options = &global->test_options;
uint32_t num_tp = test_options->num_tp;
uint32_t num_timer = test_options->num_timer;
uint32_t num_worker = test_options->num_cpu;
+ int ret = 0;
for (i = 0; i < num_tp; i++) {
for (j = 0; j < num_timer; j++) {
@@ -665,10 +667,20 @@ static void cancel_timers(test_global_t *global, uint32_t worker_idx)
if (timer == ODP_TIMER_INVALID)
continue;
- if (odp_timer_cancel(timer, &ev) == ODP_TIMER_SUCCESS)
+ r = odp_timer_cancel(timer, &ev);
+
+ if (r == ODP_TIMER_SUCCESS) {
odp_event_free(ev);
+ } else if (r == ODP_TIMER_TOO_NEAR) {
+ ret = 1;
+ } else {
+ ret = -1;
+ break;
+ }
}
}
+
+ return ret;
}
static int set_cancel_mode_worker(void *arg)
@@ -812,13 +824,14 @@ static int set_cancel_mode_worker(void *arg)
diff = odp_cpu_cycles_diff(c2, c1);
/* Cancel all timers that belong to this thread */
- cancel_timers(global, worker_idx);
+ if (cancel_timers(global, worker_idx))
+ ODPH_ERR("Timer cancel failed\n");
/* Update stats */
global->stat[thr].events = num_tmo;
global->stat[thr].rounds = test_options->test_rounds - test_rounds;
global->stat[thr].nsec = nsec;
- global->stat[thr].cycles = diff;
+ global->stat[thr].cycles_0 = diff;
global->stat[thr].cancels = num_cancel;
global->stat[thr].sets = num_set;
@@ -832,7 +845,7 @@ static int set_expire_mode_worker(void *arg)
uint32_t i, j, exit_test;
odp_event_t ev;
odp_timeout_t tmo;
- uint64_t c2, diff, nsec, time_ns, target_ns, period_tick;
+ uint64_t c2, c3, c4, diff, nsec, time_ns, target_ns, period_tick, wait;
odp_timer_t timer;
odp_timer_start_t start_param;
odp_time_t t1, t2;
@@ -842,7 +855,8 @@ static int set_expire_mode_worker(void *arg)
test_global_t *global = thread_arg->global;
test_options_t *opt = &global->test_options;
uint32_t num_tp = opt->num_tp;
- uint64_t cycles = 0;
+ uint64_t sched_cycles = 0;
+ uint64_t start_cycles = 0;
uint64_t events = 0;
uint64_t rounds = 0;
uint64_t c1 = 0;
@@ -874,7 +888,7 @@ static int set_expire_mode_worker(void *arg)
c2 = odp_cpu_cycles();
diff = odp_cpu_cycles_diff(c2, c1);
- cycles += diff;
+ sched_cycles += diff;
if (ev == ODP_EVENT_INVALID && exit_test >= num_tp)
break;
@@ -914,8 +928,14 @@ static int set_expire_mode_worker(void *arg)
ctx->target_tick += period_tick;
start_param.tick = ctx->target_tick;
start_param.tmo_ev = ev;
+ c3 = odp_cpu_cycles();
+
status = odp_timer_start(timer, &start_param);
+ c4 = odp_cpu_cycles();
+ diff = odp_cpu_cycles_diff(c4, c3);
+ start_cycles += diff;
+
if (status != ODP_TIMER_SUCCESS) {
ODPH_ERR("Timer set (tmo) failed (ret %i)\n", status);
ret = -1;
@@ -927,11 +947,15 @@ static int set_expire_mode_worker(void *arg)
nsec = odp_time_diff_ns(t2, t1);
/* Cancel all timers that belong to this thread */
- cancel_timers(global, thread_arg->worker_idx);
+ status = cancel_timers(global, thread_arg->worker_idx);
- /* Free already scheduled events */
+ wait = ODP_SCHED_NO_WAIT;
+ if (status > 0)
+ wait = odp_schedule_wait_time(opt->period_ns);
+
+ /* Wait and free remaining events */
while (1) {
- ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ ev = odp_schedule(NULL, wait);
if (ev == ODP_EVENT_INVALID)
break;
odp_event_free(ev);
@@ -939,7 +963,8 @@ static int set_expire_mode_worker(void *arg)
/* Update stats*/
global->stat[thr].events = events;
- global->stat[thr].cycles = cycles;
+ global->stat[thr].cycles_0 = sched_cycles;
+ global->stat[thr].cycles_1 = start_cycles;
global->stat[thr].rounds = rounds;
global->stat[thr].nsec = nsec;
global->stat[thr].before = before;
@@ -1001,7 +1026,8 @@ static void sum_stat(test_global_t *global)
sum->num++;
sum->events += global->stat[i].events;
sum->rounds += global->stat[i].rounds;
- sum->cycles += global->stat[i].cycles;
+ sum->cycles_0 += global->stat[i].cycles_0;
+ sum->cycles_1 += global->stat[i].cycles_1;
sum->nsec += global->stat[i].nsec;
sum->cancels += global->stat[i].cancels;
sum->sets += global->stat[i].sets;
@@ -1041,7 +1067,7 @@ static void print_stat_sched_mode(test_global_t *global)
if ((num % 10) == 0)
printf("\n ");
- printf("%6.1f ", (double)global->stat[i].cycles / global->stat[i].rounds);
+ printf("%6.1f ", (double)global->stat[i].cycles_0 / global->stat[i].rounds);
num++;
}
}
@@ -1083,11 +1109,11 @@ static void print_stat_set_cancel_mode(test_global_t *global)
printf(" 1 2 3 4 5 6 7 8 9 10");
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
- if (global->stat[i].rounds) {
+ if (global->stat[i].sets) {
if ((num % 10) == 0)
printf("\n ");
- printf("%6.1f ", (double)global->stat[i].cycles / global->stat[i].sets);
+ printf("%6.1f ", (double)global->stat[i].cycles_0 / global->stat[i].sets);
num++;
}
}
@@ -1107,6 +1133,72 @@ static void print_stat_set_cancel_mode(test_global_t *global)
printf("\n");
}
+static void print_stat_expire_mode(test_global_t *global)
+{
+ int i;
+ test_stat_sum_t *sum = &global->stat_sum;
+ double round_ave = 0.0;
+ double before_ave = 0.0;
+ double after_ave = 0.0;
+ int num = 0;
+
+ printf("\n");
+ printf("RESULTS\n");
+ printf("odp_schedule() cycles per thread:\n");
+ printf("-------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].rounds) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)global->stat[i].cycles_0 / global->stat[i].rounds);
+ num++;
+ }
+ }
+
+ printf("\n\n");
+
+ num = 0;
+ printf("odp_timer_start() cycles per thread:\n");
+ printf("-------------------------------------------------\n");
+ printf(" 1 2 3 4 5 6 7 8 9 10");
+
+ for (i = 0; i < ODP_THREAD_COUNT_MAX; i++) {
+ if (global->stat[i].events) {
+ if ((num % 10) == 0)
+ printf("\n ");
+
+ printf("%6.1f ", (double)global->stat[i].cycles_1 / global->stat[i].events);
+ num++;
+ }
+ }
+
+ printf("\n\n");
+
+ if (sum->num)
+ round_ave = (double)sum->rounds / sum->num;
+
+ if (sum->before.num)
+ before_ave = (double)sum->before.sum_ns / sum->before.num;
+
+ if (sum->after.num)
+ after_ave = (double)sum->after.sum_ns / sum->after.num;
+
+ printf("TOTAL (%i workers)\n", sum->num);
+ printf(" events: %" PRIu64 "\n", sum->events);
+ printf(" ave time: %.2f sec\n", sum->time_ave);
+ printf(" ave rounds per sec: %.2fM\n", (round_ave / sum->time_ave) / 1000000.0);
+ printf(" num before: %" PRIu64 "\n", sum->before.num);
+ printf(" ave before: %.1f nsec\n", before_ave);
+ printf(" max before: %" PRIu64 " nsec\n", sum->before.max_ns);
+ printf(" num after: %" PRIu64 "\n", sum->after.num);
+ printf(" ave after: %.1f nsec\n", after_ave);
+ printf(" max after: %" PRIu64 " nsec\n", sum->after.max_ns);
+ printf("\n");
+}
+
static void sig_handler(int signo)
{
(void)signo;
@@ -1240,10 +1332,12 @@ int main(int argc, char **argv)
sum_stat(global);
- if (mode == MODE_SCHED_OVERH || mode == MODE_START_EXPIRE)
+ if (mode == MODE_SCHED_OVERH)
print_stat_sched_mode(global);
- else
+ else if (mode == MODE_START_CANCEL)
print_stat_set_cancel_mode(global);
+ else
+ print_stat_expire_mode(global);
destroy_timer_pool(global);
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
index 909608ed8..91cfbfb5f 100644
--- a/test/validation/api/buffer/buffer.c
+++ b/test/validation/api/buffer/buffer.c
@@ -94,6 +94,8 @@ static void test_pool_alloc_free(const odp_pool_param_t *param)
odp_buffer_from_event_multi(&buf, &ev, 1);
CU_ASSERT(buf == buffer[i]);
+ CU_ASSERT(odp_event_pool(ev) == pool);
+
if (odp_event_type(ev) != ODP_EVENT_BUFFER)
wrong_type = true;
if (odp_event_subtype(ev) != ODP_EVENT_NO_SUBTYPE)
diff --git a/test/validation/api/classification/odp_classification_basic.c b/test/validation/api/classification/odp_classification_basic.c
index 2eb5c86b1..ca0b58ad5 100644
--- a/test/validation/api/classification/odp_classification_basic.c
+++ b/test/validation/api/classification/odp_classification_basic.c
@@ -9,8 +9,6 @@
#include "odp_classification_testsuites.h"
#include "classification.h"
-#define PMR_SET_NUM 5
-
/* Limit handle array allocation from stack to about 256kB */
#define MAX_HANDLES (32 * 1024)
@@ -56,9 +54,6 @@ static void cls_create_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(NULL, &cls_param);
CU_ASSERT(odp_cos_to_u64(cos) != odp_cos_to_u64(ODP_COS_INVALID));
@@ -203,9 +198,6 @@ static void cls_destroy_cos(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(name, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -253,9 +245,6 @@ static void cls_create_pmr_match(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create("pmr_match", &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -330,7 +319,7 @@ static void cls_max_pmr_from_default_action(int drop)
CU_ASSERT_FATAL(num_cos > 1);
- num_pmr = num_cos - 1;
+ num_pmr = capa.max_pmr_per_cos;
odp_cos_t cos[num_cos];
odp_queue_t queue[num_cos];
@@ -388,8 +377,10 @@ static void cls_max_pmr_from_default_action(int drop)
for (i = 0; i < num_pmr; i++) {
pmr[i] = odp_cls_pmr_create(&pmr_param, 1, default_cos, cos[i + 1]);
- if (pmr[i] == ODP_PMR_INVALID)
+ if (pmr[i] == ODP_PMR_INVALID) {
+ ODPH_ERR("odp_cls_pmr_create() failed %u / %u\n", i + 1, num_pmr);
break;
+ }
val++;
pmr_created++;
@@ -398,6 +389,8 @@ static void cls_max_pmr_from_default_action(int drop)
printf("\n Number of CoS created: %u\n Number of PMR created: %u\n", cos_created,
pmr_created);
+ CU_ASSERT(pmr_created == num_pmr);
+
for (i = 0; i < pmr_created; i++)
CU_ASSERT(odp_cls_pmr_destroy(pmr[i]) == 0);
@@ -455,6 +448,8 @@ static void cls_create_pmr_multi(void)
CU_ASSERT_FATAL(num_cos > 1);
num_pmr = num_cos - 1;
+ if (num_pmr > capa.max_pmr)
+ num_pmr = capa.max_pmr;
odp_cos_t src_cos[num_cos];
odp_cos_t cos[num_cos];
@@ -561,9 +556,7 @@ static void cls_cos_set_queue(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
+
cos_queue = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos_queue != ODP_COS_INVALID);
@@ -605,9 +598,7 @@ static void cls_cos_set_pool(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
+
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -625,50 +616,11 @@ static void cls_cos_set_pool(void)
odp_pool_destroy(cos_pool);
}
-#if ODP_DEPRECATED_API
-
-static void cls_cos_set_drop(void)
-{
- int retval;
- char cosname[ODP_COS_NAME_LEN];
- odp_cos_t cos_drop;
- odp_queue_t queue;
- odp_pool_t pool;
- odp_cls_cos_param_t cls_param;
-
- pool = pool_create("cls_basic_pool");
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
-
- queue = queue_create("cls_basic_queue", true);
- CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
-
- sprintf(cosname, "CoSDrop");
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue;
- cls_param.drop_policy = ODP_COS_DROP_POOL;
- cos_drop = odp_cls_cos_create(cosname, &cls_param);
- CU_ASSERT_FATAL(cos_drop != ODP_COS_INVALID);
-
- retval = odp_cos_drop_set(cos_drop, ODP_COS_DROP_POOL);
- CU_ASSERT(retval == 0);
- CU_ASSERT(ODP_COS_DROP_POOL == odp_cos_drop(cos_drop));
-
- retval = odp_cos_drop_set(cos_drop, ODP_COS_DROP_NEVER);
- CU_ASSERT(retval == 0);
- CU_ASSERT(ODP_COS_DROP_NEVER == odp_cos_drop(cos_drop));
- odp_cos_destroy(cos_drop);
- odp_pool_destroy(pool);
- odp_queue_destroy(queue);
-}
-
-#endif
-
static void cls_pmr_composite_create(void)
{
+ odp_cls_capability_t capa;
odp_pmr_t pmr_composite;
int retval;
- odp_pmr_param_t pmr_terms[PMR_SET_NUM];
odp_cos_t default_cos;
odp_cos_t cos;
odp_queue_t default_queue;
@@ -678,9 +630,11 @@ static void cls_pmr_composite_create(void)
odp_pool_t pkt_pool;
odp_cls_cos_param_t cls_param;
odp_pktio_t pktio;
+ uint32_t max_terms_per_pmr;
uint16_t val = 1024;
uint16_t mask = 0xffff;
- int i;
+
+ CU_ASSERT_FATAL(odp_cls_capability(&capa) == 0);
pkt_pool = pool_create("pkt_pool");
CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
@@ -700,14 +654,14 @@ static void cls_pmr_composite_create(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create("pmr_match", &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
- for (i = 0; i < PMR_SET_NUM; i++) {
+ max_terms_per_pmr = capa.max_terms_per_pmr;
+ odp_pmr_param_t pmr_terms[max_terms_per_pmr];
+
+ for (uint32_t i = 0; i < max_terms_per_pmr; i++) {
odp_cls_pmr_param_init(&pmr_terms[i]);
pmr_terms[i].term = ODP_PMR_TCP_DPORT;
pmr_terms[i].match.value = &val;
@@ -716,8 +670,7 @@ static void cls_pmr_composite_create(void)
pmr_terms[i].val_sz = sizeof(val);
}
- pmr_composite = odp_cls_pmr_create(pmr_terms, PMR_SET_NUM,
- default_cos, cos);
+ pmr_composite = odp_cls_pmr_create(pmr_terms, max_terms_per_pmr, default_cos, cos);
CU_ASSERT(odp_pmr_to_u64(pmr_composite) !=
odp_pmr_to_u64(ODP_PMR_INVALID));
@@ -793,9 +746,6 @@ odp_testinfo_t classification_suite_basic[] = {
ODP_TEST_INFO(cls_max_pmr_from_default_drop),
ODP_TEST_INFO(cls_max_pmr_from_default_enqueue),
ODP_TEST_INFO(cls_cos_set_queue),
-#if ODP_DEPRECATED_API
- ODP_TEST_INFO(cls_cos_set_drop),
-#endif
ODP_TEST_INFO(cls_cos_set_pool),
ODP_TEST_INFO(cls_pmr_composite_create),
ODP_TEST_INFO_CONDITIONAL(cls_create_cos_with_hash_queues, check_capa_cos_hashing),
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index b88f7beca..7db0e1b5e 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -77,9 +77,6 @@ void configure_default_cos(odp_pktio_t pktio, odp_cos_t *cos,
odp_cls_cos_param_init(&cls_param);
cls_param.pool = default_pool;
cls_param.queue = default_queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
default_cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT(default_cos != ODP_COS_INVALID);
@@ -153,9 +150,6 @@ static void cls_pktin_classifier_flag(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -250,9 +244,6 @@ static void cls_pmr_term_tcp_dport_n(int num_pkt)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT(cos != ODP_COS_INVALID);
@@ -426,9 +417,6 @@ static void test_pmr(const odp_pmr_param_t *pmr_param, odp_packet_t pkt,
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create("PMR test cos", &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -700,9 +688,6 @@ static void cls_pmr_term_dmac(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -1041,9 +1026,6 @@ static void cls_pmr_pool_set(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -1143,9 +1125,6 @@ static void cls_pmr_queue_set(void)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos = odp_cls_cos_create(cosname, &cls_param);
CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
@@ -1447,9 +1426,6 @@ static void test_pmr_series(const int num_udp, int marking)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue_ip;
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos_ip = odp_cls_cos_create("cos_ip", &cls_param);
CU_ASSERT_FATAL(cos_ip != ODP_COS_INVALID);
@@ -1491,9 +1467,6 @@ static void test_pmr_series(const int num_udp, int marking)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool;
cls_param.queue = queue_udp[i];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
cos_udp[i] = odp_cls_cos_create(name, &cls_param);
CU_ASSERT_FATAL(cos_udp[i] != ODP_COS_INVALID);
diff --git a/test/validation/api/classification/odp_classification_tests.c b/test/validation/api/classification/odp_classification_tests.c
index 008034d9a..d81884006 100644
--- a/test/validation/api/classification/odp_classification_tests.c
+++ b/test/validation/api/classification/odp_classification_tests.c
@@ -20,10 +20,6 @@ static odp_pktio_t pktio_loop;
static odp_pktio_capability_t pktio_capa;
static odp_cls_testcase_u tc;
-#ifdef ODP_DEPRECATED
-static int global_num_l2_qos;
-#endif
-
#define NUM_COS_PMR_CHAIN 2
#define NUM_COS_DEFAULT 1
#define NUM_COS_DROP 1
@@ -262,9 +258,6 @@ void configure_cls_pmr_chain(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_CHAIN_SRC];
cls_param.queue = queue_list[CLS_PMR_CHAIN_SRC];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -294,9 +287,6 @@ void configure_cls_pmr_chain(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_CHAIN_DST];
cls_param.queue = queue_list[CLS_PMR_CHAIN_DST];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -416,9 +406,6 @@ void configure_pktio_default_cos(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_DEFAULT];
cls_param.queue = queue_list[CLS_DEFAULT];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -632,9 +619,6 @@ void configure_pktio_error_cos(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_ERROR];
cls_param.queue = queue_list[CLS_ERROR];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -709,110 +693,6 @@ static void cls_pktio_set_headroom(void)
CU_ASSERT(retval < 0);
}
-#ifdef ODP_DEPRECATED
-
-void configure_cos_with_l2_priority(odp_bool_t enable_pktv)
-{
- uint8_t num_qos = CLS_L2_QOS_MAX;
- odp_cos_t cos_tbl[CLS_L2_QOS_MAX];
- odp_queue_t queue_tbl[CLS_L2_QOS_MAX];
- odp_pool_t pool;
- uint8_t qos_tbl[CLS_L2_QOS_MAX];
- char cosname[ODP_COS_NAME_LEN];
- char queuename[ODP_QUEUE_NAME_LEN];
- char poolname[ODP_POOL_NAME_LEN];
- int retval;
- int i;
- odp_queue_param_t qparam;
- odp_cls_cos_param_t cls_param;
-
- /** Initialize scalar variable qos_tbl **/
- for (i = 0; i < CLS_L2_QOS_MAX; i++)
- qos_tbl[i] = 0;
-
- if (odp_schedule_num_prio() < num_qos)
- num_qos = odp_schedule_num_prio();
-
- global_num_l2_qos = num_qos;
-
- odp_queue_param_init(&qparam);
- qparam.type = ODP_QUEUE_TYPE_SCHED;
- qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
- qparam.sched.group = ODP_SCHED_GROUP_ALL;
- for (i = 0; i < num_qos; i++) {
- qparam.sched.prio = odp_schedule_min_prio() + i;
- sprintf(queuename, "%s_%d", "L2_Queue", i);
- queue_tbl[i] = odp_queue_create(queuename, &qparam);
- CU_ASSERT_FATAL(queue_tbl[i] != ODP_QUEUE_INVALID);
- queue_list[CLS_L2_QOS_0 + i] = queue_tbl[i];
-
- sprintf(poolname, "%s_%d", "L2_Pool", i);
- pool = pool_create(poolname);
- CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- pool_list[CLS_L2_QOS_0 + i] = pool;
-
- sprintf(cosname, "%s_%d", "L2_Cos", i);
- odp_cls_cos_param_init(&cls_param);
- cls_param.pool = pool;
- cls_param.queue = queue_tbl[i];
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-
- if (enable_pktv) {
- cls_param.vector.enable = true;
- cls_param.vector.pool = pktv_config.pool;
- cls_param.vector.max_size = pktv_config.max_size;
- cls_param.vector.max_tmo_ns = pktv_config.max_tmo_ns;
- }
-
- cos_tbl[i] = odp_cls_cos_create(cosname, &cls_param);
- if (cos_tbl[i] == ODP_COS_INVALID)
- break;
-
- cos_list[CLS_L2_QOS_0 + i] = cos_tbl[i];
- qos_tbl[i] = i;
- }
- /* count 'i' is passed instead of num_qos to handle the rare scenario
- if the odp_cls_cos_create() failed in the middle*/
- retval = odp_cos_with_l2_priority(pktio_loop, i, qos_tbl, cos_tbl);
- CU_ASSERT(retval == 0);
-}
-
-void test_cos_with_l2_priority(odp_bool_t enable_pktv)
-{
- odp_packet_t pkt;
- odph_ethhdr_t *ethhdr;
- odph_vlanhdr_t *vlan;
- odp_queue_t queue;
- odp_pool_t pool;
- uint32_t seqno = 0;
- cls_packet_info_t pkt_info;
- uint8_t i;
-
- pkt_info = default_pkt_info;
- pkt_info.l4_type = CLS_PKT_L4_UDP;
- pkt_info.vlan = true;
-
- for (i = 0; i < global_num_l2_qos; i++) {
- pkt = create_packet(pkt_info);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- seqno = cls_pkt_get_seq(pkt);
- CU_ASSERT(seqno != TEST_SEQ_INVALID);
- ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- vlan = (odph_vlanhdr_t *)(ethhdr + 1);
- vlan->tci = odp_cpu_to_be_16(i << 13);
- enqueue_pktio_interface(pkt, pktio_loop);
- pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS, enable_pktv);
- CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
- CU_ASSERT(queue == queue_list[CLS_L2_QOS_0 + i]);
- pool = odp_packet_pool(pkt);
- CU_ASSERT(pool == pool_list[CLS_L2_QOS_0 + i]);
- CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
- odp_packet_free(pkt);
- }
-}
-
-#endif
-
void configure_pmr_cos(odp_bool_t enable_pktv)
{
uint16_t val;
@@ -842,9 +722,6 @@ void configure_pmr_cos(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR];
cls_param.queue = queue_list[CLS_PMR];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -927,9 +804,6 @@ void configure_pktio_pmr_composite(odp_bool_t enable_pktv)
odp_cls_cos_param_init(&cls_param);
cls_param.pool = pool_list[CLS_PMR_SET];
cls_param.queue = queue_list[CLS_PMR_SET];
-#if ODP_DEPRECATED_API
- cls_param.drop_policy = ODP_COS_DROP_POOL;
-#endif
if (enable_pktv) {
cls_param.vector.enable = true;
@@ -1029,13 +903,6 @@ static void cls_pktio_configure_common(odp_bool_t enable_pktv)
tc.pmr_chain = 1;
num_cos -= NUM_COS_PMR_CHAIN;
}
-#ifdef ODP_DEPRECATED
- if (num_cos >= NUM_COS_L2_PRIO && TEST_L2_QOS) {
- configure_cos_with_l2_priority(enable_pktv);
- tc.l2_priority = 1;
- num_cos -= NUM_COS_L2_PRIO;
- }
-#endif
if (num_cos >= NUM_COS_PMR && TEST_PMR) {
configure_pmr_cos(enable_pktv);
tc.pmr_cos = 1;
@@ -1070,10 +937,6 @@ static void cls_pktio_test_common(odp_bool_t enable_pktv)
test_pktio_error_cos(enable_pktv);
if (tc.pmr_chain && TEST_PMR_CHAIN)
test_cls_pmr_chain(enable_pktv);
-#ifdef ODP_DEPRECATED
- if (tc.l2_priority && TEST_L2_QOS)
- test_cos_with_l2_priority(enable_pktv);
-#endif
if (tc.pmr_cos && TEST_PMR)
test_pmr_cos(enable_pktv);
if (tc.pmr_composite_cos && TEST_PMR_SET)
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 06e98d4cb..888613b1f 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -40,9 +40,6 @@ typedef union odp_cls_testcase {
uint32_t drop_cos:1;
uint32_t error_cos:1;
uint32_t pmr_chain:1;
-#ifdef ODP_DEPRECATED
- uint32_t l2_priority:1;
-#endif
uint32_t pmr_cos:1;
uint32_t pmr_composite_cos:1;
};
diff --git a/test/validation/api/crypto/crypto_op_test.c b/test/validation/api/crypto/crypto_op_test.c
index 9e8ab0584..1c6944fdf 100644
--- a/test/validation/api/crypto/crypto_op_test.c
+++ b/test/validation/api/crypto/crypto_op_test.c
@@ -115,10 +115,6 @@ int crypto_op(odp_packet_t pkt_in,
*ok = (rc == 0);
-#if ODP_DEPRECATED_API
- CU_ASSERT(*ok == result.ok);
-#endif
-
return 0;
fail:
odp_packet_free(pkt_in);
diff --git a/test/validation/api/dma/dma.c b/test/validation/api/dma/dma.c
index 7129315ef..4f454168d 100644
--- a/test/validation/api/dma/dma.c
+++ b/test/validation/api/dma/dma.c
@@ -330,6 +330,7 @@ static void test_dma_compl_pool(void)
odp_pool_t pool;
odp_pool_info_t pool_info;
odp_dma_compl_t compl[global.dma_capa.max_transfers];
+ odp_event_t ev;
uint64_t u64;
int ret;
uint32_t i, j;
@@ -359,6 +360,11 @@ static void test_dma_compl_pool(void)
if (compl[i] == ODP_DMA_COMPL_INVALID)
break;
+ /* No source pool for DMA completion events */
+ ev = odp_dma_compl_to_event(compl[i]);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+
printf("\n DMA compl handle: 0x%" PRIx64 "\n", u64);
odp_dma_compl_print(compl[i]);
}
@@ -1108,7 +1114,7 @@ static void test_dma_pkt_segs_to_addr_sync(void)
uint8_t *dst;
odp_packet_t pkt;
uint32_t i, len, num_segs;
- uint32_t pkt_len = global.pkt_len;
+ uint32_t pkt_len = ODPH_MIN(global.pkt_len, global.len);
memset(global.dst_addr, 0, global.data_size);
@@ -1128,9 +1134,6 @@ static void test_dma_pkt_segs_to_addr_sync(void)
CU_ASSERT_FATAL(odp_packet_copy_from_mem(pkt, 0, pkt_len, global.src_addr) == 0);
len = pkt_len - OFFSET - TRAILER;
- if (len > global.len)
- len = global.len;
-
dst = global.dst_addr + OFFSET;
memset(&src_seg, 0, sizeof(odp_dma_seg_t));
diff --git a/test/validation/api/ipsec/ipsec.c b/test/validation/api/ipsec/ipsec.c
index da60c77b3..9849a44b5 100644
--- a/test/validation/api/ipsec/ipsec.c
+++ b/test/validation/api/ipsec/ipsec.c
@@ -459,11 +459,13 @@ static void ipsec_status_event_handle(odp_event_t ev_status,
CU_ASSERT_EQUAL(1, odp_event_is_valid(ev_status));
CU_ASSERT_EQUAL_FATAL(ODP_EVENT_IPSEC_STATUS, odp_event_type(ev_status));
- /* No user area for IPsec status events */
+ /* No user area or source pool for IPsec status events */
CU_ASSERT(odp_event_user_area(ev_status) == NULL);
CU_ASSERT(odp_event_user_area_and_flag(ev_status, &flag) == NULL);
CU_ASSERT(flag < 0);
+ CU_ASSERT(odp_event_pool(ev_status) == ODP_POOL_INVALID);
+
CU_ASSERT_EQUAL(0, odp_ipsec_status(&status, ev_status));
CU_ASSERT_EQUAL(ODP_IPSEC_STATUS_WARN, status.id);
CU_ASSERT_EQUAL(sa, status.sa);
diff --git a/test/validation/api/packet/packet.c b/test/validation/api/packet/packet.c
index b33291a28..77a0f8494 100644
--- a/test/validation/api/packet/packet.c
+++ b/test/validation/api/packet/packet.c
@@ -402,6 +402,7 @@ static void packet_test_alloc_free(void)
odp_packet_t packet;
odp_pool_param_t params;
odp_event_subtype_t subtype;
+ odp_event_t ev;
odp_pool_param_init(&params);
@@ -418,13 +419,15 @@ static void packet_test_alloc_free(void)
packet = odp_packet_alloc(pool, packet_len);
CU_ASSERT_FATAL(packet != ODP_PACKET_INVALID);
CU_ASSERT(odp_packet_len(packet) == packet_len);
- CU_ASSERT(odp_event_type(odp_packet_to_event(packet)) ==
- ODP_EVENT_PACKET);
- CU_ASSERT(odp_event_subtype(odp_packet_to_event(packet)) ==
- ODP_EVENT_PACKET_BASIC);
- CU_ASSERT(odp_event_types(odp_packet_to_event(packet), &subtype) ==
- ODP_EVENT_PACKET);
+
+ ev = odp_packet_to_event(packet);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_type(ev) == ODP_EVENT_PACKET);
+ CU_ASSERT(odp_event_subtype(ev) == ODP_EVENT_PACKET_BASIC);
+ CU_ASSERT(odp_event_types(ev, &subtype) == ODP_EVENT_PACKET);
CU_ASSERT(subtype == ODP_EVENT_PACKET_BASIC);
+ CU_ASSERT(odp_event_pool(ev) == pool);
+
CU_ASSERT(odp_packet_subtype(packet) == ODP_EVENT_PACKET_BASIC);
CU_ASSERT(odp_packet_to_u64(packet) !=
odp_packet_to_u64(ODP_PACKET_INVALID));
@@ -3199,6 +3202,7 @@ static void packet_vector_basic_test(void)
odp_pool_capability_t capa;
uint32_t i, num;
uint32_t max_size = PKT_VEC_PACKET_NUM;
+ odp_event_t ev;
CU_ASSERT_FATAL(odp_pool_capability(&capa) == 0);
if (capa.vector.max_size < max_size)
@@ -3209,6 +3213,9 @@ static void packet_vector_basic_test(void)
/* Making sure default vector packet is from default vector pool */
CU_ASSERT(odp_packet_vector_pool(pktv_default) == vector_default_pool)
+ ev = odp_packet_vector_to_event(pktv_default);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ CU_ASSERT(odp_event_pool(ev) == vector_default_pool);
/* Get packet vector table */
num = odp_packet_vector_tbl(pktv_default, &pkt_tbl);
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 5feeea4c0..780d83066 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -3648,11 +3648,13 @@ static void pktio_test_pktout_compl_event(bool use_plain_queue)
CU_ASSERT(odp_packet_tx_compl_user_ptr(tx_compl) ==
(const void *)&pkt_seq[i]);
- /* No user area for TX completion events */
+ /* No user area or source pool for TX completion events */
CU_ASSERT(odp_event_user_area(ev) == NULL);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == NULL);
CU_ASSERT(flag < 0);
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+
/* Alternatively call event free / compl free */
if (i % 2)
odp_packet_tx_compl_free(tx_compl);
@@ -3688,11 +3690,13 @@ static void pktio_test_pktout_compl_event(bool use_plain_queue)
}
}
- /* No user area for TX completion events */
+ /* No user area or source pool for TX completion events */
CU_ASSERT(odp_event_user_area(ev) == NULL);
CU_ASSERT(odp_event_user_area_and_flag(ev, &flag) == NULL);
CU_ASSERT(flag < 0);
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
+
/* Check that sequence number is found */
CU_ASSERT(j < TX_BATCH_LEN);
@@ -3772,6 +3776,11 @@ static void pktio_test_pktout_compl_poll(void)
pktio_rx_info.inq = ODP_QUEUE_INVALID;
pktio_rx_info.in_mode = ODP_PKTIN_MODE_DIRECT;
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ /* Completion status is initially zero */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) == 0);
+ }
+
ret = create_packets(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx, pktio_rx);
CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
@@ -3801,6 +3810,9 @@ static void pktio_test_pktout_compl_poll(void)
CU_ASSERT(odp_packet_has_tx_compl_request(pkt_tbl[i]) != 0);
/* Set pkt sequence number as its user ptr */
odp_packet_user_ptr_set(pkt_tbl[i], (const void *)&pkt_seq[i]);
+
+ /* Completion status should be still zero after odp_packet_tx_compl_request() */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) == 0);
}
CU_ASSERT_FATAL(odp_pktout_send(pktout_queue, pkt_tbl, TX_BATCH_LEN) == TX_BATCH_LEN);
@@ -3811,10 +3823,12 @@ static void pktio_test_pktout_compl_poll(void)
for (i = 0; i < num_rx; i++)
odp_packet_free(pkt_tbl[i]);
- /* Transmits should be complete since we received the packets already */
for (i = 0; i < num_rx; i++) {
- ret = odp_packet_tx_compl_done(pktio_tx, i);
- CU_ASSERT(ret > 0);
+ /* Transmits should be complete since we received the packets already */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) > 0);
+
+ /* Check that the previous call did not clear the status */
+ CU_ASSERT(odp_packet_tx_compl_done(pktio_tx, i) > 0);
}
for (i = 0; i < num_ifaces; i++) {
diff --git a/test/validation/api/pool/pool.c b/test/validation/api/pool/pool.c
index 8515f9b64..ee8aa4b67 100644
--- a/test/validation/api/pool/pool.c
+++ b/test/validation/api/pool/pool.c
@@ -1324,7 +1324,7 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
odp_pool_stats_selected_t selected;
odp_pool_param_t param;
odp_pool_stats_opt_t supported;
- uint32_t i, j, num_pool, num_obj, cache_size;
+ uint32_t i, j, num_pool, num_obj, cache_size, num_thr;
uint32_t max_pools = 2;
uint16_t first = 0;
uint16_t last = ODP_POOL_MAX_THREAD_STATS - 1;
@@ -1407,9 +1407,14 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
uint32_t num_events = 0;
uint32_t num_fails = 0;
+ memset(&stats, 0xff, sizeof(odp_pool_stats_t));
+ memset(&selected, 0xff, sizeof(odp_pool_stats_selected_t));
+
CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
+
stats.thread.first = first;
stats.thread.last = last;
+ num_thr = last - first + 1;
CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected, &supported) == 0);
@@ -1440,8 +1445,11 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
CU_ASSERT(stats.thread.first == first);
CU_ASSERT(stats.thread.last == last);
- for (j = 0; j < ODP_POOL_MAX_THREAD_STATS; j++)
- CU_ASSERT(stats.thread.cache_available[j] <= stats.cache_available);
+
+ if (supported.bit.thread_cache_available) {
+ for (j = 0; j < num_thr; j++)
+ CU_ASSERT(stats.thread.cache_available[j] <= stats.cache_available);
+ }
/* Allocate the events */
for (j = 0; j < num_allocs; j++) {
@@ -1487,26 +1495,31 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
if (supported.bit.cache_available)
CU_ASSERT(selected.cache_available <= num_obj - num_events);
- while (first_id < odp_thread_count_max()) {
- stats.thread.first = first_id;
- stats.thread.last = last_id;
- CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
-
- for (int i = 0; i < ODP_POOL_MAX_THREAD_STATS; i++) {
- uint64_t cached = stats.thread.cache_available[i];
-
- CU_ASSERT(cached <= num_obj - num_events);
- total_cached += cached;
- }
- first_id = last_id + 1;
- last_id += ODP_POOL_MAX_THREAD_STATS;
- if (last_id >= odp_thread_count_max())
- last_id = odp_thread_count_max() - 1;
- };
-
- if (supported.bit.cache_available && supported.bit.thread_cache_available &&
- ODP_POOL_MAX_THREAD_STATS >= odp_thread_count_max())
- CU_ASSERT(stats.cache_available == total_cached);
+ if (supported.bit.thread_cache_available) {
+ while (first_id < odp_thread_count_max()) {
+ memset(&stats, 0xff, sizeof(odp_pool_stats_t));
+
+ stats.thread.first = first_id;
+ stats.thread.last = last_id;
+ num_thr = last_id - first_id + 1;
+ CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
+
+ for (uint32_t k = 0; k < num_thr; k++) {
+ uint64_t cached = stats.thread.cache_available[k];
+
+ CU_ASSERT(cached <= num_obj - num_events);
+ total_cached += cached;
+ }
+ first_id = last_id + 1;
+ last_id += ODP_POOL_MAX_THREAD_STATS;
+ if (last_id >= odp_thread_count_max())
+ last_id = odp_thread_count_max() - 1;
+ };
+
+ if (supported.bit.cache_available &&
+ ODP_POOL_MAX_THREAD_STATS >= odp_thread_count_max())
+ CU_ASSERT(stats.cache_available == total_cached);
+ }
}
CU_ASSERT(num_events == num_obj);
@@ -1515,8 +1528,12 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
/* Allow implementation some time to update counters */
odp_time_wait_ns(ODP_TIME_MSEC_IN_NS);
+ memset(&stats, 0xff, sizeof(odp_pool_stats_t));
+ memset(&selected, 0xff, sizeof(odp_pool_stats_selected_t));
+
stats.thread.first = first;
stats.thread.last = last;
+ num_thr = last - first + 1;
CU_ASSERT_FATAL(odp_pool_stats(pool[i], &stats) == 0);
CU_ASSERT_FATAL(odp_pool_stats_selected(pool[i], &selected, &supported) == 0);
@@ -1528,8 +1545,10 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
CU_ASSERT(stats.cache_available == 0);
if (supported.bit.cache_available)
CU_ASSERT(selected.cache_available == 0);
- for (j = 0; j < ODP_POOL_MAX_THREAD_STATS; j++)
- CU_ASSERT(stats.thread.cache_available[j] == 0);
+ if (supported.bit.thread_cache_available) {
+ for (j = 0; j < num_thr; j++)
+ CU_ASSERT(stats.thread.cache_available[j] == 0);
+ }
if (supported.bit.alloc_ops) {
CU_ASSERT(stats.alloc_ops > 0 && stats.alloc_ops <= num_allocs);
CU_ASSERT(selected.alloc_ops > 0 && selected.alloc_ops <= num_allocs);
@@ -1587,8 +1606,9 @@ static void pool_test_pool_statistics(odp_pool_type_t pool_type)
printf(" cache_available: %" PRIu64 "\n", stats.cache_available);
printf(" cache_alloc_ops: %" PRIu64 "\n", stats.cache_alloc_ops);
printf(" cache_free_ops: %" PRIu64 "\n", stats.cache_free_ops);
- printf(" thread.cache_available[0]: %" PRIu64 "\n",
- stats.thread.cache_available[0]);
+ if (supported.bit.thread_cache_available)
+ printf(" thread.cache_available[0]: %" PRIu64 "\n",
+ stats.thread.cache_available[0]);
}
CU_ASSERT_FATAL(odp_pool_stats_reset(pool[i]) == 0);
diff --git a/test/validation/api/thread/thread.c b/test/validation/api/thread/thread.c
index 87c23e74e..840256fcf 100644
--- a/test/validation/api/thread/thread.c
+++ b/test/validation/api/thread/thread.c
@@ -104,13 +104,31 @@ static void thread_test_odp_thread_count(void)
{
int count = odp_thread_count();
- /* One thread running */
+ /* One control thread running */
CU_ASSERT(count == 1);
+ CU_ASSERT(odp_thread_control_count() == 1);
+ CU_ASSERT(odp_thread_control_count() <= odp_thread_control_count_max());
+ CU_ASSERT(odp_thread_worker_count() == 0);
CU_ASSERT(count >= 1);
CU_ASSERT(count <= odp_thread_count_max());
CU_ASSERT(count <= ODP_THREAD_COUNT_MAX);
- CU_ASSERT(odp_thread_count_max() <= ODP_THREAD_COUNT_MAX);
+}
+
+static void thread_test_odp_thread_count_max(void)
+{
+ int max_threads = odp_thread_count_max();
+ int max_control = odp_thread_control_count_max();
+ int max_worker = odp_thread_worker_count_max();
+
+ CU_ASSERT(max_threads > 0);
+ CU_ASSERT(max_threads <= ODP_THREAD_COUNT_MAX);
+
+ CU_ASSERT(max_control >= 0);
+ CU_ASSERT(max_control <= max_threads);
+
+ CU_ASSERT(max_worker >= 0);
+ CU_ASSERT(max_worker <= max_threads);
}
static int thread_func(void *arg)
@@ -175,7 +193,9 @@ static void thread_test_odp_thrmask_worker(void)
ret = odp_thrmask_worker(&mask);
CU_ASSERT(ret == odp_thrmask_count(&mask));
CU_ASSERT(ret == num);
+ CU_ASSERT(ret == odp_thread_worker_count());
CU_ASSERT(ret <= odp_thread_count_max());
+ CU_ASSERT(ret <= odp_thread_worker_count_max());
/* allow thread(s) to exit */
odp_barrier_wait(&global_mem->bar_exit);
@@ -194,9 +214,10 @@ static void thread_test_odp_thrmask_control(void)
CU_ASSERT(odp_thread_type() == ODP_THREAD_CONTROL);
- /* should start out with 1 worker thread */
+ /* Should start out with 1 control thread */
ret = odp_thrmask_control(&mask);
CU_ASSERT(ret == odp_thrmask_count(&mask));
+ CU_ASSERT(ret == odp_thread_control_count());
CU_ASSERT(ret == 1);
}
@@ -204,6 +225,7 @@ odp_testinfo_t thread_suite[] = {
ODP_TEST_INFO(thread_test_odp_cpu_id),
ODP_TEST_INFO(thread_test_odp_thread_id),
ODP_TEST_INFO(thread_test_odp_thread_count),
+ ODP_TEST_INFO(thread_test_odp_thread_count_max),
ODP_TEST_INFO(thread_test_odp_thrmask_to_from_str),
ODP_TEST_INFO(thread_test_odp_thrmask_equal),
ODP_TEST_INFO(thread_test_odp_thrmask_zero),
diff --git a/test/validation/api/time/time.c b/test/validation/api/time/time.c
index 7664ec542..8d3481e26 100644
--- a/test/validation/api/time/time.c
+++ b/test/validation/api/time/time.c
@@ -25,6 +25,7 @@
#define TIME_TOLERANCE_NS 1000000
#define TIME_TOLERANCE_CI_NS 40000000
#define GLOBAL_SHM_NAME "GlobalTimeTest"
+#define YEAR_IN_NS (365 * 24 * ODP_TIME_HOUR_IN_NS)
static uint64_t local_res;
static uint64_t global_res;
@@ -145,6 +146,35 @@ static void time_test_constants(void)
CU_ASSERT(ns == ODP_TIME_USEC_IN_NS);
}
+static void time_test_startup_time(void)
+{
+ odp_time_startup_t startup;
+ uint64_t ns1, ns2, ns3;
+ odp_time_t time;
+
+ memset(&startup, 0, sizeof(odp_time_startup_t));
+
+ odp_time_startup(&startup);
+ ns1 = startup.global_ns;
+ ns2 = odp_time_to_ns(startup.global);
+
+ CU_ASSERT(UINT64_MAX - ns1 >= 10 * YEAR_IN_NS);
+ CU_ASSERT(UINT64_MAX - ns2 >= 10 * YEAR_IN_NS);
+
+ time = odp_time_global();
+ ns3 = odp_time_to_ns(time);
+ CU_ASSERT(odp_time_cmp(time, startup.global) > 0);
+
+ time = odp_time_global_from_ns(10 * YEAR_IN_NS);
+ time = odp_time_sum(startup.global, time);
+ CU_ASSERT(odp_time_cmp(time, startup.global) > 0);
+
+ printf("\n");
+ printf(" Startup time in nsec: %" PRIu64 "\n", ns1);
+ printf(" Startup time to nsec: %" PRIu64 "\n", ns2);
+ printf(" Nsec since startup: %" PRIu64 "\n\n", ns3 - startup.global_ns);
+}
+
static void time_test_res(time_res_cb time_res, uint64_t *res)
{
uint64_t rate;
@@ -225,6 +255,7 @@ static void time_test_monotony(void)
uint64_t gs_ns1, gs_ns2, gs_ns3;
uint64_t ns1, ns2, ns3;
uint64_t s_ns1, s_ns2, s_ns3;
+ uint64_t limit;
l_t1 = odp_time_local();
ls_t1 = odp_time_local_strict();
@@ -276,12 +307,13 @@ static void time_test_monotony(void)
s_ns2 = odp_time_to_ns(ls_t2);
s_ns3 = odp_time_to_ns(ls_t3);
- /* Time counting starts from zero. Assuming that the ODP instance has run
- * less than 10 minutes before running this test case. */
- CU_ASSERT(ns1 < 10 * ODP_TIME_MIN_IN_NS);
- CU_ASSERT(s_ns1 < 10 * ODP_TIME_MIN_IN_NS);
- CU_ASSERT(l_ns1 < 10 * ODP_TIME_MIN_IN_NS);
- CU_ASSERT(ls_ns1 < 10 * ODP_TIME_MIN_IN_NS);
+ /* Time should not wrap in at least 10 years from ODP start. Ignoring delay from start up
+ * and other test cases, which should be few seconds. */
+ limit = 10 * YEAR_IN_NS;
+ CU_ASSERT(UINT64_MAX - ns1 > limit);
+ CU_ASSERT(UINT64_MAX - s_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - l_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - ls_ns1 > limit);
/* Time stamp */
CU_ASSERT(ns2 > ns1);
@@ -321,12 +353,13 @@ static void time_test_monotony(void)
s_ns2 = odp_time_to_ns(gs_t2);
s_ns3 = odp_time_to_ns(gs_t3);
- /* Time counting starts from zero. Assuming that the ODP instance has run
- * less than 10 minutes before running this test case. */
- CU_ASSERT(ns1 < 10 * ODP_TIME_MIN_IN_NS);
- CU_ASSERT(s_ns1 < 10 * ODP_TIME_MIN_IN_NS);
- CU_ASSERT(g_ns1 < 10 * ODP_TIME_MIN_IN_NS);
- CU_ASSERT(gs_ns1 < 10 * ODP_TIME_MIN_IN_NS);
+ /* Time should not wrap in at least 10 years from ODP start. Ignoring delay from start up
+ * and other test cases, which should be few seconds. */
+ limit = 10 * YEAR_IN_NS;
+ CU_ASSERT(UINT64_MAX - ns1 > limit);
+ CU_ASSERT(UINT64_MAX - s_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - g_ns1 > limit);
+ CU_ASSERT(UINT64_MAX - gs_ns1 > limit);
/* Time stamp */
CU_ASSERT(ns2 > ns1);
@@ -540,7 +573,7 @@ static void time_test_sum(time_cb time_cur,
uint64_t res)
{
odp_time_t sum, t1, t2;
- uint64_t nssum, ns1, ns2, ns;
+ uint64_t nssum, ns1, ns2, ns, diff;
uint64_t upper_limit, lower_limit;
/* sum timestamp and interval */
@@ -574,6 +607,27 @@ static void time_test_sum(time_cb time_cur,
/* test on 0 */
sum = odp_time_sum(t2, ODP_TIME_NULL);
CU_ASSERT(odp_time_cmp(t2, sum) == 0);
+
+ /* test add nsec */
+ ns = ODP_TIME_SEC_IN_NS;
+ upper_limit = ns + 2 * res;
+ lower_limit = ns - 2 * res;
+
+ t1 = time_cur();
+ t2 = odp_time_add_ns(t1, ns);
+
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+
+ diff = odp_time_diff_ns(t2, t1);
+ CU_ASSERT((diff <= upper_limit) && (diff >= lower_limit));
+
+ t1 = ODP_TIME_NULL;
+ t2 = odp_time_add_ns(t1, ns);
+
+ CU_ASSERT(odp_time_cmp(t2, t1) > 0);
+
+ diff = odp_time_diff_ns(t2, t1);
+ CU_ASSERT((diff <= upper_limit) && (diff >= lower_limit));
}
static void time_test_local_sum(void)
@@ -704,7 +758,7 @@ static void time_test_accuracy(time_cb time_cur,
wait = odp_time_sum(t1[0], sec);
for (i = 0; i < 5; i++) {
odp_time_wait_until(wait);
- wait = odp_time_sum(wait, sec);
+ wait = odp_time_add_ns(wait, ODP_TIME_SEC_IN_NS);
}
i = clock_gettime(CLOCK_MONOTONIC, &ts2);
@@ -914,6 +968,7 @@ static void time_test_global_sync_control(void)
odp_testinfo_t time_suite_time[] = {
ODP_TEST_INFO(time_test_constants),
+ ODP_TEST_INFO(time_test_startup_time),
ODP_TEST_INFO(time_test_local_res),
ODP_TEST_INFO(time_test_local_conversion),
ODP_TEST_INFO(time_test_local_cmp),
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index db66b3664..28b1399a2 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -37,6 +37,7 @@
#define THREE_POINT_THREE_MSEC (10 * ODP_TIME_MSEC_IN_NS / 3)
#define USER_PTR ((void *)0xdead)
#define TICK_INVALID (~(uint64_t)0)
+#define YEAR_IN_NS (365 * 24 * ODP_TIME_HOUR_IN_NS)
/* Test case options */
#define PRIV 1
@@ -64,14 +65,8 @@ struct test_timer {
};
typedef struct {
- /* Clock source support flags */
- uint8_t clk_supported[ODP_CLOCK_NUM_SRC];
-
- /* Periodic timer support per clock source*/
- uint8_t periodic_support[ODP_CLOCK_NUM_SRC];
-
- /* Periodic timers not supported with any clock source */
- int no_periodic;
+ /* Periodic timer support */
+ int periodic_support;
/* Default resolution / timeout parameters */
struct {
@@ -106,19 +101,21 @@ typedef struct {
} global_shared_mem_t;
+typedef struct {
+ odp_timer_clk_src_t clk_src;
+ global_shared_mem_t global_mem;
+
+} test_global_t;
+
static global_shared_mem_t *global_mem;
+static test_global_t *test_global;
+static odp_shm_t global_shm;
+static odp_instance_t inst;
-static int timer_global_init(odp_instance_t *inst)
+static int global_init(void)
{
- odp_shm_t global_shm;
odp_init_t init_param;
odph_helper_options_t helper_options;
- odp_timer_capability_t capa;
- odp_timer_res_capability_t res_capa;
- uint64_t res_ns, min_tmo, max_tmo;
- unsigned int range;
- int i;
- int num_periodic = 0;
if (odph_options(&helper_options)) {
ODPH_ERR("odph_options() failed\n");
@@ -128,35 +125,73 @@ static int timer_global_init(odp_instance_t *inst)
odp_init_param_init(&init_param);
init_param.mem_model = helper_options.mem_model;
- if (0 != odp_init_global(inst, &init_param, NULL)) {
+ if (0 != odp_init_global(&inst, &init_param, NULL)) {
ODPH_ERR("odp_init_global() failed\n");
return -1;
}
- if (0 != odp_init_local(*inst, ODP_THREAD_CONTROL)) {
+ if (0 != odp_init_local(inst, ODP_THREAD_CONTROL)) {
ODPH_ERR("odp_init_local() failed\n");
return -1;
}
global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
- sizeof(global_shared_mem_t),
+ sizeof(test_global_t),
ODP_CACHE_LINE_SIZE, 0);
if (global_shm == ODP_SHM_INVALID) {
ODPH_ERR("Unable to reserve memory for global_shm\n");
return -1;
}
- global_mem = odp_shm_addr(global_shm);
- memset(global_mem, 0, sizeof(global_shared_mem_t));
+ test_global = odp_shm_addr(global_shm);
+ memset(test_global, 0, sizeof(*test_global));
+ global_mem = &test_global->global_mem;
/* Configure scheduler */
odp_schedule_config(NULL);
+ return 0;
+}
+
+static int global_term(void)
+{
+ if (0 != odp_shm_free(global_shm)) {
+ ODPH_ERR("odp_shm_free() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_local()) {
+ ODPH_ERR("odp_term_local() failed\n");
+ return -1;
+ }
+
+ if (0 != odp_term_global(inst)) {
+ ODPH_ERR("odp_term_global() failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int timer_global_init(odp_instance_t *instance)
+{
+ odp_timer_capability_t capa;
+ odp_timer_res_capability_t res_capa;
+ uint64_t res_ns, min_tmo, max_tmo;
+ unsigned int range;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
+
+ *instance = inst;
+
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
memset(&capa, 0, sizeof(capa));
- if (odp_timer_capability(ODP_CLOCK_DEFAULT, &capa)) {
+ if (odp_timer_capability(clk_src, &capa)) {
ODPH_ERR("Timer capability failed\n");
return -1;
}
+ global_mem->periodic_support = capa.periodic.max_pools > 0;
+
/* By default 20 msec resolution */
res_ns = 20 * ODP_TIME_MSEC_IN_NS;
if (res_ns < capa.max_res.res_ns)
@@ -165,7 +200,7 @@ static int timer_global_init(odp_instance_t *inst)
memset(&res_capa, 0, sizeof(res_capa));
res_capa.res_ns = res_ns;
- if (odp_timer_res_capability(ODP_CLOCK_DEFAULT, &res_capa)) {
+ if (odp_timer_res_capability(clk_src, &res_capa)) {
ODPH_ERR("Timer resolution capability failed\n");
return -1;
}
@@ -193,42 +228,12 @@ static int timer_global_init(odp_instance_t *inst)
global_mem->param.queue_type_plain = capa.queue_type_plain;
global_mem->param.queue_type_sched = capa.queue_type_sched;
- /* Check which clock sources are supported */
- for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
- if (odp_timer_capability(ODP_CLOCK_SRC_0 + i, &capa) == 0) {
- global_mem->clk_supported[i] = 1;
-
- if (capa.periodic.max_pools) {
- global_mem->periodic_support[i] = 1;
- num_periodic++;
- }
- }
- }
-
- global_mem->no_periodic = !num_periodic;
-
return 0;
}
static int timer_global_term(odp_instance_t inst)
{
- odp_shm_t shm;
-
- shm = odp_shm_lookup(GLOBAL_SHM_NAME);
- if (0 != odp_shm_free(shm)) {
- ODPH_ERR("odp_shm_free() failed\n");
- return -1;
- }
-
- if (0 != odp_term_local()) {
- ODPH_ERR("odp_term_local() failed\n");
- return -1;
- }
-
- if (0 != odp_term_global(inst)) {
- ODPH_ERR("odp_term_global() failed\n");
- return -1;
- }
+ (void)inst;
return 0;
}
@@ -253,15 +258,15 @@ check_plain_queue_support(void)
static int check_periodic_support(void)
{
- if (global_mem->no_periodic)
- return ODP_TEST_INACTIVE;
+ if (global_mem->periodic_support)
+ return ODP_TEST_ACTIVE;
- return ODP_TEST_ACTIVE;
+ return ODP_TEST_INACTIVE;
}
static int check_periodic_sched_support(void)
{
- if (global_mem->periodic_support[0] && global_mem->param.queue_type_sched)
+ if (global_mem->periodic_support && global_mem->param.queue_type_sched)
return ODP_TEST_ACTIVE;
return ODP_TEST_INACTIVE;
@@ -269,17 +274,18 @@ static int check_periodic_sched_support(void)
static int check_periodic_plain_support(void)
{
- if (global_mem->periodic_support[0] && global_mem->param.queue_type_plain)
+ if (global_mem->periodic_support && global_mem->param.queue_type_plain)
return ODP_TEST_ACTIVE;
return ODP_TEST_INACTIVE;
}
-static void timer_test_capa_run(odp_timer_clk_src_t clk_src)
+static void timer_test_capa(void)
{
odp_timer_capability_t capa;
odp_timer_res_capability_t res_capa;
int ret;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
ret = odp_timer_capability(clk_src, &capa);
@@ -352,8 +358,9 @@ static void timer_test_capa_run(odp_timer_clk_src_t clk_src)
}
}
-static void timer_test_capa(void)
+static void timer_test_capa_allsrc(void)
{
+ odp_timer_capability_t capa;
odp_timer_clk_src_t clk_src;
int i;
@@ -365,25 +372,16 @@ static void timer_test_capa(void)
CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 4 == ODP_CLOCK_SRC_4);
CU_ASSERT_FATAL(ODP_CLOCK_SRC_0 + 5 == ODP_CLOCK_SRC_5);
CU_ASSERT_FATAL(ODP_CLOCK_SRC_5 + 1 == ODP_CLOCK_NUM_SRC);
-#if ODP_DEPRECATED_API
- CU_ASSERT_FATAL(ODP_CLOCK_CPU == ODP_CLOCK_DEFAULT);
- CU_ASSERT_FATAL(ODP_CLOCK_EXT == ODP_CLOCK_SRC_1);
-#endif
+
+ CU_ASSERT(odp_timer_capability(ODP_CLOCK_DEFAULT, &capa) == 0);
for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
- odp_timer_capability_t capa;
int ret;
clk_src = ODP_CLOCK_SRC_0 + i;
ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT(ret == 0 || ret == -1);
-
- if (global_mem->clk_supported[i]) {
- ODPH_DBG("\nTesting clock source: %i\n", clk_src);
- CU_ASSERT(ret == 0);
- timer_test_capa_run(clk_src);
- }
}
}
@@ -449,6 +447,9 @@ static void timer_test_timeout_pool_alloc(void)
wrong_type = true;
if (subtype != ODP_EVENT_NO_SUBTYPE)
wrong_subtype = true;
+
+ /* No source pool for timeout events */
+ CU_ASSERT(odp_event_pool(ev) == ODP_POOL_INVALID);
}
/* Check that the pool had at least num items */
@@ -675,9 +676,10 @@ static void timer_pool_create_destroy(void)
odp_timer_t tim;
odp_queue_t queue;
int ret;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
+ ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT_FATAL(ret == 0);
odp_queue_param_init(&queue_param);
@@ -696,16 +698,16 @@ static void timer_pool_create_destroy(void)
tparam.max_tmo = global_mem->param.max_tmo;
tparam.num_timers = 100;
tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_DEFAULT;
+ tparam.clk_src = clk_src;
tp[0] = odp_timer_pool_create("timer_pool_a", &tparam);
CU_ASSERT(tp[0] != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp[0], 1) == 1);
tim = odp_timer_alloc(tp[0], queue, USER_PTR);
CU_ASSERT(tim != ODP_TIMER_INVALID);
- CU_ASSERT(odp_timer_free(tim) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
odp_timer_pool_destroy(tp[0]);
@@ -714,14 +716,14 @@ static void timer_pool_create_destroy(void)
tp[1] = odp_timer_pool_create("timer_pool_c", &tparam);
CU_ASSERT(tp[1] != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(tp, 2) == 2);
odp_timer_pool_destroy(tp[0]);
tp[0] = odp_timer_pool_create("timer_pool_d", &tparam);
CU_ASSERT(tp[0] != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp[0], 1) == 1);
memset(&info, 0, sizeof(odp_timer_pool_info_t));
CU_ASSERT(odp_timer_pool_info(tp[1], &info) == 0);
@@ -729,7 +731,7 @@ static void timer_pool_create_destroy(void)
tim = odp_timer_alloc(tp[1], queue, USER_PTR);
CU_ASSERT(tim != ODP_TIMER_INVALID);
- CU_ASSERT(odp_timer_free(tim) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
odp_timer_pool_destroy(tp[1]);
@@ -739,7 +741,7 @@ static void timer_pool_create_destroy(void)
tim = odp_timer_alloc(tp[0], queue, USER_PTR);
CU_ASSERT(tim != ODP_TIMER_INVALID);
- CU_ASSERT(odp_timer_free(tim) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
odp_timer_pool_destroy(tp[0]);
@@ -756,9 +758,10 @@ static void timer_pool_create_max(void)
int ret;
uint64_t tmo_ns = ODP_TIME_SEC_IN_NS;
uint64_t res_ns = ODP_TIME_SEC_IN_NS / 10;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
+ ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT_FATAL(ret == 0);
uint32_t num = capa.max_pools;
@@ -788,6 +791,7 @@ static void timer_pool_create_max(void)
tp_param.min_tmo = tmo_ns / 2;
tp_param.max_tmo = tmo_ns;
tp_param.num_timers = 1;
+ tp_param.clk_src = clk_src;
for (i = 0; i < num; i++) {
tp[i] = odp_timer_pool_create("test_max", &tp_param);
@@ -797,7 +801,7 @@ static void timer_pool_create_max(void)
CU_ASSERT_FATAL(tp[i] != ODP_TIMER_POOL_INVALID);
}
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(tp, num) == (int)num);
for (i = 0; i < num; i++) {
timer[i] = odp_timer_alloc(tp[i], queue, USER_PTR);
@@ -812,7 +816,7 @@ static void timer_pool_create_max(void)
}
for (i = 0; i < num; i++)
- CU_ASSERT(odp_timer_free(timer[i]) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(timer[i]) == 0);
for (i = 0; i < num; i++)
odp_timer_pool_destroy(tp[i]);
@@ -835,9 +839,10 @@ static void timer_pool_max_res(void)
odp_event_t ev;
uint64_t tick;
int ret, i;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
+ ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT_FATAL(ret == 0);
odp_pool_param_init(&pool_param);
@@ -874,12 +879,12 @@ static void timer_pool_max_res(void)
tp_param.max_tmo = capa.max_res.max_tmo;
tp_param.num_timers = 100;
tp_param.priv = 0;
- tp_param.clk_src = ODP_CLOCK_DEFAULT;
+ tp_param.clk_src = clk_src;
tp = odp_timer_pool_create("high_res_tp", &tp_param);
CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
/* Maximum timeout length with maximum resolution */
tick = odp_timer_ns_to_tick(tp, capa.max_res.max_tmo);
@@ -907,7 +912,7 @@ static void timer_pool_max_res(void)
odp_event_free(ev);
}
- CU_ASSERT(odp_timer_free(timer) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(timer) == 0);
odp_timer_pool_destroy(tp);
}
@@ -978,9 +983,10 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
odp_time_t t1, t2;
uint64_t tick, nsec, res_ns, min_tmo;
int ret, i;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
+ ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT_FATAL(capa.max_tmo.max_tmo > 0);
@@ -991,7 +997,7 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
memset(&res_capa, 0, sizeof(res_capa));
res_capa.max_tmo = tmo_ns;
- ret = odp_timer_res_capability(ODP_CLOCK_DEFAULT, &res_capa);
+ ret = odp_timer_res_capability(clk_src, &res_capa);
CU_ASSERT_FATAL(ret == 0);
CU_ASSERT_FATAL(res_capa.res_ns > 0);
@@ -1028,11 +1034,12 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
tp_param.min_tmo = min_tmo;
tp_param.max_tmo = tmo_ns;
tp_param.num_timers = 1;
+ tp_param.clk_src = clk_src;
tp = odp_timer_pool_create("test_single", &tp_param);
CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
timer = odp_timer_alloc(tp, queue, USER_PTR);
CU_ASSERT_FATAL(timer != ODP_TIMER_INVALID);
@@ -1115,7 +1122,7 @@ static void timer_single_shot(odp_queue_type_t queue_type, odp_timer_tick_type_t
free_schedule_context(queue_type);
- CU_ASSERT(odp_timer_free(timer) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(timer) == 0);
odp_timer_pool_destroy(tp);
CU_ASSERT(odp_queue_destroy(queue) == 0);
@@ -1212,13 +1219,14 @@ static void timer_sched_abs_wait_3sec(void)
timer_single_shot(ODP_QUEUE_TYPE_SCHED, ABSOLUTE, START, TIMEOUT, 30, 110 * MSEC);
}
-static void timer_pool_current_tick_run(odp_timer_clk_src_t clk_src)
+static void timer_pool_current_tick(void)
{
odp_timer_capability_t capa;
odp_timer_pool_param_t tp_param;
odp_timer_pool_t tp;
- uint64_t t1, t2, ticks, min, max;
+ uint64_t t1, t2, ticks, min, max, limit;
uint64_t nsec = 100 * ODP_TIME_MSEC_IN_NS;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
@@ -1234,6 +1242,7 @@ static void timer_pool_current_tick_run(odp_timer_clk_src_t clk_src)
tp = odp_timer_pool_create("cur_tick", &tp_param);
CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+ /* API to be deprecated */
odp_timer_pool_start();
/* Allow +-10% error margin */
@@ -1252,6 +1261,13 @@ static void timer_pool_current_tick_run(odp_timer_clk_src_t clk_src)
CU_ASSERT(ticks >= min);
CU_ASSERT(ticks <= max);
+ /* Timer tick (or tick in nsec) should not wrap in at least 10 years from ODP start.
+ * Ignoring delay from start up and other test cases, which should be few seconds. */
+ limit = 10 * YEAR_IN_NS;
+ nsec = odp_timer_tick_to_ns(tp, t1);
+ CU_ASSERT(UINT64_MAX - nsec > limit);
+ CU_ASSERT(UINT64_MAX - t1 > odp_timer_ns_to_tick(tp, limit));
+
printf("\nClock source %i\n", clk_src);
printf(" Time nsec: %" PRIu64 "\n", nsec);
printf(" Measured ticks: %" PRIu64 "\n", ticks);
@@ -1260,21 +1276,7 @@ static void timer_pool_current_tick_run(odp_timer_clk_src_t clk_src)
odp_timer_pool_destroy(tp);
}
-static void timer_pool_current_tick(void)
-{
- odp_timer_clk_src_t clk_src;
- int i;
-
- for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
- clk_src = ODP_CLOCK_SRC_0 + i;
- if (global_mem->clk_supported[i]) {
- ODPH_DBG("\nTesting clock source: %i\n", clk_src);
- timer_pool_current_tick_run(clk_src);
- }
- }
-}
-
-static void timer_pool_sample_ticks_run(odp_timer_clk_src_t clk_2)
+static void timer_pool_sample_ticks(void)
{
odp_timer_capability_t capa;
odp_timer_pool_param_t tp_param;
@@ -1282,6 +1284,7 @@ static void timer_pool_sample_ticks_run(odp_timer_clk_src_t clk_2)
uint64_t t1[2], t2[2], ticks[2], min[2], max[2];
uint64_t clk_count[2] = {0};
odp_timer_clk_src_t clk_1 = ODP_CLOCK_DEFAULT;
+ odp_timer_clk_src_t clk_2 = test_global->clk_src;
uint64_t nsec = 100 * ODP_TIME_MSEC_IN_NS;
/* Highest resolution */
@@ -1310,7 +1313,7 @@ static void timer_pool_sample_ticks_run(odp_timer_clk_src_t clk_2)
tp[1] = odp_timer_pool_create("timer_pool_1", &tp_param);
CU_ASSERT_FATAL(tp[1] != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(tp, 2) == 2);
/* Allow +-10% error margin */
min[0] = odp_timer_ns_to_tick(tp[0], 0.9 * nsec);
@@ -1346,15 +1349,7 @@ static void timer_pool_sample_ticks_run(odp_timer_clk_src_t clk_2)
odp_timer_pool_destroy(tp[1]);
}
-static void timer_pool_sample_ticks(void)
-{
- for (int i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
- if (global_mem->clk_supported[i])
- timer_pool_sample_ticks_run(ODP_CLOCK_SRC_0 + i);
- }
-}
-
-static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
+static void timer_pool_tick_info(void)
{
odp_timer_capability_t capa;
odp_timer_pool_param_t tp_param;
@@ -1362,6 +1357,7 @@ static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
odp_timer_pool_info_t info;
uint64_t ticks_per_sec;
double tick_hz, tick_nsec, tick_to_nsec, tick_low;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
@@ -1378,7 +1374,7 @@ static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
tp = odp_timer_pool_create("tick_info_tp", &tp_param);
CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
memset(&info, 0, sizeof(odp_timer_pool_info_t));
CU_ASSERT_FATAL(odp_timer_pool_info(tp, &info) == 0);
@@ -1431,20 +1427,6 @@ static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
odp_timer_pool_destroy(tp);
}
-static void timer_pool_tick_info(void)
-{
- odp_timer_clk_src_t clk_src;
- int i;
-
- for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
- clk_src = ODP_CLOCK_SRC_0 + i;
- if (global_mem->clk_supported[i]) {
- ODPH_DBG("\nTesting clock source: %i\n", clk_src);
- timer_pool_tick_info_run(clk_src);
- }
- }
-}
-
static void timer_test_event_type(odp_queue_type_t queue_type,
odp_event_type_t event_type, int rounds)
{
@@ -1466,6 +1448,7 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
int test_print = 0;
int num = 5;
odp_timer_t timer[num];
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
odp_timer_pool_param_init(&timer_param);
timer_param.res_ns = global_mem->param.res_ns;
@@ -1473,13 +1456,13 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
period_ns = 2 * global_mem->param.min_tmo;
timer_param.max_tmo = global_mem->param.max_tmo;
timer_param.num_timers = num;
- timer_param.clk_src = ODP_CLOCK_DEFAULT;
+ timer_param.clk_src = clk_src;
timer_pool = odp_timer_pool_create("timer_pool", &timer_param);
if (timer_pool == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&timer_pool, 1) == 1);
odp_pool_param_init(&pool_param);
@@ -1603,7 +1586,7 @@ static void timer_test_event_type(odp_queue_type_t queue_type,
}
for (i = 0; i < num; i++)
- CU_ASSERT(odp_timer_free(timer[i]) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(timer[i]) == 0);
odp_timer_pool_destroy(timer_pool);
CU_ASSERT(odp_queue_destroy(queue) == 0);
@@ -1677,6 +1660,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv, int exp
uint64_t target_tick[num];
uint64_t target_nsec[num];
void *user_ptr[num];
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
odp_pool_param_init(&params);
params.type = ODP_POOL_TIMEOUT;
@@ -1693,7 +1677,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv, int exp
tparam.max_tmo = global_mem->param.max_tmo;
tparam.num_timers = num + 1;
tparam.priv = priv;
- tparam.clk_src = ODP_CLOCK_DEFAULT;
+ tparam.clk_src = clk_src;
if (exp_relax)
tparam.exp_mode = ODP_TIMER_EXP_RELAXED;
@@ -1707,7 +1691,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv, int exp
if (tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
odp_queue_param_init(&queue_param);
if (queue_type == ODP_QUEUE_TYPE_SCHED) {
@@ -1808,7 +1792,7 @@ static void timer_test_queue_type(odp_queue_type_t queue_type, int priv, int exp
tick, nsec, target, (int64_t)(nsec - target));
odp_timeout_free(tmo);
- CU_ASSERT(odp_timer_free(tim) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(tim) == 0);
num_tmo++;
}
@@ -1886,9 +1870,10 @@ static void timer_test_cancel(void)
odp_timeout_t tmo;
odp_timer_retval_t rc;
int ret;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&capa, 0, sizeof(capa));
- ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &capa);
+ ret = odp_timer_capability(clk_src, &capa);
CU_ASSERT_FATAL(ret == 0);
odp_pool_param_init(&params);
@@ -1906,13 +1891,12 @@ static void timer_test_cancel(void)
tparam.max_tmo = global_mem->param.max_tmo;
tparam.num_timers = 1;
tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_DEFAULT;
+ tparam.clk_src = clk_src;
tp = odp_timer_pool_create(NULL, &tparam);
if (tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
- /* Start all created timer pools */
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
odp_queue_param_init(&queue_param);
if (capa.queue_type_plain) {
@@ -1963,9 +1947,7 @@ static void timer_test_cancel(void)
odp_timeout_free(tmo);
- ev = odp_timer_free(tim);
- if (ev != ODP_EVENT_INVALID)
- CU_FAIL_FATAL("Free returned event");
+ CU_ASSERT_FATAL(odp_timer_free(tim) == 0);
odp_timer_pool_destroy(tp);
@@ -1995,9 +1977,10 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
int i, ret, num_tmo;
int num = 5;
odp_timer_t timer[num];
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&timer_capa, 0, sizeof(timer_capa));
- ret = odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa);
+ ret = odp_timer_capability(clk_src, &timer_capa);
CU_ASSERT_FATAL(ret == 0);
if (max_res) {
@@ -2017,13 +2000,13 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
timer_param.min_tmo = min_tmo;
timer_param.max_tmo = max_tmo;
timer_param.num_timers = num;
- timer_param.clk_src = ODP_CLOCK_DEFAULT;
+ timer_param.clk_src = clk_src;
timer_pool = odp_timer_pool_create("timer_pool", &timer_param);
if (timer_pool == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&timer_pool, 1) == 1);
odp_pool_param_init(&pool_param);
pool_param.type = ODP_POOL_TIMEOUT;
@@ -2058,6 +2041,14 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
ODPH_DBG(" tmo_ns %" PRIu64 "\n", tmo_ns);
ODPH_DBG(" tmo_tick %" PRIu64 "\n\n", tmo_tick);
+ if (min) {
+ /*
+ * Prevent the test from taking too long by asserting that the
+ * timeout is reasonably short.
+ */
+ CU_ASSERT_FATAL(tmo_ns < 5 * ODP_TIME_SEC_IN_NS);
+ }
+
for (i = 0; i < num; i++) {
timer[i] = odp_timer_alloc(timer_pool, queue, NULL);
CU_ASSERT_FATAL(timer[i] != ODP_TIMER_INVALID);
@@ -2142,7 +2133,7 @@ static void timer_test_tmo_limit(odp_queue_type_t queue_type,
CU_ASSERT(num_tmo == num);
for (i = 0; i < num; i++)
- CU_ASSERT(odp_timer_free(timer[i]) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(timer[i]) == 0);
odp_timer_pool_destroy(timer_pool);
CU_ASSERT(odp_queue_destroy(queue) == 0);
@@ -2241,9 +2232,10 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
CU_FAIL("odp_timeout_timer() wrong timer");
if (!stale) {
+#if ODP_DEPRECATED_API
if (!odp_timeout_fresh(tmo))
CU_FAIL("Wrong status (stale) for fresh timeout");
-
+#endif
/* tmo tick cannot be smaller than pre-calculated tick */
if (tick < ttp->tick) {
ODPH_DBG("Too small tick: pre-calculated %" PRIu64 " "
@@ -2295,7 +2287,7 @@ static int worker_entrypoint(void *arg ODP_UNUSED)
uint64_t min_tmo = global_mem->param.min_tmo;
odp_queue_param_t queue_param;
odp_thrmask_t thr_mask;
- odp_schedule_group_t group;
+ odp_schedule_group_t group = ODP_SCHED_GROUP_INVALID;
uint64_t sched_tmo;
uint64_t res_ns = global_mem->param.res_ns;
odp_queue_type_t queue_type = global_mem->test_queue_type;
@@ -2514,7 +2506,7 @@ sleep:
}
for (i = 0; i < allocated; i++) {
- if (odp_timer_free(tt[i].tim) != ODP_EVENT_INVALID)
+ if (odp_timer_free(tt[i].tim))
CU_FAIL("odp_timer_free");
}
@@ -2532,6 +2524,9 @@ sleep:
odp_event_free(tt[i].ev);
}
+ if (queue_type == ODP_QUEUE_TYPE_SCHED)
+ CU_ASSERT(odp_schedule_group_destroy(group) == 0);
+
free(tt);
ODPH_DBG("Thread %u: exiting\n", thr);
return CU_get_number_of_failures();
@@ -2554,6 +2549,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
uint32_t num_timers;
uint32_t num_workers;
int timers_per_thread;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
CU_ASSERT_FATAL(odp_schedule_capability(&sched_capa) == 0);
/* Reserve at least one core for running other processes so the timer
@@ -2574,7 +2570,7 @@ static void timer_test_all(odp_queue_type_t queue_type)
num_workers = 1;
num_timers = num_workers * NTIMERS;
- CU_ASSERT_FATAL(!odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa));
+ CU_ASSERT_FATAL(!odp_timer_capability(clk_src, &timer_capa));
if (timer_capa.max_timers && timer_capa.max_timers < num_timers)
num_timers = timer_capa.max_timers;
@@ -2607,14 +2603,13 @@ static void timer_test_all(odp_queue_type_t queue_type)
tparam.max_tmo = max_tmo;
tparam.num_timers = num_timers;
tparam.priv = 0;
- tparam.clk_src = ODP_CLOCK_DEFAULT;
+ tparam.clk_src = clk_src;
global_mem->tp = odp_timer_pool_create(NAME, &tparam);
if (global_mem->tp == ODP_TIMER_POOL_INVALID)
CU_FAIL_FATAL("Timer pool create failed");
tp = global_mem->tp;
- /* Start all created timer pools */
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&tp, 1) == 1);
if (odp_timer_pool_info(tp, &tpinfo) != 0)
CU_FAIL("odp_timer_pool_info");
@@ -2703,7 +2698,7 @@ static void timer_test_sched_all(void)
timer_test_all(ODP_QUEUE_TYPE_SCHED);
}
-static void timer_test_periodic_capa_run(odp_timer_clk_src_t clk_src)
+static void timer_test_periodic_capa(void)
{
odp_timer_capability_t timer_capa;
odp_timer_periodic_capability_t capa;
@@ -2713,6 +2708,7 @@ static void timer_test_periodic_capa_run(odp_timer_clk_src_t clk_src)
int ret;
uint32_t i, j;
uint32_t num = 100;
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&timer_capa, 0, sizeof(odp_timer_capability_t));
CU_ASSERT_FATAL(odp_timer_capability(clk_src, &timer_capa) == 0);
@@ -2843,20 +2839,6 @@ static void timer_test_periodic_capa_run(odp_timer_clk_src_t clk_src)
}
}
-static void timer_test_periodic_capa(void)
-{
- odp_timer_clk_src_t clk_src;
- int i;
-
- for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
- clk_src = ODP_CLOCK_SRC_0 + i;
- if (global_mem->periodic_support[i]) {
- ODPH_DBG("\nTesting clock source: %i\n", clk_src);
- timer_test_periodic_capa_run(clk_src);
- }
- }
-}
-
static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int rounds,
int reuse_event)
{
@@ -2883,9 +2865,10 @@ static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int
/* Test frequency: 1x 100Hz, or 1x min/max_base_freq */
const uint64_t multiplier = 1;
odp_fract_u64_t base_freq = {100, 0, 0};
+ odp_timer_clk_src_t clk_src = test_global->clk_src;
memset(&timer_capa, 0, sizeof(odp_timer_capability_t));
- CU_ASSERT_FATAL(odp_timer_capability(ODP_CLOCK_DEFAULT, &timer_capa) == 0);
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &timer_capa) == 0);
CU_ASSERT_FATAL(timer_capa.periodic.max_pools);
CU_ASSERT_FATAL(timer_capa.periodic.max_timers);
@@ -2910,7 +2893,7 @@ static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int
periodic_capa.base_freq_hz = base_freq;
periodic_capa.max_multiplier = multiplier;
- ret = odp_timer_periodic_capability(ODP_CLOCK_DEFAULT, &periodic_capa);
+ ret = odp_timer_periodic_capability(clk_src, &periodic_capa);
CU_ASSERT(ret == 0 || ret == 1);
if (ret < 0) {
@@ -2946,7 +2929,7 @@ static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int
timer_param.timer_type = ODP_TIMER_TYPE_PERIODIC;
timer_param.res_ns = 2 * periodic_capa.res_ns;
timer_param.num_timers = 1;
- timer_param.clk_src = ODP_CLOCK_DEFAULT;
+ timer_param.clk_src = clk_src;
timer_param.periodic.base_freq_hz = base_freq;
timer_param.periodic.max_multiplier = multiplier;
@@ -2968,7 +2951,7 @@ static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int
timer_pool = odp_timer_pool_create("periodic_timer", &timer_param);
CU_ASSERT_FATAL(timer_pool != ODP_TIMER_POOL_INVALID);
- odp_timer_pool_start();
+ CU_ASSERT_FATAL(odp_timer_pool_start_multi(&timer_pool, 1) == 1);
odp_pool_param_init(&pool_param);
pool_param.type = ODP_POOL_TIMEOUT;
@@ -3123,7 +3106,7 @@ static void timer_test_periodic(odp_queue_type_t queue_type, int use_first, int
CU_ASSERT(ret == 2);
}
- CU_ASSERT(odp_timer_free(timer) == ODP_EVENT_INVALID);
+ CU_ASSERT(odp_timer_free(timer) == 0);
odp_timer_pool_destroy(timer_pool);
CU_ASSERT(odp_queue_destroy(queue) == 0);
CU_ASSERT(odp_pool_destroy(pool) == 0);
@@ -3159,14 +3142,24 @@ static void timer_test_periodic_event_reuse(void)
timer_test_periodic(ODP_QUEUE_TYPE_SCHED, 0, 2, 1);
}
-odp_testinfo_t timer_suite[] = {
- ODP_TEST_INFO(timer_test_capa),
+odp_testinfo_t timer_general_suite[] = {
ODP_TEST_INFO(timer_test_param_init),
ODP_TEST_INFO(timer_test_timeout_pool_alloc),
ODP_TEST_INFO(timer_test_timeout_pool_alloc_multi),
ODP_TEST_INFO(timer_test_timeout_from_event),
ODP_TEST_INFO(timer_test_timeout_pool_free),
ODP_TEST_INFO(timer_test_timeout_user_area),
+ ODP_TEST_INFO(timer_test_capa_allsrc),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t timer_general_suites[] = {
+ {"Timer general", NULL, NULL, timer_general_suite},
+ ODP_SUITE_INFO_NULL,
+};
+
+odp_testinfo_t timer_suite[] = {
+ ODP_TEST_INFO(timer_test_capa),
ODP_TEST_INFO(timer_pool_create_destroy),
ODP_TEST_INFO(timer_pool_create_max),
ODP_TEST_INFO(timer_pool_max_res),
@@ -3266,17 +3259,52 @@ odp_suiteinfo_t timer_suites[] = {
int main(int argc, char *argv[])
{
+ int ret = 0;
+
/* parse common options: */
if (odp_cunit_parse_options(argc, argv))
return -1;
+ if (global_init())
+ return -1;
+
odp_cunit_register_global_init(timer_global_init);
odp_cunit_register_global_term(timer_global_term);
- int ret = odp_cunit_register(timer_suites);
+ if (odp_cunit_register(timer_general_suites))
+ goto fail;
- if (ret == 0)
- ret = odp_cunit_run();
+ if (odp_cunit_run())
+ goto fail;
+ for (int i = ODP_CLOCK_SRC_0; i < ODP_CLOCK_NUM_SRC; i++) {
+ odp_timer_capability_t capa;
+
+ if (odp_timer_capability(i, &capa))
+ continue;
+
+ printf("\n\n"
+ "-------------------------------------------------------------------------------\n"
+ " Running tests with clock source %d\n"
+ "-------------------------------------------------------------------------------\n\n",
+ i);
+
+ test_global->clk_src = i;
+
+ odp_cunit_register_global_init(timer_global_init);
+ odp_cunit_register_global_term(timer_global_term);
+
+ if (odp_cunit_register(timer_suites))
+ goto fail;
+
+ if (odp_cunit_run())
+ ret = -1;
+ }
+
+ global_term();
return ret;
+
+fail:
+ global_term();
+ return -1;
}