author    Matias Elo <matias.elo@nokia.com>  2021-07-16 16:03:37 +0300
committer GitHub <noreply@github.com>  2021-07-16 16:03:37 +0300
commit    5f953128e03004479ffaac088563f6c509659269 (patch)
tree      39eaf1d9260bff50a28b06d11338598fcb71c0cb
parent    6c7ac017886e2f1f63a27871254326d7cd1b48d1 (diff)
parent    3f95752f55b5b6f7dc2ad0e269fec757aa9009fa (diff)
Merge ODP v1.30.1.0 (tag: v1.30.1.0_DPDK_19.11)
Merge ODP linux-generic v1.30.1.0 into ODP-DPDK.
-rw-r--r--  .github/workflows/ci-pipeline-arm64.yml | 10
-rw-r--r--  .github/workflows/ci-pipeline.yml | 11
-rw-r--r--  CHANGELOG | 37
-rw-r--r--  DEPENDENCIES | 34
-rw-r--r--  configure.ac | 2
-rw-r--r--  example/debug/odp_debug.c | 3
-rw-r--r--  example/ipsec_api/Makefile.am | 2
-rw-r--r--  example/ipsec_api/odp_ipsec.c | 2
-rwxr-xr-x  example/ipsec_api/odp_ipsec_api_run_ah_in.sh | 2
-rwxr-xr-x  example/ipsec_api/odp_ipsec_api_run_ah_out.sh | 2
-rwxr-xr-x  example/ipsec_api/odp_ipsec_api_run_ah_tun_in.sh | 2
-rwxr-xr-x  example/ipsec_api/odp_ipsec_api_run_ah_tun_out.sh | 2
-rwxr-xr-x  example/ipsec_api/odp_ipsec_api_run_live.sh | 4
-rwxr-xr-x  example/ipsec_api/odp_ipsec_api_run_router.sh | 4
-rw-r--r--  example/ipsec_crypto/odp_ipsec.c | 2
-rwxr-xr-x  example/ipsec_crypto/odp_ipsec_crypto_run_live.sh | 4
-rwxr-xr-x  example/ipsec_crypto/odp_ipsec_crypto_run_router.sh | 4
-rw-r--r--  example/ipsec_crypto/odp_ipsec_misc.h | 4
-rw-r--r--  example/ipsec_crypto/odp_ipsec_sa_db.c | 4
-rw-r--r--  example/ipsec_crypto/odp_ipsec_stream.c | 45
-rw-r--r--  example/ipsec_crypto/odp_ipsec_stream.h | 3
-rw-r--r--  include/odp/api/abi-default/packet_io.h | 2
-rw-r--r--  include/odp/api/spec/ipsec.h | 8
-rw-r--r--  include/odp/api/spec/packet_io.h | 8
-rw-r--r--  include/odp/api/spec/packet_io_stats.h | 335
-rw-r--r--  include/odp/api/spec/schedule.h | 8
-rw-r--r--  include/odp/api/spec/std_types.h | 34
-rw-r--r--  include/odp/api/spec/timer.h | 51
-rw-r--r--  platform/linux-dpdk/Makefile.am | 1
-rw-r--r--  platform/linux-dpdk/include/odp_packet_io_internal.h | 8
-rw-r--r--  platform/linux-dpdk/odp_packet_dpdk.c | 179
-rw-r--r--  platform/linux-dpdk/odp_schedule_eventdev.c | 17
-rw-r--r--  platform/linux-dpdk/odp_schedule_if.c | 5
-rw-r--r--  platform/linux-dpdk/odp_timer.c | 21
-rw-r--r--  platform/linux-dpdk/test/example/ipsec_api/pktio_env | 6
-rw-r--r--  platform/linux-generic/Makefile.am | 1
-rw-r--r--  platform/linux-generic/include-abi/odp/api/abi/packet_io.h | 2
-rw-r--r--  platform/linux-generic/include/odp_ethtool_stats.h | 6
-rw-r--r--  platform/linux-generic/include/odp_ipsec_internal.h | 42
-rw-r--r--  platform/linux-generic/include/odp_packet_io_internal.h | 10
-rw-r--r--  platform/linux-generic/include/odp_packet_io_stats.h | 12
-rw-r--r--  platform/linux-generic/include/odp_schedule_if.h | 1
-rw-r--r--  platform/linux-generic/include/odp_sysfs_stats.h | 8
-rw-r--r--  platform/linux-generic/m4/odp_dpdk.m4 | 13
-rw-r--r--  platform/linux-generic/odp_classification.c | 2
-rw-r--r--  platform/linux-generic/odp_fractional.c | 19
-rw-r--r--  platform/linux-generic/odp_ipsec.c | 3
-rw-r--r--  platform/linux-generic/odp_ipsec_sad.c | 131
-rw-r--r--  platform/linux-generic/odp_packet_io.c | 285
-rw-r--r--  platform/linux-generic/odp_schedule_basic.c | 270
-rw-r--r--  platform/linux-generic/odp_schedule_if.c | 5
-rw-r--r--  platform/linux-generic/odp_schedule_scalable.c | 17
-rw-r--r--  platform/linux-generic/odp_schedule_sp.c | 21
-rw-r--r--  platform/linux-generic/odp_timer.c | 27
-rw-r--r--  platform/linux-generic/pktio/dpdk.c | 269
-rw-r--r--  platform/linux-generic/pktio/dpdk_parse.c | 4
-rw-r--r--  platform/linux-generic/pktio/loop.c | 33
-rw-r--r--  platform/linux-generic/pktio/netmap.c | 25
-rw-r--r--  platform/linux-generic/pktio/pcap.c | 5
-rw-r--r--  platform/linux-generic/pktio/socket.c | 30
-rw-r--r--  platform/linux-generic/pktio/socket_mmap.c | 30
-rw-r--r--  platform/linux-generic/pktio/stats/ethtool_stats.c | 140
-rw-r--r--  platform/linux-generic/pktio/stats/packet_io_stats.c | 93
-rw-r--r--  platform/linux-generic/pktio/stats/sysfs_stats.c | 123
-rw-r--r--  platform/linux-generic/test/example/ipsec_api/pktio_env | 6
-rwxr-xr-x  scripts/ci/build_arm64.sh | 2
-rw-r--r--  test/performance/odp_l2fwd.c | 17
-rw-r--r--  test/performance/odp_sched_perf.c | 81
-rw-r--r--  test/validation/api/ipsec/ipsec_test_out.c | 7
-rw-r--r--  test/validation/api/pktio/pktio.c | 543
-rw-r--r--  test/validation/api/scheduler/scheduler.c | 64
-rw-r--r--  test/validation/api/timer/timer.c | 89
72 files changed, 2987 insertions(+), 315 deletions(-)
diff --git a/.github/workflows/ci-pipeline-arm64.yml b/.github/workflows/ci-pipeline-arm64.yml
index ceedec3e3..936a4ac2d 100644
--- a/.github/workflows/ci-pipeline-arm64.yml
+++ b/.github/workflows/ci-pipeline-arm64.yml
@@ -187,18 +187,16 @@ jobs:
run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
- Run_dpdk-18_11:
+ Run_dpdk-20_11:
if: ${{ github.repository == 'OpenDataPlane/odp' }}
runs-on: [self-hosted, ARM64]
- strategy:
- fail-fast: false
- matrix:
- os: ['ubuntu_18.04']
+ env:
+ OS: ubuntu_20.04
steps:
- uses: AutoModality/action-clean@v1.1.0
- uses: actions/checkout@v2
- run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${{matrix.os}}-${ARCH}-native-dpdk_18.11 /odp/scripts/ci/check.sh
+ -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-native-dpdk_20.11 /odp/scripts/ci/check.sh
- name: Failure log
if: ${{ failure() }}
run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
diff --git a/.github/workflows/ci-pipeline.yml b/.github/workflows/ci-pipeline.yml
index 089d96c2c..a0b5388d6 100644
--- a/.github/workflows/ci-pipeline.yml
+++ b/.github/workflows/ci-pipeline.yml
@@ -263,20 +263,9 @@ jobs:
if: ${{ failure() }}
run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
- Run_dpdk-18_11:
- runs-on: ubuntu-18.04
- steps:
- - uses: actions/checkout@v2
- - run: sudo docker run -i -v `pwd`:/odp --privileged --shm-size 8g -e CC="${CC}" -e ARCH="${ARCH}"
- -e CONF="${CONF}" $CONTAINER_NAMESPACE/odp-ci-${OS}-${ARCH}-dpdk_18.11 /odp/scripts/ci/check.sh
- - name: Failure log
- if: ${{ failure() }}
- run: find . -name "*.trs" | xargs grep -l '^.test-result. FAIL' | while read trs ; do echo FAILURE detected at $trs; cat ${trs%%.trs}.log ; done
-
Run_dpdk-20_11:
runs-on: ubuntu-18.04
env:
- ARCH: x86_64
OS: ubuntu_20.04
steps:
- uses: actions/checkout@v2
diff --git a/CHANGELOG b/CHANGELOG
index 5d771cf7d..21c13be48 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,40 @@
+== OpenDataPlane (1.30.1.0)
+=== API
+==== Packet IO
+* Modified `odp_pktio_stats_t.in_octets/out_octets` definitions to not include
+CRC (4 bytes)
+* Added `odp_pktio_stats_capability_t` struct to `odp_pktio_capability_t` to
+inform about supported statistics counters
+* Added new statistics counters and related capabilities for
+received/transmitted multicast and broadcast Ethernet packets (`in_mcast_pkts`,
+`in_bcast_pkts`, `out_mcast_pkts`, `out_bcast_pkts`)
+* Added new pktio input/output queue specific statistics counters
+(`odp_pktin_queue_stats_t` and `odp_pktout_queue_stats_t`) and related
+functions and capabilities
+* Added new functions for reading and printing ODP implementation specific
+custom packet IO statistics counters: `odp_pktio_extra_stat_info()`,
+`odp_pktio_extra_stats()`, `odp_pktio_extra_stats_print()`
+* Specified that the default value of `odp_pktio_config_t.enable_loop` option is
+false
+
+==== IPsec
+* Specified that the default values of the IPsec specific reassembly enables
+are false
+
+==== Scheduler
+* Added `odp_schedule_print()` function for printing debug information about
+scheduler status into the ODP log
+
+==== Standard Types
+* Added `odp_fract_u64_t` type which can be used to define fractional numbers
+accurately
+* Added `odp_fract_u64_to_dbl()` function for converting `odp_fract_u64_t`
+fractional numbers into doubles
+
+==== Timer
+* Added timer tick info as part of `odp_timer_pool_info_t`. This enables the
+timer implementation to specify tick frequency and duration very accurately.
+
== OpenDataPlane (1.30.0.0)
=== API
==== IPsec
diff --git a/DEPENDENCIES b/DEPENDENCIES
index ba63313cb..5b5f66904 100644
--- a/DEPENDENCIES
+++ b/DEPENDENCIES
@@ -180,7 +180,7 @@ Prerequisites for building the OpenDataPlane (ODP) API
3.4 DPDK packet I/O support (optional)
Use DPDK for ODP packet I/O. Currently supported DPDK versions are v19.11
- (recommended) and v18.11.
+ (recommended) and v20.11.
Note: only packet I/O is accelerated with DPDK. See
https://github.com/OpenDataPlane/odp-dpdk.git
@@ -196,11 +196,11 @@ Prerequisites for building the OpenDataPlane (ODP) API
$ sudo yum install numactl-devel
3.4.2 Native DPDK install
- # Debian/Ubuntu starting from 18.04
+ # Debian/Ubuntu starting from 20.04
$ sudo apt-get install dpdk-dev
-3.4.2 Build DPDK from source
- $ git clone --branch=19.11 http://dpdk.org/git/dpdk-stable dpdk
+3.4.3 Build DPDK v19.11 from source
+ $ git clone https://dpdk.org/git/dpdk-stable --branch 19.11 --depth 1 ./<dpdk-dir>
# Make and edit DPDK configuration
$ export TARGET="x86_64-native-linuxapp-gcc"
@@ -214,10 +214,28 @@ Prerequisites for building the OpenDataPlane (ODP) API
# Build and install DPDK
$ make install T=${TARGET} EXTRA_CFLAGS="-fPIC" DESTDIR=./install
- # Compile ODP
- $ ./configure --with-dpdk-path=<dpdk_install_dir>
+ # Configure ODP
+ $ ./configure --with-dpdk-path=<dpdk-dir>
-3.4.3 Setup system
+3.4.4 Build DPDK v20.11 from source
+ $ git clone https://dpdk.org/git/dpdk-stable --branch 20.11 --depth 1 ./<dpdk-dir>
+
+ # Prepare the build directory
+ $ cd <dpdk-dir>
+ $ meson build
+ $ cd build
+
+ # Configure the location where DPDK will be installed
+ $ meson configure -Dprefix=$(pwd)/../install
+
+ # Build and install DPDK
+ $ ninja install
+
+ # Configure ODP
+ # Instead of using --with-dpdk-path option, set PKG_CONFIG_PATH
+ $ PKG_CONFIG_PATH=<dpdk-dir>/install/lib/x86_64-linux-gnu/pkgconfig ./configure --enable-dpdk
+
+3.4.5 Setup system
# Load DPDK modules
$ sudo modprobe uio
@@ -229,7 +247,7 @@ Prerequisites for building the OpenDataPlane (ODP) API
512 x 2MB huge pages. All this can be done with the DPDK setup script
(<dpdk-dir>/usertools/dpdk-setup.sh).
-3.4.4 Running ODP with DPDK pktio
+3.4.6 Running ODP with DPDK pktio
ODP applications will try to use DPDK for packet I/O by default. If some other
I/O type is desired instead, DPDK I/O can be disabled by setting the
diff --git a/configure.ac b/configure.ac
index 1bc682712..2f6f4af89 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@ AC_PREREQ([2.5])
##########################################################################
m4_define([odpapi_generation_version], [1])
m4_define([odpapi_major_version], [30])
-m4_define([odpapi_minor_version], [0])
+m4_define([odpapi_minor_version], [1])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
[odpapi_generation_version.odpapi_major_version.odpapi_minor_version.odpapi_point_version])
diff --git a/example/debug/odp_debug.c b/example/debug/odp_debug.c
index 24364a688..8b1ef52ba 100644
--- a/example/debug/odp_debug.c
+++ b/example/debug/odp_debug.c
@@ -283,6 +283,9 @@ static int queue_debug(void)
printf("\n");
odp_queue_print(queue);
+ printf("\n");
+ odp_schedule_print();
+
if (odp_queue_destroy(queue)) {
ODPH_ERR("Queue destroy failed: %s\n", name);
return -1;
diff --git a/example/ipsec_api/Makefile.am b/example/ipsec_api/Makefile.am
index 05471008d..5a71d04e5 100644
--- a/example/ipsec_api/Makefile.am
+++ b/example/ipsec_api/Makefile.am
@@ -12,6 +12,8 @@ TESTS = \
odp_ipsec_api_run_esp_out.sh \
odp_ipsec_api_run_esp_tun_in.sh \
odp_ipsec_api_run_esp_tun_out.sh \
+ odp_ipsec_api_run_live.sh \
+ odp_ipsec_api_run_router.sh \
odp_ipsec_api_run_simple.sh
endif
diff --git a/example/ipsec_api/odp_ipsec.c b/example/ipsec_api/odp_ipsec.c
index d23eeb2c6..5cdb59da3 100644
--- a/example/ipsec_api/odp_ipsec.c
+++ b/example/ipsec_api/odp_ipsec.c
@@ -1346,7 +1346,7 @@ static void usage(char *progname)
" -r, --route SubNet,Intf,NextHopMAC\n"
" -p, --policy SrcSubNet,DstSubNet,(in|out),(ah|esp)\n"
" -e, --esp SrcIP,DstIP,(3des|null),SPI,Key192\n"
- " -a, --ah SrcIP,DstIP,(sha256|md5|null),SPI,Key(256|128)\n"
+ " -a, --ah SrcIP,DstIP,(sha256|sha1|md5|null),SPI,Key(256|160|128)\n"
"\n"
" Where: NextHopMAC is raw hex/colon notation, i.e. 03:BA:44:9A:CE:02\n"
" IP is decimal/dot notation, i.e. 192.168.1.1\n"
diff --git a/example/ipsec_api/odp_ipsec_api_run_ah_in.sh b/example/ipsec_api/odp_ipsec_api_run_ah_in.sh
index 3aa8ea577..087c52aa3 100755
--- a/example/ipsec_api/odp_ipsec_api_run_ah_in.sh
+++ b/example/ipsec_api/odp_ipsec_api_run_ah_in.sh
@@ -21,7 +21,7 @@ setup_interfaces
./odp_ipsec_api -i $IF_LIST \
-r 192.168.111.2/32,$ROUTE_IF_INB,08:00:27:76:B5:E0 \
-p 192.168.222.0/24,192.168.111.0/24,in,ah \
- -a 192.168.222.2,192.168.111.2,md5,300,27f6d123d7077b361662fc6e451f65d8 \
+ -a 192.168.222.2,192.168.111.2,sha1,300,27f6d123d7077b361662fc6e451f65d800000000 \
-s 192.168.222.2,192.168.111.2,$OUT_IF,$IN_IF,10,100 \
-c 2 "$@"
diff --git a/example/ipsec_api/odp_ipsec_api_run_ah_out.sh b/example/ipsec_api/odp_ipsec_api_run_ah_out.sh
index 2712da52a..6acc8628a 100755
--- a/example/ipsec_api/odp_ipsec_api_run_ah_out.sh
+++ b/example/ipsec_api/odp_ipsec_api_run_ah_out.sh
@@ -21,7 +21,7 @@ setup_interfaces
./odp_ipsec_api -i $IF_LIST \
-r 192.168.222.2/32,$ROUTE_IF_OUTB,08:00:27:F5:8B:DB \
-p 192.168.111.0/24,192.168.222.0/24,out,ah \
- -a 192.168.111.2,192.168.222.2,md5,200,a731649644c5dee92cbd9c2e7e188ee6 \
+ -a 192.168.111.2,192.168.222.2,sha1,200,a731649644c5dee92cbd9c2e7e188ee600000000 \
-s 192.168.111.2,192.168.222.2,$IN_IF,$OUT_IF,10,100 \
-c 2 "$@"
diff --git a/example/ipsec_api/odp_ipsec_api_run_ah_tun_in.sh b/example/ipsec_api/odp_ipsec_api_run_ah_tun_in.sh
index f6efc3869..02fe3df3a 100755
--- a/example/ipsec_api/odp_ipsec_api_run_ah_tun_in.sh
+++ b/example/ipsec_api/odp_ipsec_api_run_ah_tun_in.sh
@@ -21,7 +21,7 @@ setup_interfaces
./odp_ipsec_api -i $IF_LIST \
-r 192.168.111.2/32,$ROUTE_IF_INB,08:00:27:76:B5:E0 \
-p 192.168.222.0/24,192.168.111.0/24,in,ah \
- -a 192.168.222.2,192.168.111.2,md5,300,27f6d123d7077b361662fc6e451f65d8 \
+ -a 192.168.222.2,192.168.111.2,sha1,300,27f6d123d7077b361662fc6e451f65d800000000 \
-t 192.168.222.2,192.168.111.2,10.0.222.2,10.0.111.2 \
-s 192.168.222.2,192.168.111.2,$OUT_IF,$IN_IF,10,100 \
-c 2 "$@"
diff --git a/example/ipsec_api/odp_ipsec_api_run_ah_tun_out.sh b/example/ipsec_api/odp_ipsec_api_run_ah_tun_out.sh
index ec888d314..4efb4c23d 100755
--- a/example/ipsec_api/odp_ipsec_api_run_ah_tun_out.sh
+++ b/example/ipsec_api/odp_ipsec_api_run_ah_tun_out.sh
@@ -21,7 +21,7 @@ setup_interfaces
./odp_ipsec_api -i $IF_LIST \
-r 192.168.222.2/32,$ROUTE_IF_OUTB,08:00:27:F5:8B:DB \
-p 192.168.111.0/24,192.168.222.0/24,out,ah \
- -a 192.168.111.2,192.168.222.2,md5,200,a731649644c5dee92cbd9c2e7e188ee6 \
+ -a 192.168.111.2,192.168.222.2,sha1,200,a731649644c5dee92cbd9c2e7e188ee600000000 \
-t 192.168.111.2,192.168.222.2,10.0.111.2,10.0.222.2 \
-s 192.168.111.2,192.168.222.2,$IN_IF,$OUT_IF,10,100 \
-c 2 "$@"
diff --git a/example/ipsec_api/odp_ipsec_api_run_live.sh b/example/ipsec_api/odp_ipsec_api_run_live.sh
index da8523485..9febf2868 100755
--- a/example/ipsec_api/odp_ipsec_api_run_live.sh
+++ b/example/ipsec_api/odp_ipsec_api_run_live.sh
@@ -33,8 +33,6 @@ PID=app_pid
-e 192.168.222.2,192.168.111.2,3des,301,c966199f24d095f3990a320d749056401e82b26570320292 \
-c 2 "$@" & echo $! > $PID) | tee $LOG &
-APP_PID=`cat $PID`
-
# Wait till application thread starts.
APP_READY="Pktio thread \[..\] starts"
@@ -48,6 +46,8 @@ tail -f $LOG | grep -qm 1 "$APP_READY"
validate_result
ret=$?
+APP_PID=`cat $PID`
+
kill -2 ${APP_PID}
# Wait till the application exits
diff --git a/example/ipsec_api/odp_ipsec_api_run_router.sh b/example/ipsec_api/odp_ipsec_api_run_router.sh
index 79a0ef5c6..198721ea0 100755
--- a/example/ipsec_api/odp_ipsec_api_run_router.sh
+++ b/example/ipsec_api/odp_ipsec_api_run_router.sh
@@ -29,8 +29,6 @@ PID=app_pid
-r 192.168.222.2/32,$IF1,$NEXT_HOP_MAC1 \
-c 1 "$@" & echo $! > $PID) | tee -a $LOG &
-APP_PID=`cat $PID`
-
# Wait till application thread starts.
APP_READY="Pktio thread \[..\] starts"
@@ -44,6 +42,8 @@ tail -f $LOG | grep -qm 1 "$APP_READY"
validate_result
ret=$?
+APP_PID=`cat $PID`
+
kill -2 ${APP_PID}
# Wait till the application stops
diff --git a/example/ipsec_crypto/odp_ipsec.c b/example/ipsec_crypto/odp_ipsec.c
index d6102b866..a55aa6aba 100644
--- a/example/ipsec_crypto/odp_ipsec.c
+++ b/example/ipsec_crypto/odp_ipsec.c
@@ -1646,7 +1646,7 @@ static void usage(char *progname)
" -r, --route SubNet,Intf,NextHopMAC\n"
" -p, --policy SrcSubNet,DstSubNet,(in|out),(ah|esp|both)\n"
" -e, --esp SrcIP,DstIP,(3des|null),SPI,Key192\n"
- " -a, --ah SrcIP,DstIP,(sha256|md5|null),SPI,Key(256|128)\n"
+ " -a, --ah SrcIP,DstIP,(sha256|sha1|md5|null),SPI,Key(256|160|128)\n"
"\n"
" Where: NextHopMAC is raw hex/colon notation, i.e. 03:BA;44:9A:CE:02\n"
" IP is decimal/dot notation, i.e. 192.168.1.1\n"
diff --git a/example/ipsec_crypto/odp_ipsec_crypto_run_live.sh b/example/ipsec_crypto/odp_ipsec_crypto_run_live.sh
index 90dda11ec..1393c7af3 100755
--- a/example/ipsec_crypto/odp_ipsec_crypto_run_live.sh
+++ b/example/ipsec_crypto/odp_ipsec_crypto_run_live.sh
@@ -37,8 +37,6 @@ PID=app_pid
-a 192.168.222.2,192.168.111.2,md5,300,27f6d123d7077b361662fc6e451f65d8 \
-c 2 "$@" & echo $! > $PID) | tee -a $LOG &
-APP_PID=`cat $PID`
-
# Wait till application thread starts.
APP_READY="Pktio thread \[..\] starts"
@@ -52,6 +50,8 @@ tail -f $LOG | grep -qm 1 "$APP_READY"
validate_result
ret=$?
+APP_PID=`cat $PID`
+
kill -2 ${APP_PID}
# Wait till the application exits
diff --git a/example/ipsec_crypto/odp_ipsec_crypto_run_router.sh b/example/ipsec_crypto/odp_ipsec_crypto_run_router.sh
index 675027b59..40c353ee0 100755
--- a/example/ipsec_crypto/odp_ipsec_crypto_run_router.sh
+++ b/example/ipsec_crypto/odp_ipsec_crypto_run_router.sh
@@ -29,8 +29,6 @@ PID=app_pid
-r 192.168.222.2/32,$IF1,$NEXT_HOP_MAC1 \
-c 1 "$@" & echo $! > $PID) | tee -a $LOG &
-APP_PID=`cat $PID`
-
# Wait till application thread starts.
APP_READY="Pktio thread \[..\] starts"
@@ -44,6 +42,8 @@ tail -f $LOG | grep -qm 1 "$APP_READY"
validate_result
ret=$?
+APP_PID=`cat $PID`
+
kill -2 ${APP_PID}
# Wait till the application exits
diff --git a/example/ipsec_crypto/odp_ipsec_misc.h b/example/ipsec_crypto/odp_ipsec_misc.h
index 0ff3fc0c7..23b89ae84 100644
--- a/example/ipsec_crypto/odp_ipsec_misc.h
+++ b/example/ipsec_crypto/odp_ipsec_misc.h
@@ -28,6 +28,7 @@ extern "C" {
#define KEY_BITS_3DES 192 /**< 3DES cipher key length in bits */
#define KEY_BITS_MD5_96 128 /**< MD5_96 auth key length in bits */
+#define KEY_BITS_SHA1_96 160 /**< SHA1_96 auth key length in bits */
#define KEY_BITS_SHA256_128 256 /**< SHA256_128 auth key length in bits */
/**< Number of bits represented by a string of hexadecimal characters */
@@ -101,6 +102,9 @@ int parse_key_string(char *keystring,
if ((alg->u.auth == ODP_AUTH_ALG_MD5_HMAC) &&
(KEY_BITS_MD5_96 == key_bits_in))
key->length = key_bits_in / 8;
+ else if ((alg->u.auth == ODP_AUTH_ALG_SHA1_HMAC) &&
+ (KEY_BITS_SHA1_96 == key_bits_in))
+ key->length = key_bits_in / 8;
else if ((alg->u.auth == ODP_AUTH_ALG_SHA256_HMAC) &&
(KEY_BITS_SHA256_128 == key_bits_in))
key->length = key_bits_in / 8;
diff --git a/example/ipsec_crypto/odp_ipsec_sa_db.c b/example/ipsec_crypto/odp_ipsec_sa_db.c
index ff9d7e3c7..0d60cbc7a 100644
--- a/example/ipsec_crypto/odp_ipsec_sa_db.c
+++ b/example/ipsec_crypto/odp_ipsec_sa_db.c
@@ -123,6 +123,10 @@ int create_sa_db_entry(char *input, odp_bool_t cipher)
entry->alg.u.auth =
ODP_AUTH_ALG_MD5_HMAC;
entry->icv_len = 12;
+ } else if (!strcmp(token, "sha1")) {
+ entry->alg.u.auth =
+ ODP_AUTH_ALG_SHA1_HMAC;
+ entry->icv_len = 12;
} else if (!strcmp(token, "sha256")) {
entry->alg.u.auth =
ODP_AUTH_ALG_SHA256_HMAC;
diff --git a/example/ipsec_crypto/odp_ipsec_stream.c b/example/ipsec_crypto/odp_ipsec_stream.c
index 0ca5138bc..110f7d5df 100644
--- a/example/ipsec_crypto/odp_ipsec_stream.c
+++ b/example/ipsec_crypto/odp_ipsec_stream.c
@@ -15,7 +15,6 @@
#include <openssl/des.h>
#include <openssl/rand.h>
#include <openssl/hmac.h>
-#include <openssl/evp.h>
#include <odp_api.h>
@@ -139,6 +138,27 @@ int create_stream_db_entry(char *input)
return 0;
}
+static const EVP_MD *get_evp_md(odp_auth_alg_t auth)
+{
+ const EVP_MD *evp_md;
+
+ switch (auth) {
+ case ODP_AUTH_ALG_MD5_HMAC:
+ evp_md = EVP_md5();
+ break;
+ case ODP_AUTH_ALG_SHA1_HMAC:
+ evp_md = EVP_sha1();
+ break;
+ case ODP_AUTH_ALG_SHA256_HMAC:
+ evp_md = EVP_sha256();
+ break;
+ default:
+ evp_md = NULL;
+ }
+
+ return evp_md;
+}
+
void resolve_stream_db(void)
{
stream_db_entry_t *stream = NULL;
@@ -156,6 +176,9 @@ void resolve_stream_db(void)
stream->input.pktio = odp_pktio_lookup(stream->input.intf);
+ if (entry)
+ stream->evp_md = get_evp_md(entry->ah.alg);
+
/* Lookup output entry */
entry = find_ipsec_cache_entry_out(stream->src_ip,
stream->dst_ip,
@@ -163,6 +186,9 @@ void resolve_stream_db(void)
stream->output.entry = entry;
stream->output.pktio = odp_pktio_lookup(stream->output.intf);
+
+ if (stream->evp_md == NULL && entry)
+ stream->evp_md = get_evp_md(entry->ah.alg);
}
}
@@ -238,6 +264,7 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
if (entry && (entry == stream->input.entry) &&
(ODP_AUTH_ALG_NULL != entry->ah.alg)) {
if (entry->ah.alg != ODP_AUTH_ALG_MD5_HMAC &&
+ entry->ah.alg != ODP_AUTH_ALG_SHA1_HMAC &&
entry->ah.alg != ODP_AUTH_ALG_SHA256_HMAC)
abort();
@@ -359,7 +386,7 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
ah->next_header = ip->proto;
ip->proto = ODPH_IPPROTO_AH;
- HMAC(EVP_md5(),
+ HMAC(stream->evp_md,
entry->ah.key.data,
entry->ah.key.length,
(uint8_t *)ip,
@@ -367,7 +394,7 @@ odp_packet_t create_ipv4_packet(stream_db_entry_t *stream,
hash,
NULL);
- memcpy(ah->icv, hash, 12);
+ memcpy(ah->icv, hash, entry->ah.icv_len);
}
/* Correct set packet length offsets */
@@ -446,7 +473,9 @@ odp_bool_t verify_ipv4_packet(stream_db_entry_t *stream,
return FALSE;
if (odp_be_to_cpu_32(ah->spi) != entry->ah.spi)
return FALSE;
- if (ODP_AUTH_ALG_MD5_HMAC != entry->ah.alg)
+ if (ODP_AUTH_ALG_MD5_HMAC != entry->ah.alg &&
+ ODP_AUTH_ALG_SHA1_HMAC != entry->ah.alg &&
+ ODP_AUTH_ALG_SHA256_HMAC != entry->ah.alg)
abort();
} else {
if (entry && (ODP_AUTH_ALG_NULL != entry->ah.alg))
@@ -473,7 +502,7 @@ odp_bool_t verify_ipv4_packet(stream_db_entry_t *stream,
uint8_t ip_tos;
uint8_t ip_ttl;
uint16_t ip_frag_offset;
- uint8_t icv[12];
+ uint8_t icv[entry->ah.icv_len];
uint8_t hash[EVP_MAX_MD_SIZE];
/* Save/clear mutable fields */
@@ -484,11 +513,11 @@ odp_bool_t verify_ipv4_packet(stream_db_entry_t *stream,
ip->ttl = 0;
ip->frag_offset = 0;
ip->chksum = 0;
- memcpy(icv, ah->icv, 12);
- memset(ah->icv, 0, 12);
+ memcpy(icv, ah->icv, entry->ah.icv_len);
+ memset(ah->icv, 0, entry->ah.icv_len);
/* Calculate HMAC and compare */
- HMAC(EVP_md5(),
+ HMAC(stream->evp_md,
entry->ah.key.data,
entry->ah.key.length,
(uint8_t *)ip,
diff --git a/example/ipsec_crypto/odp_ipsec_stream.h b/example/ipsec_crypto/odp_ipsec_stream.h
index 685b4ee86..2055d3f00 100644
--- a/example/ipsec_crypto/odp_ipsec_stream.h
+++ b/example/ipsec_crypto/odp_ipsec_stream.h
@@ -11,6 +11,8 @@
extern "C" {
#endif
+#include <openssl/evp.h>
+
#include <odp_api.h>
#include <odp_ipsec_misc.h>
#include <odp_ipsec_cache.h>
@@ -27,6 +29,7 @@ typedef struct stream_db_entry_s {
uint32_t length; /**< Packet payload length */
uint32_t created; /**< Number successfully created */
uint32_t verified; /**< Number successfully verified */
+ const EVP_MD *evp_md; /**< Digest method */
struct {
const char *intf; /**< Input interface name */
odp_pktio_t pktio; /**< Input PktI/O interface */
diff --git a/include/odp/api/abi-default/packet_io.h b/include/odp/api/abi-default/packet_io.h
index d545b7074..74b9abef2 100644
--- a/include/odp/api/abi-default/packet_io.h
+++ b/include/odp/api/abi-default/packet_io.h
@@ -53,6 +53,8 @@ typedef struct odp_pktout_queue_t {
#define ODP_PKTIN_NO_WAIT 0
+#define ODP_PKTIO_STATS_EXTRA_NAME_LEN 64
+
/**
* @}
*/
diff --git a/include/odp/api/spec/ipsec.h b/include/odp/api/spec/ipsec.h
index 2030b19e8..f1a9d4b7d 100644
--- a/include/odp/api/spec/ipsec.h
+++ b/include/odp/api/spec/ipsec.h
@@ -203,11 +203,13 @@ typedef struct odp_ipsec_inbound_config_t {
odp_reass_config_t reassembly;
/** Attempt reassembly after inbound IPsec processing in
- * odp_ipsec_in_enq().
+ * odp_ipsec_in_enq(). Default value is false.
*/
odp_bool_t reass_async;
- /** Attempt reassembly after inline inbound IPsec processing. */
+ /** Attempt reassembly after inline inbound IPsec processing.
+ * Default value is false.
+ */
odp_bool_t reass_inline;
} odp_ipsec_inbound_config_t;
@@ -880,6 +882,8 @@ typedef struct odp_ipsec_sa_param_t {
* reassembly was enabled in the global IPsec
* configuration.
*
+ * Default value is false.
+ *
* @see odp_ipsec_config()
*
*/
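
All of these reassembly enables default to false, so an application must turn them on explicitly (after verifying reassembly capability). A minimal sketch of the global part, assuming the usual odp_ipsec_config_init()/odp_ipsec_config() flow; the helper name is illustrative:

    #include <odp_api.h>

    static int enable_ipsec_reassembly(void)
    {
            odp_ipsec_config_t config;

            odp_ipsec_config_init(&config);

            /* Both default to false */
            config.inbound.reass_async  = 1; /* after odp_ipsec_in_enq() */
            config.inbound.reass_inline = 1; /* after inline inbound processing */

            return odp_ipsec_config(&config);
    }

Reassembly must additionally be enabled per SA (also false by default), as the odp_ipsec_sa_param_t note above states.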
diff --git a/include/odp/api/spec/packet_io.h b/include/odp/api/spec/packet_io.h
index 1f4ac84f6..d723ef11a 100644
--- a/include/odp/api/spec/packet_io.h
+++ b/include/odp/api/spec/packet_io.h
@@ -585,7 +585,10 @@ typedef struct odp_pktio_config_t {
* In this mode the packets sent out through the interface is
* looped back to input of the same interface. Supporting loopback mode
* is an optional feature per interface and should be queried in the
- * interface capability before enabling the same. */
+ * interface capability before enabling it.
+ *
+ * Default value is false.
+ */
odp_bool_t enable_loop;
/** Inbound IPSEC inlined with packet input
@@ -908,6 +911,9 @@ typedef struct odp_pktio_capability_t {
/** Packet input reassembly capability */
odp_reass_capability_t reassembly;
+ /** Statistics counters capabilities */
+ odp_pktio_stats_capability_t stats;
+
} odp_pktio_capability_t;
/**
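
Because unsupported counters simply read as zero, the new stats capability struct lets an application check which counters are meaningful before using them. A hedged sketch (the helper name is illustrative, error handling trimmed):

    #include <odp_api.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Print octet counters only if the interface actually implements them */
    static int print_octets(odp_pktio_t pktio)
    {
            odp_pktio_capability_t capa;
            odp_pktio_stats_t stats;

            if (odp_pktio_capability(pktio, &capa) ||
                odp_pktio_stats(pktio, &stats))
                    return -1;

            /* Unsupported counters read as zero; check capability bits */
            if (capa.stats.pktio.counter.in_octets)
                    printf("in_octets (no CRC):  %" PRIu64 "\n", stats.in_octets);
            if (capa.stats.pktio.counter.out_octets)
                    printf("out_octets (no CRC): %" PRIu64 "\n", stats.out_octets);

            return 0;
    }

The same capability struct also reports (in capa.config) which config options the interface supports, including the enable_loop option that now defaults to false.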
diff --git a/include/odp/api/spec/packet_io_stats.h b/include/odp/api/spec/packet_io_stats.h
index 10e74e52f..a6bcbef23 100644
--- a/include/odp/api/spec/packet_io_stats.h
+++ b/include/odp/api/spec/packet_io_stats.h
@@ -20,12 +20,19 @@ extern "C" {
#endif
#include <odp/api/deprecated.h>
+#include <odp/api/queue.h>
/** @addtogroup odp_packet_io
* @{
*/
/**
+ * @def ODP_PKTIO_STATS_EXTRA_NAME_LEN
+ * Maximum packet IO extra statistics counter name length in chars including
+ * null char
+ */
+
+/**
* Packet IO statistics counters
*
* In the counter definitions the term successfully refers to packets which were
@@ -35,7 +42,7 @@ extern "C" {
*/
typedef struct odp_pktio_stats_t {
/** Number of octets in successfully received packets. In case of
- * Ethernet, packet size includes MAC header and FCS. */
+ * Ethernet, packet size includes MAC header. */
uint64_t in_octets;
/** Number of successfully received packets. */
@@ -45,6 +52,14 @@ typedef struct odp_pktio_stats_t {
* destination MAC address. */
uint64_t in_ucast_pkts;
+ /** Number of successfully received Ethernet packets with a multicast
+ * destination MAC address. */
+ uint64_t in_mcast_pkts;
+
+ /** Number of successfully received Ethernet packets with a broadcast
+ * destination MAC address. */
+ uint64_t in_bcast_pkts;
+
/** Number of inbound packets which were discarded due to a lack of free
* resources (e.g. buffers) or other reasons than packet errors. */
uint64_t in_discards;
@@ -68,7 +83,7 @@ typedef struct odp_pktio_stats_t {
uint64_t ODP_DEPRECATE(in_unknown_protos);
/** Number of octets in successfully transmitted packets. In case of
- * Ethernet, packet size includes MAC header and FCS. */
+ * Ethernet, packet size includes MAC header. */
uint64_t out_octets;
/** Number of successfully transmitted packets. */
@@ -78,6 +93,14 @@ typedef struct odp_pktio_stats_t {
* destination MAC address. */
uint64_t out_ucast_pkts;
+ /** Number of successfully transmitted Ethernet packets with a multicast
+ * destination MAC address. */
+ uint64_t out_mcast_pkts;
+
+ /** Number of successfully transmitted Ethernet packets with a broadcast
+ * destination MAC address. */
+ uint64_t out_bcast_pkts;
+
/** Number of outbound packets which were discarded due to a lack of
* free resources (e.g. buffers) or other reasons than errors. */
uint64_t out_discards;
@@ -87,6 +110,174 @@ typedef struct odp_pktio_stats_t {
} odp_pktio_stats_t;
/**
+ * Packet IO input queue specific statistics counters
+ *
+ * Statistics counters for an individual packet input queue. Refer to packet IO
+ * level statistics odp_pktio_stats_t for counter definitions.
+ */
+typedef struct odp_pktin_queue_stats_t {
+ /** @see odp_pktio_stats_t::in_octets */
+ uint64_t octets;
+
+ /** @see odp_pktio_stats_t::in_packets */
+ uint64_t packets;
+
+ /** @see odp_pktio_stats_t::in_discards */
+ uint64_t discards;
+
+ /** @see odp_pktio_stats_t::in_errors */
+ uint64_t errors;
+
+} odp_pktin_queue_stats_t;
+
+/**
+ * Packet IO output queue specific statistics counters
+ *
+ * Statistics counters for an individual packet output queue. Refer to packet IO
+ * level statistics odp_pktio_stats_t for counter definitions.
+ */
+typedef struct odp_pktout_queue_stats_t {
+ /** @see odp_pktio_stats_t::out_octets */
+ uint64_t octets;
+
+ /** @see odp_pktio_stats_t::out_packets */
+ uint64_t packets;
+
+ /** @see odp_pktio_stats_t::out_discards */
+ uint64_t discards;
+
+ /** @see odp_pktio_stats_t::out_errors */
+ uint64_t errors;
+
+} odp_pktout_queue_stats_t;
+
+/**
+ * Packet IO statistics capabilities
+ */
+typedef struct odp_pktio_stats_capability_t {
+ /** Interface level capabilities */
+ struct {
+ /** Supported counters */
+ union {
+ /** Statistics counters in a bit field structure */
+ struct {
+ /** @see odp_pktio_stats_t::in_octets */
+ uint64_t in_octets : 1;
+
+ /** @see odp_pktio_stats_t::in_packets */
+ uint64_t in_packets : 1;
+
+ /** @see odp_pktio_stats_t::in_ucast_pkts */
+ uint64_t in_ucast_pkts : 1;
+
+ /** @see odp_pktio_stats_t::in_mcast_pkts */
+ uint64_t in_mcast_pkts : 1;
+
+ /** @see odp_pktio_stats_t::in_bcast_pkts */
+ uint64_t in_bcast_pkts : 1;
+
+ /** @see odp_pktio_stats_t::in_discards */
+ uint64_t in_discards : 1;
+
+ /** @see odp_pktio_stats_t::in_errors */
+ uint64_t in_errors : 1;
+
+ /** @see odp_pktio_stats_t::out_octets */
+ uint64_t out_octets : 1;
+
+ /** @see odp_pktio_stats_t::out_packets */
+ uint64_t out_packets : 1;
+
+ /** @see odp_pktio_stats_t::out_ucast_pkts */
+ uint64_t out_ucast_pkts : 1;
+
+ /** @see odp_pktio_stats_t::out_mcast_pkts */
+ uint64_t out_mcast_pkts : 1;
+
+ /** @see odp_pktio_stats_t::out_bcast_pkts */
+ uint64_t out_bcast_pkts : 1;
+
+ /** @see odp_pktio_stats_t::out_discards */
+ uint64_t out_discards : 1;
+
+ /** @see odp_pktio_stats_t::out_errors */
+ uint64_t out_errors : 1;
+ } counter;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or
+ * for bitwise operations over the entire structure. */
+ uint64_t all_counters;
+ };
+ } pktio;
+
+ /** Input queue level capabilities */
+ struct {
+ /** Supported counters */
+ union {
+ /** Statistics counters in a bit field structure */
+ struct {
+ /** @see odp_pktin_queue_stats_t::octets */
+ uint64_t octets : 1;
+
+ /** @see odp_pktin_queue_stats_t::packets */
+ uint64_t packets : 1;
+
+ /** @see odp_pktin_queue_stats_t::discards */
+ uint64_t discards : 1;
+
+ /** @see odp_pktin_queue_stats_t::errors */
+ uint64_t errors : 1;
+ } counter;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or
+ * for bitwise operations over the entire structure. */
+ uint64_t all_counters;
+ };
+ } pktin_queue;
+
+ /** Output queue level capabilities */
+ struct {
+ /** Supported counters */
+ union {
+ /** Statistics counters in a bit field structure */
+ struct {
+ /** @see odp_pktout_queue_stats_t::octets */
+ uint64_t octets : 1;
+
+ /** @see odp_pktout_queue_stats_t::packets */
+ uint64_t packets : 1;
+
+ /** @see odp_pktout_queue_stats_t::discards */
+ uint64_t discards : 1;
+
+ /** @see odp_pktout_queue_stats_t::errors */
+ uint64_t errors : 1;
+ } counter;
+
+ /** All bits of the bit field structure
+ *
+ * This field can be used to set/clear all flags, or
+ * for bitwise operations over the entire structure. */
+ uint64_t all_counters;
+ };
+ } pktout_queue;
+
+} odp_pktio_stats_capability_t;
+
+/**
+ * Packet IO extra statistics counter information
+ */
+typedef struct odp_pktio_extra_stat_info_t {
+ /** Name of the counter */
+ char name[ODP_PKTIO_STATS_EXTRA_NAME_LEN];
+
+} odp_pktio_extra_stat_info_t;
+
+/**
* Get statistics for pktio handle
*
* Counters not supported by the interface are set to zero.
@@ -100,9 +291,75 @@ typedef struct odp_pktio_stats_t {
int odp_pktio_stats(odp_pktio_t pktio, odp_pktio_stats_t *stats);
/**
+ * Get statistics for direct packet input queue
+ *
+ * Packet input queue handles can be requested with odp_pktin_queue(). Counters
+ * not supported by the interface are set to zero.
+ *
+ * @param queue Packet input queue handle
+ * @param[out] stats Output buffer for counters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pktin_queue_stats(odp_pktin_queue_t queue,
+ odp_pktin_queue_stats_t *stats);
+
+/**
+ * Get statistics for packet input event queue
+ *
+ * The queue must be a packet input event queue. Event queue handles can be
+ * requested with odp_pktin_event_queue(). Counters not supported by the
+ * interface are set to zero.
+ *
+ * @param pktio Packet IO handle
+ * @param queue Packet input event queue handle
+ * @param[out] stats Output buffer for counters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pktin_event_queue_stats(odp_pktio_t pktio, odp_queue_t queue,
+ odp_pktin_queue_stats_t *stats);
+
+/**
+ * Get statistics for direct packet output queue
+ *
+ * Packet output queue handles can be requested with odp_pktout_queue().
+ * Counters not supported by the interface are set to zero.
+ *
+ * @param queue Packet output queue handle
+ * @param[out] stats Output buffer for counters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pktout_queue_stats(odp_pktout_queue_t queue,
+ odp_pktout_queue_stats_t *stats);
+
+/**
+ * Get statistics for packet output event queue
+ *
+ * The queue must be a packet output event queue. Event queue handles can be
+ * requested with odp_pktout_event_queue(). Counters not supported by the
+ * interface are set to zero.
+ *
+ * @param pktio Packet IO handle
+ * @param queue Packet output event queue handle
+ * @param[out] stats Output buffer for counters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pktout_event_queue_stats(odp_pktio_t pktio, odp_queue_t queue,
+ odp_pktout_queue_stats_t *stats);
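+
+ * For the direct queue variants the flow is: request the queue handles, then
+ * read each queue's counters. A minimal sketch for input queues (the helper
+ * name and MAX_QUEUES limit are illustrative):
+ *
+ *     #define MAX_QUEUES 32
+ *
+ *     static void print_pktin_queue_stats(odp_pktio_t pktio)
+ *     {
+ *             odp_pktin_queue_t queue[MAX_QUEUES];
+ *             odp_pktin_queue_stats_t qstats;
+ *             int i, num;
+ *
+ *             num = odp_pktin_queue(pktio, queue, MAX_QUEUES);
+ *             if (num > MAX_QUEUES)
+ *                     num = MAX_QUEUES;
+ *
+ *             for (i = 0; i < num; i++) {
+ *                     if (odp_pktin_queue_stats(queue[i], &qstats))
+ *                             continue;
+ *                     printf("in queue %d: %" PRIu64 " packets, %" PRIu64
+ *                            " octets\n", i, qstats.packets, qstats.octets);
+ *             }
+ *     }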
+
+/**
* Reset statistics for pktio handle
*
- * Reset all statistics counters to zero.
+ * Reset all interface level statistics counters (odp_pktio_stats_t) to zero.
+ * It is implementation defined whether other packet IO related statistics are
+ * affected.
*
* @param pktio Packet IO handle
*
@@ -112,6 +369,78 @@ int odp_pktio_stats(odp_pktio_t pktio, odp_pktio_stats_t *stats);
int odp_pktio_stats_reset(odp_pktio_t pktio);
/**
+ * Get extra statistics counter information for a packet IO interface
+ *
+ * Returns the number of implementation specific packet IO extra statistics
+ * counters supported by the interface. Outputs up to 'num' extra statistics
+ * counter info structures when the 'info' array pointer is not NULL. If the
+ * return value is larger than 'num', there are more extra counters than the
+ * function was allowed to output. If the return value (N) is less than 'num',
+ * only info[0 ... N-1] have been written.
+ *
+ * The index of a counter in the 'info' array can be used to read the value of
+ * the individual counter with odp_pktio_extra_stat_counter(). The order of
+ * counters in the output array matches with odp_pktio_extra_stats().
+ *
+ * @param pktio Packet IO handle
+ * @param[out] info Array of extra statistics info structs for output
+ * @param num Maximum number of info structs to output
+ *
+ * @return Number of extra statistics
+ * @retval <0 on failure
+ */
+int odp_pktio_extra_stat_info(odp_pktio_t pktio,
+ odp_pktio_extra_stat_info_t info[], int num);
+
+/**
+ * Get extra statistics for a packet IO interface
+ *
+ * Returns the number of implementation specific packet IO extra statistics
+ * counters supported by the interface. Outputs up to 'num' counters when the
+ * 'stats' array pointer is not NULL. If the return value is larger than 'num',
+ * there are more counters than the function was allowed to output. If the
+ * return value (N) is less than 'num', only stats[0 ... N-1] have been written.
+ *
+ * The index of a counter in the 'stats' array can be used to read the value of
+ * the individual counter with odp_pktio_extra_stat_counter(). The order of
+ * counters in the output array matches with odp_pktio_extra_stat_info().
+ *
+ * @param pktio Packet IO handle
+ * @param[out] stats Array of extra statistics for output
+ * @param num Maximum number of extra statistics to output
+ *
+ * @return Number of extra statistics
+ * @retval <0 on failure
+ */
+int odp_pktio_extra_stats(odp_pktio_t pktio, uint64_t stats[], int num);
+
+/**
+ * Get extra statistic counter value
+ *
+ * 'id' is the index of the particular counter in the output array of
+ * odp_pktio_extra_stat_info() or odp_pktio_extra_stats().
+ *
+ *
+ * @param pktio Packet IO handle
+ * @param id ID of the extra statistics counter
+ * @param[out] stat Pointer for statistic counter output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pktio_extra_stat_counter(odp_pktio_t pktio, uint32_t id,
+ uint64_t *stat);
+
+/**
+ * Print extra statistics for a packet IO interface
+ *
+ * Print all packet IO device extra statistics to ODP log.
+ *
+ * @param pktio Packet IO handle
+ */
+void odp_pktio_extra_stats_print(odp_pktio_t pktio);
+
+/**
* @}
*/
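
The extra statistics functions follow the common two-call pattern: call once with a NULL array to learn the counter count, then allocate and fetch. A hedged sketch using only the functions declared above (the helper name is illustrative):

    #include <odp_api.h>
    #include <inttypes.h>
    #include <stdio.h>

    static void print_extra_stats(odp_pktio_t pktio)
    {
            int i, num;

            /* NULL array: only the number of extra counters is returned */
            num = odp_pktio_extra_stat_info(pktio, NULL, 0);
            if (num <= 0)
                    return;

            odp_pktio_extra_stat_info_t info[num];
            uint64_t stats[num];

            if (odp_pktio_extra_stat_info(pktio, info, num) < 0 ||
                odp_pktio_extra_stats(pktio, stats, num) < 0)
                    return;

            /* Both output arrays use the same counter order */
            for (i = 0; i < num; i++)
                    printf("%s: %" PRIu64 "\n", info[i].name, stats[i]);
    }

A single counter can also be read by index with odp_pktio_extra_stat_counter(pktio, i, &value), using the same ordering.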
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index 9e96e0684..e07c92b7e 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -547,6 +547,14 @@ void odp_schedule_order_lock_start(uint32_t lock_index);
void odp_schedule_order_lock_wait(uint32_t lock_index);
/**
+ * Print debug info about scheduler
+ *
+ * Print implementation defined information about scheduler to the ODP log.
+ * The information is intended to be used for debugging.
+ */
+void odp_schedule_print(void);
+
+/**
* @}
*/
diff --git a/include/odp/api/spec/std_types.h b/include/odp/api/spec/std_types.h
index bf3eb77a6..4b2af87ef 100644
--- a/include/odp/api/spec/std_types.h
+++ b/include/odp/api/spec/std_types.h
@@ -1,5 +1,6 @@
/* Copyright (c) 2013-2018, Linaro Limited
* Copyright (c) 2021, ARM Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -74,6 +75,39 @@ typedef struct ODP_ALIGNED(16) odp_u128_s {
} odp_u128_t;
/**
+ * Unsigned 64 bit fractional number
+ *
+ * The number is composed of integer and fraction parts. The fraction part is composed of
+ * two terms: numerator and denominator. The value of the number is the sum of the integer and
+ * fraction parts: value = integer + numer/denom. When the fraction part is zero, the numerator is
+ * zero and the denominator may be zero.
+ */
+typedef struct odp_fract_u64_t {
+ /** Integer part */
+ uint64_t integer;
+
+ /** Numerator of the fraction part */
+ uint64_t numer;
+
+ /** Denominator of the fraction part. This may be zero when the numerator
+ * is zero. */
+ uint64_t denom;
+
+} odp_fract_u64_t;
+
+/**
+ * Convert fractional number (u64) to double
+ *
+ * Converts value of the unsigned 64 bit fractional number to a double-precision
+ * floating-point value.
+ *
+ * @param fract Pointer to a fractional number
+ *
+ * @return Value of the fractional number as double
+ */
+double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract);
+
+/**
* @}
*/
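
The conversion is simply value = integer + numer/denom, with a zero denominator meaning no fraction part. A sketch of the equivalent arithmetic (the actual implementation lands in odp_fractional.c per the diffstat above; the helper name is illustrative):

    #include <odp_api.h>

    static double fract_to_dbl(const odp_fract_u64_t *f)
    {
            double v = (double)f->integer;

            /* denom == 0 implies a zero fraction part */
            if (f->denom)
                    v += (double)f->numer / (double)f->denom;

            return v;
    }

For example, {.integer = 333333, .numer = 1, .denom = 3} converts to 333333.333...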
diff --git a/include/odp/api/spec/timer.h b/include/odp/api/spec/timer.h
index a90fb9b07..319f5e029 100644
--- a/include/odp/api/spec/timer.h
+++ b/include/odp/api/spec/timer.h
@@ -375,6 +375,54 @@ uint64_t odp_timer_ns_to_tick(odp_timer_pool_t timer_pool, uint64_t ns);
uint64_t odp_timer_current_tick(odp_timer_pool_t timer_pool);
/**
+ * Timer tick information
+ */
+typedef struct odp_timer_tick_info_t {
+ /**
+ * Timer tick frequency in hertz
+ *
+ * Timer tick frequency expressed as a fractional number. The integer part contains
+ * full hertz. The fraction part (numerator / denominator) contains parts of
+ * a hertz to be added with the integer.
+ *
+ * For example, a timer tick frequency of 333 333 and 1/3 Hz could be presented with
+ * these values: integer = 333 333, numer = 1, denom = 3. Implementation may choose numer
+ * and denom values freely.
+ */
+ odp_fract_u64_t freq;
+
+ /**
+ * One timer tick in nanoseconds
+ *
+ * Nanoseconds per tick is expressed as a fractional number. The integer part contains
+ * full nanoseconds. The fraction part (numerator / denominator) contains parts of
+ * a nanosecond to be added with the integer.
+ *
+ * For example, a timer tick period of 3.125 nanoseconds (320MHz) could be presented with
+ * these values: integer = 3, numer = 125 000 000, denom = 1 000 000 000. Implementation
+ * may choose numer and denom values freely.
+ */
+ odp_fract_u64_t nsec;
+
+ /**
+ * One timer tick in source clock cycles
+ *
+ * The clock cycle count is expressed as a fractional number. The integer part contains
+ * full clock cycles. The fraction part (numerator / denominator) contains parts of
+ * a clock cycle to be added with the integer.
+ *
+ * For example, a timer tick period of 42 and 1/3 source clock cycles could be presented
+ * with these values: integer = 42, numer = 1, denom = 3. Implementation may choose numer
+ * and denom values freely.
+ *
+ * The value is zero, when there is no direct connection between tick and the source
+ * clock signal.
+ */
+ odp_fract_u64_t clk_cycle;
+
+} odp_timer_tick_info_t;
+
+/**
* ODP timer pool information and configuration
*/
typedef struct {
@@ -390,6 +438,9 @@ typedef struct {
/** Name of timer pool */
const char *name;
+ /** Timer pool tick information */
+ odp_timer_tick_info_t tick_info;
+
} odp_timer_pool_info_t;
/**
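
Combined with odp_fract_u64_to_dbl(), the tick info gives an accurate tick frequency and period as doubles. A minimal sketch, assuming a valid timer pool handle (the helper name is illustrative):

    #include <odp_api.h>
    #include <stdio.h>

    static void print_tick_info(odp_timer_pool_t tp)
    {
            odp_timer_pool_info_t info;

            if (odp_timer_pool_info(tp, &info))
                    return;

            printf("tick freq:   %f Hz\n",
                   odp_fract_u64_to_dbl(&info.tick_info.freq));
            printf("tick period: %f ns\n",
                   odp_fract_u64_to_dbl(&info.tick_info.nsec));
    }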
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 8cdb84b15..bff49c692 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -164,6 +164,7 @@ __LIB__libodp_dpdk_la_SOURCES = \
odp_crypto.c \
odp_errno.c \
../linux-generic/odp_event.c \
+ ../linux-generic/odp_fractional.c \
../linux-generic/odp_hash_crc_gen.c \
odp_init.c \
../linux-generic/odp_impl.c \
diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h
index 0d2782101..898709008 100644
--- a/platform/linux-dpdk/include/odp_packet_io_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_io_internal.h
@@ -177,6 +177,14 @@ typedef struct pktio_if_ops {
int (*stop)(pktio_entry_t *pktio_entry);
int (*stats)(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats);
int (*stats_reset)(pktio_entry_t *pktio_entry);
+ int (*pktin_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats);
+ int (*pktout_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats);
+ int (*extra_stat_info)(pktio_entry_t *pktio_entry, odp_pktio_extra_stat_info_t info[],
+ int num);
+ int (*extra_stats)(pktio_entry_t *pktio_entry, uint64_t stats[], int num);
+ int (*extra_stat_counter)(pktio_entry_t *pktio_entry, uint32_t id, uint64_t *stat);
uint64_t (*pktio_ts_res)(pktio_entry_t *pktio_entry);
odp_time_t (*pktio_ts_from_ns)(pktio_entry_t *pktio_entry, uint64_t ns);
odp_time_t (*pktio_time)(pktio_entry_t *pktio_entry, odp_time_t *global_ts);
diff --git a/platform/linux-dpdk/odp_packet_dpdk.c b/platform/linux-dpdk/odp_packet_dpdk.c
index 9d400d603..156a34132 100644
--- a/platform/linux-dpdk/odp_packet_dpdk.c
+++ b/platform/linux-dpdk/odp_packet_dpdk.c
@@ -565,6 +565,21 @@ static int dpdk_init_capability(pktio_entry_t *pktio_entry,
capa->config.pktout.bit.ts_ena = 1;
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+
+ capa->stats.pktin_queue.counter.octets = 1;
+ capa->stats.pktin_queue.counter.packets = 1;
+ capa->stats.pktin_queue.counter.errors = 1;
+
+ capa->stats.pktout_queue.counter.octets = 1;
+ capa->stats.pktout_queue.counter.packets = 1;
+
return 0;
}
@@ -702,6 +717,17 @@ static int dpdk_setup_eth_tx(pktio_entry_t *pktio_entry,
}
}
+ /* Set per queue statistics mappings. Not supported by all PMDs, so
+ * ignore the return value. */
+ for (i = 0; i < pktio_entry->s.num_out_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
+ if (ret) {
+ ODP_DBG("Mapping per TX queue statistics not supported: %d\n", ret);
+ break;
+ }
+ }
+ ODP_DBG("Mapped %" PRIu32 "/%d TX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
return 0;
}
@@ -731,6 +757,17 @@ static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry,
}
}
+ /* Set per queue statistics mappings. Not supported by all PMDs, so
+ * ignore the return value. */
+ for (i = 0; i < pktio_entry->s.num_in_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
+ if (ret) {
+ ODP_DBG("Mapping per RX queue statistics not supported: %d\n", ret);
+ break;
+ }
+ }
+ ODP_DBG("Mapped %" PRIu32 "/%d RX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
return 0;
}
@@ -1403,11 +1440,15 @@ static void stats_convert(struct rte_eth_stats *rte_stats,
stats->in_octets = rte_stats->ibytes;
stats->in_packets = rte_stats->ipackets;
stats->in_ucast_pkts = 0;
+ stats->in_mcast_pkts = 0;
+ stats->in_bcast_pkts = 0;
stats->in_discards = rte_stats->imissed;
stats->in_errors = rte_stats->ierrors;
stats->out_octets = rte_stats->obytes;
stats->out_packets = rte_stats->opackets;
stats->out_ucast_pkts = 0;
+ stats->out_mcast_pkts = 0;
+ stats->out_bcast_pkts = 0;
stats->out_discards = 0;
stats->out_errors = rte_stats->oerrors;
}
@@ -1432,7 +1473,138 @@ static int stats_pkt_dpdk(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
static int stats_reset_pkt_dpdk(pktio_entry_t *pktio_entry)
{
- rte_eth_stats_reset(pkt_priv(pktio_entry)->port_id);
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+
+ (void)rte_eth_stats_reset(port_id);
+ (void)rte_eth_xstats_reset(port_id);
+ return 0;
+}
+
+static int dpdk_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get_names(port_id, NULL, 0);
+ if (num_stats < 0) {
+ ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (info == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat_name xstats_names[num_stats];
+
+ ret = rte_eth_xstats_get_names(port_id, xstats_names, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ strncpy(info[i].name, xstats_names[i].name,
+ ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1);
+
+ return num_stats;
+}
+
+static int dpdk_extra_stats(pktio_entry_t *pktio_entry,
+ uint64_t stats[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get(port_id, NULL, 0);
+ if (num_stats < 0) {
+ ODP_ERR("rte_eth_xstats_get() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (stats == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat xstats[num_stats];
+
+ ret = rte_eth_xstats_get(port_id, xstats, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ ODP_ERR("rte_eth_xstats_get() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ stats[i] = xstats[i].value;
+
+ return num_stats;
+}
+
+static int dpdk_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ uint64_t xstat_id = id;
+ int ret;
+
+ ret = rte_eth_xstats_get_by_id(port_id, &xstat_id, stat, 1);
+ if (ret != 1) {
+ ODP_ERR("rte_eth_xstats_get_by_id() failed: %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int dpdk_pktin_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ ODP_ERR("DPDK supports max %d per queue counters\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
+
+ pktin_stats->packets = rte_stats.q_ipackets[index];
+ pktin_stats->octets = rte_stats.q_ibytes[index];
+ pktin_stats->errors = rte_stats.q_errors[index];
+
+ return 0;
+}
+
+static int dpdk_pktout_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ ODP_ERR("DPDK supports max %d per queue counters\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
+
+ pktout_stats->packets = rte_stats.q_opackets[index];
+ pktout_stats->octets = rte_stats.q_obytes[index];
+
return 0;
}
@@ -1448,6 +1620,11 @@ const pktio_if_ops_t _odp_dpdk_pktio_ops = {
.stop = stop_pkt_dpdk,
.stats = stats_pkt_dpdk,
.stats_reset = stats_reset_pkt_dpdk,
+ .pktin_queue_stats = dpdk_pktin_stats,
+ .pktout_queue_stats = dpdk_pktout_stats,
+ .extra_stat_info = dpdk_extra_stat_info,
+ .extra_stats = dpdk_extra_stats,
+ .extra_stat_counter = dpdk_extra_stat_counter,
.pktio_ts_res = NULL,
.pktio_ts_from_ns = NULL,
.pktio_time = NULL,
diff --git a/platform/linux-dpdk/odp_schedule_eventdev.c b/platform/linux-dpdk/odp_schedule_eventdev.c
index f7b6bbb8f..399a3c6a3 100644
--- a/platform/linux-dpdk/odp_schedule_eventdev.c
+++ b/platform/linux-dpdk/odp_schedule_eventdev.c
@@ -1049,6 +1049,20 @@ static int schedule_config(const odp_schedule_config_t *config)
return 0;
}
+static void schedule_print(void)
+{
+ odp_schedule_capability_t capa;
+
+ (void)schedule_capability(&capa);
+
+ ODP_PRINT("\nScheduler debug info\n");
+ ODP_PRINT("--------------------\n");
+ ODP_PRINT(" scheduler: eventdev\n");
+ ODP_PRINT(" max groups: %u\n", capa.max_groups);
+ ODP_PRINT(" max priorities: %u\n", capa.max_prios);
+ ODP_PRINT("\n");
+}
+
/* Fill in scheduler interface */
const schedule_fn_t _odp_schedule_eventdev_fn = {
.pktio_start = schedule_pktio_start,
@@ -1099,5 +1113,6 @@ const schedule_api_t _odp_schedule_eventdev_api = {
.schedule_order_unlock = schedule_order_unlock,
.schedule_order_unlock_lock = schedule_order_unlock_lock,
.schedule_order_lock_start = schedule_order_lock_start,
- .schedule_order_lock_wait = schedule_order_lock_wait
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_print = schedule_print
};
diff --git a/platform/linux-dpdk/odp_schedule_if.c b/platform/linux-dpdk/odp_schedule_if.c
index 2ccadf285..29d38b1fd 100644
--- a/platform/linux-dpdk/odp_schedule_if.c
+++ b/platform/linux-dpdk/odp_schedule_if.c
@@ -201,6 +201,11 @@ void odp_schedule_order_lock_wait(uint32_t lock_index)
_odp_sched_api->schedule_order_lock_wait(lock_index);
}
+void odp_schedule_print(void)
+{
+ _odp_sched_api->schedule_print();
+}
+
int _odp_schedule_init_global(void)
{
const char *sched = getenv("ODP_SCHEDULER");
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
index b5367b4c2..e6f4ffe86 100644
--- a/platform/linux-dpdk/odp_timer.c
+++ b/platform/linux-dpdk/odp_timer.c
@@ -492,12 +492,31 @@ uint64_t odp_timer_current_tick(odp_timer_pool_t tp)
int odp_timer_pool_info(odp_timer_pool_t tp,
odp_timer_pool_info_t *info)
{
- timer_pool_t *timer_pool = timer_pool_from_hdl(tp);
+ timer_pool_t *timer_pool;
+ uint64_t freq_hz = rte_get_timer_hz();
+
+ if (odp_unlikely(tp == ODP_TIMER_POOL_INVALID)) {
+ ODP_ERR("Invalid timer pool.\n");
+ return -1;
+ }
+
+ timer_pool = timer_pool_from_hdl(tp);
+ memset(info, 0, sizeof(odp_timer_pool_info_t));
info->param = timer_pool->param;
info->cur_timers = timer_pool->cur_timers;
info->hwm_timers = timer_pool->hwm_timers;
info->name = timer_pool->name;
+
+ info->tick_info.freq.integer = freq_hz;
+ info->tick_info.nsec.integer = SEC_IN_NS / freq_hz;
+ if (SEC_IN_NS % freq_hz) {
+ info->tick_info.nsec.numer = SEC_IN_NS - (info->tick_info.nsec.integer * freq_hz);
+ info->tick_info.nsec.denom = freq_hz;
+ }
+ /* Leave source clock information to zero as there is no direct link
+ * between a source clock signal and a timer tick. */
+
return 0;
}
diff --git a/platform/linux-dpdk/test/example/ipsec_api/pktio_env b/platform/linux-dpdk/test/example/ipsec_api/pktio_env
index c647f6bf2..3267bd4cd 100644
--- a/platform/linux-dpdk/test/example/ipsec_api/pktio_env
+++ b/platform/linux-dpdk/test/example/ipsec_api/pktio_env
@@ -35,6 +35,12 @@ if [ -n "$WITH_OPENSSL" ] && [ ${WITH_OPENSSL} -eq 0 ]; then
exit 77
fi
+# Skip live and router mode tests.
+if [ ${IPSEC_APP_MODE} -eq 1 ] || [ ${IPSEC_APP_MODE} -eq 2 ]; then
+ echo "IPsec Live / Router mode test. Skipping."
+ exit 77
+fi
+
IF0=p7p1
IF1=p8p1
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index e654381e6..b6721dce4 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -174,6 +174,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_errno.c \
odp_event.c \
odp_fdserver.c \
+ odp_fractional.c \
odp_hash_crc_gen.c \
odp_impl.c \
odp_init.c \
diff --git a/platform/linux-generic/include-abi/odp/api/abi/packet_io.h b/platform/linux-generic/include-abi/odp/api/abi/packet_io.h
index 45f06912c..48a847c07 100644
--- a/platform/linux-generic/include-abi/odp/api/abi/packet_io.h
+++ b/platform/linux-generic/include-abi/odp/api/abi/packet_io.h
@@ -50,6 +50,8 @@ typedef struct odp_pktout_queue_t {
#define ODP_PKTIN_NO_WAIT 0
#define ODP_PKTIN_WAIT UINT64_MAX
+#define ODP_PKTIO_STATS_EXTRA_NAME_LEN 64
+
/**
* @}
*/
diff --git a/platform/linux-generic/include/odp_ethtool_stats.h b/platform/linux-generic/include/odp_ethtool_stats.h
index a8783149d..2888d1c81 100644
--- a/platform/linux-generic/include/odp_ethtool_stats.h
+++ b/platform/linux-generic/include/odp_ethtool_stats.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -19,6 +20,11 @@ extern "C" {
*/
int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats);
+int _odp_ethtool_extra_stat_info(int fd, const char *name, odp_pktio_extra_stat_info_t info[],
+ int num);
+int _odp_ethtool_extra_stats(int fd, const char *name, uint64_t stats[], int num);
+int _odp_ethtool_extra_stat_counter(int fd, const char *name, uint32_t id, uint64_t *stat);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_ipsec_internal.h b/platform/linux-generic/include/odp_ipsec_internal.h
index 96153007f..66d85d119 100644
--- a/platform/linux-generic/include/odp_ipsec_internal.h
+++ b/platform/linux-generic/include/odp_ipsec_internal.h
@@ -84,8 +84,27 @@ int _odp_ipsec_status_send(odp_queue_t queue,
#define IPSEC_MAX_SALT_LEN 4 /**< Maximum salt length in bytes */
-/* 32 is minimum required by the standard. We do not support more */
-#define IPSEC_ANTIREPLAY_WS 32
+/* The minimum supported AR window size */
+#define IPSEC_AR_WIN_SIZE_MIN 32
+
+/* The maximum supported AR window size */
+#define IPSEC_AR_WIN_SIZE_MAX 4096
+
+/* For a 64-bit bucket size */
+#define IPSEC_AR_WIN_BUCKET_BITS 6
+#define IPSEC_AR_WIN_BUCKET_SIZE (1 << IPSEC_AR_WIN_BUCKET_BITS)
+#define IPSEC_AR_WIN_BITLOC_MASK (IPSEC_AR_WIN_BUCKET_SIZE - 1)
+
+/*
+ * We need one extra bucket in addition to the buckets that contain
+ * part of the window.
+ */
+#define IPSEC_AR_WIN_NUM_BUCKETS(window_size) \
+ (((window_size) - 1) / IPSEC_AR_WIN_BUCKET_SIZE + 2)
+
+/* Maximum number of buckets */
+#define IPSEC_AR_WIN_BUCKET_MAX \
+ IPSEC_AR_WIN_NUM_BUCKETS(IPSEC_AR_WIN_SIZE_MAX)
struct ipsec_sa_s {
odp_atomic_u32_t state ODP_ALIGNED_CACHE;
@@ -101,7 +120,14 @@ struct ipsec_sa_s {
union {
struct {
- odp_atomic_u64_t antireplay;
+ /* AR window lock */
+ odp_spinlock_t lock;
+
+ /* AR window top sequence number */
+ odp_atomic_u64_t wintop_seq;
+
+ /* AR window bucket array */
+ uint64_t bucket_arr[IPSEC_AR_WIN_BUCKET_MAX];
} in;
struct {
@@ -162,6 +188,16 @@ struct ipsec_sa_s {
union {
struct {
odp_ipsec_ip_version_t lookup_ver;
+
+ /* Anti-replay window management. */
+ struct {
+ /* Number of buckets for AR window */
+ uint16_t num_buckets;
+
+ /* AR window size */
+ uint32_t win_size;
+ } ar;
+
union {
odp_u32be_t lookup_dst_ipv4;
uint8_t lookup_dst_ipv6[_ODP_IPV6ADDR_LEN];
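
Note: a quick standalone check of the bucket arithmetic above (a sketch only, reusing the macro definitions from this header): with 64-bit buckets, the minimum window of 32 fits in one bucket plus the extra one, and the maximum window of 4096 needs 4095 / 64 + 2 = 65 buckets.

    #include <stdio.h>

    #define IPSEC_AR_WIN_BUCKET_BITS 6
    #define IPSEC_AR_WIN_BUCKET_SIZE (1 << IPSEC_AR_WIN_BUCKET_BITS)
    #define IPSEC_AR_WIN_NUM_BUCKETS(ws) \
            (((ws) - 1) / IPSEC_AR_WIN_BUCKET_SIZE + 2)

    int main(void)
    {
            /* Minimum window: (32 - 1) / 64 + 2 = 2 buckets */
            printf("ws 32   -> %d buckets\n", IPSEC_AR_WIN_NUM_BUCKETS(32));
            /* Maximum window: (4096 - 1) / 64 + 2 = 65 buckets */
            printf("ws 4096 -> %d buckets\n", IPSEC_AR_WIN_NUM_BUCKETS(4096));
            return 0;
    }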
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index c5a51d11d..95444a6c8 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -1,5 +1,5 @@
/* Copyright (c) 2013-2018, Linaro Limited
- * Copyright (c) 2019-2020, Nokia
+ * Copyright (c) 2019-2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -196,6 +196,14 @@ typedef struct pktio_if_ops {
int (*stop)(pktio_entry_t *pktio_entry);
int (*stats)(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats);
int (*stats_reset)(pktio_entry_t *pktio_entry);
+ int (*pktin_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats);
+ int (*pktout_queue_stats)(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats);
+ int (*extra_stat_info)(pktio_entry_t *pktio_entry, odp_pktio_extra_stat_info_t info[],
+ int num);
+ int (*extra_stats)(pktio_entry_t *pktio_entry, uint64_t stats[], int num);
+ int (*extra_stat_counter)(pktio_entry_t *pktio_entry, uint32_t id, uint64_t *stat);
uint64_t (*pktio_ts_res)(pktio_entry_t *pktio_entry);
odp_time_t (*pktio_ts_from_ns)(pktio_entry_t *pktio_entry, uint64_t ns);
odp_time_t (*pktio_time)(pktio_entry_t *pktio_entry, odp_time_t *global_ts);
diff --git a/platform/linux-generic/include/odp_packet_io_stats.h b/platform/linux-generic/include/odp_packet_io_stats.h
index 22e3b5041..c1b37fb29 100644
--- a/platform/linux-generic/include/odp_packet_io_stats.h
+++ b/platform/linux-generic/include/odp_packet_io_stats.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -22,6 +23,17 @@ int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
int fd);
int _odp_sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd);
+void _odp_sock_stats_capa(pktio_entry_t *pktio_entry,
+ odp_pktio_capability_t *capa);
+
+int _odp_sock_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num,
+ int fd);
+int _odp_sock_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[], int num,
+ int fd);
+int _odp_sock_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat, int fd);
+
pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd);
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index db0f5c264..d3202543d 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -126,6 +126,7 @@ typedef struct {
uint32_t lock_index);
void (*schedule_order_lock_start)(uint32_t lock_index);
void (*schedule_order_lock_wait)(uint32_t lock_index);
+ void (*schedule_print)(void);
} schedule_api_t;
diff --git a/platform/linux-generic/include/odp_sysfs_stats.h b/platform/linux-generic/include/odp_sysfs_stats.h
index 4bcd2b7ff..0adb67c84 100644
--- a/platform/linux-generic/include/odp_sysfs_stats.h
+++ b/platform/linux-generic/include/odp_sysfs_stats.h
@@ -1,4 +1,5 @@
/* Copyright (c) 2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,6 +18,13 @@ extern "C" {
int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
odp_pktio_stats_t *stats);
+int _odp_sysfs_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num);
+int _odp_sysfs_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num);
+int _odp_sysfs_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/m4/odp_dpdk.m4 b/platform/linux-generic/m4/odp_dpdk.m4
index 2bbe9a947..f61c38a8e 100644
--- a/platform/linux-generic/m4/odp_dpdk.m4
+++ b/platform/linux-generic/m4/odp_dpdk.m4
@@ -16,6 +16,17 @@ AC_ARG_WITH([dpdk-path],
pktio_dpdk_support=yes],[])
##########################################################################
+# Use shared DPDK library
+##########################################################################
+dpdk_shared=no
+AC_ARG_ENABLE([dpdk-shared],
+ [AS_HELP_STRING([--enable-dpdk-shared],
+ [use shared DPDK library [default=disabled] (linux-generic)])],
+ [if test x$enableval = xyes; then
+ dpdk_shared=yes
+ fi])
+
+##########################################################################
# Enable zero-copy DPDK pktio
##########################################################################
zero_copy=0
@@ -34,7 +45,7 @@ AC_ARG_ENABLE([dpdk-zero-copy],
##########################################################################
if test x$pktio_dpdk_support = xyes
then
- ODP_DPDK([$DPDK_PATH], [], [],
+ ODP_DPDK([$DPDK_PATH], [$dpdk_shared], [],
[AC_MSG_FAILURE([can't find DPDK])])
case "${host}" in
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 9b31f99a8..bc31d01eb 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -461,7 +461,7 @@ uint32_t odp_cls_cos_queues(odp_cos_t cos_id, odp_queue_t queue[],
for (i = 0; i < num_queues; i++)
queue[i] = queue_grp_tbl->s.queue[tbl_index + i];
- return num_queues;
+ return cos->s.num_queue;
}
int odp_cos_drop_set(odp_cos_t cos_id, odp_cls_drop_t drop_policy)
diff --git a/platform/linux-generic/odp_fractional.c b/platform/linux-generic/odp_fractional.c
new file mode 100644
index 000000000..c98f3a4b2
--- /dev/null
+++ b/platform/linux-generic/odp_fractional.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2021, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/api/std_types.h>
+
+double odp_fract_u64_to_dbl(const odp_fract_u64_t *fract)
+{
+ double fraction;
+
+ if (fract->numer == 0)
+ fraction = 0.0;
+ else
+ fraction = (double)fract->numer / fract->denom;
+
+ return fract->integer + fraction;
+}
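
Note: a minimal usage sketch of the new conversion (the odp_fract_u64_t fields integer/numer/denom come from the updated std_types.h spec). A zero numerator short-circuits the division, so denom may legitimately be left zero in that case.

    odp_fract_u64_t f = { .integer = 1, .numer = 1, .denom = 4 };
    double val = odp_fract_u64_to_dbl(&f);      /* 1 + 1/4 = 1.25 */

    odp_fract_u64_t whole = { .integer = 10, .numer = 0, .denom = 0 };
    val = odp_fract_u64_to_dbl(&whole);         /* 10.0, denom never read */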
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index ed19392c9..15590523c 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -158,7 +158,7 @@ int odp_ipsec_capability(odp_ipsec_capability_t *capa)
capa->max_num_sa = _odp_ipsec_max_num_sa();
- capa->max_antireplay_ws = IPSEC_ANTIREPLAY_WS;
+ capa->max_antireplay_ws = IPSEC_AR_WIN_SIZE_MAX;
rc = set_ipsec_crypto_capa(capa);
if (rc < 0)
@@ -261,6 +261,7 @@ void odp_ipsec_config_init(odp_ipsec_config_t *config)
config->inbound.default_queue = ODP_QUEUE_INVALID;
config->inbound.lookup.min_spi = 0;
config->inbound.lookup.max_spi = UINT32_MAX;
+ config->inbound.reassembly.max_num_frags = 2;
config->stats_en = false;
}
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index d0367bf63..407192dcf 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -21,6 +21,7 @@
#include <odp/api/plat/cpu_inlines.h>
#include <string.h>
+#include <inttypes.h>
/*
* SA state consists of state value in the high order bits of ipsec_sa_t::state
@@ -446,6 +447,37 @@ static uint32_t esp_block_len_to_mask(uint32_t block_len)
return block_len - 1;
}
+/* AR window management initialization */
+static int ipsec_antireplay_init(ipsec_sa_t *ipsec_sa,
+ const odp_ipsec_sa_param_t *param)
+{
+ uint16_t num_bkts = 0;
+
+ if (param->inbound.antireplay_ws > IPSEC_AR_WIN_SIZE_MAX) {
+ ODP_ERR("Anti-replay window size %" PRIu32 " is not supported.\n",
+ param->inbound.antireplay_ws);
+ return -1;
+ }
+
+ ipsec_sa->antireplay = (param->inbound.antireplay_ws != 0);
+ if (!ipsec_sa->antireplay)
+ return 0;
+
+ ipsec_sa->in.ar.win_size = param->inbound.antireplay_ws;
+ /* Window size should be at least IPSEC_AR_WIN_SIZE_MIN */
+ if (ipsec_sa->in.ar.win_size < IPSEC_AR_WIN_SIZE_MIN)
+ ipsec_sa->in.ar.win_size = IPSEC_AR_WIN_SIZE_MIN;
+
+ num_bkts = IPSEC_AR_WIN_NUM_BUCKETS(ipsec_sa->in.ar.win_size);
+ ipsec_sa->in.ar.num_buckets = num_bkts;
+ odp_atomic_init_u64(&ipsec_sa->hot.in.wintop_seq, 0);
+ memset(ipsec_sa->hot.in.bucket_arr, 0, sizeof(uint64_t) * num_bkts);
+
+ odp_spinlock_init(&ipsec_sa->hot.in.lock);
+
+ return 0;
+}
+
odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
{
ipsec_sa_t *ipsec_sa;
@@ -488,10 +520,8 @@ odp_ipsec_sa_t odp_ipsec_sa_create(const odp_ipsec_sa_param_t *param)
sizeof(ipsec_sa->in.lookup_dst_ipv6));
}
- if (param->inbound.antireplay_ws > IPSEC_ANTIREPLAY_WS)
+ if (ipsec_antireplay_init(ipsec_sa, param))
goto error;
- ipsec_sa->antireplay = (param->inbound.antireplay_ws != 0);
- odp_atomic_init_u64(&ipsec_sa->hot.in.antireplay, 0);
} else {
ipsec_sa->lookup_mode = ODP_IPSEC_LOOKUP_DISABLED;
odp_atomic_init_u64(&ipsec_sa->hot.out.seq, 1);
@@ -909,10 +939,9 @@ int _odp_ipsec_sa_lifetime_update(ipsec_sa_t *ipsec_sa, uint32_t len,
static uint64_t ipsec_sa_antireplay_max_seq(ipsec_sa_t *ipsec_sa)
{
- uint64_t state, max_seq;
+ uint64_t max_seq = 0;
- state = odp_atomic_load_u64(&ipsec_sa->hot.in.antireplay);
- max_seq = state & 0xffffffff;
+ max_seq = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq) & 0xffffffff;
return max_seq;
}
@@ -921,9 +950,8 @@ int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
odp_ipsec_op_status_t *status)
{
/* Try to be as quick as possible, we will discard packets later */
- if (ipsec_sa->antireplay &&
- seq + IPSEC_ANTIREPLAY_WS <=
- (odp_atomic_load_u64(&ipsec_sa->hot.in.antireplay) & 0xffffffff)) {
+ if (ipsec_sa->antireplay && ((seq + ipsec_sa->in.ar.win_size) <=
+ (odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq) & 0xffffffff))) {
status->error.antireplay = 1;
return -1;
}
@@ -931,22 +959,73 @@ int _odp_ipsec_sa_replay_precheck(ipsec_sa_t *ipsec_sa, uint32_t seq,
return 0;
}
-int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
- odp_ipsec_op_status_t *status)
+static inline int ipsec_wslarge_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
+ odp_ipsec_op_status_t *status)
{
- int cas = 0;
- uint64_t state, new_state;
+ uint32_t bucket, wintop_bucket, new_bucket;
+ uint32_t bkt_diff, bkt_cnt, top_seq;
+ uint64_t bit = 0;
+
+ odp_spinlock_lock(&ipsec_sa->hot.in.lock);
- state = odp_atomic_load_u64(&ipsec_sa->hot.in.antireplay);
+ top_seq = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
+ if ((seq + ipsec_sa->in.ar.win_size) <= top_seq)
+ goto ar_err;
+
+ bucket = (seq >> IPSEC_AR_WIN_BUCKET_BITS);
+
+ /* Seq is ahead of the window top: slide the window forward */
+ if (seq > top_seq) {
+ wintop_bucket = top_seq >> IPSEC_AR_WIN_BUCKET_BITS;
+ bkt_diff = bucket - wintop_bucket;
+
+ /* Seq is more than a whole window ahead: clear at most all buckets */
+ if (bkt_diff > ipsec_sa->in.ar.num_buckets)
+ bkt_diff = ipsec_sa->in.ar.num_buckets;
+
+ for (bkt_cnt = 0; bkt_cnt < bkt_diff; bkt_cnt++) {
+ new_bucket = (bkt_cnt + wintop_bucket + 1) %
+ ipsec_sa->in.ar.num_buckets;
+ ipsec_sa->hot.in.bucket_arr[new_bucket] = 0;
+ }
+ /* AR window top sequence number */
+ odp_atomic_store_u64(&ipsec_sa->hot.in.wintop_seq, seq);
+ }
+
+ bucket %= ipsec_sa->in.ar.num_buckets;
+ bit = (uint64_t)1 << (seq & IPSEC_AR_WIN_BITLOC_MASK);
+
+ /* Already seen the packet, discard it */
+ if (ipsec_sa->hot.in.bucket_arr[bucket] & bit)
+ goto ar_err;
+
+ /* Packet is new, mark it as seen */
+ ipsec_sa->hot.in.bucket_arr[bucket] |= bit;
+ odp_spinlock_unlock(&ipsec_sa->hot.in.lock);
+
+ return 0;
+ar_err:
+ status->error.antireplay = 1;
+ odp_spinlock_unlock(&ipsec_sa->hot.in.lock);
+ return -1;
+}
+
+static inline int ipsec_ws32_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
+ odp_ipsec_op_status_t *status)
+{
+ uint64_t state, new_state;
+ int cas = 0;
+
+ state = odp_atomic_load_u64(&ipsec_sa->hot.in.wintop_seq);
while (0 == cas) {
uint32_t max_seq = state & 0xffffffff;
uint32_t mask = state >> 32;
- if (seq + IPSEC_ANTIREPLAY_WS <= max_seq) {
+ if (seq + IPSEC_AR_WIN_SIZE_MIN <= max_seq) {
status->error.antireplay = 1;
return -1;
- } else if (seq >= max_seq + IPSEC_ANTIREPLAY_WS) {
+ } else if (seq >= max_seq + IPSEC_AR_WIN_SIZE_MIN) {
mask = 1;
max_seq = seq;
} else if (seq > max_seq) {
@@ -961,14 +1040,26 @@ int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
}
new_state = (((uint64_t)mask) << 32) | max_seq;
-
- cas = odp_atomic_cas_acq_rel_u64(&ipsec_sa->hot.in.antireplay,
+ cas = odp_atomic_cas_acq_rel_u64(&ipsec_sa->hot.in.wintop_seq,
&state, new_state);
}
-
return 0;
}
+int _odp_ipsec_sa_replay_update(ipsec_sa_t *ipsec_sa, uint32_t seq,
+ odp_ipsec_op_status_t *status)
+{
+ int ret;
+
+ /* Window update for ws equal to 32 */
+ if (ipsec_sa->in.ar.win_size == IPSEC_AR_WIN_SIZE_MIN)
+ ret = ipsec_ws32_replay_update(ipsec_sa, seq, status);
+ else
+ ret = ipsec_wslarge_replay_update(ipsec_sa, seq, status);
+
+ return ret;
+}
+
uint16_t _odp_ipsec_sa_alloc_ipv4_id(ipsec_sa_t *ipsec_sa)
{
(void)ipsec_sa;
@@ -1084,7 +1175,7 @@ static void ipsec_in_sa_info(ipsec_sa_t *ipsec_sa, odp_ipsec_sa_info_t *sa_info)
sa_info->param.inbound.lookup_param.dst_addr = dst;
if (ipsec_sa->antireplay) {
- sa_info->inbound.antireplay_ws = IPSEC_ANTIREPLAY_WS;
+ sa_info->inbound.antireplay_ws = ipsec_sa->in.ar.win_size;
sa_info->inbound.antireplay_window_top =
ipsec_sa_antireplay_max_seq(ipsec_sa);
}
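
Note: a worked sketch of the bucket/bit mapping used by ipsec_wslarge_replay_update() above (standalone, mirroring the shift and mask constants from odp_ipsec_internal.h): the high bits of the sequence number pick a 64-bit bucket modulo the bucket count, and the low six bits pick a bit within it.

    #include <inttypes.h>
    #include <stdio.h>

    #define BUCKET_BITS 6
    #define BITLOC_MASK ((1 << BUCKET_BITS) - 1)

    int main(void)
    {
            uint32_t seq = 100;
            uint16_t num_buckets = 2; /* e.g. window size 64 -> 2 buckets */

            uint32_t bucket = (seq >> BUCKET_BITS) % num_buckets; /* 1 % 2 = 1 */
            uint32_t bitloc = seq & BITLOC_MASK;                  /* 100 % 64 = 36 */
            uint64_t bit = (uint64_t)1 << bitloc;

            printf("seq %" PRIu32 " -> bucket %" PRIu32 ", bit %" PRIu32
                   " (mask 0x%" PRIx64 ")\n", seq, bucket, bitloc, bit);
            return 0;
    }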
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 694b0a741..39cbd72f6 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -315,6 +315,10 @@ static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
return ODP_PKTIO_INVALID;
}
+ snprintf(pktio_entry->s.name,
+ sizeof(pktio_entry->s.name), "%s", if_name);
+ snprintf(pktio_entry->s.full_name,
+ sizeof(pktio_entry->s.full_name), "%s", name);
pktio_entry->s.pool = pool;
memcpy(&pktio_entry->s.param, param, sizeof(odp_pktio_param_t));
pktio_entry->s.handle = hdl;
@@ -347,10 +351,6 @@ static odp_pktio_t setup_pktio_entry(const char *name, odp_pool_t pool,
return ODP_PKTIO_INVALID;
}
- snprintf(pktio_entry->s.name,
- sizeof(pktio_entry->s.name), "%s", if_name);
- snprintf(pktio_entry->s.full_name,
- sizeof(pktio_entry->s.full_name), "%s", name);
pktio_entry->s.state = PKTIO_STATE_OPENED;
pktio_entry->s.ops = _odp_pktio_if_ops[pktio_if];
unlock_entry(pktio_entry);
@@ -1534,6 +1534,7 @@ void odp_pktio_config_init(odp_pktio_config_t *config)
memset(config, 0, sizeof(odp_pktio_config_t));
config->parser.layer = ODP_PROTO_LAYER_ALL;
+ config->reassembly.max_num_frags = 2;
}
int odp_pktio_info(odp_pktio_t hdl, odp_pktio_info_t *info)
@@ -1901,6 +1902,277 @@ int odp_pktio_stats_reset(odp_pktio_t pktio)
return ret;
}
+int odp_pktin_queue_stats(odp_pktin_queue_t queue,
+ odp_pktin_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+ int ret = -1;
+
+ entry = get_pktio_entry(queue.pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)queue.pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ mode = entry->s.param.in_mode;
+ if (odp_unlikely(mode != ODP_PKTIN_MODE_DIRECT)) {
+ unlock_entry(entry);
+ ODP_ERR("invalid packet input mode: %d\n", mode);
+ return -1;
+ }
+
+ if (entry->s.ops->pktin_queue_stats)
+ ret = entry->s.ops->pktin_queue_stats(entry, queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktin_event_queue_stats(odp_pktio_t pktio, odp_queue_t queue,
+ odp_pktin_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+ odp_pktin_queue_t pktin_queue;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ mode = entry->s.param.in_mode;
+ if (odp_unlikely(mode != ODP_PKTIN_MODE_SCHED && mode != ODP_PKTIN_MODE_QUEUE)) {
+ unlock_entry(entry);
+ ODP_ERR("invalid packet input mode: %d\n", mode);
+ return -1;
+ }
+
+ pktin_queue = _odp_queue_fn->get_pktin(queue);
+
+ if (entry->s.ops->pktin_queue_stats)
+ ret = entry->s.ops->pktin_queue_stats(entry, pktin_queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktout_queue_stats(odp_pktout_queue_t queue,
+ odp_pktout_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktout_mode_t mode;
+ int ret = -1;
+
+ entry = get_pktio_entry(queue.pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)queue.pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ mode = entry->s.param.out_mode;
+ if (odp_unlikely(mode != ODP_PKTOUT_MODE_DIRECT)) {
+ unlock_entry(entry);
+ ODP_ERR("invalid packet output mode: %d\n", mode);
+ return -1;
+ }
+
+ if (entry->s.ops->pktout_queue_stats)
+ ret = entry->s.ops->pktout_queue_stats(entry, queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktout_event_queue_stats(odp_pktio_t pktio, odp_queue_t queue,
+ odp_pktout_queue_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ odp_pktout_mode_t mode;
+ odp_pktout_queue_t pktout_queue;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("pktio entry already freed\n");
+ return -1;
+ }
+
+ mode = entry->s.param.out_mode;
+ if (odp_unlikely(mode != ODP_PKTOUT_MODE_QUEUE)) {
+ unlock_entry(entry);
+ ODP_ERR("invalid packet output mode: %d\n", mode);
+ return -1;
+ }
+
+ pktout_queue = _odp_queue_fn->get_pktout(queue);
+
+ if (entry->s.ops->pktout_queue_stats)
+ ret = entry->s.ops->pktout_queue_stats(entry, pktout_queue.index, stats);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktio_extra_stat_info(odp_pktio_t pktio,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ pktio_entry_t *entry;
+ int ret = 0;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("already freed pktio\n");
+ return -1;
+ }
+
+ if (entry->s.ops->extra_stat_info)
+ ret = entry->s.ops->extra_stat_info(entry, info, num);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktio_extra_stats(odp_pktio_t pktio, uint64_t stats[], int num)
+{
+ pktio_entry_t *entry;
+ int ret = 0;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("already freed pktio\n");
+ return -1;
+ }
+
+ if (entry->s.ops->extra_stats)
+ ret = entry->s.ops->extra_stats(entry, stats, num);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktio_extra_stat_counter(odp_pktio_t pktio, uint32_t id, uint64_t *stat)
+{
+ pktio_entry_t *entry;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_ERR("pktio entry %" PRIuPTR " does not exist\n", (uintptr_t)pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_ERR("already freed pktio\n");
+ return -1;
+ }
+
+ if (entry->s.ops->extra_stat_counter)
+ ret = entry->s.ops->extra_stat_counter(entry, id, stat);
+
+ unlock_entry(entry);
+
+ return ret;
+}
+
+void odp_pktio_extra_stats_print(odp_pktio_t pktio)
+{
+ int num_info, num_stats, i;
+
+ num_info = odp_pktio_extra_stat_info(pktio, NULL, 0);
+ if (num_info <= 0)
+ return;
+
+ num_stats = odp_pktio_extra_stats(pktio, NULL, 0);
+ if (num_stats <= 0)
+ return;
+
+ if (num_info != num_stats) {
+ ODP_ERR("extra statistics info counts not matching\n");
+ return;
+ }
+
+ odp_pktio_extra_stat_info_t stats_info[num_stats];
+ uint64_t extra_stats[num_stats];
+
+ num_info = odp_pktio_extra_stat_info(pktio, stats_info, num_stats);
+ if (num_info <= 0)
+ return;
+
+ num_stats = odp_pktio_extra_stats(pktio, extra_stats, num_stats);
+ if (num_stats <= 0)
+ return;
+
+ if (num_info != num_stats) {
+ ODP_ERR("extra statistics info counts not matching\n");
+ return;
+ }
+
+ printf("Pktio extra statistics\n----------------------\n");
+ for (i = 0; i < num_stats; i++)
+ ODP_PRINT(" %s=%" PRIu64 "\n", stats_info[i].name, extra_stats[i]);
+ ODP_PRINT("\n");
+}
+
int odp_pktin_queue_config(odp_pktio_t pktio,
const odp_pktin_queue_param_t *param)
{
@@ -2037,14 +2309,13 @@ int odp_pktin_queue_config(odp_pktio_t pktio,
return -1;
}
- if (mode == ODP_PKTIN_MODE_QUEUE) {
- _odp_queue_fn->set_pktin(queue, pktio, i);
+ _odp_queue_fn->set_pktin(queue, pktio, i);
+ if (mode == ODP_PKTIN_MODE_QUEUE)
_odp_queue_fn->set_enq_deq_fn(queue,
NULL,
NULL,
pktin_dequeue,
pktin_deq_multi);
- }
entry->s.in_queue[i].queue = queue;
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 3c3810dfc..5d328b84c 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -190,10 +190,10 @@ typedef struct {
uint16_t max_spread;
uint32_t ring_mask;
- prio_q_mask_t prio_q_mask[NUM_PRIO];
odp_spinlock_t mask_lock;
odp_atomic_u32_t grp_epoch;
odp_shm_t shm;
+ prio_q_mask_t prio_q_mask[NUM_SCHED_GRPS][NUM_PRIO];
struct {
uint8_t grp;
@@ -210,7 +210,7 @@ typedef struct {
/* Scheduler priority queues */
prio_queue_t prio_q[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
- uint32_t prio_q_count[NUM_PRIO][MAX_SPREAD];
+ uint32_t prio_q_count[NUM_SCHED_GRPS][NUM_PRIO][MAX_SPREAD];
odp_thrmask_t mask_all;
odp_spinlock_t grp_lock;
@@ -218,7 +218,8 @@ typedef struct {
struct {
char name[ODP_SCHED_GROUP_NAME_LEN];
odp_thrmask_t mask;
- int allocated;
+ uint16_t spread_thrs[MAX_SPREAD];
+ uint8_t allocated;
} sched_grp[NUM_SCHED_GRPS];
struct {
@@ -467,6 +468,13 @@ static int schedule_init_global(void)
sched->sched_grp[ODP_SCHED_GROUP_ALL].allocated = 1;
sched->sched_grp[ODP_SCHED_GROUP_WORKER].allocated = 1;
sched->sched_grp[ODP_SCHED_GROUP_CONTROL].allocated = 1;
+ strncpy(sched->sched_grp[ODP_SCHED_GROUP_ALL].name, "__SCHED_GROUP_ALL",
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ strncpy(sched->sched_grp[ODP_SCHED_GROUP_WORKER].name, "__SCHED_GROUP_WORKER",
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+ strncpy(sched->sched_grp[ODP_SCHED_GROUP_CONTROL].name, "__SCHED_GROUP_CONTROL",
+ ODP_SCHED_GROUP_NAME_LEN - 1);
+
odp_thrmask_setall(&sched->mask_all);
@@ -589,6 +597,7 @@ static int schedule_create_queue(uint32_t queue_index,
const odp_schedule_param_t *sched_param)
{
int i;
+ int grp = sched_param->group;
int prio = prio_level_from_api(sched_param->prio);
uint8_t spread = spread_index(queue_index);
@@ -597,22 +606,19 @@ static int schedule_create_queue(uint32_t queue_index,
return -1;
}
- if (sched_param->group < 0 || sched_param->group >= NUM_SCHED_GRPS) {
- ODP_ERR("Bad schedule group\n");
+ if (grp < 0 || grp >= NUM_SCHED_GRPS) {
+ ODP_ERR("Bad schedule group %i\n", grp);
return -1;
}
- if (sched_param->group == ODP_SCHED_GROUP_ALL &&
- !sched->config_if.group_enable.all) {
+ if (grp == ODP_SCHED_GROUP_ALL && !sched->config_if.group_enable.all) {
ODP_ERR("Trying to use disabled ODP_SCHED_GROUP_ALL\n");
return -1;
}
- if (sched_param->group == ODP_SCHED_GROUP_CONTROL &&
- !sched->config_if.group_enable.control) {
+ if (grp == ODP_SCHED_GROUP_CONTROL && !sched->config_if.group_enable.control) {
ODP_ERR("Trying to use disabled ODP_SCHED_GROUP_CONTROL\n");
return -1;
}
- if (sched_param->group == ODP_SCHED_GROUP_WORKER &&
- !sched->config_if.group_enable.worker) {
+ if (grp == ODP_SCHED_GROUP_WORKER && !sched->config_if.group_enable.worker) {
ODP_ERR("Trying to use disabled ODP_SCHED_GROUP_WORKER\n");
return -1;
}
@@ -620,12 +626,12 @@ static int schedule_create_queue(uint32_t queue_index,
odp_spinlock_lock(&sched->mask_lock);
/* update scheduler prio queue usage status */
- sched->prio_q_mask[prio] |= 1 << spread;
- sched->prio_q_count[prio][spread]++;
+ sched->prio_q_mask[grp][prio] |= 1 << spread;
+ sched->prio_q_count[grp][prio][spread]++;
odp_spinlock_unlock(&sched->mask_lock);
- sched->queue[queue_index].grp = sched_param->group;
+ sched->queue[queue_index].grp = grp;
sched->queue[queue_index].prio = prio;
sched->queue[queue_index].spread = spread;
sched->queue[queue_index].sync = sched_param->sync;
@@ -650,16 +656,17 @@ static inline uint8_t sched_sync_type(uint32_t queue_index)
static void schedule_destroy_queue(uint32_t queue_index)
{
+ int grp = sched->queue[queue_index].grp;
int prio = sched->queue[queue_index].prio;
uint8_t spread = spread_index(queue_index);
odp_spinlock_lock(&sched->mask_lock);
/* Clear mask bit when last queue is removed*/
- sched->prio_q_count[prio][spread]--;
+ sched->prio_q_count[grp][prio][spread]--;
- if (sched->prio_q_count[prio][spread] == 0)
- sched->prio_q_mask[prio] &= (uint8_t)(~(1 << spread));
+ if (sched->prio_q_count[grp][prio][spread] == 0)
+ sched->prio_q_mask[grp][prio] &= (uint8_t)(~(1 << spread));
odp_spinlock_unlock(&sched->mask_lock);
@@ -1014,11 +1021,9 @@ static inline int poll_pktin(uint32_t qi, int direct_recv,
}
static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
- unsigned int max_num, int grp, int first)
+ unsigned int max_num, int grp, int first_spr)
{
- int prio, i;
- int ret;
- int id;
+ int prio, spr, i, ret;
uint32_t qi;
uint16_t burst_def;
int num_spread = sched->config.num_spread;
@@ -1026,13 +1031,13 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
/* Schedule events */
for (prio = 0; prio < NUM_PRIO; prio++) {
- if (sched->prio_q_mask[prio] == 0)
+ if (sched->prio_q_mask[grp][prio] == 0)
continue;
burst_def = sched->config.burst_default[prio];
- /* Select the first ring based on weights */
- id = first;
+ /* Select the first spread based on weights */
+ spr = first_spr;
for (i = 0; i < num_spread;) {
int num;
@@ -1044,24 +1049,24 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
int stashed = 1;
odp_event_t *ev_tbl = sched_local.stash.ev;
- if (id >= num_spread)
- id = 0;
+ if (spr >= num_spread)
+ spr = 0;
/* No queues created for this priority queue */
- if (odp_unlikely((sched->prio_q_mask[prio] & (1 << id))
+ if (odp_unlikely((sched->prio_q_mask[grp][prio] & (1 << spr))
== 0)) {
i++;
- id++;
+ spr++;
continue;
}
/* Get queue index from the priority queue */
- ring = &sched->prio_q[grp][prio][id].ring;
+ ring = &sched->prio_q[grp][prio][spr].ring;
if (ring_u32_deq(ring, ring_mask, &qi) == 0) {
/* Priority queue empty */
i++;
- id++;
+ spr++;
continue;
}
@@ -1180,9 +1185,7 @@ static inline int do_schedule_grp(odp_queue_t *out_queue, odp_event_t out_ev[],
static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
unsigned int max_num)
{
- int i, num_grp;
- int ret;
- int first, grp_id;
+ int i, num_grp, ret, spr, grp_id;
uint16_t spread_round, grp_round;
uint32_t epoch;
@@ -1214,7 +1217,7 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
else
sched_local.spread_round = spread_round + 1;
- first = sched_local.spread_tbl[spread_round];
+ spr = sched_local.spread_tbl[spread_round];
epoch = odp_atomic_load_acq_u32(&sched->grp_epoch);
num_grp = sched_local.num_grp;
@@ -1231,7 +1234,7 @@ static inline int do_schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
int grp;
grp = sched_local.grp[grp_id];
- ret = do_schedule_grp(out_queue, out_ev, max_num, grp, first);
+ ret = do_schedule_grp(out_queue, out_ev, max_num, grp, spr);
if (odp_likely(ret))
return ret;
@@ -1488,52 +1491,106 @@ static odp_schedule_group_t schedule_group_lookup(const char *name)
return group;
}
-static int schedule_group_join(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
+static int schedule_group_join(odp_schedule_group_t group, const odp_thrmask_t *mask)
{
- int ret;
+ int i, count, thr;
+ uint8_t spread;
+ odp_thrmask_t new_mask;
- odp_spinlock_lock(&sched->grp_lock);
+ if (group >= NUM_SCHED_GRPS || group < SCHED_GROUP_NAMED) {
+ ODP_ERR("Bad group %i\n", group);
+ return -1;
+ }
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- odp_thrmask_t new_mask;
+ count = odp_thrmask_count(mask);
+ if (count <= 0) {
+ ODP_ERR("No threads in the mask\n");
+ return -1;
+ }
- odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, mask);
- grp_update_mask(group, &new_mask);
+ int thr_tbl[count];
- ret = 0;
- } else {
- ret = -1;
+ thr = odp_thrmask_first(mask);
+ for (i = 0; i < count; i++) {
+ if (thr < 0) {
+ ODP_ERR("No more threads in the mask\n");
+ return -1;
+ }
+
+ thr_tbl[i] = thr;
+ thr = odp_thrmask_next(mask, thr);
+ }
+
+ odp_spinlock_lock(&sched->grp_lock);
+
+ if (sched->sched_grp[group].allocated == 0) {
+ odp_spinlock_unlock(&sched->grp_lock);
+ ODP_ERR("Bad group status\n");
+ return -1;
}
+ for (i = 0; i < count; i++) {
+ spread = spread_index(thr_tbl[i]);
+ sched->sched_grp[group].spread_thrs[spread]++;
+ }
+
+ odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, mask);
+ grp_update_mask(group, &new_mask);
+
odp_spinlock_unlock(&sched->grp_lock);
- return ret;
+ return 0;
}
-static int schedule_group_leave(odp_schedule_group_t group,
- const odp_thrmask_t *mask)
+static int schedule_group_leave(odp_schedule_group_t group, const odp_thrmask_t *mask)
{
+ int i, count, thr;
+ uint8_t spread;
odp_thrmask_t new_mask;
- int ret;
+
+ if (group >= NUM_SCHED_GRPS || group < SCHED_GROUP_NAMED) {
+ ODP_ERR("Bad group %i\n", group);
+ return -1;
+ }
+
+ count = odp_thrmask_count(mask);
+ if (count <= 0) {
+ ODP_ERR("No threads in the mask\n");
+ return -1;
+ }
+
+ int thr_tbl[count];
+
+ thr = odp_thrmask_first(mask);
+ for (i = 0; i < count; i++) {
+ if (thr < 0) {
+ ODP_ERR("No more threads in the mask\n");
+ return -1;
+ }
+
+ thr_tbl[i] = thr;
+ thr = odp_thrmask_next(mask, thr);
+ }
odp_thrmask_xor(&new_mask, mask, &sched->mask_all);
odp_spinlock_lock(&sched->grp_lock);
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
- odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask,
- &new_mask);
- grp_update_mask(group, &new_mask);
+ if (sched->sched_grp[group].allocated == 0) {
+ odp_spinlock_unlock(&sched->grp_lock);
+ ODP_ERR("Bad group status\n");
+ return -1;
+ }
- ret = 0;
- } else {
- ret = -1;
+ for (i = 0; i < count; i++) {
+ spread = spread_index(thr_tbl[i]);
+ sched->sched_grp[group].spread_thrs[spread]--;
}
+ odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask, &new_mask);
+ grp_update_mask(group, &new_mask);
+
odp_spinlock_unlock(&sched->grp_lock);
- return ret;
+ return 0;
}
static int schedule_group_thrmask(odp_schedule_group_t group,
@@ -1543,8 +1600,7 @@ static int schedule_group_thrmask(odp_schedule_group_t group,
odp_spinlock_lock(&sched->grp_lock);
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
+ if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
*thrmask = sched->sched_grp[group].mask;
ret = 0;
} else {
@@ -1562,8 +1618,7 @@ static int schedule_group_info(odp_schedule_group_t group,
odp_spinlock_lock(&sched->grp_lock);
- if (group < NUM_SCHED_GRPS && group >= SCHED_GROUP_NAMED &&
- sched->sched_grp[group].allocated) {
+ if (group < NUM_SCHED_GRPS && sched->sched_grp[group].allocated) {
info->name = sched->sched_grp[group].name;
info->thrmask = sched->sched_grp[group].mask;
ret = 0;
@@ -1579,6 +1634,7 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
{
odp_thrmask_t mask;
odp_thrmask_t new_mask;
+ uint8_t spread = spread_index(thr);
if (group < 0 || group >= SCHED_GROUP_NAMED)
return -1;
@@ -1594,6 +1650,7 @@ static int schedule_thr_add(odp_schedule_group_t group, int thr)
}
odp_thrmask_or(&new_mask, &sched->sched_grp[group].mask, &mask);
+ sched->sched_grp[group].spread_thrs[spread]++;
grp_update_mask(group, &new_mask);
odp_spinlock_unlock(&sched->grp_lock);
@@ -1605,6 +1662,7 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
{
odp_thrmask_t mask;
odp_thrmask_t new_mask;
+ uint8_t spread = spread_index(thr);
if (group < 0 || group >= SCHED_GROUP_NAMED)
return -1;
@@ -1621,6 +1679,7 @@ static int schedule_thr_rem(odp_schedule_group_t group, int thr)
}
odp_thrmask_and(&new_mask, &sched->sched_grp[group].mask, &new_mask);
+ sched->sched_grp[group].spread_thrs[spread]--;
grp_update_mask(group, &new_mask);
odp_spinlock_unlock(&sched->grp_lock);
@@ -1657,6 +1716,82 @@ static int schedule_capability(odp_schedule_capability_t *capa)
return 0;
}
+static void schedule_print(void)
+{
+ int spr, prio, grp;
+ uint32_t num_queues, num_active;
+ ring_u32_t *ring;
+ odp_schedule_capability_t capa;
+ int num_spread = sched->config.num_spread;
+
+ (void)schedule_capability(&capa);
+
+ ODP_PRINT("\nScheduler debug info\n");
+ ODP_PRINT("--------------------\n");
+ ODP_PRINT(" scheduler: basic\n");
+ ODP_PRINT(" max groups: %u\n", capa.max_groups);
+ ODP_PRINT(" max priorities: %u\n", capa.max_prios);
+ ODP_PRINT(" num spread: %i\n", num_spread);
+ ODP_PRINT(" prefer ratio: %u\n", sched->config.prefer_ratio);
+ ODP_PRINT("\n");
+
+ ODP_PRINT(" Number of active event queues:\n");
+ ODP_PRINT(" spread\n");
+ ODP_PRINT(" ");
+
+ for (spr = 0; spr < num_spread; spr++)
+ ODP_PRINT(" %7i", spr);
+
+ ODP_PRINT("\n");
+
+ for (prio = 0; prio < NUM_PRIO; prio++) {
+ ODP_PRINT(" prio %i", prio);
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++)
+ if (sched->prio_q_mask[grp][prio])
+ break;
+
+ if (grp == NUM_SCHED_GRPS) {
+ ODP_PRINT(":-\n");
+ continue;
+ }
+
+ ODP_PRINT("\n");
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ if (sched->sched_grp[grp].allocated == 0)
+ continue;
+
+ ODP_PRINT(" group %i:", grp);
+
+ for (spr = 0; spr < num_spread; spr++) {
+ num_queues = sched->prio_q_count[grp][prio][spr];
+ ring = &sched->prio_q[grp][prio][spr].ring;
+ num_active = ring_u32_len(ring);
+ ODP_PRINT(" %3u/%3u", num_active, num_queues);
+ }
+ ODP_PRINT("\n");
+ }
+ }
+
+ ODP_PRINT("\n Number of threads:\n");
+ ODP_PRINT(" spread\n");
+
+ for (grp = 0; grp < NUM_SCHED_GRPS; grp++) {
+ if (sched->sched_grp[grp].allocated == 0)
+ continue;
+
+ ODP_PRINT(" group %i:", grp);
+
+ for (spr = 0; spr < num_spread; spr++)
+ ODP_PRINT(" %u", sched->sched_grp[grp].spread_thrs[spr]);
+
+ ODP_PRINT("\n");
+ }
+
+ ODP_PRINT("\n");
+}
+
/* Fill in scheduler interface */
const schedule_fn_t _odp_schedule_basic_fn = {
.pktio_start = schedule_pktio_start,
@@ -1705,7 +1840,8 @@ const schedule_api_t _odp_schedule_basic_api = {
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
.schedule_order_unlock = schedule_order_unlock,
- .schedule_order_unlock_lock = schedule_order_unlock_lock,
- .schedule_order_lock_start = schedule_order_lock_start,
- .schedule_order_lock_wait = schedule_order_lock_wait
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
+ .schedule_order_lock_start = schedule_order_lock_start,
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index 01359543c..24307ab90 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -201,6 +201,11 @@ void odp_schedule_order_lock_wait(uint32_t lock_index)
_odp_sched_api->schedule_order_lock_wait(lock_index);
}
+void odp_schedule_print(void)
+{
+ _odp_sched_api->schedule_print();
+}
+
int _odp_schedule_init_global(void)
{
const char *sched = getenv("ODP_SCHEDULER");
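
Note: odp_schedule_print() is a void debug hook; a typical call site (sketch) is after scheduler configuration, once groups and queues exist:

    odp_schedule_config_t sched_config;

    odp_schedule_config_init(&sched_config);
    if (odp_schedule_config(&sched_config) == 0)
            odp_schedule_print(); /* per group/prio/spread queue and thread counts */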
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index c9991c3f3..18e738330 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -2175,6 +2175,20 @@ static int schedule_capability(odp_schedule_capability_t *capa)
return 0;
}
+static void schedule_print(void)
+{
+ odp_schedule_capability_t capa;
+
+ (void)schedule_capability(&capa);
+
+ ODP_PRINT("\nScheduler debug info\n");
+ ODP_PRINT("--------------------\n");
+ ODP_PRINT(" scheduler: scalable\n");
+ ODP_PRINT(" max groups: %u\n", capa.max_groups);
+ ODP_PRINT(" max priorities: %u\n", capa.max_prios);
+ ODP_PRINT("\n");
+}
+
const schedule_fn_t _odp_schedule_scalable_fn = {
.pktio_start = pktio_start,
.thr_add = thr_add,
@@ -2222,5 +2236,6 @@ const schedule_api_t _odp_schedule_scalable_api = {
.schedule_order_unlock = schedule_order_unlock,
.schedule_order_unlock_lock = schedule_order_unlock_lock,
.schedule_order_lock_start = schedule_order_lock_start,
- .schedule_order_lock_wait = schedule_order_lock_wait
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index a401ba884..c4f8344f1 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -1039,6 +1039,20 @@ static int schedule_capability(odp_schedule_capability_t *capa)
return 0;
}
+static void schedule_print(void)
+{
+ odp_schedule_capability_t capa;
+
+ (void)schedule_capability(&capa);
+
+ ODP_PRINT("\nScheduler debug info\n");
+ ODP_PRINT("--------------------\n");
+ ODP_PRINT(" scheduler: sp\n");
+ ODP_PRINT(" max groups: %u\n", capa.max_groups);
+ ODP_PRINT(" max priorities: %u\n", capa.max_prios);
+ ODP_PRINT("\n");
+}
+
static void get_config(schedule_config_t *config)
{
*config = sched_global->config_if;
@@ -1092,7 +1106,8 @@ const schedule_api_t _odp_schedule_sp_api = {
.schedule_group_info = schedule_group_info,
.schedule_order_lock = schedule_order_lock,
.schedule_order_unlock = schedule_order_unlock,
- .schedule_order_unlock_lock = schedule_order_unlock_lock,
- .schedule_order_lock_start = schedule_order_lock_start,
- .schedule_order_lock_wait = schedule_order_lock_wait
+ .schedule_order_unlock_lock = schedule_order_unlock_lock,
+ .schedule_order_lock_start = schedule_order_lock_start,
+ .schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index f75fbd522..e5b42a83b 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -1332,15 +1332,28 @@ uint64_t odp_timer_current_tick(odp_timer_pool_t tpid)
return current_nsec(tp);
}
-int odp_timer_pool_info(odp_timer_pool_t tpid,
- odp_timer_pool_info_t *buf)
+int odp_timer_pool_info(odp_timer_pool_t tpid, odp_timer_pool_info_t *tp_info)
{
- timer_pool_t *tp = timer_pool_from_hdl(tpid);
+ timer_pool_t *tp;
+
+ if (odp_unlikely(tpid == ODP_TIMER_POOL_INVALID)) {
+ ODP_ERR("Invalid timer pool.\n");
+ return -1;
+ }
+
+ tp = timer_pool_from_hdl(tpid);
+
+ memset(tp_info, 0, sizeof(odp_timer_pool_info_t));
+ tp_info->param = tp->param;
+ tp_info->cur_timers = tp->num_alloc;
+ tp_info->hwm_timers = odp_atomic_load_u32(&tp->high_wm);
+ tp_info->name = tp->name;
+
+ /* One API timer tick is one nsec. Leave source clock information at zero
+ * as there is no direct link between a source clock signal and a timer tick. */
+ tp_info->tick_info.freq.integer = ODP_TIME_SEC_IN_NS;
+ tp_info->tick_info.nsec.integer = 1;
- buf->param = tp->param;
- buf->cur_timers = tp->num_alloc;
- buf->hwm_timers = odp_atomic_load_u32(&tp->high_wm);
- buf->name = tp->name;
return 0;
}
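
Note: with the tick info filled in, callers can recover the tick rate from the pool info; one tick is defined as one nanosecond here, so freq converts to 1e9 Hz (sketch, assuming tp is a valid odp_timer_pool_t):

    odp_timer_pool_info_t info;

    if (odp_timer_pool_info(tp, &info) == 0) {
            /* freq.integer = ODP_TIME_SEC_IN_NS, nsec.integer = 1 */
            double hz = odp_fract_u64_to_dbl(&info.tick_info.freq);
            printf("tick frequency %.0f Hz\n", hz); /* 1000000000 */
    }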
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 5d5fead16..4841402aa 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -138,7 +138,8 @@ typedef struct ODP_ALIGNED_CACHE {
struct rte_mempool *pkt_pool; /**< DPDK packet pool */
uint32_t data_room; /**< maximum packet length */
unsigned int min_rx_burst; /**< minimum RX burst size */
- odp_pktin_hash_proto_t hash; /**< Packet input hash protocol */
+ /** RSS configuration */
+ struct rte_eth_rss_conf rss_conf;
/* Supported RTE_PTYPE_XXX flags in a mask */
uint32_t supported_ptypes;
uint16_t mtu; /**< maximum transmission unit */
@@ -1131,7 +1132,7 @@ static int dpdk_vdev_promisc_mode_set(uint16_t port_id, int enable)
return mode;
}
-static void rss_conf_to_hash_proto(struct rte_eth_rss_conf *rss_conf,
+static void hash_proto_to_rss_conf(struct rte_eth_rss_conf *rss_conf,
const odp_pktin_hash_proto_t *hash_proto)
{
if (hash_proto->proto.ipv4_udp)
@@ -1154,34 +1155,19 @@ static void rss_conf_to_hash_proto(struct rte_eth_rss_conf *rss_conf,
rss_conf->rss_key = NULL;
}
-static int dpdk_setup_eth_dev(pktio_entry_t *pktio_entry,
- const struct rte_eth_dev_info *dev_info)
+static int dpdk_setup_eth_dev(pktio_entry_t *pktio_entry)
{
int ret;
pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
- struct rte_eth_rss_conf rss_conf;
struct rte_eth_conf eth_conf;
- uint64_t rss_hf_capa = dev_info->flow_type_rss_offloads;
uint64_t rx_offloads = 0;
uint64_t tx_offloads = 0;
- memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
-
- /* Always set some hash functions to enable DPDK RSS hash calculation.
- * Hash capability has been checked in pktin config. */
- if (pkt_dpdk->hash.all_bits == 0)
- rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP;
- else
- rss_conf_to_hash_proto(&rss_conf, &pkt_dpdk->hash);
-
- /* Filter out unsupported flags */
- rss_conf.rss_hf &= rss_hf_capa;
-
memset(&eth_conf, 0, sizeof(eth_conf));
eth_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
eth_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
- eth_conf.rx_adv_conf.rss_conf = rss_conf;
+ eth_conf.rx_adv_conf.rss_conf = pkt_dpdk->rss_conf;
/* Setup RX checksum offloads */
if (pktio_entry->s.config.pktin.bit.ipv4_chksum)
@@ -1423,59 +1409,57 @@ static int dpdk_pktio_term(void)
return 0;
}
-static int check_hash_proto(pktio_entry_t *pktio_entry,
- const odp_pktin_queue_param_t *p)
+static void prepare_rss_conf(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *p)
{
struct rte_eth_dev_info dev_info;
uint64_t rss_hf_capa;
pkt_dpdk_t *pkt_dpdk = pkt_priv(pktio_entry);
uint16_t port_id = pkt_dpdk->port_id;
+ memset(&pkt_dpdk->rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+
+ if (!p->hash_enable)
+ return;
+
rte_eth_dev_info_get(port_id, &dev_info);
rss_hf_capa = dev_info.flow_type_rss_offloads;
+ /* Print debug info about unsupported hash protocols */
if (p->hash_proto.proto.ipv4 &&
- ((rss_hf_capa & ETH_RSS_IPV4) == 0)) {
- ODP_ERR("hash_proto.ipv4 not supported\n");
- return -1;
- }
+ ((rss_hf_capa & ETH_RSS_IPV4) == 0))
+ ODP_PRINT("DPDK: hash_proto.ipv4 not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
if (p->hash_proto.proto.ipv4_udp &&
- ((rss_hf_capa & ETH_RSS_NONFRAG_IPV4_UDP) == 0)) {
- ODP_ERR("hash_proto.ipv4_udp not supported. "
- "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
- return -1;
- }
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV4_UDP) == 0))
+ ODP_PRINT("DPDK: hash_proto.ipv4_udp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
if (p->hash_proto.proto.ipv4_tcp &&
- ((rss_hf_capa & ETH_RSS_NONFRAG_IPV4_TCP) == 0)) {
- ODP_ERR("hash_proto.ipv4_tcp not supported. "
- "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
- return -1;
- }
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV4_TCP) == 0))
+ ODP_PRINT("DPDK: hash_proto.ipv4_tcp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
if (p->hash_proto.proto.ipv6 &&
- ((rss_hf_capa & ETH_RSS_IPV6) == 0)) {
- ODP_ERR("hash_proto.ipv6 not supported. "
- "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
- return -1;
- }
+ ((rss_hf_capa & ETH_RSS_IPV6) == 0))
+ ODP_PRINT("DPDK: hash_proto.ipv6 not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
if (p->hash_proto.proto.ipv6_udp &&
- ((rss_hf_capa & ETH_RSS_NONFRAG_IPV6_UDP) == 0)) {
- ODP_ERR("hash_proto.ipv6_udp not supported. "
- "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
- return -1;
- }
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV6_UDP) == 0))
+ ODP_PRINT("DPDK: hash_proto.ipv6_udp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
if (p->hash_proto.proto.ipv6_tcp &&
- ((rss_hf_capa & ETH_RSS_NONFRAG_IPV6_TCP) == 0)) {
- ODP_ERR("hash_proto.ipv6_tcp not supported. "
- "rss_hf_capa 0x%" PRIx64 "\n", rss_hf_capa);
- return -1;
- }
+ ((rss_hf_capa & ETH_RSS_NONFRAG_IPV6_TCP) == 0))
+ ODP_PRINT("DPDK: hash_proto.ipv6_tcp not supported (rss_hf_capa 0x%" PRIx64 ")\n",
+ rss_hf_capa);
- return 0;
+ hash_proto_to_rss_conf(&pkt_dpdk->rss_conf, &p->hash_proto);
+
+ /* Filter out unsupported hash functions */
+ pkt_dpdk->rss_conf.rss_hf &= rss_hf_capa;
}
static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
@@ -1484,8 +1468,7 @@ static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
odp_pktin_mode_t mode = pktio_entry->s.param.in_mode;
uint8_t lockless;
- if (p->hash_enable && check_hash_proto(pktio_entry, p))
- return -1;
+ prepare_rss_conf(pktio_entry, p);
/**
* Scheduler synchronizes input queue polls. Only single thread
@@ -1496,9 +1479,6 @@ static int dpdk_input_queues_config(pktio_entry_t *pktio_entry,
else
lockless = 0;
- if (p->hash_enable && p->num_queues > 1)
- pkt_priv(pktio_entry)->hash = p->hash_proto;
-
pkt_priv(pktio_entry)->lockless_rx = lockless;
return 0;
@@ -1633,6 +1613,21 @@ static int dpdk_init_capability(pktio_entry_t *pktio_entry,
capa->config.pktout.bit.tcp_chksum;
capa->config.pktout.bit.ts_ena = 1;
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+
+ capa->stats.pktin_queue.counter.octets = 1;
+ capa->stats.pktin_queue.counter.packets = 1;
+ capa->stats.pktin_queue.counter.errors = 1;
+
+ capa->stats.pktout_queue.counter.octets = 1;
+ capa->stats.pktout_queue.counter.packets = 1;
+
return 0;
}
@@ -1811,6 +1806,17 @@ static int dpdk_setup_eth_tx(pktio_entry_t *pktio_entry,
}
}
+ /* Set per-queue statistics mappings. Not supported by all PMDs, so
+ * ignore the return value. */
+ for (i = 0; i < pktio_entry->s.num_out_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ ret = rte_eth_dev_set_tx_queue_stats_mapping(port_id, i, i);
+ if (ret) {
+ ODP_DBG("Mapping per TX queue statistics not supported: %d\n", ret);
+ break;
+ }
+ }
+ ODP_DBG("Mapped %" PRIu32 "/%d TX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
return 0;
}
@@ -1839,6 +1845,17 @@ static int dpdk_setup_eth_rx(const pktio_entry_t *pktio_entry,
}
}
+ /* Set per-queue statistics mappings. Not supported by all PMDs, so
+ * ignore the return value. */
+ for (i = 0; i < pktio_entry->s.num_in_queue && i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
+ ret = rte_eth_dev_set_rx_queue_stats_mapping(port_id, i, i);
+ if (ret) {
+ ODP_DBG("Mapping per RX queue statistics not supported: %d\n", ret);
+ break;
+ }
+ }
+ ODP_DBG("Mapped %" PRIu32 "/%d RX counters\n", i, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
return 0;
}
@@ -1901,7 +1918,7 @@ static int dpdk_start(pktio_entry_t *pktio_entry)
rte_eth_dev_info_get(port_id, &dev_info);
/* Setup device */
- if (dpdk_setup_eth_dev(pktio_entry, &dev_info)) {
+ if (dpdk_setup_eth_dev(pktio_entry)) {
ODP_ERR("Failed to configure device\n");
return -1;
}
@@ -2225,7 +2242,138 @@ static int dpdk_stats(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats)
static int dpdk_stats_reset(pktio_entry_t *pktio_entry)
{
- rte_eth_stats_reset(pkt_priv(pktio_entry)->port_id);
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+
+ (void)rte_eth_stats_reset(port_id);
+ (void)rte_eth_xstats_reset(port_id);
+ return 0;
+}
+
+static int dpdk_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get_names(port_id, NULL, 0);
+ if (num_stats < 0) {
+ ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (info == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat_name xstats_names[num_stats];
+
+ ret = rte_eth_xstats_get_names(port_id, xstats_names, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ ODP_ERR("rte_eth_xstats_get_names() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ strncpy(info[i].name, xstats_names[i].name,
+ ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1);
+
+ return num_stats;
+}
+
+static int dpdk_extra_stats(pktio_entry_t *pktio_entry,
+ uint64_t stats[], int num)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ int num_stats, ret, i;
+
+ num_stats = rte_eth_xstats_get(port_id, NULL, 0);
+ if (num_stats < 0) {
+ ODP_ERR("rte_eth_xstats_get() failed: %d\n", num_stats);
+ return num_stats;
+ } else if (stats == NULL || num == 0 || num_stats == 0) {
+ return num_stats;
+ }
+
+ struct rte_eth_xstat xstats[num_stats];
+
+ ret = rte_eth_xstats_get(port_id, xstats, num_stats);
+ if (ret < 0 || ret > num_stats) {
+ ODP_ERR("rte_eth_xstats_get() failed: %d\n", ret);
+ return -1;
+ }
+ num_stats = ret;
+
+ for (i = 0; i < num && i < num_stats; i++)
+ stats[i] = xstats[i].value;
+
+ return num_stats;
+}
+
+static int dpdk_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ uint16_t port_id = pkt_priv(pktio_entry)->port_id;
+ uint64_t xstat_id = id;
+ int ret;
+
+ ret = rte_eth_xstats_get_by_id(port_id, &xstat_id, stat, 1);
+ if (ret != 1) {
+ ODP_ERR("rte_eth_xstats_get_by_id() failed: %d\n", ret);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int dpdk_pktin_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktin_queue_stats_t *pktin_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ ODP_ERR("DPDK supports max %d per queue counters\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
+
+ pktin_stats->packets = rte_stats.q_ipackets[index];
+ pktin_stats->octets = rte_stats.q_ibytes[index];
+ pktin_stats->errors = rte_stats.q_errors[index];
+
+ return 0;
+}
+
+static int dpdk_pktout_stats(pktio_entry_t *pktio_entry, uint32_t index,
+ odp_pktout_queue_stats_t *pktout_stats)
+{
+ struct rte_eth_stats rte_stats;
+ int ret;
+
+ if (odp_unlikely(index > RTE_ETHDEV_QUEUE_STAT_CNTRS - 1)) {
+ ODP_ERR("DPDK supports max %d per queue counters\n",
+ RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ return -1;
+ }
+
+ ret = rte_eth_stats_get(pkt_priv(pktio_entry)->port_id, &rte_stats);
+ if (odp_unlikely(ret)) {
+ ODP_ERR("Failed to read DPDK pktio stats: %d\n", ret);
+ return -1;
+ }
+
+ memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
+
+ pktout_stats->packets = rte_stats.q_opackets[index];
+ pktout_stats->octets = rte_stats.q_obytes[index];
+
return 0;
}
@@ -2240,6 +2388,11 @@ const pktio_if_ops_t _odp_dpdk_pktio_ops = {
.stop = dpdk_stop,
.stats = dpdk_stats,
.stats_reset = dpdk_stats_reset,
+ .pktin_queue_stats = dpdk_pktin_stats,
+ .pktout_queue_stats = dpdk_pktout_stats,
+ .extra_stat_info = dpdk_extra_stat_info,
+ .extra_stats = dpdk_extra_stats,
+ .extra_stat_counter = dpdk_extra_stat_counter,
.recv = dpdk_recv,
.send = dpdk_send,
.link_status = dpdk_link_status,
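
Note: the extra-stat callbacks above follow the standard DPDK xstats two-call pattern (size query with a NULL array, then fetch), and the generic odp_pktio_extra_stats() wrappers expose the same convention to applications (sketch, error handling elided):

    int num = odp_pktio_extra_stats(pktio, NULL, 0);

    if (num > 0) {
            odp_pktio_extra_stat_info_t info[num];
            uint64_t stats[num];

            if (odp_pktio_extra_stat_info(pktio, info, num) == num &&
                odp_pktio_extra_stats(pktio, stats, num) == num)
                    printf("%s = %" PRIu64 "\n", info[0].name, stats[0]);
    }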
diff --git a/platform/linux-generic/pktio/dpdk_parse.c b/platform/linux-generic/pktio/dpdk_parse.c
index 2b41b14a2..f593f4b11 100644
--- a/platform/linux-generic/pktio/dpdk_parse.c
+++ b/platform/linux-generic/pktio/dpdk_parse.c
@@ -8,6 +8,8 @@
#ifdef _ODP_PKTIO_DPDK
+#include <odp_posix_extensions.h>
+
#include <odp_packet_io_internal.h>
#include <odp_packet_dpdk.h>
#include <odp/api/byteorder.h>
@@ -98,7 +100,7 @@ static inline uint16_t dpdk_parse_eth(packet_parser_t *prs,
goto error;
}
ethtype = odp_be_to_cpu_16(*((const uint16_t *)(uintptr_t)
- (parseptr + 6)));
+ (*parseptr + 6)));
*offset += 8;
*parseptr += 8;
}
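
Note: the dpdk_parse.c change is a genuine bug fix, not a cleanup: parseptr is the parse cursor passed by reference (const uint8_t **), so the old expression parseptr + 6 advanced the pointer-to-pointer itself, while *parseptr + 6 correctly points six bytes into the current header before the EtherType is read.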
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index dd321511a..437977771 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -458,6 +458,16 @@ static int loopback_init_capability(pktio_entry_t *pktio_entry)
capa->config.pktout.bit.sctp_chksum_ena =
capa->config.pktout.bit.sctp_chksum;
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktin_queue.counter.octets = 1;
+ capa->stats.pktin_queue.counter.packets = 1;
+ capa->stats.pktin_queue.counter.errors = 1;
+ capa->stats.pktout_queue.counter.octets = 1;
+ capa->stats.pktout_queue.counter.packets = 1;
return 0;
}
@@ -493,6 +503,27 @@ static int loopback_stats_reset(pktio_entry_t *pktio_entry ODP_UNUSED)
return 0;
}
+static int loopback_pktin_stats(pktio_entry_t *pktio_entry,
+ uint32_t index ODP_UNUSED,
+ odp_pktin_queue_stats_t *pktin_stats)
+{
+ memset(pktin_stats, 0, sizeof(odp_pktin_queue_stats_t));
+ pktin_stats->octets = pktio_entry->s.stats.in_octets;
+ pktin_stats->packets = pktio_entry->s.stats.in_packets;
+ pktin_stats->errors = pktio_entry->s.stats.in_errors;
+ return 0;
+}
+
+static int loopback_pktout_stats(pktio_entry_t *pktio_entry,
+ uint32_t index ODP_UNUSED,
+ odp_pktout_queue_stats_t *pktout_stats)
+{
+ memset(pktout_stats, 0, sizeof(odp_pktout_queue_stats_t));
+ pktout_stats->octets = pktio_entry->s.stats.out_octets;
+ pktout_stats->packets = pktio_entry->s.stats.out_packets;
+ return 0;
+}
+
static int loop_init_global(void)
{
ODP_PRINT("PKTIO: initialized loop interface.\n");
@@ -511,6 +542,8 @@ const pktio_if_ops_t _odp_loopback_pktio_ops = {
.stop = NULL,
.stats = loopback_stats,
.stats_reset = loopback_stats_reset,
+ .pktin_queue_stats = loopback_pktin_stats,
+ .pktout_queue_stats = loopback_pktout_stats,
.recv = loopback_recv,
.send = loopback_send,
.maxlen_get = loopback_mtu_get,
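
Note: the loopback interface has a single queue pair, so its per-queue counters above simply mirror the interface-level statistics and the queue index argument is unused.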
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 544503aa3..18d9b3a33 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -637,6 +637,7 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
pktio_entry->s.stats_type = STATS_UNSUPPORTED;
} else {
pktio_entry->s.stats_type = STATS_ETHTOOL;
+ _odp_sock_stats_capa(pktio_entry, &pktio_entry->s.capa);
}
(void)netmap_stats_reset(pktio_entry);
@@ -1249,6 +1250,27 @@ static int netmap_stats_reset(pktio_entry_t *pktio_entry)
pkt_priv(pktio_entry)->sockfd);
}
+static int netmap_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ return _odp_sock_extra_stat_info(pktio_entry, info, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int netmap_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ return _odp_sock_extra_stats(pktio_entry, stats, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int netmap_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ return _odp_sock_extra_stat_counter(pktio_entry, id, stat,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
static void netmap_print(pktio_entry_t *pktio_entry)
{
odp_pktin_hash_proto_t hash_proto;
@@ -1287,6 +1309,9 @@ const pktio_if_ops_t _odp_netmap_pktio_ops = {
.link_info = netmap_link_info,
.stats = netmap_stats,
.stats_reset = netmap_stats_reset,
+ .extra_stat_info = netmap_extra_stat_info,
+ .extra_stats = netmap_extra_stats,
+ .extra_stat_counter = netmap_extra_stat_counter,
.maxlen_get = netmap_mtu_get,
.maxlen_set = netmap_mtu_set,
.promisc_mode_set = netmap_promisc_mode_set,
diff --git a/platform/linux-generic/pktio/pcap.c b/platform/linux-generic/pktio/pcap.c
index d4858903b..bf4c87c02 100644
--- a/platform/linux-generic/pktio/pcap.c
+++ b/platform/linux-generic/pktio/pcap.c
@@ -431,6 +431,11 @@ static int pcapif_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
capa->config.pktout.bit.ts_ena = 1;
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+
return 0;
}
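pcap keeps its counters in software, so only the four it maintains (octets and packets per direction) are advertised. Since all_counters overlays the individual counter bits, one compare is enough to detect interfaces with no pktio-level counters at all; a hedged sketch, assuming pktio is an open handle:

    odp_pktio_capability_t capa;

    if (odp_pktio_capability(pktio, &capa) == 0 &&
        capa.stats.pktio.all_counters == 0)
            printf("no pktio-level counters supported\n");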
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index eb4390e46..291ee543f 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -526,7 +526,7 @@ static int sock_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *inf
return _odp_link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, info);
}
-static int sock_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
+static int sock_capability(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa)
{
pkt_sock_t *pkt_sock = pkt_priv(pktio_entry);
@@ -550,6 +550,9 @@ static int sock_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
capa->config.pktout.bit.ts_ena = 1;
+ /* Fill statistics capabilities */
+ _odp_sock_stats_capa(pktio_entry, capa);
+
return 0;
}
@@ -575,6 +578,28 @@ static int sock_stats_reset(pktio_entry_t *pktio_entry)
return _odp_sock_stats_reset_fd(pktio_entry, pkt_priv(pktio_entry)->sockfd);
}
+static int sock_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[],
+ int num)
+{
+ return _odp_sock_extra_stat_info(pktio_entry, info, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ return _odp_sock_extra_stats(pktio_entry, stats, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ return _odp_sock_extra_stat_counter(pktio_entry, id, stat,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
static int sock_init_global(void)
{
if (getenv("ODP_PKTIO_DISABLE_SOCKET_MMSG")) {
@@ -600,6 +625,9 @@ const pktio_if_ops_t _odp_sock_mmsg_pktio_ops = {
.stop = NULL,
.stats = sock_stats,
.stats_reset = sock_stats_reset,
+ .extra_stat_info = sock_extra_stat_info,
+ .extra_stats = sock_extra_stats,
+ .extra_stat_counter = sock_extra_stat_counter,
.recv = sock_mmsg_recv,
.recv_tmo = sock_recv_tmo,
.recv_mq_tmo = sock_recv_mq_tmo,
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 532f392fa..93984af4f 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -841,7 +841,7 @@ static int sock_mmap_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t
return _odp_link_info_fd(pkt_priv(pktio_entry)->sockfd, pktio_entry->s.name, info);
}
-static int sock_mmap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
+static int sock_mmap_capability(pktio_entry_t *pktio_entry,
odp_pktio_capability_t *capa)
{
pkt_sock_mmap_t *const pkt_sock = pkt_priv(pktio_entry);
@@ -865,6 +865,9 @@ static int sock_mmap_capability(pktio_entry_t *pktio_entry ODP_UNUSED,
capa->config.pktout.bit.ts_ena = 1;
+ /* Fill statistics capabilities */
+ _odp_sock_stats_capa(pktio_entry, capa);
+
return 0;
}
@@ -893,6 +896,28 @@ static int sock_mmap_stats_reset(pktio_entry_t *pktio_entry)
pkt_priv(pktio_entry)->sockfd);
}
+static int sock_mmap_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[],
+ int num)
+{
+ return _odp_sock_extra_stat_info(pktio_entry, info, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_mmap_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ return _odp_sock_extra_stats(pktio_entry, stats, num,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
+static int sock_mmap_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ return _odp_sock_extra_stat_counter(pktio_entry, id, stat,
+ pkt_priv(pktio_entry)->sockfd);
+}
+
static int sock_mmap_init_global(void)
{
if (getenv("ODP_PKTIO_DISABLE_SOCKET_MMAP")) {
@@ -918,6 +943,9 @@ const pktio_if_ops_t _odp_sock_mmap_pktio_ops = {
.stop = NULL,
.stats = sock_mmap_stats,
.stats_reset = sock_mmap_stats_reset,
+ .extra_stat_info = sock_mmap_extra_stat_info,
+ .extra_stats = sock_mmap_extra_stats,
+ .extra_stat_counter = sock_mmap_extra_stat_counter,
.recv = sock_mmap_recv,
.recv_tmo = sock_mmap_recv_tmo,
.recv_mq_tmo = sock_mmap_recv_mq_tmo,
diff --git a/platform/linux-generic/pktio/stats/ethtool_stats.c b/platform/linux-generic/pktio/stats/ethtool_stats.c
index e4f99e331..d8b6a7976 100644
--- a/platform/linux-generic/pktio/stats/ethtool_stats.c
+++ b/platform/linux-generic/pktio/stats/ethtool_stats.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -83,15 +84,21 @@ static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
return strings;
}
-static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
+static int ethtool_stats_get(int fd, const char *name,
+ struct ethtool_gstrings **strings_out,
+ struct ethtool_stats **estats_out,
+ unsigned int *nstats_out)
{
struct ethtool_gstrings *strings;
struct ethtool_stats *estats;
- unsigned int n_stats, i;
+ struct ifreq ifr;
+ unsigned int n_stats;
int err;
- int cnts;
- strings = get_stringset(fd, ifr);
+ memset(&ifr, 0, sizeof(ifr));
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ strings = get_stringset(fd, &ifr);
if (!strings)
return -1;
@@ -111,8 +118,8 @@ static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
estats->cmd = ETHTOOL_GSTATS;
estats->n_stats = n_stats;
- ifr->ifr_data = (void *)estats;
- err = ioctl(fd, SIOCETHTOOL, ifr);
+ ifr.ifr_data = (void *)estats;
+ err = ioctl(fd, SIOCETHTOOL, &ifr);
if (err < 0) {
_odp_errno = errno;
free(strings);
@@ -120,36 +127,83 @@ static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
return -1;
}
- cnts = 0;
+ if (strings_out)
+ *strings_out = strings;
+ else
+ free(strings);
+
+ if (estats_out)
+ *estats_out = estats;
+ else
+ free(estats);
+
+ if (nstats_out)
+ *nstats_out = n_stats;
+
+ return 0;
+}
+
+int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
+{
+ struct ethtool_gstrings *strings;
+ struct ethtool_stats *estats;
+ unsigned int i, n_stats;
+ int cnts = 0;
+
+ if (ethtool_stats_get(fd, name, &strings, &estats, &n_stats))
+ return -1;
+
for (i = 0; i < n_stats; i++) {
char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
uint64_t val = estats->data[i];
- if (!strcmp(cnt, "rx_octets")) {
+ if (!strcmp(cnt, "rx_octets") ||
+ !strcmp(cnt, "rx_bytes")) {
stats->in_octets = val;
cnts++;
} else if (!strcmp(cnt, "rx_packets")) {
stats->in_packets = val;
cnts++;
- } else if (!strcmp(cnt, "rx_ucast_packets")) {
+ } else if (!strcmp(cnt, "rx_ucast_packets") ||
+ !strcmp(cnt, "rx_unicast")) {
stats->in_ucast_pkts = val;
cnts++;
- } else if (!strcmp(cnt, "rx_discards")) {
+ } else if (!strcmp(cnt, "rx_broadcast") ||
+ !strcmp(cnt, "rx_bcast_packets")) {
+ stats->in_bcast_pkts = val;
+ cnts++;
+ } else if (!strcmp(cnt, "rx_multicast") ||
+ !strcmp(cnt, "rx_mcast_packets")) {
+ stats->in_mcast_pkts = val;
+ cnts++;
+ } else if (!strcmp(cnt, "rx_discards") ||
+ !strcmp(cnt, "rx_dropped")) {
stats->in_discards = val;
cnts++;
} else if (!strcmp(cnt, "rx_errors")) {
stats->in_errors = val;
cnts++;
- } else if (!strcmp(cnt, "tx_octets")) {
+ } else if (!strcmp(cnt, "tx_octets") ||
+ !strcmp(cnt, "tx_bytes")) {
stats->out_octets = val;
cnts++;
} else if (!strcmp(cnt, "tx_packets")) {
stats->out_packets = val;
cnts++;
- } else if (!strcmp(cnt, "tx_ucast_packets")) {
+ } else if (!strcmp(cnt, "tx_ucast_packets") ||
+ !strcmp(cnt, "tx_unicast")) {
stats->out_ucast_pkts = val;
cnts++;
- } else if (!strcmp(cnt, "tx_discards")) {
+ } else if (!strcmp(cnt, "tx_broadcast") ||
+ !strcmp(cnt, "tx_bcast_packets")) {
+ stats->out_bcast_pkts = val;
+ cnts++;
+ } else if (!strcmp(cnt, "tx_multicast") ||
+ !strcmp(cnt, "tx_mcast_packets")) {
+ stats->out_mcast_pkts = val;
+ cnts++;
+ } else if (!strcmp(cnt, "tx_discards") ||
+ !strcmp(cnt, "tx_dropped")) {
stats->out_discards = val;
cnts++;
} else if (!strcmp(cnt, "tx_errors")) {
@@ -164,18 +218,66 @@ static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
/* Ethtool strings come from the kernel driver and are not
* standardized. This function needs to be updated if your
* driver reports counters under different names. */
- if (cnts < 8)
+ if (cnts < 14)
return -1;
return 0;
}
-int _odp_ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
+int _odp_ethtool_extra_stat_info(int fd, const char *name,
+ odp_pktio_extra_stat_info_t info[], int num)
{
- struct ifreq ifr;
+ struct ethtool_gstrings *strings;
+ unsigned int i, n_stats;
- memset(&ifr, 0, sizeof(ifr));
- snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ if (ethtool_stats_get(fd, name, &strings, NULL, &n_stats))
+ return -1;
+
+ for (i = 0; i < n_stats && i < (unsigned int)num; i++) {
+ char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
+
+ strncpy(info[i].name, cnt, ODP_PKTIO_STATS_EXTRA_NAME_LEN - 1);
+ }
+
+ free(strings);
+
+ return n_stats;
+}
+
+int _odp_ethtool_extra_stats(int fd, const char *name, uint64_t stats[], int num)
+{
+ struct ethtool_stats *estats;
+ unsigned int i, n_stats;
+
+ if (ethtool_stats_get(fd, name, NULL, &estats, &n_stats))
+ return -1;
+
+ for (i = 0; i < n_stats && i < (unsigned int)num; i++)
+ stats[i] = estats->data[i];
+
+ free(estats);
+
+ return n_stats;
+}
+
+int _odp_ethtool_extra_stat_counter(int fd, const char *name, uint32_t id,
+ uint64_t *stat)
+{
+ struct ethtool_stats *estats;
+ unsigned int n_stats;
+ int ret = 0;
+
+ if (ethtool_stats_get(fd, name, NULL, &estats, &n_stats))
+ return -1;
+
+ if (id >= n_stats) {
+ ODP_ERR("Invalid counter id\n");
+ ret = -1;
+ } else {
+ *stat = estats->data[id];
+ }
+
+ free(estats);
- return ethtool_stats(fd, &ifr, stats);
+ return ret;
}
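The refactor separates fetching from parsing: ethtool_stats_get() issues the ETHTOOL_GSTRINGS and ETHTOOL_GSTATS ioctls once and hands ownership of only the requested arrays to the caller, freeing the rest internally when an output pointer is NULL. A hedged usage sketch within this file's context, assuming fd is an open AF_INET socket and the interface name is valid:

    struct ethtool_gstrings *strings;
    unsigned int i, n;

    /* Names only: estats_out is NULL, so the values array is freed inside */
    if (ethtool_stats_get(fd, "eth0", &strings, NULL, &n) == 0) {
            for (i = 0; i < n; i++)
                    printf("%s\n", (char *)&strings->data[i * ETH_GSTRING_LEN]);
            free(strings);
    }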
diff --git a/platform/linux-generic/pktio/stats/packet_io_stats.c b/platform/linux-generic/pktio/stats/packet_io_stats.c
index e8d4d9a62..3dd0d8be9 100644
--- a/platform/linux-generic/pktio/stats/packet_io_stats.c
+++ b/platform/linux-generic/pktio/stats/packet_io_stats.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -66,6 +67,10 @@ int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
pktio_entry->s.stats.in_packets;
stats->in_ucast_pkts = cur_stats.in_ucast_pkts -
pktio_entry->s.stats.in_ucast_pkts;
+ stats->in_bcast_pkts = cur_stats.in_bcast_pkts -
+ pktio_entry->s.stats.in_bcast_pkts;
+ stats->in_mcast_pkts = cur_stats.in_mcast_pkts -
+ pktio_entry->s.stats.in_mcast_pkts;
stats->in_discards = cur_stats.in_discards -
pktio_entry->s.stats.in_discards;
stats->in_errors = cur_stats.in_errors -
@@ -80,6 +85,10 @@ int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
pktio_entry->s.stats.out_packets;
stats->out_ucast_pkts = cur_stats.out_ucast_pkts -
pktio_entry->s.stats.out_ucast_pkts;
+ stats->out_bcast_pkts = cur_stats.out_bcast_pkts -
+ pktio_entry->s.stats.out_bcast_pkts;
+ stats->out_mcast_pkts = cur_stats.out_mcast_pkts -
+ pktio_entry->s.stats.out_mcast_pkts;
stats->out_discards = cur_stats.out_discards -
pktio_entry->s.stats.out_discards;
stats->out_errors = cur_stats.out_errors -
@@ -88,6 +97,53 @@ int _odp_sock_stats_fd(pktio_entry_t *pktio_entry,
return ret;
}
+int _odp_sock_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num,
+ int fd)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
+ return 0;
+
+ if (pktio_entry->s.stats_type == STATS_ETHTOOL)
+ return _odp_ethtool_extra_stat_info(fd, pktio_entry->s.name,
+ info, num);
+ else if (pktio_entry->s.stats_type == STATS_SYSFS)
+ return _odp_sysfs_extra_stat_info(pktio_entry, info, num);
+
+ return 0;
+}
+
+int _odp_sock_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[], int num,
+ int fd)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
+ return 0;
+
+ if (pktio_entry->s.stats_type == STATS_ETHTOOL)
+ return _odp_ethtool_extra_stats(fd, pktio_entry->s.name,
+ stats, num);
+ else if (pktio_entry->s.stats_type == STATS_SYSFS)
+ return _odp_sysfs_extra_stats(pktio_entry, stats, num);
+
+ return 0;
+}
+
+int _odp_sock_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat, int fd)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
+ return -1;
+
+ if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
+ return _odp_ethtool_extra_stat_counter(fd, pktio_entry->s.name,
+ id, stat);
+ } else if (pktio_entry->s.stats_type == STATS_SYSFS) {
+ return _odp_sysfs_extra_stat_counter(pktio_entry, id, stat);
+ }
+
+ return 0;
+}
+
pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd)
{
odp_pktio_stats_t cur_stats;
@@ -100,3 +156,40 @@ pktio_stats_type_t _odp_sock_stats_type_fd(pktio_entry_t *pktio_entry, int fd)
return STATS_UNSUPPORTED;
}
+
+void _odp_sock_stats_capa(pktio_entry_t *pktio_entry,
+ odp_pktio_capability_t *capa)
+{
+ capa->stats.pktio.all_counters = 0;
+ capa->stats.pktin_queue.all_counters = 0;
+ capa->stats.pktout_queue.all_counters = 0;
+
+ if (pktio_entry->s.stats_type == STATS_SYSFS) {
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_ucast_pkts = 1;
+ capa->stats.pktio.counter.in_mcast_pkts = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_ucast_pkts = 1;
+ capa->stats.pktio.counter.out_discards = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+ } else if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
+ capa->stats.pktio.counter.in_octets = 1;
+ capa->stats.pktio.counter.in_packets = 1;
+ capa->stats.pktio.counter.in_ucast_pkts = 1;
+ capa->stats.pktio.counter.in_bcast_pkts = 1;
+ capa->stats.pktio.counter.in_mcast_pkts = 1;
+ capa->stats.pktio.counter.in_discards = 1;
+ capa->stats.pktio.counter.in_errors = 1;
+ capa->stats.pktio.counter.out_octets = 1;
+ capa->stats.pktio.counter.out_packets = 1;
+ capa->stats.pktio.counter.out_ucast_pkts = 1;
+ capa->stats.pktio.counter.out_bcast_pkts = 1;
+ capa->stats.pktio.counter.out_mcast_pkts = 1;
+ capa->stats.pktio.counter.out_discards = 1;
+ capa->stats.pktio.counter.out_errors = 1;
+ }
+}
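_odp_sock_stats_capa() centralizes what the socket-based backends can report: the sysfs path lacks broadcast counters (the kernel statistics directory exposes none), while ethtool can cover the full set when the driver exports matching strings. Any pktio driver that reads counters through a socket fd can reuse the shared helpers; a hedged sketch of the glue, where my_sockfd() is a hypothetical accessor:

    /* Hypothetical driver glue, mirroring the socket/socket_mmap wiring */
    static int my_extra_stats(pktio_entry_t *entry, uint64_t stats[], int num)
    {
            return _odp_sock_extra_stats(entry, stats, num, my_sockfd(entry));
    }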
diff --git a/platform/linux-generic/pktio/stats/sysfs_stats.c b/platform/linux-generic/pktio/stats/sysfs_stats.c
index 1150f9d72..21620013e 100644
--- a/platform/linux-generic/pktio/stats/sysfs_stats.c
+++ b/platform/linux-generic/pktio/stats/sysfs_stats.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2015-2018, Linaro Limited
+ * Copyright (c) 2021, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -7,9 +8,13 @@
#include <odp_api.h>
#include <odp_sysfs_stats.h>
#include <odp_errno_define.h>
+#include <dirent.h>
#include <errno.h>
#include <string.h>
#include <inttypes.h>
+#include <linux/limits.h>
+
+#define SYSFS_DIR "/sys/class/net/%s/statistics"
static int sysfs_get_val(const char *fname, uint64_t *val)
{
@@ -57,7 +62,10 @@ int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
sprintf(fname, "/sys/class/net/%s/statistics/rx_packets", dev);
ret -= sysfs_get_val(fname, &stats->in_ucast_pkts);
- sprintf(fname, "/sys/class/net/%s/statistics/rx_droppped", dev);
+ sprintf(fname, "/sys/class/net/%s/statistics/multicast", dev);
+ ret -= sysfs_get_val(fname, &stats->in_mcast_pkts);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_dropped", dev);
ret -= sysfs_get_val(fname, &stats->in_discards);
sprintf(fname, "/sys/class/net/%s/statistics/rx_errors", dev);
@@ -80,3 +88,116 @@ int _odp_sysfs_stats(pktio_entry_t *pktio_entry,
return ret;
}
+
+int _odp_sysfs_extra_stat_info(pktio_entry_t *pktio_entry,
+ odp_pktio_extra_stat_info_t info[], int num)
+{
+ struct dirent *e;
+ DIR *dir;
+ char sysfs_dir[PATH_MAX];
+ int counters = 0;
+
+ snprintf(sysfs_dir, PATH_MAX, SYSFS_DIR, pktio_entry->s.name);
+ dir = opendir(sysfs_dir);
+ if (!dir) {
+ ODP_ERR("Failed to open sysfs dir: %s\n", sysfs_dir);
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ /* Skip . and .. */
+ if (strncmp(e->d_name, ".", 1) == 0)
+ continue;
+
+ if (info && counters < num)
+ snprintf(info[counters].name,
+ ODP_PKTIO_STATS_EXTRA_NAME_LEN, "%s",
+ e->d_name);
+ counters++;
+ }
+ (void)closedir(dir);
+
+ return counters;
+}
+
+int _odp_sysfs_extra_stats(pktio_entry_t *pktio_entry, uint64_t stats[],
+ int num)
+{
+ struct dirent *e;
+ DIR *dir;
+ char sysfs_dir[PATH_MAX];
+ char file_path[PATH_MAX];
+ int counters = 0;
+
+ snprintf(sysfs_dir, PATH_MAX, SYSFS_DIR, pktio_entry->s.name);
+ dir = opendir(sysfs_dir);
+ if (!dir) {
+ ODP_ERR("Failed to open dir: %s\n", sysfs_dir);
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ uint64_t val;
+
+ /* Skip . and .. */
+ if (strncmp(e->d_name, ".", 1) == 0)
+ continue;
+
+ snprintf(file_path, PATH_MAX, "%s/%s", sysfs_dir, e->d_name);
+ if (sysfs_get_val(file_path, &val)) {
+ ODP_ERR("Failed to read file: %s/n", file_path);
+ counters = -1;
+ break;
+ }
+
+ if (stats && counters < num)
+ stats[counters] = val;
+
+ counters++;
+ }
+ (void)closedir(dir);
+
+ return counters;
+}
+
+int _odp_sysfs_extra_stat_counter(pktio_entry_t *pktio_entry, uint32_t id,
+ uint64_t *stat)
+{
+ struct dirent *e;
+ DIR *dir;
+ char sysfs_dir[PATH_MAX];
+ char file_path[PATH_MAX];
+ uint32_t counters = 0;
+ int ret = -1;
+
+ snprintf(sysfs_dir, PATH_MAX, SYSFS_DIR, pktio_entry->s.name);
+ dir = opendir(sysfs_dir);
+ if (!dir) {
+ ODP_ERR("Failed to open dir: %s\n", sysfs_dir);
+ return -1;
+ }
+
+ while ((e = readdir(dir)) != NULL) {
+ /* Skip . and .. */
+ if (strncmp(e->d_name, ".", 1) == 0)
+ continue;
+
+ if (counters == id) {
+ uint64_t val;
+
+ snprintf(file_path, PATH_MAX, "%s/%s",
+ sysfs_dir, e->d_name);
+ if (sysfs_get_val(file_path, &val)) {
+ ODP_ERR("Failed to read file: %s/n", file_path);
+ } else {
+ *stat = val;
+ ret = 0;
+ }
+ break;
+ }
+ counters++;
+ }
+ (void)closedir(dir);
+
+ return ret;
+}
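The sysfs backend enumerates /sys/class/net/<dev>/statistics, so an extra-stat id is simply the counter's position in readdir() order; applications should resolve names to ids through odp_pktio_extra_stat_info() rather than hard-coding indexes. Each statistics file holds a single decimal value. A hedged standalone sketch of such a read (read_counter and "eth0" are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    static int read_counter(const char *dev, const char *name, uint64_t *val)
    {
            char path[256];
            FILE *f;
            int ret = -1;

            snprintf(path, sizeof(path), "/sys/class/net/%s/statistics/%s",
                     dev, name);
            f = fopen(path, "r");
            if (f) {
                    if (fscanf(f, "%" SCNu64, val) == 1)
                            ret = 0;
                    fclose(f);
            }
            return ret;
    }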
diff --git a/platform/linux-generic/test/example/ipsec_api/pktio_env b/platform/linux-generic/test/example/ipsec_api/pktio_env
index a16002326..abf6b5ce3 100644
--- a/platform/linux-generic/test/example/ipsec_api/pktio_env
+++ b/platform/linux-generic/test/example/ipsec_api/pktio_env
@@ -35,6 +35,12 @@ if [ -n "$WITH_OPENSSL" ] && [ ${WITH_OPENSSL} -eq 0 ]; then
exit 77
fi
+# Skip live and router mode tests.
+if [ ${IPSEC_APP_MODE} -eq 1 ] || [ ${IPSEC_APP_MODE} -eq 2 ]; then
+ echo "IPsec Live / Router mode test. Skipping."
+ exit 77
+fi
+
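Exit status 77 is the automake test harness convention for a skipped test, so these cases are reported as SKIP rather than FAIL.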
IF0=p7p1
IF1=p8p1
diff --git a/scripts/ci/build_arm64.sh b/scripts/ci/build_arm64.sh
index ad3b95e75..4f8d51152 100755
--- a/scripts/ci/build_arm64.sh
+++ b/scripts/ci/build_arm64.sh
@@ -17,6 +17,6 @@ export CPPFLAGS="-I/usr/include/${TARGET_ARCH}/dpdk"
# Use target libraries
export PKG_CONFIG_PATH=
-export PKG_CONFIG_LIBDIR=/usr/lib/${TARGET_ARCH}/pkgconfig
+export PKG_CONFIG_LIBDIR=/usr/lib/${TARGET_ARCH}/pkgconfig:/usr/local/lib/${TARGET_ARCH}/pkgconfig
exec "$(dirname "$0")"/build.sh
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index c7950cfd9..3da08661c 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -344,7 +344,7 @@ static int run_worker_sched_mode_vector(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups) {
+ if (gbl_args->appl.num_groups > 0) {
odp_thrmask_t mask;
odp_thrmask_zero(&mask);
@@ -527,7 +527,7 @@ static int run_worker_sched_mode(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups) {
+ if (gbl_args->appl.num_groups > 0) {
odp_thrmask_t mask;
odp_thrmask_zero(&mask);
@@ -1523,7 +1523,8 @@ static void usage(char *progname)
" -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
" 1: Use checksum offload\n"
" -g, --groups <num> Number of groups to use: 0 ... num\n"
- " 0: SCHED_GROUP_ALL (default)\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
" num: must not exceed number of interfaces or workers\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
@@ -2071,8 +2072,13 @@ int main(int argc, char *argv[])
printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
printf("CPU mask: %s\n", cpumaskstr);
- if (num_groups)
+ if (num_groups > 0)
printf("num groups: %i\n", num_groups);
+ else if (num_groups == 0)
+ printf("group: ODP_SCHED_GROUP_ALL\n");
+ else
+ printf("group: ODP_SCHED_GROUP_WORKER\n");
+
if (num_groups > if_count || num_groups > num_workers) {
ODPH_ERR("Too many groups. Number of groups may not exceed "
@@ -2210,6 +2216,9 @@ int main(int argc, char *argv[])
if (num_groups == 0) {
group[0] = ODP_SCHED_GROUP_ALL;
num_groups = 1;
+ } else if (num_groups == -1) {
+ group[0] = ODP_SCHED_GROUP_WORKER;
+ num_groups = 1;
} else {
create_groups(num_groups, group);
}
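With this change -g -1 selects ODP_SCHED_GROUP_WORKER, so packets are scheduled only to worker threads, 0 keeps ODP_SCHED_GROUP_ALL, and positive values still create that many explicit groups. For reference, a hedged sketch of how an explicit group is built from a thread mask (the group name is illustrative):

    odp_thrmask_t mask;
    odp_schedule_group_t grp;

    odp_thrmask_zero(&mask);
    odp_thrmask_set(&mask, odp_thread_id());

    /* Queues assigned to grp are scheduled only to threads in mask */
    grp = odp_schedule_group_create("grp_0", &mask);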
diff --git a/test/performance/odp_sched_perf.c b/test/performance/odp_sched_perf.c
index 148bf11d5..4ec4f4352 100644
--- a/test/performance/odp_sched_perf.c
+++ b/test/performance/odp_sched_perf.c
@@ -33,7 +33,7 @@ typedef struct test_options_t {
uint32_t num_dummy;
uint32_t num_event;
uint32_t num_sched;
- uint32_t num_group;
+ int num_group;
uint32_t num_join;
uint32_t max_burst;
int queue_type;
@@ -81,6 +81,7 @@ typedef struct test_global_t {
odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
test_stat_t stat[ODP_THREAD_COUNT_MAX];
thread_arg_t thread_arg[ODP_THREAD_COUNT_MAX];
+ odp_atomic_u32_t num_worker;
} test_global_t;
@@ -95,9 +96,10 @@ static void print_usage(void)
" -q, --num_queue Number of queues. Default: 1.\n"
" -d, --num_dummy Number of empty queues. Default: 0.\n"
" -e, --num_event Number of events per queue. Default: 100.\n"
- " -s, --num_sched Number of events to schedule per thread\n"
+ " -s, --num_sched Number of events to schedule per thread. Default: 100 000.\n"
" -g, --num_group Number of schedule groups. Round robins threads and queues into groups.\n"
- " 0: SCHED_GROUP_ALL (default)\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
" -j, --num_join Number of groups a thread joins. Threads are divide evenly into groups,\n"
" if num_cpu is multiple of num_group and num_group is multiple of num_join.\n"
" 0: join all groups (default)\n"
@@ -115,9 +117,7 @@ static void print_usage(void)
static int parse_options(int argc, char *argv[], test_options_t *test_options)
{
- int opt;
- int long_index;
- uint32_t num_group, num_join;
+ int opt, long_index, num_group, num_join;
int ret = 0;
uint32_t ctx_size = 0;
@@ -232,21 +232,18 @@ static int parse_options(int argc, char *argv[], test_options_t *test_options)
num_group = test_options->num_group;
num_join = test_options->num_join;
if (num_group > MAX_GROUPS) {
- printf("Error: Too many groups. Max supported %i.\n",
- MAX_GROUPS);
+ ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
ret = -1;
}
- if (num_join > num_group) {
- printf("Error: num_join (%u) larger than num_group (%u).\n",
- num_join, num_group);
+ if (num_group > 0 && num_join > num_group) {
+ ODPH_ERR("num_join (%i) larger than num_group (%i).\n", num_join, num_group);
ret = -1;
}
- if (num_join && num_group > (test_options->num_cpu * num_join)) {
- printf("WARNING: Too many groups (%u). Some groups (%u) are not served.\n\n",
- num_group,
- num_group - (test_options->num_cpu * num_join));
+ if (num_join && num_group > (int)(test_options->num_cpu * num_join)) {
+ printf("WARNING: Too many groups (%i). Some groups (%i) are not served.\n\n",
+ num_group, num_group - (test_options->num_cpu * num_join));
if (test_options->forward) {
printf("Error: Cannot forward when some queues are not served.\n");
@@ -333,7 +330,7 @@ static int create_pool(test_global_t *global)
uint32_t tot_queue = test_options->tot_queue;
uint32_t tot_event = test_options->tot_event;
uint32_t queue_size = test_options->queue_size;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
uint32_t num_join = test_options->num_join;
int forward = test_options->forward;
uint64_t wait_ns = test_options->wait_ns;
@@ -352,7 +349,14 @@ static int create_pool(test_global_t *global)
printf(" num queues %u\n", num_queue);
printf(" num empty queues %u\n", num_dummy);
printf(" total queues %u\n", tot_queue);
- printf(" num groups %u\n", num_group);
+ printf(" num groups %i", num_group);
+ if (num_group == -1)
+ printf(" (ODP_SCHED_GROUP_WORKER)\n");
+ else if (num_group == 0)
+ printf(" (ODP_SCHED_GROUP_ALL)\n");
+ else
+ printf("\n");
+
printf(" num join %u\n", num_join);
printf(" forward events %i\n", forward ? 1 : 0);
printf(" wait nsec %" PRIu64 "\n", wait_ns);
@@ -422,7 +426,7 @@ static int create_groups(test_global_t *global)
test_options_t *test_options = &global->test_options;
uint32_t num_group = test_options->num_group;
- if (num_group == 0)
+ if (test_options->num_group <= 0)
return 0;
if (odp_schedule_capability(&sched_capa)) {
@@ -466,7 +470,7 @@ static int create_queues(test_global_t *global)
uint32_t num_event = test_options->num_event;
uint32_t queue_size = test_options->queue_size;
uint32_t tot_queue = test_options->tot_queue;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
int type = test_options->queue_type;
odp_pool_t pool = global->pool;
uint8_t *ctx = NULL;
@@ -510,11 +514,14 @@ static int create_queues(test_global_t *global)
queue_param.type = ODP_QUEUE_TYPE_SCHED;
queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
queue_param.sched.sync = sync;
- queue_param.sched.group = ODP_SCHED_GROUP_ALL;
queue_param.size = queue_size;
+ if (num_group == -1)
+ queue_param.sched.group = ODP_SCHED_GROUP_WORKER;
+ else
+ queue_param.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < tot_queue; i++) {
- if (num_group) {
+ if (num_group > 0) {
odp_schedule_group_t group;
/* Divide all queues evenly into groups */
@@ -598,11 +605,11 @@ static int join_group(test_global_t *global, int grp_index, int thr)
static int join_all_groups(test_global_t *global, int thr)
{
- uint32_t i;
+ int i;
test_options_t *test_options = &global->test_options;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
- if (num_group == 0)
+ if (num_group <= 0)
return 0;
for (i = 0; i < num_group; i++) {
@@ -647,11 +654,11 @@ static int destroy_queues(test_global_t *global)
static int destroy_groups(test_global_t *global)
{
- uint32_t i;
+ int i;
test_options_t *test_options = &global->test_options;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
- if (num_group == 0)
+ if (num_group <= 0)
return 0;
for (i = 0; i < num_group; i++) {
@@ -725,7 +732,7 @@ static int test_sched(void *arg)
test_options_t *test_options = &global->test_options;
uint32_t num_sched = test_options->num_sched;
uint32_t max_burst = test_options->max_burst;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
int forward = test_options->forward;
int touch_data = test_options->touch_data;
uint32_t rd_words = test_options->rd_words;
@@ -746,7 +753,7 @@ static int test_sched(void *arg)
if (forward)
ctx_offset = ROUNDUP(sizeof(odp_queue_t), 8);
- if (num_group) {
+ if (num_group > 0) {
uint32_t num_join = test_options->num_join;
if (num_join) {
@@ -890,6 +897,16 @@ static int test_sched(void *arg)
global->stat[thr].dummy_sum = data_sum + ctx_sum;
global->stat[thr].failed = ret;
+ if (odp_atomic_fetch_dec_u32(&global->num_worker) == 1) {
+ /* The last worker frees all events. This is needed when the main
+ * thread cannot do the cleanup (ODP_SCHED_GROUP_WORKER). */
+ odp_event_t event;
+ uint64_t sched_wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
+
+ while ((event = odp_schedule(NULL, sched_wait)) != ODP_EVENT_INVALID)
+ odp_event_free(event);
+ }
+
/* Pause scheduling before thread exit */
odp_schedule_pause();
@@ -919,11 +936,13 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
odph_thread_common_param_t thr_common;
int i, ret;
test_options_t *test_options = &global->test_options;
- uint32_t num_group = test_options->num_group;
+ int num_group = test_options->num_group;
uint32_t num_join = test_options->num_join;
int num_cpu = test_options->num_cpu;
odph_thread_param_t thr_param[num_cpu];
+ odp_atomic_init_u32(&global->num_worker, num_cpu);
+
memset(global->thread_tbl, 0, sizeof(global->thread_tbl));
memset(thr_param, 0, sizeof(thr_param));
memset(&thr_common, 0, sizeof(thr_common));
@@ -939,7 +958,7 @@ static int start_workers(test_global_t *global, odp_instance_t instance)
global->thread_arg[i].global = global;
global->thread_arg[i].first_group = 0;
- if (num_group && num_join) {
+ if (num_group > 0 && num_join) {
/* Each thread joins only num_join groups, starting
* from this group index and wrapping around the group
* table. */
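Since the main thread is not a member of ODP_SCHED_GROUP_WORKER, it can no longer drain leftover events after the run; the last worker to finish now does it. The generic last-thread-out pattern, as a hedged sketch (live_workers is illustrative and must be initialized to the worker count before the threads start):

    static odp_atomic_u32_t live_workers;

    static void last_worker_drain(void)
    {
            /* fetch_dec returns the previous value: 1 means this thread is last */
            if (odp_atomic_fetch_dec_u32(&live_workers) == 1) {
                    uint64_t wait = odp_schedule_wait_time(200 * ODP_TIME_MSEC_IN_NS);
                    odp_event_t ev;

                    while ((ev = odp_schedule(NULL, wait)) != ODP_EVENT_INVALID)
                            odp_event_free(ev);
            }
    }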
diff --git a/test/validation/api/ipsec/ipsec_test_out.c b/test/validation/api/ipsec/ipsec_test_out.c
index 7c1121579..3349ded99 100644
--- a/test/validation/api/ipsec/ipsec_test_out.c
+++ b/test/validation/api/ipsec/ipsec_test_out.c
@@ -1570,6 +1570,12 @@ static void ipsec_test_default_values(void)
CU_ASSERT(config.inbound.retain_outer == ODP_PROTO_LAYER_NONE);
CU_ASSERT(config.inbound.parse_level == ODP_PROTO_LAYER_NONE);
CU_ASSERT(config.inbound.chksums.all_chksum == 0);
+ CU_ASSERT(!config.inbound.reassembly.en_ipv4);
+ CU_ASSERT(!config.inbound.reassembly.en_ipv6);
+ CU_ASSERT(config.inbound.reassembly.max_wait_time == 0);
+ CU_ASSERT(config.inbound.reassembly.max_num_frags == 2);
+ CU_ASSERT(!config.inbound.reass_async);
+ CU_ASSERT(!config.inbound.reass_inline);
CU_ASSERT(config.outbound.all_chksum == 0);
CU_ASSERT(!config.stats_en);
@@ -1592,6 +1598,7 @@ static void ipsec_test_default_values(void)
CU_ASSERT(sa_param.inbound.lookup_mode == ODP_IPSEC_LOOKUP_DISABLED);
CU_ASSERT(sa_param.inbound.antireplay_ws == 0);
CU_ASSERT(sa_param.inbound.pipeline == ODP_IPSEC_PIPELINE_NONE);
+ CU_ASSERT(!sa_param.inbound.reassembly_en);
CU_ASSERT(sa_param.outbound.tunnel.type == ODP_IPSEC_TUNNEL_IPV4);
CU_ASSERT(sa_param.outbound.tunnel.ipv4.dscp == 0);
CU_ASSERT(sa_param.outbound.tunnel.ipv4.df == 0);
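The added assertions pin down the inbound reassembly defaults: disabled for both IP versions, max_wait_time 0, and max_num_frags 2. A hedged sketch of the init call an application starts from, with the defaults noted:

    odp_ipsec_config_t config;

    odp_ipsec_config_init(&config);
    /* After init: reassembly disabled, max_num_frags == 2,
     * matching the defaults asserted above */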
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index 9a47dbe8c..838d50fd8 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -91,6 +91,11 @@ typedef enum {
RECV_MQ_TMO_NO_IDX,
} recv_tmo_mode_e;
+typedef enum {
+ ETH_UNICAST,
+ ETH_BROADCAST,
+} eth_addr_type_e;
+
/** size of transmitted packets */
static uint32_t packet_len = PKT_LEN_NORMAL;
@@ -157,7 +162,8 @@ static void set_pool_len(odp_pool_param_t *params, odp_pool_capability_t *capa)
}
}
-static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst)
+static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t dst,
+ eth_addr_type_e dst_addr_type)
{
uint32_t len;
odph_ethhdr_t *eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, &len);
@@ -167,9 +173,13 @@ static void pktio_pkt_set_macs(odp_packet_t pkt, odp_pktio_t src, odp_pktio_t ds
CU_ASSERT(ret == ODPH_ETHADDR_LEN);
CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
- ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
- CU_ASSERT(ret == ODPH_ETHADDR_LEN);
- CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+ if (dst_addr_type == ETH_UNICAST) {
+ ret = odp_pktio_mac_addr(dst, &eth->dst, ODP_PKTIO_MACADDR_MAXSIZE);
+ CU_ASSERT(ret == ODPH_ETHADDR_LEN);
+ CU_ASSERT(ret <= ODP_PKTIO_MACADDR_MAXSIZE);
+ } else {
+ CU_ASSERT(odph_eth_addr_parse(&eth->dst, "ff:ff:ff:ff:ff:ff") == 0);
+ }
}
static uint32_t pktio_pkt_set_seq(odp_packet_t pkt, size_t l4_hdr_len)
@@ -561,7 +571,8 @@ static int create_packets_udp(odp_packet_t pkt_tbl[],
int num,
odp_pktio_t pktio_src,
odp_pktio_t pktio_dst,
- odp_bool_t fix_cs)
+ odp_bool_t fix_cs,
+ eth_addr_type_e dst_addr_type)
{
int i, ret;
@@ -576,7 +587,7 @@ static int create_packets_udp(odp_packet_t pkt_tbl[],
break;
}
- pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst, dst_addr_type);
/* Set user pointer. It should be NULL on receive side. */
odp_packet_user_ptr_set(pkt_tbl[i], (void *)1);
@@ -613,7 +624,7 @@ static int create_packets_sctp(odp_packet_t pkt_tbl[],
break;
}
- pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst);
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_src, pktio_dst, ETH_UNICAST);
ret = pktio_zero_checksums(pkt_tbl[i]);
if (ret != 0) {
@@ -629,7 +640,7 @@ static int create_packets(odp_packet_t pkt_tbl[], uint32_t pkt_seq[], int num,
odp_pktio_t pktio_src, odp_pktio_t pktio_dst)
{
return create_packets_udp(pkt_tbl, pkt_seq, num, pktio_src, pktio_dst,
- true);
+ true, ETH_UNICAST);
}
static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
@@ -1658,13 +1669,25 @@ static void pktio_test_pktio_config(void)
pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ memset(&config, 0xff, sizeof(config));
odp_pktio_config_init(&config);
+ /* Check default values */
+ CU_ASSERT(config.pktin.all_bits == 0);
+ CU_ASSERT(config.pktout.all_bits == 0);
+ CU_ASSERT(config.parser.layer == ODP_PROTO_LAYER_ALL);
+ CU_ASSERT(!config.enable_loop);
+ CU_ASSERT(!config.inbound_ipsec);
+ CU_ASSERT(!config.outbound_ipsec);
+ CU_ASSERT(!config.enable_lso);
+ CU_ASSERT(!config.reassembly.en_ipv4);
+ CU_ASSERT(!config.reassembly.en_ipv6);
+ CU_ASSERT(config.reassembly.max_wait_time == 0);
+ CU_ASSERT(config.reassembly.max_num_frags == 2);
+
/* Indicate packet refs might be used */
config.pktout.bit.no_packet_refs = 0;
- CU_ASSERT(config.parser.layer == ODP_PROTO_LAYER_ALL);
-
CU_ASSERT(odp_pktio_config(pktio, NULL) == 0);
CU_ASSERT(odp_pktio_config(pktio, &config) == 0);
@@ -1944,53 +1967,56 @@ static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
" in_octets %" PRIu64 "\n"
" in_packets %" PRIu64 "\n"
" in_ucast_pkts %" PRIu64 "\n"
+ " in_mcast_pkts %" PRIu64 "\n"
+ " in_bcast_pkts %" PRIu64 "\n"
" in_discards %" PRIu64 "\n"
" in_errors %" PRIu64 "\n"
" out_octets %" PRIu64 "\n"
" out_packets %" PRIu64 "\n"
" out_ucast_pkts %" PRIu64 "\n"
+ " out_mcast_pkts %" PRIu64 "\n"
+ " out_bcast_pkts %" PRIu64 "\n"
" out_discards %" PRIu64 "\n"
" out_errors %" PRIu64 "\n",
name,
s->in_octets,
s->in_packets,
s->in_ucast_pkts,
+ s->in_mcast_pkts,
+ s->in_bcast_pkts,
s->in_discards,
s->in_errors,
s->out_octets,
s->out_packets,
s->out_ucast_pkts,
+ s->out_mcast_pkts,
+ s->out_bcast_pkts,
s->out_discards,
s->out_errors);
}
#endif
-/* some pktio like netmap support various methods to
- * get statistics counters. ethtool strings are not standardised
- * and sysfs may not be supported. skip pktio_stats test until
- * we will solve that.*/
static int pktio_check_statistics_counters(void)
{
odp_pktio_t pktio;
- odp_pktio_stats_t stats;
- int ret;
+ odp_pktio_capability_t capa;
odp_pktio_param_t pktio_param;
- const char *iface = iface_name[0];
+ int ret;
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
- pktio = odp_pktio_open(iface, pool[0], &pktio_param);
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
if (pktio == ODP_PKTIO_INVALID)
return ODP_TEST_INACTIVE;
- ret = odp_pktio_stats(pktio, &stats);
+ ret = odp_pktio_capability(pktio, &capa);
(void)odp_pktio_close(pktio);
- if (ret == 0)
- return ODP_TEST_ACTIVE;
+ if (ret < 0 || capa.stats.pktio.all_counters == 0)
+ return ODP_TEST_INACTIVE;
- return ODP_TEST_INACTIVE;
+ return ODP_TEST_ACTIVE;
}
static void pktio_test_statistics_counters(void)
@@ -2008,6 +2034,7 @@ static void pktio_test_statistics_counters(void)
uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
odp_pktio_stats_t stats[2];
odp_pktio_stats_t *rx_stats, *tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
for (i = 0; i < num_ifaces; i++) {
pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
@@ -2018,6 +2045,9 @@ static void pktio_test_statistics_counters(void)
pktio_tx = pktio[0];
pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
ret = odp_pktio_start(pktio_tx);
@@ -2066,11 +2096,11 @@ static void pktio_test_statistics_counters(void)
CU_ASSERT(ret == 0);
tx_stats = &stats[0];
- CU_ASSERT((tx_stats->out_octets == 0) ||
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_octets == 0) ||
(tx_stats->out_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
- CU_ASSERT((tx_stats->out_packets == 0) ||
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_packets == 0) ||
(tx_stats->out_packets >= (uint64_t)pkts));
- CU_ASSERT((tx_stats->out_ucast_pkts == 0) ||
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_ucast_pkts == 0) ||
(tx_stats->out_ucast_pkts >= (uint64_t)pkts));
CU_ASSERT(tx_stats->out_discards == 0);
CU_ASSERT(tx_stats->out_errors == 0);
@@ -2081,15 +2111,46 @@ static void pktio_test_statistics_counters(void)
ret = odp_pktio_stats(pktio_rx, rx_stats);
CU_ASSERT(ret == 0);
}
- CU_ASSERT((rx_stats->in_octets == 0) ||
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_octets == 0) ||
(rx_stats->in_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
- CU_ASSERT((rx_stats->in_packets == 0) ||
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_packets == 0) ||
(rx_stats->in_packets >= (uint64_t)pkts));
- CU_ASSERT((rx_stats->in_ucast_pkts == 0) ||
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_ucast_pkts == 0) ||
(rx_stats->in_ucast_pkts >= (uint64_t)pkts));
CU_ASSERT(rx_stats->in_discards == 0);
CU_ASSERT(rx_stats->in_errors == 0);
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktio.counter.in_octets)
+ CU_ASSERT(rx_stats->in_octets == 0);
+ if (!rx_capa.stats.pktio.counter.in_packets)
+ CU_ASSERT(rx_stats->in_packets == 0);
+ if (!rx_capa.stats.pktio.counter.in_ucast_pkts)
+ CU_ASSERT(rx_stats->in_ucast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_mcast_pkts)
+ CU_ASSERT(rx_stats->in_mcast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_bcast_pkts)
+ CU_ASSERT(rx_stats->in_bcast_pkts == 0);
+ if (!rx_capa.stats.pktio.counter.in_discards)
+ CU_ASSERT(rx_stats->in_discards == 0);
+ if (!rx_capa.stats.pktio.counter.in_errors)
+ CU_ASSERT(rx_stats->in_errors == 0);
+
+ if (!tx_capa.stats.pktio.counter.out_octets)
+ CU_ASSERT(tx_stats->out_octets == 0);
+ if (!tx_capa.stats.pktio.counter.out_packets)
+ CU_ASSERT(tx_stats->out_packets == 0);
+ if (!tx_capa.stats.pktio.counter.out_ucast_pkts)
+ CU_ASSERT(tx_stats->out_ucast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_mcast_pkts)
+ CU_ASSERT(tx_stats->out_mcast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_bcast_pkts)
+ CU_ASSERT(tx_stats->out_bcast_pkts == 0);
+ if (!tx_capa.stats.pktio.counter.out_discards)
+ CU_ASSERT(tx_stats->out_discards == 0);
+ if (!tx_capa.stats.pktio.counter.out_errors)
+ CU_ASSERT(tx_stats->out_errors == 0);
+
for (i = 0; i < num_ifaces; i++) {
CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
#ifdef DEBUG_STATS
@@ -2100,6 +2161,423 @@ static void pktio_test_statistics_counters(void)
}
}
+static int pktio_check_statistics_counters_bcast(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktio.counter.in_bcast_pkts == 0 &&
+ capa.stats.pktio.counter.out_bcast_pkts == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_statistics_counters_bcast(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktio_stats_t stats[2];
+ odp_pktio_stats_t *rx_stats, *tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ alloc = create_packets_udp(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx,
+ true, ETH_BROADCAST);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT(odp_pktio_stats(pktio_tx, &stats[0]) == 0);
+ tx_stats = &stats[0];
+
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_bcast_pkts == 0) ||
+ (tx_stats->out_bcast_pkts >= (uint64_t)pkts));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_octets == 0) ||
+ (tx_stats->out_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((tx_capa.stats.pktio.counter.out_packets == 0) ||
+ (tx_stats->out_packets >= (uint64_t)pkts));
+
+ rx_stats = &stats[0];
+ if (num_ifaces > 1) {
+ rx_stats = &stats[1];
+ CU_ASSERT(odp_pktio_stats(pktio_rx, rx_stats) == 0);
+ }
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_bcast_pkts == 0) ||
+ (rx_stats->in_bcast_pkts >= (uint64_t)pkts));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_octets == 0) ||
+ (rx_stats->in_octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((rx_capa.stats.pktio.counter.in_packets == 0) ||
+ (rx_stats->in_packets >= (uint64_t)pkts));
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+#ifdef DEBUG_STATS
+ _print_pktio_stats(&stats[i], iface_name[i]);
+#endif
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+static int pktio_check_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktin_queue.all_counters == 0 &&
+ capa.stats.pktout_queue.all_counters == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ int i, pkts, tx_pkts, ret, alloc = 0;
+ odp_pktout_queue_t pktout;
+ odp_pktin_queue_t pktin;
+ uint64_t wait = odp_pktin_wait_time(ODP_TIME_SEC_IN_NS);
+ odp_pktin_queue_stats_t rx_stats;
+ odp_pktout_queue_stats_t tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_queue(pktio_rx, &pktin, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ alloc = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_pktout_send(pktout, &tx_pkt[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to send packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+ tx_pkts = pkts;
+
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ odp_packet_t pkt;
+
+ if (odp_pktin_recv_tmo(pktin, &pkt, 1, wait) != 1)
+ break;
+
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+
+ odp_packet_free(pkt);
+ }
+
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT_FATAL(odp_pktout_queue_stats(pktout, &tx_stats) == 0);
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.octets) ||
+ (tx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.packets) ||
+ (tx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats.discards == 0);
+ CU_ASSERT(tx_stats.errors == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_queue_stats(pktin, &rx_stats) == 0);
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.octets) ||
+ (rx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.packets) ||
+ (rx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats.discards == 0);
+ CU_ASSERT(rx_stats.errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktin_queue.counter.octets)
+ CU_ASSERT(rx_stats.octets == 0);
+ if (!rx_capa.stats.pktin_queue.counter.packets)
+ CU_ASSERT(rx_stats.packets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.octets)
+ CU_ASSERT(tx_stats.octets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.packets)
+ CU_ASSERT(tx_stats.packets == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+static int pktio_check_event_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktio_param_t pktio_param;
+ int ret;
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_QUEUE;
+
+ pktio = odp_pktio_open(iface_name[0], pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_capability(pktio, &capa);
+ (void)odp_pktio_close(pktio);
+
+ if (ret < 0 || (capa.stats.pktin_queue.all_counters == 0 &&
+ capa.stats.pktout_queue.all_counters == 0))
+ return ODP_TEST_INACTIVE;
+
+ return ODP_TEST_ACTIVE;
+}
+
+static void pktio_test_event_queue_statistics_counters(void)
+{
+ odp_pktio_t pktio_rx, pktio_tx;
+ odp_pktio_t pktio[MAX_NUM_IFACES] = {
+ ODP_PKTIO_INVALID, ODP_PKTIO_INVALID
+ };
+ odp_packet_t pkt;
+ odp_packet_t tx_pkt[1000];
+ uint32_t pkt_seq[1000];
+ odp_event_t ev;
+ int i, pkts, tx_pkts;
+ odp_queue_t pktout;
+ odp_queue_t pktin;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktin_queue_stats_t rx_stats;
+ odp_pktout_queue_stats_t tx_stats;
+ odp_pktio_capability_t rx_capa, tx_capa;
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_QUEUE);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ }
+ pktio_tx = pktio[0];
+ pktio_rx = (num_ifaces > 1) ? pktio[1] : pktio_tx;
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_tx, &tx_capa) == 0);
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio_rx, &rx_capa) == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue(pktio_rx, &pktin, 1) == 1);
+ CU_ASSERT_FATAL(odp_pktout_event_queue(pktio_tx, &pktout, 1) == 1);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT_FATAL(odp_pktio_start(pktio_rx) == 0);
+
+ tx_pkts = create_packets(tx_pkt, pkt_seq, 1000, pktio_tx, pktio_rx);
+
+ CU_ASSERT(odp_pktio_stats_reset(pktio_tx) == 0);
+ if (num_ifaces > 1)
+ CU_ASSERT(odp_pktio_stats_reset(pktio_rx) == 0);
+
+ CU_ASSERT_FATAL(send_packet_events(pktout, tx_pkt, tx_pkts) == 0);
+
+ /* Receive */
+ for (i = 0, pkts = 0; i < 1000 && pkts != tx_pkts; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+ CU_ASSERT(pkts == tx_pkts);
+
+ CU_ASSERT_FATAL(odp_pktout_event_queue_stats(pktio_tx, pktout, &tx_stats) == 0);
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.octets) ||
+ (tx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!tx_capa.stats.pktout_queue.counter.packets) ||
+ (tx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(tx_stats.discards == 0);
+ CU_ASSERT(tx_stats.errors == 0);
+
+ CU_ASSERT_FATAL(odp_pktin_event_queue_stats(pktio_rx, pktin, &rx_stats) == 0);
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.octets) ||
+ (rx_stats.octets >= (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ CU_ASSERT((!rx_capa.stats.pktin_queue.counter.packets) ||
+ (rx_stats.packets >= (uint64_t)pkts));
+ CU_ASSERT(rx_stats.discards == 0);
+ CU_ASSERT(rx_stats.errors == 0);
+
+ /* Check that all unsupported counters are still zero */
+ if (!rx_capa.stats.pktin_queue.counter.octets)
+ CU_ASSERT(rx_stats.octets == 0);
+ if (!rx_capa.stats.pktin_queue.counter.packets)
+ CU_ASSERT(rx_stats.packets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.octets)
+ CU_ASSERT(tx_stats.octets == 0);
+ if (!tx_capa.stats.pktout_queue.counter.packets)
+ CU_ASSERT(tx_stats.packets == 0);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+ flush_input_queue(pktio[i], ODP_PKTIN_MODE_SCHED);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
+static void pktio_test_extra_stats(void)
+{
+ odp_pktio_t pktio;
+ int num_info, num_stats, i, ret;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ CU_ASSERT_FATAL(odp_pktio_start(pktio) == 0);
+
+ num_info = odp_pktio_extra_stat_info(pktio, NULL, 0);
+ CU_ASSERT_FATAL(num_info >= 0);
+
+ num_stats = odp_pktio_extra_stats(pktio, NULL, 0);
+ CU_ASSERT_FATAL(num_stats >= 0);
+
+ CU_ASSERT_FATAL(num_info == num_stats);
+
+ /* No extra statistics supported */
+ if (num_stats == 0) {
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+ return;
+ }
+
+ odp_pktio_extra_stat_info_t stats_info[num_stats];
+ uint64_t extra_stats[num_stats];
+
+ ret = odp_pktio_extra_stat_info(pktio, stats_info, num_stats);
+ CU_ASSERT(ret == num_stats);
+ num_info = ret;
+
+ ret = odp_pktio_extra_stats(pktio, extra_stats, num_stats);
+ CU_ASSERT(ret == num_stats);
+ CU_ASSERT_FATAL(ret <= num_stats);
+ num_stats = ret;
+
+ CU_ASSERT_FATAL(num_info == num_stats);
+
+ printf("\nPktio extra statistics\n----------------------\n");
+ for (i = 0; i < num_stats; i++)
+ printf(" %s=%" PRIu64 "\n", stats_info[i].name, extra_stats[i]);
+
+ for (i = 0; i < num_stats; i++) {
+ uint64_t stat = 0;
+
+ CU_ASSERT(odp_pktio_extra_stat_counter(pktio, i, &stat) == 0);
+ }
+
+ odp_pktio_extra_stats_print(pktio);
+
+ CU_ASSERT(odp_pktio_stop(pktio) == 0);
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
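The test exercises the size-query idiom of the new API: calling with a NULL array returns the number of counters, after which suitably sized arrays can be filled. A hedged minimal sketch, assuming pktio is a started handle:

    int num = odp_pktio_extra_stats(pktio, NULL, 0);

    if (num > 0) {
            odp_pktio_extra_stat_info_t info[num];
            uint64_t val[num];

            if (odp_pktio_extra_stat_info(pktio, info, num) == num &&
                odp_pktio_extra_stats(pktio, val, num) == num)
                    printf("%s = %" PRIu64 "\n", info[0].name, val[0]);
    }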
static int pktio_check_start_stop(void)
{
if (getenv("ODP_PKTIO_TEST_DISABLE_START_STOP"))
@@ -2776,7 +3254,7 @@ static void pktio_test_chksum(void (*config_fn)(odp_pktio_t, odp_pktio_t),
}
ret = create_packets_udp(pkt_tbl, pkt_seq, TX_BATCH_LEN, pktio_tx,
- pktio_rx, false);
+ pktio_rx, false, ETH_UNICAST);
CU_ASSERT(ret == TX_BATCH_LEN);
if (ret != TX_BATCH_LEN) {
for (i = 0; i < num_ifaces; i++) {
@@ -4132,6 +4610,13 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
pktio_check_pktin_event_sched),
ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
pktio_check_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters_bcast,
+ pktio_check_statistics_counters_bcast),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_queue_statistics_counters,
+ pktio_check_queue_statistics_counters),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_event_queue_statistics_counters,
+ pktio_check_event_queue_statistics_counters),
+ ODP_TEST_INFO(pktio_test_extra_stats),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktin_ts,
pktio_check_pktin_ts),
ODP_TEST_INFO_CONDITIONAL(pktio_test_pktout_ts,
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 37f3b4f0b..9c84eacd3 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -68,6 +68,7 @@ typedef struct {
int buf_count;
int buf_count_cpy;
int queues_per_prio;
+ int test_debug_print;
odp_shm_t shm_glb;
odp_shm_t shm_args;
odp_pool_t pool;
@@ -860,6 +861,44 @@ static void scheduler_test_order_ignore(void)
CU_ASSERT_FATAL(odp_pool_destroy(pool) == 0);
}
+static void scheduler_test_group_info_predef(void)
+{
+ odp_schedule_group_info_t info;
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+ int thr;
+
+ thr = odp_thread_id();
+
+ group = ODP_SCHED_GROUP_ALL;
+ odp_thrmask_zero(&thrmask);
+ CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+ CU_ASSERT(odp_thrmask_isset(&thrmask, thr));
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+ printf("\n Schedule group all name: %s\n", info.name);
+
+ /* This test case runs a control thread */
+ group = ODP_SCHED_GROUP_CONTROL;
+ odp_thrmask_zero(&thrmask);
+ CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+ CU_ASSERT(odp_thrmask_isset(&thrmask, thr));
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+ printf(" Schedule group control name: %s\n", info.name);
+
+ group = ODP_SCHED_GROUP_WORKER;
+ odp_thrmask_zero(&thrmask);
+ CU_ASSERT(odp_schedule_group_thrmask(group, &thrmask) == 0);
+ CU_ASSERT(!odp_thrmask_isset(&thrmask, thr));
+ memset(&info, 0, sizeof(odp_schedule_group_info_t));
+ CU_ASSERT(odp_schedule_group_info(group, &info) == 0);
+ CU_ASSERT(odp_thrmask_equal(&info.thrmask, &thrmask));
+ printf(" Schedule group worker name: %s\n", info.name);
+}
+
static void scheduler_test_create_group(void)
{
odp_thrmask_t mask;
@@ -1787,6 +1826,9 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
fill_queues(args);
+ if (globals->test_debug_print)
+ odp_schedule_print();
+
/* Create and launch worker threads */
/* Test runs also on the main thread */
@@ -2956,6 +2998,25 @@ static void scheduler_test_flow_aware(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+/* Queues created but no events */
+static void scheduler_test_print(void)
+{
+ odp_schedule_print();
+}
+
+/* Queues with initial events enqueued */
+static void scheduler_test_mq_mt_prio_a_print(void)
+{
+ int prio = odp_schedule_num_prio();
+
+ globals->test_debug_print = 1;
+
+ parallel_execute(ODP_SCHED_SYNC_ATOMIC, globals->queues_per_prio, prio,
+ SCHD_ONE, DISABLE_EXCL_ATOMIC);
+
+ globals->test_debug_print = 0;
+}
+
static int scheduler_test_global_init(void)
{
odp_cpumask_t mask;
@@ -3158,6 +3219,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_max_queues_a),
ODP_TEST_INFO(scheduler_test_max_queues_o),
ODP_TEST_INFO(scheduler_test_order_ignore),
+ ODP_TEST_INFO(scheduler_test_group_info_predef),
ODP_TEST_INFO(scheduler_test_create_group),
ODP_TEST_INFO(scheduler_test_create_max_groups),
ODP_TEST_INFO(scheduler_test_groups),
@@ -3178,6 +3240,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
/* Scheduler test suite which runs events through hundreds of queues. Queues are created once
* in suite init phase. */
odp_testinfo_t scheduler_multi_suite[] = {
+ ODP_TEST_INFO(scheduler_test_print),
ODP_TEST_INFO(scheduler_test_chaos),
ODP_TEST_INFO(scheduler_test_1q_1t_n),
ODP_TEST_INFO(scheduler_test_1q_1t_a),
@@ -3205,6 +3268,7 @@ odp_testinfo_t scheduler_multi_suite[] = {
ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_a),
ODP_TEST_INFO(scheduler_test_multi_mq_mt_prio_o),
ODP_TEST_INFO(scheduler_test_multi_1q_mt_a_excl),
+ ODP_TEST_INFO(scheduler_test_mq_mt_prio_a_print),
ODP_TEST_INFO_NULL
};
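The new scheduler_test_group_info_predef case above exercises odp_schedule_group_thrmask() and odp_schedule_group_info() on the predefined groups. A minimal sketch of the same API usage (not part of the patch; it assumes ODP is initialized and the caller is an ODP thread, and the helper name print_group_info is hypothetical):

#include <stdio.h>
#include <string.h>
#include <odp_api.h>

/* Hypothetical helper: print the name and thread count of a schedule group */
static void print_group_info(odp_schedule_group_t group)
{
	odp_schedule_group_info_t info;

	memset(&info, 0, sizeof(info));
	if (odp_schedule_group_info(group, &info) == 0)
		printf("group: %s, threads: %i\n",
		       info.name, odp_thrmask_count(&info.thrmask));
}

/* Usage, e.g.: print_group_info(ODP_SCHED_GROUP_WORKER); */

As in the test case, the returned info.thrmask can also be compared against odp_schedule_group_thrmask() output with odp_thrmask_equal().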
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index 177f6f82b..0716b7999 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -554,6 +554,94 @@ static void timer_pool_max_res(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
+static void timer_pool_tick_info_run(odp_timer_clk_src_t clk_src)
+{
+ odp_timer_capability_t capa;
+ odp_timer_pool_param_t tp_param;
+ odp_timer_pool_t tp;
+ odp_timer_pool_info_t info;
+ uint64_t ticks_per_sec;
+ double tick_hz, tick_nsec, tick_to_nsec, tick_low;
+
+ memset(&capa, 0, sizeof(capa));
+ CU_ASSERT_FATAL(odp_timer_capability(clk_src, &capa) == 0);
+
+ /* Highest resolution */
+ memset(&tp_param, 0, sizeof(odp_timer_pool_param_t));
+ tp_param.res_hz = capa.max_res.res_hz;
+ tp_param.min_tmo = capa.max_res.min_tmo;
+ tp_param.max_tmo = capa.max_res.max_tmo;
+ tp_param.num_timers = 100;
+ tp_param.priv = 0;
+ tp_param.clk_src = clk_src;
+
+ tp = odp_timer_pool_create("tick_info_tp", &tp_param);
+ CU_ASSERT_FATAL(tp != ODP_TIMER_POOL_INVALID);
+
+ odp_timer_pool_start();
+
+ memset(&info, 0, sizeof(odp_timer_pool_info_t));
+ CU_ASSERT_FATAL(odp_timer_pool_info(tp, &info) == 0);
+
+ /* Tick frequency in hertz. Allow 1 Hz rounding error between odp_timer_ns_to_tick()
+ * and tick_info. */
+ ticks_per_sec = odp_timer_ns_to_tick(tp, ODP_TIME_SEC_IN_NS);
+ tick_hz = odp_fract_u64_to_dbl(&info.tick_info.freq);
+
+ CU_ASSERT(((double)(ticks_per_sec - 1)) <= tick_hz);
+ CU_ASSERT(((double)(ticks_per_sec + 1)) >= tick_hz);
+
+ printf("\nClock source %i\n", clk_src);
+ printf(" Ticks per second: %" PRIu64 "\n", ticks_per_sec);
+ printf(" Tick info freq: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.freq.integer,
+ info.tick_info.freq.numer,
+ info.tick_info.freq.denom);
+ printf(" Tick info freq dbl: %f\n", tick_hz);
+
+ /* One tick in nsec. For better resolution, convert 1000 ticks (using double)
+ * instead of a single tick. Allow 1 nsec rounding error between
+ * odp_timer_tick_to_ns() and tick_info. */
+ tick_to_nsec = odp_timer_tick_to_ns(tp, 1000) / 1000.0;
+ tick_nsec = odp_fract_u64_to_dbl(&info.tick_info.nsec);
+ tick_low = tick_to_nsec - 1.0;
+ if (tick_to_nsec < 1.0)
+ tick_low = 0.0;
+
+ CU_ASSERT(tick_low <= tick_nsec);
+ CU_ASSERT((tick_to_nsec + 1.0) >= tick_nsec);
+
+ printf(" Tick in nsec: %f\n", tick_to_nsec);
+ printf(" Tick info nsec: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.nsec.integer,
+ info.tick_info.nsec.numer,
+ info.tick_info.nsec.denom);
+ printf(" Tick info nsec dbl: %f\n", tick_nsec);
+
+ /* One tick in source clock cycles. Depending on the clock source, it may be
+ * zero. Print the values as a reference for these fields. */
+ printf(" Tick info clk cycles: %" PRIu64 " + %" PRIu64 " / %" PRIu64 "\n",
+ info.tick_info.clk_cycle.integer,
+ info.tick_info.clk_cycle.numer,
+ info.tick_info.clk_cycle.denom);
+
+ odp_timer_pool_destroy(tp);
+}
+
+static void timer_pool_tick_info(void)
+{
+ odp_timer_clk_src_t clk_src;
+ int i;
+
+ for (i = 0; i < ODP_CLOCK_NUM_SRC; i++) {
+ clk_src = ODP_CLOCK_SRC_0 + i;
+ if (global_mem->clk_supported[i]) {
+ ODPH_DBG("\nTesting clock source: %i\n", clk_src);
+ timer_pool_tick_info_run(clk_src);
+ }
+ }
+}
+
static void timer_test_event_type(odp_queue_type_t queue_type,
odp_event_type_t event_type)
{
@@ -1734,6 +1822,7 @@ odp_testinfo_t timer_suite[] = {
ODP_TEST_INFO(timer_test_timeout_pool_free),
ODP_TEST_INFO(timer_pool_create_destroy),
ODP_TEST_INFO(timer_pool_max_res),
+ ODP_TEST_INFO(timer_pool_tick_info),
ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_plain,
check_plain_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_tmo_event_sched,
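For reference, the tick_info fields printed by the new test above (freq, nsec, clk_cycle) are odp_fract_u64_t values of the form integer + numer/denom, which odp_fract_u64_to_dbl() collapses into a double, as seen in timer_pool_tick_info_run(). A minimal sketch of that conversion (not part of the patch; the helper name fract_to_dbl is hypothetical):

#include <odp_api.h>

/* Hypothetical helper: convert an odp_fract_u64_t, e.g. info.tick_info.freq,
 * to a double as integer + numer / denom */
static double fract_to_dbl(const odp_fract_u64_t *fract)
{
	double val = (double)fract->integer;

	/* A zero denominator means the fraction part is not used */
	if (fract->denom)
		val += (double)fract->numer / (double)fract->denom;

	return val;
}

The test tolerates a 1 Hz / 1 nsec difference between these fractional values and the odp_timer_ns_to_tick() / odp_timer_tick_to_ns() conversions, since the latter round to integer ticks and nanoseconds.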