author     Maxim Uvarov <maxim.uvarov@linaro.org>  2016-02-08 20:47:58 +0300
committer  Maxim Uvarov <maxim.uvarov@linaro.org>  2016-02-08 20:47:58 +0300
commit     bbd5ac81d9e606415f2ad2ecb999c2ff8c3716c9 (patch)
tree       d6da6a9a491b05721cd562b1c22ea49c6076b107
parent     ee90f447a6f5c76ab7751e6b747d1da3ac407fb1 (diff)
parent     fd93da30b42937f62afecccd43809815044f5a45 (diff)
Merge branch 'next'  (tag: v1.7.0.0)
-rw-r--r--  CHANGELOG  181
-rw-r--r--  CONTRIBUTING  5
-rw-r--r--  DEPENDENCIES  11
-rw-r--r--  README  2
-rw-r--r--  configure.ac  15
-rw-r--r--  doc/application-api-guide/odp.dox  2
-rw-r--r--  doc/images/.gitignore  3
-rw-r--r--  doc/images/Makefile.am  39
-rw-r--r--  doc/process-guide/release-guide.adoc  4
-rw-r--r--  doc/users-guide/users-guide.adoc  43
-rw-r--r--  example/classifier/odp_classifier.c  34
-rw-r--r--  example/generator/odp_generator.c  11
-rw-r--r--  example/ipsec/odp_ipsec.c  45
-rw-r--r--  example/ipsec/odp_ipsec_cache.h  2
-rw-r--r--  example/ipsec/odp_ipsec_stream.c  2
-rw-r--r--  example/packet/odp_pktio.c  18
-rw-r--r--  example/time/time_global_test.c  8
-rw-r--r--  example/timer/odp_timer_test.c  11
-rw-r--r--  helper/Makefile.am  3
-rw-r--r--  helper/include/odp/helper/chksum.h  4
-rw-r--r--  helper/include/odp/helper/eth.h  6
-rw-r--r--  helper/include/odp/helper/icmp.h  12
-rw-r--r--  helper/include/odp/helper/ip.h  22
-rw-r--r--  helper/include/odp/helper/ipsec.h  10
-rw-r--r--  helper/include/odp/helper/tcp.h  64
-rw-r--r--  helper/include/odp/helper/udp.h  8
-rw-r--r--  helper/odph_pause.h  54
-rw-r--r--  helper/ring.c  10
-rw-r--r--  helper/test/Makefile.am  2
-rw-r--r--  helper/test/odph_pause.c  14
-rw-r--r--  include/odp/api/atomic.h  394
-rw-r--r--  include/odp/api/byteorder.h  40
-rw-r--r--  include/odp/api/classification.h  16
-rw-r--r--  include/odp/api/cpu.h  73
-rw-r--r--  include/odp/api/cpumask.h  38
-rw-r--r--  include/odp/api/errno.h  37
-rw-r--r--  include/odp/api/init.h  11
-rw-r--r--  include/odp/api/packet_io.h  371
-rw-r--r--  include/odp/api/packet_io_stats.h  141
-rw-r--r--  include/odp/api/pool.h  3
-rw-r--r--  include/odp/api/queue.h  115
-rw-r--r--  include/odp/api/schedule.h  20
-rw-r--r--  include/odp/api/schedule_types.h  12
-rw-r--r--  include/odp/api/std_clib.h  18
-rw-r--r--  include/odp/api/system_info.h  14
-rw-r--r--  include/odp/api/thrmask.h  30
-rw-r--r--  include/odp/api/version.h  2
-rw-r--r--  pkg/debian/changelog  5
-rw-r--r--  pkg/rpm/odp.spec  2
-rw-r--r--  platform/Makefile.inc  1
-rw-r--r--  platform/linux-generic/Makefile.am  23
-rw-r--r--  platform/linux-generic/Makefile.inc  2
-rw-r--r--  platform/linux-generic/arch/linux/odp/cpu_arch.h  22
-rw-r--r--  platform/linux-generic/arch/linux/odp_cpu_arch.c (renamed from platform/linux-generic/arch/linux/odp_cpu_cycles.c)  2
-rw-r--r--  platform/linux-generic/arch/linux/odp_sysinfo_parse.c  19
-rw-r--r--  platform/linux-generic/arch/mips64/odp/cpu_arch.h  26
-rw-r--r--  platform/linux-generic/arch/mips64/odp_cpu_arch.c (renamed from platform/linux-generic/arch/mips64/odp_cpu_cycles.c)  0
-rw-r--r--  platform/linux-generic/arch/mips64/odp_sysinfo_parse.c  64
l---------  platform/linux-generic/arch/powerpc/odp/cpu_arch.h  1
l---------  platform/linux-generic/arch/powerpc/odp_cpu_arch.c  1
-rw-r--r--  platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c  63
-rw-r--r--  platform/linux-generic/arch/x86/odp/cpu_arch.h  27
-rw-r--r--  platform/linux-generic/arch/x86/odp_cpu_arch.c (renamed from platform/linux-generic/arch/x86/odp_cpu_cycles.c)  0
-rw-r--r--  platform/linux-generic/arch/x86/odp_sysinfo_parse.c  73
-rw-r--r--  platform/linux-generic/include/odp/atomic.h  222
-rw-r--r--  platform/linux-generic/include/odp/byteorder.h  48
-rw-r--r--  platform/linux-generic/include/odp/cpu.h  2
-rw-r--r--  platform/linux-generic/include/odp/init.h  2
-rw-r--r--  platform/linux-generic/include/odp/plat/atomic_types.h  21
-rw-r--r--  platform/linux-generic/include/odp/plat/byteorder_types.h  16
-rw-r--r--  platform/linux-generic/include/odp/plat/init_types.h  30
-rw-r--r--  platform/linux-generic/include/odp/plat/packet_io_types.h  16
-rw-r--r--  platform/linux-generic/include/odp/plat/queue_types.h  8
-rw-r--r--  platform/linux-generic/include/odp/plat/schedule_types.h  2
-rw-r--r--  platform/linux-generic/include/odp/std_clib.h  5
-rw-r--r--  platform/linux-generic/include/odp_atomic_internal.h  6
-rw-r--r--  platform/linux-generic/include/odp_classification_datamodel.h  2
-rw-r--r--  platform/linux-generic/include/odp_classification_inlines.h  25
-rw-r--r--  platform/linux-generic/include/odp_internal.h  11
-rw-r--r--  platform/linux-generic/include/odp_packet_io_internal.h  78
-rw-r--r--  platform/linux-generic/include/odp_packet_netmap.h  40
-rw-r--r--  platform/linux-generic/include/odp_packet_socket.h  57
-rw-r--r--  platform/linux-generic/include/odp_pool_internal.h  1
-rw-r--r--  platform/linux-generic/include/odp_queue_internal.h  2
-rw-r--r--  platform/linux-generic/include/odp_schedule_internal.h  3
-rw-r--r--  platform/linux-generic/include/odp_spin_internal.h  58
-rw-r--r--  platform/linux-generic/odp_atomic.c  26
-rw-r--r--  platform/linux-generic/odp_barrier.c  12
-rw-r--r--  platform/linux-generic/odp_cpumask_task.c  11
-rw-r--r--  platform/linux-generic/odp_packet_io.c  647
-rw-r--r--  platform/linux-generic/odp_pool.c  1
-rw-r--r--  platform/linux-generic/odp_queue.c  26
-rw-r--r--  platform/linux-generic/odp_rwlock.c  30
-rw-r--r--  platform/linux-generic/odp_schedule.c  233
-rw-r--r--  platform/linux-generic/odp_spinlock.c  5
-rw-r--r--  platform/linux-generic/odp_system_info.c  215
-rw-r--r--  platform/linux-generic/odp_ticketlock.c  25
-rw-r--r--  platform/linux-generic/odp_timer.c  120
-rw-r--r--  platform/linux-generic/odp_weak.c  2
-rw-r--r--  platform/linux-generic/pktio/ethtool.c  164
-rw-r--r--  platform/linux-generic/pktio/loop.c  55
-rw-r--r--  platform/linux-generic/pktio/netmap.c  645
-rw-r--r--  platform/linux-generic/pktio/pcap.c  39
-rw-r--r--  platform/linux-generic/pktio/pktio_common.c  72
-rw-r--r--  platform/linux-generic/pktio/socket.c  320
-rw-r--r--  platform/linux-generic/pktio/socket_mmap.c  67
-rw-r--r--  platform/linux-generic/pktio/sysfs.c  76
-rw-r--r--  platform/linux-generic/test/Makefile.am  4
-rw-r--r--  test/api_test/odp_common.c  4
-rw-r--r--  test/performance/odp_atomic.c  4
-rw-r--r--  test/performance/odp_l2fwd.c  1126
-rw-r--r--  test/performance/odp_pktio_perf.c  34
-rw-r--r--  test/performance/odp_scheduling.c  28
-rw-r--r--  test/validation/Makefile.am  8
-rw-r--r--  test/validation/atomic/.gitignore  1
-rw-r--r--  test/validation/atomic/Makefile.am  10
-rw-r--r--  test/validation/atomic/atomic.c  881
-rw-r--r--  test/validation/atomic/atomic.h  38
-rw-r--r--  test/validation/atomic/atomic_main.c (renamed from test/validation/synchronizers/synchronizers_main.c)  4
-rw-r--r--  test/validation/barrier/.gitignore  1
-rw-r--r--  test/validation/barrier/Makefile.am  10
-rw-r--r--  test/validation/barrier/barrier.c  393
-rw-r--r--  test/validation/barrier/barrier.h  29
-rw-r--r--  test/validation/barrier/barrier_main.c  12
-rw-r--r--  test/validation/classification/classification.h  10
-rw-r--r--  test/validation/classification/odp_classification_basic.c  4
-rw-r--r--  test/validation/classification/odp_classification_common.c  99
-rw-r--r--  test/validation/classification/odp_classification_test_pmr.c  409
-rw-r--r--  test/validation/classification/odp_classification_tests.c  76
-rw-r--r--  test/validation/classification/odp_classification_testsuites.h  5
-rw-r--r--  test/validation/common/odp_cunit_common.h  7
-rw-r--r--  test/validation/crypto/crypto.c  3
-rw-r--r--  test/validation/init/init.c  4
-rw-r--r--  test/validation/lock/.gitignore  1
-rw-r--r--  test/validation/lock/Makefile.am  10
-rw-r--r--  test/validation/lock/lock.c (renamed from test/validation/synchronizers/synchronizers.c)  608
-rw-r--r--  test/validation/lock/lock.h  45
-rw-r--r--  test/validation/lock/lock_main.c  12
-rw-r--r--  test/validation/pktio/pktio.c  594
-rw-r--r--  test/validation/pktio/pktio.h  11
-rw-r--r--  test/validation/queue/queue.c  50
-rw-r--r--  test/validation/scheduler/scheduler.c  89
-rw-r--r--  test/validation/std_clib/std_clib.c  38
-rw-r--r--  test/validation/synchronizers/.gitignore  1
-rw-r--r--  test/validation/synchronizers/Makefile.am  10
-rw-r--r--  test/validation/synchronizers/synchronizers.h  54
-rw-r--r--  test/validation/system/system.c  115
-rw-r--r--  test/validation/system/system.h  13
-rw-r--r--  test/validation/timer/timer.c  44
149 files changed, 8335 insertions, 2321 deletions
diff --git a/CHANGELOG b/CHANGELOG
index bd5b9982d..6549e0d87 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,184 @@
+opendataplane (1.7.0.0)
+ * API:
+ - api: atomic: add non-relaxed 64bit operations
+ - api: atomic: added 32 bit acquire and release
+ - api: atomic: added 32bit cas_rel and cas_acq_rel
+ - api: atomic: added atomic min and max
+ - api: atomic: added atomic_lock_free_u64
+ - api: atomic: added cas operations
+ - api: atomic: added relaxed exchange operation
+ - api: atomic: init functions are not atomic
+ - api: atomic: rename release ordering
+ - api: classifier: align enum type naming
+ - api: cpu: add new API to get CPU max frequency
+ - api: cpu: add new API to get per-CPU current frequency
+ - api: cpu: add new API to get per-CPU max frequency
+ - api: cpu: add new API to get per-CPU model string
+ - api: cpu: added pause call
+ - api: cpu: make frequency API return 0 on failure
+ - api: cpumask: add new API odp_cpumask_all_available()
+ - api: cpumask: documented string format
+ - api: define pktio statistics api
+ - api: endian: rename endian types with odp_ prefix
+ - api: errno: any odp function can set errno
+ - api: init: align enum type naming
+ - api: init: removed platform_init struct definition
+ - api: pktio: added direct queue receive
+ - api: pktio: added direct send to pktio output queue
+ - api: pktio: added link status
+ - api: pktio: added multiple pktio input queues
+ - api: pktio: added multiple pktio output queues
+ - api: pktio: added pktio capability struct
+ - api: pktio: refine multiqueue API spec
+ - api: pktio: remove unused ODP_PKTIO_ANY
+ - api: pktio: rename pktio modes
+ - api: pool: allow per-thread caching
+ - api: queue: define queue type as enum
+ - api: queue: moved queue type into queue parameters
+ - api: queue: rename QUEUE_TYPE_POLL to _PLAIN
+ - api: sched: rename SCHED_SYNC_NONE to _PARALLEL
+ - api: schedule: clarify scheduler API documentation
+ - api: stdlib: added odp_memcmp
+ - api: sysinfo: move CPU Hz API to cpu.h
+ - api: sysinfo: move CPU model API to cpu.h
+ - api: thrmask: documented string format
+ - api: pktio: rename single_user param
+ - api: pktio: renames for compact type and func names
+ - api: queue: add enq and deq mode params
+ * ODP docs:
+ - doc/users-guide: add time API section
+ - doc/users-guide: add cryptographic services section
+ - doc: userguide: add application programming section
+ - doc: process-guide: add release process
+ - doc: images: replace overview with editable svg src
+ - doc: guides: embed icons and images in html
+ - doc: re-organize doxygen doc for synchronizer
+ * Validation
+ - test/performance: pktio: perform an initial warmup run
+ - test: change l2fwd pool size
+ - test: l2fwd: added poll queue mode
+ - test: l2fwd: re-organize functions
+ - test: l2fwd: use multi-queue API for scheduled queues
+ - test: l2fwd: use multi-queue pktio in direct mode
+ - test: l2fwd: use multiple queues in sched mode
+ - test: perf: l2fwd detect missing odp_generator
+ - test: update CPU Hz calling functions
+ - tests: harmonize posix extensions level defines
+ - validation: atomic: added cas test
+ - validation: atomic: added lock free op test
+ - validation: atomic: added max and min tests
+ - validation: atomic: added non-relaxed test
+ - validation: atomic: added xchg test
+ - validation: classification: add additional PMR term
+ - validation: classification: add test case for ODP_PMR_DIP_ADDR
+ - validation: classification: remove double frees
+ - validation: cls: adapt for supported L3 PMR
+ - validation: cls: assign default CoS before creating chain
+ - validation: cls: use correct MAC addresses
+ - validation: define ODP_TEST_INACTIVE and ODP_TEST_ACTIVE
+ - validation: implement pktio statistics counters
+ - validation: pktio: don't continue if packet with > MTU is sent
+ - validation: pktio: fix check of pktio_stop() called twice
+ - validation: pktio: fix typo on setting in_mode
+ - validation: pktio: reduce stdout noise
+ - validation: pktio: test batch receive
+ - validation: pktio: use odp_time_ns() instead of own function
+ - validation: pktio: check for number of interfaces
+ - validation: pktio: add test for odp_pktio_recv_queue() and odp_pktio_send_queue()
+ - validation: pktio: add test for odp_pktout_queue_config()
+ - validation: pktio: add test for odp_pktin_queue_config()
+ - validation: add possibility to inactivate preconditioned tests
+ - validation: queue: add test for odp_queue_to_u64()
+ - validation: remove remaining references to synchronizers
+ - validation: remove synchronizers tests
+ - validation: scheduler: add timing tests for scheduled queue types
+ - validation: shmem: sync threads with barrier
+ - validation: stdlib: add odp_memcmp test
+ - validation: synchro tests split into 3 groups
+ - validation: system: add validation for new CPU APIs
+ - validation: system: add validation tests for odp_cpu_cycles_ calls
+ - validation: system: fix return code for checks
+ - validation: system: make odp_cpu_hz optional in validation test
+ - validation: system: make odp_cpu_hz_id conditional
+ - validation: test odp_pktio_link_status()
+ - validation: time: increase limit to check to 2 res
+ - validation: time: round up resolution
+ - validation: time: store local and global resolution
+ - validation: timer: fix delay after loop
+ - validation: timer: handle early exhaustion of pool
+ * General:
+ - linux-generic: add packet_io_stats.h to Makefile.am
+ - linux-generic: arch: renamed cpu arch files
+ - linux-generic: atomic: 32bit cas_rel and cas_acq_rel
+ - linux-generic: atomic: implemented exchange
+ - linux-generic: atomic: non-relaxed 64bit operations
+ - linux-generic: barrier: use API memory barrier
+ - linux-generic: classification: implement verify_pmr_dmac
+ - linux-generic: cpu: implemented pause
+ - linux-generic: define posix extension level once
+ - linux-generic: init: handle local/global init/term cleanly
+ - linux-generic: locks: replace internal atomics
+ - linux-generic: netmap: add functions for fetching pktio queues
+ - linux-generic: netmap: add initial multi queue support
+ - linux-generic: netmap: add netmap_close_descriptors() function
+ - linux-generic: netmap: add netmap_link_status() function
+ - linux-generic: netmap: add odp_pktio_capability()
+ - linux-generic: netmap: add odp_pktio_link_status()
+ - linux-generic: netmap: add odp_pktio_start()
+ - linux-generic: netmap: add scheduler multi-queue support
+ - linux-generic: netmap: add start()/stop() functionality
+ - linux-generic: netmap: disable debug prints
+ - linux-generic: netmap: fix MTU size
+ - linux-generic: netmap: fix netmap_mtu_get()
+ - linux-generic: netmap: implement pktio statistics
+ - linux-generic: netmap: map rings in netmap_start
+ - linux-generic: netmap: odp_pktio_recv() from all pktin queues
+ - linux-generic: netmap: use select() instead of poll() in recv
+ - linux-generic: packet: hide frame_len behind accessor
+ - linux-generic: packet_io: expose pktio_tbl and is_free()
+ - linux-generic: packet_io: fix array indexing in pktin_deq_multi()
+ - linux-generic: packet_io: separate locks for RX/TX
+ - linux-generic: pcap: implement pktio statistics counters
+ - linux-generic: pktio loop: implement statistics counters
+ - linux-generic: pktio: add RSS helper functions
+ - linux-generic: pktio: added poll type input queue
+ - linux-generic: pktio: added scheduler multi-queue support
+ - linux-generic: pktio: dummy multi-queue pktio
+ - linux-generic: pktio: enable using PKTIO_MAX_QUEUES in pktio implementations
+ - linux-generic: pktio: implement odp_pktio_link_status()
+ - linux-generic: pktio: print out the name of pktio used
+ - linux-generic: pktio: re-organize queue config code
+ - linux-generic: pktio: remove unwanted initialisation
+ - linux-generic: pktio: use multiqueue recv internally
+ - linux-generic: pool: accelerate buffer allocation marking
+ - linux-generic: pool: catch duplicate free errors in debug builds
+ - linux-generic: queue: check invalid handle in odp_queue_destroy
+ - linux-generic: remove direct include of endian.h from byteorder.h
+ - linux-generic: remove direct include of stdint.h by atomic.h
+ - linux-generic: remove direct include of stdlib.h by timer.h
+ - linux-generic: removed spin_internal
+ - linux-generic: scheduler: improve pktio polling
+ - linux-generic: sockets: implement pktio statistics
+ - linux-generic: sysinfo: apply per-CPU implementation to MIPS
+ - linux-generic: sysinfo: apply per-CPU implementation to PowerPC
+ - linux-generic: sysinfo: make the cpu_hz per-CPU data
+ - linux-generic: sysinfo: make the model_str per-CPU data
+ - linux-generic: sysinfo: move ARM system info codes to default arch file
+ - linux-generic: sysinfo: move MIPS system info codes to its platform file
+ - linux-generic: sysinfo: move PowerPC system info codes to its platform file
+ - linux-generic: sysinfo: move cpu_arch_str to odp_system_info_t
+ - linux-generic: sysinfo: move x86 system info codes to its plarform file
+ - linux-generic: sysinfo: rename odp_cpu_hz_current with odp_ prefix
+ - linux-generic: sysinfo: rename variable cpu_hz to cpu_hz_max
+ - linux-generic: sysinfo: revise odp_cpu_hz() to return current frequency
+ - linux-generic: sysinfo: set values for cpu_arch_str
+ - linux-generic: sysinfo: update dummy function to pass validation
+ - linux-generic: sysinfo: use uniform call odp_sysinfo_parser
+ - linux-generic: timer: use SIGEV_THREAD_ID
+ - linux-generic: timer: limit notification about resolution incorrectness
+ - linux-generic: update CPU Hz calling functions
+
opendataplane (1.6.0.0)
* API:
- api: atomic: clean atomic API documentation
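The queue API change listed above ("moved queue type into queue parameters") is the most pervasive pattern in the hunks that follow: odp_queue_create() loses its type argument, and the type travels inside odp_queue_param_t instead. A minimal sketch of the new calling convention, assuming the 1.7.0.0 headers:

----
#include <odp.h>

/* Sketch only: create a scheduled queue with the two-argument
 * odp_queue_create(). The queue type is now a field of
 * odp_queue_param_t rather than a separate function argument. */
static odp_queue_t create_sched_queue(const char *name)
{
	odp_queue_param_t qparam;

	odp_queue_param_init(&qparam);
	qparam.type        = ODP_QUEUE_TYPE_SCHED;
	qparam.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
	qparam.sched.sync  = ODP_SCHED_SYNC_PARALLEL; /* replaces SCHED_SYNC_NONE */
	qparam.sched.group = ODP_SCHED_GROUP_ALL;

	/* Returns ODP_QUEUE_INVALID on failure. */
	return odp_queue_create(name, &qparam);
}
----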
diff --git a/CONTRIBUTING b/CONTRIBUTING
index 4ad964e49..f6e3fc643 100644
--- a/CONTRIBUTING
+++ b/CONTRIBUTING
@@ -126,9 +126,8 @@ Code without a proper signoff cannot be merged into the mainline.
----
- Images are decorated with :-
.Optional Title
- image::../images/<image name>.png[align="center"]
-- The images are stored in the doc/images directory as svg files and rendered as
- png and eps during the build process.
+ image::../images/<image name>.svg[align="center"]
+- The images are stored in the doc/images directory as svg files.
- Body text shall wrap at the 80 char point.
- No warnings may be generated by the asciidoc tool.
diff --git a/DEPENDENCIES b/DEPENDENCIES
index 3563b36be..c2711d544 100644
--- a/DEPENDENCIES
+++ b/DEPENDENCIES
@@ -189,12 +189,9 @@ Prerequisites for building the OpenDataPlane (ODP) API
5.0 Documentation Images & Doxygen
- Images are stored as svg files and the png or eps versions generated when the docs are built
- Image magics convert application is used
- # Debian/Ubuntu
- # apt-get install imagemagick
+ Images are stored as svg files. No conversions for these are needed.
- Message squence diagrams are stored as msc files and the png or eps versions generated when the docs are built
+ Message sequence diagrams are stored as msc files and the svg versions are generated when the docs are built
mscgen is used
#Debian/Ubuntu
# apt-get install mscgen
@@ -208,10 +205,6 @@ The tested version of doxygen is 1.8.8
# Debian/Ubuntu
$ apt-get install doxygen graphviz
-5.1.2 PDF
- # Debian/Ubuntu
- $ apt-get install texlive-latex-recommended texlive-latex-extra texlive-fonts-recommended
-
5.2 User guides
5.2.1 HTML
diff --git a/README b/README
index cad513813..d8583bebb 100644
--- a/README
+++ b/README
@@ -45,4 +45,4 @@ Mailing list:
[PATCH] means patch is for odp.git
Bug tracking:
- https://bugs.linaro.org/describecomponents.cgi?product=OpenDataPlane
+ https://bugs.linaro.org/describecomponents.cgi?product=OpenDataPlane%20-%20linux-generic%20reference
diff --git a/configure.ac b/configure.ac
index 8ec73ceee..8108ff224 100644
--- a/configure.ac
+++ b/configure.ac
@@ -55,6 +55,7 @@ AX_VALGRIND_CHECK
AS_CASE([$host],
[x86*], [ARCH=x86],
[mips64*], [ARCH=mips64],
+ [powerpc*], [ARCH=powerpc],
[ARCH=linux]
)
AC_SUBST([ARCH])
@@ -261,16 +262,6 @@ AC_ARG_ENABLE([user-guides],
AM_CONDITIONAL([user_guide], [test "x${user_guides}" = "xyes" ])
##########################################################################
-# Check for imagemagic availability
-##########################################################################
- AC_CHECK_PROGS([IMAGEMAGIC], [convert])
- if test -z "$IMAGEMAGIC";
- then AC_MSG_WARN([Imagemagic (convert) not found - continuing without image support])
- fi
-
-AM_CONDITIONAL([HAVE_IMAGEMAGIC], [test "x${IMAGEMAGIC}" = "xconvert"])
-
-##########################################################################
# Check for mscgen availability
##########################################################################
AC_CHECK_PROGS([MSCGEN], [mscgen])
@@ -348,6 +339,8 @@ AC_CONFIG_FILES([Makefile
test/api_test/Makefile
test/performance/Makefile
test/validation/Makefile
+ test/validation/atomic/Makefile
+ test/validation/barrier/Makefile
test/validation/buffer/Makefile
test/validation/classification/Makefile
test/validation/config/Makefile
@@ -357,6 +350,7 @@ AC_CONFIG_FILES([Makefile
test/validation/errno/Makefile
test/validation/hash/Makefile
test/validation/init/Makefile
+ test/validation/lock/Makefile
test/validation/packet/Makefile
test/validation/pktio/Makefile
test/validation/pool/Makefile
@@ -364,7 +358,6 @@ AC_CONFIG_FILES([Makefile
test/validation/random/Makefile
test/validation/scheduler/Makefile
test/validation/std_clib/Makefile
- test/validation/synchronizers/Makefile
test/validation/thread/Makefile
test/validation/time/Makefile
test/validation/timer/Makefile
diff --git a/doc/application-api-guide/odp.dox b/doc/application-api-guide/odp.dox
index 0bff5ad9e..579217945 100644
--- a/doc/application-api-guide/odp.dox
+++ b/doc/application-api-guide/odp.dox
@@ -17,7 +17,7 @@
* also be useful for those wishing to implement ODP on other
* platforms.
*
- * @image html overview.png
+ * @image html overview.svg
*
* ODP consists of a common layer and an implementation layer.
* Applications written to the common layer are portable across all
diff --git a/doc/images/.gitignore b/doc/images/.gitignore
index 148f2f2cd..1647e415d 100644
--- a/doc/images/.gitignore
+++ b/doc/images/.gitignore
@@ -1,2 +1 @@
-*.png
-*.eps
+resource_management.svg
diff --git a/doc/images/Makefile.am b/doc/images/Makefile.am
index 8ab03b4f5..8fb8a99e5 100644
--- a/doc/images/Makefile.am
+++ b/doc/images/Makefile.am
@@ -1,43 +1,16 @@
-.svg.png:
- convert $^ $@
-
-.svg.eps:
- convert $^ $@
-
-.msc.png:
- mscgen -T png -i $^ -o $@
-
-SVG_SRCS = \
- atomic_queue.svg \
- ordered_queue.svg \
- parallel_queue.svg \
- odp_components.svg \
- odp_rx_processing.svg \
- odp_scheduling.svg \
- odp_traffic_manager.svg \
- overview.svg \
- release_git.svg \
- simple_release_git.svg
-
-SVG_TARGETS = $(SVG_SRCS:svg=png)
-SVG_TARGETS += $(SVG_SRCS:svg=eps)
+.msc.svg:
+ mscgen -T svg -i $^ -o $@
MSG_SRCS = resource_management.msc
-MSG_TARGETS = $(MSG_SRCS:msc=png)
+MSG_TARGETS = $(MSG_SRCS:msc=svg)
-EXTRA_DIST = $(SVG_SRCS) $(MSG_SRCS)
-
-TARGETS=$(SVG_TARGETS) $(MSG_TARGETS)
-
-if HAVE_IMAGEMAGIC
-TARGETS += $(SVG_TARGETS)
-endif
+EXTRA_DIST = $(MSG_SRCS)
if HAVE_MSCGEN
-TARGETS += $(MSG_TARGETS)
+TARGETS = $(MSG_TARGETS)
endif
all-local: $(TARGETS)
clean-local:
- rm -f $(SVG_TARGETS) $(MSG_TARGETS)
+ rm -f $(MSG_TARGETS)
diff --git a/doc/process-guide/release-guide.adoc b/doc/process-guide/release-guide.adoc
index 60ef769be..dab68d334 100644
--- a/doc/process-guide/release-guide.adoc
+++ b/doc/process-guide/release-guide.adoc
@@ -32,7 +32,7 @@ where a major release is to be made and applied atomically, this flow can be
seen in Figure 1.
.Overview of the ODP git process
-image::../images/simple_release_git.png[align="center"]
+image::../images/simple_release_git.svg[align="center"]
Regular bug fixes, and implementation changes occur directly to master.
@@ -47,7 +47,7 @@ cherry picked work so that it may be applied to master on release day, this can
be seen in detail in Figure 2.
.Overview of the ODP git process
-image::../images/release_git.png[align="center"]
+image::../images/release_git.svg[align="center"]
=== api-next ===
acceptance criteria for patches to api-next (path 1):
diff --git a/doc/users-guide/users-guide.adoc b/doc/users-guide/users-guide.adoc
index 2190c1376..bbb53a74d 100644
--- a/doc/users-guide/users-guide.adoc
+++ b/doc/users-guide/users-guide.adoc
@@ -12,7 +12,7 @@ Further details about ODP may be found at the http://opendataplane.org[ODP]
home page.
.Overview of a system running ODP applications
-image::../images/overview.png[align="center"]
+image::../images/overview.svg[align="center"]
ODP is an API specification that allows many implementations to provide
platform independence, automatic hardware acceleration and CPU scaling to
@@ -22,7 +22,7 @@ write an application that can successfully take advantage of the API.
:numbered:
== Introduction
.OpenDataPlane Components
-image::../images/odp_components.png[align="center"]
+image::../images/odp_components.svg[align="center"]
.The ODP API Specification
ODP consists of three separate but related component parts. First, ODP is an
@@ -360,7 +360,7 @@ The *Classifier* provides a suite of APIs that control packet receive (RX)
processing.
.ODP Receive Processing with Classifier
-image::../images/odp_rx_processing.png[align="center"]
+image::../images/odp_rx_processing.svg[align="center"]
The classifier provides two logically related services:
[horizontal]
@@ -397,7 +397,7 @@ The *Scheduler* provides a suite of APIs that control scalable event
processing.
.ODP Scheduler and Event Processing
-image::../images/odp_scheduling.png[align="center"]
+image::../images/odp_scheduling.svg[align="center"]
The Scheduler is responsible for selecting and dispatching one or more events
to a requesting thread. Event selection is based on several factors involving
@@ -429,7 +429,7 @@ The *Traffic Manager* provides a suite of APIs that control traffic shaping and
Quality of Service (QoS) processing for packet output.
.ODP Transmit processing with Traffic Manager
-image::../images/odp_traffic_manager.png[align="center"]
+image::../images/odp_traffic_manager.svg[align="center"]
The final stage of packet processing is to transmit it. Here, applications have
several choices. As with RX processing, applications may send packets
@@ -508,7 +508,7 @@ Shutdown is the logical reverse of the initialization procedure, with
called to terminate ODP.
.ODP Application Structure Flow Diagram
-image::../images/resource_management.png[align="center"]
+image::../images/resource_management.svg[align="center"]
== Common Conventions
Many ODP APIs share common conventions regarding their arguments and return
@@ -636,7 +636,7 @@ SCHED queues that specify a sync mode of ODP_SCHED_SYNC_NONE are unrestricted
in how events are processed.
.Parallel Queue Scheduling
-image::../images/parallel_queue.png[align="center"]
+image::../images/parallel_queue.svg[align="center"]
All events held on parallel queues are eligible to be scheduled simultaneously
and any required synchronization between them is the responsibility of the
@@ -650,24 +650,27 @@ might either be empty, of lower priority, or not in a scheduler group matching
any of the threads being serviced by the scheduler.
=== Atomic Queues
-Atomic queues simplify event synchronization because only a single event
-from a given atomic queue may be processed at a time. Events scheduled from
+Atomic queues simplify event synchronization because only a single thread may
+process event(s) from a given atomic queue at a time. Events scheduled from
atomic queues thus can be processed lock free because the locking is being
-done implicitly by the scheduler.
+done implicitly by the scheduler. Note that the caller may receive one or
+more events from the same atomic queue if *odp_schedule_multi()* is used. In
+this case these multiple events all share the same atomic scheduling context.
.Atomic Queue Scheduling
-image::../images/atomic_queue.png[align="center"]
+image::../images/atomic_queue.svg[align="center"]
-In this example, no matter how many events may be held in an atomic queue, only
-one of them can be scheduled at a time. Here two threads process events from
-two different atomic queues. Note that there is no synchronization between
-different atomic queues, only between events originating from the same atomic
-queue. The queue context associated with the atomic queue is held until the
-next call to the scheduler or until the application explicitly releases it
-via a call to *odp_schedule_release_atomic()*.
+In this example, no matter how many events may be held in an atomic queue,
+only one calling thread can receive scheduled events from it at a time. Here
+two threads process events from two different atomic queues. Note that there
+is no synchronization between different atomic queues, only between events
+originating from the same atomic queue. The queue context associated with the
+atomic queue is held until the next call to the scheduler or until the
+application explicitly releases it via a call to
+*odp_schedule_release_atomic()*.
Note that while atomic queues simplify programming, the serial nature of
-atomic queues will impair scaling.
+atomic queues may impair scaling.
=== Ordered Queues
Ordered queues provide the best of both worlds by providing the inherent
@@ -675,7 +678,7 @@ scaleabilty of parallel queues, with the easy synchronization of atomic
queues.
.Ordered Queue Scheduling
-image::../images/ordered_queue.png[align="center"]
+image::../images/ordered_queue.svg[align="center"]
When scheduling events from an ordered queue, the scheduler dispatches multiple
events from the queue in parallel to different threads, however the scheduler
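To make the atomic-queue text above concrete, here is a hedged sketch of a worker loop built on *odp_schedule_multi()*; process_event() is a hypothetical application handler, not an ODP call:

----
#define MAX_EVENTS 8

/* Sketch: all events returned by one odp_schedule_multi() call from an
 * atomic queue share the same atomic scheduling context, so they can be
 * processed without application locks. */
odp_event_t events[MAX_EVENTS];
odp_queue_t from;
int num, i;

num = odp_schedule_multi(&from, ODP_SCHED_WAIT, events, MAX_EVENTS);

for (i = 0; i < num; i++)
	process_event(events[i]); /* hypothetical application handler */

/* Optionally release the atomic context before the next scheduler call,
 * once exclusive access to the queue context is no longer needed. */
odp_schedule_release_atomic();
----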
diff --git a/example/classifier/odp_classifier.c b/example/classifier/odp_classifier.c
index 6b443d737..57a58d979 100644
--- a/example/classifier/odp_classifier.c
+++ b/example/classifier/odp_classifier.c
@@ -56,7 +56,7 @@ typedef struct {
odp_atomic_u64_t pool_pkt_count; /**< count of received packets */
char cos_name[ODP_COS_NAME_LEN]; /**< cos name */
struct {
- odp_pmr_term_e term; /**< odp pmr term value */
+ odp_pmr_term_t term; /**< odp pmr term value */
uint64_t val; /**< pmr term value */
uint64_t mask; /**< pmr term mask */
uint32_t val_sz; /**< size of the pmr term */
@@ -89,7 +89,7 @@ static void print_info(char *progname, appl_args_t *appl_args);
static void usage(char *progname);
static void configure_cos(odp_pktio_t pktio, appl_args_t *args);
static void configure_default_cos(odp_pktio_t pktio, appl_args_t *args);
-static int convert_str_to_pmr_enum(char *token, odp_pmr_term_e *term,
+static int convert_str_to_pmr_enum(char *token, odp_pmr_term_t *term,
uint32_t *offset);
static int parse_pmr_policy(appl_args_t *appl_args, char *argv[], char *optarg);
@@ -250,6 +250,7 @@ static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
}
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -257,7 +258,7 @@ static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
odp_pktio_to_u64(pktio));
inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
- inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
if (inq_def == ODP_QUEUE_INVALID) {
EXAMPLE_ERR("pktio inq create failed for %s\n", dev);
exit(EXIT_FAILURE);
@@ -369,11 +370,11 @@ static void configure_default_cos(odp_pktio_t pktio, appl_args_t *args)
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- queue_default = odp_queue_create(queue_name,
- ODP_QUEUE_TYPE_SCHED, &qparam);
+ queue_default = odp_queue_create(queue_name, &qparam);
if (queue_default == ODP_QUEUE_INVALID) {
EXAMPLE_ERR("Error: default queue create failed.\n");
exit(EXIT_FAILURE);
@@ -442,15 +443,14 @@ static void configure_cos(odp_pktio_t pktio, appl_args_t *args)
stats->pmr = odp_pmr_create(&match);
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = i % odp_schedule_num_prio();
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
snprintf(queue_name, sizeof(queue_name), "%sQueue%d",
args->stats[i].cos_name, i);
- stats->queue = odp_queue_create(queue_name,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ stats->queue = odp_queue_create(queue_name, &qparam);
if (ODP_QUEUE_INVALID == stats->queue) {
EXAMPLE_ERR("odp_queue_create failed");
exit(EXIT_FAILURE);
@@ -667,7 +667,7 @@ static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
odph_ethhdr_t *eth;
odph_ethaddr_t tmp_addr;
odph_ipv4hdr_t *ip;
- uint32be_t ip_tmp_addr; /* tmp ip addr */
+ odp_u32be_t ip_tmp_addr; /* tmp ip addr */
unsigned i;
for (i = 0; i < len; ++i) {
@@ -692,7 +692,7 @@ static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
}
}
-static int convert_str_to_pmr_enum(char *token, odp_pmr_term_e *term,
+static int convert_str_to_pmr_enum(char *token, odp_pmr_term_t *term,
uint32_t *offset)
{
if (NULL == token)
@@ -718,7 +718,7 @@ static int parse_pmr_policy(appl_args_t *appl_args, char *argv[], char *optarg)
int policy_count;
char *token;
size_t len;
- odp_pmr_term_e term;
+ odp_pmr_term_t term;
global_statistics *stats;
char *pmr_str;
uint32_t offset;
@@ -892,8 +892,8 @@ static void print_info(char *progname, appl_args_t *appl_args)
"Cache line size: %i\n"
"CPU count: %i\n"
"\n",
- odp_version_api_str(), odp_sys_cpu_model_str(),
- odp_sys_cpu_hz(), odp_sys_cache_line_size(),
+ odp_version_api_str(), odp_cpu_model_str(),
+ odp_cpu_hz_max(), odp_sys_cache_line_size(),
odp_cpu_count());
printf("Running ODP appl: \"%s\"\n"
@@ -923,9 +923,9 @@ static void usage(char *progname)
"\n"
"Mandatory OPTIONS:\n"
" -i, --interface Eth interface\n"
- " -p, --policy [<odp_pmr_term_e>|<offset>]:<value>:<mask bits>:<queue name>\n"
+ " -p, --policy [<odp_pmr_term_t>|<offset>]:<value>:<mask bits>:<queue name>\n"
"\n"
- "<odp_pmr_term_e> Packet Matching Rule defined with odp_pmr_term_e "
+ "<odp_pmr_term_t> Packet Matching Rule defined with odp_pmr_term_t "
"for the policy\n"
"<offset> Absolute offset in bytes from frame start to define a "
"ODP_PMR_CUSTOM_FRAME Packet Matching Rule for the policy\n"
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c
index 10643dce0..ab8c7faa5 100644
--- a/example/generator/odp_generator.c
+++ b/example/generator/odp_generator.c
@@ -347,6 +347,7 @@ static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
* resource
*/
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -354,7 +355,7 @@ static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
odp_pktio_to_u64(pktio));
inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
- inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
if (inq_def == ODP_QUEUE_INVALID)
EXAMPLE_ABORT("Error: pktio inq create failed for %s\n", dev);
@@ -771,7 +772,7 @@ int main(int argc, char *argv[])
cpu_first = odp_cpumask_first(&cpumask);
odp_cpumask_set(&cpu_mask, cpu_first);
- tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
+ tq = odp_queue_create("", NULL);
if (tq == ODP_QUEUE_INVALID)
abort();
args->thread[1].pktio_dev = args->appl.if_names[0];
@@ -789,7 +790,7 @@ int main(int argc, char *argv[])
gen_recv_thread, &args->thread[1],
ODP_THREAD_WORKER);
- tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
+ tq = odp_queue_create("", NULL);
if (tq == ODP_QUEUE_INVALID)
abort();
args->thread[0].pktio_dev = args->appl.if_names[0];
@@ -821,7 +822,7 @@ int main(int argc, char *argv[])
if_idx = i % args->appl.if_count;
args->thread[i].pktio_dev = args->appl.if_names[if_idx];
- tq = odp_queue_create("", ODP_QUEUE_TYPE_POLL, NULL);
+ tq = odp_queue_create("", NULL);
if (tq == ODP_QUEUE_INVALID)
abort();
args->thread[i].pool = pool;
@@ -1068,7 +1069,7 @@ static void print_info(char *progname, appl_args_t *appl_args)
"Cache line size: %i\n"
"CPU count: %i\n"
"\n",
- odp_version_api_str(), odp_sys_cpu_model_str(), odp_sys_cpu_hz(),
+ odp_version_api_str(), odp_cpu_model_str(), odp_cpu_hz_max(),
odp_sys_cache_line_size(), odp_cpu_count());
printf("Running ODP appl: \"%s\"\n"
diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c
index 6426d995e..6134ab6a4 100644
--- a/example/ipsec/odp_ipsec.c
+++ b/example/ipsec/odp_ipsec.c
@@ -222,7 +222,7 @@ void free_pkt_ctx(pkt_ctx_t *ctx)
* Example supports either polling queues or using odp_schedule
*/
typedef odp_queue_t (*queue_create_func_t)
- (const char *, odp_queue_type_t, odp_queue_param_t *);
+ (const char *, const odp_queue_param_t *);
typedef odp_event_t (*schedule_func_t) (odp_queue_t *);
static queue_create_func_t queue_create;
@@ -238,18 +238,24 @@ static int num_polled_queues;
*/
static
odp_queue_t polled_odp_queue_create(const char *name,
- odp_queue_type_t type,
- odp_queue_param_t *param EXAMPLE_UNUSED)
+ const odp_queue_param_t *param)
{
odp_queue_t my_queue;
- odp_queue_type_t my_type = type;
+ odp_queue_param_t qp;
+ odp_queue_type_t type;
+
+ odp_queue_param_init(&qp);
+ if (param)
+ memcpy(&qp, param, sizeof(odp_queue_param_t));
+
+ type = qp.type;
if (ODP_QUEUE_TYPE_SCHED == type) {
- printf("%s: change %s to POLL\n", __func__, name);
- my_type = ODP_QUEUE_TYPE_POLL;
+ printf("%s: change %s to PLAIN\n", __func__, name);
+ qp.type = ODP_QUEUE_TYPE_PLAIN;
}
- my_queue = odp_queue_create(name, my_type, NULL);
+ my_queue = odp_queue_create(name, &qp);
if ((ODP_QUEUE_TYPE_SCHED == type) || (ODP_QUEUE_TYPE_PKTIN == type)) {
poll_queues[num_polled_queues++] = my_queue;
@@ -309,25 +315,23 @@ void ipsec_init_pre(void)
* - sequence number queue (must be ATOMIC)
*/
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- completionq = queue_create("completion",
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ completionq = queue_create("completion", &qparam);
if (ODP_QUEUE_INVALID == completionq) {
EXAMPLE_ERR("Error: completion queue creation failed\n");
exit(EXIT_FAILURE);
}
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- seqnumq = queue_create("seqnum",
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ seqnumq = queue_create("seqnum", &qparam);
if (ODP_QUEUE_INVALID == seqnumq) {
EXAMPLE_ERR("Error: sequence number queue creation failed\n");
exit(EXIT_FAILURE);
@@ -435,26 +439,24 @@ void initialize_loop(char *intf)
/* Create input queue */
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
snprintf(queue_name, sizeof(queue_name), "%i-loop_inq_def", idx);
queue_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
- inq_def = queue_create(queue_name, ODP_QUEUE_TYPE_SCHED, &qparam);
+ inq_def = queue_create(queue_name, &qparam);
if (ODP_QUEUE_INVALID == inq_def) {
EXAMPLE_ERR("Error: input queue creation failed for %s\n",
intf);
exit(EXIT_FAILURE);
}
/* Create output queue */
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
- qparam.sched.group = ODP_SCHED_GROUP_ALL;
snprintf(queue_name, sizeof(queue_name), "%i-loop_outq_def", idx);
queue_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
- outq_def = queue_create(queue_name, ODP_QUEUE_TYPE_POLL, &qparam);
+ outq_def = queue_create(queue_name, NULL);
if (ODP_QUEUE_INVALID == outq_def) {
EXAMPLE_ERR("Error: output queue creation failed for %s\n",
intf);
@@ -501,7 +503,7 @@ void initialize_intf(char *intf)
odp_pktio_param_init(&pktio_param);
if (getenv("ODP_IPSEC_USE_POLL_QUEUES"))
- pktio_param.in_mode = ODP_PKTIN_MODE_POLL;
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
else
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
@@ -520,6 +522,7 @@ void initialize_intf(char *intf)
* resource
*/
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -527,7 +530,7 @@ void initialize_intf(char *intf)
odp_pktio_to_u64(pktio));
inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
- inq_def = queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = queue_create(inq_name, &qparam);
if (ODP_QUEUE_INVALID == inq_def) {
EXAMPLE_ERR("Error: pktio queue creation failed for %s\n",
intf);
@@ -1513,7 +1516,7 @@ static void print_info(char *progname, appl_args_t *appl_args)
"Cache line size: %i\n"
"CPU count: %i\n"
"\n",
- odp_version_api_str(), odp_sys_cpu_model_str(), odp_sys_cpu_hz(),
+ odp_version_api_str(), odp_cpu_model_str(), odp_cpu_hz_max(),
odp_sys_cache_line_size(), odp_cpu_count());
printf("Running ODP appl: \"%s\"\n"
diff --git a/example/ipsec/odp_ipsec_cache.h b/example/ipsec/odp_ipsec_cache.h
index 91d9d7e14..56be9d86b 100644
--- a/example/ipsec/odp_ipsec_cache.h
+++ b/example/ipsec/odp_ipsec_cache.h
@@ -57,7 +57,7 @@ typedef struct ipsec_cache_entry_s {
uint32_t esp_seq; /**< ESP TX sequence number */
uint32_t ah_seq; /**< AH TX sequence number */
uint8_t iv[MAX_IV_LEN]; /**< ESP IV storage */
- uint16be_t tun_hdr_id; /**< Tunnel header IP ID */
+ odp_u16be_t tun_hdr_id; /**< Tunnel header IP ID */
} state;
} ipsec_cache_entry_t;
diff --git a/example/ipsec/odp_ipsec_stream.c b/example/ipsec/odp_ipsec_stream.c
index 9c2722e9d..ff2ca3379 100644
--- a/example/ipsec/odp_ipsec_stream.c
+++ b/example/ipsec/odp_ipsec_stream.c
@@ -36,7 +36,7 @@
* Stream packet header
*/
typedef struct ODP_PACKED stream_pkt_hdr_s {
- uint64be_t magic; /**< Stream magic value for verification */
+ odp_u64be_t magic; /**< Stream magic value for verification */
uint8_t data[0]; /**< Incrementing data stream */
} stream_pkt_hdr_t;
diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c
index c42331bba..adabc03b5 100644
--- a/example/packet/odp_pktio.c
+++ b/example/packet/odp_pktio.c
@@ -122,10 +122,10 @@ static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool, int mode)
switch (mode) {
case APPL_MODE_PKT_BURST:
- pktio_param.in_mode = ODP_PKTIN_MODE_RECV;
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
break;
case APPL_MODE_PKT_QUEUE:
- pktio_param.in_mode = ODP_PKTIN_MODE_POLL;
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
break;
case APPL_MODE_PKT_SCHED:
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
@@ -151,17 +151,17 @@ static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool, int mode)
EXAMPLE_ABORT("Error: unable to start %s\n", dev);
return pktio;
case APPL_MODE_PKT_QUEUE:
- inq_def = odp_queue_create(inq_name,
- ODP_QUEUE_TYPE_PKTIN, NULL);
+ odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
+ inq_def = odp_queue_create(inq_name, &qparam);
break;
case APPL_MODE_PKT_SCHED:
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
-
- inq_def = odp_queue_create(inq_name,
- ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
break;
default:
EXAMPLE_ABORT("invalid mode %d\n", mode);
@@ -502,7 +502,7 @@ static void swap_pkt_addrs(odp_packet_t pkt_tbl[], unsigned len)
odph_ethhdr_t *eth;
odph_ethaddr_t tmp_addr;
odph_ipv4hdr_t *ip;
- uint32be_t ip_tmp_addr; /* tmp ip addr */
+ odp_u32be_t ip_tmp_addr; /* tmp ip addr */
unsigned i;
for (i = 0; i < len; ++i) {
@@ -654,7 +654,7 @@ static void print_info(char *progname, appl_args_t *appl_args)
"Cache line size: %i\n"
"CPU count: %i\n"
"\n",
- odp_version_api_str(), odp_sys_cpu_model_str(), odp_sys_cpu_hz(),
+ odp_version_api_str(), odp_cpu_model_str(), odp_cpu_hz_max(),
odp_sys_cache_line_size(), odp_cpu_count());
printf("Running ODP appl: \"%s\"\n"
diff --git a/example/time/time_global_test.c b/example/time/time_global_test.c
index df0826c59..7cfd96990 100644
--- a/example/time/time_global_test.c
+++ b/example/time/time_global_test.c
@@ -171,7 +171,6 @@ static void *run_thread(void *ptr)
odp_buffer_t buf;
test_globals_t *gbls;
odp_pool_t buffer_pool;
- odp_queue_param_t qparams;
odp_queue_t queue, queue_next;
timestamp_event_t *timestamp_ev;
char queue_name[sizeof(QUEUE_NAME_PREFIX) + 2];
@@ -185,14 +184,9 @@ static void *run_thread(void *ptr)
* Own queue is needed to guarantee that next thread for receiving
* buffer is not the same thread.
*/
- odp_queue_param_init(&qparams);
- qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
- qparams.sched.sync = ODP_SCHED_SYNC_NONE;
- qparams.sched.group = ODP_SCHED_GROUP_WORKER;
-
id = odp_atomic_fetch_inc_u32(&gbls->id_counter);
sprintf(queue_name, QUEUE_NAME_PREFIX "%d", id);
- queue = odp_queue_create(queue_name, ODP_QUEUE_TYPE_POLL, &qparams);
+ queue = odp_queue_create(queue_name, NULL);
if (queue == ODP_QUEUE_INVALID)
EXAMPLE_ABORT("Cannot create thread queue, thread %d", thr);
diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c
index 6d897133a..e6717bf7b 100644
--- a/example/timer/odp_timer_test.c
+++ b/example/timer/odp_timer_test.c
@@ -355,8 +355,8 @@ int main(int argc, char *argv[])
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %"PRIu64"\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %"PRIu64"\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("Max CPU count: %i\n", odp_cpu_count());
@@ -447,11 +447,12 @@ int main(int argc, char *argv[])
* Create a queue for timer test
*/
odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- param.sched.sync = ODP_SCHED_SYNC_NONE;
+ param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
param.sched.group = ODP_SCHED_GROUP_ALL;
- queue = odp_queue_create("timer_queue", ODP_QUEUE_TYPE_SCHED, &param);
+ queue = odp_queue_create("timer_queue", &param);
if (queue == ODP_QUEUE_INVALID) {
err = 1;
@@ -459,7 +460,7 @@ int main(int argc, char *argv[])
goto err;
}
- printf("CPU freq %"PRIu64" Hz\n", odp_sys_cpu_hz());
+ printf("CPU freq %"PRIu64" Hz\n", odp_cpu_hz_max());
printf("Timer ticks vs nanoseconds:\n");
ns = 0;
tick = odp_timer_ns_to_tick(gbls->tp, ns);
diff --git a/helper/Makefile.am b/helper/Makefile.am
index 1906ae285..a8665b287 100644
--- a/helper/Makefile.am
+++ b/helper/Makefile.am
@@ -1,3 +1,5 @@
+include $(top_srcdir)/platform/@with_platform@/Makefile.inc
+
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = $(top_builddir)/pkgconfig/libodphelper.pc
@@ -22,7 +24,6 @@ helperinclude_HEADERS = \
noinst_HEADERS = \
$(srcdir)/odph_debug.h \
- $(srcdir)/odph_pause.h \
$(srcdir)/odph_hashtable.h \
$(srcdir)/odph_lineartable.h \
$(srcdir)/odph_list_internal.h
diff --git a/helper/include/odp/helper/chksum.h b/helper/include/odp/helper/chksum.h
index edb1c98cd..215917cc9 100644
--- a/helper/include/odp/helper/chksum.h
+++ b/helper/include/odp/helper/chksum.h
@@ -27,7 +27,7 @@ extern "C" {
*
* @return checksum value in host cpu order
*/
-static inline uint16sum_t odp_chksum(void *buffer, int len)
+static inline odp_u16sum_t odp_chksum(void *buffer, int len)
{
uint16_t *buf = buffer;
uint32_t sum = 0;
@@ -43,7 +43,7 @@ static inline uint16sum_t odp_chksum(void *buffer, int len)
sum += (sum >> 16);
result = ~sum;
- return (__odp_force uint16sum_t) result;
+ return (__odp_force odp_u16sum_t) result;
}
#ifdef __cplusplus
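The rename above only changes the helper's return type; usage stays the same. A minimal sketch, where the data words are arbitrary demo values:

----
#include <odp/helper/chksum.h>

/* Sketch: ones'-complement Internet checksum over a small buffer.
 * The result is returned in host CPU order as odp_u16sum_t. */
static odp_u16sum_t checksum_demo(void)
{
	uint16_t words[4] = {0x4500, 0x0054, 0x1c46, 0x4000}; /* demo data */

	return odp_chksum(words, sizeof(words));
}
----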
diff --git a/helper/include/odp/helper/eth.h b/helper/include/odp/helper/eth.h
index f1c164d67..7c9c7287a 100644
--- a/helper/include/odp/helper/eth.h
+++ b/helper/include/odp/helper/eth.h
@@ -51,7 +51,7 @@ _ODP_STATIC_ASSERT(sizeof(odph_ethaddr_t) == ODPH_ETHADDR_LEN, "ODPH_ETHADDR_T__
typedef struct ODP_PACKED {
odph_ethaddr_t dst; /**< Destination address */
odph_ethaddr_t src; /**< Source address */
- uint16be_t type; /**< Type */
+ odp_u16be_t type; /**< Type */
} odph_ethhdr_t;
/** @internal Compile time assert */
@@ -63,8 +63,8 @@ _ODP_STATIC_ASSERT(sizeof(odph_ethhdr_t) == ODPH_ETHHDR_LEN, "ODPH_ETHHDR_T__SIZ
* @todo Check usage of tpid vs ethertype. Check outer VLAN TPID.
*/
typedef struct ODP_PACKED {
- uint16be_t tpid; /**< Tag protocol ID (located after ethhdr.src) */
- uint16be_t tci; /**< Priority / CFI / VLAN ID */
+ odp_u16be_t tpid; /**< Tag protocol ID (located after ethhdr.src) */
+ odp_u16be_t tci; /**< Priority / CFI / VLAN ID */
} odph_vlanhdr_t;
/** @internal Compile time assert */
diff --git a/helper/include/odp/helper/icmp.h b/helper/include/odp/helper/icmp.h
index abcf81808..7f5097ba8 100644
--- a/helper/include/odp/helper/icmp.h
+++ b/helper/include/odp/helper/icmp.h
@@ -33,16 +33,16 @@ extern "C" {
typedef struct ODP_PACKED {
uint8_t type; /**< message type */
uint8_t code; /**< type sub-code */
- uint16sum_t chksum; /**< checksum of icmp header */
+ odp_u16sum_t chksum; /**< checksum of icmp header */
union {
struct {
- uint16be_t id;
- uint16be_t sequence;
+ odp_u16be_t id;
+ odp_u16be_t sequence;
} echo; /**< echo datagram */
- uint32be_t gateway; /**< gateway address */
+ odp_u32be_t gateway; /**< gateway address */
struct {
- uint16be_t __unused;
- uint16be_t mtu;
+ odp_u16be_t __unused;
+ odp_u16be_t mtu;
} frag; /**< path mtu discovery */
} un; /**< icmp sub header */
} odph_icmphdr_t;
diff --git a/helper/include/odp/helper/ip.h b/helper/include/odp/helper/ip.h
index 41408a3d4..2fa4aae47 100644
--- a/helper/include/odp/helper/ip.h
+++ b/helper/include/odp/helper/ip.h
@@ -61,14 +61,14 @@ extern "C" {
typedef struct ODP_PACKED {
uint8_t ver_ihl; /**< Version / Header length */
uint8_t tos; /**< Type of service */
- uint16be_t tot_len; /**< Total length */
- uint16be_t id; /**< ID */
- uint16be_t frag_offset; /**< Fragmentation offset */
+ odp_u16be_t tot_len; /**< Total length */
+ odp_u16be_t id; /**< ID */
+ odp_u16be_t frag_offset;/**< Fragmentation offset */
uint8_t ttl; /**< Time to live */
uint8_t proto; /**< Protocol */
- uint16sum_t chksum; /**< Checksum */
- uint32be_t src_addr; /**< Source address */
- uint32be_t dst_addr; /**< Destination address */
+ odp_u16sum_t chksum; /**< Checksum */
+ odp_u32be_t src_addr; /**< Source address */
+ odp_u32be_t dst_addr; /**< Destination address */
} odph_ipv4hdr_t;
/** @internal Compile time assert */
@@ -83,11 +83,11 @@ _ODP_STATIC_ASSERT(sizeof(odph_ipv4hdr_t) == ODPH_IPV4HDR_LEN, "ODPH_IPV4HDR_T__
*/
static inline int odph_ipv4_csum_valid(odp_packet_t pkt)
{
- uint16be_t res = 0;
+ odp_u16be_t res = 0;
uint16_t *w;
int nleft = sizeof(odph_ipv4hdr_t);
odph_ipv4hdr_t ip;
- uint16be_t chksum;
+ odp_u16be_t chksum;
if (!odp_packet_l3_offset(pkt))
return 0;
@@ -113,7 +113,7 @@ static inline int odph_ipv4_csum_valid(odp_packet_t pkt)
*
* @return IPv4 checksum in host cpu order, or 0 on failure
*/
-static inline uint16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
+static inline odp_u16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
{
uint16_t *w;
odph_ipv4hdr_t *ip;
@@ -138,8 +138,8 @@ static inline uint16sum_t odph_ipv4_csum_update(odp_packet_t pkt)
* IPv6 header
*/
typedef struct ODP_PACKED {
- uint32be_t ver_tc_flow; /**< Version / Traffic class / Flow label */
- uint16be_t payload_len; /**< Payload length */
+ odp_u32be_t ver_tc_flow; /**< Version / Traffic class / Flow label */
+ odp_u16be_t payload_len; /**< Payload length */
uint8_t next_hdr; /**< Next header */
uint8_t hop_limit; /**< Hop limit */
uint8_t src_addr[16]; /**< Source address */
diff --git a/helper/include/odp/helper/ipsec.h b/helper/include/odp/helper/ipsec.h
index 2565f74b4..2cc2403f4 100644
--- a/helper/include/odp/helper/ipsec.h
+++ b/helper/include/odp/helper/ipsec.h
@@ -35,8 +35,8 @@ extern "C" {
* IPSec ESP header
*/
typedef struct ODP_PACKED {
- uint32be_t spi; /**< Security Parameter Index */
- uint32be_t seq_no; /**< Sequence Number */
+ odp_u32be_t spi; /**< Security Parameter Index */
+ odp_u32be_t seq_no; /**< Sequence Number */
uint8_t iv[0]; /**< Initialization vector */
} odph_esphdr_t;
@@ -61,9 +61,9 @@ _ODP_STATIC_ASSERT(sizeof(odph_esptrl_t) == ODPH_ESPTRL_LEN, "ODPH_ESPTRL_T__SIZ
typedef struct ODP_PACKED {
uint8_t next_header; /**< Next header protocol */
uint8_t ah_len; /**< AH header length */
- uint16be_t pad; /**< Padding (must be 0) */
- uint32be_t spi; /**< Security Parameter Index */
- uint32be_t seq_no; /**< Sequence Number */
+ odp_u16be_t pad; /**< Padding (must be 0) */
+ odp_u32be_t spi; /**< Security Parameter Index */
+ odp_u32be_t seq_no; /**< Sequence Number */
uint8_t icv[0]; /**< Integrity Check Value */
} odph_ahhdr_t;
diff --git a/helper/include/odp/helper/tcp.h b/helper/include/odp/helper/tcp.h
index 42f0cbe1f..eb0a268b2 100644
--- a/helper/include/odp/helper/tcp.h
+++ b/helper/include/odp/helper/tcp.h
@@ -30,54 +30,54 @@ extern "C" {
/** TCP header */
typedef struct ODP_PACKED {
- uint16be_t src_port; /**< Source port */
- uint16be_t dst_port; /**< Destination port */
- uint32be_t seq_no; /**< Sequence number */
- uint32be_t ack_no; /**< Acknowledgment number */
+ odp_u16be_t src_port; /**< Source port */
+ odp_u16be_t dst_port; /**< Destination port */
+ odp_u32be_t seq_no; /**< Sequence number */
+ odp_u32be_t ack_no; /**< Acknowledgment number */
union {
- uint16be_t doffset_flags;
+ odp_u16be_t doffset_flags;
#if defined(ODP_BIG_ENDIAN_BITFIELD)
struct {
- uint16be_t rsvd1:8;
- uint16be_t flags:8; /**< TCP flags as a byte */
+ odp_u16be_t rsvd1:8;
+ odp_u16be_t flags:8; /**< TCP flags as a byte */
};
struct {
- uint16be_t hl:4; /**< Hdr len, in words */
- uint16be_t rsvd3:4; /**< Reserved */
- uint16be_t cwr:1;
- uint16be_t ece:1;
- uint16be_t urg:1;
- uint16be_t ack:1;
- uint16be_t psh:1;
- uint16be_t rst:1;
- uint16be_t syn:1;
- uint16be_t fin:1;
+ odp_u16be_t hl:4; /**< Hdr len, in words */
+ odp_u16be_t rsvd3:4; /**< Reserved */
+ odp_u16be_t cwr:1;
+ odp_u16be_t ece:1;
+ odp_u16be_t urg:1;
+ odp_u16be_t ack:1;
+ odp_u16be_t psh:1;
+ odp_u16be_t rst:1;
+ odp_u16be_t syn:1;
+ odp_u16be_t fin:1;
};
#elif defined(ODP_LITTLE_ENDIAN_BITFIELD)
struct {
- uint16be_t flags:8;
- uint16be_t rsvd1:8; /**< TCP flags as a byte */
+ odp_u16be_t flags:8;
+ odp_u16be_t rsvd1:8; /**< TCP flags as a byte */
};
struct {
- uint16be_t rsvd3:4; /**< Reserved */
- uint16be_t hl:4; /**< Hdr len, in words */
- uint16be_t fin:1;
- uint16be_t syn:1;
- uint16be_t rst:1;
- uint16be_t psh:1;
- uint16be_t ack:1;
- uint16be_t urg:1;
- uint16be_t ece:1;
- uint16be_t cwr:1;
+ odp_u16be_t rsvd3:4; /**< Reserved */
+ odp_u16be_t hl:4; /**< Hdr len, in words */
+ odp_u16be_t fin:1;
+ odp_u16be_t syn:1;
+ odp_u16be_t rst:1;
+ odp_u16be_t psh:1;
+ odp_u16be_t ack:1;
+ odp_u16be_t urg:1;
+ odp_u16be_t ece:1;
+ odp_u16be_t cwr:1;
};
#else
#error "Endian BitField order not defined!"
#endif
};
- uint16be_t window; /**< Window size */
- uint16be_t cksm; /**< Checksum */
- uint16be_t urgptr; /**< Urgent pointer */
+ odp_u16be_t window; /**< Window size */
+ odp_u16be_t cksm; /**< Checksum */
+ odp_u16be_t urgptr; /**< Urgent pointer */
} odph_tcphdr_t;
/**
diff --git a/helper/include/odp/helper/udp.h b/helper/include/odp/helper/udp.h
index 93b342df3..88a77f858 100644
--- a/helper/include/odp/helper/udp.h
+++ b/helper/include/odp/helper/udp.h
@@ -30,10 +30,10 @@ extern "C" {
/** UDP header */
typedef struct ODP_PACKED {
- uint16be_t src_port; /**< Source port */
- uint16be_t dst_port; /**< Destination port */
- uint16be_t length; /**< UDP datagram length in bytes (header+data) */
- uint16be_t chksum; /**< UDP header and data checksum (0 if not used)*/
+ odp_u16be_t src_port; /**< Source port */
+ odp_u16be_t dst_port; /**< Destination port */
+ odp_u16be_t length; /**< UDP datagram length in bytes (header+data) */
+ odp_u16be_t chksum; /**< UDP header and data checksum (0 if not used)*/
} odph_udphdr_t;
/**
diff --git a/helper/odph_pause.h b/helper/odph_pause.h
deleted file mode 100644
index 5618f1fee..000000000
--- a/helper/odph_pause.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODPH_PAUSE_H_
-#define ODPH_PAUSE_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Spin loop for helper internal use
- */
-static inline void odph_pause(void)
-{
-#if defined __x86_64__ || defined __i386__
-
-#ifdef __SSE2__
- __asm__ __volatile__ ("pause");
-#else
- __asm__ __volatile__ ("rep; nop");
-#endif
-
-#elif defined __arm__
-
-#if __ARM_ARCH == 7
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
-#endif
-
-#elif defined __OCTEON__
-
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
-
-#endif
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/helper/ring.c b/helper/ring.c
index 312217377..669918682 100644
--- a/helper/ring.c
+++ b/helper/ring.c
@@ -69,15 +69,11 @@
*
***************************************************************************/
-#include <odp/shared_memory.h>
-#include <odp/spinlock.h>
-#include "odph_pause.h"
-#include <odp/align.h>
+#include <odp.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include "odph_debug.h"
-#include <odp/rwlock.h>
#include <odp/helper/ring.h>
static TAILQ_HEAD(, odph_ring) odp_ring_list;
@@ -283,7 +279,7 @@ int __odph_ring_mp_do_enqueue(odph_ring_t *r, void * const *obj_table,
* we need to wait for them to complete
*/
while (odp_unlikely(r->prod.tail != prod_head))
- odph_pause();
+ odp_cpu_pause();
/* Release our entries and the memory they refer to */
__atomic_thread_fence(__ATOMIC_RELEASE);
@@ -400,7 +396,7 @@ int __odph_ring_mc_do_dequeue(odph_ring_t *r, void **obj_table,
* we need to wait for them to complete
*/
while (odp_unlikely(r->cons.tail != cons_head))
- odph_pause();
+ odp_cpu_pause();
/* Release our entries and the memory they refer to */
__atomic_thread_fence(__ATOMIC_RELEASE);
diff --git a/helper/test/Makefile.am b/helper/test/Makefile.am
index d6820e188..bbad2a58b 100644
--- a/helper/test/Makefile.am
+++ b/helper/test/Makefile.am
@@ -8,7 +8,6 @@ TESTS_ENVIRONMENT += TEST_DIR=${builddir}
EXECUTABLES = odp_chksum$(EXEEXT) \
odp_thread$(EXEEXT) \
odp_process$(EXEEXT)\
- odph_pause$(EXEEXT)\
odp_table$(EXEEXT)
COMPILE_ONLY =
@@ -29,5 +28,4 @@ dist_odp_thread_SOURCES = odp_thread.c
odp_thread_LDADD = $(LIB)/libodphelper.la $(LIB)/libodp.la
dist_odp_process_SOURCES = odp_process.c
odp_process_LDADD = $(LIB)/libodphelper.la $(LIB)/libodp.la
-odph_pause_SOURCES = odph_pause.c
dist_odp_table_SOURCES = odp_table.c
diff --git a/helper/test/odph_pause.c b/helper/test/odph_pause.c
deleted file mode 100644
index f5f5da341..000000000
--- a/helper/test/odph_pause.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <test_debug.h>
-#include "../odph_pause.h"
-
-int main(int argc TEST_UNUSED, char *argv[] TEST_UNUSED)
-{
- odph_pause();
- return 0;
-}
diff --git a/include/odp/api/atomic.h b/include/odp/api/atomic.h
index 97e86392b..a16d90bfb 100644
--- a/include/odp/api/atomic.h
+++ b/include/odp/api/atomic.h
@@ -21,7 +21,7 @@ extern "C" {
/**
* @defgroup odp_atomic ODP ATOMIC
* @details
- * <b> Atomic integers </b>
+ * <b> Atomic integers using relaxed memory ordering </b>
*
* Atomic integer types (odp_atomic_u32_t and odp_atomic_u64_t) can be used to
* implement e.g. shared counters. If not otherwise documented, operations in
@@ -31,6 +31,23 @@ extern "C" {
* before or after the operation), only atomicity of the operation itself is
* guaranteed.
*
+ * <b> Operations with non-relaxed memory ordering </b>
+ *
+ * <b> An operation with RELEASE </b> memory ordering (odp_atomic_xxx_rel_xxx())
+ * ensures that other threads loading the same atomic variable with ACQUIRE
+ * memory ordering see all stores (from the calling thread) that happened before
+ * this releasing store.
+ *
+ * <b> An operation with ACQUIRE </b> memory ordering (odp_atomic_xxx_acq_xxx())
+ * ensures that the calling thread sees all stores (done by the releasing
+ * thread) that happened before a RELEASE memory ordered store to the same
+ * atomic variable.
+ *
+ * <b> An operation with ACQUIRE-and-RELEASE </b> memory ordering
+ * (odp_atomic_xxx_acq_rel_xxx()) combines the effects of ACQUIRE and RELEASE
+ * memory orders. A single operation acts as both an acquiring load and
+ * a releasing store.
+ *
* @{
*/
@@ -42,9 +59,18 @@ extern "C" {
* Atomic 32-bit unsigned integer
*/
+/*
+ * 32-bit operations in RELAXED memory ordering
+ * --------------------------------------------
+ */
+
/**
* Initialize atomic uint32 variable
*
+ * Initializes the atomic variable with 'val'. This operation is not atomic.
+ * Application must ensure that there's no race condition while initializing
+ * the variable.
+ *
* @param atom Pointer to atomic variable
* @param val Value to initialize the variable with
*/
@@ -136,8 +162,71 @@ uint32_t odp_atomic_fetch_dec_u32(odp_atomic_u32_t *atom);
void odp_atomic_dec_u32(odp_atomic_u32_t *atom);
/**
+ * Update maximum value of atomic uint32 variable
+ *
+ * Compares value of atomic variable to the new maximum value. If the new value
+ * is greater than the current value, writes the new value into the variable.
+ *
+ * @param atom Pointer to atomic variable
+ * @param new_max New maximum value to be written into the atomic variable
+ */
+void odp_atomic_max_u32(odp_atomic_u32_t *atom, uint32_t new_max);
+
+/**
+ * Update minimum value of atomic uint32 variable
+ *
+ * Compares value of atomic variable to the new minimum value. If the new value
+ * is less than the current value, writes the new value into the variable.
+ *
+ * @param atom Pointer to atomic variable
+ * @param new_min New minimum value to be written into the atomic variable
+ */
+void odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t new_min);
+
+/**
+ * Compare and swap atomic uint32 variable
+ *
+ * Compares value of atomic variable to the value pointed by 'old_val'.
+ * If values are equal, the operation writes 'new_val' into the atomic variable
+ * and returns success. If they are not equal, the operation writes current
+ * value of atomic variable into 'old_val' and returns failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ *
+ */
+int odp_atomic_cas_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
+ uint32_t new_val);
+
+/**
+ * Exchange value of atomic uint32 variable
+ *
+ * Atomically replaces the value of atomic variable with the new value. Returns
+ * the old value.
+ *
+ * @param atom Pointer to atomic variable
+ * @param new_val New value of the atomic variable
+ *
+ * @return Value of the variable before the operation
+ */
+uint32_t odp_atomic_xchg_u32(odp_atomic_u32_t *atom, uint32_t new_val);
+
+/*
+ * 64-bit operations in RELAXED memory ordering
+ * --------------------------------------------
+ */
+
+/**
* Initialize atomic uint64 variable
*
+ * Initializes the atomic variable with 'val'. This operation is not atomic.
+ * Application must ensure that there's no race condition while initializing
+ * the variable.
+ *
* @param atom Pointer to atomic variable
* @param val Value to initialize the variable with
*/
@@ -229,6 +318,309 @@ uint64_t odp_atomic_fetch_dec_u64(odp_atomic_u64_t *atom);
void odp_atomic_dec_u64(odp_atomic_u64_t *atom);
/**
+ * Update maximum value of atomic uint64 variable
+ *
+ * Compares value of atomic variable to the new maximum value. If the new value
+ * is greater than the current value, writes the new value into the variable.
+ *
+ * @param atom Pointer to atomic variable
+ * @param new_max New maximum value to be written into the atomic variable
+ */
+void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_max);
+
+/**
+ * Update minimum value of atomic uint64 variable
+ *
+ * Compares value of atomic variable to the new minimum value. If the new value
+ * is less than the current value, writes the new value into the variable.
+ *
+ * @param atom Pointer to atomic variable
+ * @param new_min New minimum value to be written into the atomic variable
+ */
+void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_min);
+
+/**
+ * Compare and swap atomic uint64 variable
+ *
+ * Compares value of atomic variable to the value pointed by 'old_val'.
+ * If values are equal, the operation writes 'new_val' into the atomic variable
+ * and returns success. If they are not equal, the operation writes current
+ * value of atomic variable into 'old_val' and returns failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val);
+
+/**
+ * Exchange value of atomic uint64 variable
+ *
+ * Atomically replaces the value of atomic variable with the new value. Returns
+ * the old value.
+ *
+ * @param atom Pointer to atomic variable
+ * @param new_val New value of the atomic variable
+ *
+ * @return Value of the variable before the operation
+ */
+uint64_t odp_atomic_xchg_u64(odp_atomic_u64_t *atom, uint64_t new_val);
+
+/*
+ * 32-bit operations in non-RELAXED memory ordering
+ * ------------------------------------------------
+ */
+
+/**
+ * Load value of atomic uint32 variable using ACQUIRE memory ordering
+ *
+ * Otherwise identical to odp_atomic_load_u32() but ensures ACQUIRE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ *
+ * @return Value of the variable
+ */
+uint32_t odp_atomic_load_acq_u32(odp_atomic_u32_t *atom);
+
+/**
+ * Store value to atomic uint32 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_store_u32() but ensures RELEASE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ * @param val Value to store in the variable
+ */
+void odp_atomic_store_rel_u32(odp_atomic_u32_t *atom, uint32_t val);
+
+/**
+ * Add to atomic uint32 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_add_u32() but ensures RELEASE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ * @param val Value to be added to the variable
+ */
+void odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val);
+
+/**
+ * Subtract from atomic uint32 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_sub_u32() but ensures RELEASE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ * @param val Value to be subtracted from the variable
+ */
+void odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val);
+
+/**
+ * Compare and swap atomic uint32 variable using ACQUIRE memory ordering
+ *
+ * Otherwise identical to odp_atomic_cas_u32() but ensures ACQUIRE memory
+ * ordering on success. Memory ordering is RELAXED on failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
+ uint32_t new_val);
+
+/**
+ * Compare and swap atomic uint32 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_cas_u32() but ensures RELEASE memory
+ * ordering on success. Memory ordering is RELAXED on failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
+ uint32_t new_val);
+
+/**
+ * Compare and swap atomic uint32 variable using ACQUIRE-and-RELEASE memory
+ * ordering
+ *
+ * Otherwise identical to odp_atomic_cas_u32() but ensures ACQUIRE-and-RELEASE
+ * memory ordering on success. Memory ordering is RELAXED on failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
+ uint32_t new_val);
+
+/*
+ * 64-bit operations in non-RELAXED memory ordering
+ * ------------------------------------------------
+ */
+
+/**
+ * Load value of atomic uint64 variable using ACQUIRE memory ordering
+ *
+ * Otherwise identical to odp_atomic_load_u64() but ensures ACQUIRE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ *
+ * @return Value of the variable
+ */
+uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom);
+
+/**
+ * Store value to atomic uint64 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_store_u64() but ensures RELEASE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ * @param val Value to store in the variable
+ */
+void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom, uint64_t val);
+
+/**
+ * Add to atomic uint64 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_add_u64() but ensures RELEASE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ * @param val Value to be added to the variable
+ */
+void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val);
+
+/**
+ * Subtract from atomic uint64 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_sub_u64() but ensures RELEASE memory
+ * ordering.
+ *
+ * @param atom Pointer to atomic variable
+ * @param val Value to be subtracted from the variable
+ */
+void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val);
+
+/**
+ * Compare and swap atomic uint64 variable using ACQUIRE memory ordering
+ *
+ * Otherwise identical to odp_atomic_cas_u64() but ensures ACQUIRE memory
+ * ordering on success. Memory ordering is RELAXED on failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val);
+
+/**
+ * Compare and swap atomic uint64 variable using RELEASE memory ordering
+ *
+ * Otherwise identical to odp_atomic_cas_u64() but ensures RELEASE memory
+ * ordering on success. Memory ordering is RELAXED on failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val);
+
+/**
+ * Compare and swap atomic uint64 variable using ACQUIRE-and-RELEASE memory
+ * ordering
+ *
+ * Otherwise identical to odp_atomic_cas_u64() but ensures ACQUIRE-and-RELEASE
+ * memory ordering on success. Memory ordering is RELAXED on failure.
+ *
+ * @param atom Pointer to atomic variable
+ * @param[in,out] old_val Pointer to the old value of the atomic variable.
+ * Operation updates this value on failure.
+ * @param new_val New value to be written into the atomic variable
+ *
+ * @return 0 on failure, !0 on success
+ */
+int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val);
+
+/**
+ * Atomic operations
+ *
+ * Atomic operations listed in a bit field structure.
+ */
+typedef union odp_atomic_op_t {
+ /** Operation flags */
+ struct {
+ uint32_t init : 1; /**< Init atomic variable */
+ uint32_t load : 1; /**< Atomic load */
+ uint32_t store : 1; /**< Atomic store */
+ uint32_t fetch_add : 1; /**< Atomic fetch and add */
+ uint32_t add : 1; /**< Atomic add */
+ uint32_t fetch_sub : 1; /**< Atomic fetch and subtract */
+ uint32_t sub : 1; /**< Atomic subtract */
+ uint32_t fetch_inc : 1; /**< Atomic fetch and increment */
+ uint32_t inc : 1; /**< Atomic increment */
+ uint32_t fetch_dec : 1; /**< Atomic fetch and decrement */
+ uint32_t dec : 1; /**< Atomic decrement */
+ uint32_t min : 1; /**< Atomic minimum */
+ uint32_t max : 1; /**< Atomic maximum */
+ uint32_t cas : 1; /**< Atomic compare and swap */
+ uint32_t xchg : 1; /**< Atomic exchange */
+ } op;
+
+ /** All bits of the bit field structure.
+ * Operation flag mapping is architecture specific. This field can be
+ * used to set/clear all flags, or for bitwise operations over the entire
+ * structure. */
+ uint32_t all_bits;
+} odp_atomic_op_t;
+
+/**
+ * Query which atomic uint64 operations are lock-free
+ *
+ * Lock-free implementations have higher performance and scale better than
+ * implementations using locks. User can decide to use e.g. uint32 atomic
+ * variables instead of uint64 to optimize performance on platforms that
+ * implement a performance critical operation using locks.
+ *
+ * Init operations (e.g. odp_atomic_init_u64()) are not atomic. This function
+ * clears the op.init bit but will never set it to one.
+ *
+ * @param atomic_op Pointer to atomic operation structure for storing
+ * operation flags. All bits are initialized to zero during
+ * the operation. The parameter is ignored when NULL.
+ * @retval 0 None of the operations are lock-free
+ * @retval 1 Some of the operations are lock-free
+ * @retval 2 All operations are lock-free
+ */
+int odp_atomic_lock_free_u64(odp_atomic_op_t *atomic_op);
+
+/**
* @}
*/
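The new CAS, min/max, exchange and lock-free query calls enable lock-free
update loops. A sketch (function names are illustrative): a bounded counter
built on odp_atomic_cas_u32(), plus a startup probe of uint64 lock-freedom:

	#include <stdio.h>
	#include <odp.h>

	/* Sketch: increment 'atom' only while it stays below 'limit'.
	 * Returns 1 when the increment was applied, 0 otherwise. */
	static int bounded_inc(odp_atomic_u32_t *atom, uint32_t limit)
	{
		uint32_t old_val = odp_atomic_load_u32(atom);

		do {
			if (old_val >= limit)
				return 0;
			/* On failure, CAS writes the current value into old_val */
		} while (!odp_atomic_cas_u32(atom, &old_val, old_val + 1));

		return 1;
	}

	/* Sketch: check at init time whether u64 counters avoid locks */
	static void check_lock_free(void)
	{
		odp_atomic_op_t ops;

		if (odp_atomic_lock_free_u64(&ops) == 2)
			printf("all u64 atomic ops are lock-free\n");
		else if (ops.op.cas)
			printf("u64 CAS is lock-free\n");
	}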
diff --git a/include/odp/api/byteorder.h b/include/odp/api/byteorder.h
index a7b3647b2..a12a7296f 100644
--- a/include/odp/api/byteorder.h
+++ b/include/odp/api/byteorder.h
@@ -41,28 +41,28 @@ extern "C" {
*/
/**
- * @typedef uint16le_t
+ * @typedef odp_u16le_t
* unsigned 16bit little endian
*
- * @typedef uint16be_t
+ * @typedef odp_u16be_t
* unsigned 16bit big endian
*
- * @typedef uint32le_t
+ * @typedef odp_u32le_t
* unsigned 32bit little endian
*
- * @typedef uint32be_t
+ * @typedef odp_u32be_t
* unsigned 32bit big endian
*
- * @typedef uint64le_t
+ * @typedef odp_u64le_t
* unsigned 64bit little endian
*
- * @typedef uint64be_t
+ * @typedef odp_u64be_t
* unsigned 64bit big endian
*
- * @typedef uint16sum_t
+ * @typedef odp_u16sum_t
* unsigned 16bit bitwise
*
- * @typedef uint32sum_t
+ * @typedef odp_u32sum_t
* unsigned 32bit bitwise
*/
@@ -75,21 +75,21 @@ extern "C" {
* @param be16 big endian 16bit
* @return cpu native uint16_t
*/
-uint16_t odp_be_to_cpu_16(uint16be_t be16);
+uint16_t odp_be_to_cpu_16(odp_u16be_t be16);
/**
* Convert 32bit big endian to cpu native uint32_t
* @param be32 big endian 32bit
* @return cpu native uint32_t
*/
-uint32_t odp_be_to_cpu_32(uint32be_t be32);
+uint32_t odp_be_to_cpu_32(odp_u32be_t be32);
/**
* Convert 64bit big endian to cpu native uint64_t
* @param be64 big endian 64bit
* @return cpu native uint64_t
*/
-uint64_t odp_be_to_cpu_64(uint64be_t be64);
+uint64_t odp_be_to_cpu_64(odp_u64be_t be64);
/*
@@ -101,21 +101,21 @@ uint64_t odp_be_to_cpu_64(uint64be_t be64);
* @param cpu16 uint16_t in cpu native format
* @return big endian 16bit
*/
-uint16be_t odp_cpu_to_be_16(uint16_t cpu16);
+odp_u16be_t odp_cpu_to_be_16(uint16_t cpu16);
/**
* Convert cpu native uint32_t to 32bit big endian
* @param cpu32 uint32_t in cpu native format
* @return big endian 32bit
*/
-uint32be_t odp_cpu_to_be_32(uint32_t cpu32);
+odp_u32be_t odp_cpu_to_be_32(uint32_t cpu32);
/**
* Convert cpu native uint64_t to 64bit big endian
* @param cpu64 uint64_t in cpu native format
* @return big endian 64bit
*/
-uint64be_t odp_cpu_to_be_64(uint64_t cpu64);
+odp_u64be_t odp_cpu_to_be_64(uint64_t cpu64);
/*
@@ -127,21 +127,21 @@ uint64be_t odp_cpu_to_be_64(uint64_t cpu64);
* @param le16 little endian 16bit
* @return cpu native uint16_t
*/
-uint16_t odp_le_to_cpu_16(uint16le_t le16);
+uint16_t odp_le_to_cpu_16(odp_u16le_t le16);
/**
* Convert 32bit little endian to cpu native uint32_t
* @param le32 little endian 32bit
* @return cpu native uint32_t
*/
-uint32_t odp_le_to_cpu_32(uint32le_t le32);
+uint32_t odp_le_to_cpu_32(odp_u32le_t le32);
/**
* Convert 64bit little endian to cpu native uint64_t
* @param le64 little endian 64bit
* @return cpu native uint64_t
*/
-uint64_t odp_le_to_cpu_64(uint64le_t le64);
+uint64_t odp_le_to_cpu_64(odp_u64le_t le64);
/*
@@ -153,21 +153,21 @@ uint64_t odp_le_to_cpu_64(uint64le_t le64);
* @param cpu16 uint16_t in cpu native format
* @return little endian 16bit
*/
-uint16le_t odp_cpu_to_le_16(uint16_t cpu16);
+odp_u16le_t odp_cpu_to_le_16(uint16_t cpu16);
/**
* Convert cpu native uint32_t to 32bit little endian
* @param cpu32 uint32_t in cpu native format
* @return little endian 32bit
*/
-uint32le_t odp_cpu_to_le_32(uint32_t cpu32);
+odp_u32le_t odp_cpu_to_le_32(uint32_t cpu32);
/**
* Convert cpu native uint64_t to 64bit little endian
* @param cpu64 uint64_t in cpu native format
* @return little endian 64bit
*/
-uint64le_t odp_cpu_to_le_64(uint64_t cpu64);
+odp_u64le_t odp_cpu_to_le_64(uint64_t cpu64);
/**
* @}
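Only the type names change; the conversion semantics are unchanged. A tiny
round-trip sketch:

	#include <odp.h>

	/* Sketch: store a native port number in wire (big-endian) order */
	static odp_u16be_t port_to_wire(uint16_t port)
	{
		odp_u16be_t be = odp_cpu_to_be_16(port);

		/* odp_be_to_cpu_16(be) recovers 'port' on any endianness */
		return be;
	}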
diff --git a/include/odp/api/classification.h b/include/odp/api/classification.h
index c9493c2e2..f46912e0e 100644
--- a/include/odp/api/classification.h
+++ b/include/odp/api/classification.h
@@ -62,7 +62,7 @@ extern "C" {
/**
* class of service packet drop policies
*/
-typedef enum odp_cls_drop {
+typedef enum {
ODP_COS_DROP_POOL, /**< Follow buffer pool drop policy */
ODP_COS_DROP_NEVER, /**< Never drop, ignoring buffer pool policy */
} odp_cls_drop_t;
@@ -72,7 +72,7 @@ typedef enum odp_cls_drop {
* for fields that may be used to calculate
* the flow signature, if present in a packet.
*/
-typedef enum odp_cos_hdr_flow_fields {
+typedef enum {
ODP_COS_FHDR_IN_PKTIO, /**< Ingress port number */
ODP_COS_FHDR_L2_SAP, /**< Ethernet Source MAC address */
ODP_COS_FHDR_L2_DAP, /**< Ethernet Destination MAC address */
@@ -86,7 +86,7 @@ typedef enum odp_cos_hdr_flow_fields {
ODP_COS_FHDR_IPSEC_SPI, /**< IPsec session identifier */
ODP_COS_FHDR_LD_VNI, /**< NVGRE/VXLAN network identifier */
ODP_COS_FHDR_USER /**< Application-specific header field(s) */
-} odp_cos_hdr_flow_fields_e;
+} odp_cos_hdr_flow_fields_t;
/**
* Class of service parameters
@@ -226,7 +226,7 @@ int odp_cos_with_l3_qos(odp_pktio_t pktio_in,
/**
* @typedef odp_cos_flow_set_t
* Set of header fields that take part in flow signature hash calculation:
- * bit positions per odp_cos_hdr_flow_fields_e enumeration.
+ * bit positions per odp_cos_hdr_flow_fields_t enumeration.
*/
/**
@@ -240,7 +240,7 @@ int odp_cos_with_l3_qos(odp_pktio_t pktio_in,
* for fields that may be used to calculate
* the PMR, if present in a packet.
*/
-typedef enum odp_pmr_term {
+typedef enum {
ODP_PMR_LEN, /**< Total length of received packet*/
ODP_PMR_ETHTYPE_0, /**< Initial (outer)
Ethertype only (*val=uint16_t)*/
@@ -270,13 +270,13 @@ typedef enum odp_pmr_term {
/** Inner header may repeat above values with this offset */
ODP_PMR_INNER_HDR_OFF = 32
-} odp_pmr_term_e;
+} odp_pmr_term_t;
/**
* Following structure is used to define a packet matching rule
*/
typedef struct odp_pmr_match_t {
- odp_pmr_term_e term; /**< PMR term value to be matched */
+ odp_pmr_term_t term; /**< PMR term value to be matched */
const void *val; /**< Value to be matched */
const void *mask; /**< Masked set of bits to be matched */
uint32_t val_sz; /**< Size of the term value */
@@ -334,7 +334,7 @@ int odp_cos_pmr_cos(odp_pmr_t pmr_id, odp_cos_t src_cos, odp_cos_t dst_cos);
/**
* Inquire about matching terms supported by the classifier
*
- * @return A mask one bit per enumerated term, one for each of op_pmr_term_e
+ * @return A mask with one bit per enumerated term, one for each odp_pmr_term_t
*/
unsigned long long odp_pmr_terms_cap(void);
diff --git a/include/odp/api/cpu.h b/include/odp/api/cpu.h
index a9ef81dfd..eaee24b5c 100644
--- a/include/odp/api/cpu.h
+++ b/include/odp/api/cpu.h
@@ -46,6 +46,70 @@ int odp_cpu_id(void);
int odp_cpu_count(void);
/**
+ * CPU model name of this CPU
+ *
+ * Returns the CPU model name of this CPU.
+ *
+ * @return Pointer to CPU model name string
+ */
+const char *odp_cpu_model_str(void);
+
+/**
+ * CPU model name of a CPU
+ *
+ * Returns the CPU model name of the specified CPU.
+ *
+ * @param id CPU ID
+ *
+ * @return Pointer to CPU model name string
+ */
+const char *odp_cpu_model_str_id(int id);
+
+/**
+ * Current CPU frequency in Hz
+ *
+ * Returns current frequency of this CPU
+ *
+ * @return CPU frequency in Hz
+ * @retval 0 on failure
+ */
+uint64_t odp_cpu_hz(void);
+
+/**
+ * Current CPU frequency of a CPU (in Hz)
+ *
+ * Returns current frequency of specified CPU
+ *
+ * @param id CPU ID
+ *
+ * @return CPU frequency in Hz
+ * @retval 0 on failure
+ */
+uint64_t odp_cpu_hz_id(int id);
+
+/**
+ * Maximum CPU frequency in Hz
+ *
+ * Returns maximum frequency of this CPU
+ *
+ * @return CPU frequency in Hz
+ * @retval 0 on failure
+ */
+uint64_t odp_cpu_hz_max(void);
+
+/**
+ * Maximum CPU frequency of a CPU (in Hz)
+ *
+ * Returns maximum frequency of specified CPU
+ *
+ * @param id CPU ID
+ *
+ * @return CPU frequency in Hz
+ * @retval 0 on failure
+ */
+uint64_t odp_cpu_hz_max_id(int id);
+
+/**
* Current CPU cycle count
*
* Return current CPU cycle count. Cycle count may not be reset at ODP init
@@ -96,6 +160,15 @@ uint64_t odp_cpu_cycles_max(void);
uint64_t odp_cpu_cycles_resolution(void);
/**
+ * Pause CPU execution for a short while
+ *
+ * This call is intended for tight loops which poll a shared resource. A short
+ * pause within the loop may save energy and improve system performance as
+ * CPU polling frequency is reduced.
+ */
+void odp_cpu_pause(void);
+
+/**
* @}
*/
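A sketch exercising the new CPU information calls; output formatting is
illustrative, and the frequency calls return 0 on failure:

	#include <stdio.h>
	#include <inttypes.h>
	#include <odp.h>

	/* Sketch: report model and frequency of the calling CPU */
	static void print_cpu_info(void)
	{
		printf("model:  %s\n", odp_cpu_model_str());
		printf("cur Hz: %" PRIu64 "\n", odp_cpu_hz());
		printf("max Hz: %" PRIu64 "\n", odp_cpu_hz_max());
	}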
diff --git a/include/odp/api/cpumask.h b/include/odp/api/cpumask.h
index 7480132b2..4407b1025 100644
--- a/include/odp/api/cpumask.h
+++ b/include/odp/api/cpumask.h
@@ -33,27 +33,37 @@ extern "C" {
/**
* @def ODP_CPUMASK_STR_SIZE
- * Minimum size of output buffer for odp_cpumask_to_str()
+ * The maximum number of characters needed to record any CPU mask as
+ * a string (output of odp_cpumask_to_str()).
*/
/**
* Add CPU mask bits from a string
*
+ * Each bit set in the string represents a CPU to be added into the mask.
+ * The string is null terminated and consists of hexadecimal digits. It may be
+ * prepended with '0x' and may contain leading zeros (e.g. 0x0001, 0x1 or 1).
+ * CPU #0 is located at the least significant bit (0x1).
+ *
* @param mask CPU mask to modify
- * @param str Hexadecimal digits in a string. CPU #0 is located
- * at the least significant bit (0x1).
+ * @param str String of hexadecimal digits
*/
void odp_cpumask_from_str(odp_cpumask_t *mask, const char *str);
/**
- * Format CPU mask as a string of hexadecimal digits
+ * Format a string from CPU mask
+ *
+ * Output string format is defined in odp_cpumask_from_str() documentation,
+ * except that the string is always prepended with '0x' and does not have any
+ * leading zeros (e.g. always outputs 0x1 instead of 0x0001 or 1).
*
- * @param mask CPU mask to format
- * @param[out] str Output buffer (use ODP_CPUMASK_STR_SIZE)
- * @param size Size of output buffer
+ * @param mask CPU mask
+ * @param[out] str String pointer for output
+ * @param size Size of output buffer. Buffer size ODP_CPUMASK_STR_SIZE
+ * or larger will have enough space for any CPU mask.
*
- * @return number of characters written (including terminating null char)
- * @retval <0 on failure (buffer too small)
+ * @return Number of characters written (including terminating null char)
+ * @retval <0 on failure (e.g. buffer too small)
*/
int32_t odp_cpumask_to_str(const odp_cpumask_t *mask, char *str, int32_t size);
@@ -224,6 +234,16 @@ int odp_cpumask_default_worker(odp_cpumask_t *mask, int num);
int odp_cpumask_default_control(odp_cpumask_t *mask, int num);
/**
+ * Report all the available CPUs
+ *
+ * All the available CPUs include both worker CPUs and control CPUs.
+ *
+ * @param[out] mask CPU mask to hold all available CPUs
+ * @return Number of all available CPUs
+ */
+int odp_cpumask_all_available(odp_cpumask_t *mask);
+
+/**
* @}
*/
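The documented string format round-trips through the mask API. A sketch,
assuming CPUs 0 and 4 are valid on the system:

	#include <stdio.h>
	#include <odp.h>

	/* Sketch: build a mask from a hex string and print it back */
	static void mask_demo(void)
	{
		odp_cpumask_t mask;
		char str[ODP_CPUMASK_STR_SIZE];

		odp_cpumask_zero(&mask);
		odp_cpumask_from_str(&mask, "0x11"); /* CPU #0 and CPU #4 */

		if (odp_cpumask_to_str(&mask, str, sizeof(str)) > 0)
			printf("mask: %s\n", str); /* prints 0x11 */
	}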
diff --git a/include/odp/api/errno.h b/include/odp/api/errno.h
index 982980738..33998b61b 100644
--- a/include/odp/api/errno.h
+++ b/include/odp/api/errno.h
@@ -17,20 +17,38 @@
extern "C" {
#endif
-/** @defgroup odp_errno ODP ERRNO
+/**
+ * @defgroup odp_errno ODP ERRNO
+ * @details
+ * <b> ODP errno </b>
+ *
+ * ODP errno (error number) is a thread local variable that any ODP function may
+ * set on a failure. It expresses additional information about the cause of
+ * the latest failure. A successful function call never sets errno. Application
+ * may initialize errno to zero at any time by calling odp_errno_zero(). Other
+ * ODP functions never set errno to zero. Valid errno values are non-zero
+ * and implementation specific. It's also implementation specific which
+ * functions set errno in addition to those explicitly specified by
+ * the API spec. ODP errno is initially zero.
+ *
* @{
*/
/**
-* Return latest ODP errno
+* Latest ODP errno
*
-* @return ODP errno
-* @retval 0 No error
+* Returns the current ODP errno value on the calling thread. A non-zero value
+* indicates the cause of the latest failure that set errno.
+*
+* @return Latest ODP errno value
+* @retval 0 Errno has not been set since the last initialization to zero
*/
int odp_errno(void);
/**
* Set ODP errno to zero
+*
+* Sets errno value to zero on the calling thread.
*/
void odp_errno_zero(void);
@@ -40,20 +58,19 @@ void odp_errno_zero(void);
* Interprets the value of ODP errno as an error message, and prints it,
* optionally preceding it with the custom message specified in str.
*
-* @param str NULL, or pointer to the string to be appended
+* @param str Pointer to the string to be appended, or NULL
*/
void odp_errno_print(const char *str);
/**
* Error message string
*
-* Interprets the value of ODP errno, generating a string with a
-* message that describes the error.
-* It uses the system definition of errno.
+* Interprets the value of ODP errno, generating a string with a message that
+* describes the error. Errno values and messages are implementation specific.
*
-* @param errnum Error code
+* @param errnum ODP errno value
*
-* @retval Pointer to the string
+* @return Pointer to the error message string
*/
const char *odp_errno_str(int errnum);
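Typical usage under the clarified semantics, as a sketch; 'my_odp_call' stands
in for any ODP function that returns <0 on failure:

	#include <odp.h>

	extern int my_odp_call(void); /* hypothetical ODP call */

	static void errno_demo(void)
	{
		/* Only the application may clear errno */
		odp_errno_zero();

		if (my_odp_call() < 0 && odp_errno())
			odp_errno_print("my_odp_call");
	}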
diff --git a/include/odp/api/init.h b/include/odp/api/init.h
index 4ac521671..381f77e79 100644
--- a/include/odp/api/init.h
+++ b/include/odp/api/init.h
@@ -40,13 +40,13 @@ extern "C" {
/**
* ODP log level.
*/
-typedef enum odp_log_level {
+typedef enum {
ODP_LOG_DBG,
ODP_LOG_ERR,
ODP_LOG_UNIMPLEMENTED,
ODP_LOG_ABORT,
ODP_LOG_PRINT
-} odp_log_level_e;
+} odp_log_level_t;
/**
* ODP log function
@@ -71,7 +71,7 @@ typedef enum odp_log_level {
* @return The number of characters logged on success
* @retval <0 on failure
*/
-int odp_override_log(odp_log_level_e level, const char *fmt, ...);
+int odp_override_log(odp_log_level_t level, const char *fmt, ...);
/**
* ODP abort function
@@ -95,7 +95,7 @@ int odp_override_log(odp_log_level_e level, const char *fmt, ...);
void odp_override_abort(void) ODP_NORETURN;
/** Replaceable logging function */
-typedef int (*odp_log_func_t)(odp_log_level_e level, const char *fmt, ...);
+typedef int (*odp_log_func_t)(odp_log_level_t level, const char *fmt, ...);
/** Replaceable abort function */
typedef void (*odp_abort_func_t)(void) ODP_NORETURN;
@@ -125,6 +125,7 @@ typedef struct odp_init_t {
} odp_init_t;
/**
+ * @typedef odp_platform_init_t
* ODP platform initialization data
*
* @note ODP API does nothing with this data. It is the underlying
@@ -132,8 +133,6 @@ typedef struct odp_init_t {
* It is required that the application takes care of identifying and
* passing any required platform specific data.
*/
-typedef struct odp_platform_init_t {
-} odp_platform_init_t;
/**
diff --git a/include/odp/api/packet_io.h b/include/odp/api/packet_io.h
index cf9275191..42796da2f 100644
--- a/include/odp/api/packet_io.h
+++ b/include/odp/api/packet_io.h
@@ -18,6 +18,9 @@
extern "C" {
#endif
+#include <odp/api/packet_io_stats.h>
+#include <odp/api/queue.h>
+
/** @defgroup odp_packet_io ODP PACKET IO
* Operations on a packet Input/Output interface.
*
@@ -40,13 +43,18 @@ extern "C" {
*/
/**
- * @def ODP_PKTIO_INVALID
- * Invalid packet IO handle
+ * @typedef odp_pktin_queue_t
+ * Direct packet input queue handle
*/
/**
- * @def ODP_PKTIO_ANY
- * odp_pktio_t value to indicate any port
+ * @typedef odp_pktout_queue_t
+ * Direct packet output queue handle
+ */
+
+/**
+ * @def ODP_PKTIO_INVALID
+ * Invalid packet IO handle
*/
/**
@@ -59,28 +67,129 @@ extern "C" {
/**
* Packet input mode
*/
-typedef enum odp_pktio_input_mode_t {
- /** Application polls packet input directly with odp_pktio_recv() */
- ODP_PKTIN_MODE_RECV = 0,
- /** Packet input through scheduled queues */
+typedef enum odp_pktin_mode_t {
+ /** Direct packet input from the interface */
+ ODP_PKTIN_MODE_DIRECT = 0,
+ /** Packet input through scheduler and scheduled queues */
ODP_PKTIN_MODE_SCHED,
- /** Application polls packet input queues */
- ODP_PKTIN_MODE_POLL,
+ /** Packet input through plain queues */
+ ODP_PKTIN_MODE_QUEUE,
/** Application will never receive from this interface */
ODP_PKTIN_MODE_DISABLED
-} odp_pktio_input_mode_t;
+} odp_pktin_mode_t;
/**
* Packet output mode
*/
-typedef enum odp_pktio_output_mode_t {
- /** Direct packet output on the interface with odp_pktio_send() */
- ODP_PKTOUT_MODE_SEND = 0,
+typedef enum odp_pktout_mode_t {
+ /** Direct packet output on the interface */
+ ODP_PKTOUT_MODE_DIRECT = 0,
/** Packet output through traffic manager API */
ODP_PKTOUT_MODE_TM,
/** Application will never send to this interface */
ODP_PKTOUT_MODE_DISABLED
-} odp_pktio_output_mode_t;
+} odp_pktout_mode_t;
+
+/**
+ * Packet input hash protocols
+ *
+ * The list of protocol header field combinations that are included in
+ * the packet input hash calculation.
+ */
+typedef union odp_pktin_hash_proto_t {
+ /** Protocol header fields for hashing */
+ struct {
+ /** IPv4 addresses and UDP port numbers */
+ uint32_t ipv4_udp : 1;
+ /** IPv4 addresses and TCP port numbers */
+ uint32_t ipv4_tcp : 1;
+ /** IPv4 addresses */
+ uint32_t ipv4 : 1;
+ /** IPv6 addresses and UDP port numbers */
+ uint32_t ipv6_udp : 1;
+ /** IPv6 addresses and TCP port numbers */
+ uint32_t ipv6_tcp : 1;
+ /** IPv6 addresses */
+ uint32_t ipv6 : 1;
+ } proto;
+
+ /** All bits of the bit field structure */
+ uint32_t all_bits;
+} odp_pktin_hash_proto_t;
+
+/**
+ * Packet IO operation mode
+ */
+typedef enum odp_pktio_op_mode_t {
+ /** Multi-thread safe operation
+ *
+ * Direct packet IO operation (recv or send) is multi-thread safe. Any
+ * number of application threads may perform the operation
+ * concurrently. */
+ ODP_PKTIO_OP_MT = 0,
+
+ /** Not multi-thread safe operation
+ *
+ * Direct packet IO operation (recv or send) may not be multi-thread
+ * safe. Application ensures synchronization between threads so that
+ * simultaneously only single thread attempts the operation on
+ * the same (pktin or pktout) queue. */
+ ODP_PKTIO_OP_MT_UNSAFE
+
+} odp_pktio_op_mode_t;
+
+/**
+ * Packet input queue parameters
+ */
+typedef struct odp_pktin_queue_param_t {
+ /** Operation mode
+ *
+ * The default value is ODP_PKTIO_OP_MT. Application may enable
+ * performance optimization by defining ODP_PKTIO_OP_MT_UNSAFE when
+ * applicable. */
+ odp_pktio_op_mode_t op_mode;
+
+ /** Enable flow hashing
+ * 0: Do not hash flows
+ * 1: Hash flows to input queues */
+ odp_bool_t hash_enable;
+
+ /** Protocol field selection for hashing. Multiple protocols can be
+ * selected. */
+ odp_pktin_hash_proto_t hash_proto;
+
+ /** Number of input queues to be created. More than one input queue
+ * requires input hashing or classifier setup. Hash_proto is ignored
+ * when hash_enable is zero or num_queues is one. This value must be
+ * between 1 and the interface capability. Queue type is defined by the
+ * input mode. */
+ unsigned num_queues;
+
+ /** Queue parameters for creating input queues in ODP_PKTIN_MODE_QUEUE
+ * or ODP_PKTIN_MODE_SCHED modes. Scheduler parameters are considered
+ * only in ODP_PKTIN_MODE_SCHED mode. */
+ odp_queue_param_t queue_param;
+
+} odp_pktin_queue_param_t;
+
+/**
+ * Packet output queue parameters
+ *
+ * These parameters are used only in ODP_PKTOUT_MODE_DIRECT mode.
+ */
+typedef struct odp_pktout_queue_param_t {
+ /** Operation mode
+ *
+ * The default value is ODP_PKTIO_OP_MT. Application may enable
+ * performance optimization by defining ODP_PKTIO_OP_MT_UNSAFE when
+ * applicable. */
+ odp_pktio_op_mode_t op_mode;
+
+ /** Number of output queues to be created. The value must be between
+ * 1 and the interface capability. */
+ unsigned num_queues;
+
+} odp_pktout_queue_param_t;
/**
* Packet IO parameters
@@ -90,12 +199,22 @@ typedef enum odp_pktio_output_mode_t {
*/
typedef struct odp_pktio_param_t {
/** Packet input mode */
- odp_pktio_input_mode_t in_mode;
+ odp_pktin_mode_t in_mode;
/** Packet output mode */
- odp_pktio_output_mode_t out_mode;
+ odp_pktout_mode_t out_mode;
} odp_pktio_param_t;
/**
+ * Packet IO capabilities
+ */
+typedef struct odp_pktio_capability_t {
+ /** Maximum number of input queues */
+ unsigned max_input_queues;
+ /** Maximum number of output queues */
+ unsigned max_output_queues;
+} odp_pktio_capability_t;
+
+/**
* Open a packet IO interface
*
* An ODP program can open a single packet IO interface per device, attempts
@@ -133,6 +252,123 @@ odp_pktio_t odp_pktio_open(const char *dev, odp_pool_t pool,
const odp_pktio_param_t *param);
/**
+ * Query packet IO interface capabilities
+ *
+ * Outputs packet IO interface capabilities on success.
+ *
+ * @param pktio Packet IO handle
+ * @param[out] capa Pointer to capability structure for output
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int odp_pktio_capability(odp_pktio_t pktio, odp_pktio_capability_t *capa);
+
+/**
+ * Configure packet input queues
+ *
+ * Sets up and configures a number of packet input queues. The maximum number
+ * of queues is platform dependent and can be queried with
+ * odp_pktio_capability(). Queue handles for input queues can be requested with
+ * odp_pktin_queue() or odp_pktin_event_queue() after this call. All
+ * requested queues are setup on success, no queues are setup on failure.
+ * Each call reconfigures input queues and may invalidate all previous queue
+ * handles.
+ *
+ * @param pktio Packet IO handle
+ * @param param Packet input queue configuration parameters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ * @see odp_pktio_capability(), odp_pktin_queue(), odp_pktin_event_queue()
+ */
+int odp_pktin_queue_config(odp_pktio_t pktio,
+ const odp_pktin_queue_param_t *param);
+
+/**
+ * Configure packet output queues
+ *
+ * Sets up and configures a number of packet output queues. The maximum
+ * number of queues is platform dependent and can be queried with
+ * odp_pktio_capability(). All requested queues are setup on success, no
+ * queues are setup on failure. Each call reconfigures output queues and may
+ * invalidate all previous queue handles.
+ *
+ * @param pktio Packet IO handle
+ * @param param Packet output queue configuration parameters
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ * @see odp_pktio_capability(), odp_pktout_queue()
+ */
+int odp_pktout_queue_config(odp_pktio_t pktio,
+ const odp_pktout_queue_param_t *param);
+
+/**
+ * Queues for packet input
+ *
+ * Returns the number of input queues configured for the interface in
+ * ODP_PKTIN_MODE_QUEUE and ODP_PKTIN_MODE_SCHED modes. Outputs up to 'num'
+ * queue handles when the 'queues' array pointer is not NULL. If return value is
+ * larger than 'num', there are more queues than the function was allowed to
+ * output. If return value (N) is less than 'num', only queues[0 ... N-1] have
+ * been written.
+ *
+ * Packets (and other events) from these queues are received with
+ * odp_queue_deq(), odp_schedule(), etc calls.
+ *
+ * @param pktio Packet IO handle
+ * @param[out] queues Points to an array of queue handles for output
+ * @param num Maximum number of queue handles to output
+ *
+ * @return Number of packet input queues
+ * @retval <0 on failure
+ */
+int odp_pktin_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num);
+
+/**
+ * Direct packet input queues
+ *
+ * Returns the number of input queues configured for the interface in
+ * ODP_PKTIN_MODE_DIRECT mode. Outputs up to 'num' queue handles when the
+ * 'queues' array pointer is not NULL. If return value is larger than 'num',
+ * there are more queues than the function was allowed to output. If return
+ * value (N) is less than 'num', only queues[0 ... N-1] have been written.
+ *
+ * Packets from these queues are received with odp_pktio_recv_queue().
+ *
+ * @param pktio Packet IO handle
+ * @param[out] queues Points to an array of queue handles for output
+ * @param num Maximum number of queue handles to output
+ *
+ * @return Number of packet input queues
+ * @retval <0 on failure
+ */
+int odp_pktin_queue(odp_pktio_t pktio, odp_pktin_queue_t queues[], int num);
+
+/**
+ * Direct packet output queues
+ *
+ * Returns the number of output queues configured for the interface in
+ * ODP_PKTOUT_MODE_DIRECT mode. Outputs up to 'num' queue handles when the
+ * 'queues' array pointer is not NULL. If return value is larger than 'num',
+ * there are more queues than the function was allowed to output. If return
+ * value (N) is less than 'num', only queues[0 ... N-1] have been written.
+ *
+ * Packets are sent to these queues with odp_pktio_send_queue().
+ *
+ * @param pktio Packet IO handle
+ * @param[out] queues Points to an array of queue handles for output
+ * @param num Maximum number of queue handles to output
+ *
+ * @return Number of packet output queues
+ * @retval <0 on failure
+ */
+int odp_pktout_queue(odp_pktio_t pktio, odp_pktout_queue_t queues[], int num);
+
+/**
* Start packet receive and transmit
*
* Activate packet receive and transmit on a previously opened or stopped
@@ -194,33 +430,79 @@ int odp_pktio_close(odp_pktio_t pktio);
odp_pktio_t odp_pktio_lookup(const char *dev);
/**
- * Receive packets
+ * Receive packets directly from an interface
+ *
+ * Receives up to 'num' packets from the interface. The operation is
+ * multi-thread safe.
+ *
+ * @param pktio Packet IO handle
+ * @param[out] packets[] Packet handle array for output of received packets
+ * @param num Maximum number of packets to receive
+ *
+ * @return Number of packets received
+ * @retval <0 on failure
+ */
+int odp_pktio_recv(odp_pktio_t pktio, odp_packet_t packets[], int num);
+
+/**
+ * Receive packets directly from an interface input queue
*
- * @param pktio Packet IO handle
- * @param pkt_table[] Storage for received packets (filled by function)
- * @param len Length of pkt_table[], i.e. max number of pkts to receive
+ * Receives up to 'num' packets from the pktio interface input queue. When
+ * input queue parameter 'op_mode' has been set to ODP_PKTIO_OP_MT_UNSAFE,
+ * the operation is optimized for single thread operation per queue and the same
+ * queue must not be accessed simultaneously from multiple threads.
+ *
+ * @param queue Pktio input queue handle for receiving packets
+ * @param[out] packets[] Packet handle array for output of received packets
+ * @param num Maximum number of packets to receive
*
* @return Number of packets received
* @retval <0 on failure
+ *
+ * @see odp_pktin_queue()
*/
-int odp_pktio_recv(odp_pktio_t pktio, odp_packet_t pkt_table[], int len);
+int odp_pktio_recv_queue(odp_pktin_queue_t queue, odp_packet_t packets[],
+ int num);
/**
- * Send packets
+ * Send packets directly to an interface
*
- * Sends out a number of packets. A successful call returns the actual number of
- * packets sent. If return value is less than 'len', the remaining packets at
- * the end of pkt_table[] are not consumed, and the caller has to take care of
- * them.
+ * Sends out a number of packets to the interface. The operation is
+ * multi-thread safe. A successful call returns the actual number of
+ * packets sent. If return value is less than 'num', the remaining packets at
+ * the end of packets[] array are not consumed, and the caller has to take
+ * care of them.
*
* @param pktio Packet IO handle
- * @param pkt_table[] Array of packets to send
- * @param len length of pkt_table[]
+ * @param packets[] Array of packets to send
+ * @param num Number of packets to send
+ *
+ * @return Number of packets sent
+ * @retval <0 on failure
+ */
+int odp_pktio_send(odp_pktio_t pktio, odp_packet_t packets[], int num);
+
+/**
+ * Send packets directly to an interface output queue
+ *
+ * Sends out a number of packets to the interface output queue. When
+ * output queue parameter 'op_mode' has been set to ODP_PKTIO_OP_MT_UNSAFE,
+ * the operation is optimized for single thread operation per queue and the same
+ * queue must not be accessed simultaneously from multiple threads.
+ *
+ * A successful call returns the actual number of packets sent. If return value
+ * is less than 'num', the remaining packets at the end of packets[] array
+ * are not consumed, and the caller has to take care of them.
+ *
+ * @param queue Pktio output queue handle for sending packets
+ * @param packets[] Array of packets to send
+ * @param num Number of packets to send
*
* @return Number of packets sent
* @retval <0 on failure
*/
-int odp_pktio_send(odp_pktio_t pktio, odp_packet_t pkt_table[], int len);
+int odp_pktio_send_queue(odp_pktout_queue_t queue, odp_packet_t packets[],
+ int num);
/**
* Set the default input queue to be associated with a pktio handle
@@ -386,6 +668,24 @@ uint64_t odp_pktio_to_u64(odp_pktio_t pktio);
void odp_pktio_param_init(odp_pktio_param_t *param);
/**
+ * Initialize packet input queue parameters
+ *
+ * Initialize an odp_pktin_queue_param_t to its default values.
+ *
+ * @param param Input queue parameter structure to be initialized
+ */
+void odp_pktin_queue_param_init(odp_pktin_queue_param_t *param);
+
+/**
+ * Initialize packet output queue parameters
+ *
+ * Initialize an odp_pktout_queue_param_t to its default values.
+ *
+ * @param param Output queue parameter structure to be initialized
+ */
+void odp_pktout_queue_param_init(odp_pktout_queue_param_t *param);
+
+/**
* Print pktio info to the console
*
* Print implementation-defined pktio debug information to the console.
@@ -395,6 +695,17 @@ void odp_pktio_param_init(odp_pktio_param_t *param);
void odp_pktio_print(odp_pktio_t pktio);
/**
+ * Determine whether the link is up or down for a packet IO interface.
+ *
+ * @param pktio Packet IO handle.
+ *
+ * @retval 1 link is up
+ * @retval 0 link is down
+ * @retval <0 on failure
+ */
+int odp_pktio_link_status(odp_pktio_t pktio);
+
+/**
* @}
*/
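The reworked input path splits setup into capability query, queue configuration
and handle retrieval. A sketch of the direct-mode flow with flow hashing; the
"eth0" device name and the pool are assumptions, and error handling is trimmed:

	#include <odp.h>

	/* Sketch: open an interface in direct mode with hashed input queues */
	static odp_pktio_t setup_pktio(odp_pool_t pool)
	{
		odp_pktio_param_t pktio_param;
		odp_pktin_queue_param_t pktin_param;
		odp_pktio_capability_t capa;
		odp_pktio_t pktio;

		odp_pktio_param_init(&pktio_param);
		pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
		pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;

		pktio = odp_pktio_open("eth0", pool, &pktio_param);

		odp_pktio_capability(pktio, &capa);

		odp_pktin_queue_param_init(&pktin_param);
		pktin_param.hash_enable = 1;
		pktin_param.hash_proto.proto.ipv4_udp = 1;
		pktin_param.num_queues = capa.max_input_queues;

		odp_pktin_queue_config(pktio, &pktin_param);
		odp_pktio_start(pktio);

		return pktio;
	}

A worker thread would then fetch its handle with odp_pktin_queue() and poll it
with odp_pktio_recv_queue().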
diff --git a/include/odp/api/packet_io_stats.h b/include/odp/api/packet_io_stats.h
new file mode 100644
index 000000000..148ad8d40
--- /dev/null
+++ b/include/odp/api/packet_io_stats.h
@@ -0,0 +1,141 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP Packet IO
+ */
+
+#ifndef ODP_API_PACKET_IO_STATS_H_
+#define ODP_API_PACKET_IO_STATS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/** @addtogroup odp_packet_io
+ * @{
+ */
+
+/**
+ * Packet IO statistics
+ *
+ * Packet IO statistics counters follow RFCs for Management Information Base
+ * (MIB) for use with network management protocols in the Internet community:
+ * https://tools.ietf.org/html/rfc3635
+ * https://tools.ietf.org/html/rfc2863
+ * https://tools.ietf.org/html/rfc2819
+ */
+typedef struct odp_pktio_stats_t {
+ /**
+ * The number of octets in valid MAC frames received on this interface,
+ * including the MAC header and FCS. See ifHCInOctets counter
+ * description in RFC 3635 for details.
+ */
+ uint64_t in_octets;
+
+ /**
+ * The number of packets, delivered by this sub-layer to a higher
+ * (sub-)layer, which were not addressed to a multicast or broadcast
+ * address at this sub-layer. See ifHCInUcastPkts in RFC 2863, RFC 3635.
+ */
+ uint64_t in_ucast_pkts;
+
+ /**
+ * The number of inbound packets which were chosen to be discarded
+ * even though no errors had been detected to prevent their being
+ * deliverable to a higher-layer protocol. One possible reason for
+ * discarding such a packet could be to free up buffer space.
+ * See ifInDiscards in RFC 2863.
+ */
+ uint64_t in_discards;
+
+ /**
+ * The sum for this interface of AlignmentErrors, FCSErrors, FrameTooLongs,
+ * InternalMacReceiveErrors. See ifInErrors in RFC 3635.
+ */
+ uint64_t in_errors;
+
+ /**
+ * For packet-oriented interfaces, the number of packets received via
+ * the interface which were discarded because of an unknown or
+ * unsupported protocol. For character-oriented or fixed-length
+ * interfaces that support protocol multiplexing the number of
+ * transmission units received via the interface which were discarded
+ * because of an unknown or unsupported protocol. For any interface
+ * that does not support protocol multiplexing, this counter will always
+ * be 0. See ifInUnknownProtos in RFC 2863, RFC 3635.
+ */
+ uint64_t in_unknown_protos;
+
+ /**
+ * The number of octets transmitted in valid MAC frames on this
+ * interface, including the MAC header and FCS. This does include
+ * the number of octets in valid MAC Control frames transmitted on
+ * this interface. See ifHCOutOctets in RFC 3635.
+ */
+ uint64_t out_octets;
+
+ /**
+ * The total number of packets that higher-level protocols requested
+ * be transmitted, and which were not addressed to a multicast or
+ * broadcast address at this sub-layer, including those that were
+ * discarded or not sent. This does not include MAC Control frames.
+ * See ifHCOutUcastPkts in RFC 2863, RFC 3635.
+ */
+ uint64_t out_ucast_pkts;
+
+ /**
+ * The number of outbound packets which were chosen to be discarded
+ * even though no errors had been detected to prevent their being
+ * transmitted. One possible reason for discarding such a packet could
+ * be to free up buffer space. See OutDiscards in RFC 2863.
+ */
+ uint64_t out_discards;
+
+ /**
+ * The sum for this interface of SQETestErrors, LateCollisions,
+ * ExcessiveCollisions, InternalMacTransmitErrors and
+ * CarrierSenseErrors. See ifOutErrors in RFC 3635.
+ */
+ uint64_t out_errors;
+} odp_pktio_stats_t;
+
+/**
+ * Get statistics for pktio handle
+ *
+ * @param pktio Packet IO handle
+ * @param[out] stats Output buffer for counters
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ * @note If a counter is not supported by the platform, it has
+ * to be set to 0.
+ */
+int odp_pktio_stats(odp_pktio_t pktio,
+ odp_pktio_stats_t *stats);
+
+/**
+ * Reset statistics for pktio handle
+ *
+ * Reset all pktio counters to 0.
+ * @param pktio Packet IO handle
+ * @retval 0 on success
+ * @retval <0 on failure
+ *
+ */
+int odp_pktio_stats_reset(odp_pktio_t pktio);
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
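A sketch reading a few of the new MIB-style counters and resetting them; which
counters are non-zero depends on platform support (unsupported counters read
as 0):

	#include <stdio.h>
	#include <inttypes.h>
	#include <odp.h>

	/* Sketch: dump selected interface counters, then reset them all */
	static void dump_stats(odp_pktio_t pktio)
	{
		odp_pktio_stats_t stats;

		if (odp_pktio_stats(pktio, &stats) == 0) {
			printf("in octets:    %" PRIu64 "\n", stats.in_octets);
			printf("in errors:    %" PRIu64 "\n", stats.in_errors);
			printf("out discards: %" PRIu64 "\n", stats.out_discards);
		}

		odp_pktio_stats_reset(pktio);
	}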
diff --git a/include/odp/api/pool.h b/include/odp/api/pool.h
index 2e79a5545..deab9633a 100644
--- a/include/odp/api/pool.h
+++ b/include/odp/api/pool.h
@@ -43,6 +43,9 @@ extern "C" {
/**
* Pool parameters
* Used to communicate pool creation options.
+ * @note A single thread may not be able to allocate all 'num' elements
+ * from the pool at any particular time, as other threads or hardware
+ * blocks are allowed to keep some for caching purposes.
*/
typedef struct odp_pool_param_t {
/** Pool type */
diff --git a/include/odp/api/queue.h b/include/odp/api/queue.h
index 3ee69fcfe..bde8ca3f6 100644
--- a/include/odp/api/queue.h
+++ b/include/odp/api/queue.h
@@ -47,53 +47,121 @@ extern "C" {
*/
/**
- * @typedef odp_queue_type_t
- * ODP queue type
+ * Queue type
*/
+typedef enum odp_queue_type_t {
+ /** Plain queue
+ *
+ * Plain queues offer simple FIFO storage of events. Application may
+ * dequeue directly from these queues. */
+ ODP_QUEUE_TYPE_PLAIN = 0,
-/**
- * @def ODP_QUEUE_TYPE_SCHED
- * Scheduled queue
- */
+ /** Scheduled queue
+ *
+ * Scheduled queues are connected to the scheduler. Application must
+ * not dequeue events directly from these queues but use the scheduler
+ * instead. */
+ ODP_QUEUE_TYPE_SCHED,
-/**
- * @def ODP_QUEUE_TYPE_POLL
- * Not scheduled queue
- */
+ /** To be removed */
+ ODP_QUEUE_TYPE_PKTIN,
-/**
- * @def ODP_QUEUE_TYPE_PKTIN
- * Packet input queue
- */
+ /** To be removed */
+ ODP_QUEUE_TYPE_PKTOUT
+} odp_queue_type_t;
/**
- * @def ODP_QUEUE_TYPE_PKTOUT
- * Packet output queue
- */
+ * Queue operation mode
+ */
+typedef enum odp_queue_op_mode_t {
+ /** Multi-thread safe operation
+ *
+ * Queue operation (enqueue or dequeue) is multi-thread safe. Any
+ * number of application threads may perform the operation
+ * concurrently. */
+ ODP_QUEUE_OP_MT = 0,
+
+ /** Not multi-thread safe operation
+ *
+ * Queue operation (enqueue or dequeue) may not be multi-thread safe.
+ * Application ensures synchronization between threads so that
+ * simultaneously only single thread attempts the operation on
+ * the same queue. */
+ ODP_QUEUE_OP_MT_UNSAFE,
+
+ /** Disabled
+ *
+ * Direct enqueue or dequeue operation from application is disabled.
+ * An attempt to enqueue/dequeue directly will result in undefined
+ * behaviour. Various ODP functions (e.g. packet input, timer,
+ * crypto, scheduler, etc) are able to perform enqueue or
+ * dequeue operations normally on the queue.
+ */
+ ODP_QUEUE_OP_DISABLED
+
+} odp_queue_op_mode_t;
/**
* ODP Queue parameters
*/
typedef struct odp_queue_param_t {
- /** Scheduler parameters */
+ /** Queue type
+ *
+ * Valid values for other parameters in this structure depend on
+ * the queue type. */
+ odp_queue_type_t type;
+
+ /** Enqueue mode
+ *
+ * Default value for both queue types is ODP_QUEUE_OP_MT. Application
+ * may enable performance optimizations by defining MT_UNSAFE or
+ * DISABLED modes when applicable. */
+ odp_queue_op_mode_t enq_mode;
+
+ /** Dequeue mode
+ *
+ * For PLAIN queues, the default value is ODP_QUEUE_OP_MT. Application
+ * may enable performance optimizations by defining MT_UNSAFE or
+ * DISABLED modes when applicable. However, when a plain queue is input
+ * to the implementation (e.g. a queue for packet output), the
+ * parameter is ignored in queue creation and the value is
+ * ODP_QUEUE_OP_DISABLED.
+ *
+ * For SCHED queues, the parameter is ignored in queue creation and
+ * the value is ODP_QUEUE_OP_DISABLED. */
+ odp_queue_op_mode_t deq_mode;
+
+ /** Scheduler parameters
+ *
+ * These parameters are considered only when queue type is
+ * ODP_QUEUE_TYPE_SCHED. */
odp_schedule_param_t sched;
- /** Queue context */
+
+ /** Queue context pointer
+ *
+ * User defined context pointer associated with the queue. The same
+ * pointer can be accessed with odp_queue_context() and
+ * odp_queue_context_set() calls. The implementation may read the
+ * pointer for prefetching the context data. Default value of the
+ * pointer is NULL. */
void *context;
} odp_queue_param_t;
-
/**
* Queue create
*
+ * Create a queue according to the queue parameters. Queue type is specified by
+ * queue parameter 'type'. Use odp_queue_param_init() to initialize parameters
+ * into their default values. Default values are also used when 'param' pointer
+ * is NULL. The default queue type is ODP_QUEUE_TYPE_PLAIN.
+ *
* @param name Queue name
- * @param type Queue type
* @param param Queue parameters. Uses defaults if NULL.
*
* @return Queue handle
* @retval ODP_QUEUE_INVALID on failure
*/
-odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
- odp_queue_param_t *param);
+odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param);
/**
* Destroy ODP queue
@@ -285,7 +353,6 @@ void odp_queue_param_init(odp_queue_param_t *param);
*/
typedef struct odp_queue_info_t {
const char *name; /**< queue name */
- odp_queue_type_t type; /**< queue type */
odp_queue_param_t param; /**< queue parameters */
} odp_queue_info_t;
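The queue type now travels inside odp_queue_param_t rather than as a separate
argument. A sketch creating a parallel scheduled queue with the new signature
(the scheduler priority and group values are the usual defaults):

	#include <odp.h>

	/* Sketch: create a scheduled queue under the new API */
	static odp_queue_t make_sched_queue(void)
	{
		odp_queue_param_t param;

		odp_queue_param_init(&param);
		param.type = ODP_QUEUE_TYPE_SCHED;
		param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
		param.sched.sync = ODP_SCHED_SYNC_PARALLEL;
		param.sched.group = ODP_SCHED_GROUP_ALL;

		return odp_queue_create("work queue", &param);
	}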
diff --git a/include/odp/api/schedule.h b/include/odp/api/schedule.h
index 55191f951..68ba594fd 100644
--- a/include/odp/api/schedule.h
+++ b/include/odp/api/schedule.h
@@ -88,27 +88,35 @@ uint64_t odp_schedule_wait_time(uint64_t ns);
* When returns an event, the thread holds the queue synchronization context
* (atomic or ordered) until the next odp_schedule() or odp_schedule_multi()
* call. The next call implicitly releases the current context and potentially
- * returns with a new context. User can allow early context release (e.g. see
- * odp_schedule_release_atomic()) for performance optimization.
+ * returns with a new context. User can allow early context release (e.g., see
+ * odp_schedule_release_atomic() and odp_schedule_release_ordered()) for
+ * performance optimization.
*
* @param from Output parameter for the source queue (where the event was
* dequeued from). Ignored if NULL.
- * @param wait Minimum time to wait for an event. Waits infinitely, if set to
- * ODP_SCHED_WAIT. Does not wait, if set to ODP_SCHED_NO_WAIT.
+ * @param wait Minimum time to wait for an event. Waits indefinitely if set
+ * to ODP_SCHED_WAIT. Does not wait if set to ODP_SCHED_NO_WAIT.
* Use odp_schedule_wait_time() to convert time to other wait
* values.
*
* @return Next highest priority event
* @retval ODP_EVENT_INVALID on timeout and no events available
*
- * @see odp_schedule_multi(), odp_schedule_release_atomic()
+ * @see odp_schedule_multi(), odp_schedule_release_atomic(),
+ * odp_schedule_release_ordered()
*/
odp_event_t odp_schedule(odp_queue_t *from, uint64_t wait);
/**
* Schedule multiple events
*
- * Like odp_schedule(), but returns multiple events from a queue.
+ * Like odp_schedule(), but returns multiple events from a queue. The caller
+ * specifies the maximum number of events it is willing to accept. The
+ * scheduler is under no obligation to return more than a single event but
+ * will never return more than the number specified by the caller. The return
+ * code specifies the number of events returned; all of these events always
+ * originate from the same source queue and share the same scheduler
+ * synchronization context.
*
* @param from Output parameter for the source queue (where the event was
* dequeued from). Ignored if NULL.
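
A sketch of a typical dispatch loop built on the multi-event variant; the full declaration is not shown in this hunk, but it takes an output event array and a maximum count, and process_event() below is a hypothetical handler:

    #define MAX_BURST 8

    odp_event_t ev[MAX_BURST];
    odp_queue_t from;
    int num, i;

    for (;;) {
        /* Block until at least one event is available */
        num = odp_schedule_multi(&from, ODP_SCHED_WAIT, ev, MAX_BURST);

        /* All 'num' events come from 'from' and share one sync context */
        for (i = 0; i < num; i++)
            process_event(from, ev[i]);
    }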
diff --git a/include/odp/api/schedule_types.h b/include/odp/api/schedule_types.h
index d4eda7838..cf204f46d 100644
--- a/include/odp/api/schedule_types.h
+++ b/include/odp/api/schedule_types.h
@@ -54,12 +54,14 @@ extern "C" {
*/
/**
- * @def ODP_SCHED_SYNC_NONE
- * Queue not synchronised
+ * @def ODP_SCHED_SYNC_PARALLEL
+ * Parallel scheduled queues
*
- * The scheduler does not provide event synchronization or ordering, only load
- * balancing. Events can be scheduled freely to multiple threads for concurrent
- * processing.
+ * The scheduler performs priority scheduling, load balancing, pre-fetching and
+ * similar functions, but does not provide additional event synchronization or
+ * ordering. It is free to schedule events from a single parallel queue to
+ * multiple threads for concurrent processing. The application is responsible
+ * for queue context synchronization and event ordering (SW synchronization).
*/
/**
diff --git a/include/odp/api/std_clib.h b/include/odp/api/std_clib.h
index 2119ec481..791b72f1c 100644
--- a/include/odp/api/std_clib.h
+++ b/include/odp/api/std_clib.h
@@ -54,6 +54,24 @@ void *odp_memcpy(void *dst, const void *src, size_t num);
void *odp_memset(void *ptr, int value, size_t num);
/**
+ * Memcmp
+ *
+ * ODP version of the C library memcmp function. It compares the first 'num'
+ * bytes of the memory blocks pointed to by 'ptr1' and 'ptr2'.
+ *
+ * @param ptr1 Pointer to a memory block
+ * @param ptr2 Pointer to a memory block
+ * @param num Number of bytes to compare
+ *
+ * @retval 0 when the contents of memory blocks match
+ * @retval <0 when the contents of memory blocks do not match, and
+ * block 'ptr1' is less than block 'ptr2'
+ * @retval >0 when the contents of memory blocks do not match, and
+ * block 'ptr1' is greater than block 'ptr2'
+ */
+int odp_memcmp(const void *ptr1, const void *ptr2, size_t num);
+
+/**
* @}
*/
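
A short usage sketch (buffers and their contents are illustrative):

    uint8_t key[16], lookup[16];

    /* ... fill key and lookup ... */

    if (odp_memcmp(key, lookup, sizeof(key)) == 0) {
        /* contents match */
    }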
diff --git a/include/odp/api/system_info.h b/include/odp/api/system_info.h
index e55ff6d42..bde3a6031 100644
--- a/include/odp/api/system_info.h
+++ b/include/odp/api/system_info.h
@@ -24,13 +24,6 @@ extern "C" {
*/
/**
- * CPU frequency in Hz
- *
- * @return CPU frequency in Hz
- */
-uint64_t odp_sys_cpu_hz(void);
-
-/**
* Huge page size in bytes
*
* @return Huge page size in bytes
@@ -45,13 +38,6 @@ uint64_t odp_sys_huge_page_size(void);
uint64_t odp_sys_page_size(void);
/**
- * CPU model name
- *
- * @return Pointer to CPU model name string
- */
-const char *odp_sys_cpu_model_str(void);
-
-/**
* Cache line size in bytes
*
* @return CPU cache line size in bytes
diff --git a/include/odp/api/thrmask.h b/include/odp/api/thrmask.h
index 8666166e0..af0d9bae4 100644
--- a/include/odp/api/thrmask.h
+++ b/include/odp/api/thrmask.h
@@ -26,27 +26,37 @@ extern "C" {
/**
* @def ODP_THRMASK_STR_SIZE
- * Minimum size of output buffer for odp_thrmask_to_str()
+ * The maximum number of characters needed to record any thread mask as
+ * a string (output of odp_thrmask_to_str()).
*/
/**
* Add thread mask bits from a string
*
- * @param[out] mask Thread mask to modify
- * @param str Hexadecimal digits in a string. Thread ID zero is located
- * at the least significant bit (0x1).
+ * Each bit set in the string represents a thread ID to be added to the mask.
+ * The string is null-terminated and consists of hexadecimal digits. It may be
+ * prepended with '0x' and may contain leading zeros (e.g. 0x0001, 0x1 or 1).
+ * Thread ID zero is located at the least significant bit (0x1).
+ *
+ * @param mask Thread mask to modify
+ * @param str String of hexadecimal digits
*/
void odp_thrmask_from_str(odp_thrmask_t *mask, const char *str);
/**
- * Format Thread mask as a string of hexadecimal digits
+ * Format a string from thread mask
+ *
+ * The output string format is defined in the odp_thrmask_from_str()
+ * documentation, except that the string is always prepended with '0x' and has
+ * no leading zeros (e.g. the output is always 0x1, never 0x0001 or 1).
*
- * @param mask Thread mask to format
- * @param[out] str Output buffer (use ODP_THRMASK_STR_SIZE)
- * @param size Size of output buffer
+ * @param mask Thread mask
+ * @param[out] str String pointer for output
+ * @param size Size of the output buffer. A buffer of size
+ * ODP_THRMASK_STR_SIZE or larger has enough space for any thread mask.
*
- * @return number of characters written (including terminating null char)
- * @retval <0 on failure (buffer too small)
+ * @return Number of characters written (including terminating null char)
+ * @retval <0 on failure (e.g. buffer too small)
*/
int32_t odp_thrmask_to_str(const odp_thrmask_t *mask, char *str, int32_t size);
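
The two calls round-trip as sketched below (assuming <stdio.h>; the mask value 0x5 selects thread IDs 0 and 2):

    odp_thrmask_t mask;
    char str[ODP_THRMASK_STR_SIZE];

    odp_thrmask_zero(&mask);
    odp_thrmask_from_str(&mask, "0x0005"); /* sets thread IDs 0 and 2 */

    if (odp_thrmask_to_str(&mask, str, sizeof(str)) > 0)
        printf("%s\n", str); /* prints "0x5": leading zeros are dropped */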
diff --git a/include/odp/api/version.h b/include/odp/api/version.h
index 95fac3cf2..15c8dbdb3 100644
--- a/include/odp/api/version.h
+++ b/include/odp/api/version.h
@@ -44,7 +44,7 @@ extern "C" {
* Introduction of major new features or changes. APIs with different major
* versions are likely not backward compatible.
*/
-#define ODP_VERSION_API_MAJOR 6
+#define ODP_VERSION_API_MAJOR 7
/**
* ODP API minor version
diff --git a/pkg/debian/changelog b/pkg/debian/changelog
index 1a0b23c83..d90730637 100644
--- a/pkg/debian/changelog
+++ b/pkg/debian/changelog
@@ -1,3 +1,8 @@
+opendataplane (1.7.0.0-1) unstable; urgency=low
+ * ODP release v1.7
+
+ -- Maxim Uvarov <maxim.uvarov@linaro.org> Fri, 05 Feb 2016 13:05:00 +0300
+
opendataplane (1.6.0.0-1) unstable; urgency=low
* ODP release v1.6
diff --git a/pkg/rpm/odp.spec b/pkg/rpm/odp.spec
index 6c675a754..f6a8d5d73 100644
--- a/pkg/rpm/odp.spec
+++ b/pkg/rpm/odp.spec
@@ -68,6 +68,8 @@ and guides in HTMLformats.
%post -p /sbin/ldconfig
%postun -p /sbin/ldconfig
%changelog
+* Fri Feb 5 2016 - maxim.uvarov (at) linaro.org
+- ODP release v1.7
* Mon Dec 28 2015 - maxim.uvarov (at) linaro.org
- ODP release v1.6
* Mon Nov 30 2015 - mike.holmes (at) linaro.org
diff --git a/platform/Makefile.inc b/platform/Makefile.inc
index 9cefe2557..147ba9f4e 100644
--- a/platform/Makefile.inc
+++ b/platform/Makefile.inc
@@ -41,6 +41,7 @@ odpapiinclude_HEADERS = \
$(top_srcdir)/include/odp/api/packet.h \
$(top_srcdir)/include/odp/api/packet_flags.h \
$(top_srcdir)/include/odp/api/packet_io.h \
+ $(top_srcdir)/include/odp/api/packet_io_stats.h \
$(top_srcdir)/include/odp/api/pool.h \
$(top_srcdir)/include/odp/api/queue.h \
$(top_srcdir)/include/odp/api/random.h \
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index 279e5e296..a9090a371 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -2,6 +2,7 @@
#export CUSTOM_STR=https://git.linaro.org/lng/odp.git
include $(top_srcdir)/platform/Makefile.inc
+include $(top_srcdir)/platform/@with_platform@/Makefile.inc
AM_CFLAGS += -I$(srcdir)/include
AM_CFLAGS += -I$(top_srcdir)/include
@@ -51,7 +52,8 @@ odpinclude_HEADERS = \
$(srcdir)/include/odp/ticketlock.h \
$(srcdir)/include/odp/time.h \
$(srcdir)/include/odp/timer.h \
- $(srcdir)/include/odp/version.h
+ $(srcdir)/include/odp/version.h \
+ $(srcdir)/arch/@ARCH@/odp/cpu_arch.h
odpplatincludedir= $(includedir)/odp/plat
odpplatinclude_HEADERS = \
@@ -63,6 +65,7 @@ odpplatinclude_HEADERS = \
$(srcdir)/include/odp/plat/cpumask_types.h \
$(srcdir)/include/odp/plat/crypto_types.h \
$(srcdir)/include/odp/plat/event_types.h \
+ $(srcdir)/include/odp/plat/init_types.h \
$(srcdir)/include/odp/plat/packet_types.h \
$(srcdir)/include/odp/plat/packet_io_types.h \
$(srcdir)/include/odp/plat/pool_types.h \
@@ -103,11 +106,11 @@ noinst_HEADERS = \
${srcdir}/include/odp_posix_extensions.h \
${srcdir}/include/odp_queue_internal.h \
${srcdir}/include/odp_schedule_internal.h \
- ${srcdir}/include/odp_spin_internal.h \
${srcdir}/include/odp_timer_internal.h \
${srcdir}/Makefile.inc
__LIB__libodp_la_SOURCES = \
+ odp_atomic.c \
odp_barrier.c \
odp_buffer.c \
odp_classification.c \
@@ -123,12 +126,14 @@ __LIB__libodp_la_SOURCES = \
odp_packet.c \
odp_packet_flags.c \
odp_packet_io.c \
+ pktio/ethtool.c \
pktio/io_ops.c \
pktio/pktio_common.c \
pktio/loop.c \
pktio/netmap.c \
pktio/socket.c \
pktio/socket_mmap.c \
+ pktio/sysfs.c \
pktio/tap.c \
odp_pool.c \
odp_queue.c \
@@ -146,12 +151,18 @@ __LIB__libodp_la_SOURCES = \
odp_timer.c \
odp_version.c \
odp_weak.c \
- arch/@ARCH@/odp_cpu_cycles.c
+ arch/@ARCH@/odp_cpu_arch.c \
+ arch/@ARCH@/odp_sysinfo_parse.c
EXTRA_DIST = \
- arch/linux/odp_cpu_cycles.c \
- arch/mips64/odp_cpu_cycles.c \
- arch/x86/odp_cpu_cycles.c
+ arch/linux/odp_cpu_arch.c \
+ arch/linux/odp_sysinfo_parse.c \
+ arch/mips64/odp_cpu_arch.c \
+ arch/mips64/odp_sysinfo_parse.c \
+ arch/powerpc/odp_cpu_arch.c \
+ arch/powerpc/odp_sysinfo_parse.c \
+ arch/x86/odp_cpu_arch.c \
+ arch/x86/odp_sysinfo_parse.c
if HAVE_PCAP
__LIB__libodp_la_SOURCES += pktio/pcap.c
diff --git a/platform/linux-generic/Makefile.inc b/platform/linux-generic/Makefile.inc
index e69de29bb..048c8bb78 100644
--- a/platform/linux-generic/Makefile.inc
+++ b/platform/linux-generic/Makefile.inc
@@ -0,0 +1,2 @@
+AM_CFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH)
+AM_CXXFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH)
diff --git a/platform/linux-generic/arch/linux/odp/cpu_arch.h b/platform/linux-generic/arch/linux/odp/cpu_arch.h
new file mode 100644
index 000000000..1c79f875c
--- /dev/null
+++ b/platform/linux-generic/arch/linux/odp/cpu_arch.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_ARCH_H_
+#define ODP_PLAT_CPU_ARCH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void odp_cpu_pause(void)
+{
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/linux/odp_cpu_cycles.c b/platform/linux-generic/arch/linux/odp_cpu_arch.c
index 7509bf28b..3112d0cd0 100644
--- a/platform/linux-generic/arch/linux/odp_cpu_cycles.c
+++ b/platform/linux-generic/arch/linux/odp_cpu_arch.c
@@ -27,7 +27,7 @@ uint64_t odp_cpu_cycles(void)
if (ret != 0)
ODP_ABORT("clock_gettime failed\n");
- hz = odp_sys_cpu_hz();
+ hz = odp_cpu_hz_max();
sec = (uint64_t)time.tv_sec;
ns = (uint64_t)time.tv_nsec;
diff --git a/platform/linux-generic/arch/linux/odp_sysinfo_parse.c b/platform/linux-generic/arch/linux/odp_sysinfo_parse.c
new file mode 100644
index 000000000..8ff6f48d2
--- /dev/null
+++ b/platform/linux-generic/arch/linux/odp_sysinfo_parse.c
@@ -0,0 +1,19 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_internal.h>
+#include <string.h>
+
+int odp_cpuinfo_parser(FILE *file ODP_UNUSED,
+ odp_system_info_t *sysinfo ODP_UNUSED)
+{
+ return 0;
+}
+
+uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/arch/mips64/odp/cpu_arch.h b/platform/linux-generic/arch/mips64/odp/cpu_arch.h
new file mode 100644
index 000000000..3bfa0dcde
--- /dev/null
+++ b/platform/linux-generic/arch/mips64/odp/cpu_arch.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_ARCH_H_
+#define ODP_PLAT_CPU_ARCH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void odp_cpu_pause(void)
+{
+ __asm__ __volatile__ ("nop");
+ __asm__ __volatile__ ("nop");
+ __asm__ __volatile__ ("nop");
+ __asm__ __volatile__ ("nop");
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/arch/mips64/odp_cpu_cycles.c b/platform/linux-generic/arch/mips64/odp_cpu_arch.c
index a20a31325..a20a31325 100644
--- a/platform/linux-generic/arch/mips64/odp_cpu_cycles.c
+++ b/platform/linux-generic/arch/mips64/odp_cpu_arch.c
diff --git a/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c b/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
new file mode 100644
index 000000000..53074f7bd
--- /dev/null
+++ b/platform/linux-generic/arch/mips64/odp_sysinfo_parse.c
@@ -0,0 +1,64 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_internal.h>
+#include <string.h>
+
+int odp_cpuinfo_parser(FILE *file, odp_system_info_t *sysinfo)
+{
+ char str[1024];
+ char *pos;
+ double mhz = 0.0;
+ uint64_t hz;
+ int model = 0;
+ int count = 2;
+ int id = 0;
+
+ strcpy(sysinfo->cpu_arch_str, "mips64");
+ while (fgets(str, sizeof(str), file) != NULL && id < MAX_CPU_NUMBER) {
+ if (!mhz) {
+ pos = strstr(str, "BogoMIPS");
+
+ if (pos)
+ if (sscanf(pos, "BogoMIPS : %lf", &mhz) == 1) {
+ /* bogomips seems to be 2x freq */
+ hz = (uint64_t)(mhz * 1000000.0 / 2.0);
+ sysinfo->cpu_hz_max[id] = hz;
+ count--;
+ }
+ }
+
+ if (!model) {
+ pos = strstr(str, "cpu model");
+
+ if (pos) {
+ int len;
+
+ pos = strchr(str, ':');
+ strncpy(sysinfo->model_str[id], pos + 2,
+ sizeof(sysinfo->model_str[id]));
+ len = strlen(sysinfo->model_str[id]);
+ sysinfo->model_str[id][len - 1] = 0;
+ model = 1;
+ count--;
+ }
+ }
+
+ if (count == 0) {
+ mhz = 0.0;
+ model = 0;
+ count = 2;
+ id++;
+ }
+ }
+
+ return 0;
+}
+
+uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/arch/powerpc/odp/cpu_arch.h b/platform/linux-generic/arch/powerpc/odp/cpu_arch.h
new file mode 120000
index 000000000..0617d7fa1
--- /dev/null
+++ b/platform/linux-generic/arch/powerpc/odp/cpu_arch.h
@@ -0,0 +1 @@
+../../linux/odp/cpu_arch.h \ No newline at end of file
diff --git a/platform/linux-generic/arch/powerpc/odp_cpu_arch.c b/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
new file mode 120000
index 000000000..c5fe40085
--- /dev/null
+++ b/platform/linux-generic/arch/powerpc/odp_cpu_arch.c
@@ -0,0 +1 @@
+../linux/odp_cpu_arch.c \ No newline at end of file
diff --git a/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c b/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
new file mode 100644
index 000000000..99457cec4
--- /dev/null
+++ b/platform/linux-generic/arch/powerpc/odp_sysinfo_parse.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_internal.h>
+#include <string.h>
+
+int odp_cpuinfo_parser(FILE *file, odp_system_info_t *sysinfo)
+{
+ char str[1024];
+ char *pos;
+ double mhz = 0.0;
+ uint64_t hz;
+ int model = 0;
+ int count = 2;
+ int id = 0;
+
+ strcpy(sysinfo->cpu_arch_str, "powerpc");
+ while (fgets(str, sizeof(str), file) != NULL && id < MAX_CPU_NUMBER) {
+ if (!mhz) {
+ pos = strstr(str, "clock");
+
+ if (pos)
+ if (sscanf(pos, "clock : %lf", &mhz) == 1) {
+ hz = (uint64_t)(mhz * 1000000.0);
+ sysinfo->cpu_hz_max[id] = hz;
+ count--;
+ }
+ }
+
+ if (!model) {
+ pos = strstr(str, "cpu");
+
+ if (pos) {
+ int len;
+
+ pos = strchr(str, ':');
+ strncpy(sysinfo->model_str[id], pos + 2,
+ sizeof(sysinfo->model_str[id]));
+ len = strlen(sysinfo->model_str[id]);
+ sysinfo->model_str[id][len - 1] = 0;
+ model = 1;
+ count--;
+ }
+ }
+
+ if (count == 0) {
+ mhz = 0.0;
+ model = 0;
+ count = 2;
+ id++;
+ }
+ }
+
+ return 0;
+}
+
+uint64_t odp_cpu_hz_current(int id ODP_UNUSED)
+{
+ return 0;
+}
diff --git a/platform/linux-generic/arch/x86/odp/cpu_arch.h b/platform/linux-generic/arch/x86/odp/cpu_arch.h
new file mode 100644
index 000000000..997a95475
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp/cpu_arch.h
@@ -0,0 +1,27 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_PLAT_CPU_ARCH_H_
+#define ODP_PLAT_CPU_ARCH_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void odp_cpu_pause(void)
+{
+#ifdef __SSE2__
+ __asm__ __volatile__ ("pause");
+#else
+ __asm__ __volatile__ ("rep; nop");
+#endif
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
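
odp_cpu_pause() is intended for spin-wait loops such as the following sketch, where 'flag' is an assumed shared odp_atomic_u32_t:

    /* Wait for another thread to set the flag, easing pipeline pressure */
    while (odp_atomic_load_u32(&flag) == 0)
        odp_cpu_pause();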
diff --git a/platform/linux-generic/arch/x86/odp_cpu_cycles.c b/platform/linux-generic/arch/x86/odp_cpu_arch.c
index 1c5c0ec7f..1c5c0ec7f 100644
--- a/platform/linux-generic/arch/x86/odp_cpu_cycles.c
+++ b/platform/linux-generic/arch/x86/odp_cpu_arch.c
diff --git a/platform/linux-generic/arch/x86/odp_sysinfo_parse.c b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
new file mode 100644
index 000000000..816629dfa
--- /dev/null
+++ b/platform/linux-generic/arch/x86/odp_sysinfo_parse.c
@@ -0,0 +1,73 @@
+/* Copyright (c) 2016, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_internal.h>
+#include <string.h>
+
+int odp_cpuinfo_parser(FILE *file, odp_system_info_t *sysinfo)
+{
+ char str[1024];
+ char *pos;
+ double ghz = 0.0;
+ uint64_t hz;
+ int id = 0;
+
+ strcpy(sysinfo->cpu_arch_str, "x86");
+ while (fgets(str, sizeof(str), file) != NULL && id < MAX_CPU_NUMBER) {
+ pos = strstr(str, "model name");
+ if (pos) {
+ pos = strchr(str, ':');
+ strncpy(sysinfo->model_str[id], pos + 2,
+ sizeof(sysinfo->model_str[id]));
+
+ pos = strchr(sysinfo->model_str[id], '@');
+ *(pos - 1) = '\0';
+ if (sscanf(pos, "@ %lfGHz", &ghz) == 1) {
+ hz = (uint64_t)(ghz * 1000000000.0);
+ sysinfo->cpu_hz_max[id] = hz;
+ }
+ id++;
+ }
+ }
+
+ return 0;
+}
+
+uint64_t odp_cpu_hz_current(int id)
+{
+ char str[1024];
+ FILE *file;
+ int cpu;
+ char *pos;
+ double mhz = 0.0;
+
+	file = fopen("/proc/cpuinfo", "rt");
+	if (file == NULL)
+		return 0;
+
+ /* find the correct processor instance */
+ while (fgets(str, sizeof(str), file) != NULL) {
+ pos = strstr(str, "processor");
+ if (pos) {
+ if (sscanf(pos, "processor : %d", &cpu) == 1)
+ if (cpu == id)
+ break;
+ }
+ }
+
+ /* extract the cpu current speed */
+ while (fgets(str, sizeof(str), file) != NULL) {
+ pos = strstr(str, "cpu MHz");
+ if (pos) {
+ if (sscanf(pos, "cpu MHz : %lf", &mhz) == 1)
+ break;
+ }
+ }
+
+ fclose(file);
+ if (mhz)
+ return (uint64_t)(mhz * 1000000.0);
+
+ return 0;
+}
diff --git a/platform/linux-generic/include/odp/atomic.h b/platform/linux-generic/include/odp/atomic.h
index 10cf361b4..e262f4851 100644
--- a/platform/linux-generic/include/odp/atomic.h
+++ b/platform/linux-generic/include/odp/atomic.h
@@ -84,6 +84,45 @@ static inline void odp_atomic_dec_u32(odp_atomic_u32_t *atom)
(void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
}
+static inline int odp_atomic_cas_u32(odp_atomic_u32_t *atom, uint32_t *old_val,
+ uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+}
+
+static inline uint32_t odp_atomic_xchg_u32(odp_atomic_u32_t *atom,
+ uint32_t new_val)
+{
+ return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
+}
+
+static inline void odp_atomic_max_u32(odp_atomic_u32_t *atom, uint32_t new_max)
+{
+ uint32_t old_val;
+
+ old_val = odp_atomic_load_u32(atom);
+
+ while (new_max > old_val) {
+ if (odp_atomic_cas_u32(atom, &old_val, new_max))
+ break;
+ }
+}
+
+static inline void odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t new_min)
+{
+ uint32_t old_val;
+
+ old_val = odp_atomic_load_u32(atom);
+
+ while (new_min < old_val) {
+ if (odp_atomic_cas_u32(atom, &old_val, new_min))
+ break;
+ }
+}
+
static inline void odp_atomic_init_u64(odp_atomic_u64_t *atom, uint64_t val)
{
atom->v = val;
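
Note that odp_atomic_cas_u32() follows the __atomic convention: on failure the current value is written back through 'old_val', so a retry loop (like the max/min helpers above) needs no explicit reload. A sketch of a hypothetical read-modify-write built on it:

    /* Saturating increment: never grows the counter beyond 'limit' */
    static inline void atomic_inc_limited_u32(odp_atomic_u32_t *atom,
                                              uint32_t limit)
    {
        uint32_t old_val = odp_atomic_load_u32(atom);

        /* On CAS failure, old_val is refreshed with the current value */
        while (old_val < limit &&
               !odp_atomic_cas_u32(atom, &old_val, old_val + 1))
            ;
    }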
@@ -185,6 +224,189 @@ static inline void odp_atomic_dec_u64(odp_atomic_u64_t *atom)
#endif
}
+static inline int odp_atomic_cas_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+#else
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+#endif
+}
+
+static inline uint64_t odp_atomic_xchg_u64(odp_atomic_u64_t *atom,
+ uint64_t new_val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ return ATOMIC_OP(atom, atom->v = new_val);
+#else
+ return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
+#endif
+}
+
+static inline void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_max)
+{
+ uint64_t old_val;
+
+ old_val = odp_atomic_load_u64(atom);
+
+ while (new_max > old_val) {
+ if (odp_atomic_cas_u64(atom, &old_val, new_max))
+ break;
+ }
+}
+
+static inline void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_min)
+{
+ uint64_t old_val;
+
+ old_val = odp_atomic_load_u64(atom);
+
+ while (new_min < old_val) {
+ if (odp_atomic_cas_u64(atom, &old_val, new_min))
+ break;
+ }
+}
+
+static inline uint32_t odp_atomic_load_acq_u32(odp_atomic_u32_t *atom)
+{
+ return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
+}
+
+static inline void odp_atomic_store_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t val)
+{
+ __atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+static inline void odp_atomic_add_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t val)
+{
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+static inline void odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t val)
+{
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
+}
+
+static inline int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED);
+}
+
+static inline int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELEASE,
+ __ATOMIC_RELAXED);
+}
+
+static inline int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val,
+ uint32_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_RELAXED);
+}
+
+static inline uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ return ATOMIC_OP(atom, (void)0);
+#else
+ return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
+#endif
+}
+
+static inline void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ (void)ATOMIC_OP(atom, atom->v = val);
+#else
+ __atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
+#endif
+}
+
+static inline void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ (void)ATOMIC_OP(atom, atom->v += val);
+#else
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
+#endif
+}
+
+static inline void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ (void)ATOMIC_OP(atom, atom->v -= val);
+#else
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
+#endif
+}
+
+static inline int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+#else
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED);
+#endif
+}
+
+static inline int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+#else
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELEASE,
+ __ATOMIC_RELAXED);
+#endif
+}
+
+static inline int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val,
+ uint64_t new_val)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+#else
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_ACQ_REL,
+ __ATOMIC_RELAXED);
+#endif
+}
+
/**
* @}
*/
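
The new release/acquire variants are meant for ordered hand-off between threads, as in this sketch, where 'data' is a plain field and 'ready' an odp_atomic_u32_t, both assumed shared, and compute()/consume() are hypothetical:

    /* Producer: write data, then publish it with a release store */
    shared->data = compute();
    odp_atomic_store_rel_u32(&shared->ready, 1);

    /* Consumer: acquire the flag, then read data safely */
    while (odp_atomic_load_acq_u32(&shared->ready) == 0)
        odp_cpu_pause();
    consume(shared->data); /* guaranteed to observe the producer's write */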
diff --git a/platform/linux-generic/include/odp/byteorder.h b/platform/linux-generic/include/odp/byteorder.h
index 7fc7dc512..6c94556b0 100644
--- a/platform/linux-generic/include/odp/byteorder.h
+++ b/platform/linux-generic/include/odp/byteorder.h
@@ -25,7 +25,7 @@ extern "C" {
* @{
*/
-static inline uint16_t odp_be_to_cpu_16(uint16be_t be16)
+static inline uint16_t odp_be_to_cpu_16(odp_u16be_t be16)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
return __odp_builtin_bswap16((__odp_force uint16_t)be16);
@@ -34,7 +34,7 @@ static inline uint16_t odp_be_to_cpu_16(uint16be_t be16)
#endif
}
-static inline uint32_t odp_be_to_cpu_32(uint32be_t be32)
+static inline uint32_t odp_be_to_cpu_32(odp_u32be_t be32)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
return __builtin_bswap32((__odp_force uint32_t)be32);
@@ -43,7 +43,7 @@ static inline uint32_t odp_be_to_cpu_32(uint32be_t be32)
#endif
}
-static inline uint64_t odp_be_to_cpu_64(uint64be_t be64)
+static inline uint64_t odp_be_to_cpu_64(odp_u64be_t be64)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
return __builtin_bswap64((__odp_force uint64_t)be64);
@@ -53,35 +53,35 @@ static inline uint64_t odp_be_to_cpu_64(uint64be_t be64)
}
-static inline uint16be_t odp_cpu_to_be_16(uint16_t cpu16)
+static inline odp_u16be_t odp_cpu_to_be_16(uint16_t cpu16)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
- return (__odp_force uint16be_t)__odp_builtin_bswap16(cpu16);
+ return (__odp_force odp_u16be_t)__odp_builtin_bswap16(cpu16);
#else
- return (__odp_force uint16be_t)cpu16;
+ return (__odp_force odp_u16be_t)cpu16;
#endif
}
-static inline uint32be_t odp_cpu_to_be_32(uint32_t cpu32)
+static inline odp_u32be_t odp_cpu_to_be_32(uint32_t cpu32)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
- return (__odp_force uint32be_t)__builtin_bswap32(cpu32);
+ return (__odp_force odp_u32be_t)__builtin_bswap32(cpu32);
#else
- return (__odp_force uint32be_t)cpu32;
+ return (__odp_force odp_u32be_t)cpu32;
#endif
}
-static inline uint64be_t odp_cpu_to_be_64(uint64_t cpu64)
+static inline odp_u64be_t odp_cpu_to_be_64(uint64_t cpu64)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
- return (__odp_force uint64be_t)__builtin_bswap64(cpu64);
+ return (__odp_force odp_u64be_t)__builtin_bswap64(cpu64);
#else
- return (__odp_force uint64be_t)cpu64;
+ return (__odp_force odp_u64be_t)cpu64;
#endif
}
-static inline uint16_t odp_le_to_cpu_16(uint16le_t le16)
+static inline uint16_t odp_le_to_cpu_16(odp_u16le_t le16)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
return (__odp_force uint16_t)le16;
@@ -90,7 +90,7 @@ static inline uint16_t odp_le_to_cpu_16(uint16le_t le16)
#endif
}
-static inline uint32_t odp_le_to_cpu_32(uint32le_t le32)
+static inline uint32_t odp_le_to_cpu_32(odp_u32le_t le32)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
return (__odp_force uint32_t)le32;
@@ -99,7 +99,7 @@ static inline uint32_t odp_le_to_cpu_32(uint32le_t le32)
#endif
}
-static inline uint64_t odp_le_to_cpu_64(uint64le_t le64)
+static inline uint64_t odp_le_to_cpu_64(odp_u64le_t le64)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
return (__odp_force uint64_t)le64;
@@ -109,30 +109,30 @@ static inline uint64_t odp_le_to_cpu_64(uint64le_t le64)
}
-static inline uint16le_t odp_cpu_to_le_16(uint16_t cpu16)
+static inline odp_u16le_t odp_cpu_to_le_16(uint16_t cpu16)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
- return (__odp_force uint16le_t)cpu16;
+ return (__odp_force odp_u16le_t)cpu16;
#else
- return (__odp_force uint16le_t)__odp_builtin_bswap16(cpu16);
+ return (__odp_force odp_u16le_t)__odp_builtin_bswap16(cpu16);
#endif
}
-static inline uint32le_t odp_cpu_to_le_32(uint32_t cpu32)
+static inline odp_u32le_t odp_cpu_to_le_32(uint32_t cpu32)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
- return (__odp_force uint32le_t)cpu32;
+ return (__odp_force odp_u32le_t)cpu32;
#else
- return (__odp_force uint32le_t)__builtin_bswap32(cpu32);
+ return (__odp_force odp_u32le_t)__builtin_bswap32(cpu32);
#endif
}
-static inline uint64le_t odp_cpu_to_le_64(uint64_t cpu64)
+static inline odp_u64le_t odp_cpu_to_le_64(uint64_t cpu64)
{
#if ODP_BYTE_ORDER == ODP_LITTLE_ENDIAN
- return (__odp_force uint64le_t)cpu64;
+ return (__odp_force odp_u64le_t)cpu64;
#else
- return (__odp_force uint64le_t)__builtin_bswap64(cpu64);
+ return (__odp_force odp_u64le_t)__builtin_bswap64(cpu64);
#endif
}
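
With the renamed types, wire-format fields are declared and converted as in this sketch (the header layout and packet_l4_ptr are illustrative):

    typedef struct {
        odp_u16be_t src_port; /* big endian on the wire */
        odp_u16be_t dst_port;
    } udp_like_hdr_t;

    udp_like_hdr_t *hdr = packet_l4_ptr; /* assumed to point at the header */

    uint16_t dst = odp_be_to_cpu_16(hdr->dst_port); /* wire -> host */
    hdr->src_port = odp_cpu_to_be_16(1234);         /* host -> wire */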
diff --git a/platform/linux-generic/include/odp/cpu.h b/platform/linux-generic/include/odp/cpu.h
index b5b532005..b98507dd5 100644
--- a/platform/linux-generic/include/odp/cpu.h
+++ b/platform/linux-generic/include/odp/cpu.h
@@ -17,6 +17,8 @@
extern "C" {
#endif
+#include <odp/cpu_arch.h>
+
#include <odp/api/cpu.h>
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp/init.h b/platform/linux-generic/include/odp/init.h
index 950a4f829..3233e36de 100644
--- a/platform/linux-generic/include/odp/init.h
+++ b/platform/linux-generic/include/odp/init.h
@@ -17,6 +17,8 @@
extern "C" {
#endif
+#include <odp/plat/init_types.h>
+
/** @ingroup odp_initialization
* @{
*/
diff --git a/platform/linux-generic/include/odp/plat/atomic_types.h b/platform/linux-generic/include/odp/plat/atomic_types.h
index 0fe15ed13..bc0bd8bfe 100644
--- a/platform/linux-generic/include/odp/plat/atomic_types.h
+++ b/platform/linux-generic/include/odp/plat/atomic_types.h
@@ -43,6 +43,21 @@ struct odp_atomic_u32_s {
} ODP_ALIGNED(sizeof(uint32_t)); /* Enforce alignement! */;
#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+
+/**
+ * @internal
+ * CAS operation expression for the ATOMIC_OP macro
+ */
+#define ATOMIC_CAS_OP(ret_ptr, old_val, new_val) \
+({ \
+ if (atom->v == (old_val)) { \
+ atom->v = (new_val); \
+ *(ret_ptr) = 1; \
+ } else { \
+ *(ret_ptr) = 0; \
+ } \
+})
+
/**
* @internal
* Helper macro for lock-based atomic operations on 64-bit integers
@@ -52,14 +67,14 @@ struct odp_atomic_u32_s {
*/
#define ATOMIC_OP(atom, expr) \
({ \
- uint64_t old_val; \
+ uint64_t _old_val; \
/* Loop while lock is already taken, stop when lock becomes clear */ \
while (__atomic_test_and_set(&(atom)->lock, __ATOMIC_ACQUIRE)) \
(void)0; \
- old_val = (atom)->v; \
+ _old_val = (atom)->v; \
(expr); /* Perform whatever update is desired */ \
__atomic_clear(&(atom)->lock, __ATOMIC_RELEASE); \
- old_val; /* Return old value */ \
+ _old_val; /* Return old value */ \
})
#endif
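
For reference, the two macros compose as in these fragments (mirroring the odp_atomic_*_u64() fallbacks elsewhere in this patch): ATOMIC_OP() runs the expression under the per-atom spinlock and yields the pre-update value.

    /* Locked fetch-and-add on platforms without lock-free 64-bit atomics */
    old = ATOMIC_OP(atom, atom->v += val);

    /* Locked compare-and-swap: ret receives 1 on success, 0 on failure */
    *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));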
diff --git a/platform/linux-generic/include/odp/plat/byteorder_types.h b/platform/linux-generic/include/odp/plat/byteorder_types.h
index cf917b1a2..0a8e4096e 100644
--- a/platform/linux-generic/include/odp/plat/byteorder_types.h
+++ b/platform/linux-generic/include/odp/plat/byteorder_types.h
@@ -67,17 +67,17 @@ extern "C" {
#define ODP_BYTE_ORDER ODP_BIG_ENDIAN
#endif
-typedef uint16_t __odp_bitwise uint16le_t;
-typedef uint16_t __odp_bitwise uint16be_t;
+typedef uint16_t __odp_bitwise odp_u16le_t;
+typedef uint16_t __odp_bitwise odp_u16be_t;
-typedef uint32_t __odp_bitwise uint32le_t;
-typedef uint32_t __odp_bitwise uint32be_t;
+typedef uint32_t __odp_bitwise odp_u32le_t;
+typedef uint32_t __odp_bitwise odp_u32be_t;
-typedef uint64_t __odp_bitwise uint64le_t;
-typedef uint64_t __odp_bitwise uint64be_t;
+typedef uint64_t __odp_bitwise odp_u64le_t;
+typedef uint64_t __odp_bitwise odp_u64be_t;
-typedef uint16_t __odp_bitwise uint16sum_t;
-typedef uint32_t __odp_bitwise uint32sum_t;
+typedef uint16_t __odp_bitwise odp_u16sum_t;
+typedef uint32_t __odp_bitwise odp_u32sum_t;
/**
* @}
diff --git a/platform/linux-generic/include/odp/plat/init_types.h b/platform/linux-generic/include/odp/plat/init_types.h
new file mode 100644
index 000000000..b240c93ca
--- /dev/null
+++ b/platform/linux-generic/include/odp/plat/init_types.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * ODP initialization.
+ */
+
+#ifndef ODP_INIT_TYPES_H_
+#define ODP_INIT_TYPES_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @internal platform specific data
+ */
+typedef struct odp_platform_init_t {
+} odp_platform_init_t;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/platform/linux-generic/include/odp/plat/packet_io_types.h b/platform/linux-generic/include/odp/plat/packet_io_types.h
index 3cc64c6d2..934d7de97 100644
--- a/platform/linux-generic/include/odp/plat/packet_io_types.h
+++ b/platform/linux-generic/include/odp/plat/packet_io_types.h
@@ -21,16 +21,26 @@ extern "C" {
#include <odp/std_types.h>
#include <odp/plat/strong_types.h>
-/** @addtogroup odp_packet_io ODP PACKET IO
+/** @addtogroup odp_packet_io
* Operations on a packet.
* @{
*/
typedef ODP_HANDLE_T(odp_pktio_t);
-#define ODP_PKTIO_INVALID _odp_cast_scalar(odp_pktio_t, 0)
+/** @internal */
+typedef struct odp_pktin_queue_t {
+ odp_pktio_t pktio; /**< @internal pktio handle */
+ int index; /**< @internal pktio queue index */
+} odp_pktin_queue_t;
+
+/** @internal */
+typedef struct odp_pktout_queue_t {
+ odp_pktio_t pktio; /**< @internal pktio handle */
+ int index; /**< @internal pktio queue index */
+} odp_pktout_queue_t;
-#define ODP_PKTIO_ANY _odp_cast_scalar(odp_pktio_t, ~0)
+#define ODP_PKTIO_INVALID _odp_cast_scalar(odp_pktio_t, 0)
#define ODP_PKTIO_MACADDR_MAXSIZE 16
diff --git a/platform/linux-generic/include/odp/plat/queue_types.h b/platform/linux-generic/include/odp/plat/queue_types.h
index a7df15576..40a53e5e6 100644
--- a/platform/linux-generic/include/odp/plat/queue_types.h
+++ b/platform/linux-generic/include/odp/plat/queue_types.h
@@ -33,14 +33,6 @@ typedef ODP_HANDLE_T(odp_queue_group_t);
#define ODP_QUEUE_NAME_LEN 32
-
-typedef int odp_queue_type_t;
-
-#define ODP_QUEUE_TYPE_SCHED 0
-#define ODP_QUEUE_TYPE_POLL 1
-#define ODP_QUEUE_TYPE_PKTIN 2
-#define ODP_QUEUE_TYPE_PKTOUT 3
-
/** Get printable format of odp_queue_t */
static inline uint64_t odp_queue_to_u64(odp_queue_t hdl)
{
diff --git a/platform/linux-generic/include/odp/plat/schedule_types.h b/platform/linux-generic/include/odp/plat/schedule_types.h
index 21fcbb84c..a4a352c04 100644
--- a/platform/linux-generic/include/odp/plat/schedule_types.h
+++ b/platform/linux-generic/include/odp/plat/schedule_types.h
@@ -37,7 +37,7 @@ typedef int odp_schedule_prio_t;
typedef int odp_schedule_sync_t;
-#define ODP_SCHED_SYNC_NONE 0
+#define ODP_SCHED_SYNC_PARALLEL 0
#define ODP_SCHED_SYNC_ATOMIC 1
#define ODP_SCHED_SYNC_ORDERED 2
diff --git a/platform/linux-generic/include/odp/std_clib.h b/platform/linux-generic/include/odp/std_clib.h
index c939c48e9..11c59bec2 100644
--- a/platform/linux-generic/include/odp/std_clib.h
+++ b/platform/linux-generic/include/odp/std_clib.h
@@ -23,6 +23,11 @@ static inline void *odp_memset(void *ptr, int value, size_t num)
return memset(ptr, value, num);
}
+static inline int odp_memcmp(const void *ptr1, const void *ptr2, size_t num)
+{
+ return memcmp(ptr1, ptr2, num);
+}
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_atomic_internal.h b/platform/linux-generic/include/odp_atomic_internal.h
index ce62368e1..ff3813f73 100644
--- a/platform/linux-generic/include/odp_atomic_internal.h
+++ b/platform/linux-generic/include/odp_atomic_internal.h
@@ -64,12 +64,6 @@ typedef enum {
_ODP_MEMMODEL_SC = __ATOMIC_SEQ_CST
} _odp_memmodel_t;
-/**
- * Insert a full memory barrier (fence) in the compiler and instruction
- * sequence.
- */
-#define _ODP_FULL_BARRIER() __atomic_thread_fence(__ATOMIC_SEQ_CST)
-
/*****************************************************************************
* Operations on 32-bit atomics
* _odp_atomic_u32_load_mm - return current value
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index 5b6520266..27d8a526c 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -54,7 +54,7 @@ Stores the Term and Value mapping for a PMR.
The maximum size of value currently supported in 64 bits
**/
typedef struct pmr_term_value {
- odp_pmr_term_e term; /* PMR Term */
+ odp_pmr_term_t term; /* PMR Term */
uint64_t val; /**< Value to be matched */
uint64_t mask; /**< Masked set of bits to be matched */
uint32_t offset; /**< Offset if term == ODP_PMR_CUSTOM_FRAME */
diff --git a/platform/linux-generic/include/odp_classification_inlines.h b/platform/linux-generic/include/odp_classification_inlines.h
index 5f0b564f7..96cf77ee2 100644
--- a/platform/linux-generic/include/odp_classification_inlines.h
+++ b/platform/linux-generic/include/odp_classification_inlines.h
@@ -154,11 +154,28 @@ static inline int verify_pmr_udp_sport(const uint8_t *pkt_addr,
return 0;
}
-static inline int verify_pmr_dmac(const uint8_t *pkt_addr ODP_UNUSED,
- odp_packet_hdr_t *pkt_hdr ODP_UNUSED,
- pmr_term_value_t *term_value ODP_UNUSED)
+static inline int verify_pmr_dmac(const uint8_t *pkt_addr,
+ odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
{
- ODP_UNIMPLEMENTED();
+ uint64_t dmac = 0;
+ uint64_t dmac_be = 0;
+ const odph_ethhdr_t *eth;
+
+ if (!pkt_hdr->input_flags.eth)
+ return 0;
+
+ eth = (const odph_ethhdr_t *)(pkt_addr + pkt_hdr->l2_offset);
+ memcpy(&dmac_be, eth->dst.addr, ODPH_ETHADDR_LEN);
+ dmac = odp_be_to_cpu_64(dmac_be);
+ /* Since a 48 bit ethernet address is converted from BE to CPU
+ format using odp_be_to_cpu_64(), the value needs to be shifted
+ right by the remaining 16 bits when the byte order was swapped */
+ if (dmac_be != dmac)
+ dmac = dmac >> (64 - (ODPH_ETHADDR_LEN * 8));
+
+ if (term_value->val == (dmac & term_value->mask))
+ return 1;
return 0;
}
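
A concrete trace of the shift above on a little-endian host, for the hypothetical address 00:11:22:33:44:55:

    /* dmac_be in memory (low 6 bytes filled): 00 11 22 33 44 55 00 00
     * odp_be_to_cpu_64(dmac_be)             : 0x0011223344550000
     * dmac >> 16                            : 0x0000001122334455
     * i.e. the 48-bit address, ready to mask and compare */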
diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h
index b22f95698..e75154a51 100644
--- a/platform/linux-generic/include/odp_internal.h
+++ b/platform/linux-generic/include/odp_internal.h
@@ -20,16 +20,20 @@ extern "C" {
#include <odp/init.h>
#include <odp/thread.h>
+#include <stdio.h>
extern __thread int __odp_errno;
+#define MAX_CPU_NUMBER 128
+
typedef struct {
- uint64_t cpu_hz;
+ uint64_t cpu_hz_max[MAX_CPU_NUMBER];
uint64_t huge_page_size;
uint64_t page_size;
int cache_line_size;
int cpu_count;
- char model_str[128];
+ char cpu_arch_str[128];
+ char model_str[MAX_CPU_NUMBER][128];
} odp_system_info_t;
struct odp_global_data_s {
@@ -103,6 +107,9 @@ int odp_time_term_global(void);
void _odp_flush_caches(void);
+int odp_cpuinfo_parser(FILE *file, odp_system_info_t *sysinfo);
+uint64_t odp_cpu_hz_current(int id);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index 3ab0bc86f..f871f0a76 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -20,9 +20,6 @@ extern "C" {
#include <odp/spinlock.h>
#include <odp/ticketlock.h>
-#include <odp_packet_socket.h>
-#include <odp_packet_netmap.h>
-#include <odp_packet_tap.h>
#include <odp_classification_datamodel.h>
#include <odp_align_internal.h>
#include <odp_debug_internal.h>
@@ -31,8 +28,16 @@ extern "C" {
#include <odp/hints.h>
#include <net/if.h>
+#define PKTIO_MAX_QUEUES 64
+#include <odp_packet_socket.h>
+#include <odp_packet_netmap.h>
+#include <odp_packet_tap.h>
+
#define PKTIO_NAME_LEN 256
+#define PKTIN_INVALID ((odp_pktin_queue_t) {ODP_PKTIO_INVALID, 0})
+#define PKTOUT_INVALID ((odp_pktout_queue_t) {ODP_PKTIO_INVALID, 0})
+
/** Determine if a socket read/write error should be reported. Transient errors
* that simply require the caller to retry are ignored, the _send/_recv APIs
* are non-blocking and it is the caller's responsibility to retry if the
@@ -70,7 +75,6 @@ struct pktio_entry {
int taken; /**< is entry taken(1) or free(0) */
int cls_enabled; /**< is classifier enabled */
odp_pktio_t handle; /**< pktio handle */
- odp_queue_t inq_default; /**< default input queue, if set */
odp_queue_t outq_default; /**< default out queue */
union {
pkt_loop_t pkt_loop; /**< Using loopback for IO */
@@ -88,10 +92,30 @@ struct pktio_entry {
STATE_STOP
} state;
classifier_t cls; /**< classifier linked with this pktio*/
+ odp_pktio_stats_t stats; /**< statistic counters for pktio */
+ enum {
+ STATS_SYSFS = 0,
+ STATS_ETHTOOL,
+ STATS_UNSUPPORTED
+ } stats_type;
char name[PKTIO_NAME_LEN]; /**< name of pktio provided to
pktio_open() */
odp_pktio_t id;
odp_pktio_param_t param;
+
+ /* Storage for queue handles
+ * Multi-queue support is pktio driver specific */
+ unsigned num_in_queue;
+ unsigned num_out_queue;
+
+ struct {
+ odp_queue_t queue;
+ odp_pktin_queue_t pktin;
+ } in_queue[PKTIO_MAX_QUEUES];
+
+ struct {
+ odp_pktout_queue_t pktout;
+ } out_queue[PKTIO_MAX_QUEUES];
};
typedef union {
@@ -107,6 +131,7 @@ typedef struct {
int is_free(pktio_entry_t *entry);
typedef struct pktio_if_ops {
+ const char *name;
int (*init)(void);
int (*term)(void);
int (*open)(odp_pktio_t pktio, pktio_entry_t *pktio_entry,
@@ -114,6 +139,8 @@ typedef struct pktio_if_ops {
int (*close)(pktio_entry_t *pktio_entry);
int (*start)(pktio_entry_t *pktio_entry);
int (*stop)(pktio_entry_t *pktio_entry);
+ int (*stats)(pktio_entry_t *pktio_entry, odp_pktio_stats_t *stats);
+ int (*stats_reset)(pktio_entry_t *pktio_entry);
int (*recv)(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
unsigned len);
int (*send)(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
@@ -122,6 +149,22 @@ typedef struct pktio_if_ops {
int (*promisc_mode_set)(pktio_entry_t *pktio_entry, int enable);
int (*promisc_mode_get)(pktio_entry_t *pktio_entry);
int (*mac_get)(pktio_entry_t *pktio_entry, void *mac_addr);
+ int (*link_status)(pktio_entry_t *pktio_entry);
+ int (*capability)(pktio_entry_t *pktio_entry,
+ odp_pktio_capability_t *capa);
+ int (*input_queues_config)(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *param);
+ int (*output_queues_config)(pktio_entry_t *pktio_entry,
+ const odp_pktout_queue_param_t *p);
+ int (*in_queues)(pktio_entry_t *entry, odp_queue_t queues[], int num);
+ int (*pktin_queues)(pktio_entry_t *entry, odp_pktin_queue_t queues[],
+ int num);
+ int (*pktout_queues)(pktio_entry_t *entry, odp_pktout_queue_t queues[],
+ int num);
+ int (*recv_queue)(pktio_entry_t *entry, int index,
+ odp_packet_t packets[], int num);
+ int (*send_queue)(pktio_entry_t *entry, int index,
+ odp_packet_t packets[], int num);
} pktio_if_ops_t;
int _odp_packet_cls_enq(pktio_entry_t *pktio_entry, const uint8_t *base,
@@ -158,7 +201,25 @@ static inline void pktio_cls_enabled_set(pktio_entry_t *entry, int ena)
entry->s.cls_enabled = ena;
}
-int pktin_poll(pktio_entry_t *entry);
+int pktin_poll(pktio_entry_t *entry, int num_queue, int index[]);
+
+/*
+ * Dummy single queue implementations of multi-queue API
+ */
+int single_capability(odp_pktio_capability_t *capa);
+int single_input_queues_config(pktio_entry_t *entry,
+ const odp_pktin_queue_param_t *param);
+int single_output_queues_config(pktio_entry_t *entry,
+ const odp_pktout_queue_param_t *param);
+int single_in_queues(pktio_entry_t *entry, odp_queue_t queues[], int num);
+int single_pktin_queues(pktio_entry_t *entry, odp_pktin_queue_t queues[],
+ int num);
+int single_pktout_queues(pktio_entry_t *entry, odp_pktout_queue_t queues[],
+ int num);
+int single_recv_queue(pktio_entry_t *entry, int index, odp_packet_t packets[],
+ int num);
+int single_send_queue(pktio_entry_t *entry, int index, odp_packet_t packets[],
+ int num);
extern const pktio_if_ops_t netmap_pktio_ops;
extern const pktio_if_ops_t sock_mmsg_pktio_ops;
@@ -170,6 +231,13 @@ extern const pktio_if_ops_t pcap_pktio_ops;
extern const pktio_if_ops_t tap_pktio_ops;
extern const pktio_if_ops_t * const pktio_if_ops[];
+int sysfs_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats);
+int sock_stats_fd(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats,
+ int fd);
+int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd);
+
#ifdef __cplusplus
}
#endif
diff --git a/platform/linux-generic/include/odp_packet_netmap.h b/platform/linux-generic/include/odp_packet_netmap.h
index 0577dfe20..26a8da1eb 100644
--- a/platform/linux-generic/include/odp_packet_netmap.h
+++ b/platform/linux-generic/include/odp_packet_netmap.h
@@ -7,19 +7,55 @@
#ifndef ODP_PACKET_NETMAP_H
#define ODP_PACKET_NETMAP_H
+#include <odp/align.h>
+#include <odp/debug.h>
+#include <odp/packet_io.h>
#include <odp/pool.h>
+#include <odp/ticketlock.h>
+#include <odp_align_internal.h>
#include <linux/if_ether.h>
+#include <net/if.h>
+
+#define NM_MAX_DESC 32
+
+/** Ring for mapping pktin/pktout queues to netmap descriptors */
+struct netmap_ring_t {
+ unsigned first; /**< Index of first netmap descriptor */
+ unsigned last; /**< Index of last netmap descriptor */
+ unsigned num; /**< Number of netmap descriptors */
+ /** Netmap metadata for the device */
+ struct nm_desc *desc[NM_MAX_DESC];
+ unsigned cur; /**< Index of current netmap descriptor */
+ odp_ticketlock_t lock; /**< Queue lock */
+};
+
+typedef union {
+ struct netmap_ring_t s;
+ uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct netmap_ring_t))];
+} netmap_ring_t ODP_ALIGNED_CACHE;
/** Packet socket using netmap mmaped rings for both Rx and Tx */
typedef struct {
odp_pool_t pool; /**< pool to alloc packets from */
size_t max_frame_len; /**< buf_size - sizeof(pkt_hdr) */
- struct nm_desc *rx_desc; /**< netmap meta-data for the device */
- struct nm_desc *tx_desc; /**< netmap meta-data for the device */
uint32_t if_flags; /**< interface flags */
+ uint32_t mtu; /**< maximum transmission unit */
int sockfd; /**< control socket */
unsigned char if_mac[ETH_ALEN]; /**< eth mac address */
+ char nm_name[IF_NAMESIZE + 7]; /**< netmap:<ifname> */
+ odp_pktio_capability_t capa; /**< interface capabilities */
+ unsigned cur_rx_queue; /**< current pktin queue */
+ uint32_t num_rx_rings; /**< number of nm rx rings */
+ uint32_t num_tx_rings; /**< number of nm tx rings */
+ unsigned num_rx_desc_rings; /**< number of rx descriptor rings */
+ unsigned num_tx_desc_rings; /**< number of tx descriptor rings */
+ odp_bool_t lockless_rx; /**< no locking for rx */
+ odp_bool_t lockless_tx; /**< no locking for tx */
+ /** mapping of pktin queues to netmap rx descriptors */
+ netmap_ring_t rx_desc_ring[PKTIO_MAX_QUEUES];
+ /** mapping of pktout queues to netmap tx descriptors */
+ netmap_ring_t tx_desc_ring[PKTIO_MAX_QUEUES];
} pkt_netmap_t;
#endif
diff --git a/platform/linux-generic/include/odp_packet_socket.h b/platform/linux-generic/include/odp_packet_socket.h
index 1eaafb7e5..a7797d1c0 100644
--- a/platform/linux-generic/include/odp_packet_socket.h
+++ b/platform/linux-generic/include/odp_packet_socket.h
@@ -18,6 +18,7 @@
#include <odp/debug.h>
#include <odp/pool.h>
#include <odp/packet.h>
+#include <odp/packet_io.h>
#include <linux/version.h>
@@ -116,4 +117,60 @@ int promisc_mode_set_fd(int fd, const char *name, int enable);
*/
int promisc_mode_get_fd(int fd, const char *name);
+/**
+ * Return link status of a packet socket (up/down)
+ */
+int link_status_fd(int fd, const char *name);
+
+/**
+ * Get enabled RSS hash protocols of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param[out] hash_proto Hash protocols
+ *
+ * @returns Number of enabled hash protocols
+ */
+int rss_conf_get_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto);
+
+/**
+ * Get supported RSS hash protocols of a packet socket
+ *
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param[out] hash_proto Hash protocols
+ *
+ * @returns Number of supported hash protocols
+ */
+int rss_conf_get_supported_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto);
+
+/**
+ * Set RSS hash protocols of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param proto Hash protocols
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+int rss_conf_set_fd(int fd, const char *name,
+ const odp_pktin_hash_proto_t *proto);
+
+/**
+ * Print enabled RSS hash protocols
+ *
+ * @param hash_proto Hash protocols
+ */
+void rss_conf_print(const odp_pktin_hash_proto_t *hash_proto);
+
+/**
+ * Get ethtool statistics of a packet socket
+ */
+int ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats);
+
#endif
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index ae4836cc8..fdac6067e 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -28,7 +28,6 @@ extern "C" {
#include <odp/debug.h>
#include <odp/shared_memory.h>
#include <odp/atomic.h>
-#include <odp_atomic_internal.h>
#include <odp/thread.h>
#include <string.h>
diff --git a/platform/linux-generic/include/odp_queue_internal.h b/platform/linux-generic/include/odp_queue_internal.h
index 1cc0ed26e..2d349466f 100644
--- a/platform/linux-generic/include/odp_queue_internal.h
+++ b/platform/linux-generic/include/odp_queue_internal.h
@@ -76,7 +76,7 @@ struct queue_entry_s {
odp_event_t cmd_ev;
odp_queue_type_t type;
odp_queue_param_t param;
- odp_pktio_t pktin;
+ odp_pktin_queue_t pktin;
odp_pktio_t pktout;
char name[ODP_QUEUE_NAME_LEN];
uint64_t order_in;
diff --git a/platform/linux-generic/include/odp_schedule_internal.h b/platform/linux-generic/include/odp_schedule_internal.h
index 6b301cd18..08683941a 100644
--- a/platform/linux-generic/include/odp_schedule_internal.h
+++ b/platform/linux-generic/include/odp_schedule_internal.h
@@ -23,7 +23,8 @@ extern "C" {
int schedule_queue_init(queue_entry_t *qe);
void schedule_queue_destroy(queue_entry_t *qe);
int schedule_queue(const queue_entry_t *qe);
-int schedule_pktio_start(odp_pktio_t pktio, int prio);
+void schedule_pktio_start(odp_pktio_t pktio, int num_in_queue,
+ int in_queue_idx[]);
void odp_schedule_release_context(void);
#ifdef __cplusplus
diff --git a/platform/linux-generic/include/odp_spin_internal.h b/platform/linux-generic/include/odp_spin_internal.h
deleted file mode 100644
index 29c524fb1..000000000
--- a/platform/linux-generic/include/odp_spin_internal.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-
-#ifndef ODP_SPIN_INTERNAL_H_
-#define ODP_SPIN_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- * Spin loop for ODP internal use
- */
-static inline void odp_spin(void)
-{
-#if defined __x86_64__ || defined __i386__
-
-#ifdef __SSE2__
- __asm__ __volatile__ ("pause");
-#else
- __asm__ __volatile__ ("rep; nop");
-#endif
-
-#elif defined __arm__
-
-#if __ARM_ARCH == 7
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
-#endif
-
-#elif defined __OCTEON__
-
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
- __asm__ __volatile__ ("nop");
-
-#endif
-}
-
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/odp_atomic.c b/platform/linux-generic/odp_atomic.c
new file mode 100644
index 000000000..5b71ecff3
--- /dev/null
+++ b/platform/linux-generic/odp_atomic.c
@@ -0,0 +1,26 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/atomic.h>
+
+int odp_atomic_lock_free_u64(odp_atomic_op_t *atomic_op)
+{
+#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+ /* All operations have locks */
+ if (atomic_op)
+ atomic_op->all_bits = 0;
+
+ return 0;
+#else
+ /* All operations are lock-free */
+ if (atomic_op) {
+ atomic_op->all_bits = ~((uint32_t)0);
+ atomic_op->op.init = 0;
+ }
+
+ return 2;
+#endif
+}
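
A sketch of querying the result at startup; the return convention matches C11 atomic_is_lock_free() (0 = never lock-free, 1 = sometimes, 2 = always):

    odp_atomic_op_t ops;

    if (odp_atomic_lock_free_u64(&ops) < 2) {
        /* 64-bit atomics may take a lock: prefer u32 counters here */
    }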
diff --git a/platform/linux-generic/odp_barrier.c b/platform/linux-generic/odp_barrier.c
index 53d83c0dd..f3525e209 100644
--- a/platform/linux-generic/odp_barrier.c
+++ b/platform/linux-generic/odp_barrier.c
@@ -6,8 +6,8 @@
#include <odp/barrier.h>
#include <odp/sync.h>
-#include <odp_spin_internal.h>
-#include <odp_atomic_internal.h>
+#include <odp/cpu.h>
+#include <odp/atomic.h>
void odp_barrier_init(odp_barrier_t *barrier, int count)
{
@@ -27,13 +27,13 @@ void odp_barrier_init(odp_barrier_t *barrier, int count)
* the cycle the barrier was in upon entry. Exit is when the
* barrier crosses to the other half of the cycle.
*/
-
void odp_barrier_wait(odp_barrier_t *barrier)
{
uint32_t count;
int wasless;
- _ODP_FULL_BARRIER();
+ odp_mb_full();
+
count = odp_atomic_fetch_inc_u32(&barrier->bar);
wasless = count < barrier->count;
@@ -43,8 +43,8 @@ void odp_barrier_wait(odp_barrier_t *barrier)
} else {
while ((odp_atomic_load_u32(&barrier->bar) < barrier->count)
== wasless)
- odp_spin();
+ odp_cpu_pause();
}
- _ODP_FULL_BARRIER();
+ odp_mb_full();
}
diff --git a/platform/linux-generic/odp_cpumask_task.c b/platform/linux-generic/odp_cpumask_task.c
index 41f2bc949..c5093e051 100644
--- a/platform/linux-generic/odp_cpumask_task.c
+++ b/platform/linux-generic/odp_cpumask_task.c
@@ -53,3 +53,14 @@ int odp_cpumask_default_control(odp_cpumask_t *mask, int num ODP_UNUSED)
odp_cpumask_set(mask, 0);
return 1;
}
+
+int odp_cpumask_all_available(odp_cpumask_t *mask)
+{
+ odp_cpumask_t mask_work, mask_ctrl;
+
+ odp_cpumask_default_worker(&mask_work, 0);
+ odp_cpumask_default_control(&mask_ctrl, 0);
+ odp_cpumask_or(mask, &mask_work, &mask_ctrl);
+
+ return odp_cpumask_count(mask);
+}
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index 540cdb8d2..a3ceec820 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -53,6 +53,8 @@ int odp_pktio_init_global(void)
odp_spinlock_init(&pktio_tbl->lock);
for (id = 1; id <= ODP_CONFIG_PKTIO_ENTRIES; ++id) {
+ odp_queue_param_t param;
+
pktio_entry = &pktio_tbl->entries[id - 1];
odp_ticketlock_init(&pktio_entry->s.rxl);
@@ -66,7 +68,10 @@ int odp_pktio_init_global(void)
snprintf(name, sizeof(name), "%i-pktio_outq_default", (int)id);
name[ODP_QUEUE_NAME_LEN-1] = '\0';
- qid = odp_queue_create(name, ODP_QUEUE_TYPE_PKTOUT, NULL);
+ odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_PKTOUT;
+
+ qid = odp_queue_create(name, &param);
if (qid == ODP_QUEUE_INVALID)
return -1;
pktio_entry->s.outq_default = qid;
@@ -133,9 +138,16 @@ static void unlock_entry_classifier(pktio_entry_t *entry)
static void init_pktio_entry(pktio_entry_t *entry)
{
+ int i;
+
set_taken(entry);
pktio_cls_enabled_set(entry, 0);
- entry->s.inq_default = ODP_QUEUE_INVALID;
+
+ for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+ entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
+ entry->s.in_queue[i].pktin = PKTIN_INVALID;
+ entry->s.out_queue[i].pktout = PKTOUT_INVALID;
+ }
pktio_classifier_init(entry);
}
@@ -208,6 +220,8 @@ static odp_pktio_t setup_pktio_entry(const char *dev, odp_pool_t pool,
if (!ret) {
pktio_entry->s.ops = pktio_if_ops[pktio_if];
+ ODP_DBG("%s uses %s\n",
+ dev, pktio_if_ops[pktio_if]->name);
break;
}
}
@@ -275,6 +289,18 @@ static int _pktio_close(pktio_entry_t *entry)
return 0;
}
+static void destroy_in_queues(pktio_entry_t *entry, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ if (entry->s.in_queue[i].queue != ODP_QUEUE_INVALID) {
+ odp_queue_destroy(entry->s.in_queue[i].queue);
+ entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
+ }
+ }
+}
+
int odp_pktio_close(odp_pktio_t id)
{
pktio_entry_t *entry;
@@ -285,6 +311,9 @@ int odp_pktio_close(odp_pktio_t id)
return -1;
lock_entry(entry);
+
+ destroy_in_queues(entry, entry->s.num_in_queue);
+
if (!is_free(entry)) {
res = _pktio_close(entry);
if (res)
@@ -298,6 +327,7 @@ int odp_pktio_close(odp_pktio_t id)
int odp_pktio_start(odp_pktio_t id)
{
pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
int res = 0;
entry = get_pktio_entry(id);
@@ -313,8 +343,26 @@ int odp_pktio_start(odp_pktio_t id)
res = entry->s.ops->start(entry);
if (!res)
entry->s.state = STATE_START;
+
unlock_entry(entry);
+ mode = entry->s.param.in_mode;
+
+ if (mode == ODP_PKTIN_MODE_SCHED) {
+ unsigned i;
+
+ for (i = 0; i < entry->s.num_in_queue; i++) {
+ int index = i;
+
+ if (entry->s.in_queue[i].queue == ODP_QUEUE_INVALID) {
+ ODP_ERR("No input queue\n");
+ return -1;
+ }
+
+ schedule_pktio_start(id, 1, &index);
+ }
+ }
+
return res;
}
@@ -379,8 +427,6 @@ odp_pktio_t odp_pktio_lookup(const char *dev)
return id;
}
-
-
int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
{
pktio_entry_t *pktio_entry = get_pktio_entry(id);
@@ -448,30 +494,19 @@ int odp_pktio_inq_setdef(odp_pktio_t id, odp_queue_t queue)
unlock_entry(pktio_entry);
return -1;
}
- pktio_entry->s.inq_default = queue;
+
+ /* Temporary support for default input queue */
+ pktio_entry->s.in_queue[0].queue = queue;
+ pktio_entry->s.in_queue[0].pktin.pktio = id;
+ pktio_entry->s.in_queue[0].pktin.index = 0;
+ pktio_entry->s.num_in_queue = 1;
unlock_entry(pktio_entry);
- switch (qentry->s.type) {
- /* Change to ODP_QUEUE_TYPE_POLL when ODP_QUEUE_TYPE_PKTIN is removed */
- case ODP_QUEUE_TYPE_PKTIN:
- /* User polls the input queue */
- queue_lock(qentry);
- qentry->s.pktin = id;
- queue_unlock(qentry);
-
- /* Uncomment when ODP_QUEUE_TYPE_PKTIN is removed
- break;
- case ODP_QUEUE_TYPE_SCHED:
- */
- /* Packet input through the scheduler */
- if (schedule_pktio_start(id, ODP_SCHED_PRIO_LOWEST)) {
- ODP_ERR("Schedule pktio start failed\n");
- return -1;
- }
- break;
- default:
- ODP_ABORT("Bad queue type\n");
- }
+ /* User polls the input queue */
+ queue_lock(qentry);
+ qentry->s.pktin.pktio = id;
+ qentry->s.pktin.index = 0;
+ queue_unlock(qentry);
return 0;
}
@@ -490,14 +525,19 @@ int odp_pktio_inq_remdef(odp_pktio_t id)
unlock_entry(pktio_entry);
return -1;
}
- queue = pktio_entry->s.inq_default;
+
+ /* Temporary support for default input queue */
+ queue = pktio_entry->s.in_queue[0].queue;
qentry = queue_to_qentry(queue);
queue_lock(qentry);
- qentry->s.pktin = ODP_PKTIO_INVALID;
+ qentry->s.pktin = PKTIN_INVALID;
queue_unlock(qentry);
- pktio_entry->s.inq_default = ODP_QUEUE_INVALID;
+ pktio_entry->s.in_queue[0].queue = ODP_QUEUE_INVALID;
+ pktio_entry->s.in_queue[0].pktin.pktio = ODP_PKTIO_INVALID;
+ pktio_entry->s.in_queue[0].pktin.index = 0;
+ pktio_entry->s.num_in_queue = 0;
unlock_entry(pktio_entry);
return 0;
@@ -510,7 +550,8 @@ odp_queue_t odp_pktio_inq_getdef(odp_pktio_t id)
if (pktio_entry == NULL)
return ODP_QUEUE_INVALID;
- return pktio_entry->s.inq_default;
+ /* Temporary support for default input queue */
+ return pktio_entry->s.in_queue[0].queue;
}
odp_queue_t odp_pktio_outq_getdef(odp_pktio_t id)
@@ -575,15 +616,13 @@ odp_buffer_hdr_t *pktin_dequeue(queue_entry_t *qentry)
odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
int pkts, i;
- odp_pktio_t pktio;
buf_hdr = queue_deq(qentry);
if (buf_hdr != NULL)
return buf_hdr;
- pktio = qentry->s.pktin;
+ pkts = odp_pktio_recv_queue(qentry->s.pktin, pkt_tbl, QUEUE_MULTI_MAX);
- pkts = odp_pktio_recv(pktio, pkt_tbl, QUEUE_MULTI_MAX);
if (pkts <= 0)
return NULL;
@@ -613,7 +652,6 @@ int pktin_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
odp_buffer_t buf;
int pkts, i, j;
- odp_pktio_t pktio;
nbr = queue_deq_multi(qentry, buf_hdr, num);
if (odp_unlikely(nbr > num))
@@ -626,9 +664,7 @@ int pktin_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
if (nbr == num)
return nbr;
- pktio = qentry->s.pktin;
-
- pkts = odp_pktio_recv(pktio, pkt_tbl, QUEUE_MULTI_MAX);
+ pkts = odp_pktio_recv_queue(qentry->s.pktin, pkt_tbl, QUEUE_MULTI_MAX);
if (pkts <= 0)
return nbr;
@@ -648,43 +684,47 @@ int pktin_deq_multi(queue_entry_t *qentry, odp_buffer_hdr_t *buf_hdr[], int num)
return nbr;
}
-int pktin_poll(pktio_entry_t *entry)
+int pktin_poll(pktio_entry_t *entry, int num_queue, int index[])
{
odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
- int num, i;
+ int num, i, idx;
odp_buffer_t buf;
- odp_pktio_t pktio;
- pktio = entry->s.handle;
-
- if (odp_unlikely(is_free(entry)))
+ if (odp_unlikely(is_free(entry))) {
+ ODP_ERR("Bad pktio entry\n");
return -1;
+ }
- if (odp_unlikely(entry->s.inq_default == ODP_QUEUE_INVALID))
+ /* Temporarily needed for odp_pktio_inq_remdef() */
+ if (odp_unlikely(entry->s.num_in_queue == 0))
return -1;
if (entry->s.state == STATE_STOP)
return 0;
- num = odp_pktio_recv(pktio, pkt_tbl, QUEUE_MULTI_MAX);
+ for (idx = 0; idx < num_queue; idx++) {
+ queue_entry_t *qentry;
+ odp_queue_t queue;
+ odp_pktin_queue_t pktin = entry->s.in_queue[index[idx]].pktin;
- if (num == 0)
- return 0;
+ num = odp_pktio_recv_queue(pktin, pkt_tbl, QUEUE_MULTI_MAX);
- if (num < 0) {
- ODP_ERR("Packet recv error\n");
- return -1;
- }
+ if (num == 0)
+ continue;
- for (i = 0; i < num; i++) {
- buf = _odp_packet_to_buffer(pkt_tbl[i]);
- hdr_tbl[i] = odp_buf_to_hdr(buf);
- }
+ if (num < 0) {
+ ODP_ERR("Packet recv error\n");
+ return -1;
+ }
- if (num) {
- queue_entry_t *qentry;
- qentry = queue_to_qentry(entry->s.inq_default);
+ for (i = 0; i < num; i++) {
+ buf = _odp_packet_to_buffer(pkt_tbl[i]);
+ hdr_tbl[i] = odp_buf_to_hdr(buf);
+ }
+
+ queue = entry->s.in_queue[index[idx]].queue;
+ qentry = queue_to_qentry(queue);
queue_enq_multi(qentry, hdr_tbl, num, 0);
}
@@ -800,11 +840,91 @@ int odp_pktio_mac_addr(odp_pktio_t id, void *mac_addr, int addr_size)
return ret;
}
+int odp_pktio_link_status(odp_pktio_t id)
+{
+ pktio_entry_t *entry;
+ int ret = -1;
+
+ entry = get_pktio_entry(id);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", id);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_DBG("already freed pktio\n");
+ return -1;
+ }
+
+ if (entry->s.ops->link_status)
+ ret = entry->s.ops->link_status(entry);
+ unlock_entry(entry);
+
+ return ret;
+}
+
void odp_pktio_param_init(odp_pktio_param_t *params)
{
memset(params, 0, sizeof(odp_pktio_param_t));
}
+void odp_pktin_queue_param_init(odp_pktin_queue_param_t *param)
+{
+ memset(param, 0, sizeof(odp_pktin_queue_param_t));
+ param->op_mode = ODP_PKTIO_OP_MT;
+}
+
+void odp_pktout_queue_param_init(odp_pktout_queue_param_t *param)
+{
+ memset(param, 0, sizeof(odp_pktout_queue_param_t));
+ param->op_mode = ODP_PKTIO_OP_MT;
+}
+
+void odp_pktio_print(odp_pktio_t id)
+{
+ pktio_entry_t *entry;
+ uint8_t addr[ETH_ALEN];
+ int max_len = 512;
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+
+ entry = get_pktio_entry(id);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", id);
+ return;
+ }
+
+ len += snprintf(&str[len], n - len,
+ "pktio\n");
+ len += snprintf(&str[len], n - len,
+ " handle %" PRIu64 "\n", odp_pktio_to_u64(id));
+ len += snprintf(&str[len], n - len,
+ " name %s\n", entry->s.name);
+ len += snprintf(&str[len], n - len,
+ " type %s\n", entry->s.ops->name);
+ len += snprintf(&str[len], n - len,
+ " state %s\n",
+ entry->s.state == STATE_START ? "start" :
+ (entry->s.state == STATE_STOP ? "stop" : "unknown"));
+ memset(addr, 0, sizeof(addr));
+ odp_pktio_mac_addr(id, addr, ETH_ALEN);
+ len += snprintf(&str[len], n - len,
+ " mac %02x:%02x:%02x:%02x:%02x:%02x\n",
+ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ len += snprintf(&str[len], n - len,
+ " mtu %d\n", odp_pktio_mtu(id));
+ len += snprintf(&str[len], n - len,
+ " promisc %s\n",
+ odp_pktio_promisc_mode(id) ? "yes" : "no");
+ str[len] = '\0';
+
+ ODP_PRINT("\n%s\n", str);
+}
+
int odp_pktio_term_global(void)
{
int ret;
@@ -852,42 +972,391 @@ int odp_pktio_term_global(void)
return ret;
}
-void odp_pktio_print(odp_pktio_t id)
+int odp_pktio_capability(odp_pktio_t pktio, odp_pktio_capability_t *capa)
{
pktio_entry_t *entry;
- uint8_t addr[ETH_ALEN];
- int max_len = 512;
- char str[max_len];
- int len = 0;
- int n = max_len - 1;
- entry = get_pktio_entry(id);
+ entry = get_pktio_entry(pktio);
if (entry == NULL) {
- ODP_DBG("pktio entry %d does not exist\n", id);
- return;
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
}
- len += snprintf(&str[len], n - len,
- "pktio\n");
- len += snprintf(&str[len], n - len,
- " handle %" PRIu64 "\n", odp_pktio_to_u64(id));
- len += snprintf(&str[len], n - len,
- " name %s\n", entry->s.name);
- len += snprintf(&str[len], n - len,
- " state %s\n",
- entry->s.state == STATE_START ? "start" :
- (entry->s.state == STATE_STOP ? "stop" : "unknown"));
- memset(addr, 0, sizeof(addr));
- odp_pktio_mac_addr(id, addr, ETH_ALEN);
- len += snprintf(&str[len], n - len,
- " mac %02x:%02x:%02x:%02x:%02x:%02x\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
- len += snprintf(&str[len], n - len,
- " mtu %d\n", odp_pktio_mtu(id));
- len += snprintf(&str[len], n - len,
- " promisc %s\n",
- odp_pktio_promisc_mode(id) ? "yes" : "no");
- str[len] = '\0';
+ if (entry->s.ops->capability)
+ return entry->s.ops->capability(entry, capa);
- ODP_PRINT("\n%s\n", str);
+ return single_capability(capa);
+}
+
+int odp_pktio_stats(odp_pktio_t pktio,
+ odp_pktio_stats_t *stats)
+{
+ pktio_entry_t *entry;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_DBG("already freed pktio\n");
+ return -1;
+ }
+
+ if (entry->s.ops->stats)
+ ret = entry->s.ops->stats(entry, stats);
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktio_stats_reset(odp_pktio_t pktio)
+{
+ pktio_entry_t *entry;
+ int ret = -1;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ lock_entry(entry);
+
+ if (odp_unlikely(is_free(entry))) {
+ unlock_entry(entry);
+ ODP_DBG("already freed pktio\n");
+ return -1;
+ }
+
+ if (entry->s.ops->stats_reset)
+ ret = entry->s.ops->stats_reset(entry);
+ unlock_entry(entry);
+
+ return ret;
+}
+
+int odp_pktin_queue_config(odp_pktio_t pktio,
+ const odp_pktin_queue_param_t *param)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+ odp_pktio_capability_t capa;
+ unsigned num_queues;
+ unsigned i;
+ odp_queue_t queue;
+
+ if (param == NULL) {
+ ODP_DBG("no parameters\n");
+ return -1;
+ }
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ if (entry->s.state != STATE_STOP) {
+ ODP_DBG("pktio %s: not stopped\n", entry->s.name);
+ return -1;
+ }
+
+ mode = entry->s.param.in_mode;
+
+ if (mode == ODP_PKTIN_MODE_DISABLED) {
+ ODP_DBG("pktio %s: packet input is disabled\n", entry->s.name);
+ return -1;
+ }
+
+ num_queues = param->num_queues;
+
+ if (num_queues == 0) {
+ ODP_DBG("pktio %s: zero input queues\n", entry->s.name);
+ return -1;
+ }
+
+ odp_pktio_capability(pktio, &capa);
+
+ if (num_queues > capa.max_input_queues) {
+ ODP_DBG("pktio %s: too many input queues\n", entry->s.name);
+ return -1;
+ }
+
+ /* If re-configuring, destroy old queues */
+ if (entry->s.num_in_queue)
+ destroy_in_queues(entry, entry->s.num_in_queue);
+
+ for (i = 0; i < num_queues; i++) {
+ if (mode == ODP_PKTIN_MODE_QUEUE ||
+ mode == ODP_PKTIN_MODE_SCHED) {
+ odp_queue_param_t queue_param;
+
+ memcpy(&queue_param, &param->queue_param,
+ sizeof(odp_queue_param_t));
+
+ queue_param.type = ODP_QUEUE_TYPE_PLAIN;
+
+ if (mode == ODP_PKTIN_MODE_SCHED)
+ queue_param.type = ODP_QUEUE_TYPE_SCHED;
+
+ queue = odp_queue_create("pktio_in",
+ &queue_param);
+
+ if (queue == ODP_QUEUE_INVALID) {
+ destroy_in_queues(entry, i + 1);
+ return -1;
+ }
+
+ if (mode == ODP_PKTIN_MODE_QUEUE) {
+ queue_entry_t *qentry;
+
+ qentry = queue_to_qentry(queue);
+ qentry->s.pktin.index = i;
+ qentry->s.pktin.pktio = pktio;
+
+ qentry->s.enqueue = pktin_enqueue;
+ qentry->s.dequeue = pktin_dequeue;
+ qentry->s.enqueue_multi = pktin_enq_multi;
+ qentry->s.dequeue_multi = pktin_deq_multi;
+ }
+
+ entry->s.in_queue[i].queue = queue;
+ } else {
+ entry->s.in_queue[i].queue = ODP_QUEUE_INVALID;
+ }
+
+ entry->s.in_queue[i].pktin.index = i;
+ entry->s.in_queue[i].pktin.pktio = entry->s.handle;
+ }
+
+ entry->s.num_in_queue = num_queues;
+
+ if (entry->s.ops->input_queues_config)
+ return entry->s.ops->input_queues_config(entry, param);
+
+ return 0;
+}
+
+int odp_pktout_queue_config(odp_pktio_t pktio,
+ const odp_pktout_queue_param_t *param)
+{
+ pktio_entry_t *entry;
+ odp_pktout_mode_t mode;
+ odp_pktio_capability_t capa;
+ unsigned num_queues;
+ unsigned i;
+
+ if (param == NULL) {
+ ODP_DBG("no parameters\n");
+ return -1;
+ }
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ if (entry->s.state != STATE_STOP) {
+ ODP_DBG("pktio %s: not stopped\n", entry->s.name);
+ return -1;
+ }
+
+ mode = entry->s.param.out_mode;
+
+ if (mode == ODP_PKTOUT_MODE_DISABLED) {
+ ODP_DBG("pktio %s: packet output is disabled\n", entry->s.name);
+ return -1;
+ }
+
+ if (mode != ODP_PKTOUT_MODE_DIRECT) {
+ ODP_DBG("pktio %s: bad packet output mode\n", entry->s.name);
+ return -1;
+ }
+
+ num_queues = param->num_queues;
+
+ if (num_queues == 0) {
+ ODP_DBG("pktio %s: zero output queues\n", entry->s.name);
+ return -1;
+ }
+
+ odp_pktio_capability(pktio, &capa);
+
+ if (num_queues > capa.max_output_queues) {
+ ODP_DBG("pktio %s: too many output queues\n", entry->s.name);
+ return -1;
+ }
+
+ for (i = 0; i < num_queues; i++) {
+ entry->s.out_queue[i].pktout.index = i;
+ entry->s.out_queue[i].pktout.pktio = entry->s.handle;
+ }
+
+ entry->s.num_out_queue = num_queues;
+
+ if (entry->s.ops->output_queues_config)
+ return entry->s.ops->output_queues_config(entry, param);
+
+ return 0;
+}
+
+int odp_pktin_event_queue(odp_pktio_t pktio, odp_queue_t queues[], int num)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ mode = entry->s.param.in_mode;
+
+ if (mode != ODP_PKTIN_MODE_QUEUE &&
+ mode != ODP_PKTIN_MODE_SCHED)
+ return -1;
+
+ if (entry->s.ops->in_queues)
+ return entry->s.ops->in_queues(entry, queues, num);
+
+ return single_in_queues(entry, queues, num);
+}
+
+int odp_pktin_queue(odp_pktio_t pktio, odp_pktin_queue_t queues[], int num)
+{
+ pktio_entry_t *entry;
+ odp_pktin_mode_t mode;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ mode = entry->s.param.in_mode;
+
+ if (mode != ODP_PKTIN_MODE_DIRECT)
+ return -1;
+
+ if (entry->s.ops->pktin_queues)
+ return entry->s.ops->pktin_queues(entry, queues, num);
+
+ return single_pktin_queues(entry, queues, num);
+}
+
+int odp_pktout_queue(odp_pktio_t pktio, odp_pktout_queue_t queues[], int num)
+{
+ pktio_entry_t *entry;
+ odp_pktout_mode_t mode;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ mode = entry->s.param.out_mode;
+
+ if (mode != ODP_PKTOUT_MODE_DIRECT)
+ return -1;
+
+ if (entry->s.ops->pktout_queues)
+ return entry->s.ops->pktout_queues(entry, queues, num);
+
+ return single_pktout_queues(entry, queues, num);
+}
+
+int odp_pktio_recv_queue(odp_pktin_queue_t queue, odp_packet_t packets[],
+ int num)
+{
+ pktio_entry_t *entry;
+ odp_pktio_t pktio = queue.pktio;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ if (entry->s.ops->recv_queue)
+ return entry->s.ops->recv_queue(entry, queue.index,
+ packets, num);
+
+ return single_recv_queue(entry, queue.index, packets, num);
+}
+
+int odp_pktio_send_queue(odp_pktout_queue_t queue, odp_packet_t packets[],
+ int num)
+{
+ pktio_entry_t *entry;
+ odp_pktio_t pktio = queue.pktio;
+
+ entry = get_pktio_entry(pktio);
+ if (entry == NULL) {
+ ODP_DBG("pktio entry %d does not exist\n", pktio);
+ return -1;
+ }
+
+ if (entry->s.ops->send_queue)
+ return entry->s.ops->send_queue(entry, queue.index,
+ packets, num);
+
+ return single_send_queue(entry, queue.index, packets, num);
+}
+
+int single_capability(odp_pktio_capability_t *capa)
+{
+ memset(capa, 0, sizeof(odp_pktio_capability_t));
+ capa->max_input_queues = 1;
+ capa->max_output_queues = 1;
+
+ return 0;
+}
+
+int single_in_queues(pktio_entry_t *entry, odp_queue_t queues[], int num)
+{
+ if (queues && num > 0)
+ queues[0] = entry->s.in_queue[0].queue;
+
+ return 1;
+}
+
+int single_pktin_queues(pktio_entry_t *entry, odp_pktin_queue_t queues[],
+ int num)
+{
+ if (queues && num > 0)
+ queues[0] = entry->s.in_queue[0].pktin;
+
+ return 1;
+}
+
+int single_pktout_queues(pktio_entry_t *entry, odp_pktout_queue_t queues[],
+ int num)
+{
+ if (queues && num > 0)
+ queues[0] = entry->s.out_queue[0].pktout;
+
+ return 1;
+}
+
+int single_recv_queue(pktio_entry_t *entry, int index, odp_packet_t packets[],
+ int num)
+{
+ (void)index;
+ return odp_pktio_recv(entry->s.handle, packets, num);
+}
+
+int single_send_queue(pktio_entry_t *entry, int index, odp_packet_t packets[],
+ int num)
+{
+ (void)index;
+ return odp_pktio_send(entry->s.handle, packets, num);
}
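
Taken together, these additions replace the single default input queue with
per-interface queue arrays. A hedged sketch of the intended
configure-and-receive flow in ODP_PKTIN_MODE_DIRECT (error handling trimmed;
pool and pktio creation assumed done elsewhere):

    /* Sketch: configure two direct input queues and poll them once. */
    #include <odp.h>

    #define BURST 16

    static void rx_two_queues(odp_pktio_t pktio)
    {
            odp_pktin_queue_param_t param;
            odp_pktin_queue_t queue[2];
            odp_packet_t pkt[BURST];
            int i, j, num;

            odp_pktin_queue_param_init(&param);
            param.num_queues = 2;

            /* Must happen while the interface is stopped */
            if (odp_pktin_queue_config(pktio, &param))
                    return;

            if (odp_pktin_queue(pktio, queue, 2) != 2)
                    return;

            for (i = 0; i < 2; i++) {
                    num = odp_pktio_recv_queue(queue[i], pkt, BURST);

                    for (j = 0; j < num; j++)
                            odp_packet_free(pkt[j]);
            }
    }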
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index a3fcac373..64668cc3d 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -19,7 +19,6 @@
#include <odp/hints.h>
#include <odp/thread.h>
#include <odp_debug_internal.h>
-#include <odp_atomic_internal.h>
#include <string.h>
#include <stdlib.h>
diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c
index 7f1147825..dbe2d9ce7 100644
--- a/platform/linux-generic/odp_queue.c
+++ b/platform/linux-generic/odp_queue.c
@@ -87,25 +87,29 @@ queue_entry_t *get_qentry(uint32_t queue_id)
}
static int queue_init(queue_entry_t *queue, const char *name,
- odp_queue_type_t type, odp_queue_param_t *param)
+ const odp_queue_param_t *param)
{
strncpy(queue->s.name, name, ODP_QUEUE_NAME_LEN - 1);
- queue->s.type = type;
if (param) {
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
if (queue->s.param.sched.lock_count >
ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE)
return -1;
+
+ if (param->type == ODP_QUEUE_TYPE_SCHED)
+ queue->s.param.deq_mode = ODP_QUEUE_OP_DISABLED;
} else {
/* Defaults */
- memset(&queue->s.param, 0, sizeof(odp_queue_param_t));
+ odp_queue_param_init(&queue->s.param);
queue->s.param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
queue->s.param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
queue->s.param.sched.group = ODP_SCHED_GROUP_ALL;
}
- switch (type) {
+ queue->s.type = queue->s.param.type;
+
+ switch (queue->s.type) {
case ODP_QUEUE_TYPE_PKTIN:
queue->s.enqueue = pktin_enqueue;
queue->s.dequeue = pktin_dequeue;
@@ -126,6 +130,8 @@ static int queue_init(queue_entry_t *queue, const char *name,
break;
}
+ queue->s.pktin = PKTIN_INVALID;
+
queue->s.head = NULL;
queue->s.tail = NULL;
@@ -248,12 +254,12 @@ int odp_queue_lock_count(odp_queue_t handle)
(int)queue->s.param.sched.lock_count : -1;
}
-odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
- odp_queue_param_t *param)
+odp_queue_t odp_queue_create(const char *name, const odp_queue_param_t *param)
{
uint32_t i;
queue_entry_t *queue;
odp_queue_t handle = ODP_QUEUE_INVALID;
+ odp_queue_type_t type;
for (i = 0; i < ODP_CONFIG_QUEUES; i++) {
queue = &queue_tbl->queue[i];
@@ -263,11 +269,13 @@ odp_queue_t odp_queue_create(const char *name, odp_queue_type_t type,
LOCK(&queue->s.lock);
if (queue->s.status == QUEUE_STATUS_FREE) {
- if (queue_init(queue, name, type, param)) {
+ if (queue_init(queue, name, param)) {
UNLOCK(&queue->s.lock);
return handle;
}
+ type = queue->s.type;
+
if (type == ODP_QUEUE_TYPE_SCHED ||
type == ODP_QUEUE_TYPE_PKTIN)
queue->s.status = QUEUE_STATUS_NOTSCHED;
@@ -944,6 +952,9 @@ void queue_unlock(queue_entry_t *queue)
void odp_queue_param_init(odp_queue_param_t *params)
{
memset(params, 0, sizeof(odp_queue_param_t));
+ params->type = ODP_QUEUE_TYPE_PLAIN;
+ params->enq_mode = ODP_QUEUE_OP_MT;
+ params->deq_mode = ODP_QUEUE_OP_MT;
}
/* These routines exists here rather than in odp_schedule
@@ -1099,7 +1110,6 @@ int odp_queue_info(odp_queue_t handle, odp_queue_info_t *info)
}
info->name = queue->s.name;
- info->type = queue->s.type;
info->param = queue->s.param;
UNLOCK(&queue->s.lock);
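
Since the queue type now travels inside odp_queue_param_t, creation is a
two-argument call. A sketch of the updated pattern, using the defaults set by
odp_queue_param_init() above:

    /* Sketch: create a scheduled queue with the new two-argument API. */
    #include <odp.h>

    static odp_queue_t make_sched_queue(const char *name)
    {
            odp_queue_param_t param;

            odp_queue_param_init(&param); /* PLAIN type, MT enq/deq */
            param.type = ODP_QUEUE_TYPE_SCHED;
            param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
            param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
            param.sched.group = ODP_SCHED_GROUP_ALL;

            return odp_queue_create(name, &param);
    }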
diff --git a/platform/linux-generic/odp_rwlock.c b/platform/linux-generic/odp_rwlock.c
index 47c15ef42..42ad0ccdb 100644
--- a/platform/linux-generic/odp_rwlock.c
+++ b/platform/linux-generic/odp_rwlock.c
@@ -6,10 +6,8 @@
#include <stdbool.h>
#include <odp/atomic.h>
-#include <odp_atomic_internal.h>
#include <odp/rwlock.h>
-
-#include <odp_spin_internal.h>
+#include <odp/cpu.h>
void odp_rwlock_init(odp_rwlock_t *rwlock)
{
@@ -22,23 +20,20 @@ void odp_rwlock_read_lock(odp_rwlock_t *rwlock)
int is_locked = 0;
while (is_locked == 0) {
- cnt = _odp_atomic_u32_load_mm(&rwlock->cnt, _ODP_MEMMODEL_RLX);
+ cnt = odp_atomic_load_u32(&rwlock->cnt);
/* waiting for read lock */
if ((int32_t)cnt < 0) {
- odp_spin();
+ odp_cpu_pause();
continue;
}
- is_locked = _odp_atomic_u32_cmp_xchg_strong_mm(&rwlock->cnt,
- &cnt,
- cnt + 1,
- _ODP_MEMMODEL_ACQ,
- _ODP_MEMMODEL_RLX);
+ is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt,
+ &cnt, cnt + 1);
}
}
void odp_rwlock_read_unlock(odp_rwlock_t *rwlock)
{
- _odp_atomic_u32_sub_mm(&rwlock->cnt, 1, _ODP_MEMMODEL_RLS);
+ odp_atomic_sub_rel_u32(&rwlock->cnt, 1);
}
void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
@@ -48,21 +43,18 @@ void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
while (is_locked == 0) {
uint32_t zero = 0;
- cnt = _odp_atomic_u32_load_mm(&rwlock->cnt, _ODP_MEMMODEL_RLX);
+ cnt = odp_atomic_load_u32(&rwlock->cnt);
/* lock acquired, wait */
if (cnt != 0) {
- odp_spin();
+ odp_cpu_pause();
continue;
}
- is_locked = _odp_atomic_u32_cmp_xchg_strong_mm(&rwlock->cnt,
- &zero,
- (uint32_t)-1,
- _ODP_MEMMODEL_ACQ,
- _ODP_MEMMODEL_RLX);
+ is_locked = odp_atomic_cas_acq_u32(&rwlock->cnt,
+ &zero, (uint32_t)-1);
}
}
void odp_rwlock_write_unlock(odp_rwlock_t *rwlock)
{
- _odp_atomic_u32_store_mm(&rwlock->cnt, 0, _ODP_MEMMODEL_RLS);
+ odp_atomic_store_rel_u32(&rwlock->cnt, 0);
}
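
The rwlock keeps its counter protocol (readers CAS the count up with acquire
semantics, a writer CASes 0 to (uint32_t)-1); only the primitives are now
public API. Usage is unchanged; a short sketch:

    /* Sketch: a shared value guarded by odp_rwlock_t.
     * odp_rwlock_init(&lock) must run once before use. */
    #include <odp.h>

    static odp_rwlock_t lock;
    static uint64_t shared_value;

    static uint64_t read_value(void)
    {
            uint64_t v;

            odp_rwlock_read_lock(&lock);
            v = shared_value;
            odp_rwlock_read_unlock(&lock);

            return v;
    }

    static void write_value(uint64_t v)
    {
            odp_rwlock_write_lock(&lock);
            shared_value = v;
            odp_rwlock_write_unlock(&lock);
    }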
diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c
index 58f1b1b37..fc54ee5d0 100644
--- a/platform/linux-generic/odp_schedule.c
+++ b/platform/linux-generic/odp_schedule.c
@@ -19,10 +19,10 @@
#include <odp/time.h>
#include <odp/spinlock.h>
#include <odp/hints.h>
+#include <odp/cpu.h>
#include <odp_queue_internal.h>
#include <odp_packet_io_internal.h>
-#include <odp_spin_internal.h>
odp_thrmask_t sched_mask_all;
@@ -30,12 +30,17 @@ odp_thrmask_t sched_mask_all;
* One per scheduled queue and packet interface */
#define NUM_SCHED_CMD (ODP_CONFIG_QUEUES + ODP_CONFIG_PKTIO_ENTRIES)
-/* Scheduler sub queues */
+/* Priority queues per priority */
#define QUEUES_PER_PRIO 4
+/* Packet input poll cmd queues */
+#define POLL_CMD_QUEUES 4
+
/* Maximum number of dequeues */
#define MAX_DEQ 4
+/* Maximum number of packet input queues per command */
+#define MAX_PKTIN 8
/* Mask of queues per priority */
typedef uint8_t pri_mask_t;
@@ -50,6 +55,13 @@ typedef struct {
odp_queue_t pri_queue[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
pri_mask_t pri_mask[ODP_CONFIG_SCHED_PRIOS];
odp_spinlock_t mask_lock;
+
+ odp_spinlock_t poll_cmd_lock;
+ struct {
+ odp_queue_t queue;
+ uint16_t num;
+ } poll_cmd[POLL_CMD_QUEUES];
+
odp_pool_t pool;
odp_shm_t shm;
uint32_t pri_count[ODP_CONFIG_SCHED_PRIOS][QUEUES_PER_PRIO];
@@ -69,8 +81,9 @@ typedef struct {
struct {
odp_pktio_t pktio;
+ int num;
+ int index[MAX_PKTIN];
pktio_entry_t *pe;
- int prio;
};
};
} sched_cmd_t;
@@ -80,19 +93,20 @@ typedef struct {
typedef struct {
+ int thr;
+ int num;
+ int index;
+ int pause;
+ uint32_t pktin_polls;
odp_queue_t pri_queue;
odp_event_t cmd_ev;
-
- odp_buffer_hdr_t *buf_hdr[MAX_DEQ];
queue_entry_t *qe;
queue_entry_t *origin_qe;
+ odp_buffer_hdr_t *buf_hdr[MAX_DEQ];
uint64_t order;
uint64_t sync[ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE];
odp_pool_t pool;
int enq_called;
- int num;
- int index;
- int pause;
int ignore_ordered_context;
} sched_local_t;
@@ -109,6 +123,7 @@ static void sched_local_init(void)
{
memset(&sched_local, 0, sizeof(sched_local_t));
+ sched_local.thr = odp_thread_id();
sched_local.pri_queue = ODP_QUEUE_INVALID;
sched_local.cmd_ev = ODP_EVENT_INVALID;
}
@@ -163,8 +178,7 @@ int odp_schedule_init_global(void)
name[10] = '0' + j / 10;
name[11] = '0' + j - 10*(j / 10);
- queue = odp_queue_create(name,
- ODP_QUEUE_TYPE_POLL, NULL);
+ queue = odp_queue_create(name, NULL);
if (queue == ODP_QUEUE_INVALID) {
ODP_ERR("Sched init: Queue create failed.\n");
@@ -176,6 +190,24 @@ int odp_schedule_init_global(void)
}
}
+ odp_spinlock_init(&sched->poll_cmd_lock);
+ for (i = 0; i < POLL_CMD_QUEUES; i++) {
+ odp_queue_t queue;
+ char name[] = "odp_poll_cmd_YY";
+
+ name[13] = '0' + i / 10;
+ name[14] = '0' + i - 10 * (i / 10);
+
+ queue = odp_queue_create(name, NULL);
+
+ if (queue == ODP_QUEUE_INVALID) {
+ ODP_ERR("Sched init: Queue create failed.\n");
+ return -1;
+ }
+
+ sched->poll_cmd[i].queue = queue;
+ }
+
odp_spinlock_init(&sched->grp_lock);
for (i = 0; i < ODP_CONFIG_SCHED_GRPS; i++) {
@@ -195,11 +227,11 @@ int odp_schedule_term_global(void)
int ret = 0;
int rc = 0;
int i, j;
+ odp_event_t ev;
for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
for (j = 0; j < QUEUES_PER_PRIO; j++) {
odp_queue_t pri_q;
- odp_event_t ev;
pri_q = sched->pri_queue[i][j];
@@ -207,25 +239,20 @@ int odp_schedule_term_global(void)
ODP_EVENT_INVALID) {
odp_buffer_t buf;
sched_cmd_t *sched_cmd;
+ queue_entry_t *qe;
+ odp_buffer_hdr_t *buf_hdr[1];
+ int num;
buf = odp_buffer_from_event(ev);
sched_cmd = odp_buffer_addr(buf);
+ qe = sched_cmd->qe;
+ num = queue_deq_multi(qe, buf_hdr, 1);
- if (sched_cmd->cmd == SCHED_CMD_DEQUEUE) {
- queue_entry_t *qe;
- odp_buffer_hdr_t *buf_hdr[1];
- int num;
-
- qe = sched_cmd->qe;
- num = queue_deq_multi(qe, buf_hdr, 1);
+ if (num < 0)
+ queue_destroy_finalize(qe);
- if (num < 0)
- queue_destroy_finalize(qe);
-
- if (num > 0)
- ODP_ERR("Queue not empty\n");
- } else
- odp_buffer_free(buf);
+ if (num > 0)
+ ODP_ERR("Queue not empty\n");
}
if (odp_queue_destroy(pri_q)) {
@@ -235,6 +262,18 @@ int odp_schedule_term_global(void)
}
}
+ for (i = 0; i < POLL_CMD_QUEUES; i++) {
+ odp_queue_t queue = sched->poll_cmd[i].queue;
+
+ while ((ev = odp_queue_deq(queue)) != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+
+ if (odp_queue_destroy(queue)) {
+ ODP_ERR("Poll cmd queue destroy failed\n");
+ rc = -1;
+ }
+ }
+
if (odp_pool_destroy(sched->pool) != 0) {
ODP_ERR("Pool destroy fail.\n");
rc = -1;
@@ -273,11 +312,6 @@ static int pri_id_queue(odp_queue_t queue)
return (QUEUES_PER_PRIO-1) & (queue_to_id(queue));
}
-static int pri_id_pktio(odp_pktio_t pktio)
-{
- return (QUEUES_PER_PRIO-1) & (pktio_to_id(pktio));
-}
-
static odp_queue_t pri_set(int id, int prio)
{
odp_spinlock_lock(&sched->mask_lock);
@@ -308,25 +342,12 @@ static odp_queue_t pri_set_queue(odp_queue_t queue, int prio)
return pri_set(id, prio);
}
-static odp_queue_t pri_set_pktio(odp_pktio_t pktio, int prio)
-{
- int id = pri_id_pktio(pktio);
-
- return pri_set(id, prio);
-}
-
static void pri_clr_queue(odp_queue_t queue, int prio)
{
int id = pri_id_queue(queue);
pri_clr(id, prio);
}
-static void pri_clr_pktio(odp_pktio_t pktio, int prio)
-{
- int id = pri_id_pktio(pktio);
- pri_clr(id, prio);
-}
-
int schedule_queue_init(queue_entry_t *qe)
{
odp_buffer_t buf;
@@ -357,30 +378,55 @@ void schedule_queue_destroy(queue_entry_t *qe)
qe->s.pri_queue = ODP_QUEUE_INVALID;
}
-int schedule_pktio_start(odp_pktio_t pktio, int prio)
+static int poll_cmd_queue_idx(odp_pktio_t pktio, int in_queue_idx)
+{
+ return (POLL_CMD_QUEUES - 1) & (pktio_to_id(pktio) ^ in_queue_idx);
+}
+
+void schedule_pktio_start(odp_pktio_t pktio, int num_in_queue,
+ int in_queue_idx[])
{
odp_buffer_t buf;
sched_cmd_t *sched_cmd;
- odp_queue_t pri_queue;
+ odp_queue_t queue;
+ int i, idx;
buf = odp_buffer_alloc(sched->pool);
if (buf == ODP_BUFFER_INVALID)
- return -1;
+ ODP_ABORT("Sched pool empty\n");
sched_cmd = odp_buffer_addr(buf);
sched_cmd->cmd = SCHED_CMD_POLL_PKTIN;
sched_cmd->pktio = pktio;
+ sched_cmd->num = num_in_queue;
sched_cmd->pe = get_pktio_entry(pktio);
- sched_cmd->prio = prio;
- pri_queue = pri_set_pktio(pktio, prio);
+ if (num_in_queue > MAX_PKTIN)
+ ODP_ABORT("Too many input queues for scheduler\n");
+
+ for (i = 0; i < num_in_queue; i++)
+ sched_cmd->index[i] = in_queue_idx[i];
- if (odp_queue_enq(pri_queue, odp_buffer_to_event(buf)))
+ idx = poll_cmd_queue_idx(pktio, in_queue_idx[0]);
+
+ odp_spinlock_lock(&sched->poll_cmd_lock);
+ sched->poll_cmd[idx].num++;
+ odp_spinlock_unlock(&sched->poll_cmd_lock);
+
+ queue = sched->poll_cmd[idx].queue;
+
+ if (odp_queue_enq(queue, odp_buffer_to_event(buf)))
ODP_ABORT("schedule_pktio_start failed\n");
+}
+static void schedule_pktio_stop(sched_cmd_t *sched_cmd)
+{
+ int idx = poll_cmd_queue_idx(sched_cmd->pktio, sched_cmd->index[0]);
- return 0;
+ odp_spinlock_lock(&sched->poll_cmd_lock);
+ sched->poll_cmd[idx].num--;
+ odp_spinlock_unlock(&sched->poll_cmd_lock);
}
void odp_schedule_release_atomic(void)
@@ -439,9 +485,12 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
unsigned int max_num, unsigned int max_deq)
{
int i, j;
- int thr;
int ret;
uint32_t k;
+ int id;
+ odp_event_t ev;
+ odp_buffer_t buf;
+ sched_cmd_t *sched_cmd;
if (sched_local.num) {
ret = copy_events(out_ev, max_num);
@@ -457,21 +506,16 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
if (odp_unlikely(sched_local.pause))
return 0;
- thr = odp_thread_id();
-
+ /* Schedule events */
for (i = 0; i < ODP_CONFIG_SCHED_PRIOS; i++) {
- int id;
if (sched->pri_mask[i] == 0)
continue;
- id = thr & (QUEUES_PER_PRIO-1);
+ id = sched_local.thr & (QUEUES_PER_PRIO - 1);
for (j = 0; j < QUEUES_PER_PRIO; j++, id++) {
odp_queue_t pri_q;
- odp_event_t ev;
- odp_buffer_t buf;
- sched_cmd_t *sched_cmd;
queue_entry_t *qe;
int num;
int qe_grp;
@@ -491,28 +535,12 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
buf = odp_buffer_from_event(ev);
sched_cmd = odp_buffer_addr(buf);
- if (sched_cmd->cmd == SCHED_CMD_POLL_PKTIN) {
- /* Poll packet input */
- if (pktin_poll(sched_cmd->pe)) {
- /* Stop scheduling the pktio */
- pri_clr_pktio(sched_cmd->pktio,
- sched_cmd->prio);
- odp_buffer_free(buf);
- } else {
- /* Continue scheduling the pktio */
- if (odp_queue_enq(pri_q, ev))
- ODP_ABORT("schedule failed\n");
- }
-
- continue;
- }
-
qe = sched_cmd->qe;
qe_grp = qe->s.param.sched.group;
if (qe_grp > ODP_SCHED_GROUP_ALL &&
!odp_thrmask_isset(sched->sched_grp[qe_grp].mask,
- thr)) {
+ sched_local.thr)) {
/* This thread is not eligible for work from
* this queue, so continue scheduling it.
*/
@@ -580,6 +608,59 @@ static int schedule(odp_queue_t *out_queue, odp_event_t out_ev[],
}
}
+ /*
+ * Poll packet input when there are no events
+ * * Each thread starts the search for a poll command from its
+ * preferred command queue. If the queue is empty, it moves to other
+ * queues.
+ * * Most of the time, the search stops on the first command found,
+ *   to optimize multi-threaded performance. A small portion of polls
+ *   must do a full iteration to avoid packet input starvation when
+ *   there are fewer threads than command queues.
+ */
+ id = sched_local.thr & (POLL_CMD_QUEUES - 1);
+
+ for (i = 0; i < POLL_CMD_QUEUES; i++, id++) {
+ odp_queue_t cmd_queue;
+
+ if (id == POLL_CMD_QUEUES)
+ id = 0;
+
+ if (sched->poll_cmd[id].num == 0)
+ continue;
+
+ cmd_queue = sched->poll_cmd[id].queue;
+ ev = odp_queue_deq(cmd_queue);
+
+ if (ev == ODP_EVENT_INVALID)
+ continue;
+
+ buf = odp_buffer_from_event(ev);
+ sched_cmd = odp_buffer_addr(buf);
+
+ if (sched_cmd->cmd != SCHED_CMD_POLL_PKTIN)
+ ODP_ABORT("Bad poll command\n");
+
+ /* Poll packet input */
+ if (pktin_poll(sched_cmd->pe,
+ sched_cmd->num,
+ sched_cmd->index)) {
+ /* Stop scheduling the pktio */
+ schedule_pktio_stop(sched_cmd);
+ odp_buffer_free(buf);
+ } else {
+ /* Continue scheduling the pktio */
+ if (odp_queue_enq(cmd_queue, ev))
+ ODP_ABORT("Poll command enqueue failed\n");
+
+ /* Do not iterate through all pktin poll command queues
+ * every time. */
+ if (odp_likely(sched_local.pktin_polls & 0xf))
+ break;
+ }
+ }
+
+ sched_local.pktin_polls++;
return 0;
}
@@ -812,7 +893,7 @@ void odp_schedule_order_lock(unsigned lock_index)
* some events in the ordered flow need to lock.
*/
while (sync != sync_out) {
- odp_spin();
+ odp_cpu_pause();
sync_out =
odp_atomic_load_u64(&origin_qe->s.sync_out[lock_index]);
}
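
The poll commands are spread over POLL_CMD_QUEUES plain queues, with the slot
picked by XORing the pktio id with the first input queue index, and each
thread starts its search at a thread-dependent slot. A hedged arithmetic
sketch of the distribution (poll_cmd_queue_idx() mirrors the patch; the plain
integer pktio id stands in for pktio_to_id()):

    #include <stdio.h>

    #define POLL_CMD_QUEUES 4

    static int poll_cmd_queue_idx(int pktio_id, int in_queue_idx)
    {
            return (POLL_CMD_QUEUES - 1) & (pktio_id ^ in_queue_idx);
    }

    int main(void)
    {
            int pktio, queue;

            for (pktio = 0; pktio < 2; pktio++)
                    for (queue = 0; queue < 4; queue++)
                            printf("pktio %d queue %d -> cmd queue %d\n",
                                   pktio, queue,
                                   poll_cmd_queue_idx(pktio, queue));
            return 0;
    }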
diff --git a/platform/linux-generic/odp_spinlock.c b/platform/linux-generic/odp_spinlock.c
index f16572053..6a16dc4b9 100644
--- a/platform/linux-generic/odp_spinlock.c
+++ b/platform/linux-generic/odp_spinlock.c
@@ -5,9 +5,8 @@
*/
#include <odp/spinlock.h>
+#include <odp/cpu.h>
#include <odp_atomic_internal.h>
-#include <odp_spin_internal.h>
-
void odp_spinlock_init(odp_spinlock_t *spinlock)
{
@@ -23,7 +22,7 @@ void odp_spinlock_lock(odp_spinlock_t *spinlock)
* the loop will exit when the lock becomes available
* and we will retry the TAS operation above */
while (_odp_atomic_flag_load(&spinlock->lock))
- odp_spin();
+ odp_cpu_pause();
}
diff --git a/platform/linux-generic/odp_system_info.c b/platform/linux-generic/odp_system_info.c
index a948fce93..42aef8a11 100644
--- a/platform/linux-generic/odp_system_info.c
+++ b/platform/linux-generic/odp_system_info.c
@@ -25,13 +25,6 @@
#include <dirent.h>
-
-typedef struct {
- const char *cpu_arch_str;
- int (*cpuinfo_parser)(FILE *file, odp_system_info_t *sysinfo);
-
-} odp_compiler_info_t;
-
#define CACHE_LNSZ_FILE \
"/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size"
@@ -112,166 +105,6 @@ static int huge_page_size(void)
}
-
-/*
- * HW specific /proc/cpuinfo file parsing
- */
-#if defined __x86_64__ || defined __i386__
-
-static int cpuinfo_x86(FILE *file, odp_system_info_t *sysinfo)
-{
- char str[1024];
- char *pos;
- double mhz = 0.0;
- int model = 0;
- int count = 2;
-
- while (fgets(str, sizeof(str), file) != NULL && count > 0) {
- if (!mhz) {
- pos = strstr(str, "cpu MHz");
- if (pos) {
- sscanf(pos, "cpu MHz : %lf", &mhz);
- count--;
- }
- }
-
- if (!model) {
- pos = strstr(str, "model name");
- if (pos) {
- int len;
- pos = strchr(str, ':');
- strncpy(sysinfo->model_str, pos+2,
- sizeof(sysinfo->model_str));
- len = strlen(sysinfo->model_str);
- sysinfo->model_str[len - 1] = 0;
- model = 1;
- count--;
- }
- }
- }
-
- sysinfo->cpu_hz = (uint64_t) (mhz * 1000000.0);
-
- return 0;
-}
-
-#elif defined __arm__ || defined __aarch64__
-
-static int cpuinfo_arm(FILE *file ODP_UNUSED,
-odp_system_info_t *sysinfo ODP_UNUSED)
-{
- return 0;
-}
-
-#elif defined __OCTEON__
-
-static int cpuinfo_octeon(FILE *file, odp_system_info_t *sysinfo)
-{
- char str[1024];
- char *pos;
- double mhz = 0.0;
- int model = 0;
- int count = 2;
-
- while (fgets(str, sizeof(str), file) != NULL && count > 0) {
- if (!mhz) {
- pos = strstr(str, "BogoMIPS");
-
- if (pos) {
- sscanf(pos, "BogoMIPS : %lf", &mhz);
- count--;
- }
- }
-
- if (!model) {
- pos = strstr(str, "cpu model");
-
- if (pos) {
- int len;
- pos = strchr(str, ':');
- strncpy(sysinfo->model_str, pos+2,
- sizeof(sysinfo->model_str));
- len = strlen(sysinfo->model_str);
- sysinfo->model_str[len - 1] = 0;
- model = 1;
- count--;
- }
- }
- }
-
- /* bogomips seems to be 2x freq */
- sysinfo->cpu_hz = (uint64_t) (mhz * 1000000.0 / 2.0);
-
- return 0;
-}
-#elif defined __powerpc__
-static int cpuinfo_powerpc(FILE *file, odp_system_info_t *sysinfo)
-{
- char str[1024];
- char *pos;
- double mhz = 0.0;
- int model = 0;
- int count = 2;
-
- while (fgets(str, sizeof(str), file) != NULL && count > 0) {
- if (!mhz) {
- pos = strstr(str, "clock");
-
- if (pos) {
- sscanf(pos, "clock : %lf", &mhz);
- count--;
- }
- }
-
- if (!model) {
- pos = strstr(str, "cpu");
-
- if (pos) {
- int len;
- pos = strchr(str, ':');
- strncpy(sysinfo->model_str, pos+2,
- sizeof(sysinfo->model_str));
- len = strlen(sysinfo->model_str);
- sysinfo->model_str[len - 1] = 0;
- model = 1;
- count--;
- }
- }
-
- sysinfo->cpu_hz = (uint64_t) (mhz * 1000000.0);
- }
-
-
- return 0;
-}
-
-#else
- #error GCC target not found
-#endif
-
-static odp_compiler_info_t compiler_info = {
- #if defined __x86_64__ || defined __i386__
- .cpu_arch_str = "x86",
- .cpuinfo_parser = cpuinfo_x86
-
- #elif defined __arm__ || defined __aarch64__
- .cpu_arch_str = "arm",
- .cpuinfo_parser = cpuinfo_arm
-
- #elif defined __OCTEON__
- .cpu_arch_str = "octeon",
- .cpuinfo_parser = cpuinfo_octeon
-
- #elif defined __powerpc__
- .cpu_arch_str = "powerpc",
- .cpuinfo_parser = cpuinfo_powerpc
-
- #else
- #error GCC target not found
- #endif
-};
-
-
#if defined __x86_64__ || defined __i386__ || defined __OCTEON__ || \
defined __powerpc__
@@ -318,7 +151,7 @@ static int systemcpu(odp_system_info_t *sysinfo)
static int systemcpu(odp_system_info_t *sysinfo)
{
- int ret;
+ int ret, i;
ret = sysconf_cpu_count();
if (ret == 0) {
@@ -331,10 +164,14 @@ static int systemcpu(odp_system_info_t *sysinfo)
sysinfo->huge_page_size = huge_page_size();
/* Dummy values */
- sysinfo->cpu_hz = 1400000000;
sysinfo->cache_line_size = 64;
- strncpy(sysinfo->model_str, "UNKNOWN", sizeof(sysinfo->model_str));
+ ODP_DBG("Warning: use dummy values for freq and model string\n");
+ ODP_DBG("Refer to https://bugs.linaro.org/show_bug.cgi?id=1870\n");
+ for (i = 0; i < MAX_CPU_NUMBER; i++) {
+ sysinfo->cpu_hz_max[i] = 1400000000;
+ strcpy(sysinfo->model_str[i], "UNKNOWN");
+ }
return 0;
}
@@ -358,7 +195,7 @@ int odp_system_info_init(void)
return -1;
}
- compiler_info.cpuinfo_parser(file, &odp_global_data.system_info);
+ odp_cpuinfo_parser(file, &odp_global_data.system_info);
fclose(file);
@@ -383,9 +220,29 @@ int odp_system_info_term(void)
* Public access functions
*************************
*/
-uint64_t odp_sys_cpu_hz(void)
+uint64_t odp_cpu_hz(void)
{
- return odp_global_data.system_info.cpu_hz;
+ int id = sched_getcpu();
+
+ return odp_cpu_hz_current(id);
+}
+
+uint64_t odp_cpu_hz_id(int id)
+{
+ return odp_cpu_hz_current(id);
+}
+
+uint64_t odp_cpu_hz_max(void)
+{
+ return odp_cpu_hz_max_id(0);
+}
+
+uint64_t odp_cpu_hz_max_id(int id)
+{
+ if (id >= 0 && id < MAX_CPU_NUMBER)
+ return odp_global_data.system_info.cpu_hz_max[id];
+ else
+ return 0;
}
uint64_t odp_sys_huge_page_size(void)
@@ -398,9 +255,17 @@ uint64_t odp_sys_page_size(void)
return odp_global_data.system_info.page_size;
}
-const char *odp_sys_cpu_model_str(void)
+const char *odp_cpu_model_str(void)
+{
+ return odp_cpu_model_str_id(0);
+}
+
+const char *odp_cpu_model_str_id(int id)
{
- return odp_global_data.system_info.model_str;
+ if (id >= 0 && id < MAX_CPU_NUMBER)
+ return odp_global_data.system_info.model_str[id];
+ else
+ return NULL;
}
int odp_sys_cache_line_size(void)
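
Frequency and model queries are now per-CPU: the id-less variants above
delegate to the current CPU (via sched_getcpu()) or to CPU 0. A short usage
sketch (ODP assumed initialized, id in valid range):

    #include <stdio.h>
    #include <inttypes.h>
    #include <odp.h>

    static void show_cpu(int id)
    {
            printf("cpu %d: %s, max %" PRIu64 " Hz\n", id,
                   odp_cpu_model_str_id(id), odp_cpu_hz_max_id(id));
    }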
diff --git a/platform/linux-generic/odp_ticketlock.c b/platform/linux-generic/odp_ticketlock.c
index 3e2a4ece1..84b893a5c 100644
--- a/platform/linux-generic/odp_ticketlock.c
+++ b/platform/linux-generic/odp_ticketlock.c
@@ -6,10 +6,8 @@
#include <odp/ticketlock.h>
#include <odp/atomic.h>
-#include <odp_atomic_internal.h>
#include <odp/sync.h>
-#include <odp_spin_internal.h>
-
+#include <odp/cpu.h>
void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
{
@@ -17,7 +15,6 @@ void odp_ticketlock_init(odp_ticketlock_t *ticketlock)
odp_atomic_init_u32(&ticketlock->cur_ticket, 0);
}
-
void odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
{
uint32_t ticket;
@@ -29,9 +26,8 @@ void odp_ticketlock_lock(odp_ticketlock_t *ticketlock)
/* Spin waiting for our turn. Use load-acquire so that we acquire
* all stores from the previous lock owner */
- while (ticket != _odp_atomic_u32_load_mm(&ticketlock->cur_ticket,
- _ODP_MEMMODEL_ACQ))
- odp_spin();
+ while (ticket != odp_atomic_load_acq_u32(&ticketlock->cur_ticket))
+ odp_cpu_pause();
}
int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
@@ -55,11 +51,8 @@ int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
* If CAS fails, it means some other thread intercepted and
* took a ticket which means the lock is not available
* anymore */
- if (_odp_atomic_u32_cmp_xchg_strong_mm(&tklock->next_ticket,
- &next,
- next + 1,
- _ODP_MEMMODEL_ACQ,
- _ODP_MEMMODEL_RLX))
+ if (odp_atomic_cas_acq_u32(&tklock->next_ticket,
+ &next, next + 1))
return 1;
}
return 0;
@@ -72,17 +65,15 @@ void odp_ticketlock_unlock(odp_ticketlock_t *ticketlock)
* 'cur_ticket', we don't need to do this with an (expensive)
* atomic RMW operation. Instead load-relaxed the current value
* and a store-release of the incremented value */
- uint32_t cur = _odp_atomic_u32_load_mm(&ticketlock->cur_ticket,
- _ODP_MEMMODEL_RLX);
- _odp_atomic_u32_store_mm(&ticketlock->cur_ticket, cur + 1,
- _ODP_MEMMODEL_RLS);
+ uint32_t cur = odp_atomic_load_u32(&ticketlock->cur_ticket);
+
+ odp_atomic_store_rel_u32(&ticketlock->cur_ticket, cur + 1);
#if defined __OCTEON__
odp_sync_stores(); /* SYNCW to flush write buffer */
#endif
}
-
int odp_ticketlock_is_locked(odp_ticketlock_t *ticketlock)
{
/* Compare 'cur_ticket' with 'next_ticket'. Ideally we should read
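
Lock acquisition now spins on a load-acquire of cur_ticket, and unlock
publishes with a plain load plus store-release, which is safe because only
the lock owner ever writes cur_ticket. The same protocol in plain C11
atomics, as a sketch (not the ODP implementation):

    #include <stdatomic.h>

    typedef struct {
            atomic_uint next_ticket;
            atomic_uint cur_ticket;
    } ticketlock_t;

    static void tl_lock(ticketlock_t *tl)
    {
            unsigned t = atomic_fetch_add_explicit(&tl->next_ticket, 1,
                                                   memory_order_relaxed);

            /* Acquire all stores from the previous owner */
            while (atomic_load_explicit(&tl->cur_ticket,
                                        memory_order_acquire) != t)
                    ; /* spin; a real lock would pause the CPU here */
    }

    static void tl_unlock(ticketlock_t *tl)
    {
            /* Only the owner writes cur_ticket: no atomic RMW needed */
            unsigned cur = atomic_load_explicit(&tl->cur_ticket,
                                                memory_order_relaxed);

            atomic_store_explicit(&tl->cur_ticket, cur + 1,
                                  memory_order_release);
    }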
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 01339ad86..fe3d40f21 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -27,12 +27,17 @@
#include <stdlib.h>
#include <time.h>
#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+
#include <odp/align.h>
#include <odp_align_internal.h>
#include <odp/atomic.h>
#include <odp_atomic_internal.h>
#include <odp/buffer.h>
#include <odp_buffer_inlines.h>
+#include <odp/cpu.h>
#include <odp/pool.h>
#include <odp_pool_internal.h>
#include <odp/debug.h>
@@ -42,7 +47,6 @@
#include <odp_internal.h>
#include <odp/queue.h>
#include <odp/shared_memory.h>
-#include <odp_spin_internal.h>
#include <odp/spinlock.h>
#include <odp/std_types.h>
#include <odp/sync.h>
@@ -159,7 +163,6 @@ typedef struct odp_timer_pool_s {
tick_buf_t *tick_buf; /* Expiration tick and timeout buffer */
odp_timer *timers; /* User pointer and queue handle (and lock) */
odp_atomic_u32_t high_wm;/* High watermark of allocated timers */
- odp_spinlock_t itimer_running;
odp_spinlock_t lock;
uint32_t num_alloc;/* Current number of allocated timers */
uint32_t first_free;/* 0..max_timers-1 => free timer */
@@ -169,6 +172,9 @@ typedef struct odp_timer_pool_s {
odp_shm_t shm;
timer_t timerid;
int notify_overrun;
+ pthread_t timer_thread; /* pthread_t of timer thread */
+ pid_t timer_thread_id; /* gettid() for timer thread */
+ int timer_thread_exit; /* request to exit for timer thread */
} odp_timer_pool;
#define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */
@@ -254,26 +260,48 @@ static odp_timer_pool *odp_timer_pool_new(
}
tp->tp_idx = tp_idx;
odp_spinlock_init(&tp->lock);
- odp_spinlock_init(&tp->itimer_running);
timer_pool[tp_idx] = tp;
if (tp->param.clk_src == ODP_CLOCK_CPU)
itimer_init(tp);
return tp;
}
+static void block_sigalarm(void)
+{
+ sigset_t sigset;
+
+ sigemptyset(&sigset);
+ sigaddset(&sigset, SIGALRM);
+ sigprocmask(SIG_BLOCK, &sigset, NULL);
+}
+
+static void stop_timer_thread(odp_timer_pool *tp)
+{
+ int ret;
+
+ ODP_DBG("stop\n");
+ tp->timer_thread_exit = 1;
+ ret = pthread_join(tp->timer_thread, NULL);
+ if (ret != 0)
+ ODP_ABORT("unable to join thread, err %d\n", ret);
+}
+
static void odp_timer_pool_del(odp_timer_pool *tp)
{
odp_spinlock_lock(&tp->lock);
timer_pool[tp->tp_idx] = NULL;
- /* Wait for itimer thread to stop running */
- odp_spinlock_lock(&tp->itimer_running);
+
+ /* Stop timer triggering */
+ if (tp->param.clk_src == ODP_CLOCK_CPU)
+ itimer_fini(tp);
+
+ stop_timer_thread(tp);
+
if (tp->num_alloc != 0) {
/* It's a programming error to attempt to destroy a */
/* timer pool which is still in use */
ODP_ABORT("%s: timers in use\n", tp->name);
}
- if (tp->param.clk_src == ODP_CLOCK_CPU)
- itimer_fini(tp);
int rc = odp_shm_free(tp->shm);
if (rc != 0)
ODP_ABORT("Failed to free shared memory (%d)\n", rc);
@@ -410,7 +438,7 @@ static bool timer_reset(uint32_t idx,
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
/* While lock is taken, spin using relaxed loads */
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
- odp_spin();
+ odp_cpu_pause();
/* The timer can be reset only if there is a timeout buffer */
if (odp_likely(tb->tmo_buf != ODP_BUFFER_INVALID)) {
@@ -457,7 +485,7 @@ static bool timer_reset(uint32_t idx,
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
/* While lock is taken, spin using relaxed loads */
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
- odp_spin();
+ odp_cpu_pause();
/* Swap in new buffer, save any old buffer */
old_buf = tb->tmo_buf;
@@ -498,7 +526,7 @@ static odp_buffer_t timer_cancel(odp_timer_pool *tp,
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
/* While lock is taken, spin using relaxed loads */
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
- odp_spin();
+ odp_cpu_pause();
/* Update the timer state (e.g. cancel the current timeout) */
tb->exp_tck.v = new_state;
@@ -552,7 +580,7 @@ static unsigned timer_expire(odp_timer_pool *tp, uint32_t idx, uint64_t tick)
while (_odp_atomic_flag_tas(IDX2LOCK(idx)))
/* While lock is taken, spin using relaxed loads */
while (_odp_atomic_flag_load(IDX2LOCK(idx)))
- odp_spin();
+ odp_cpu_pause();
/* Proper check for timer expired */
exp_tck = tb->exp_tck.v;
if (odp_likely(exp_tck <= tick)) {
@@ -632,10 +660,10 @@ static unsigned odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick)
* Functions that use Linux/POSIX per-process timers and related facilities
*****************************************************************************/
-static void timer_notify(sigval_t sigval)
+static void timer_notify(odp_timer_pool *tp)
{
int overrun;
- odp_timer_pool *tp = (odp_timer_pool *)sigval.sival_ptr;
+ int64_t prev_tick;
if (tp->notify_overrun) {
overrun = timer_getoverrun(tp->timerid);
@@ -653,32 +681,72 @@ static void timer_notify(sigval_t sigval)
for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / sizeof(array[0]))
PREFETCH(&array[i]);
#endif
- uint64_t prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick);
- /* Attempt to acquire the lock, check if the old value was clear */
- if (odp_spinlock_trylock(&tp->itimer_running)) {
- /* Scan timer array, looking for timers to expire */
- (void)odp_timer_pool_expire(tp, prev_tick);
- odp_spinlock_unlock(&tp->itimer_running);
- }
+ prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick);
+
+ /* Scan timer array, looking for timers to expire */
+ (void)odp_timer_pool_expire(tp, prev_tick);
+
/* Else skip scan of timers. cur_tick was updated and next itimer
* invocation will process older expiration ticks as well */
}
+static void *timer_thread(void *arg)
+{
+ odp_timer_pool *tp = (odp_timer_pool *)arg;
+ sigset_t sigset;
+ int ret;
+ struct timespec tmo;
+ siginfo_t si;
+
+ tp->timer_thread_id = (pid_t)syscall(SYS_gettid);
+
+ tmo.tv_sec = 0;
+ tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100;
+
+ sigemptyset(&sigset);
+ /* SIGALRM stays blocked; sigtimedwait() below consumes it */
+ sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+ sigaddset(&sigset, SIGALRM);
+
+ while (1) {
+ ret = sigtimedwait(&sigset, &si, &tmo);
+ if (tp->timer_thread_exit) {
+ tp->timer_thread_id = 0;
+ return NULL;
+ }
+ if (ret > 0)
+ timer_notify(tp);
+ }
+
+ return NULL;
+}
+
static void itimer_init(odp_timer_pool *tp)
{
struct sigevent sigev;
struct itimerspec ispec;
uint64_t res, sec, nsec;
+ int ret;
ODP_DBG("Creating POSIX timer for timer pool %s, period %"
PRIu64" ns\n", tp->name, tp->param.res_ns);
- memset(&sigev, 0, sizeof(sigev));
- memset(&ispec, 0, sizeof(ispec));
+ tp->timer_thread_id = 0;
+ ret = pthread_create(&tp->timer_thread, NULL, timer_thread, tp);
+ if (ret)
+ ODP_ABORT("unable to create timer thread\n");
+
+ /* wait for the thread to set tp->timer_thread_id */
+ do {
+ sched_yield();
+ } while (tp->timer_thread_id == 0);
- sigev.sigev_notify = SIGEV_THREAD;
- sigev.sigev_notify_function = timer_notify;
+ memset(&sigev, 0, sizeof(sigev));
+ sigev.sigev_notify = SIGEV_THREAD_ID;
sigev.sigev_value.sival_ptr = tp;
+ sigev._sigev_un._tid = tp->timer_thread_id;
+ sigev.sigev_signo = SIGALRM;
if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid))
ODP_ABORT("timer_create() returned error %s\n",
@@ -688,6 +756,7 @@ static void itimer_init(odp_timer_pool *tp)
sec = res / ODP_TIME_SEC_IN_NS;
nsec = res - sec * ODP_TIME_SEC_IN_NS;
+ memset(&ispec, 0, sizeof(ispec));
ispec.it_interval.tv_sec = (time_t)sec;
ispec.it_interval.tv_nsec = (long)nsec;
ispec.it_value.tv_sec = (time_t)sec;
@@ -898,6 +967,9 @@ int odp_timer_init_global(void)
ODP_DBG("Using lock-less timer implementation\n");
#endif
odp_atomic_init_u32(&num_timer_pools, 0);
+
+ block_sigalarm();
+
return 0;
}
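
The notification model changes from SIGEV_THREAD, where glibc spawns a helper
thread per expiry, to one long-lived thread that owns a blocked SIGALRM and
drains it with sigtimedwait(), targeted via SIGEV_THREAD_ID. A standalone
sketch of that pattern, with error handling trimmed:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <signal.h>
    #include <time.h>

    static volatile int exit_flag;

    /* Block SIGALRM before any threads are created, so only the
     * dedicated thread ever receives it. */
    static void block_sigalrm(void)
    {
            sigset_t set;

            sigemptyset(&set);
            sigaddset(&set, SIGALRM);
            pthread_sigmask(SIG_BLOCK, &set, NULL);
    }

    static void *timer_thread(void *arg)
    {
            sigset_t set;
            siginfo_t si;
            struct timespec tmo = { 0, 100 * 1000 * 1000 };

            (void)arg;
            sigemptyset(&set);
            sigaddset(&set, SIGALRM);

            while (!exit_flag) {
                    /* > 0 means SIGALRM arrived: a timer tick */
                    if (sigtimedwait(&set, &si, &tmo) > 0) {
                            /* scan and fire expired timers here */
                    }
            }

            return NULL;
    }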
diff --git a/platform/linux-generic/odp_weak.c b/platform/linux-generic/odp_weak.c
index 0c59138eb..21fb5ed10 100644
--- a/platform/linux-generic/odp_weak.c
+++ b/platform/linux-generic/odp_weak.c
@@ -12,7 +12,7 @@
#include <stdarg.h>
ODP_WEAK_SYMBOL ODP_PRINTF_FORMAT(2, 3)
-int odp_override_log(odp_log_level_e level, const char *fmt, ...)
+int odp_override_log(odp_log_level_t level, const char *fmt, ...)
{
va_list args;
int r;
diff --git a/platform/linux-generic/pktio/ethtool.c b/platform/linux-generic/pktio/ethtool.c
new file mode 100644
index 000000000..1f2943888
--- /dev/null
+++ b/platform/linux-generic/pktio/ethtool.c
@@ -0,0 +1,164 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <sys/ioctl.h>
+#include <netinet/in.h>
+#include <linux/sockios.h>
+#include <linux/if.h>
+#include <linux/ethtool.h>
+#include <errno.h>
+#include <net/if.h>
+
+#include <odp.h>
+#include <odp_packet_socket.h>
+#include <odp_debug_internal.h>
+
+static struct ethtool_gstrings *get_stringset(int fd, struct ifreq *ifr)
+{
+ struct {
+ struct ethtool_sset_info hdr;
+ uint32_t buf[1];
+ } sset_info;
+ struct ethtool_drvinfo drvinfo;
+ uint32_t len;
+ struct ethtool_gstrings *strings;
+ ptrdiff_t drvinfo_offset = offsetof(struct ethtool_drvinfo, n_stats);
+
+ sset_info.hdr.cmd = ETHTOOL_GSSET_INFO;
+ sset_info.hdr.reserved = 0;
+ sset_info.hdr.sset_mask = 1ULL << ETH_SS_STATS;
+ ifr->ifr_data = &sset_info;
+ if (ioctl(fd, SIOCETHTOOL, ifr) == 0) {
+ len = sset_info.hdr.sset_mask ? sset_info.hdr.data[0] : 0;
+ } else if (errno == EOPNOTSUPP && drvinfo_offset != 0) {
+ /* Fallback for old kernel versions */
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr->ifr_data = &drvinfo;
+ if (ioctl(fd, SIOCETHTOOL, ifr)) {
+ __odp_errno = errno;
+ ODP_ERR("Cannot get stats information\n");
+ return NULL;
+ }
+ len = *(uint32_t *)(void *)((char *)&drvinfo + drvinfo_offset);
+ } else {
+ __odp_errno = errno;
+ return NULL;
+ }
+
+ if (!len) {
+ ODP_ERR("len is zero");
+ return NULL;
+ }
+
+ strings = calloc(1, sizeof(*strings) + len * ETH_GSTRING_LEN);
+ if (!strings) {
+ ODP_ERR("alloc failed\n");
+ return NULL;
+ }
+
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = len;
+ ifr->ifr_data = strings;
+ if (ioctl(fd, SIOCETHTOOL, ifr)) {
+ __odp_errno = errno;
+ ODP_ERR("Cannot get stats information\n");
+ free(strings);
+ return NULL;
+ }
+
+ return strings;
+}
+
+static int ethtool_stats(int fd, struct ifreq *ifr, odp_pktio_stats_t *stats)
+{
+ struct ethtool_gstrings *strings;
+ struct ethtool_stats *estats;
+ unsigned int n_stats, i;
+ int err;
+ int cnts;
+
+ strings = get_stringset(fd, ifr);
+ if (!strings)
+ return -1;
+
+ n_stats = strings->len;
+ if (n_stats < 1) {
+ ODP_ERR("no stats available\n");
+ free(strings);
+ return -1;
+ }
+
+ estats = calloc(1, n_stats * sizeof(uint64_t) +
+ sizeof(struct ethtool_stats));
+ if (!estats) {
+ free(strings);
+ return -1;
+ }
+
+ estats->cmd = ETHTOOL_GSTATS;
+ estats->n_stats = n_stats;
+ ifr->ifr_data = estats;
+ err = ioctl(fd, SIOCETHTOOL, ifr);
+ if (err < 0) {
+ __odp_errno = errno;
+ free(strings);
+ free(estats);
+ return -1;
+ }
+
+ cnts = 0;
+ for (i = 0; i < n_stats; i++) {
+ char *cnt = (char *)&strings->data[i * ETH_GSTRING_LEN];
+ uint64_t val = estats->data[i];
+
+ if (!strcmp(cnt, "rx_octets")) {
+ stats->in_octets = val;
+ cnts++;
+ } else if (!strcmp(cnt, "rx_ucast_packets")) {
+ stats->in_ucast_pkts = val;
+ cnts++;
+ } else if (!strcmp(cnt, "rx_discards")) {
+ stats->in_discards = val;
+ cnts++;
+ } else if (!strcmp(cnt, "rx_errors")) {
+ stats->in_errors = val;
+ cnts++;
+ } else if (!strcmp(cnt, "tx_octets")) {
+ stats->out_octets = val;
+ cnts++;
+ } else if (!strcmp(cnt, "tx_ucast_packets")) {
+ stats->out_ucast_pkts = val;
+ cnts++;
+ } else if (!strcmp(cnt, "tx_discards")) {
+ stats->out_discards = val;
+ cnts++;
+ } else if (!strcmp(cnt, "tx_errors")) {
+ stats->out_errors = val;
+ cnts++;
+ }
+ }
+
+ free(strings);
+ free(estats);
+
+ /* The ethtool strings come from the kernel driver, and their
+ * names are not universal. This function needs to be updated
+ * if your driver uses different names for the counters */
+ if (cnts < 8)
+ return -1;
+
+ return 0;
+}
+
+int ethtool_stats_get_fd(int fd, const char *name, odp_pktio_stats_t *stats)
+{
+ struct ifreq ifr;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ return ethtool_stats(fd, &ifr, stats);
+}
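
ethtool_stats_get_fd() drives three ioctls: ETHTOOL_GSSET_INFO (with an
ETHTOOL_GDRVINFO fallback for old kernels) to learn the counter count,
ETHTOOL_GSTRINGS for the names, and ETHTOOL_GSTATS for the values, which are
then matched by name onto odp_pktio_stats_t fields. A hedged caller sketch;
the "eth0" name is illustrative, and any datagram socket can carry
SIOCETHTOOL:

    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <odp.h>

    int ethtool_stats_get_fd(int fd, const char *name,
                             odp_pktio_stats_t *stats);

    static void dump_eth0_stats(void)
    {
            odp_pktio_stats_t stats;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            if (fd < 0)
                    return;

            memset(&stats, 0, sizeof(stats));

            if (ethtool_stats_get_fd(fd, "eth0", &stats) == 0)
                    printf("in octets %" PRIu64 "\n", stats.in_octets);

            close(fd);
    }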
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 3e2645c66..e8203ff16 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -20,6 +20,8 @@
/* MAC address for the "loop" interface */
static const char pktio_loop_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x01};
+static int loopback_stats_reset(pktio_entry_t *pktio_entry);
+
static int loopback_open(odp_pktio_t id, pktio_entry_t *pktio_entry,
const char *devname, odp_pool_t pool ODP_UNUSED)
{
@@ -31,11 +33,13 @@ static int loopback_open(odp_pktio_t id, pktio_entry_t *pktio_entry,
snprintf(loopq_name, sizeof(loopq_name), "%" PRIu64 "-pktio_loopq",
odp_pktio_to_u64(id));
pktio_entry->s.pkt_loop.loopq =
- odp_queue_create(loopq_name, ODP_QUEUE_TYPE_POLL, NULL);
+ odp_queue_create(loopq_name, NULL);
if (pktio_entry->s.pkt_loop.loopq == ODP_QUEUE_INVALID)
return -1;
+ loopback_stats_reset(pktio_entry);
+
return 0;
}
@@ -63,8 +67,11 @@ static int loopback_recv(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
pkt_hdr = odp_packet_hdr(pkt);
packet_parse_reset(pkt_hdr);
packet_parse_l2(pkt_hdr);
- if (0 > _odp_packet_classifier(pktio_entry, pkt))
+ if (0 > _odp_packet_classifier(pktio_entry, pkt)) {
pkts[j++] = pkt;
+ pktio_entry->s.stats.in_octets +=
+ odp_packet_len(pkt);
+ }
}
nbr = j;
} else {
@@ -74,9 +81,13 @@ static int loopback_recv(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
pkt_hdr = odp_packet_hdr(pkts[i]);
packet_parse_reset(pkt_hdr);
packet_parse_l2(pkt_hdr);
+ pktio_entry->s.stats.in_octets +=
+ odp_packet_len(pkts[i]);
}
}
+ pktio_entry->s.stats.in_ucast_pkts += nbr;
+
return nbr;
}
@@ -86,12 +97,22 @@ static int loopback_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_tbl[],
odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
queue_entry_t *qentry;
unsigned i;
+ int ret;
+ uint32_t bytes = 0;
- for (i = 0; i < len; ++i)
+ for (i = 0; i < len; ++i) {
hdr_tbl[i] = odp_buf_to_hdr(_odp_packet_to_buffer(pkt_tbl[i]));
+ bytes += odp_packet_len(pkt_tbl[i]);
+ }
qentry = queue_to_qentry(pktio_entry->s.pkt_loop.loopq);
- return queue_enq_multi(qentry, hdr_tbl, len, 0);
+ ret = queue_enq_multi(qentry, hdr_tbl, len, 0);
+ if (ret > 0) {
+ pktio_entry->s.stats.out_ucast_pkts += ret;
+ pktio_entry->s.stats.out_octets += bytes;
+ }
+
+ return ret;
}
static int loopback_mtu_get(pktio_entry_t *pktio_entry ODP_UNUSED)
@@ -119,17 +140,41 @@ static int loopback_promisc_mode_get(pktio_entry_t *pktio_entry)
return pktio_entry->s.pkt_loop.promisc ? 1 : 0;
}
+static int loopback_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ memcpy(stats, &pktio_entry->s.stats, sizeof(odp_pktio_stats_t));
+ return 0;
+}
+
+static int loopback_stats_reset(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ memset(&pktio_entry->s.stats, 0, sizeof(odp_pktio_stats_t));
+ return 0;
+}
+
const pktio_if_ops_t loopback_pktio_ops = {
+ .name = "loop",
.init = NULL,
.term = NULL,
.open = loopback_open,
.close = loopback_close,
.start = NULL,
.stop = NULL,
+ .stats = loopback_stats,
+ .stats_reset = loopback_stats_reset,
.recv = loopback_recv,
.send = loopback_send,
.mtu_get = loopback_mtu_get,
.promisc_mode_set = loopback_promisc_mode_set,
.promisc_mode_get = loopback_promisc_mode_get,
- .mac_get = loopback_mac_addr_get
+ .mac_get = loopback_mac_addr_get,
+ .capability = NULL,
+ .input_queues_config = NULL,
+ .output_queues_config = NULL,
+ .in_queues = NULL,
+ .pktin_queues = NULL,
+ .pktout_queues = NULL,
+ .recv_queue = NULL,
+ .send_queue = NULL
};
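
With .stats and .stats_reset now wired into the loop interface, the software counters maintained in loopback_recv()/loopback_send() become reachable through the public API added in this release. A usage sketch, assuming pktio is a handle opened on the "loop" device:

	odp_pktio_stats_t stats;

	if (odp_pktio_stats(pktio, &stats) == 0)
		printf("in: %" PRIu64 " pkts, %" PRIu64 " octets\n",
		       stats.in_ucast_pkts, stats.in_octets);

	odp_pktio_stats_reset(pktio);	/* zero the counters again */
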
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 774e8137d..97fb6c3bc 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -8,9 +8,9 @@
#include <odp_posix_extensions.h>
+#include <odp_packet_io_internal.h>
#include <odp_packet_netmap.h>
#include <odp_packet_socket.h>
-#include <odp_packet_io_internal.h>
#include <odp_debug_internal.h>
#include <odp/helper/eth.h>
@@ -22,16 +22,21 @@
#include <odp_classification_inlines.h>
#include <odp_classification_internal.h>
+/* Disable netmap debug prints */
+#ifndef ND
+#define ND(_fmt, ...) do {} while (0)
+#define D(_fmt, ...) do {} while (0)
+#define RD(lps, format, ...) do {} while (0)
+#endif
+
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>
-static struct nm_desc mmap_desc; /** Used to store the mmap address;
- filled in first time, used for
- subsequent calls to nm_open */
-
-#define NM_OPEN_RETRIES 5
+#define NM_WAIT_TIMEOUT 5 /* netmap_wait_for_link() timeout in seconds */
#define NM_INJECT_RETRIES 10
+static int netmap_stats_reset(pktio_entry_t *pktio_entry);
+
static int netmap_do_ioctl(pktio_entry_t *pktio_entry, unsigned long cmd,
int subcmd)
{
@@ -68,7 +73,7 @@ static int netmap_do_ioctl(pktio_entry_t *pktio_entry, unsigned long cmd,
break;
case SIOCETHTOOL:
if (subcmd == ETHTOOL_GLINK)
- return !eval.data;
+ return eval.data;
break;
default:
break;
@@ -80,16 +85,119 @@ done:
return err;
}
-static int netmap_close(pktio_entry_t *pktio_entry)
+/**
+ * Map netmap rings to pktin/pktout queues
+ *
+ * @param rings Array of netmap descriptor rings
+ * @param num_queues Number of pktin/pktout queues
+ * @param num_rings Number of matching netmap rings
+ */
+static inline void map_netmap_rings(netmap_ring_t *rings,
+ unsigned num_queues, unsigned num_rings)
+{
+ struct netmap_ring_t *desc_ring;
+ unsigned rings_per_queue;
+ unsigned remainder;
+ unsigned mapped_rings;
+ unsigned i;
+ unsigned desc_id = 0;
+
+ rings_per_queue = num_rings / num_queues;
+ remainder = num_rings % num_queues;
+
+ if (remainder)
+ ODP_DBG("WARNING: Netmap rings mapped unevenly to queues\n");
+
+ for (i = 0; i < num_queues; i++) {
+ desc_ring = &rings[i].s;
+ if (i < remainder)
+ mapped_rings = rings_per_queue + 1;
+ else
+ mapped_rings = rings_per_queue;
+
+ desc_ring->first = desc_id;
+ desc_ring->cur = desc_id;
+ desc_ring->last = desc_ring->first + mapped_rings - 1;
+ desc_ring->num = mapped_rings;
+
+ desc_id = desc_ring->last + 1;
+ }
+}
+
+static int netmap_input_queues_config(pktio_entry_t *pktio_entry,
+ const odp_pktin_queue_param_t *p)
{
pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ odp_pktin_mode_t mode = pktio_entry->s.param.in_mode;
+ unsigned num_queues = p->num_queues;
+ odp_bool_t lockless;
+
+	/* The scheduler synchronizes input queue polls, so only a single
+	 * thread at a time polls a given queue. */
+ if (mode == ODP_PKTIN_MODE_SCHED)
+ lockless = 1;
+ else
+ lockless = (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE);
- if (pkt_nm->rx_desc != NULL) {
- nm_close(pkt_nm->rx_desc);
- mmap_desc.mem = NULL;
+ if (p->hash_enable && num_queues > 1) {
+ if (rss_conf_set_fd(pktio_entry->s.pkt_nm.sockfd,
+ pktio_entry->s.name, &p->hash_proto)) {
+ ODP_ERR("Failed to configure input hash\n");
+ return -1;
+ }
}
- if (pkt_nm->tx_desc != NULL)
- nm_close(pkt_nm->tx_desc);
+
+ pkt_nm->lockless_rx = lockless;
+
+ return 0;
+}
+
+static int netmap_output_queues_config(pktio_entry_t *pktio_entry,
+ const odp_pktout_queue_param_t *p)
+{
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+
+ pkt_nm->lockless_tx = (p->op_mode == ODP_PKTIO_OP_MT_UNSAFE);
+
+ return 0;
+}
+
+/**
+ * Close netmap descriptors
+ *
+ * The descriptors can be reopened with netmap_start().
+ *
+ * @param pktio_entry Packet IO entry
+ */
+static inline void netmap_close_descriptors(pktio_entry_t *pktio_entry)
+{
+ int i, j;
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+
+ for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+ for (j = 0; j < NM_MAX_DESC; j++) {
+ if (pkt_nm->rx_desc_ring[i].s.desc[j] != NULL) {
+ nm_close(pkt_nm->rx_desc_ring[i].s.desc[j]);
+ pkt_nm->rx_desc_ring[i].s.desc[j] = NULL;
+ }
+ }
+ for (j = 0; j < NM_MAX_DESC; j++) {
+ if (pkt_nm->tx_desc_ring[i].s.desc[j] != NULL) {
+ nm_close(pkt_nm->tx_desc_ring[i].s.desc[j]);
+ pkt_nm->tx_desc_ring[i].s.desc[j] = NULL;
+ }
+ }
+ }
+
+ pkt_nm->num_rx_desc_rings = 0;
+ pkt_nm->num_tx_desc_rings = 0;
+}
+
+static int netmap_close(pktio_entry_t *pktio_entry)
+{
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+
+ netmap_close_descriptors(pktio_entry);
if (pkt_nm->sockfd != -1 && close(pkt_nm->sockfd) != 0) {
__odp_errno = errno;
@@ -99,14 +207,57 @@ static int netmap_close(pktio_entry_t *pktio_entry)
return 0;
}
+static int netmap_link_status(pktio_entry_t *pktio_entry)
+{
+ return link_status_fd(pktio_entry->s.pkt_nm.sockfd,
+ pktio_entry->s.name);
+}
+
+/**
+ * Wait for netmap link to come up
+ *
+ * @param pktio_entry Packet IO entry
+ *
+ * @retval 1 link is up
+ * @retval 0 link is down
+ * @retval <0 on failure
+ */
+static inline int netmap_wait_for_link(pktio_entry_t *pktio_entry)
+{
+ int i;
+ int ret;
+
+ /* Wait for the link to come up */
+ for (i = 0; i <= NM_WAIT_TIMEOUT; i++) {
+ ret = netmap_link_status(pktio_entry);
+ if (ret == -1)
+ return -1;
+		/* nm_open() causes the physical link to reset. When using a
+		 * directly attached loopback cable there may be a small delay
+		 * until the opposing end's interface comes back up again.
+		 * Without this additional sleep the pktio validation tests
+		 * fail in that setup. */
+ sleep(1);
+ if (ret == 1)
+ return 1;
+ }
+ ODP_DBG("%s link is down\n", pktio_entry->s.name);
+ return 0;
+}
+
static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
const char *netdev, odp_pool_t pool)
{
- char ifname[IFNAMSIZ + 7]; /* netmap:<ifname> */
+ int i;
int err;
int sockfd;
- int i;
+ int mtu;
+ uint32_t buf_size;
pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ struct nm_desc *desc;
+ struct netmap_ring *ring;
+ odp_pktin_hash_proto_t hash_proto;
+ odp_pktio_stats_t cur_stats;
if (getenv("ODP_PKTIO_DISABLE_NETMAP"))
return -1;
@@ -126,25 +277,39 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
snprintf(pktio_entry->s.name, sizeof(pktio_entry->s.name), "%s",
netdev);
- snprintf(ifname, sizeof(ifname), "netmap:%s", netdev);
-
- if (mmap_desc.mem == NULL)
- pkt_nm->rx_desc = nm_open(ifname, NULL, NETMAP_NO_TX_POLL,
- NULL);
- else
- pkt_nm->rx_desc = nm_open(ifname, NULL, NETMAP_NO_TX_POLL |
- NM_OPEN_NO_MMAP, &mmap_desc);
- pkt_nm->tx_desc = nm_open(ifname, NULL, NM_OPEN_NO_MMAP, &mmap_desc);
+ snprintf(pkt_nm->nm_name, sizeof(pkt_nm->nm_name), "netmap:%s",
+ netdev);
- if (pkt_nm->rx_desc == NULL || pkt_nm->tx_desc == NULL) {
- ODP_ERR("nm_open(%s) failed\n", ifname);
+	/* Do a dummy open here to check that the netmap module is
+	 * available and to read capability info. */
+ desc = nm_open(pkt_nm->nm_name, NULL, 0, NULL);
+ if (desc == NULL) {
+ ODP_ERR("nm_open(%s) failed\n", pkt_nm->nm_name);
goto error;
}
-
- if (mmap_desc.mem == NULL) {
- mmap_desc.mem = pkt_nm->rx_desc->mem;
- mmap_desc.memsize = pkt_nm->rx_desc->memsize;
+ if (desc->nifp->ni_rx_rings > NM_MAX_DESC) {
+ ODP_ERR("Unable to store all rx rings\n");
+ nm_close(desc);
+ goto error;
}
+ pkt_nm->num_rx_rings = desc->nifp->ni_rx_rings;
+ pkt_nm->capa.max_input_queues = PKTIO_MAX_QUEUES;
+ if (desc->nifp->ni_rx_rings < PKTIO_MAX_QUEUES)
+ pkt_nm->capa.max_input_queues = desc->nifp->ni_rx_rings;
+
+ if (desc->nifp->ni_tx_rings > NM_MAX_DESC) {
+ ODP_ERR("Unable to store all tx rings\n");
+ nm_close(desc);
+ goto error;
+ }
+ pkt_nm->num_tx_rings = desc->nifp->ni_tx_rings;
+ pkt_nm->capa.max_output_queues = PKTIO_MAX_QUEUES;
+ if (desc->nifp->ni_tx_rings < PKTIO_MAX_QUEUES)
+ pkt_nm->capa.max_output_queues = desc->nifp->ni_tx_rings;
+
+ ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
+ buf_size = ring->nr_buf_size;
+ nm_close(desc);
sockfd = socket(AF_INET, SOCK_DGRAM, 0);
if (sockfd == -1) {
@@ -153,6 +318,22 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
}
pkt_nm->sockfd = sockfd;
+ /* Use either interface MTU (+ ethernet header length) or netmap buffer
+ * size as MTU, whichever is smaller. */
+	mtu = mtu_get_fd(pktio_entry->s.pkt_nm.sockfd, pktio_entry->s.name);
+	if (mtu < 0) {
+		ODP_ERR("Unable to read interface MTU\n");
+		goto error;
+	}
+	mtu += ODPH_ETHHDR_LEN;
+	pkt_nm->mtu = ((uint32_t)mtu < buf_size) ? (uint32_t)mtu : buf_size;
+
+ /* Check if RSS is supported. If not, set 'max_input_queues' to 1. */
+ if (rss_conf_get_supported_fd(sockfd, netdev, &hash_proto) == 0) {
+ ODP_DBG("RSS not supported\n");
+ pkt_nm->capa.max_input_queues = 1;
+ }
+
err = netmap_do_ioctl(pktio_entry, SIOCGIFFLAGS, 0);
if (err)
goto error;
@@ -163,29 +344,148 @@ static int netmap_open(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
if (err)
goto error;
- /* Wait for the link to come up */
- for (i = 0; i < NM_OPEN_RETRIES; i++) {
- err = netmap_do_ioctl(pktio_entry, SIOCETHTOOL, ETHTOOL_GLINK);
- /* nm_open() causes the physical link to reset. When using a
- * direct attached loopback cable there may be a small delay
- * until the opposing end's interface comes back up again. In
- * this case without the additional sleep pktio validation
- * tests fail. */
- sleep(1);
- if (err == 0)
- return 0;
+ for (i = 0; i < PKTIO_MAX_QUEUES; i++) {
+ odp_ticketlock_init(&pkt_nm->rx_desc_ring[i].s.lock);
+ odp_ticketlock_init(&pkt_nm->tx_desc_ring[i].s.lock);
}
- ODP_ERR("%s didn't come up\n", pktio_entry->s.name);
+
+ /* netmap uses only ethtool to get statistics counters */
+ err = ethtool_stats_get_fd(pktio_entry->s.pkt_nm.sockfd,
+ pktio_entry->s.name,
+ &cur_stats);
+ if (err) {
+ ODP_ERR("netmap pktio %s does not support statistics counters\n",
+ pktio_entry->s.name);
+ pktio_entry->s.stats_type = STATS_UNSUPPORTED;
+ } else {
+ pktio_entry->s.stats_type = STATS_ETHTOOL;
+ }
+
+ (void)netmap_stats_reset(pktio_entry);
+
+ return 0;
error:
netmap_close(pktio_entry);
return -1;
}
+static int netmap_start(pktio_entry_t *pktio_entry)
+{
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ netmap_ring_t *desc_ring;
+ struct nm_desc base_desc;
+ unsigned i;
+ unsigned j;
+ uint64_t flags;
+ odp_pktin_mode_t in_mode = pktio_entry->s.param.in_mode;
+ odp_pktout_mode_t out_mode = pktio_entry->s.param.out_mode;
+
+	/* If no pktin/pktout queues have been configured, configure one
+	 * for each direction. */
+ if (!pktio_entry->s.num_in_queue &&
+ in_mode != ODP_PKTIN_MODE_DISABLED) {
+ odp_pktin_queue_param_t param;
+
+ odp_pktin_queue_param_init(&param);
+ param.num_queues = 1;
+ if (odp_pktin_queue_config(pktio_entry->s.handle, &param))
+ return -1;
+ }
+ if (!pktio_entry->s.num_out_queue &&
+ out_mode == ODP_PKTOUT_MODE_DIRECT) {
+ odp_pktout_queue_param_t param;
+
+ odp_pktout_queue_param_init(&param);
+ param.num_queues = 1;
+ if (odp_pktout_queue_config(pktio_entry->s.handle, &param))
+ return -1;
+ }
+
+ if (pkt_nm->num_rx_desc_rings == pktio_entry->s.num_in_queue &&
+ pkt_nm->num_tx_desc_rings == pktio_entry->s.num_out_queue)
+ return (netmap_wait_for_link(pktio_entry) == 1) ? 0 : -1;
+
+ netmap_close_descriptors(pktio_entry);
+
+ /* Map pktin/pktout queues to netmap rings */
+ if (pktio_entry->s.num_in_queue)
+ map_netmap_rings(pkt_nm->rx_desc_ring,
+ pktio_entry->s.num_in_queue,
+ pkt_nm->num_rx_rings);
+ if (pktio_entry->s.num_out_queue)
+ /* Enough to map only one netmap tx ring per pktout queue */
+ map_netmap_rings(pkt_nm->tx_desc_ring,
+ pktio_entry->s.num_out_queue,
+ pktio_entry->s.num_out_queue);
+
+	memset(&base_desc, 0, sizeof(base_desc));
+	base_desc.self = &base_desc;
+	base_desc.mem = NULL;
+	memcpy(base_desc.req.nr_name, pktio_entry->s.name,
+	       sizeof(pktio_entry->s.name));
+	base_desc.req.nr_flags |= NR_REG_ONE_NIC;
+	base_desc.req.nr_ringid = 0;
+
+ /* Only the first rx descriptor does mmap */
+ desc_ring = pkt_nm->rx_desc_ring;
+ flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL;
+ desc_ring[0].s.desc[0] = nm_open(pkt_nm->nm_name, NULL, flags,
+ &base_desc);
+ if (desc_ring[0].s.desc[0] == NULL) {
+ ODP_ERR("nm_start(%s) failed\n", pkt_nm->nm_name);
+ goto error;
+ }
+	/* Open the remaining rx descriptors (one per netmap ring) */
+ flags = NM_OPEN_IFNAME | NETMAP_NO_TX_POLL | NM_OPEN_NO_MMAP;
+ for (i = 0; i < pktio_entry->s.num_in_queue; i++) {
+ for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
+ if (i == 0 && j == 0) /* First already opened */
+ continue;
+ base_desc.req.nr_ringid = j;
+ desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
+ flags, &base_desc);
+ if (desc_ring[i].s.desc[j] == NULL) {
+ ODP_ERR("nm_start(%s) failed\n",
+ pkt_nm->nm_name);
+ goto error;
+ }
+ }
+ }
+ /* Open tx descriptors */
+ desc_ring = pkt_nm->tx_desc_ring;
+ flags = NM_OPEN_IFNAME | NM_OPEN_NO_MMAP;
+ for (i = 0; i < pktio_entry->s.num_out_queue; i++) {
+ for (j = desc_ring[i].s.first; j <= desc_ring[i].s.last; j++) {
+ base_desc.req.nr_ringid = j;
+ desc_ring[i].s.desc[j] = nm_open(pkt_nm->nm_name, NULL,
+ flags, &base_desc);
+ if (desc_ring[i].s.desc[j] == NULL) {
+ ODP_ERR("nm_start(%s) failed\n",
+ pkt_nm->nm_name);
+ goto error;
+ }
+ }
+ }
+ pkt_nm->num_rx_desc_rings = pktio_entry->s.num_in_queue;
+ pkt_nm->num_tx_desc_rings = pktio_entry->s.num_out_queue;
+ /* Wait for the link to come up */
+ return (netmap_wait_for_link(pktio_entry) == 1) ? 0 : -1;
+
+error:
+ netmap_close_descriptors(pktio_entry);
+ return -1;
+}
+
+static int netmap_stop(pktio_entry_t *pktio_entry ODP_UNUSED)
+{
+ return 0;
+}
+
/**
* Create ODP packet from netmap packet
*
- * @param pktio_entry Packet IO handle
+ * @param pktio_entry Packet IO entry
* @param pkt_out Storage for new ODP packet handle
* @param buf Netmap buffer address
* @param len Netmap buffer length
@@ -234,37 +534,56 @@ static inline int netmap_pkt_to_odp(pktio_entry_t *pktio_entry,
}
packet_parse_l2(pkt_hdr);
+
+ pkt_hdr->input = pktio_entry->s.handle;
+
*pkt_out = pkt;
}
return 0;
}
-static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
- unsigned num)
+static int netmap_recv_queue(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num)
{
- struct netmap_ring *ring;
- struct nm_desc *desc = pktio_entry->s.pkt_nm.rx_desc;
- struct pollfd polld;
char *buf;
+ struct netmap_ring *ring;
+ struct nm_desc *desc;
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
+ unsigned first_desc_id = pkt_nm->rx_desc_ring[index].s.first;
+ unsigned last_desc_id = pkt_nm->rx_desc_ring[index].s.last;
+ unsigned desc_id;
+ int num_desc = pkt_nm->rx_desc_ring[index].s.num;
int i;
- int num_rings = desc->last_rx_ring - desc->first_rx_ring + 1;
- int ring_id = desc->cur_rx_ring;
- unsigned num_rx = 0;
+ int num_rx = 0;
+ int max_fd = 0;
uint32_t slot_id;
+ fd_set empty_rings;
- polld.fd = desc->fd;
- polld.events = POLLIN;
+ if (odp_unlikely(pktio_entry->s.state == STATE_STOP))
+ return 0;
- for (i = 0; i < num_rings && num_rx != num; i++) {
- ring_id = desc->cur_rx_ring + i;
+ FD_ZERO(&empty_rings);
- if (ring_id > desc->last_rx_ring)
- ring_id = desc->first_rx_ring;
+ if (!pkt_nm->lockless_rx)
+ odp_ticketlock_lock(&pkt_nm->rx_desc_ring[index].s.lock);
- ring = NETMAP_RXRING(desc->nifp, ring_id);
+ desc_id = pkt_nm->rx_desc_ring[index].s.cur;
- while (!nm_ring_empty(ring) && num_rx != num) {
+ for (i = 0; i < num_desc && num_rx != num; i++) {
+ if (desc_id > last_desc_id)
+ desc_id = first_desc_id;
+
+ desc = pkt_nm->rx_desc_ring[index].s.desc[desc_id];
+ ring = NETMAP_RXRING(desc->nifp, desc->cur_rx_ring);
+
+ while (num_rx != num) {
+ if (nm_ring_empty(ring)) {
+ FD_SET(desc->fd, &empty_rings);
+ if (desc->fd > max_fd)
+ max_fd = desc->fd;
+ break;
+ }
slot_id = ring->cur;
buf = NETMAP_BUF(ring, ring->slot[slot_id].buf_idx);
@@ -277,51 +596,127 @@ static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
ring->cur = nm_ring_next(ring, slot_id);
ring->head = ring->cur;
}
+ desc_id++;
}
- desc->cur_rx_ring = ring_id;
+ pkt_nm->rx_desc_ring[index].s.cur = desc_id;
+
+ if (num_rx != num) {
+ struct timeval tout = {.tv_sec = 0, .tv_usec = 0};
- if (num_rx == 0) {
- if (odp_unlikely(poll(&polld, 1, 0) < 0))
- ODP_ERR("RX: poll error\n");
+ if (select(max_fd + 1, &empty_rings, NULL, NULL, &tout) == -1)
+ ODP_ERR("RX: select error\n");
}
+ if (!pkt_nm->lockless_rx)
+ odp_ticketlock_unlock(&pkt_nm->rx_desc_ring[index].s.lock);
+
return num_rx;
}
-static int netmap_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+static int netmap_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
unsigned num)
{
+ unsigned i;
+ unsigned num_rx = 0;
+ unsigned queue_id = pktio_entry->s.pkt_nm.cur_rx_queue;
+ unsigned num_queues = pktio_entry->s.num_in_queue;
+ unsigned pkts_left = num;
+ odp_packet_t *pkt_table_cur = pkt_table;
+
+ for (i = 0; i < num_queues && num_rx != num; i++) {
+ if (queue_id >= num_queues)
+ queue_id = 0;
+
+ pkt_table_cur = &pkt_table[num_rx];
+ pkts_left = num - num_rx;
+
+ num_rx += netmap_recv_queue(pktio_entry, queue_id,
+ pkt_table_cur, pkts_left);
+ queue_id++;
+ }
+ pktio_entry->s.pkt_nm.cur_rx_queue = queue_id;
+
+ return num_rx;
+}
+
+static int netmap_send_queue(pktio_entry_t *pktio_entry, int index,
+ odp_packet_t pkt_table[], int num)
+{
+ pkt_netmap_t *pkt_nm = &pktio_entry->s.pkt_nm;
struct pollfd polld;
- struct nm_desc *nm_desc = pktio_entry->s.pkt_nm.tx_desc;
- unsigned i, nb_tx;
- uint8_t *frame;
- uint32_t frame_len;
+ struct nm_desc *desc;
+ struct netmap_ring *ring;
+ int i;
+ int nb_tx;
+ int desc_id;
+ odp_packet_t pkt;
+ uint32_t pkt_len;
+ unsigned slot_id;
+ char *buf;
+
+ if (odp_unlikely(pktio_entry->s.state == STATE_STOP))
+ return 0;
- polld.fd = nm_desc->fd;
+ /* Only one netmap tx ring per pktout queue */
+ desc_id = pkt_nm->tx_desc_ring[index].s.cur;
+ desc = pkt_nm->tx_desc_ring[index].s.desc[desc_id];
+ ring = NETMAP_TXRING(desc->nifp, desc->cur_tx_ring);
+
+ if (!pkt_nm->lockless_tx)
+ odp_ticketlock_lock(&pkt_nm->tx_desc_ring[index].s.lock);
+
+ polld.fd = desc->fd;
polld.events = POLLOUT;
for (nb_tx = 0; nb_tx < num; nb_tx++) {
- frame_len = 0;
- frame = odp_packet_l2_ptr(pkt_table[nb_tx], &frame_len);
+ pkt = pkt_table[nb_tx];
+ pkt_len = odp_packet_len(pkt);
+
+ if (pkt_len > pkt_nm->mtu) {
+ if (nb_tx == 0)
+ __odp_errno = EMSGSIZE;
+ break;
+ }
for (i = 0; i < NM_INJECT_RETRIES; i++) {
- if (nm_inject(nm_desc, frame, frame_len) == 0)
+ if (nm_ring_empty(ring)) {
poll(&polld, 1, 0);
- else
+ continue;
+ }
+ slot_id = ring->cur;
+ ring->slot[slot_id].flags = 0;
+ ring->slot[slot_id].len = pkt_len;
+
+ buf = NETMAP_BUF(ring, ring->slot[slot_id].buf_idx);
+
+ if (odp_packet_copydata_out(pkt, 0, pkt_len, buf)) {
+ i = NM_INJECT_RETRIES;
break;
- }
- if (odp_unlikely(i == NM_INJECT_RETRIES)) {
- ioctl(nm_desc->fd, NIOCTXSYNC, NULL);
+ }
+ ring->cur = nm_ring_next(ring, slot_id);
+ ring->head = ring->cur;
break;
}
+ if (i == NM_INJECT_RETRIES)
+ break;
+ odp_packet_free(pkt);
}
/* Send pending packets */
poll(&polld, 1, 0);
- for (i = 0; i < nb_tx; i++)
- odp_packet_free(pkt_table[i]);
+ if (!pkt_nm->lockless_tx)
+ odp_ticketlock_unlock(&pkt_nm->tx_desc_ring[index].s.lock);
+
+ if (odp_unlikely(nb_tx == 0 && __odp_errno != 0))
+ return -1;
return nb_tx;
}
+static int netmap_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned num)
+{
+ return netmap_send_queue(pktio_entry, 0, pkt_table, num);
+}
+
static int netmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
{
memcpy(mac_addr, pktio_entry->s.pkt_nm.if_mac, ETH_ALEN);
@@ -330,7 +725,7 @@ static int netmap_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
static int netmap_mtu_get(pktio_entry_t *pktio_entry)
{
- return mtu_get_fd(pktio_entry->s.pkt_nm.sockfd, pktio_entry->s.name);
+ return pktio_entry->s.pkt_nm.mtu;
}
static int netmap_promisc_mode_set(pktio_entry_t *pktio_entry,
@@ -346,19 +741,105 @@ static int netmap_promisc_mode_get(pktio_entry_t *pktio_entry)
pktio_entry->s.name);
}
+static int netmap_capability(pktio_entry_t *pktio_entry,
+ odp_pktio_capability_t *capa)
+{
+ *capa = pktio_entry->s.pkt_nm.capa;
+ return 0;
+}
+
+static int netmap_in_queues(pktio_entry_t *pktio_entry, odp_queue_t queues[],
+ int num)
+{
+ int i;
+ int num_queues = pktio_entry->s.num_in_queue;
+
+ if (queues && num > 0) {
+ for (i = 0; i < num && i < num_queues; i++)
+ queues[i] = pktio_entry->s.in_queue[i].queue;
+ }
+
+ return num_queues;
+}
+
+static int netmap_pktin_queues(pktio_entry_t *pktio_entry,
+ odp_pktin_queue_t queues[], int num)
+{
+ int i;
+ int num_queues = pktio_entry->s.num_in_queue;
+
+ if (queues && num > 0) {
+ for (i = 0; i < num && i < num_queues; i++)
+ queues[i] = pktio_entry->s.in_queue[i].pktin;
+ }
+
+ return num_queues;
+}
+
+static int netmap_pktout_queues(pktio_entry_t *pktio_entry,
+ odp_pktout_queue_t queues[], int num)
+{
+ int i;
+ int num_queues = pktio_entry->s.num_out_queue;
+
+ if (queues && num > 0) {
+ for (i = 0; i < num && i < num_queues; i++)
+ queues[i] = pktio_entry->s.out_queue[i].pktout;
+ }
+
+ return num_queues;
+}
+
+static int netmap_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+ }
+
+ return sock_stats_fd(pktio_entry,
+ stats,
+ pktio_entry->s.pkt_nm.sockfd);
+}
+
+static int netmap_stats_reset(pktio_entry_t *pktio_entry)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(&pktio_entry->s.stats, 0,
+ sizeof(odp_pktio_stats_t));
+ return 0;
+ }
+
+ return sock_stats_reset_fd(pktio_entry,
+ pktio_entry->s.pkt_nm.sockfd);
+}
+
const pktio_if_ops_t netmap_pktio_ops = {
+ .name = "netmap",
.init = NULL,
.term = NULL,
.open = netmap_open,
.close = netmap_close,
- .start = NULL,
- .stop = NULL,
+ .start = netmap_start,
+ .stop = netmap_stop,
+ .link_status = netmap_link_status,
+ .stats = netmap_stats,
+ .stats_reset = netmap_stats_reset,
.recv = netmap_recv,
.send = netmap_send,
.mtu_get = netmap_mtu_get,
.promisc_mode_set = netmap_promisc_mode_set,
.promisc_mode_get = netmap_promisc_mode_get,
- .mac_get = netmap_mac_addr_get
+ .mac_get = netmap_mac_addr_get,
+ .capability = netmap_capability,
+ .input_queues_config = netmap_input_queues_config,
+ .output_queues_config = netmap_output_queues_config,
+ .in_queues = netmap_in_queues,
+ .pktin_queues = netmap_pktin_queues,
+ .pktout_queues = netmap_pktout_queues,
+ .recv_queue = netmap_recv_queue,
+ .send_queue = netmap_send_queue
};
#endif /* ODP_NETMAP */
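
netmap_input_queues_config() above expects the application to size its input queues and request hashing before the interface is started. A configuration sketch mirroring the odp_l2fwd changes later in this patch, with pktio assumed to be an opened handle:

	odp_pktin_queue_param_t param;

	odp_pktin_queue_param_init(&param);
	param.num_queues = 2;			/* spread rx over two queues */
	param.hash_enable = 1;			/* needs RSS support in the NIC */
	param.hash_proto.proto.ipv4_udp = 1;

	if (odp_pktin_queue_config(pktio, &param))
		LOG_ERR("Error: input queue config failed\n");
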
diff --git a/platform/linux-generic/pktio/pcap.c b/platform/linux-generic/pktio/pcap.c
index 6511132b6..c22cce09a 100644
--- a/platform/linux-generic/pktio/pcap.c
+++ b/platform/linux-generic/pktio/pcap.c
@@ -49,6 +49,8 @@
#define PKTIO_PCAP_MTU (64 * 1024)
static const char pcap_mac[] = {0x02, 0xe9, 0x34, 0x80, 0x73, 0x04};
+static int pcapif_stats_reset(pktio_entry_t *pktio_entry);
+
static int _pcapif_parse_devname(pkt_pcap_t *pcap, const char *devname)
{
char *tok;
@@ -154,6 +156,8 @@ static int pcapif_init(odp_pktio_t id ODP_UNUSED, pktio_entry_t *pktio_entry,
if (ret == 0 && (!pcap->rx && !pcap->tx_dump))
ret = -1;
+ (void)pcapif_stats_reset(pktio_entry);
+
return ret;
}
@@ -249,6 +253,7 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
}
packet_parse_l2(pkt_hdr);
+ pktio_entry->s.stats.in_octets += pkt_hdr->frame_len;
pkts[i] = pkt;
pkt = ODP_PACKET_INVALID;
@@ -259,6 +264,7 @@ static int pcapif_recv_pkt(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
if (pkt != ODP_PACKET_INVALID)
odp_packet_free(pkt);
+ pktio_entry->s.stats.in_ucast_pkts += i;
return i;
}
@@ -291,7 +297,9 @@ static int pcapif_send_pkt(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
ODP_ASSERT(pktio_entry->s.state == STATE_START);
for (i = 0; i < len; ++i) {
- if (odp_packet_len(pkts[i]) > PKTIO_PCAP_MTU) {
+ int pkt_len = odp_packet_len(pkts[i]);
+
+ if (pkt_len > PKTIO_PCAP_MTU) {
if (i == 0)
return -1;
break;
@@ -300,9 +308,12 @@ static int pcapif_send_pkt(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
if (_pcapif_dump_pkt(pcap, pkts[i]) != 0)
break;
+ pktio_entry->s.stats.out_octets += pkt_len;
odp_packet_free(pkts[i]);
}
+ pktio_entry->s.stats.out_ucast_pkts += i;
+
return i;
}
@@ -367,13 +378,37 @@ static int pcapif_promisc_mode_get(pktio_entry_t *pktio_entry)
return pktio_entry->s.pkt_pcap.promisc;
}
+static int pcapif_stats_reset(pktio_entry_t *pktio_entry)
+{
+ memset(&pktio_entry->s.stats, 0, sizeof(odp_pktio_stats_t));
+ return 0;
+}
+
+static int pcapif_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ memcpy(stats, &pktio_entry->s.stats, sizeof(odp_pktio_stats_t));
+ return 0;
+}
+
const pktio_if_ops_t pcap_pktio_ops = {
+ .name = "pcap",
.open = pcapif_init,
.close = pcapif_close,
+ .stats = pcapif_stats,
+ .stats_reset = pcapif_stats_reset,
.recv = pcapif_recv_pkt,
.send = pcapif_send_pkt,
.mtu_get = pcapif_mtu_get,
.promisc_mode_set = pcapif_promisc_mode_set,
.promisc_mode_get = pcapif_promisc_mode_get,
- .mac_get = pcapif_mac_addr_get
+ .mac_get = pcapif_mac_addr_get,
+ .capability = NULL,
+ .input_queues_config = NULL,
+ .output_queues_config = NULL,
+ .in_queues = NULL,
+ .pktin_queues = NULL,
+ .pktout_queues = NULL,
+ .recv_queue = NULL,
+ .send_queue = NULL
};
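
pcapif_send_pkt() returns -1 only when the very first packet exceeds PKTIO_PCAP_MTU; otherwise it returns a partial count and ownership of the unsent packets stays with the caller. A caller-side sketch of that contract, following the same tx_drops pattern as odp_l2fwd below:

	int i;
	int sent = odp_pktio_send(pktio, pkt_tbl, num);

	if (sent < 0)
		sent = 0;			/* nothing was consumed */

	for (i = sent; i < num; i++)		/* rejected packets are still ours */
		odp_packet_free(pkt_tbl[i]);
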
diff --git a/platform/linux-generic/pktio/pktio_common.c b/platform/linux-generic/pktio/pktio_common.c
index be9db330f..1adc5f2ea 100644
--- a/platform/linux-generic/pktio/pktio_common.c
+++ b/platform/linux-generic/pktio/pktio_common.c
@@ -51,3 +51,75 @@ int _odp_packet_cls_enq(pktio_entry_t *pktio_entry,
return 0;
}
+
+int sock_stats_reset_fd(pktio_entry_t *pktio_entry, int fd)
+{
+ int err = 0;
+ odp_pktio_stats_t cur_stats;
+
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(&pktio_entry->s.stats, 0,
+ sizeof(odp_pktio_stats_t));
+ return 0;
+ }
+
+ memset(&cur_stats, 0, sizeof(odp_pktio_stats_t));
+
+ if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
+ (void)ethtool_stats_get_fd(fd,
+ pktio_entry->s.name,
+ &cur_stats);
+ } else if (pktio_entry->s.stats_type == STATS_SYSFS) {
+ err = sysfs_stats(pktio_entry, &cur_stats);
+ if (err != 0)
+ ODP_ERR("stats error\n");
+ }
+
+ if (err == 0)
+ memcpy(&pktio_entry->s.stats, &cur_stats,
+ sizeof(odp_pktio_stats_t));
+
+ return err;
+}
+
+int sock_stats_fd(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats,
+ int fd)
+{
+ odp_pktio_stats_t cur_stats;
+ int ret = 0;
+
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED)
+ return 0;
+
+ memset(&cur_stats, 0, sizeof(odp_pktio_stats_t));
+ if (pktio_entry->s.stats_type == STATS_ETHTOOL) {
+ (void)ethtool_stats_get_fd(fd,
+ pktio_entry->s.name,
+ &cur_stats);
+ } else if (pktio_entry->s.stats_type == STATS_SYSFS) {
+ sysfs_stats(pktio_entry, &cur_stats);
+ }
+
+ stats->in_octets = cur_stats.in_octets -
+ pktio_entry->s.stats.in_octets;
+ stats->in_ucast_pkts = cur_stats.in_ucast_pkts -
+ pktio_entry->s.stats.in_ucast_pkts;
+ stats->in_discards = cur_stats.in_discards -
+ pktio_entry->s.stats.in_discards;
+ stats->in_errors = cur_stats.in_errors -
+ pktio_entry->s.stats.in_errors;
+ stats->in_unknown_protos = cur_stats.in_unknown_protos -
+ pktio_entry->s.stats.in_unknown_protos;
+
+ stats->out_octets = cur_stats.out_octets -
+ pktio_entry->s.stats.out_octets;
+ stats->out_ucast_pkts = cur_stats.out_ucast_pkts -
+ pktio_entry->s.stats.out_ucast_pkts;
+ stats->out_discards = cur_stats.out_discards -
+ pktio_entry->s.stats.out_discards;
+ stats->out_errors = cur_stats.out_errors -
+ pktio_entry->s.stats.out_errors;
+
+ return ret;
+}
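
sock_stats_reset_fd() and sock_stats_fd() together implement a snapshot/delta scheme: reset records the raw kernel counters, and a later read reports the difference, so the hardware counters never have to be writable. Per counter the pair reduces to this sketch:

	static uint64_t snapshot;		/* stored by the reset call */

	static void demo_reset(uint64_t raw)
	{
		snapshot = raw;
	}

	static uint64_t demo_read(uint64_t raw)
	{
		/* e.g. raw 1500 after a snapshot of 1000 reports 500 */
		return raw - snapshot;
	}
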
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index 1417fb421..ed338c2c6 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -29,6 +29,8 @@
#include <sys/ioctl.h>
#include <errno.h>
#include <sys/syscall.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
#include <odp.h>
#include <odp_packet_socket.h>
@@ -44,6 +46,8 @@
#include <odp/helper/eth.h>
#include <odp/helper/ip.h>
+static int sock_stats_reset(pktio_entry_t *pktio_entry);
+
/** Provide a sendmmsg wrapper for systems with no libc or kernel support.
* As it is implemented as a weak symbol, it has zero effect on systems
* with both.
@@ -191,6 +195,255 @@ int promisc_mode_get_fd(int fd, const char *name)
return !!(ifr.ifr_flags & IFF_PROMISC);
}
+int link_status_fd(int fd, const char *name)
+{
+ struct ifreq ifr;
+ int ret;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+ ret = ioctl(fd, SIOCGIFFLAGS, &ifr);
+ if (ret < 0) {
+ __odp_errno = errno;
+ ODP_DBG("ioctl(SIOCGIFFLAGS): %s: \"%s\".\n", strerror(errno),
+ ifr.ifr_name);
+ return -1;
+ }
+
+ return !!(ifr.ifr_flags & IFF_RUNNING);
+}
+
+/**
+ * Get enabled hash options of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param flow_type Packet flow type
+ * @param[out] options  Enabled hash options
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+static inline int get_rss_hash_options(int fd, const char *name,
+ uint32_t flow_type, uint64_t *options)
+{
+ struct ifreq ifr;
+ struct ethtool_rxnfc rsscmd;
+
+ memset(&rsscmd, 0, sizeof(rsscmd));
+ *options = 0;
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ rsscmd.cmd = ETHTOOL_GRXFH;
+ rsscmd.flow_type = flow_type;
+
+ ifr.ifr_data = (caddr_t)&rsscmd;
+
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
+ return -1;
+
+ *options = rsscmd.data;
+ return 0;
+}
+
+int rss_conf_get_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto)
+{
+ uint64_t options;
+ int rss_enabled = 0;
+
+ memset(hash_proto, 0, sizeof(odp_pktin_hash_proto_t));
+
+ get_rss_hash_options(fd, name, IPV4_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST)) {
+ hash_proto->proto.ipv4 = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, TCP_V4_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv4_tcp = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, UDP_V4_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv4_udp = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, IPV6_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST)) {
+ hash_proto->proto.ipv6 = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, TCP_V6_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv6_tcp = 1;
+ rss_enabled++;
+ }
+ get_rss_hash_options(fd, name, UDP_V6_FLOW, &options);
+ if ((options & RXH_IP_SRC) && (options & RXH_IP_DST) &&
+ (options & RXH_L4_B_0_1) && (options & RXH_L4_B_2_3)) {
+ hash_proto->proto.ipv6_udp = 1;
+ rss_enabled++;
+ }
+ return rss_enabled;
+}
+
+/**
+ * Set hash options of a packet socket
+ *
+ * @param fd Socket file descriptor
+ * @param name Interface name
+ * @param flow_type Packet flow type
+ * @param options Hash options
+ *
+ * @retval 0 on success
+ * @retval <0 on failure
+ */
+static inline int set_rss_hash(int fd, const char *name,
+ uint32_t flow_type, uint64_t options)
+{
+ struct ifreq ifr;
+ struct ethtool_rxnfc rsscmd;
+
+ memset(&rsscmd, 0, sizeof(rsscmd));
+
+ snprintf(ifr.ifr_name, IF_NAMESIZE, "%s", name);
+
+ rsscmd.cmd = ETHTOOL_SRXFH;
+ rsscmd.flow_type = flow_type;
+ rsscmd.data = options;
+
+ ifr.ifr_data = (caddr_t)&rsscmd;
+
+ if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
+ return -1;
+
+ return 0;
+}
+
+int rss_conf_set_fd(int fd, const char *name,
+ const odp_pktin_hash_proto_t *hash_proto)
+{
+ uint64_t options;
+ odp_pktin_hash_proto_t cur_hash;
+
+ /* Compare to currently set hash protocols */
+ rss_conf_get_fd(fd, name, &cur_hash);
+
+ if (hash_proto->proto.ipv4_udp && !cur_hash.proto.ipv4_udp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, UDP_V4_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv4_tcp && !cur_hash.proto.ipv4_tcp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, TCP_V4_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv6_udp && !cur_hash.proto.ipv6_udp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, UDP_V6_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv6_tcp && !cur_hash.proto.ipv6_tcp) {
+ options = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ if (set_rss_hash(fd, name, TCP_V6_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv4 && !cur_hash.proto.ipv4) {
+ options = RXH_IP_SRC | RXH_IP_DST;
+ if (set_rss_hash(fd, name, IPV4_FLOW, options))
+ return -1;
+ }
+ if (hash_proto->proto.ipv6 && !cur_hash.proto.ipv6) {
+ options = RXH_IP_SRC | RXH_IP_DST;
+ if (set_rss_hash(fd, name, IPV6_FLOW, options))
+ return -1;
+ }
+ return 0;
+}
+
+int rss_conf_get_supported_fd(int fd, const char *name,
+ odp_pktin_hash_proto_t *hash_proto)
+{
+ uint64_t options;
+ int rss_supported = 0;
+
+ memset(hash_proto, 0, sizeof(odp_pktin_hash_proto_t));
+
+ if (!get_rss_hash_options(fd, name, IPV4_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, IPV4_FLOW, options)) {
+ hash_proto->proto.ipv4 = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, TCP_V4_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, TCP_V4_FLOW, options)) {
+ hash_proto->proto.ipv4_tcp = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, UDP_V4_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, UDP_V4_FLOW, options)) {
+ hash_proto->proto.ipv4_udp = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, IPV6_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, IPV6_FLOW, options)) {
+ hash_proto->proto.ipv6 = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, TCP_V6_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, TCP_V6_FLOW, options)) {
+ hash_proto->proto.ipv6_tcp = 1;
+ rss_supported++;
+ }
+ }
+ if (!get_rss_hash_options(fd, name, UDP_V6_FLOW, &options)) {
+ if (!set_rss_hash(fd, name, UDP_V6_FLOW, options)) {
+ hash_proto->proto.ipv6_udp = 1;
+ rss_supported++;
+ }
+ }
+ return rss_supported;
+}
+
+void rss_conf_print(const odp_pktin_hash_proto_t *hash_proto)
+{
+	int max_len = 512;
+ char str[max_len];
+ int len = 0;
+ int n = max_len - 1;
+
+ len += snprintf(&str[len], n - len, "RSS conf\n");
+
+ if (hash_proto->proto.ipv4)
+ len += snprintf(&str[len], n - len,
+ " IPV4\n");
+ if (hash_proto->proto.ipv4_tcp)
+ len += snprintf(&str[len], n - len,
+ " IPV4 TCP\n");
+ if (hash_proto->proto.ipv4_udp)
+ len += snprintf(&str[len], n - len,
+ " IPV4 UDP\n");
+ if (hash_proto->proto.ipv6)
+ len += snprintf(&str[len], n - len,
+ " IPV6\n");
+ if (hash_proto->proto.ipv6_tcp)
+ len += snprintf(&str[len], n - len,
+ " IPV6 TCP\n");
+ if (hash_proto->proto.ipv6_udp)
+ len += snprintf(&str[len], n - len,
+ " IPV6 UDP\n");
+ str[len] = '\0';
+
+ ODP_PRINT("\n%s\n", str);
+}
+
/*
* ODP_PACKET_SOCKET_MMSG:
*/
@@ -223,6 +476,7 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
char shm_name[ODP_SHM_NAME_LEN];
pkt_sock_t *pkt_sock = &pktio_entry->s.pkt_sock;
uint8_t *addr;
+ odp_pktio_stats_t cur_stats;
/* Init pktio entry */
memset(pkt_sock, 0, sizeof(*pkt_sock));
@@ -281,6 +535,27 @@ static int sock_setup_pkt(pktio_entry_t *pktio_entry, const char *netdev,
ODP_ERR("bind(to IF): %s\n", strerror(errno));
goto error;
}
+
+ err = ethtool_stats_get_fd(pktio_entry->s.pkt_sock.sockfd,
+ pktio_entry->s.name,
+ &cur_stats);
+ if (err != 0) {
+ err = sysfs_stats(pktio_entry, &cur_stats);
+ if (err != 0) {
+ pktio_entry->s.stats_type = STATS_UNSUPPORTED;
+ ODP_DBG("pktio: %s unsupported stats\n",
+ pktio_entry->s.name);
+ } else {
+ pktio_entry->s.stats_type = STATS_SYSFS;
+ }
+ } else {
+ pktio_entry->s.stats_type = STATS_ETHTOOL;
+ }
+
+ err = sock_stats_reset(pktio_entry);
+ if (err != 0)
+ goto error;
+
return 0;
error:
@@ -531,17 +806,60 @@ static int sock_promisc_mode_get(pktio_entry_t *pktio_entry)
pktio_entry->s.name);
}
+static int sock_link_status(pktio_entry_t *pktio_entry)
+{
+ return link_status_fd(pktio_entry->s.pkt_sock.sockfd,
+ pktio_entry->s.name);
+}
+
+static int sock_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+ }
+
+ return sock_stats_fd(pktio_entry,
+ stats,
+ pktio_entry->s.pkt_sock.sockfd);
+}
+
+static int sock_stats_reset(pktio_entry_t *pktio_entry)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(&pktio_entry->s.stats, 0,
+ sizeof(odp_pktio_stats_t));
+ return 0;
+ }
+
+ return sock_stats_reset_fd(pktio_entry,
+ pktio_entry->s.pkt_sock.sockfd);
+}
+
const pktio_if_ops_t sock_mmsg_pktio_ops = {
+ .name = "socket",
.init = NULL,
.term = NULL,
.open = sock_mmsg_open,
.close = sock_close,
.start = NULL,
.stop = NULL,
+ .stats = sock_stats,
+ .stats_reset = sock_stats_reset,
.recv = sock_mmsg_recv,
.send = sock_mmsg_send,
.mtu_get = sock_mtu_get,
.promisc_mode_set = sock_promisc_mode_set,
.promisc_mode_get = sock_promisc_mode_get,
- .mac_get = sock_mac_addr_get
+ .mac_get = sock_mac_addr_get,
+ .link_status = sock_link_status,
+ .capability = NULL,
+ .input_queues_config = NULL,
+ .output_queues_config = NULL,
+ .in_queues = NULL,
+ .pktin_queues = NULL,
+ .pktout_queues = NULL,
+ .recv_queue = NULL,
+ .send_queue = NULL
};
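
The RSS helpers above detect support by reading the current hash options and writing them back unchanged. A probing sketch, with sockfd and ifname assumed to come from an already opened socket pktio:

	odp_pktin_hash_proto_t hash;

	if (rss_conf_get_supported_fd(sockfd, ifname, &hash) == 0)
		printf("RSS not supported on %s\n", ifname);
	else
		rss_conf_print(&hash);
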
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 1d2eb3c9c..07fbee63e 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -444,6 +444,7 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
{
int if_idx;
int ret = 0;
+ odp_pktio_stats_t cur_stats;
if (getenv("ODP_PKTIO_DISABLE_SOCKET_MMAP"))
return -1;
@@ -503,6 +504,27 @@ static int sock_mmap_open(odp_pktio_t id ODP_UNUSED,
goto error;
}
+ ret = ethtool_stats_get_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
+ pktio_entry->s.name,
+ &cur_stats);
+ if (ret != 0) {
+ ret = sysfs_stats(pktio_entry, &cur_stats);
+ if (ret != 0) {
+ pktio_entry->s.stats_type = STATS_UNSUPPORTED;
+ ODP_DBG("pktio: %s unsupported stats\n",
+ pktio_entry->s.name);
+ } else {
+ pktio_entry->s.stats_type = STATS_SYSFS;
+ }
+ } else {
+ pktio_entry->s.stats_type = STATS_ETHTOOL;
+ }
+
+ ret = sock_stats_reset_fd(pktio_entry,
+ pktio_entry->s.pkt_sock_mmap.sockfd);
+ if (ret != 0)
+ goto error;
+
return 0;
error:
@@ -553,17 +575,60 @@ static int sock_mmap_promisc_mode_get(pktio_entry_t *pktio_entry)
pktio_entry->s.name);
}
+static int sock_mmap_link_status(pktio_entry_t *pktio_entry)
+{
+ return link_status_fd(pktio_entry->s.pkt_sock_mmap.sockfd,
+ pktio_entry->s.name);
+}
+
+static int sock_mmap_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(stats, 0, sizeof(*stats));
+ return 0;
+ }
+
+ return sock_stats_fd(pktio_entry,
+ stats,
+ pktio_entry->s.pkt_sock_mmap.sockfd);
+}
+
+static int sock_mmap_stats_reset(pktio_entry_t *pktio_entry)
+{
+ if (pktio_entry->s.stats_type == STATS_UNSUPPORTED) {
+ memset(&pktio_entry->s.stats, 0,
+ sizeof(odp_pktio_stats_t));
+ return 0;
+ }
+
+ return sock_stats_reset_fd(pktio_entry,
+ pktio_entry->s.pkt_sock_mmap.sockfd);
+}
+
const pktio_if_ops_t sock_mmap_pktio_ops = {
+ .name = "socket_mmap",
.init = NULL,
.term = NULL,
.open = sock_mmap_open,
.close = sock_mmap_close,
.start = NULL,
.stop = NULL,
+ .stats = sock_mmap_stats,
+ .stats_reset = sock_mmap_stats_reset,
.recv = sock_mmap_recv,
.send = sock_mmap_send,
.mtu_get = sock_mmap_mtu_get,
.promisc_mode_set = sock_mmap_promisc_mode_set,
.promisc_mode_get = sock_mmap_promisc_mode_get,
- .mac_get = sock_mmap_mac_addr_get
+ .mac_get = sock_mmap_mac_addr_get,
+ .link_status = sock_mmap_link_status,
+ .capability = NULL,
+ .input_queues_config = NULL,
+ .output_queues_config = NULL,
+ .in_queues = NULL,
+ .pktin_queues = NULL,
+ .pktout_queues = NULL,
+ .recv_queue = NULL,
+ .send_queue = NULL
};
diff --git a/platform/linux-generic/pktio/sysfs.c b/platform/linux-generic/pktio/sysfs.c
new file mode 100644
index 000000000..4e5c02837
--- /dev/null
+++ b/platform/linux-generic/pktio/sysfs.c
@@ -0,0 +1,76 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp.h>
+#include <odp_packet_io_internal.h>
+#include <errno.h>
+#include <string.h>
+
+static int sysfs_get_val(const char *fname, uint64_t *val)
+{
+ FILE *file;
+ char str[128];
+ int ret = -1;
+
+ file = fopen(fname, "rt");
+ if (file == NULL) {
+ __odp_errno = errno;
+		/* Do not print an error if the kernel driver simply does
+		 * not provide this counter via sysfs. */
+		if (errno != ENOENT)
+			ODP_ERR("fopen %s: %s\n", fname, strerror(errno));
+		*val = 0;
+		return 0;
+ }
+
+ if (fgets(str, sizeof(str), file) != NULL)
+		ret = sscanf(str, "%" SCNu64, val);
+
+ (void)fclose(file);
+
+ if (ret != 1) {
+ ODP_ERR("read %s\n", fname);
+ return -1;
+ }
+
+ return 0;
+}
+
+int sysfs_stats(pktio_entry_t *pktio_entry,
+ odp_pktio_stats_t *stats)
+{
+ char fname[256];
+ const char *dev = pktio_entry->s.name;
+ int ret = 0;
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_bytes", dev);
+ ret -= sysfs_get_val(fname, &stats->in_octets);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_packets", dev);
+ ret -= sysfs_get_val(fname, &stats->in_ucast_pkts);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_droppped", dev);
+ ret -= sysfs_get_val(fname, &stats->in_discards);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/rx_errors", dev);
+ ret -= sysfs_get_val(fname, &stats->in_errors);
+
+ /* stats->in_unknown_protos is not supported in sysfs */
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_bytes", dev);
+ ret -= sysfs_get_val(fname, &stats->out_octets);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_packets", dev);
+ ret -= sysfs_get_val(fname, &stats->out_ucast_pkts);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_dropped", dev);
+ ret -= sysfs_get_val(fname, &stats->out_discards);
+
+ sprintf(fname, "/sys/class/net/%s/statistics/tx_errors", dev);
+ ret -= sysfs_get_val(fname, &stats->out_errors);
+
+ return ret;
+}
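
sysfs_stats() builds one path per counter under /sys/class/net/<dev>/statistics/. sysfs_get_val() is file-local, so the following is only an illustration of the pattern; "eth0" is a placeholder interface name:

	uint64_t val = 0;
	char fname[256];

	snprintf(fname, sizeof(fname),
		 "/sys/class/net/%s/statistics/rx_bytes", "eth0");

	if (sysfs_get_val(fname, &val) == 0)
		printf("rx_bytes: %" PRIu64 "\n", val);
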
diff --git a/platform/linux-generic/test/Makefile.am b/platform/linux-generic/test/Makefile.am
index e62987297..db923b8c7 100644
--- a/platform/linux-generic/test/Makefile.am
+++ b/platform/linux-generic/test/Makefile.am
@@ -6,6 +6,8 @@ ODP_MODULES = pktio
if test_vald
TESTS = pktio/pktio_run \
pktio/pktio_run_tap \
+ ${top_builddir}/test/validation/atomic/atomic_main$(EXEEXT) \
+ ${top_builddir}/test/validation/barrier/barrier_main$(EXEEXT) \
${top_builddir}/test/validation/buffer/buffer_main$(EXEEXT) \
${top_builddir}/test/validation/classification/classification_main$(EXEEXT) \
${top_builddir}/test/validation/config/config_main$(EXEEXT) \
@@ -16,13 +18,13 @@ TESTS = pktio/pktio_run \
${top_builddir}/test/validation/init/init_main_ok$(EXEEXT) \
${top_builddir}/test/validation/init/init_main_abort$(EXEEXT) \
${top_builddir}/test/validation/init/init_main_log$(EXEEXT) \
+ ${top_builddir}/test/validation/lock/lock_main$(EXEEXT) \
${top_builddir}/test/validation/packet/packet_main$(EXEEXT) \
${top_builddir}/test/validation/pool/pool_main$(EXEEXT) \
${top_builddir}/test/validation/queue/queue_main$(EXEEXT) \
${top_builddir}/test/validation/random/random_main$(EXEEXT) \
${top_builddir}/test/validation/scheduler/scheduler_main$(EXEEXT) \
${top_builddir}/test/validation/std_clib/std_clib_main$(EXEEXT) \
- ${top_builddir}/test/validation/synchronizers/synchronizers_main$(EXEEXT) \
${top_builddir}/test/validation/thread/thread_main$(EXEEXT) \
${top_builddir}/test/validation/time/time_main$(EXEEXT) \
${top_builddir}/test/validation/timer/timer_main$(EXEEXT) \
diff --git a/test/api_test/odp_common.c b/test/api_test/odp_common.c
index cebaa1271..70aee96f8 100644
--- a/test/api_test/odp_common.c
+++ b/test/api_test/odp_common.c
@@ -41,8 +41,8 @@ void odp_print_system_info(void)
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %"PRIu64"\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %"PRIu64"\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("CPU count: %i\n", odp_cpu_count());
printf("CPU mask: %s\n", str);
diff --git a/test/performance/odp_atomic.c b/test/performance/odp_atomic.c
index 054f653b7..067329bdc 100644
--- a/test/performance/odp_atomic.c
+++ b/test/performance/odp_atomic.c
@@ -337,8 +337,8 @@ void odp_print_system_info(void)
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %"PRIu64"\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %"PRIu64"\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("CPU count: %i\n", odp_cpu_count());
printf("CPU mask: %s\n", str);
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index 31f6a7088..4de93fdd1 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -47,12 +47,19 @@
*/
#define MAX_PKT_BURST 32
+/** Maximum number of pktio queues per interface */
+#define MAX_QUEUES 32
+
+/** Maximum number of pktio interfaces */
+#define MAX_PKTIOS 8
+
/**
* Packet input mode
*/
typedef enum pkt_in_mode_t {
DIRECT_RECV,
- SCHED_NONE,
+ PLAIN_QUEUE,
+ SCHED_PARALLEL,
SCHED_ATOMIC,
SCHED_ORDERED,
} pkt_in_mode_t;
@@ -66,6 +73,7 @@ typedef enum pkt_in_mode_t {
typedef struct {
int cpu_count;
int if_count; /**< Number of interfaces to be used */
+ int num_workers; /**< Number of worker threads */
char **if_names; /**< Array of pointers to interface names */
pkt_in_mode_t mode; /**< Packet input mode */
int time; /**< Time in seconds to run. */
@@ -97,8 +105,22 @@ typedef union {
/**
* Thread specific arguments
*/
-typedef struct {
- int src_idx; /**< Source interface identifier */
+typedef struct thread_args_t {
+ int thr_idx;
+ int num_pktio;
+
+ struct {
+ odp_pktio_t rx_pktio;
+ odp_pktio_t tx_pktio;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ odp_queue_t rx_queue;
+ int rx_idx;
+ int tx_idx;
+ int rx_queue_idx;
+ int tx_queue_idx;
+ } pktio[MAX_PKTIOS];
+
stats_t *stats; /**< Pointer to per thread stats */
} thread_args_t;
@@ -112,14 +134,25 @@ typedef struct {
appl_args_t appl;
/** Thread specific arguments */
thread_args_t thread[MAX_WORKERS];
- /** Table of pktio handles */
- odp_pktio_t pktios[ODP_CONFIG_PKTIO_ENTRIES];
/** Table of port ethernet addresses */
- odph_ethaddr_t port_eth_addr[ODP_CONFIG_PKTIO_ENTRIES];
+ odph_ethaddr_t port_eth_addr[MAX_PKTIOS];
/** Table of dst ethernet addresses */
- odph_ethaddr_t dst_eth_addr[ODP_CONFIG_PKTIO_ENTRIES];
+ odph_ethaddr_t dst_eth_addr[MAX_PKTIOS];
/** Table of dst ports */
- int dst_port[ODP_CONFIG_PKTIO_ENTRIES];
+ int dst_port[MAX_PKTIOS];
+ /** Table of pktio handles */
+ struct {
+ odp_pktio_t pktio;
+ odp_pktin_queue_t pktin[MAX_QUEUES];
+ odp_pktout_queue_t pktout[MAX_QUEUES];
+ odp_queue_t rx_q[MAX_QUEUES];
+ int num_rx_thr;
+ int num_tx_thr;
+ int num_rx_queue;
+ int num_tx_queue;
+ int next_rx_queue;
+ int next_tx_queue;
+ } pktios[MAX_PKTIOS];
} args_t;
/** Global pointer to args */
@@ -127,22 +160,97 @@ static args_t *gbl_args;
/** Global barrier to synchronize main and workers */
static odp_barrier_t barrier;
-/* helper funcs */
-static inline int lookup_dest_port(odp_packet_t pkt);
-static inline int find_dest_port(int port);
-static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num);
-static void fill_eth_addrs(odp_packet_t pkt_tbl[], unsigned num,
- int dst_port);
-static void parse_args(int argc, char *argv[], appl_args_t *appl_args);
-static void print_info(char *progname, appl_args_t *appl_args);
-static void usage(char *progname);
+/**
+ * Lookup the destination port for a given packet
+ *
+ * @param pkt ODP packet handle
+ */
+static inline int lookup_dest_port(odp_packet_t pkt)
+{
+ int i, src_idx;
+ odp_pktio_t pktio_src;
+
+ pktio_src = odp_packet_input(pkt);
+
+ for (src_idx = -1, i = 0; gbl_args->pktios[i].pktio
+ != ODP_PKTIO_INVALID; ++i)
+ if (gbl_args->pktios[i].pktio == pktio_src)
+ src_idx = i;
+
+ if (src_idx == -1)
+ LOG_ABORT("Failed to determine pktio input\n");
+
+ return gbl_args->dst_port[src_idx];
+}
+
+/**
+ * Drop packets which input parsing marked as containing errors.
+ *
+ * Frees packets with error and modifies pkt_tbl[] to only contain packets with
+ * no detected errors.
+ *
+ * @param pkt_tbl Array of packets
+ * @param num Number of packets in pkt_tbl[]
+ *
+ * @return Number of packets dropped
+ */
+static inline int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
+{
+ odp_packet_t pkt;
+ unsigned dropped = 0;
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < num; ++i) {
+ pkt = pkt_tbl[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ odp_packet_free(pkt); /* Drop */
+ dropped++;
+ } else if (odp_unlikely(i != j++)) {
+ pkt_tbl[j - 1] = pkt;
+ }
+ }
+
+ return dropped;
+}
+
+/**
+ * Fill packets' eth addresses according to the destination port
+ *
+ * @param pkt_tbl Array of packets
+ * @param num Number of packets in the array
+ * @param dst_port Destination port
+ */
+static inline void fill_eth_addrs(odp_packet_t pkt_tbl[],
+ unsigned num, int dst_port)
+{
+ odp_packet_t pkt;
+ odph_ethhdr_t *eth;
+ unsigned i;
+
+ if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
+ return;
+
+ for (i = 0; i < num; ++i) {
+ pkt = pkt_tbl[i];
+ if (odp_packet_has_eth(pkt)) {
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+
+ if (gbl_args->appl.src_change)
+ eth->src = gbl_args->port_eth_addr[dst_port];
+
+ if (gbl_args->appl.dst_change)
+ eth->dst = gbl_args->dst_eth_addr[dst_port];
+ }
+ }
+}
/**
- * Packet IO worker thread using ODP queues
+ * Packet IO worker thread using scheduled queues
*
* @param arg thread arguments of type 'thread_args_t *'
*/
-static void *pktio_queue_thread(void *arg)
+static void *run_worker_sched_mode(void *arg)
{
odp_event_t ev_tbl[MAX_PKT_BURST];
odp_packet_t pkt_tbl[MAX_PKT_BURST];
@@ -150,20 +258,34 @@ static void *pktio_queue_thread(void *arg)
int thr;
uint64_t wait;
int dst_idx;
- odp_pktio_t pktio_dst;
+ int thr_idx;
+ int i;
+ odp_pktout_queue_t pktout[MAX_PKTIOS];
thread_args_t *thr_args = arg;
stats_t *stats = thr_args->stats;
thr = odp_thread_id();
+ thr_idx = thr_args->thr_idx;
+
+ memset(pktout, 0, sizeof(pktout));
+ for (i = 0; i < gbl_args->appl.if_count; i++) {
+ if (gbl_args->pktios[i].num_tx_queue ==
+ gbl_args->appl.num_workers)
+ pktout[i] = gbl_args->pktios[i].pktout[thr_idx];
+ else if (gbl_args->pktios[i].num_tx_queue == 1)
+ pktout[i] = gbl_args->pktios[i].pktout[0];
+ else
+ LOG_ABORT("Bad number of output queues %i\n", i);
+ }
- printf("[%02i] QUEUE mode\n", thr);
+ printf("[%02i] SCHEDULED QUEUE mode\n", thr);
odp_barrier_wait(&barrier);
wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS * 100);
/* Loop packets */
while (!exit_threads) {
- int sent, i;
+ int sent;
unsigned tx_drops;
pkts = odp_schedule_multi(NULL, wait, ev_tbl, MAX_PKT_BURST);
@@ -192,9 +314,7 @@ static void *pktio_queue_thread(void *arg)
/* packets from the same queue are from the same interface */
dst_idx = lookup_dest_port(pkt_tbl[0]);
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- pktio_dst = gbl_args->pktios[dst_idx];
-
- sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);
+ sent = odp_pktio_send_queue(pktout[dst_idx], pkt_tbl, pkts);
sent = odp_unlikely(sent < 0) ? 0 : sent;
tx_drops = pkts - sent;
@@ -217,43 +337,94 @@ static void *pktio_queue_thread(void *arg)
}
/**
- * Lookup the destination port for a given packet
+ * Packet IO worker thread using plain queues
*
- * @param pkt ODP packet handle
+ * @param arg thread arguments of type 'thread_args_t *'
*/
-static inline int lookup_dest_port(odp_packet_t pkt)
+static void *run_worker_plain_queue_mode(void *arg)
{
- int i, src_idx;
- odp_pktio_t pktio_src;
+ int thr;
+ int pkts;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ int dst_idx, num_pktio;
+ odp_queue_t queue;
+ odp_pktout_queue_t pktout;
+ int pktio = 0;
+ thread_args_t *thr_args = arg;
+ stats_t *stats = thr_args->stats;
- pktio_src = odp_packet_input(pkt);
+ thr = odp_thread_id();
- for (src_idx = -1, i = 0; gbl_args->pktios[i] != ODP_PKTIO_INVALID; ++i)
- if (gbl_args->pktios[i] == pktio_src)
- src_idx = i;
+ num_pktio = thr_args->num_pktio;
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ queue = thr_args->pktio[pktio].rx_queue;
+ pktout = thr_args->pktio[pktio].pktout;
- if (src_idx == -1)
- LOG_ABORT("Failed to determine pktio input\n");
+ printf("[%02i] num pktios %i, PLAIN QUEUE mode\n", thr, num_pktio);
+ odp_barrier_wait(&barrier);
- return gbl_args->dst_port[src_idx];
-}
+ /* Loop packets */
+ while (!exit_threads) {
+ int sent;
+ unsigned tx_drops;
+ odp_event_t event[MAX_PKT_BURST];
+ int i;
-/**
- * Find the destination port for a given input port
- *
- * @param port Input port index
- */
-static inline int find_dest_port(int port)
-{
- /* Even number of ports */
- if (gbl_args->appl.if_count % 2 == 0)
- return (port % 2 == 0) ? port + 1 : port - 1;
+ pkts = odp_queue_deq_multi(queue, event, MAX_PKT_BURST);
+ if (odp_unlikely(pkts <= 0))
+ continue;
- /* Odd number of ports */
- if (port == gbl_args->appl.if_count - 1)
- return 0;
- else
- return port + 1;
+ for (i = 0; i < pkts; i++)
+ pkt_tbl[i] = odp_packet_from_event(event[i]);
+
+ if (gbl_args->appl.error_check) {
+ int rx_drops;
+
+ /* Drop packets with errors */
+ rx_drops = drop_err_pkts(pkt_tbl, pkts);
+
+ if (odp_unlikely(rx_drops)) {
+ stats->s.rx_drops += rx_drops;
+ if (pkts == rx_drops)
+ continue;
+
+ pkts -= rx_drops;
+ }
+ }
+
+ fill_eth_addrs(pkt_tbl, pkts, dst_idx);
+
+ sent = odp_pktio_send_queue(pktout, pkt_tbl, pkts);
+
+ sent = odp_unlikely(sent < 0) ? 0 : sent;
+ tx_drops = pkts - sent;
+
+ if (odp_unlikely(tx_drops)) {
+ int i;
+
+ stats->s.tx_drops += tx_drops;
+
+ /* Drop rejected packets */
+ for (i = sent; i < pkts; i++)
+ odp_packet_free(pkt_tbl[i]);
+ }
+
+ stats->s.packets += pkts;
+
+ if (num_pktio > 1) {
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ queue = thr_args->pktio[pktio].rx_queue;
+ pktout = thr_args->pktio[pktio].pktout;
+ pktio++;
+ if (pktio == num_pktio)
+ pktio = 0;
+ }
+ }
+
+ /* Make sure that latest stat writes are visible to other threads */
+ odp_mb_full();
+
+ return NULL;
}
/**
@@ -261,37 +432,34 @@ static inline int find_dest_port(int port)
*
* @param arg thread arguments of type 'thread_args_t *'
*/
-static void *pktio_direct_recv_thread(void *arg)
+static void *run_worker_direct_mode(void *arg)
{
int thr;
int pkts;
odp_packet_t pkt_tbl[MAX_PKT_BURST];
- int src_idx, dst_idx;
- odp_pktio_t pktio_src, pktio_dst;
+ int dst_idx, num_pktio;
+ odp_pktin_queue_t pktin;
+ odp_pktout_queue_t pktout;
+ int pktio = 0;
thread_args_t *thr_args = arg;
stats_t *stats = thr_args->stats;
thr = odp_thread_id();
- src_idx = thr_args->src_idx;
- dst_idx = gbl_args->dst_port[src_idx];
- pktio_src = gbl_args->pktios[src_idx];
- pktio_dst = gbl_args->pktios[dst_idx];
-
- printf("[%02i] srcif:%s dstif:%s spktio:%02" PRIu64
- " dpktio:%02" PRIu64 " DIRECT RECV mode\n",
- thr,
- gbl_args->appl.if_names[src_idx],
- gbl_args->appl.if_names[dst_idx],
- odp_pktio_to_u64(pktio_src), odp_pktio_to_u64(pktio_dst));
+ num_pktio = thr_args->num_pktio;
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ pktin = thr_args->pktio[pktio].pktin;
+ pktout = thr_args->pktio[pktio].pktout;
+
+ printf("[%02i] num pktios %i, DIRECT RECV mode\n", thr, num_pktio);
odp_barrier_wait(&barrier);
/* Loop packets */
while (!exit_threads) {
- int sent, i;
+ int sent;
unsigned tx_drops;
- pkts = odp_pktio_recv(pktio_src, pkt_tbl, MAX_PKT_BURST);
+ pkts = odp_pktio_recv_queue(pktin, pkt_tbl, MAX_PKT_BURST);
if (odp_unlikely(pkts <= 0))
continue;
@@ -312,12 +480,14 @@ static void *pktio_direct_recv_thread(void *arg)
fill_eth_addrs(pkt_tbl, pkts, dst_idx);
- sent = odp_pktio_send(pktio_dst, pkt_tbl, pkts);
+ sent = odp_pktio_send_queue(pktout, pkt_tbl, pkts);
sent = odp_unlikely(sent < 0) ? 0 : sent;
tx_drops = pkts - sent;
if (odp_unlikely(tx_drops)) {
+ int i;
+
stats->s.tx_drops += tx_drops;
/* Drop rejected packets */
@@ -326,6 +496,16 @@ static void *pktio_direct_recv_thread(void *arg)
}
stats->s.packets += pkts;
+
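+		/* Round robin over the pktios assigned to this worker */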
+ if (num_pktio > 1) {
+ dst_idx = thr_args->pktio[pktio].tx_idx;
+ pktin = thr_args->pktio[pktio].pktin;
+ pktout = thr_args->pktio[pktio].pktout;
+ pktio++;
+ if (pktio == num_pktio)
+ pktio = 0;
+ }
+
}
/* Make sure that latest stat writes are visible to other threads */
@@ -337,70 +517,171 @@ static void *pktio_direct_recv_thread(void *arg)
/**
* Create a pktio handle, optionally associating a default input queue.
*
- * @param dev Name of device to open
- * @param pool Pool to associate with device for packet RX/TX
+ * @param dev Name of device to open
+ * @param index Pktio index
+ * @param pool Pool to associate with device for packet RX/TX
*
- * @return The handle of the created pktio object.
- * @retval ODP_PKTIO_INVALID if the create fails.
+ * @retval 0 on success
+ * @retval -1 on failure
*/
-static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
+static int create_pktio(const char *dev, int idx, int num_rx, int num_tx,
+ odp_pool_t pool)
{
- char inq_name[ODP_QUEUE_NAME_LEN];
- odp_queue_param_t qparam;
- odp_queue_t inq_def;
odp_pktio_t pktio;
- int ret;
odp_pktio_param_t pktio_param;
odp_schedule_sync_t sync_mode;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktio_op_mode_t mode_rx = ODP_PKTIO_OP_MT_UNSAFE;
+ odp_pktio_op_mode_t mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
odp_pktio_param_init(&pktio_param);
- if (gbl_args->appl.mode == DIRECT_RECV)
- pktio_param.in_mode = ODP_PKTIN_MODE_RECV;
- else
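+	/* Map the application mode to a packet input mode. Packet output
+	 * always uses direct mode. */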
+ if (gbl_args->appl.mode == DIRECT_RECV) {
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ } else if (gbl_args->appl.mode == PLAIN_QUEUE) {
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ } else {
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+ pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
+ }
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
LOG_ERR("Error: failed to open %s\n", dev);
- return ODP_PKTIO_INVALID;
+ return -1;
}
printf("created pktio %" PRIu64 " (%s)\n",
odp_pktio_to_u64(pktio), dev);
- /* no further setup needed for direct receive mode */
- if (gbl_args->appl.mode == DIRECT_RECV)
- return pktio;
+ if (odp_pktio_capability(pktio, &capa)) {
+ LOG_ERR("Error: capability query failed %s\n", dev);
+ return -1;
+ }
+
+ if (num_rx > (int)capa.max_input_queues) {
+ printf("Sharing %i input queues between %i workers\n",
+ capa.max_input_queues, num_rx);
+ num_rx = capa.max_input_queues;
+ mode_rx = ODP_PKTIO_OP_MT;
+ }
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ odp_pktout_queue_param_init(&out_queue_param);
+
+ if (gbl_args->appl.mode == DIRECT_RECV ||
+ gbl_args->appl.mode == PLAIN_QUEUE) {
+
+ if (num_tx > (int)capa.max_output_queues) {
+ printf("Sharing %i output queues between %i workers\n",
+ capa.max_output_queues, num_tx);
+ num_tx = capa.max_output_queues;
+ mode_tx = ODP_PKTIO_OP_MT;
+ }
+
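+		/* Hash IPv4/UDP flows to spread input over the queues */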
+ in_queue_param.op_mode = mode_rx;
+ in_queue_param.hash_enable = 1;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+ in_queue_param.num_queues = num_rx;
+
+ if (odp_pktin_queue_config(pktio, &in_queue_param)) {
+ LOG_ERR("Error: input queue config failed %s\n", dev);
+ return -1;
+ }
+
+ out_queue_param.op_mode = mode_tx;
+ out_queue_param.num_queues = num_tx;
+
+ if (odp_pktout_queue_config(pktio, &out_queue_param)) {
+ LOG_ERR("Error: output queue config failed %s\n", dev);
+ return -1;
+ }
+
+ if (gbl_args->appl.mode == DIRECT_RECV) {
+ if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin,
+ num_rx) != num_rx) {
+ LOG_ERR("Error: pktin queue query failed %s\n",
+ dev);
+ return -1;
+ }
+ } else { /* PLAIN QUEUE */
+ if (odp_pktin_event_queue(pktio,
+ gbl_args->pktios[idx].rx_q,
+ num_rx) != num_rx) {
+ LOG_ERR("Error: input queue query failed %s\n",
+ dev);
+ return -1;
+ }
+ }
+
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout,
+ num_tx) != num_tx) {
+ LOG_ERR("Error: pktout queue query failed %s\n", dev);
+ return -1;
+ }
+
+ printf("created %i input and %i output queues on (%s)\n",
+ num_rx, num_tx, dev);
+
+ gbl_args->pktios[idx].num_rx_queue = num_rx;
+ gbl_args->pktios[idx].num_tx_queue = num_tx;
+ gbl_args->pktios[idx].pktio = pktio;
+
+ return 0;
+ }
+
+ if (num_tx > (int)capa.max_output_queues) {
+ printf("Sharing 1 output queue between %i workers\n",
+ num_tx);
+ num_tx = 1;
+ mode_tx = ODP_PKTIO_OP_MT;
+ }
if (gbl_args->appl.mode == SCHED_ATOMIC)
sync_mode = ODP_SCHED_SYNC_ATOMIC;
else if (gbl_args->appl.mode == SCHED_ORDERED)
sync_mode = ODP_SCHED_SYNC_ORDERED;
else
- sync_mode = ODP_SCHED_SYNC_NONE;
-
- odp_queue_param_init(&qparam);
- qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = sync_mode;
- qparam.sched.group = ODP_SCHED_GROUP_ALL;
- snprintf(inq_name, sizeof(inq_name), "%" PRIu64 "-pktio_inq_def",
- odp_pktio_to_u64(pktio));
- inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
-
- inq_def = odp_queue_create(inq_name, ODP_QUEUE_TYPE_PKTIN, &qparam);
- if (inq_def == ODP_QUEUE_INVALID) {
- LOG_ERR("Error: pktio queue creation failed\n");
- return ODP_PKTIO_INVALID;
+ sync_mode = ODP_SCHED_SYNC_PARALLEL;
+
+ in_queue_param.hash_enable = 1;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+ in_queue_param.num_queues = num_rx;
+ in_queue_param.queue_param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ in_queue_param.queue_param.sched.sync = sync_mode;
+ in_queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+
+ if (odp_pktin_queue_config(pktio, &in_queue_param)) {
+ LOG_ERR("Error: input queue config failed %s\n", dev);
+ return -1;
}
- ret = odp_pktio_inq_setdef(pktio, inq_def);
- if (ret != 0) {
- LOG_ERR("Error: default input-Q setup\n");
- return ODP_PKTIO_INVALID;
+ out_queue_param.op_mode = mode_tx;
+ out_queue_param.num_queues = num_tx;
+
+ if (odp_pktout_queue_config(pktio, &out_queue_param)) {
+ LOG_ERR("Error: output queue config failed %s\n", dev);
+ return -1;
}
- return pktio;
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx)
+ != num_tx) {
+ LOG_ERR("Error: pktout queue query failed %s\n", dev);
+ return -1;
+ }
+
+ printf("created %i input and %i output queues on (%s)\n",
+ num_rx, num_tx, dev);
+
+ gbl_args->pktios[idx].num_rx_queue = num_rx;
+ gbl_args->pktios[idx].num_tx_queue = num_tx;
+ gbl_args->pktios[idx].pktio = pktio;
+
+ return 0;
}
/**
@@ -466,232 +747,231 @@ static int print_speed_stats(int num_workers, stats_t *thr_stats,
return pkts > 100 ? 0 : -1;
}
-/**
- * ODP L2 forwarding main function
- */
-int main(int argc, char *argv[])
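+/*
+ * Print worker to port/queue mapping and per port queue counts
+ */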
+static void print_port_mapping(void)
{
- odph_linux_pthread_t thread_tbl[MAX_WORKERS];
- odp_pool_t pool;
- int i;
- int cpu;
- int num_workers;
- odp_shm_t shm;
- odp_cpumask_t cpumask;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
- odph_ethaddr_t new_addr;
- odp_pktio_t pktio;
- odp_pool_param_t params;
- int ret;
- stats_t *stats;
-
- /* Init ODP before calling anything else */
- if (odp_init_global(NULL, NULL)) {
- LOG_ERR("Error: ODP global init failed.\n");
- exit(EXIT_FAILURE);
- }
-
- /* Init this thread */
- if (odp_init_local(ODP_THREAD_CONTROL)) {
- LOG_ERR("Error: ODP local init failed.\n");
- exit(EXIT_FAILURE);
- }
+ int if_count, num_workers;
+ int thr, pktio;
- /* Reserve memory for args from shared mem */
- shm = odp_shm_reserve("shm_args", sizeof(args_t),
- ODP_CACHE_LINE_SIZE, 0);
- gbl_args = odp_shm_addr(shm);
+ if_count = gbl_args->appl.if_count;
+ num_workers = gbl_args->appl.num_workers;
- if (gbl_args == NULL) {
- LOG_ERR("Error: shared mem alloc failed.\n");
- exit(EXIT_FAILURE);
- }
- memset(gbl_args, 0, sizeof(*gbl_args));
+ printf("\nWorker mapping table (port[queue])\n--------------------\n");
- /* Parse and store the application arguments */
- parse_args(argc, argv, &gbl_args->appl);
+ for (thr = 0; thr < num_workers; thr++) {
+ int rx_idx, tx_idx;
+ int rx_queue_idx, tx_queue_idx;
+ thread_args_t *thr_args = &gbl_args->thread[thr];
+ int num = thr_args->num_pktio;
- /* Print both system and application information */
- print_info(NO_PATH(argv[0]), &gbl_args->appl);
+ printf("Worker %i\n", thr);
- /* Default to system CPU count unless user specified */
- num_workers = MAX_WORKERS;
- if (gbl_args->appl.cpu_count)
- num_workers = gbl_args->appl.cpu_count;
-
- /* Get default worker cpumask */
- num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
-
- printf("num worker threads: %i\n", num_workers);
- printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("cpu mask: %s\n", cpumaskstr);
-
- if (num_workers < gbl_args->appl.if_count) {
- LOG_ERR("Error: CPU count %d less than interface count\n",
- num_workers);
- exit(EXIT_FAILURE);
- }
-
- /* Create packet pool */
- odp_pool_param_init(&params);
- params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
- params.pkt.num = SHM_PKT_POOL_SIZE;
- params.type = ODP_POOL_PACKET;
-
- pool = odp_pool_create("packet pool", &params);
-
- if (pool == ODP_POOL_INVALID) {
- LOG_ERR("Error: packet pool create failed.\n");
- exit(EXIT_FAILURE);
- }
- odp_pool_print(pool);
-
- for (i = 0; i < gbl_args->appl.if_count; ++i) {
- pktio = create_pktio(gbl_args->appl.if_names[i], pool);
- if (pktio == ODP_PKTIO_INVALID)
- exit(EXIT_FAILURE);
- gbl_args->pktios[i] = pktio;
-
- /* Save interface ethernet address */
- if (odp_pktio_mac_addr(pktio, gbl_args->port_eth_addr[i].addr,
- ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- LOG_ERR("Error: interface ethernet address unknown\n");
- exit(EXIT_FAILURE);
- }
-
- /* Save destination eth address */
- if (gbl_args->appl.dst_change) {
- /* 02:00:00:00:00:XX */
- memset(&new_addr, 0, sizeof(odph_ethaddr_t));
- new_addr.addr[0] = 0x02;
- new_addr.addr[5] = i;
- gbl_args->dst_eth_addr[i] = new_addr;
+ for (pktio = 0; pktio < num; pktio++) {
+ rx_idx = thr_args->pktio[pktio].rx_idx;
+ tx_idx = thr_args->pktio[pktio].tx_idx;
+ rx_queue_idx = thr_args->pktio[pktio].rx_queue_idx;
+ tx_queue_idx = thr_args->pktio[pktio].tx_queue_idx;
+ printf(" %i[%i] -> %i[%i]\n",
+ rx_idx, rx_queue_idx, tx_idx, tx_queue_idx);
}
-
- /* Save interface destination port */
- gbl_args->dst_port[i] = find_dest_port(i);
}
- gbl_args->pktios[i] = ODP_PKTIO_INVALID;
-
- memset(thread_tbl, 0, sizeof(thread_tbl));
-
- stats = gbl_args->stats;
-
- odp_barrier_init(&barrier, num_workers + 1);
-
- /* Create worker threads */
- cpu = odp_cpumask_first(&cpumask);
- for (i = 0; i < num_workers; ++i) {
- odp_cpumask_t thd_mask;
- void *(*thr_run_func) (void *);
-
- if (gbl_args->appl.mode == DIRECT_RECV)
- thr_run_func = pktio_direct_recv_thread;
- else /* SCHED_NONE / SCHED_ATOMIC / SCHED_ORDERED */
- thr_run_func = pktio_queue_thread;
-
- gbl_args->thread[i].src_idx = i % gbl_args->appl.if_count;
- gbl_args->thread[i].stats = &stats[i];
+ printf("\nPort config\n--------------------\n");
- odp_cpumask_zero(&thd_mask);
- odp_cpumask_set(&thd_mask, cpu);
- odph_linux_pthread_create(&thread_tbl[i], &thd_mask,
- thr_run_func,
- &gbl_args->thread[i],
- ODP_THREAD_WORKER);
- cpu = odp_cpumask_next(&cpumask, cpu);
- }
+ for (pktio = 0; pktio < if_count; pktio++) {
+ const char *dev = gbl_args->appl.if_names[pktio];
- /* Start packet receive and transmit */
- for (i = 0; i < gbl_args->appl.if_count; ++i) {
- pktio = gbl_args->pktios[i];
- ret = odp_pktio_start(pktio);
- if (ret) {
- LOG_ERR("Error: unable to start %s\n",
- gbl_args->appl.if_names[i]);
- exit(EXIT_FAILURE);
- }
+ printf("Port %i (%s)\n", pktio, dev);
+ printf(" rx workers %i\n",
+ gbl_args->pktios[pktio].num_rx_thr);
+ printf(" tx workers %i\n",
+ gbl_args->pktios[pktio].num_tx_thr);
+ printf(" rx queues %i\n",
+ gbl_args->pktios[pktio].num_rx_queue);
+ printf(" tx queues %i\n",
+ gbl_args->pktios[pktio].num_tx_queue);
}
- ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
- gbl_args->appl.accuracy);
- exit_threads = 1;
-
- /* Master thread waits for other threads to exit */
- odph_linux_pthread_join(thread_tbl, num_workers);
-
- free(gbl_args->appl.if_names);
- free(gbl_args->appl.if_str);
- printf("Exit\n\n");
-
- return ret;
+ printf("\n");
}
/**
- * Drop packets which input parsing marked as containing errors.
- *
- * Frees packets with error and modifies pkt_tbl[] to only contain packets with
- * no detected errors.
- *
- * @param pkt_tbl Array of packets
- * @param num Number of packets in pkt_tbl[]
+ * Find the destination port for a given input port
*
- * @return Number of packets dropped
+ * @param port Input port index
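+ *
+ * e.g. with 4 ports: 0<->1 and 2<->3, with 3 ports: 0->1, 1->2, 2->0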
*/
-static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned num)
+static int find_dest_port(int port)
{
- odp_packet_t pkt;
- unsigned dropped = 0;
- unsigned i, j;
+ /* Even number of ports */
+ if (gbl_args->appl.if_count % 2 == 0)
+ return (port % 2 == 0) ? port + 1 : port - 1;
- for (i = 0, j = 0; i < num; ++i) {
- pkt = pkt_tbl[i];
+ /* Odd number of ports */
+ if (port == gbl_args->appl.if_count - 1)
+ return 0;
+ else
+ return port + 1;
+}
- if (odp_unlikely(odp_packet_has_error(pkt))) {
- odp_packet_free(pkt); /* Drop */
- dropped++;
- } else if (odp_unlikely(i != j++)) {
- pkt_tbl[j-1] = pkt;
+/*
+ * Bind worker threads to interfaces and calculate number of queues needed
+ *
+ * fewer workers (N) than interfaces (M)
+ * - assign each worker to process every Nth interface
+ * - workers process an unequal number of interfaces when M is not
+ *   divisible by N
+ * - needs only a single queue per interface
+ * otherwise
+ * - assign an interface to every Mth worker
+ * - interfaces are processed by an unequal number of workers when N is not
+ *   divisible by M
+ * - tries to configure a queue per worker per interface
+ * - shares queues if the interface capability does not allow a queue per
+ *   worker
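+ *
+ * Example: 4 interfaces and 2 workers -> worker 0 processes interfaces
+ * 0 and 2, worker 1 processes interfaces 1 and 3. With 2 interfaces and
+ * 4 workers -> interface 0 is processed by workers 0 and 2, interface 1
+ * by workers 1 and 3.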
+ */
+static void bind_workers(void)
+{
+ int if_count, num_workers;
+ int rx_idx, tx_idx, thr, pktio;
+ thread_args_t *thr_args;
+
+ if_count = gbl_args->appl.if_count;
+ num_workers = gbl_args->appl.num_workers;
+
+ /* initialize port forwarding table */
+ for (rx_idx = 0; rx_idx < if_count; rx_idx++)
+ gbl_args->dst_port[rx_idx] = find_dest_port(rx_idx);
+
+ if (if_count > num_workers) {
+ thr = 0;
+
+ for (rx_idx = 0; rx_idx < if_count; rx_idx++) {
+ thr_args = &gbl_args->thread[thr];
+ pktio = thr_args->num_pktio;
+ tx_idx = gbl_args->dst_port[rx_idx];
+ thr_args->pktio[pktio].rx_idx = rx_idx;
+ thr_args->pktio[pktio].tx_idx = tx_idx;
+ thr_args->num_pktio++;
+
+ gbl_args->pktios[rx_idx].num_rx_thr++;
+ gbl_args->pktios[tx_idx].num_tx_thr++;
+
+ thr++;
+ if (thr >= num_workers)
+ thr = 0;
+ }
+ } else {
+ rx_idx = 0;
+
+ for (thr = 0; thr < num_workers; thr++) {
+ thr_args = &gbl_args->thread[thr];
+ pktio = thr_args->num_pktio;
+ tx_idx = gbl_args->dst_port[rx_idx];
+ thr_args->pktio[pktio].rx_idx = rx_idx;
+ thr_args->pktio[pktio].tx_idx = tx_idx;
+ thr_args->num_pktio++;
+
+ gbl_args->pktios[rx_idx].num_rx_thr++;
+ gbl_args->pktios[tx_idx].num_tx_thr++;
+
+ rx_idx++;
+ if (rx_idx >= if_count)
+ rx_idx = 0;
}
}
-
- return dropped;
}
-/**
- * Fill packets' eth addresses according to the destination port
- *
- * @param pkt_tbl Array of packets
- * @param num Number of packets in the array
- * @param dst_port Destination port
+/*
+ * Bind queues to threads and fill in missing thread arguments (handles)
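+ *
+ * e.g. two workers sharing one interface get rx queues 0 and 1 (queue
+ * indexes wrap around when there are fewer queues than workers)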
*/
-static void fill_eth_addrs(odp_packet_t pkt_tbl[], unsigned num, int dst_port)
+static void bind_queues(void)
{
- odp_packet_t pkt;
- odph_ethhdr_t *eth;
- unsigned i;
-
- if (!gbl_args->appl.dst_change && !gbl_args->appl.src_change)
- return;
-
- for (i = 0; i < num; ++i) {
- pkt = pkt_tbl[i];
- if (odp_packet_has_eth(pkt)) {
- eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
-
- if (gbl_args->appl.src_change)
- eth->src = gbl_args->port_eth_addr[dst_port];
-
- if (gbl_args->appl.dst_change)
- eth->dst = gbl_args->dst_eth_addr[dst_port];
+ int num_workers;
+ int thr, pktio;
+
+ num_workers = gbl_args->appl.num_workers;
+
+ for (thr = 0; thr < num_workers; thr++) {
+ int rx_idx, tx_idx;
+ thread_args_t *thr_args = &gbl_args->thread[thr];
+ int num = thr_args->num_pktio;
+
+ for (pktio = 0; pktio < num; pktio++) {
+ int rx_queue, tx_queue;
+
+ rx_idx = thr_args->pktio[pktio].rx_idx;
+ tx_idx = thr_args->pktio[pktio].tx_idx;
+ rx_queue = gbl_args->pktios[rx_idx].next_rx_queue;
+ tx_queue = gbl_args->pktios[tx_idx].next_tx_queue;
+
+ thr_args->pktio[pktio].rx_queue_idx = rx_queue;
+ thr_args->pktio[pktio].tx_queue_idx = tx_queue;
+ thr_args->pktio[pktio].pktin =
+ gbl_args->pktios[rx_idx].pktin[rx_queue];
+ thr_args->pktio[pktio].pktout =
+ gbl_args->pktios[tx_idx].pktout[tx_queue];
+ thr_args->pktio[pktio].rx_queue =
+ gbl_args->pktios[rx_idx].rx_q[rx_queue];
+ thr_args->pktio[pktio].rx_pktio =
+ gbl_args->pktios[rx_idx].pktio;
+ thr_args->pktio[pktio].tx_pktio =
+ gbl_args->pktios[tx_idx].pktio;
+
+ rx_queue++;
+ tx_queue++;
+
+ if (rx_queue >= gbl_args->pktios[rx_idx].num_rx_queue)
+ rx_queue = 0;
+ if (tx_queue >= gbl_args->pktios[tx_idx].num_tx_queue)
+ tx_queue = 0;
+
+ gbl_args->pktios[rx_idx].next_rx_queue = rx_queue;
+ gbl_args->pktios[tx_idx].next_tx_queue = tx_queue;
}
}
}
/**
+ * Print usage information
+ */
+static void usage(char *progname)
+{
+ printf("\n"
+ "OpenDataPlane L2 forwarding application.\n"
+ "\n"
+ "Usage: %s OPTIONS\n"
+ " E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
+ " In the above example,\n"
+ " eth0 will send pkts to eth1 and vice versa\n"
+ " eth2 will send pkts to eth3 and vice versa\n"
+ "\n"
+ "Mandatory OPTIONS:\n"
+ " -i, --interface Eth interfaces (comma-separated, no spaces)\n"
+ " Interface count min 1, max %i\n"
+ "\n"
+ "Optional OPTIONS\n"
+ " -m, --mode 0: Receive packets directly from pktio interface (default)\n"
+ " 1: Receive packets through scheduler sync parallel queues\n"
+ " 2: Receive packets through scheduler sync atomic queues\n"
+ " 3: Receive packets through scheduler sync ordered queues\n"
+ " 4: Receive packets through plain queues\n"
+ " -c, --count <number> CPU count.\n"
+ " -t, --time <number> Time in seconds to run.\n"
+ " -a, --accuracy <number> Time in seconds get print statistics\n"
+ " (default is 1 second).\n"
+ " -d, --dst_change 0: Don't change packets' dst eth addresses (default)\n"
+ " 1: Change packets' dst eth addresses\n"
+ " -s, --src_change 0: Don't change packets' src eth addresses\n"
+ " 1: Change packets' src eth addresses (default)\n"
+ " -e, --error_check 0: Don't check packet errors (default)\n"
+ " 1: Check packet errors\n"
+ " -h, --help Display help and exit.\n\n"
+ " environment variables: ODP_PKTIO_DISABLE_NETMAP\n"
+ " ODP_PKTIO_DISABLE_SOCKET_MMAP\n"
+ " ODP_PKTIO_DISABLE_SOCKET_MMSG\n"
+ " can be used to advanced pkt I/O selection for linux-generic\n"
+ "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS
+ );
+}
+
+/**
* Parse and store the command line arguments
*
* @param argc argument count
@@ -764,7 +1044,8 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->if_count = i;
- if (appl_args->if_count == 0) {
+ if (appl_args->if_count < 1 ||
+ appl_args->if_count > MAX_PKTIOS) {
usage(argv[0]);
exit(EXIT_FAILURE);
}
@@ -783,11 +1064,13 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'm':
i = atoi(optarg);
if (i == 1)
- appl_args->mode = SCHED_NONE;
+ appl_args->mode = SCHED_PARALLEL;
else if (i == 2)
appl_args->mode = SCHED_ATOMIC;
else if (i == 3)
appl_args->mode = SCHED_ORDERED;
+ else if (i == 4)
+ appl_args->mode = PLAIN_QUEUE;
else
appl_args->mode = DIRECT_RECV;
break;
@@ -829,11 +1112,11 @@ static void print_info(char *progname, appl_args_t *appl_args)
"---------------\n"
"ODP API version: %s\n"
"CPU model: %s\n"
- "CPU freq (hz): %"PRIu64"\n"
+ "CPU freq (hz): %" PRIu64 "\n"
"Cache line size: %i\n"
"CPU count: %i\n"
"\n",
- odp_version_api_str(), odp_sys_cpu_model_str(), odp_sys_cpu_hz(),
+ odp_version_api_str(), odp_cpu_model_str(), odp_cpu_hz_max(),
odp_sys_cache_line_size(), odp_cpu_count());
printf("Running ODP appl: \"%s\"\n"
@@ -847,8 +1130,10 @@ static void print_info(char *progname, appl_args_t *appl_args)
"Mode: ");
if (appl_args->mode == DIRECT_RECV)
printf("DIRECT_RECV");
- else if (appl_args->mode == SCHED_NONE)
- printf("SCHED_NONE");
+ else if (appl_args->mode == PLAIN_QUEUE)
+ printf("PLAIN_QUEUE");
+ else if (appl_args->mode == SCHED_PARALLEL)
+ printf("SCHED_PARALLEL");
else if (appl_args->mode == SCHED_ATOMIC)
printf("SCHED_ATOMIC");
else if (appl_args->mode == SCHED_ORDERED)
@@ -857,43 +1142,202 @@ static void print_info(char *progname, appl_args_t *appl_args)
fflush(NULL);
}
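+/*
+ * Initialize global args: mark all pktio handles and rx event queues invalid
+ */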
+static void gbl_args_init(args_t *args)
+{
+ int pktio, queue;
+
+ memset(args, 0, sizeof(args_t));
+
+ for (pktio = 0; pktio < MAX_PKTIOS; pktio++) {
+ args->pktios[pktio].pktio = ODP_PKTIO_INVALID;
+
+ for (queue = 0; queue < MAX_QUEUES; queue++)
+ args->pktios[pktio].rx_q[queue] = ODP_QUEUE_INVALID;
+ }
+}
+
/**
- * Prinf usage information
+ * ODP L2 forwarding main function
*/
-static void usage(char *progname)
+int main(int argc, char *argv[])
{
- printf("\n"
- "OpenDataPlane L2 forwarding application.\n"
- "\n"
- "Usage: %s OPTIONS\n"
- " E.g. %s -i eth0,eth1,eth2,eth3 -m 0 -t 1\n"
- " In the above example,\n"
- " eth0 will send pkts to eth1 and vice versa\n"
- " eth2 will send pkts to eth3 and vice versa\n"
- "\n"
- "Mandatory OPTIONS:\n"
- " -i, --interface Eth interfaces (comma-separated, no spaces)\n"
- "\n"
- "Optional OPTIONS\n"
- " -m, --mode 0: Send&receive packets directly from NIC (default)\n"
- " 1: Send&receive packets through scheduler sync none queues\n"
- " 2: Send&receive packets through scheduler sync atomic queues\n"
- " 3: Send&receive packets through scheduler sync ordered queues\n"
- " -c, --count <number> CPU count.\n"
- " -t, --time <number> Time in seconds to run.\n"
- " -a, --accuracy <number> Time in seconds get print statistics\n"
- " (default is 1 second).\n"
- " -d, --dst_change 0: Don't change packets' dst eth addresses (default)\n"
- " 1: Change packets' dst eth addresses\n"
- " -s, --src_change 0: Don't change packets' src eth addresses\n"
- " 1: Change packets' src eth addresses (default)\n"
- " -e, --error_check 0: Don't check packet errors (default)\n"
- " 1: Check packet errors\n"
- " -h, --help Display help and exit.\n\n"
- " environment variables: ODP_PKTIO_DISABLE_NETMAP\n"
- " ODP_PKTIO_DISABLE_SOCKET_MMAP\n"
- " ODP_PKTIO_DISABLE_SOCKET_MMSG\n"
- " can be used to advanced pkt I/O selection for linux-generic\n"
- "\n", NO_PATH(progname), NO_PATH(progname)
- );
+ odph_linux_pthread_t thread_tbl[MAX_WORKERS];
+ odp_pool_t pool;
+ int i;
+ int cpu;
+ int num_workers;
+ odp_shm_t shm;
+ odp_cpumask_t cpumask;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+ odph_ethaddr_t new_addr;
+ odp_pool_param_t params;
+ int ret;
+ stats_t *stats;
+ int if_count;
+ void *(*thr_run_func)(void *);
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(NULL, NULL)) {
+ LOG_ERR("Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local(ODP_THREAD_CONTROL)) {
+ LOG_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Reserve memory for args from shared mem */
+ shm = odp_shm_reserve("shm_args", sizeof(args_t),
+ ODP_CACHE_LINE_SIZE, 0);
+ gbl_args = odp_shm_addr(shm);
+
+ if (gbl_args == NULL) {
+ LOG_ERR("Error: shared mem alloc failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ gbl_args_init(gbl_args);
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv, &gbl_args->appl);
+
+ /* Print both system and application information */
+ print_info(NO_PATH(argv[0]), &gbl_args->appl);
+
+ /* Default to system CPU count unless user specified */
+ num_workers = MAX_WORKERS;
+ if (gbl_args->appl.cpu_count)
+ num_workers = gbl_args->appl.cpu_count;
+
+ /* Get default worker cpumask */
+ num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
+ (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+
+ gbl_args->appl.num_workers = num_workers;
+
+ for (i = 0; i < num_workers; i++)
+ gbl_args->thread[i].thr_idx = i;
+
+ if_count = gbl_args->appl.if_count;
+
+ printf("num worker threads: %i\n", num_workers);
+ printf("first CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf("cpu mask: %s\n", cpumaskstr);
+
+ /* Create packet pool */
+ odp_pool_param_init(&params);
+ params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.num = SHM_PKT_POOL_SIZE;
+ params.type = ODP_POOL_PACKET;
+
+ pool = odp_pool_create("packet pool", &params);
+
+ if (pool == ODP_POOL_INVALID) {
+ LOG_ERR("Error: packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ odp_pool_print(pool);
+
+ bind_workers();
+
+ for (i = 0; i < if_count; ++i) {
+ const char *dev = gbl_args->appl.if_names[i];
+ int num_rx, num_tx;
+
+ /* A queue per worker in scheduled mode */
+ num_rx = num_workers;
+ num_tx = num_workers;
+
+ if (gbl_args->appl.mode == DIRECT_RECV ||
+ gbl_args->appl.mode == PLAIN_QUEUE) {
+ /* A queue per assigned worker */
+ num_rx = gbl_args->pktios[i].num_rx_thr;
+ num_tx = gbl_args->pktios[i].num_tx_thr;
+ }
+
+ if (create_pktio(dev, i, num_rx, num_tx, pool))
+ exit(EXIT_FAILURE);
+
+ /* Save interface ethernet address */
+ if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
+ gbl_args->port_eth_addr[i].addr,
+ ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
+ LOG_ERR("Error: interface ethernet address unknown\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Save destination eth address */
+ if (gbl_args->appl.dst_change) {
+ /* 02:00:00:00:00:XX */
+ memset(&new_addr, 0, sizeof(odph_ethaddr_t));
+ new_addr.addr[0] = 0x02;
+ new_addr.addr[5] = i;
+ gbl_args->dst_eth_addr[i] = new_addr;
+ }
+ }
+
+ gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
+
+ bind_queues();
+
+ if (gbl_args->appl.mode == DIRECT_RECV ||
+ gbl_args->appl.mode == PLAIN_QUEUE)
+ print_port_mapping();
+
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+
+ stats = gbl_args->stats;
+
+ odp_barrier_init(&barrier, num_workers + 1);
+
+ if (gbl_args->appl.mode == DIRECT_RECV)
+ thr_run_func = run_worker_direct_mode;
+ else if (gbl_args->appl.mode == PLAIN_QUEUE)
+ thr_run_func = run_worker_plain_queue_mode;
+ else /* SCHED_PARALLEL / SCHED_ATOMIC / SCHED_ORDERED */
+ thr_run_func = run_worker_sched_mode;
+
+ /* Create worker threads */
+ cpu = odp_cpumask_first(&cpumask);
+ for (i = 0; i < num_workers; ++i) {
+ odp_cpumask_t thd_mask;
+
+ gbl_args->thread[i].stats = &stats[i];
+
+ odp_cpumask_zero(&thd_mask);
+ odp_cpumask_set(&thd_mask, cpu);
+ odph_linux_pthread_create(&thread_tbl[i], &thd_mask,
+ thr_run_func,
+ &gbl_args->thread[i],
+ ODP_THREAD_WORKER);
+ cpu = odp_cpumask_next(&cpumask, cpu);
+ }
+
+ /* Start packet receive and transmit */
+ for (i = 0; i < if_count; ++i) {
+ odp_pktio_t pktio;
+
+ pktio = gbl_args->pktios[i].pktio;
+ ret = odp_pktio_start(pktio);
+ if (ret) {
+ LOG_ERR("Error: unable to start %s\n",
+ gbl_args->appl.if_names[i]);
+ exit(EXIT_FAILURE);
+ }
+ }
+
+ ret = print_speed_stats(num_workers, stats, gbl_args->appl.time,
+ gbl_args->appl.accuracy);
+ exit_threads = 1;
+
+ /* Master thread waits for other threads to exit */
+ odph_linux_pthread_join(thread_tbl, num_workers);
+
+ free(gbl_args->appl.if_names);
+ free(gbl_args->appl.if_str);
+ printf("Exit\n\n");
+
+ return ret;
}
diff --git a/test/performance/odp_pktio_perf.c b/test/performance/odp_pktio_perf.c
index 9fdc4cb81..9a4735579 100644
--- a/test/performance/odp_pktio_perf.c
+++ b/test/performance/odp_pktio_perf.c
@@ -145,7 +145,7 @@ typedef struct {
} thread_args_t;
typedef struct {
- uint32be_t magic; /* Packet header magic number */
+ odp_u32be_t magic; /* Packet header magic number */
} pkt_head_t;
/* Pool from which transmitted packets are allocated */
@@ -382,7 +382,7 @@ static void *run_thread_tx(void *arg)
return NULL;
}
-static int receive_packets(odp_queue_t pollq,
+static int receive_packets(odp_queue_t plainq,
odp_event_t *event_tbl, unsigned num_pkts)
{
int n_ev = 0;
@@ -390,12 +390,12 @@ static int receive_packets(odp_queue_t pollq,
if (num_pkts == 0)
return 0;
- if (pollq != ODP_QUEUE_INVALID) {
+ if (plainq != ODP_QUEUE_INVALID) {
if (num_pkts == 1) {
- event_tbl[0] = odp_queue_deq(pollq);
+ event_tbl[0] = odp_queue_deq(plainq);
n_ev = event_tbl[0] != ODP_EVENT_INVALID;
} else {
- n_ev = odp_queue_deq_multi(pollq, event_tbl, num_pkts);
+ n_ev = odp_queue_deq_multi(plainq, event_tbl, num_pkts);
}
} else {
if (num_pkts == 1) {
@@ -413,7 +413,7 @@ static void *run_thread_rx(void *arg)
{
test_globals_t *globals;
int thr_id, batch_len;
- odp_queue_t pollq = ODP_QUEUE_INVALID;
+ odp_queue_t plainq = ODP_QUEUE_INVALID;
thread_args_t *targs = arg;
@@ -429,8 +429,8 @@ static void *run_thread_rx(void *arg)
pkt_rx_stats_t *stats = &globals->rx_stats[thr_id];
if (gbl_args->args.schedule == 0) {
- pollq = odp_pktio_inq_getdef(globals->pktio_rx);
- if (pollq == ODP_QUEUE_INVALID)
+ plainq = odp_pktio_inq_getdef(globals->pktio_rx);
+ if (plainq == ODP_QUEUE_INVALID)
LOG_ABORT("Invalid input queue.\n");
}
@@ -439,7 +439,7 @@ static void *run_thread_rx(void *arg)
odp_event_t ev[BATCH_LEN_MAX];
int i, n_ev;
- n_ev = receive_packets(pollq, ev, batch_len);
+ n_ev = receive_packets(plainq, ev, batch_len);
for (i = 0; i < n_ev; ++i) {
if (odp_event_type(ev[i]) == ODP_EVENT_PACKET) {
@@ -672,7 +672,7 @@ static int run_test(void)
printf("\tReceive batch length: \t%" PRIu32 "\n",
gbl_args->args.rx_batch_len);
printf("\tPacket receive method:\t%s\n",
- gbl_args->args.schedule ? "schedule" : "poll");
+ gbl_args->args.schedule ? "schedule" : "plain");
printf("\tInterface(s): \t");
for (i = 0; i < gbl_args->args.num_ifaces; ++i)
printf("%s ", gbl_args->args.ifaces[i]);
@@ -712,7 +712,7 @@ static odp_pktio_t create_pktio(const char *iface, int schedule)
if (schedule)
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
else
- pktio_param.in_mode = ODP_PKTIN_MODE_POLL;
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
pktio = odp_pktio_open(iface, pool, &pktio_param);
@@ -766,16 +766,16 @@ static int test_init(void)
/* create and associate an input queue for the RX side */
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
snprintf(inq_name, sizeof(inq_name), "inq-pktio-%" PRIu64,
odp_pktio_to_u64(gbl_args->pktio_rx));
inq_def = odp_queue_lookup(inq_name);
if (inq_def == ODP_QUEUE_INVALID)
- inq_def = odp_queue_create(inq_name,
- ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
if (inq_def == ODP_QUEUE_INVALID)
return -1;
@@ -809,7 +809,7 @@ static int destroy_inq(odp_pktio_t pktio)
/* flush any pending events */
while (1) {
- if (q_type == ODP_QUEUE_TYPE_POLL)
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
ev = odp_queue_deq(inq);
else
ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
@@ -881,7 +881,7 @@ static void usage(void)
printf(" default: cpu_count+1/2\n");
printf(" -b, --txbatch <length> Number of packets per TX batch\n");
printf(" default: %d\n", BATCH_LEN_MAX);
- printf(" -p, --poll Poll input queue for packet RX\n");
+ printf(" -p, --plain Plain input queue for packet RX\n");
printf(" default: disabled (use scheduler)\n");
printf(" -R, --rxbatch <length> Number of packets per RX batch\n");
printf(" default: %d\n", BATCH_LEN_MAX);
@@ -904,7 +904,7 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
{"count", required_argument, NULL, 'c'},
{"txcount", required_argument, NULL, 't'},
{"txbatch", required_argument, NULL, 'b'},
- {"poll", no_argument, NULL, 'p'},
+ {"plain", no_argument, NULL, 'p'},
{"rxbatch", required_argument, NULL, 'R'},
{"length", required_argument, NULL, 'l'},
{"rate", required_argument, NULL, 'r'},
diff --git a/test/performance/odp_scheduling.c b/test/performance/odp_scheduling.c
index 8ec89bfe8..d785878cf 100644
--- a/test/performance/odp_scheduling.c
+++ b/test/performance/odp_scheduling.c
@@ -246,7 +246,7 @@ static int test_alloc_multi(int thr, odp_pool_t pool)
}
/**
- * @internal Test queue polling
+ * @internal Test plain queues
*
* Enqueue to and dequeue to/from a single shared queue.
*
@@ -255,7 +255,7 @@ static int test_alloc_multi(int thr, odp_pool_t pool)
*
* @return 0 if successful
*/
-static int test_poll_queue(int thr, odp_pool_t msg_pool)
+static int test_plain_queue(int thr, odp_pool_t msg_pool)
{
odp_event_t ev;
odp_buffer_t buf;
@@ -278,7 +278,7 @@ static int test_poll_queue(int thr, odp_pool_t msg_pool)
t_msg->msg_id = MSG_HELLO;
t_msg->seq = 0;
- queue = odp_queue_lookup("poll_queue");
+ queue = odp_queue_lookup("plain_queue");
if (queue == ODP_QUEUE_INVALID) {
printf(" [%i] Queue lookup failed.\n", thr);
@@ -310,7 +310,7 @@ static int test_poll_queue(int thr, odp_pool_t msg_pool)
cycles = odp_cpu_cycles_diff(c2, c1);
cycles = cycles / QUEUE_ROUNDS;
- printf(" [%i] poll_queue enq+deq %6" PRIu64 " CPU cycles\n",
+ printf(" [%i] plain_queue enq+deq %6" PRIu64 " CPU cycles\n",
thr, cycles);
odp_buffer_free(buf);
@@ -645,7 +645,7 @@ static void *run_thread(void *arg)
odp_barrier_wait(barrier);
- if (test_poll_queue(thr, msg_pool))
+ if (test_plain_queue(thr, msg_pool))
return NULL;
/* Low prio */
@@ -724,14 +724,14 @@ static void test_cpu_freq(void)
nsec = odp_time_to_ns(test_time);
cycles = odp_cpu_cycles_diff(c2, c1);
- max_cycles = (nsec * odp_sys_cpu_hz()) / 1000000000.0;
+ max_cycles = (nsec * odp_cpu_hz_max()) / 1000000000.0;
/* Compare measured CPU cycles to maximum theoretical CPU cycle count */
diff_max_hz = ((double)(cycles) - max_cycles) / max_cycles;
printf("odp_time %" PRIu64 " ns\n", nsec);
printf("odp_cpu_cycles %" PRIu64 " CPU cycles\n", cycles);
- printf("odp_sys_cpu_hz %" PRIu64 " hz\n", odp_sys_cpu_hz());
+ printf("odp_sys_cpu_hz %" PRIu64 " hz\n", odp_cpu_hz_max());
printf("Diff from max CPU freq %f%%\n", diff_max_hz * 100.0);
printf("\n");
@@ -845,8 +845,8 @@ int main(int argc, char *argv[])
printf("ODP system info\n");
printf("---------------\n");
printf("ODP API version: %s\n", odp_version_api_str());
- printf("CPU model: %s\n", odp_sys_cpu_model_str());
- printf("CPU freq (hz): %" PRIu64 "\n", odp_sys_cpu_hz());
+ printf("CPU model: %s\n", odp_cpu_model_str());
+ printf("CPU freq (hz): %" PRIu64 "\n", odp_cpu_hz_max());
printf("Cache line size: %i\n", odp_sys_cache_line_size());
printf("Max CPU count: %i\n", odp_cpu_count());
@@ -900,12 +900,12 @@ int main(int argc, char *argv[])
/* odp_pool_print(pool); */
/*
- * Create a queue for direct poll test
+ * Create a queue for plain queue test
*/
- queue = odp_queue_create("poll_queue", ODP_QUEUE_TYPE_POLL, NULL);
+ queue = odp_queue_create("plain_queue", NULL);
if (queue == ODP_QUEUE_INVALID) {
- LOG_ERR("Poll queue create failed.\n");
+ LOG_ERR("Plain queue create failed.\n");
return -1;
}
@@ -926,6 +926,7 @@ int main(int argc, char *argv[])
name[7] = '0' + i - 10*(i/10);
odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = i;
param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
param.sched.group = ODP_SCHED_GROUP_ALL;
@@ -934,8 +935,7 @@ int main(int argc, char *argv[])
name[9] = '0' + j/10;
name[10] = '0' + j - 10*(j/10);
- queue = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED,
- &param);
+ queue = odp_queue_create(name, &param);
if (queue == ODP_QUEUE_INVALID) {
LOG_ERR("Schedule queue create failed.\n");
diff --git a/test/validation/Makefile.am b/test/validation/Makefile.am
index 197ff8960..cdd5a842a 100644
--- a/test/validation/Makefile.am
+++ b/test/validation/Makefile.am
@@ -1,4 +1,6 @@
-ODP_MODULES = buffer \
+ODP_MODULES = atomic \
+ barrier \
+ buffer \
classification \
config \
cpumask \
@@ -6,6 +8,7 @@ ODP_MODULES = buffer \
errno \
hash \
init \
+ lock \
queue \
packet \
pktio \
@@ -13,7 +16,6 @@ ODP_MODULES = buffer \
random \
scheduler \
std_clib \
- synchronizers \
thread \
time \
timer \
@@ -23,4 +25,4 @@ ODP_MODULES = buffer \
SUBDIRS = common $(ODP_MODULES)
#The tests will need to retain the deprecated test implementation
-AM_CFLAGS += -Wno-deprecated-declarations
\ No newline at end of file
+AM_CFLAGS += -Wno-deprecated-declarations
diff --git a/test/validation/atomic/.gitignore b/test/validation/atomic/.gitignore
new file mode 100644
index 000000000..610ffeab0
--- /dev/null
+++ b/test/validation/atomic/.gitignore
@@ -0,0 +1 @@
+atomic_main
diff --git a/test/validation/atomic/Makefile.am b/test/validation/atomic/Makefile.am
new file mode 100644
index 000000000..9b6bd6315
--- /dev/null
+++ b/test/validation/atomic/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestatomic.la
+libtestatomic_la_SOURCES = atomic.c
+
+test_PROGRAMS = atomic_main$(EXEEXT)
+dist_atomic_main_SOURCES = atomic_main.c
+atomic_main_LDADD = libtestatomic.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = atomic.h
diff --git a/test/validation/atomic/atomic.c b/test/validation/atomic/atomic.c
new file mode 100644
index 000000000..24c0de731
--- /dev/null
+++ b/test/validation/atomic/atomic.c
@@ -0,0 +1,881 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "atomic.h"
+
+#define VERBOSE 0
+#define MAX_ITERATIONS 1000
+
+#define ADD_SUB_CNT 5
+
+#define CNT 10
+#define U32_INIT_VAL (1UL << 10)
+#define U64_INIT_VAL (1ULL << 33)
+#define U32_MAGIC 0xa23f65b2
+#define U64_MAGIC 0xf2e1c5430cb6a52e
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+#define CHECK_MAX_MIN (1 << 0)
+#define CHECK_XCHG (1 << 2)
+
+static odp_atomic_u32_t a32u;
+static odp_atomic_u64_t a64u;
+static odp_atomic_u32_t a32u_min;
+static odp_atomic_u32_t a32u_max;
+static odp_atomic_u64_t a64u_min;
+static odp_atomic_u64_t a64u_max;
+static odp_atomic_u32_t a32u_xchg;
+static odp_atomic_u64_t a64u_xchg;
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ volatile_u32_t global_lock_owner;
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
+static void test_atomic_inc_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u32(&a32u);
+}
+
+static void test_atomic_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_inc_u64(&a64u);
+}
+
+static void test_atomic_dec_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u32(&a32u);
+}
+
+static void test_atomic_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_dec_u64(&a64u);
+}
+
+static void test_atomic_fetch_inc_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u32(&a32u);
+}
+
+static void test_atomic_fetch_inc_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_inc_u64(&a64u);
+}
+
+static void test_atomic_fetch_dec_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u32(&a32u);
+}
+
+static void test_atomic_fetch_dec_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_dec_u64(&a64u);
+}
+
+static void test_atomic_add_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_add_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_32(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
+}
+
+static void test_atomic_fetch_sub_64(void)
+{
+ int i;
+
+ for (i = 0; i < CNT; i++)
+ odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
+}
+
+static void test_atomic_min_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u32(&a32u);
+ odp_atomic_min_u32(&a32u_min, tmp);
+ }
+}
+
+static void test_atomic_min_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_dec_u64(&a64u);
+ odp_atomic_min_u64(&a64u_min, tmp);
+ }
+}
+
+static void test_atomic_max_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u32(&a32u);
+ odp_atomic_max_u32(&a32u_max, tmp);
+ }
+}
+
+static void test_atomic_max_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_fetch_inc_u64(&a64u);
+ odp_atomic_max_u64(&a64u_max, tmp);
+ }
+}
+
+static void test_atomic_cas_inc_32(void)
+{
+ int i;
+ uint32_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(&a32u);
+
+ while (odp_atomic_cas_u32(&a32u, &old, old + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_dec_32(void)
+{
+ int i;
+ uint32_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u32(&a32u);
+
+ while (odp_atomic_cas_u32(&a32u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_inc_64(void)
+{
+ int i;
+ uint64_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(&a64u);
+
+ while (odp_atomic_cas_u64(&a64u, &old, old + 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_cas_dec_64(void)
+{
+ int i;
+ uint64_t old;
+
+ for (i = 0; i < CNT; i++) {
+ old = odp_atomic_load_u64(&a64u);
+
+ while (odp_atomic_cas_u64(&a64u, &old, old - 1) == 0)
+ ;
+ }
+}
+
+static void test_atomic_xchg_32(void)
+{
+ uint32_t old, new;
+ int i;
+
+ for (i = 0; i < CNT; i++) {
+ new = odp_atomic_fetch_inc_u32(&a32u);
+ old = odp_atomic_xchg_u32(&a32u_xchg, new);
+
+ if (old & 0x1)
+ odp_atomic_xchg_u32(&a32u_xchg, 0);
+ else
+ odp_atomic_xchg_u32(&a32u_xchg, 1);
+ }
+
+ odp_atomic_sub_u32(&a32u, CNT);
+ odp_atomic_xchg_u32(&a32u_xchg, U32_MAGIC);
+}
+
+static void test_atomic_xchg_64(void)
+{
+ uint64_t old, new;
+ int i;
+
+ for (i = 0; i < CNT; i++) {
+ new = odp_atomic_fetch_inc_u64(&a64u);
+ old = odp_atomic_xchg_u64(&a64u_xchg, new);
+
+ if (old & 0x1)
+ odp_atomic_xchg_u64(&a64u_xchg, 0);
+ else
+ odp_atomic_xchg_u64(&a64u_xchg, 1);
+ }
+
+ odp_atomic_sub_u64(&a64u, CNT);
+ odp_atomic_xchg_u64(&a64u_xchg, U64_MAGIC);
+}
+
+static void test_atomic_non_relaxed_32(void)
+{
+ int i;
+ uint32_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u32(&a32u);
+ odp_atomic_store_rel_u32(&a32u, tmp);
+
+ tmp = odp_atomic_load_acq_u32(&a32u_max);
+ odp_atomic_add_rel_u32(&a32u_max, 1);
+
+ tmp = odp_atomic_load_acq_u32(&a32u_min);
+ odp_atomic_sub_rel_u32(&a32u_min, 1);
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ while (odp_atomic_cas_acq_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ while (odp_atomic_cas_rel_u32(&a32u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u32(&a32u_xchg);
+ /* finally set value for validation */
+ while (odp_atomic_cas_acq_rel_u32(&a32u_xchg, &tmp, U32_MAGIC)
+ == 0)
+ ;
+ }
+}
+
+static void test_atomic_non_relaxed_64(void)
+{
+ int i;
+ uint64_t tmp;
+
+ for (i = 0; i < CNT; i++) {
+ tmp = odp_atomic_load_acq_u64(&a64u);
+ odp_atomic_store_rel_u64(&a64u, tmp);
+
+ tmp = odp_atomic_load_acq_u64(&a64u_max);
+ odp_atomic_add_rel_u64(&a64u_max, 1);
+
+ tmp = odp_atomic_load_acq_u64(&a64u_min);
+ odp_atomic_sub_rel_u64(&a64u_min, 1);
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ while (odp_atomic_cas_acq_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ while (odp_atomic_cas_rel_u64(&a64u_xchg, &tmp, tmp + 1) == 0)
+ ;
+
+ tmp = odp_atomic_load_u64(&a64u_xchg);
+ /* finally set value for validation */
+ while (odp_atomic_cas_acq_rel_u64(&a64u_xchg, &tmp, U64_MAGIC)
+ == 0)
+ ;
+ }
+}
+
+static void test_atomic_inc_dec_32(void)
+{
+ test_atomic_inc_32();
+ test_atomic_dec_32();
+}
+
+static void test_atomic_inc_dec_64(void)
+{
+ test_atomic_inc_64();
+ test_atomic_dec_64();
+}
+
+static void test_atomic_fetch_inc_dec_32(void)
+{
+ test_atomic_fetch_inc_32();
+ test_atomic_fetch_dec_32();
+}
+
+static void test_atomic_fetch_inc_dec_64(void)
+{
+ test_atomic_fetch_inc_64();
+ test_atomic_fetch_dec_64();
+}
+
+static void test_atomic_add_sub_32(void)
+{
+ test_atomic_add_32();
+ test_atomic_sub_32();
+}
+
+static void test_atomic_add_sub_64(void)
+{
+ test_atomic_add_64();
+ test_atomic_sub_64();
+}
+
+static void test_atomic_fetch_add_sub_32(void)
+{
+ test_atomic_fetch_add_32();
+ test_atomic_fetch_sub_32();
+}
+
+static void test_atomic_fetch_add_sub_64(void)
+{
+ test_atomic_fetch_add_64();
+ test_atomic_fetch_sub_64();
+}
+
+static void test_atomic_max_min_32(void)
+{
+ test_atomic_max_32();
+ test_atomic_min_32();
+}
+
+static void test_atomic_max_min_64(void)
+{
+ test_atomic_max_64();
+ test_atomic_min_64();
+}
+
+static void test_atomic_cas_inc_dec_32(void)
+{
+ test_atomic_cas_inc_32();
+ test_atomic_cas_dec_32();
+}
+
+static void test_atomic_cas_inc_dec_64(void)
+{
+ test_atomic_cas_inc_64();
+ test_atomic_cas_dec_64();
+}
+
+static void test_atomic_init(void)
+{
+ odp_atomic_init_u32(&a32u, 0);
+ odp_atomic_init_u64(&a64u, 0);
+ odp_atomic_init_u32(&a32u_min, 0);
+ odp_atomic_init_u32(&a32u_max, 0);
+ odp_atomic_init_u64(&a64u_min, 0);
+ odp_atomic_init_u64(&a64u_max, 0);
+ odp_atomic_init_u32(&a32u_xchg, 0);
+ odp_atomic_init_u64(&a64u_xchg, 0);
+}
+
+static void test_atomic_store(void)
+{
+ odp_atomic_store_u32(&a32u, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u, U64_INIT_VAL);
+ odp_atomic_store_u32(&a32u_min, U32_INIT_VAL);
+ odp_atomic_store_u32(&a32u_max, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u_min, U64_INIT_VAL);
+ odp_atomic_store_u64(&a64u_max, U64_INIT_VAL);
+ odp_atomic_store_u32(&a32u_xchg, U32_INIT_VAL);
+ odp_atomic_store_u64(&a64u_xchg, U64_INIT_VAL);
+}
+
+static void test_atomic_validate(int check)
+{
+ CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
+ CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
+
+ if (check & CHECK_MAX_MIN) {
+ CU_ASSERT(odp_atomic_load_u32(&a32u_max) >
+ odp_atomic_load_u32(&a32u_min));
+
+ CU_ASSERT(odp_atomic_load_u64(&a64u_max) >
+ odp_atomic_load_u64(&a64u_min));
+ }
+
+ if (check & CHECK_XCHG) {
+ CU_ASSERT(odp_atomic_load_u32(&a32u_xchg) == U32_MAGIC);
+ CU_ASSERT(odp_atomic_load_u64(&a64u_xchg) == U64_MAGIC);
+ }
+}
+
+int atomic_init(void)
+{
+ uint32_t workers_count, max_threads;
+ int ret = 0;
+ odp_cpumask_t mask;
+
+ if (0 != odp_init_global(NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY);
+ if (ODP_SHM_INVALID == global_shm) {
+ fprintf(stderr, "Unable reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->g_num_threads = MAX_WORKERS;
+ global_mem->g_iterations = MAX_ITERATIONS;
+ global_mem->g_verbose = VERBOSE;
+
+ workers_count = odp_cpumask_default_worker(&mask, 0);
+
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->g_num_threads);
+
+ return ret;
+}
+
+/* Atomic tests */
+static void *test_atomic_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_inc_dec_32();
+ test_atomic_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_add_sub_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_add_sub_32();
+ test_atomic_add_sub_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_fetch_inc_dec_32();
+ test_atomic_fetch_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_fetch_add_sub_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_fetch_add_sub_32();
+ test_atomic_fetch_add_sub_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_max_min_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_max_min_32();
+ test_atomic_max_min_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_cas_inc_dec_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_cas_inc_dec_32();
+ test_atomic_cas_inc_dec_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_xchg_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_xchg_32();
+ test_atomic_xchg_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *test_atomic_non_relaxed_thread(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+
+ per_thread_mem = thread_init();
+ test_atomic_non_relaxed_32();
+ test_atomic_non_relaxed_64();
+
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
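+/* Run one atomic test case: set initial values, run the given thread
+ * function on all worker threads, then validate the end result. */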
+static void test_atomic_functional(void *func_ptr(void *), int check)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ test_atomic_init();
+ test_atomic_store();
+ odp_cunit_thread_create(func_ptr, &arg);
+ odp_cunit_thread_exit(&arg);
+ test_atomic_validate(check);
+}
+
+void atomic_test_atomic_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_add_sub(void)
+{
+ test_atomic_functional(test_atomic_add_sub_thread, 0);
+}
+
+void atomic_test_atomic_fetch_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_fetch_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_fetch_add_sub(void)
+{
+ test_atomic_functional(test_atomic_fetch_add_sub_thread, 0);
+}
+
+void atomic_test_atomic_max_min(void)
+{
+ test_atomic_functional(test_atomic_max_min_thread, CHECK_MAX_MIN);
+}
+
+void atomic_test_atomic_cas_inc_dec(void)
+{
+ test_atomic_functional(test_atomic_cas_inc_dec_thread, 0);
+}
+
+void atomic_test_atomic_xchg(void)
+{
+ test_atomic_functional(test_atomic_xchg_thread, CHECK_XCHG);
+}
+
+void atomic_test_atomic_non_relaxed(void)
+{
+ test_atomic_functional(test_atomic_non_relaxed_thread,
+ CHECK_MAX_MIN | CHECK_XCHG);
+}
+
+void atomic_test_atomic_op_lock_free(void)
+{
+ odp_atomic_op_t atomic_op;
+ int ret_null, ret;
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ atomic_op.all_bits = 0;
+
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+
+	/* Test setting the first, the last and a couple of other bits */
+ atomic_op.op.init = 1;
+ CU_ASSERT(atomic_op.op.init == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.init = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.xchg = 1;
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.xchg = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.add = 1;
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.add = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ atomic_op.op.dec = 1;
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.all_bits != 0);
+ atomic_op.op.dec = 0;
+ CU_ASSERT(atomic_op.all_bits == 0);
+
+ memset(&atomic_op, 0xff, sizeof(odp_atomic_op_t));
+ ret = odp_atomic_lock_free_u64(&atomic_op);
+ ret_null = odp_atomic_lock_free_u64(NULL);
+
+ CU_ASSERT(ret == ret_null);
+
+	/* The init operation is not atomic by the spec. A call to
+	 * odp_atomic_lock_free_u64() zeros it but never sets it. */
+
+ if (ret == 0) {
+ /* none are lock free */
+ CU_ASSERT(atomic_op.all_bits == 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 0);
+ CU_ASSERT(atomic_op.op.store == 0);
+ CU_ASSERT(atomic_op.op.fetch_add == 0);
+ CU_ASSERT(atomic_op.op.add == 0);
+ CU_ASSERT(atomic_op.op.fetch_sub == 0);
+ CU_ASSERT(atomic_op.op.sub == 0);
+ CU_ASSERT(atomic_op.op.fetch_inc == 0);
+ CU_ASSERT(atomic_op.op.inc == 0);
+ CU_ASSERT(atomic_op.op.fetch_dec == 0);
+ CU_ASSERT(atomic_op.op.dec == 0);
+ CU_ASSERT(atomic_op.op.min == 0);
+ CU_ASSERT(atomic_op.op.max == 0);
+ CU_ASSERT(atomic_op.op.cas == 0);
+ CU_ASSERT(atomic_op.op.xchg == 0);
+ }
+
+ if (ret == 1) {
+ /* some are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ }
+
+ if (ret == 2) {
+ /* all are lock free */
+ CU_ASSERT(atomic_op.all_bits != 0);
+ CU_ASSERT(atomic_op.op.init == 0);
+ CU_ASSERT(atomic_op.op.load == 1);
+ CU_ASSERT(atomic_op.op.store == 1);
+ CU_ASSERT(atomic_op.op.fetch_add == 1);
+ CU_ASSERT(atomic_op.op.add == 1);
+ CU_ASSERT(atomic_op.op.fetch_sub == 1);
+ CU_ASSERT(atomic_op.op.sub == 1);
+ CU_ASSERT(atomic_op.op.fetch_inc == 1);
+ CU_ASSERT(atomic_op.op.inc == 1);
+ CU_ASSERT(atomic_op.op.fetch_dec == 1);
+ CU_ASSERT(atomic_op.op.dec == 1);
+ CU_ASSERT(atomic_op.op.min == 1);
+ CU_ASSERT(atomic_op.op.max == 1);
+ CU_ASSERT(atomic_op.op.cas == 1);
+ CU_ASSERT(atomic_op.op.xchg == 1);
+ }
+}
+
+odp_testinfo_t atomic_suite_atomic[] = {
+ ODP_TEST_INFO(atomic_test_atomic_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_fetch_add_sub),
+ ODP_TEST_INFO(atomic_test_atomic_max_min),
+ ODP_TEST_INFO(atomic_test_atomic_cas_inc_dec),
+ ODP_TEST_INFO(atomic_test_atomic_xchg),
+ ODP_TEST_INFO(atomic_test_atomic_non_relaxed),
+ ODP_TEST_INFO(atomic_test_atomic_op_lock_free),
+ ODP_TEST_INFO_NULL,
+};
+
+odp_suiteinfo_t atomic_suites[] = {
+ {"atomic", NULL, NULL,
+ atomic_suite_atomic},
+ ODP_SUITE_INFO_NULL
+};
+
+int atomic_main(void)
+{
+ int ret;
+
+ odp_cunit_register_global_init(atomic_init);
+
+ ret = odp_cunit_register(atomic_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/atomic/atomic.h b/test/validation/atomic/atomic.h
new file mode 100644
index 000000000..526767086
--- /dev/null
+++ b/test/validation/atomic/atomic.h
@@ -0,0 +1,38 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_ATOMIC_H_
+#define _ODP_TEST_ATOMIC_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void atomic_test_atomic_inc_dec(void);
+void atomic_test_atomic_add_sub(void);
+void atomic_test_atomic_fetch_inc_dec(void);
+void atomic_test_atomic_fetch_add_sub(void);
+void atomic_test_atomic_max_min(void);
+void atomic_test_atomic_cas_inc_dec(void);
+void atomic_test_atomic_xchg(void);
+void atomic_test_atomic_non_relaxed(void);
+void atomic_test_atomic_op_lock_free(void);
+
+/* test arrays: */
+extern odp_testinfo_t atomic_suite_atomic[];
+
+/* test array init/term functions: */
+int atomic_suite_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t atomic_suites[];
+
+/* executable init/term functions: */
+int atomic_init(void);
+
+/* main test program: */
+int atomic_main(void);
+
+#endif
diff --git a/test/validation/synchronizers/synchronizers_main.c b/test/validation/atomic/atomic_main.c
index 659d3152f..377bdd5b9 100644
--- a/test/validation/synchronizers/synchronizers_main.c
+++ b/test/validation/atomic/atomic_main.c
@@ -4,9 +4,9 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include "synchronizers.h"
+#include "atomic.h"
int main(void)
{
- return synchronizers_main();
+ return atomic_main();
}
diff --git a/test/validation/barrier/.gitignore b/test/validation/barrier/.gitignore
new file mode 100644
index 000000000..2e0ee7ade
--- /dev/null
+++ b/test/validation/barrier/.gitignore
@@ -0,0 +1 @@
+barrier_main
diff --git a/test/validation/barrier/Makefile.am b/test/validation/barrier/Makefile.am
new file mode 100644
index 000000000..8fc632c27
--- /dev/null
+++ b/test/validation/barrier/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestbarrier.la
+libtestbarrier_la_SOURCES = barrier.c
+
+test_PROGRAMS = barrier_main$(EXEEXT)
+dist_barrier_main_SOURCES = barrier_main.c
+barrier_main_LDADD = libtestbarrier.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = barrier.h
diff --git a/test/validation/barrier/barrier.c b/test/validation/barrier/barrier.c
new file mode 100644
index 000000000..8f15cdf0d
--- /dev/null
+++ b/test/validation/barrier/barrier.c
@@ -0,0 +1,393 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <malloc.h>
+#include <odp.h>
+#include <CUnit/Basic.h>
+#include <odp_cunit_common.h>
+#include <unistd.h>
+#include "barrier.h"
+
+#define VERBOSE 0
+#define MAX_ITERATIONS 1000
+#define BARRIER_ITERATIONS 64
+
+#define SLOW_BARRIER_DELAY 400
+#define BASE_DELAY 6
+
+#define NUM_TEST_BARRIERS BARRIER_ITERATIONS
+#define NUM_RESYNC_BARRIERS 100
+
+#define BARRIER_DELAY 10
+
+#define GLOBAL_SHM_NAME "GlobalLockTest"
+
+#define UNUSED __attribute__((__unused__))
+
+static volatile int temp_result;
+
+typedef __volatile uint32_t volatile_u32_t;
+typedef __volatile uint64_t volatile_u64_t;
+
+typedef struct {
+ odp_atomic_u32_t wait_cnt;
+} custom_barrier_t;
+
+typedef struct {
+ /* Global variables */
+ uint32_t g_num_threads;
+ uint32_t g_iterations;
+ uint32_t g_verbose;
+ uint32_t g_max_num_cores;
+
+ odp_barrier_t test_barriers[NUM_TEST_BARRIERS];
+ custom_barrier_t custom_barrier1[NUM_TEST_BARRIERS];
+ custom_barrier_t custom_barrier2[NUM_TEST_BARRIERS];
+ volatile_u32_t slow_thread_num;
+ volatile_u32_t barrier_cnt1;
+ volatile_u32_t barrier_cnt2;
+ odp_barrier_t global_barrier;
+
+} global_shared_mem_t;
+
+/* Per-thread memory */
+typedef struct {
+ global_shared_mem_t *global_mem;
+
+ int thread_id;
+ int thread_core;
+
+ volatile_u64_t delay_counter;
+} per_thread_mem_t;
+
+static odp_shm_t global_shm;
+static global_shared_mem_t *global_mem;
+
+/*
+ * Delay for a consistent amount of time. Ideally the CPU time taken is
+ * linearly proportional to "iterations". The goal is to do work that the
+ * compiler optimizer cannot optimize away, while minimizing loads and
+ * stores (at least to distinct memory addresses) so as not to affect, or
+ * be affected by, caching. This does NOT have to correspond to a specific
+ * number of CPU cycles or be consistent across CPU architectures.
+ */
+static void thread_delay(per_thread_mem_t *per_thread_mem, uint32_t iterations)
+{
+ volatile_u64_t *counter_ptr;
+ uint32_t cnt;
+
+ counter_ptr = &per_thread_mem->delay_counter;
+
+ for (cnt = 1; cnt <= iterations; cnt++)
+ (*counter_ptr)++;
+}
+
+/* Initialise per-thread memory */
+static per_thread_mem_t *thread_init(void)
+{
+ global_shared_mem_t *global_mem;
+ per_thread_mem_t *per_thread_mem;
+ odp_shm_t global_shm;
+ uint32_t per_thread_mem_len;
+
+ per_thread_mem_len = sizeof(per_thread_mem_t);
+ per_thread_mem = malloc(per_thread_mem_len);
+ memset(per_thread_mem, 0, per_thread_mem_len);
+
+ per_thread_mem->delay_counter = 1;
+
+ per_thread_mem->thread_id = odp_thread_id();
+ per_thread_mem->thread_core = odp_cpu_id();
+
+ global_shm = odp_shm_lookup(GLOBAL_SHM_NAME);
+ global_mem = odp_shm_addr(global_shm);
+ CU_ASSERT_PTR_NOT_NULL(global_mem);
+
+ per_thread_mem->global_mem = global_mem;
+
+ return per_thread_mem;
+}
+
+static void thread_finalize(per_thread_mem_t *per_thread_mem)
+{
+ free(per_thread_mem);
+}
+
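+/*
+ * Minimal software barrier built only on ODP atomics. The tests use it
+ * to resynchronize threads without relying on odp_barrier_t, since
+ * odp_barrier_t itself is the object under test.
+ */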
+static void custom_barrier_init(custom_barrier_t *custom_barrier,
+ uint32_t num_threads)
+{
+ odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
+}
+
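+/*
+ * Block until every participating thread has decremented wait_cnt to
+ * zero; the inner delay loop throttles how often wait_cnt is re-read.
+ */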
+static void custom_barrier_wait(custom_barrier_t *custom_barrier)
+{
+ volatile_u64_t counter = 1;
+ uint32_t delay_cnt, wait_cnt;
+
+ odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);
+
+ wait_cnt = 1;
+ while (wait_cnt != 0) {
+ for (delay_cnt = 1; delay_cnt <= BARRIER_DELAY; delay_cnt++)
+ counter++;
+
+ wait_cnt = odp_atomic_load_u32(&custom_barrier->wait_cnt);
+ }
+}
+
+static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
+ odp_bool_t no_barrier_test)
+{
+ global_shared_mem_t *global_mem;
+ uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
+ uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
+ uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
+
+ thread_num = odp_thread_id();
+ global_mem = per_thread_mem->global_mem;
+ num_threads = global_mem->g_num_threads;
+ iterations = BARRIER_ITERATIONS;
+
+ barrier_errs = 0;
+ lock_owner_delay = SLOW_BARRIER_DELAY;
+
+ for (cnt = 1; cnt < iterations; cnt++) {
+ /* Wait here until all of the threads reach this point */
+ custom_barrier_wait(&global_mem->custom_barrier1[cnt]);
+
+ barrier_cnt1 = global_mem->barrier_cnt1;
+ barrier_cnt2 = global_mem->barrier_cnt2;
+
+ if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
+ printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
+ " %" PRIu32 " cnt=%" PRIu32 "\n",
+ thread_num, barrier_cnt1, barrier_cnt2, cnt);
+ barrier_errs++;
+ }
+
+ /* Wait here until all of the threads reach this point */
+ custom_barrier_wait(&global_mem->custom_barrier2[cnt]);
+
+ slow_thread_num = global_mem->slow_thread_num;
+ i_am_slow_thread = thread_num == slow_thread_num;
+ next_slow_thread = slow_thread_num + 1;
+ if (num_threads < next_slow_thread)
+ next_slow_thread = 1;
+
+ /*
+ * Now run the test, which involves having all but one thread
+ * immediately calling odp_barrier_wait(), and one thread wait a
+ * moderate amount of time and then calling odp_barrier_wait().
+ * The test fails if any of the first group of threads
+ * has not waited for the "slow" thread. The "slow" thread is
+ * responsible for re-initializing the barrier for next trial.
+ */
+ if (i_am_slow_thread) {
+ thread_delay(per_thread_mem, lock_owner_delay);
+ lock_owner_delay += BASE_DELAY;
+ if ((global_mem->barrier_cnt1 != cnt) ||
+ (global_mem->barrier_cnt2 != cnt) ||
+ (global_mem->slow_thread_num
+ != slow_thread_num))
+ barrier_errs++;
+ }
+
+ if (no_barrier_test == 0)
+ odp_barrier_wait(&global_mem->test_barriers[cnt]);
+
+ global_mem->barrier_cnt1 = cnt + 1;
+ odp_mb_full();
+
+ if (i_am_slow_thread) {
+ global_mem->slow_thread_num = next_slow_thread;
+ global_mem->barrier_cnt2 = cnt + 1;
+ odp_mb_full();
+ } else {
+ while (global_mem->barrier_cnt2 != (cnt + 1))
+ thread_delay(per_thread_mem, BASE_DELAY);
+ }
+ }
+
+ if ((global_mem->g_verbose) && (barrier_errs != 0))
+ printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
+ " barrier_errs in %" PRIu32 " iterations\n", thread_num,
+ per_thread_mem->thread_id,
+ per_thread_mem->thread_core, barrier_errs, iterations);
+
+ return barrier_errs;
+}
+
+static void *no_barrier_functional_test(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+ uint32_t barrier_errs;
+
+ per_thread_mem = thread_init();
+ barrier_errs = barrier_test(per_thread_mem, 1);
+
+	/*
+	 * Note that the following CU_ASSERT may appear inverted, but the
+	 * no_barrier test is expected to observe barrier_errs; if it does not,
+	 * something is wrong with the test methodology or the ODP thread
+	 * implementation. This test therefore PASSES only if it sees
+	 * barrier_errs or if a single worker was used.
+	 */
+ CU_ASSERT(barrier_errs != 0 || global_mem->g_num_threads == 1);
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void *barrier_functional_test(void *arg UNUSED)
+{
+ per_thread_mem_t *per_thread_mem;
+ uint32_t barrier_errs;
+
+ per_thread_mem = thread_init();
+ barrier_errs = barrier_test(per_thread_mem, 0);
+
+ CU_ASSERT(barrier_errs == 0);
+ thread_finalize(per_thread_mem);
+
+ return NULL;
+}
+
+static void barrier_test_init(void)
+{
+ uint32_t num_threads, idx;
+
+ num_threads = global_mem->g_num_threads;
+
+ for (idx = 0; idx < NUM_TEST_BARRIERS; idx++) {
+ odp_barrier_init(&global_mem->test_barriers[idx], num_threads);
+ custom_barrier_init(&global_mem->custom_barrier1[idx],
+ num_threads);
+ custom_barrier_init(&global_mem->custom_barrier2[idx],
+ num_threads);
+ }
+
+ global_mem->slow_thread_num = 1;
+ global_mem->barrier_cnt1 = 1;
+ global_mem->barrier_cnt2 = 1;
+}
+
+/* Barrier tests */
+void barrier_test_memory_barrier(void)
+{
+ volatile int a = 0;
+ volatile int b = 0;
+ volatile int c = 0;
+ volatile int d = 0;
+
+	/* Call all memory barriers to verify that they are implemented */
+ a = 1;
+ odp_mb_release();
+ b = 1;
+ odp_mb_acquire();
+ c = 1;
+ odp_mb_full();
+ d = 1;
+
+ /* Avoid "variable set but not used" warning */
+ temp_result = a + b + c + d;
+}
+
+void barrier_test_no_barrier_functional(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ barrier_test_init();
+ odp_cunit_thread_create(no_barrier_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+void barrier_test_barrier_functional(void)
+{
+ pthrd_arg arg;
+
+ arg.numthrds = global_mem->g_num_threads;
+ barrier_test_init();
+ odp_cunit_thread_create(barrier_functional_test, &arg);
+ odp_cunit_thread_exit(&arg);
+}
+
+odp_testinfo_t barrier_suite_barrier[] = {
+ ODP_TEST_INFO(barrier_test_memory_barrier),
+ ODP_TEST_INFO(barrier_test_no_barrier_functional),
+ ODP_TEST_INFO(barrier_test_barrier_functional),
+ ODP_TEST_INFO_NULL
+};
+
+int barrier_init(void)
+{
+ uint32_t workers_count, max_threads;
+ int ret = 0;
+ odp_cpumask_t mask;
+
+ if (0 != odp_init_global(NULL, NULL)) {
+ fprintf(stderr, "error: odp_init_global() failed.\n");
+ return -1;
+ }
+ if (0 != odp_init_local(ODP_THREAD_CONTROL)) {
+ fprintf(stderr, "error: odp_init_local() failed.\n");
+ return -1;
+ }
+
+ global_shm = odp_shm_reserve(GLOBAL_SHM_NAME,
+ sizeof(global_shared_mem_t), 64,
+ ODP_SHM_SW_ONLY);
+ if (ODP_SHM_INVALID == global_shm) {
+ fprintf(stderr, "Unable reserve memory for global_shm\n");
+ return -1;
+ }
+
+ global_mem = odp_shm_addr(global_shm);
+ memset(global_mem, 0, sizeof(global_shared_mem_t));
+
+ global_mem->g_num_threads = MAX_WORKERS;
+ global_mem->g_iterations = MAX_ITERATIONS;
+ global_mem->g_verbose = VERBOSE;
+
+ workers_count = odp_cpumask_default_worker(&mask, 0);
+
+ max_threads = (workers_count >= MAX_WORKERS) ?
+ MAX_WORKERS : workers_count;
+
+ if (max_threads < global_mem->g_num_threads) {
+ printf("Requested num of threads is too large\n");
+ printf("reducing from %" PRIu32 " to %" PRIu32 "\n",
+ global_mem->g_num_threads,
+ max_threads);
+ global_mem->g_num_threads = max_threads;
+ }
+
+ printf("Num of threads used = %" PRIu32 "\n",
+ global_mem->g_num_threads);
+
+ return ret;
+}
+
+odp_suiteinfo_t barrier_suites[] = {
+ {"barrier", NULL, NULL,
+ barrier_suite_barrier},
+ ODP_SUITE_INFO_NULL
+};
+
+int barrier_main(void)
+{
+ int ret;
+
+ odp_cunit_register_global_init(barrier_init);
+
+ ret = odp_cunit_register(barrier_suites);
+
+ if (ret == 0)
+ ret = odp_cunit_run();
+
+ return ret;
+}
diff --git a/test/validation/barrier/barrier.h b/test/validation/barrier/barrier.h
new file mode 100644
index 000000000..3cddfc428
--- /dev/null
+++ b/test/validation/barrier/barrier.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_BARRIER_H_
+#define _ODP_TEST_BARRIER_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void barrier_test_memory_barrier(void);
+void barrier_test_no_barrier_functional(void);
+void barrier_test_barrier_functional(void);
+
+/* test arrays: */
+extern odp_testinfo_t barrier_suite_barrier[];
+
+/* test registry: */
+extern odp_suiteinfo_t barrier_suites[];
+
+/* executable init/term functions: */
+int barrier_init(void);
+
+/* main test program: */
+int barrier_main(void);
+
+#endif
diff --git a/test/validation/barrier/barrier_main.c b/test/validation/barrier/barrier_main.c
new file mode 100644
index 000000000..88c9b3e52
--- /dev/null
+++ b/test/validation/barrier/barrier_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "barrier.h"
+
+int main(void)
+{
+ return barrier_main();
+}
diff --git a/test/validation/classification/classification.h b/test/validation/classification/classification.h
index a186339ff..5508af7ca 100644
--- a/test/validation/classification/classification.h
+++ b/test/validation/classification/classification.h
@@ -19,6 +19,8 @@
#define CLS_DEFAULT_DADDR "10.0.0.100/32"
#define CLS_DEFAULT_SPORT 1024
#define CLS_DEFAULT_DPORT 2048
+#define CLS_DEFAULT_DMAC 0x010203040506
+#define CLS_DEFAULT_SMAC 0x060504030201
/* Config values for Error CoS */
#define TEST_ERROR 1
@@ -29,18 +31,18 @@
#define CLS_PMR_CHAIN_SRC 2
#define CLS_PMR_CHAIN_DST 3
#define CLS_PMR_CHAIN_SADDR "10.0.0.5/32"
-#define CLS_PMR_CHAIN_SPORT 3000
+#define CLS_PMR_CHAIN_PORT 3000
/* Config values for PMR */
#define TEST_PMR 1
#define CLS_PMR 4
-#define CLS_PMR_SPORT 4000
+#define CLS_PMR_PORT 4000
/* Config values for PMR SET */
#define TEST_PMR_SET 1
#define CLS_PMR_SET 5
#define CLS_PMR_SET_SADDR "10.0.0.6/32"
-#define CLS_PMR_SET_SPORT 5000
+#define CLS_PMR_SET_PORT 5000
/* Config values for CoS L2 Priority */
#define TEST_L2_QOS 1
@@ -76,6 +78,8 @@ void classification_test_pmr_term_tcp_sport(void);
void classification_test_pmr_term_udp_dport(void);
void classification_test_pmr_term_udp_sport(void);
void classification_test_pmr_term_ipproto(void);
+void classification_test_pmr_term_dmac(void);
+void classification_test_pmr_term_packet_len(void);
/* test arrays: */
extern odp_testinfo_t classification_suite_basic[];
diff --git a/test/validation/classification/odp_classification_basic.c b/test/validation/classification/odp_classification_basic.c
index f0b7a4243..81077b609 100644
--- a/test/validation/classification/odp_classification_basic.c
+++ b/test/validation/classification/odp_classification_basic.c
@@ -78,7 +78,7 @@ void classification_test_create_pmr_match(void)
val = 1024;
mask = 0xffff;
- match.term = ODP_PMR_TCP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &mask;
match.val_sz = sizeof(val);
@@ -99,7 +99,7 @@ void classification_test_destroy_pmr(void)
val = 1024;
mask = 0xffff;
- match.term = ODP_PMR_TCP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &mask;
match.val_sz = sizeof(val);
diff --git a/test/validation/classification/odp_classification_common.c b/test/validation/classification/odp_classification_common.c
index afcea4546..54ce5949d 100644
--- a/test/validation/classification/odp_classification_common.c
+++ b/test/validation/classification/odp_classification_common.c
@@ -13,8 +13,8 @@
#include <odp/helper/tcp.h>
typedef struct cls_test_packet {
- uint32be_t magic;
- uint32be_t seq;
+ odp_u32be_t magic;
+ odp_u32be_t seq;
} cls_test_packet_t;
int destroy_inq(odp_pktio_t pktio)
@@ -161,17 +161,14 @@ odp_queue_t queue_create(const char *queuename, bool sched)
if (sched) {
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
- queue = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue = odp_queue_create(queuename, &qparam);
} else {
- queue = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_POLL,
- NULL);
+ queue = odp_queue_create(queuename, NULL);
}
return queue;
@@ -193,21 +190,35 @@ odp_pool_t pool_create(const char *poolname)
odp_packet_t create_packet(odp_pool_t pool, bool vlan,
odp_atomic_u32_t *seq, bool flag_udp)
{
+ return create_packet_len(pool, vlan, seq, flag_udp, 0);
+}
+
+odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp,
+ uint16_t len)
+{
uint32_t seqno;
odph_ethhdr_t *ethhdr;
odph_udphdr_t *udp;
odph_tcphdr_t *tcp;
odph_ipv4hdr_t *ip;
- uint8_t payload_len;
- char src_mac[ODPH_ETHADDR_LEN] = {0};
- char dst_mac[ODPH_ETHADDR_LEN] = {0};
+ uint16_t payload_len;
+ uint64_t src_mac = CLS_DEFAULT_SMAC;
+ uint64_t dst_mac = CLS_DEFAULT_DMAC;
+ uint64_t dst_mac_be;
uint32_t addr = 0;
uint32_t mask;
int offset;
odp_packet_t pkt;
int packet_len = 0;
- payload_len = sizeof(cls_test_packet_t);
+	/* The 48-bit Ethernet address occupies the low bytes of the 64-bit
+	 * value; after conversion to big-endian it must be shifted right so
+	 * that its first byte lands at the start of the copied region. */
+ dst_mac_be = odp_cpu_to_be_64(dst_mac);
+ if (dst_mac != dst_mac_be)
+ dst_mac_be = dst_mac_be >> (64 - 8 * ODPH_ETHADDR_LEN);
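+	/* e.g. on a little-endian host, dst_mac 0x010203040506 becomes
+	 * 0x0605040302010000 after odp_cpu_to_be_64() and 0x0000060504030201
+	 * after the shift, so the first six bytes in memory are
+	 * 01 02 03 04 05 06, the wire order. */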
+
+ payload_len = sizeof(cls_test_packet_t) + len;
packet_len += ODPH_ETHHDR_LEN;
packet_len += ODPH_IPV4HDR_LEN;
if (flag_udp)
@@ -226,8 +237,8 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
offset = 0;
odp_packet_l2_offset_set(pkt, offset);
ethhdr = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
- memcpy(ethhdr->src.addr, src_mac, ODPH_ETHADDR_LEN);
- memcpy(ethhdr->dst.addr, dst_mac, ODPH_ETHADDR_LEN);
+ memcpy(ethhdr->src.addr, &src_mac, ODPH_ETHADDR_LEN);
+ memcpy(ethhdr->dst.addr, &dst_mac_be, ODPH_ETHADDR_LEN);
offset += sizeof(odph_ethhdr_t);
if (vlan) {
/* Default vlan header */
@@ -240,7 +251,7 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
vlan->tpid = odp_cpu_to_be_16(ODPH_ETHTYPE_VLAN);
offset += sizeof(odph_vlanhdr_t);
parseptr += sizeof(odph_vlanhdr_t);
- uint16be_t *type = (uint16be_t *)(void *)parseptr;
+ odp_u16be_t *type = (odp_u16be_t *)(void *)parseptr;
*type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
} else {
ethhdr->type = odp_cpu_to_be_16(ODPH_ETHTYPE_IPV4);
@@ -251,10 +262,10 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
/* ipv4 */
ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
- parse_ipv4_string(CLS_DEFAULT_SADDR, &addr, &mask);
+ parse_ipv4_string(CLS_DEFAULT_DADDR, &addr, &mask);
ip->dst_addr = odp_cpu_to_be_32(addr);
- parse_ipv4_string(CLS_DEFAULT_DADDR, &addr, &mask);
+ parse_ipv4_string(CLS_DEFAULT_SADDR, &addr, &mask);
ip->src_addr = odp_cpu_to_be_32(addr);
ip->ver_ihl = ODPH_IPV4 << 4 | ODPH_IPV4HDR_IHL_MIN;
if (flag_udp)
@@ -299,3 +310,55 @@ odp_packet_t create_packet(odp_pool_t pool, bool vlan,
return pkt;
}
+
+odp_pmr_term_t find_first_supported_l3_pmr(void)
+{
+ unsigned long long cap;
+ odp_pmr_term_t term = ODP_PMR_TCP_DPORT;
+
+	/* Choose the first TCP/UDP port PMR term the implementation supports */
+ cap = odp_pmr_terms_cap();
+ if (cap & (1 << ODP_PMR_UDP_SPORT))
+ term = ODP_PMR_UDP_SPORT;
+ else if (cap & (1 << ODP_PMR_UDP_DPORT))
+ term = ODP_PMR_UDP_DPORT;
+ else if (cap & (1 << ODP_PMR_TCP_SPORT))
+ term = ODP_PMR_TCP_SPORT;
+ else if (cap & (1 << ODP_PMR_TCP_DPORT))
+ term = ODP_PMR_TCP_DPORT;
+ else
+ CU_FAIL("Implementations doesn't support any TCP/UDP PMR");
+
+ return term;
+}
+
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port)
+{
+ odph_udphdr_t *udp;
+ odph_tcphdr_t *tcp;
+ odp_pmr_term_t term;
+
+ udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
+ port = odp_cpu_to_be_16(port);
+ term = find_first_supported_l3_pmr();
+ switch (term) {
+ case ODP_PMR_UDP_SPORT:
+ udp->src_port = port;
+ break;
+ case ODP_PMR_UDP_DPORT:
+ udp->dst_port = port;
+ break;
+ case ODP_PMR_TCP_DPORT:
+ tcp->dst_port = port;
+ break;
+ case ODP_PMR_TCP_SPORT:
+ tcp->src_port = port;
+ break;
+ default:
+ CU_FAIL("Unsupported L3 term");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/test/validation/classification/odp_classification_test_pmr.c b/test/validation/classification/odp_classification_test_pmr.c
index 53b96204e..5f516a7fc 100644
--- a/test/validation/classification/odp_classification_test_pmr.c
+++ b/test/validation/classification/odp_classification_test_pmr.c
@@ -39,8 +39,9 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type)
return ODP_PKTIO_INVALID;
odp_pktio_param_init(&pktio_param);
- if (q_type == ODP_QUEUE_TYPE_POLL)
- pktio_param.in_mode = ODP_PKTIN_MODE_POLL;
+
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
+ pktio_param.in_mode = ODP_PKTIN_MODE_QUEUE;
else
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
@@ -55,13 +56,14 @@ odp_pktio_t create_pktio(odp_queue_type_t q_type)
return pktio;
}
-int create_default_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
+int create_default_inq(odp_pktio_t pktio, odp_queue_type_t qtype ODP_UNUSED)
{
odp_queue_param_t qparam;
odp_queue_t inq_def;
char inq_name[ODP_QUEUE_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -70,10 +72,7 @@ int create_default_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
odp_pktio_to_u64(pktio));
inq_def = odp_queue_lookup(inq_name);
if (inq_def == ODP_QUEUE_INVALID)
- inq_def = odp_queue_create(
- inq_name,
- ODP_QUEUE_TYPE_PKTIN,
- qtype == ODP_QUEUE_TYPE_POLL ? NULL : &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
CU_ASSERT_FATAL(inq_def != ODP_QUEUE_INVALID);
@@ -156,6 +155,7 @@ void classification_test_pmr_term_tcp_dport(void)
odp_pool_t pool;
odp_pool_t pool_recv;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_DPORT;
mask = 0xffff;
@@ -166,6 +166,9 @@ void classification_test_pmr_term_tcp_dport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_TCP_DPORT;
match.val = &val;
match.mask = &mask;
@@ -193,12 +196,13 @@ void classification_test_pmr_term_tcp_dport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
@@ -219,6 +223,9 @@ void classification_test_pmr_term_tcp_dport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
@@ -265,6 +272,7 @@ void classification_test_pmr_term_tcp_sport(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_SPORT;
mask = 0xffff;
@@ -275,6 +283,9 @@ void classification_test_pmr_term_tcp_sport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_TCP_SPORT;
match.val = &val;
match.mask = &mask;
@@ -301,12 +312,13 @@ void classification_test_pmr_term_tcp_sport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, false);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
@@ -325,6 +337,9 @@ void classification_test_pmr_term_tcp_sport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
tcp = (odph_tcphdr_t *)odp_packet_l4_ptr(pkt, NULL);
tcp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
@@ -371,6 +386,7 @@ void classification_test_pmr_term_udp_dport(void)
char cosname[ODP_COS_NAME_LEN];
odp_pmr_match_t match;
odp_cls_cos_param_t cls_param;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_DPORT;
mask = 0xffff;
@@ -381,6 +397,9 @@ void classification_test_pmr_term_udp_dport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_UDP_DPORT;
match.val = &val;
match.mask = &mask;
@@ -407,12 +426,13 @@ void classification_test_pmr_term_udp_dport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT);
@@ -432,6 +452,9 @@ void classification_test_pmr_term_udp_dport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->dst_port = odp_cpu_to_be_16(CLS_DEFAULT_DPORT + 1);
@@ -478,6 +501,7 @@ void classification_test_pmr_term_udp_sport(void)
char cosname[ODP_COS_NAME_LEN];
odp_pmr_match_t match;
odp_cls_cos_param_t cls_param;
+ odph_ethhdr_t *eth;
val = CLS_DEFAULT_SPORT;
mask = 0xffff;
@@ -488,6 +512,9 @@ void classification_test_pmr_term_udp_sport(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_UDP_SPORT;
match.val = &val;
match.mask = &mask;
@@ -514,12 +541,13 @@ void classification_test_pmr_term_udp_sport(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT);
@@ -538,6 +566,9 @@ void classification_test_pmr_term_udp_sport(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
udp->src_port = odp_cpu_to_be_16(CLS_DEFAULT_SPORT + 1);
@@ -583,6 +614,7 @@ void classification_test_pmr_term_ipproto(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
@@ -593,6 +625,9 @@ void classification_test_pmr_term_ipproto(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_IPPROTO;
match.val = &val;
match.mask = &mask;
@@ -619,8 +654,114 @@ void classification_test_pmr_term_ipproto(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
+ pkt = create_packet(pkt_pool, false, &seq, true);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_pmr_destroy(pmr);
+ odp_packet_free(pkt);
+ destroy_inq(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
+void classification_test_pmr_term_dmac(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ uint64_t val;
+ uint64_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
+
+	val = CLS_DEFAULT_DMAC; /* 48-bit Ethernet MAC address */
+ mask = 0xffffffffffff;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(retval == 0);
+
configure_default_cos(pktio, &default_cos,
&default_queue, &default_pool);
+
+ match.term = ODP_PMR_DMAC;
+ match.val = &val;
+ match.mask = &mask;
+ match.val_sz = ODPH_ETHADDR_LEN;
+
+ pmr = odp_pmr_create(&match);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ queue = queue_create("dmac", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("dmac");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "dmac");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_pmr_cos(pmr, pktio, cos);
+ CU_ASSERT(retval == 0);
+
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
@@ -638,6 +779,8 @@ void classification_test_pmr_term_ipproto(void)
/* Other packets delivered to default queue */
pkt = create_packet(pkt_pool, false, &seq, false);
 CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+	eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+	memset(eth->dst.addr, 0, ODPH_ETHADDR_LEN);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
@@ -663,6 +806,116 @@ void classification_test_pmr_term_ipproto(void)
odp_pktio_close(pktio);
}
+void classification_test_pmr_term_packet_len(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ uint16_t val;
+ uint16_t mask;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_cos_t default_cos;
+ odp_pool_t default_pool;
+ odp_pool_t pool;
+ odp_pool_t recvpool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ char cosname[ODP_COS_NAME_LEN];
+ odp_cls_cos_param_t cls_param;
+ odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
+
+ val = 1024;
+	/* With val 1024 (0x0400) and mask 0xff00, any packet length in the
+	 * range 1024..1279 (0x0400..0x04ff) matches */
+ mask = 0xff00;
+ seqno = 0;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+ retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ match.term = ODP_PMR_LEN;
+ match.val = &val;
+ match.mask = &mask;
+ match.val_sz = sizeof(val);
+
+ pmr = odp_pmr_create(&match);
+ CU_ASSERT(pmr != ODP_PMR_INVAL);
+
+ queue = queue_create("packet_len", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("packet_len");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "packet_len");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_pmr_cos(pmr, pktio, cos);
+ CU_ASSERT(retval == 0);
+
+ /* create packet of payload length 1024 */
+ pkt = create_packet_len(pkt_pool, false, &seq, true, 1024);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == pool);
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ recvpool = odp_packet_pool(pkt);
+ CU_ASSERT(recvpool == default_pool);
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_pmr_destroy(pmr);
+ odp_packet_free(pkt);
+ destroy_inq(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
static void classification_test_pmr_pool_set(void)
{
odp_packet_t pkt;
@@ -684,6 +937,7 @@ static void classification_test_pmr_pool_set(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
@@ -694,6 +948,9 @@ static void classification_test_pmr_pool_set(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_IPPROTO;
match.val = &val;
match.mask = &mask;
@@ -727,12 +984,13 @@ static void classification_test_pmr_pool_set(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
enqueue_pktio_interface(pkt, pktio);
@@ -777,6 +1035,7 @@ static void classification_test_pmr_queue_set(void)
char cosname[ODP_COS_NAME_LEN];
odp_cls_cos_param_t cls_param;
odp_pmr_match_t match;
+ odph_ethhdr_t *eth;
val = ODPH_IPPROTO_UDP;
mask = 0xff;
@@ -787,6 +1046,9 @@ static void classification_test_pmr_queue_set(void)
retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
CU_ASSERT(retval == 0);
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
match.term = ODP_PMR_IPPROTO;
match.val = &val;
match.mask = &mask;
@@ -820,12 +1082,13 @@ static void classification_test_pmr_queue_set(void)
retval = odp_pktio_pmr_cos(pmr, pktio, cos);
CU_ASSERT(retval == 0);
- configure_default_cos(pktio, &default_cos,
- &default_queue, &default_pool);
pkt = create_packet(pkt_pool, false, &seq, true);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
enqueue_pktio_interface(pkt, pktio);
@@ -849,13 +1112,121 @@ static void classification_test_pmr_queue_set(void)
odp_pktio_close(pktio);
}
+static void classification_test_pmr_term_daddr(void)
+{
+ odp_packet_t pkt;
+ uint32_t seqno;
+ int retval;
+ odp_pktio_t pktio;
+ odp_queue_t queue;
+ odp_queue_t retqueue;
+ odp_queue_t default_queue;
+ odp_pool_t pool;
+ odp_pool_t default_pool;
+ odp_pmr_t pmr;
+ odp_cos_t cos;
+ odp_cos_t default_cos;
+ uint32_t addr;
+ uint32_t mask;
+ char cosname[ODP_QUEUE_NAME_LEN];
+ odp_pmr_match_t match;
+ odp_cls_cos_param_t cls_param;
+ odph_ipv4hdr_t *ip;
+ const char *dst_addr = "10.0.0.99/32";
+ odph_ethhdr_t *eth;
+
+ pktio = create_pktio(ODP_QUEUE_TYPE_SCHED);
+ retval = create_default_inq(pktio, ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(retval == 0);
+
+ configure_default_cos(pktio, &default_cos,
+ &default_queue, &default_pool);
+
+ parse_ipv4_string(dst_addr, &addr, &mask);
+ match.term = ODP_PMR_DIP_ADDR;
+ match.val = &addr;
+ match.mask = &mask;
+ match.val_sz = sizeof(addr);
+
+ pmr = odp_pmr_create(&match);
+ CU_ASSERT_FATAL(pmr != ODP_PMR_INVAL);
+
+ queue = queue_create("daddr", true);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+
+ pool = pool_create("daddr");
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ sprintf(cosname, "daddr");
+ odp_cls_cos_param_init(&cls_param);
+ cls_param.pool = pool;
+ cls_param.queue = queue;
+ cls_param.drop_policy = ODP_COS_DROP_POOL;
+
+ cos = odp_cls_cos_create(cosname, &cls_param);
+ CU_ASSERT_FATAL(cos != ODP_COS_INVALID);
+
+ retval = odp_pktio_pmr_cos(pmr, pktio, cos);
+ CU_ASSERT(retval == 0);
+
+	/* A packet whose destination IP address matches the PMR rule
+	 * should be delivered to the CoS queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+ ip = (odph_ipv4hdr_t *)odp_packet_l3_ptr(pkt, NULL);
+ ip->dst_addr = odp_cpu_to_be_32(addr);
+ ip->chksum = odph_ipv4_csum_update(pkt);
+
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == queue);
+ odp_packet_free(pkt);
+
+ /* Other packets delivered to default queue */
+ pkt = create_packet(pkt_pool, false, &seq, false);
+ seqno = cls_pkt_get_seq(pkt);
+ CU_ASSERT(seqno != TEST_SEQ_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ odp_pktio_mac_addr(pktio, eth->src.addr, ODPH_ETHADDR_LEN);
+ odp_pktio_mac_addr(pktio, eth->dst.addr, ODPH_ETHADDR_LEN);
+
+ enqueue_pktio_interface(pkt, pktio);
+
+ pkt = receive_packet(&retqueue, ODP_TIME_SEC_IN_NS);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ CU_ASSERT(seqno == cls_pkt_get_seq(pkt));
+ CU_ASSERT(retqueue == default_queue);
+
+ odp_cos_destroy(cos);
+ odp_cos_destroy(default_cos);
+ odp_pmr_destroy(pmr);
+ odp_packet_free(pkt);
+ destroy_inq(pktio);
+ odp_pool_destroy(default_pool);
+ odp_pool_destroy(pool);
+ odp_queue_destroy(queue);
+ odp_queue_destroy(default_queue);
+ odp_pktio_close(pktio);
+}
+
odp_testinfo_t classification_suite_pmr[] = {
ODP_TEST_INFO(classification_test_pmr_term_tcp_dport),
ODP_TEST_INFO(classification_test_pmr_term_tcp_sport),
ODP_TEST_INFO(classification_test_pmr_term_udp_dport),
ODP_TEST_INFO(classification_test_pmr_term_udp_sport),
ODP_TEST_INFO(classification_test_pmr_term_ipproto),
+ ODP_TEST_INFO(classification_test_pmr_term_dmac),
ODP_TEST_INFO(classification_test_pmr_pool_set),
ODP_TEST_INFO(classification_test_pmr_queue_set),
+ ODP_TEST_INFO(classification_test_pmr_term_daddr),
+ ODP_TEST_INFO(classification_test_pmr_term_packet_len),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/classification/odp_classification_tests.c b/test/validation/classification/odp_classification_tests.c
index e11c3d8b6..a19425987 100644
--- a/test/validation/classification/odp_classification_tests.c
+++ b/test/validation/classification/odp_classification_tests.c
@@ -10,6 +10,7 @@
#include <odp/helper/eth.h>
#include <odp/helper/ip.h>
#include <odp/helper/udp.h>
+#include <odp/helper/tcp.h>
static odp_cos_t cos_list[CLS_ENTRIES];
static odp_pmr_t pmr_list[CLS_ENTRIES];
@@ -50,13 +51,13 @@ int classification_suite_init(void)
}
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "inq_loop");
- inq_def = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_PKTIN, &qparam);
+ inq_def = odp_queue_create(queuename, &qparam);
odp_pktio_inq_setdef(pktio_loop, inq_def);
for (i = 0; i < CLS_ENTRIES; i++)
@@ -139,15 +140,14 @@ void configure_cls_pmr_chain(void)
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
qparam.sched.lock_count = ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE;
sprintf(queuename, "%s", "SrcQueue");
- queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR_CHAIN_SRC] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR_CHAIN_SRC] != ODP_QUEUE_INVALID);
@@ -165,14 +165,13 @@ void configure_cls_pmr_chain(void)
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_NORMAL;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DstQueue");
- queue_list[CLS_PMR_CHAIN_DST] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR_CHAIN_DST] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR_CHAIN_DST] != ODP_QUEUE_INVALID);
sprintf(poolname, "%s", "DstPool");
@@ -195,9 +194,9 @@ void configure_cls_pmr_chain(void)
pmr_list[CLS_PMR_CHAIN_SRC] = odp_pmr_create(&match);
CU_ASSERT_FATAL(pmr_list[CLS_PMR_CHAIN_SRC] != ODP_PMR_INVAL);
- val = CLS_PMR_CHAIN_SPORT;
+ val = CLS_PMR_CHAIN_PORT;
maskport = 0xffff;
- match.term = ODP_PMR_UDP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &maskport;
match.val_sz = sizeof(val);
@@ -218,7 +217,6 @@ void test_cls_pmr_chain(void)
{
odp_packet_t pkt;
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
odp_queue_t queue;
odp_pool_t pool;
uint32_t addr = 0;
@@ -236,8 +234,7 @@ void test_cls_pmr_chain(void)
ip->chksum = 0;
ip->chksum = odph_ipv4_csum_update(pkt);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_PMR_CHAIN_SPORT);
+ set_first_supported_pmr_port(pkt, CLS_PMR_CHAIN_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
@@ -280,12 +277,12 @@ void configure_pktio_default_cos(void)
char poolname[ODP_POOL_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "DefaultQueue");
- queue_list[CLS_DEFAULT] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED, &qparam);
+ queue_list[CLS_DEFAULT] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_DEFAULT] != ODP_QUEUE_INVALID);
sprintf(poolname, "DefaultPool");
@@ -339,14 +336,13 @@ void configure_pktio_error_cos(void)
char poolname[ODP_POOL_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_LOWEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "ErrorCos");
- queue_list[CLS_ERROR] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_ERROR] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_ERROR] != ODP_QUEUE_INVALID);
sprintf(poolname, "ErrorPool");
@@ -439,13 +435,13 @@ void configure_cos_with_l2_priority(void)
qos_tbl[i] = 0;
odp_queue_param_init(&qparam);
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < num_qos; i++) {
qparam.sched.prio = ODP_SCHED_PRIO_LOWEST - i;
sprintf(queuename, "%s_%d", "L2_Queue", i);
- queue_tbl[i] = odp_queue_create(queuename, ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_tbl[i] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_tbl[i] != ODP_QUEUE_INVALID);
queue_list[CLS_L2_QOS_0 + i] = queue_tbl[i];
@@ -513,9 +509,9 @@ void configure_pmr_cos(void)
char queuename[ODP_QUEUE_NAME_LEN];
char poolname[ODP_POOL_NAME_LEN];
- val = CLS_PMR_SPORT;
+ val = CLS_PMR_PORT;
mask = 0xffff;
- match.term = ODP_PMR_UDP_SPORT;
+ match.term = find_first_supported_l3_pmr();
match.val = &val;
match.mask = &mask;
match.val_sz = sizeof(val);
@@ -524,14 +520,13 @@ void configure_pmr_cos(void)
CU_ASSERT_FATAL(pmr_list[CLS_PMR] != ODP_PMR_INVAL);
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "PMR_CoS");
- queue_list[CLS_PMR] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR] != ODP_QUEUE_INVALID);
sprintf(poolname, "PMR_Pool");
@@ -554,7 +549,6 @@ void configure_pmr_cos(void)
void test_pmr_cos(void)
{
odp_packet_t pkt;
- odph_udphdr_t *udp;
odp_queue_t queue;
odp_pool_t pool;
uint32_t seqno = 0;
@@ -563,8 +557,7 @@ void test_pmr_cos(void)
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
seqno = cls_pkt_get_seq(pkt);
CU_ASSERT(seqno != TEST_SEQ_INVALID);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_PMR_SPORT);
+ set_first_supported_pmr_port(pkt, CLS_PMR_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
@@ -597,9 +590,9 @@ void configure_pktio_pmr_match_set_cos(void)
pmr_terms[0].val_sz = sizeof(addr);
- val = CLS_PMR_SET_SPORT;
+ val = CLS_PMR_SET_PORT;
maskport = 0xffff;
- pmr_terms[1].term = ODP_PMR_UDP_SPORT;
+ pmr_terms[1].term = find_first_supported_l3_pmr();
pmr_terms[1].val = &val;
pmr_terms[1].mask = &maskport;
pmr_terms[1].val_sz = sizeof(val);
@@ -608,14 +601,13 @@ void configure_pktio_pmr_match_set_cos(void)
CU_ASSERT(retval > 0);
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_SCHED;
qparam.sched.prio = ODP_SCHED_PRIO_HIGHEST;
- qparam.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparam.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
sprintf(queuename, "%s", "cos_pmr_set_queue");
- queue_list[CLS_PMR_SET] = odp_queue_create(queuename,
- ODP_QUEUE_TYPE_SCHED,
- &qparam);
+ queue_list[CLS_PMR_SET] = odp_queue_create(queuename, &qparam);
CU_ASSERT_FATAL(queue_list[CLS_PMR_SET] != ODP_QUEUE_INVALID);
sprintf(poolname, "cos_pmr_set_pool");
@@ -640,7 +632,6 @@ void test_pktio_pmr_match_set_cos(void)
uint32_t addr = 0;
uint32_t mask;
odph_ipv4hdr_t *ip;
- odph_udphdr_t *udp;
odp_packet_t pkt;
odp_pool_t pool;
odp_queue_t queue;
@@ -657,8 +648,7 @@ void test_pktio_pmr_match_set_cos(void)
ip->chksum = 0;
ip->chksum = odph_ipv4_csum_update(pkt);
- udp = (odph_udphdr_t *)odp_packet_l4_ptr(pkt, NULL);
- udp->src_port = odp_cpu_to_be_16(CLS_PMR_SET_SPORT);
+ set_first_supported_pmr_port(pkt, CLS_PMR_SET_PORT);
enqueue_pktio_interface(pkt, pktio_loop);
pkt = receive_packet(&queue, ODP_TIME_SEC_IN_NS);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
diff --git a/test/validation/classification/odp_classification_testsuites.h b/test/validation/classification/odp_classification_testsuites.h
index 02828e125..19d5ae2f3 100644
--- a/test/validation/classification/odp_classification_testsuites.h
+++ b/test/validation/classification/odp_classification_testsuites.h
@@ -22,6 +22,9 @@ int classification_suite_pmr_init(void);
odp_packet_t create_packet(odp_pool_t pool, bool vlan,
odp_atomic_u32_t *seq, bool udp);
+odp_packet_t create_packet_len(odp_pool_t pool, bool vlan,
+ odp_atomic_u32_t *seq, bool flag_udp,
+ uint16_t len);
int cls_pkt_set_seq(odp_packet_t pkt);
uint32_t cls_pkt_get_seq(odp_packet_t pkt);
odp_pktio_t create_pktio(odp_queue_type_t q_type);
@@ -46,5 +49,7 @@ void test_pmr_cos(void);
void configure_pktio_pmr_match_set_cos(void);
void test_pktio_pmr_match_set_cos(void);
int destroy_inq(odp_pktio_t pktio);
+odp_pmr_term_t find_first_supported_l3_pmr(void);
+int set_first_supported_pmr_port(odp_packet_t pkt, uint16_t port);
#endif /* ODP_BUFFER_TESTSUITES_H_ */
diff --git a/test/validation/common/odp_cunit_common.h b/test/validation/common/odp_cunit_common.h
index 37e8e8c7e..bf7a7f3a9 100644
--- a/test/validation/common/odp_cunit_common.h
+++ b/test/validation/common/odp_cunit_common.h
@@ -47,9 +47,12 @@ static inline void odp_cunit_test_missing(void) { }
#define ODP_TEST_INFO_INACTIVE(test_func, args...) \
{#test_func, odp_cunit_test_missing, odp_cunit_test_inactive}
+#define ODP_TEST_INACTIVE 0
+#define ODP_TEST_ACTIVE 1
+
/* A test case that may be marked as inactive at runtime based on the
- * return value of the cond_func function. A return value of 0 means
- * inactive, anything else is active. */
+ * return value of the cond_func function. A return value of ODP_TEST_INACTIVE
+ * means inactive, ODP_TEST_ACTIVE means active. */
#define ODP_TEST_INFO_CONDITIONAL(test_func, cond_func) \
{#test_func, test_func, cond_func}
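+
+/* Illustrative cond_func (hypothetical, not part of this patch): run a
+ * test only when the implementation has lock-free 64-bit atomics:
+ *
+ *	static int check_lock_free(void)
+ *	{
+ *		return odp_atomic_lock_free_u64(NULL) ?
+ *			ODP_TEST_ACTIVE : ODP_TEST_INACTIVE;
+ *	}
+ */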
diff --git a/test/validation/crypto/crypto.c b/test/validation/crypto/crypto.c
index 1234f783f..b2d8f459d 100644
--- a/test/validation/crypto/crypto.c
+++ b/test/validation/crypto/crypto.c
@@ -51,8 +51,7 @@ int crypto_init(void)
fprintf(stderr, "Packet pool creation failed.\n");
return -1;
}
- out_queue = odp_queue_create("crypto-out",
- ODP_QUEUE_TYPE_POLL, NULL);
+ out_queue = odp_queue_create("crypto-out", NULL);
if (ODP_QUEUE_INVALID == out_queue) {
fprintf(stderr, "Crypto outq creation failed.\n");
return -1;
diff --git a/test/validation/init/init.c b/test/validation/init/init.c
index a8a564063..62bd75cc6 100644
--- a/test/validation/init/init.c
+++ b/test/validation/init/init.c
@@ -18,7 +18,7 @@ static void odp_init_abort(void) ODP_NORETURN;
/* replacement log function: */
ODP_PRINTF_FORMAT(2, 3)
-static int odp_init_log(odp_log_level_e level, const char *fmt, ...);
+static int odp_init_log(odp_log_level_t level, const char *fmt, ...);
/* test ODP global init, with alternate abort function */
void init_test_odp_init_global_replace_abort(void)
@@ -98,7 +98,7 @@ odp_suiteinfo_t init_suites_log[] = {
ODP_SUITE_INFO_NULL,
};
-static int odp_init_log(odp_log_level_e level __attribute__((unused)),
+static int odp_init_log(odp_log_level_t level __attribute__((unused)),
const char *fmt, ...)
{
va_list args;
diff --git a/test/validation/lock/.gitignore b/test/validation/lock/.gitignore
new file mode 100644
index 000000000..ff16646f4
--- /dev/null
+++ b/test/validation/lock/.gitignore
@@ -0,0 +1 @@
+lock_main
diff --git a/test/validation/lock/Makefile.am b/test/validation/lock/Makefile.am
new file mode 100644
index 000000000..29993df44
--- /dev/null
+++ b/test/validation/lock/Makefile.am
@@ -0,0 +1,10 @@
+include ../Makefile.inc
+
+noinst_LTLIBRARIES = libtestlock.la
+libtestlock_la_SOURCES = lock.c
+
+test_PROGRAMS = lock_main$(EXEEXT)
+dist_lock_main_SOURCES = lock_main.c
+lock_main_LDADD = libtestlock.la $(LIBCUNIT_COMMON) $(LIBODP)
+
+EXTRA_DIST = lock.h
diff --git a/test/validation/synchronizers/synchronizers.c b/test/validation/lock/lock.c
index 0302069b8..0f4415dba 100644
--- a/test/validation/synchronizers/synchronizers.c
+++ b/test/validation/lock/lock.c
@@ -9,35 +9,21 @@
#include <CUnit/Basic.h>
#include <odp_cunit_common.h>
#include <unistd.h>
-#include "synchronizers.h"
+#include "lock.h"
#define VERBOSE 0
#define MAX_ITERATIONS 1000
-#define BARRIER_ITERATIONS 64
#define SLOW_BARRIER_DELAY 400
#define BASE_DELAY 6
#define MIN_DELAY 1
-#define NUM_TEST_BARRIERS BARRIER_ITERATIONS
#define NUM_RESYNC_BARRIERS 100
-#define ADD_SUB_CNT 5
-
-#define CNT 10
-#define BARRIER_DELAY 10
-#define U32_INIT_VAL (1UL << 10)
-#define U64_INIT_VAL (1ULL << 33)
-
#define GLOBAL_SHM_NAME "GlobalLockTest"
#define UNUSED __attribute__((__unused__))
-static odp_atomic_u32_t a32u;
-static odp_atomic_u64_t a64u;
-
-static volatile int temp_result;
-
typedef __volatile uint32_t volatile_u32_t;
typedef __volatile uint64_t volatile_u64_t;
@@ -52,9 +38,6 @@ typedef struct {
uint32_t g_verbose;
uint32_t g_max_num_cores;
- odp_barrier_t test_barriers[NUM_TEST_BARRIERS];
- custom_barrier_t custom_barrier1[NUM_TEST_BARRIERS];
- custom_barrier_t custom_barrier2[NUM_TEST_BARRIERS];
volatile_u32_t slow_thread_num;
volatile_u32_t barrier_cnt1;
volatile_u32_t barrier_cnt2;
@@ -143,145 +126,6 @@ static void thread_finalize(per_thread_mem_t *per_thread_mem)
free(per_thread_mem);
}
-static void custom_barrier_init(custom_barrier_t *custom_barrier,
- uint32_t num_threads)
-{
- odp_atomic_init_u32(&custom_barrier->wait_cnt, num_threads);
-}
-
-static void custom_barrier_wait(custom_barrier_t *custom_barrier)
-{
- volatile_u64_t counter = 1;
- uint32_t delay_cnt, wait_cnt;
-
- odp_atomic_sub_u32(&custom_barrier->wait_cnt, 1);
-
- wait_cnt = 1;
- while (wait_cnt != 0) {
- for (delay_cnt = 1; delay_cnt <= BARRIER_DELAY; delay_cnt++)
- counter++;
-
- wait_cnt = odp_atomic_load_u32(&custom_barrier->wait_cnt);
- }
-}
-
-static uint32_t barrier_test(per_thread_mem_t *per_thread_mem,
- odp_bool_t no_barrier_test)
-{
- global_shared_mem_t *global_mem;
- uint32_t barrier_errs, iterations, cnt, i_am_slow_thread;
- uint32_t thread_num, slow_thread_num, next_slow_thread, num_threads;
- uint32_t lock_owner_delay, barrier_cnt1, barrier_cnt2;
-
- thread_num = odp_thread_id();
- global_mem = per_thread_mem->global_mem;
- num_threads = global_mem->g_num_threads;
- iterations = BARRIER_ITERATIONS;
-
- barrier_errs = 0;
- lock_owner_delay = SLOW_BARRIER_DELAY;
-
- for (cnt = 1; cnt < iterations; cnt++) {
- /* Wait here until all of the threads reach this point */
- custom_barrier_wait(&global_mem->custom_barrier1[cnt]);
-
- barrier_cnt1 = global_mem->barrier_cnt1;
- barrier_cnt2 = global_mem->barrier_cnt2;
-
- if ((barrier_cnt1 != cnt) || (barrier_cnt2 != cnt)) {
- printf("thread_num=%" PRIu32 " barrier_cnts of %" PRIu32
- " %" PRIu32 " cnt=%" PRIu32 "\n",
- thread_num, barrier_cnt1, barrier_cnt2, cnt);
- barrier_errs++;
- }
-
- /* Wait here until all of the threads reach this point */
- custom_barrier_wait(&global_mem->custom_barrier2[cnt]);
-
- slow_thread_num = global_mem->slow_thread_num;
- i_am_slow_thread = thread_num == slow_thread_num;
- next_slow_thread = slow_thread_num + 1;
- if (num_threads < next_slow_thread)
- next_slow_thread = 1;
-
- /*
- * Now run the test, which involves having all but one thread
- * immediately calling odp_barrier_wait(), and one thread wait a
- * moderate amount of time and then calling odp_barrier_wait().
- * The test fails if any of the first group of threads
- * has not waited for the "slow" thread. The "slow" thread is
- * responsible for re-initializing the barrier for next trial.
- */
- if (i_am_slow_thread) {
- thread_delay(per_thread_mem, lock_owner_delay);
- lock_owner_delay += BASE_DELAY;
- if ((global_mem->barrier_cnt1 != cnt) ||
- (global_mem->barrier_cnt2 != cnt) ||
- (global_mem->slow_thread_num
- != slow_thread_num))
- barrier_errs++;
- }
-
- if (no_barrier_test == 0)
- odp_barrier_wait(&global_mem->test_barriers[cnt]);
-
- global_mem->barrier_cnt1 = cnt + 1;
- odp_mb_full();
-
- if (i_am_slow_thread) {
- global_mem->slow_thread_num = next_slow_thread;
- global_mem->barrier_cnt2 = cnt + 1;
- odp_mb_full();
- } else {
- while (global_mem->barrier_cnt2 != (cnt + 1))
- thread_delay(per_thread_mem, BASE_DELAY);
- }
- }
-
- if ((global_mem->g_verbose) && (barrier_errs != 0))
- printf("\nThread %" PRIu32 " (id=%d core=%d) had %" PRIu32
- " barrier_errs in %" PRIu32 " iterations\n", thread_num,
- per_thread_mem->thread_id,
- per_thread_mem->thread_core, barrier_errs, iterations);
-
- return barrier_errs;
-}
-
-static void *no_barrier_functional_test(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
- uint32_t barrier_errs;
-
- per_thread_mem = thread_init();
- barrier_errs = barrier_test(per_thread_mem, 1);
-
- /*
- * Note that the following CU_ASSERT MAY appear incorrect, but for the
- * no_barrier test it should see barrier_errs or else there is something
- * wrong with the test methodology or the ODP thread implementation.
- * So this test PASSES only if it sees barrier_errs or a single
- * worker was used.
- */
- CU_ASSERT(barrier_errs != 0 || global_mem->g_num_threads == 1);
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *barrier_functional_test(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
- uint32_t barrier_errs;
-
- per_thread_mem = thread_init();
- barrier_errs = barrier_test(per_thread_mem, 0);
-
- CU_ASSERT(barrier_errs == 0);
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
static void spinlock_api_test(odp_spinlock_t *spinlock)
{
odp_spinlock_init(spinlock);
@@ -1055,269 +899,8 @@ static void *rwlock_recursive_functional_test(void *arg UNUSED)
return NULL;
}
-static void barrier_test_init(void)
-{
- uint32_t num_threads, idx;
-
- num_threads = global_mem->g_num_threads;
-
- for (idx = 0; idx < NUM_TEST_BARRIERS; idx++) {
- odp_barrier_init(&global_mem->test_barriers[idx], num_threads);
- custom_barrier_init(&global_mem->custom_barrier1[idx],
- num_threads);
- custom_barrier_init(&global_mem->custom_barrier2[idx],
- num_threads);
- }
-
- global_mem->slow_thread_num = 1;
- global_mem->barrier_cnt1 = 1;
- global_mem->barrier_cnt2 = 1;
-}
-
-static void test_atomic_inc_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_inc_u32(&a32u);
-}
-
-static void test_atomic_inc_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_inc_u64(&a64u);
-}
-
-static void test_atomic_dec_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_dec_u32(&a32u);
-}
-
-static void test_atomic_dec_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_dec_u64(&a64u);
-}
-
-static void test_atomic_fetch_inc_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u32(&a32u);
-}
-
-static void test_atomic_fetch_inc_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_inc_u64(&a64u);
-}
-
-static void test_atomic_fetch_dec_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u32(&a32u);
-}
-
-static void test_atomic_fetch_dec_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_dec_u64(&a64u);
-}
-
-static void test_atomic_add_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_add_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_add_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_add_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_sub_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_sub_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_sub_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_sub_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_add_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_add_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_add_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_sub_32(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u32(&a32u, ADD_SUB_CNT);
-}
-
-static void test_atomic_fetch_sub_64(void)
-{
- int i;
-
- for (i = 0; i < CNT; i++)
- odp_atomic_fetch_sub_u64(&a64u, ADD_SUB_CNT);
-}
-
-static void test_atomic_inc_dec_32(void)
-{
- test_atomic_inc_32();
- test_atomic_dec_32();
-}
-
-static void test_atomic_inc_dec_64(void)
-{
- test_atomic_inc_64();
- test_atomic_dec_64();
-}
-
-static void test_atomic_fetch_inc_dec_32(void)
-{
- test_atomic_fetch_inc_32();
- test_atomic_fetch_dec_32();
-}
-
-static void test_atomic_fetch_inc_dec_64(void)
-{
- test_atomic_fetch_inc_64();
- test_atomic_fetch_dec_64();
-}
-
-static void test_atomic_add_sub_32(void)
-{
- test_atomic_add_32();
- test_atomic_sub_32();
-}
-
-static void test_atomic_add_sub_64(void)
-{
- test_atomic_add_64();
- test_atomic_sub_64();
-}
-
-static void test_atomic_fetch_add_sub_32(void)
-{
- test_atomic_fetch_add_32();
- test_atomic_fetch_sub_32();
-}
-
-static void test_atomic_fetch_add_sub_64(void)
-{
- test_atomic_fetch_add_64();
- test_atomic_fetch_sub_64();
-}
-
-static void test_atomic_init(void)
-{
- odp_atomic_init_u32(&a32u, 0);
- odp_atomic_init_u64(&a64u, 0);
-}
-
-static void test_atomic_store(void)
-{
- odp_atomic_store_u32(&a32u, U32_INIT_VAL);
- odp_atomic_store_u64(&a64u, U64_INIT_VAL);
-}
-
-static void test_atomic_validate(void)
-{
- CU_ASSERT(U32_INIT_VAL == odp_atomic_load_u32(&a32u));
- CU_ASSERT(U64_INIT_VAL == odp_atomic_load_u64(&a64u));
-}
-
-/* Barrier tests */
-void synchronizers_test_memory_barrier(void)
-{
- volatile int a = 0;
- volatile int b = 0;
- volatile int c = 0;
- volatile int d = 0;
-
- /* Call all memory barriers to verify that those are implemented */
- a = 1;
- odp_mb_release();
- b = 1;
- odp_mb_acquire();
- c = 1;
- odp_mb_full();
- d = 1;
-
- /* Avoid "variable set but not used" warning */
- temp_result = a + b + c + d;
-}
-
-void synchronizers_test_no_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(no_barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-void synchronizers_test_barrier_functional(void)
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- barrier_test_init();
- odp_cunit_thread_create(barrier_functional_test, &arg);
- odp_cunit_thread_exit(&arg);
-}
-
-odp_testinfo_t synchronizers_suite_barrier[] = {
- ODP_TEST_INFO(synchronizers_test_memory_barrier),
- ODP_TEST_INFO(synchronizers_test_no_barrier_functional),
- ODP_TEST_INFO(synchronizers_test_barrier_functional),
- ODP_TEST_INFO_NULL
-};
-
/* Thread-unsafe tests */
-void synchronizers_test_no_lock_functional(void)
+void lock_test_no_lock_functional(void)
{
pthrd_arg arg;
@@ -1326,13 +909,13 @@ void synchronizers_test_no_lock_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_no_locking[] = {
- ODP_TEST_INFO(synchronizers_test_no_lock_functional),
+odp_testinfo_t lock_suite_no_locking[] = {
+ ODP_TEST_INFO(lock_test_no_lock_functional),
ODP_TEST_INFO_NULL
};
/* Spin lock tests */
-void synchronizers_test_spinlock_api(void)
+void lock_test_spinlock_api(void)
{
pthrd_arg arg;
@@ -1341,7 +924,7 @@ void synchronizers_test_spinlock_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_spinlock_functional(void)
+void lock_test_spinlock_functional(void)
{
pthrd_arg arg;
@@ -1351,7 +934,7 @@ void synchronizers_test_spinlock_functional(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_spinlock_recursive_api(void)
+void lock_test_spinlock_recursive_api(void)
{
pthrd_arg arg;
@@ -1360,7 +943,7 @@ void synchronizers_test_spinlock_recursive_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_spinlock_recursive_functional(void)
+void lock_test_spinlock_recursive_functional(void)
{
pthrd_arg arg;
@@ -1370,20 +953,20 @@ void synchronizers_test_spinlock_recursive_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_spinlock[] = {
- ODP_TEST_INFO(synchronizers_test_spinlock_api),
- ODP_TEST_INFO(synchronizers_test_spinlock_functional),
+odp_testinfo_t lock_suite_spinlock[] = {
+ ODP_TEST_INFO(lock_test_spinlock_api),
+ ODP_TEST_INFO(lock_test_spinlock_functional),
ODP_TEST_INFO_NULL
};
-odp_testinfo_t synchronizers_suite_spinlock_recursive[] = {
- ODP_TEST_INFO(synchronizers_test_spinlock_recursive_api),
- ODP_TEST_INFO(synchronizers_test_spinlock_recursive_functional),
+odp_testinfo_t lock_suite_spinlock_recursive[] = {
+ ODP_TEST_INFO(lock_test_spinlock_recursive_api),
+ ODP_TEST_INFO(lock_test_spinlock_recursive_functional),
ODP_TEST_INFO_NULL
};
/* Ticket lock tests */
-void synchronizers_test_ticketlock_api(void)
+void lock_test_ticketlock_api(void)
{
pthrd_arg arg;
@@ -1392,7 +975,7 @@ void synchronizers_test_ticketlock_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_ticketlock_functional(void)
+void lock_test_ticketlock_functional(void)
{
pthrd_arg arg;
@@ -1403,14 +986,14 @@ void synchronizers_test_ticketlock_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_ticketlock[] = {
- ODP_TEST_INFO(synchronizers_test_ticketlock_api),
- ODP_TEST_INFO(synchronizers_test_ticketlock_functional),
+odp_testinfo_t lock_suite_ticketlock[] = {
+ ODP_TEST_INFO(lock_test_ticketlock_api),
+ ODP_TEST_INFO(lock_test_ticketlock_functional),
ODP_TEST_INFO_NULL
};
/* RW lock tests */
-void synchronizers_test_rwlock_api(void)
+void lock_test_rwlock_api(void)
{
pthrd_arg arg;
@@ -1419,7 +1002,7 @@ void synchronizers_test_rwlock_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_rwlock_functional(void)
+void lock_test_rwlock_functional(void)
{
pthrd_arg arg;
@@ -1429,13 +1012,13 @@ void synchronizers_test_rwlock_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_rwlock[] = {
- ODP_TEST_INFO(synchronizers_test_rwlock_api),
- ODP_TEST_INFO(synchronizers_test_rwlock_functional),
+odp_testinfo_t lock_suite_rwlock[] = {
+ ODP_TEST_INFO(lock_test_rwlock_api),
+ ODP_TEST_INFO(lock_test_rwlock_functional),
ODP_TEST_INFO_NULL
};
-void synchronizers_test_rwlock_recursive_api(void)
+void lock_test_rwlock_recursive_api(void)
{
pthrd_arg arg;
@@ -1444,7 +1027,7 @@ void synchronizers_test_rwlock_recursive_api(void)
odp_cunit_thread_exit(&arg);
}
-void synchronizers_test_rwlock_recursive_functional(void)
+void lock_test_rwlock_recursive_functional(void)
{
pthrd_arg arg;
@@ -1454,13 +1037,13 @@ void synchronizers_test_rwlock_recursive_functional(void)
odp_cunit_thread_exit(&arg);
}
-odp_testinfo_t synchronizers_suite_rwlock_recursive[] = {
- ODP_TEST_INFO(synchronizers_test_rwlock_recursive_api),
- ODP_TEST_INFO(synchronizers_test_rwlock_recursive_functional),
+odp_testinfo_t lock_suite_rwlock_recursive[] = {
+ ODP_TEST_INFO(lock_test_rwlock_recursive_api),
+ ODP_TEST_INFO(lock_test_rwlock_recursive_functional),
ODP_TEST_INFO_NULL
};
-int synchronizers_suite_init(void)
+int lock_suite_init(void)
{
uint32_t num_threads, idx;
@@ -1472,7 +1055,7 @@ int synchronizers_suite_init(void)
return 0;
}
-int synchronizers_init(void)
+int lock_init(void)
{
uint32_t workers_count, max_threads;
int ret = 0;
@@ -1521,126 +1104,29 @@ int synchronizers_init(void)
return ret;
}
-/* Atomic tests */
-static void *test_atomic_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_inc_dec_32();
- test_atomic_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *test_atomic_add_sub_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_add_sub_32();
- test_atomic_add_sub_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *test_atomic_fetch_inc_dec_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_fetch_inc_dec_32();
- test_atomic_fetch_inc_dec_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void *test_atomic_fetch_add_sub_thread(void *arg UNUSED)
-{
- per_thread_mem_t *per_thread_mem;
-
- per_thread_mem = thread_init();
- test_atomic_fetch_add_sub_32();
- test_atomic_fetch_add_sub_64();
-
- thread_finalize(per_thread_mem);
-
- return NULL;
-}
-
-static void test_atomic_functional(void *func_ptr(void *))
-{
- pthrd_arg arg;
-
- arg.numthrds = global_mem->g_num_threads;
- test_atomic_init();
- test_atomic_store();
- odp_cunit_thread_create(func_ptr, &arg);
- odp_cunit_thread_exit(&arg);
- test_atomic_validate();
-}
-
-void synchronizers_test_atomic_inc_dec(void)
-{
- test_atomic_functional(test_atomic_inc_dec_thread);
-}
-
-void synchronizers_test_atomic_add_sub(void)
-{
- test_atomic_functional(test_atomic_add_sub_thread);
-}
-
-void synchronizers_test_atomic_fetch_inc_dec(void)
-{
- test_atomic_functional(test_atomic_fetch_inc_dec_thread);
-}
-
-void synchronizers_test_atomic_fetch_add_sub(void)
-{
- test_atomic_functional(test_atomic_fetch_add_sub_thread);
-}
-
-odp_testinfo_t synchronizers_suite_atomic[] = {
- ODP_TEST_INFO(synchronizers_test_atomic_inc_dec),
- ODP_TEST_INFO(synchronizers_test_atomic_add_sub),
- ODP_TEST_INFO(synchronizers_test_atomic_fetch_inc_dec),
- ODP_TEST_INFO(synchronizers_test_atomic_fetch_add_sub),
- ODP_TEST_INFO_NULL,
-};
-
-odp_suiteinfo_t synchronizers_suites[] = {
- {"barrier", NULL, NULL,
- synchronizers_suite_barrier},
- {"nolocking", synchronizers_suite_init, NULL,
- synchronizers_suite_no_locking},
- {"spinlock", synchronizers_suite_init, NULL,
- synchronizers_suite_spinlock},
- {"spinlock_recursive", synchronizers_suite_init, NULL,
- synchronizers_suite_spinlock_recursive},
- {"ticketlock", synchronizers_suite_init, NULL,
- synchronizers_suite_ticketlock},
- {"rwlock", synchronizers_suite_init, NULL,
- synchronizers_suite_rwlock},
- {"rwlock_recursive", synchronizers_suite_init, NULL,
- synchronizers_suite_rwlock_recursive},
- {"atomic", NULL, NULL,
- synchronizers_suite_atomic},
+odp_suiteinfo_t lock_suites[] = {
+ {"nolocking", lock_suite_init, NULL,
+ lock_suite_no_locking},
+ {"spinlock", lock_suite_init, NULL,
+ lock_suite_spinlock},
+ {"spinlock_recursive", lock_suite_init, NULL,
+ lock_suite_spinlock_recursive},
+ {"ticketlock", lock_suite_init, NULL,
+ lock_suite_ticketlock},
+ {"rwlock", lock_suite_init, NULL,
+ lock_suite_rwlock},
+ {"rwlock_recursive", lock_suite_init, NULL,
+ lock_suite_rwlock_recursive},
ODP_SUITE_INFO_NULL
};
-int synchronizers_main(void)
+int lock_main(void)
{
int ret;
- odp_cunit_register_global_init(synchronizers_init);
+ odp_cunit_register_global_init(lock_init);
- ret = odp_cunit_register(synchronizers_suites);
+ ret = odp_cunit_register(lock_suites);
if (ret == 0)
ret = odp_cunit_run();
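
Note: the renamed lock suites above cover spinlocks, recursive spinlocks, ticketlocks and rwlocks. For orientation, a minimal sketch of the core lock calls exercised by this module (standard odp.h API only; lock_sketch is illustrative, not part of the test):

#include <odp.h>

static odp_spinlock_t sl;
static odp_ticketlock_t tl;
static odp_rwlock_t rwl;

static void lock_sketch(void)
{
	odp_spinlock_init(&sl);
	odp_spinlock_lock(&sl);
	/* ... critical section ... */
	odp_spinlock_unlock(&sl);

	odp_ticketlock_init(&tl);
	odp_ticketlock_lock(&tl);	/* FIFO acquisition order */
	odp_ticketlock_unlock(&tl);

	odp_rwlock_init(&rwl);
	odp_rwlock_read_lock(&rwl);	/* multiple concurrent readers */
	odp_rwlock_read_unlock(&rwl);
	odp_rwlock_write_lock(&rwl);	/* single exclusive writer */
	odp_rwlock_write_unlock(&rwl);
}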
diff --git a/test/validation/lock/lock.h b/test/validation/lock/lock.h
new file mode 100644
index 000000000..d90cdbc70
--- /dev/null
+++ b/test/validation/lock/lock.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef _ODP_TEST_LOCK_H_
+#define _ODP_TEST_LOCK_H_
+
+#include <odp_cunit_common.h>
+
+/* test functions: */
+void lock_test_no_lock_functional(void);
+void lock_test_spinlock_api(void);
+void lock_test_spinlock_functional(void);
+void lock_test_spinlock_recursive_api(void);
+void lock_test_spinlock_recursive_functional(void);
+void lock_test_ticketlock_api(void);
+void lock_test_ticketlock_functional(void);
+void lock_test_rwlock_api(void);
+void lock_test_rwlock_functional(void);
+void lock_test_rwlock_recursive_api(void);
+void lock_test_rwlock_recursive_functional(void);
+
+/* test arrays: */
+extern odp_testinfo_t lock_suite_no_locking[];
+extern odp_testinfo_t lock_suite_spinlock[];
+extern odp_testinfo_t lock_suite_spinlock_recursive[];
+extern odp_testinfo_t lock_suite_ticketlock[];
+extern odp_testinfo_t lock_suite_rwlock[];
+extern odp_testinfo_t lock_suite_rwlock_recursive[];
+
+/* test array init/term functions: */
+int lock_suite_init(void);
+
+/* test registry: */
+extern odp_suiteinfo_t lock_suites[];
+
+/* executable init/term functions: */
+int lock_init(void);
+
+/* main test program: */
+int lock_main(void);
+
+#endif
diff --git a/test/validation/lock/lock_main.c b/test/validation/lock/lock_main.c
new file mode 100644
index 000000000..c12c2b514
--- /dev/null
+++ b/test/validation/lock/lock_main.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include "lock.h"
+
+int main(void)
+{
+ return lock_main();
+}
diff --git a/test/validation/pktio/pktio.c b/test/validation/pktio/pktio.c
index 96d3e7290..aab8a42cd 100644
--- a/test/validation/pktio/pktio.c
+++ b/test/validation/pktio/pktio.c
@@ -22,6 +22,9 @@
#define TEST_SEQ_INVALID ((uint32_t)~0)
#define TEST_SEQ_MAGIC 0x92749451
#define TX_BATCH_LEN 4
+#define MAX_QUEUES 10
+
+#undef DEBUG_STATS
/** interface names used for testing */
static const char *iface_name[MAX_NUM_IFACES];
@@ -40,18 +43,18 @@ typedef struct {
odp_pktio_t id;
odp_queue_t outq;
odp_queue_t inq;
- odp_pktio_input_mode_t in_mode;
+ odp_pktin_mode_t in_mode;
} pktio_info_t;
/** magic number and sequence at start of UDP payload */
typedef struct ODP_PACKED {
- uint32be_t magic;
- uint32be_t seq;
+ odp_u32be_t magic;
+ odp_u32be_t seq;
} pkt_head_t;
/** magic number at end of UDP payload */
typedef struct ODP_PACKED {
- uint32be_t magic;
+ odp_u32be_t magic;
} pkt_tail_t;
/** Run mode */
@@ -79,6 +82,29 @@ pkt_segmented_e pool_segmentation = PKT_POOL_UNSEGMENTED;
odp_pool_t pool[MAX_NUM_IFACES] = {ODP_POOL_INVALID, ODP_POOL_INVALID};
+static inline void _pktio_wait_linkup(odp_pktio_t pktio)
+{
+	/* wait up to 1 second (100 x 10 ms) for link up */
+ uint64_t wait_ns = (10 * ODP_TIME_MSEC_IN_NS);
+ int wait_num = 100;
+ int i;
+ int ret = -1;
+
+ for (i = 0; i < wait_num; i++) {
+ ret = odp_pktio_link_status(pktio);
+ if (ret < 0 || ret == 1)
+ break;
+ /* link is down, call status again after delay */
+ odp_time_wait_ns(wait_ns);
+ }
+
+ if (ret != -1) {
+		/* assert only if link state reporting is
+		 * supported; the link must be up by now */
+ CU_ASSERT_FATAL(ret == 1);
+ }
+}
+
static void set_pool_len(odp_pool_param_t *params)
{
switch (pool_segmentation) {
@@ -264,8 +290,8 @@ static int default_pool_create(void)
return 0;
}
-static odp_pktio_t create_pktio(int iface_idx, odp_pktio_input_mode_t imode,
- odp_pktio_output_mode_t omode)
+static odp_pktio_t create_pktio(int iface_idx, odp_pktin_mode_t imode,
+ odp_pktout_mode_t omode)
{
odp_pktio_t pktio;
odp_pktio_param_t pktio_param;
@@ -289,13 +315,14 @@ static odp_pktio_t create_pktio(int iface_idx, odp_pktio_input_mode_t imode,
return pktio;
}
-static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
+static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype ODP_UNUSED)
{
odp_queue_param_t qparam;
odp_queue_t inq_def;
char inq_name[ODP_QUEUE_NAME_LEN];
odp_queue_param_init(&qparam);
+ qparam.type = ODP_QUEUE_TYPE_PKTIN;
qparam.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qparam.sched.sync = ODP_SCHED_SYNC_ATOMIC;
qparam.sched.group = ODP_SCHED_GROUP_ALL;
@@ -304,10 +331,7 @@ static int create_inq(odp_pktio_t pktio, odp_queue_type_t qtype)
odp_pktio_to_u64(pktio));
inq_def = odp_queue_lookup(inq_name);
if (inq_def == ODP_QUEUE_INVALID)
- inq_def = odp_queue_create(
- inq_name,
- ODP_QUEUE_TYPE_PKTIN,
- qtype == ODP_QUEUE_TYPE_POLL ? NULL : &qparam);
+ inq_def = odp_queue_create(inq_name, &qparam);
CU_ASSERT(inq_def != ODP_QUEUE_INVALID);
@@ -333,7 +357,7 @@ static int destroy_inq(odp_pktio_t pktio)
/* flush any pending events */
while (1) {
- if (q_type == ODP_QUEUE_TYPE_POLL)
+ if (q_type == ODP_QUEUE_TYPE_PLAIN)
ev = odp_queue_deq(inq);
else
ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
@@ -355,11 +379,11 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
int num_pkts = 0;
int i;
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_RECV)
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_DIRECT)
return odp_pktio_recv(pktio_rx->id, pkt_tbl, num);
if (mode == TXRX_MODE_MULTI) {
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_POLL)
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
num_evts = odp_queue_deq_multi(pktio_rx->inq, evt_tbl,
num);
else
@@ -368,7 +392,7 @@ static int get_packets(pktio_info_t *pktio_rx, odp_packet_t pkt_tbl[],
} else {
odp_event_t evt_tmp;
- if (pktio_rx->in_mode == ODP_PKTIN_MODE_POLL)
+ if (pktio_rx->in_mode == ODP_PKTIN_MODE_QUEUE)
evt_tmp = odp_queue_deq(pktio_rx->inq);
else
evt_tmp = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
@@ -486,7 +510,7 @@ static void pktio_txrx_multi(pktio_info_t *pktio_a, pktio_info_t *pktio_b,
}
}
-static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
+static void test_txrx(odp_pktin_mode_t in_mode, int num_pkts,
txrx_mode_e mode)
{
int ret, i, if_b;
@@ -498,7 +522,7 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
io = &pktios[i];
io->name = iface_name[i];
- io->id = create_pktio(i, in_mode, ODP_PKTOUT_MODE_SEND);
+ io->id = create_pktio(i, in_mode, ODP_PKTOUT_MODE_DIRECT);
if (io->id == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open iface");
return;
@@ -506,8 +530,8 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
io->outq = odp_pktio_outq_getdef(io->id);
io->in_mode = in_mode;
- if (in_mode == ODP_PKTIN_MODE_POLL) {
- create_inq(io->id, ODP_QUEUE_TYPE_POLL);
+ if (in_mode == ODP_PKTIN_MODE_QUEUE) {
+ create_inq(io->id, ODP_QUEUE_TYPE_PLAIN);
io->inq = odp_pktio_inq_getdef(io->id);
} else if (in_mode == ODP_PKTIN_MODE_SCHED) {
create_inq(io->id, ODP_QUEUE_TYPE_SCHED);
@@ -516,6 +540,8 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
ret = odp_pktio_start(io->id);
CU_ASSERT(ret == 0);
+
+ _pktio_wait_linkup(io->id);
}
/* if we have two interfaces then send through one and receive on
@@ -526,23 +552,23 @@ static void test_txrx(odp_pktio_input_mode_t in_mode, int num_pkts,
for (i = 0; i < num_ifaces; ++i) {
ret = odp_pktio_stop(pktios[i].id);
CU_ASSERT(ret == 0);
- if (in_mode != ODP_PKTIN_MODE_RECV)
+ if (in_mode != ODP_PKTIN_MODE_DIRECT)
destroy_inq(pktios[i].id);
ret = odp_pktio_close(pktios[i].id);
CU_ASSERT(ret == 0);
}
}
-void pktio_test_poll_queue(void)
+void pktio_test_plain_queue(void)
{
- test_txrx(ODP_PKTIN_MODE_POLL, 1, TXRX_MODE_SINGLE);
- test_txrx(ODP_PKTIN_MODE_POLL, TX_BATCH_LEN, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_SINGLE);
}
-void pktio_test_poll_multi(void)
+void pktio_test_plain_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_POLL, TX_BATCH_LEN, TXRX_MODE_MULTI);
- test_txrx(ODP_PKTIN_MODE_POLL, 1, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, TX_BATCH_LEN, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_QUEUE, 1, TXRX_MODE_MULTI);
}
void pktio_test_sched_queue(void)
@@ -559,12 +585,133 @@ void pktio_test_sched_multi(void)
void pktio_test_recv(void)
{
- test_txrx(ODP_PKTIN_MODE_RECV, 1, TXRX_MODE_SINGLE);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, 1, TXRX_MODE_SINGLE);
}
void pktio_test_recv_multi(void)
{
- test_txrx(ODP_PKTIN_MODE_RECV, TX_BATCH_LEN, TXRX_MODE_MULTI);
+ test_txrx(ODP_PKTIN_MODE_DIRECT, TX_BATCH_LEN, TXRX_MODE_MULTI);
+}
+
+void pktio_test_recv_queue(void)
+{
+ odp_pktio_t pktio_tx, pktio_rx;
+ odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t in_queue_param;
+ odp_pktout_queue_param_t out_queue_param;
+ odp_pktout_queue_t pktout_queue[MAX_QUEUES];
+ odp_pktin_queue_t pktin_queue[MAX_QUEUES];
+ odp_packet_t pkt_tbl[TX_BATCH_LEN];
+ odp_packet_t tmp_pkt[TX_BATCH_LEN];
+ uint32_t pkt_seq[TX_BATCH_LEN];
+ odp_time_t wait_time, end;
+ int num_rx = 0;
+ int num_queues;
+ int ret;
+ int i;
+
+ CU_ASSERT_FATAL(num_ifaces >= 1);
+
+ /* Open and configure interfaces */
+ for (i = 0; i < num_ifaces; ++i) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio[i], &capa) == 0);
+
+ odp_pktin_queue_param_init(&in_queue_param);
+ num_queues = capa.max_input_queues;
+ in_queue_param.num_queues = num_queues;
+ in_queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ in_queue_param.hash_proto.proto.ipv4_udp = 1;
+
+ ret = odp_pktin_queue_config(pktio[i], &in_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ odp_pktout_queue_param_init(&out_queue_param);
+ out_queue_param.num_queues = capa.max_output_queues;
+
+ ret = odp_pktout_queue_config(pktio[i], &out_queue_param);
+ CU_ASSERT_FATAL(ret == 0);
+
+ CU_ASSERT_FATAL(odp_pktio_start(pktio[i]) == 0);
+ }
+
+ for (i = 0; i < num_ifaces; ++i)
+ _pktio_wait_linkup(pktio[i]);
+
+ pktio_tx = pktio[0];
+ if (num_ifaces > 1)
+ pktio_rx = pktio[1];
+ else
+ pktio_rx = pktio_tx;
+
+ /* Allocate and initialize test packets */
+ for (i = 0; i < TX_BATCH_LEN; i++) {
+ pkt_tbl[i] = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt_tbl[i] == ODP_PACKET_INVALID)
+ break;
+
+ pkt_seq[i] = pktio_init_packet(pkt_tbl[i]);
+ if (pkt_seq[i] == TEST_SEQ_INVALID) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+
+ pktio_pkt_set_macs(pkt_tbl[i], pktio_tx, pktio_rx);
+
+ if (pktio_fixup_checksums(pkt_tbl[i]) != 0) {
+ odp_packet_free(pkt_tbl[i]);
+ break;
+ }
+ }
+ if (i != TX_BATCH_LEN) {
+ CU_FAIL("Failed to generate test packets");
+ return;
+ }
+
+ /* Send packets */
+ num_queues = odp_pktout_queue(pktio_tx, pktout_queue, MAX_QUEUES);
+ CU_ASSERT(num_queues > 0);
+ ret = odp_pktio_send_queue(pktout_queue[num_queues - 1], pkt_tbl,
+ TX_BATCH_LEN);
+ CU_ASSERT_FATAL(ret == TX_BATCH_LEN);
+
+ /* Receive packets */
+ num_queues = odp_pktin_queue(pktio_rx, pktin_queue, MAX_QUEUES);
+ CU_ASSERT(num_queues > 0);
+
+ wait_time = odp_time_local_from_ns(ODP_TIME_SEC_IN_NS);
+ end = odp_time_sum(odp_time_local(), wait_time);
+ do {
+ int n = 0;
+
+ for (i = 0; i < num_queues; i++) {
+ n = odp_pktio_recv_queue(pktin_queue[i], tmp_pkt,
+ TX_BATCH_LEN);
+ if (n != 0)
+ break;
+ }
+ if (n < 0)
+ break;
+ for (i = 0; i < n; i++) {
+ if (pktio_pkt_seq(tmp_pkt[i]) == pkt_seq[num_rx])
+ pkt_tbl[num_rx++] = tmp_pkt[i];
+ else
+ odp_packet_free(tmp_pkt[i]);
+ }
+ } while (num_rx < TX_BATCH_LEN &&
+ odp_time_cmp(end, odp_time_local()) > 0);
+
+ for (i = 0; i < num_rx; i++)
+ odp_packet_free(pkt_tbl[i]);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT_FATAL(odp_pktio_stop(pktio[i]) == 0);
+ CU_ASSERT_FATAL(odp_pktio_close(pktio[i]) == 0);
+ }
}
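
Note: pktio_test_recv_queue above is the first user of the queue-level direct I/O API added in this release. Stripped of the test scaffolding, the flow reduces to the sketch below; MAX_Q, BURST and direct_queue_io_sketch are placeholders, while the calls are the same ones the test uses.

#include <odp.h>

#define MAX_Q	10
#define BURST	4

static void direct_queue_io_sketch(odp_pktio_t pktio,
				   odp_packet_t pkts[], int num)
{
	odp_pktout_queue_t out[MAX_Q];
	odp_pktin_queue_t in[MAX_Q];
	int nq_out, nq_in;
	int sent = 0, recvd = 0;

	nq_out = odp_pktout_queue(pktio, out, MAX_Q);	/* query tx queues */
	nq_in = odp_pktin_queue(pktio, in, MAX_Q);	/* query rx queues */

	if (nq_out > 0)
		sent = odp_pktio_send_queue(out[0], pkts, num);
	if (nq_in > 0)
		recvd = odp_pktio_recv_queue(in[0], pkts, BURST);

	(void)sent;
	(void)recvd;
}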
void pktio_test_jumbo(void)
@@ -580,7 +727,7 @@ void pktio_test_mtu(void)
int mtu;
odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
mtu = odp_pktio_mtu(pktio);
@@ -597,7 +744,7 @@ void pktio_test_promisc(void)
int ret;
odp_pktio_t pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
ret = odp_pktio_promisc_mode_set(pktio, 1);
@@ -626,7 +773,7 @@ void pktio_test_mac(void)
odp_pktio_t pktio;
pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
printf("testing mac for %s\n", iface_name[0]);
@@ -655,9 +802,9 @@ void pktio_test_inq_remdef(void)
int i;
pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_POLL) == 0);
+ CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_PLAIN) == 0);
inq = odp_pktio_inq_getdef(pktio);
CU_ASSERT(inq != ODP_QUEUE_INVALID);
CU_ASSERT(odp_pktio_inq_remdef(pktio) == 0);
@@ -684,7 +831,7 @@ void pktio_test_open(void)
/* test the sequence open->close->open->close() */
for (i = 0; i < 2; ++i) {
pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
CU_ASSERT(odp_pktio_close(pktio) == 0);
}
@@ -725,8 +872,8 @@ static void pktio_test_print(void)
int i;
for (i = 0; i < num_ifaces; ++i) {
- pktio = create_pktio(i, ODP_PKTIN_MODE_POLL,
- ODP_PKTOUT_MODE_SEND);
+ pktio = create_pktio(i, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
/* Print pktio debug info and test that the
@@ -737,19 +884,336 @@ static void pktio_test_print(void)
}
}
+void pktio_test_pktin_queue_config_direct(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT(odp_pktio_capability(ODP_PKTIO_INVALID, &capa) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES)
+ == num_queues);
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES) < 0);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
+
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+void pktio_test_pktin_queue_config_sched(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_SCHED, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ queue_param.queue_param.sched.group = ODP_SCHED_GROUP_ALL;
+ queue_param.queue_param.sched.sync = ODP_SCHED_SYNC_ATOMIC;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
+ == num_queues);
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
+
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT_FATAL(odp_pktio_close(pktio) == 0);
+}
+
+void pktio_test_pktin_queue_config_queue(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktin_queue_param_t queue_param;
+ odp_pktin_queue_t pktin_queues[MAX_QUEUES];
+ odp_queue_t in_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_input_queues > 0);
+ num_queues = capa.max_input_queues;
+
+ odp_pktin_queue_param_init(&queue_param);
+
+ queue_param.hash_enable = (num_queues > 1) ? 1 : 0;
+ queue_param.hash_proto.proto.ipv4_udp = 1;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktin_event_queue(pktio, in_queues, MAX_QUEUES)
+ == num_queues);
+ CU_ASSERT(odp_pktin_queue(pktio, pktin_queues, MAX_QUEUES) < 0);
+
+ queue_param.num_queues = 1;
+ CU_ASSERT_FATAL(odp_pktin_queue_config(pktio, &queue_param) == 0);
+
+ queue_param.num_queues = capa.max_input_queues + 1;
+ CU_ASSERT(odp_pktin_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
+void pktio_test_pktout_queue_config(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_capability_t capa;
+ odp_pktout_queue_param_t queue_param;
+ odp_pktout_queue_t pktout_queues[MAX_QUEUES];
+ int num_queues;
+
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT, ODP_PKTOUT_MODE_DIRECT);
+ CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
+
+ CU_ASSERT_FATAL(odp_pktio_capability(pktio, &capa) == 0 &&
+ capa.max_output_queues > 0);
+ num_queues = capa.max_output_queues;
+
+ odp_pktout_queue_param_init(&queue_param);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT_UNSAFE;
+ queue_param.num_queues = num_queues;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktout_queue(pktio, pktout_queues, MAX_QUEUES)
+ == num_queues);
+
+ queue_param.op_mode = ODP_PKTIO_OP_MT;
+ queue_param.num_queues = 1;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) == 0);
+
+ CU_ASSERT(odp_pktout_queue_config(ODP_PKTIO_INVALID, &queue_param) < 0);
+
+ queue_param.num_queues = capa.max_output_queues + 1;
+ CU_ASSERT(odp_pktout_queue_config(pktio, &queue_param) < 0);
+
+ CU_ASSERT(odp_pktio_close(pktio) == 0);
+}
+
void pktio_test_inq(void)
{
odp_pktio_t pktio;
- pktio = create_pktio(0, ODP_PKTIN_MODE_POLL,
- ODP_PKTOUT_MODE_SEND);
+ pktio = create_pktio(0, ODP_PKTIN_MODE_QUEUE,
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio != ODP_PKTIO_INVALID);
- CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_POLL) == 0);
+ CU_ASSERT(create_inq(pktio, ODP_QUEUE_TYPE_PLAIN) == 0);
CU_ASSERT(destroy_inq(pktio) == 0);
CU_ASSERT(odp_pktio_close(pktio) == 0);
}
+#ifdef DEBUG_STATS
+static void _print_pktio_stats(odp_pktio_stats_t *s, const char *name)
+{
+ fprintf(stderr, "\n%s:\n"
+ " in_octets %" PRIu64 "\n"
+ " in_ucast_pkts %" PRIu64 "\n"
+ " in_discards %" PRIu64 "\n"
+ " in_errors %" PRIu64 "\n"
+ " in_unknown_protos %" PRIu64 "\n"
+ " out_octets %" PRIu64 "\n"
+ " out_ucast_pkts %" PRIu64 "\n"
+ " out_discards %" PRIu64 "\n"
+ " out_errors %" PRIu64 "\n",
+ name,
+ s->in_octets,
+ s->in_ucast_pkts,
+ s->in_discards,
+ s->in_errors,
+ s->in_unknown_protos,
+ s->out_octets,
+ s->out_ucast_pkts,
+ s->out_discards,
+ s->out_errors);
+}
+#endif
+
+/* Some pktios, such as netmap, support several methods for
+ * reading statistics counters. Ethtool strings are not
+ * standardised and sysfs may not be supported, so skip the
+ * pktio_stats test until this is resolved. */
+int pktio_check_statistics_counters(void)
+{
+ odp_pktio_t pktio;
+ odp_pktio_stats_t stats;
+ int ret;
+ odp_pktio_param_t pktio_param;
+ const char *iface = iface_name[0];
+
+ odp_pktio_param_init(&pktio_param);
+ pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
+
+ pktio = odp_pktio_open(iface, pool[0], &pktio_param);
+ if (pktio == ODP_PKTIO_INVALID)
+ return ODP_TEST_INACTIVE;
+
+ ret = odp_pktio_stats(pktio, &stats);
+ (void)odp_pktio_close(pktio);
+
+ if (ret == 0)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
+}
+
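+/* Check functions now return ODP_TEST_ACTIVE or ODP_TEST_INACTIVE
+ * rather than a bare boolean, pairing with conditional registration
+ * as done in the suite arrays at the bottom of this file, e.g.:
+ *
+ *	ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
+ *				  pktio_check_statistics_counters)
+ */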
+void pktio_test_statistics_counters(void)
+{
+ odp_pktio_t pktio[MAX_NUM_IFACES];
+ odp_packet_t pkt;
+ odp_event_t tx_ev[1000];
+ odp_event_t ev;
+ int i, pkts, ret, alloc = 0;
+ odp_queue_t outq;
+ uint64_t wait = odp_schedule_wait_time(ODP_TIME_MSEC_IN_NS);
+ odp_pktio_stats_t stats[2];
+
+ for (i = 0; i < num_ifaces; i++) {
+ pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
+ ODP_PKTOUT_MODE_DIRECT);
+
+ CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
+ create_inq(pktio[i], ODP_QUEUE_TYPE_SCHED);
+ }
+
+ outq = odp_pktio_outq_getdef(pktio[0]);
+
+ ret = odp_pktio_start(pktio[0]);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_start(pktio[1]);
+ CU_ASSERT(ret == 0);
+ }
+
+ /* flush packets with magic number in pipes */
+ for (i = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID)
+ odp_event_free(ev);
+ }
+
+ /* alloc */
+ for (alloc = 0; alloc < 1000; alloc++) {
+ pkt = odp_packet_alloc(default_pkt_pool, packet_len);
+ if (pkt == ODP_PACKET_INVALID)
+ break;
+ pktio_init_packet(pkt);
+ tx_ev[alloc] = odp_packet_to_event(pkt);
+ }
+
+ ret = odp_pktio_stats_reset(pktio[0]);
+ CU_ASSERT(ret == 0);
+ if (num_ifaces > 1) {
+ ret = odp_pktio_stats_reset(pktio[1]);
+ CU_ASSERT(ret == 0);
+ }
+
+ /* send */
+ for (pkts = 0; pkts != alloc; ) {
+ ret = odp_queue_enq_multi(outq, &tx_ev[pkts], alloc - pkts);
+ if (ret < 0) {
+ CU_FAIL("unable to enqueue packet\n");
+ break;
+ }
+ pkts += ret;
+ }
+
+ /* get */
+ for (i = 0, pkts = 0; i < 1000; i++) {
+ ev = odp_schedule(NULL, wait);
+ if (ev != ODP_EVENT_INVALID) {
+ if (odp_event_type(ev) == ODP_EVENT_PACKET) {
+ pkt = odp_packet_from_event(ev);
+ if (pktio_pkt_seq(pkt) != TEST_SEQ_INVALID)
+ pkts++;
+ }
+ odp_event_free(ev);
+ }
+ }
+
+ ret = odp_pktio_stats(pktio[0], &stats[0]);
+ CU_ASSERT(ret == 0);
+
+ if (num_ifaces > 1) {
+ ret = odp_pktio_stats(pktio[1], &stats[1]);
+ CU_ASSERT(ret == 0);
+ CU_ASSERT((stats[1].in_ucast_pkts == 0) ||
+ (stats[1].in_ucast_pkts >= (uint64_t)pkts));
+ CU_ASSERT(stats[0].out_ucast_pkts == stats[1].in_ucast_pkts);
+ CU_ASSERT(stats[0].out_octets == stats[1].in_octets);
+ CU_ASSERT((stats[0].out_octets == 0) ||
+ (stats[0].out_octets >=
+ (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ } else {
+ CU_ASSERT((stats[0].in_ucast_pkts == 0) ||
+ (stats[0].in_ucast_pkts == (uint64_t)pkts));
+ CU_ASSERT((stats[0].in_octets == 0) ||
+ (stats[0].in_octets ==
+ (PKT_LEN_NORMAL * (uint64_t)pkts)));
+ }
+
+ CU_ASSERT(pkts == alloc);
+ CU_ASSERT(0 == stats[0].in_discards);
+ CU_ASSERT(0 == stats[0].in_errors);
+ CU_ASSERT(0 == stats[0].in_unknown_protos);
+ CU_ASSERT(0 == stats[0].out_discards);
+ CU_ASSERT(0 == stats[0].out_errors);
+
+ for (i = 0; i < num_ifaces; i++) {
+ CU_ASSERT(odp_pktio_stop(pktio[i]) == 0);
+#ifdef DEBUG_STATS
+ _print_pktio_stats(&stats[i], iface_name[i]);
+#endif
+ destroy_inq(pktio[i]);
+ CU_ASSERT(odp_pktio_close(pktio[i]) == 0);
+ }
+}
+
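
Note: outside the test harness, reading and resetting the new counters reduces to the fragment below (pktio is assumed to be an opened and started handle; stats_sketch is illustrative, and only calls appearing in this diff are used):

#include <inttypes.h>
#include <stdio.h>
#include <odp.h>

static void stats_sketch(odp_pktio_t pktio)
{
	odp_pktio_stats_t s;

	if (odp_pktio_stats(pktio, &s) == 0)
		printf("in_octets %" PRIu64 " out_octets %" PRIu64 "\n",
		       s.in_octets, s.out_octets);

	(void)odp_pktio_stats_reset(pktio);	/* zero the counters */
}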
void pktio_test_start_stop(void)
{
odp_pktio_t pktio[MAX_NUM_IFACES];
@@ -762,7 +1226,7 @@ void pktio_test_start_stop(void)
for (i = 0; i < num_ifaces; i++) {
pktio[i] = create_pktio(i, ODP_PKTIN_MODE_SCHED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
CU_ASSERT_FATAL(pktio[i] != ODP_PKTIO_INVALID);
create_inq(pktio[i], ODP_QUEUE_TYPE_SCHED);
}
@@ -781,6 +1245,8 @@ void pktio_test_start_stop(void)
ret = odp_pktio_start(pktio[0]);
CU_ASSERT(ret < 0);
+ _pktio_wait_linkup(pktio[0]);
+
/* Test Rx on a stopped interface. Only works if there are 2 */
if (num_ifaces > 1) {
for (alloc = 0; alloc < 1000; alloc++) {
@@ -828,6 +1294,8 @@ void pktio_test_start_stop(void)
ret = odp_pktio_start(pktio[1]);
CU_ASSERT(ret == 0);
+ _pktio_wait_linkup(pktio[1]);
+
/* flush packets with magic number in pipes */
for (i = 0; i < 1000; i++) {
ev = odp_schedule(NULL, wait);
@@ -899,12 +1367,12 @@ int pktio_check_send_failure(void)
memset(&pktio_param, 0, sizeof(pktio_param));
- pktio_param.in_mode = ODP_PKTIN_MODE_RECV;
+ pktio_param.in_mode = ODP_PKTIN_MODE_DIRECT;
pktio_tx = odp_pktio_open(iface, pool[iface_idx], &pktio_param);
if (pktio_tx == ODP_PKTIO_INVALID) {
fprintf(stderr, "%s: failed to open pktio\n", __func__);
- return 0;
+ return ODP_TEST_INACTIVE;
}
/* read the MTU from the transmit interface */
@@ -912,7 +1380,10 @@ int pktio_check_send_failure(void)
odp_pktio_close(pktio_tx);
- return (mtu <= ODP_CONFIG_PACKET_BUF_LEN_MAX - 32);
+ if (mtu <= ODP_CONFIG_PACKET_BUF_LEN_MAX - 32)
+ return ODP_TEST_ACTIVE;
+
+ return ODP_TEST_INACTIVE;
}
void pktio_test_send_failure(void)
@@ -926,8 +1397,8 @@ void pktio_test_send_failure(void)
int long_pkt_idx = TX_BATCH_LEN / 2;
pktio_info_t info_rx;
- pktio_tx = create_pktio(0, ODP_PKTIN_MODE_RECV,
- ODP_PKTOUT_MODE_SEND);
+ pktio_tx = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
if (pktio_tx == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open pktio");
return;
@@ -939,6 +1410,8 @@ void pktio_test_send_failure(void)
ret = odp_pktio_start(pktio_tx);
CU_ASSERT_FATAL(ret == 0);
+ _pktio_wait_linkup(pktio_tx);
+
/* configure the pool so that we can generate test packets larger
* than the interface MTU */
memset(&pool_params, 0, sizeof(pool_params));
@@ -950,10 +1423,12 @@ void pktio_test_send_failure(void)
CU_ASSERT_FATAL(pkt_pool != ODP_POOL_INVALID);
if (num_ifaces > 1) {
- pktio_rx = create_pktio(1, ODP_PKTIN_MODE_RECV,
- ODP_PKTOUT_MODE_SEND);
+ pktio_rx = create_pktio(1, ODP_PKTIN_MODE_DIRECT,
+ ODP_PKTOUT_MODE_DIRECT);
ret = odp_pktio_start(pktio_rx);
CU_ASSERT_FATAL(ret == 0);
+
+ _pktio_wait_linkup(pktio_rx);
} else {
pktio_rx = pktio_tx;
}
@@ -998,7 +1473,7 @@ void pktio_test_send_failure(void)
info_rx.id = pktio_rx;
info_rx.outq = ODP_QUEUE_INVALID;
info_rx.inq = ODP_QUEUE_INVALID;
- info_rx.in_mode = ODP_PKTIN_MODE_RECV;
+ info_rx.in_mode = ODP_PKTIN_MODE_DIRECT;
i = wait_for_packets(&info_rx, pkt_tbl, pkt_seq, ret,
TXRX_MODE_MULTI, ODP_TIME_SEC_IN_NS);
@@ -1057,7 +1532,7 @@ void pktio_test_recv_on_wonly(void)
int ret;
pktio = create_pktio(0, ODP_PKTIN_MODE_DISABLED,
- ODP_PKTOUT_MODE_SEND);
+ ODP_PKTOUT_MODE_DIRECT);
if (pktio == ODP_PKTIO_INVALID) {
CU_FAIL("failed to open pktio");
@@ -1067,6 +1542,8 @@ void pktio_test_recv_on_wonly(void)
ret = odp_pktio_start(pktio);
CU_ASSERT_FATAL(ret == 0);
+ _pktio_wait_linkup(pktio);
+
ret = odp_pktio_recv(pktio, &pkt, 1);
CU_ASSERT(ret < 0);
@@ -1086,7 +1563,7 @@ void pktio_test_send_on_ronly(void)
odp_packet_t pkt;
int ret;
- pktio = create_pktio(0, ODP_PKTIN_MODE_RECV,
+ pktio = create_pktio(0, ODP_PKTIN_MODE_DIRECT,
ODP_PKTOUT_MODE_DISABLED);
if (pktio == ODP_PKTIO_INVALID) {
@@ -1097,6 +1574,8 @@ void pktio_test_send_on_ronly(void)
ret = odp_pktio_start(pktio);
CU_ASSERT_FATAL(ret == 0);
+ _pktio_wait_linkup(pktio);
+
pkt = odp_packet_alloc(default_pkt_pool, packet_len);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID)
@@ -1221,13 +1700,18 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO(pktio_test_open),
ODP_TEST_INFO(pktio_test_lookup),
ODP_TEST_INFO(pktio_test_print),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_direct),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_sched),
+ ODP_TEST_INFO(pktio_test_pktin_queue_config_queue),
+ ODP_TEST_INFO(pktio_test_pktout_queue_config),
ODP_TEST_INFO(pktio_test_inq),
- ODP_TEST_INFO(pktio_test_poll_queue),
- ODP_TEST_INFO(pktio_test_poll_multi),
+ ODP_TEST_INFO(pktio_test_plain_queue),
+ ODP_TEST_INFO(pktio_test_plain_multi),
ODP_TEST_INFO(pktio_test_sched_queue),
ODP_TEST_INFO(pktio_test_sched_multi),
ODP_TEST_INFO(pktio_test_recv),
ODP_TEST_INFO(pktio_test_recv_multi),
+ ODP_TEST_INFO(pktio_test_recv_queue),
ODP_TEST_INFO(pktio_test_jumbo),
ODP_TEST_INFO_CONDITIONAL(pktio_test_send_failure,
pktio_check_send_failure),
@@ -1238,12 +1722,14 @@ odp_testinfo_t pktio_suite_unsegmented[] = {
ODP_TEST_INFO(pktio_test_start_stop),
ODP_TEST_INFO(pktio_test_recv_on_wonly),
ODP_TEST_INFO(pktio_test_send_on_ronly),
+ ODP_TEST_INFO_CONDITIONAL(pktio_test_statistics_counters,
+ pktio_check_statistics_counters),
ODP_TEST_INFO_NULL
};
odp_testinfo_t pktio_suite_segmented[] = {
- ODP_TEST_INFO(pktio_test_poll_queue),
- ODP_TEST_INFO(pktio_test_poll_multi),
+ ODP_TEST_INFO(pktio_test_plain_queue),
+ ODP_TEST_INFO(pktio_test_plain_multi),
ODP_TEST_INFO(pktio_test_sched_queue),
ODP_TEST_INFO(pktio_test_sched_multi),
ODP_TEST_INFO(pktio_test_recv),
diff --git a/test/validation/pktio/pktio.h b/test/validation/pktio/pktio.h
index 58fdbca5b..22fd814d6 100644
--- a/test/validation/pktio/pktio.h
+++ b/test/validation/pktio/pktio.h
@@ -10,12 +10,13 @@
#include <odp_cunit_common.h>
/* test functions: */
-void pktio_test_poll_queue(void);
-void pktio_test_poll_multi(void);
+void pktio_test_plain_queue(void);
+void pktio_test_plain_multi(void);
void pktio_test_sched_queue(void);
void pktio_test_sched_multi(void);
void pktio_test_recv(void);
void pktio_test_recv_multi(void);
+void pktio_test_recv_queue(void);
void pktio_test_jumbo(void);
void pktio_test_mtu(void);
void pktio_test_promisc(void);
@@ -24,11 +25,17 @@ void pktio_test_inq_remdef(void);
void pktio_test_open(void);
void pktio_test_lookup(void);
void pktio_test_inq(void);
+void pktio_test_pktin_queue_config_direct(void);
+void pktio_test_pktin_queue_config_sched(void);
+void pktio_test_pktin_queue_config_queue(void);
+void pktio_test_pktout_queue_config(void);
void pktio_test_start_stop(void);
int pktio_check_send_failure(void);
void pktio_test_send_failure(void);
void pktio_test_recv_on_wonly(void);
void pktio_test_send_on_ronly(void);
+int pktio_check_statistics_counters(void);
+void pktio_test_statistics_counters(void);
/* test arrays: */
extern odp_testinfo_t pktio_suite[];
diff --git a/test/validation/queue/queue.c b/test/validation/queue/queue.c
index 3c1c64ab1..7c55eb335 100644
--- a/test/validation/queue/queue.c
+++ b/test/validation/queue/queue.c
@@ -54,15 +54,17 @@ void queue_test_sunnydays(void)
odp_queue_param_t qparams;
odp_queue_param_init(&qparams);
+ qparams.type = ODP_QUEUE_TYPE_SCHED;
qparams.sched.prio = ODP_SCHED_PRIO_LOWEST;
- qparams.sched.sync = ODP_SCHED_SYNC_NONE;
+ qparams.sched.sync = ODP_SCHED_SYNC_PARALLEL;
qparams.sched.group = ODP_SCHED_GROUP_WORKER;
- queue_creat_id = odp_queue_create("test_queue",
- ODP_QUEUE_TYPE_POLL, &qparams);
+ queue_creat_id = odp_queue_create("test_queue", &qparams);
CU_ASSERT(ODP_QUEUE_INVALID != queue_creat_id);
+ CU_ASSERT(odp_queue_to_u64(queue_creat_id) !=
+ odp_queue_to_u64(ODP_QUEUE_INVALID));
- CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_POLL,
+ CU_ASSERT_EQUAL(ODP_QUEUE_TYPE_SCHED,
odp_queue_type(queue_creat_id));
queue_id = odp_queue_lookup("test_queue");
@@ -71,7 +73,8 @@ void queue_test_sunnydays(void)
CU_ASSERT_EQUAL(ODP_SCHED_GROUP_WORKER,
odp_queue_sched_group(queue_id));
CU_ASSERT_EQUAL(ODP_SCHED_PRIO_LOWEST, odp_queue_sched_prio(queue_id));
- CU_ASSERT_EQUAL(ODP_SCHED_SYNC_NONE, odp_queue_sched_type(queue_id));
+ CU_ASSERT_EQUAL(ODP_SCHED_SYNC_PARALLEL,
+ odp_queue_sched_type(queue_id));
CU_ASSERT(0 == odp_queue_context_set(queue_id, &queue_contest));
@@ -127,46 +130,47 @@ void queue_test_sunnydays(void)
void queue_test_info(void)
{
- odp_queue_t q_poll, q_order;
- const char *const nq_poll = "test_q_poll";
+ odp_queue_t q_plain, q_order;
+ const char *const nq_plain = "test_q_plain";
const char *const nq_order = "test_q_order";
odp_queue_info_t info;
odp_queue_param_t param;
- char q_poll_ctx[] = "test_q_poll context data";
+ char q_plain_ctx[] = "test_q_plain context data";
char q_order_ctx[] = "test_q_order context data";
unsigned lock_count;
char *ctx;
int ret;
- /* Create a polled queue and set context */
- q_poll = odp_queue_create(nq_poll, ODP_QUEUE_TYPE_POLL, NULL);
- CU_ASSERT(ODP_QUEUE_INVALID != q_poll);
- CU_ASSERT(odp_queue_context_set(q_poll, q_poll_ctx) == 0);
+ /* Create a plain queue and set context */
+ q_plain = odp_queue_create(nq_plain, NULL);
+ CU_ASSERT(ODP_QUEUE_INVALID != q_plain);
+ CU_ASSERT(odp_queue_context_set(q_plain, q_plain_ctx) == 0);
/* Create a scheduled ordered queue with explicitly set params */
odp_queue_param_init(&param);
+ param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = ODP_SCHED_PRIO_NORMAL;
param.sched.sync = ODP_SCHED_SYNC_ORDERED;
param.sched.group = ODP_SCHED_GROUP_ALL;
param.sched.lock_count = 1;
param.context = q_order_ctx;
- q_order = odp_queue_create(nq_order, ODP_QUEUE_TYPE_SCHED, &param);
+ q_order = odp_queue_create(nq_order, &param);
CU_ASSERT(ODP_QUEUE_INVALID != q_order);
- /* Check info for the polled queue */
- CU_ASSERT(odp_queue_info(q_poll, &info) == 0);
- CU_ASSERT(strcmp(nq_poll, info.name) == 0);
- CU_ASSERT(info.type == ODP_QUEUE_TYPE_POLL);
- CU_ASSERT(info.type == odp_queue_type(q_poll));
+ /* Check info for the plain queue */
+ CU_ASSERT(odp_queue_info(q_plain, &info) == 0);
+ CU_ASSERT(strcmp(nq_plain, info.name) == 0);
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_PLAIN);
+ CU_ASSERT(info.param.type == odp_queue_type(q_plain));
ctx = info.param.context; /* 'char' context ptr */
- CU_ASSERT(ctx == q_poll_ctx);
- CU_ASSERT(info.param.context == odp_queue_context(q_poll));
+ CU_ASSERT(ctx == q_plain_ctx);
+ CU_ASSERT(info.param.context == odp_queue_context(q_plain));
/* Check info for the scheduled ordered queue */
CU_ASSERT(odp_queue_info(q_order, &info) == 0);
CU_ASSERT(strcmp(nq_order, info.name) == 0);
- CU_ASSERT(info.type == ODP_QUEUE_TYPE_SCHED);
- CU_ASSERT(info.type == odp_queue_type(q_order));
+ CU_ASSERT(info.param.type == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT(info.param.type == odp_queue_type(q_order));
ctx = info.param.context; /* 'char' context ptr */
CU_ASSERT(ctx == q_order_ctx);
CU_ASSERT(info.param.context == odp_queue_context(q_order));
@@ -178,7 +182,7 @@ void queue_test_info(void)
lock_count = (unsigned) ret;
CU_ASSERT(info.param.sched.lock_count == lock_count);
- CU_ASSERT(odp_queue_destroy(q_poll) == 0);
+ CU_ASSERT(odp_queue_destroy(q_plain) == 0);
CU_ASSERT(odp_queue_destroy(q_order) == 0);
}
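
Note: the change running through this file is the new odp_queue_create() signature: the queue type argument is gone and the type moves into odp_queue_param_t. In sketch form, with the queue names hypothetical and the parameter values matching those used in the tests above:

odp_queue_param_t param;
odp_queue_t q;

odp_queue_param_init(&param);
param.type        = ODP_QUEUE_TYPE_SCHED;
param.sched.prio  = ODP_SCHED_PRIO_DEFAULT;
param.sched.sync  = ODP_SCHED_SYNC_PARALLEL;	/* was ODP_SCHED_SYNC_NONE */
param.sched.group = ODP_SCHED_GROUP_ALL;
q = odp_queue_create("sketch_sched", &param);

/* NULL params create a default ODP_QUEUE_TYPE_PLAIN queue */
q = odp_queue_create("sketch_plain", NULL);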
diff --git a/test/validation/scheduler/scheduler.c b/test/validation/scheduler/scheduler.c
index ff95b4b27..dcf01c073 100644
--- a/test/validation/scheduler/scheduler.c
+++ b/test/validation/scheduler/scheduler.c
@@ -129,7 +129,11 @@ void scheduler_test_wait_time(void)
/* check ODP_SCHED_NO_WAIT */
odp_queue_param_init(&qp);
- queue = odp_queue_create("dummy_queue", ODP_QUEUE_TYPE_SCHED, &qp);
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ qp.sched.prio = ODP_SCHED_PRIO_NORMAL;
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
+ queue = odp_queue_create("dummy_queue", &qp);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
wait_time = odp_schedule_wait_time(ODP_TIME_SEC_IN_NS);
@@ -184,7 +188,7 @@ void scheduler_test_queue_destroy(void)
odp_event_t ev;
uint32_t *u32;
int i;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
@@ -200,11 +204,12 @@ void scheduler_test_queue_destroy(void)
CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
for (i = 0; i < 3; i++) {
+ qp.type = ODP_QUEUE_TYPE_SCHED;
qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qp.sched.sync = sync[i];
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
- queue = odp_queue_create("sched_destroy_queue",
- ODP_QUEUE_TYPE_SCHED, &qp);
+ queue = odp_queue_create("sched_destroy_queue", &qp);
CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
@@ -249,7 +254,7 @@ void scheduler_test_groups(void)
odp_event_t ev;
uint32_t *u32;
int i, j, rc;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
int thr_id = odp_thread_id();
@@ -337,13 +342,13 @@ void scheduler_test_groups(void)
CU_ASSERT_FATAL(p != ODP_POOL_INVALID);
for (i = 0; i < 3; i++) {
+ qp.type = ODP_QUEUE_TYPE_SCHED;
qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
qp.sched.sync = sync[i];
qp.sched.group = mygrp1;
/* Create and populate a group in group 1 */
- queue_grp1 = odp_queue_create("sched_group_test_queue_1",
- ODP_QUEUE_TYPE_SCHED, &qp);
+ queue_grp1 = odp_queue_create("sched_group_test_queue_1", &qp);
CU_ASSERT_FATAL(queue_grp1 != ODP_QUEUE_INVALID);
CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp1) == mygrp1);
@@ -360,8 +365,7 @@ void scheduler_test_groups(void)
/* Now create and populate a queue in group 2 */
qp.sched.group = mygrp2;
- queue_grp2 = odp_queue_create("sched_group_test_queue_2",
- ODP_QUEUE_TYPE_SCHED, &qp);
+ queue_grp2 = odp_queue_create("sched_group_test_queue_2", &qp);
CU_ASSERT_FATAL(queue_grp2 != ODP_QUEUE_INVALID);
CU_ASSERT_FATAL(odp_queue_sched_group(queue_grp2) == mygrp2);
@@ -533,7 +537,7 @@ static void chaos_run(unsigned int qtype)
odp_queue_t from;
int i, rc;
uint64_t wait;
- odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_NONE,
+ odp_schedule_sync_t sync[] = {ODP_SCHED_SYNC_PARALLEL,
ODP_SCHED_SYNC_ATOMIC,
ODP_SCHED_SYNC_ORDERED};
const unsigned num_sync = (sizeof(sync) / sizeof(sync[0]));
@@ -562,7 +566,9 @@ static void chaos_run(unsigned int qtype)
pool = odp_pool_create("sched_chaos_pool", &params);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
- qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.type = ODP_QUEUE_TYPE_SCHED;
+ qp.sched.prio = ODP_SCHED_PRIO_DEFAULT;
+ qp.sched.group = ODP_SCHED_GROUP_ALL;
for (i = 0; i < CHAOS_NUM_QUEUES; i++) {
uint32_t ndx = qtype == num_sync ? i % num_sync : qtype;
@@ -574,9 +580,7 @@ static void chaos_run(unsigned int qtype)
qtypes[ndx]);
globals->chaos_q[i].handle =
- odp_queue_create(globals->chaos_q[i].name,
- ODP_QUEUE_TYPE_SCHED,
- &qp);
+ odp_queue_create(globals->chaos_q[i].name, &qp);
CU_ASSERT_FATAL(globals->chaos_q[i].handle !=
ODP_QUEUE_INVALID);
rc = odp_queue_context_set(globals->chaos_q[i].handle,
@@ -842,7 +846,7 @@ static void *schedule_common_(void *arg)
for (i = 0; i < args->num_prio; i++) {
for (j = 0; j < args->num_queues; j++) {
snprintf(name, sizeof(name),
- "poll_%d_%d_o", i, j);
+ "plain_%d_%d_o", i, j);
pq = odp_queue_lookup(name);
CU_ASSERT_FATAL(pq != ODP_QUEUE_INVALID);
@@ -898,7 +902,7 @@ static void fill_queues(thread_args_t *args)
odp_queue_t queue;
switch (sync) {
- case ODP_SCHED_SYNC_NONE:
+ case ODP_SCHED_SYNC_PARALLEL:
snprintf(name, sizeof(name),
"sched_%d_%d_n", i, j);
break;
@@ -1047,10 +1051,10 @@ static void parallel_execute(odp_schedule_sync_t sync, int num_queues,
reset_queues(args);
}
-/* 1 queue 1 thread ODP_SCHED_SYNC_NONE */
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_1q_1t_n(void)
{
- schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_ONE);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_ONE);
}
/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC */
@@ -1065,12 +1069,12 @@ void scheduler_test_1q_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_ONE);
}
-/* Many queues 1 thread ODP_SCHED_SYNC_NONE */
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_mq_1t_n(void)
{
/* Only one priority involved in these tests, but use
the same number of queues the more general case uses */
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_ONE);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_ONE);
}
/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC */
@@ -1085,12 +1089,12 @@ void scheduler_test_mq_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_ONE);
}
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE */
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_mq_1t_prio_n(void)
{
int prio = odp_schedule_num_prio();
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE);
}
/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC */
@@ -1109,12 +1113,12 @@ void scheduler_test_mq_1t_prio_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_ONE);
}
-/* Many queues many threads check priority ODP_SCHED_SYNC_NONE */
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL */
void scheduler_test_mq_mt_prio_n(void)
{
int prio = odp_schedule_num_prio();
- parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_ONE,
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_ONE,
DISABLE_EXCL_ATOMIC);
}
@@ -1143,10 +1147,10 @@ void scheduler_test_1q_mt_a_excl(void)
ENABLE_EXCL_ATOMIC);
}
-/* 1 queue 1 thread ODP_SCHED_SYNC_NONE multi */
+/* 1 queue 1 thread ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_1q_1t_n(void)
{
- schedule_common(ODP_SCHED_SYNC_NONE, ONE_Q, ONE_PRIO, SCHD_MULTI);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, ONE_Q, ONE_PRIO, SCHD_MULTI);
}
/* 1 queue 1 thread ODP_SCHED_SYNC_ATOMIC multi */
@@ -1161,12 +1165,12 @@ void scheduler_test_multi_1q_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, ONE_Q, ONE_PRIO, SCHD_MULTI);
}
-/* Many queues 1 thread ODP_SCHED_SYNC_NONE multi */
+/* Many queues 1 thread ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_mq_1t_n(void)
{
/* Only one priority involved in these tests, but use
the same number of queues the more general case uses */
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, ONE_PRIO, SCHD_MULTI);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, ONE_PRIO, SCHD_MULTI);
}
/* Many queues 1 thread ODP_SCHED_SYNC_ATOMIC multi */
@@ -1181,12 +1185,12 @@ void scheduler_test_multi_mq_1t_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, ONE_PRIO, SCHD_MULTI);
}
-/* Many queues 1 thread check priority ODP_SCHED_SYNC_NONE multi */
+/* Many queues 1 thread check priority ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_mq_1t_prio_n(void)
{
int prio = odp_schedule_num_prio();
- schedule_common(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI);
+ schedule_common(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI);
}
/* Many queues 1 thread check priority ODP_SCHED_SYNC_ATOMIC multi */
@@ -1205,12 +1209,12 @@ void scheduler_test_multi_mq_1t_prio_o(void)
schedule_common(ODP_SCHED_SYNC_ORDERED, MANY_QS, prio, SCHD_MULTI);
}
-/* Many queues many threads check priority ODP_SCHED_SYNC_NONE multi */
+/* Many queues many threads check priority ODP_SCHED_SYNC_PARALLEL multi */
void scheduler_test_multi_mq_mt_prio_n(void)
{
int prio = odp_schedule_num_prio();
- parallel_execute(ODP_SCHED_SYNC_NONE, MANY_QS, prio, SCHD_MULTI, 0);
+ parallel_execute(ODP_SCHED_SYNC_PARALLEL, MANY_QS, prio, SCHD_MULTI, 0);
}
/* Many queues many threads check priority ODP_SCHED_SYNC_ATOMIC multi */
@@ -1318,6 +1322,7 @@ static int create_queues(void)
for (i = 0; i < prios; i++) {
odp_queue_param_t p;
odp_queue_param_init(&p);
+ p.type = ODP_QUEUE_TYPE_SCHED;
p.sched.prio = i;
for (j = 0; j < QUEUES_PER_PRIO; j++) {
@@ -1326,8 +1331,8 @@ static int create_queues(void)
odp_queue_t q, pq;
snprintf(name, sizeof(name), "sched_%d_%d_n", i, j);
- p.sched.sync = ODP_SCHED_SYNC_NONE;
- q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+ p.sched.sync = ODP_SCHED_SYNC_PARALLEL;
+ q = odp_queue_create(name, &p);
if (q == ODP_QUEUE_INVALID) {
printf("Schedule queue create failed.\n");
@@ -1336,24 +1341,24 @@ static int create_queues(void)
snprintf(name, sizeof(name), "sched_%d_%d_a", i, j);
p.sched.sync = ODP_SCHED_SYNC_ATOMIC;
- q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+ q = odp_queue_create(name, &p);
if (q == ODP_QUEUE_INVALID) {
printf("Schedule queue create failed.\n");
return -1;
}
- snprintf(name, sizeof(name), "poll_%d_%d_o", i, j);
- pq = odp_queue_create(name, ODP_QUEUE_TYPE_POLL, NULL);
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
+ pq = odp_queue_create(name, NULL);
if (pq == ODP_QUEUE_INVALID) {
- printf("Poll queue create failed.\n");
+ printf("Plain queue create failed.\n");
return -1;
}
queue_ctx_buf = odp_buffer_alloc(queue_ctx_pool);
if (queue_ctx_buf == ODP_BUFFER_INVALID) {
- printf("Cannot allocate poll queue ctx buf\n");
+ printf("Cannot allocate plain queue ctx buf\n");
return -1;
}
@@ -1364,7 +1369,7 @@ static int create_queues(void)
rc = odp_queue_context_set(pq, pqctx);
if (rc != 0) {
- printf("Cannot set poll queue context\n");
+ printf("Cannot set plain queue context\n");
return -1;
}
@@ -1372,7 +1377,7 @@ static int create_queues(void)
p.sched.sync = ODP_SCHED_SYNC_ORDERED;
p.sched.lock_count =
ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE;
- q = odp_queue_create(name, ODP_QUEUE_TYPE_SCHED, &p);
+ q = odp_queue_create(name, &p);
if (q == ODP_QUEUE_INVALID) {
printf("Schedule queue create failed.\n");
@@ -1516,7 +1521,7 @@ static int destroy_queues(void)
if (destroy_queue(name) != 0)
return -1;
- snprintf(name, sizeof(name), "poll_%d_%d_o", i, j);
+ snprintf(name, sizeof(name), "plain_%d_%d_o", i, j);
if (destroy_queue(name) != 0)
return -1;
}
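The scheduler hunks above all track the same queue API change: the queue type moves out of the odp_queue_create() argument list and into odp_queue_param_t, ODP_SCHED_SYNC_NONE becomes ODP_SCHED_SYNC_PARALLEL, and "poll" queues become "plain" queues. A minimal sketch of the new creation pattern (the helper name is illustrative, not part of the patch):

    #include <odp.h>

    /* Create a parallel scheduled queue via the two-argument API.
     * A minimal sketch; error handling is left to the caller. */
    static odp_queue_t create_sched_queue(const char *name)
    {
        odp_queue_param_t param;

        odp_queue_param_init(&param);
        param.type = ODP_QUEUE_TYPE_SCHED;          /* was a create() argument */
        param.sched.prio = ODP_SCHED_PRIO_DEFAULT;
        param.sched.sync = ODP_SCHED_SYNC_PARALLEL; /* was ODP_SCHED_SYNC_NONE */
        param.sched.group = ODP_SCHED_GROUP_ALL;

        return odp_queue_create(name, &param);
    }

A plain queue needs no parameters at all: odp_queue_create(name, NULL) now creates one, as the plain_%d_%d_o hunks show.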
diff --git a/test/validation/std_clib/std_clib.c b/test/validation/std_clib/std_clib.c
index e53ad3946..e69bc3901 100644
--- a/test/validation/std_clib/std_clib.c
+++ b/test/validation/std_clib/std_clib.c
@@ -44,9 +44,47 @@ static void std_clib_test_memset(void)
CU_ASSERT(ret == 0);
}
+static void std_clib_test_memcmp(void)
+{
+ uint8_t data[] = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8_t equal[] = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ uint8_t greater_11[] = {1, 2, 3, 4, 5, 6, 7, 8,
+ 9, 10, 99, 12, 13, 14, 15, 16};
+ uint8_t less_6[] = {1, 2, 3, 4, 5, 2, 7, 8,
+ 9, 10, 11, 12, 13, 14, 15, 16};
+ size_t i;
+
+ CU_ASSERT(odp_memcmp(data, equal, 0) == 0);
+ CU_ASSERT(odp_memcmp(data, equal, sizeof(data)) == 0);
+ CU_ASSERT(odp_memcmp(data, equal, sizeof(data) - 3) == 0);
+
+ CU_ASSERT(odp_memcmp(greater_11, data, sizeof(data)) > 0);
+ CU_ASSERT(odp_memcmp(greater_11, data, 11) > 0);
+ CU_ASSERT(odp_memcmp(greater_11, data, 10) == 0);
+
+ CU_ASSERT(odp_memcmp(less_6, data, sizeof(data)) < 0);
+ CU_ASSERT(odp_memcmp(less_6, data, 6) < 0);
+ CU_ASSERT(odp_memcmp(less_6, data, 5) == 0);
+
+ for (i = 0; i < sizeof(data); i++) {
+ uint8_t tmp;
+
+ CU_ASSERT(odp_memcmp(data, equal, i + 1) == 0);
+ tmp = equal[i];
+ equal[i] = 88;
+ CU_ASSERT(odp_memcmp(data, equal, i + 1) < 0);
+ equal[i] = 0;
+ CU_ASSERT(odp_memcmp(data, equal, i + 1) > 0);
+ equal[i] = tmp;
+ }
+}
+
odp_testinfo_t std_clib_suite[] = {
ODP_TEST_INFO(std_clib_test_memcpy),
ODP_TEST_INFO(std_clib_test_memset),
+ ODP_TEST_INFO(std_clib_test_memcmp),
ODP_TEST_INFO_NULL,
};
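The new test pins down the expected semantics: odp_memcmp() compares only the first n bytes and the sign of the result follows the first differing byte, as with C memcmp(). A standalone illustration of the same contract (assuming, as the test does, that odp_memcmp() is callable like its libc counterpart):

    #include <odp.h>
    #include <assert.h>

    static void memcmp_contract(void)
    {
        uint8_t a[] = {1, 2, 3};
        uint8_t b[] = {1, 2, 9};

        assert(odp_memcmp(a, b, 2) == 0); /* bytes 0..1 match */
        assert(odp_memcmp(a, b, 3) < 0);  /* a[2] < b[2] decides the sign */
        assert(odp_memcmp(b, a, 3) > 0);  /* swapping operands flips it */
    }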
diff --git a/test/validation/synchronizers/.gitignore b/test/validation/synchronizers/.gitignore
deleted file mode 100644
index 6aad9dfbd..000000000
--- a/test/validation/synchronizers/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-synchronizers_main
diff --git a/test/validation/synchronizers/Makefile.am b/test/validation/synchronizers/Makefile.am
deleted file mode 100644
index dd504d560..000000000
--- a/test/validation/synchronizers/Makefile.am
+++ /dev/null
@@ -1,10 +0,0 @@
-include ../Makefile.inc
-
-noinst_LTLIBRARIES = libtestsynchronizers.la
-libtestsynchronizers_la_SOURCES = synchronizers.c
-
-test_PROGRAMS = synchronizers_main$(EXEEXT)
-dist_synchronizers_main_SOURCES = synchronizers_main.c
-synchronizers_main_LDADD = libtestsynchronizers.la $(LIBCUNIT_COMMON) $(LIBODP)
-
-EXTRA_DIST = synchronizers.h
diff --git a/test/validation/synchronizers/synchronizers.h b/test/validation/synchronizers/synchronizers.h
deleted file mode 100644
index ad8db0b2e..000000000
--- a/test/validation/synchronizers/synchronizers.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2015, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef _ODP_TEST_SYNCHRONIZERS_H_
-#define _ODP_TEST_SYNCHRONIZERS_H_
-
-#include <odp_cunit_common.h>
-
-/* test functions: */
-void synchronizers_test_memory_barrier(void);
-void synchronizers_test_no_barrier_functional(void);
-void synchronizers_test_barrier_functional(void);
-void synchronizers_test_no_lock_functional(void);
-void synchronizers_test_spinlock_api(void);
-void synchronizers_test_spinlock_functional(void);
-void synchronizers_test_spinlock_recursive_api(void);
-void synchronizers_test_spinlock_recursive_functional(void);
-void synchronizers_test_ticketlock_api(void);
-void synchronizers_test_ticketlock_functional(void);
-void synchronizers_test_rwlock_api(void);
-void synchronizers_test_rwlock_functional(void);
-void synchronizers_test_rwlock_recursive_api(void);
-void synchronizers_test_rwlock_recursive_functional(void);
-void synchronizers_test_atomic_inc_dec(void);
-void synchronizers_test_atomic_add_sub(void);
-void synchronizers_test_atomic_fetch_inc_dec(void);
-void synchronizers_test_atomic_fetch_add_sub(void);
-
-/* test arrays: */
-extern odp_testinfo_t synchronizers_suite_barrier[];
-extern odp_testinfo_t synchronizers_suite_no_locking[];
-extern odp_testinfo_t synchronizers_suite_spinlock[];
-extern odp_testinfo_t synchronizers_suite_spinlock_recursive[];
-extern odp_testinfo_t synchronizers_suite_ticketlock[];
-extern odp_testinfo_t synchronizers_suite_rwlock[];
-extern odp_testinfo_t synchronizers_suite_rwlock_recursive[];
-extern odp_testinfo_t synchronizers_suite_atomic[];
-
-/* test array init/term functions: */
-int synchronizers_suite_init(void);
-
-/* test registry: */
-extern odp_suiteinfo_t synchronizers_suites[];
-
-/* executable init/term functions: */
-int synchronizers_init(void);
-
-/* main test program: */
-int synchronizers_main(void);
-
-#endif
diff --git a/test/validation/system/system.c b/test/validation/system/system.c
index 7f54338b8..ac34b2478 100644
--- a/test/validation/system/system.c
+++ b/test/validation/system/system.c
@@ -6,6 +6,7 @@
#include <ctype.h>
#include <odp.h>
+#include <odp/cpumask.h>
#include "odp_cunit_common.h"
#include "test_debug.h"
#include "system.h"
@@ -170,15 +171,32 @@ void system_test_odp_sys_cache_line_size(void)
CU_ASSERT(ODP_CACHE_LINE_SIZE == cache_size);
}
-void system_test_odp_sys_cpu_model_str(void)
+void system_test_odp_cpu_model_str(void)
{
char model[128];
- snprintf(model, 128, "%s", odp_sys_cpu_model_str());
+ snprintf(model, 128, "%s", odp_cpu_model_str());
CU_ASSERT(strlen(model) > 0);
CU_ASSERT(strlen(model) < 127);
}
+void system_test_odp_cpu_model_str_id(void)
+{
+ char model[128];
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ snprintf(model, 128, "%s", odp_cpu_model_str_id(cpu));
+ CU_ASSERT(strlen(model) > 0);
+ CU_ASSERT(strlen(model) < 127);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
void system_test_odp_sys_page_size(void)
{
uint64_t page;
@@ -196,22 +214,107 @@ void system_test_odp_sys_huge_page_size(void)
CU_ASSERT(0 < page);
}
-void system_test_odp_sys_cpu_hz(void)
+int system_check_odp_cpu_hz(void)
+{
+ if (odp_cpu_hz() == 0) {
+ fprintf(stderr, "odp_cpu_hz is not supported, skipping\n");
+ return ODP_TEST_INACTIVE;
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+void system_test_odp_cpu_hz(void)
+{
+ uint64_t hz = odp_cpu_hz();
+
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+}
+
+int system_check_odp_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ if (hz == 0) {
+ fprintf(stderr, "cpu %d does not support"
+ " odp_cpu_hz_id(),"
+ "skip that test\n", cpu);
+ return ODP_TEST_INACTIVE;
+ }
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+
+ return ODP_TEST_ACTIVE;
+}
+
+void system_test_odp_cpu_hz_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_id(cpu);
+ /* Test value sanity: less than 10GHz */
+ CU_ASSERT(hz < 10 * GIGA_HZ);
+ /* larger than 1kHz */
+ CU_ASSERT(hz > 1 * KILO_HZ);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
+void system_test_odp_cpu_hz_max(void)
{
uint64_t hz;
- hz = odp_sys_cpu_hz();
+ hz = odp_cpu_hz_max();
CU_ASSERT(0 < hz);
}
+void system_test_odp_cpu_hz_max_id(void)
+{
+ uint64_t hz;
+ odp_cpumask_t mask;
+ int i, num, cpu;
+
+ num = odp_cpumask_all_available(&mask);
+ cpu = odp_cpumask_first(&mask);
+
+ for (i = 0; i < num; i++) {
+ hz = odp_cpu_hz_max_id(cpu);
+ CU_ASSERT(0 < hz);
+ cpu = odp_cpumask_next(&mask, cpu);
+ }
+}
+
odp_testinfo_t system_suite[] = {
ODP_TEST_INFO(system_test_odp_version_numbers),
ODP_TEST_INFO(system_test_odp_cpu_count),
ODP_TEST_INFO(system_test_odp_sys_cache_line_size),
- ODP_TEST_INFO(system_test_odp_sys_cpu_model_str),
+ ODP_TEST_INFO(system_test_odp_cpu_model_str),
+ ODP_TEST_INFO(system_test_odp_cpu_model_str_id),
ODP_TEST_INFO(system_test_odp_sys_page_size),
ODP_TEST_INFO(system_test_odp_sys_huge_page_size),
- ODP_TEST_INFO(system_test_odp_sys_cpu_hz),
+ ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz,
+ system_check_odp_cpu_hz),
+ ODP_TEST_INFO_CONDITIONAL(system_test_odp_cpu_hz_id,
+ system_check_odp_cpu_hz_id),
+ ODP_TEST_INFO(system_test_odp_cpu_hz_max),
+ ODP_TEST_INFO(system_test_odp_cpu_hz_max_id),
ODP_TEST_INFO(system_test_odp_cpu_cycles),
ODP_TEST_INFO(system_test_odp_cpu_cycles_max),
ODP_TEST_INFO(system_test_odp_cpu_cycles_resolution),
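Two idioms recur through the system.c additions. The Hz tests are registered with ODP_TEST_INFO_CONDITIONAL, so a check function can return ODP_TEST_INACTIVE when the platform cannot report a frequency, and every per-CPU test walks the available CPU set the same way. That iteration idiom, lifted out on its own (for_each_cpu and visit are illustrative names):

    #include <odp.h>

    /* Visit every CPU currently available to ODP, exactly once. */
    static void for_each_cpu(void (*visit)(int cpu))
    {
        odp_cpumask_t mask;
        int i, num, cpu;

        num = odp_cpumask_all_available(&mask); /* fills mask, returns count */
        cpu = odp_cpumask_first(&mask);

        for (i = 0; i < num; i++) {
            visit(cpu);
            cpu = odp_cpumask_next(&mask, cpu); /* next set bit after cpu */
        }
    }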
diff --git a/test/validation/system/system.h b/test/validation/system/system.h
index 0c263f2c0..cf585a7b7 100644
--- a/test/validation/system/system.h
+++ b/test/validation/system/system.h
@@ -9,14 +9,23 @@
#include <odp_cunit_common.h>
+#define GIGA_HZ 1000000000ULL
+#define KILO_HZ 1000ULL
+
/* test functions: */
void system_test_odp_version_numbers(void);
void system_test_odp_cpu_count(void);
void system_test_odp_sys_cache_line_size(void);
-void system_test_odp_sys_cpu_model_str(void);
+void system_test_odp_cpu_model_str(void);
+void system_test_odp_cpu_model_str_id(void);
void system_test_odp_sys_page_size(void);
void system_test_odp_sys_huge_page_size(void);
-void system_test_odp_sys_cpu_hz(void);
+int system_check_odp_cpu_hz(void);
+void system_test_odp_cpu_hz(void);
+int system_check_odp_cpu_hz_id(void);
+void system_test_odp_cpu_hz_id(void);
+void system_test_odp_cpu_hz_max(void);
+void system_test_odp_cpu_hz_max_id(void);
void system_test_odp_cpu_cycles_max(void);
void system_test_odp_cpu_cycles(void);
void system_test_odp_cpu_cycles_diff(void);
diff --git a/test/validation/timer/timer.c b/test/validation/timer/timer.c
index 0bd67fb15..004670ad9 100644
--- a/test/validation/timer/timer.c
+++ b/test/validation/timer/timer.c
@@ -37,6 +37,10 @@ static odp_timer_pool_t tp;
/** @private Count of timeouts delivered too late */
static odp_atomic_u32_t ndelivtoolate;
+/** @private Sum of all allocated timers from all threads. Thread-local
+ * caches may make this number lower than the capacity of the pool */
+static odp_atomic_u32_t timers_allocated;
+
/** @private min() function */
static int min(int a, int b)
{
@@ -161,7 +165,7 @@ void timer_test_odp_timer_cancel(void)
/* Start all created timer pools */
odp_timer_pool_start();
- queue = odp_queue_create("timer_queue", ODP_QUEUE_TYPE_POLL, NULL);
+ queue = odp_queue_create("timer_queue", NULL);
if (queue == ODP_QUEUE_INVALID)
CU_FAIL_FATAL("Queue create failed");
@@ -274,13 +278,11 @@ static void handle_tmo(odp_event_t ev, bool stale, uint64_t prev_tick)
static void *worker_entrypoint(void *arg TEST_UNUSED)
{
int thr = odp_thread_id();
- uint32_t i;
+ uint32_t i, allocated;
unsigned seed = thr;
int rc;
- odp_queue_t queue = odp_queue_create("timer_queue",
- ODP_QUEUE_TYPE_POLL,
- NULL);
+ odp_queue_t queue = odp_queue_create("timer_queue", NULL);
if (queue == ODP_QUEUE_INVALID)
CU_FAIL_FATAL("Queue create failed");
@@ -290,21 +292,30 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
/* Prepare all timers */
for (i = 0; i < NTIMERS; i++) {
- tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
- if (tt[i].tim == ODP_TIMER_INVALID)
- CU_FAIL_FATAL("Failed to allocate timer");
tt[i].ev = odp_timeout_to_event(odp_timeout_alloc(tbp));
- if (tt[i].ev == ODP_EVENT_INVALID)
- CU_FAIL_FATAL("Failed to allocate timeout");
+ if (tt[i].ev == ODP_EVENT_INVALID) {
+ LOG_DBG("Failed to allocate timeout (%d/%d)\n",
+ i, NTIMERS);
+ break;
+ }
+ tt[i].tim = odp_timer_alloc(tp, queue, &tt[i]);
+ if (tt[i].tim == ODP_TIMER_INVALID) {
+ LOG_DBG("Failed to allocate timer (%d/%d)\n",
+ i, NTIMERS);
+ odp_timeout_free(odp_timeout_from_event(tt[i].ev));
+ break;
+ }
tt[i].ev2 = tt[i].ev;
tt[i].tick = TICK_INVALID;
}
+ allocated = i;
+ odp_atomic_fetch_add_u32(&timers_allocated, allocated);
odp_barrier_wait(&test_barrier);
/* Initial set all timers with a random expiration time */
uint32_t nset = 0;
- for (i = 0; i < NTIMERS; i++) {
+ for (i = 0; i < allocated; i++) {
uint64_t tck = odp_timer_current_tick(tp) + 1 +
odp_timer_ns_to_tick(tp,
(rand_r(&seed) % RANGE_MS)
@@ -336,7 +347,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
nrcv++;
}
prev_tick = odp_timer_current_tick(tp);
- i = rand_r(&seed) % NTIMERS;
+ i = rand_r(&seed) % allocated;
if (tt[i].ev == ODP_EVENT_INVALID &&
(rand_r(&seed) % 2 == 0)) {
/* Timer active, cancel it */
@@ -384,7 +395,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
/* Cancel and free all timers */
uint32_t nstale = 0;
- for (i = 0; i < NTIMERS; i++) {
+ for (i = 0; i < allocated; i++) {
(void)odp_timer_cancel(tt[i].tim, &tt[i].ev);
tt[i].tick = TICK_INVALID;
if (tt[i].ev == ODP_EVENT_INVALID)
@@ -430,7 +441,7 @@ static void *worker_entrypoint(void *arg TEST_UNUSED)
rc = odp_queue_destroy(queue);
CU_ASSERT(rc == 0);
- for (i = 0; i < NTIMERS; i++) {
+ for (i = 0; i < allocated; i++) {
if (tt[i].ev != ODP_EVENT_INVALID)
odp_event_free(tt[i].ev);
}
@@ -506,6 +517,9 @@ void timer_test_odp_timer_all(void)
/* Initialize the shared timeout counter */
odp_atomic_init_u32(&ndelivtoolate, 0);
+ /* Initialize the number of finally allocated elements */
+ odp_atomic_init_u32(&timers_allocated, 0);
+
/* Create and start worker threads */
pthrd_arg thrdarg;
thrdarg.testcase = 0;
@@ -522,7 +536,7 @@ void timer_test_odp_timer_all(void)
CU_FAIL("odp_timer_pool_info");
CU_ASSERT(tpinfo.param.num_timers == (unsigned)num_workers * NTIMERS);
CU_ASSERT(tpinfo.cur_timers == 0);
- CU_ASSERT(tpinfo.hwm_timers == (unsigned)num_workers * NTIMERS);
+ CU_ASSERT(tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated));
/* Destroy timer pool, all timers must have been freed */
odp_timer_pool_destroy(tp);
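The timer changes replace hard failure on pool exhaustion with a best-effort pattern: each worker allocates until the first failure, keeps the count it reached, and publishes that count through a shared atomic so the final hwm_timers assertion compares against what was actually allocated rather than the theoretical maximum. Reduced to a skeleton (try_alloc_one is a hypothetical stand-in for the timeout-plus-timer allocation above):

    uint32_t i, allocated;

    for (i = 0; i < NTIMERS; i++) {
        if (try_alloc_one(i) != 0) /* hypothetical: alloc timeout + timer */
            break;                 /* pool exhausted: stop, don't fail */
    }
    allocated = i;                 /* only tt[0..allocated-1] are live */

    /* Each worker adds its share; the main thread later asserts
     * tpinfo.hwm_timers == odp_atomic_load_u32(&timers_allocated). */
    odp_atomic_fetch_add_u32(&timers_allocated, allocated);

All later loops then run to 'allocated' instead of NTIMERS, which is why the set, cancel, and cleanup hunks above change their bounds.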