author Matias Elo <matias.elo@nokia.com> 2022-06-20 17:40:58 +0300
committer GitHub <noreply@github.com> 2022-06-20 17:40:58 +0300
commit 6cf762f28a96dc61db44697b9254327e41580a7f (patch)
tree a1de51fcf09a60bd14c260bb7c5dd44630193828
parent 929cd75ba6654e5f635295953cbb20ffdca20c3e (diff)
parent 6d365eb66573c4cd7fab868bd69395cc9acdc123 (diff)
Merge ODP v1.37.0.0 (tag: v1.37.0.0_DPDK_19.11)
Merge ODP linux-generic v1.37.0.0 into linux-dpdk
-rw-r--r-- CHANGELOG 25
-rw-r--r-- DEPENDENCIES 66
-rw-r--r-- configure.ac 2
-rw-r--r-- doc/Doxyfile_common 1
-rw-r--r-- include/odp/api/spec/classification.h 485
-rw-r--r-- include/odp/api/spec/crypto.h 15
-rw-r--r-- include/odp/api/spec/schedule.h 15
-rw-r--r-- include/odp/api/spec/schedule_types.h 4
-rw-r--r-- include/odp/api/spec/traffic_mngr.h 4
-rw-r--r-- include/odp/autoheader_internal.h.in 3
-rw-r--r-- platform/linux-dpdk/Makefile.am 1
-rw-r--r-- platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h 7
-rw-r--r-- platform/linux-dpdk/include/odp_buffer_internal.h 1
-rw-r--r-- platform/linux-dpdk/include/odp_eventdev_internal.h 4
-rw-r--r-- platform/linux-dpdk/include/odp_packet_internal.h 27
-rw-r--r-- platform/linux-dpdk/include/odp_packet_io_internal.h 4
-rw-r--r-- platform/linux-dpdk/include/odp_queue_basic_internal.h 4
-rw-r--r-- platform/linux-dpdk/odp_crypto.c 5
-rw-r--r-- platform/linux-dpdk/odp_packet.c 6
-rw-r--r-- platform/linux-dpdk/odp_pool.c 60
-rw-r--r-- platform/linux-dpdk/odp_queue_basic.c 5
-rw-r--r-- platform/linux-dpdk/odp_queue_eventdev.c 5
-rw-r--r-- platform/linux-dpdk/odp_schedule_eventdev.c 1
-rw-r--r-- platform/linux-dpdk/odp_schedule_if.c 5
-rw-r--r-- platform/linux-dpdk/odp_shared_memory.c 6
-rw-r--r-- platform/linux-dpdk/odp_timer.c 5
-rw-r--r-- platform/linux-generic/Makefile.am 5
-rw-r--r-- platform/linux-generic/arch/aarch64/cpu_flags.c 19
-rw-r--r-- platform/linux-generic/arch/aarch64/odp_crypto_armv8.c 38
-rw-r--r-- platform/linux-generic/include/odp/api/plat/packet_inline_types.h 7
-rw-r--r-- platform/linux-generic/include/odp_align_internal.h 67
-rw-r--r-- platform/linux-generic/include/odp_buffer_internal.h 1
-rw-r--r-- platform/linux-generic/include/odp_classification_datamodel.h 9
-rw-r--r-- platform/linux-generic/include/odp_macros_internal.h 63
-rw-r--r-- platform/linux-generic/include/odp_packet_dpdk.h 10
-rw-r--r-- platform/linux-generic/include/odp_packet_internal.h 39
-rw-r--r-- platform/linux-generic/include/odp_packet_io_internal.h 7
-rw-r--r-- platform/linux-generic/include/odp_parse_internal.h 4
-rw-r--r-- platform/linux-generic/include/odp_pool_internal.h 33
-rw-r--r-- platform/linux-generic/include/odp_queue_basic_internal.h 4
-rw-r--r-- platform/linux-generic/include/odp_queue_scalable_internal.h 6
-rw-r--r-- platform/linux-generic/include/odp_ring_internal.h 4
-rw-r--r-- platform/linux-generic/include/odp_ring_mpmc_internal.h 3
-rw-r--r-- platform/linux-generic/include/odp_ring_st_internal.h 2
-rw-r--r-- platform/linux-generic/include/odp_schedule_if.h 1
-rw-r--r-- platform/linux-generic/include/odp_schedule_scalable_ordered.h 5
-rw-r--r-- platform/linux-generic/libodp-linux.pc.in 2
-rw-r--r-- platform/linux-generic/m4/configure.m4 3
-rw-r--r-- platform/linux-generic/m4/odp_xdp.m4 15
-rw-r--r-- platform/linux-generic/odp_classification.c 116
-rw-r--r-- platform/linux-generic/odp_crypto_null.c 5
-rw-r--r-- platform/linux-generic/odp_crypto_openssl.c 8
-rw-r--r-- platform/linux-generic/odp_ipsec.c 7
-rw-r--r-- platform/linux-generic/odp_ipsec_sad.c 9
-rw-r--r-- platform/linux-generic/odp_ishm.c 1
-rw-r--r-- platform/linux-generic/odp_ishmphy.c 1
-rw-r--r-- platform/linux-generic/odp_ishmpool.c 13
-rw-r--r-- platform/linux-generic/odp_name_table.c 12
-rw-r--r-- platform/linux-generic/odp_packet.c 4
-rw-r--r-- platform/linux-generic/odp_pcapng.c 19
-rw-r--r-- platform/linux-generic/odp_pkt_queue.c 6
-rw-r--r-- platform/linux-generic/odp_pool.c 108
-rw-r--r-- platform/linux-generic/odp_pool_mem_src_ops.c 22
-rw-r--r-- platform/linux-generic/odp_queue_basic.c 7
-rw-r--r-- platform/linux-generic/odp_queue_scalable.c 10
-rw-r--r-- platform/linux-generic/odp_schedule_basic.c 12
-rw-r--r-- platform/linux-generic/odp_schedule_if.c 5
-rw-r--r-- platform/linux-generic/odp_schedule_scalable.c 6
-rw-r--r-- platform/linux-generic/odp_schedule_sp.c 9
-rw-r--r-- platform/linux-generic/odp_stash.c 11
-rw-r--r-- platform/linux-generic/odp_timer.c 8
-rw-r--r-- platform/linux-generic/odp_timer_wheel.c 2
-rw-r--r-- platform/linux-generic/odp_traffic_mngr.c 47
-rw-r--r-- platform/linux-generic/pktio/dpdk.c 116
-rw-r--r-- platform/linux-generic/pktio/io_ops.c 3
-rw-r--r-- platform/linux-generic/pktio/ipc.c 12
-rw-r--r-- platform/linux-generic/pktio/loop.c 15
-rw-r--r-- platform/linux-generic/pktio/netmap.c 12
-rw-r--r-- platform/linux-generic/pktio/socket.c 2
-rw-r--r-- platform/linux-generic/pktio/socket_mmap.c 4
-rw-r--r-- platform/linux-generic/pktio/socket_xdp.c 688
-rw-r--r-- test/performance/odp_l2fwd.c 469
-rw-r--r-- test/performance/odp_packet_gen.c 48
-rw-r--r-- test/performance/odp_sched_latency.c 245
-rw-r--r-- test/validation/api/buffer/buffer.c 187
-rw-r--r-- test/validation/api/classification/odp_classification_common.c 5
-rw-r--r-- test/validation/api/classification/odp_classification_test_pmr.c 119
-rw-r--r-- test/validation/api/classification/odp_classification_testsuites.h 3
-rw-r--r-- test/validation/api/crypto/odp_crypto_test_inp.c 3
-rw-r--r-- test/validation/api/pktio/pktio.c 1
-rw-r--r-- test/validation/api/scheduler/scheduler.c 52
-rw-r--r-- test/validation/api/timer/timer.c 43
92 files changed, 2616 insertions, 972 deletions
diff --git a/CHANGELOG b/CHANGELOG
index e182f0ad5..6053d27a5 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,28 @@
+== OpenDataPlane (1.37.0.0)
+
+=== Backward incompatible API changes
+==== Classifier
+* Deprecate `odp_cos_with_l3_qos()` function. Use new `ODP_PMR_IP_DSCP` PMR
+term instead.
+
+=== Backward compatible API changes
+==== Classifier
+* Add new PMR term enumeration `ODP_PMR_IP_DSCP` for Differentiated Services
+Code Point (DSCP) bits in IP header.
+* Add new PMR term enumeration `ODP_PMR_VLAN_PCP_0` for Priority Code Point
+(PCP) bits in VLAN header.
+* Clarify `ODP_PMR_ETHTYPE_0`, `ODP_PMR_VLAN_ID_0`, and `ODP_PMR_VLAN_ID_X`
+PMR term specifications.
+* Remove unused `odp_cos_hdr_flow_fields_t` enumeration.
+
+==== Scheduler
+* Add new `odp_schedule_order_wait()` function which waits until the currently
+held scheduling context is the first in order.
+
+=== Implementation
+==== Packet IO
+* Add new experimental AF_XDP socket based packet IO device.
+
== OpenDataPlane (1.36.0.0)
=== Backward incompatible API changes
diff --git a/DEPENDENCIES b/DEPENDENCIES
index 85a4cf242..b18279da2 100644
--- a/DEPENDENCIES
+++ b/DEPENDENCIES
@@ -291,6 +291,72 @@ Prerequisites for building the OpenDataPlane (ODP) API
1024MB of memory:
$ sudo ODP_PKTIO_DPDK_PARAMS="-m 1024" ./test/performance/odp_l2fwd -i 0 -c 1
+3.6 AF_XDP socket based packet I/O support (optional)
+
+ Use an AF_XDP socket for packet I/O. At the moment, only the zero-copy
+ variant is supported, which requires kernel version 5.4 or higher.
+ Additionally, if packet pools are to be shared between packet I/Os, kernel
+ version 5.10 or higher is required.
+
+ More information about XDP and AF_XDP can be found here:
+ https://www.kernel.org/doc/Documentation/networking/af_xdp.rst
+
+ The status of the implementation is **experimental**: some packet length,
+ packet segment length and pool size combinations that would otherwise
+ conform to reported capabilities may cause issues.
+
+3.6.1 AF_XDP socket packet I/O requirements
+
+ The AF_XDP socket packet I/O implementation requires the libxdp and libbpf
+ libraries. They can be fetched from the xdp-project repository on GitHub:
+
+ $ git clone https://github.com/xdp-project/xdp-tools
+
+ (Contains submodules which should be cloned as well.)
+
+ Additional packages, such as llvm-dev and gcc-multilib, might need to be
+ installed as well.
+
+ $ ./configure
+ $ make
+
+ After building, the libraries should be installed:
+
+ $ cd <path to built libxdp>
+ $ make install
+ $ cd <path to built libbpf>
+ $ make install
+
+3.6.2 Build ODP with AF_XDP socket packet I/O support
+
+ After building and installing libxdp and libbpf, ODP can be configured to be
+ built with AF_XDP support (pass PKG_CONFIG_PATH if needed).
+
+ $ ./configure --enable-xdp
+
+3.6.3 Running ODP with AF_XDP socket packet I/O
+
+ At the moment, each AF_XDP socket packet I/O binds to a single TRX queue,
+ which means that the NIC(s) of the environment have to be configured
+ accordingly.
+
+ $ ethtool -L <if name> combined 1
+
+ Additionally, with some NICs (e.g. Mellanox), when zero-copy XDP is in use,
+ the NIC adjusts the queue configuration by adding queues on top of the
+ configured single TRX queue. This requires a forwarding rule:
+
+ $ ethtool -N <if name> flow-type ether dst <mac of if> action 1
+
+ Which queue to bind to in a given interface can be controlled with an
+ environment variable when starting an ODP executable:
+
+ $ ODP_PKTIO_XDP_PARAMS="<if name>:<queue index> <if name>:<queue index> ..." ./<odp executable> ...
+
+ The parameter is a string of interface-queue index pairs, where the
+ interface and queue index are separated by a colon and the pairs are
+ separated by whitespace. If the environment variable is not set, queue zero
+ (0) is chosen for all AF_XDP interfaces.
+
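 [Editor's note: a concrete instance of the ODP_PKTIO_XDP_PARAMS syntax
 above, with hypothetical interface names and an illustrative odp_l2fwd
 invocation, would look like:

   $ ODP_PKTIO_XDP_PARAMS="eth0:1 eth1:3" ./test/performance/odp_l2fwd -i eth0,eth1 -c 2

 binding the AF_XDP sockets of eth0 and eth1 to queue indexes 1 and 3.]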
4.0 Packages needed to build API tests
CUnit test framework version 2.1-3 is required
diff --git a/configure.ac b/configure.ac
index 9a4cce34a..8e1ffbd61 100644
--- a/configure.ac
+++ b/configure.ac
@@ -3,7 +3,7 @@ AC_PREREQ([2.5])
# ODP API version
##########################################################################
m4_define([odpapi_generation_version], [1])
-m4_define([odpapi_major_version], [36])
+m4_define([odpapi_major_version], [37])
m4_define([odpapi_minor_version], [0])
m4_define([odpapi_point_version], [0])
m4_define([odpapi_version],
diff --git a/doc/Doxyfile_common b/doc/Doxyfile_common
index 29a311491..2aa54ac48 100644
--- a/doc/Doxyfile_common
+++ b/doc/Doxyfile_common
@@ -38,4 +38,5 @@ PREDEFINED = __GNUC__ \
__x86_64__ \
ODP_PACKED \
ODP_DEPRECATE(x)=x \
+ ODP_DEPRECATED_API=1 \
"ODP_HANDLE_T(type)=odp_handle_t type"
diff --git a/include/odp/api/spec/classification.h b/include/odp/api/spec/classification.h
index 1c76ea192..f36826058 100644
--- a/include/odp/api/spec/classification.h
+++ b/include/odp/api/spec/classification.h
@@ -11,8 +11,8 @@
* ODP classification descriptor
*/
-#ifndef ODP_API_SPEC_CLASSIFY_H_
-#define ODP_API_SPEC_CLASSIFY_H_
+#ifndef ODP_API_SPEC_CLASSIFICATION_H_
+#define ODP_API_SPEC_CLASSIFICATION_H_
#include <odp/visibility_begin.h>
#ifdef __cplusplus
@@ -23,6 +23,7 @@ extern "C" {
#include <odp/api/pool_types.h>
#include <odp/api/std_types.h>
#include <odp/api/threshold.h>
+#include <odp/api/deprecated.h>
/** @defgroup odp_classification ODP CLASSIFICATION
* Packet input classification.
@@ -30,26 +31,171 @@ extern "C" {
*/
/**
+ * @typedef odp_pmr_t
+ * Packet matching rule handle
+ */
+
+/**
+ * @def ODP_PMR_INVALID
+ * Invalid packet matching rule handle
+ */
+
+/**
* @typedef odp_cos_t
- * ODP Class of service handle
+ * Class of service handle
*/
/**
* @def ODP_COS_INVALID
- * This value is returned from odp_cls_cos_create() on failure.
+ * Invalid class of service handle
*/
/**
* @def ODP_COS_NAME_LEN
- * Maximum ClassOfService name length in chars including null char
+ * Maximum class of service name length in chars including null char
*/
/**
- * @def ODP_PMR_INVALID
- * Invalid odp_pmr_t value.
- * This value is returned from odp_cls_pmr_create()
- * function on failure.
+ * Packet Matching Rule terms
+ *
+ * This enumeration selects the protocol field that is matched against PMR
+ * value/mask or value range. Protocol field values and masks are passed in big
+ * endian (network endian) format. However, ODP_PMR_LEN value and range are
+ * passed in CPU native endian (uint32_t words), as the term does not represent
+ * a protocol field.
+ *
+ * PMR value/mask data size is term specific. This size must be set into val_sz
+ * field of odp_pmr_param_t. There is no alignment requirement for PMR
+ * value/mask data.
*/
+typedef enum {
+ /** Total length of received packet. Exceptionally, value and mask are
+ * uint32_t (val_sz = 4) in CPU endian. */
+ ODP_PMR_LEN,
+
+ /** Initial (outer) Ethertype only (val_sz = 2)
+ *
+ * PMR matches the Ethertype field when the packet does not have VLAN headers. When there
+ * are VLAN headers, it matches the Tag protocol identifier (TPID) field of the first VLAN
+ * header. I.e. it matches a field at the same offset from the start of the packet in both
+ * cases.
+ */
+ ODP_PMR_ETHTYPE_0,
+
+ /** Ethertype of most inner VLAN tag (val_sz = 2) */
+ ODP_PMR_ETHTYPE_X,
+
+ /** First (outer) VLAN ID (val_sz = 2)
+ *
+ * VLAN ID value and mask are stored into 12 least significant bits of a 16-bit word.
+ * The word is passed in big endian format.
+ */
+ ODP_PMR_VLAN_ID_0,
+
+ /** Last (most inner) VLAN ID (val_sz = 2)
+ *
+ * VLAN ID value and mask are stored into 12 least significant bits of a 16-bit word.
+ * The word is passed in big endian format.
+ */
+ ODP_PMR_VLAN_ID_X,
+
+ /** PCP bits in the first (outer) VLAN header (val_sz = 1)
+ *
+ * Priority Code Point (PCP) value is stored into three least significant bits of
+ * the octet pointed by odp_pmr_param_t::value. The same applies for odp_pmr_param_t::mask.
+ */
+ ODP_PMR_VLAN_PCP_0,
+
+ /** Destination MAC address (val_sz = 6) */
+ ODP_PMR_DMAC,
+
+ /** IPv4 Protocol or IPv6 Next Header (val_sz = 1) */
+ ODP_PMR_IPPROTO,
+
+ /** Differentiated Services Code Point (DSCP) bits in IPv4 or IPv6 header (val_sz = 1)
+ *
+ * DSCP value is stored into six least significant bits of the octet pointed by
+ * odp_pmr_param_t::value. The same applies for odp_pmr_param_t::mask.
+ */
+ ODP_PMR_IP_DSCP,
+
+ /** Destination UDP port (val_sz = 2) */
+ ODP_PMR_UDP_DPORT,
+
+ /** Destination TCP port (val_sz = 2) */
+ ODP_PMR_TCP_DPORT,
+
+ /** Source UDP port (val_sz = 2) */
+ ODP_PMR_UDP_SPORT,
+
+ /** Source TCP port (val_sz = 2) */
+ ODP_PMR_TCP_SPORT,
+
+ /** Source IPv4 address (val_sz = 4) */
+ ODP_PMR_SIP_ADDR,
+
+ /** Destination IPv4 address (val_sz = 4) */
+ ODP_PMR_DIP_ADDR,
+
+ /** Source IPv6 address (val_sz = 16) */
+ ODP_PMR_SIP6_ADDR,
+
+ /** Destination IPv6 address (val_sz = 16) */
+ ODP_PMR_DIP6_ADDR,
+
+ /** IPsec session identifier (val_sz = 4)*/
+ ODP_PMR_IPSEC_SPI,
+
+ /** NVGRE/VXLAN network identifier (val_sz = 4) */
+ ODP_PMR_LD_VNI,
+
+ /**
+ * Custom frame match rule
+ *
+ * PMR offset is counted from the start of the packet. The match is
+ * defined by the offset, the expected value, and its size. Custom frame
+ * rules must be applied before any other PMR.
+ */
+ ODP_PMR_CUSTOM_FRAME,
+
+ /**
+ * Custom layer 3 match rule
+ *
+ * PMR offset is counted from the start of layer 3 in the packet.
+ * The match is defined by the offset, the expected value, and its size.
+ * Custom L3 rules may be combined with other PMRs.
+ */
+ ODP_PMR_CUSTOM_L3,
+
+ /** IGMP Group address (val_sz = 4), implies IPPROTO=2 */
+ ODP_PMR_IGMP_GRP_ADDR,
+
+ /** ICMP identifier (val_sz = 2), implies IPPROTO=1 and ICMP_TYPE=0 or ICMP_TYPE=8 */
+ ODP_PMR_ICMP_ID,
+
+ /** ICMP type (val_sz = 1), implies IPPROTO=1 */
+ ODP_PMR_ICMP_TYPE,
+
+ /** ICMP code (val_sz = 1), implies IPPROTO=1 */
+ ODP_PMR_ICMP_CODE,
+
+ /** Source SCTP port (val_sz = 2), implies IPPROTO=132 */
+ ODP_PMR_SCTP_SPORT,
+
+ /** Destination SCTP port (val_sz = 2), implies IPPROTO=132 */
+ ODP_PMR_SCTP_DPORT,
+
+ /** GTPv1 tunnel endpoint identifier (val_sz = 4)
+ *
+ * Matches if and only if IP protocol is UDP, UDP destination port
+ * is 2152 and the UDP payload interpreted as GTP header has GTP
+ * version 1 and TEID as specified.
+ */
+ ODP_PMR_GTPV1_TEID,
+
+ /** Inner header may repeat above values with this offset */
+ ODP_PMR_INNER_HDR_OFF = 32
+
+} odp_cls_pmr_term_t;
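
[Editor's note: a minimal sketch of using the new ODP_PMR_VLAN_PCP_0 term
with the existing single-rule creation API; the CoS handles and the helper
name are illustrative, not part of this patch:

    /* Match PCP 5 in the outer VLAN header: value and mask occupy the three
     * least significant bits of one octet (val_sz = 1). */
    static odp_pmr_t match_pcp5(odp_cos_t default_cos, odp_cos_t pcp_cos)
    {
            odp_pmr_param_t param;
            uint8_t pcp = 5, mask = 0x7;

            odp_cls_pmr_param_init(&param);
            param.term = ODP_PMR_VLAN_PCP_0;
            param.match.value = &pcp;
            param.match.mask = &mask;
            param.val_sz = 1;

            /* Matching packets of default_cos are reassigned to pcp_cos */
            return odp_cls_pmr_create(&param, 1, default_cos, pcp_cos);
    }
]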
/**
* Supported PMR term values
@@ -69,10 +215,14 @@ typedef union odp_cls_pmr_terms_t {
uint64_t vlan_id_0:1;
/** Last VLAN ID (inner) */
uint64_t vlan_id_x:1;
+ /** PCP in the first VLAN header (#ODP_PMR_VLAN_PCP_0) */
+ uint64_t vlan_pcp_0:1;
/** destination MAC address */
uint64_t dmac:1;
/** IP Protocol or IPv6 Next Header */
uint64_t ip_proto:1;
+ /** DSCP in IP header (#ODP_PMR_IP_DSCP) */
+ uint64_t ip_dscp:1;
/** Destination UDP port, implies IPPROTO=17 */
uint64_t udp_dport:1;
/** Destination TCP port implies IPPROTO=6 */
@@ -114,10 +264,89 @@ typedef union odp_cls_pmr_terms_t {
/** GTPv1 tunnel endpoint identifier */
uint64_t gtpv1_teid:1;
} bit;
+
/** All bits of the bit field structure */
uint64_t all_bits;
+
} odp_cls_pmr_terms_t;
+/**
+ * Packet Matching Rule parameter structure
+ *
+ * Match value/mask size and endianness are defined in PMR term documentation
+ * (@see odp_cls_pmr_term_t). Most values and masks are passed in big
+ * endian format without data alignment requirement. ODP_PMR_LEN is
+ * an exception to this (uint32_t in CPU endian).
+ */
+typedef struct odp_pmr_param_t {
+ /** Packet Matching Rule term */
+ odp_cls_pmr_term_t term;
+
+ /** True if the value is a range and false if a single match. Default is false. */
+ odp_bool_t range_term;
+
+ /** Variant mappings for types of matches */
+ union {
+ /** Parameters for single-valued matches */
+ struct {
+ /** Points to the value to be matched. Value size and
+ * endianness are defined by the term used. Values of
+ * protocol fields are defined in big endian format.
+ */
+ const void *value;
+
+ /** Mask of the bits to be matched. The same size and
+ * endianness are used as with the value. */
+ const void *mask;
+ } match;
+
+ /** Parameter for range value matches */
+ struct {
+ /** Start value of the range */
+ const void *val_start;
+
+ /** End value of the range */
+ const void *val_end;
+ } range;
+ };
+
+ /** Size of the value to be matched */
+ uint32_t val_sz;
+
+ /** Offset to the value
+ *
+ * Byte offset to the value to be matched in a packet. PMR term defines
+ * starting point for the offset. Used only with custom PMR terms,
+ * ignored with other terms.
+ */
+ uint32_t offset;
+
+} odp_pmr_param_t;
+
+/**
+ * Packet Matching Rule creation options
+ */
+typedef struct odp_pmr_create_opt_t {
+ /** PMR terms
+ *
+ * Array of odp_pmr_param_t entries, one entry per term desired.
+ * Use odp_cls_pmr_param_init() to initialize parameters into their default values.
+ */
+ odp_pmr_param_t *terms;
+
+ /** Number of terms in the match rule. */
+ int num_terms;
+
+ /** Classification mark value
+ *
+ * Value to be set in the CLS mark of a packet when the packet matches this
+ * Packet Matching Rule. The default value is zero. The maximum value is indicated in
+ * odp_cls_capability_t::max_mark capability.
+ */
+ uint64_t mark;
+
+} odp_pmr_create_opt_t;
+
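[Editor's note: for the multi-term path, a sketch pairing this options
structure with two terms and a classification mark; src_cos/dst_cos are
pre-created handles and the matched values are illustrative:

    odp_pmr_param_t terms[2];
    odp_pmr_create_opt_t opt;
    uint8_t proto = 17, proto_mask = 0xff;          /* UDP */
    odp_u16be_t dport = odp_cpu_to_be_16(4789);     /* VXLAN port */
    odp_u16be_t dport_mask = odp_cpu_to_be_16(0xffff);

    odp_cls_pmr_param_init(&terms[0]);
    terms[0].term = ODP_PMR_IPPROTO;
    terms[0].match.value = &proto;
    terms[0].match.mask = &proto_mask;
    terms[0].val_sz = 1;

    odp_cls_pmr_param_init(&terms[1]);
    terms[1].term = ODP_PMR_UDP_DPORT;
    terms[1].match.value = &dport;
    terms[1].match.mask = &dport_mask;
    terms[1].val_sz = 2;

    odp_cls_pmr_create_opt_init(&opt);
    opt.terms = terms;
    opt.num_terms = 2;
    opt.mark = 1;   /* must not exceed odp_cls_capability_t::max_mark */

    odp_pmr_t pmr = odp_cls_pmr_create_opt(&opt, src_cos, dst_cos);
]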
/** Random Early Detection (RED)
* Random Early Detection is enabled to initiate a drop probability for the
* incoming packet when the packets in the queue/pool cross the specified
@@ -153,6 +382,7 @@ typedef struct odp_red_param_t {
* the minimum threshold value and is disabled otherwise
*/
odp_threshold_t threshold;
+
} odp_red_param_t;
/** Back pressure (BP)
@@ -174,6 +404,7 @@ typedef struct odp_bp_param_t {
* @see odp_red_param_t for 'resource usage' documentation.
*/
odp_threshold_t threshold;
+
} odp_bp_param_t;
/**
@@ -346,28 +577,8 @@ typedef struct odp_cls_capability_t {
typedef enum {
ODP_COS_DROP_POOL, /**< Follow buffer pool drop policy */
ODP_COS_DROP_NEVER, /**< Never drop, ignoring buffer pool policy */
-} odp_cls_drop_t;
-/**
- * Packet header field enumeration
- * for fields that may be used to calculate
- * the flow signature, if present in a packet.
- */
-typedef enum {
- ODP_COS_FHDR_IN_PKTIO, /**< Ingress port number */
- ODP_COS_FHDR_L2_SAP, /**< Ethernet Source MAC address */
- ODP_COS_FHDR_L2_DAP, /**< Ethernet Destination MAC address */
- ODP_COS_FHDR_L2_VID, /**< Ethernet VLAN ID */
- ODP_COS_FHDR_L3_FLOW, /**< IPv6 flow_id */
- ODP_COS_FHDR_L3_SAP, /**< IP source address */
- ODP_COS_FHDR_L3_DAP, /**< IP destination address */
- ODP_COS_FHDR_L4_PROTO, /**< IP protocol (e.g. TCP/UDP/ICMP) */
- ODP_COS_FHDR_L4_SAP, /**< Transport source port */
- ODP_COS_FHDR_L4_DAP, /**< Transport destination port */
- ODP_COS_FHDR_IPSEC_SPI, /**< IPsec session identifier */
- ODP_COS_FHDR_LD_VNI, /**< NVGRE/VXLAN network identifier */
- ODP_COS_FHDR_USER /**< Application-specific header field(s) */
-} odp_cos_hdr_flow_fields_t;
+} odp_cls_drop_t;
/**
* Enumeration of actions for CoS.
@@ -387,6 +598,7 @@ typedef enum {
* their originating pool.
*/
ODP_COS_ACTION_DROP,
+
} odp_cos_action_t;
/**
@@ -458,6 +670,7 @@ typedef struct odp_cls_cos_param {
/** Packet input vector configuration */
odp_pktin_vector_config_t vector;
+
} odp_cls_cos_param_t;
/**
@@ -612,8 +825,9 @@ int odp_cos_with_l2_priority(odp_pktio_t pktio_in,
odp_cos_t cos_table[]);
/**
- * Request to override per-port class of service
- * based on Layer-3 priority field if present.
+ * Request to override per-port class of service based on Layer-3 priority field if present.
+ *
+ * @deprecated Use #ODP_PMR_IP_DSCP instead.
*
* @param pktio_in Ingress port identifier.
* @param num_qos Number of allowed Layer-3 QoS levels.
@@ -624,14 +838,9 @@ int odp_cos_with_l2_priority(odp_pktio_t pktio_in,
*
* @retval 0 on success
* @retval <0 on failure
- *
- * @note Optional.
*/
-int odp_cos_with_l3_qos(odp_pktio_t pktio_in,
- uint32_t num_qos,
- uint8_t qos_table[],
- odp_cos_t cos_table[],
- odp_bool_t l3_preference);
+int ODP_DEPRECATE(odp_cos_with_l3_qos)(odp_pktio_t pktio_in, uint32_t num_qos, uint8_t qos_table[],
+ odp_cos_t cos_table[], odp_bool_t l3_preference);
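
[Editor's note: a rough migration sketch, replacing one entry of the
deprecated per-port QoS table with a DSCP match; the CoS handle names and
the DSCP value are illustrative:

    odp_pmr_param_t param;
    uint8_t dscp = 46, mask = 0x3f;  /* EF; all six DSCP bits significant */

    odp_cls_pmr_param_init(&param);
    param.term = ODP_PMR_IP_DSCP;
    param.match.value = &dscp;
    param.match.mask = &mask;
    param.val_sz = 1;

    /* Replaces one odp_cos_with_l3_qos() table entry */
    odp_pmr_t pmr = odp_cls_pmr_create(&param, 1, default_cos, ef_cos);
]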
/**
* Get statistics for a CoS
@@ -675,204 +884,6 @@ int odp_cls_queue_stats(odp_cos_t cos, odp_queue_t queue,
odp_cls_queue_stats_t *stats);
/**
- * @typedef odp_pmr_t
- * PMR - Packet Matching Rule
- * Up to 32 bit of ternary matching of one of the available header fields
- */
-
-/**
- * Packet Matching Rule terms
- *
- * This enumeration selects the protocol field that is matched against PMR
- * value/mask or value range. Protocol field values and masks are passed in big
- * endian (network endian) format. However, ODP_PMR_LEN value and range are
- * passed in CPU native endian (uint32_t words), as the term does not represent
- * a protocol field.
- *
- * PMR value/mask data size is term specific. This size must be set into val_sz
- * field of odp_pmr_param_t. There is no alignment requirement for PMR
- * value/mask data.
- */
-typedef enum {
- /** Total length of received packet. Exceptionally, value and mask are
- * uint32_t (val_sz = 4) in CPU endian. */
- ODP_PMR_LEN,
-
- /** Initial (outer) Ethertype only (val_sz = 2) */
- ODP_PMR_ETHTYPE_0,
-
- /** Ethertype of most inner VLAN tag (val_sz = 2) */
- ODP_PMR_ETHTYPE_X,
-
- /** First (outer) VLAN ID (val_sz = 2) */
- ODP_PMR_VLAN_ID_0,
-
- /** Last (most inner) VLAN ID (val_sz = 2) */
- ODP_PMR_VLAN_ID_X,
-
- /** Destination MAC address (val_sz = 6) */
- ODP_PMR_DMAC,
-
- /** IPv4 Protocol or IPv6 Next Header (val_sz = 1) */
- ODP_PMR_IPPROTO,
-
- /** Destination UDP port (val_sz = 2) */
- ODP_PMR_UDP_DPORT,
-
- /** Destination TCP port (val_sz = 2) */
- ODP_PMR_TCP_DPORT,
-
- /** Source UDP port (val_sz = 2) */
- ODP_PMR_UDP_SPORT,
-
- /** Source TCP port (val_sz = 2) */
- ODP_PMR_TCP_SPORT,
-
- /** Source IPv4 address (val_sz = 4) */
- ODP_PMR_SIP_ADDR,
-
- /** Destination IPv4 address (val_sz = 4) */
- ODP_PMR_DIP_ADDR,
-
- /** Source IPv6 address (val_sz = 16) */
- ODP_PMR_SIP6_ADDR,
-
- /** Destination IPv6 address (val_sz = 16) */
- ODP_PMR_DIP6_ADDR,
-
- /** IPsec session identifier (val_sz = 4)*/
- ODP_PMR_IPSEC_SPI,
-
- /** NVGRE/VXLAN network identifier (val_sz = 4) */
- ODP_PMR_LD_VNI,
-
- /**
- * Custom frame match rule
- *
- * PMR offset is counted from the start of the packet. The match is
- * defined by the offset, the expected value, and its size. Custom frame
- * rules must be applied before any other PMR.
- */
- ODP_PMR_CUSTOM_FRAME,
-
- /**
- * Custom layer 3 match rule
- *
- * PMR offset is counted from the start of layer 3 in the packet.
- * The match is defined by the offset, the expected value, and its size.
- * Custom L3 rules may be combined with other PMRs.
- */
- ODP_PMR_CUSTOM_L3,
-
- /** IGMP Group address (val_sz = 4), implies IPPROTO=2 */
- ODP_PMR_IGMP_GRP_ADDR,
-
- /** ICMP identifier (val_sz = 2), implies IPPROTO=1 and ICMP_TYPE=0 or ICMP_TYPE=8 */
- ODP_PMR_ICMP_ID,
-
- /** ICMP type (val_sz = 1), implies IPPROTO=1 */
- ODP_PMR_ICMP_TYPE,
-
- /** ICMP code (val_sz = 1), implies IPPROTO=1 */
- ODP_PMR_ICMP_CODE,
-
- /** Source SCTP port (val_sz = 2), implies IPPROTO=132 */
- ODP_PMR_SCTP_SPORT,
-
- /** Destination SCTP port (val_sz = 2), implies IPPROTO=132 */
- ODP_PMR_SCTP_DPORT,
-
- /** GTPv1 tunnel endpoint identifier (val_sz = 4)
- *
- * Matches if and only if IP protocol is UDP, UDP destination port
- * is 2152 and the UDP payload interpreted as GTP header has GTP
- * version 1 and TEID as specified.
- */
- ODP_PMR_GTPV1_TEID,
-
- /** Inner header may repeat above values with this offset */
- ODP_PMR_INNER_HDR_OFF = 32
-
-} odp_cls_pmr_term_t;
-
-/**
- * Packet Matching Rule parameter structure
- *
- * Match value/mask size and endianness are defined in PMR term documentation
- * (@see odp_cls_pmr_term_t). Most values and masks are passed in big
- * endian format without data alignment requirement. ODP_PMR_LEN is
- * an exception to this (uint32_t in CPU endian).
- */
-typedef struct odp_pmr_param_t {
- /** Packet Matching Rule term */
- odp_cls_pmr_term_t term;
-
- /** True if the value is range and false if match. Default is false. */
- odp_bool_t range_term;
-
- /** Variant mappings for types of matches */
- union {
- /** Parameters for single-valued matches */
- struct {
- /** Points to the value to be matched. Value size and
- * endianness are defined by the term used. Values of
- * protocol fields are defined in big endian format.
- */
- const void *value;
-
- /** Mask of the bits to be matched. The same size and
- * endianness is used than with the value. */
- const void *mask;
- } match;
-
- /** Parameter for range value matches */
- struct {
- /** Start value of the range */
- const void *val_start;
-
- /** End value of the range */
- const void *val_end;
- } range;
- };
-
- /** Size of the value to be matched */
- uint32_t val_sz;
-
- /** Offset to the value
- *
- * Byte offset to the value to be matched in a packet. PMR term defines
- * starting point for the offset. Used only with custom PMR terms,
- * ignored with other terms.
- */
- uint32_t offset;
-
-} odp_pmr_param_t;
-
-/**
- * Packet Matching Rule creation options
- */
-typedef struct odp_pmr_create_opt_t {
- /** PMR terms
- *
- * Array of odp_pmr_param_t entries, one entry per term desired.
- * Use odp_cls_pmr_param_init() to initialize parameters into their default values.
- */
- odp_pmr_param_t *terms;
-
- /** Number of terms in the match rule. */
- int num_terms;
-
- /** Classification mark value
- *
- * Value to be set in the CLS mark of a packet when the packet matches this
- * Packet Matching Rule. The default value is zero. The maximum value is indicated in
- * odp_cls_capability_t::max_mark capability.
- */
- uint64_t mark;
-
-} odp_pmr_create_opt_t;
-
-/**
* Initialize packet matching rule parameters
*
* Initialize an odp_pmr_param_t to its default values for all fields
diff --git a/include/odp/api/spec/crypto.h b/include/odp/api/spec/crypto.h
index 7cd69f4d2..b45731e1b 100644
--- a/include/odp/api/spec/crypto.h
+++ b/include/odp/api/spec/crypto.h
@@ -498,7 +498,7 @@ typedef struct odp_crypto_key {
/**
* Crypto API IV structure
*
- * @deprecated
+ * @deprecated Use per-packet IV in crypto operation parameters
*/
typedef struct odp_crypto_iv {
/** IV data
@@ -594,14 +594,17 @@ typedef struct odp_crypto_session_param_t {
*/
union {
#if ODP_DEPRECATED_API
- /** Cipher Initialization Vector (IV) */
+ /** @deprecated Cipher IV */
odp_crypto_iv_t ODP_DEPRECATE(cipher_iv);
#endif
/** Cipher IV length */
struct {
#if ODP_DEPRECATED_API
- /** Unused padding field */
+ /** @cond
+ * Unused padding field
+ */
uint8_t *dummy_padding_0;
+ /** @endcond */
#endif
/** Length of cipher initialization vector.
* Default value is zero.
@@ -641,13 +644,17 @@ typedef struct odp_crypto_session_param_t {
*/
union {
#if ODP_DEPRECATED_API
+ /** @deprecated Authentication IV */
odp_crypto_iv_t ODP_DEPRECATE(auth_iv);
#endif
/** Authentication IV length */
struct {
#if ODP_DEPRECATED_API
- /** Unused padding field */
+ /** @cond
+ * Unused padding field
+ */
uint8_t *dummy_padding_1;
+ /** @endcond */
#endif
/** Length of authentication initialization vector.
* Default value is zero.
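[Editor's note: with the IV structure deprecated, only the IV length is
fixed at session creation and the IV bytes travel with each operation. A
minimal sketch of the non-deprecated usage, with session creation and
packet handling elided:

    odp_crypto_session_param_t sp;

    odp_crypto_session_param_init(&sp);
    sp.cipher_alg = ODP_CIPHER_ALG_AES_GCM;
    sp.cipher_iv_len = 12;          /* length only, no session-level IV data */

    /* ... create the session, then per packet: */
    odp_crypto_packet_op_param_t op = { .session = session };
    uint8_t iv[12] = { 0 };         /* fresh per-packet IV */

    op.cipher_iv_ptr = iv;          /* IV itself supplied per operation */
]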
diff --git a/include/odp/api/spec/schedule.h b/include/odp/api/spec/schedule.h
index 524d9e2bf..29db262c0 100644
--- a/include/odp/api/spec/schedule.h
+++ b/include/odp/api/spec/schedule.h
@@ -547,6 +547,21 @@ void odp_schedule_order_lock_start(uint32_t lock_index);
void odp_schedule_order_lock_wait(uint32_t lock_index);
/**
+ * Wait until the currently held scheduling context is the first in order
+ *
+ * Wait until there are no other scheduling contexts that precede the
+ * scheduling context of the calling thread in the source queue order.
+ * The context remains the first in order until the thread releases it.
+ *
+ * This function must not be called if the current thread is not holding
+ * an ordered scheduling context or if an ordered lock is being held.
+ *
+ * This function does nothing if ordered wait is not supported.
+ * @see odp_schedule_capability()
+ */
+void odp_schedule_order_wait(void);
+
+/**
* Print debug info about scheduler
*
* Print implementation defined information about scheduler to the ODP log.
diff --git a/include/odp/api/spec/schedule_types.h b/include/odp/api/spec/schedule_types.h
index 09d812528..b90cd5062 100644
--- a/include/odp/api/spec/schedule_types.h
+++ b/include/odp/api/spec/schedule_types.h
@@ -225,6 +225,10 @@ typedef struct odp_schedule_capability_t {
* The specification is the same as for the blocking implementation. */
odp_support_t waitfree_queues;
+ /** Order wait support. If not supported, odp_schedule_order_wait()
+ * does nothing. */
+ odp_support_t order_wait;
+
} odp_schedule_capability_t;
/**
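[Editor's note: a minimal usage sketch of the new function and capability,
assuming the calling thread holds an ordered context from odp_schedule();
queue setup and event handling are elided:

    odp_schedule_capability_t capa;

    if (odp_schedule_capability(&capa) == 0 &&
        capa.order_wait != ODP_SUPPORT_NO) {
            /* ... out-of-order processing of the event ... */
            odp_schedule_order_wait(); /* returns once this context is first in order */
            /* code here runs in source queue order */
    }
]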
diff --git a/include/odp/api/spec/traffic_mngr.h b/include/odp/api/spec/traffic_mngr.h
index b22887fc6..e10f6e9e2 100644
--- a/include/odp/api/spec/traffic_mngr.h
+++ b/include/odp/api/spec/traffic_mngr.h
@@ -1059,7 +1059,7 @@ typedef struct {
* not TRUE while packets per second when packet mode is TRUE.
*/
union {
- /**< @deprecated Use commit_rate instead */
+ /** @deprecated Use commit_rate instead */
uint64_t ODP_DEPRECATE(commit_bps);
uint64_t commit_rate; /**< Commit information rate */
};
@@ -1070,7 +1070,7 @@ typedef struct {
* This field is ignored when dual_rate is FALSE.
*/
union {
- /**< @deprecated Use peak_rate instead */
+ /** @deprecated Use peak_rate instead */
uint64_t ODP_DEPRECATE(peak_bps);
uint64_t peak_rate; /**< Peak information rate */
};
diff --git a/include/odp/autoheader_internal.h.in b/include/odp/autoheader_internal.h.in
index 952675fb5..33d9f280f 100644
--- a/include/odp/autoheader_internal.h.in
+++ b/include/odp/autoheader_internal.h.in
@@ -32,4 +32,7 @@
/* Define to 1 to enable OpenSSL random data */
#undef _ODP_OPENSSL_RAND
+/* Define to 1 to enable XDP support */
+#undef _ODP_PKTIO_XDP
+
#endif
diff --git a/platform/linux-dpdk/Makefile.am b/platform/linux-dpdk/Makefile.am
index 70ce4b980..b3365be71 100644
--- a/platform/linux-dpdk/Makefile.am
+++ b/platform/linux-dpdk/Makefile.am
@@ -104,7 +104,6 @@ odpapiabiarchinclude_HEADERS += \
endif
noinst_HEADERS = \
- ${top_srcdir}/platform/linux-generic/include/odp_align_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_atomic_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_bitset.h \
include/odp_buffer_internal.h \
diff --git a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
index 06e322066..e0169579a 100644
--- a/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-dpdk/include/odp/api/plat/packet_inline_types.h
@@ -113,7 +113,7 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 6;
+ uint32_t reserved1: 7;
/*
* Init flags
@@ -144,14 +144,13 @@ typedef union {
uint32_t udp_err: 1; /* UDP error */
uint32_t sctp_err: 1; /* SCTP error */
uint32_t l4_chksum_err: 1; /* L4 checksum error */
- uint32_t crypto_err: 1; /* Crypto packet operation error */
};
/* Flag groups */
struct {
- uint32_t reserved2: 6;
+ uint32_t reserved2: 7;
uint32_t other: 18; /* All other flags */
- uint32_t error: 8; /* All error flags */
+ uint32_t error: 7; /* All error flags */
} all;
} _odp_packet_flags_t;
diff --git a/platform/linux-dpdk/include/odp_buffer_internal.h b/platform/linux-dpdk/include/odp_buffer_internal.h
index 42b686a2a..3b99961c1 100644
--- a/platform/linux-dpdk/include/odp_buffer_internal.h
+++ b/platform/linux-dpdk/include/odp_buffer_internal.h
@@ -27,7 +27,6 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/thread.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
#include <odp_event_internal.h>
diff --git a/platform/linux-dpdk/include/odp_eventdev_internal.h b/platform/linux-dpdk/include/odp_eventdev_internal.h
index f6b01ec81..66da6daee 100644
--- a/platform/linux-dpdk/include/odp_eventdev_internal.h
+++ b/platform/linux-dpdk/include/odp_eventdev_internal.h
@@ -25,9 +25,9 @@ extern "C" {
#include <odp/api/thread.h>
#include <odp/api/ticketlock.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
#include <odp_forward_typedefs_internal.h>
+#include <odp_macros_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_ptr_ring_mpmc_internal.h>
#include <odp_queue_if.h>
@@ -88,7 +88,7 @@ struct queue_entry_s {
union queue_entry_u {
struct queue_entry_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
};
/* Eventdev global data */
diff --git a/platform/linux-dpdk/include/odp_packet_internal.h b/platform/linux-dpdk/include/odp_packet_internal.h
index a381386fc..02d7ff7b0 100644
--- a/platform/linux-dpdk/include/odp_packet_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_internal.h
@@ -20,20 +20,23 @@ extern "C" {
#include <odp/api/align.h>
#include <odp/api/debug.h>
+#include <odp/api/hints.h>
#include <odp/api/ipsec.h>
#include <odp/api/packet.h>
-#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
#include <odp/api/comp.h>
#include <odp/api/std.h>
+#include <odp/api/plat/packet_inline_types.h>
+
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
#include <odp_event_internal.h>
#include <odp_pool_internal.h>
#include <protocols/eth.h>
+#include <stdint.h>
#include <string.h>
#include <rte_config.h>
@@ -158,13 +161,11 @@ typedef struct odp_packet_hdr_t {
int8_t subtype;
union {
- struct {
- /* Result for crypto packet op */
- odp_crypto_packet_result_t crypto_op_result;
+ /* Result for crypto packet op */
+ odp_crypto_packet_result_t crypto_op_result;
- /* Context for IPsec */
- odp_ipsec_packet_result_t ipsec_ctx;
- };
+ /* Context for IPsec */
+ odp_ipsec_packet_result_t ipsec_ctx;
/* Result for comp packet op */
odp_comp_packet_result_t comp_op_result;
@@ -271,7 +272,10 @@ static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
odp_packet_hdr_t *src_hdr,
odp_bool_t uarea_copy)
{
+ const int8_t subtype = src_hdr->subtype;
+
dst_hdr->input = src_hdr->input;
+ dst_hdr->subtype = subtype;
dst_hdr->dst_queue = src_hdr->dst_queue;
dst_hdr->cos = src_hdr->cos;
dst_hdr->cls_mark = src_hdr->cls_mark;
@@ -321,6 +325,15 @@ static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
dst_hdr->uarea_addr = src_uarea;
}
}
+
+ if (odp_unlikely(subtype != ODP_EVENT_PACKET_BASIC)) {
+ if (subtype == ODP_EVENT_PACKET_IPSEC)
+ dst_hdr->ipsec_ctx = src_hdr->ipsec_ctx;
+ else if (subtype == ODP_EVENT_PACKET_CRYPTO)
+ dst_hdr->crypto_op_result = src_hdr->crypto_op_result;
+ else if (subtype == ODP_EVENT_PACKET_COMP)
+ dst_hdr->comp_op_result = src_hdr->comp_op_result;
+ }
}
static inline void _odp_packet_copy_cls_md(odp_packet_hdr_t *dst_hdr,
diff --git a/platform/linux-dpdk/include/odp_packet_io_internal.h b/platform/linux-dpdk/include/odp_packet_io_internal.h
index 1f3fb650d..59410eef6 100644
--- a/platform/linux-dpdk/include/odp_packet_io_internal.h
+++ b/platform/linux-dpdk/include/odp_packet_io_internal.h
@@ -26,10 +26,10 @@ extern "C" {
#include <odp/api/plat/packet_io_inlines.h>
-#include <odp_align_internal.h>
#include <odp_classification_datamodel.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp_queue_if.h>
#include <inttypes.h>
@@ -159,7 +159,7 @@ struct pktio_entry {
typedef union {
struct pktio_entry s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pktio_entry))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct pktio_entry))];
} pktio_entry_t;
typedef struct {
diff --git a/platform/linux-dpdk/include/odp_queue_basic_internal.h b/platform/linux-dpdk/include/odp_queue_basic_internal.h
index 72c77d985..36ef7c6b5 100644
--- a/platform/linux-dpdk/include/odp_queue_basic_internal.h
+++ b/platform/linux-dpdk/include/odp_queue_basic_internal.h
@@ -20,10 +20,10 @@ extern "C" {
#include <odp/api/shared_memory.h>
#include <odp/api/ticketlock.h>
-#include <odp_align_internal.h>
#include <odp_buffer_internal.h>
#include <odp_config_internal.h>
#include <odp_forward_typedefs_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ptr_ring_mpmc_internal.h>
#include <odp_ptr_ring_st_internal.h>
#include <odp_ptr_ring_spsc_internal.h>
@@ -70,7 +70,7 @@ struct queue_entry_s {
union queue_entry_u {
struct queue_entry_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
};
typedef struct queue_global_t {
diff --git a/platform/linux-dpdk/odp_crypto.c b/platform/linux-dpdk/odp_crypto.c
index 21c9f00ee..ff6dcf0b6 100644
--- a/platform/linux-dpdk/odp_crypto.c
+++ b/platform/linux-dpdk/odp_crypto.c
@@ -1816,7 +1816,6 @@ int odp_crypto_int(odp_packet_t pkt_in,
odp_bool_t allocated = false;
odp_packet_t out_pkt = *pkt_out;
odp_crypto_packet_result_t *op_result;
- odp_packet_hdr_t *pkt_hdr;
odp_bool_t result_ok = true;
session = (crypto_session_entry_t *)(intptr_t)param->session;
@@ -1984,9 +1983,6 @@ out:
op_result->auth_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
op_result->ok = result_ok;
- pkt_hdr = packet_hdr(out_pkt);
- pkt_hdr->p.flags.crypto_err = !op_result->ok;
-
/* Synchronous, simply return results */
*pkt_out = out_pkt;
@@ -2032,7 +2028,6 @@ int odp_crypto_operation(odp_crypto_op_param_t *param,
* We cannot fail since odp_crypto_op() has already processed
* the packet. Let's indicate error in the result instead.
*/
- packet_hdr(out_pkt)->p.flags.crypto_err = 1;
packet_result.ok = false;
}
diff --git a/platform/linux-dpdk/odp_packet.c b/platform/linux-dpdk/odp_packet.c
index 1af9bf0c5..daef605d4 100644
--- a/platform/linux-dpdk/odp_packet.c
+++ b/platform/linux-dpdk/odp_packet.c
@@ -5,6 +5,7 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/align.h>
#include <odp/api/buffer.h>
#include <odp/api/byteorder.h>
#include <odp/api/hash.h>
@@ -20,7 +21,6 @@
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/packet_io_inlines.h>
-#include <odp_align_internal.h>
#include <odp_chksum_internal.h>
#include <odp_debug_internal.h>
#include <odp_errno_define.h>
@@ -788,7 +788,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
if (seglen >= len) {
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign == 0)
return 0;
shift = align - misalign;
@@ -798,7 +798,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
shift = len - seglen;
uaddr -= shift;
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign)
shift += align - misalign;
}
diff --git a/platform/linux-dpdk/odp_pool.c b/platform/linux-dpdk/odp_pool.c
index 3dd79f17b..d121b8253 100644
--- a/platform/linux-dpdk/odp_pool.c
+++ b/platform/linux-dpdk/odp_pool.c
@@ -5,42 +5,41 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <odp/api/std_types.h>
+#include <odp/api/align.h>
+#include <odp/api/hints.h>
#include <odp/api/pool.h>
-#include <odp_pool_internal.h>
-#include <odp_buffer_internal.h>
-#include <odp_packet_internal.h>
-#include <odp_timer_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/shared_memory.h>
-#include <odp/api/align.h>
-#include <odp_init_internal.h>
+#include <odp/api/std_types.h>
+
+#include <odp/api/plat/pool_inline_types.h>
+
+#include <odp_buffer_internal.h>
#include <odp_config_internal.h>
-#include <odp/api/hints.h>
-#include <odp/api/debug.h>
#include <odp_debug_internal.h>
-#include <odp/api/cpumask.h>
-#include <odp_libconfig_internal.h>
#include <odp_event_vector_internal.h>
-
-#include <string.h>
-#include <stddef.h>
-#include <stdlib.h>
-#include <math.h>
-#include <inttypes.h>
-
-#include <odp/api/plat/pool_inline_types.h>
+#include <odp_init_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_timer_internal.h>
#include <rte_config.h>
#include <rte_errno.h>
+#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_mbuf_pool_ops.h>
-#include <rte_malloc.h>
/* ppc64 rte_memcpy.h (included through rte_mempool.h) may define vector */
#if defined(__PPC64__) && defined(vector)
#undef vector
#endif
+#include <inttypes.h>
+#include <math.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
#ifdef POOL_USE_TICKETLOCK
#include <odp/api/ticketlock.h>
#define LOCK(a) odp_ticketlock_lock(a)
@@ -420,7 +419,7 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
- if (!CHECK_IS_POWER2(params->buf.align)) {
+ if (!_ODP_CHECK_IS_POWER2(params->buf.align)) {
ODP_ERR("buf.align not power of two %u\n", params->buf.align);
return -1;
}
@@ -444,7 +443,7 @@ static int check_params(const odp_pool_param_t *params)
return -1;
}
- if (!CHECK_IS_POWER2(params->pkt.align)) {
+ if (!_ODP_CHECK_IS_POWER2(params->pkt.align)) {
ODP_ERR("pkt.align not power of two %u\n", params->pkt.align);
return -1;
}
@@ -631,7 +630,7 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt)
pool->pool_idx, pool->name);
uarea_name[ODP_SHM_NAME_LEN - 1] = 0;
- pool->uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
+ pool->uarea_size = _ODP_ROUNDUP_CACHE_LINE(uarea_size);
pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, 0);
@@ -699,8 +698,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
if (params->buf.align != 0)
- blk_size = ROUNDUP_ALIGN(blk_size,
- buf_align);
+ blk_size = _ODP_ROUNDUP_ALIGN(blk_size, buf_align);
hdr_size = sizeof(odp_buffer_hdr_t);
CHECK_U16_OVERFLOW(blk_size);
@@ -732,13 +730,12 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
if ((max_len + blk_size) / blk_size > params->pkt.num)
blk_size = (max_len + params->pkt.num) /
params->pkt.num;
- blk_size = ROUNDUP_ALIGN(headroom + blk_size +
- tailroom, min_align);
+ blk_size = _ODP_ROUNDUP_ALIGN(headroom + blk_size + tailroom, min_align);
/* Segment size minus headroom might be rounded down by the driver (e.g.
* ixgbe) to the nearest multiple of 1024. Round it up here to make sure the
* requested size is still going to fit without segmentation. */
- blk_size = ROUNDUP_ALIGN(blk_size - headroom, min_seg_len) + headroom;
+ blk_size = _ODP_ROUNDUP_ALIGN(blk_size - headroom, min_seg_len) + headroom;
/* Round down the block size to 16 bits */
if (blk_size > UINT16_MAX) {
@@ -783,8 +780,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
return ODP_POOL_INVALID;
}
- mb_ctor_arg.seg_buf_offset =
- (uint16_t)ROUNDUP_CACHE_LINE(hdr_size);
+ mb_ctor_arg.seg_buf_offset = (uint16_t)_ODP_ROUNDUP_CACHE_LINE(hdr_size);
mb_ctor_arg.seg_buf_size = mbp_ctor_arg.mbuf_data_room_size;
mb_ctor_arg.type = type;
mb_ctor_arg.event_type = event_type;
@@ -1140,7 +1136,7 @@ int odp_pool_stats_reset(odp_pool_t pool_hdl ODP_UNUSED)
* Round up the space we reserve for objhdr up to cache line size. The rte_mbuf
* that comes after this must be cache line aligned.
*/
-#define SIZEOF_OBJHDR ROUNDUP_CACHE_LINE(sizeof(struct rte_mempool_objhdr))
+#define SIZEOF_OBJHDR _ODP_ROUNDUP_CACHE_LINE(sizeof(struct rte_mempool_objhdr))
int odp_pool_ext_capability(odp_pool_type_t type,
odp_pool_ext_capability_t *capa)
diff --git a/platform/linux-dpdk/odp_queue_basic.c b/platform/linux-dpdk/odp_queue_basic.c
index 5f07bb620..c6970930d 100644
--- a/platform/linux-dpdk/odp_queue_basic.c
+++ b/platform/linux-dpdk/odp_queue_basic.c
@@ -18,6 +18,7 @@
#include <odp_config_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp/api/hints.h>
#include <odp/api/sync.h>
#include <odp/api/plat/sync_inlines.h>
@@ -995,11 +996,11 @@ static int queue_init(queue_entry_t *queue, const char *name,
}
/* Ring size must be larger than queue_size */
- if (CHECK_IS_POWER2(queue_size))
+ if (_ODP_CHECK_IS_POWER2(queue_size))
queue_size++;
/* Round up if not already a power of two */
- queue_size = ROUNDUP_POWER2_U32(queue_size);
+ queue_size = _ODP_ROUNDUP_POWER2_U32(queue_size);
/* Single-producer / single-consumer plain queue has simple and
* lock-free implementation */
diff --git a/platform/linux-dpdk/odp_queue_eventdev.c b/platform/linux-dpdk/odp_queue_eventdev.c
index 5e28112ab..e60b4e006 100644
--- a/platform/linux-dpdk/odp_queue_eventdev.c
+++ b/platform/linux-dpdk/odp_queue_eventdev.c
@@ -15,6 +15,7 @@
#include <odp_config_internal.h>
#include <odp_event_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp_libconfig_internal.h>
#include <odp_queue_if.h>
#include <odp_schedule_if.h>
@@ -1165,11 +1166,11 @@ static int queue_init(queue_entry_t *queue, const char *name,
}
/* Ring size must be larger than queue_size */
- if (CHECK_IS_POWER2(queue_size))
+ if (_ODP_CHECK_IS_POWER2(queue_size))
queue_size++;
/* Round up if not already a power of two */
- queue_size = ROUNDUP_POWER2_U32(queue_size);
+ queue_size = _ODP_ROUNDUP_POWER2_U32(queue_size);
/* Default to error functions */
queue->s.enqueue = error_enqueue;
diff --git a/platform/linux-dpdk/odp_schedule_eventdev.c b/platform/linux-dpdk/odp_schedule_eventdev.c
index 8e2c2c034..3c715ae36 100644
--- a/platform/linux-dpdk/odp_schedule_eventdev.c
+++ b/platform/linux-dpdk/odp_schedule_eventdev.c
@@ -1117,5 +1117,6 @@ const schedule_api_t _odp_schedule_eventdev_api = {
.schedule_order_unlock_lock = schedule_order_unlock_lock,
.schedule_order_lock_start = schedule_order_lock_start,
.schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
.schedule_print = schedule_print
};
diff --git a/platform/linux-dpdk/odp_schedule_if.c b/platform/linux-dpdk/odp_schedule_if.c
index 73475ae88..d5b39b656 100644
--- a/platform/linux-dpdk/odp_schedule_if.c
+++ b/platform/linux-dpdk/odp_schedule_if.c
@@ -206,6 +206,11 @@ void odp_schedule_order_lock_wait(uint32_t lock_index)
_odp_sched_api->schedule_order_lock_wait(lock_index);
}
+void odp_schedule_order_wait(void)
+{
+ _odp_sched_api->schedule_order_wait();
+}
+
void odp_schedule_print(void)
{
_odp_sched_api->schedule_print();
diff --git a/platform/linux-dpdk/odp_shared_memory.c b/platform/linux-dpdk/odp_shared_memory.c
index 645ee49ef..9e2151a0d 100644
--- a/platform/linux-dpdk/odp_shared_memory.c
+++ b/platform/linux-dpdk/odp_shared_memory.c
@@ -13,9 +13,9 @@
#include <odp/api/shared_memory.h>
#include <odp/api/spinlock.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp_shm_internal.h>
#include <string.h>
@@ -266,8 +266,8 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
}
/* DPDK requires alignment to be power of two */
- if (!rte_is_power_of_2(align))
- align = ROUNDUP_POWER2_U32(align);
+ if (!_ODP_CHECK_IS_POWER2(align))
+ align = _ODP_ROUNDUP_POWER2_U32(align);
odp_spinlock_lock(&shm_tbl->lock);
diff --git a/platform/linux-dpdk/odp_timer.c b/platform/linux-dpdk/odp_timer.c
index 01f23ee31..c10b246b7 100644
--- a/platform/linux-dpdk/odp_timer.c
+++ b/platform/linux-dpdk/odp_timer.c
@@ -23,6 +23,7 @@
#include <odp_debug_internal.h>
#include <odp_init_internal.h>
#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
#include <odp_pool_internal.h>
#include <odp_queue_if.h>
#include <odp_ring_u32_internal.h>
@@ -650,9 +651,9 @@ odp_timer_pool_t odp_timer_pool_create(const char *name,
nsec_per_scan = res_ns;
/* Ring size must be larger than param->num_timers */
- if (CHECK_IS_POWER2(num_timers))
+ if (_ODP_CHECK_IS_POWER2(num_timers))
num_timers++;
- num_timers = ROUNDUP_POWER2_U32(num_timers);
+ num_timers = _ODP_ROUNDUP_POWER2_U32(num_timers);
odp_ticketlock_lock(&timer_global->lock);
diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am
index d4e94f677..6e64df740 100644
--- a/platform/linux-generic/Makefile.am
+++ b/platform/linux-generic/Makefile.am
@@ -17,6 +17,7 @@ AM_CPPFLAGS += $(NETMAP_CPPFLAGS)
AM_CFLAGS += $(AARCH64CRYPTO_CFLAGS)
AM_CFLAGS += $(DPDK_CFLAGS)
AM_CFLAGS += $(LIBCONFIG_CFLAGS)
+AM_CFLAGS += $(LIBXDP_CFLAGS)
DISTCLEANFILES = include/odp_libconfig_config.h
include/odp_libconfig_config.h: $(top_builddir)/$(rel_default_config_path) $(top_builddir)/config.status
@@ -105,7 +106,6 @@ odpapiabiarchinclude_HEADERS += \
endif
noinst_HEADERS = \
- include/odp_align_internal.h \
include/odp_atomic_internal.h \
include/odp_bitset.h \
include/odp_buffer_internal.h \
@@ -210,6 +210,7 @@ __LIB__libodp_linux_la_SOURCES = \
odp_parse.c \
odp_pkt_queue.c \
odp_pool.c \
+ odp_pool_mem_src_ops.c \
odp_queue_basic.c \
odp_queue_if.c \
odp_queue_lf.c \
@@ -256,6 +257,7 @@ __LIB__libodp_linux_la_SOURCES = \
pktio/pktio_common.c \
pktio/socket.c \
pktio/socket_mmap.c \
+ pktio/socket_xdp.c \
pktio/tap.c
if WITH_OPENSSL_CRYPTO
@@ -418,6 +420,7 @@ __LIB__libodp_linux_la_LIBADD += $(LIBCONFIG_LIBS)
__LIB__libodp_linux_la_LIBADD += $(DPDK_LIBS_LIBODP)
__LIB__libodp_linux_la_LIBADD += $(PTHREAD_LIBS)
__LIB__libodp_linux_la_LIBADD += $(TIMER_LIBS)
+__LIB__libodp_linux_la_LIBADD += $(LIBXDP_LIBS)
if ODP_PKTIO_PCAP
__LIB__libodp_linux_la_LIBADD += $(PCAP_LIBS)
diff --git a/platform/linux-generic/arch/aarch64/cpu_flags.c b/platform/linux-generic/arch/aarch64/cpu_flags.c
index d70e26271..e013f749f 100644
--- a/platform/linux-generic/arch/aarch64/cpu_flags.c
+++ b/platform/linux-generic/arch/aarch64/cpu_flags.c
@@ -1,19 +1,22 @@
/* Copyright (c) 2018, Linaro Limited
- * Copyright (c) 2020-2021, Nokia
+ * Copyright (c) 2020-2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <string.h>
-#include <stdlib.h>
-#include <sys/auxv.h>
-#include <asm/hwcap.h>
-
#include <odp/api/hints.h>
+
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+
#include "cpu_flags.h"
+#include <asm/hwcap.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/auxv.h>
+
typedef struct {
const char *feat_flag;
const unsigned int hwcap_field;
@@ -895,7 +898,7 @@ static void _odp_sys_info_print_hwcap_flags(void)
/* Print supported hardware flags via AT_HWCAP entry of the hwcaps
* auxiliary vector. */
hwcaps = getauxval(AT_HWCAP);
- size = sizeof(hwcap_flags) / sizeof(hwcap_feat_flag_t);
+ size = _ODP_ARRAY_SIZE(hwcap_flags);
for (unsigned int i = 0; i < size; i++) {
if (hwcap_flags[i].valid) {
if (check_hwcap_duplicates(hwcap_flags[i].hwcap_field)) {
@@ -912,7 +915,7 @@ static void _odp_sys_info_print_hwcap_flags(void)
/* Print supported hardware flags via AT_HWCAP2 entry of the hwcaps
* auxiliary vector. */
hwcaps2 = getauxval(AT_HWCAP2);
- size2 = sizeof(hwcap2_flags) / sizeof(hwcap_feat_flag_t);
+ size2 = _ODP_ARRAY_SIZE(hwcap2_flags);
for (unsigned long i = 0; i < size2; i++) {
if (hwcap2_flags[i].valid) {
if (hwcaps2 & 0x01)
diff --git a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
index 4d46846ce..4531ebc28 100644
--- a/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
+++ b/platform/linux-generic/arch/aarch64/odp_crypto_armv8.c
@@ -95,9 +95,9 @@ typedef struct odp_crypto_generic_session_t odp_crypto_generic_session_t;
* Algorithm handler function prototype
*/
typedef
-odp_bool_t (*crypto_func_t)(odp_packet_t pkt,
- const odp_crypto_packet_op_param_t *param,
- odp_crypto_generic_session_t *session);
+void (*crypto_func_t)(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session);
/**
* Per crypto session data structure
@@ -204,13 +204,12 @@ static inline void set_crypto_op_result_ok(odp_packet_t pkt)
ODP_CRYPTO_ALG_ERR_NONE);
}
-static odp_bool_t
+static void
null_crypto_routine(odp_packet_t pkt ODP_UNUSED,
const odp_crypto_packet_op_param_t *param ODP_UNUSED,
odp_crypto_generic_session_t *session ODP_UNUSED)
{
set_crypto_op_result_ok(pkt);
- return true;
}
static inline void copy_aad(uint8_t *dst, uint8_t *src, uint32_t len)
@@ -225,9 +224,9 @@ static inline void copy_aad(uint8_t *dst, uint8_t *src, uint32_t len)
}
static
-odp_bool_t aes_gcm_encrypt(odp_packet_t pkt,
- const odp_crypto_packet_op_param_t *param,
- odp_crypto_generic_session_t *session)
+void aes_gcm_encrypt(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
{
armv8_cipher_state_t cs = {
.counter = {
@@ -257,7 +256,7 @@ odp_bool_t aes_gcm_encrypt(odp_packet_t pkt,
else if (session->p.cipher_iv.data)
iv_ptr = session->cipher.iv_data;
else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
+ goto err;
#else
iv_ptr = param->cipher_iv_ptr;
ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
@@ -316,19 +315,18 @@ odp_bool_t aes_gcm_encrypt(odp_packet_t pkt,
}
set_crypto_op_result_ok(pkt);
- return true;
+ return;
err:
set_crypto_op_result(pkt,
ODP_CRYPTO_ALG_ERR_DATA_SIZE,
ODP_CRYPTO_ALG_ERR_NONE);
- return false;
}
static
-odp_bool_t aes_gcm_decrypt(odp_packet_t pkt,
- const odp_crypto_packet_op_param_t *param,
- odp_crypto_generic_session_t *session)
+void aes_gcm_decrypt(odp_packet_t pkt,
+ const odp_crypto_packet_op_param_t *param,
+ odp_crypto_generic_session_t *session)
{
armv8_cipher_state_t cs = {
.counter = {
@@ -358,7 +356,7 @@ odp_bool_t aes_gcm_decrypt(odp_packet_t pkt,
else if (session->p.cipher_iv.data)
iv_ptr = session->cipher.iv_data;
else
- return ODP_CRYPTO_ALG_ERR_IV_INVALID;
+ goto err;
#else
iv_ptr = param->cipher_iv_ptr;
ODP_ASSERT(session->p.cipher_iv_len == 0 || iv_ptr != NULL);
@@ -416,13 +414,12 @@ odp_bool_t aes_gcm_decrypt(odp_packet_t pkt,
odp_packet_copy_from_mem(pkt, in_pos, in_len, data);
set_crypto_op_result_ok(pkt);
- return true;
+ return;
err:
set_crypto_op_result(pkt,
ODP_CRYPTO_ALG_ERR_NONE,
ODP_CRYPTO_ALG_ERR_ICV_CHECK);
- return false;
}
static int process_aes_gcm_param(odp_crypto_generic_session_t *session)
@@ -724,7 +721,6 @@ odp_crypto_operation(odp_crypto_op_param_t *param,
* We cannot fail since odp_crypto_op() has already processed
* the packet. Let's indicate error in the result instead.
*/
- packet_hdr(out_pkt)->p.flags.crypto_err = 1;
packet_result.ok = false;
}
@@ -916,8 +912,6 @@ int crypto_int(odp_packet_t pkt_in,
odp_crypto_generic_session_t *session;
odp_bool_t allocated = false;
odp_packet_t out_pkt = *pkt_out;
- odp_packet_hdr_t *pkt_hdr;
- odp_bool_t ok;
session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
@@ -959,11 +953,9 @@ int crypto_int(odp_packet_t pkt_in,
}
/* Invoke the crypto function */
- ok = session->func(out_pkt, param, session);
+ session->func(out_pkt, param, session);
packet_subtype_set(out_pkt, ODP_EVENT_PACKET_CRYPTO);
- pkt_hdr = packet_hdr(out_pkt);
- pkt_hdr->p.flags.crypto_err = !ok;
/* Synchronous, simply return results */
*pkt_out = out_pkt;
diff --git a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
index 126404f2d..c5293fc86 100644
--- a/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
+++ b/platform/linux-generic/include/odp/api/plat/packet_inline_types.h
@@ -115,7 +115,7 @@ typedef union {
uint32_t all_flags;
struct {
- uint32_t reserved1: 6;
+ uint32_t reserved1: 7;
/*
* Init flags
@@ -146,14 +146,13 @@ typedef union {
uint32_t udp_err: 1; /* UDP error */
uint32_t sctp_err: 1; /* SCTP error */
uint32_t l4_chksum_err: 1; /* L4 checksum error */
- uint32_t crypto_err: 1; /* Crypto packet operation error */
};
/* Flag groups */
struct {
- uint32_t reserved2: 6;
+ uint32_t reserved2: 7;
uint32_t other: 18; /* All other flags */
- uint32_t error: 8; /* All error flags */
+ uint32_t error: 7; /* All error flags */
} all;
} _odp_packet_flags_t;
diff --git a/platform/linux-generic/include/odp_align_internal.h b/platform/linux-generic/include/odp_align_internal.h
deleted file mode 100644
index df2182df2..000000000
--- a/platform/linux-generic/include/odp_align_internal.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2014-2018, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-/**
- * @file
- *
- * ODP internal alignments
- */
-
-#ifndef ODP_ALIGN_INTERNAL_H_
-#define ODP_ALIGN_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/api/align.h>
-#include <stdint.h>
-
-/* Macros to calculate ODP_ROUNDUP_POWER2_U32() in five rounds of shift
- * and OR operations. */
-#define _RSHIFT_U32(x, y) (((uint32_t)(x)) >> (y))
-#define _POW2_U32_R1(x) (((uint32_t)(x)) | _RSHIFT_U32(x, 1))
-#define _POW2_U32_R2(x) (_POW2_U32_R1(x) | _RSHIFT_U32(_POW2_U32_R1(x), 2))
-#define _POW2_U32_R3(x) (_POW2_U32_R2(x) | _RSHIFT_U32(_POW2_U32_R2(x), 4))
-#define _POW2_U32_R4(x) (_POW2_U32_R3(x) | _RSHIFT_U32(_POW2_U32_R3(x), 8))
-#define _POW2_U32_R5(x) (_POW2_U32_R4(x) | _RSHIFT_U32(_POW2_U32_R4(x), 16))
-
-/* Round up a uint32_t value 'x' to the next power of two.
- *
- * The value is not round up, if it's already a power of two (including 1).
- * The value must be larger than 0 and not exceed 0x80000000.
- */
-#define ROUNDUP_POWER2_U32(x) \
- ((((uint32_t)(x)) > 0x80000000) ? 0 : (_POW2_U32_R5(x - 1) + 1))
-
-/*
- * Round up 'x' to alignment 'align'
- */
-#define ROUNDUP_ALIGN(x, align)\
- ((align) * (((x) + (align) - 1) / (align)))
-
-/*
- * Round up 'x' to cache line size alignment
- */
-#define ROUNDUP_CACHE_LINE(x)\
- ROUNDUP_ALIGN(x, ODP_CACHE_LINE_SIZE)
-
-/*
- * Round down 'x' to 'align' alignment, which is a power of two
- */
-#define ROUNDDOWN_POWER2(x, align)\
- ((x) & (~((align) - 1)))
-
-/*
- * Check if value is a power of two
- */
-#define CHECK_IS_POWER2(x) ((((x) - 1) & (x)) == 0)
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h
index e0be593d0..5841720ef 100644
--- a/platform/linux-generic/include/odp_buffer_internal.h
+++ b/platform/linux-generic/include/odp_buffer_internal.h
@@ -24,7 +24,6 @@ extern "C" {
#include <odp/api/buffer.h>
#include <odp/api/debug.h>
#include <odp/api/align.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
#include <odp/api/byteorder.h>
#include <odp/api/thread.h>
diff --git a/platform/linux-generic/include/odp_classification_datamodel.h b/platform/linux-generic/include/odp_classification_datamodel.h
index 6b50fef68..6e89a9947 100644
--- a/platform/linux-generic/include/odp_classification_datamodel.h
+++ b/platform/linux-generic/include/odp_classification_datamodel.h
@@ -21,10 +21,13 @@ extern "C" {
#include <odp/api/spinlock.h>
#include <odp/api/classification.h>
#include <odp/api/debug.h>
+
+#include <odp_macros_internal.h>
#include <odp_pool_internal.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_queue_if.h>
+
#include <protocols/ip.h>
/* Maximum Class Of Service Entry */
@@ -155,7 +158,7 @@ struct cos_s {
typedef union cos_u {
struct cos_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct cos_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct cos_s))];
} cos_t;
/* Pattern Matching Rule */
@@ -172,7 +175,7 @@ struct pmr_s {
typedef union pmr_u {
struct pmr_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pmr_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct pmr_s))];
} pmr_t;
typedef struct _cls_queue_grp_tbl_s {
@@ -181,7 +184,7 @@ typedef struct _cls_queue_grp_tbl_s {
typedef union _cls_queue_grp_tbl_t {
_cls_queue_grp_tbl_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(_cls_queue_grp_tbl_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(_cls_queue_grp_tbl_s))];
} _cls_queue_grp_tbl_t;
/**
diff --git a/platform/linux-generic/include/odp_macros_internal.h b/platform/linux-generic/include/odp_macros_internal.h
index b8be7f938..abf017aec 100644
--- a/platform/linux-generic/include/odp_macros_internal.h
+++ b/platform/linux-generic/include/odp_macros_internal.h
@@ -1,4 +1,5 @@
-/* Copyright (c) 2018-2018, Linaro Limited
+/* Copyright (c) 2014-2018, Linaro Limited
+ * Copyright (c) 2022, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -17,37 +18,67 @@
extern "C" {
#endif
-#include <odp/api/debug.h>
+#include <odp/api/align.h>
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+#include <stdint.h>
-#define MIN(a, b) \
+#define _ODP_ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define _ODP_MIN(a, b) \
__extension__ ({ \
__typeof__(a) tmp_a = (a); \
__typeof__(b) tmp_b = (b); \
tmp_a < tmp_b ? tmp_a : tmp_b; \
})
-#define MAX(a, b) \
+#define _ODP_MAX(a, b) \
__extension__ ({ \
__typeof__(a) tmp_a = (a); \
__typeof__(b) tmp_b = (b); \
tmp_a > tmp_b ? tmp_a : tmp_b; \
})
-#define MAX3(a, b, c) (MAX(MAX((a), (b)), (c)))
+#define _ODP_MAX3(a, b, c) (_ODP_MAX(_ODP_MAX((a), (b)), (c)))
-#define odp_container_of(pointer, type, member) \
- ((type *)(void *)(((char *)pointer) - offsetof(type, member)))
+/* Macros to calculate _ODP_ROUNDUP_POWER2_U32() in five rounds of shift
+ * and OR operations. */
+#define __ODP_RSHIFT_U32(x, y) (((uint32_t)(x)) >> (y))
+#define __ODP_POW2_U32_R1(x) (((uint32_t)(x)) | __ODP_RSHIFT_U32(x, 1))
+#define __ODP_POW2_U32_R2(x) (__ODP_POW2_U32_R1(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R1(x), 2))
+#define __ODP_POW2_U32_R3(x) (__ODP_POW2_U32_R2(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R2(x), 4))
+#define __ODP_POW2_U32_R4(x) (__ODP_POW2_U32_R3(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R3(x), 8))
+#define __ODP_POW2_U32_R5(x) (__ODP_POW2_U32_R4(x) | __ODP_RSHIFT_U32(__ODP_POW2_U32_R4(x), 16))
-#define DIV_ROUND_UP(a, b) \
- __extension__ ({ \
- __typeof__(a) tmp_a = (a); \
- __typeof__(b) tmp_b = (b); \
- ODP_STATIC_ASSERT(__builtin_constant_p(b), ""); \
- ODP_STATIC_ASSERT((((b) - 1) & (b)) == 0, ""); \
- (tmp_a + tmp_b - 1) >> __builtin_ctz(tmp_b); \
- })
+/* Round up a uint32_t value 'x' to the next power of two.
+ *
+ * The value is not rounded up if it's already a power of two (including 1).
+ * The value must be larger than 0 and not exceed 0x80000000.
+ */
+#define _ODP_ROUNDUP_POWER2_U32(x) \
+ ((((uint32_t)(x)) > 0x80000000) ? 0 : (__ODP_POW2_U32_R5(x - 1) + 1))
+
+/*
+ * Round up 'x' to alignment 'align'
+ */
+#define _ODP_ROUNDUP_ALIGN(x, align)\
+ ((align) * (((x) + (align) - 1) / (align)))
+
+/*
+ * Round up 'x' to cache line size alignment
+ */
+#define _ODP_ROUNDUP_CACHE_LINE(x)\
+ _ODP_ROUNDUP_ALIGN(x, ODP_CACHE_LINE_SIZE)
+
+/*
+ * Round down 'x' to 'align' alignment, which is a power of two
+ */
+#define _ODP_ROUNDDOWN_POWER2(x, align)\
+ ((x) & (~((align) - 1)))
+
+/*
+ * Check if value is a power of two
+ */
+#define _ODP_CHECK_IS_POWER2(x) ((((x) - 1) & (x)) == 0)
#ifdef __cplusplus
}
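
For illustration: the renamed helpers behave exactly like the old
odp_align_internal.h macros. A few compile-time checks that follow directly
from the definitions above (assuming <odp/api/debug.h> is included for
ODP_STATIC_ASSERT):

	/* Illustrative only, not part of the patch */
	ODP_STATIC_ASSERT(_ODP_ROUNDUP_POWER2_U32(1) == 1, "");
	ODP_STATIC_ASSERT(_ODP_ROUNDUP_POWER2_U32(33) == 64, "");
	ODP_STATIC_ASSERT(_ODP_ROUNDUP_ALIGN(100, 64) == 128, "");
	ODP_STATIC_ASSERT(_ODP_ROUNDDOWN_POWER2(100, 64) == 64, "");
	ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(64) && !_ODP_CHECK_IS_POWER2(100), "");
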
diff --git a/platform/linux-generic/include/odp_packet_dpdk.h b/platform/linux-generic/include/odp_packet_dpdk.h
index b326000e6..7d42971be 100644
--- a/platform/linux-generic/include/odp_packet_dpdk.h
+++ b/platform/linux-generic/include/odp_packet_dpdk.h
@@ -26,16 +26,6 @@ struct rte_mbuf;
#define PTYPE_UDP 0x20
#define PTYPE_TCP 0x40
-/**
- * Calculate size of zero-copy DPDK packet pool object
- */
-uint32_t _odp_dpdk_pool_obj_size(pool_t *pool, uint32_t block_size);
-
-/**
- * Create zero-copy DPDK packet pool
- */
-int _odp_dpdk_pool_create(pool_t *pool);
-
/** Packet parser using DPDK interface */
int _odp_dpdk_packet_parse_common(packet_parser_t *pkt_hdr,
const uint8_t *ptr,
diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h
index a8f58146a..e54d88f6a 100644
--- a/platform/linux-generic/include/odp_packet_internal.h
+++ b/platform/linux-generic/include/odp_packet_internal.h
@@ -21,19 +21,21 @@ extern "C" {
#include <odp/api/align.h>
#include <odp/api/atomic.h>
#include <odp/api/debug.h>
+#include <odp/api/hints.h>
#include <odp/api/packet.h>
-#include <odp/api/plat/packet_inline_types.h>
#include <odp/api/packet_io.h>
#include <odp/api/crypto.h>
#include <odp/api/comp.h>
#include <odp/api/std.h>
-#include <odp/api/abi/packet.h>
+
+#include <odp/api/plat/packet_inline_types.h>
#include <odp_debug_internal.h>
#include <odp_event_internal.h>
#include <odp_ipsec_internal.h>
#include <odp_pool_internal.h>
#include <odp_queue_if.h>
+#include <odp_config_internal.h>
#include <stdint.h>
#include <string.h>
@@ -44,12 +46,6 @@ ODP_STATIC_ASSERT(sizeof(_odp_packet_input_flags_t) == sizeof(uint64_t),
ODP_STATIC_ASSERT(sizeof(_odp_packet_flags_t) == sizeof(uint32_t),
"PACKET_FLAGS_SIZE_ERROR");
-/* Packet extra data length */
-#define PKT_EXTRA_LEN 128
-
-/* Packet extra data types */
-#define PKT_EXTRA_TYPE_DPDK 1
-
/* Maximum number of segments per packet */
#define PKT_MAX_SEGS 255
@@ -150,14 +146,15 @@ typedef struct ODP_ALIGNED_CACHE odp_packet_hdr_t {
/* LSO profile index */
uint8_t lso_profile_idx;
+ /* Pktio where packet is used as a memory source */
+ uint8_t ms_pktio_idx;
+
union {
- struct {
- /* Result for crypto packet op */
- odp_crypto_packet_result_t crypto_op_result;
+ /* Result for crypto packet op */
+ odp_crypto_packet_result_t crypto_op_result;
- /* Context for IPsec */
- odp_ipsec_packet_result_t ipsec_ctx;
- };
+ /* Context for IPsec */
+ odp_ipsec_packet_result_t ipsec_ctx;
/* Result for comp packet op */
odp_comp_packet_result_t comp_op_result;
@@ -172,6 +169,8 @@ typedef struct ODP_ALIGNED_CACHE odp_packet_hdr_t {
* grow over 256 bytes. */
ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) <= 256, "PACKET_HDR_SIZE_ERROR");
+ODP_STATIC_ASSERT(ODP_CONFIG_PKTIO_ENTRIES < UINT8_MAX, "MS_PKTIO_IDX_SIZE_ERROR");
+
/**
* Return the packet header
*/
@@ -298,6 +297,8 @@ static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
odp_packet_hdr_t *src_hdr,
odp_bool_t uarea_copy)
{
+ int8_t subtype = src_hdr->subtype;
+
/* Lengths and segmentation data are not copied:
* .frame_len
* .headroom
@@ -308,6 +309,7 @@ static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
* .seg_count
*/
dst_hdr->input = src_hdr->input;
+ dst_hdr->subtype = subtype;
dst_hdr->dst_queue = src_hdr->dst_queue;
dst_hdr->cos = src_hdr->cos;
dst_hdr->cls_mark = src_hdr->cls_mark;
@@ -351,6 +353,15 @@ static inline void _odp_packet_copy_md(odp_packet_hdr_t *dst_hdr,
dst_hdr->uarea_addr = src_uarea;
}
}
+
+ if (odp_unlikely(subtype != ODP_EVENT_PACKET_BASIC)) {
+ if (subtype == ODP_EVENT_PACKET_IPSEC)
+ dst_hdr->ipsec_ctx = src_hdr->ipsec_ctx;
+ else if (subtype == ODP_EVENT_PACKET_CRYPTO)
+ dst_hdr->crypto_op_result = src_hdr->crypto_op_result;
+ else if (subtype == ODP_EVENT_PACKET_COMP)
+ dst_hdr->comp_op_result = src_hdr->comp_op_result;
+ }
}
static inline void _odp_packet_copy_cls_md(odp_packet_hdr_t *dst_hdr,
diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index aed6de412..ca9f083da 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -26,10 +26,10 @@ extern "C" {
#include <odp/api/plat/packet_io_inlines.h>
#include <odp/autoheader_internal.h>
-#include <odp_align_internal.h>
#include <odp_classification_datamodel.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp_packet_io_stats_common.h>
#include <odp_queue_if.h>
@@ -70,7 +70,7 @@ struct pktio_if_ops;
#elif defined(_ODP_PKTIO_DPDK)
#define PKTIO_PRIVATE_SIZE 5632
#else
-#define PKTIO_PRIVATE_SIZE 384
+#define PKTIO_PRIVATE_SIZE 512
#endif
struct pktio_entry {
@@ -173,7 +173,7 @@ struct pktio_entry {
typedef union {
struct pktio_entry s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pktio_entry))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct pktio_entry))];
} pktio_entry_t;
typedef struct {
@@ -308,6 +308,7 @@ static inline void _odp_pktio_tx_ts_set(pktio_entry_t *entry)
extern const pktio_if_ops_t _odp_netmap_pktio_ops;
extern const pktio_if_ops_t _odp_dpdk_pktio_ops;
+extern const pktio_if_ops_t _odp_sock_xdp_pktio_ops;
extern const pktio_if_ops_t _odp_sock_mmsg_pktio_ops;
extern const pktio_if_ops_t _odp_sock_mmap_pktio_ops;
extern const pktio_if_ops_t _odp_loopback_pktio_ops;
diff --git a/platform/linux-generic/include/odp_parse_internal.h b/platform/linux-generic/include/odp_parse_internal.h
index 8aa5e118b..22d8c2cf6 100644
--- a/platform/linux-generic/include/odp_parse_internal.h
+++ b/platform/linux-generic/include/odp_parse_internal.h
@@ -45,8 +45,8 @@ extern "C" {
#define PARSE_SCTP_BYTES (sizeof(_odp_sctphdr_t))
/* _odp_packet_parse_common_l3_l4() requires up to this many bytes. */
-#define PARSE_L3_L4_BYTES (MAX(PARSE_IPV4_BYTES, PARSE_IPV6_BYTES) + \
- MAX3(PARSE_TCP_BYTES, PARSE_UDP_BYTES, PARSE_SCTP_BYTES))
+#define PARSE_L3_L4_BYTES (_ODP_MAX(PARSE_IPV4_BYTES, PARSE_IPV6_BYTES) + \
+ _ODP_MAX3(PARSE_TCP_BYTES, PARSE_UDP_BYTES, PARSE_SCTP_BYTES))
/* _odp_packet_parse_common() requires up to this many bytes. */
#define PARSE_BYTES (PARSE_ETH_BYTES + PARSE_L3_L4_BYTES)
diff --git a/platform/linux-generic/include/odp_pool_internal.h b/platform/linux-generic/include/odp_pool_internal.h
index 4c9f9a9ce..824aa9292 100644
--- a/platform/linux-generic/include/odp_pool_internal.h
+++ b/platform/linux-generic/include/odp_pool_internal.h
@@ -28,6 +28,8 @@ extern "C" {
#include <odp_ring_ptr_internal.h>
#include <odp/api/plat/strong_types.h>
+#define _ODP_POOL_MEM_SRC_DATA_SIZE 128
+
typedef struct ODP_ALIGNED_CACHE pool_cache_t {
/* Number of buffers in cache */
uint32_t cache_num;
@@ -52,8 +54,7 @@ typedef struct ODP_ALIGNED_CACHE {
} pool_ring_t;
#pragma GCC diagnostic pop
-/* Callback function for pool destroy */
-typedef void (*pool_destroy_cb_fn)(void *pool);
+struct _odp_pool_mem_src_ops_t;
typedef struct pool_t {
odp_ticketlock_t lock ODP_ALIGNED_CACHE;
@@ -91,14 +92,11 @@ typedef struct pool_t {
uint8_t *uarea_base_addr;
odp_pool_type_t type_2;
odp_pool_ext_param_t ext_param;
-
- /* Used by DPDK zero-copy pktio */
- uint32_t dpdk_elt_size;
uint32_t skipped_blocks;
- uint8_t pool_in_use;
uint8_t mem_from_huge_pages;
- pool_destroy_cb_fn ext_destroy;
- void *ext_desc;
+ const struct _odp_pool_mem_src_ops_t *mem_src_ops;
+ /* Private area for memory source operations */
+ uint8_t mem_src_data[_ODP_POOL_MEM_SRC_DATA_SIZE] ODP_ALIGNED_CACHE;
struct ODP_ALIGNED_CACHE {
odp_atomic_u64_t alloc_ops;
@@ -130,6 +128,25 @@ typedef struct pool_global_t {
} pool_global_t;
+/* Operations used when an ODP packet pool serves as a memory source, e.g. for
+ * zero-copy packet IO purposes */
+typedef struct _odp_pool_mem_src_ops_t {
+ /* Name of the ops provider */
+ const char *name;
+ /* Signal if ops provider is an active user for the pool as a memory source */
+ odp_bool_t (*is_active)(void);
+	/* Force-disable the ops provider (for now, once one active memory source
+	 * user is found, the others are disabled) */
+ void (*force_disable)(void);
+ /* Adjust pool block sizes as required by memory consumer */
+ void (*adjust_size)(uint8_t *data, uint32_t *block_size, uint32_t *block_offset,
+ uint32_t *flags);
+ /* Bind the pool as a memory source */
+ int (*bind)(uint8_t *data, pool_t *pool);
+ /* Unbind the pool as a memory source */
+ void (*unbind)(uint8_t *data);
+} _odp_pool_mem_src_ops_t;
+
extern pool_global_t *_odp_pool_glb;
static inline pool_t *pool_entry(uint32_t pool_idx)
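
For illustration: a memory source provider fills in one
_odp_pool_mem_src_ops_t and keeps its per-pool state in the 128-byte
(_ODP_POOL_MEM_SRC_DATA_SIZE) mem_src_data area. A hypothetical skeleton —
all names below are invented for this sketch; the real providers are the DPDK
and XDP ops registered in odp_pool_mem_src_ops.c later in this patch:

	/* Hypothetical provider, for illustration only */
	static odp_bool_t my_src_is_active(void) { return 1; }
	static void my_src_force_disable(void) { }

	static void my_src_adjust_size(uint8_t *data, uint32_t *block_size,
				       uint32_t *block_offset, uint32_t *flags)
	{
		(void)data; (void)block_offset; (void)flags;
		/* e.g. grow blocks to fit a driver specific descriptor */
		*block_size = _ODP_ROUNDUP_CACHE_LINE(*block_size + 64);
	}

	static int my_src_bind(uint8_t *data, pool_t *pool)
	{
		(void)data; (void)pool; /* map pool memory for the driver */
		return 0;
	}

	static void my_src_unbind(uint8_t *data) { (void)data; }

	const _odp_pool_mem_src_ops_t my_mem_src_ops = {
		.name = "my_src",
		.is_active = my_src_is_active,
		.force_disable = my_src_force_disable,
		.adjust_size = my_src_adjust_size,
		.bind = my_src_bind,
		.unbind = my_src_unbind,
	};
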
diff --git a/platform/linux-generic/include/odp_queue_basic_internal.h b/platform/linux-generic/include/odp_queue_basic_internal.h
index d8a3226cb..9babb62e9 100644
--- a/platform/linux-generic/include/odp_queue_basic_internal.h
+++ b/platform/linux-generic/include/odp_queue_basic_internal.h
@@ -17,12 +17,12 @@ extern "C" {
#include <odp_forward_typedefs_internal.h>
#include <odp_queue_if.h>
#include <odp_buffer_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/packet_io.h>
#include <odp/api/align.h>
#include <odp/api/hints.h>
#include <odp/api/ticketlock.h>
#include <odp_config_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ring_mpmc_internal.h>
#include <odp_ring_st_internal.h>
#include <odp_ring_spsc_internal.h>
@@ -69,7 +69,7 @@ struct queue_entry_s {
union queue_entry_u {
struct queue_entry_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
};
typedef struct queue_global_t {
diff --git a/platform/linux-generic/include/odp_queue_scalable_internal.h b/platform/linux-generic/include/odp_queue_scalable_internal.h
index 6f9b85c85..3c74d1699 100644
--- a/platform/linux-generic/include/odp_queue_scalable_internal.h
+++ b/platform/linux-generic/include/odp_queue_scalable_internal.h
@@ -18,12 +18,12 @@ extern "C" {
#include <odp_forward_typedefs_internal.h>
#include <odp_queue_if.h>
#include <odp_event_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/packet_io.h>
#include <odp/api/align.h>
#include <odp/api/hints.h>
#include <odp/api/ticketlock.h>
#include <odp_config_internal.h>
+#include <odp_macros_internal.h>
#include <odp_schedule_scalable.h>
#include <odp_schedule_scalable_ordered.h>
@@ -55,7 +55,7 @@ struct queue_entry_s {
union queue_entry_u {
struct queue_entry_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct queue_entry_s))];
};
int _odp_queue_deq(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num);
@@ -71,7 +71,7 @@ static inline void *shm_pool_alloc_align(_odp_ishm_pool_t *pool, uint32_t size)
{
void *addr;
- addr = _odp_ishm_pool_alloc(pool, ROUNDUP_CACHE_LINE(size));
+ addr = _odp_ishm_pool_alloc(pool, _ODP_ROUNDUP_CACHE_LINE(size));
ODP_ASSERT(((uintptr_t)addr & (ODP_CACHE_LINE_SIZE - 1)) == 0);
return addr;
diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index d11e81bf2..961e83448 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -15,12 +15,14 @@
extern "C" {
#endif
+#include <odp/api/align.h>
#include <odp/api/atomic.h>
#include <odp/api/cpu.h>
#include <odp/api/hints.h>
-#include <odp_align_internal.h>
+
#include <odp/api/plat/atomic_inlines.h>
#include <odp/api/plat/cpu_inlines.h>
+
#include <odp_ring_common.h>
/* Generic ring implementation
diff --git a/platform/linux-generic/include/odp_ring_mpmc_internal.h b/platform/linux-generic/include/odp_ring_mpmc_internal.h
index 473e69e90..6ed4dd4d1 100644
--- a/platform/linux-generic/include/odp_ring_mpmc_internal.h
+++ b/platform/linux-generic/include/odp_ring_mpmc_internal.h
@@ -11,10 +11,11 @@
extern "C" {
#endif
+#include <odp/api/align.h>
#include <odp/api/atomic.h>
#include <odp/api/cpu.h>
#include <odp/api/hints.h>
-#include <odp_align_internal.h>
+
#include <odp/api/plat/atomic_inlines.h>
#include <odp/api/plat/cpu_inlines.h>
diff --git a/platform/linux-generic/include/odp_ring_st_internal.h b/platform/linux-generic/include/odp_ring_st_internal.h
index 23b012d96..406d043b5 100644
--- a/platform/linux-generic/include/odp_ring_st_internal.h
+++ b/platform/linux-generic/include/odp_ring_st_internal.h
@@ -11,8 +11,8 @@
extern "C" {
#endif
+#include <odp/api/align.h>
#include <odp/api/hints.h>
-#include <odp_align_internal.h>
/* Basic ring for single thread usage. Operations must be synchronized by using
* locks (or other means), when multiple threads use the same ring. */
diff --git a/platform/linux-generic/include/odp_schedule_if.h b/platform/linux-generic/include/odp_schedule_if.h
index cec9c8bb8..d9ba165bc 100644
--- a/platform/linux-generic/include/odp_schedule_if.h
+++ b/platform/linux-generic/include/odp_schedule_if.h
@@ -131,6 +131,7 @@ typedef struct {
uint32_t lock_index);
void (*schedule_order_lock_start)(uint32_t lock_index);
void (*schedule_order_lock_wait)(uint32_t lock_index);
+ void (*schedule_order_wait)(void);
void (*schedule_print)(void);
} schedule_api_t;
diff --git a/platform/linux-generic/include/odp_schedule_scalable_ordered.h b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
index 21c89bed2..be4894f73 100644
--- a/platform/linux-generic/include/odp_schedule_scalable_ordered.h
+++ b/platform/linux-generic/include/odp_schedule_scalable_ordered.h
@@ -9,11 +9,12 @@
#ifndef ODP_SCHEDULE_SCALABLE_ORDERED_H
#define ODP_SCHEDULE_SCALABLE_ORDERED_H
+#include <odp/api/align.h>
#include <odp/api/shared_memory.h>
-#include <odp_align_internal.h>
#include <odp_bitset.h>
#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ishmpool_internal.h>
/* High level functioning of reordering
@@ -68,7 +69,7 @@ typedef struct ODP_ALIGNED(sizeof(uint64_t)) hc {
* Should be at least one per CPU.
*/
#define RWIN_SIZE 32
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(RWIN_SIZE), "RWIN_SIZE is not a power of 2");
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(RWIN_SIZE), "RWIN_SIZE is not a power of 2");
typedef struct reorder_context reorder_context_t;
diff --git a/platform/linux-generic/libodp-linux.pc.in b/platform/linux-generic/libodp-linux.pc.in
index 28c7ac49c..f9a339fb8 100644
--- a/platform/linux-generic/libodp-linux.pc.in
+++ b/platform/linux-generic/libodp-linux.pc.in
@@ -8,5 +8,5 @@ Description: The ODP packet processing engine
Version: @PKGCONFIG_VERSION@
Requires.private: libconfig@AARCH64CRYPTO_PKG@
Libs: -L${libdir} -l@ODP_LIB_NAME@ @ATOMIC_LIBS_NON_ABI_COMPAT@
-Libs.private: @OPENSSL_STATIC_LIBS@ @DPDK_LIBS@ @PCAP_LIBS@ @PTHREAD_LIBS@ @TIMER_LIBS@ -lpthread @ATOMIC_LIBS_ABI_COMPAT@
+Libs.private: @OPENSSL_STATIC_LIBS@ @DPDK_LIBS@ @PCAP_LIBS@ @PTHREAD_LIBS@ @TIMER_LIBS@ @LIBXDP_LIBS@ -lpthread @ATOMIC_LIBS_ABI_COMPAT@
Cflags: -I${includedir}
diff --git a/platform/linux-generic/m4/configure.m4 b/platform/linux-generic/m4/configure.m4
index 291cb2773..70a393f56 100644
--- a/platform/linux-generic/m4/configure.m4
+++ b/platform/linux-generic/m4/configure.m4
@@ -26,9 +26,10 @@ m4_include([platform/linux-generic/m4/odp_crypto.m4])
m4_include([platform/linux-generic/m4/odp_pcapng.m4])
m4_include([platform/linux-generic/m4/odp_netmap.m4])
m4_include([platform/linux-generic/m4/odp_dpdk.m4])
+m4_include([platform/linux-generic/m4/odp_xdp.m4])
ODP_SCHEDULER
-AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${AARCH64CRYPTO_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS} ${DPDK_LIBS_LT} ${LIBCLI_LIBS}"])
+AS_VAR_APPEND([PLAT_DEP_LIBS], ["${ATOMIC_LIBS} ${AARCH64CRYPTO_LIBS} ${LIBCONFIG_LIBS} ${OPENSSL_LIBS} ${DPDK_LIBS_LT} ${LIBCLI_LIBS} ${LIBXDP_LIBS}"])
# Add text to the end of configure with platform specific settings.
# Make sure it's aligned same as other lines in configure.ac.
diff --git a/platform/linux-generic/m4/odp_xdp.m4 b/platform/linux-generic/m4/odp_xdp.m4
new file mode 100644
index 000000000..2c6179df9
--- /dev/null
+++ b/platform/linux-generic/m4/odp_xdp.m4
@@ -0,0 +1,15 @@
+##########################################################################
+# Check for libxdp availability
+##########################################################################
+AC_ARG_ENABLE([xdp], AS_HELP_STRING([--enable-xdp],
+ [enable experimental XDP support for Packet I/O [default=disabled] (linux-generic)]))
+
+AS_IF([test "x$enable_xdp" = "xyes"], [
+ PKG_CHECK_MODULES([LIBXDP], [libxdp],
+ [
+ AC_DEFINE(_ODP_PKTIO_XDP, [1], [Define to 1 to enable xdp packet I/O support])
+ ],
+ [
+    AS_IF([test "x$enable_xdp" = "xyes"], [AC_MSG_ERROR([libxdp not found])])
+ ])
+])
diff --git a/platform/linux-generic/odp_classification.c b/platform/linux-generic/odp_classification.c
index 2fdecbc51..90ebc8ae0 100644
--- a/platform/linux-generic/odp_classification.c
+++ b/platform/linux-generic/odp_classification.c
@@ -169,8 +169,10 @@ int odp_cls_capability(odp_cls_capability_t *capability)
capability->supported_terms.bit.ethtype_x = 1;
capability->supported_terms.bit.vlan_id_0 = 1;
capability->supported_terms.bit.vlan_id_x = 1;
+ capability->supported_terms.bit.vlan_pcp_0 = 1;
capability->supported_terms.bit.dmac = 1;
capability->supported_terms.bit.ip_proto = 1;
+ capability->supported_terms.bit.ip_dscp = 1;
capability->supported_terms.bit.udp_dport = 1;
capability->supported_terms.bit.udp_sport = 1;
capability->supported_terms.bit.tcp_dport = 1;
@@ -595,11 +597,8 @@ int odp_cos_with_l2_priority(odp_pktio_t pktio_in,
return 0;
}
-int odp_cos_with_l3_qos(odp_pktio_t pktio_in,
- uint32_t num_qos,
- uint8_t qos_table[],
- odp_cos_t cos_table[],
- odp_bool_t l3_preference)
+int ODP_DEPRECATE(odp_cos_with_l3_qos)(odp_pktio_t pktio_in, uint32_t num_qos, uint8_t qos_table[],
+ odp_cos_t cos_table[], odp_bool_t l3_preference)
{
pmr_l3_cos_t *l3_cos;
uint32_t i;
@@ -644,7 +643,11 @@ static int pmr_create_term(pmr_term_value_t *value,
value->range_term = param->range_term;
switch (term) {
+ case ODP_PMR_VLAN_PCP_0:
+ /* Fall through */
case ODP_PMR_IPPROTO:
+ /* Fall through */
+ case ODP_PMR_IP_DSCP:
size = 1;
break;
@@ -856,23 +859,50 @@ static inline int verify_pmr_packet_len(odp_packet_hdr_t *pkt_hdr,
return 0;
}
-static inline int verify_pmr_ip_proto(const uint8_t *pkt_addr,
- odp_packet_hdr_t *pkt_hdr,
- pmr_term_value_t *term_value)
+static inline int verify_pmr_ipv4_proto(const _odp_ipv4hdr_t *ipv4, pmr_term_value_t *term_value)
{
- const _odp_ipv4hdr_t *ip;
uint8_t proto;
- if (!pkt_hdr->p.input_flags.ipv4)
- return 0;
- ip = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
- proto = ip->proto;
+ proto = ipv4->proto;
if (term_value->match.value == (proto & term_value->match.mask))
return 1;
return 0;
}
+static inline int verify_pmr_ipv6_next_hdr(const _odp_ipv6hdr_t *ipv6, pmr_term_value_t *term_value)
+{
+ uint8_t next_hdr;
+
+ next_hdr = ipv6->next_hdr;
+ if (term_value->match.value == (next_hdr & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv4_dscp(const _odp_ipv4hdr_t *ipv4, pmr_term_value_t *term_value)
+{
+ uint8_t dscp;
+
+ dscp = _ODP_IPV4HDR_DSCP(ipv4->tos);
+ if (term_value->match.value == (dscp & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
+static inline int verify_pmr_ipv6_dscp(const _odp_ipv6hdr_t *ipv6, pmr_term_value_t *term_value)
+{
+ uint8_t dscp;
+
+ dscp = _ODP_IPV6HDR_DSCP(odp_be_to_cpu_32(ipv6->ver_tc_flow));
+ if (term_value->match.value == (dscp & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
static inline int verify_pmr_ipv4_saddr(const uint8_t *pkt_addr,
odp_packet_hdr_t *pkt_hdr,
pmr_term_value_t *term_value)
@@ -1095,6 +1125,28 @@ static inline int verify_pmr_vlan_id_x(const uint8_t *pkt_addr,
return 0;
}
+static inline int verify_pmr_vlan_pcp_0(const uint8_t *pkt_addr, odp_packet_hdr_t *pkt_hdr,
+ pmr_term_value_t *term_value)
+{
+ const _odp_ethhdr_t *eth;
+ const _odp_vlanhdr_t *vlan;
+ uint16_t tci;
+ uint8_t pcp;
+
+ if (!packet_hdr_has_eth(pkt_hdr) || !pkt_hdr->p.input_flags.vlan)
+ return 0;
+
+ eth = (const _odp_ethhdr_t *)(pkt_addr + pkt_hdr->p.l2_offset);
+ vlan = (const _odp_vlanhdr_t *)(eth + 1);
+ tci = odp_be_to_cpu_16(vlan->tci);
+ pcp = tci >> _ODP_VLANHDR_PCP_SHIFT;
+
+ if (term_value->match.value == (pcp & term_value->match.mask))
+ return 1;
+
+ return 0;
+}
+
static inline int verify_pmr_ipsec_spi(const uint8_t *pkt_addr,
odp_packet_hdr_t *pkt_hdr,
pmr_term_value_t *term_value)
@@ -1241,6 +1293,8 @@ static int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr,
int num_pmr;
int i;
pmr_term_value_t *term_value;
+ const _odp_ipv4hdr_t *ipv4 = NULL;
+ const _odp_ipv6hdr_t *ipv6 = NULL;
/* Locking is not required, as PMR rule delivery for in-flight
packets during a PMR change is indeterminate */
@@ -1249,6 +1303,11 @@ static int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr,
return 0;
num_pmr = pmr->s.num_pmr;
+ if (pkt_hdr->p.input_flags.ipv4)
+ ipv4 = (const _odp_ipv4hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+ if (pkt_hdr->p.input_flags.ipv6)
+ ipv6 = (const _odp_ipv6hdr_t *)(pkt_addr + pkt_hdr->p.l3_offset);
+
/* Iterate through list of PMR Term values in a pmr_t */
for (i = 0; i < num_pmr; i++) {
term_value = &pmr->s.pmr_term_value[i];
@@ -1277,15 +1336,36 @@ static int verify_pmr(pmr_t *pmr, const uint8_t *pkt_addr,
term_value))
pmr_failure = 1;
break;
+ case ODP_PMR_VLAN_PCP_0:
+ if (!verify_pmr_vlan_pcp_0(pkt_addr, pkt_hdr, term_value))
+ pmr_failure = 1;
+ break;
case ODP_PMR_DMAC:
if (!verify_pmr_dmac(pkt_addr, pkt_hdr,
term_value))
pmr_failure = 1;
break;
case ODP_PMR_IPPROTO:
- if (!verify_pmr_ip_proto(pkt_addr, pkt_hdr,
- term_value))
+ if (ipv4) {
+ if (!verify_pmr_ipv4_proto(ipv4, term_value))
+ pmr_failure = 1;
+ } else if (ipv6) {
+ if (!verify_pmr_ipv6_next_hdr(ipv6, term_value))
+ pmr_failure = 1;
+ } else {
pmr_failure = 1;
+ }
+ break;
+ case ODP_PMR_IP_DSCP:
+ if (ipv4) {
+ if (!verify_pmr_ipv4_dscp(ipv4, term_value))
+ pmr_failure = 1;
+ } else if (ipv6) {
+ if (!verify_pmr_ipv6_dscp(ipv6, term_value))
+ pmr_failure = 1;
+ } else {
+ pmr_failure = 1;
+ }
break;
case ODP_PMR_UDP_DPORT:
if (!verify_pmr_udp_dport(pkt_addr, pkt_hdr,
@@ -1381,12 +1461,18 @@ static const char *format_pmr_name(odp_cls_pmr_term_t pmr_term)
case ODP_PMR_VLAN_ID_X:
name = "PMR_VLAN_ID_X";
break;
+ case ODP_PMR_VLAN_PCP_0:
+ name = "PMR_VLAN_PCP_0";
+ break;
case ODP_PMR_DMAC:
name = "PMR_DMAC";
break;
case ODP_PMR_IPPROTO:
name = "PMR_IPPROTO";
break;
+ case ODP_PMR_IP_DSCP:
+ name = "PMR_IP_DSCP";
+ break;
case ODP_PMR_UDP_DPORT:
name = "PMR_UDP_DPORT";
break;
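
For illustration: the two new terms take 1-byte values, like ODP_PMR_IPPROTO.
A sketch of creating a DSCP match rule with the existing classification API
(src_cos and dst_cos are assumed to exist; ODP_PMR_VLAN_PCP_0 is used the same
way with a 3-bit value and e.g. mask 0x7):

	odp_pmr_param_t pmr_param;
	uint8_t val = 46;     /* DSCP EF */
	uint8_t mask = 0x3f;  /* full 6-bit DSCP field */
	odp_pmr_t pmr;

	odp_cls_pmr_param_init(&pmr_param);
	pmr_param.term = ODP_PMR_IP_DSCP;
	pmr_param.match.value = &val;
	pmr_param.match.mask = &mask;
	pmr_param.val_sz = 1;

	pmr = odp_cls_pmr_create(&pmr_param, 1, src_cos, dst_cos);
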
diff --git a/platform/linux-generic/odp_crypto_null.c b/platform/linux-generic/odp_crypto_null.c
index f276d4659..8eb2332a1 100644
--- a/platform/linux-generic/odp_crypto_null.c
+++ b/platform/linux-generic/odp_crypto_null.c
@@ -299,7 +299,6 @@ odp_crypto_operation(odp_crypto_op_param_t *param,
* We cannot fail since odp_crypto_op() has already processed
* the packet. Let's indicate error in the result instead.
*/
- packet_hdr(out_pkt)->p.flags.crypto_err = 1;
packet_result.ok = false;
}
@@ -496,7 +495,6 @@ int crypto_int(odp_packet_t pkt_in,
odp_bool_t allocated = false;
odp_packet_t out_pkt = *pkt_out;
odp_crypto_packet_result_t *op_result;
- odp_packet_hdr_t *pkt_hdr;
session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
@@ -546,9 +544,6 @@ int crypto_int(odp_packet_t pkt_in,
op_result->auth_status.hw_err = ODP_CRYPTO_HW_ERR_NONE;
op_result->ok = true;
- pkt_hdr = packet_hdr(out_pkt);
- pkt_hdr->p.flags.crypto_err = !op_result->ok;
-
/* Synchronous, simply return results */
*pkt_out = out_pkt;
diff --git a/platform/linux-generic/odp_crypto_openssl.c b/platform/linux-generic/odp_crypto_openssl.c
index 6ff0ac041..9f0978b49 100644
--- a/platform/linux-generic/odp_crypto_openssl.c
+++ b/platform/linux-generic/odp_crypto_openssl.c
@@ -18,6 +18,7 @@
#include <odp/api/random.h>
#include <odp/api/plat/packet_inlines.h>
#include <odp/api/plat/thread_inlines.h>
+#include <odp_macros_internal.h>
#include <odp_packet_internal.h>
#include <odp/api/plat/queue_inlines.h>
#include <odp_global_data.h>
@@ -1013,7 +1014,7 @@ static inline int internal_crypt(EVP_CIPHER_CTX *ctx,
rc = EVP_update(ctx, in_addr, &out_len, in_addr, len);
if (odp_unlikely(rc != 1))
goto err;
- ODP_ASSERT(CHECK_IS_POWER2(block_len));
+ ODP_ASSERT(_ODP_CHECK_IS_POWER2(block_len));
buffered = len & (block_len - 1);
if (odp_unlikely(out_len + buffered != len))
goto err;
@@ -2531,7 +2532,6 @@ odp_crypto_operation(odp_crypto_op_param_t *param,
* We cannot fail since odp_crypto_op() has already processed
* the packet. Let's indicate error in the result instead.
*/
- packet_hdr(out_pkt)->p.flags.crypto_err = 1;
packet_result.ok = false;
}
@@ -2810,7 +2810,6 @@ int crypto_int(odp_packet_t pkt_in,
odp_bool_t allocated = false;
odp_packet_t out_pkt = *pkt_out;
odp_crypto_packet_result_t *op_result;
- odp_packet_hdr_t *pkt_hdr;
session = (odp_crypto_generic_session_t *)(intptr_t)param->session;
@@ -2873,9 +2872,6 @@ int crypto_int(odp_packet_t pkt_in,
(rc_cipher == ODP_CRYPTO_ALG_ERR_NONE) &&
(rc_auth == ODP_CRYPTO_ALG_ERR_NONE);
- pkt_hdr = packet_hdr(out_pkt);
- pkt_hdr->p.flags.crypto_err = !op_result->ok;
-
/* Synchronous, simply return results */
*pkt_out = out_pkt;
diff --git a/platform/linux-generic/odp_ipsec.c b/platform/linux-generic/odp_ipsec.c
index 29f0e4bcd..04b4b6aeb 100644
--- a/platform/linux-generic/odp_ipsec.c
+++ b/platform/linux-generic/odp_ipsec.c
@@ -5,19 +5,20 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/byteorder.h>
#include <odp/api/ipsec.h>
#include <odp/api/chksum.h>
#include <odp/api/plat/packet_inlines.h>
-#include <odp/api/byteorder.h>
#include <odp/api/plat/byteorder_inlines.h>
+#include <odp/api/plat/queue_inlines.h>
#include <odp_global_data.h>
#include <odp_init_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp_packet_internal.h>
#include <odp_ipsec_internal.h>
-#include <odp/api/plat/queue_inlines.h>
#include <odp_classification_internal.h>
#include <odp_libconfig_internal.h>
#include <odp_schedule_if.h>
@@ -1099,7 +1100,7 @@ uint64_t ipsec_seq_no(ipsec_sa_t *ipsec_sa)
*/
static inline uint32_t ipsec_padded_len(uint32_t len, uint32_t pad_mask)
{
- ODP_ASSERT(CHECK_IS_POWER2(pad_mask + 1));
+ ODP_ASSERT(_ODP_CHECK_IS_POWER2(pad_mask + 1));
return (len + pad_mask) & ~pad_mask;
}
diff --git a/platform/linux-generic/odp_ipsec_sad.c b/platform/linux-generic/odp_ipsec_sad.c
index 4e4057ede..64d7b6fdf 100644
--- a/platform/linux-generic/odp_ipsec_sad.c
+++ b/platform/linux-generic/odp_ipsec_sad.c
@@ -10,16 +10,17 @@
#include <odp/api/random.h>
#include <odp/api/shared_memory.h>
+#include <odp/api/plat/atomic_inlines.h>
+#include <odp/api/plat/cpu_inlines.h>
+
#include <odp_config_internal.h>
#include <odp_init_internal.h>
#include <odp_debug_internal.h>
#include <odp_ipsec_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ring_mpmc_internal.h>
#include <odp_global_data.h>
-#include <odp/api/plat/atomic_inlines.h>
-#include <odp/api/plat/cpu_inlines.h>
-
#include <string.h>
#include <inttypes.h>
@@ -430,7 +431,7 @@ static uint32_t esp_block_len_to_mask(uint32_t block_len)
if (block_len < 4)
block_len = 4;
- ODP_ASSERT(CHECK_IS_POWER2(block_len));
+ ODP_ASSERT(_ODP_CHECK_IS_POWER2(block_len));
return block_len - 1;
}
diff --git a/platform/linux-generic/odp_ishm.c b/platform/linux-generic/odp_ishm.c
index dea4d56f0..0f7c48f35 100644
--- a/platform/linux-generic/odp_ishm.c
+++ b/platform/linux-generic/odp_ishm.c
@@ -45,7 +45,6 @@
#include <odp_errno_define.h>
#include <odp_shm_internal.h>
#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
#include <odp_fdserver_internal.h>
#include <odp_shm_internal.h>
#include <odp_ishmphy_internal.h>
diff --git a/platform/linux-generic/odp_ishmphy.c b/platform/linux-generic/odp_ishmphy.c
index 64eb2a732..efaf12c7b 100644
--- a/platform/linux-generic/odp_ishmphy.c
+++ b/platform/linux-generic/odp_ishmphy.c
@@ -14,7 +14,6 @@
#include <odp/api/system_info.h>
#include <odp/api/debug.h>
#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
#include <odp_shm_internal.h>
#include <odp_ishmphy_internal.h>
diff --git a/platform/linux-generic/odp_ishmpool.c b/platform/linux-generic/odp_ishmpool.c
index 818b0a132..4186444fa 100644
--- a/platform/linux-generic/odp_ishmpool.c
+++ b/platform/linux-generic/odp_ishmpool.c
@@ -43,14 +43,17 @@
*/
#include <odp_posix_extensions.h>
+
#include <odp/api/spinlock.h>
#include <odp/api/align.h>
#include <odp/api/debug.h>
+
#include <odp_shm_internal.h>
#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
+#include <odp_macros_internal.h>
#include <odp_shm_internal.h>
#include <odp_ishmpool_internal.h>
+
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
@@ -203,14 +206,14 @@ static pool_t *_odp_ishmbud_pool_create(const char *pool_name, int store_idx,
max_nb_bblock = (1 << (order - min_order));
/* space needed for the control area (padded to cache line size)*/
- control_sz = ROUNDUP_CACHE_LINE(sizeof(_odp_ishm_pool_ctrl_t));
+ control_sz = _ODP_ROUNDUP_CACHE_LINE(sizeof(_odp_ishm_pool_ctrl_t));
/* space needed for 'order' free bblock list heads: */
/* Note that only lists from min_order to order are really used.*/
- free_head_sz = ROUNDUP_CACHE_LINE(sizeof(void *) * (order + 1));
+ free_head_sz = _ODP_ROUNDUP_CACHE_LINE(sizeof(void *) * (order + 1));
/* space needed for order -i.e. size- storage of alloc'd bblock:*/
- saved_order_sz = ROUNDUP_CACHE_LINE(max_nb_bblock * sizeof(uint8_t));
+ saved_order_sz = _ODP_ROUNDUP_CACHE_LINE(max_nb_bblock * sizeof(uint8_t));
/* space needed for user area is 2^order bytes: */
user_sz = 1ULL << order;
@@ -455,7 +458,7 @@ static pool_t *_odp_ishmslab_pool_create(const char *pool_name, int store_idx,
nb_sblock = (size / elt_size) + ((size % elt_size) ? 1 : 0);
/* space needed for the control area (padded to cache line size)*/
- control_sz = ROUNDUP_CACHE_LINE(sizeof(_odp_ishm_pool_ctrl_t));
+ control_sz = _ODP_ROUNDUP_CACHE_LINE(sizeof(_odp_ishm_pool_ctrl_t));
/* space needed for user area is : */
user_sz = nb_sblock * elt_size;
diff --git a/platform/linux-generic/odp_name_table.c b/platform/linux-generic/odp_name_table.c
index a9ce6cad3..fbb35cf00 100644
--- a/platform/linux-generic/odp_name_table.c
+++ b/platform/linux-generic/odp_name_table.c
@@ -248,7 +248,7 @@ static uint32_t name_tbl_free_list_add(name_tbl_t *name_tbl,
name_tbl_id = name_tbl->base_id | first_idx;
entry_idx = first_idx;
- num_added = MIN(num_to_add, name_tbl->num_avail_to_add);
+ num_added = _ODP_MIN(num_to_add, name_tbl->num_avail_to_add);
if (num_added == 0)
return 0;
@@ -299,7 +299,7 @@ static int new_name_tbl_add(void)
name_tbls_idx = name_tbls.num_name_tbls;
num_entries = INITIAL_NAME_TBL_SIZE << name_tbls_idx;
new_name_tbl = name_tbl_alloc(name_tbls_idx, num_entries);
- name_tbl_free_list_add(new_name_tbl, MIN(num_entries, UINT32_C(256)));
+ name_tbl_free_list_add(new_name_tbl, _ODP_MIN(num_entries, UINT32_C(256)));
name_tbls.tbls[name_tbls_idx] = new_name_tbl;
name_tbls.avail_space_bit_mask |= 1 << name_tbls_idx;
@@ -389,7 +389,7 @@ static hash_tbl_entry_t make_hash_tbl_entry(name_tbl_entry_t *name_tbl_entry,
hash_tbl_entry_t hash_tbl_entry;
uint32_t new_entry_cnt;
- new_entry_cnt = MIN(entry_cnt + 1, UINT32_C(0x3F));
+ new_entry_cnt = _ODP_MIN(entry_cnt + 1, UINT32_C(0x3F));
hash_tbl_entry = (hash_tbl_entry_t)(uintptr_t)name_tbl_entry;
hash_tbl_entry &= ~0x3F;
hash_tbl_entry |= new_entry_cnt;
@@ -1008,7 +1008,7 @@ static uint32_t level2_hash_histo(secondary_hash_tbl_t *hash_tbl,
collisions = linked_list_len(name_tbl_entry);
}
- level2_histo[MIN(collisions, UINT32_C(256))]++;
+ level2_histo[_ODP_MIN(collisions, UINT32_C(256))]++;
total_collisions += collisions;
}
@@ -1040,7 +1040,7 @@ static uint32_t level1_hash_histo(secondary_hash_tbl_t *hash_tbl,
level2_histo);
}
- level1_histo[MIN(collisions, UINT32_C(256))]++;
+ level1_histo[_ODP_MIN(collisions, UINT32_C(256))]++;
total_collisions += collisions;
}
@@ -1150,7 +1150,7 @@ void _odp_int_name_tbl_stats_print(void)
memset(primary_hash_histo, 0, sizeof(primary_hash_histo));
for (idx = 0; idx < PRIMARY_HASH_TBL_SIZE; idx++) {
collisions =
- MIN(name_hash_tbl.hash_collisions[idx], UINT32_C(256));
+ _ODP_MIN(name_hash_tbl.hash_collisions[idx], UINT32_C(256));
primary_hash_histo[collisions]++;
}
diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c
index b0d0fc03c..07e9c2d4d 100644
--- a/platform/linux-generic/odp_packet.c
+++ b/platform/linux-generic/odp_packet.c
@@ -1241,7 +1241,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
if (seglen >= len) {
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign == 0)
return 0;
shift = align - misalign;
@@ -1251,7 +1251,7 @@ int odp_packet_align(odp_packet_t *pkt, uint32_t offset, uint32_t len,
shift = len - seglen;
uaddr -= shift;
misalign = align <= 1 ? 0 :
- ROUNDUP_ALIGN(uaddr, align) - uaddr;
+ _ODP_ROUNDUP_ALIGN(uaddr, align) - uaddr;
if (misalign)
shift += align - misalign;
}
diff --git a/platform/linux-generic/odp_pcapng.c b/platform/linux-generic/odp_pcapng.c
index 6bdd1fd69..2a4b7ad04 100644
--- a/platform/linux-generic/odp_pcapng.c
+++ b/platform/linux-generic/odp_pcapng.c
@@ -130,8 +130,7 @@ static void pcapng_drain_fifo(int fd)
static void inotify_event_handle(pktio_entry_t *entry, int qidx,
struct inotify_event *event)
{
- int mtu = MAX(odp_pktin_maxlen(entry->s.handle),
- odp_pktout_maxlen(entry->s.handle));
+ int mtu = _ODP_MAX(odp_pktin_maxlen(entry->s.handle), odp_pktout_maxlen(entry->s.handle));
if (event->mask & IN_OPEN) {
int ret;
@@ -173,8 +172,7 @@ static void get_pcapng_fifo_name(char *pcapng_entry, size_t len,
static int get_qidx_from_fifo(pktio_entry_t *entry, char *name)
{
- unsigned int max_queue =
- MAX(entry->s.num_in_queue, entry->s.num_out_queue);
+ unsigned int max_queue = _ODP_MAX(entry->s.num_in_queue, entry->s.num_out_queue);
unsigned int i;
for (i = 0; i < max_queue; i++) {
@@ -291,8 +289,7 @@ int _odp_pcapng_start(pktio_entry_t *entry)
int ret = -1, fd;
pthread_attr_t attr;
unsigned int i;
- unsigned int max_queue =
- MAX(entry->s.num_in_queue, entry->s.num_out_queue);
+ unsigned int max_queue = _ODP_MAX(entry->s.num_in_queue, entry->s.num_out_queue);
int fifo_sz;
fifo_sz = get_fifo_max_size();
@@ -396,8 +393,7 @@ void _odp_pcapng_stop(pktio_entry_t *entry)
{
int ret;
unsigned int i;
- unsigned int max_queue =
- MAX(entry->s.num_in_queue, entry->s.num_out_queue);
+ unsigned int max_queue = _ODP_MAX(entry->s.num_in_queue, entry->s.num_out_queue);
odp_spinlock_lock(&pcapng_gbl->lock);
@@ -528,7 +524,7 @@ int _odp_pcapng_write_pkts(pktio_entry_t *entry, int qidx,
NULL);
if (block_len + sizeof(epb[i]) +
- ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN) +
+ _ODP_ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN) +
sizeof(uint32_t) > PIPE_BUF) {
wlen = write_fifo(fd, packet_iov, iovcnt);
if (wlen > 0) {
@@ -539,7 +535,7 @@ int _odp_pcapng_write_pkts(pktio_entry_t *entry, int qidx,
}
epb[i].block_type = PCAPNG_BLOCK_TYPE_EPB;
epb[i].block_total_length = sizeof(epb[i]) +
- ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN) +
+ _ODP_ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN) +
PCAPNG_DATA_ALIGN;
epb[i].interface_idx = 0;
epb[i].timestamp_high =
@@ -556,8 +552,7 @@ int _odp_pcapng_write_pkts(pktio_entry_t *entry, int qidx,
/* data */
packet_iov[iovcnt].iov_base = buf;
- packet_iov[iovcnt].iov_len =
- ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN);
+ packet_iov[iovcnt].iov_len = _ODP_ROUNDUP_ALIGN(seg_len, PCAPNG_DATA_ALIGN);
block_len += packet_iov[iovcnt].iov_len;
iovcnt++;
diff --git a/platform/linux-generic/odp_pkt_queue.c b/platform/linux-generic/odp_pkt_queue.c
index 81ae334c1..e977efc24 100644
--- a/platform/linux-generic/odp_pkt_queue.c
+++ b/platform/linux-generic/odp_pkt_queue.c
@@ -125,7 +125,7 @@ static int pkt_queue_free_list_add(queue_pool_t *pool,
}
/* Now add as many queue_blks to the free list as... */
- blks_to_add = MIN(num_blks - start_idx, num_queue_blks);
+ blks_to_add = _ODP_MIN(num_blks - start_idx, num_queue_blks);
queue_blk = &queue_blks->blks[start_idx];
for (cnt = 1; cnt <= blks_to_add; cnt++) {
queue_blk->next_queue_blk_idx = start_idx + cnt;
@@ -223,7 +223,7 @@ _odp_int_queue_pool_t _odp_queue_pool_create(uint32_t max_num_queues,
/* Initialize the queue_blk_tbl_sizes array based upon the
* max_queued_pkts.
*/
- max_queued_pkts = MAX(max_queued_pkts, 64 * UINT32_C(1024));
+ max_queued_pkts = _ODP_MAX(max_queued_pkts, 64 * UINT32_C(1024));
queue_region_desc_init(pool, 0, max_queued_pkts / 4);
queue_region_desc_init(pool, 1, max_queued_pkts / 64);
queue_region_desc_init(pool, 2, max_queued_pkts / 64);
@@ -235,7 +235,7 @@ _odp_int_queue_pool_t _odp_queue_pool_create(uint32_t max_num_queues,
/* Now allocate the first queue_blk_tbl and add its blks to the free
* list. Replenish the queue_blk_t free list.
*/
- initial_free_list_size = MIN(64 * UINT32_C(1024), max_queued_pkts / 4);
+ initial_free_list_size = _ODP_MIN(64 * UINT32_C(1024), max_queued_pkts / 4);
rc = pkt_queue_free_list_add(pool, initial_free_list_size);
if (rc < 0) {
free(pool->queue_num_tbl);
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index defdeb4fb..739665cb9 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -15,9 +15,9 @@
#include <odp_pool_internal.h>
#include <odp_init_internal.h>
#include <odp_packet_internal.h>
-#include <odp_packet_dpdk.h>
#include <odp_config_internal.h>
#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ring_ptr_internal.h>
#include <odp_global_data.h>
#include <odp_libconfig_internal.h>
@@ -63,6 +63,8 @@ typedef struct pool_local_t {
} pool_local_t;
+extern const _odp_pool_mem_src_ops_t * const _odp_pool_mem_src_ops[];
+
pool_global_t *_odp_pool_glb;
static __thread pool_local_t local;
@@ -239,7 +241,7 @@ static int read_config_file(pool_global_t *pool_glb)
if (val == 0)
align = ODP_CACHE_LINE_SIZE;
- if (!CHECK_IS_POWER2(align)) {
+ if (!_ODP_CHECK_IS_POWER2(align)) {
ODP_ERR("Not a power of two: %s = %i\n", str, val);
return -1;
}
@@ -257,7 +259,7 @@ static int read_config_file(pool_global_t *pool_glb)
if (val == 0)
align = ODP_CACHE_LINE_SIZE;
- if (!CHECK_IS_POWER2(align)) {
+ if (!_ODP_CHECK_IS_POWER2(align)) {
ODP_ERR("Not a power of two: %s = %i\n", str, val);
return -1;
}
@@ -617,7 +619,7 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt, ui
sprintf(uarea_name, "pool_%03i_uarea_%s", pool->pool_idx, pool->name);
pool->param_uarea_size = uarea_size;
- pool->uarea_size = ROUNDUP_CACHE_LINE(uarea_size);
+ pool->uarea_size = _ODP_ROUNDUP_CACHE_LINE(uarea_size);
pool->uarea_shm_size = num_pkt * (uint64_t)pool->uarea_size;
shm = odp_shm_reserve(uarea_name, pool->uarea_shm_size, ODP_PAGE_SIZE, shmflags);
@@ -630,6 +632,26 @@ static int reserve_uarea(pool_t *pool, uint32_t uarea_size, uint32_t num_pkt, ui
return 0;
}
+static void set_mem_src_ops(pool_t *pool)
+{
+ odp_bool_t is_active_found = false;
+
+ pool->mem_src_ops = NULL;
+
+ for (int i = 0; _odp_pool_mem_src_ops[i]; i++) {
+ if (!is_active_found) {
+ if (_odp_pool_mem_src_ops[i]->is_active()) {
+ is_active_found = true;
+ pool->mem_src_ops = _odp_pool_mem_src_ops[i];
+ ODP_DBG("Packet pool as a memory source for: %s\n",
+ pool->mem_src_ops->name);
+ }
+ } else if (_odp_pool_mem_src_ops[i]->is_active()) {
+ _odp_pool_mem_src_ops[i]->force_disable();
+ }
+ }
+}
+
/* Create pool according to params. Actual type of the pool is type_2, which is recorded for pool
* info calls. */
odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
@@ -659,7 +681,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
uint32_t align_req = params->pkt.align;
if (align_req &&
- (!CHECK_IS_POWER2(align_req) ||
+ (!_ODP_CHECK_IS_POWER2(align_req) ||
align_req > _odp_pool_glb->config.pkt_base_align)) {
ODP_ERR("Bad align requirement\n");
return ODP_POOL_INVALID;
@@ -676,7 +698,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
/* Validate requested buffer alignment */
if (align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
- align != ROUNDDOWN_POWER2(align, align)) {
+ align != _ODP_ROUNDDOWN_POWER2(align, align)) {
ODP_ERR("Bad align requirement\n");
return ODP_POOL_INVALID;
}
@@ -773,26 +795,29 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
pool->type_2 = type_2;
pool->params = *params;
pool->block_offset = 0;
+ set_mem_src_ops(pool);
if (type == ODP_POOL_PACKET) {
- uint32_t dpdk_obj_size;
+ uint32_t adj_size;
- hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_packet_hdr_t));
block_size = hdr_size + align + headroom + seg_len + tailroom;
- /* Calculate extra space required for storing DPDK objects and
- * mbuf headers. NOP if no DPDK pktio used or zero-copy mode is
- * disabled. */
- dpdk_obj_size = _odp_dpdk_pool_obj_size(pool, block_size);
- if (!dpdk_obj_size) {
- ODP_ERR("Calculating DPDK mempool obj size failed\n");
- return ODP_POOL_INVALID;
- }
- if (dpdk_obj_size != block_size) {
- shmflags |= ODP_SHM_HP;
- block_size = dpdk_obj_size;
- } else {
- block_size = ROUNDUP_CACHE_LINE(block_size);
+ adj_size = block_size;
+
+ if (pool->mem_src_ops && pool->mem_src_ops->adjust_size) {
+ pool->mem_src_ops->adjust_size(pool->mem_src_data, &adj_size,
+ &pool->block_offset, &shmflags);
+
+ if (!adj_size) {
+ ODP_ERR("Calculating adjusted block size failed\n");
+ return ODP_POOL_INVALID;
+ }
}
+
+ if (adj_size != block_size)
+ block_size = adj_size;
+ else
+ block_size = _ODP_ROUNDUP_CACHE_LINE(block_size);
} else {
/* Header size is rounded up to cache line size, so the
* following data can be cache line aligned without extra
@@ -801,13 +826,13 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
align - ODP_CACHE_LINE_SIZE : 0;
if (type == ODP_POOL_BUFFER)
- hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t));
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_buffer_hdr_t));
else if (type == ODP_POOL_TIMEOUT)
- hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_timeout_hdr_t));
else
- hdr_size = ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
+ hdr_size = _ODP_ROUNDUP_CACHE_LINE(sizeof(odp_event_vector_hdr_t));
- block_size = ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len);
+ block_size = _ODP_ROUNDUP_CACHE_LINE(hdr_size + align_pad + seg_len);
}
/* Allocate extra memory for skipping packet buffers which cross huge
@@ -823,7 +848,7 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
if (num + 1 <= RING_SIZE_MIN)
ring_size = RING_SIZE_MIN;
else
- ring_size = ROUNDUP_POWER2_U32(num + 1);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(num + 1);
pool->ring_mask = ring_size - 1;
pool->num = num;
@@ -835,8 +860,6 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
pool->tailroom = tailroom;
pool->block_size = block_size;
pool->shm_size = (num + num_extra) * (uint64_t)block_size;
- pool->ext_desc = NULL;
- pool->ext_destroy = NULL;
set_pool_cache_size(pool, cache_size);
@@ -863,12 +886,18 @@ odp_pool_t _odp_pool_create(const char *name, const odp_pool_param_t *params,
ring_ptr_init(&pool->ring->hdr);
init_buffers(pool);
- /* Create zero-copy DPDK memory pool. NOP if zero-copy is disabled. */
- if (type == ODP_POOL_PACKET && _odp_dpdk_pool_create(pool)) {
- ODP_ERR("Creating DPDK packet pool failed\n");
+ if (type == ODP_POOL_PACKET && pool->mem_src_ops && pool->mem_src_ops->bind &&
+ pool->mem_src_ops->bind(pool->mem_src_data, pool)) {
+ ODP_ERR("Binding pool as memory source failed\n");
goto error;
}
+	/* Total ops is derived from the alloc_ops and free_ops counters */
+ if (pool->params.stats.bit.total_ops) {
+ pool->params.stats.bit.alloc_ops = 1;
+ pool->params.stats.bit.free_ops = 1;
+ }
+
/* Reset pool stats */
odp_atomic_init_u64(&pool->stats.alloc_ops, 0);
odp_atomic_init_u64(&pool->stats.alloc_fails, 0);
@@ -1074,12 +1103,8 @@ int odp_pool_destroy(odp_pool_t pool_hdl)
return -1;
}
- /* Destroy external DPDK mempool */
- if (pool->ext_destroy) {
- pool->ext_destroy(pool->ext_desc);
- pool->ext_destroy = NULL;
- pool->ext_desc = NULL;
- }
+ if (pool->type == ODP_POOL_PACKET && pool->mem_src_ops && pool->mem_src_ops->unbind)
+ pool->mem_src_ops->unbind(pool->mem_src_data);
/* Make sure local caches are empty */
for (i = 0; i < ODP_THREAD_COUNT_MAX; i++)
@@ -1369,7 +1394,7 @@ int odp_pool_capability(odp_pool_capability_t *capa)
supported_stats.bit.alloc_ops = CONFIG_POOL_STATISTICS;
supported_stats.bit.alloc_fails = CONFIG_POOL_STATISTICS;
supported_stats.bit.free_ops = CONFIG_POOL_STATISTICS;
- supported_stats.bit.total_ops = 0;
+ supported_stats.bit.total_ops = CONFIG_POOL_STATISTICS;
supported_stats.bit.cache_available = 1;
supported_stats.bit.cache_alloc_ops = CONFIG_POOL_STATISTICS;
supported_stats.bit.cache_free_ops = CONFIG_POOL_STATISTICS;
@@ -1452,6 +1477,8 @@ void odp_pool_print(odp_pool_t pool_hdl)
ODP_PRINT(" uarea base addr %p\n", (void *)pool->uarea_base_addr);
ODP_PRINT(" cache size %u\n", pool->cache_size);
ODP_PRINT(" burst size %u\n", pool->burst_size);
+ ODP_PRINT(" mem src %s\n",
+ pool->mem_src_ops ? pool->mem_src_ops->name : "(none)");
ODP_PRINT("\n");
}
@@ -1574,6 +1601,9 @@ int odp_pool_stats(odp_pool_t pool_hdl, odp_pool_stats_t *stats)
if (pool->params.stats.bit.free_ops)
stats->free_ops = odp_atomic_load_u64(&pool->stats.free_ops);
+ if (pool->params.stats.bit.total_ops)
+ stats->total_ops = stats->alloc_ops + stats->free_ops;
+
if (pool->params.stats.bit.cache_available)
stats->cache_available = cache_total_available(pool);
@@ -1798,7 +1828,7 @@ odp_pool_t odp_pool_ext_create(const char *name, const odp_pool_ext_param_t *par
if (num_buf + 1 <= RING_SIZE_MIN)
ring_size = RING_SIZE_MIN;
else
- ring_size = ROUNDUP_POWER2_U32(num_buf + 1);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(num_buf + 1);
pool->ring_mask = ring_size - 1;
pool->type = param->type;
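
For illustration: total_ops is now reported as the sum of the alloc_ops and
free_ops counters, and enabling it implicitly enables both. A sketch of a
reader, assuming <stdio.h> and <inttypes.h> and with error handling elided:

	odp_pool_param_t params;
	odp_pool_stats_t stats;

	odp_pool_param_init(&params);
	params.type = ODP_POOL_PACKET;
	params.pkt.num = 1024;
	params.pkt.len = 1518;
	params.stats.bit.total_ops = 1; /* implies alloc_ops and free_ops */

	odp_pool_t pool = odp_pool_create("pkt_pool", &params);

	/* ... allocate and free packets ... */

	if (pool != ODP_POOL_INVALID && odp_pool_stats(pool, &stats) == 0)
		printf("total ops: %" PRIu64 "\n", stats.total_ops);
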
diff --git a/platform/linux-generic/odp_pool_mem_src_ops.c b/platform/linux-generic/odp_pool_mem_src_ops.c
new file mode 100644
index 000000000..2f8dc2078
--- /dev/null
+++ b/platform/linux-generic/odp_pool_mem_src_ops.c
@@ -0,0 +1,22 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+#include <odp_pool_internal.h>
+
+extern const _odp_pool_mem_src_ops_t _odp_pool_dpdk_mem_src_ops;
+extern const _odp_pool_mem_src_ops_t _odp_pool_sock_xdp_mem_src_ops;
+
+/* List of available ODP packet pool memory source operations. Array must be NULL terminated */
+const _odp_pool_mem_src_ops_t * const _odp_pool_mem_src_ops[] = {
+#ifdef _ODP_PKTIO_DPDK
+ &_odp_pool_dpdk_mem_src_ops,
+#endif
+#ifdef _ODP_PKTIO_XDP
+ &_odp_pool_sock_xdp_mem_src_ops,
+#endif
+ NULL
+};
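
For illustration: a hypothetical third provider (such as the sketch after the
odp_pool_internal.h hunk above) would be registered the same way, before the
NULL terminator and guarded by its own config define:

	#ifdef _ODP_PKTIO_MY_SRC	/* hypothetical config define */
		&my_mem_src_ops,
	#endif
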
diff --git a/platform/linux-generic/odp_queue_basic.c b/platform/linux-generic/odp_queue_basic.c
index 5155bae4b..9a1abfd3b 100644
--- a/platform/linux-generic/odp_queue_basic.c
+++ b/platform/linux-generic/odp_queue_basic.c
@@ -28,6 +28,7 @@
#include <odp_global_data.h>
#include <odp_queue_basic_internal.h>
#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
#include <odp/api/plat/ticketlock_inlines.h>
#define LOCK(queue_ptr) odp_ticketlock_lock(&((queue_ptr)->s.lock))
@@ -77,7 +78,7 @@ static int read_config_file(queue_global_t *_odp_queue_glb)
val_u32 = val;
if (val_u32 > MAX_QUEUE_SIZE || val_u32 < MIN_QUEUE_SIZE ||
- !CHECK_IS_POWER2(val_u32)) {
+ !_ODP_CHECK_IS_POWER2(val_u32)) {
ODP_ERR("Bad value %s = %u\n", str, val_u32);
return -1;
}
@@ -95,7 +96,7 @@ static int read_config_file(queue_global_t *_odp_queue_glb)
if (val_u32 > _odp_queue_glb->config.max_queue_size ||
val_u32 < MIN_QUEUE_SIZE ||
- !CHECK_IS_POWER2(val_u32)) {
+ !_ODP_CHECK_IS_POWER2(val_u32)) {
ODP_ERR("Bad value %s = %u\n", str, val_u32);
return -1;
}
@@ -1039,7 +1040,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
queue_size = MIN_QUEUE_SIZE;
/* Round up if not already a power of two */
- queue_size = ROUNDUP_POWER2_U32(queue_size);
+ queue_size = _ODP_ROUNDUP_POWER2_U32(queue_size);
if (queue_size > _odp_queue_glb->config.max_queue_size) {
ODP_ERR("Too large queue size %u\n", queue_size);
diff --git a/platform/linux-generic/odp_queue_scalable.c b/platform/linux-generic/odp_queue_scalable.c
index c92ebeddd..881dbb985 100644
--- a/platform/linux-generic/odp_queue_scalable.c
+++ b/platform/linux-generic/odp_queue_scalable.c
@@ -104,7 +104,7 @@ static int queue_init(queue_entry_t *queue, const char *name,
sched_elem = &queue->s.sched_elem;
ring_size = param->size > 0 ?
- ROUNDUP_POWER2_U32(param->size) : CONFIG_SCAL_QUEUE_SIZE;
+ _ODP_ROUNDUP_POWER2_U32(param->size) : CONFIG_SCAL_QUEUE_SIZE;
strncpy(queue->s.name, name ? name : "", ODP_QUEUE_NAME_LEN - 1);
queue->s.name[ODP_QUEUE_NAME_LEN - 1] = 0;
memcpy(&queue->s.param, param, sizeof(odp_queue_param_t));
@@ -529,7 +529,7 @@ static inline int _odp_queue_enq(sched_elem_t *q,
*/
old_read = __atomic_load_n(&q->prod_read, __ATOMIC_ACQUIRE);
- actual = MIN(num, (int)((mask + 1) - (old_write - old_read)));
+ actual = _ODP_MIN(num, (int)((mask + 1) - (old_write - old_read)));
if (odp_unlikely(actual <= 0))
return 0;
@@ -591,7 +591,7 @@ int _odp_queue_enq_sp(sched_elem_t *q,
old_write = q->prod_write;
/* Consumer does store-release prod_read, we need load-acquire */
old_read = __atomic_load_n(&q->prod_read, __ATOMIC_ACQUIRE);
- actual = MIN(num, (int)((mask + 1) - (old_write - old_read)));
+ actual = _ODP_MIN(num, (int)((mask + 1) - (old_write - old_read)));
if (odp_unlikely(actual <= 0))
return 0;
@@ -704,7 +704,7 @@ int _odp_queue_deq_sc(sched_elem_t *q, odp_event_t *evp, int num)
old_read = q->cons_read;
/* Producer does store-release cons_write, we need load-acquire */
old_write = __atomic_load_n(&q->cons_write, __ATOMIC_ACQUIRE);
- actual = MIN(num, (int)(old_write - old_read));
+ actual = _ODP_MIN(num, (int)(old_write - old_read));
if (odp_unlikely(actual <= 0))
return 0;
@@ -757,7 +757,7 @@ int _odp_queue_deq(sched_elem_t *q, _odp_event_hdr_t *event_hdr[], int num)
/* Prefetch ring buffer array */
__builtin_prefetch(&q->cons_ring[old_read & mask], 0, 0);
- actual = MIN(num, (int)(old_write - old_read));
+ actual = _ODP_MIN(num, (int)(old_write - old_read));
if (odp_unlikely(actual <= 0))
return 0;
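Each of these _ODP_MIN() call sites computes how many slots can actually be moved: the producer/consumer counters are free-running uint32_t values, so used and free slot counts come from plain unsigned subtraction, which stays correct across wraparound as long as the ring size is a power of two. A small sketch under those assumptions:

#include <stdint.h>

/* Ring size is mask + 1; write/read are free-running 32-bit indices. */
static inline uint32_t ring_free_slots(uint32_t mask, uint32_t write, uint32_t read)
{
	/* e.g. write = 3 after wrapping, read = 0xFFFFFFFE: used = 5. */
	uint32_t used = write - read;

	return (mask + 1) - used;
}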
diff --git a/platform/linux-generic/odp_schedule_basic.c b/platform/linux-generic/odp_schedule_basic.c
index 3b22a5bb6..b6952bf1f 100644
--- a/platform/linux-generic/odp_schedule_basic.c
+++ b/platform/linux-generic/odp_schedule_basic.c
@@ -27,7 +27,6 @@
#include <odp/api/cpu.h>
#include <odp/api/thrmask.h>
#include <odp_config_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/sync.h>
#include <odp/api/packet_io.h>
#include <odp_ring_u32_internal.h>
@@ -37,6 +36,7 @@
#include <odp/api/plat/queue_inlines.h>
#include <odp_global_data.h>
#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
#include <string.h>
@@ -94,11 +94,11 @@ ODP_STATIC_ASSERT((QUEUE_LOAD * CONFIG_MAX_SCHED_QUEUES) < UINT32_MAX, "Load_val
#define MAX_RING_SIZE CONFIG_MAX_SCHED_QUEUES
/* For best performance, the number of queues should be a power of two. */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
"Number_of_queues_is_not_power_of_two");
/* Ring size must be power of two, so that mask can be used. */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(MAX_RING_SIZE),
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(MAX_RING_SIZE),
"Ring_size_is_not_power_of_two");
/* Thread ID is saved into uint16_t variable */
@@ -289,7 +289,7 @@ ODP_STATIC_ASSERT(MAX_SPREAD <= 256, "Spread_does_not_fit_8_bits");
ODP_STATIC_ASSERT(CONFIG_QUEUE_MAX_ORD_LOCKS <= 256,
"Ordered_lock_count_does_not_fit_8_bits");
ODP_STATIC_ASSERT(NUM_PKTIO <= 256, "Pktio_index_does_not_fit_8_bits");
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(GRP_WEIGHT_TBL_SIZE), "Not_power_of_2");
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(GRP_WEIGHT_TBL_SIZE), "Not_power_of_2");
/* Global scheduler context */
static sched_global_t *sched;
@@ -503,7 +503,7 @@ static int schedule_init_global(void)
num_rings = sched->config.num_spread;
}
- ring_size = ROUNDUP_POWER2_U32(ring_size);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(ring_size);
ODP_ASSERT(ring_size <= MAX_RING_SIZE);
sched->ring_mask = ring_size - 1;
@@ -1979,6 +1979,7 @@ static int schedule_capability(odp_schedule_capability_t *capa)
capa->max_queues = sched->max_queues;
capa->max_queue_size = _odp_queue_glb->config.max_queue_size;
capa->max_flow_id = BUF_HDR_MAX_FLOW_ID;
+ capa->order_wait = ODP_SUPPORT_YES;
return 0;
}
@@ -2117,5 +2118,6 @@ const schedule_api_t _odp_schedule_basic_api = {
.schedule_order_unlock_lock = schedule_order_unlock_lock,
.schedule_order_lock_start = schedule_order_lock_start,
.schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
.schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_schedule_if.c b/platform/linux-generic/odp_schedule_if.c
index 3b908ea87..564153137 100644
--- a/platform/linux-generic/odp_schedule_if.c
+++ b/platform/linux-generic/odp_schedule_if.c
@@ -203,6 +203,11 @@ void odp_schedule_order_lock_wait(uint32_t lock_index)
_odp_sched_api->schedule_order_lock_wait(lock_index);
}
+void odp_schedule_order_wait(void)
+{
+ _odp_sched_api->schedule_order_wait();
+}
+
void odp_schedule_print(void)
{
_odp_sched_api->schedule_print();
diff --git a/platform/linux-generic/odp_schedule_scalable.c b/platform/linux-generic/odp_schedule_scalable.c
index 7482d776d..66cb66315 100644
--- a/platform/linux-generic/odp_schedule_scalable.c
+++ b/platform/linux-generic/odp_schedule_scalable.c
@@ -24,13 +24,13 @@
#include <odp_shm_internal.h>
#include <odp_ishmpool_internal.h>
-#include <odp_align_internal.h>
#include <odp/api/plat/cpu_inlines.h>
#include <odp_llqueue.h>
#include <odp_queue_scalable_internal.h>
#include <odp_schedule_if.h>
#include <odp_bitset.h>
#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_timer_internal.h>
@@ -46,7 +46,7 @@
#define FLAG_PKTIN 0x80
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(CONFIG_MAX_SCHED_QUEUES),
"Number_of_queues_is_not_power_of_two");
#define SCHED_GROUP_JOIN 0
@@ -2167,6 +2167,7 @@ static int schedule_capability(odp_schedule_capability_t *capa)
capa->max_prios = schedule_num_prio();
capa->max_queues = CONFIG_MAX_SCHED_QUEUES;
capa->max_queue_size = 0;
+ capa->order_wait = ODP_SUPPORT_YES;
return 0;
}
@@ -2233,5 +2234,6 @@ const schedule_api_t _odp_schedule_scalable_api = {
.schedule_order_unlock_lock = schedule_order_unlock_lock,
.schedule_order_lock_start = schedule_order_lock_start,
.schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
.schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_schedule_sp.c b/platform/linux-generic/odp_schedule_sp.c
index 558c3f4fe..11564d32b 100644
--- a/platform/linux-generic/odp_schedule_sp.c
+++ b/platform/linux-generic/odp_schedule_sp.c
@@ -24,9 +24,9 @@
#include <odp_schedule_if.h>
#include <odp_debug_internal.h>
-#include <odp_align_internal.h>
#include <odp_config_internal.h>
#include <odp_event_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ring_u32_internal.h>
#include <odp_timer_internal.h>
#include <odp_queue_basic_internal.h>
@@ -53,11 +53,11 @@
#define GROUP_PKTIN GROUP_ALL
/* Maximum number of commands: one priority/group for all queues and pktios */
-#define RING_SIZE (ROUNDUP_POWER2_U32(NUM_QUEUE + NUM_PKTIO))
+#define RING_SIZE (_ODP_ROUNDUP_POWER2_U32(NUM_QUEUE + NUM_PKTIO))
#define RING_MASK (RING_SIZE - 1)
/* Ring size must be power of two */
-ODP_STATIC_ASSERT(CHECK_IS_POWER2(RING_SIZE),
+ODP_STATIC_ASSERT(_ODP_CHECK_IS_POWER2(RING_SIZE),
"Ring_size_is_not_power_of_two");
ODP_STATIC_ASSERT(NUM_ORDERED_LOCKS <= CONFIG_QUEUE_MAX_ORD_LOCKS,
@@ -80,7 +80,7 @@ struct sched_cmd_s {
typedef struct ODP_ALIGNED_CACHE sched_cmd_t {
struct sched_cmd_s s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct sched_cmd_s)) -
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct sched_cmd_s)) -
sizeof(struct sched_cmd_s)];
} sched_cmd_t;
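The pad member rounds the command up to a full cache line so that neighbouring ring entries never share a line (avoiding false sharing between cores). The same pattern in isolation, assuming a 64-byte cache line:

#include <stdint.h>

#define CACHE_LINE	64
#define ROUNDUP_CL(x)	(((x) + CACHE_LINE - 1) / CACHE_LINE * CACHE_LINE)

struct cmd_s {
	uint32_t index;
	uint8_t type;
};

/* sizeof(cmd_t) becomes exactly one cache line (64 bytes here). */
typedef struct {
	struct cmd_s s;
	uint8_t pad[ROUNDUP_CL(sizeof(struct cmd_s)) - sizeof(struct cmd_s)];
} cmd_t;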
@@ -1119,5 +1119,6 @@ const schedule_api_t _odp_schedule_sp_api = {
.schedule_order_unlock_lock = schedule_order_unlock_lock,
.schedule_order_lock_start = schedule_order_lock_start,
.schedule_order_lock_wait = schedule_order_lock_wait,
+ .schedule_order_wait = order_lock,
.schedule_print = schedule_print
};
diff --git a/platform/linux-generic/odp_stash.c b/platform/linux-generic/odp_stash.c
index 9dbc8cc26..e12f1aed3 100644
--- a/platform/linux-generic/odp_stash.c
+++ b/platform/linux-generic/odp_stash.c
@@ -4,10 +4,6 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
-#include <stdint.h>
-#include <stdio.h>
-#include <string.h>
-
#include <odp/api/ticketlock.h>
#include <odp/api/shared_memory.h>
#include <odp/api/stash.h>
@@ -17,9 +13,14 @@
#include <odp_debug_internal.h>
#include <odp_global_data.h>
#include <odp_init_internal.h>
+#include <odp_macros_internal.h>
#include <odp_ring_u32_internal.h>
#include <odp_ring_u64_internal.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+
ODP_STATIC_ASSERT(CONFIG_INTERNAL_STASHES < CONFIG_MAX_STASHES, "TOO_MANY_INTERNAL_STASHES");
#define MAX_RING_SIZE (1024 * 1024)
@@ -203,7 +204,7 @@ odp_stash_t odp_stash_create(const char *name, const odp_stash_param_t *param)
if (ring_size + 1 <= MIN_RING_SIZE)
ring_size = MIN_RING_SIZE;
else
- ring_size = ROUNDUP_POWER2_U32(ring_size + 1);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(ring_size + 1);
memset(shm_name, 0, sizeof(shm_name));
snprintf(shm_name, sizeof(shm_name) - 1, "_stash_%s", name);
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c
index 1c54ab740..3af1ae737 100644
--- a/platform/linux-generic/odp_timer.c
+++ b/platform/linux-generic/odp_timer.c
@@ -37,7 +37,6 @@
#include <odp/api/plat/timer_inline_types.h>
-#include <odp_align_internal.h>
#include <odp_atomic_internal.h>
#include <odp_debug_internal.h>
#include <odp_errno_define.h>
@@ -45,6 +44,7 @@
#include <odp_global_data.h>
#include <odp_init_internal.h>
#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
#include <odp_pool_internal.h>
#include <odp_queue_if.h>
#include <odp_timer_internal.h>
@@ -431,9 +431,9 @@ static odp_timer_pool_t timer_pool_new(const char *name,
}
}
- sz0 = ROUNDUP_CACHE_LINE(sizeof(timer_pool_t));
- sz1 = ROUNDUP_CACHE_LINE(sizeof(tick_buf_t) * param->num_timers);
- sz2 = ROUNDUP_CACHE_LINE(sizeof(_odp_timer_t) * param->num_timers);
+ sz0 = _ODP_ROUNDUP_CACHE_LINE(sizeof(timer_pool_t));
+ sz1 = _ODP_ROUNDUP_CACHE_LINE(sizeof(tick_buf_t) * param->num_timers);
+ sz2 = _ODP_ROUNDUP_CACHE_LINE(sizeof(_odp_timer_t) * param->num_timers);
tp_size = sz0 + sz1 + sz2;
shm = odp_shm_reserve(name, tp_size, ODP_CACHE_LINE_SIZE, flags);
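Rounding each sub-area to a cache line before summing allows a single odp_shm_reserve() call to back three naturally aligned arrays. A sketch of how such a reservation is typically carved up afterwards (the pointer names are illustrative, not from this patch):

/* base is cache-line aligned; sz0 and sz1 are cache-line multiples,
 * so every region below starts on a cache-line boundary. */
uint8_t *base = odp_shm_addr(shm);
timer_pool_t *tp = (timer_pool_t *)(uintptr_t)base;
tick_buf_t *tick_buf = (tick_buf_t *)(uintptr_t)(base + sz0);
_odp_timer_t *timers = (_odp_timer_t *)(uintptr_t)(base + sz0 + sz1);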
diff --git a/platform/linux-generic/odp_timer_wheel.c b/platform/linux-generic/odp_timer_wheel.c
index c50d3a13d..e597d5f70 100644
--- a/platform/linux-generic/odp_timer_wheel.c
+++ b/platform/linux-generic/odp_timer_wheel.c
@@ -632,7 +632,7 @@ static int timer_current_wheel_update(timer_wheels_t *timer_wheels,
slot_idx = wheel_desc->slot_idx;
num_slots = wheel_desc->num_slots;
max_ticks = wheel_desc->max_ticks;
- max_cnt = MIN(elapsed_ticks, UINT32_C(32));
+ max_cnt = _ODP_MIN(elapsed_ticks, UINT32_C(32));
current_wheel = timer_wheels->current_wheel;
ret_code = 0;
rc = -1;
diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c
index f02297ec9..9bea659e9 100644
--- a/platform/linux-generic/odp_traffic_mngr.c
+++ b/platform/linux-generic/odp_traffic_mngr.c
@@ -649,7 +649,7 @@ static void tm_shaper_params_cvt_to(const odp_tm_shaper_params_t *shaper_params,
} else {
max_peak_time_delta = tm_max_time_delta(peak_rate);
peak_burst = (int64_t)shaper_params->peak_burst;
- highest_rate = MAX(commit_rate, peak_rate);
+ highest_rate = _ODP_MAX(commit_rate, peak_rate);
min_time_delta = (uint32_t)((1 << 26) / highest_rate);
}
@@ -836,7 +836,7 @@ static void update_shaper_elapsed_time(tm_system_t *tm_system,
else
commit_inc = time_delta * shaper_params->commit_rate;
- shaper_obj->commit_cnt = (int64_t)MIN(max_commit, commit + commit_inc);
+ shaper_obj->commit_cnt = (int64_t)_ODP_MIN(max_commit, commit + commit_inc);
if (shaper_params->dual_rate) {
peak = shaper_obj->peak_cnt;
@@ -846,7 +846,7 @@ static void update_shaper_elapsed_time(tm_system_t *tm_system,
else
peak_inc = time_delta * shaper_params->peak_rate;
- shaper_obj->peak_cnt = (int64_t)MIN(max_peak, peak + peak_inc);
+ shaper_obj->peak_cnt = (int64_t)_ODP_MIN(max_peak, peak + peak_inc);
}
shaper_obj->last_update_time = tm_system->current_time;
@@ -866,9 +866,8 @@ static uint64_t time_till_not_red(tm_shaper_params_t *shaper_params,
commit_delay = (-shaper_obj->commit_cnt)
/ shaper_params->commit_rate;
- min_time_delay =
- MAX(shaper_obj->shaper_params->min_time_delta, UINT64_C(256));
- commit_delay = MAX(commit_delay, min_time_delay);
+ min_time_delay = _ODP_MAX(shaper_obj->shaper_params->min_time_delta, UINT64_C(256));
+ commit_delay = _ODP_MAX(commit_delay, min_time_delay);
if (!shaper_params->dual_rate)
return commit_delay;
@@ -876,13 +875,13 @@ static uint64_t time_till_not_red(tm_shaper_params_t *shaper_params,
if (shaper_obj->peak_cnt < 0)
peak_delay = (-shaper_obj->peak_cnt) / shaper_params->peak_rate;
- peak_delay = MAX(peak_delay, min_time_delay);
+ peak_delay = _ODP_MAX(peak_delay, min_time_delay);
if (0 < shaper_obj->commit_cnt)
return peak_delay;
else if (0 < shaper_obj->peak_cnt)
return commit_delay;
else
- return MIN(commit_delay, peak_delay);
+ return _ODP_MIN(commit_delay, peak_delay);
}
static int delete_timer(tm_system_t *tm_system ODP_UNUSED,
@@ -1192,8 +1191,8 @@ static int tm_set_finish_time(tm_schedulers_obj_t *schedulers_obj,
frame_weight = ((inverted_weight * frame_len) + (1 << 15)) >> 16;
sched_state = &schedulers_obj->sched_states[new_priority];
- base_virtual_time = MAX(prod_shaper_obj->virtual_finish_time,
- sched_state->base_virtual_time);
+ base_virtual_time = _ODP_MAX(prod_shaper_obj->virtual_finish_time,
+ sched_state->base_virtual_time);
virtual_finish_time = base_virtual_time + frame_weight;
prod_shaper_obj->virtual_finish_time = virtual_finish_time;
@@ -1805,7 +1804,7 @@ static odp_tm_percent_t tm_queue_fullness(tm_wred_params_t *wred_params,
return 0;
fullness = (10000 * current_cnt) / max_cnt;
- return (odp_tm_percent_t)MIN(fullness, UINT64_C(50000));
+ return (odp_tm_percent_t)_ODP_MIN(fullness, UINT64_C(50000));
}
static odp_bool_t tm_local_random_drop(tm_system_t *tm_system,
@@ -2228,6 +2227,7 @@ static void tm_egress_marking(tm_system_t *tm_system, odp_packet_t odp_pkt)
tm_tos_marking_t *ip_marking;
color = odp_packet_color(odp_pkt);
+ ODP_ASSERT(color < ODP_NUM_PACKET_COLORS);
if (odp_packet_has_vlan(odp_pkt)) {
vlan_marking = &tm_system->marking.vlan_marking[color];
@@ -2710,11 +2710,10 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
uint32_t min_weight, max_weight;
uint8_t max_priority;
- num_levels = MAX(MIN(req_ptr->num_levels, ODP_TM_MAX_LEVELS), 1);
+ num_levels = _ODP_MAX(_ODP_MIN(req_ptr->num_levels, ODP_TM_MAX_LEVELS), 1);
memset(cap_ptr, 0, sizeof(odp_tm_capabilities_t));
- max_queues = MIN(req_ptr->max_tm_queues,
- (uint32_t)ODP_TM_MAX_NUM_TM_NODES);
+ max_queues = _ODP_MIN(req_ptr->max_tm_queues, (uint32_t)ODP_TM_MAX_NUM_TM_NODES);
shaper_supported = req_ptr->tm_queue_shaper_needed;
wred_supported = req_ptr->tm_queue_wred_needed;
dual_slope = req_ptr->tm_queue_dual_slope_needed;
@@ -2755,16 +2754,16 @@ static void tm_system_capabilities_set(odp_tm_capabilities_t *cap_ptr,
per_level_cap = &cap_ptr->per_level[level_idx];
per_level_req = &req_ptr->per_level[level_idx];
- max_nodes = MIN(per_level_req->max_num_tm_nodes,
- (uint32_t)ODP_TM_MAX_NUM_TM_NODES);
- max_fanin = MIN(per_level_req->max_fanin_per_node,
- UINT32_C(1024));
- max_priority = MIN(per_level_req->max_priority,
- ODP_TM_MAX_PRIORITIES - 1);
- min_weight = MAX(per_level_req->min_weight,
- ODP_TM_MIN_SCHED_WEIGHT);
- max_weight = MIN(per_level_req->max_weight,
- ODP_TM_MAX_SCHED_WEIGHT);
+ max_nodes = _ODP_MIN(per_level_req->max_num_tm_nodes,
+ (uint32_t)ODP_TM_MAX_NUM_TM_NODES);
+ max_fanin = _ODP_MIN(per_level_req->max_fanin_per_node,
+ UINT32_C(1024));
+ max_priority = _ODP_MIN(per_level_req->max_priority,
+ ODP_TM_MAX_PRIORITIES - 1);
+ min_weight = _ODP_MAX(per_level_req->min_weight,
+ ODP_TM_MIN_SCHED_WEIGHT);
+ max_weight = _ODP_MIN(per_level_req->max_weight,
+ ODP_TM_MAX_SCHED_WEIGHT);
shaper_supported = per_level_req->tm_node_shaper_needed;
wred_supported = per_level_req->tm_node_wred_needed;
dual_slope = per_level_req->tm_node_dual_slope_needed;
diff --git a/platform/linux-generic/pktio/dpdk.c b/platform/linux-generic/pktio/dpdk.c
index 030560b0d..006344b48 100644
--- a/platform/linux-generic/pktio/dpdk.c
+++ b/platform/linux-generic/pktio/dpdk.c
@@ -23,14 +23,15 @@
#include <odp/api/time.h>
#include <odp/api/plat/time_inlines.h>
-#include <odp_align_internal.h>
#include <odp_packet_io_internal.h>
+#include <odp_pool_internal.h>
#include <odp_classification_internal.h>
#include <odp_socket_common.h>
#include <odp_packet_dpdk.h>
#include <odp_debug_internal.h>
#include <odp_libconfig_internal.h>
#include <odp_errno_define.h>
+#include <odp_macros_internal.h>
#include <protocols/eth.h>
#include <protocols/udp.h>
@@ -120,7 +121,7 @@ struct pkt_cache_t {
typedef union ODP_ALIGNED_CACHE {
struct pkt_cache_t s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct pkt_cache_t))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct pkt_cache_t))];
} pkt_cache_t;
/** Packet IO using DPDK interface */
@@ -153,6 +154,15 @@ typedef struct ODP_ALIGNED_CACHE {
ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_dpdk_t),
"PKTIO_PRIVATE_SIZE too small");
+typedef struct {
+ uint32_t dpdk_elt_size;
+ uint8_t pool_in_use;
+ struct rte_mempool *pkt_pool;
+} mem_src_data_t;
+
+ODP_STATIC_ASSERT(_ODP_POOL_MEM_SRC_DATA_SIZE >= sizeof(mem_src_data_t),
+ "_ODP_POOL_MEM_SRC_DATA_SIZE too small");
+
static inline struct rte_mbuf *mbuf_from_pkt_hdr(odp_packet_hdr_t *pkt_hdr)
{
return ((struct rte_mbuf *)pkt_hdr) - 1;
@@ -168,6 +178,11 @@ static inline pkt_dpdk_t *pkt_priv(pktio_entry_t *pktio_entry)
return (pkt_dpdk_t *)(uintptr_t)(pktio_entry->s.pkt_priv);
}
+static inline mem_src_data_t *mem_src_priv(uint8_t *data)
+{
+ return (mem_src_data_t *)data;
+}
+
static int disable_pktio; /* !0: this pktio disabled, 0: enabled */
static int dpdk_pktio_init(void);
@@ -320,13 +335,14 @@ static void pktmbuf_init(struct rte_mempool *mp, void *opaque_arg ODP_UNUSED,
* Create custom DPDK packet pool
*/
static struct rte_mempool *mbuf_pool_create(const char *name,
- pool_t *pool_entry)
+ pool_t *pool_entry,
+ uint32_t dpdk_elt_size)
{
odp_shm_info_t shm_info;
struct rte_mempool *mp = NULL;
struct rte_pktmbuf_pool_private mbp_priv;
struct rte_mempool_objsz sz;
- unsigned int elt_size = pool_entry->dpdk_elt_size;
+ unsigned int elt_size = dpdk_elt_size;
unsigned int num = pool_entry->num, populated = 0;
uint32_t total_size;
uint64_t page_size, offset = 0, remainder = 0;
@@ -423,9 +439,10 @@ static int pool_enqueue(struct rte_mempool *mp,
{
odp_packet_t pkt_tbl[num];
pool_t *pool_entry = (pool_t *)mp->pool_config;
+ mem_src_data_t *mem_src_data = mem_src_priv(pool_entry->mem_src_data);
unsigned i;
- if (odp_unlikely(num == 0 || !pool_entry->pool_in_use))
+ if (odp_unlikely(num == 0 || !mem_src_data->pool_in_use))
return 0;
for (i = 0; i < num; i++) {
@@ -497,67 +514,72 @@ static void pool_free(struct rte_mempool *mp)
}
}
-static void pool_destroy(void *pool)
+static void pool_destroy(uint8_t *data)
{
- struct rte_mempool *mp = (struct rte_mempool *)pool;
+ mem_src_data_t *mem_src_data = mem_src_priv(data);
- if (mp != NULL) {
- pool_t *pool_entry = (pool_t *)mp->pool_config;
-
- pool_entry->pool_in_use = 0;
- rte_mempool_free(mp);
+ if (mem_src_data->pkt_pool != NULL) {
+ mem_src_data->pool_in_use = 0;
+ rte_mempool_free(mem_src_data->pkt_pool);
}
+
+ mem_src_data->pkt_pool = NULL;
}
-int _odp_dpdk_pool_create(pool_t *pool)
+static int pool_create(uint8_t *data, pool_t *pool)
{
struct rte_mempool *pkt_pool;
char pool_name[RTE_MEMPOOL_NAMESIZE];
+ mem_src_data_t *mem_src_data = mem_src_priv(data);
+
+ mem_src_data->pkt_pool = NULL;
if (!_ODP_DPDK_ZERO_COPY)
return 0;
- pool->pool_in_use = 0;
-
+ mem_src_data->pool_in_use = 0;
snprintf(pool_name, sizeof(pool_name),
"dpdk_pktpool_%" PRIu32 "_%" PRIu32 "", odp_global_ro.main_pid,
pool->pool_idx);
- pkt_pool = mbuf_pool_create(pool_name, pool);
+ pkt_pool = mbuf_pool_create(pool_name, pool, mem_src_data->dpdk_elt_size);
if (pkt_pool == NULL) {
ODP_ERR("Creating external DPDK pool failed\n");
return -1;
}
- pool->ext_desc = pkt_pool;
- pool->ext_destroy = pool_destroy;
- pool->pool_in_use = 1;
+ mem_src_data->pkt_pool = pkt_pool;
+ mem_src_data->pool_in_use = 1;
return 0;
}
-uint32_t _odp_dpdk_pool_obj_size(pool_t *pool, uint32_t block_size)
+static void pool_obj_size(uint8_t *data, uint32_t *block_size, uint32_t *block_offset,
+ uint32_t *flags)
{
struct rte_mempool_objsz sz;
+ uint32_t size;
uint32_t total_size;
+ mem_src_data_t *mem_src_data = mem_src_priv(data);
if (!_ODP_DPDK_ZERO_COPY)
- return block_size;
+ return;
if (odp_global_rw->dpdk_initialized == 0) {
if (dpdk_pktio_init()) {
ODP_ERR("Initializing DPDK failed\n");
- return 0;
+ *block_size = 0;
+ return;
}
odp_global_rw->dpdk_initialized = 1;
}
- block_size += sizeof(struct rte_mbuf);
- total_size = rte_mempool_calc_obj_size(block_size, MEMPOOL_FLAGS, &sz);
- pool->dpdk_elt_size = sz.elt_size;
- pool->block_offset = sz.header_size + sizeof(struct rte_mbuf);
-
- return total_size;
+ *flags |= ODP_SHM_HP;
+ size = *block_size + sizeof(struct rte_mbuf);
+ total_size = rte_mempool_calc_obj_size(size, MEMPOOL_FLAGS, &sz);
+ mem_src_data->dpdk_elt_size = sz.elt_size;
+ *block_size = total_size;
+ *block_offset = sz.header_size + sizeof(struct rte_mbuf);
}
static struct rte_mempool_ops odp_pool_ops = {
@@ -1735,7 +1757,9 @@ static int dpdk_open(odp_pktio_t id ODP_UNUSED,
pkt_dpdk->min_rx_burst = 0;
if (_ODP_DPDK_ZERO_COPY) {
- pkt_pool = (struct rte_mempool *)pool_entry->ext_desc;
+ mem_src_data_t *mem_src_data = mem_src_priv(pool_entry->mem_src_data);
+
+ pkt_pool = mem_src_data->pkt_pool;
} else {
snprintf(pool_name, sizeof(pool_name), "pktpool_%s", netdev);
/* Check if the pool exists already */
@@ -2406,27 +2430,27 @@ const pktio_if_ops_t _odp_dpdk_pktio_ops = {
.output_queues_config = dpdk_output_queues_config
};
-#else
-
-#include <stdint.h>
-
-#include <odp/api/hints.h>
-
-#include <odp_packet_dpdk.h>
-#include <odp_pool_internal.h>
-
-/*
- * Dummy functions for pool_create()
- */
-
-uint32_t _odp_dpdk_pool_obj_size(pool_t *pool ODP_UNUSED, uint32_t block_size)
+static odp_bool_t is_mem_src_active(void)
{
- return block_size;
+ return !disable_pktio && _ODP_DPDK_ZERO_COPY;
}
-int _odp_dpdk_pool_create(pool_t *pool ODP_UNUSED)
+static void force_mem_src_disable(void)
{
- return 0;
+ if (_ODP_DPDK_ZERO_COPY)
+ disable_pktio = 1;
}
+const _odp_pool_mem_src_ops_t _odp_pool_dpdk_mem_src_ops = {
+ .name = "dpdk_zc",
+ .is_active = is_mem_src_active,
+ .force_disable = force_mem_src_disable,
+ .adjust_size = pool_obj_size,
+ .bind = pool_create,
+ .unbind = pool_destroy
+};
+
+#else
+/* Avoid warning about empty translation unit */
+typedef int _odp_dummy;
#endif /* _ODP_PKTIO_DPDK */
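The ops struct gives the pool implementation a fixed call sequence: adjust_size() while the pool block size is being computed, bind() once the pool memory exists, and unbind() at pool destroy, with is_active() and force_disable() gating whether the memory source participates at all. A hedged sketch of that sequence from the pool side (the mem_src_ops field name and the error handling are assumptions; the real odp_pool.c wiring is not shown in this section):

const _odp_pool_mem_src_ops_t *ops = pool->mem_src_ops;

if (ops != NULL && ops->is_active()) {
	/* May grow block_size, set block_offset and request huge pages. */
	ops->adjust_size(pool->mem_src_data, &block_size, &block_offset, &shm_flags);

	if (block_size == 0)
		return -1; /* memory source rejected the pool */
}

/* ... reserve shm of block_size * num with shm_flags ... */

if (ops != NULL && ops->is_active() && ops->bind(pool->mem_src_data, pool))
	return -1;

/* ... and at pool destroy: */
if (ops != NULL && ops->is_active())
	ops->unbind(pool->mem_src_data);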
diff --git a/platform/linux-generic/pktio/io_ops.c b/platform/linux-generic/pktio/io_ops.c
index b5a08b58a..f9ea89f71 100644
--- a/platform/linux-generic/pktio/io_ops.c
+++ b/platform/linux-generic/pktio/io_ops.c
@@ -16,6 +16,9 @@ const pktio_if_ops_t * const _odp_pktio_if_ops[] = {
#ifdef _ODP_PKTIO_DPDK
&_odp_dpdk_pktio_ops,
#endif
+#ifdef _ODP_PKTIO_XDP
+ &_odp_sock_xdp_pktio_ops,
+#endif
#ifdef _ODP_PKTIO_NETMAP
&_odp_netmap_pktio_ops,
#endif
diff --git a/platform/linux-generic/pktio/ipc.c b/platform/linux-generic/pktio/ipc.c
index 81938a983..455243159 100644
--- a/platform/linux-generic/pktio/ipc.c
+++ b/platform/linux-generic/pktio/ipc.c
@@ -5,10 +5,12 @@
* SPDX-License-Identifier: BSD-3-Clause
*/
+#include <odp/api/system_info.h>
+
#include <odp_debug_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_errno_define.h>
-#include <odp/api/system_info.h>
+#include <odp_macros_internal.h>
#include <odp_shm_internal.h>
#include <odp_ring_ptr_internal.h>
#include <odp_global_data.h>
@@ -124,7 +126,7 @@ static ring_ptr_t *_ring_create(const char *name, uint32_t count,
shm_flags |= ODP_SHM_SINGLE_VA;
/* count must be a power of 2 */
- if (!CHECK_IS_POWER2(count)) {
+ if (!_ODP_CHECK_IS_POWER2(count)) {
ODP_ERR("Requested size is invalid, must be a power of 2\n");
_odp_errno = EINVAL;
return NULL;
@@ -234,17 +236,17 @@ static int _ipc_init_master(pktio_entry_t *pktio_entry,
uint32_t ring_size;
uint32_t ring_mask;
- if ((uint64_t)ROUNDUP_POWER2_U32(pool->num + 1) > UINT32_MAX) {
+ if ((uint64_t)_ODP_ROUNDUP_POWER2_U32(pool->num + 1) > UINT32_MAX) {
ODP_ERR("Too large packet pool\n");
return -1;
}
/* Ring must be able to store all packets in the pool */
- ring_size = ROUNDUP_POWER2_U32(pool->num + 1);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(pool->num + 1);
 /* Ring size has to be larger than burst size */
if (ring_size <= IPC_BURST_SIZE)
- ring_size = ROUNDUP_POWER2_U32(IPC_BURST_SIZE + 1);
+ ring_size = _ODP_ROUNDUP_POWER2_U32(IPC_BURST_SIZE + 1);
ring_mask = ring_size - 1;
pktio_ipc->ring_size = ring_size;
diff --git a/platform/linux-generic/pktio/loop.c b/platform/linux-generic/pktio/loop.c
index 3e21efecd..c702f9ded 100644
--- a/platform/linux-generic/pktio/loop.c
+++ b/platform/linux-generic/pktio/loop.c
@@ -197,7 +197,7 @@ static int loopback_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
* parser in the case of a segmented packet. */
if (odp_unlikely(seg_len < PARSE_BYTES &&
pkt_len > seg_len)) {
- seg_len = MIN(pkt_len, PARSE_BYTES);
+ seg_len = _ODP_MIN(pkt_len, PARSE_BYTES);
odp_packet_copy_to_mem(pkt, 0, seg_len, buf);
pkt_addr = buf;
} else {
@@ -405,20 +405,9 @@ static int loopback_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
}
for (i = 0; i < nb_tx; ++i) {
- odp_ipsec_packet_result_t result;
-
- if (odp_packet_subtype(pkt_tbl[i]) ==
- ODP_EVENT_PACKET_IPSEC &&
- pktio_entry->s.config.outbound_ipsec) {
-
- /* Possibly postprocessing packet */
- odp_ipsec_result(&result, pkt_tbl[i]);
- }
packet_subtype_set(pkt_tbl[i], ODP_EVENT_PACKET_BASIC);
- }
-
- for (i = 0; i < nb_tx; ++i)
loopback_fix_checksums(pkt_tbl[i], pktout_cfg, pktout_capa);
+ }
odp_ticketlock_lock(&pktio_entry->s.txl);
diff --git a/platform/linux-generic/pktio/netmap.c b/platform/linux-generic/pktio/netmap.c
index 94b88e21e..342f38431 100644
--- a/platform/linux-generic/pktio/netmap.c
+++ b/platform/linux-generic/pktio/netmap.c
@@ -24,17 +24,17 @@
#include <odp_socket_common.h>
#include <odp_debug_internal.h>
#include <odp_errno_define.h>
-#include <protocols/eth.h>
+#include <odp_classification_datamodel.h>
+#include <odp_classification_internal.h>
+#include <odp_libconfig_internal.h>
+#include <odp_macros_internal.h>
+#include <protocols/eth.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <poll.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
-#include <odp_classification_datamodel.h>
-#include <odp_classification_internal.h>
-#include <odp_libconfig_internal.h>
-
#include <inttypes.h>
/* Disable netmap debug prints */
@@ -73,7 +73,7 @@ struct netmap_ring_t {
typedef union ODP_ALIGNED_CACHE {
struct netmap_ring_t s;
- uint8_t pad[ROUNDUP_CACHE_LINE(sizeof(struct netmap_ring_t))];
+ uint8_t pad[_ODP_ROUNDUP_CACHE_LINE(sizeof(struct netmap_ring_t))];
} netmap_ring_t;
/** Netmap ring slot */
diff --git a/platform/linux-generic/pktio/socket.c b/platform/linux-generic/pktio/socket.c
index 9d1bbe545..0d756c4e1 100644
--- a/platform/linux-generic/pktio/socket.c
+++ b/platform/linux-generic/pktio/socket.c
@@ -289,7 +289,7 @@ static int sock_mmsg_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
/* Make sure there is enough data for the packet
* parser in the case of a segmented packet. */
if (odp_unlikely(seg_len < PARSE_BYTES && pkt_len > seg_len)) {
- seg_len = MIN(pkt_len, PARSE_BYTES);
+ seg_len = _ODP_MIN(pkt_len, PARSE_BYTES);
odp_packet_copy_to_mem(pkt, 0, seg_len, buf);
base = buf;
}
diff --git a/platform/linux-generic/pktio/socket_mmap.c b/platform/linux-generic/pktio/socket_mmap.c
index 7824b0e91..4845b5dab 100644
--- a/platform/linux-generic/pktio/socket_mmap.c
+++ b/platform/linux-generic/pktio/socket_mmap.c
@@ -25,6 +25,7 @@
#include <odp_classification_datamodel.h>
#include <odp_classification_internal.h>
#include <odp_global_data.h>
+#include <odp_macros_internal.h>
#include <protocols/eth.h>
#include <protocols/ip.h>
@@ -433,8 +434,7 @@ static int mmap_setup_ring(pkt_sock_mmap_t *pkt_sock, struct ring *ring,
ring->type = type;
ring->version = TPACKET_V2;
- frame_size = ROUNDUP_POWER2_U32(mtu + TPACKET_HDRLEN
- + TPACKET_ALIGNMENT);
+ frame_size = _ODP_ROUNDUP_POWER2_U32(mtu + TPACKET_HDRLEN + TPACKET_ALIGNMENT);
block_size = BLOCK_SIZE;
if (frame_size > block_size)
block_size = frame_size;
diff --git a/platform/linux-generic/pktio/socket_xdp.c b/platform/linux-generic/pktio/socket_xdp.c
new file mode 100644
index 000000000..e43e4bf89
--- /dev/null
+++ b/platform/linux-generic/pktio/socket_xdp.c
@@ -0,0 +1,688 @@
+/* Copyright (c) 2022, Nokia
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/autoheader_internal.h>
+
+#ifdef _ODP_PKTIO_XDP
+
+#include <odp_posix_extensions.h>
+#include <odp/api/debug.h>
+#include <odp/api/hints.h>
+#include <odp/api/system_info.h>
+#include <odp/api/ticketlock.h>
+
+#include <odp_debug_internal.h>
+#include <odp_macros_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_packet_internal.h>
+#include <odp_parse_internal.h>
+#include <odp_classification_internal.h>
+#include <odp_socket_common.h>
+
+#include <string.h>
+#include <errno.h>
+#include <sys/socket.h>
+#include <unistd.h>
+#include <poll.h>
+
+#include <xdp/xsk.h>
+
+#define NUM_XDP_DESCS 1024U
+#define MIN_FRAME_SIZE 2048U
+#define IF_DELIM " "
+#define Q_DELIM ':'
+
+typedef struct {
+ struct xsk_ring_prod fill_q;
+ struct xsk_ring_cons compl_q;
+ struct xsk_umem *umem;
+ pool_t *pool;
+} xdp_umem_info_t;
+
+typedef struct {
+ struct xsk_ring_cons rx;
+ struct xsk_ring_cons compl_q;
+ struct xsk_ring_prod tx;
+ struct xsk_ring_prod fill_q;
+ xdp_umem_info_t *umem_info;
+ struct xsk_socket *xsk;
+ int pktio_idx;
+ int helper_sock;
+ uint32_t mtu;
+ uint32_t max_mtu;
+} xdp_sock_info_t;
+
+typedef struct {
+ odp_ticketlock_t rx_lock ODP_ALIGNED_CACHE;
+ odp_ticketlock_t tx_lock ODP_ALIGNED_CACHE;
+ xdp_sock_info_t sock_info;
+} pkt_xdp_t;
+
+typedef struct {
+ odp_packet_hdr_t *pkt_hdr;
+ odp_packet_t pkt;
+ uint8_t *data;
+ uint32_t len;
+} pkt_data_t;
+
+ODP_STATIC_ASSERT(PKTIO_PRIVATE_SIZE >= sizeof(pkt_xdp_t),
+ "PKTIO_PRIVATE_SIZE too small");
+
+static odp_bool_t disable_pktio;
+
+static int sock_xdp_init_global(void)
+{
+ if (getenv("ODP_PKTIO_DISABLE_SOCKET_XDP")) {
+ ODP_PRINT("PKTIO: socket xdp skipped,"
+ " enabled export ODP_PKTIO_DISABLE_SOCKET_XDP=1.\n");
+ disable_pktio = true;
+ } else {
+ ODP_PRINT("PKTIO: initialized socket xdp,"
+ " use export ODP_PKTIO_DISABLE_SOCKET_XDP=1 to disable.\n");
+ }
+
+ return 0;
+}
+
+static inline pkt_xdp_t *pkt_priv(pktio_entry_t *pktio_entry)
+{
+ return (pkt_xdp_t *)(uintptr_t)(pktio_entry->s.pkt_priv);
+}
+
+static void fill_socket_config(struct xsk_socket_config *config)
+{
+ config->rx_size = NUM_XDP_DESCS;
+ config->tx_size = NUM_XDP_DESCS;
+ config->libxdp_flags = 0U;
+ config->xdp_flags = 0U;
+ config->bind_flags = XDP_ZEROCOPY; /* TODO: XDP_COPY */
+}
+
+static uint32_t get_bind_queue_index(const char *devname)
+{
+ const char *param = getenv("ODP_PKTIO_XDP_PARAMS");
+ char *tmp_str;
+ char *tmp;
+ char *if_str;
+ int idx = 0;
+
+ if (param == NULL)
+ goto out;
+
+ tmp_str = strdup(param);
+
+ if (tmp_str == NULL)
+ goto out;
+
+ tmp = strtok(tmp_str, IF_DELIM);
+
+ if (tmp == NULL)
+ goto out_str;
+
+ while (tmp) {
+ if_str = strchr(tmp, Q_DELIM);
+
+ if (if_str != NULL && if_str != &tmp[strlen(tmp) - 1U]) {
+ if (strncmp(devname, tmp, (uint64_t)(uintptr_t)(if_str - tmp)) == 0) {
+ idx = _ODP_MAX(atoi(++if_str), 0);
+ break;
+ }
+ }
+
+ tmp = strtok(NULL, IF_DELIM);
+ }
+
+out_str:
+ free(tmp_str);
+
+out:
+ return idx;
+}
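+
+/* Usage example (the format is inferred from the parser above, hence an
+ * assumption): ODP_PKTIO_XDP_PARAMS="eth0:2 eth1:0" binds eth0 to
+ * combined queue 2 and eth1 to queue 0; interfaces without an entry
+ * fall back to queue 0. */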
+
+static odp_bool_t reserve_fill_queue_elements(xdp_sock_info_t *sock_info, int num)
+{
+ pool_t *pool;
+ odp_packet_t packets[num];
+ int count;
+ struct xsk_ring_prod *fill_q;
+ uint32_t start_idx;
+ int pktio_idx;
+ uint32_t block_size;
+ odp_packet_hdr_t *pkt_hdr;
+
+ pool = sock_info->umem_info->pool;
+ count = odp_packet_alloc_multi(pool->pool_hdl, sock_info->mtu, packets, num);
+
+ if (count <= 0)
+ return false;
+
+ fill_q = &sock_info->fill_q;
+
+ if (xsk_ring_prod__reserve(fill_q, count, &start_idx) == 0U) {
+ odp_packet_free_multi(packets, count);
+ return false;
+ }
+
+ pktio_idx = sock_info->pktio_idx;
+ block_size = pool->block_size;
+
+ for (int i = 0; i < count; ++i) {
+ pkt_hdr = packet_hdr(packets[i]);
+ pkt_hdr->ms_pktio_idx = pktio_idx;
+ *xsk_ring_prod__fill_addr(fill_q, start_idx++) =
+ pkt_hdr->event_hdr.index.event * block_size;
+ }
+
+ xsk_ring_prod__submit(&sock_info->fill_q, count);
+
+ return true;
+}
+
+static int sock_xdp_open(odp_pktio_t pktio, pktio_entry_t *pktio_entry, const char *devname,
+ odp_pool_t pool_hdl)
+{
+ pkt_xdp_t *priv;
+ pool_t *pool;
+ struct xsk_socket_config config;
+ uint32_t bind_q;
+ int ret;
+
+ if (disable_pktio)
+ return -1;
+
+ priv = pkt_priv(pktio_entry);
+ memset(priv, 0, sizeof(pkt_xdp_t));
+ pool = pool_entry_from_hdl(pool_hdl);
+ priv->sock_info.umem_info = (xdp_umem_info_t *)pool->mem_src_data;
+ priv->sock_info.xsk = NULL;
+ /* Mark transitory kernel-owned packets with the pktio index, so that they can be freed on
+ * close. */
+ priv->sock_info.pktio_idx = 1 + odp_pktio_index(pktio);
+ fill_socket_config(&config);
+ bind_q = get_bind_queue_index(devname);
+ /* With xsk_socket__create_shared() only one bind queue index can be
+ * passed, so the NIC in use needs to be configured to have a single
+ * combined TX/RX queue, otherwise traffic may not end up on the
+ * socket. For now, always bind to the first queue (overridable with
+ * an environment variable). */
+ ret = xsk_socket__create_shared(&priv->sock_info.xsk, devname, bind_q,
+ priv->sock_info.umem_info->umem, &priv->sock_info.rx,
+ &priv->sock_info.tx, &priv->sock_info.fill_q,
+ &priv->sock_info.compl_q, &config);
+
+ if (ret) {
+ ODP_ERR("Error creating xdp socket for bind queue %u: %d\n", bind_q, ret);
+ goto xsk_err;
+ }
+
+ /* Ring setup/clean up routines seem to be asynchronous with some drivers and might not be
+ * ready yet after xsk_socket__create_shared(). */
+ sleep(1U);
+
+ /* Querying with ioctl() via an AF_XDP socket doesn't seem to work, so
+ * create a helper socket for the queries. */
+ priv->sock_info.helper_sock = -1;
+ ret = socket(AF_INET, SOCK_DGRAM, 0);
+
+ if (ret == -1) {
+ ODP_ERR("Error creating helper socket for xdp: %s\n", strerror(errno));
+ goto sock_err;
+ }
+
+ priv->sock_info.helper_sock = ret;
+ priv->sock_info.mtu = _odp_mtu_get_fd(priv->sock_info.helper_sock, devname);
+
+ if (priv->sock_info.mtu == 0U)
+ goto res_err;
+
+ priv->sock_info.max_mtu = pool->seg_len;
+
+ if (!reserve_fill_queue_elements(&priv->sock_info, config.rx_size)) {
+ ODP_ERR("Unable to reserve fill queue descriptors.\n");
+ goto res_err;
+ }
+
+ odp_ticketlock_init(&priv->rx_lock);
+ odp_ticketlock_init(&priv->tx_lock);
+
+ return 0;
+
+res_err:
+ close(priv->sock_info.helper_sock);
+ priv->sock_info.helper_sock = -1;
+
+sock_err:
+ xsk_socket__delete(priv->sock_info.xsk);
+ priv->sock_info.xsk = NULL;
+
+xsk_err:
+ return -1;
+}
+
+static int sock_xdp_close(pktio_entry_t *pktio_entry)
+{
+ pkt_xdp_t *priv = pkt_priv(pktio_entry);
+ pool_t *pool = priv->sock_info.umem_info->pool;
+ odp_packet_hdr_t *pkt_hdr;
+
+ if (priv->sock_info.helper_sock != -1)
+ close(priv->sock_info.helper_sock);
+
+ if (priv->sock_info.xsk != NULL)
+ xsk_socket__delete(priv->sock_info.xsk);
+
+ /* Ring setup/clean up routines seem to be asynchronous with some drivers and might not be
+ * ready yet after xsk_socket__delete(). */
+ sleep(1U);
+
+ /* Free all packets that were in fill or completion queues at the time of closing. */
+ for (uint32_t i = 0U; i < pool->num + pool->skipped_blocks; ++i) {
+ pkt_hdr = packet_hdr(packet_from_event_hdr(event_hdr_from_index(pool, i)));
+
+ if (pkt_hdr->ms_pktio_idx == priv->sock_info.pktio_idx) {
+ pkt_hdr->ms_pktio_idx = 0U;
+ odp_packet_free(packet_handle(pkt_hdr));
+ }
+ }
+
+ return 0;
+}
+
+static inline void extract_data(const struct xdp_desc *rx_desc, uint8_t *pool_base_addr,
+ pkt_data_t *pkt_data)
+{
+ uint64_t frame_off;
+ uint64_t pkt_off;
+
+ /* UMEM "addresses" are offsets from start of a registered UMEM area.
+ * Additionally, the packet data offset (where received packet data
+ * starts within a UMEM frame) is encoded to the UMEM address with
+ * XSK_UNALIGNED_BUF_OFFSET_SHIFT left bitshift when XDP_ZEROCOPY and
+ * XDP_UMEM_UNALIGNED_CHUNK_FLAG are enabled. */
+ frame_off = rx_desc->addr;
+ pkt_off = xsk_umem__add_offset_to_addr(frame_off);
+ frame_off = xsk_umem__extract_addr(frame_off);
+ pkt_data->pkt_hdr = xsk_umem__get_data(pool_base_addr, frame_off);
+ pkt_data->pkt = packet_handle(pkt_data->pkt_hdr);
+ pkt_data->data = xsk_umem__get_data(pool_base_addr, pkt_off);
+ pkt_data->len = rx_desc->len;
+}
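+
+/* Worked example (numbers illustrative, assuming the encoding described
+ * above): with a 4096 B block size and packet data starting 256 B into
+ * frame 3, the descriptor address is
+ *   (3 * 4096) | (256 << XSK_UNALIGNED_BUF_OFFSET_SHIFT)
+ * xsk_umem__extract_addr() then yields 3 * 4096 (the frame start) and
+ * xsk_umem__add_offset_to_addr() yields 3 * 4096 + 256 (the data).
+ * populate_tx_desc() below performs the inverse encoding on TX. */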
+
+static uint32_t process_received(pktio_entry_t *pktio_entry, xdp_sock_info_t *sock_info,
+ uint32_t start_idx, odp_packet_t packets[], int num)
+{
+ pkt_data_t pkt_data;
+ struct xsk_ring_cons *rx = &sock_info->rx;
+ uint8_t *base_addr = sock_info->umem_info->pool->base_addr;
+ const odp_proto_layer_t layer = pktio_entry->s.parse_layer;
+ const odp_proto_chksums_t in_chksums = pktio_entry->s.in_chksums;
+ const odp_pktin_config_opt_t opt = pktio_entry->s.config.pktin;
+ uint64_t l4_part_sum = 0U;
+ odp_pool_t *pool_hdl = &sock_info->umem_info->pool->pool_hdl;
+ odp_pktio_t pktio_hdl = pktio_entry->s.handle;
+ uint32_t num_rx = 0U;
+
+ for (int i = 0; i < num; ++i) {
+ extract_data(xsk_ring_cons__rx_desc(rx, start_idx++), base_addr, &pkt_data);
+ pkt_data.pkt_hdr->ms_pktio_idx = 0U;
+ packet_init(pkt_data.pkt_hdr, pkt_data.len);
+
+ if (layer) {
+ if (_odp_packet_parse_common(&pkt_data.pkt_hdr->p, pkt_data.data,
+ pkt_data.len, pkt_data.len,
+ layer, in_chksums, &l4_part_sum, opt) < 0) {
+ odp_packet_free(pkt_data.pkt);
+ continue;
+ }
+
+ if (pktio_cls_enabled(pktio_entry) &&
+ _odp_cls_classify_packet(pktio_entry, pkt_data.data, pool_hdl,
+ pkt_data.pkt_hdr)) {
+ odp_packet_free(pkt_data.pkt);
+ continue;
+ }
+ }
+
+ pkt_data.pkt_hdr->seg_data = pkt_data.data;
+ pkt_data.pkt_hdr->event_hdr.base_data = pkt_data.data;
+ pkt_data.pkt_hdr->input = pktio_hdl;
+ packets[num_rx++] = pkt_data.pkt;
+ }
+
+ return num_rx;
+}
+
+static int sock_xdp_recv(pktio_entry_t *pktio_entry, int index ODP_UNUSED, odp_packet_t packets[],
+ int num)
+{
+ pkt_xdp_t *priv;
+ struct pollfd fd;
+ uint32_t start_idx = 0U, recvd, procd;
+
+ priv = pkt_priv(pktio_entry);
+ odp_ticketlock_lock(&priv->rx_lock);
+
+ if (odp_unlikely(xsk_ring_prod__needs_wakeup(&priv->sock_info.fill_q))) {
+ fd.fd = xsk_socket__fd(priv->sock_info.xsk);
+ fd.events = POLLIN;
+ (void)poll(&fd, 1U, 0);
+ }
+
+ recvd = xsk_ring_cons__peek(&priv->sock_info.rx, num, &start_idx);
+
+ if (recvd == 0U) {
+ odp_ticketlock_unlock(&priv->rx_lock);
+ return 0;
+ }
+
+ procd = process_received(pktio_entry, &priv->sock_info, start_idx, packets, recvd);
+ xsk_ring_cons__release(&priv->sock_info.rx, recvd);
+ (void)reserve_fill_queue_elements(&priv->sock_info, recvd);
+ odp_ticketlock_unlock(&priv->rx_lock);
+
+ return procd;
+}
+
+static inline void populate_tx_desc(pool_t *pool, odp_packet_hdr_t *pkt_hdr,
+ struct xdp_desc *tx_desc)
+{
+ uint64_t frame_off;
+ uint64_t pkt_off;
+
+ frame_off = pkt_hdr->event_hdr.index.event * pool->block_size;
+ pkt_off = (uint64_t)(uintptr_t)pkt_hdr->event_hdr.base_data
+ - (uint64_t)(uintptr_t)pool->base_addr - frame_off;
+ pkt_off <<= XSK_UNALIGNED_BUF_OFFSET_SHIFT;
+ tx_desc->addr = frame_off | pkt_off;
+ tx_desc->len = pkt_hdr->frame_len;
+}
+
+static void handle_pending_tx(xdp_sock_info_t *sock_info, int num)
+{
+ struct xsk_ring_cons *compl_q;
+ uint32_t sent;
+ uint8_t *base_addr;
+ uint32_t start_idx;
+ uint64_t frame_off;
+ odp_packet_t pkt;
+
+ if (odp_unlikely(xsk_ring_prod__needs_wakeup(&sock_info->tx)))
+ (void)sendto(xsk_socket__fd(sock_info->xsk), NULL, 0U, MSG_DONTWAIT, NULL, 0U);
+
+ compl_q = &sock_info->compl_q;
+ sent = xsk_ring_cons__peek(compl_q, num, &start_idx);
+ base_addr = sock_info->umem_info->pool->base_addr;
+
+ if (sent) {
+ odp_packet_t packets[sent];
+
+ for (uint32_t i = 0U; i < sent; ++i) {
+ frame_off = *xsk_ring_cons__comp_addr(compl_q, start_idx++);
+ frame_off = xsk_umem__extract_addr(frame_off);
+ pkt = xsk_umem__get_data(base_addr, frame_off);
+ packets[i] = pkt;
+ packet_hdr(packets[i])->ms_pktio_idx = 0U;
+ }
+
+ odp_packet_free_multi(packets, sent);
+ xsk_ring_cons__release(compl_q, sent);
+ }
+}
+
+static int sock_xdp_send(pktio_entry_t *pktio_entry, int index ODP_UNUSED,
+ const odp_packet_t packets[], int num)
+{
+ pkt_xdp_t *priv;
+ xdp_sock_info_t *sock_info;
+ pool_t *pool;
+ odp_pool_t pool_hdl;
+ int pktio_idx, i;
+ struct xsk_ring_prod *tx;
+ odp_packet_t pkt;
+ odp_packet_hdr_t *pkt_hdr;
+ uint32_t start_idx;
+
+ if (odp_unlikely(num == 0))
+ return 0;
+
+ priv = pkt_priv(pktio_entry);
+ odp_ticketlock_lock(&priv->tx_lock);
+ sock_info = &priv->sock_info;
+ pool = sock_info->umem_info->pool;
+ pool_hdl = pool->pool_hdl;
+ pktio_idx = sock_info->pktio_idx;
+ tx = &sock_info->tx;
+
+ for (i = 0; i < num; ++i) {
+ pkt = ODP_PACKET_INVALID;
+
+ if (odp_unlikely(odp_packet_num_segs(packets[i]) > 1)) {
+ /* TODO: handle segmented packets */
+ ODP_ERR("Only single-segment packets supported\n");
+ break;
+ }
+
+ pkt_hdr = packet_hdr(packets[i]);
+
+ if (pkt_hdr->event_hdr.pool_ptr != pool) {
+ pkt = odp_packet_copy(packets[i], pool_hdl);
+
+ if (odp_unlikely(pkt == ODP_PACKET_INVALID))
+ break;
+
+ pkt_hdr = packet_hdr(pkt);
+ }
+
+ if (xsk_ring_prod__reserve(tx, 1U, &start_idx) == 0U) {
+ handle_pending_tx(sock_info, NUM_XDP_DESCS);
+
+ if (xsk_ring_prod__reserve(tx, 1U, &start_idx) == 0U) {
+ if (pkt != ODP_PACKET_INVALID)
+ odp_packet_free(pkt);
+
+ break;
+ }
+ }
+
+ if (pkt != ODP_PACKET_INVALID)
+ odp_packet_free(packets[i]);
+
+ pkt_hdr->ms_pktio_idx = pktio_idx;
+ populate_tx_desc(pool, pkt_hdr, xsk_ring_prod__tx_desc(tx, start_idx));
+ }
+
+ xsk_ring_prod__submit(tx, i);
+ handle_pending_tx(sock_info, NUM_XDP_DESCS);
+ odp_ticketlock_unlock(&priv->tx_lock);
+
+ return i;
+}
+
+static uint32_t sock_xdp_mtu_get(pktio_entry_t *pktio_entry)
+{
+ return pkt_priv(pktio_entry)->sock_info.mtu;
+}
+
+static int sock_xdp_mtu_set(pktio_entry_t *pktio_entry, uint32_t maxlen_input,
+ uint32_t maxlen_output ODP_UNUSED)
+{
+ pkt_xdp_t *priv = pkt_priv(pktio_entry);
+ int ret;
+
+ ret = _odp_mtu_set_fd(priv->sock_info.helper_sock, pktio_entry->s.name, maxlen_input);
+ if (ret)
+ return ret;
+
+ priv->sock_info.mtu = maxlen_input;
+
+ return 0;
+}
+
+static int sock_xdp_promisc_mode_set(pktio_entry_t *pktio_entry, int enable)
+{
+ return _odp_promisc_mode_set_fd(pkt_priv(pktio_entry)->sock_info.helper_sock,
+ pktio_entry->s.name, enable);
+}
+
+static int sock_xdp_promisc_mode_get(pktio_entry_t *pktio_entry)
+{
+ return _odp_promisc_mode_get_fd(pkt_priv(pktio_entry)->sock_info.helper_sock,
+ pktio_entry->s.name);
+}
+
+static int sock_xdp_mac_addr_get(pktio_entry_t *pktio_entry, void *mac_addr)
+{
+ return _odp_mac_addr_get_fd(pkt_priv(pktio_entry)->sock_info.helper_sock,
+ pktio_entry->s.name, mac_addr) ? -1 : ETH_ALEN;
+}
+
+static int sock_xdp_link_status(pktio_entry_t *pktio_entry)
+{
+ return _odp_link_status_fd(pkt_priv(pktio_entry)->sock_info.helper_sock,
+ pktio_entry->s.name);
+}
+
+static int sock_xdp_link_info(pktio_entry_t *pktio_entry, odp_pktio_link_info_t *info)
+{
+ return _odp_link_info_fd(pkt_priv(pktio_entry)->sock_info.helper_sock,
+ pktio_entry->s.name, info);
+}
+
+static int sock_xdp_capability(pktio_entry_t *pktio_entry, odp_pktio_capability_t *capa)
+{
+ pkt_xdp_t *priv = pkt_priv(pktio_entry);
+
+ memset(capa, 0, sizeof(odp_pktio_capability_t));
+ capa->max_input_queues = 1U;
+ capa->max_output_queues = 1U;
+ capa->set_op.op.promisc_mode = 1U;
+ capa->set_op.op.maxlen = 1U;
+
+ capa->maxlen.equal = true;
+ capa->maxlen.min_input = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_input = priv->sock_info.max_mtu;
+ capa->maxlen.min_output = _ODP_SOCKET_MTU_MIN;
+ capa->maxlen.max_output = priv->sock_info.max_mtu;
+
+ capa->config.parser.layer = ODP_PROTO_LAYER_ALL;
+
+ capa->stats.pktio.all_counters = 0U;
+ capa->stats.pktin_queue.all_counters = 0U;
+ capa->stats.pktout_queue.all_counters = 0U;
+
+ return 0;
+}
+
+const pktio_if_ops_t _odp_sock_xdp_pktio_ops = {
+ /* TODO: at least stats */
+ .name = "socket_xdp",
+ .print = NULL,
+ .init_global = sock_xdp_init_global,
+ .init_local = NULL,
+ .term = NULL,
+ .open = sock_xdp_open,
+ .close = sock_xdp_close,
+ .start = NULL,
+ .stop = NULL,
+ .stats = NULL,
+ .stats_reset = NULL,
+ .pktin_queue_stats = NULL,
+ .pktout_queue_stats = NULL,
+ .extra_stat_info = NULL,
+ .extra_stats = NULL,
+ .extra_stat_counter = NULL,
+ .pktio_ts_res = NULL,
+ .pktio_ts_from_ns = NULL,
+ .pktio_time = NULL,
+ .recv = sock_xdp_recv,
+ .recv_tmo = NULL,
+ .recv_mq_tmo = NULL,
+ .fd_set = NULL,
+ .send = sock_xdp_send,
+ .maxlen_get = sock_xdp_mtu_get,
+ .maxlen_set = sock_xdp_mtu_set,
+ .promisc_mode_set = sock_xdp_promisc_mode_set,
+ .promisc_mode_get = sock_xdp_promisc_mode_get,
+ .mac_get = sock_xdp_mac_addr_get,
+ .mac_set = NULL,
+ .link_status = sock_xdp_link_status,
+ .link_info = sock_xdp_link_info,
+ .capability = sock_xdp_capability,
+ .config = NULL,
+ .input_queues_config = NULL,
+ .output_queues_config = NULL
+};
+
+static odp_bool_t sock_xdp_is_mem_src_active(void)
+{
+ return !disable_pktio;
+}
+
+static void sock_xdp_force_mem_src_disable(void)
+{
+ disable_pktio = true;
+}
+
+static void sock_xdp_adjust_block_size(uint8_t *data ODP_UNUSED, uint32_t *block_size,
+ uint32_t *block_offset ODP_UNUSED, uint32_t *flags)
+{
+ const uint32_t size = *block_size + XDP_PACKET_HEADROOM;
+ const uint64_t ps = odp_sys_page_size();
+ /* AF_XDP requires frames to be between 2 kB and page size, so with
+ * XDP_ZEROCOPY: if the block size is less than 2 kB, adjust it to 2 kB;
+ * if it is larger than the page size, make pool creation fail. */
+ if (disable_pktio)
+ return;
+
+ if (size > ps) {
+ ODP_ERR("Adjusted pool block size larger than page size: %u > %" PRIu64 "\n",
+ size, ps);
+ *block_size = 0U;
+ return;
+ }
+
+ *flags |= ODP_SHM_HP;
+ *block_size = _ODP_MAX(size, MIN_FRAME_SIZE);
+}
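+
+/* Worked example (assuming the usual 256 B XDP_PACKET_HEADROOM and 4 kB
+ * pages): a requested 1600 B block grows to 1856 B and is then rounded
+ * up to MIN_FRAME_SIZE (2048 B); a 9000 B block exceeds the page size,
+ * so pool creation is failed by setting *block_size to zero. */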
+
+static int sock_xdp_umem_create(uint8_t *data, pool_t *pool)
+{
+ struct xsk_umem_config cfg;
+ xdp_umem_info_t *umem_info = (xdp_umem_info_t *)data;
+
+ umem_info->pool = pool;
+ /* The recommended fill queue size is >= HW RX ring size + AF_XDP RX
+ * ring size, so use twice the AF_XDP RX ring size. */
+ cfg.fill_size = NUM_XDP_DESCS * 2U; /* TODO: num descs vs pool size */
+ cfg.comp_size = NUM_XDP_DESCS;
+ cfg.frame_size = pool->block_size;
+ cfg.frame_headroom = sizeof(odp_packet_hdr_t) + pool->headroom;
+ cfg.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG;
+
+ return xsk_umem__create(&umem_info->umem, pool->base_addr, pool->shm_size,
+ &umem_info->fill_q, &umem_info->compl_q, &cfg);
+}
+
+static void sock_xdp_umem_delete(uint8_t *data)
+{
+ xdp_umem_info_t *umem_info = (xdp_umem_info_t *)data;
+
+ while (xsk_umem__delete(umem_info->umem) == -EBUSY)
+ continue;
+}
+
+const _odp_pool_mem_src_ops_t _odp_pool_sock_xdp_mem_src_ops = {
+ .name = "xdp_zc",
+ .is_active = sock_xdp_is_mem_src_active,
+ .force_disable = sock_xdp_force_mem_src_disable,
+ .adjust_size = sock_xdp_adjust_block_size,
+ .bind = sock_xdp_umem_create,
+ .unbind = sock_xdp_umem_delete
+};
+
+#else
+/* Avoid warning about empty translation unit */
+typedef int _odp_dummy;
+#endif
diff --git a/test/performance/odp_l2fwd.c b/test/performance/odp_l2fwd.c
index b9e3106da..1833dcf78 100644
--- a/test/performance/odp_l2fwd.c
+++ b/test/performance/odp_l2fwd.c
@@ -36,6 +36,9 @@
/* Maximum number of pktio queues per interface */
#define MAX_QUEUES 32
+/* Maximum number of schedule groups */
+#define MAX_GROUPS 32
+
/* Maximum number of pktio interfaces */
#define MAX_PKTIOS 8
@@ -48,6 +51,9 @@
/* Default vector timeout */
#define DEFAULT_VEC_TMO ODP_TIME_MSEC_IN_NS
+/* Maximum thread info string length */
+#define EXTRA_STR_LEN 32
+
/* Packet input mode */
typedef enum pktin_mode_t {
DIRECT_RECV,
@@ -97,6 +103,7 @@ typedef struct {
int chksum; /* Checksum offload */
int sched_mode; /* Scheduler mode */
int num_groups; /* Number of scheduling groups */
+ int group_mode; /* How threads join groups */
int burst_rx; /* Receive burst size */
int pool_per_if; /* Create pool per interface */
uint32_t num_pkt; /* Number of packets per pool */
@@ -110,6 +117,9 @@ typedef struct {
int promisc_mode; /* Promiscuous mode enabled */
int flow_aware; /* Flow aware scheduling enabled */
int mtu; /* Interface MTU */
+ int num_prio;
+ odp_schedule_prio_t prio[MAX_PKTIOS]; /* Priority of input queues of an interface */
+
} appl_args_t;
/* Statistics */
@@ -144,11 +154,12 @@ typedef struct thread_args_t {
} pktio[MAX_PKTIOS];
/* Groups to join */
- odp_schedule_group_t group[MAX_PKTIOS];
+ odp_schedule_group_t group[MAX_GROUPS];
int thr_idx;
int num_pktio;
- int num_groups;
+ int num_grp_join;
+
} thread_args_t;
/*
@@ -192,6 +203,13 @@ typedef struct {
/* Break workers loop if set to 1 */
odp_atomic_u32_t exit_threads;
+ uint32_t pkt_len;
+ uint32_t num_pkt;
+ uint32_t seg_len;
+ uint32_t vector_num;
+ uint32_t vector_max_size;
+ char cpumaskstr[ODP_CPUMASK_STR_SIZE];
+
} args_t;
/* Global pointer to args */
@@ -399,6 +417,7 @@ static int run_worker_sched_mode_vector(void *arg)
int i;
int pktio, num_pktio;
uint16_t max_burst;
+ odp_thrmask_t mask;
odp_pktout_queue_t pktout[MAX_PKTIOS];
odp_queue_t tx_queue[MAX_PKTIOS];
thread_args_t *thr_args = arg;
@@ -409,19 +428,14 @@ static int run_worker_sched_mode_vector(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups > 0) {
- odp_thrmask_t mask;
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
- odp_thrmask_zero(&mask);
- odp_thrmask_set(&mask, thr);
-
- /* Join non-default groups */
- for (i = 0; i < thr_args->num_groups; i++) {
- if (odp_schedule_group_join(thr_args->group[i],
- &mask)) {
- ODPH_ERR("Join failed\n");
- return -1;
- }
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_grp_join; i++) {
+ if (odp_schedule_group_join(thr_args->group[i], &mask)) {
+ ODPH_ERR("Join failed: %i\n", i);
+ return -1;
}
}
@@ -548,8 +562,10 @@ static int run_worker_sched_mode(void *arg)
int i;
int pktio, num_pktio;
uint16_t max_burst;
+ odp_thrmask_t mask;
odp_pktout_queue_t pktout[MAX_PKTIOS];
odp_queue_t tx_queue[MAX_PKTIOS];
+ char extra_str[EXTRA_STR_LEN];
thread_args_t *thr_args = arg;
stats_t *stats = &thr_args->stats;
int use_event_queue = gbl_args->appl.out_mode;
@@ -558,22 +574,31 @@ static int run_worker_sched_mode(void *arg)
thr = odp_thread_id();
max_burst = gbl_args->appl.burst_rx;
- if (gbl_args->appl.num_groups > 0) {
- odp_thrmask_t mask;
+ memset(extra_str, 0, EXTRA_STR_LEN);
+ odp_thrmask_zero(&mask);
+ odp_thrmask_set(&mask, thr);
+
+ /* Join non-default groups */
+ for (i = 0; i < thr_args->num_grp_join; i++) {
+ if (odp_schedule_group_join(thr_args->group[i], &mask)) {
+ ODPH_ERR("Join failed: %i\n", i);
+ return -1;
+ }
- odp_thrmask_zero(&mask);
- odp_thrmask_set(&mask, thr);
+ if (gbl_args->appl.verbose) {
+ uint64_t tmp = (uint64_t)(uintptr_t)thr_args->group[i];
- /* Join non-default groups */
- for (i = 0; i < thr_args->num_groups; i++) {
- if (odp_schedule_group_join(thr_args->group[i],
- &mask)) {
- ODPH_ERR("Join failed\n");
- return -1;
- }
+ printf("[%02i] Joined group 0x%" PRIx64 "\n", thr, tmp);
}
}
+ if (thr_args->num_grp_join)
+ snprintf(extra_str, EXTRA_STR_LEN, ", joined %i groups", thr_args->num_grp_join);
+ else if (gbl_args->appl.num_groups == 0)
+ snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_ALL");
+ else if (gbl_args->appl.num_groups)
+ snprintf(extra_str, EXTRA_STR_LEN, ", GROUP_WORKER");
+
num_pktio = thr_args->num_pktio;
if (num_pktio > MAX_PKTIOS) {
@@ -586,10 +611,10 @@ static int run_worker_sched_mode(void *arg)
pktout[pktio] = thr_args->pktio[pktio].pktout;
}
- printf("[%02i] PKTIN_SCHED_%s, %s\n", thr,
+ printf("[%02i] PKTIN_SCHED_%s, %s%s\n", thr,
(in_mode == SCHED_PARALLEL) ? "PARALLEL" :
((in_mode == SCHED_ATOMIC) ? "ATOMIC" : "ORDERED"),
- (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT");
+ (use_event_queue) ? "PKTOUT_QUEUE" : "PKTOUT_DIRECT", extra_str);
odp_barrier_wait(&gbl_args->init_barrier);
@@ -851,7 +876,7 @@ static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_poo
pktio_capa.vector.max_size : pktio_capa.vector.min_size;
printf("\nWarning: Modified vector size to %u\n\n", vec_size);
} else {
- ODPH_ERR("Error: Invalid pktio vector size %u, valid range [%u, %u]\n",
+ ODPH_ERR("Invalid pktio vector size %u, valid range [%u, %u]\n",
vec_size, pktio_capa.vector.min_size, pktio_capa.vector.max_size);
return -1;
}
@@ -870,7 +895,7 @@ static int set_pktin_vector_params(odp_pktin_queue_param_t *pktin_param, odp_poo
pktio_capa.vector.max_tmo_ns : pktio_capa.vector.min_tmo_ns;
printf("\nWarning: Modified vector timeout to %" PRIu64 "\n\n", vec_tmo_ns);
} else {
- ODPH_ERR("Error: Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
+ ODPH_ERR("Invalid vector timeout %" PRIu64 ", valid range [%" PRIu64
", %" PRIu64 "]\n", vec_tmo_ns,
pktio_capa.vector.min_tmo_ns, pktio_capa.vector.max_tmo_ns);
return -1;
@@ -917,12 +942,12 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
pktio = odp_pktio_open(dev, pool, &pktio_param);
if (pktio == ODP_PKTIO_INVALID) {
- ODPH_ERR("Error: failed to open %s\n", dev);
+ ODPH_ERR("Pktio open failed: %s\n", dev);
return -1;
}
if (odp_pktio_info(pktio, &info)) {
- ODPH_ERR("Error: pktio info failed %s\n", dev);
+ ODPH_ERR("Pktio info failed: %s\n", dev);
return -1;
}
@@ -933,7 +958,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
odp_pktio_print(pktio);
if (odp_pktio_capability(pktio, &pktio_capa)) {
- ODPH_ERR("Error: pktio capability query failed %s\n", dev);
+ ODPH_ERR("Pktio capability query failed: %s\n", dev);
return -1;
}
@@ -957,14 +982,13 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (gbl_args->appl.promisc_mode) {
if (!pktio_capa.set_op.op.promisc_mode) {
- ODPH_ERR("Error: promisc mode set not supported %s\n",
- dev);
+ ODPH_ERR("Promisc mode set not supported: %s\n", dev);
return -1;
}
/* Enable promisc mode */
if (odp_pktio_promisc_mode_set(pktio, true)) {
- ODPH_ERR("Error: promisc mode enable failed %s\n", dev);
+ ODPH_ERR("Promisc mode enable failed: %s\n", dev);
return -1;
}
}
@@ -974,14 +998,14 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
uint32_t maxlen_output = pktio_capa.maxlen.max_output ? gbl_args->appl.mtu : 0;
if (!pktio_capa.set_op.op.maxlen) {
- ODPH_ERR("Error: modifying interface MTU not supported %s\n", dev);
+ ODPH_ERR("Modifying interface MTU not supported: %s\n", dev);
return -1;
}
if (maxlen_input &&
(maxlen_input < pktio_capa.maxlen.min_input ||
maxlen_input > pktio_capa.maxlen.max_input)) {
- ODPH_ERR("Error: unsupported MTU value %" PRIu32 " for %s "
+ ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
"(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_input, dev,
pktio_capa.maxlen.min_input, pktio_capa.maxlen.max_input);
return -1;
@@ -989,14 +1013,14 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (maxlen_output &&
(maxlen_output < pktio_capa.maxlen.min_output ||
maxlen_output > pktio_capa.maxlen.max_output)) {
- ODPH_ERR("Error: unsupported MTU value %" PRIu32 " for %s "
+ ODPH_ERR("Unsupported MTU value %" PRIu32 " for %s "
"(min %" PRIu32 ", max %" PRIu32 ")\n", maxlen_output, dev,
pktio_capa.maxlen.min_output, pktio_capa.maxlen.max_output);
return -1;
}
if (odp_pktio_maxlen_set(pktio, maxlen_input, maxlen_output)) {
- ODPH_ERR("Error: setting MTU failed %s\n", dev);
+ ODPH_ERR("Setting MTU failed: %s\n", dev);
return -1;
}
}
@@ -1010,6 +1034,15 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
mode_tx = ODP_PKTIO_OP_MT_UNSAFE;
if (gbl_args->appl.sched_mode) {
+ odp_schedule_prio_t prio;
+
+ if (gbl_args->appl.num_prio) {
+ prio = gbl_args->appl.prio[idx];
+ } else {
+ prio = odp_schedule_default_prio();
+ gbl_args->appl.prio[idx] = prio;
+ }
+
if (gbl_args->appl.in_mode == SCHED_ATOMIC)
sync_mode = ODP_SCHED_SYNC_ATOMIC;
else if (gbl_args->appl.in_mode == SCHED_ORDERED)
@@ -1017,7 +1050,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
else
sync_mode = ODP_SCHED_SYNC_PARALLEL;
- pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
+ pktin_param.queue_param.sched.prio = prio;
pktin_param.queue_param.sched.sync = sync_mode;
pktin_param.queue_param.sched.group = group;
}
@@ -1046,7 +1079,7 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
if (gbl_args->appl.vector_mode) {
if (!pktio_capa.vector.supported) {
- ODPH_ERR("Error: packet vector input not supported %s\n", dev);
+ ODPH_ERR("Packet vector input not supported: %s\n", dev);
return -1;
}
if (set_pktin_vector_params(&pktin_param, vec_pool, pktio_capa))
@@ -1054,43 +1087,35 @@ static int create_pktio(const char *dev, int idx, int num_rx, int num_tx, odp_po
}
if (odp_pktin_queue_config(pktio, &pktin_param)) {
- ODPH_ERR("Error: input queue config failed %s\n", dev);
+ ODPH_ERR("Input queue config failed: %s\n", dev);
return -1;
}
if (odp_pktout_queue_config(pktio, &pktout_param)) {
- ODPH_ERR("Error: output queue config failed %s\n", dev);
+ ODPH_ERR("Output queue config failed: %s\n", dev);
return -1;
}
if (gbl_args->appl.in_mode == DIRECT_RECV) {
- if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin,
- num_rx) != num_rx) {
- ODPH_ERR("Error: pktin queue query failed %s\n", dev);
+ if (odp_pktin_queue(pktio, gbl_args->pktios[idx].pktin, num_rx) != num_rx) {
+ ODPH_ERR("Pktin queue query failed: %s\n", dev);
return -1;
}
} else {
- if (odp_pktin_event_queue(pktio,
- gbl_args->pktios[idx].rx_q,
- num_rx) != num_rx) {
- ODPH_ERR("Error: pktin event queue query failed %s\n",
- dev);
+ if (odp_pktin_event_queue(pktio, gbl_args->pktios[idx].rx_q, num_rx) != num_rx) {
+ ODPH_ERR("Pktin event queue query failed: %s\n", dev);
return -1;
}
}
if (gbl_args->appl.out_mode == PKTOUT_DIRECT) {
- if (odp_pktout_queue(pktio,
- gbl_args->pktios[idx].pktout,
- num_tx) != num_tx) {
- ODPH_ERR("Error: pktout queue query failed %s\n", dev);
+ if (odp_pktout_queue(pktio, gbl_args->pktios[idx].pktout, num_tx) != num_tx) {
+ ODPH_ERR("Pktout queue query failed: %s\n", dev);
return -1;
}
} else {
- if (odp_pktout_event_queue(pktio,
- gbl_args->pktios[idx].tx_q,
- num_tx) != num_tx) {
- ODPH_ERR("Error: event queue query failed %s\n", dev);
+ if (odp_pktout_event_queue(pktio, gbl_args->pktios[idx].tx_q, num_tx) != num_tx) {
+ ODPH_ERR("Event queue query failed: %s\n", dev);
return -1;
}
}
@@ -1435,11 +1460,25 @@ static void usage(char *progname)
" -e, --error_check <arg> 0: Don't check packet errors (default)\n"
" 1: Check packet errors\n"
" -k, --chksum <arg> 0: Don't use checksum offload (default)\n"
- " 1: Use checksum offload\n"
- " -g, --groups <num> Number of groups to use: 0 ... num\n"
- " -1: SCHED_GROUP_WORKER\n"
- " 0: SCHED_GROUP_ALL (default)\n"
- " num: must not exceed number of interfaces or workers\n"
+ " 1: Use checksum offload\n",
+ NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS);
+
+ printf(" -g, --groups <num> Number of new groups to create (1 ... num). Interfaces\n"
+ " are placed into the groups in round robin.\n"
+ " 0: Use SCHED_GROUP_ALL (default)\n"
+ " -1: Use SCHED_GROUP_WORKER\n"
+ " -G, --group_mode <arg> Select how threads join new groups (when -g > 0)\n"
+ " 0: All threads join all created groups (default)\n"
+ " 1: All threads join first N created groups.\n"
+ " N is number of interfaces (== active groups).\n"
+ " 2: Each thread joins a part of the first N groups\n"
+ " (in round robin).\n"
+ " -I, --prio <prio list> Schedule priority of packet input queues.\n"
+ " Comma separated list of priorities (no spaces). A value\n"
+ " per interface. All queues of an interface have the same\n"
+ " priority. Values must be between odp_schedule_min_prio\n"
+ " and odp_schedule_max_prio. odp_schedule_default_prio is\n"
+ " used by default.\n"
" -b, --burst_rx <num> 0: Use max burst size (default)\n"
" num: Max number of packets per receive call\n"
" -p, --packet_copy 0: Don't copy packet (default)\n"
@@ -1464,9 +1503,7 @@ static void usage(char *progname)
" -f, --flow_aware Enable flow aware scheduling.\n"
" -v, --verbose Verbose output.\n"
" -h, --help Display help and exit.\n\n"
- "\n", NO_PATH(progname), NO_PATH(progname), MAX_PKTIOS, DEFAULT_VEC_SIZE,
- DEFAULT_VEC_TMO, POOL_PKT_LEN
- );
+ "\n", DEFAULT_VEC_SIZE, DEFAULT_VEC_TMO, POOL_PKT_LEN);
}
/*
@@ -1481,8 +1518,8 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
int opt;
int long_index;
char *token;
- char *addr_str;
- size_t len;
+ char *tmp_str;
+ size_t str_len, len;
int i;
static const struct option longopts[] = {
{"count", required_argument, NULL, 'c'},
@@ -1497,6 +1534,8 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{"error_check", required_argument, NULL, 'e'},
{"chksum", required_argument, NULL, 'k'},
{"groups", required_argument, NULL, 'g'},
+ {"group_mode", required_argument, NULL, 'G'},
+ {"prio", required_argument, NULL, 'I'},
{"burst_rx", required_argument, NULL, 'b'},
{"packet_copy", required_argument, NULL, 'p'},
{"pool_per_if", required_argument, NULL, 'y'},
@@ -1515,7 +1554,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:b:p:y:n:l:L:w:x:z:M:uPfvh";
+ static const char *shortopts = "+c:t:a:i:m:o:r:d:s:e:k:g:G:I:b:p:y:n:l:L:w:x:z:M:uPfvh";
appl_args->time = 0; /* loop forever if time to run is 0 */
appl_args->accuracy = 1; /* get and print pps stats second */
@@ -1523,6 +1562,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->dst_change = 1; /* change eth dst address by default */
appl_args->src_change = 1; /* change eth src address by default */
appl_args->num_groups = 0; /* use default group */
+ appl_args->group_mode = 0;
appl_args->error_check = 0; /* don't check packet errors by default */
appl_args->packet_copy = 0;
appl_args->burst_rx = 0;
@@ -1539,6 +1579,7 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->vec_size = 0;
appl_args->vec_tmo_ns = 0;
appl_args->flow_aware = 0;
+ appl_args->num_prio = 0;
while (1) {
opt = getopt_long(argc, argv, shortopts, longopts, &long_index);
@@ -1556,60 +1597,58 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'a':
appl_args->accuracy = atoi(optarg);
break;
- /* parse packet-io interface names */
case 'r':
len = strlen(optarg);
if (len == 0) {
- usage(argv[0]);
+ ODPH_ERR("Bad dest address string\n");
exit(EXIT_FAILURE);
}
- len += 1; /* add room for '\0' */
- addr_str = malloc(len);
- if (addr_str == NULL) {
- usage(argv[0]);
+ str_len = len + 1;
+
+ tmp_str = malloc(str_len);
+ if (tmp_str == NULL) {
+ ODPH_ERR("Dest address malloc() failed\n");
exit(EXIT_FAILURE);
}
/* store the mac addresses names */
- strcpy(addr_str, optarg);
- for (token = strtok(addr_str, ","), i = 0;
+ memcpy(tmp_str, optarg, str_len);
+ for (token = strtok(tmp_str, ","), i = 0;
token != NULL; token = strtok(NULL, ","), i++) {
if (i >= MAX_PKTIOS) {
- printf("too many MAC addresses\n");
- usage(argv[0]);
+ ODPH_ERR("Too many MAC addresses\n");
exit(EXIT_FAILURE);
}
- if (odph_eth_addr_parse(&appl_args->addrs[i],
- token) != 0) {
- printf("invalid MAC address\n");
- usage(argv[0]);
+ if (odph_eth_addr_parse(&appl_args->addrs[i], token) != 0) {
+ ODPH_ERR("Invalid MAC address\n");
exit(EXIT_FAILURE);
}
}
appl_args->addr_count = i;
if (appl_args->addr_count < 1) {
- usage(argv[0]);
+ ODPH_ERR("Bad dest address count\n");
exit(EXIT_FAILURE);
}
- free(addr_str);
+ free(tmp_str);
break;
case 'i':
len = strlen(optarg);
if (len == 0) {
- usage(argv[0]);
+ ODPH_ERR("Bad pktio interface string\n");
exit(EXIT_FAILURE);
}
- len += 1; /* add room for '\0' */
- appl_args->if_str = malloc(len);
+ str_len = len + 1;
+
+ appl_args->if_str = malloc(str_len);
if (appl_args->if_str == NULL) {
- usage(argv[0]);
+ ODPH_ERR("Pktio interface malloc() failed\n");
exit(EXIT_FAILURE);
}
/* count the number of tokens separated by ',' */
- strcpy(appl_args->if_str, optarg);
+ memcpy(appl_args->if_str, optarg, str_len);
for (token = strtok(appl_args->if_str, ","), i = 0;
token != NULL;
token = strtok(NULL, ","), i++)
@@ -1617,18 +1656,16 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
appl_args->if_count = i;
- if (appl_args->if_count < 1 ||
- appl_args->if_count > MAX_PKTIOS) {
- usage(argv[0]);
+ if (appl_args->if_count < 1 || appl_args->if_count > MAX_PKTIOS) {
+ ODPH_ERR("Bad pktio interface count: %i\n", appl_args->if_count);
exit(EXIT_FAILURE);
}
/* allocate storage for the if names */
- appl_args->if_names =
- calloc(appl_args->if_count, sizeof(char *));
+ appl_args->if_names = calloc(appl_args->if_count, sizeof(char *));
/* store the if names (reset names string) */
- strcpy(appl_args->if_str, optarg);
+ memcpy(appl_args->if_str, optarg, str_len);
for (token = strtok(appl_args->if_str, ","), i = 0;
token != NULL; token = strtok(NULL, ","), i++) {
appl_args->if_names[i] = token;
@@ -1667,6 +1704,44 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
case 'g':
appl_args->num_groups = atoi(optarg);
break;
+ case 'G':
+ appl_args->group_mode = atoi(optarg);
+ break;
+ case 'I':
+ len = strlen(optarg);
+ if (len == 0) {
+ ODPH_ERR("Bad priority list\n");
+ exit(EXIT_FAILURE);
+ }
+
+ str_len = len + 1;
+
+ tmp_str = malloc(str_len);
+ if (tmp_str == NULL) {
+ ODPH_ERR("Priority list malloc() failed\n");
+ exit(EXIT_FAILURE);
+ }
+
+ memcpy(tmp_str, optarg, str_len);
+ token = strtok(tmp_str, ",");
+
+ for (i = 0; token != NULL; token = strtok(NULL, ","), i++) {
+ if (i >= MAX_PKTIOS) {
+ ODPH_ERR("Too many priorities\n");
+ exit(EXIT_FAILURE);
+ }
+
+ appl_args->prio[i] = atoi(token);
+ appl_args->num_prio++;
+ }
+
+ if (appl_args->num_prio == 0) {
+ ODPH_ERR("Bad priority list\n");
+ exit(EXIT_FAILURE);
+ }
+
+ free(tmp_str);
+ break;
case 'b':
appl_args->burst_rx = atoi(optarg);
break;
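
Note: the option parsing above copies optarg with memcpy() (the copied length includes the terminating '\0') before strtok() modifies the buffer in place. A self-contained sketch of the same pattern, with an illustrative helper name:

#include <stdlib.h>
#include <string.h>

/* Parse a comma separated integer list ("1,2,3") into out[]. Returns the
 * number of values parsed, or -1 on error. strtok() writes into its input,
 * so parse a private copy of the option string. */
static int parse_int_list(const char *arg, int out[], int max)
{
    size_t str_len = strlen(arg) + 1; /* include the '\0' */
    char *tmp = malloc(str_len);
    char *tok;
    int i = 0;

    if (tmp == NULL)
        return -1;

    memcpy(tmp, arg, str_len);

    for (tok = strtok(tmp, ","); tok != NULL; tok = strtok(NULL, ",")) {
        if (i >= max) {
            free(tmp);
            return -1;
        }
        out[i++] = atoi(tok);
    }

    free(tmp);
    return i;
}
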
@@ -1719,20 +1794,23 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
}
if (appl_args->if_count == 0) {
- usage(argv[0]);
+ ODPH_ERR("No pktio interfaces\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (appl_args->num_prio && appl_args->num_prio != appl_args->if_count) {
+ ODPH_ERR("Different number of priorities and pktio interfaces\n");
exit(EXIT_FAILURE);
}
- if (appl_args->addr_count != 0 &&
- appl_args->addr_count != appl_args->if_count) {
- printf("Number of destination addresses differs from number"
- " of interfaces\n");
- usage(argv[0]);
+
+ if (appl_args->addr_count != 0 && appl_args->addr_count != appl_args->if_count) {
+ ODPH_ERR("Number of dest addresses differs from number of interfaces\n");
exit(EXIT_FAILURE);
}
if (appl_args->burst_rx > MAX_PKT_BURST) {
- printf("Error: Burst size (%i) too large. Maximum is %i.\n",
- appl_args->burst_rx, MAX_PKT_BURST);
+ ODPH_ERR("Burst size (%i) too large. Maximum is %i.\n",
+ appl_args->burst_rx, MAX_PKT_BURST);
exit(EXIT_FAILURE);
}
@@ -1750,11 +1828,10 @@ static void parse_args(int argc, char *argv[], appl_args_t *appl_args)
/*
* Print system and application info
*/
-static void print_info(appl_args_t *appl_args)
+static void print_info(void)
{
int i;
-
- odp_sys_info_print();
+ appl_args_t *appl_args = &gbl_args->appl;
printf("\n"
"odp_l2fwd options\n"
@@ -1801,6 +1878,28 @@ static void print_info(appl_args_t *appl_args)
appl_args->chksum ? "chksum " : "",
appl_args->packet_copy ? "packet_copy" : "");
}
+
+ printf("Num worker threads: %i\n", appl_args->num_workers);
+ printf("CPU mask: %s\n", gbl_args->cpumaskstr);
+
+ if (appl_args->num_groups > 0)
+ printf("num groups: %i\n", appl_args->num_groups);
+ else if (appl_args->num_groups == 0)
+ printf("group: ODP_SCHED_GROUP_ALL\n");
+ else
+ printf("group: ODP_SCHED_GROUP_WORKER\n");
+
+ printf("Packets per pool: %u\n", gbl_args->num_pkt);
+ printf("Packet length: %u\n", gbl_args->pkt_len);
+ printf("Segment length: %u\n", gbl_args->seg_len);
+ printf("Vectors per pool: %u\n", gbl_args->vector_num);
+ printf("Vector size: %u\n", gbl_args->vector_max_size);
+ printf("Priority per IF: ");
+
+ for (i = 0; i < appl_args->if_count; i++)
+ printf(" %i", appl_args->prio[i]);
+
+ printf("\n\n");
}
static void gbl_args_init(args_t *args)
@@ -1851,7 +1950,7 @@ static int set_vector_pool_params(odp_pool_param_t *params, odp_pool_capability_
vec_size = pool_capa.vector.max_size;
printf("\nWarning: Vector size reduced to %u\n\n", vec_size);
} else {
- ODPH_ERR("Error: Vector size too big %u. Maximum is %u.\n",
+ ODPH_ERR("Vector size too big %u. Maximum is %u.\n",
vec_size, pool_capa.vector.max_size);
return -1;
}
@@ -1871,7 +1970,7 @@ static int set_vector_pool_params(odp_pool_param_t *params, odp_pool_capability_
num_vec = pool_capa.vector.max_num;
printf("\nWarning: number of vectors reduced to %u\n\n", num_vec);
} else {
- ODPH_ERR("Error: Too many vectors (%u) per pool. Maximum is %u.\n",
+ ODPH_ERR("Too many vectors (%u) per pool. Maximum is %u.\n",
num_vec, pool_capa.vector.max_num);
return -1;
}
@@ -1896,7 +1995,6 @@ int main(int argc, char *argv[])
int num_workers, num_thr;
odp_shm_t shm;
odp_cpumask_t cpumask;
- char cpumaskstr[ODP_CPUMASK_STR_SIZE];
odph_ethaddr_t new_addr;
odp_pool_param_t params;
int ret;
@@ -1904,8 +2002,8 @@ int main(int argc, char *argv[])
int if_count, num_pools, num_vec_pools;
int (*thr_run_func)(void *);
odp_instance_t instance;
- int num_groups;
- odp_schedule_group_t group[MAX_PKTIOS];
+ int num_groups, max_groups;
+ odp_schedule_group_t group[MAX_GROUPS];
odp_pool_t pool_tbl[MAX_PKTIOS], vec_pool_tbl[MAX_PKTIOS];
odp_pool_t pool, vec_pool;
odp_init_t init;
@@ -1917,7 +2015,7 @@ int main(int argc, char *argv[])
/* Let helper collect its own arguments (e.g. --odph_proc) */
argc = odph_parse_options(argc, argv);
if (odph_options(&helper_options)) {
- ODPH_ERR("Error: reading ODP helper options failed.\n");
+ ODPH_ERR("Reading ODP helper options failed.\n");
exit(EXIT_FAILURE);
}
@@ -1939,13 +2037,13 @@ int main(int argc, char *argv[])
/* Init ODP before calling anything else */
if (odp_init_global(&instance, &init, NULL)) {
- ODPH_ERR("Error: ODP global init failed.\n");
+ ODPH_ERR("ODP global init failed.\n");
exit(EXIT_FAILURE);
}
/* Init this thread */
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
- ODPH_ERR("Error: ODP local init failed.\n");
+ ODPH_ERR("ODP local init failed.\n");
exit(EXIT_FAILURE);
}
@@ -1954,14 +2052,14 @@ int main(int argc, char *argv[])
ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
- ODPH_ERR("Error: shared mem reserve failed.\n");
+ ODPH_ERR("Shared mem reserve failed.\n");
exit(EXIT_FAILURE);
}
gbl_args = odp_shm_addr(shm);
if (gbl_args == NULL) {
- ODPH_ERR("Error: shared mem alloc failed.\n");
+ ODPH_ERR("Shared mem addr failed.\n");
exit(EXIT_FAILURE);
}
gbl_args_init(gbl_args);
@@ -1969,19 +2067,18 @@ int main(int argc, char *argv[])
/* Parse and store the application arguments */
parse_args(argc, argv, &gbl_args->appl);
+ odp_sys_info_print();
+
if (sched_mode(gbl_args->appl.in_mode))
gbl_args->appl.sched_mode = 1;
- /* Print both system and application information */
- print_info(&gbl_args->appl);
-
num_workers = MAX_WORKERS;
if (gbl_args->appl.cpu_count && gbl_args->appl.cpu_count < MAX_WORKERS)
num_workers = gbl_args->appl.cpu_count;
/* Get default worker cpumask */
num_workers = odp_cpumask_default_worker(&cpumask, num_workers);
- (void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
+ (void)odp_cpumask_to_str(&cpumask, gbl_args->cpumaskstr, sizeof(gbl_args->cpumaskstr));
gbl_args->appl.num_workers = num_workers;
@@ -1990,37 +2087,17 @@ int main(int argc, char *argv[])
if_count = gbl_args->appl.if_count;
- num_groups = gbl_args->appl.num_groups;
-
- printf("Num worker threads: %i\n", num_workers);
- printf("First CPU: %i\n", odp_cpumask_first(&cpumask));
- printf("CPU mask: %s\n", cpumaskstr);
-
- if (num_groups > 0)
- printf("num groups: %i\n", num_groups);
- else if (num_groups == 0)
- printf("group: ODP_SCHED_GROUP_ALL\n");
- else
- printf("group: ODP_SCHED_GROUP_WORKER\n");
-
-
- if (num_groups > if_count || num_groups > num_workers) {
- ODPH_ERR("Too many groups. Number of groups may not exceed "
- "number of interfaces or workers.\n");
- exit(EXIT_FAILURE);
- }
-
num_pools = 1;
if (gbl_args->appl.pool_per_if)
num_pools = if_count;
if (odp_pool_capability(&pool_capa)) {
- ODPH_ERR("Error: pool capability failed\n");
+ ODPH_ERR("Pool capability failed\n");
return -1;
}
if (num_pools > (int)pool_capa.pkt.max_pools) {
- ODPH_ERR("Error: Too many pools %i\n", num_pools);
+ ODPH_ERR("Too many pools %i\n", num_pools);
return -1;
}
@@ -2063,16 +2140,15 @@ int main(int argc, char *argv[])
printf("\nWarning: number of packets reduced to %u\n\n",
num_pkt);
} else {
- ODPH_ERR("Error: Too many packets %u. Maximum is %u.\n",
+ ODPH_ERR("Too many packets %u. Maximum is %u.\n",
num_pkt, pool_capa.pkt.max_num);
return -1;
}
}
- printf("Packets per pool: %u\n", num_pkt);
- printf("Packet length: %u\n", pkt_len);
- printf("Segment length: %u\n", seg_len);
- printf("\n\n");
+ gbl_args->num_pkt = num_pkt;
+ gbl_args->pkt_len = pkt_len;
+ gbl_args->seg_len = seg_len;
/* Create packet pool */
odp_pool_param_init(&params);
@@ -2085,7 +2161,7 @@ int main(int argc, char *argv[])
pool_tbl[i] = odp_pool_create("packet pool", &params);
if (pool_tbl[i] == ODP_POOL_INVALID) {
- ODPH_ERR("Error: pool create failed %i\n", i);
+ ODPH_ERR("Pool create failed %i\n", i);
exit(EXIT_FAILURE);
}
@@ -2097,13 +2173,13 @@ int main(int argc, char *argv[])
num_vec_pools = 0;
if (gbl_args->appl.vector_mode) {
if (!sched_mode(gbl_args->appl.in_mode)) {
- ODPH_ERR("Error: vector mode only supports scheduler pktin modes (1-3)\n");
+ ODPH_ERR("Vector mode only supports scheduler pktin modes (1-3)\n");
return -1;
}
num_vec_pools = gbl_args->appl.pool_per_if ? if_count : 1;
if (num_vec_pools > (int)pool_capa.vector.max_pools) {
- ODPH_ERR("Error: Too many vector pools %i\n", num_vec_pools);
+ ODPH_ERR("Too many vector pools %i\n", num_vec_pools);
return -1;
}
@@ -2111,15 +2187,14 @@ int main(int argc, char *argv[])
if (set_vector_pool_params(&params, pool_capa))
return -1;
- printf("Vectors per pool: %u\n", params.vector.num);
- printf("Vector size: %u\n", params.vector.max_size);
- printf("\n\n");
+ gbl_args->vector_num = params.vector.num;
+ gbl_args->vector_max_size = params.vector.max_size;
for (i = 0; i < num_vec_pools; i++) {
vec_pool_tbl[i] = odp_pool_create("vector pool", &params);
if (vec_pool_tbl[i] == ODP_POOL_INVALID) {
- ODPH_ERR("Error: vector pool create failed %i\n", i);
+ ODPH_ERR("Vector pool create failed %i\n", i);
exit(EXIT_FAILURE);
}
@@ -2137,7 +2212,7 @@ int main(int argc, char *argv[])
odp_schedule_config_init(&sched_config);
if (odp_schedule_capability(&sched_capa)) {
- ODPH_ERR("Error: schedule capability failed\n");
+ ODPH_ERR("Schedule capability failed\n");
exit(EXIT_FAILURE);
}
@@ -2145,11 +2220,22 @@ int main(int argc, char *argv[])
if (sched_capa.max_flow_id) {
sched_config.max_flow_id = sched_capa.max_flow_id;
} else {
- ODPH_ERR("Error: flow aware mode not supported\n");
+ ODPH_ERR("Flow aware mode not supported\n");
exit(EXIT_FAILURE);
}
}
+ num_groups = gbl_args->appl.num_groups;
+ /* Predefined groups are enabled by default */
+ max_groups = sched_capa.max_groups - 3;
+ if (max_groups > MAX_GROUPS)
+ max_groups = MAX_GROUPS;
+
+ if (num_groups > max_groups) {
+ ODPH_ERR("Too many groups. Maximum is %i.\n", max_groups);
+ exit(EXIT_FAILURE);
+ }
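
Note: three slots are subtracted from sched_capa.max_groups because the predefined groups (ODP_SCHED_GROUP_ALL, _WORKER, _CONTROL) remain enabled. A hedged sketch of the same arithmetic, assuming each predefined group consumes one capability slot as this check does:

#include <odp_api.h>

/* Sketch: how many schedule groups the application itself may create,
 * assuming the three predefined groups each take one slot of
 * sched_capa.max_groups. Returns -1 on capability query failure. */
static int creatable_groups(void)
{
    odp_schedule_capability_t capa;

    if (odp_schedule_capability(&capa))
        return -1;

    return (int)capa.max_groups - 3;
}
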
+
odp_schedule_config(&sched_config);
/* Default */
@@ -2196,7 +2282,7 @@ int main(int argc, char *argv[])
if (odp_pktio_mac_addr(gbl_args->pktios[i].pktio,
gbl_args->port_eth_addr[i].addr,
ODPH_ETHADDR_LEN) != ODPH_ETHADDR_LEN) {
- ODPH_ERR("Error: interface ethernet address unknown\n");
+ ODPH_ERR("Interface ethernet address unknown\n");
exit(EXIT_FAILURE);
}
@@ -2217,6 +2303,9 @@ int main(int argc, char *argv[])
gbl_args->pktios[i].pktio = ODP_PKTIO_INVALID;
+ /* Print application information */
+ print_info();
+
bind_queues();
init_port_lookup_tbl();
@@ -2245,14 +2334,47 @@ int main(int argc, char *argv[])
thr_common.sync = 1;
for (i = 0; i < num_workers; ++i) {
+ int j;
+ int num_join;
+ int mode = gbl_args->appl.group_mode;
+
odph_thread_param_init(&thr_param[i]);
thr_param[i].start = thr_run_func;
thr_param[i].arg = &gbl_args->thread_args[i];
thr_param[i].thr_type = ODP_THREAD_WORKER;
- /* Round robin threads to groups */
- gbl_args->thread_args[i].num_groups = 1;
- gbl_args->thread_args[i].group[0] = group[i % num_groups];
+ gbl_args->thread_args[i].num_grp_join = 0;
+
+ /* Fill in list of groups to join */
+ if (gbl_args->appl.num_groups > 0) {
+ num_join = if_count < num_groups ? if_count : num_groups;
+
+ if (mode == 0 || mode == 1) {
+ /* All threads join all groups */
+ if (mode == 0)
+ num_join = num_groups;
+
+ gbl_args->thread_args[i].num_grp_join = num_join;
+
+ for (j = 0; j < num_join; j++)
+ gbl_args->thread_args[i].group[j] = group[j];
+ } else {
+ /* Thread joins first groups in round robin */
+ if (num_workers >= num_join) {
+ gbl_args->thread_args[i].num_grp_join = 1;
+ gbl_args->thread_args[i].group[0] = group[i % num_join];
+ } else {
+ int cnt = 0;
+
+ for (j = 0; i + j < num_join; j += num_workers) {
+ gbl_args->thread_args[i].group[cnt] = group[i + j];
+ cnt++;
+ }
+
+ gbl_args->thread_args[i].num_grp_join = cnt;
+ }
+ }
+ }
stats[i] = &gbl_args->thread_args[i].stats;
}
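
Note: in mode 2 the first num_join groups are spread over the workers; with enough workers each joins one group (index i % num_join), otherwise worker i takes groups i, i + num_workers, i + 2 * num_workers, and so on. A standalone illustration of the index arithmetic (function name is illustrative):

#include <stdio.h>

/* Print which group indexes each worker joins in mode 2, mirroring the
 * round robin loop above. */
static void print_mode2_join(int num_workers, int num_join)
{
    int i, j;

    for (i = 0; i < num_workers; i++) {
        printf("worker %i:", i);

        if (num_workers >= num_join) {
            printf(" %i", i % num_join);
        } else {
            for (j = i; j < num_join; j += num_workers)
                printf(" %i", j);
        }

        printf("\n");
    }
}
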
@@ -2261,7 +2383,7 @@ int main(int argc, char *argv[])
thr_param, num_workers);
if (num_thr != num_workers) {
- ODPH_ERR("Error: worker create failed %i\n", num_thr);
+ ODPH_ERR("Worker create failed: %i\n", num_thr);
exit(EXIT_FAILURE);
}
@@ -2275,8 +2397,7 @@ int main(int argc, char *argv[])
pktio = gbl_args->pktios[i].pktio;
ret = odp_pktio_start(pktio);
if (ret) {
- ODPH_ERR("Error: unable to start %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Pktio start failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -2286,8 +2407,7 @@ int main(int argc, char *argv[])
for (i = 0; i < if_count; ++i) {
if (odp_pktio_stop(gbl_args->pktios[i].pktio)) {
- ODPH_ERR("Error: unable to stop %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Pktio stop failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -2299,14 +2419,13 @@ int main(int argc, char *argv[])
/* Master thread waits for other threads to exit */
num_thr = odph_thread_join(gbl_args->thread_tbl, num_workers);
if (num_thr != num_workers) {
- ODPH_ERR("Error: worker join failed %i\n", num_thr);
+ ODPH_ERR("Worker join failed: %i\n", num_thr);
exit(EXIT_FAILURE);
}
for (i = 0; i < if_count; ++i) {
if (odp_pktio_close(gbl_args->pktios[i].pktio)) {
- ODPH_ERR("Error: unable to close %s\n",
- gbl_args->appl.if_names[i]);
+ ODPH_ERR("Pktio close failed: %s\n", gbl_args->appl.if_names[i]);
exit(EXIT_FAILURE);
}
}
@@ -2318,30 +2437,30 @@ int main(int argc, char *argv[])
for (i = 0; i < num_pools; i++) {
if (odp_pool_destroy(pool_tbl[i])) {
- ODPH_ERR("Error: pool destroy failed %i\n", i);
+ ODPH_ERR("Pool destroy failed: %i\n", i);
exit(EXIT_FAILURE);
}
}
for (i = 0; i < num_vec_pools; i++) {
if (odp_pool_destroy(vec_pool_tbl[i])) {
- ODPH_ERR("Error: vector pool destroy failed %i\n", i);
+ ODPH_ERR("Vector pool destroy failed: %i\n", i);
exit(EXIT_FAILURE);
}
}
if (odp_shm_free(shm)) {
- ODPH_ERR("Error: shm free\n");
+ ODPH_ERR("Shm free failed\n");
exit(EXIT_FAILURE);
}
if (odp_term_local()) {
- ODPH_ERR("Error: term local\n");
+ ODPH_ERR("Term local failed\n");
exit(EXIT_FAILURE);
}
if (odp_term_global(instance)) {
- ODPH_ERR("Error: term global\n");
+ ODPH_ERR("Term global failed\n");
exit(EXIT_FAILURE);
}
diff --git a/test/performance/odp_packet_gen.c b/test/performance/odp_packet_gen.c
index 77b6a27a7..1407887e4 100644
--- a/test/performance/odp_packet_gen.c
+++ b/test/performance/odp_packet_gen.c
@@ -30,10 +30,14 @@
#define RAND_16BIT_WORDS 128
/* Max retries to generate random data */
#define MAX_RAND_RETRIES 1000
+/* Maximum pktio index table size */
+#define MAX_PKTIO_INDEXES 1024
/* Minimum number of packets to receive in CI test */
#define MIN_RX_PACKETS_CI 800
+ODP_STATIC_ASSERT(MAX_PKTIOS <= UINT8_MAX, "Interface index must fit into uint8_t\n");
+
typedef struct test_options_t {
uint64_t gap_nsec;
uint64_t quit;
@@ -58,6 +62,7 @@ typedef struct test_options_t {
uint16_t udp_dst;
uint32_t wait_sec;
uint32_t mtu;
+ odp_bool_t promisc_mode;
struct vlan_hdr {
uint16_t tpid;
@@ -125,6 +130,9 @@ typedef struct test_global_t {
} pktio[MAX_PKTIOS];
+ /* Interface lookup table. Table index is pktio_index of the API. */
+ uint8_t if_from_pktio_idx[MAX_PKTIO_INDEXES];
+
} test_global_t;
static test_global_t *test_global;
@@ -173,6 +181,7 @@ static void print_usage(void)
" -d, --ipv4_dst IPv4 destination address. Default: 192.168.0.2\n"
" -o, --udp_src UDP source port. Default: 10000\n"
" -p, --udp_dst UDP destination port. Default: 20000\n"
+ " -P, --promisc_mode Enable promiscuous mode.\n"
" -c, --c_mode <counts> Counter mode for incrementing UDP port numbers.\n"
" Specify the number of port numbers used starting from\n"
" udp_src/udp_dst. Comma-separated (no spaces) list of\n"
@@ -254,6 +263,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{"ipv4_dst", required_argument, NULL, 'd'},
{"udp_src", required_argument, NULL, 'o'},
{"udp_dst", required_argument, NULL, 'p'},
+ {"promisc_mode", no_argument, NULL, 'P'},
{"c_mode", required_argument, NULL, 'c'},
{"mtu", required_argument, NULL, 'M'},
{"quit", required_argument, NULL, 'q'},
@@ -263,7 +273,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+i:e:r:t:n:l:L:M:b:x:g:v:s:d:o:p:c:q:u:w:h";
+ static const char *shortopts = "+i:e:r:t:n:l:L:M:b:x:g:v:s:d:o:p:c:q:u:w:Ph";
test_options->num_pktio = 0;
test_options->num_rx = 1;
@@ -275,6 +285,7 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
test_options->bursts = 1;
test_options->gap_nsec = 1000000;
test_options->num_vlan = 0;
+ test_options->promisc_mode = 0;
strncpy(test_options->ipv4_src_s, "192.168.0.1",
sizeof(test_options->ipv4_src_s) - 1);
strncpy(test_options->ipv4_dst_s, "192.168.0.2",
@@ -385,6 +396,9 @@ static int parse_options(int argc, char *argv[], test_global_t *global)
}
test_options->udp_dst = udp_port;
break;
+ case 'P':
+ test_options->promisc_mode = 1;
+ break;
case 'r':
test_options->num_rx = atoi(optarg);
break;
@@ -621,7 +635,7 @@ static int open_pktios(test_global_t *global)
odp_pktout_queue_param_t pktout_param;
char *name;
uint32_t i, seg_len;
- int j;
+ int j, pktio_idx;
test_options_t *test_options = &global->test_options;
uint32_t num_rx = test_options->num_rx;
int num_tx = test_options->num_tx;
@@ -649,6 +663,7 @@ static int open_pktios(test_global_t *global)
printf("%u bytes\n", test_options->mtu);
else
printf("interface default\n");
+ printf(" promisc mode: %s\n", test_options->promisc_mode ? "enabled" : "disabled");
printf(" tx burst size %u\n", test_options->burst_size);
printf(" tx bursts %u\n", test_options->bursts);
printf(" tx burst gap %" PRIu64 " nsec\n",
@@ -729,6 +744,9 @@ static int open_pktios(test_global_t *global)
global->pool = pool;
+ if (odp_pktio_max_index() >= MAX_PKTIO_INDEXES)
+ printf("Warning: max pktio index (%u) is too large\n", odp_pktio_max_index());
+
odp_pktio_param_init(&pktio_param);
pktio_param.in_mode = ODP_PKTIN_MODE_SCHED;
pktio_param.out_mode = ODP_PKTOUT_MODE_DIRECT;
@@ -750,6 +768,13 @@ static int open_pktios(test_global_t *global)
odp_pktio_print(pktio);
+ pktio_idx = odp_pktio_index(pktio);
+ if (pktio_idx < 0 || pktio_idx >= MAX_PKTIO_INDEXES) {
+ printf("Error (%s): Bad pktio index: %i\n", name, pktio_idx);
+ return -1;
+ }
+ global->if_from_pktio_idx[pktio_idx] = i;
+
if (odp_pktio_capability(pktio, &pktio_capa)) {
printf("Error (%s): Pktio capability failed.\n", name);
return -1;
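
Note: odp_pktio_index() values are not necessarily aligned with the application's own interface numbering, so the patch builds a translation table at open time and consults it on the receive path. A minimal sketch of the lookup side:

#include <stdint.h>
#include <odp_api.h>

#define MAX_PKTIO_INDEXES 1024

static uint8_t if_from_pktio_idx[MAX_PKTIO_INDEXES];

/* Map a received packet to the application's interface number via the
 * table filled at open time. odp_packet_input_index() returns the pktio
 * index of the input interface, or a negative value when unknown. */
static int app_if_index(odp_packet_t pkt)
{
    int idx = odp_packet_input_index(pkt);

    if (idx < 0 || idx >= MAX_PKTIO_INDEXES)
        return -1;

    return if_from_pktio_idx[idx];
}
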
@@ -814,6 +839,18 @@ static int open_pktios(test_global_t *global)
odp_pktio_config(pktio, &pktio_config);
+ if (test_options->promisc_mode) {
+ if (!pktio_capa.set_op.op.promisc_mode) {
+ ODPH_ERR("Error (%s): promisc mode set not supported\n", name);
+ return -1;
+ }
+
+ if (odp_pktio_promisc_mode_set(pktio, true)) {
+ ODPH_ERR("Error (%s): promisc mode enable failed\n", name);
+ return -1;
+ }
+ }
+
odp_pktin_queue_param_init(&pktin_param);
pktin_param.queue_param.sched.prio = odp_schedule_default_prio();
@@ -1056,8 +1093,11 @@ static int rx_thread(void *arg)
/* All packets from the same queue are from the same pktio interface */
int index = odp_packet_input_index(odp_packet_from_event(ev[0]));
- if (index >= 0)
- global->stat[thr].pktio[index].rx_packets += num;
+ if (index >= 0) {
+ int if_idx = global->if_from_pktio_idx[index];
+
+ global->stat[thr].pktio[if_idx].rx_packets += num;
+ }
}
odp_event_free_multi(ev, num);
diff --git a/test/performance/odp_sched_latency.c b/test/performance/odp_sched_latency.c
index aae08bfe0..0894a403d 100644
--- a/test/performance/odp_sched_latency.c
+++ b/test/performance/odp_sched_latency.c
@@ -25,6 +25,7 @@
#include <getopt.h>
#define MAX_QUEUES 4096 /**< Maximum number of queues */
+#define MAX_GROUPS 64
#define EVENT_POOL_SIZE (1024 * 1024) /**< Event pool size */
#define TEST_ROUNDS 10 /**< Test rounds for each thread (millions) */
#define MAIN_THREAD 1 /**< Thread ID performing maintenance tasks */
@@ -81,6 +82,8 @@ typedef struct {
unsigned int cpu_count; /**< CPU count */
odp_schedule_sync_t sync_type; /**< Scheduler sync type */
int forward_mode; /**< Event forwarding mode */
+ int num_group;
+ int isolate;
int test_rounds; /**< Number of test rounds (millions) */
int warm_up_rounds; /**< Number of warm-up rounds */
struct {
@@ -117,6 +120,9 @@ typedef struct {
odp_pool_t pool; /**< Pool for allocating test events */
test_args_t args; /**< Parsed command line arguments */
odp_queue_t queue[NUM_PRIOS][MAX_QUEUES]; /**< Scheduled queues */
+
+ odp_schedule_group_t group[NUM_PRIOS][MAX_GROUPS];
+
} test_globals_t;
/**
@@ -343,6 +349,38 @@ static void print_results(test_globals_t *globals)
}
}
+static int join_groups(test_globals_t *globals, int thr)
+{
+ odp_thrmask_t thrmask;
+ odp_schedule_group_t group;
+ int i, num;
+ int num_group = globals->args.num_group;
+
+ if (num_group <= 0)
+ return 0;
+
+ num = num_group;
+ if (globals->args.isolate)
+ num = 2 * num_group;
+
+ odp_thrmask_zero(&thrmask);
+ odp_thrmask_set(&thrmask, thr);
+
+ for (i = 0; i < num; i++) {
+ if (globals->args.isolate)
+ group = globals->group[i % 2][i / 2];
+ else
+ group = globals->group[0][i];
+
+ if (odp_schedule_group_join(group, &thrmask)) {
+ ODPH_ERR("Group join failed %i (thr %i)\n", i, thr);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
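
Note: odp_schedule_group_join() takes a thread mask rather than a single thread id, so a worker joining only itself first builds a mask containing its own id, as join_groups() does above. A minimal sketch of that step in isolation:

#include <odp_api.h>

/* Join the calling thread to one schedule group. Returns 0 on success. */
static int join_self(odp_schedule_group_t group)
{
    odp_thrmask_t mask;

    odp_thrmask_zero(&mask);
    odp_thrmask_set(&mask, odp_thread_id());

    return odp_schedule_group_join(group, &mask);
}
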
/**
* Measure latency of scheduled ODP events
*
@@ -485,6 +523,9 @@ static int run_thread(void *arg ODP_UNUSED)
return -1;
}
+ if (join_groups(globals, thr))
+ return -1;
+
if (thr == MAIN_THREAD) {
args = &globals->args;
@@ -528,6 +569,12 @@ static void usage(void)
" 0: Random (default)\n"
" 1: Incremental\n"
" 2: Use source queue\n"
+ " -g, --num_group <num> Number of schedule groups. Round robins queues into groups.\n"
+ " -1: SCHED_GROUP_WORKER\n"
+ " 0: SCHED_GROUP_ALL (default)\n"
+ " -i, --isolate <mode> Select if shared or isolated groups are used. Ignored when num_group <= 0.\n"
+ " 0: All queues share groups (default)\n"
+ " 1: Separate groups for high and low priority queues. Creates 2xnum_group groups.\n"
" -l, --lo-prio-queues <number> Number of low priority scheduled queues\n"
" -t, --hi-prio-queues <number> Number of high priority scheduled queues\n"
" -m, --lo-prio-events-per-queue <number> Number of events per low priority queue\n"
@@ -563,24 +610,29 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
static const struct option longopts[] = {
{"count", required_argument, NULL, 'c'},
+ {"duration", required_argument, NULL, 'd'},
{"forward-mode", required_argument, NULL, 'f'},
+ {"num_group", required_argument, NULL, 'g'},
+ {"isolate", required_argument, NULL, 'i'},
{"lo-prio-queues", required_argument, NULL, 'l'},
{"hi-prio-queues", required_argument, NULL, 't'},
{"lo-prio-events-per-queue", required_argument, NULL, 'm'},
{"hi-prio-events-per-queue", required_argument, NULL, 'n'},
{"lo-prio-events", required_argument, NULL, 'o'},
{"hi-prio-events", required_argument, NULL, 'p'},
- {"sample-per-prio", no_argument, NULL, 'r'},
{"sync", required_argument, NULL, 's'},
{"warm-up", required_argument, NULL, 'w'},
+ {"sample-per-prio", no_argument, NULL, 'r'},
{"help", no_argument, NULL, 'h'},
{NULL, 0, NULL, 0}
};
- static const char *shortopts = "+c:d:f:s:l:t:m:n:o:p:rw:h";
+ static const char *shortopts = "+c:d:f:g:i:l:t:m:n:o:p:s:w:rh";
args->cpu_count = 1;
args->forward_mode = EVENT_FORWARD_RAND;
+ args->num_group = 0;
+ args->isolate = 0;
args->test_rounds = TEST_ROUNDS;
args->warm_up_rounds = WARM_UP_ROUNDS;
args->sync_type = ODP_SCHED_SYNC_PARALLEL;
@@ -608,6 +660,12 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
case 'f':
args->forward_mode = atoi(optarg);
break;
+ case 'g':
+ args->num_group = atoi(optarg);
+ break;
+ case 'i':
+ args->isolate = atoi(optarg);
+ break;
case 'l':
args->prio[LO_PRIO].queues = atoi(optarg);
break;
@@ -676,6 +734,11 @@ static void parse_args(int argc, char *argv[], test_args_t *args)
usage();
exit(EXIT_FAILURE);
}
+
+ if (args->num_group > MAX_GROUPS) {
+ ODPH_ERR("Too many groups. Max supported %i.\n", MAX_GROUPS);
+ exit(EXIT_FAILURE);
+ }
}
static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed)
@@ -697,6 +760,68 @@ static void randomize_queues(odp_queue_t queues[], uint32_t num, uint64_t *seed)
}
}
+static int create_groups(test_globals_t *globals, odp_schedule_group_t group[], int num)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_thrmask_t zeromask;
+ int i, j, max;
+
+ if (num <= 0)
+ return 0;
+
+ if (odp_schedule_capability(&sched_capa)) {
+ ODPH_ERR("Schedule capability failed\n");
+ return 0;
+ }
+
+ max = sched_capa.max_groups - 3;
+ if (num > max) {
+ printf("Too many schedule groups %i (max %u)\n", num, max);
+ return 0;
+ }
+
+ for (i = 0; i < NUM_PRIOS; i++)
+ for (j = 0; j < MAX_GROUPS; j++)
+ globals->group[i][j] = ODP_SCHED_GROUP_INVALID;
+
+ odp_thrmask_zero(&zeromask);
+
+ for (i = 0; i < num; i++) {
+ group[i] = odp_schedule_group_create("test_group", &zeromask);
+
+ if (group[i] == ODP_SCHED_GROUP_INVALID) {
+ ODPH_ERR("Group create failed %i\n", i);
+ break;
+ }
+
+ if (globals->args.isolate) {
+ globals->group[i % 2][i / 2] = group[i];
+ } else {
+ globals->group[0][i] = group[i];
+ globals->group[1][i] = group[i];
+ }
+ }
+
+ return i;
+}
+
+static int destroy_groups(odp_schedule_group_t group[], int num)
+{
+ int i;
+
+ if (num <= 0)
+ return 0;
+
+ for (i = 0; i < num; i++) {
+ if (odp_schedule_group_destroy(group[i])) {
+ ODPH_ERR("Group destroy failed %i\n", i);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/**
* Test main function
*/
@@ -705,21 +830,23 @@ int main(int argc, char *argv[])
odp_instance_t instance;
odp_init_t init_param;
odph_helper_options_t helper_options;
- odph_thread_t *thread_tbl;
odph_thread_common_param_t thr_common;
odph_thread_param_t thr_param;
odp_cpumask_t cpumask;
- odp_pool_t pool;
odp_pool_capability_t pool_capa;
odp_pool_param_t params;
- odp_shm_t shm;
test_globals_t *globals;
test_args_t args;
char cpumaskstr[ODP_CPUMASK_STR_SIZE];
uint32_t pool_size;
- int i, j;
- int ret = 0;
+ int i, j, ret;
+ int num_group, tot_group;
+ odp_schedule_group_t group[2 * MAX_GROUPS];
+ odph_thread_t thread_tbl[ODP_THREAD_COUNT_MAX];
+ int err = 0;
int num_workers = 0;
+ odp_shm_t shm = ODP_SHM_INVALID;
+ odp_pool_t pool = ODP_POOL_INVALID;
printf("\nODP scheduling latency benchmark starts\n\n");
@@ -739,7 +866,7 @@ int main(int argc, char *argv[])
/* ODP global init */
if (odp_init_global(&instance, &init_param, NULL)) {
ODPH_ERR("ODP global init failed.\n");
- return -1;
+ exit(EXIT_FAILURE);
}
/*
@@ -748,11 +875,17 @@ int main(int argc, char *argv[])
*/
if (odp_init_local(instance, ODP_THREAD_CONTROL)) {
ODPH_ERR("ODP global init failed.\n");
- return -1;
+ exit(EXIT_FAILURE);
}
odp_sys_info_print();
+ num_group = args.num_group;
+
+ tot_group = 0;
+ if (num_group > 0)
+ tot_group = args.isolate ? 2 * num_group : num_group;
+
/* Get default worker cpumask */
if (args.cpu_count)
num_workers = args.cpu_count;
@@ -762,22 +895,22 @@ int main(int argc, char *argv[])
(void)odp_cpumask_to_str(&cpumask, cpumaskstr, sizeof(cpumaskstr));
- printf("CPU mask info:\n");
- printf(" Worker threads: %i\n", num_workers);
- printf(" First CPU: %i\n", odp_cpumask_first(&cpumask));
- printf(" CPU mask: %s\n", cpumaskstr);
-
- thread_tbl = calloc(sizeof(odph_thread_t), num_workers);
- if (!thread_tbl) {
- ODPH_ERR("no memory for thread_tbl\n");
- return -1;
- }
-
- shm = odp_shm_reserve("test_globals",
- sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
+ printf("Test options:\n");
+ printf(" Worker threads: %i\n", num_workers);
+ printf(" First CPU: %i\n", odp_cpumask_first(&cpumask));
+ printf(" CPU mask: %s\n", cpumaskstr);
+ printf(" Test rounds: %iM\n", args.test_rounds);
+ printf(" Warm-up rounds: %i\n", args.warm_up_rounds);
+ printf(" Isolated groups: %i\n", args.isolate);
+ printf(" Number of groups: %i\n", num_group);
+ printf(" Created groups: %i\n", tot_group);
+ printf("\n");
+
+ shm = odp_shm_reserve("test_globals", sizeof(test_globals_t), ODP_CACHE_LINE_SIZE, 0);
if (shm == ODP_SHM_INVALID) {
ODPH_ERR("Shared memory reserve failed.\n");
- return -1;
+ err = -1;
+ goto error;
}
globals = odp_shm_addr(shm);
@@ -791,7 +924,8 @@ int main(int argc, char *argv[])
*/
if (odp_pool_capability(&pool_capa)) {
ODPH_ERR("pool capa failed\n");
- return -1;
+ err = -1;
+ goto error;
}
pool_size = EVENT_POOL_SIZE;
@@ -808,10 +942,20 @@ int main(int argc, char *argv[])
if (pool == ODP_POOL_INVALID) {
ODPH_ERR("Pool create failed.\n");
- return -1;
+ err = -1;
+ goto error;
}
globals->pool = pool;
+ /* Create groups */
+ ret = create_groups(globals, group, tot_group);
+ if (ret != tot_group) {
+ ODPH_ERR("Group create failed.\n");
+ tot_group = ret;
+ err = -1;
+ goto error;
+ }
+
/*
* Create queues for schedule test
*/
@@ -819,8 +963,13 @@ int main(int argc, char *argv[])
char name[] = "sched_XX_YY";
odp_queue_t queue;
odp_queue_param_t param;
+ odp_schedule_group_t grp;
int prio;
+ grp = ODP_SCHED_GROUP_ALL;
+ if (num_group < 0)
+ grp = ODP_SCHED_GROUP_WORKER;
+
if (i == HI_PRIO)
prio = odp_schedule_max_prio();
else
@@ -833,17 +982,22 @@ int main(int argc, char *argv[])
param.type = ODP_QUEUE_TYPE_SCHED;
param.sched.prio = prio;
param.sched.sync = args.sync_type;
- param.sched.group = ODP_SCHED_GROUP_ALL;
for (j = 0; j < args.prio[i].queues; j++) {
name[9] = '0' + j / 10;
name[10] = '0' + j - 10 * (j / 10);
+ /* Round robin queues into groups */
+ if (num_group > 0)
+ grp = globals->group[i][j % num_group];
+
+ param.sched.group = grp;
+
queue = odp_queue_create(name, &param);
if (queue == ODP_QUEUE_INVALID) {
ODPH_ERR("Scheduled queue create failed.\n");
- return -1;
+ exit(EXIT_FAILURE);
}
globals->queue[i][j] = queue;
@@ -859,6 +1013,8 @@ int main(int argc, char *argv[])
odp_barrier_init(&globals->barrier, num_workers);
/* Create and launch worker threads */
+ memset(thread_tbl, 0, sizeof(thread_tbl));
+
odph_thread_common_param_init(&thr_common);
thr_common.instance = instance;
thr_common.cpumask = &cpumask;
@@ -873,7 +1029,6 @@ int main(int argc, char *argv[])
/* Wait for worker threads to terminate */
odph_thread_join(thread_tbl, num_workers);
- free(thread_tbl);
printf("ODP scheduling latency test complete\n\n");
@@ -885,14 +1040,36 @@ int main(int argc, char *argv[])
for (j = 0; j < num_queues; j++) {
queue = globals->queue[i][j];
- ret += odp_queue_destroy(queue);
+ if (odp_queue_destroy(queue)) {
+ ODPH_ERR("Queue destroy failed [%i][%i]\n", i, j);
+ err = -1;
+ break;
+ }
+ }
+ }
+
+error:
+ if (destroy_groups(group, tot_group)) {
+ ODPH_ERR("Group destroy failed\n");
+ err = -1;
+ }
+
+ if (pool != ODP_POOL_INVALID) {
+ if (odp_pool_destroy(pool)) {
+ ODPH_ERR("Pool destroy failed\n");
+ err = -1;
+ }
+ }
+
+ if (shm != ODP_SHM_INVALID) {
+ if (odp_shm_free(shm)) {
+ ODPH_ERR("SHM destroy failed\n");
+ err = -1;
}
}
- ret += odp_shm_free(shm);
- ret += odp_pool_destroy(pool);
- ret += odp_term_local();
- ret += odp_term_global(instance);
+ err += odp_term_local();
+ err += odp_term_global(instance);
- return ret;
+ return err;
}
diff --git a/test/validation/api/buffer/buffer.c b/test/validation/api/buffer/buffer.c
index 07b671228..19f39e1d3 100644
--- a/test/validation/api/buffer/buffer.c
+++ b/test/validation/api/buffer/buffer.c
@@ -1,5 +1,6 @@
/* Copyright (c) 2014-2018, Linaro Limited
* Copyright (c) 2019, Nokia
+ * Copyright (c) 2022, Marvell
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -51,7 +52,7 @@ static int buffer_suite_init(void)
return 0;
}
-static void buffer_test_pool_alloc_free(void)
+static void test_pool_alloc_free(const odp_pool_param_t *param)
{
odp_pool_t pool;
odp_event_t ev;
@@ -59,14 +60,15 @@ static void buffer_test_pool_alloc_free(void)
uint32_t num_buf = 0;
void *addr;
odp_event_subtype_t subtype;
- uint32_t num = default_param.buf.num;
- uint32_t size = default_param.buf.size;
- uint32_t align = default_param.buf.align;
+ uint32_t num = param->buf.num;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+
odp_buffer_t buffer[num];
odp_bool_t wrong_type = false, wrong_subtype = false;
odp_bool_t wrong_size = false, wrong_align = false;
- pool = odp_pool_create("default pool", &default_param);
+ pool = odp_pool_create("default pool", param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
odp_pool_print(pool);
@@ -123,7 +125,7 @@ static void buffer_test_pool_alloc_free(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void buffer_test_pool_alloc_free_multi(void)
+static void test_pool_alloc_free_multi(const odp_pool_param_t *param)
{
odp_pool_t pool;
uint32_t i, num_buf;
@@ -131,14 +133,15 @@ static void buffer_test_pool_alloc_free_multi(void)
odp_event_t ev;
void *addr;
odp_event_subtype_t subtype;
- uint32_t num = default_param.buf.num;
- uint32_t size = default_param.buf.size;
- uint32_t align = default_param.buf.align;
+ uint32_t num = param->buf.num;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
+
odp_buffer_t buffer[num + BURST];
odp_bool_t wrong_type = false, wrong_subtype = false;
odp_bool_t wrong_size = false, wrong_align = false;
- pool = odp_pool_create("default pool", &default_param);
+ pool = odp_pool_create("default pool", param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
ret = 0;
@@ -203,16 +206,14 @@ static void buffer_test_pool_alloc_free_multi(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void buffer_test_pool_single_pool(void)
+static void test_pool_single_pool(odp_pool_param_t *param)
{
odp_pool_t pool;
odp_buffer_t buffer;
- odp_pool_param_t param;
- memcpy(&param, &default_param, sizeof(odp_pool_param_t));
- param.buf.num = 1;
+ param->buf.num = 1;
- pool = odp_pool_create("pool 0", &param);
+ pool = odp_pool_create("pool 0", param);
CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
odp_pool_print(pool);
@@ -246,23 +247,21 @@ static void buffer_test_pool_single_pool(void)
CU_ASSERT(odp_pool_destroy(pool) == 0);
}
-static void buffer_test_pool_two_pools(void)
+static void test_pool_two_pools(odp_pool_param_t *param)
{
odp_pool_t pool0, pool1;
odp_buffer_t buf, buffer[2];
- odp_pool_param_t param;
int num = 0;
if (pool_capa.buf.max_pools < 2)
return;
- memcpy(&param, &default_param, sizeof(odp_pool_param_t));
- param.buf.num = 1;
+ param->buf.num = 1;
- pool0 = odp_pool_create("pool 0", &param);
+ pool0 = odp_pool_create("pool 0", param);
CU_ASSERT_FATAL(pool0 != ODP_POOL_INVALID);
- pool1 = odp_pool_create("pool 1", &param);
+ pool1 = odp_pool_create("pool 1", param);
CU_ASSERT_FATAL(pool1 != ODP_POOL_INVALID);
buffer[0] = odp_buffer_alloc(pool0);
@@ -309,15 +308,14 @@ static void buffer_test_pool_two_pools(void)
CU_ASSERT(odp_pool_destroy(pool1) == 0);
}
-static void buffer_test_pool_max_pools(void)
+static void test_pool_max_pools(odp_pool_param_t *param)
{
- odp_pool_param_t param;
uint32_t i, num_pool, num_buf;
void *addr;
odp_event_t ev;
uint32_t max_pools = pool_capa.buf.max_pools;
- uint32_t size = default_param.buf.size;
- uint32_t align = default_param.buf.align;
+ uint32_t size = param->buf.size;
+ uint32_t align = param->buf.align;
odp_pool_t pool[max_pools];
odp_buffer_t buffer[max_pools];
@@ -325,11 +323,10 @@ static void buffer_test_pool_max_pools(void)
printf("\n Creating %u pools\n", max_pools);
- memcpy(&param, &default_param, sizeof(odp_pool_param_t));
- param.buf.num = 1;
+ param->buf.num = 1;
for (i = 0; i < max_pools; i++) {
- pool[i] = odp_pool_create(NULL, &param);
+ pool[i] = odp_pool_create(NULL, param);
if (pool[i] == ODP_POOL_INVALID)
break;
@@ -370,12 +367,146 @@ static void buffer_test_pool_max_pools(void)
CU_ASSERT(odp_pool_destroy(pool[i]) == 0);
}
+static void buffer_test_pool_alloc_free(void)
+{
+ test_pool_alloc_free(&default_param);
+}
+
+static void buffer_test_pool_alloc_free_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_alloc_free(&param);
+}
+
+static void buffer_test_pool_alloc_free_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_alloc_free(&param);
+}
+
+static void buffer_test_pool_alloc_free_multi(void)
+{
+ test_pool_alloc_free_multi(&default_param);
+}
+
+static void buffer_test_pool_alloc_free_multi_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_alloc_free_multi(&param);
+}
+
+static void buffer_test_pool_alloc_free_multi_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_alloc_free_multi(&param);
+}
+
+static void buffer_test_pool_single_pool(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_single_pool_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_single_pool_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_single_pool(&param);
+}
+
+static void buffer_test_pool_two_pools(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_two_pools_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_two_pools_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_two_pools(&param);
+}
+
+static void buffer_test_pool_max_pools(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_pool_max_pools_min_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.min_cache_size;
+ test_pool_max_pools(&param);
+}
+
+static void buffer_test_pool_max_pools_max_cache(void)
+{
+ odp_pool_param_t param;
+
+ memcpy(&param, &default_param, sizeof(odp_pool_param_t));
+ param.buf.cache_size = pool_capa.buf.max_cache_size;
+ test_pool_max_pools(&param);
+}
+
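
Note: the new test variants differ from the defaults only in buf.cache_size, which is swept between the capability limits. A hedged sketch of creating a buffer pool with the smallest per-thread cache the implementation supports (pool name and sizes are illustrative):

#include <odp_api.h>

/* Sketch: buffer pool with the minimum per-thread event cache. */
static odp_pool_t create_min_cache_pool(void)
{
    odp_pool_capability_t capa;
    odp_pool_param_t param;

    if (odp_pool_capability(&capa))
        return ODP_POOL_INVALID;

    odp_pool_param_init(&param);
    param.type = ODP_POOL_BUFFER;
    param.buf.num = 64;
    param.buf.size = 256;
    param.buf.cache_size = capa.buf.min_cache_size;

    return odp_pool_create("min cache pool", &param);
}
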
odp_testinfo_t buffer_suite[] = {
ODP_TEST_INFO(buffer_test_pool_alloc_free),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_max_cache),
ODP_TEST_INFO(buffer_test_pool_alloc_free_multi),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_alloc_free_multi_max_cache),
ODP_TEST_INFO(buffer_test_pool_single_pool),
+ ODP_TEST_INFO(buffer_test_pool_single_pool_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_single_pool_max_cache),
ODP_TEST_INFO(buffer_test_pool_two_pools),
+ ODP_TEST_INFO(buffer_test_pool_two_pools_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_two_pools_max_cache),
ODP_TEST_INFO(buffer_test_pool_max_pools),
+ ODP_TEST_INFO(buffer_test_pool_max_pools_min_cache),
+ ODP_TEST_INFO(buffer_test_pool_max_pools_max_cache),
ODP_TEST_INFO_NULL,
};
diff --git a/test/validation/api/classification/odp_classification_common.c b/test/validation/api/classification/odp_classification_common.c
index dd8373b04..8eac41a1e 100644
--- a/test/validation/api/classification/odp_classification_common.c
+++ b/test/validation/api/classification/odp_classification_common.c
@@ -417,7 +417,7 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
ip->tot_len = odp_cpu_to_be_16(l3_len);
ip->ttl = DEFAULT_TTL;
ip->frag_offset = 0;
- ip->tos = 0;
+ ip->tos = pkt_info.dscp << ODPH_IP_TOS_DSCP_SHIFT;
odp_packet_has_ipv4_set(pkt, 1);
odph_ipv4_csum_update(pkt);
} else {
@@ -425,7 +425,8 @@ odp_packet_t create_packet(cls_packet_info_t pkt_info)
odp_packet_has_ipv6_set(pkt, 1);
ipv6 = (odph_ipv6hdr_t *)odp_packet_l3_ptr(pkt, NULL);
version = ODPH_IPV6 << ODPH_IPV6HDR_VERSION_SHIFT;
- tc = DEFAULT_TOS << ODPH_IPV6HDR_TC_SHIFT;
+ tc = pkt_info.dscp << ODPH_IP_TOS_DSCP_SHIFT;
+ tc <<= ODPH_IPV6HDR_TC_SHIFT;
flow = seqno << ODPH_IPV6HDR_FLOW_LABEL_SHIFT;
ver_tc_flow = version | tc | flow;
diff --git a/test/validation/api/classification/odp_classification_test_pmr.c b/test/validation/api/classification/odp_classification_test_pmr.c
index 068e2112c..e69f077a2 100644
--- a/test/validation/api/classification/odp_classification_test_pmr.c
+++ b/test/validation/api/classification/odp_classification_test_pmr.c
@@ -13,6 +13,8 @@
#define MAX_NUM_UDP 4
#define MARK_IP 1
#define MARK_UDP 2
+#define TEST_IPV4 false
+#define TEST_IPV6 true
static odp_pool_t pkt_pool;
/** sequence number of IP packets */
@@ -560,7 +562,7 @@ static void classification_test_pmr_term_udp_sport(void)
test_pmr(&pmr_param, pkt, NO_MATCH);
}
-static void classification_test_pmr_term_ipproto(void)
+static void classification_test_pmr_term_proto(odp_bool_t ipv6)
{
odp_packet_t pkt;
uint8_t val;
@@ -578,18 +580,73 @@ static void classification_test_pmr_term_ipproto(void)
pmr_param.val_sz = sizeof(val);
pkt_info = default_pkt_info;
+ pkt_info.ipv6 = ipv6;
pkt_info.l4_type = CLS_PKT_L4_UDP;
pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
test_pmr(&pmr_param, pkt, MATCH);
- pkt = create_packet(default_pkt_info);
+ pkt_info.l4_type = CLS_PKT_L4_TCP;
+ pkt = create_packet(pkt_info);
CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
test_pmr(&pmr_param, pkt, NO_MATCH);
}
+static void classification_test_pmr_term_ipv4_proto(void)
+{
+ classification_test_pmr_term_proto(TEST_IPV4);
+}
+
+static void classification_test_pmr_term_ipv6_proto(void)
+{
+ classification_test_pmr_term_proto(TEST_IPV6);
+}
+
+static void classification_test_pmr_term_dscp(odp_bool_t ipv6)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ odp_pmr_param_t pmr_param;
+ cls_packet_info_t pkt_info;
+
+ val = DSCP_CLASS4;
+ mask = 0x3f;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_IP_DSCP;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.ipv6 = ipv6;
+ pkt_info.l4_type = CLS_PKT_L4_UDP;
+ pkt_info.dscp = DSCP_CLASS4;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt_info.dscp = 0;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
+static void classification_test_pmr_term_ipv4_dscp(void)
+{
+ classification_test_pmr_term_dscp(TEST_IPV4);
+}
+
+static void classification_test_pmr_term_ipv6_dscp(void)
+{
+ classification_test_pmr_term_dscp(TEST_IPV6);
+}
+
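
Note: the tos/traffic-class writes above place the DSCP value in the top six bits of the byte, leaving the two ECN bits clear. A small standalone sketch, assuming the standard DiffServ layout (the shift constant below is a local stand-in for ODPH_IP_TOS_DSCP_SHIFT):

#include <stdint.h>

#define IP_TOS_DSCP_SHIFT 2 /* assumption: DSCP sits above the 2 ECN bits */

/* Build an IPv4 TOS byte (or IPv6 traffic class) from a DSCP value; for
 * example class selector 4 (binary 100000) becomes 0x80. */
static uint8_t tos_from_dscp(uint8_t dscp)
{
    return (uint8_t)(dscp << IP_TOS_DSCP_SHIFT);
}
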
static void classification_test_pmr_term_dmac(void)
{
odp_packet_t pkt;
@@ -797,6 +854,44 @@ static void classification_test_pmr_term_vlan_id_x(void)
test_pmr(&pmr_param, pkt, NO_MATCH);
}
+static void classification_test_pmr_term_vlan_pcp_0(void)
+{
+ odp_packet_t pkt;
+ uint8_t val;
+ uint8_t mask;
+ uint16_t tci;
+ odp_pmr_param_t pmr_param;
+ odph_ethhdr_t *eth;
+ odph_vlanhdr_t *vlan_0;
+ cls_packet_info_t pkt_info;
+
+ val = 5;
+ mask = 0x7;
+ tci = ((uint16_t)val) << ODPH_VLANHDR_PCP_SHIFT;
+ tci |= 0x123;
+
+ odp_cls_pmr_param_init(&pmr_param);
+ pmr_param.term = ODP_PMR_VLAN_PCP_0;
+ pmr_param.match.value = &val;
+ pmr_param.match.mask = &mask;
+ pmr_param.val_sz = sizeof(val);
+
+ pkt_info = default_pkt_info;
+ pkt_info.vlan = true;
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+ eth = (odph_ethhdr_t *)odp_packet_l2_ptr(pkt, NULL);
+ vlan_0 = (odph_vlanhdr_t *)(eth + 1);
+ vlan_0->tci = odp_cpu_to_be_16(tci);
+
+ test_pmr(&pmr_param, pkt, MATCH);
+
+ pkt = create_packet(pkt_info);
+ CU_ASSERT_FATAL(pkt != ODP_PACKET_INVALID);
+
+ test_pmr(&pmr_param, pkt, NO_MATCH);
+}
+
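
Note: the VLAN TCI packs PCP (3 bits), DEI (1 bit) and VLAN ID (12 bits); the test above writes PCP 5 over an arbitrary VLAN ID of 0x123 and converts to network byte order before storing. A standalone sketch of the host-order construction (the shift constant is a local stand-in for ODPH_VLANHDR_PCP_SHIFT):

#include <stdint.h>

#define VLAN_PCP_SHIFT 13 /* PCP occupies the top 3 bits of the 16-bit TCI */

/* Build a TCI value in host byte order; convert with odp_cpu_to_be_16()
 * before writing it into the header, as the test does. */
static uint16_t make_tci(uint8_t pcp, uint16_t vlan_id)
{
    return (uint16_t)(((uint16_t)pcp << VLAN_PCP_SHIFT) | (vlan_id & 0x0fff));
}
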
static void classification_test_pmr_term_eth_type_0(void)
{
odp_packet_t pkt;
@@ -1803,6 +1898,11 @@ static int check_capa_ip_proto(void)
return cls_capa.supported_terms.bit.ip_proto;
}
+static int check_capa_ip_dscp(void)
+{
+ return cls_capa.supported_terms.bit.ip_dscp;
+}
+
static int check_capa_dmac(void)
{
return cls_capa.supported_terms.bit.dmac;
@@ -1843,6 +1943,11 @@ static int check_capa_vlan_id_x(void)
return cls_capa.supported_terms.bit.vlan_id_x;
}
+static int check_capa_vlan_pcp_0(void)
+{
+ return cls_capa.supported_terms.bit.vlan_pcp_0;
+}
+
static int check_capa_ethtype_0(void)
{
return cls_capa.supported_terms.bit.ethtype_0;
@@ -1945,8 +2050,14 @@ odp_testinfo_t classification_suite_pmr[] = {
check_capa_icmp_code),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_icmp_id,
check_capa_icmp_id),
- ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipproto,
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv4_proto,
+ check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv6_proto,
check_capa_ip_proto),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv4_dscp,
+ check_capa_ip_dscp),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_ipv6_dscp,
+ check_capa_ip_dscp),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_dmac,
check_capa_dmac),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_pool_set,
@@ -1967,6 +2078,8 @@ odp_testinfo_t classification_suite_pmr[] = {
check_capa_vlan_id_0),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_vlan_id_x,
check_capa_vlan_id_x),
+ ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_vlan_pcp_0,
+ check_capa_vlan_pcp_0),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_eth_type_0,
check_capa_ethtype_0),
ODP_TEST_INFO_CONDITIONAL(classification_test_pmr_term_eth_type_x,
diff --git a/test/validation/api/classification/odp_classification_testsuites.h b/test/validation/api/classification/odp_classification_testsuites.h
index 6b00e138b..592f37cd6 100644
--- a/test/validation/api/classification/odp_classification_testsuites.h
+++ b/test/validation/api/classification/odp_classification_testsuites.h
@@ -27,7 +27,8 @@ typedef struct cls_packet_info {
bool vlan_qinq;
odp_atomic_u32_t *seq;
cls_packet_l4_info l4_type;
- bool ipv6;
+ odp_bool_t ipv6;
+ uint8_t dscp;
uint32_t len;
} cls_packet_info_t;
diff --git a/test/validation/api/crypto/odp_crypto_test_inp.c b/test/validation/api/crypto/odp_crypto_test_inp.c
index 97f721dd5..e3eff88b9 100644
--- a/test/validation/api/crypto/odp_crypto_test_inp.c
+++ b/test/validation/api/crypto/odp_crypto_test_inp.c
@@ -353,9 +353,6 @@ static int alg_packet_op(odp_packet_t pkt,
return rc;
}
- if (!result.ok)
- CU_ASSERT(odp_packet_has_error(pkt));
-
*ok = result.ok;
return 0;
diff --git a/test/validation/api/pktio/pktio.c b/test/validation/api/pktio/pktio.c
index f6408c788..d3ce41a2c 100644
--- a/test/validation/api/pktio/pktio.c
+++ b/test/validation/api/pktio/pktio.c
@@ -3078,6 +3078,7 @@ static void pktio_test_pktin_ts(void)
ns1 = 100;
ts = odp_pktio_ts_from_ns(pktio_tx, ns1);
ns2 = odp_time_to_ns(ts);
+ CU_ASSERT_FATAL(res != 0);
res_ns = ODP_TIME_SEC_IN_NS / res;
if (ODP_TIME_SEC_IN_NS % res)
res_ns++;
diff --git a/test/validation/api/scheduler/scheduler.c b/test/validation/api/scheduler/scheduler.c
index 0dc2db360..490ac9fea 100644
--- a/test/validation/api/scheduler/scheduler.c
+++ b/test/validation/api/scheduler/scheduler.c
@@ -2297,6 +2297,57 @@ static void scheduler_test_ordered_lock(void)
CU_ASSERT(odp_queue_destroy(queue) == 0);
}
+static void enqueue_event(odp_queue_t queue)
+{
+ odp_pool_t pool;
+ odp_buffer_t buf;
+ odp_event_t ev;
+ int ret;
+
+ pool = odp_pool_lookup(MSG_POOL_NAME);
+ CU_ASSERT_FATAL(pool != ODP_POOL_INVALID);
+
+ buf = odp_buffer_alloc(pool);
+ CU_ASSERT_FATAL(buf != ODP_BUFFER_INVALID);
+ ev = odp_buffer_to_event(buf);
+ ret = odp_queue_enq(queue, ev);
+ CU_ASSERT_FATAL(ret == 0);
+}
+
+static void scheduler_test_order_wait_1_thread(void)
+{
+ odp_schedule_capability_t sched_capa;
+ odp_queue_param_t queue_param;
+ odp_queue_t queue;
+ odp_event_t ev;
+
+ CU_ASSERT(!odp_schedule_capability(&sched_capa));
+
+ sched_queue_param_init(&queue_param);
+ queue_param.sched.sync = ODP_SCHED_SYNC_ORDERED;
+ queue = odp_queue_create("ordered queue", &queue_param);
+ CU_ASSERT_FATAL(queue != ODP_QUEUE_INVALID);
+ CU_ASSERT_FATAL(odp_queue_type(queue) == ODP_QUEUE_TYPE_SCHED);
+ CU_ASSERT_FATAL(odp_queue_sched_type(queue) == ODP_SCHED_SYNC_ORDERED);
+
+ /* Set up an ordered scheduling context */
+ enqueue_event(queue);
+ ev = odp_schedule(NULL, ODP_SCHED_WAIT);
+ CU_ASSERT_FATAL(ev != ODP_EVENT_INVALID);
+ odp_event_free(ev);
+
+	/* Reference the capability field so the build fails if it does not exist */
+ printf(" (capa=%d) ", sched_capa.order_wait);
+ /* Check that order wait does not get stuck or crash */
+ odp_schedule_order_wait();
+
+ /* Release the context */
+ ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT);
+ CU_ASSERT(ev == ODP_EVENT_INVALID);
+
+ CU_ASSERT(odp_queue_destroy(queue) == 0);
+}
+
static int sched_and_plain_thread(void *arg)
{
odp_event_t ev1, ev2;
@@ -3226,6 +3277,7 @@ odp_testinfo_t scheduler_basic_suite[] = {
ODP_TEST_INFO(scheduler_test_pause_resume),
ODP_TEST_INFO(scheduler_test_pause_enqueue),
ODP_TEST_INFO(scheduler_test_ordered_lock),
+ ODP_TEST_INFO(scheduler_test_order_wait_1_thread),
ODP_TEST_INFO_CONDITIONAL(scheduler_test_flow_aware,
check_flow_aware_support),
ODP_TEST_INFO(scheduler_test_parallel),
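scheduler_test_order_wait_1_thread above is the single-thread smoke test for odp_schedule_order_wait(), added alongside the sched_capa.order_wait capability bit it prints: called inside an ordered scheduling context, it returns once the caller's context is first in order, so a following side effect happens in ingress order without taking an ordered lock. A minimal sketch of the intended worker-loop usage; process() and emit() are illustrative placeholders, not ODP calls:

    while (1) {
            odp_event_t ev = odp_schedule(NULL, ODP_SCHED_WAIT);

            process(ev);               /* may run out of order in parallel */

            odp_schedule_order_wait(); /* block until first in order */
            emit(ev);                  /* side effect now in ingress order */
    }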
diff --git a/test/validation/api/timer/timer.c b/test/validation/api/timer/timer.c
index a8af3f4fa..ccfbf5558 100644
--- a/test/validation/api/timer/timer.c
+++ b/test/validation/api/timer/timer.c
@@ -36,11 +36,12 @@
#define TICK_INVALID (~(uint64_t)0)
/* Test case options */
-#define PRIV 1
-#define EXP_RELAX 1
-#define WAIT 0
-#define CANCEL 1
-#define RESTART 1
+#define PRIV 1
+#define EXP_RELAX 1
+#define WAIT 0
+#define CANCEL 1
+#define RESTART 1
+#define FIRST_TICK 1
/* Timer helper structure */
struct test_timer {
@@ -2235,7 +2236,7 @@ static void timer_test_sched_all(void)
timer_test_all(ODP_QUEUE_TYPE_SCHED);
}
-static void timer_test_periodic(odp_queue_type_t queue_type)
+static void timer_test_periodic(odp_queue_type_t queue_type, int use_first)
{
odp_timer_capability_t timer_capa;
odp_timer_periodic_capability_t periodic_capa;
@@ -2369,7 +2370,9 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
cur_tick = odp_timer_current_tick(timer_pool);
tick = cur_tick + odp_timer_ns_to_tick(timer_pool, period_ns / 2);
- start_param.first_tick = tick;
+ if (use_first)
+ start_param.first_tick = tick;
+
start_param.freq_multiplier = multiplier;
start_param.tmo_ev = ev;
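The new use_first parameter exercises both start modes of odp_timer_periodic_start(): with first_tick left at zero the first expiration time is left for the implementation to schedule (the spec defines the exact default), otherwise expiration begins near the requested tick. A minimal sketch of the two call shapes, reusing the test's timer, ev, multiplier and period_ns; zero-initialising the struct with memset stands in for whatever defaults the test relies on:

    odp_timer_periodic_start_t start_param;

    memset(&start_param, 0, sizeof(start_param));
    start_param.freq_multiplier = multiplier;
    start_param.tmo_ev = ev;

    if (use_first)  /* FIRST_TICK variant */
            start_param.first_tick = odp_timer_current_tick(timer_pool) +
                    odp_timer_ns_to_tick(timer_pool, period_ns / 2);

    CU_ASSERT_FATAL(odp_timer_periodic_start(timer, &start_param) == 0);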
@@ -2424,7 +2427,9 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
}
CU_ASSERT(num_tmo == num);
- CU_ASSERT(diff_ns < 2 * duration_ns);
+
+	/* Allow +/-30% error on test duration */
+ CU_ASSERT((diff_ns > 0.7 * duration_ns) && (diff_ns < 1.3 * duration_ns));
/* Stop periodic timer */
ret = odp_timer_periodic_cancel(timer);
@@ -2465,6 +2470,10 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
}
}
+ /* Check that ack() returned 2 on the last event */
+ CU_ASSERT(done);
+ CU_ASSERT(ret == 2);
+
CU_ASSERT(odp_timer_free(timer) == ODP_EVENT_INVALID);
odp_timer_pool_destroy(timer_pool);
CU_ASSERT(odp_queue_destroy(queue) == 0);
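The new assertions pin down the end-of-cancel handshake: after odp_timer_periodic_cancel(), timeout events keep arriving and are acknowledged with odp_timer_periodic_ack() until ack() returns 2 for the final event. A sketch of that drain loop, matching the test's done/ret bookkeeping; the exact event-ownership rules for each return value are per the spec, so treat this as an outline:

    /* Drain remaining timeouts after odp_timer_periodic_cancel() */
    while (!done) {
            ev = odp_schedule(NULL, ODP_SCHED_WAIT);
            ret = odp_timer_periodic_ack(timer, ev);

            if (ret == 2) {         /* last event from the cancelled timer */
                    odp_event_free(ev);
                    done = 1;
            } else if (ret < 0) {   /* failure: stop instead of spinning */
                    odp_event_free(ev);
                    break;
            }
            /* other non-negative values: the timer kept the event and
             * more timeouts may still arrive */
    }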
@@ -2473,12 +2482,22 @@ static void timer_test_periodic(odp_queue_type_t queue_type)
static void timer_test_periodic_sched(void)
{
- timer_test_periodic(ODP_QUEUE_TYPE_SCHED);
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, 0);
}
static void timer_test_periodic_plain(void)
{
- timer_test_periodic(ODP_QUEUE_TYPE_PLAIN);
+ timer_test_periodic(ODP_QUEUE_TYPE_PLAIN, 0);
+}
+
+static void timer_test_periodic_sched_first(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_SCHED, FIRST_TICK);
+}
+
+static void timer_test_periodic_plain_first(void)
+{
+ timer_test_periodic(ODP_QUEUE_TYPE_PLAIN, FIRST_TICK);
}
odp_testinfo_t timer_suite[] = {
@@ -2553,8 +2572,12 @@ odp_testinfo_t timer_suite[] = {
check_sched_queue_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_sched,
check_periodic_sched_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_sched_first,
+ check_periodic_sched_support),
ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_plain,
check_periodic_plain_support),
+ ODP_TEST_INFO_CONDITIONAL(timer_test_periodic_plain_first,
+ check_periodic_plain_support),
ODP_TEST_INFO_NULL,
};