#define RTE_ETHDEV_HAS_LRO_SUPPORT

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
#define RTE_ETHDEV_DEBUG_RX
#define RTE_ETHDEV_DEBUG_TX
#endif

#include <rte_compat.h>
#include <rte_config.h>

#include "rte_dev_info.h"

extern int rte_eth_dev_logtype;

#define RTE_ETHDEV_LOG(level, ...) \
	rte_log(RTE_LOG_ ## level, rte_eth_dev_logtype, "" __VA_ARGS__)
#define RTE_ETH_FOREACH_MATCHING_DEV(id, devargs, iter) \
	for (rte_eth_iterator_init(iter, devargs), \
	     id = rte_eth_iterator_next(iter); \
	     id != RTE_MAX_ETHPORTS; \
	     id = rte_eth_iterator_next(iter))
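/*
 * Illustrative sketch, not part of the original header: list every ethdev
 * port matching a devargs string with RTE_ETH_FOREACH_MATCHING_DEV.
 * The function name and the printf() reporting (<stdio.h> assumed) are
 * choices made for this example only.
 */
static inline void
example_list_matching_ports(const char *devargs)
{
	struct rte_dev_iterator iter;
	uint16_t port_id;

	/* The macro initialises and advances the iterator itself. */
	RTE_ETH_FOREACH_MATCHING_DEV(port_id, devargs, &iter)
		printf("matched port %u\n", port_id);
}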
#define RTE_ETH_LINK_SPEED_AUTONEG 0
#define RTE_ETH_LINK_SPEED_FIXED   RTE_BIT32(0)
#define RTE_ETH_LINK_SPEED_10M_HD  RTE_BIT32(1)
#define RTE_ETH_LINK_SPEED_10M     RTE_BIT32(2)
#define RTE_ETH_LINK_SPEED_100M_HD RTE_BIT32(3)
#define RTE_ETH_LINK_SPEED_100M    RTE_BIT32(4)
#define RTE_ETH_LINK_SPEED_1G      RTE_BIT32(5)
#define RTE_ETH_LINK_SPEED_2_5G    RTE_BIT32(6)
#define RTE_ETH_LINK_SPEED_5G      RTE_BIT32(7)
#define RTE_ETH_LINK_SPEED_10G     RTE_BIT32(8)
#define RTE_ETH_LINK_SPEED_20G     RTE_BIT32(9)
#define RTE_ETH_LINK_SPEED_25G     RTE_BIT32(10)
#define RTE_ETH_LINK_SPEED_40G     RTE_BIT32(11)
#define RTE_ETH_LINK_SPEED_50G     RTE_BIT32(12)
#define RTE_ETH_LINK_SPEED_56G     RTE_BIT32(13)
#define RTE_ETH_LINK_SPEED_100G    RTE_BIT32(14)
#define RTE_ETH_LINK_SPEED_200G    RTE_BIT32(15)
#define RTE_ETH_LINK_SPEED_400G    RTE_BIT32(16)

#define RTE_ETH_SPEED_NUM_NONE    0
#define RTE_ETH_SPEED_NUM_10M     10
#define RTE_ETH_SPEED_NUM_100M    100
#define RTE_ETH_SPEED_NUM_1G      1000
#define RTE_ETH_SPEED_NUM_2_5G    2500
#define RTE_ETH_SPEED_NUM_5G      5000
#define RTE_ETH_SPEED_NUM_10G     10000
#define RTE_ETH_SPEED_NUM_20G     20000
#define RTE_ETH_SPEED_NUM_25G     25000
#define RTE_ETH_SPEED_NUM_40G     40000
#define RTE_ETH_SPEED_NUM_50G     50000
#define RTE_ETH_SPEED_NUM_56G     56000
#define RTE_ETH_SPEED_NUM_100G    100000
#define RTE_ETH_SPEED_NUM_200G    200000
#define RTE_ETH_SPEED_NUM_400G    400000
#define RTE_ETH_SPEED_NUM_UNKNOWN UINT32_MAX

#define RTE_ETH_LINK_HALF_DUPLEX 0
#define RTE_ETH_LINK_FULL_DUPLEX 1
#define RTE_ETH_LINK_DOWN        0
#define RTE_ETH_LINK_UP          1
#define RTE_ETH_LINK_FIXED       0
#define RTE_ETH_LINK_AUTONEG     1
#define RTE_ETH_LINK_MAX_STR_LEN 40
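/*
 * Illustrative sketch, not part of the original header: read the current
 * link state without waiting for auto-negotiation to finish and format it
 * with rte_eth_link_to_str() (<stdio.h> assumed for the printf()).
 */
static inline void
example_print_link(uint16_t port_id)
{
	struct rte_eth_link link;
	char text[RTE_ETH_LINK_MAX_STR_LEN];

	if (rte_eth_link_get_nowait(port_id, &link) == 0) {
		rte_eth_link_to_str(text, sizeof(text), &link);
		printf("port %u: %s\n", port_id, text);
	}
}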
#define RTE_ETH_MQ_RX_RSS_FLAG  RTE_BIT32(0)
#define RTE_ETH_MQ_RX_DCB_FLAG  RTE_BIT32(1)
#define RTE_ETH_MQ_RX_VMDQ_FLAG RTE_BIT32(2)

	RTE_ETH_VLAN_TYPE_UNKNOWN = 0,
	RTE_ETH_VLAN_TYPE_MAX,

	RTE_ETH_HASH_FUNCTION_MAX,

#define RTE_ETH_HASH_ALGO_TO_CAPA(x)   RTE_BIT32(x)
#define RTE_ETH_HASH_ALGO_CAPA_MASK(x) RTE_BIT32(RTE_ETH_HASH_FUNCTION_ ## x)
#define RTE_ETH_FLOW_UNKNOWN             0
#define RTE_ETH_FLOW_RAW                 1
#define RTE_ETH_FLOW_IPV4                2
#define RTE_ETH_FLOW_FRAG_IPV4           3
#define RTE_ETH_FLOW_NONFRAG_IPV4_TCP    4
#define RTE_ETH_FLOW_NONFRAG_IPV4_UDP    5
#define RTE_ETH_FLOW_NONFRAG_IPV4_SCTP   6
#define RTE_ETH_FLOW_NONFRAG_IPV4_OTHER  7
#define RTE_ETH_FLOW_IPV6                8
#define RTE_ETH_FLOW_FRAG_IPV6           9
#define RTE_ETH_FLOW_NONFRAG_IPV6_TCP   10
#define RTE_ETH_FLOW_NONFRAG_IPV6_UDP   11
#define RTE_ETH_FLOW_NONFRAG_IPV6_SCTP  12
#define RTE_ETH_FLOW_NONFRAG_IPV6_OTHER 13
#define RTE_ETH_FLOW_L2_PAYLOAD         14
#define RTE_ETH_FLOW_IPV6_EX            15
#define RTE_ETH_FLOW_IPV6_TCP_EX        16
#define RTE_ETH_FLOW_IPV6_UDP_EX        17
#define RTE_ETH_FLOW_PORT               18
#define RTE_ETH_FLOW_VXLAN              19
#define RTE_ETH_FLOW_GENEVE             20
#define RTE_ETH_FLOW_NVGRE              21
#define RTE_ETH_FLOW_VXLAN_GPE          22
#define RTE_ETH_FLOW_GTPU               23
#define RTE_ETH_FLOW_MAX                24
#define RTE_ETH_RSS_IPV4               RTE_BIT64(2)
#define RTE_ETH_RSS_FRAG_IPV4          RTE_BIT64(3)
#define RTE_ETH_RSS_NONFRAG_IPV4_TCP   RTE_BIT64(4)
#define RTE_ETH_RSS_NONFRAG_IPV4_UDP   RTE_BIT64(5)
#define RTE_ETH_RSS_NONFRAG_IPV4_SCTP  RTE_BIT64(6)
#define RTE_ETH_RSS_NONFRAG_IPV4_OTHER RTE_BIT64(7)
#define RTE_ETH_RSS_IPV6               RTE_BIT64(8)
#define RTE_ETH_RSS_FRAG_IPV6          RTE_BIT64(9)
#define RTE_ETH_RSS_NONFRAG_IPV6_TCP   RTE_BIT64(10)
#define RTE_ETH_RSS_NONFRAG_IPV6_UDP   RTE_BIT64(11)
#define RTE_ETH_RSS_NONFRAG_IPV6_SCTP  RTE_BIT64(12)
#define RTE_ETH_RSS_NONFRAG_IPV6_OTHER RTE_BIT64(13)
#define RTE_ETH_RSS_L2_PAYLOAD         RTE_BIT64(14)
#define RTE_ETH_RSS_IPV6_EX            RTE_BIT64(15)
#define RTE_ETH_RSS_IPV6_TCP_EX        RTE_BIT64(16)
#define RTE_ETH_RSS_IPV6_UDP_EX        RTE_BIT64(17)
#define RTE_ETH_RSS_PORT               RTE_BIT64(18)
#define RTE_ETH_RSS_VXLAN              RTE_BIT64(19)
#define RTE_ETH_RSS_GENEVE             RTE_BIT64(20)
#define RTE_ETH_RSS_NVGRE              RTE_BIT64(21)
#define RTE_ETH_RSS_GTPU               RTE_BIT64(23)
#define RTE_ETH_RSS_ETH                RTE_BIT64(24)
#define RTE_ETH_RSS_S_VLAN             RTE_BIT64(25)
#define RTE_ETH_RSS_C_VLAN             RTE_BIT64(26)
#define RTE_ETH_RSS_ESP                RTE_BIT64(27)
#define RTE_ETH_RSS_AH                 RTE_BIT64(28)
#define RTE_ETH_RSS_L2TPV3             RTE_BIT64(29)
#define RTE_ETH_RSS_PFCP               RTE_BIT64(30)
#define RTE_ETH_RSS_PPPOE              RTE_BIT64(31)
#define RTE_ETH_RSS_ECPRI              RTE_BIT64(32)
#define RTE_ETH_RSS_MPLS               RTE_BIT64(33)
#define RTE_ETH_RSS_IPV4_CHKSUM        RTE_BIT64(34)
#define RTE_ETH_RSS_L4_CHKSUM          RTE_BIT64(35)
#define RTE_ETH_RSS_L2TPV2             RTE_BIT64(36)

#define RTE_ETH_RSS_L3_SRC_ONLY        RTE_BIT64(63)
#define RTE_ETH_RSS_L3_DST_ONLY        RTE_BIT64(62)
#define RTE_ETH_RSS_L4_SRC_ONLY        RTE_BIT64(61)
#define RTE_ETH_RSS_L4_DST_ONLY        RTE_BIT64(60)
#define RTE_ETH_RSS_L2_SRC_ONLY        RTE_BIT64(59)
#define RTE_ETH_RSS_L2_DST_ONLY        RTE_BIT64(58)

#define RTE_ETH_RSS_L3_PRE32           RTE_BIT64(57)
#define RTE_ETH_RSS_L3_PRE40           RTE_BIT64(56)
#define RTE_ETH_RSS_L3_PRE48           RTE_BIT64(55)
#define RTE_ETH_RSS_L3_PRE56           RTE_BIT64(54)
#define RTE_ETH_RSS_L3_PRE64           RTE_BIT64(53)
#define RTE_ETH_RSS_L3_PRE96           RTE_BIT64(52)

#define RTE_ETH_RSS_LEVEL_PMD_DEFAULT  (UINT64_C(0) << 50)
#define RTE_ETH_RSS_LEVEL_OUTERMOST    (UINT64_C(1) << 50)
#define RTE_ETH_RSS_LEVEL_INNERMOST    (UINT64_C(2) << 50)
#define RTE_ETH_RSS_LEVEL_MASK         (UINT64_C(3) << 50)

#define RTE_ETH_RSS_LEVEL(rss_hf) ((rss_hf & RTE_ETH_RSS_LEVEL_MASK) >> 50)
static inline uint64_t
rte_eth_rss_hf_refine(uint64_t rss_hf)
{
	if ((rss_hf & RTE_ETH_RSS_L3_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L3_DST_ONLY))
		rss_hf &= ~(RTE_ETH_RSS_L3_SRC_ONLY | RTE_ETH_RSS_L3_DST_ONLY);
	if ((rss_hf & RTE_ETH_RSS_L4_SRC_ONLY) && (rss_hf & RTE_ETH_RSS_L4_DST_ONLY))
		rss_hf &= ~(RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY);

	return rss_hf;
}
#define RTE_ETH_RSS_IPV6_PRE32 ( \
		RTE_ETH_RSS_IPV6 | \
		RTE_ETH_RSS_L3_PRE32)

#define RTE_ETH_RSS_IPV6_PRE40 ( \
		RTE_ETH_RSS_IPV6 | \
		RTE_ETH_RSS_L3_PRE40)

#define RTE_ETH_RSS_IPV6_PRE48 ( \
		RTE_ETH_RSS_IPV6 | \
		RTE_ETH_RSS_L3_PRE48)

#define RTE_ETH_RSS_IPV6_PRE56 ( \
		RTE_ETH_RSS_IPV6 | \
		RTE_ETH_RSS_L3_PRE56)

#define RTE_ETH_RSS_IPV6_PRE64 ( \
		RTE_ETH_RSS_IPV6 | \
		RTE_ETH_RSS_L3_PRE64)

#define RTE_ETH_RSS_IPV6_PRE96 ( \
		RTE_ETH_RSS_IPV6 | \
		RTE_ETH_RSS_L3_PRE96)
#define RTE_ETH_RSS_IPV6_PRE32_UDP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
		RTE_ETH_RSS_L3_PRE32)
#define RTE_ETH_RSS_IPV6_PRE40_UDP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
		RTE_ETH_RSS_L3_PRE40)
#define RTE_ETH_RSS_IPV6_PRE48_UDP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
		RTE_ETH_RSS_L3_PRE48)
#define RTE_ETH_RSS_IPV6_PRE56_UDP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
		RTE_ETH_RSS_L3_PRE56)
#define RTE_ETH_RSS_IPV6_PRE64_UDP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
		RTE_ETH_RSS_L3_PRE64)
#define RTE_ETH_RSS_IPV6_PRE96_UDP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
		RTE_ETH_RSS_L3_PRE96)
#define RTE_ETH_RSS_IPV6_PRE32_TCP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
		RTE_ETH_RSS_L3_PRE32)
#define RTE_ETH_RSS_IPV6_PRE40_TCP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
		RTE_ETH_RSS_L3_PRE40)
#define RTE_ETH_RSS_IPV6_PRE48_TCP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
		RTE_ETH_RSS_L3_PRE48)
#define RTE_ETH_RSS_IPV6_PRE56_TCP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
		RTE_ETH_RSS_L3_PRE56)
#define RTE_ETH_RSS_IPV6_PRE64_TCP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
		RTE_ETH_RSS_L3_PRE64)
#define RTE_ETH_RSS_IPV6_PRE96_TCP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
		RTE_ETH_RSS_L3_PRE96)
#define RTE_ETH_RSS_IPV6_PRE32_SCTP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
		RTE_ETH_RSS_L3_PRE32)
#define RTE_ETH_RSS_IPV6_PRE40_SCTP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
		RTE_ETH_RSS_L3_PRE40)
#define RTE_ETH_RSS_IPV6_PRE48_SCTP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
		RTE_ETH_RSS_L3_PRE48)
#define RTE_ETH_RSS_IPV6_PRE56_SCTP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
		RTE_ETH_RSS_L3_PRE56)
#define RTE_ETH_RSS_IPV6_PRE64_SCTP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
		RTE_ETH_RSS_L3_PRE64)
#define RTE_ETH_RSS_IPV6_PRE96_SCTP ( \
		RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
		RTE_ETH_RSS_L3_PRE96)
#define RTE_ETH_RSS_IP ( \
	RTE_ETH_RSS_IPV4 | \
	RTE_ETH_RSS_FRAG_IPV4 | \
	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
	RTE_ETH_RSS_IPV6 | \
	RTE_ETH_RSS_FRAG_IPV6 | \
	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
	RTE_ETH_RSS_IPV6_EX)

#define RTE_ETH_RSS_UDP ( \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
	RTE_ETH_RSS_IPV6_UDP_EX)

#define RTE_ETH_RSS_TCP ( \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_IPV6_TCP_EX)

#define RTE_ETH_RSS_SCTP ( \
	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
	RTE_ETH_RSS_NONFRAG_IPV6_SCTP)
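/*
 * Illustrative sketch, not part of the original header: enable IP, TCP and
 * UDP RSS hashing with the driver's default key, keeping only the hash types
 * the device reports in dev_info.flow_type_rss_offloads.
 */
static inline int
example_enable_rss(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* keep the PMD default key */
		.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP,
	};
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	rss_conf.rss_hf &= dev_info.flow_type_rss_offloads;
	return rte_eth_dev_rss_hash_update(port_id, &rss_conf);
}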
#define RTE_ETH_RSS_TUNNEL ( \
	RTE_ETH_RSS_VXLAN | \
	RTE_ETH_RSS_GENEVE | \
	RTE_ETH_RSS_NVGRE)

#define RTE_ETH_RSS_VLAN ( \
	RTE_ETH_RSS_S_VLAN | \
	RTE_ETH_RSS_C_VLAN)

#define RTE_ETH_RSS_PROTO_MASK ( \
	RTE_ETH_RSS_IPV4 | \
	RTE_ETH_RSS_FRAG_IPV4 | \
	RTE_ETH_RSS_NONFRAG_IPV4_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
	RTE_ETH_RSS_NONFRAG_IPV4_SCTP | \
	RTE_ETH_RSS_NONFRAG_IPV4_OTHER | \
	RTE_ETH_RSS_IPV6 | \
	RTE_ETH_RSS_FRAG_IPV6 | \
	RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
	RTE_ETH_RSS_NONFRAG_IPV6_UDP | \
	RTE_ETH_RSS_NONFRAG_IPV6_SCTP | \
	RTE_ETH_RSS_NONFRAG_IPV6_OTHER | \
	RTE_ETH_RSS_L2_PAYLOAD | \
	RTE_ETH_RSS_IPV6_EX | \
	RTE_ETH_RSS_IPV6_TCP_EX | \
	RTE_ETH_RSS_IPV6_UDP_EX | \
	RTE_ETH_RSS_PORT | \
	RTE_ETH_RSS_VXLAN | \
	RTE_ETH_RSS_GENEVE | \
	RTE_ETH_RSS_NVGRE | \
	RTE_ETH_RSS_MPLS)
#define RTE_ETH_RSS_RETA_SIZE_64  64
#define RTE_ETH_RSS_RETA_SIZE_128 128
#define RTE_ETH_RSS_RETA_SIZE_256 256
#define RTE_ETH_RSS_RETA_SIZE_512 512
#define RTE_ETH_RETA_GROUP_SIZE   64
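/*
 * Illustrative sketch, not part of the original header: spread the whole RSS
 * redirection table round-robin over nb_queues Rx queues. It assumes a RETA
 * of at most RTE_ETH_RSS_RETA_SIZE_512 entries and that <string.h> is
 * available for memset().
 */
static inline int
example_spread_reta(uint16_t port_id, uint16_t nb_queues)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RTE_ETH_RSS_RETA_SIZE_512 /
			RTE_ETH_RETA_GROUP_SIZE];
	struct rte_eth_dev_info dev_info;
	uint16_t i;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;
	if (nb_queues == 0 || dev_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512)
		return -1;

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < dev_info.reta_size; i++) {
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].reta[i % RTE_ETH_RETA_GROUP_SIZE] =
				i % nb_queues;
	}
	return rte_eth_dev_rss_reta_update(port_id, reta_conf, dev_info.reta_size);
}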
#define RTE_ETH_VMDQ_MAX_VLAN_FILTERS   64
#define RTE_ETH_DCB_NUM_USER_PRIORITIES 8
#define RTE_ETH_VMDQ_DCB_NUM_QUEUES     128
#define RTE_ETH_DCB_NUM_QUEUES          128

#define RTE_ETH_DCB_PG_SUPPORT  RTE_BIT32(0)
#define RTE_ETH_DCB_PFC_SUPPORT RTE_BIT32(1)

#define RTE_ETH_VLAN_STRIP_OFFLOAD  0x0001
#define RTE_ETH_VLAN_FILTER_OFFLOAD 0x0002
#define RTE_ETH_VLAN_EXTEND_OFFLOAD 0x0004
#define RTE_ETH_QINQ_STRIP_OFFLOAD  0x0008

#define RTE_ETH_VLAN_STRIP_MASK  0x0001
#define RTE_ETH_VLAN_FILTER_MASK 0x0002
#define RTE_ETH_VLAN_EXTEND_MASK 0x0004
#define RTE_ETH_QINQ_STRIP_MASK  0x0008
#define RTE_ETH_VLAN_ID_MAX      0x0FFF
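/*
 * Illustrative sketch, not part of the original header: enable VLAN stripping
 * on a port while leaving the other VLAN offload bits as they are.
 */
static inline int
example_enable_vlan_strip(uint16_t port_id)
{
	int offloads = rte_eth_dev_get_vlan_offload(port_id);

	if (offloads < 0)
		return offloads;
	offloads |= RTE_ETH_VLAN_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, offloads);
}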
#define RTE_ETH_NUM_RECEIVE_MAC_ADDR   128
#define RTE_ETH_VMDQ_NUM_UC_HASH_ARRAY 128

#define RTE_ETH_VMDQ_ACCEPT_UNTAG     RTE_BIT32(0)
#define RTE_ETH_VMDQ_ACCEPT_HASH_MC   RTE_BIT32(1)
#define RTE_ETH_VMDQ_ACCEPT_HASH_UC   RTE_BIT32(2)
#define RTE_ETH_VMDQ_ACCEPT_BROADCAST RTE_BIT32(3)
#define RTE_ETH_VMDQ_ACCEPT_MULTICAST RTE_BIT32(4)

	uint16_t reta[RTE_ETH_RETA_GROUP_SIZE];

struct rte_eth_dcb_rx_conf {
struct rte_eth_vmdq_dcb_tx_conf {
struct rte_eth_dcb_tx_conf {
struct rte_eth_vmdq_tx_conf {

	uint16_t rx_nmempool;

#define RTE_ETH_MAX_HAIRPIN_PEERS 32

	RTE_ETH_TUNNEL_TYPE_NONE = 0,
	RTE_ETH_TUNNEL_TYPE_VXLAN,
	RTE_ETH_TUNNEL_TYPE_GENEVE,
	RTE_ETH_TUNNEL_TYPE_TEREDO,
	RTE_ETH_TUNNEL_TYPE_NVGRE,
	RTE_ETH_TUNNEL_TYPE_IP_IN_GRE,
	RTE_ETH_L2_TUNNEL_TYPE_E_TAG,
	RTE_ETH_TUNNEL_TYPE_VXLAN_GPE,
	RTE_ETH_TUNNEL_TYPE_ECPRI,
	RTE_ETH_TUNNEL_TYPE_MAX,

#define rte_intr_conf rte_eth_intr_conf

	struct rte_eth_dcb_rx_conf dcb_rx_conf;
	struct rte_eth_vmdq_dcb_tx_conf vmdq_dcb_tx_conf;
	struct rte_eth_dcb_tx_conf dcb_tx_conf;
	struct rte_eth_vmdq_tx_conf vmdq_tx_conf;
#define RTE_ETH_RX_OFFLOAD_VLAN_STRIP       RTE_BIT64(0)
#define RTE_ETH_RX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
#define RTE_ETH_RX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
#define RTE_ETH_RX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
#define RTE_ETH_RX_OFFLOAD_TCP_LRO          RTE_BIT64(4)
#define RTE_ETH_RX_OFFLOAD_QINQ_STRIP       RTE_BIT64(5)
#define RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM RTE_BIT64(6)
#define RTE_ETH_RX_OFFLOAD_MACSEC_STRIP     RTE_BIT64(7)
#define RTE_ETH_RX_OFFLOAD_VLAN_FILTER      RTE_BIT64(9)
#define RTE_ETH_RX_OFFLOAD_VLAN_EXTEND      RTE_BIT64(10)
#define RTE_ETH_RX_OFFLOAD_SCATTER          RTE_BIT64(13)
#define RTE_ETH_RX_OFFLOAD_TIMESTAMP        RTE_BIT64(14)
#define RTE_ETH_RX_OFFLOAD_SECURITY         RTE_BIT64(15)
#define RTE_ETH_RX_OFFLOAD_KEEP_CRC         RTE_BIT64(16)
#define RTE_ETH_RX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(17)
#define RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM  RTE_BIT64(18)
#define RTE_ETH_RX_OFFLOAD_RSS_HASH         RTE_BIT64(19)
#define RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT     RTE_BIT64(20)

#define RTE_ETH_RX_OFFLOAD_CHECKSUM (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM | \
				     RTE_ETH_RX_OFFLOAD_UDP_CKSUM | \
				     RTE_ETH_RX_OFFLOAD_TCP_CKSUM)
#define RTE_ETH_RX_OFFLOAD_VLAN (RTE_ETH_RX_OFFLOAD_VLAN_STRIP | \
				 RTE_ETH_RX_OFFLOAD_VLAN_FILTER | \
				 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND | \
				 RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
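/*
 * Illustrative sketch, not part of the original header: request Rx checksum
 * and RSS-hash delivery at configure time, masked by what the device
 * advertises in dev_info.rx_offload_capa. Queue counts are caller supplied;
 * every other field of rte_eth_conf is left at its zero default.
 */
static inline int
example_configure_rx_offloads(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = {0};
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	conf.rxmode.offloads = (RTE_ETH_RX_OFFLOAD_CHECKSUM |
				RTE_ETH_RX_OFFLOAD_RSS_HASH) &
				dev_info.rx_offload_capa;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}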
#define RTE_ETH_TX_OFFLOAD_VLAN_INSERT       RTE_BIT64(0)
#define RTE_ETH_TX_OFFLOAD_IPV4_CKSUM        RTE_BIT64(1)
#define RTE_ETH_TX_OFFLOAD_UDP_CKSUM         RTE_BIT64(2)
#define RTE_ETH_TX_OFFLOAD_TCP_CKSUM         RTE_BIT64(3)
#define RTE_ETH_TX_OFFLOAD_SCTP_CKSUM        RTE_BIT64(4)
#define RTE_ETH_TX_OFFLOAD_TCP_TSO           RTE_BIT64(5)
#define RTE_ETH_TX_OFFLOAD_UDP_TSO           RTE_BIT64(6)
#define RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM  RTE_BIT64(7)
#define RTE_ETH_TX_OFFLOAD_QINQ_INSERT       RTE_BIT64(8)
#define RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO     RTE_BIT64(9)
#define RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO       RTE_BIT64(10)
#define RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO      RTE_BIT64(11)
#define RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO    RTE_BIT64(12)
#define RTE_ETH_TX_OFFLOAD_MACSEC_INSERT     RTE_BIT64(13)
#define RTE_ETH_TX_OFFLOAD_MT_LOCKFREE       RTE_BIT64(14)
#define RTE_ETH_TX_OFFLOAD_MULTI_SEGS        RTE_BIT64(15)
#define RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE    RTE_BIT64(16)
#define RTE_ETH_TX_OFFLOAD_SECURITY          RTE_BIT64(17)
#define RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO       RTE_BIT64(18)
#define RTE_ETH_TX_OFFLOAD_IP_TNL_TSO        RTE_BIT64(19)
#define RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM   RTE_BIT64(20)
#define RTE_ETH_TX_OFFLOAD_SEND_ON_TIMESTAMP RTE_BIT64(21)
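/*
 * Illustrative sketch, not part of the original header: report whether a set
 * of Tx offload bits, e.g. RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
 * RTE_ETH_TX_OFFLOAD_MULTI_SEGS, is fully supported by a port before asking
 * for it in txmode.offloads.
 */
static inline int
example_tx_offloads_supported(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info dev_info;
	int ret = rte_eth_dev_info_get(port_id, &dev_info);

	if (ret != 0)
		return ret;
	return (dev_info.tx_offload_capa & wanted) == wanted;
}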
#define RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP  RTE_BIT64(0)
#define RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP  RTE_BIT64(1)
#define RTE_ETH_DEV_CAPA_RXQ_SHARE               RTE_BIT64(2)
#define RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP          RTE_BIT64(3)
#define RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP RTE_BIT64(4)

#define RTE_ETH_DEV_FALLBACK_RX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_TX_RINGSIZE 512
#define RTE_ETH_DEV_FALLBACK_RX_NBQUEUES 1
#define RTE_ETH_DEV_FALLBACK_TX_NBQUEUES 1

#define RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID (UINT16_MAX)

	uint32_t rss_algo_capa;

#define RTE_ETH_QUEUE_STATE_STOPPED 0
#define RTE_ETH_QUEUE_STATE_STARTED 1
#define RTE_ETH_QUEUE_STATE_HAIRPIN 2

#define RTE_ETH_BURST_FLAG_PER_QUEUE RTE_BIT64(0)

#define RTE_ETH_BURST_MODE_INFO_SIZE 1024

#define RTE_ETH_XSTATS_NAME_SIZE 64

#define RTE_ETH_DCB_NUM_TCS   8
#define RTE_ETH_MAX_VMDQ_POOL 64

	} tc_rxq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];
	} tc_txq[RTE_ETH_MAX_VMDQ_POOL][RTE_ETH_DCB_NUM_TCS];

#define RTE_ETH_FEC_MODE_TO_CAPA(x)   RTE_BIT32(x)
#define RTE_ETH_FEC_MODE_CAPA_MASK(x) RTE_BIT32(RTE_ETH_FEC_ ## x)

struct rte_eth_fec_capa {

#define RTE_ETH_ALL RTE_MAX_ETHPORTS
#define RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, retval) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return retval; \
	} \
} while (0)
#define RTE_ETH_VALID_PORTID_OR_RET(port_id) do { \
	if (!rte_eth_dev_is_valid_port(port_id)) { \
		RTE_ETHDEV_LOG(ERR, "Invalid port_id=%u\n", port_id); \
		return; \
	} \
} while (0)
		struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t max_pkts,
		struct rte_mbuf *pkts[], uint16_t nb_pkts, void *user_param);

struct rte_eth_dev_sriov {
	uint8_t nb_q_per_pool;
	uint16_t def_vmdq_idx;
	uint16_t def_pool_q_idx;
#define RTE_ETH_DEV_SRIOV(dev) ((dev)->data->sriov)

#define RTE_ETH_NAME_MAX_LEN RTE_DEV_NAME_MAX_LEN

#define RTE_ETH_DEV_NO_OWNER 0

#define RTE_ETH_MAX_OWNER_NAME_LEN 64

struct rte_eth_dev_owner {
	char name[RTE_ETH_MAX_OWNER_NAME_LEN];

#define RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE  RTE_BIT32(0)
#define RTE_ETH_DEV_INTR_LSC              RTE_BIT32(1)
#define RTE_ETH_DEV_BONDING_MEMBER        RTE_BIT32(2)
#define RTE_ETH_DEV_INTR_RMV              RTE_BIT32(3)
#define RTE_ETH_DEV_REPRESENTOR           RTE_BIT32(4)
#define RTE_ETH_DEV_NOLIVE_MAC_ADDR       RTE_BIT32(5)
#define RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS RTE_BIT32(6)

		const uint64_t owner_id);

#define RTE_ETH_FOREACH_DEV_OWNED_BY(p, o) \
	for (p = rte_eth_find_next_owned_by(0, o); \
	     (unsigned int)p < (unsigned int)RTE_MAX_ETHPORTS; \
	     p = rte_eth_find_next_owned_by(p + 1, o))

#define RTE_ETH_FOREACH_DEV(p) \
	RTE_ETH_FOREACH_DEV_OWNED_BY(p, RTE_ETH_DEV_NO_OWNER)
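/*
 * Illustrative sketch, not part of the original header: iterate over all
 * valid, ownerless ports and print their device names (<stdio.h> assumed).
 */
static inline void
example_list_ports(void)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	uint16_t port_id;

	RTE_ETH_FOREACH_DEV(port_id) {
		if (rte_eth_dev_get_name_by_port(port_id, name) == 0)
			printf("port %u: %s\n", port_id, name);
	}
}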
		const struct rte_device *parent);

#define RTE_ETH_FOREACH_DEV_OF(port_id, parent) \
	for (port_id = rte_eth_find_next_of(0, parent); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_of(port_id + 1, parent))

#define RTE_ETH_FOREACH_DEV_SIBLING(port_id, ref_port_id) \
	for (port_id = rte_eth_find_next_sibling(0, ref_port_id); \
	     port_id < RTE_MAX_ETHPORTS; \
	     port_id = rte_eth_find_next_sibling(port_id + 1, ref_port_id))

		const struct rte_eth_dev_owner *owner);
		const uint64_t owner_id);
		struct rte_eth_dev_owner *owner);
		uint16_t nb_tx_queue, const struct rte_eth_conf *eth_conf);
		uint16_t nb_rx_desc, unsigned int socket_id,
	(uint16_t port_id, uint16_t rx_queue_id, uint16_t nb_rx_desc,
		uint16_t nb_tx_desc, unsigned int socket_id,
	(uint16_t port_id, uint16_t tx_queue_id, uint16_t nb_tx_desc,
		size_t len, uint32_t direction);
		uint64_t *values, unsigned int size);
		uint16_t tx_queue_id, uint8_t stat_idx);
		uint16_t rx_queue_id,
		char *fw_version, size_t fw_size);
		uint32_t *ptypes, int num);
		uint32_t *set_ptypes, unsigned int num);
		uint8_t avail_thresh);
		uint8_t *avail_thresh);

typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
		void *userdata);

	buffer_tx_error_fn error_callback;
	void *error_userdata;

#define RTE_ETH_TX_BUFFER_SIZE(sz) \
	(sizeof(struct rte_eth_dev_tx_buffer) + (sz) * sizeof(struct rte_mbuf *))
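/*
 * Illustrative sketch, not part of the original header: queue one mbuf into
 * a Tx buffer and flush it at the end of a burst. txb is assumed to have
 * been allocated once with rte_zmalloc_socket() (from <rte_malloc.h>) using
 * RTE_ETH_TX_BUFFER_SIZE(32) bytes and initialised with
 * rte_eth_tx_buffer_init(txb, 32).
 */
static inline void
example_buffered_tx(uint16_t port_id, uint16_t queue_id,
		struct rte_eth_dev_tx_buffer *txb,
		struct rte_mbuf *m, int end_of_burst)
{
	/* rte_eth_tx_buffer() only transmits once the buffer fills up;
	 * the explicit flush pushes out whatever is still pending. */
	rte_eth_tx_buffer(port_id, queue_id, txb, m);
	if (end_of_burst)
		rte_eth_tx_buffer_flush(port_id, queue_id, txb);
}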
		buffer_tx_error_fn callback, void *userdata);
		int epfd, int op, void *data);
		struct rte_eth_fec_capa *speed_fec_capa,
		uint16_t reta_size);
		uint16_t reta_size);

struct rte_eth_rxtx_callback;

const struct rte_eth_rxtx_callback *
const struct rte_eth_rxtx_callback *
const struct rte_eth_rxtx_callback *
		const struct rte_eth_rxtx_callback *user_cb);
		const struct rte_eth_rxtx_callback *user_cb);
		struct rte_power_monitor_cond *pmc);
		struct rte_dev_eeprom_info *info);
		uint32_t nb_mc_addr);
		struct timespec *timestamp, uint32_t flags);
		struct timespec *timestamp);
		uint16_t *nb_rx_desc,
		uint16_t *nb_tx_desc);

#define RTE_ETH_RX_METADATA_USER_FLAG RTE_BIT64(0)
#define RTE_ETH_RX_METADATA_USER_MARK RTE_BIT64(1)
#define RTE_ETH_RX_METADATA_TUNNEL_ID RTE_BIT64(2)

#define RTE_ETH_DEV_REASSEMBLY_F_IPV4 (RTE_BIT32(0))
#define RTE_ETH_DEV_REASSEMBLY_F_IPV6 (RTE_BIT32(1))

		uint16_t offset, uint16_t num, FILE *file);
		uint16_t offset, uint16_t num, FILE *file);

uint16_t rte_eth_call_rx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **rx_pkts, uint16_t nb_rx, uint16_t nb_pkts,
static inline uint16_t
rte_eth_rx_burst(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **rx_pkts, const uint16_t nb_pkts)
{
	struct rte_eth_fp_ops *p;

#ifdef RTE_ETHDEV_DEBUG_RX
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid port_id=%u or queue_id=%u\n",

	p = &rte_eth_fp_ops[port_id];
	qd = p->rxq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u for port_id=%u\n",

	nb_rx = p->rx_pkt_burst(qd, rx_pkts, nb_pkts);

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
		cb = rte_atomic_load_explicit(&p->rxq.clbk[queue_id],
				rte_memory_order_relaxed);
			nb_rx = rte_eth_call_rx_callbacks(port_id, queue_id,
					rx_pkts, nb_rx, nb_pkts, cb);

	rte_ethdev_trace_rx_burst(port_id, queue_id, (void **)rx_pkts, nb_rx);
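/*
 * Illustrative sketch, not part of the original header: minimal Rx poll loop
 * around rte_eth_rx_burst(); packets are freed immediately just to keep the
 * example short (rte_pktmbuf_free() is from <rte_mbuf.h>).
 */
static inline void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	for (;;) {
		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (i = 0; i < nb_rx; i++)
			rte_pktmbuf_free(pkts[i]);
	}
}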
	struct rte_eth_fp_ops *p;

#ifdef RTE_ETHDEV_DEBUG_RX
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid port_id=%u or queue_id=%u\n",

	p = &rte_eth_fp_ops[port_id];
	qd = p->rxq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (*p->rx_queue_count == NULL)
	return (int)(*p->rx_queue_count)(qd);

#define RTE_ETH_RX_DESC_AVAIL   0
#define RTE_ETH_RX_DESC_DONE    1
#define RTE_ETH_RX_DESC_UNAVAIL 2

	struct rte_eth_fp_ops *p;

#ifdef RTE_ETHDEV_DEBUG_RX
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid port_id=%u or queue_id=%u\n",

	p = &rte_eth_fp_ops[port_id];
	qd = p->rxq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (*p->rx_descriptor_status == NULL)
	return (*p->rx_descriptor_status)(qd, offset);

#define RTE_ETH_TX_DESC_FULL    0
#define RTE_ETH_TX_DESC_DONE    1
#define RTE_ETH_TX_DESC_UNAVAIL 2
		uint16_t queue_id, uint16_t offset)
	struct rte_eth_fp_ops *p;

#ifdef RTE_ETHDEV_DEBUG_TX
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid port_id=%u or queue_id=%u\n",

	p = &rte_eth_fp_ops[port_id];
	qd = p->txq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

	if (*p->tx_descriptor_status == NULL)
	return (*p->tx_descriptor_status)(qd, offset);
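/*
 * Illustrative sketch, not part of the original header: count how many of
 * the next n Tx descriptors of a queue are already done and reclaimable,
 * using rte_eth_tx_descriptor_status() and RTE_ETH_TX_DESC_DONE.
 */
static inline uint16_t
example_count_done_tx_desc(uint16_t port_id, uint16_t queue_id, uint16_t n)
{
	uint16_t offset, done = 0;

	for (offset = 0; offset < n; offset++)
		if (rte_eth_tx_descriptor_status(port_id, queue_id, offset) ==
				RTE_ETH_TX_DESC_DONE)
			done++;
	return done;
}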
uint16_t rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
	struct rte_mbuf **tx_pkts, uint16_t nb_pkts, void *opaque);
static inline uint16_t
rte_eth_tx_burst(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_fp_ops *p;

#ifdef RTE_ETHDEV_DEBUG_TX
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid port_id=%u or queue_id=%u\n",

	p = &rte_eth_fp_ops[port_id];
	qd = p->txq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u for port_id=%u\n",

#ifdef RTE_ETHDEV_RXTX_CALLBACKS
		cb = rte_atomic_load_explicit(&p->txq.clbk[queue_id],
				rte_memory_order_relaxed);
			nb_pkts = rte_eth_call_tx_callbacks(port_id, queue_id,
					tx_pkts, nb_pkts, cb);

	nb_pkts = p->tx_pkt_burst(qd, tx_pkts, nb_pkts);

	rte_ethdev_trace_tx_burst(port_id, queue_id, (void **)tx_pkts, nb_pkts);
#ifndef RTE_ETHDEV_TX_PREPARE_NOOP

static inline uint16_t
rte_eth_tx_prepare(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_fp_ops *p;

#ifdef RTE_ETHDEV_DEBUG_TX
	if (port_id >= RTE_MAX_ETHPORTS ||
			queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid port_id=%u or queue_id=%u\n",

	p = &rte_eth_fp_ops[port_id];
	qd = p->txq.data[queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
		RTE_ETHDEV_LOG(ERR, "Invalid Tx port_id=%u\n", port_id);
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u for port_id=%u\n",

	if (!p->tx_pkt_prepare)
		return nb_pkts;

	return p->tx_pkt_prepare(qd, tx_pkts, nb_pkts);
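/*
 * Illustrative sketch, not part of the original header: run a burst through
 * rte_eth_tx_prepare() before rte_eth_tx_burst() and free whatever the
 * driver rejected or could not enqueue (rte_pktmbuf_free() from <rte_mbuf.h>).
 */
static inline uint16_t
example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep, nb_tx, i;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	nb_tx = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
	/* Free both the unsent and the rejected tail of the burst. */
	for (i = nb_tx; i < nb_pkts; i++)
		rte_pktmbuf_free(pkts[i]);
	return nb_tx;
}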
static inline uint16_t

static inline uint16_t
	uint16_t to_send = buffer->length;

		buffer->error_callback(&buffer->pkts[sent],
				(uint16_t)(to_send - sent),
				buffer->error_userdata);
static inline uint16_t
rte_eth_recycle_mbufs(uint16_t rx_port_id, uint16_t rx_queue_id,
		uint16_t tx_port_id, uint16_t tx_queue_id,
		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
	struct rte_eth_fp_ops *p1, *p2;

#ifdef RTE_ETHDEV_DEBUG_TX
	if (tx_port_id >= RTE_MAX_ETHPORTS ||
			tx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
			"Invalid tx_port_id=%u or tx_queue_id=%u\n",
			tx_port_id, tx_queue_id);

	p1 = &rte_eth_fp_ops[tx_port_id];
	qd1 = p1->txq.data[tx_queue_id];

#ifdef RTE_ETHDEV_DEBUG_TX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(tx_port_id, 0);
		RTE_ETHDEV_LOG(ERR,
			"Invalid Tx queue_id=%u for port_id=%u\n",
			tx_queue_id, tx_port_id);

	if (p1->recycle_tx_mbufs_reuse == NULL)

#ifdef RTE_ETHDEV_DEBUG_RX
	if (rx_port_id >= RTE_MAX_ETHPORTS ||
			rx_queue_id >= RTE_MAX_QUEUES_PER_PORT) {
		RTE_ETHDEV_LOG(ERR,
			"Invalid rx_port_id=%u or rx_queue_id=%u\n",
			rx_port_id, rx_queue_id);

	p2 = &rte_eth_fp_ops[rx_port_id];
	qd2 = p2->rxq.data[rx_queue_id];

#ifdef RTE_ETHDEV_DEBUG_RX
	RTE_ETH_VALID_PORTID_OR_ERR_RET(rx_port_id, 0);
		RTE_ETHDEV_LOG(ERR,
			"Invalid Rx queue_id=%u for port_id=%u\n",
			rx_queue_id, rx_port_id);

	if (p2->recycle_rx_descriptors_refill == NULL)

	nb_mbufs = p1->recycle_tx_mbufs_reuse(qd1, recycle_rxq_info);

	p2->recycle_rx_descriptors_refill(qd2, nb_mbufs);
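/*
 * Illustrative sketch, not part of the original header: a forwarding step
 * that recycles mbufs just transmitted on (tx_port, tx_queue) straight back
 * into the Rx ring of (rx_port, rx_queue). recycle_rxq_info is assumed to
 * have been filled once beforehand with rte_eth_recycle_rx_queue_info_get();
 * the burst size of 32 is arbitrary.
 */
static inline void
example_forward_with_recycle(uint16_t rx_port, uint16_t rx_queue,
		uint16_t tx_port, uint16_t tx_queue,
		struct rte_eth_recycle_rxq_info *recycle_rxq_info)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, nb_tx;

	nb_rx = rte_eth_rx_burst(rx_port, rx_queue, pkts, 32);
	nb_tx = rte_eth_tx_burst(tx_port, tx_queue, pkts, nb_rx);
	while (nb_tx < nb_rx)
		rte_pktmbuf_free(pkts[nb_tx++]);

	/* Move mbufs freed on the Tx side back into the Rx descriptor ring. */
	rte_eth_recycle_mbufs(rx_port, rx_queue, tx_port, tx_queue,
			recycle_rxq_info);
}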