DPDK 17.11.4
rte_mbuf.h
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>
#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_memory.h>
#include <rte_atomic.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Packet Offload Features Flags. These flags also carry packet type
 * information. They are a critical resource: RX and TX share these bits,
 * so be cautious with any change.
 *
 * - RX flags start at bit position zero, and get added to the left of
 *   previous flags.
 * - The most-significant 3 bits are reserved for generic mbuf flags.
 * - TX flags therefore start at bit position 60 (i.e. 63-3), and new flags
 *   get added to the right of the previously defined flags, i.e. they
 *   should count downwards, not upwards.
 *
 * Keep these flags synchronized with rte_get_rx_ol_flag_name() and
 * rte_get_tx_ol_flag_name().
 */

/**
 * The RX packet is a 802.1q VLAN packet, and the tci has been saved in
 * mbuf->vlan_tci.
 */
#define PKT_RX_VLAN          (1ULL << 0)

#define PKT_RX_RSS_HASH      (1ULL << 1) /**< RX packet with RSS hash result. */
#define PKT_RX_FDIR          (1ULL << 2) /**< RX packet with FDIR match indicate. */

/**
 * Deprecated: checking this flag alone is deprecated, check the 2 bits of
 * PKT_RX_L4_CKSUM_MASK instead. Set when the L4 checksum of a packet was
 * detected as wrong by the hardware.
 */
#define PKT_RX_L4_CKSUM_BAD  (1ULL << 3)

/**
 * Deprecated: checking this flag alone is deprecated, check the 2 bits of
 * PKT_RX_IP_CKSUM_MASK instead. Set when the IP checksum of a packet was
 * detected as wrong by the hardware.
 */
#define PKT_RX_IP_CKSUM_BAD  (1ULL << 4)

#define PKT_RX_EIP_CKSUM_BAD (1ULL << 5) /**< External IP header checksum error. */

/**
 * A vlan has been stripped by the hardware and its tci is saved in
 * mbuf->vlan_tci. This can only happen if vlan stripping is enabled in the
 * RX configuration of the PMD.
 */
#define PKT_RX_VLAN_STRIPPED (1ULL << 6)

/**
 * Mask of bits used to determine the status of RX IP checksum.
 * - PKT_RX_IP_CKSUM_UNKNOWN: no information about the RX IP checksum
 * - PKT_RX_IP_CKSUM_BAD: the IP checksum in the packet is wrong
 * - PKT_RX_IP_CKSUM_GOOD: the IP checksum in the packet is valid
 * - PKT_RX_IP_CKSUM_NONE: the IP checksum is not correct in the packet
 *   data, but the integrity of the IP header is verified.
 */
#define PKT_RX_IP_CKSUM_MASK ((1ULL << 4) | (1ULL << 7))

#define PKT_RX_IP_CKSUM_UNKNOWN 0
#define PKT_RX_IP_CKSUM_BAD     (1ULL << 4)
#define PKT_RX_IP_CKSUM_GOOD    (1ULL << 7)
#define PKT_RX_IP_CKSUM_NONE    ((1ULL << 4) | (1ULL << 7))

/**
 * Mask of bits used to determine the status of RX L4 checksum.
 * - PKT_RX_L4_CKSUM_UNKNOWN: no information about the RX L4 checksum
 * - PKT_RX_L4_CKSUM_BAD: the L4 checksum in the packet is wrong
 * - PKT_RX_L4_CKSUM_GOOD: the L4 checksum in the packet is valid
 * - PKT_RX_L4_CKSUM_NONE: the L4 checksum is not correct in the packet
 *   data, but the integrity of the L4 data is verified.
 */
#define PKT_RX_L4_CKSUM_MASK ((1ULL << 3) | (1ULL << 8))

#define PKT_RX_L4_CKSUM_UNKNOWN 0
#define PKT_RX_L4_CKSUM_BAD     (1ULL << 3)
#define PKT_RX_L4_CKSUM_GOOD    (1ULL << 8)
#define PKT_RX_L4_CKSUM_NONE    ((1ULL << 3) | (1ULL << 8))
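
/*
 * Illustrative sketch (not part of the original header): how an application
 * can interpret the two-bit checksum status words using the masks above.
 * The mbuf "m" and the handle_bad_cksum()/sw_verify_cksum() helpers are
 * hypothetical.
 *
 *     uint64_t ip_st = m->ol_flags & PKT_RX_IP_CKSUM_MASK;
 *     uint64_t l4_st = m->ol_flags & PKT_RX_L4_CKSUM_MASK;
 *
 *     if (ip_st == PKT_RX_IP_CKSUM_BAD || l4_st == PKT_RX_L4_CKSUM_BAD)
 *             handle_bad_cksum(m);   // e.g. drop the packet
 *     else if (ip_st == PKT_RX_IP_CKSUM_UNKNOWN ||
 *              l4_st == PKT_RX_L4_CKSUM_UNKNOWN)
 *             sw_verify_cksum(m);    // hardware gave no information
 */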

#define PKT_RX_IEEE1588_PTP  (1ULL << 9)  /**< RX IEEE1588 L2 Ethernet PT Packet. */
#define PKT_RX_IEEE1588_TMST (1ULL << 10) /**< RX IEEE1588 L2/L4 timestamped packet. */
#define PKT_RX_FDIR_ID       (1ULL << 13) /**< FD id reported if FDIR match. */
#define PKT_RX_FDIR_FLX      (1ULL << 14) /**< Flexible bytes reported if FDIR match. */

/**
 * The 2 vlans have been stripped by the hardware and their tci are saved in
 * mbuf->vlan_tci (inner) and mbuf->vlan_tci_outer (outer).
 */
#define PKT_RX_QINQ_STRIPPED (1ULL << 15)

/**
 * When packets are coalesced by a hardware or virtual driver, this flag can
 * be set in the RX mbuf, meaning that the m->tso_segsz field is valid and is
 * set to the segment size of the original packets.
 */
#define PKT_RX_LRO           (1ULL << 16)

/** Indicate that the timestamp field in the mbuf is valid. */
#define PKT_RX_TIMESTAMP     (1ULL << 17)

/** Indicate that security offload processing was applied on the RX packet. */
#define PKT_RX_SEC_OFFLOAD   (1ULL << 18)

/** Indicate that security offload processing failed on the RX packet. */
#define PKT_RX_SEC_OFFLOAD_FAILED (1ULL << 19)

/**
 * The RX packet is a double VLAN, and the outer tci has been saved in
 * mbuf->vlan_tci_outer.
 */
#define PKT_RX_QINQ          (1ULL << 20)

/* add new RX flags here */

/* add new TX flags here */

/** Request security offload processing on the TX packet. */
#define PKT_TX_SEC_OFFLOAD   (1ULL << 43)

/** Offload the MACsec. Must be set by the application to enable it. */
#define PKT_TX_MACSEC        (1ULL << 44)

/**
 * Bits 45:48 are used for the tunnel type when doing a TX offload (TSO or
 * checksum) on a tunneled packet.
 */
#define PKT_TX_TUNNEL_VXLAN     (0x1ULL << 45)
#define PKT_TX_TUNNEL_GRE       (0x2ULL << 45)
#define PKT_TX_TUNNEL_IPIP      (0x3ULL << 45)
#define PKT_TX_TUNNEL_GENEVE    (0x4ULL << 45)
/** TX packet with MPLS-in-UDP RFC 7510 header. */
#define PKT_TX_TUNNEL_MPLSINUDP (0x5ULL << 45)
/* add new TX TUNNEL type here */
#define PKT_TX_TUNNEL_MASK      (0xFULL << 45)

/** Second VLAN insertion (QinQ) flag. */
#define PKT_TX_QINQ_PKT      (1ULL << 49)

/**
 * TCP segmentation offload. To use it, the application must set this flag
 * together with PKT_TX_IPV4 or PKT_TX_IPV6 (plus PKT_TX_IP_CKSUM for IPv4),
 * fill l2_len, l3_len, l4_len and tso_segsz in the mbuf, and write the
 * pseudo header checksum (computed without the IP length) into the TCP
 * header.
 */
#define PKT_TX_TCP_SEG       (1ULL << 50)

#define PKT_TX_IEEE1588_TMST (1ULL << 51) /**< TX IEEE1588 packet to timestamp. */

/**
 * Bits 52+53 encode the L4 checksum request: 00 none, 01 TCP, 10 SCTP,
 * 11 UDP. The application must also fill l2_len and l3_len, and set
 * PKT_TX_IPV4 or PKT_TX_IPV6.
 */
#define PKT_TX_L4_NO_CKSUM   (0ULL << 52) /**< Disable L4 cksum of TX pkt. */
#define PKT_TX_TCP_CKSUM     (1ULL << 52) /**< TCP cksum of TX pkt. computed by NIC. */
#define PKT_TX_SCTP_CKSUM    (2ULL << 52) /**< SCTP cksum of TX pkt. computed by NIC. */
#define PKT_TX_UDP_CKSUM     (3ULL << 52) /**< UDP cksum of TX pkt. computed by NIC. */
#define PKT_TX_L4_MASK       (3ULL << 52) /**< Mask for L4 cksum offload request. */

/**
 * Offload the IP checksum in the hardware. The application must also fill
 * l2_len and l3_len in the mbuf, and should set PKT_TX_IPV4.
 */
#define PKT_TX_IP_CKSUM      (1ULL << 54)

/**
 * Packet is IPv4. This flag must be set when using any TX offload feature
 * (TSO, L3 or L4 checksum); for tunneled packets it refers to the inner
 * headers.
 */
#define PKT_TX_IPV4          (1ULL << 55)

/**
 * Packet is IPv6. This flag must be set when using a TX offload feature
 * (TSO or L4 checksum); for tunneled packets it refers to the inner headers.
 */
#define PKT_TX_IPV6          (1ULL << 56)

#define PKT_TX_VLAN_PKT      (1ULL << 57) /**< TX packet is a 802.1q VLAN packet. */

/**
 * Offload the checksum of the outer IP header in the hardware. The
 * application must also fill outer_l2_len and outer_l3_len, and should set
 * PKT_TX_OUTER_IPV4.
 */
#define PKT_TX_OUTER_IP_CKSUM   (1ULL << 58)

/** Packet outer header is IPv4; required for any outer offload feature. */
#define PKT_TX_OUTER_IPV4    (1ULL << 59)

/** Packet outer header is IPv6; required for outer L4 checksum offload. */
#define PKT_TX_OUTER_IPV6    (1ULL << 60)

/** Bitmask of all supported packet TX offload features flags. */
#define PKT_TX_OFFLOAD_MASK (    \
        PKT_TX_IP_CKSUM |        \
        PKT_TX_L4_MASK |         \
        PKT_TX_OUTER_IP_CKSUM |  \
        PKT_TX_TCP_SEG |         \
        PKT_TX_IEEE1588_TMST |   \
        PKT_TX_QINQ_PKT |        \
        PKT_TX_VLAN_PKT |        \
        PKT_TX_TUNNEL_MASK |     \
        PKT_TX_MACSEC |          \
        PKT_TX_SEC_OFFLOAD)
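
/*
 * Illustrative sketch (not part of the original header): requesting IPv4
 * and TCP checksum offload on TX. Per the flag definitions above, the
 * application must also fill l2_len/l3_len; the sizeof values assume the
 * standard header structs from rte_ether.h and rte_ip.h.
 *
 *     m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *     m->l2_len = sizeof(struct ether_hdr);
 *     m->l3_len = sizeof(struct ipv4_hdr);
 */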

#define __RESERVED        (1ULL << 61) /**< reserved for future mbuf use */

#define IND_ATTACHED_MBUF (1ULL << 62) /**< Indirect attached mbuf */

/* Use final bit of flags to indicate a control mbuf */
#define CTRL_MBUF_FLAG    (1ULL << 63) /**< Mbuf contains control data */

/** Alignment constraint of mbuf private area. */
#define RTE_MBUF_PRIV_ALIGN 8

/** Get the name of a RX offload flag, or NULL if the mask is not valid. */
const char *rte_get_rx_ol_flag_name(uint64_t mask);

/** Dump the list of RX offload flags in a buffer; 0 on success, -1 on error. */
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/** Get the name of a TX offload flag, or NULL if the mask is not valid. */
const char *rte_get_tx_ol_flag_name(uint64_t mask);

/** Dump the list of TX offload flags in a buffer; 0 on success, -1 on error. */
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

/**
 * Some NICs need at least a 2KB buffer to RX standard Ethernet frames
 * without splitting them into multiple segments.
 */
#define RTE_MBUF_DEFAULT_DATAROOM 2048
#define RTE_MBUF_DEFAULT_BUF_SIZE \
    (RTE_MBUF_DEFAULT_DATAROOM + RTE_PKTMBUF_HEADROOM)

/* Define a set of marker types that can be used to refer to set points in
 * the mbuf.
 */
__extension__
typedef void    *MARKER[0];   /**< generic marker for a point in a structure */
__extension__
typedef uint8_t  MARKER8[0];  /**< generic marker with 1B alignment */
__extension__
typedef uint64_t MARKER64[0]; /**< marker that allows us to overwrite 8 bytes
                               * with a single assignment */
/**
 * The generic rte_mbuf, containing a packet mbuf.
 */
struct rte_mbuf {
    MARKER cacheline0;

    void *buf_addr;           /**< Virtual address of segment buffer. */
    /**
     * Physical address of segment buffer.
     * Force alignment to 8-bytes, so as to ensure we have the exact
     * same mbuf cacheline0 layout for 32-bit and 64-bit. This makes
     * working on vector drivers easier.
     */
    RTE_STD_C11
    union {
        rte_iova_t buf_iova;
        rte_iova_t buf_physaddr; /**< deprecated */
    } __rte_aligned(sizeof(rte_iova_t));

    /* next 8 bytes are initialised on RX descriptor rearm */
    MARKER64 rearm_data;
    uint16_t data_off;

    /**
     * Reference counter. Its size should at least equal to the size
     * of port field (16 bits), to support zero-copy broadcast.
     * It should only be accessed using the following functions:
     * rte_mbuf_refcnt_update(), rte_mbuf_refcnt_read(), and
     * rte_mbuf_refcnt_set(). Whether these functions are atomic or
     * non-atomic is controlled by the CONFIG_RTE_MBUF_REFCNT_ATOMIC
     * config option.
     */
    RTE_STD_C11
    union {
        rte_atomic16_t refcnt_atomic; /**< Atomically accessed refcnt */
        uint16_t refcnt;              /**< Non-atomically accessed refcnt */
    };
    uint16_t nb_segs;         /**< Number of segments. */

    /** Input port (16 bits to support more than 256 virtual ports). */
    uint16_t port;

    uint64_t ol_flags;        /**< Offload features. */

    /* remaining bytes are set on RX when pulling packet from descriptor */
    MARKER rx_descriptor_fields1;

    /*
     * The packet type, which is the combination of outer/inner L2, L3, L4
     * and tunnel types. The packet_type is about data really present in the
     * mbuf. Example: if vlan stripping is enabled, a received vlan packet
     * would have RTE_PTYPE_L2_ETHER and not RTE_PTYPE_L2_VLAN because the
     * vlan is stripped from the data.
     */
    RTE_STD_C11
    union {
        uint32_t packet_type; /**< L2/L3/L4 and tunnel information. */
        struct {
            uint32_t l2_type:4;  /**< (Outer) L2 type. */
            uint32_t l3_type:4;  /**< (Outer) L3 type. */
            uint32_t l4_type:4;  /**< (Outer) L4 type. */
            uint32_t tun_type:4; /**< Tunnel type. */
            RTE_STD_C11
            union {
                /** ESP next protocol type, valid if
                 * RTE_PTYPE_TUNNEL_ESP tunnel type is set on both
                 * TX and RX.
                 */
                uint8_t inner_esp_next_proto;
                __extension__
                struct {
                    uint8_t inner_l2_type:4; /**< Inner L2 type. */
                    uint8_t inner_l3_type:4; /**< Inner L3 type. */
                };
            };
            uint32_t inner_l4_type:4; /**< Inner L4 type. */
        };
    };

    uint32_t pkt_len;         /**< Total pkt len: sum of all segments. */
    uint16_t data_len;        /**< Amount of data in segment buffer. */
    /** VLAN TCI (CPU order), valid if PKT_RX_VLAN_STRIPPED is set. */
    uint16_t vlan_tci;

    union {
        uint32_t rss;     /**< RSS hash result if RSS enabled */
        struct {
            RTE_STD_C11
            union {
                struct {
                    uint16_t hash;
                    uint16_t id;
                };
                uint32_t lo; /**< Second 4 flexible bytes */
            };
            uint32_t hi;     /**< First 4 flexible bytes or FD ID, dependent
                              * on PKT_RX_FDIR_* flag in ol_flags.
                              */
        } fdir;           /**< Filter identifier if FDIR enabled */
        struct {
            uint32_t lo;
            uint32_t hi;
        } sched;          /**< Hierarchical scheduler */
        uint32_t usr;     /**< User defined tags. See rte_distributor_process() */
    } hash;               /**< hash information */

    /** Outer VLAN TCI (CPU order), valid if PKT_RX_QINQ_STRIPPED is set. */
    uint16_t vlan_tci_outer;

    uint16_t buf_len;     /**< Length of segment buffer. */

    /** Valid if PKT_RX_TIMESTAMP is set. The unit and time reference are
     * not normalized but are always the same for a given port.
     */
    uint64_t timestamp;

    /* second cache line - fields only used in slow path or on TX */
    MARKER cacheline1 __rte_cache_min_aligned;

    RTE_STD_C11
    union {
        void *userdata;   /**< Can be used for external metadata */
        uint64_t udata64; /**< Allow 8-byte userdata on 32-bit */
    };

    struct rte_mempool *pool; /**< Pool from which mbuf was allocated. */
    struct rte_mbuf *next;    /**< Next segment of scattered packet. */

    /* fields to support TX offloads */
    RTE_STD_C11
    union {
        uint64_t tx_offload;   /**< combined for easy fetch */
        __extension__
        struct {
            uint64_t l2_len:7;
            /**< L2 (MAC) Header Length for non-tunneling pkt.
             * Outer_L4_len + ... + Inner_L2_len for tunneling pkt.
             */
            uint64_t l3_len:9;     /**< L3 (IP) Header Length. */
            uint64_t l4_len:8;     /**< L4 (TCP/UDP) Header Length. */
            uint64_t tso_segsz:16; /**< TCP TSO segment size */

            /* fields for TX offloading of tunnels */
            uint64_t outer_l3_len:9; /**< Outer L3 (IP) Hdr Length. */
            uint64_t outer_l2_len:7; /**< Outer L2 (MAC) Hdr Length. */

            /* uint64_t unused:8; */
        };
    };

    /** Size of the application private data. In case of an indirect
     * mbuf, it stores the direct mbuf private data size.
     */
    uint16_t priv_size;

    /** Timesync flags for use with IEEE1588. */
    uint16_t timesync;

    /** Sequence number. See also rte_reorder_insert(). */
    uint32_t seqn;

} __rte_cache_aligned;
/** Maximum number of nb_segs allowed. */
#define RTE_MBUF_MAX_NB_SEGS UINT16_MAX

/**
 * Prefetch the first part of the mbuf
 *
 * The first 64 bytes of the mbuf correspond to fields that are used early
 * in the receive path.
 */
static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
    rte_prefetch0(&m->cacheline0);
}

/**
 * Prefetch the second part of the mbuf
 *
 * The next 64 bytes of the mbuf correspond to fields that are used in the
 * transmit path. If the cache line of the architecture is bigger than 64B,
 * this function does nothing as it is expected that the full mbuf is
 * already cache-resident.
 */
static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
    rte_prefetch0(&m->cacheline1);
#else
    RTE_SET_USED(m);
#endif
}
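
/*
 * Illustrative sketch (not part of the original header): a common RX-burst
 * pattern that prefetches the next mbuf while processing the current one.
 * The pkts[]/nb_rx variables and process() helper are hypothetical.
 *
 *     uint16_t i;
 *     for (i = 0; i < nb_rx; i++) {
 *             if (i + 1 < nb_rx)
 *                     rte_mbuf_prefetch_part1(pkts[i + 1]);
 *             process(pkts[i]);
 *     }
 */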

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

/**
 * Return the IO address of the beginning of the mbuf data.
 */
static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
    return mb->buf_iova + mb->data_off;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr(const struct rte_mbuf *mb)
{
    return rte_mbuf_data_iova(mb);
}

/**
 * Return the default IO address of the beginning of the mbuf data.
 *
 * This function is used by drivers in their receive function, as it returns
 * the location where data should be written by the NIC, taking the default
 * headroom into account.
 */
static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
    return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
}

__rte_deprecated
static inline phys_addr_t
rte_mbuf_data_dma_addr_default(const struct rte_mbuf *mb)
{
    return rte_mbuf_data_iova_default(mb);
}

/**
 * Return the mbuf owning the data buffer address of an indirect mbuf.
 */
static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
    return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

/**
 * Return the buffer address embedded in the given mbuf.
 */
static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
    char *buffer_addr;
    buffer_addr = (char *)md + sizeof(*md) + rte_pktmbuf_priv_size(md->pool);
    return buffer_addr;
}

/** Returns TRUE if given mbuf is indirect, or FALSE otherwise. */
#define RTE_MBUF_INDIRECT(mb) ((mb)->ol_flags & IND_ATTACHED_MBUF)

/** Returns TRUE if given mbuf is direct, or FALSE otherwise. */
#define RTE_MBUF_DIRECT(mb) (!RTE_MBUF_INDIRECT(mb))

/** Private data in case of pktmbuf pool */
struct rte_pktmbuf_pool_private {
    uint16_t mbuf_data_room_size; /**< Size of data space in each mbuf. */
    uint16_t mbuf_priv_size;      /**< Size of private area in each mbuf. */
};

#ifdef RTE_LIBRTE_MBUF_DEBUG

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

/** check mbuf type in debug mode */
#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
    return (uint16_t)(rte_atomic16_read(&m->refcnt_atomic));
}

/**
 * Sets an mbuf's refcnt to a defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
    rte_atomic16_set(&m->refcnt_atomic, new_value);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
    return (uint16_t)(rte_atomic16_add_return(&m->refcnt_atomic, value));
}

/**
 * Adds given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
    /*
     * The atomic_add is an expensive operation, so we don't want to
     * call it in the case where we know we are the unique holder of
     * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
     * operation has to be used because concurrent accesses on the
     * reference counter can occur.
     */
    if (likely(rte_mbuf_refcnt_read(m) == 1)) {
        rte_mbuf_refcnt_set(m, 1 + value);
        return 1 + value;
    }

    return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
    m->refcnt = (uint16_t)(m->refcnt + value);
    return m->refcnt;
}

/**
 * Adds given value to an mbuf's refcnt and returns its new value.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
    return __rte_mbuf_refcnt_update(m, value);
}

/**
 * Reads the value of an mbuf's refcnt.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
    return m->refcnt;
}

/**
 * Sets an mbuf's refcnt to the defined value.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
    m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

/** Mbuf prefetch */
#define RTE_MBUF_PREFETCH_TO_FREE(m) do {   \
        if ((m) != NULL)                    \
                rte_prefetch0(m);           \
} while (0)


/**
 * Sanity checks on an mbuf.
 *
 * Check the consistency of the given mbuf. The function will cause a panic
 * if corruption is detected.
 *
 * @param m
 *   The mbuf to be checked.
 * @param is_header
 *   True if the mbuf is a packet header, false if it is a sub-segment of a
 *   packet (in this case, some fields like nb_segs are not checked).
 */
void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

#define MBUF_RAW_ALLOC_CHECK(m) do {                       \
        RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);          \
        RTE_ASSERT((m)->next == NULL);                     \
        RTE_ASSERT((m)->nb_segs == 1);                     \
        __rte_mbuf_sanity_check(m, 0);                     \
} while (0)

/**
 * Allocate an uninitialized mbuf from mempool *mp*.
 *
 * This function can be used by PMDs (in the RX functions) to allocate an
 * uninitialized mbuf. The driver is responsible for initializing all the
 * required fields. See rte_pktmbuf_reset(). For standard needs, prefer
 * rte_pktmbuf_alloc().
 */
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
    struct rte_mbuf *m;
    void *mb = NULL;

    if (rte_mempool_get(mp, &mb) < 0)
        return NULL;
    m = (struct rte_mbuf *)mb;
    MBUF_RAW_ALLOC_CHECK(m);
    return m;
}

/**
 * Put an mbuf back into its original mempool.
 *
 * The caller must ensure that the mbuf is direct and properly reinitialized
 * (refcnt=1, next=NULL, nb_segs=1), as done by rte_pktmbuf_prefree_seg().
 */
static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
    RTE_ASSERT(RTE_MBUF_DIRECT(m));
    RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
    RTE_ASSERT(m->next == NULL);
    RTE_ASSERT(m->nb_segs == 1);
    __rte_mbuf_sanity_check(m, 0);
    rte_mempool_put(m->pool, m);
}

/* compat with older versions */
__rte_deprecated
static inline void
__rte_mbuf_raw_free(struct rte_mbuf *m)
{
    rte_mbuf_raw_free(m);
}

/* Operations on ctrl mbuf */

/**
 * The control mbuf constructor.
 *
 * This function initializes some fields in an mbuf structure that are not
 * modified by the user once created (mbuf type, origin pool, buffer start
 * address, and so on). This function is given as a callback function to
 * rte_mempool_obj_iter() or rte_mempool_create() at pool creation time.
 */
void rte_ctrlmbuf_init(struct rte_mempool *mp, void *opaque_arg,
        void *m, unsigned i);

/** Allocate a new mbuf (type is ctrl) from mempool *mp*. */
#define rte_ctrlmbuf_alloc(mp) rte_pktmbuf_alloc(mp)

/** Free a control mbuf back into its original mempool. */
#define rte_ctrlmbuf_free(m) rte_pktmbuf_free(m)

/** A macro that returns the pointer to the carried data. */
#define rte_ctrlmbuf_data(m) ((char *)((m)->buf_addr) + (m)->data_off)

/** A macro that returns the length of the carried data. */
#define rte_ctrlmbuf_len(m) rte_pktmbuf_data_len(m)

/**
 * Tests if an mbuf is a control mbuf.
 *
 * @return
 *   True (1) if the mbuf is a control mbuf, false (0) otherwise.
 */
static inline int
rte_is_ctrlmbuf(struct rte_mbuf *m)
{
    return !!(m->ol_flags & CTRL_MBUF_FLAG);
}
1007 
1008 /* Operations on pkt mbuf */
1009 
1029 void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
1030  void *m, unsigned i);
1031 
1032 
1050 void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
1051 
1086 struct rte_mempool *
1087 rte_pktmbuf_pool_create(const char *name, unsigned n,
1088  unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
1089  int socket_id);
1090 
1102 static inline uint16_t
1104 {
1105  struct rte_pktmbuf_pool_private *mbp_priv;
1106 
1107  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1108  return mbp_priv->mbuf_data_room_size;
1109 }
1110 
1123 static inline uint16_t
1125 {
1126  struct rte_pktmbuf_pool_private *mbp_priv;
1127 
1128  mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
1129  return mbp_priv->mbuf_priv_size;
1130 }
1131 
1140 static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
1141 {
1142  m->data_off = RTE_MIN(RTE_PKTMBUF_HEADROOM, (uint16_t)m->buf_len);
1143 }

/**
 * Reset the fields of a packet mbuf to their default values.
 *
 * The given mbuf must have only one segment.
 */
#define MBUF_INVALID_PORT UINT16_MAX

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
    m->next = NULL;
    m->pkt_len = 0;
    m->tx_offload = 0;
    m->vlan_tci = 0;
    m->vlan_tci_outer = 0;
    m->nb_segs = 1;
    m->port = MBUF_INVALID_PORT;

    m->ol_flags = 0;
    m->packet_type = 0;
    rte_pktmbuf_reset_headroom(m);

    m->data_len = 0;
    __rte_mbuf_sanity_check(m, 1);
}

/**
 * Allocate a new mbuf from a mempool.
 *
 * The new mbuf contains one segment with a length of 0. The data pointer is
 * initialized to have some bytes of headroom in the buffer (if the buffer
 * size allows).
 */
static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
    struct rte_mbuf *m;
    if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
        rte_pktmbuf_reset(m);
    return m;
}
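
/*
 * Illustrative sketch (not part of the original header): allocate a packet
 * mbuf, append room for payload and free it. The pool "mp" is assumed to
 * have been created with rte_pktmbuf_pool_create(); memset() comes from
 * <string.h>.
 *
 *     struct rte_mbuf *m = rte_pktmbuf_alloc(mp);
 *     if (m != NULL) {
 *             char *p = rte_pktmbuf_append(m, 64);
 *             if (p != NULL)
 *                     memset(p, 0, 64);   // fill the 64 appended bytes
 *             rte_pktmbuf_free(m);
 *     }
 */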

/**
 * Allocate a bulk of mbufs, initialize refcnt and reset the fields to
 * default values.
 *
 * @return
 *   - 0: Success
 *   - -ENOENT: Not enough entries in the mempool; no mbufs are retrieved.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
        struct rte_mbuf **mbufs, unsigned count)
{
    unsigned idx = 0;
    int rc;

    rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
    if (unlikely(rc))
        return rc;

    /* To understand Duff's device on loop unwinding optimization, see
     * https://en.wikipedia.org/wiki/Duff's_device.
     * Here a while () loop is used rather than a do {} while () loop to
     * avoid an extra check when count is zero.
     */
    switch (count % 4) {
    case 0:
        while (idx != count) {
            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
            rte_pktmbuf_reset(mbufs[idx]);
            idx++;
            /* fall-through */
    case 3:
            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
            rte_pktmbuf_reset(mbufs[idx]);
            idx++;
            /* fall-through */
    case 2:
            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
            rte_pktmbuf_reset(mbufs[idx]);
            idx++;
            /* fall-through */
    case 1:
            MBUF_RAW_ALLOC_CHECK(mbufs[idx]);
            rte_pktmbuf_reset(mbufs[idx]);
            idx++;
            /* fall-through */
        }
    }
    return 0;
}
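
/*
 * Illustrative sketch (not part of the original header): bulk allocation is
 * all-or-nothing, so a single return-code check covers the whole array. The
 * burst size of 32 and the pool "mp" are hypothetical.
 *
 *     struct rte_mbuf *burst[32];
 *     unsigned i;
 *
 *     if (rte_pktmbuf_alloc_bulk(mp, burst, 32) == 0) {
 *             // ... use burst[0..31] ...
 *             for (i = 0; i < 32; i++)
 *                     rte_pktmbuf_free(burst[i]);
 *     }
 */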

/**
 * Attach a packet mbuf to another packet mbuf.
 *
 * After attachment we refer to the mbuf we attached as 'indirect', and to
 * the mbuf we attached to as 'direct'. The direct mbuf's reference counter
 * is incremented.
 *
 * Currently not supported:
 *  - attaching to an already indirect mbuf (mi has to be direct);
 *  - an mbuf we are trying to attach (mi) that is used by someone else,
 *    i.e. its reference counter is greater than 1.
 */
static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
    struct rte_mbuf *md;

    RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
        rte_mbuf_refcnt_read(mi) == 1);

    /* if m is not direct, get the mbuf that embeds the data */
    if (RTE_MBUF_DIRECT(m))
        md = m;
    else
        md = rte_mbuf_from_indirect(m);

    rte_mbuf_refcnt_update(md, 1);
    mi->priv_size = m->priv_size;
    mi->buf_iova = m->buf_iova;
    mi->buf_addr = m->buf_addr;
    mi->buf_len = m->buf_len;

    mi->data_off = m->data_off;
    mi->data_len = m->data_len;
    mi->port = m->port;
    mi->vlan_tci = m->vlan_tci;
    mi->vlan_tci_outer = m->vlan_tci_outer;
    mi->tx_offload = m->tx_offload;
    mi->hash = m->hash;

    mi->next = NULL;
    mi->pkt_len = mi->data_len;
    mi->nb_segs = 1;
    mi->ol_flags = m->ol_flags | IND_ATTACHED_MBUF;
    mi->packet_type = m->packet_type;
    mi->timestamp = m->timestamp;

    __rte_mbuf_sanity_check(mi, 1);
    __rte_mbuf_sanity_check(m, 0);
}

/**
 * Detach an indirect packet mbuf.
 *
 *  - restore original mbuf address and length values;
 *  - reset pktmbuf data and data_len to their default values;
 *  - decrement the direct mbuf's reference counter; when it reaches 0,
 *    the direct mbuf is freed.
 *
 * All other fields of the given packet mbuf are left intact.
 */
static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
    struct rte_mbuf *md = rte_mbuf_from_indirect(m);
    struct rte_mempool *mp = m->pool;
    uint32_t mbuf_size, buf_len, priv_size;

    priv_size = rte_pktmbuf_priv_size(mp);
    mbuf_size = sizeof(struct rte_mbuf) + priv_size;
    buf_len = rte_pktmbuf_data_room_size(mp);

    m->priv_size = priv_size;
    m->buf_addr = (char *)m + mbuf_size;
    m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
    m->buf_len = (uint16_t)buf_len;
    rte_pktmbuf_reset_headroom(m);
    m->data_len = 0;
    m->ol_flags = 0;

    if (rte_mbuf_refcnt_update(md, -1) == 0) {
        md->next = NULL;
        md->nb_segs = 1;
        rte_mbuf_refcnt_set(md, 1);
        rte_mbuf_raw_free(md);
    }
}

/**
 * Decrease the reference counter and unlink an mbuf segment.
 *
 * This function does the same as a free, except that it does not return
 * the segment to its pool. It decreases the reference counter, and if it
 * reaches 0, an indirect mbuf is detached from its parent.
 *
 * @return
 *   - (m) if it is the last reference. It can be recycled or freed.
 *   - (NULL) if the mbuf still has remaining references on it.
 */
static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
    __rte_mbuf_sanity_check(m, 0);

    if (likely(rte_mbuf_refcnt_read(m) == 1)) {

        if (RTE_MBUF_INDIRECT(m))
            rte_pktmbuf_detach(m);

        if (m->next != NULL) {
            m->next = NULL;
            m->nb_segs = 1;
        }

        return m;

    } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

        if (RTE_MBUF_INDIRECT(m))
            rte_pktmbuf_detach(m);

        if (m->next != NULL) {
            m->next = NULL;
            m->nb_segs = 1;
        }
        rte_mbuf_refcnt_set(m, 1);

        return m;
    }
    return NULL;
}

/* deprecated, replaced by rte_pktmbuf_prefree_seg() */
__rte_deprecated
static inline struct rte_mbuf *
__rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
    return rte_pktmbuf_prefree_seg(m);
}

/**
 * Free a segment of a packet mbuf into its original mempool.
 *
 * Free an mbuf, without parsing other segments in case of chained buffers.
 */
static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
    m = rte_pktmbuf_prefree_seg(m);
    if (likely(m != NULL))
        rte_mbuf_raw_free(m);
}

/**
 * Free a packet mbuf back into its original mempool.
 *
 * Free an mbuf, and all its segments in case of chained buffers. Each
 * segment is added back into its original mempool.
 */
static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
    struct rte_mbuf *m_next;

    if (m != NULL)
        __rte_mbuf_sanity_check(m, 1);

    while (m != NULL) {
        m_next = m->next;
        rte_pktmbuf_free_seg(m);
        m = m_next;
    }
}

/**
 * Creates a "clone" of the given packet mbuf.
 *
 * Walks through all segments of the given packet mbuf, and for each of them:
 *  - creates a new packet mbuf from the given pool;
 *  - attaches the newly created mbuf to the segment.
 * Then updates pkt_len and nb_segs of the "clone" packet mbuf to match the
 * values of the original packet mbuf.
 *
 * @return
 *   - The pointer to the new "clone" mbuf on success.
 *   - NULL if allocation fails.
 */
static inline struct rte_mbuf *rte_pktmbuf_clone(struct rte_mbuf *md,
        struct rte_mempool *mp)
{
    struct rte_mbuf *mc, *mi, **prev;
    uint32_t pktlen;
    uint16_t nseg;

    if (unlikely((mc = rte_pktmbuf_alloc(mp)) == NULL))
        return NULL;

    mi = mc;
    prev = &mi->next;
    pktlen = md->pkt_len;
    nseg = 0;

    do {
        nseg++;
        rte_pktmbuf_attach(mi, md);
        *prev = mi;
        prev = &mi->next;
    } while ((md = md->next) != NULL &&
        (mi = rte_pktmbuf_alloc(mp)) != NULL);

    *prev = NULL;
    mc->nb_segs = nseg;
    mc->pkt_len = pktlen;

    /* Allocation of new indirect segment failed */
    if (unlikely(mi == NULL)) {
        rte_pktmbuf_free(mc);
        return NULL;
    }

    __rte_mbuf_sanity_check(mc, 1);
    return mc;
}
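
/*
 * Illustrative sketch (not part of the original header): cloning for
 * zero-copy fan-out. Each clone shares the original's data buffers via
 * indirect mbufs, so freeing the clone only drops reference counts. The
 * mbuf "md" and the pool "clone_pool" are hypothetical.
 *
 *     struct rte_mbuf *c = rte_pktmbuf_clone(md, clone_pool);
 *     if (c != NULL) {
 *             // transmit or process c; md's data is not copied
 *             rte_pktmbuf_free(c);    // releases refcnt on md's buffers
 *     }
 */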

/**
 * Adds the given value to the refcnt of all packet mbuf segments.
 */
static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
    __rte_mbuf_sanity_check(m, 1);

    do {
        rte_mbuf_refcnt_update(m, v);
    } while ((m = m->next) != NULL);
}

/**
 * Get the headroom in a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
    __rte_mbuf_sanity_check(m, 0);
    return m->data_off;
}

/**
 * Get the tailroom of a packet mbuf.
 */
static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
    __rte_mbuf_sanity_check(m, 0);
    return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
        m->data_len);
}

/**
 * Get the last segment of the packet.
 */
static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
    struct rte_mbuf *m2 = (struct rte_mbuf *)m;

    __rte_mbuf_sanity_check(m, 1);
    while (m2->next != NULL)
        m2 = m2->next;
    return m2;
}

/**
 * A macro that points to an offset into the data in the mbuf.
 *
 * The returned pointer is cast to type t. Before using this macro, the
 * user must ensure that the first segment is large enough to accommodate
 * its data.
 */
#define rte_pktmbuf_mtod_offset(m, t, o) \
    ((t)((char *)(m)->buf_addr + (m)->data_off + (o)))

/**
 * A macro that points to the start of the data in the mbuf.
 */
#define rte_pktmbuf_mtod(m, t) rte_pktmbuf_mtod_offset(m, t, 0)

/**
 * A macro that returns the IO address that points to an offset of the
 * start of the data in the mbuf.
 */
#define rte_pktmbuf_iova_offset(m, o) \
    (rte_iova_t)((m)->buf_iova + (m)->data_off + (o))

/* deprecated */
#define rte_pktmbuf_mtophys_offset(m, o) \
    rte_pktmbuf_iova_offset(m, o)

/**
 * A macro that returns the IO address that points to the start of the
 * data in the mbuf.
 */
#define rte_pktmbuf_iova(m) rte_pktmbuf_iova_offset(m, 0)

/* deprecated */
#define rte_pktmbuf_mtophys(m) rte_pktmbuf_iova(m)

/**
 * A macro that returns the length of the packet (sum of the data lengths
 * of all segments).
 */
#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

/**
 * A macro that returns the length of the segment.
 */
#define rte_pktmbuf_data_len(m) ((m)->data_len)
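
/*
 * Illustrative sketch (not part of the original header): using the mtod
 * macros to access packet headers in the first segment. Assumes contiguous
 * L2/L3 headers and the ether_hdr/ipv4_hdr types from rte_ether.h and
 * rte_ip.h.
 *
 *     struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
 *     struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
 *                                                   sizeof(struct ether_hdr));
 */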

/**
 * Prepend len bytes to an mbuf data area.
 *
 * Returns a pointer to the new data start address. If there is not enough
 * headroom in the first segment, the function returns NULL, without
 * modifying the mbuf.
 */
static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
        uint16_t len)
{
    __rte_mbuf_sanity_check(m, 1);

    if (unlikely(len > rte_pktmbuf_headroom(m)))
        return NULL;

    m->data_off -= len;
    m->data_len = (uint16_t)(m->data_len + len);
    m->pkt_len = (m->pkt_len + len);

    return (char *)m->buf_addr + m->data_off;
}

/**
 * Append len bytes to an mbuf.
 *
 * Append len bytes to an mbuf and return a pointer to the start address of
 * the added data. If there is not enough tailroom in the last segment, the
 * function returns NULL, without modifying the mbuf.
 */
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
    void *tail;
    struct rte_mbuf *m_last;

    __rte_mbuf_sanity_check(m, 1);

    m_last = rte_pktmbuf_lastseg(m);
    if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
        return NULL;

    tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
    m_last->data_len = (uint16_t)(m_last->data_len + len);
    m->pkt_len = (m->pkt_len + len);
    return (char *)tail;
}

/**
 * Remove len bytes at the beginning of an mbuf.
 *
 * Returns a pointer to the start address of the new data area. If the
 * length is greater than the length of the first segment, the function
 * fails and returns NULL, without modifying the mbuf.
 */
static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
    __rte_mbuf_sanity_check(m, 1);

    if (unlikely(len > m->data_len))
        return NULL;

    m->data_len = (uint16_t)(m->data_len - len);
    m->data_off += len;
    m->pkt_len = (m->pkt_len - len);
    return (char *)m->buf_addr + m->data_off;
}

/**
 * Remove len bytes of data at the end of the mbuf.
 *
 * If the length is greater than the length of the last segment, the
 * function fails and returns -1 without modifying the mbuf.
 */
static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
    struct rte_mbuf *m_last;

    __rte_mbuf_sanity_check(m, 1);

    m_last = rte_pktmbuf_lastseg(m);
    if (unlikely(len > m_last->data_len))
        return -1;

    m_last->data_len = (uint16_t)(m_last->data_len - len);
    m->pkt_len = (m->pkt_len - len);
    return 0;
}
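
/*
 * Illustrative sketch (not part of the original header): stripping and
 * later restoring a 4-byte encapsulation header at the front of the data
 * with adj/prepend. The 4-byte size is hypothetical.
 *
 *     if (rte_pktmbuf_adj(m, 4) == NULL)
 *             goto err;     // first segment shorter than 4 bytes
 *     // ... process the inner packet ...
 *     char *hdr = rte_pktmbuf_prepend(m, 4);
 *     if (hdr == NULL)
 *             goto err;     // not enough headroom left
 */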

/**
 * Test if mbuf data is contiguous (i.e. with only one segment).
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
    __rte_mbuf_sanity_check(m, 1);
    return !!(m->nb_segs == 1);
}

/**
 * @internal used by rte_pktmbuf_read().
 */
const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
    uint32_t len, void *buf);

/**
 * Read len data bytes in an mbuf at the specified offset.
 *
 * If the data is contiguous, return the pointer in the mbuf data; else copy
 * the data into the buffer provided by the user and return its pointer.
 * Return NULL if the offset plus the length is out of the mbuf data.
 */
static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
    uint32_t off, uint32_t len, void *buf)
{
    if (likely(off + len <= rte_pktmbuf_data_len(m)))
        return rte_pktmbuf_mtod_offset(m, char *, off);
    else
        return __rte_pktmbuf_read(m, off, len, buf);
}

/**
 * Chain an mbuf to another, thereby creating a segmented packet.
 *
 * Note: the implementation does a linear walk over the segments to find the
 * tail entry. For cases with many segments, it is better to chain the
 * entries manually.
 *
 * @return
 *   - 0, on success.
 *   - -EOVERFLOW, if the chain segment limit is exceeded.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
    struct rte_mbuf *cur_tail;

    /* Check for number-of-segments-overflow */
    if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
        return -EOVERFLOW;

    /* Chain 'tail' onto the old tail */
    cur_tail = rte_pktmbuf_lastseg(head);
    cur_tail->next = tail;

    /* accumulate number of segments and total length. */
    head->nb_segs += tail->nb_segs;
    head->pkt_len += tail->pkt_len;

    /* pkt_len is only set in the head */
    tail->pkt_len = tail->data_len;

    return 0;
}
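
/*
 * Illustrative sketch (not part of the original header): building a two
 * segment packet from separately allocated mbufs "head" and "tail". On
 * failure nothing was linked, so the segments must be freed individually.
 *
 *     if (rte_pktmbuf_chain(head, tail) != 0) {
 *             rte_pktmbuf_free(tail);   // too many segments
 *             rte_pktmbuf_free(head);
 *     }
 *     // on success, freeing "head" now frees the whole chain
 */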

/**
 * Validate general requirements for TX offload in mbuf.
 *
 * This function checks the correctness and completeness of the request
 * descriptor.
 *
 * @return
 *   0 if the packet is valid.
 */
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
    uint64_t ol_flags = m->ol_flags;
    uint64_t inner_l3_offset = m->l2_len;

    /* Does packet set any of available offloads? */
    if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
        return 0;

    if (ol_flags & (PKT_TX_OUTER_IP_CKSUM | PKT_TX_OUTER_IPV6))
        inner_l3_offset += m->outer_l2_len + m->outer_l3_len;

    /* Headers are fragmented */
    if (rte_pktmbuf_data_len(m) < inner_l3_offset + m->l3_len + m->l4_len)
        return -ENOTSUP;

    /* IP checksum can be counted only for IPv4 packet */
    if ((ol_flags & PKT_TX_IP_CKSUM) && (ol_flags & PKT_TX_IPV6))
        return -EINVAL;

    /* IP type not set when required */
    if (ol_flags & (PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
        if (!(ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)))
            return -EINVAL;

    /* Check requirements for TSO packet */
    if (ol_flags & PKT_TX_TCP_SEG)
        if ((m->tso_segsz == 0) ||
                ((ol_flags & PKT_TX_IPV4) &&
                !(ol_flags & PKT_TX_IP_CKSUM)))
            return -EINVAL;

    /* PKT_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
    if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
            !(ol_flags & PKT_TX_OUTER_IPV4))
        return -EINVAL;

    return 0;
}
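
/*
 * Illustrative sketch (not part of the original header): a TSO request that
 * satisfies the checks above. IPv4 TSO needs PKT_TX_IP_CKSUM, a non-zero
 * tso_segsz, and the headers present in the first segment; the header
 * lengths used here are assumptions for a plain Ethernet/IPv4/TCP frame.
 *
 *     m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM |
 *                    PKT_TX_TCP_SEG;
 *     m->l2_len = 14; m->l3_len = 20; m->l4_len = 20;
 *     m->tso_segsz = 1460;
 *     // returns 0 if the headers fit in the first segment
 *     int ret = rte_validate_tx_offload(m);
 */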

/**
 * Linearize data in mbuf.
 *
 * This function moves the mbuf data into the first segment if there is
 * enough tailroom. The subsequent segments are unchained and freed.
 *
 * @return
 *   - 0, on success
 *   - -1, on error
 */
static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
    int seg_len, copy_len;
    struct rte_mbuf *m;
    struct rte_mbuf *m_next;
    char *buffer;

    if (rte_pktmbuf_is_contiguous(mbuf))
        return 0;

    /* Extend first segment to the total packet length */
    copy_len = rte_pktmbuf_pkt_len(mbuf) - rte_pktmbuf_data_len(mbuf);

    if (unlikely(copy_len > rte_pktmbuf_tailroom(mbuf)))
        return -1;

    buffer = rte_pktmbuf_mtod_offset(mbuf, char *, mbuf->data_len);
    mbuf->data_len = (uint16_t)(mbuf->pkt_len);

    /* Append data from next segments to the first one */
    m = mbuf->next;
    while (m != NULL) {
        m_next = m->next;

        seg_len = rte_pktmbuf_data_len(m);
        rte_memcpy(buffer, rte_pktmbuf_mtod(m, char *), seg_len);
        buffer += seg_len;

        rte_pktmbuf_free_seg(m);
        m = m_next;
    }

    mbuf->next = NULL;
    mbuf->nb_segs = 1;

    return 0;
}
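
/*
 * Illustrative sketch (not part of the original header): ensuring a packet
 * is contiguous before handing it to code that cannot walk segment chains.
 *
 *     if (rte_pktmbuf_linearize(m) < 0) {
 *             rte_pktmbuf_free(m);  // not enough tailroom in first segment
 *             return;
 *     }
 *     // m->data_len == m->pkt_len here; safe to use rte_pktmbuf_mtod()
 */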

/**
 * Dump an mbuf structure to a file.
 *
 * Dump all fields for the given packet mbuf and all its associated segments
 * (in the case of a chained buffer).
 */
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */