DPDK 21.11.2
Loading...
Searching...
No Matches
rte_mbuf.h
Go to the documentation of this file.
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation.
3 * Copyright 2014 6WIND S.A.
4 */
5
6#ifndef _RTE_MBUF_H_
7#define _RTE_MBUF_H_
8
34#include <stdint.h>
35#include <rte_compat.h>
36#include <rte_common.h>
37#include <rte_config.h>
38#include <rte_mempool.h>
39#include <rte_memory.h>
40#include <rte_prefetch.h>
42#include <rte_byteorder.h>
43#include <rte_mbuf_ptype.h>
44#include <rte_mbuf_core.h>
45
46#ifdef __cplusplus
47extern "C" {
48#endif
49
58const char *rte_get_rx_ol_flag_name(uint64_t mask);
59
72int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
73
84const char *rte_get_tx_ol_flag_name(uint64_t mask);
85
98int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);
99
110static inline void
112{
113 rte_prefetch0(&m->cacheline0);
114}
115
127static inline void
129{
130#if RTE_CACHE_LINE_SIZE == 64
131 rte_prefetch0(&m->cacheline1);
132#else
133 RTE_SET_USED(m);
134#endif
135}
136
137
138static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);
139
148static inline rte_iova_t
150{
151 return mb->buf_iova + mb->data_off;
152}
153
166static inline rte_iova_t
168{
169 return mb->buf_iova + RTE_PKTMBUF_HEADROOM;
170}
171
180static inline struct rte_mbuf *
182{
183 return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
184}
185
201static inline char *
203{
204 return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
205}
206
215static inline char *
217{
218 return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
219}
220
234static inline char *
236{
237 return rte_mbuf_buf_addr(md, md->pool);
238}
239
252static inline void *
254{
255 return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
256}
257
266 uint16_t mbuf_priv_size;
267 uint32_t flags;
268};
269
278static inline uint32_t
280{
281 struct rte_pktmbuf_pool_private *mbp_priv;
282
283 mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
284 return mbp_priv->flags;
285}
286
293#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)
294
302#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
303 (rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)
304
305#ifdef RTE_LIBRTE_MBUF_DEBUG
306
308#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)
309
310#else /* RTE_LIBRTE_MBUF_DEBUG */
311
313#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)
314
315#endif /* RTE_LIBRTE_MBUF_DEBUG */
316
317#ifdef RTE_MBUF_REFCNT_ATOMIC
318
/**
 * Read the current value of an mbuf's reference counter.
 *
 * @param m
 *   Mbuf to read.
 * @return
 *   The current reference count.
 */
static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	/* Relaxed load: only the value is needed, no ordering guarantees. */
	return __atomic_load_n(&m->refcnt, __ATOMIC_RELAXED);
}
331
/**
 * Set an mbuf's reference counter to the given value.
 *
 * Plain (relaxed) store: intended for when the caller is the sole
 * owner of @p m, e.g. right after allocation or final free.
 *
 * @param m
 *   Mbuf to update.
 * @param new_value
 *   Value to set the reference counter to.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	__atomic_store_n(&m->refcnt, new_value, __ATOMIC_RELAXED);
}
344
/* internal: atomically add @value to the refcnt and return the new value.
 * ACQ_REL ordering so the thread that drops the count to zero observes
 * all prior writes to the mbuf made by other owners. */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __atomic_add_fetch(&m->refcnt, (uint16_t)value,
			 __ATOMIC_ACQ_REL);
}
352
/**
 * Add @p value to an mbuf's reference counter and return the new value.
 *
 * @param m
 *   Mbuf to update.
 * @param value
 *   Value to add (may be negative to decrement).
 * @return
 *   The updated reference count.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		/* Sole owner: a plain read-modify-write is sufficient. */
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}
380
381#else /* ! RTE_MBUF_REFCNT_ATOMIC */
382
383/* internal */
384static inline uint16_t
385__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
386{
387 m->refcnt = (uint16_t)(m->refcnt + value);
388 return m->refcnt;
389}
390
/**
 * Add @p value to an mbuf's reference counter (non-atomic build).
 *
 * @return
 *   The updated reference count.
 */
static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	uint16_t refcnt = __rte_mbuf_refcnt_update(m, value);

	return refcnt;
}
399
403static inline uint16_t
405{
406 return m->refcnt;
407}
408
/**
 * Set an mbuf's reference counter (non-atomic build: plain store).
 *
 * @param m
 *   Mbuf to update.
 * @param new_value
 *   Value to set the reference counter to.
 */
static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}
417
418#endif /* RTE_MBUF_REFCNT_ATOMIC */
419
428static inline uint16_t
430{
431 return __atomic_load_n(&shinfo->refcnt, __ATOMIC_RELAXED);
432}
433
442static inline void
444 uint16_t new_value)
445{
446 __atomic_store_n(&shinfo->refcnt, new_value, __ATOMIC_RELAXED);
447}
448
460static inline uint16_t
462 int16_t value)
463{
464 if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
465 ++value;
466 rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
467 return (uint16_t)value;
468 }
469
470 return __atomic_add_fetch(&shinfo->refcnt, (uint16_t)value,
471 __ATOMIC_ACQ_REL);
472}
473
475#define RTE_MBUF_PREFETCH_TO_FREE(m) do { \
476 if ((m) != NULL) \
477 rte_prefetch0(m); \
478} while (0)
479
480
493void
494rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);
495
515int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
516 const char **reason);
517
530static __rte_always_inline void
532{
533 RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
534 RTE_ASSERT(m->next == NULL);
535 RTE_ASSERT(m->nb_segs == 1);
537}
538
540#define MBUF_RAW_ALLOC_CHECK(m) __rte_mbuf_raw_sanity_check(m)
541
/**
 * Allocate an uninitialized mbuf from mempool @p mp.
 *
 * This is a raw mempool get: no field reset is performed; pool objects
 * are presumably pre-initialized (e.g. by rte_pktmbuf_init()) — see the
 * invariants asserted by __rte_mbuf_raw_sanity_check().
 *
 * NOTE(review): upstream also calls __rte_mbuf_raw_sanity_check(m) before
 * returning; that line appears lost in extraction — confirm against the
 * original header.
 *
 * @param mp
 *   Mempool to allocate from.
 * @return
 *   The allocated mbuf, or NULL if the mempool is empty.
 */
static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	if (rte_mempool_get(mp, (void **)&m) < 0)
		return NULL;
	return m;
}
570
585static __rte_always_inline void
587{
588 RTE_ASSERT(!RTE_MBUF_CLONED(m) &&
591 rte_mempool_put(m->pool, m);
592}
593
616void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
617 void *m, unsigned i);
618
639void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);
640
675struct rte_mempool *
676rte_pktmbuf_pool_create(const char *name, unsigned n,
677 unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
678 int socket_id);
679
717struct rte_mempool *
718rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
719 unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
720 int socket_id, const char *ops_name);
721
724 void *buf_ptr;
726 size_t buf_len;
727 uint16_t elt_size;
728};
729
771__rte_experimental
772struct rte_mempool *
773rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
774 unsigned int cache_size, uint16_t priv_size,
775 uint16_t data_room_size, int socket_id,
776 const struct rte_pktmbuf_extmem *ext_mem,
777 unsigned int ext_num);
778
790static inline uint16_t
792{
793 struct rte_pktmbuf_pool_private *mbp_priv;
794
795 mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
796 return mbp_priv->mbuf_data_room_size;
797}
798
811static inline uint16_t
813{
814 struct rte_pktmbuf_pool_private *mbp_priv;
815
816 mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
817 return mbp_priv->mbuf_priv_size;
818}
819
828static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
829{
830 m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
831 (uint16_t)m->buf_len);
832}
833
842static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
843{
844 m->next = NULL;
845 m->pkt_len = 0;
846 m->tx_offload = 0;
847 m->vlan_tci = 0;
848 m->vlan_tci_outer = 0;
849 m->nb_segs = 1;
851
853 m->packet_type = 0;
855
856 m->data_len = 0;
858}
859
873static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
874{
875 struct rte_mbuf *m;
876 if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
878 return m;
879}
880
/**
 * Allocate a bulk of mbufs and reset their fields to default values.
 *
 * Allocation is all-or-nothing: either all @p count mbufs are obtained
 * from the mempool or none are.
 *
 * @param pool
 *   Mempool to allocate from.
 * @param mbufs
 *   Array of size @p count that receives the allocated mbufs.
 * @param count
 *   Number of mbufs to allocate.
 * @return
 *   0 on success, or a negative mempool error code on failure.
 */
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here while() loop is used rather than do() while{} to avoid extra
	 * check if count is zero.
	 * The case labels sit INSIDE the while body: the switch jumps into
	 * the middle of the unrolled loop to handle count % 4 leftovers,
	 * then the loop continues in steps of four.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}
936
969static inline struct rte_mbuf_ext_shared_info *
970rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
972{
973 struct rte_mbuf_ext_shared_info *shinfo;
974 void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
975 void *addr;
976
977 addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
978 sizeof(uintptr_t));
979 if (addr <= buf_addr)
980 return NULL;
981
982 shinfo = (struct rte_mbuf_ext_shared_info *)addr;
983 shinfo->free_cb = free_cb;
984 shinfo->fcb_opaque = fcb_opaque;
985 rte_mbuf_ext_refcnt_set(shinfo, 1);
986
987 *buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
988 return shinfo;
989}
990
1051static inline void
1052rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
1053 rte_iova_t buf_iova, uint16_t buf_len,
1054 struct rte_mbuf_ext_shared_info *shinfo)
1055{
1056 /* mbuf should not be read-only */
1057 RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
1058 RTE_ASSERT(shinfo->free_cb != NULL);
1059
1060 m->buf_addr = buf_addr;
1061 m->buf_iova = buf_iova;
1062 m->buf_len = buf_len;
1063
1064 m->data_len = 0;
1065 m->data_off = 0;
1066
1068 m->shinfo = shinfo;
1069}
1070
1078#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)
1079
/**
 * Copy the dynamic fields (dynfield1[]) of @p msrc into @p mdst.
 *
 * This is a raw byte copy; the semantics of any registered dynamic
 * fields are opaque at this layer.
 *
 * @param mdst
 *   Destination mbuf.
 * @param msrc
 *   Source mbuf.
 */
static inline void
rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
}
1093
1094/* internal */
1095static inline void
1096__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
1097{
1098 mdst->port = msrc->port;
1099 mdst->vlan_tci = msrc->vlan_tci;
1100 mdst->vlan_tci_outer = msrc->vlan_tci_outer;
1101 mdst->tx_offload = msrc->tx_offload;
1102 mdst->hash = msrc->hash;
1103 mdst->packet_type = msrc->packet_type;
1104 rte_mbuf_dynfield_copy(mdst, msrc);
1105}
1106
1128static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
1129{
1130 RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
1131 rte_mbuf_refcnt_read(mi) == 1);
1132
1133 if (RTE_MBUF_HAS_EXTBUF(m)) {
1135 mi->ol_flags = m->ol_flags;
1136 mi->shinfo = m->shinfo;
1137 } else {
1138 /* if m is not direct, get the mbuf that embeds the data */
1140 mi->priv_size = m->priv_size;
1142 }
1143
1144 __rte_pktmbuf_copy_hdr(mi, m);
1145
1146 mi->data_off = m->data_off;
1147 mi->data_len = m->data_len;
1148 mi->buf_iova = m->buf_iova;
1149 mi->buf_addr = m->buf_addr;
1150 mi->buf_len = m->buf_len;
1151
1152 mi->next = NULL;
1153 mi->pkt_len = mi->data_len;
1154 mi->nb_segs = 1;
1155
1158}
1159
1167static inline void
1168__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
1169{
1170 RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
1171 RTE_ASSERT(m->shinfo != NULL);
1172
1173 if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
1175}
1176
1183static inline void
1184__rte_pktmbuf_free_direct(struct rte_mbuf *m)
1185{
1186 struct rte_mbuf *md;
1187
1188 RTE_ASSERT(RTE_MBUF_CLONED(m));
1189
1190 md = rte_mbuf_from_indirect(m);
1191
1192 if (rte_mbuf_refcnt_update(md, -1) == 0) {
1193 md->next = NULL;
1194 md->nb_segs = 1;
1195 rte_mbuf_refcnt_set(md, 1);
1197 }
1198}
1199
1218static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
1219{
1220 struct rte_mempool *mp = m->pool;
1221 uint32_t mbuf_size, buf_len;
1222 uint16_t priv_size;
1223
1224 if (RTE_MBUF_HAS_EXTBUF(m)) {
1225 /*
1226 * The mbuf has the external attached buffer,
1227 * we should check the type of the memory pool where
1228 * the mbuf was allocated from to detect the pinned
1229 * external buffer.
1230 */
1231 uint32_t flags = rte_pktmbuf_priv_flags(mp);
1232
1234 /*
1235 * The pinned external buffer should not be
1236 * detached from its backing mbuf, just exit.
1237 */
1238 return;
1239 }
1240 __rte_pktmbuf_free_extbuf(m);
1241 } else {
1242 __rte_pktmbuf_free_direct(m);
1243 }
1244 priv_size = rte_pktmbuf_priv_size(mp);
1245 mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
1246 buf_len = rte_pktmbuf_data_room_size(mp);
1247
1248 m->priv_size = priv_size;
1249 m->buf_addr = (char *)m + mbuf_size;
1250 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
1251 m->buf_len = (uint16_t)buf_len;
1253 m->data_len = 0;
1254 m->ol_flags = 0;
1255}
1256
1270static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
1271{
1272 struct rte_mbuf_ext_shared_info *shinfo;
1273
1274 /* Clear flags, mbuf is being freed. */
1276 shinfo = m->shinfo;
1277
1278 /* Optimize for performance - do not dec/reinit */
1279 if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
1280 return 0;
1281
1282 /*
1283 * Direct usage of add primitive to avoid
1284 * duplication of comparing with one.
1285 */
1286 if (likely(__atomic_add_fetch(&shinfo->refcnt, (uint16_t)-1,
1287 __ATOMIC_ACQ_REL)))
1288 return 1;
1289
1290 /* Reinitialize counter before mbuf freeing. */
1291 rte_mbuf_ext_refcnt_set(shinfo, 1);
1292 return 0;
1293}
1294
1309static __rte_always_inline struct rte_mbuf *
1311{
1313
1314 if (likely(rte_mbuf_refcnt_read(m) == 1)) {
1315
1316 if (!RTE_MBUF_DIRECT(m)) {
1318 if (RTE_MBUF_HAS_EXTBUF(m) &&
1320 __rte_pktmbuf_pinned_extbuf_decref(m))
1321 return NULL;
1322 }
1323
1324 if (m->next != NULL)
1325 m->next = NULL;
1326 if (m->nb_segs != 1)
1327 m->nb_segs = 1;
1328
1329 return m;
1330
1331 } else if (__rte_mbuf_refcnt_update(m, -1) == 0) {
1332
1333 if (!RTE_MBUF_DIRECT(m)) {
1335 if (RTE_MBUF_HAS_EXTBUF(m) &&
1337 __rte_pktmbuf_pinned_extbuf_decref(m))
1338 return NULL;
1339 }
1340
1341 if (m->next != NULL)
1342 m->next = NULL;
1343 if (m->nb_segs != 1)
1344 m->nb_segs = 1;
1345 rte_mbuf_refcnt_set(m, 1);
1346
1347 return m;
1348 }
1349 return NULL;
1350}
1351
1361static __rte_always_inline void
1363{
1365 if (likely(m != NULL))
1367}
1368
1378static inline void rte_pktmbuf_free(struct rte_mbuf *m)
1379{
1380 struct rte_mbuf *m_next;
1381
1382 if (m != NULL)
1384
1385 while (m != NULL) {
1386 m_next = m->next;
1388 m = m_next;
1389 }
1390}
1391
1404void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);
1405
1423struct rte_mbuf *
1425
1447struct rte_mbuf *
1448rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
1449 uint32_t offset, uint32_t length);
1450
1462static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
1463{
1465
1466 do {
1468 } while ((m = m->next) != NULL);
1469}
1470
/**
 * Get the headroom of a packet mbuf: the number of bytes in the buffer
 * before the start of the packet data.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   The headroom length, in bytes.
 */
static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	return m->data_off;
}
1484
1493static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
1494{
1496 return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
1497 m->data_len);
1498}
1499
1508static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
1509{
1511 while (m->next != NULL)
1512 m = m->next;
1513 return m;
1514}
1515
1524#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)
1525
1534#define rte_pktmbuf_data_len(m) ((m)->data_len)
1535
1551static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
1552 uint16_t len)
1553{
1555
1556 if (unlikely(len > rte_pktmbuf_headroom(m)))
1557 return NULL;
1558
1559 /* NB: elaborating the subtraction like this instead of using
1560 * -= allows us to ensure the result type is uint16_t
1561 * avoiding compiler warnings on gcc 8.1 at least */
1562 m->data_off = (uint16_t)(m->data_off - len);
1563 m->data_len = (uint16_t)(m->data_len + len);
1564 m->pkt_len = (m->pkt_len + len);
1565
1566 return (char *)m->buf_addr + m->data_off;
1567}
1568
1584static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
1585{
1586 void *tail;
1587 struct rte_mbuf *m_last;
1588
1590
1591 m_last = rte_pktmbuf_lastseg(m);
1592 if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
1593 return NULL;
1594
1595 tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
1596 m_last->data_len = (uint16_t)(m_last->data_len + len);
1597 m->pkt_len = (m->pkt_len + len);
1598 return (char*) tail;
1599}
1600
1615static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
1616{
1618
1619 if (unlikely(len > m->data_len))
1620 return NULL;
1621
1622 /* NB: elaborating the addition like this instead of using
1623 * += allows us to ensure the result type is uint16_t
1624 * avoiding compiler warnings on gcc 8.1 at least */
1625 m->data_len = (uint16_t)(m->data_len - len);
1626 m->data_off = (uint16_t)(m->data_off + len);
1627 m->pkt_len = (m->pkt_len - len);
1628 return (char *)m->buf_addr + m->data_off;
1629}
1630
1645static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
1646{
1647 struct rte_mbuf *m_last;
1648
1650
1651 m_last = rte_pktmbuf_lastseg(m);
1652 if (unlikely(len > m_last->data_len))
1653 return -1;
1654
1655 m_last->data_len = (uint16_t)(m_last->data_len - len);
1656 m->pkt_len = (m->pkt_len - len);
1657 return 0;
1658}
1659
/**
 * Test whether the packet data is contiguous, i.e. held in a single
 * segment.
 *
 * @param m
 *   The packet mbuf.
 * @return
 *   1 if the packet has exactly one segment, 0 otherwise.
 */
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	return m->nb_segs == 1;
}
1674
1678const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
1679 uint32_t len, void *buf);
1680
1701static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
1702 uint32_t off, uint32_t len, void *buf)
1703{
1704 if (likely(off + len <= rte_pktmbuf_data_len(m)))
1705 return rte_pktmbuf_mtod_offset(m, char *, off);
1706 else
1707 return __rte_pktmbuf_read(m, off, len, buf);
1708}
1709
/**
 * Chain mbuf @p tail onto the end of packet @p head, creating (or
 * extending) a segmented packet.
 *
 * Note: walks the segment list linearly to find the current tail.
 *
 * @param head
 *   Head of the destination chain; nb_segs and pkt_len are updated.
 * @param tail
 *   Packet to append; its pkt_len is reduced to its own data_len since
 *   pkt_len is only meaningful in the head.
 * @return
 *   0 on success, -EOVERFLOW if the segment count would exceed
 *   RTE_MBUF_MAX_NB_SEGS.
 */
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}
1750
1772static __rte_always_inline uint64_t
1773rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
1774 uint64_t ol3, uint64_t ol2, uint64_t unused)
1775{
1776 return il2 << RTE_MBUF_L2_LEN_OFS |
1777 il3 << RTE_MBUF_L3_LEN_OFS |
1778 il4 << RTE_MBUF_L4_LEN_OFS |
1779 tso << RTE_MBUF_TSO_SEGSZ_OFS |
1780 ol3 << RTE_MBUF_OUTL3_LEN_OFS |
1781 ol2 << RTE_MBUF_OUTL2_LEN_OFS |
1782 unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
1783}
1784
1795static inline int
1797{
1798 uint64_t ol_flags = m->ol_flags;
1799
1800 /* Does packet set any of available offloads? */
1802 return 0;
1803
1804 /* IP checksum can be counted only for IPv4 packet */
1806 return -EINVAL;
1807
1808 /* IP type not set when required */
1811 return -EINVAL;
1812
1813 /* Check requirements for TSO packet */
1815 if ((m->tso_segsz == 0) ||
1818 return -EINVAL;
1819
1820 /* RTE_MBUF_F_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
1823 return -EINVAL;
1824
1825 return 0;
1826}
1827
1831int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);
1832
1845static inline int
1847{
1848 if (rte_pktmbuf_is_contiguous(mbuf))
1849 return 0;
1850 return __rte_pktmbuf_linearize(mbuf);
1851}
1852
1867void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);
1868
1872static inline uint32_t
1874{
1875 return m->hash.sched.queue_id;
1876}
1877
1881static inline uint8_t
1883{
1884 return m->hash.sched.traffic_class;
1885}
1886
1890static inline uint8_t
1892{
1893 return m->hash.sched.color;
1894}
1895
1908static inline void
1909rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
1910 uint8_t *traffic_class,
1911 uint8_t *color)
1912{
1913 struct rte_mbuf_sched sched = m->hash.sched;
1914
1915 *queue_id = sched.queue_id;
1917 *color = sched.color;
1918}
1919
1923static inline void
1925{
1926 m->hash.sched.queue_id = queue_id;
1927}
1928
1932static inline void
1934{
1935 m->hash.sched.traffic_class = traffic_class;
1936}
1937
1941static inline void
1943{
1944 m->hash.sched.color = color;
1945}
1946
1959static inline void
1961 uint8_t traffic_class,
1962 uint8_t color)
1963{
1964 m->hash.sched = (struct rte_mbuf_sched){
1965 .queue_id = queue_id,
1966 .traffic_class = traffic_class,
1967 .color = color,
1968 .reserved = 0,
1969 };
1970}
1971
1972#ifdef __cplusplus
1973}
1974#endif
1975
1976#endif /* _RTE_MBUF_H_ */
#define likely(x)
#define unlikely(x)
#define RTE_PTR_ALIGN_FLOOR(ptr, align)
Definition: rte_common.h:297
#define RTE_MIN(a, b)
Definition: rte_common.h:593
#define RTE_PTR_DIFF(ptr1, ptr2)
Definition: rte_common.h:280
#define RTE_PTR_SUB(ptr, x)
Definition: rte_common.h:273
uint64_t rte_iova_t
Definition: rte_common.h:438
#define RTE_PTR_ADD(ptr, x)
Definition: rte_common.h:268
#define RTE_SET_USED(x)
Definition: rte_common.h:138
#define __rte_unused
Definition: rte_common.h:123
#define __rte_always_inline
Definition: rte_common.h:233
static __rte_always_inline void rte_pktmbuf_free_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1362
int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
static void rte_mbuf_prefetch_part2(struct rte_mbuf *m)
Definition: rte_mbuf.h:128
static __rte_always_inline uint64_t rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso, uint64_t ol3, uint64_t ol2, uint64_t unused)
Definition: rte_mbuf.h:1773
static void rte_pktmbuf_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:1378
static rte_iova_t rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:167
static rte_iova_t rte_mbuf_data_iova(const struct rte_mbuf *mb)
Definition: rte_mbuf.h:149
static uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1493
int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen)
struct rte_mbuf * rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp)
#define rte_pktmbuf_data_len(m)
Definition: rte_mbuf.h:1534
static uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1479
static uint32_t rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1873
static void rte_mbuf_prefetch_part1(struct rte_mbuf *m)
Definition: rte_mbuf.h:111
static char * rte_pktmbuf_prepend(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1551
static char * rte_mbuf_data_addr_default(struct rte_mbuf *mb)
Definition: rte_mbuf.h:216
struct rte_mempool * rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const char *ops_name)
static void rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id, uint8_t traffic_class, uint8_t color)
Definition: rte_mbuf.h:1960
static void rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
Definition: rte_mbuf.h:1089
static char * rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1615
static void rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
Definition: rte_mbuf.h:1942
static int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1669
int rte_mbuf_check(const struct rte_mbuf *m, int is_header, const char **reason)
static char * rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1584
const char * rte_get_tx_ol_flag_name(uint64_t mask)
static void rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id, uint8_t *traffic_class, uint8_t *color)
Definition: rte_mbuf.h:1909
static void rte_pktmbuf_detach(struct rte_mbuf *m)
Definition: rte_mbuf.h:1218
static char * rte_mbuf_to_baddr(struct rte_mbuf *md)
Definition: rte_mbuf.h:235
static struct rte_mbuf_ext_shared_info * rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len, rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
Definition: rte_mbuf.h:970
static uint32_t rte_pktmbuf_priv_flags(struct rte_mempool *mp)
Definition: rte_mbuf.h:279
static void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
Definition: rte_mbuf.h:1462
void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg, void *m, unsigned i)
static void rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
Definition: rte_mbuf.h:413
static struct rte_mbuf * rte_mbuf_raw_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:561
static uint16_t rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo, int16_t value)
Definition: rte_mbuf.h:461
struct rte_mempool * rte_pktmbuf_pool_create(const char *name, unsigned n, unsigned cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id)
void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count)
void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len)
#define __rte_mbuf_sanity_check(m, is_h)
Definition: rte_mbuf.h:313
static const void * rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off, uint32_t len, void *buf)
Definition: rte_mbuf.h:1701
static uint16_t rte_mbuf_refcnt_read(const struct rte_mbuf *m)
Definition: rte_mbuf.h:404
static int rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
Definition: rte_mbuf.h:1846
__rte_experimental struct rte_mempool * rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n, unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size, int socket_id, const struct rte_pktmbuf_extmem *ext_mem, unsigned int ext_num)
static __rte_always_inline void rte_mbuf_raw_free(struct rte_mbuf *m)
Definition: rte_mbuf.h:586
struct rte_mbuf * rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp, uint32_t offset, uint32_t length)
static struct rte_mbuf * rte_pktmbuf_alloc(struct rte_mempool *mp)
Definition: rte_mbuf.h:873
static char * rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
Definition: rte_mbuf.h:202
static void rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo, uint16_t new_value)
Definition: rte_mbuf.h:443
void rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header)
static uint8_t rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1882
static uint16_t rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:429
static uint16_t rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
Definition: rte_mbuf.h:395
static int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
Definition: rte_mbuf.h:1645
#define RTE_MBUF_HAS_PINNED_EXTBUF(mb)
Definition: rte_mbuf.h:302
static void rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
Definition: rte_mbuf.h:1924
static uint16_t rte_pktmbuf_data_room_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:791
static struct rte_mbuf * rte_pktmbuf_lastseg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1508
static void rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
Definition: rte_mbuf.h:1933
#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF
Definition: rte_mbuf.h:293
static __rte_always_inline struct rte_mbuf * rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
Definition: rte_mbuf.h:1310
static int rte_validate_tx_offload(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1796
static void rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr, rte_iova_t buf_iova, uint16_t buf_len, struct rte_mbuf_ext_shared_info *shinfo)
Definition: rte_mbuf.h:1052
static struct rte_mbuf * rte_mbuf_from_indirect(struct rte_mbuf *mi)
Definition: rte_mbuf.h:181
static int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool, struct rte_mbuf **mbufs, unsigned count)
Definition: rte_mbuf.h:895
static void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
Definition: rte_mbuf.h:828
static void * rte_mbuf_to_priv(struct rte_mbuf *m)
Definition: rte_mbuf.h:253
static uint8_t rte_mbuf_sched_color_get(const struct rte_mbuf *m)
Definition: rte_mbuf.h:1891
static int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
Definition: rte_mbuf.h:1726
static __rte_always_inline void __rte_mbuf_raw_sanity_check(__rte_unused const struct rte_mbuf *m)
Definition: rte_mbuf.h:531
const char * rte_get_rx_ol_flag_name(uint64_t mask)
void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg)
static uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp)
Definition: rte_mbuf.h:812
static void rte_pktmbuf_reset(struct rte_mbuf *m)
Definition: rte_mbuf.h:842
static void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
Definition: rte_mbuf.h:1128
#define RTE_MBUF_F_TX_OUTER_IP_CKSUM
#define RTE_MBUF_F_EXTERNAL
#define RTE_MBUF_F_TX_IP_CKSUM
#define RTE_MBUF_MAX_NB_SEGS
#define RTE_MBUF_F_TX_TCP_SEG
#define RTE_MBUF_F_TX_L4_MASK
#define RTE_MBUF_F_TX_OUTER_IPV4
#define RTE_MBUF_HAS_EXTBUF(mb)
#define RTE_MBUF_F_TX_OFFLOAD_MASK
#define RTE_MBUF_CLONED(mb)
#define RTE_MBUF_DIRECT(mb)
#define RTE_MBUF_F_TX_IPV4
void(* rte_mbuf_extbuf_free_callback_t)(void *addr, void *opaque)
#define rte_pktmbuf_mtod_offset(m, t, o)
#define RTE_MBUF_F_TX_IPV6
#define RTE_MBUF_F_INDIRECT
#define RTE_MBUF_PORT_INVALID
static __rte_always_inline int rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n)
Definition: rte_mempool.h:1575
static rte_iova_t rte_mempool_virt2iova(const void *elt)
Definition: rte_mempool.h:1729
static __rte_always_inline int rte_mempool_get(struct rte_mempool *mp, void **obj_p)
Definition: rte_mempool.h:1604
static __rte_always_inline void rte_mempool_put(struct rte_mempool *mp, void *obj)
Definition: rte_mempool.h:1439
static void * rte_mempool_get_priv(struct rte_mempool *mp)
Definition: rte_mempool.h:1757
static void rte_prefetch0(const volatile void *p)
rte_mbuf_extbuf_free_callback_t free_cb
uint32_t queue_id
uint8_t traffic_class
uint64_t ol_flags
uint16_t nb_segs
uint16_t vlan_tci
uint16_t priv_size
uint32_t pkt_len
uint16_t buf_len
struct rte_mbuf_ext_shared_info * shinfo
uint32_t packet_type
uint16_t port
void * buf_addr
struct rte_mempool * pool
uint32_t dynfield1[9]
uint16_t vlan_tci_outer
struct rte_mbuf * next
uint64_t tx_offload
uint16_t data_len
uint64_t tso_segsz
uint16_t refcnt
char name[RTE_MEMPOOL_NAMESIZE]
Definition: rte_mempool.h:213
uint32_t cache_size
Definition: rte_mempool.h:224
unsigned int flags
Definition: rte_mempool.h:221
uint16_t elt_size
Definition: rte_mbuf.h:727
rte_iova_t buf_iova
Definition: rte_mbuf.h:725
uint16_t mbuf_data_room_size
Definition: rte_mbuf.h:265