DPDK 24.07.0
rte_net.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#ifndef _RTE_NET_PTYPE_H_
#define _RTE_NET_PTYPE_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

/* Header lengths of a packet, filled in by rte_net_get_ptype(). */
struct rte_net_hdr_lens {
        uint8_t l2_len;
        uint8_t inner_l2_len;
        uint16_t l3_len;
        uint16_t inner_l3_len;
        uint16_t tunnel_len;
        uint8_t l4_len;
        uint8_t inner_l4_len;
};

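For illustration only (this helper is not part of the header): once rte_net_get_ptype() has filled the structure, the length fields are typically summed to locate a header or payload; for tunneled packets, tunnel_len and the inner_* fields describe the tunnel header and the inner headers in the same way.

/* Hypothetical sketch: offset from the start of the mbuf data to the
 * L4 payload of a non-tunneled packet. */
static inline uint32_t
l4_payload_offset(const struct rte_net_hdr_lens *hdr_lens)
{
        return (uint32_t)hdr_lens->l2_len + hdr_lens->l3_len +
                hdr_lens->l4_len;
}
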
/*
 * Skip IPv6 extension headers.
 * proto is the next-header value of the IPv6 header and *off the offset
 * of the byte that follows it; on return *off points past any extension
 * headers, *frag is set to 1 if a fragment header was found, and the
 * function returns the next protocol (negative value on error).
 */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
        int *frag);

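A minimal usage sketch, assuming <rte_mbuf.h> is included for rte_pktmbuf_read(); the helper name is illustrative and not part of DPDK:

/* Find the L4 protocol of an IPv6 packet whose IPv6 header starts at
 * offset l2_len in the mbuf data. */
static int
find_ip6_l4_proto(const struct rte_mbuf *m, uint32_t l2_len, uint32_t *l4_off)
{
        const struct rte_ipv6_hdr *ip6;
        struct rte_ipv6_hdr ip6_copy;
        int frag = 0;

        ip6 = rte_pktmbuf_read(m, l2_len, sizeof(*ip6), &ip6_copy);
        if (ip6 == NULL)
                return -1;

        *l4_off = l2_len + sizeof(*ip6);
        /* Returns e.g. IPPROTO_TCP or IPPROTO_UDP once any extension
         * headers have been skipped; *l4_off then points to that header. */
        return rte_net_skip_ip6_ext(ip6->proto, m, l4_off, &frag);
}
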
/*
 * Parse an mbuf and return its packet type as RTE_PTYPE_* flags, filling
 * *hdr_lens with the lengths of the parsed headers. The layers argument
 * is a mask of RTE_PTYPE_*_MASK values that limits how deep the packet
 * is parsed.
 */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
        struct rte_net_hdr_lens *hdr_lens, uint32_t layers);

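A minimal receive-side sketch (illustrative only, assuming the headers are contiguous in the first mbuf segment, which rte_pktmbuf_mtod_offset() requires):

static int
is_tcp_to_port(struct rte_mbuf *m, uint16_t port)
{
        struct rte_net_hdr_lens hdr_lens;
        const struct rte_tcp_hdr *tcp;
        uint32_t ptype;

        /* Parse every layer and collect the header lengths. */
        ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
        if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
                return 0;

        tcp = rte_pktmbuf_mtod_offset(m, const struct rte_tcp_hdr *,
                        hdr_lens.l2_len + hdr_lens.l3_len);
        return tcp->dst_port == rte_cpu_to_be_16(port);
}
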
/*
 * Prepare the pseudo-header checksums of an outgoing packet so that the
 * rest of the checksum can be computed by hardware, as required by Intel
 * NICs and other drivers that expect this preparation. The header length
 * fields of the mbuf and the RTE_MBUF_F_TX_* flags in ol_flags describe
 * the requested offloads.
 */
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
        const uint64_t inner_requests = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK |
                RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG;
        const uint64_t outer_requests = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
                RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
        /* Initialise ipv4_hdr to avoid false positive compiler warnings. */
        struct rte_ipv4_hdr *ipv4_hdr = NULL;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        struct rte_udp_hdr *udp_hdr;
        uint64_t inner_l3_offset = m->l2_len;

        /*
         * Does the packet request any of the available offloads?
         * Mainly this is needed to avoid the fragmented-header check when
         * no offloads are requested.
         */
        if (!(ol_flags & (inner_requests | outer_requests)))
                return 0;

        if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
                inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
                /*
                 * Prepare the outer IPv4 header checksum by setting it to 0,
                 * so that it can be computed by the hardware NIC.
                 */
                if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
                        ipv4_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct rte_ipv4_hdr *, m->outer_l2_len);
                        ipv4_hdr->hdr_checksum = 0;
                }
                if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM || ol_flags & inner_requests) {
                        if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
                                ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
                                                m->outer_l2_len);
                                udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
                                                m->outer_l3_len);
                                if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
                                        udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
                                                        m->ol_flags);
                                else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
                                        udp_hdr->dgram_cksum = 0;
                        } else {
                                ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
                                                m->outer_l2_len);
                                udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
                                                m->outer_l2_len + m->outer_l3_len);
                                if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
                                        udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
                                                        m->ol_flags);
                                else if (ipv6_hdr->proto == IPPROTO_UDP)
                                        udp_hdr->dgram_cksum = 0;
                        }
                }
        }

        /*
         * Check if headers are fragmented.
         * The check could be less strict depending on which offloads are
         * requested and the headers to be used, but let's keep it simple.
         */
        if (unlikely(rte_pktmbuf_data_len(m) <
                        inner_l3_offset + m->l3_len + m->l4_len))
                return -ENOTSUP;

        if (ol_flags & RTE_MBUF_F_TX_IPV4) {
                ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
                                inner_l3_offset);

                if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
                        ipv4_hdr->hdr_checksum = 0;
        }

        if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
                        (ol_flags & RTE_MBUF_F_TX_UDP_SEG)) {
                if (ol_flags & RTE_MBUF_F_TX_IPV4) {
                        udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
                                        m->l3_len);
                        udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
                                        ol_flags);
                } else {
                        ipv6_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct rte_ipv6_hdr *, inner_l3_offset);
                        /* non-TSO UDP */
                        udp_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct rte_udp_hdr *,
                                        inner_l3_offset + m->l3_len);
                        udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
                                        ol_flags);
                }
        } else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
                        (ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
                if (ol_flags & RTE_MBUF_F_TX_IPV4) {
                        /* non-TSO TCP or TSO */
                        tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
                                        m->l3_len);
                        tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
                                        ol_flags);
                } else {
                        ipv6_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct rte_ipv6_hdr *, inner_l3_offset);
                        /* non-TSO TCP or TSO */
                        tcp_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct rte_tcp_hdr *,
                                        inner_l3_offset + m->l3_len);
                        tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
                                        ol_flags);
                }
        }

        return 0;
}

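Callers are expected to have set the relevant mbuf length fields (l2_len, l3_len, and the outer_* lengths for tunnels) and the RTE_MBUF_F_TX_* request flags before calling. A minimal transmit-side sketch, assuming <rte_ether.h> is included for struct rte_ether_hdr and that the mbuf already carries a well-formed Ethernet/IPv4/TCP packet; the function name is illustrative:

static int
request_tcp_cksum_offload(struct rte_mbuf *m)
{
        /* Plain (non-tunneled) IPv4/TCP packet: request IPv4 header and
         * TCP checksum offload. */
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);
        m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
                        RTE_MBUF_F_TX_TCP_CKSUM;

        /* Zeroes the IPv4 checksum and seeds the TCP checksum with the
         * pseudo-header checksum; returns -ENOTSUP if the headers do not
         * fit in the first mbuf segment. */
        return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}
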
/*
 * Same as rte_net_intel_cksum_flags_prepare(), using the offload flags
 * already set in m->ol_flags.
 */
static inline int
rte_net_intel_cksum_prepare(struct rte_mbuf *m)
{
        return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}

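A sketch of how a tx_prepare-style loop might use it, much as a PMD's prepare path does; the function name below is illustrative:

/* Prepare a burst of mbufs for transmission; stop at the first one
 * whose headers cannot be prepared. */
static uint16_t
prepare_burst(struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        uint16_t i;

        for (i = 0; i < nb_pkts; i++) {
                if (rte_net_intel_cksum_prepare(pkts[i]) != 0)
                        break;
        }
        return i;       /* number of mbufs successfully prepared */
}
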
#ifdef __cplusplus
}
#endif

#endif /* _RTE_NET_PTYPE_H_ */