DPDK 19.05.0
rte_net.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#ifndef _RTE_NET_PTYPE_H_
#define _RTE_NET_PTYPE_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>

/**
 * Structure containing header lengths associated to a packet, filled
 * by rte_net_get_ptype().
 */
struct rte_net_hdr_lens {
        uint8_t l2_len;       /**< Outer L2 header length. */
        uint8_t l3_len;       /**< Outer L3 header length. */
        uint8_t l4_len;       /**< Outer L4 header length. */
        uint8_t tunnel_len;   /**< Tunnel header length. */
        uint8_t inner_l2_len; /**< Inner L2 header length. */
        uint8_t inner_l3_len; /**< Inner L3 header length. */
        uint8_t inner_l4_len; /**< Inner L4 header length. */
};

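/*
 * For a parsed, non-tunneled TCP/IPv4 frame, the payload offset inside
 * the mbuf data is the sum of the outer header lengths ("hdr_lens" here
 * is a structure filled by rte_net_get_ptype(), declared below):
 *
 *   uint32_t payload_off = hdr_lens.l2_len + hdr_lens.l3_len +
 *                          hdr_lens.l4_len;
 */
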
/**
 * @warning
 * @b EXPERIMENTAL: this API may change without prior notice.
 *
 * Skip IPv6 extension headers.
 *
 * @param proto
 *   Protocol field of the IPv6 header.
 * @param m
 *   The packet mbuf to be parsed.
 * @param off
 *   On input, the offset of the first byte after the fixed IPv6 header;
 *   on output, the offset of the first byte of the next layer, after
 *   any extension headers.
 * @param frag
 *   Set to 1 on output if the packet is an IPv6 fragment.
 * @return
 *   The protocol that follows the extension headers, or -1 on parsing
 *   error.
 */
int __rte_experimental
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
        int *frag);

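/*
 * A minimal usage sketch (assumes "m" holds an Ethernet frame carrying
 * an IPv6 packet and that m->l2_len is already set):
 *
 *   struct ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(m,
 *           struct ipv6_hdr *, m->l2_len);
 *   uint32_t off = m->l2_len + sizeof(struct ipv6_hdr);
 *   int frag = 0;
 *   int proto = rte_net_skip_ip6_ext(ip6->proto, m, &off, &frag);
 *
 *   if (proto >= 0)
 *       ... "off" now points at the L4 header and "proto" identifies it
 */
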
/**
 * Parse an Ethernet packet to retrieve its packet type.
 *
 * @param m
 *   The packet mbuf to be parsed.
 * @param hdr_lens
 *   A pointer to a structure where the header lengths are returned, or
 *   NULL if the caller does not need them.
 * @param layers
 *   The set of layers to parse, an OR of RTE_PTYPE_*_MASK bits; parsing
 *   stops as soon as a layer outside this set is reached.
 * @return
 *   The packet type of the packet (an RTE_PTYPE_* value).
 */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
        struct rte_net_hdr_lens *hdr_lens, uint32_t layers);

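/*
 * A minimal usage sketch (assumes "m" is a received mbuf whose headers
 * sit in the first data segment):
 *
 *   struct rte_net_hdr_lens hdr_lens;
 *   uint32_t ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
 *
 *   if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
 *       struct tcp_hdr *th = rte_pktmbuf_mtod_offset(m,
 *               struct tcp_hdr *, hdr_lens.l2_len + hdr_lens.l3_len);
 *       ... inspect the TCP header
 *   }
 */
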
/**
 * Prepare the pseudo header checksum.
 *
 * This function prepares the pseudo header checksum for TSO and
 * non-TSO TCP/UDP packets in the mbuf data, based on the requested
 * offload flags:
 *
 * - for non-TSO TCP/UDP packets, the full pseudo header checksum is
 *   computed and set in the packet data;
 * - for TSO, the IP payload length is not included in the pseudo
 *   header.
 *
 * The headers are expected to sit in the first data segment of the
 * mbuf, unfragmented, and must be safe to modify.
 *
 * @param m
 *   The packet mbuf to be fixed.
 * @param ol_flags
 *   TX offload flags to use with this packet.
 * @return
 *   0 if the checksum was initialized properly, or -ENOTSUP if the
 *   headers are fragmented (debug builds).
 */
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
        /* Initialized to NULL to avoid false-positive "may be used
         * uninitialized" warnings: it is only dereferenced on
         * PKT_TX_IPV4 paths, where it is always assigned first. */
        struct ipv4_hdr *ipv4_hdr = NULL;
        struct ipv6_hdr *ipv6_hdr;
        struct tcp_hdr *tcp_hdr;
        struct udp_hdr *udp_hdr;
        uint64_t inner_l3_offset = m->l2_len;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        /*
         * Does the packet request any offload at all? This check mainly
         * avoids the fragmented-header check below when no offload is
         * requested.
         */
        if (!(ol_flags & PKT_TX_OFFLOAD_MASK))
                return 0;
#endif

        if ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
            (ol_flags & PKT_TX_OUTER_IPV6))
                inner_l3_offset += m->outer_l2_len + m->outer_l3_len;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        /*
         * Check that the headers are not fragmented. The check could be
         * less strict depending on which offloads are actually requested
         * and which headers are used, but let's keep it simple.
         */
        if (unlikely(rte_pktmbuf_data_len(m) <
                     inner_l3_offset + m->l3_len + m->l4_len))
                return -ENOTSUP;
#endif

        if (ol_flags & PKT_TX_IPV4) {
                ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
                        inner_l3_offset);

                /* The IPv4 checksum must be zeroed before the hardware
                 * recomputes it. */
                if (ol_flags & PKT_TX_IP_CKSUM)
                        ipv4_hdr->hdr_checksum = 0;
        }

        if ((ol_flags & PKT_TX_UDP_CKSUM) == PKT_TX_UDP_CKSUM) {
                if (ol_flags & PKT_TX_IPV4) {
                        udp_hdr = (struct udp_hdr *)((char *)ipv4_hdr +
                                        m->l3_len);
                        udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
                                        ol_flags);
                } else {
                        ipv6_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct ipv6_hdr *, inner_l3_offset);
                        /* non-TSO UDP */
                        udp_hdr = rte_pktmbuf_mtod_offset(m, struct udp_hdr *,
                                        inner_l3_offset + m->l3_len);
                        udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
                                        ol_flags);
                }
        } else if ((ol_flags & PKT_TX_TCP_CKSUM) ||
                        (ol_flags & PKT_TX_TCP_SEG)) {
                if (ol_flags & PKT_TX_IPV4) {
                        /* non-TSO TCP or TSO */
                        tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr +
                                        m->l3_len);
                        tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
                                        ol_flags);
                } else {
                        ipv6_hdr = rte_pktmbuf_mtod_offset(m,
                                        struct ipv6_hdr *, inner_l3_offset);
                        /* non-TSO TCP or TSO */
                        tcp_hdr = rte_pktmbuf_mtod_offset(m, struct tcp_hdr *,
                                        inner_l3_offset + m->l3_len);
                        tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
                                        ol_flags);
                }
        }

        return 0;
}

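/*
 * A minimal TX-path usage sketch (assumes "m" carries a TCP/IPv4 packet
 * with l2_len, l3_len and l4_len set and headers in the first segment;
 * "port_id" and "queue_id" are placeholders for a configured device):
 *
 *   m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *   if (rte_net_intel_cksum_flags_prepare(m, m->ol_flags) != 0)
 *       rte_pktmbuf_free(m);
 *   else
 *       rte_eth_tx_burst(port_id, queue_id, &m, 1);
 */
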
/**
 * Prepare the pseudo header checksum.
 *
 * Same as rte_net_intel_cksum_flags_prepare(), using the offload flags
 * already set in m->ol_flags.
 *
 * @param m
 *   The packet mbuf to be fixed.
 * @return
 *   0 if the checksum was initialized properly, or -ENOTSUP if the
 *   headers are fragmented (debug builds).
 */
static inline int
rte_net_intel_cksum_prepare(struct rte_mbuf *m)
{
        return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}

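/*
 * A minimal sketch of how a driver tx_prepare callback might use this
 * helper ("pkts" and "nb_pkts" stand for the callback's burst
 * arguments):
 *
 *   uint16_t i;
 *
 *   for (i = 0; i < nb_pkts; i++) {
 *       int ret = rte_net_intel_cksum_prepare(pkts[i]);
 *
 *       if (ret != 0) {
 *           rte_errno = -ret;
 *           break;
 *       }
 *   }
 *   return i;
 */
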
#ifdef __cplusplus
}
#endif

#endif /* _RTE_NET_PTYPE_H_ */