DPDK 25.07.0
rte_net.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 */

#ifndef _RTE_NET_PTYPE_H_
#define _RTE_NET_PTYPE_H_

#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Header lengths associated with a packet, filled by rte_net_get_ptype().
 */
struct rte_net_hdr_lens {
	uint8_t l2_len;
	/* Outer_L4_len + ... + inner L2_len for tunneling pkt. */
	uint8_t inner_l2_len;
	uint16_t l3_len;
	uint16_t inner_l3_len;
	/* Protocol header of tunnel packets */
	uint16_t tunnel_len;
	uint8_t l4_len;
	uint8_t inner_l4_len;
};

/*
 * Skip IPv6 extension headers. @p proto is the protocol field of the fixed
 * IPv6 header and @p *off the offset of the first byte following that header.
 * On return, *off points to the first byte of the next layer and *frag is set
 * to 1 if a fragment extension header was seen. Returns the final protocol
 * value, or -1 on a parsing error.
 */
int
rte_net_skip_ip6_ext(uint16_t proto, const struct rte_mbuf *m, uint32_t *off,
	int *frag);

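/*
 * A minimal usage sketch (not part of rte_net.h; the example_* name is
 * illustrative and <rte_ether.h> is assumed to be included as well): for a
 * packet that starts with Ethernet followed directly by IPv6, walk past any
 * extension headers to find the L4 protocol and its offset.
 */
static inline int
example_ipv6_l4_proto(const struct rte_mbuf *m, uint32_t *l4_off)
{
	const struct rte_ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(m,
			const struct rte_ipv6_hdr *, sizeof(struct rte_ether_hdr));
	uint32_t off = sizeof(struct rte_ether_hdr) + sizeof(struct rte_ipv6_hdr);
	int frag = 0;
	/* Start from the protocol field of the fixed IPv6 header. */
	int proto = rte_net_skip_ip6_ext(ip6->proto, m, &off, &frag);

	if (proto < 0)
		return -1;	/* truncated or malformed extension chain */
	*l4_off = off;		/* first byte of the L4 header */
	return proto;		/* e.g. IPPROTO_TCP, IPPROTO_UDP */
}
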
/*
 * Parse an mbuf in software and return its packet type as RTE_PTYPE_* flags,
 * limited to the layers requested in @p layers (e.g. RTE_PTYPE_L2_MASK |
 * RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK). If @p hdr_lens is not NULL, it is
 * filled with the lengths of the parsed headers.
 */
uint32_t rte_net_get_ptype(const struct rte_mbuf *m,
	struct rte_net_hdr_lens *hdr_lens, uint32_t layers);

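/*
 * A minimal Rx-side sketch (not part of rte_net.h; the example_* name is
 * illustrative): classify a received packet in software and use the reported
 * header lengths to locate the TCP header. The RTE_PTYPE_* constants come
 * from rte_mbuf_ptype.h, pulled in via rte_mbuf.h.
 */
static inline void
example_classify_rx(struct rte_mbuf *m)
{
	struct rte_net_hdr_lens hdr_lens;
	uint32_t ptype = rte_net_get_ptype(m, &hdr_lens,
			RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK);

	if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP) {
		struct rte_tcp_hdr *tcp = rte_pktmbuf_mtod_offset(m,
				struct rte_tcp_hdr *,
				hdr_lens.l2_len + hdr_lens.l3_len);
		/* e.g. inspect ports or flags here */
		(void)tcp;
	}
}
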
static inline int
rte_net_intel_cksum_flags_prepare(struct rte_mbuf *m, uint64_t ol_flags)
{
	const uint64_t inner_requests = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK |
		RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_UDP_SEG;
	const uint64_t outer_requests = RTE_MBUF_F_TX_OUTER_IP_CKSUM |
		RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
	/* Initialise ipv4_hdr to avoid false positive compiler warnings. */
	struct rte_ipv4_hdr *ipv4_hdr = NULL;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint64_t inner_l3_offset = m->l2_len;

	/*
	 * Does the packet request any of the supported offloads?
	 * The early return mainly avoids the fragmented-header check
	 * when no offloads are requested.
	 */
	if (!(ol_flags & (inner_requests | outer_requests)))
		return 0;

	if (ol_flags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IPV6)) {
		inner_l3_offset += m->outer_l2_len + m->outer_l3_len;
		/*
		 * Prepare the outer IPv4 header checksum by setting it to 0,
		 * so that the hardware NIC can compute it.
		 */
		if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
			ipv4_hdr = rte_pktmbuf_mtod_offset(m,
					struct rte_ipv4_hdr *, m->outer_l2_len);
			ipv4_hdr->hdr_checksum = 0;
		}
		if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM || ol_flags & inner_requests) {
			if (ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) {
				ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
					m->outer_l2_len);
				udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
					m->outer_l3_len);
				if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
					udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
						m->ol_flags);
				else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
					udp_hdr->dgram_cksum = 0;
			} else {
				ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
					m->outer_l2_len);
				udp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_udp_hdr *,
					m->outer_l2_len + m->outer_l3_len);
				if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)
					udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
						m->ol_flags);
				else if (ipv6_hdr->proto == IPPROTO_UDP)
					udp_hdr->dgram_cksum = 0;
			}
		}
	}

	/*
	 * Check if headers are fragmented.
	 * The check could be less strict depending on which offloads are
	 * requested and which headers are used, but keep it simple.
	 */
	if (unlikely(rte_pktmbuf_data_len(m) <
			inner_l3_offset + m->l3_len + m->l4_len))
		return -ENOTSUP;

	if (ol_flags & RTE_MBUF_F_TX_IPV4) {
		ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
				inner_l3_offset);

		if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			ipv4_hdr->hdr_checksum = 0;
	}

	if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_UDP_CKSUM ||
			(ol_flags & RTE_MBUF_F_TX_UDP_SEG)) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			udp_hdr = (struct rte_udp_hdr *)((char *)ipv4_hdr +
					m->l3_len);
			udp_hdr->dgram_cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
					ol_flags);
		} else {
			ipv6_hdr = rte_pktmbuf_mtod_offset(m,
				struct rte_ipv6_hdr *, inner_l3_offset);
			/* non-TSO udp */
			udp_hdr = rte_pktmbuf_mtod_offset(m,
				struct rte_udp_hdr *,
				inner_l3_offset + m->l3_len);
			udp_hdr->dgram_cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
					ol_flags);
		}
	} else if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) == RTE_MBUF_F_TX_TCP_CKSUM ||
			(ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
		if (ol_flags & RTE_MBUF_F_TX_IPV4) {
			/* non-TSO tcp or TSO */
			tcp_hdr = (struct rte_tcp_hdr *)((char *)ipv4_hdr +
					m->l3_len);
			tcp_hdr->cksum = rte_ipv4_phdr_cksum(ipv4_hdr,
					ol_flags);
		} else {
			ipv6_hdr = rte_pktmbuf_mtod_offset(m,
				struct rte_ipv6_hdr *, inner_l3_offset);
			/* non-TSO tcp or TSO */
			tcp_hdr = rte_pktmbuf_mtod_offset(m,
				struct rte_tcp_hdr *,
				inner_l3_offset + m->l3_len);
			tcp_hdr->cksum = rte_ipv6_phdr_cksum(ipv6_hdr,
					ol_flags);
		}
	}

	return 0;
}

/*
 * Convenience wrapper around rte_net_intel_cksum_flags_prepare() that takes
 * the offload flags from m->ol_flags.
 */
static inline int
rte_net_intel_cksum_prepare(struct rte_mbuf *m)
{
	return rte_net_intel_cksum_flags_prepare(m, m->ol_flags);
}

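/*
 * A minimal Tx-side sketch (not part of rte_net.h; the example_* name and the
 * caller-supplied header lengths are assumptions): request IPv4 and TCP
 * checksum offload on an Ethernet/IPv4/TCP packet, then let the helper clear
 * the IPv4 checksum and seed the TCP checksum with the pseudo-header value,
 * as Intel NICs expect, before handing the mbuf to rte_eth_tx_burst().
 */
static inline int
example_prepare_tx_cksum(struct rte_mbuf *m, uint8_t l2_len, uint8_t l3_len)
{
	m->l2_len = l2_len;	/* e.g. 14 for plain Ethernet */
	m->l3_len = l3_len;	/* e.g. 20 for an IPv4 header without options */
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
			RTE_MBUF_F_TX_TCP_CKSUM;

	/* Returns 0 on success or -ENOTSUP if the headers are fragmented. */
	return rte_net_intel_cksum_prepare(m);
}
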
#ifdef __cplusplus
}
#endif

#endif /* _RTE_NET_PTYPE_H_ */