DPDK 19.08.2
rte_ip_frag.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _RTE_IP_FRAG_H_
#define _RTE_IP_FRAG_H_

#ifdef __cplusplus
extern "C" {
#endif

#include <stdint.h>
#include <stdio.h>

#include <rte_config.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

struct rte_mbuf;

enum {
	IP_MAX_FRAG_NUM = RTE_LIBRTE_IP_FRAG_MAX_FRAG, /**< maximum number of fragments per packet */
};

/** @internal single fragment of a datagram under reassembly */
struct ip_frag {
	uint16_t ofs;         /**< offset into the packet, in bytes */
	uint16_t len;         /**< length of the fragment */
	struct rte_mbuf *mb;  /**< fragment mbuf */
};

/** @internal <src addr, dst addr, id> tuple that identifies a fragmented datagram */
struct ip_frag_key {
	uint64_t src_dst[4];  /**< src/dst addresses; only the first 8 bytes are used for IPv4 */
	RTE_STD_C11
	union {
		uint64_t id_key_len; /**< combined for easy fetch */
		__extension__
		struct {
			uint32_t id;       /**< packet id */
			uint32_t key_len;  /**< src/dst key length */
		};
	};
};

/** @internal packet being reassembled from its fragments */
struct ip_frag_pkt {
	TAILQ_ENTRY(ip_frag_pkt) lru;  /**< LRU list entry */
	struct ip_frag_key key;        /**< fragmentation key */
	uint64_t start;                /**< creation timestamp */
	uint32_t total_size;           /**< expected reassembled size */
	uint32_t frag_size;            /**< size of fragments received so far */
	uint32_t last_idx;             /**< index of next entry to fill */
	struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
} __rte_cache_aligned;

#define IP_FRAG_DEATH_ROW_LEN 32

/* death row size in mbufs */
#define IP_FRAG_DEATH_ROW_MBUF_LEN (IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1))

/** mbuf death row: packets queued for freeing. The reassembly functions do
 *  not free mbufs directly; they park them here and the application drains
 *  the row with rte_ip_frag_free_death_row(). */
struct rte_ip_frag_death_row {
	uint32_t cnt;                                      /**< number of mbufs currently on the row */
	struct rte_mbuf *row[IP_FRAG_DEATH_ROW_MBUF_LEN];  /**< mbufs to be freed */
};

TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal list of packets under reassembly */

/** fragmentation table statistics */
struct ip_frag_tbl_stat {
	uint64_t find_num;      /**< total number of find/insert attempts */
	uint64_t add_num;       /**< number of add operations */
	uint64_t del_num;       /**< number of delete operations */
	uint64_t reuse_num;     /**< number of reuse (del/add) operations */
	uint64_t fail_total;    /**< total number of add failures */
	uint64_t fail_nospace;  /**< number of 'no space' add failures */
} __rte_cache_aligned;

/** fragmentation/reassembly table */
struct rte_ip_frag_tbl {
	uint64_t max_cycles;      /**< ttl for table entries */
	uint32_t entry_mask;      /**< hash value mask */
	uint32_t max_entries;     /**< max entries allowed */
	uint32_t use_entries;     /**< entries in use */
	uint32_t bucket_entries;  /**< hash associativity */
	uint32_t nb_entries;      /**< total size of the table */
	uint32_t nb_buckets;      /**< number of buckets */
	struct ip_frag_pkt *last;      /**< last used entry */
	struct ip_pkt_list lru;        /**< LRU list for table entries */
	struct ip_frag_tbl_stat stat;  /**< statistics counters */
	__extension__ struct ip_frag_pkt pkt[0]; /**< hash table of packets */
};

#define RTE_IPV6_EHDR_MF_SHIFT 0
#define RTE_IPV6_EHDR_MF_MASK 1
#define RTE_IPV6_EHDR_FO_SHIFT 3
#define RTE_IPV6_EHDR_FO_MASK (~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1))
#define RTE_IPV6_EHDR_FO_ALIGN (1 << RTE_IPV6_EHDR_FO_SHIFT)

#define RTE_IPV6_FRAG_USED_MASK \
	(RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK)

#define RTE_IPV6_GET_MF(x) ((x) & RTE_IPV6_EHDR_MF_MASK)
#define RTE_IPV6_GET_FO(x) ((x) >> RTE_IPV6_EHDR_FO_SHIFT)

#define RTE_IPV6_SET_FRAG_DATA(fo, mf) \
	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))

/** IPv6 fragment extension header */
struct ipv6_extension_fragment {
	uint8_t next_header;  /**< next header type */
	uint8_t reserved;     /**< reserved, set to zero */
	uint16_t frag_data;   /**< fragment offset, reserved bits and MF flag */
	uint32_t id;          /**< packet identification */
} __attribute__((__packed__));

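/*
 * Illustrative sketch (not part of the original header): decoding frag_data
 * with the macros above.  frag_data packs the 13-bit fragment offset (in
 * 8-byte units) together with reserved bits and the "more fragments" flag,
 * so after byte-order conversion the pieces can be pulled apart as shown.
 * "fh" is assumed to point at a struct ipv6_extension_fragment.
 *
 *	uint16_t fd = rte_be_to_cpu_16(fh->frag_data);
 *	uint16_t byte_ofs = RTE_IPV6_GET_FO(fd) * RTE_IPV6_EHDR_FO_ALIGN;
 *	uint16_t more_frags = RTE_IPV6_GET_MF(fd);
 *	uint16_t repacked = RTE_IPV6_SET_FRAG_DATA(byte_ofs, more_frags);
 *
 * RTE_IPV6_SET_FRAG_DATA() takes the byte offset (a multiple of 8) plus the
 * MF bit and rebuilds the host-order frag_data value.
 */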
/** Create a new IP fragmentation/reassembly table. bucket_num and
 *  bucket_entries define the hash layout, max_entries the maximum number of
 *  datagrams under reassembly at once, max_cycles the entry TTL in TSC
 *  cycles and socket_id the NUMA node for the allocation.
 *  Returns NULL on error. */
struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num,
		uint32_t bucket_entries, uint32_t max_entries,
		uint64_t max_cycles, int socket_id);

/** Free an allocated IP fragmentation table. */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl);

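/*
 * Usage sketch (not part of the original header): create a reassembly table
 * for up to "max_flows" datagrams under reassembly at once, with a fragment
 * lifetime of "ttl_ms" milliseconds.  The bucket associativity of 16 and the
 * cycles conversion are assumptions modelled on DPDK's ip_reassembly sample
 * application; rte_get_tsc_hz() and MS_PER_S come from rte_cycles.h and
 * rte_socket_id() from rte_lcore.h.
 *
 *	uint64_t frag_cycles = (rte_get_tsc_hz() + MS_PER_S - 1) /
 *			MS_PER_S * ttl_ms;
 *	struct rte_ip_frag_tbl *tbl = rte_ip_frag_table_create(max_flows,
 *			16, max_flows, frag_cycles, rte_socket_id());
 *	if (tbl == NULL)
 *		return -1;
 *
 * and on shutdown:
 *
 *	rte_ip_frag_table_destroy(tbl);
 */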
/** Fragment an IPv6 packet (mbuf data starting at the IPv6 header) into
 *  pieces of at most mtu_size bytes, writing up to nb_pkts_out fragments
 *  into pkts_out.  Returns the number of fragments created, or a negative
 *  error code. */
int32_t
rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out,
		uint16_t mtu_size,
		struct rte_mempool *pool_direct,
		struct rte_mempool *pool_indirect);

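/*
 * Usage sketch (not part of the original header): fragment one IPv6 packet.
 * The input mbuf is assumed to start at the IPv6 header (L2 header already
 * removed).  MAX_OUT_FRAGS is a placeholder that must be large enough for
 * pkt_len / (mtu - headers) fragments; "direct_pool"/"indirect_pool" are the
 * application's mbuf pools.
 *
 *	struct rte_mbuf *frags[MAX_OUT_FRAGS];
 *	int32_t nb = rte_ipv6_fragment_packet(m, frags, MAX_OUT_FRAGS,
 *			mtu, direct_pool, indirect_pool);
 *
 * On failure nb is a negative error code and no output mbufs are produced;
 * on success frags[0..nb-1] hold the fragments, each needing a fresh L2
 * header before transmission.
 */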
/** Process one IPv6 fragment; l2_len/l3_len of the mbuf must be set.
 *  Returns the reassembled packet, or NULL if more fragments are still
 *  needed or an error occurred. */
struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
		struct rte_ip_frag_death_row *dr,
		struct rte_mbuf *mb, uint64_t tms, struct rte_ipv6_hdr *ip_hdr,
		struct ipv6_extension_fragment *frag_hdr);

/** Return a pointer to the fragment extension header if it immediately
 *  follows the fixed IPv6 header, otherwise NULL. */
static inline struct ipv6_extension_fragment *
rte_ipv6_frag_get_ipv6_fragment_header(struct rte_ipv6_hdr *hdr)
{
	if (hdr->proto == IPPROTO_FRAGMENT) {
		return (struct ipv6_extension_fragment *) ++hdr;
	}
	else
		return NULL;
}

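/*
 * Usage sketch (not part of the original header): push a received IPv6
 * fragment through the reassembly table.  "tbl" and "dr" are the
 * application's table and death-row pointers, and m->l2_len is assumed to
 * be set already; rte_pktmbuf_mtod_offset() needs rte_mbuf.h and
 * rte_rdtsc() needs rte_cycles.h.  Note the helper above only finds a
 * fragment header that directly follows the fixed IPv6 header.
 *
 *	struct rte_ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(m,
 *			struct rte_ipv6_hdr *, m->l2_len);
 *	struct ipv6_extension_fragment *fh =
 *			rte_ipv6_frag_get_ipv6_fragment_header(ip6);
 *	if (fh != NULL) {
 *		m->l3_len = sizeof(*ip6) + sizeof(*fh);
 *		struct rte_mbuf *mo = rte_ipv6_frag_reassemble_packet(tbl,
 *				dr, m, rte_rdtsc(), ip6, fh);
 *		if (mo == NULL)
 *			return;
 *		m = mo;
 *	}
 *
 * A NULL return means the datagram is not yet complete and the mbuf is now
 * owned by the table; a non-NULL return is the reassembled packet.
 */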
/** Fragment an IPv4 packet (mbuf data starting at the IPv4 header) into
 *  pieces of at most mtu_size bytes, writing up to nb_pkts_out fragments
 *  into pkts_out.  Returns the number of fragments created, or a negative
 *  error code. */
int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out, uint16_t mtu_size,
		struct rte_mempool *pool_direct,
		struct rte_mempool *pool_indirect);

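/*
 * Usage sketch (not part of the original header): fragment an IPv4 packet
 * received with an Ethernet header, mirroring DPDK's ip_fragmentation
 * sample.  The caller frees the input mbuf afterwards; the fragments hold
 * their own (indirect) references to its data.  MAX_OUT_FRAGS and the pool
 * names are placeholders; rte_pktmbuf_adj()/rte_pktmbuf_free() need
 * rte_mbuf.h and struct rte_ether_hdr needs rte_ether.h.
 *
 *	rte_pktmbuf_adj(m, (uint16_t)sizeof(struct rte_ether_hdr));
 *	struct rte_mbuf *frags[MAX_OUT_FRAGS];
 *	int32_t nb = rte_ipv4_fragment_packet(m, frags, MAX_OUT_FRAGS,
 *			mtu, direct_pool, indirect_pool);
 *	rte_pktmbuf_free(m);
 *	if (nb < 0)
 *		return;
 *
 * Each of frags[0..nb-1] then gets a new L2 header prepended and is
 * transmitted as usual.
 */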
/** Process one IPv4 fragment; l2_len/l3_len of the mbuf must be set.
 *  Returns the reassembled packet, or NULL if more fragments are still
 *  needed or an error occurred. */
struct rte_mbuf *rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
		struct rte_ip_frag_death_row *dr,
		struct rte_mbuf *mb, uint64_t tms, struct rte_ipv4_hdr *ip_hdr);

/** Check whether an IPv4 header belongs to a fragmented packet
 *  (MF flag set or non-zero fragment offset). */
static inline int
rte_ipv4_frag_pkt_is_fragmented(const struct rte_ipv4_hdr *hdr)
{
	uint16_t flag_offset, ip_flag, ip_ofs;

	flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);
	ip_ofs = (uint16_t)(flag_offset & RTE_IPV4_HDR_OFFSET_MASK);
	ip_flag = (uint16_t)(flag_offset & RTE_IPV4_HDR_MF_FLAG);

	return ip_flag != 0 || ip_ofs != 0;
}

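/*
 * Usage sketch (not part of the original header): the usual RX-path pattern
 * is to test the header with rte_ipv4_frag_pkt_is_fragmented() and only
 * divert fragmented packets into the reassembly table.  "tbl" and "dr" are
 * application state, m->l2_len is assumed set, and using sizeof(*ip) for
 * l3_len assumes a header without IPv4 options.
 *
 *	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
 *			struct rte_ipv4_hdr *, m->l2_len);
 *	if (rte_ipv4_frag_pkt_is_fragmented(ip)) {
 *		m->l3_len = sizeof(*ip);
 *		struct rte_mbuf *mo = rte_ipv4_frag_reassemble_packet(tbl,
 *				dr, m, rte_rdtsc(), ip);
 *		if (mo == NULL)
 *			return;
 *		m = mo;
 *	}
 */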
/** Free all mbufs currently on the death row; "prefetch" is how many
 *  entries to prefetch ahead while freeing. */
void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch);

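/*
 * Usage note (not part of the original header): mbufs that the table decides
 * to drop (stale, overlapping or invalid fragments) are parked on the death
 * row rather than freed inside the reassembly calls, so the application must
 * drain it regularly, e.g. once per RX burst:
 *
 *	rte_ip_frag_free_death_row(&dr, PREFETCH_OFFSET);
 *
 * PREFETCH_OFFSET is a placeholder for the read-ahead distance; the
 * ip_reassembly sample uses a small constant such as 3.
 */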
/** Dump fragmentation table statistics to a file. */
void
rte_ip_frag_table_statistics_dump(FILE * f, const struct rte_ip_frag_tbl *tbl);

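/*
 * Usage sketch (not part of the original header): the statistics dump is
 * handy from a periodic stats routine or signal handler, e.g.
 *
 *	rte_ip_frag_table_statistics_dump(stdout, tbl);
 */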
/** Delete expired (timed-out) entries from the table, releasing their mbufs
 *  via the death row. */
__rte_experimental
void
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms);

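/*
 * Usage sketch (not part of the original header): applications that want
 * table entries reclaimed promptly, rather than waiting for LRU reuse, can
 * call the experimental expiry helper from a management loop and then drain
 * the death row as usual.  "tbl", "dr" and PREFETCH_OFFSET are placeholders.
 *
 *	rte_frag_table_del_expired_entries(tbl, &dr, rte_rdtsc());
 *	rte_ip_frag_free_death_row(&dr, PREFETCH_OFFSET);
 */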
#ifdef __cplusplus
}
#endif

#endif /* _RTE_IP_FRAG_H_ */