34 #ifndef _RTE_IP_FRAG_H_
35 #define _RTE_IP_FRAG_H_
93 TAILQ_ENTRY(ip_frag_pkt) lru;
94 struct ip_frag_key key;
/*
 * Capacity of struct rte_ip_frag_death_row: the number of packet (mbuf)
 * slots that can be queued for deferred freeing via
 * rte_ip_frag_free_death_row().
 */
#define IP_FRAG_DEATH_ROW_LEN 32
105 struct rte_ip_frag_death_row {
135 __extension__
struct ip_frag_pkt
pkt[0];
/*
 * Bit layout of the 16-bit frag_data word of the IPv6 fragment extension
 * header (RFC 8200, section 4.5): bit 0 is the More-Fragments (MF) flag,
 * bits 3-15 carry the 13-bit fragment offset; bits 1-2 are reserved.
 */
#define RTE_IPV6_EHDR_MF_SHIFT  0
#define RTE_IPV6_EHDR_MF_MASK   1
#define RTE_IPV6_EHDR_FO_SHIFT  3
/* Fragment-offset bits: ~((1 << 3) - 1) == 0xfff8 when truncated to 16 bits. */
#define RTE_IPV6_EHDR_FO_MASK   (~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1))

/* All frag_data bits that carry meaning (MF flag plus offset field). */
#define RTE_IPV6_FRAG_USED_MASK \
	(RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK)

/* Extract the MF flag / the fragment offset value from frag_data. */
#define RTE_IPV6_GET_MF(x)      ((x) & RTE_IPV6_EHDR_MF_MASK)
#define RTE_IPV6_GET_FO(x)      ((x) >> RTE_IPV6_EHDR_FO_SHIFT)

/* Compose frag_data from a pre-shifted fragment offset and an MF flag. */
#define RTE_IPV6_SET_FRAG_DATA(fo, mf) \
	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))
153 struct ipv6_extension_fragment {
158 } __attribute__((__packed__));
221 uint16_t nb_pkts_out,
250 struct ipv6_extension_fragment *frag_hdr);
263 static inline struct ipv6_extension_fragment *
266 if (hdr->
proto == IPPROTO_FRAGMENT) {
267 return (
struct ipv6_extension_fragment *) ++hdr;
298 uint16_t nb_pkts_out, uint16_t mtu_size,
335 uint16_t flag_offset, ip_flag, ip_ofs;
338 ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
339 ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
341 return ip_flag != 0 || ip_ofs != 0;
static void rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr)
static struct ipv6_extension_fragment * rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr)
struct ip_frag_tbl_stat stat
__extension__ struct ip_frag_pkt pkt[0]
static uint16_t rte_be_to_cpu_16(uint16_t x)
void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr, uint32_t prefetch)
int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t nb_pkts_out, uint16_t mtu_size, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect)
struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries, uint32_t max_entries, uint64_t max_cycles, int socket_id)
TAILQ_HEAD(rte_driver_list, rte_driver)
#define IP_FRAG_DEATH_ROW_LEN
#define __rte_cache_aligned
struct ip_frag_pkt * last
int32_t rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t nb_pkts_out, uint16_t mtu_size, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect)
static int rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr *hdr)
void rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
struct rte_mbuf * rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)