#ifndef _RTE_IP_FRAG_H_
#define _RTE_IP_FRAG_H_

#include <stdint.h>
#include <stdio.h>

#include <sys/queue.h>

#include <rte_config.h>
#include <rte_ip.h>
#include <rte_byteorder.h>

struct rte_mbuf;
struct rte_mempool;

/** @internal Fragmented packet to reassemble. */
struct ip_frag_pkt {
	TAILQ_ENTRY(ip_frag_pkt) lru;   /**< LRU list entry */
	struct ip_frag_key key;         /**< fragmentation key */
	/* ... */
	struct ip_frag frags[IP_MAX_FRAG_NUM]; /**< fragments */
} __rte_cache_aligned;
#define IP_FRAG_DEATH_ROW_LEN 32 /**< death row size (in packets) */

/* death row size in mbufs */
#define IP_FRAG_DEATH_ROW_MBUF_LEN \
	(IP_FRAG_DEATH_ROW_LEN * (IP_MAX_FRAG_NUM + 1))

/** mbuf death row (packets to be freed) */
struct rte_ip_frag_death_row {
	uint32_t cnt; /**< number of mbufs currently on death row */
	struct rte_mbuf *row[IP_FRAG_DEATH_ROW_MBUF_LEN];
	/**< mbufs to be freed */
};
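/*
 * Sizing note (illustrative, not from the original header): with an assumed
 * build-time setting of RTE_LIBRTE_IP_FRAG_MAX_FRAG = 4, IP_MAX_FRAG_NUM is 4
 * and the death row can hold up to
 *
 *	IP_FRAG_DEATH_ROW_MBUF_LEN = 32 * (4 + 1) = 160
 *
 * mbuf pointers: each of the 32 packet slots may contribute its fragments
 * plus one extra mbuf.
 */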
TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */

/** fragmentation table */
struct rte_ip_frag_tbl {
	/* ... */
	struct ip_frag_pkt *last;   /**< last used entry */
	struct ip_pkt_list lru;     /**< LRU list for table entries */
	/* ... */
	__extension__ struct ip_frag_pkt pkt[0]; /**< hash table */
};
/*
 * Fragment Offset (FO) and More Fragments (MF) flag, as carried in the
 * frag_data field of the IPv6 fragment extension header.
 */
#define RTE_IPV6_EHDR_MF_SHIFT	0
#define RTE_IPV6_EHDR_MF_MASK	1
#define RTE_IPV6_EHDR_FO_SHIFT	3
#define RTE_IPV6_EHDR_FO_MASK	(~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1))
#define RTE_IPV6_EHDR_FO_ALIGN	(1 << RTE_IPV6_EHDR_FO_SHIFT)

#define RTE_IPV6_FRAG_USED_MASK \
	(RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK)

#define RTE_IPV6_GET_MF(x)	((x) & RTE_IPV6_EHDR_MF_MASK)
#define RTE_IPV6_GET_FO(x)	((x) >> RTE_IPV6_EHDR_FO_SHIFT)

#define RTE_IPV6_SET_FRAG_DATA(fo, mf) \
	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))

/** IPv6 fragment extension header. */
struct ipv6_extension_fragment {
	uint8_t next_header;  /**< Next header type */
	uint8_t reserved;     /**< Reserved */
	rte_be16_t frag_data; /**< All fragmentation data */
	rte_be32_t id;        /**< Packet ID */
} __attribute__((__packed__));
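/*
 * Worked example (illustrative, not from the original header): frag_data
 * keeps the fragment offset in its upper bits (in 8-byte units on the wire)
 * and the More Fragments flag in bit 0; the macros operate on the host-order
 * value. For a fragment starting at byte offset 1496 with more fragments to
 * follow:
 *
 *	uint16_t fo = 1496;  // must be a multiple of RTE_IPV6_EHDR_FO_ALIGN (8)
 *	uint16_t fd = RTE_IPV6_SET_FRAG_DATA(fo, 1);  // == 1497
 *	// RTE_IPV6_GET_FO(fd) == 187   (1496 / 8, offset in 8-byte units)
 *	// RTE_IPV6_GET_MF(fd) == 1
 */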
/**
 * IPv6 fragmentation: splits pkt_in into fragments no larger than mtu_size,
 * placing them into pkts_out.
 *
 * @return the number of fragments placed into pkts_out on success,
 *   otherwise (-1) * errno.
 */
int32_t
rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect);
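/*
 * Usage sketch (illustrative, not from the original header): fragmenting one
 * mbuf to a 1500-byte MTU. "direct_pool" and "indirect_pool" are assumed to
 * be mempools created elsewhere with rte_pktmbuf_pool_create().
 *
 *	struct rte_mbuf *frags[IP_MAX_FRAG_NUM];
 *	int32_t n = rte_ipv6_fragment_packet(m, frags, RTE_DIM(frags),
 *			1500, direct_pool, indirect_pool);
 *	if (n < 0)
 *		rte_pktmbuf_free(m);  // could not fragment, drop the packet
 *	// otherwise transmit frags[0..n-1]; the caller still owns pkt_in
 */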
/**
 * IPv6 reassembly: processes one fragment and tries to reassemble the packet
 * it belongs to. Entries that time out are moved to the death row dr.
 *
 * @return a pointer to the reassembled packet once all fragments have been
 *   collected, or NULL if the packet is not yet complete (or on error).
 */
struct rte_mbuf *
rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr,
	struct rte_mbuf *mb, uint64_t tms,
	struct rte_ipv6_hdr *ip_hdr,
	struct ipv6_extension_fragment *frag_hdr);
/**
 * Return a pointer to the packet's IPv6 fragment extension header if present,
 * otherwise NULL. Assumes the fragment header, when present, directly follows
 * the fixed IPv6 header.
 */
static inline struct ipv6_extension_fragment *
rte_ipv6_frag_get_ipv6_fragment_header(struct rte_ipv6_hdr *hdr)
{
	if (hdr->proto == IPPROTO_FRAGMENT)
		return (struct ipv6_extension_fragment *) ++hdr;
	else
		return NULL;
}
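/*
 * Usage sketch (illustrative, not from the original header): driving IPv6
 * reassembly from a receive loop. "tbl" and "death_row" are assumed to have
 * been set up elsewhere; the mbuf's l2_len/l3_len must already be valid.
 *
 *	struct rte_ipv6_hdr *ip6 = rte_pktmbuf_mtod_offset(m,
 *			struct rte_ipv6_hdr *, m->l2_len);
 *	struct ipv6_extension_fragment *fh =
 *			rte_ipv6_frag_get_ipv6_fragment_header(ip6);
 *	if (fh != NULL) {
 *		m = rte_ipv6_frag_reassemble_packet(tbl, &death_row,
 *				m, rte_rdtsc(), ip6, fh);
 *		if (m == NULL)
 *			return;  // not all fragments collected yet
 *		// m now holds the reassembled packet
 *	}
 */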
/**
 * IPv4 fragmentation: splits pkt_in into fragments no larger than mtu_size,
 * placing them into pkts_out.
 *
 * @return the number of fragments placed into pkts_out on success,
 *   otherwise (-1) * errno.
 */
int32_t
rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out, uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect);
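/*
 * Sizing sketch (illustrative, not from the original header): an upper bound
 * on the number of output slots needed for a given packet and MTU. For a
 * 4000-byte IPv4 packet with a 20-byte header and a 1500-byte MTU, each
 * fragment carries at most (1500 - 20) rounded down to a multiple of 8
 * = 1480 payload bytes, so 3980 payload bytes need ceil(3980 / 1480) = 3
 * fragments.
 *
 *	uint16_t payload = 4000 - 20;
 *	uint16_t per_frag = (1500 - 20) & ~7;  // offsets are in 8-byte units
 *	uint16_t nfrags = (payload + per_frag - 1) / per_frag;  // == 3
 */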
/**
 * IPv4 reassembly: processes one fragment and tries to reassemble the packet
 * it belongs to. Entries that time out are moved to the death row dr.
 *
 * @return a pointer to the reassembled packet once all fragments have been
 *   collected, or NULL if the packet is not yet complete (or on error).
 */
struct rte_mbuf *
rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr,
	struct rte_mbuf *mb, uint64_t tms,
	struct rte_ipv4_hdr *ip_hdr);

/**
 * Check whether an IPv4 packet is fragmented: true if either the More
 * Fragments flag is set or the fragment offset is non-zero.
 */
static inline int
rte_ipv4_frag_pkt_is_fragmented(const struct rte_ipv4_hdr *hdr)
{
	uint16_t flag_offset, ip_flag, ip_ofs;

	flag_offset = rte_be_to_cpu_16(hdr->fragment_offset);
	ip_ofs = (uint16_t)(flag_offset & RTE_IPV4_HDR_OFFSET_MASK);
	ip_flag = (uint16_t)(flag_offset & RTE_IPV4_HDR_MF_FLAG);

	return ip_flag != 0 || ip_ofs != 0;
}
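/*
 * Usage sketch (illustrative, not from the original header): a typical IPv4
 * receive path. "tbl" and "death_row" are assumed to be set up elsewhere.
 *
 *	struct rte_ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m,
 *			struct rte_ipv4_hdr *, m->l2_len);
 *	if (rte_ipv4_frag_pkt_is_fragmented(ip)) {
 *		m->l3_len = sizeof(*ip);  // assuming no IPv4 options
 *		m = rte_ipv4_frag_reassemble_packet(tbl, &death_row,
 *				m, rte_rdtsc(), ip);
 *		if (m == NULL)
 *			return;  // datagram not complete yet
 *	}
 */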
/**
 * Create a new IP fragmentation table. bucket_entries is the hash
 * associativity, max_entries bounds how many reassembly contexts the table
 * may hold, and max_cycles is the per-packet TTL in CPU cycles.
 *
 * @return a pointer to the new table, or NULL on error.
 */
struct rte_ip_frag_tbl *
rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
	uint32_t max_entries, uint64_t max_cycles, int socket_id);

/**
 * Free an allocated IP fragmentation table.
 */
void
rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl);

/**
 * Free the mbufs on a given death row.
 *
 * @param prefetch
 *   How many buffers to prefetch before freeing.
 */
void
rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
	uint32_t prefetch);

/**
 * Delete expired fragments from the fragmentation table, moving them to the
 * death row dr.
 */
__rte_experimental
void
rte_frag_table_del_expired_entries(struct rte_ip_frag_tbl *tbl,
	struct rte_ip_frag_death_row *dr, uint64_t tms);

/**
 * Dump fragmentation table statistics to a file.
 */
void
rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl);
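/*
 * Lifecycle sketch (illustrative, not from the original header): creating a
 * table sized for 4K reassembly contexts with roughly a 1-second TTL, then
 * periodically flushing the death row from the polling loop.
 *
 *	uint64_t ttl = rte_get_tsc_hz();  // ~1 second, in TSC cycles
 *	struct rte_ip_frag_tbl *tbl = rte_ip_frag_table_create(
 *			4096, 16, 4096, ttl, rte_socket_id());
 *	struct rte_ip_frag_death_row death_row = { .cnt = 0 };
 *
 *	// per burst, after calling the reassembly functions above:
 *	rte_ip_frag_free_death_row(&death_row, 3);
 *
 *	// at shutdown:
 *	rte_ip_frag_table_statistics_dump(stdout, tbl);
 *	rte_ip_frag_table_destroy(tbl);
 */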