#include <arpa/inet.h>
#include <getopt.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/virtio_ring.h>
#include <signal.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/param.h>
#include <unistd.h>
#include "main.h"
#ifndef MAX_QUEUES
#define MAX_QUEUES 128
#endif
#define MAX_SUP_PORTS 1
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE  RTE_MBUF_DEFAULT_BUF_SIZE
#define BURST_TX_DRAIN_US 100	/* TX drain every ~100us */

#define BURST_RX_WAIT_US 15	/* Defines how long we wait between retries on RX */
#define BURST_RX_RETRIES 4	/* Number of retries on RX. */
#define JUMBO_FRAME_MAX_SIZE    0x2600
#define DEVICE_MAC_LEARNING 0
#define DEVICE_RX           1
#define DEVICE_SAFE_REMOVE  2
#define RTE_TEST_RX_DESC_DEFAULT 1024
#define RTE_TEST_TX_DESC_DEFAULT 512
#define INVALID_PORT_ID 0xFF
#define MAX_DEVICES 64
#define MAX_LONG_OPT_SZ 64
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;

/* Promiscuous mode */
static uint32_t promiscuous;

/* number of devices/queues to support */
static uint32_t num_queues = 0;
static uint32_t num_devices;

static struct rte_mempool *mbuf_pool;
static int mergeable;
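
/* Enable VM2VM communications. Disabled by default. */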
typedef enum {
    VM2VM_DISABLED = 0,
    VM2VM_SOFTWARE = 1,
    VM2VM_HARDWARE = 2,
    VM2VM_LAST
} vm2vm_type;
static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE;
static uint32_t enable_stats = 0;
static uint32_t enable_retry = 1;
static uint32_t enable_tx_csum;
static uint32_t enable_tso;
static int client_mode;
static int dequeue_zero_copy;
static int builtin_net_driver;
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
static char *socket_files;
static int nb_sockets;
/* Empty VMDQ configuration structure. Filled in programmatically. */
static struct rte_eth_conf vmdq_conf_default = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_VMDQ_ONLY,
		.split_hdr_size = 0,
		/*
		 * VLAN strip is necessary for 1G NICs such as I350;
		 * without it, IPv4 forwarding from one virtio dev to
		 * another in the guest can fail.
		 */
		.offloads = DEV_RX_OFFLOAD_VLAN_STRIP,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
			     DEV_TX_OFFLOAD_TCP_CKSUM |
			     DEV_TX_OFFLOAD_TCP_TSO),
	},
	.rx_adv_conf = {
		/*
		 * Overridden separately in code with appropriate values.
		 */
		.vmdq_rx_conf = {
			.nb_queue_pools = ETH_8_POOLS,
			.enable_default_pool = 0,
			.default_pool = 0,
			.nb_pool_maps = 0,
			.pool_map = {{0, 0},},
		},
	},
};
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; 
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
static uint16_t queues_per_pool;
const uint16_t vlan_tags[] = {
    1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
    1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
    1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
    1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
    1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
    1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
    1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
    1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
};
static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
 
static struct vhost_dev_tailq_list vhost_dev_list =
    TAILQ_HEAD_INITIALIZER(vhost_dev_list);
static struct lcore_info lcore_info[RTE_MAX_LCORE];
struct mbuf_table {
    unsigned len;
    unsigned txq_id;
	struct rte_mbuf *m_table[MAX_PKT_BURST];
};
struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
#define MBUF_TABLE_DRAIN_TSC    ((rte_get_tsc_hz() + US_PER_S - 1) \
                 / US_PER_S * BURST_TX_DRAIN_US)
#define VLAN_HLEN       4
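
/*
 * Builds up the correct configuration for VMDQ VLAN pool map
 * according to the pool & queue limits.
 */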
static inline int
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
	struct rte_eth_vmdq_rx_conf conf;
	struct rte_eth_vmdq_rx_conf *def_conf =
		&vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
	unsigned i;

	memset(&conf, 0, sizeof(conf));
	conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
	conf.nb_pool_maps = num_devices;
	conf.enable_loop_back = def_conf->enable_loop_back;

	for (i = 0; i < conf.nb_pool_maps; i++) {
		conf.pool_map[i].vlan_id = vlan_tags[i];
		conf.pool_map[i].pools = (1UL << i);
	}

	(void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
	(void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
		   sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));

	return 0;
}
static inline int
validate_num_devices(uint32_t max_nb_devices)
{
    if (num_devices > max_nb_devices) {
		RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
		return -1;
    }
    return 0;
}
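
/*
 * Initialises a given port using global settings and with the RX buffers
 * coming from the mbuf_pool passed as a parameter.
 */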
static inline int
port_init(uint16_t port)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf *rxconf;
	struct rte_eth_txconf *txconf;
    int16_t rx_rings, tx_rings;
    uint16_t rx_ring_size, tx_ring_size;
    int retval;
    uint16_t q;
    
	/* The max pool number from dev_info is used to validate the pool number from cmd line */
	rte_eth_dev_info_get(port, &dev_info);

	rxconf = &dev_info.default_rxconf;
	txconf = &dev_info.default_txconf;

	/* Configure the number of supported virtio devices based on VMDQ limits */
    num_devices = dev_info.max_vmdq_pools;
    rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
    tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
    
	/*
	 * With dequeue zero copy, the guest Tx used vring is updated only
	 * when the mbuf is freed, so nb_tx_desc must be small enough to hit
	 * the free threshold easily; otherwise the guest Tx vring may starve.
	 */
	if (dequeue_zero_copy)
		tx_ring_size = 64;

	tx_rings = (uint16_t)rte_lcore_count();
    retval = validate_num_devices(MAX_DEVICES);
    if (retval < 0)
        return retval;
    
    retval = get_eth_conf(&port_conf, num_devices);
    if (retval < 0)
        return retval;
    
	/* NIC queues are divided into pf queues and vmdq queues. */
	num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
    queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
    num_vmdq_queues = num_devices * queues_per_pool;
    num_queues = num_pf_queues + num_vmdq_queues;
    vmdq_queue_base = dev_info.vmdq_queue_base;
    vmdq_pool_base  = dev_info.vmdq_pool_base;
    printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
        num_pf_queues, num_devices, queues_per_pool);

	if (!rte_eth_dev_is_valid_port(port))
		return -1;

    rx_rings = (uint16_t)dev_info.max_rx_queues;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |=
			DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Configure ethernet device. */
	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n",
			port, strerror(-retval));
		return retval;
	}

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
		&tx_ring_size);
	if (retval != 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to adjust number of descriptors "
			"for port %u: %s.\n", port, strerror(-retval));
		return retval;
	}
	if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT) {
		RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size "
			"for Rx queues on port %u.\n", port);
		return -1;
	}
    
	/* Setup the queues. */
	rxconf->offloads = port_conf.rxmode.offloads;
	for (q = 0; q < rx_rings; q++) {
		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
						rte_eth_dev_socket_id(port),
						rxconf,
						mbuf_pool);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup rx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
	txconf->offloads = port_conf.txmode.offloads;
	for (q = 0; q < tx_rings; q++) {
		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
						rte_eth_dev_socket_id(port),
						txconf);
		if (retval < 0) {
			RTE_LOG(ERR, VHOST_PORT,
				"Failed to setup tx queue %u of port %u: %s.\n",
				q, port, strerror(-retval));
			return retval;
		}
	}
    
	/* Start the device. */
	retval = rte_eth_dev_start(port);
	if (retval < 0) {
		RTE_LOG(ERR, VHOST_PORT, "Failed to start port %u: %s\n",
			port, strerror(-retval));
		return retval;
	}
	if (promiscuous)
		rte_eth_promiscuous_enable(port);

	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);

	RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n",
		num_devices);
	RTE_LOG(INFO, VHOST_PORT,
		"Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
		" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
            port,
            vmdq_ports_eth_addr[port].addr_bytes[0],
            vmdq_ports_eth_addr[port].addr_bytes[1],
            vmdq_ports_eth_addr[port].addr_bytes[2],
            vmdq_ports_eth_addr[port].addr_bytes[3],
            vmdq_ports_eth_addr[port].addr_bytes[4],
            vmdq_ports_eth_addr[port].addr_bytes[5]);
    return 0;
}
static int
us_vhost_parse_socket_path(const char *q_arg)
{
    char *old;
    
    if (strnlen(q_arg, PATH_MAX) == PATH_MAX)
        return -1;
    old = socket_files;
    socket_files = realloc(socket_files, PATH_MAX * (nb_sockets + 1));
    if (socket_files == NULL) {
        free(old);
        return -1;
    }
    snprintf(socket_files + nb_sockets * PATH_MAX, PATH_MAX, "%s", q_arg);
    nb_sockets++;
    return 0;
}
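
/*
 * Parse the portmask provided at run time.
 */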
static int
parse_portmask(const char *portmask)
{
    char *end = NULL;
    unsigned long pm;
    errno = 0;
    
    pm = strtoul(portmask, &end, 16);
    if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;
    if (pm == 0)
        return -1;
    return pm;
}
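
/*
 * Parse num options at run time.
 */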
static int
parse_num_opt(const char *q_arg, uint32_t max_valid_value)
{
    char *end = NULL;
    unsigned long num;
    errno = 0;
    
    num = strtoul(q_arg, &end, 10);
    if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
        return -1;
    if (num > max_valid_value)
        return -1;
    return num;
}
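
/*
 * Display usage
 */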
static void
us_vhost_usage(const char *prgname)
{
	RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
    "       --vm2vm [0|1|2]\n"
    "       --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
    "       --socket-file <path>\n"
    "       --nb-devices ND\n"
    "       -p PORTMASK: Set mask for ports to be used by application\n"
    "       --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
    "       --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
    "       --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
    "       --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
    "       --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
    "       --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
    "       --socket-file: The path of the socket file.\n"
    "       --tx-csum [0|1] disable/enable TX checksum offload.\n"
    "       --tso [0|1] disable/enable TCP segment offload.\n"
    "       --client register a vhost-user socket as client mode.\n"
    "       --dequeue-zero-copy enables dequeue zero copy\n",
           prgname);
}
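
/*
 * Parse the arguments given in the command line of the application.
 */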
static int
us_vhost_parse_args(int argc, char **argv)
{
    int opt, ret;
    int option_index;
    unsigned i;
    const char *prgname = argv[0];
    static struct option long_option[] = {
        {"vm2vm", required_argument, NULL, 0},
        {"rx-retry", required_argument, NULL, 0},
        {"rx-retry-delay", required_argument, NULL, 0},
        {"rx-retry-num", required_argument, NULL, 0},
        {"mergeable", required_argument, NULL, 0},
        {"stats", required_argument, NULL, 0},
        {"socket-file", required_argument, NULL, 0},
        {"tx-csum", required_argument, NULL, 0},
        {"tso", required_argument, NULL, 0},
        {"client", no_argument, &client_mode, 1},
        {"dequeue-zero-copy", no_argument, &dequeue_zero_copy, 1},
        {"builtin-net-driver", no_argument, &builtin_net_driver, 1},
        {NULL, 0, 0, 0},
    };
    
    while ((opt = getopt_long(argc, argv, "p:P",
            long_option, &option_index)) != EOF) {
        switch (opt) {
        
		case 'p':
			ret = parse_portmask(optarg);
			if (ret <= 0) {
				RTE_LOG(INFO, VHOST_CONFIG,
					"Invalid portmask\n");
				us_vhost_usage(prgname);
				return -1;
			}
			enabled_port_mask = ret;
			break;
        case 'P':
            promiscuous = 1;
            break;
        case 0:
            
            if (!strncmp(long_option[option_index].name, "vm2vm",
                MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for "
                        "vm2vm [0|1|2]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else {
                    vm2vm_mode = (vm2vm_type)ret;
                }
            }
            
            if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for rx-retry [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else {
                    enable_retry = ret;
                }
            }
            
            if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for tx-csum [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else
                    enable_tx_csum = ret;
            }
            
            if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for tso [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else
                    enable_tso = ret;
            }
            
            if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for rx-retry-delay [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else {
                    burst_rx_delay_time = ret;
                }
            }
            
            if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for rx-retry-num [0-N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else {
                    burst_rx_retry_num = ret;
                }
            }
            
            if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, 1);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for mergeable [0|1]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else {
                    mergeable = !!ret;
                    if (ret) {
                        vmdq_conf_default.rxmode.offloads |=
                            DEV_RX_OFFLOAD_JUMBO_FRAME;
                        vmdq_conf_default.rxmode.max_rx_pkt_len
                            = JUMBO_FRAME_MAX_SIZE;
                    }
                }
            }
            
            if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
                ret = parse_num_opt(optarg, INT32_MAX);
                if (ret == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for stats [0..N]\n");
                    us_vhost_usage(prgname);
                    return -1;
                } else {
                    enable_stats = ret;
                }
            }
            
            if (!strncmp(long_option[option_index].name,
                        "socket-file", MAX_LONG_OPT_SZ)) {
                if (us_vhost_parse_socket_path(optarg) == -1) {
                    RTE_LOG(INFO, VHOST_CONFIG,
                        "Invalid argument for socket name (Max %d characters)\n",
                        PATH_MAX);
                    us_vhost_usage(prgname);
                    return -1;
                }
            }
            break;
            
        /* Invalid option - print options. */
        default:
            us_vhost_usage(prgname);
            return -1;
        }
    }
    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
        if (enabled_port_mask & (1 << i))
            ports[num_ports++] = i;
    }
	if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
		return -1;
	}
    }
    return 0;
}
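
/*
 * Update the global var NUM_PORTS and array PORTS according to the system
 * port count, and return the number of valid ports.
 */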
static unsigned check_ports_num(unsigned nb_ports)
{
    unsigned valid_num_ports = num_ports;
    unsigned portid;
    if (num_ports > nb_ports) {
		RTE_LOG(INFO, VHOST_PORT,
			"\nSpecified port number(%u) exceeds total system port number(%u)\n",
			num_ports, nb_ports);
		num_ports = nb_ports;
	}
	for (portid = 0; portid < num_ports; portid++) {
		if (!rte_eth_dev_is_valid_port(ports[portid])) {
			RTE_LOG(INFO, VHOST_PORT,
				"\nSpecified port ID(%u) is not valid\n",
                ports[portid]);
            ports[portid] = INVALID_PORT_ID;
            valid_num_ports--;
        }
    }
    return valid_num_ports;
}
static __rte_always_inline struct vhost_dev *
find_vhost_dev(struct ether_addr *mac)
{
    struct vhost_dev *vdev;
    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
		if (vdev->ready == DEVICE_RX &&
		    is_same_ether_addr(mac, &vdev->mac_address))
			return vdev;
    }
    return NULL;
}
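
/*
 * This function learns the MAC address of the device and registers this along
 * with a vlan tag to a VMDQ.
 */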
static int
link_vmdq(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	int i, ret;

	/* Learn MAC address of guest device from packet */
	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	if (find_vhost_dev(&pkt_hdr->s_addr)) {
		RTE_LOG(ERR, VHOST_DATA,
            "(%d) device is using a registered MAC!\n",
            vdev->vid);
        return -1;
    }
    
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		vdev->mac_address.addr_bytes[i] =
			pkt_hdr->s_addr.addr_bytes[i];

	/* vlan_tag currently uses the device_id. */
	vdev->vlan_tag = vlan_tags[vdev->vid];
    
        "(%d) mac %02x:%02x:%02x:%02x:%02x:%02x and vlan %d registered\n",
        vdev->vid,
        vdev->mac_address.addr_bytes[0], vdev->mac_address.addr_bytes[1],
        vdev->mac_address.addr_bytes[2], vdev->mac_address.addr_bytes[3],
        vdev->mac_address.addr_bytes[4], vdev->mac_address.addr_bytes[5],
        vdev->vlan_tag);
    
	/* Register the MAC address. */
	ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
				(uint32_t)vdev->vid + vmdq_pool_base);
	if (ret)
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) failed to add device MAC address to VMDQ\n",
			vdev->vid);
    
	rte_eth_dev_set_vlan_strip_on_queue(ports[0], vdev->vmdq_rx_q, 1);

	/* Set device as ready for RX. */
	vdev->ready = DEVICE_RX;
    return 0;
}
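
/*
 * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding
 * buffers to the RX queue before disabling RX on the device.
 */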
static inline void
unlink_vmdq(struct vhost_dev *vdev)
{
    unsigned i = 0;
    unsigned rx_count;
    struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
 
    if (vdev->ready == DEVICE_RX) {
        
		/* Clear MAC and VLAN settings */
		rte_eth_dev_mac_addr_remove(ports[0], &vdev->mac_address);
		for (i = 0; i < 6; i++)
			vdev->mac_address.addr_bytes[i] = 0;

		vdev->vlan_tag = 0;
        
		/* Clear out the receive buffers */
		rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);

		while (rx_count) {
			for (i = 0; i < rx_count; i++)
				rte_pktmbuf_free(pkts_burst[i]);

			rx_count = rte_eth_rx_burst(ports[0],
					(uint16_t)vdev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
		}
        vdev->ready = DEVICE_MAC_LEARNING;
    }
}
static __rte_always_inline void
virtio_xmit(struct vhost_dev *dst_vdev, struct vhost_dev *src_vdev,
	    struct rte_mbuf *m)
{
    uint16_t ret;
    if (builtin_net_driver) {
        ret = vs_enqueue_pkts(dst_vdev, VIRTIO_RXQ, &m, 1);
	} else {
		ret = rte_vhost_enqueue_burst(dst_vdev->vid, VIRTIO_RXQ, &m, 1);
	}

	if (enable_stats) {
		rte_atomic64_inc(&dst_vdev->stats.rx_total_atomic);
		rte_atomic64_add(&dst_vdev->stats.rx_atomic, ret);
		src_vdev->stats.tx_total++;
		src_vdev->stats.tx += ret;
	}
}
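
/*
 * Check if the packet destination MAC address is for a local device. If so
 * then put the packet on that device's RX queue. If not then return.
 */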
static __rte_always_inline int
virtio_tx_local(struct vhost_dev *vdev, struct rte_mbuf *m)
{
	struct ether_hdr *pkt_hdr;
	struct vhost_dev *dst_vdev;

	pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return -1;
	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
			vdev->vid);
		return 0;
	}

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: MAC address is local\n", dst_vdev->vid);

	if (unlikely(dst_vdev->remove)) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) device is marked for removal\n", dst_vdev->vid);
		return 0;
	}
    virtio_xmit(dst_vdev, vdev, m);
    return 0;
}
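
/*
 * Check if the destination MAC of a packet is one local VM,
 * and get its vlan tag, and offset if it is.
 */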
static __rte_always_inline int
find_local_dest(struct vhost_dev *vdev, struct rte_mbuf *m,
	uint32_t *offset, uint16_t *vlan_tag)
{
	struct vhost_dev *dst_vdev;
	struct ether_hdr *pkt_hdr =
		rte_pktmbuf_mtod(m, struct ether_hdr *);

	dst_vdev = find_vhost_dev(&pkt_hdr->d_addr);
    if (!dst_vdev)
        return 0;
	if (vdev->vid == dst_vdev->vid) {
		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"(%d) TX: src and dst MAC is same. Dropping packet.\n",
            vdev->vid);
        return -1;
    }
    
	/*
	 * HW vlan strip reduces the packet length by the vlan tag length,
	 * so restore the packet length by adding it back.
	 */
	*offset  = VLAN_HLEN;
	*vlan_tag = vlan_tags[vdev->vid];

	RTE_LOG_DP(DEBUG, VHOST_DATA,
		"(%d) TX: pkt to local VM device id: (%d), vlan tag: %u.\n",
        vdev->vid, dst_vdev->vid, *vlan_tag);
    return 0;
}
static uint16_t
get_psd_sum(void *l3_hdr, uint64_t ol_flags)
{
	if (ol_flags & PKT_TX_IPV4)
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
static void virtio_tx_offload(struct rte_mbuf *m)
{
	void *l3_hdr;
	struct ipv4_hdr *ipv4_hdr = NULL;
	struct tcp_hdr *tcp_hdr = NULL;
	struct ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(m, struct ether_hdr *);

	l3_hdr = (char *)eth_hdr + m->l2_len;

	if (m->ol_flags & PKT_TX_IPV4) {
		ipv4_hdr = l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		m->ol_flags |= PKT_TX_IP_CKSUM;
	}

	tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
	tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
}
static inline void
free_pkts(struct rte_mbuf **pkts, uint16_t n)
{
	while (n--)
		rte_pktmbuf_free(pkts[n]);
}
static __rte_always_inline void
do_drain_mbuf_table(struct mbuf_table *tx_q)
{
	uint16_t count;

	count = rte_eth_tx_burst(ports[0], tx_q->txq_id,
				 tx_q->m_table, tx_q->len);
	if (unlikely(count < tx_q->len))
		free_pkts(&tx_q->m_table[count], tx_q->len - count);

	tx_q->len = 0;
}
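
/*
 * This function routes the TX packet to the correct interface. This
 * may be a local device or the physical port.
 */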
static __rte_always_inline void
virtio_tx_route(struct vhost_dev *vdev, struct rte_mbuf *m, uint16_t vlan_tag)
{
	struct mbuf_table *tx_q;
	unsigned offset = 0;
	const uint16_t lcore_id = rte_lcore_id();
	struct ether_hdr *nh;

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(is_broadcast_ether_addr(&nh->d_addr))) {
        struct vhost_dev *vdev2;
        TAILQ_FOREACH(vdev2, &vhost_dev_list, global_vdev_entry) {
            if (vdev2 != vdev)
                virtio_xmit(vdev2, vdev, m);
        }
        goto queue2nic;
    }
    
	/* Check if destination is a local VM */
	if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(vdev, m) == 0)) {
		rte_pktmbuf_free(m);
		return;
	}
	if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
		if (unlikely(find_local_dest(vdev, m, &offset,
					     &vlan_tag) != 0)) {
			rte_pktmbuf_free(m);
			return;
		}
	}
        "(%d) TX: MAC address is external\n", vdev->vid);
queue2nic:
    
	/* Add packet to the port tx queue */
	tx_q = &lcore_tx_queue[lcore_id];

	nh = rte_pktmbuf_mtod(m, struct ether_hdr *);
	if (unlikely(nh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_VLAN))) {
		/* Guest has inserted the vlan tag. */
		struct vlan_hdr *vh = (struct vlan_hdr *) (nh + 1);
		uint16_t vlan_tag_be = rte_cpu_to_be_16(vlan_tag);
		if ((vm2vm_mode == VM2VM_HARDWARE) &&
			(vh->vlan_tci != vlan_tag_be))
			vh->vlan_tci = vlan_tag_be;
	} else {
		m->ol_flags |= PKT_TX_VLAN_PKT;
		/* Find the right seg to adjust the data len when offset is
		 * bigger than tail room size.
		 */
		if (unlikely(vm2vm_mode == VM2VM_HARDWARE)) {
			if (likely(offset <= rte_pktmbuf_tailroom(m)))
				m->data_len += offset;
			else {
				struct rte_mbuf *seg = m;
				while ((seg->next != NULL) &&
					(offset > rte_pktmbuf_tailroom(seg)))
					seg = seg->next;
				seg->data_len += offset;
			}
			m->pkt_len += offset;
		}
		m->vlan_tci = vlan_tag;
	}

	if (m->ol_flags & PKT_TX_TCP_SEG)
		virtio_tx_offload(m);

	tx_q->m_table[tx_q->len++] = m;
    if (enable_stats) {
        vdev->stats.tx_total++;
        vdev->stats.tx++;
    }
	if (unlikely(tx_q->len == MAX_PKT_BURST))
		do_drain_mbuf_table(tx_q);
}
static __rte_always_inline void
drain_mbuf_table(struct mbuf_table *tx_q)
{
    static uint64_t prev_tsc;
    uint64_t cur_tsc;
    if (tx_q->len == 0)
        return;
    cur_tsc = rte_rdtsc();
	if (unlikely(cur_tsc - prev_tsc > MBUF_TABLE_DRAIN_TSC)) {
		prev_tsc = cur_tsc;

		RTE_LOG_DP(DEBUG, VHOST_DATA,
			"TX queue drained after timeout with burst size %u\n",
			tx_q->len);
        do_drain_mbuf_table(tx_q);
    }
}
static __rte_always_inline void
drain_eth_rx(struct vhost_dev *vdev)
{
	uint16_t rx_count, enqueue_count;
	struct rte_mbuf *pkts[MAX_PKT_BURST];

	rx_count = rte_eth_rx_burst(ports[0], (uint16_t)vdev->vmdq_rx_q,
				    pkts, MAX_PKT_BURST);
    if (!rx_count)
        return;
    
	/*
	 * When "enable_retry" is set, wait and retry when there are not
	 * enough free slots in the queue to hold @rx_count packets,
	 * to diminish packet loss.
	 */
	if (enable_retry &&
	    unlikely(rx_count > rte_vhost_avail_entries(vdev->vid,
			VIRTIO_RXQ))) {
		uint32_t retry;

		for (retry = 0; retry < burst_rx_retry_num; retry++) {
			rte_delay_us(burst_rx_delay_time);
			if (rx_count <= rte_vhost_avail_entries(vdev->vid,
					VIRTIO_RXQ))
				break;
		}
	}
    if (builtin_net_driver) {
        enqueue_count = vs_enqueue_pkts(vdev, VIRTIO_RXQ,
                        pkts, rx_count);
	} else {
		enqueue_count = rte_vhost_enqueue_burst(vdev->vid, VIRTIO_RXQ,
						pkts, rx_count);
	}
	if (enable_stats) {
		rte_atomic64_add(&vdev->stats.rx_total_atomic, rx_count);
		rte_atomic64_add(&vdev->stats.rx_atomic, enqueue_count);
	}
    free_pkts(pkts, rx_count);
}
static __rte_always_inline void
drain_virtio_tx(struct vhost_dev *vdev)
{
	struct rte_mbuf *pkts[MAX_PKT_BURST];
	uint16_t count;
	uint16_t i;
    if (builtin_net_driver) {
        count = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool,
                    pkts, MAX_PKT_BURST);
	} else {
		count = rte_vhost_dequeue_burst(vdev->vid, VIRTIO_TXQ,
					mbuf_pool, pkts, MAX_PKT_BURST);
    }
    
	/* Setup VMDQ for the first packet. */
	if (unlikely(vdev->ready == DEVICE_MAC_LEARNING) && count) {
        if (vdev->remove || link_vmdq(vdev, pkts[0]) == -1)
            free_pkts(pkts, count);
    }
    for (i = 0; i < count; ++i)
        virtio_tx_route(vdev, pkts[i], vlan_tags[vdev->vid]);
}
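
/*
 * Main function of vhost-switch. It basically does:
 *
 * for each vhost device {
 *    - drain_eth_rx()
 *      Which drains the host eth Rx queue linked to the vhost device,
 *      and delivers all of them to the guest virtio Rx ring associated
 *      with this vhost device.
 *
 *    - drain_virtio_tx()
 *      Which drains the guest virtio Tx queue and delivers all of them
 *      to the target interface.
 * }
 */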
static int
switch_worker(void *arg __rte_unused)
{
	unsigned lcore_id = rte_lcore_id();
    unsigned i;
    struct vhost_dev *vdev;
    struct mbuf_table *tx_q;
	RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);

	tx_q = &lcore_tx_queue[lcore_id];
	for (i = 0; i < rte_lcore_count(); i++) {
		if (lcore_ids[i] == lcore_id) {
            tx_q->txq_id = i;
            break;
        }
    }
    while(1) {
        drain_mbuf_table(tx_q);
        
		/*
		 * Inform the configuration core that we have exited the
		 * linked list and that no devices are in use, if requested.
		 */
		if (lcore_info[lcore_id].dev_removal_flag == REQUEST_DEV_REMOVAL)
			lcore_info[lcore_id].dev_removal_flag = ACK_DEV_REMOVAL;
        
		/* Process vhost devices. */
		TAILQ_FOREACH(vdev, &lcore_info[lcore_id].vdev_list,
			      lcore_vdev_entry) {
			if (unlikely(vdev->remove)) {
				unlink_vmdq(vdev);
                vdev->ready = DEVICE_SAFE_REMOVE;
                continue;
            }
			if (likely(vdev->ready == DEVICE_RX))
				drain_eth_rx(vdev);

			if (likely(!vdev->remove))
				drain_virtio_tx(vdev);
        }
    }
    return 0;
}
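
/*
 * Remove a device from the specific data core linked list and from the
 * main linked list. Synchronization occurs through the use of the
 * lcore dev_removal_flag.
 */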
static void
destroy_device(int vid)
{
    struct vhost_dev *vdev = NULL;
    int lcore;
    TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
        if (vdev->vid == vid)
            break;
    }
    if (!vdev)
        return;
    
	/* Set the remove flag. */
	vdev->remove = 1;
	while (vdev->ready != DEVICE_SAFE_REMOVE) {
		rte_pause();
	}
    if (builtin_net_driver)
        vs_vhost_net_remove(vdev);
    TAILQ_REMOVE(&lcore_info[vdev->coreid].vdev_list, vdev,
             lcore_vdev_entry);
    TAILQ_REMOVE(&vhost_dev_list, vdev, global_vdev_entry);
    
	/* Set the dev_removal_flag on each lcore. */
	RTE_LCORE_FOREACH_SLAVE(lcore)
		lcore_info[lcore].dev_removal_flag = REQUEST_DEV_REMOVAL;

	/* Wait until each core has acknowledged and no longer uses the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		while (lcore_info[lcore].dev_removal_flag != ACK_DEV_REMOVAL)
			rte_pause();
	}
	lcore_info[vdev->coreid].device_num--;

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been removed from data core\n",
		vdev->vid);

	rte_free(vdev);
}
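
/*
 * A new device is added to a data core. First the device is added to the
 * main linked list and then allocated to a specific data core.
 */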
static int
new_device(int vid)
{
    int lcore, core_add = 0;
    uint32_t device_num_min = num_devices;
    struct vhost_dev *vdev;
	vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
	if (vdev == NULL) {
		RTE_LOG(INFO, VHOST_DATA,
            "(%d) couldn't allocate memory for vhost dev\n",
            vid);
        return -1;
    }
    vdev->vid = vid;
    if (builtin_net_driver)
        vs_vhost_net_setup(vdev);
    TAILQ_INSERT_TAIL(&vhost_dev_list, vdev, global_vdev_entry);
    vdev->vmdq_rx_q = vid * queues_per_pool + vmdq_queue_base;
    
    vdev->ready = DEVICE_MAC_LEARNING;
    vdev->remove = 0;
    
	/* Find a suitable lcore to add the device. */
	RTE_LCORE_FOREACH_SLAVE(lcore) {
		if (lcore_info[lcore].device_num < device_num_min) {
            device_num_min = lcore_info[lcore].device_num;
            core_add = lcore;
        }
    }
    vdev->coreid = core_add;
    TAILQ_INSERT_TAIL(&lcore_info[vdev->coreid].vdev_list, vdev,
              lcore_vdev_entry);
    lcore_info[vdev->coreid].device_num++;
    
	/* Disable notifications. */
	rte_vhost_enable_guest_notification(vid, VIRTIO_RXQ, 0);
	rte_vhost_enable_guest_notification(vid, VIRTIO_TXQ, 0);

	RTE_LOG(INFO, VHOST_DATA,
		"(%d) device has been added to data core %d\n",
        vid, vdev->coreid);
    return 0;
}
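
/*
 * These callbacks allow devices to be added to the data core when
 * configuration has been fully completed.
 */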
static const struct vhost_device_ops virtio_net_device_ops = {
	.new_device =  new_device,
	.destroy_device = destroy_device,
};
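
/*
 * This thread wakes up periodically to print statistics if the user has
 * enabled them.
 */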
static void *
print_stats(__rte_unused void *arg)
{
    struct vhost_dev *vdev;
    uint64_t tx_dropped, rx_dropped;
    uint64_t tx, tx_total, rx, rx_total;
    const char clr[] = { 27, '[', '2', 'J', '\0' };
    const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
    while(1) {
        sleep(enable_stats);
        
        printf("%s%s\n", clr, top_left);
        printf("Device statistics =================================\n");
        TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
            tx_total   = vdev->stats.tx_total;
            tx         = vdev->stats.tx;
            tx_dropped = tx_total - tx;

			rx_total   = rte_atomic64_read(&vdev->stats.rx_total_atomic);
			rx         = rte_atomic64_read(&vdev->stats.rx_atomic);
			rx_dropped = rx_total - rx;
            printf("Statistics for device %d\n"
                "-----------------------\n"
                "TX total:              %" PRIu64 "\n"
                "TX dropped:            %" PRIu64 "\n"
                "TX successful:         %" PRIu64 "\n"
                "RX total:              %" PRIu64 "\n"
                "RX dropped:            %" PRIu64 "\n"
                "RX successful:         %" PRIu64 "\n",
                vdev->vid,
                tx_total, tx_dropped, tx,
                rx_total, rx_dropped, rx);
        }
        printf("===================================================\n");
        fflush(stdout);
    }
    return NULL;
}
static void
unregister_drivers(int socket_num)
{
    int i, ret;
    for (i = 0; i < socket_num; i++) {
        ret = rte_vhost_driver_unregister(socket_files + i * PATH_MAX);
		if (ret != 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"Fail to unregister vhost driver for %s.\n",
				socket_files + i * PATH_MAX);
    }
}
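
/* When we receive an INT signal, unregister the vhost driver. */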
static void
sigint_handler(__rte_unused int signum)
{
    
	/* Unregister vhost driver. */
	unregister_drivers(nb_sockets);
    exit(0);
}
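
/*
 * While creating an mbuf pool, one key thing is to figure out how many
 * mbuf entries are enough. Guidelines reflected in the code below:
 *
 * - Each rx queue reserves @nr_rx_desc mbufs at queue setup stage.
 *
 * - Each switch core also needs a reservation for receiving packets
 *   from the virtio Tx queue; (mtu + mbuf_size) * MAX_PKT_BURST /
 *   (mbuf_size - RTE_PKTMBUF_HEADROOM) is a rough worst-case estimate.
 */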
static void
create_mbuf_pool(uint16_t nr_port, uint32_t nr_switch_core, uint32_t mbuf_size,
    uint32_t nr_queues, uint32_t nr_rx_desc, uint32_t nr_mbuf_cache)
{
    uint32_t nr_mbufs;
    uint32_t nr_mbufs_per_core;
    uint32_t mtu = 1500;
    if (mergeable)
        mtu = 9000;
    if (enable_tso)
        mtu = 64 * 1024;
    nr_mbufs_per_core  = (mtu + mbuf_size) * MAX_PKT_BURST /
            (mbuf_size - RTE_PKTMBUF_HEADROOM);
    nr_mbufs_per_core += nr_rx_desc;
	nr_mbufs_per_core  = RTE_MAX(nr_mbufs_per_core, nr_mbuf_cache);
    nr_mbufs  = nr_queues * nr_rx_desc;
    nr_mbufs += nr_mbufs_per_core * nr_switch_core;
    nr_mbufs *= nr_port;

	mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", nr_mbufs,
					    nr_mbuf_cache, 0, mbuf_size,
					    rte_socket_id());
	if (mbuf_pool == NULL)
		rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
}
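
/*
 * Main function, does initialisation and calls the per-lcore functions.
 */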
int
main(int argc, char *argv[])
{
    unsigned lcore_id, core_id = 0;
    unsigned nb_ports, valid_num_ports;
    int ret, i;
    uint16_t portid;
    static pthread_t tid;
    uint64_t flags = 0;
    signal(SIGINT, sigint_handler);
    
	/* init EAL */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
    argc -= ret;
    argv += ret;
    
    ret = us_vhost_parse_args(argc, argv);
    if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid argument\n");
    for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		TAILQ_INIT(&lcore_info[lcore_id].vdev_list);

		if (rte_lcore_is_enabled(lcore_id))
			lcore_ids[core_id++] = lcore_id;
    }
	if (rte_lcore_count() > RTE_MAX_LCORE)
		rte_exit(EXIT_FAILURE, "Not enough cores\n");
    
    
	/* Get the number of physical ports. */
	nb_ports = rte_eth_dev_count_avail();

	/*
	 * Update the global var NUM_PORTS and global array PORTS
	 * according to the system port count, and get the valid port count.
	 */
	valid_num_ports = check_ports_num(nb_ports);
    if ((valid_num_ports ==  0) || (valid_num_ports > MAX_SUP_PORTS)) {
		RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u, "
			"but only %u port can be enabled\n", num_ports, MAX_SUP_PORTS);
        return -1;
    }
    
	create_mbuf_pool(valid_num_ports, rte_lcore_count() - 1, MBUF_DATA_SIZE,
			 MAX_QUEUES, RTE_TEST_RX_DESC_DEFAULT, MBUF_CACHE_SIZE);
    if (vm2vm_mode == VM2VM_HARDWARE) {
        
            "Enable loop back for L2 switch in vmdq.\n");
    }
    
        
	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(portid) {
		/* skip ports that are not enabled */
		if ((enabled_port_mask & (1 << portid)) == 0) {
			RTE_LOG(INFO, VHOST_PORT,
				"Skipping disabled port %d\n", portid);
			continue;
		}
		if (port_init(portid) != 0)
			rte_exit(EXIT_FAILURE,
				"Cannot initialize network ports\n");
    }
    
	/* Enable stats if the user option is set. */
	if (enable_stats) {
		ret = rte_ctrl_thread_create(&tid, "print-stats", NULL,
					print_stats, NULL);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"Cannot create print-stats thread\n");
    }
    
	/* Launch all data cores. */
	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_remote_launch(switch_worker, NULL, lcore_id);

	if (client_mode)
		flags |= RTE_VHOST_USER_CLIENT;
    if (dequeue_zero_copy)
        flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
    
	/* Register vhost user driver to handle vhost messages. */
	for (i = 0; i < nb_sockets; i++) {
		char *file = socket_files + i * PATH_MAX;

		ret = rte_vhost_driver_register(file, flags);
		if (ret != 0) {
            unregister_drivers(i);
                "vhost driver register failure.\n");
        }

		if (builtin_net_driver)
			rte_vhost_driver_set_features(file, VIRTIO_NET_FEATURES);

		if (mergeable == 0) {
			rte_vhost_driver_disable_features(file,
                1ULL << VIRTIO_NET_F_MRG_RXBUF);
        }
		if (enable_tx_csum == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_CSUM);
        }
		if (enable_tso == 0) {
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_HOST_TSO6);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO4);
			rte_vhost_driver_disable_features(file,
				1ULL << VIRTIO_NET_F_GUEST_TSO6);
        }
		if (promiscuous) {
			rte_vhost_driver_enable_features(file,
				1ULL << VIRTIO_NET_F_CTRL_RX);
        }
        ret = rte_vhost_driver_callback_register(file,
            &virtio_net_device_ops);
		if (ret != 0) {
			rte_exit(EXIT_FAILURE,
				"failed to register vhost driver callbacks.\n");
		}

		if (rte_vhost_driver_start(file) < 0) {
			rte_exit(EXIT_FAILURE,
				"failed to start vhost driver.\n");
        }
    }

	RTE_LCORE_FOREACH_SLAVE(lcore_id)
		rte_eal_wait_lcore(lcore_id);

	return 0;
}