#ifndef _VIRTIO_NET_H_
#define _VIRTIO_NET_H_

#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
/* Maximum number of guest memory regions supported. */
#define VHOST_MEMORY_MAX_NREGIONS 8

/* Used to indicate that the device is running on a data core. */
#define VIRTIO_DEV_RUNNING 1

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

/* Virtqueue indices: RX queue, TX queue, and queue count. */
enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};

/* Maximum number of buffers tracked for scatter RX. */
#define BUF_VECTOR_MAX 256

/* Interface name length: large enough for either a tap name or a socket path. */
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
/* Number of descriptors the guest has made available on the given virtqueue
 * that the host has not yet consumed. */
static inline uint16_t __attribute__((always_inline))
rte_vring_available_entries(struct virtio_net *dev, uint16_t queue_id)
{
    struct vhost_virtqueue *vq = dev->virtqueue[queue_id];

    return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}
/* Convert a guest physical address to a vhost virtual address by locating the
 * guest memory region that contains it and applying that region's offset. */
static inline uint64_t __attribute__((always_inline))
gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
{
    struct virtio_memory_regions *region;
    uint32_t regionidx;
    uint64_t vhost_va = 0;

    for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
        region = &dev->mem->regions[regionidx];
        if ((guest_pa >= region->guest_phys_address) &&
            (guest_pa <= region->guest_phys_address_end)) {
            vhost_va = region->address_offset + guest_pa;
            break;
        }
    }
    return vhost_va;
}
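These two inline helpers are the building blocks of the data path: rte_vring_available_entries() reports how many descriptors the guest has posted but the host has not yet consumed, and gpa_to_vva() turns the guest-physical buffer addresses carried in those descriptors into addresses the host process can dereference. The sketch below shows the intended combination; it assumes the virtqueue also exposes its descriptor ring as a desc member, which does not appear in this excerpt, and desc_to_host_ptr() is an illustrative name.

#include <stdint.h>

/* Illustrative helper: translate the guest buffer address held in one
 * descriptor into a host-virtual pointer. */
static void *
desc_to_host_ptr(struct virtio_net *dev, uint16_t queue_id, uint16_t desc_idx)
{
    struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
    struct vring_desc *desc = &vq->desc[desc_idx];   /* assumes a desc member, not shown in this excerpt */

    /* gpa_to_vva() returns 0 when no registered region covers the address,
     * so callers must check for a NULL result. */
    return (void *)(uintptr_t)gpa_to_vva(dev, desc->addr);
}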
/* Return the feature bits the vhost library currently supports. */
uint64_t rte_vhost_feature_get(void);

/* Enable or disable guest notifications (interrupts) on a virtqueue. */
int rte_vhost_enable_guest_notification(struct virtio_net *dev, uint16_t queue_id, int enable);

/* Register the vhost driver; dev_name identifies the character device or socket. */
int rte_vhost_driver_register(const char *dev_name);

/* Unregister the vhost driver registered under dev_name. */
int rte_vhost_driver_unregister(const char *dev_name);

/* Enter the vhost session message handling loop; this call blocks. */
int rte_vhost_driver_session_start(void);
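Taken together, these calls form the control path: an application registers its device callbacks, registers the driver, and then hands the thread over to the blocking session loop. The minimal sketch below shows that sequence; rte_vhost_driver_callback_register() belongs to this library but is not shown in this excerpt, the "/dev/vhost-net" path and my_* names are placeholders, and EAL initialization and error handling are omitted.

#include <inttypes.h>
#include <stdio.h>
#include <rte_virtio_net.h>

/* Placeholder callbacks for struct virtio_net_device_ops (its new_device and
 * destroy_device members are listed further down in this header). */
static int
my_new_device(struct virtio_net *dev)
{
    printf("device %p added; its virtqueues can now be polled\n", (void *)dev);
    return 0;
}

static void
my_destroy_device(volatile struct virtio_net *dev)
{
    (void)dev;
    printf("device removed\n");
}

static const struct virtio_net_device_ops my_ops = {
    .new_device = my_new_device,
    .destroy_device = my_destroy_device,
};

int
main(void)
{
    printf("vhost features: 0x%" PRIx64 "\n", rte_vhost_feature_get());

    /* Install the callbacks; this entry point is part of the same API but
     * not shown in this excerpt. */
    rte_vhost_driver_callback_register(&my_ops);

    if (rte_vhost_driver_register("/dev/vhost-net") != 0)
        return -1;

    /* Blocks, servicing vhost messages until the process exits. */
    rte_vhost_driver_session_start();
    return 0;
}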
/*
 * Structure and function declarations from the remainder of this header,
 * regrouped by owner; members not present in this listing are elided with "...".
 */

/* Per-virtqueue state. __rte_cache_aligned (cache-line alignment, defined in
 * <rte_memory.h>) pads each instance to a cache line. */
struct vhost_virtqueue {
    struct vring_avail *avail;                 /* Available ring shared with the guest. */
    volatile uint16_t last_used_idx;           /* Last available-ring index consumed. */
    volatile uint16_t last_used_idx_res;       /* Index reserved while buffers are being filled. */
    struct buf_vector buf_vec[BUF_VECTOR_MAX]; /* Scatter list used for merged RX buffers. */
    /* ... */
} __rte_cache_aligned;

/* Per-device state. */
struct virtio_net {
    struct vhost_virtqueue *virtqueue[VIRTIO_QNUM]; /* RX and TX virtqueues. */
    struct virtio_memory *mem;                      /* Guest memory region information. */
    /* ... */
} __rte_cache_aligned;

/* One guest memory region and its mapping into the host process. */
struct virtio_memory_regions {
    uint64_t guest_phys_address;     /* First guest physical address of the region. */
    uint64_t guest_phys_address_end; /* Last guest physical address of the region. */
    uint64_t userspace_address;      /* Guest (QEMU) userspace address of the region. */
    uint64_t address_offset;         /* Offset applied by gpa_to_vva() above. */
    /* ... */
};

/* Guest memory map: a header followed by a flexible array of regions. */
struct virtio_memory {
    uint32_t nregions;                        /* Number of memory regions. */
    struct virtio_memory_regions regions[0];  /* Memory region information. */
    /* ... */
};

/* Callbacks invoked when a device is added to or removed from the data path. */
struct virtio_net_device_ops {
    int (*new_device)(struct virtio_net *);
    void (*destroy_device)(volatile struct virtio_net *);
};

/* Remove the features in feature_mask from the set advertised to guests. */
int rte_vhost_feature_disable(uint64_t feature_mask);

/* Add the features in feature_mask to the set advertised to guests. */
int rte_vhost_feature_enable(uint64_t feature_mask);

/* gpa_to_vva() is the inline address-translation helper defined earlier in this header. */

/* Enqueue up to count mbufs onto the device's RX virtqueue; returns the
 * number of packets actually placed on the ring. */
uint16_t rte_vhost_enqueue_burst(struct virtio_net *dev, uint16_t queue_id,
    struct rte_mbuf **pkts, uint16_t count);

/* Dequeue up to count packets from the device's TX virtqueue into mbufs
 * allocated from mbuf_pool; returns the number dequeued. */
uint16_t rte_vhost_dequeue_burst(struct virtio_net *dev, uint16_t queue_id,
    struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
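The enqueue/dequeue pair above is the whole data path: dequeue pulls packets the guest transmitted into host mbufs, and enqueue pushes host mbufs into the guest's receive ring. The following loopback sketch combines them with the helpers declared earlier; it assumes the device has already been delivered through new_device(), and loopback_poll and BURST_SZ are illustrative names.

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_virtio_net.h>

#define BURST_SZ 32   /* illustrative burst size */

/* Poll the guest TX virtqueue and loop every packet straight back into the
 * guest RX virtqueue, freeing anything the RX ring cannot take. */
static void
loopback_poll(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
{
    struct rte_mbuf *pkts[BURST_SZ];
    uint16_t nb_rx, nb_tx, i;

    /* Nothing posted by the guest yet. */
    if (rte_vring_available_entries(dev, VIRTIO_TXQ) == 0)
        return;

    nb_rx = rte_vhost_dequeue_burst(dev, VIRTIO_TXQ, mbuf_pool, pkts, BURST_SZ);

    /* The return value is the number of packets the RX ring accepted. */
    nb_tx = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, nb_rx);

    for (i = nb_tx; i < nb_rx; i++)
        rte_pktmbuf_free(pkts[i]);
}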