aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/virtio
diff options
context:
space:
mode:
Diffstat (limited to 'sys/dev/virtio')
-rw-r--r--sys/dev/virtio/block/virtio_blk.c74
-rw-r--r--sys/dev/virtio/console/virtio_console.c4
-rw-r--r--sys/dev/virtio/gpu/virtio_gpu.c16
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio.c12
-rw-r--r--sys/dev/virtio/network/if_vtnet.c138
-rw-r--r--sys/dev/virtio/network/if_vtnetvar.h10
-rw-r--r--sys/dev/virtio/network/virtio_net.h2
-rw-r--r--sys/dev/virtio/p9fs/virtio_p9fs.c494
-rw-r--r--sys/dev/virtio/p9fs/virtio_p9fs.h39
-rw-r--r--sys/dev/virtio/pci/virtio_pci.c14
-rw-r--r--sys/dev/virtio/pci/virtio_pci_legacy.c4
-rw-r--r--sys/dev/virtio/pci/virtio_pci_modern.c4
-rw-r--r--sys/dev/virtio/scsi/virtio_scsi.c3
-rw-r--r--sys/dev/virtio/virtqueue.c2
14 files changed, 681 insertions, 135 deletions
diff --git a/sys/dev/virtio/block/virtio_blk.c b/sys/dev/virtio/block/virtio_blk.c
index d7fa903936a1..5eb681128e9c 100644
--- a/sys/dev/virtio/block/virtio_blk.c
+++ b/sys/dev/virtio/block/virtio_blk.c
@@ -699,10 +699,14 @@ vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info;
+ int indir_segs;
dev = sc->vtblk_dev;
- VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
+ indir_segs = 0;
+ if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
+ indir_segs = sc->vtblk_max_nsegs;
+ VQ_ALLOC_INFO_INIT(&vq_info, indir_segs,
vtblk_vq_intr, sc, &sc->vtblk_vq,
"%s request", device_get_nameunit(dev));
@@ -755,6 +759,8 @@ vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
dp->d_hba_device = virtio_get_device(dev);
dp->d_hba_subvendor = virtio_get_subvendor(dev);
dp->d_hba_subdevice = virtio_get_subdevice(dev);
+ strlcpy(dp->d_attachment, device_get_nameunit(dev),
+ sizeof(dp->d_attachment));
if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
dp->d_flags |= DISKFLAG_WRITE_PROTECT;
@@ -1177,6 +1183,35 @@ vtblk_request_error(struct vtblk_request *req)
return (error);
}
+static struct bio *
+vtblk_queue_complete_one(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+ struct bio *bp;
+
+ if (sc->vtblk_req_ordered != NULL) {
+ MPASS(sc->vtblk_req_ordered == req);
+ sc->vtblk_req_ordered = NULL;
+ }
+
+ bp = req->vbr_bp;
+ if (req->vbr_mapp != NULL) {
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->vtblk_dmat, req->vbr_mapp);
+ break;
+ case BIO_WRITE:
+ bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->vtblk_dmat, req->vbr_mapp);
+ break;
+ }
+ }
+ bp->bio_error = vtblk_request_error(req);
+ return (bp);
+}
+
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
@@ -1184,31 +1219,9 @@ vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
struct bio *bp;
while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
- if (sc->vtblk_req_ordered != NULL) {
- MPASS(sc->vtblk_req_ordered == req);
- sc->vtblk_req_ordered = NULL;
- }
+ bp = vtblk_queue_complete_one(sc, req);
- bp = req->vbr_bp;
- if (req->vbr_mapp != NULL) {
- switch (bp->bio_cmd) {
- case BIO_READ:
- bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->vtblk_dmat,
- req->vbr_mapp);
- break;
- case BIO_WRITE:
- bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->vtblk_dmat,
- req->vbr_mapp);
- break;
- }
- }
- bp->bio_error = vtblk_request_error(req);
TAILQ_INSERT_TAIL(queue, bp, bio_queue);
-
vtblk_request_enqueue(sc, req);
}
}
@@ -1412,8 +1425,6 @@ vtblk_ident(struct vtblk_softc *sc)
error = vtblk_poll_request(sc, req);
VTBLK_UNLOCK(sc);
- vtblk_request_enqueue(sc, req);
-
if (error) {
device_printf(sc->vtblk_dev,
"error getting device identifier: %d\n", error);
@@ -1423,7 +1434,9 @@ vtblk_ident(struct vtblk_softc *sc)
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
+ struct vtblk_request *req1 __diagused;
struct virtqueue *vq;
+ struct bio *bp;
int error;
vq = sc->vtblk_vq;
@@ -1436,13 +1449,18 @@ vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
return (error);
virtqueue_notify(vq);
- virtqueue_poll(vq, NULL);
+ req1 = virtqueue_poll(vq, NULL);
+ KASSERT(req == req1,
+ ("%s: polling completed %p not %p", __func__, req1, req));
- error = vtblk_request_error(req);
+ bp = vtblk_queue_complete_one(sc, req);
+ error = bp->bio_error;
if (error && bootverbose) {
device_printf(sc->vtblk_dev,
"%s: IO error: %d\n", __func__, error);
}
+ if (req != &sc->vtblk_dump_request)
+ vtblk_request_enqueue(sc, req);
return (error);
}
diff --git a/sys/dev/virtio/console/virtio_console.c b/sys/dev/virtio/console/virtio_console.c
index 4a3fb1e97e57..66433565ce25 100644
--- a/sys/dev/virtio/console/virtio_console.c
+++ b/sys/dev/virtio/console/virtio_console.c
@@ -978,7 +978,7 @@ vtcon_ctrl_poll(struct vtcon_softc *sc,
*/
VTCON_CTRL_TX_LOCK(sc);
KASSERT(virtqueue_empty(vq),
- ("%s: virtqueue is not emtpy", __func__));
+ ("%s: virtqueue is not empty", __func__));
error = virtqueue_enqueue(vq, control, &sg, sg.sg_nseg, 0);
if (error == 0) {
virtqueue_notify(vq);
@@ -1366,7 +1366,7 @@ vtcon_port_out(struct vtcon_port *port, void *buf, int bufsize)
vq = port->vtcport_outvq;
KASSERT(virtqueue_empty(vq),
- ("%s: port %p out virtqueue not emtpy", __func__, port));
+ ("%s: port %p out virtqueue not empty", __func__, port));
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, buf, bufsize);
diff --git a/sys/dev/virtio/gpu/virtio_gpu.c b/sys/dev/virtio/gpu/virtio_gpu.c
index f18eef985cc6..6f786a450900 100644
--- a/sys/dev/virtio/gpu/virtio_gpu.c
+++ b/sys/dev/virtio/gpu/virtio_gpu.c
@@ -102,6 +102,7 @@ static vd_bitblt_text_t vtgpu_fb_bitblt_text;
static vd_bitblt_bmp_t vtgpu_fb_bitblt_bitmap;
static vd_drawrect_t vtgpu_fb_drawrect;
static vd_setpixel_t vtgpu_fb_setpixel;
+static vd_bitblt_argb_t vtgpu_fb_bitblt_argb;
static struct vt_driver vtgpu_fb_driver = {
.vd_name = "virtio_gpu",
@@ -111,6 +112,7 @@ static struct vt_driver vtgpu_fb_driver = {
.vd_bitblt_text = vtgpu_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vtgpu_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vtgpu_fb_bitblt_argb,
.vd_drawrect = vtgpu_fb_drawrect,
.vd_setpixel = vtgpu_fb_setpixel,
.vd_postswitch = vt_fb_postswitch,
@@ -180,6 +182,16 @@ vtgpu_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
vtgpu_resource_flush(sc, x, y, width, height);
}
+static int
+vtgpu_fb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y)
+{
+
+ return (EOPNOTSUPP);
+}
+
static void
vtgpu_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2,
int fill, term_color_t color)
@@ -359,8 +371,8 @@ vtgpu_detach(device_t dev)
vt_deallocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
if (sc->vtgpu_fb_info.fb_vbase != 0) {
MPASS(sc->vtgpu_fb_info.fb_size != 0);
- contigfree((void *)sc->vtgpu_fb_info.fb_vbase,
- sc->vtgpu_fb_info.fb_size, M_DEVBUF);
+ free((void *)sc->vtgpu_fb_info.fb_vbase,
+ M_DEVBUF);
}
/* TODO: Tell the host we are detaching */
diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c
index b1a4230f7b46..5a81c8a24779 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.c
+++ b/sys/dev/virtio/mmio/virtio_mmio.c
@@ -275,7 +275,7 @@ vtmmio_attach(device_t dev)
/* Tell the host we've noticed this device. */
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
- if ((child = device_add_child(dev, NULL, -1)) == NULL) {
+ if ((child = device_add_child(dev, NULL, DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "Cannot create child device.\n");
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtmmio_detach(dev);
@@ -292,17 +292,13 @@ static int
vtmmio_detach(device_t dev)
{
struct vtmmio_softc *sc;
- device_t child;
int error;
sc = device_get_softc(dev);
- if ((child = sc->vtmmio_child_dev) != NULL) {
- error = device_delete_child(dev, child);
- if (error)
- return (error);
- sc->vtmmio_child_dev = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error)
+ return (error);
vtmmio_reset(sc);
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 9c14e688f364..2ff9be9680b8 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -115,7 +115,7 @@ static void vtnet_free_rxtx_queues(struct vtnet_softc *);
static int vtnet_alloc_rx_filters(struct vtnet_softc *);
static void vtnet_free_rx_filters(struct vtnet_softc *);
static int vtnet_alloc_virtqueues(struct vtnet_softc *);
-static int vtnet_alloc_interface(struct vtnet_softc *);
+static void vtnet_alloc_interface(struct vtnet_softc *);
static int vtnet_setup_interface(struct vtnet_softc *);
static int vtnet_ioctl_mtu(struct vtnet_softc *, u_int);
static int vtnet_ioctl_ifflags(struct vtnet_softc *);
@@ -163,24 +163,24 @@ static struct mbuf *
static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
struct vtnet_tx_header *);
static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
-#ifdef VTNET_LEGACY_TX
+
+/* Required for ALTQ */
static void vtnet_start_locked(struct vtnet_txq *, if_t);
static void vtnet_start(if_t);
-#else
+
+/* Required for MQ */
static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int vtnet_txq_mq_start(if_t, struct mbuf *);
static void vtnet_txq_tq_deferred(void *, int);
-#endif
+static void vtnet_qflush(if_t);
+
+
static void vtnet_txq_start(struct vtnet_txq *);
static void vtnet_txq_tq_intr(void *, int);
static int vtnet_txq_eof(struct vtnet_txq *);
static void vtnet_tx_vq_intr(void *);
static void vtnet_tx_start_all(struct vtnet_softc *);
-#ifndef VTNET_LEGACY_TX
-static void vtnet_qflush(if_t);
-#endif
-
static int vtnet_watchdog(struct vtnet_txq *);
static void vtnet_accum_stats(struct vtnet_softc *,
struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
@@ -309,6 +309,19 @@ static int vtnet_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
&vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
+/* Deactivate ALTQ Support */
+static int vtnet_altq_disable = 0;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, altq_disable, CTLFLAG_RDTUN,
+ &vtnet_altq_disable, 0, "Disables ALTQ Support");
+
+/*
+ * For the driver to be considered as having ALTQ enabled,
+ * it must be compiled with an ALTQ-capable kernel,
+ * and the tunable hw.vtnet.altq_disable must be zero.
+ */
+#define VTNET_ALTQ_ENABLED (VTNET_ALTQ_CAPABLE && (!vtnet_altq_disable))
+
+
static uma_zone_t vtnet_tx_header_zone;
static struct virtio_feature_desc vtnet_feature_desc[] = {
@@ -437,12 +450,7 @@ vtnet_attach(device_t dev)
callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
vtnet_load_tunables(sc);
- error = vtnet_alloc_interface(sc);
- if (error) {
- device_printf(dev, "cannot allocate interface\n");
- goto fail;
- }
-
+ vtnet_alloc_interface(sc);
vtnet_setup_sysctl(sc);
error = vtnet_setup_features(sc);
@@ -648,12 +656,9 @@ vtnet_negotiate_features(struct vtnet_softc *sc)
if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
features &= ~VTNET_LRO_FEATURES;
-#ifndef VTNET_LEGACY_TX
- if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
+ /* Deactivate the MQ feature flag if the driver has ALTQ enabled or MQ is explicitly disabled */
+ if (VTNET_ALTQ_ENABLED || vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
features &= ~VIRTIO_NET_F_MQ;
-#else
- features &= ~VIRTIO_NET_F_MQ;
-#endif
negotiated_features = virtio_negotiate_features(dev, features);
@@ -662,7 +667,7 @@ vtnet_negotiate_features(struct vtnet_softc *sc)
mtu = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, mtu));
- if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
+ if (mtu < VTNET_MIN_MTU) {
device_printf(dev, "Invalid MTU value: %d. "
"MTU feature disabled.\n", mtu);
features &= ~VIRTIO_NET_F_MTU;
@@ -871,14 +876,14 @@ vtnet_init_txq(struct vtnet_softc *sc, int id)
if (txq->vtntx_sg == NULL)
return (ENOMEM);
-#ifndef VTNET_LEGACY_TX
- txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
- M_NOWAIT, &txq->vtntx_mtx);
- if (txq->vtntx_br == NULL)
- return (ENOMEM);
+ if (!VTNET_ALTQ_ENABLED) {
+ txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
+ M_NOWAIT, &txq->vtntx_mtx);
+ if (txq->vtntx_br == NULL)
+ return (ENOMEM);
- TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
-#endif
+ TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
+ }
TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
taskqueue_thread_enqueue, &txq->vtntx_tq);
@@ -949,12 +954,12 @@ vtnet_destroy_txq(struct vtnet_txq *txq)
txq->vtntx_sg = NULL;
}
-#ifndef VTNET_LEGACY_TX
- if (txq->vtntx_br != NULL) {
- buf_ring_free(txq->vtntx_br, M_DEVBUF);
- txq->vtntx_br = NULL;
+ if (!VTNET_ALTQ_ENABLED) {
+ if (txq->vtntx_br != NULL) {
+ buf_ring_free(txq->vtntx_br, M_DEVBUF);
+ txq->vtntx_br = NULL;
+ }
}
-#endif
if (mtx_initialized(&txq->vtntx_mtx) != 0)
mtx_destroy(&txq->vtntx_mtx);
@@ -1042,19 +1047,19 @@ vtnet_alloc_virtqueues(struct vtnet_softc *sc)
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
- VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
+ VQ_ALLOC_INFO_INIT(&info[idx + 1], sc->vtnet_tx_nsegs,
vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
/* These queues will not be used so allocate the minimum resources. */
- for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
+ for (; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
- VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
+ VQ_ALLOC_INFO_INIT(&info[idx + 1], 0, NULL, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
@@ -1069,7 +1074,7 @@ vtnet_alloc_virtqueues(struct vtnet_softc *sc)
return (error);
}
-static int
+static void
vtnet_alloc_interface(struct vtnet_softc *sc)
{
device_t dev;
@@ -1078,14 +1083,9 @@ vtnet_alloc_interface(struct vtnet_softc *sc)
dev = sc->vtnet_dev;
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOMEM);
-
sc->vtnet_ifp = ifp;
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-
- return (0);
}
static int
@@ -1103,15 +1103,16 @@ vtnet_setup_interface(struct vtnet_softc *sc)
if_setinitfn(ifp, vtnet_init);
if_setioctlfn(ifp, vtnet_ioctl);
if_setgetcounterfn(ifp, vtnet_get_counter);
-#ifndef VTNET_LEGACY_TX
- if_settransmitfn(ifp, vtnet_txq_mq_start);
- if_setqflushfn(ifp, vtnet_qflush);
-#else
- struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
- if_setstartfn(ifp, vtnet_start);
- if_setsendqlen(ifp, virtqueue_size(vq) - 1);
- if_setsendqready(ifp);
-#endif
+
+ if (!VTNET_ALTQ_ENABLED) {
+ if_settransmitfn(ifp, vtnet_txq_mq_start);
+ if_setqflushfn(ifp, vtnet_qflush);
+ } else {
+ struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
+ if_setstartfn(ifp, vtnet_start);
+ if_setsendqlen(ifp, virtqueue_size(vq) - 1);
+ if_setsendqready(ifp);
+ }
vtnet_get_macaddr(sc);
@@ -2448,7 +2449,7 @@ vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
VIRTIO_NET_HDR_GSO_TCPV6;
- if (__predict_false(tcp->th_flags & TH_CWR)) {
+ if (__predict_false(tcp_get_flags(tcp) & TH_CWR)) {
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
* FreeBSD, ECN support is not on a per-interface basis,
@@ -2624,7 +2625,6 @@ fail:
return (error);
}
-#ifdef VTNET_LEGACY_TX
static void
vtnet_start_locked(struct vtnet_txq *txq, if_t ifp)
@@ -2690,7 +2690,6 @@ vtnet_start(if_t ifp)
VTNET_TXQ_UNLOCK(txq);
}
-#else /* !VTNET_LEGACY_TX */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
@@ -2801,7 +2800,6 @@ vtnet_txq_tq_deferred(void *xtxq, int pending __unused)
VTNET_TXQ_UNLOCK(txq);
}
-#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
@@ -2812,13 +2810,14 @@ vtnet_txq_start(struct vtnet_txq *txq)
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
-#ifdef VTNET_LEGACY_TX
- if (!if_sendq_empty(ifp))
- vtnet_start_locked(txq, ifp);
-#else
- if (!drbr_empty(ifp, txq->vtntx_br))
- vtnet_txq_mq_start_locked(txq, NULL);
-#endif
+ if (!VTNET_ALTQ_ENABLED) {
+ if (!drbr_empty(ifp, txq->vtntx_br))
+ vtnet_txq_mq_start_locked(txq, NULL);
+ } else {
+ if (!if_sendq_empty(ifp))
+ vtnet_start_locked(txq, ifp);
+
+ }
}
static void
@@ -2933,7 +2932,6 @@ vtnet_tx_start_all(struct vtnet_softc *sc)
}
}
-#ifndef VTNET_LEGACY_TX
static void
vtnet_qflush(if_t ifp)
{
@@ -2955,7 +2953,6 @@ vtnet_qflush(if_t ifp)
if_qflush(ifp);
}
-#endif
static int
vtnet_watchdog(struct vtnet_txq *txq)
@@ -3036,12 +3033,14 @@ vtnet_get_counter(if_t ifp, ift_counter cnt)
return (rxaccum.vrxs_ierrors);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
-#ifndef VTNET_LEGACY_TX
case IFCOUNTER_OBYTES:
- return (txaccum.vtxs_obytes);
+ if (!VTNET_ALTQ_ENABLED)
+ return (txaccum.vtxs_obytes);
+ /* FALLTHROUGH */
case IFCOUNTER_OMCASTS:
- return (txaccum.vtxs_omcasts);
-#endif
+ if (!VTNET_ALTQ_ENABLED)
+ return (txaccum.vtxs_omcasts);
+ /* FALLTHROUGH */
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -3145,9 +3144,8 @@ vtnet_drain_taskqueues(struct vtnet_softc *sc)
txq = &sc->vtnet_txqs[i];
if (txq->vtntx_tq != NULL) {
taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
-#ifndef VTNET_LEGACY_TX
- taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
-#endif
+ if (!VTNET_ALTQ_ENABLED)
+ taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
}
}
}
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index d690ad3bf63c..0144b0f3232d 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -29,8 +29,10 @@
#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H
+#define VTNET_ALTQ_CAPABLE (0)
#ifdef ALTQ
-#define VTNET_LEGACY_TX
+#undef VTNET_ALTQ_CAPABLE
+#define VTNET_ALTQ_CAPABLE (1)
#endif
struct vtnet_softc;
@@ -112,18 +114,14 @@ struct vtnet_txq {
struct vtnet_softc *vtntx_sc;
struct virtqueue *vtntx_vq;
struct sglist *vtntx_sg;
-#ifndef VTNET_LEGACY_TX
struct buf_ring *vtntx_br;
-#endif
int vtntx_id;
int vtntx_watchdog;
int vtntx_intr_threshold;
struct vtnet_txq_stats vtntx_stats;
struct taskqueue *vtntx_tq;
struct task vtntx_intrtask;
-#ifndef VTNET_LEGACY_TX
struct task vtntx_defrtask;
-#endif
#ifdef DEV_NETMAP
struct virtio_net_hdr_mrg_rxbuf vtntx_shrhdr;
#endif /* DEV_NETMAP */
@@ -374,7 +372,7 @@ CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
*/
#define VTNET_DEFAULT_BUFRING_SIZE 4096
-#define VTNET_CORE_MTX(_sc) &(_sc)->vtnet_mtx
+#define VTNET_CORE_MTX(_sc) (&(_sc)->vtnet_mtx)
#define VTNET_CORE_LOCK(_sc) mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc) mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc) mtx_destroy(VTNET_CORE_MTX((_sc)))
diff --git a/sys/dev/virtio/network/virtio_net.h b/sys/dev/virtio/network/virtio_net.h
index 4b728f7af21a..9ea53cbe2376 100644
--- a/sys/dev/virtio/network/virtio_net.h
+++ b/sys/dev/virtio/network/virtio_net.h
@@ -481,7 +481,7 @@ virtio_net_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
VIRTIO_NET_HDR_GSO_TCPV6;
- if (tcp->th_flags & TH_CWR) {
+ if (tcp_get_flags(tcp) & TH_CWR) {
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
* ECN support is not on a per-interface basis, but globally via
diff --git a/sys/dev/virtio/p9fs/virtio_p9fs.c b/sys/dev/virtio/p9fs/virtio_p9fs.c
new file mode 100644
index 000000000000..aa84d3970698
--- /dev/null
+++ b/sys/dev/virtio/p9fs/virtio_p9fs.c
@@ -0,0 +1,494 @@
+/*-
+ * Copyright (c) 2017 Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * The Virtio 9P transport driver. This file contains all functions related to
+ * the virtqueue infrastructure which include creating the virtqueue, host
+ * interactions, interrupts etc.
+ */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/queue.h>
+#include <sys/bus.h>
+#include <sys/kthread.h>
+#include <sys/condvar.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+
+#include <fs/p9fs/p9_client.h>
+#include <fs/p9fs/p9_debug.h>
+#include <fs/p9fs/p9_protocol.h>
+#include <fs/p9fs/p9_transport.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/virtio_ring.h>
+#include <dev/virtio/p9fs/virtio_p9fs.h>
+
+#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
+#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
+#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
+#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
+ "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
+#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
+#define MAX_SUPPORTED_SGS 20
+static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");
+
+struct vt9p_softc {
+ device_t vt9p_dev;
+ struct mtx vt9p_mtx;
+ struct sglist *vt9p_sglist;
+ struct cv submit_cv;
+ bool busy;
+ struct virtqueue *vt9p_vq;
+ int max_nsegs;
+ uint16_t mount_tag_len;
+ char *mount_tag;
+ STAILQ_ENTRY(vt9p_softc) chan_next;
+};
+
+/* Global channel list; each channel corresponds to a mount point */
+static STAILQ_HEAD( ,vt9p_softc) global_chan_list =
+ STAILQ_HEAD_INITIALIZER(global_chan_list);
+struct mtx global_chan_list_mtx;
+MTX_SYSINIT(global_chan_list_mtx, &global_chan_list_mtx, "9pglobal", MTX_DEF);
+
+static struct virtio_feature_desc virtio_9p_feature_desc[] = {
+ { VIRTIO_9PNET_F_MOUNT_TAG, "9PMountTag" },
+ { 0, NULL }
+};
+
+/* We don't currently allow canceling of virtio requests */
+static int
+vt9p_cancel(void *handle, struct p9_req_t *req)
+{
+ return (1);
+}
+
+SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");
+
+/*
+ * Maximum number of seconds the vt9p_request thread sleeps waiting for an
+ * ack from the host before exiting
+ */
+static unsigned int vt9p_ackmaxidle = 120;
+SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
+ "Maximum time request thread waits for ack from host");
+
+/*
+ * Wait for completion of a p9 request.
+ *
+ * This routine will sleep and release the chan mtx during the period.
+ * chan mtx will be acquired again upon return.
+ */
+static int
+vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
+{
+ KASSERT(req->tc->tag != req->rc->tag,
+ ("%s: request %p already completed", __func__, req));
+
+ if (msleep(req, VT9P_MTX(chan), 0, "chan lock", vt9p_ackmaxidle * hz)) {
+ /*
+ * Waited for 120s. No response from host.
+ * Can't wait for ever..
+ */
+ P9_DEBUG(ERROR, "Timeout after waiting %u seconds"
+ "for an ack from host\n", vt9p_ackmaxidle);
+ return (EIO);
+ }
+ KASSERT(req->tc->tag == req->rc->tag,
+ ("%s spurious event on request %p", __func__, req));
+ return (0);
+}
+
+/*
+ * Request handler. This is called for every request submitted to the host.
+ * It basically maps the tc/rc buffers to sg lists and submits the requests
+ * into the virtqueue. Since we have implemented a synchronous version, the
+ * submission thread sleeps until the ack in the interrupt wakes it up. Once
+ * it wakes up, it returns back to the P9fs layer. The rc buffer is then
+ * processed and completed to its upper layers.
+ */
+static int
+vt9p_request(void *handle, struct p9_req_t *req)
+{
+ int error;
+ struct vt9p_softc *chan;
+ int readable, writable;
+ struct sglist *sg;
+ struct virtqueue *vq;
+
+ chan = handle;
+ sg = chan->vt9p_sglist;
+ vq = chan->vt9p_vq;
+
+ P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);
+
+ /* Grab the channel lock */
+ VT9P_LOCK(chan);
+req_retry:
+ sglist_reset(sg);
+ /* Handle out VirtIO ring buffers */
+ error = sglist_append(sg, req->tc->sdata, req->tc->size);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
+ VT9P_UNLOCK(chan);
+ return (error);
+ }
+ readable = sg->sg_nseg;
+
+ error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
+ VT9P_UNLOCK(chan);
+ return (error);
+ }
+ writable = sg->sg_nseg - readable;
+
+ error = virtqueue_enqueue(vq, req, sg, readable, writable);
+ if (error != 0) {
+ if (error == ENOSPC) {
+ /*
+ * Out of ring space: wait on the submit condvar
+ * (cv_wait drops the channel lock), then retry.
+ */
+ cv_wait(&chan->submit_cv, VT9P_MTX(chan));
+ P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
+ goto req_retry;
+ } else {
+ P9_DEBUG(ERROR, "%s: virtio enuqueue failed \n", __func__);
+ VT9P_UNLOCK(chan);
+ return (EIO);
+ }
+ }
+
+ /* We have to notify */
+ virtqueue_notify(vq);
+
+ error = vt9p_req_wait(chan, req);
+ if (error != 0) {
+ VT9P_UNLOCK(chan);
+ return (error);
+ }
+
+ VT9P_UNLOCK(chan);
+
+ P9_DEBUG(TRANS, "%s: virtio request kicked\n", __func__);
+
+ return (0);
+}
+
+/*
+ * Completion of the request from the virtqueue. This interrupt handler is
+ * setup at initialization and is called for every completing request. It
+ * just wakes up the sleeping submission requests.
+ */
+static void
+vt9p_intr_complete(void *xsc)
+{
+ struct vt9p_softc *chan;
+ struct virtqueue *vq;
+ struct p9_req_t *curreq;
+
+ chan = (struct vt9p_softc *)xsc;
+ vq = chan->vt9p_vq;
+
+ P9_DEBUG(TRANS, "%s: completing\n", __func__);
+
+ VT9P_LOCK(chan);
+again:
+ while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
+ curreq->rc->tag = curreq->tc->tag;
+ wakeup_one(curreq);
+ }
+ if (virtqueue_enable_intr(vq) != 0) {
+ virtqueue_disable_intr(vq);
+ goto again;
+ }
+ cv_signal(&chan->submit_cv);
+ VT9P_UNLOCK(chan);
+}
+
+/*
+ * Allocation of the virtqueue with interrupt complete routines.
+ */
+static int
+vt9p_alloc_virtqueue(struct vt9p_softc *sc)
+{
+ struct vq_alloc_info vq_info;
+ device_t dev;
+
+ dev = sc->vt9p_dev;
+
+ VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs,
+ vt9p_intr_complete, sc, &sc->vt9p_vq,
+ "%s request", device_get_nameunit(dev));
+
+ return (virtio_alloc_virtqueues(dev, 1, &vq_info));
+}
+
+/* Probe for existence of 9P virtio channels */
+static int
+vt9p_probe(device_t dev)
+{
+
+ /* If the virtio device type is a 9P device, then we claim and attach it */
+ if (virtio_get_device_type(dev) != VIRTIO_ID_9P)
+ return (ENXIO);
+ device_set_desc(dev, "VirtIO 9P Transport");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static void
+vt9p_stop(struct vt9p_softc *sc)
+{
+
+ /* Device-specific stops. */
+ virtqueue_disable_intr(sc->vt9p_vq);
+ virtio_stop(sc->vt9p_dev);
+}
+
+/* Detach the 9P virtio PCI device */
+static int
+vt9p_detach(device_t dev)
+{
+ struct vt9p_softc *sc;
+
+ sc = device_get_softc(dev);
+ VT9P_LOCK(sc);
+ vt9p_stop(sc);
+ VT9P_UNLOCK(sc);
+
+ if (sc->vt9p_sglist) {
+ sglist_free(sc->vt9p_sglist);
+ sc->vt9p_sglist = NULL;
+ }
+ if (sc->mount_tag) {
+ free(sc->mount_tag, M_P9FS_MNTTAG);
+ sc->mount_tag = NULL;
+ }
+ mtx_lock(&global_chan_list_mtx);
+ STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next);
+ mtx_unlock(&global_chan_list_mtx);
+
+ VT9P_LOCK_DESTROY(sc);
+ cv_destroy(&sc->submit_cv);
+
+ return (0);
+}
+
+/* Attach the 9P virtio PCI device */
+static int
+vt9p_attach(device_t dev)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
+ struct vt9p_softc *chan;
+ char *mount_tag;
+ int error;
+ uint16_t mount_tag_len;
+
+ chan = device_get_softc(dev);
+ chan->vt9p_dev = dev;
+
+ /* Init the channel lock. */
+ VT9P_LOCK_INIT(chan);
+ /* Initialize the condition variable */
+ cv_init(&chan->submit_cv, "Conditional variable for submit queue" );
+ chan->max_nsegs = MAX_SUPPORTED_SGS;
+ chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_WAITOK);
+
+ /* Negotiate the features from the host */
+ virtio_set_feature_desc(dev, virtio_9p_feature_desc);
+ virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG);
+
+ /*
+ * If mount tag feature is supported read the mount tag
+ * from device config
+ */
+ if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG))
+ mount_tag_len = virtio_read_dev_config_2(dev,
+ offsetof(struct virtio_9pnet_config, mount_tag_len));
+ else {
+ error = EINVAL;
+ P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__);
+ goto out;
+ }
+ mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG,
+ M_WAITOK | M_ZERO);
+
+ virtio_read_device_config_array(dev,
+ offsetof(struct virtio_9pnet_config, mount_tag),
+ mount_tag, 1, mount_tag_len);
+
+ device_printf(dev, "Mount tag: %s\n", mount_tag);
+
+ mount_tag_len++;
+ chan->mount_tag_len = mount_tag_len;
+ chan->mount_tag = mount_tag;
+
+ ctx = device_get_sysctl_ctx(dev);
+ tree = device_get_sysctl_tree(dev);
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag",
+ CTLFLAG_RD, chan->mount_tag, 0, "Mount tag");
+
+ /* We expect one virtqueue, for requests. */
+ error = vt9p_alloc_virtqueue(chan);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: Allocating the virtqueue failed \n", __func__);
+ goto out;
+ }
+ error = virtio_setup_intr(dev, INTR_TYPE_MISC|INTR_MPSAFE);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: Cannot setup virtqueue interrupt\n", __func__);
+ goto out;
+ }
+ error = virtqueue_enable_intr(chan->vt9p_vq);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: Cannot enable virtqueue interrupt\n", __func__);
+ goto out;
+ }
+
+ mtx_lock(&global_chan_list_mtx);
+ /* Insert the channel in global channel list */
+ STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next);
+ mtx_unlock(&global_chan_list_mtx);
+
+ return (0);
+out:
+ /* Something went wrong, detach the device */
+ vt9p_detach(dev);
+ return (error);
+}
+
+/*
+ * Allocate a new virtio channel. This sets up a transport channel
+ * for 9P communication
+ */
+static int
+vt9p_create(const char *mount_tag, void **handlep)
+{
+ struct vt9p_softc *sc, *chan;
+
+ chan = NULL;
+
+ /*
+ * Find out the corresponding channel for a client from global list
+ * of channels based on mount tag and attach it to client
+ */
+ mtx_lock(&global_chan_list_mtx);
+ STAILQ_FOREACH(sc, &global_chan_list, chan_next) {
+ if (!strcmp(sc->mount_tag, mount_tag)) {
+ chan = sc;
+ break;
+ }
+ }
+ mtx_unlock(&global_chan_list_mtx);
+
+ /*
+ * If chan is already attached to a client then it cannot be used for
+ * another client.
+ */
+ if (chan && chan->busy) {
+ //p9_debug(TRANS, "Channel busy: used by clnt=%p\n", chan->client);
+ return (EBUSY);
+ }
+
+ /* If we don't have one, bail out for now. */
+ if (chan) {
+ *handlep = (void *)chan;
+ chan->busy = true;
+ } else {
+ P9_DEBUG(TRANS, "%s: No Global channel with mount_tag=%s\n",
+ __func__, mount_tag);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static void
+vt9p_close(void *handle)
+{
+ struct vt9p_softc *chan = handle;
+
+ chan->busy = false;
+}
+
+static struct p9_trans_module vt9p_trans = {
+ .name = "virtio",
+ .create = vt9p_create,
+ .close = vt9p_close,
+ .request = vt9p_request,
+ .cancel = vt9p_cancel,
+};
+
+static device_method_t vt9p_mthds[] = {
+ /* Device methods. */
+ DEVMETHOD(device_probe, vt9p_probe),
+ DEVMETHOD(device_attach, vt9p_attach),
+ DEVMETHOD(device_detach, vt9p_detach),
+ DEVMETHOD_END
+};
+
+static driver_t vt9p_drv = {
+ "virtio_p9fs",
+ vt9p_mthds,
+ sizeof(struct vt9p_softc)
+};
+
+static int
+vt9p_modevent(module_t mod, int type, void *unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ p9_init_zones();
+ p9_register_trans(&vt9p_trans);
+ break;
+ case MOD_UNLOAD:
+ p9_destroy_zones();
+ break;
+ case MOD_SHUTDOWN:
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+ return (error);
+}
+
+VIRTIO_DRIVER_MODULE(virtio_p9fs, vt9p_drv, vt9p_modevent, NULL);
+MODULE_VERSION(virtio_p9fs, 1);
+MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
+MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);
diff --git a/sys/dev/virtio/p9fs/virtio_p9fs.h b/sys/dev/virtio/p9fs/virtio_p9fs.h
new file mode 100644
index 000000000000..924b413d29a5
--- /dev/null
+++ b/sys/dev/virtio/p9fs/virtio_p9fs.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2017 Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef VIRTIO_9P_CONFIG_H
+#define VIRTIO_9P_CONFIG_H
+
+/* Device advertises a mount tag in its config space. */
+#define VIRTIO_9PNET_F_MOUNT_TAG 1
+
+struct virtio_9pnet_config {
+	/* Length in bytes of mount_tag[] below. */
+	uint16_t mount_tag_len;
+	/* Mount tag name; NOT NUL-terminated (bounded by mount_tag_len). */
+	uint8_t mount_tag[];	/* C99 flexible array member, not GNU [0] */
+};
+#endif /* VIRTIO_9P_CONFIG_H */
diff --git a/sys/dev/virtio/pci/virtio_pci.c b/sys/dev/virtio/pci/virtio_pci.c
index 4d93e94e59f2..b7b34b448f6e 100644
--- a/sys/dev/virtio/pci/virtio_pci.c
+++ b/sys/dev/virtio/pci/virtio_pci.c
@@ -168,7 +168,7 @@ vtpci_add_child(struct vtpci_common *cn)
dev = cn->vtpci_dev;
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "cannot create child device\n");
return (ENOMEM);
@@ -182,18 +182,14 @@ vtpci_add_child(struct vtpci_common *cn)
int
vtpci_delete_child(struct vtpci_common *cn)
{
- device_t dev, child;
+ device_t dev;
int error;
dev = cn->vtpci_dev;
- child = cn->vtpci_child_dev;
- if (child != NULL) {
- error = device_delete_child(dev, child);
- if (error)
- return (error);
- cn->vtpci_child_dev = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error)
+ return (error);
return (0);
}
diff --git a/sys/dev/virtio/pci/virtio_pci_legacy.c b/sys/dev/virtio/pci/virtio_pci_legacy.c
index 238434c350fb..18dbb00e0d8c 100644
--- a/sys/dev/virtio/pci/virtio_pci_legacy.c
+++ b/sys/dev/virtio/pci/virtio_pci_legacy.c
@@ -190,7 +190,6 @@ DRIVER_MODULE(virtio_pci_legacy, pci, vtpci_legacy_driver, 0, 0);
static int
vtpci_legacy_probe(device_t dev)
{
- char desc[64];
const char *name;
if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
@@ -207,8 +206,7 @@ vtpci_legacy_probe(device_t dev)
if (name == NULL)
name = "Unknown";
- snprintf(desc, sizeof(desc), "VirtIO PCI (legacy) %s adapter", name);
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "VirtIO PCI (legacy) %s adapter", name);
/* Prefer transitional modern VirtIO PCI. */
return (BUS_PROBE_LOW_PRIORITY);
diff --git a/sys/dev/virtio/pci/virtio_pci_modern.c b/sys/dev/virtio/pci/virtio_pci_modern.c
index 8f9b1f21aeab..eb1d5a1e6989 100644
--- a/sys/dev/virtio/pci/virtio_pci_modern.c
+++ b/sys/dev/virtio/pci/virtio_pci_modern.c
@@ -244,7 +244,6 @@ DRIVER_MODULE(virtio_pci_modern, pci, vtpci_modern_driver, 0, 0);
static int
vtpci_modern_probe(device_t dev)
{
- char desc[64];
const char *name;
uint16_t devid;
@@ -269,8 +268,7 @@ vtpci_modern_probe(device_t dev)
if (name == NULL)
name = "Unknown";
- snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name);
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "VirtIO PCI (modern) %s adapter", name);
return (BUS_PROBE_DEFAULT);
}
diff --git a/sys/dev/virtio/scsi/virtio_scsi.c b/sys/dev/virtio/scsi/virtio_scsi.c
index 68da81a97855..857da56ba426 100644
--- a/sys/dev/virtio/scsi/virtio_scsi.c
+++ b/sys/dev/virtio/scsi/virtio_scsi.c
@@ -41,8 +41,7 @@
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index c92f635832f2..8cc3326dc08e 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -360,7 +360,7 @@ virtqueue_free(struct virtqueue *vq)
virtqueue_free_indirect(vq);
if (vq->vq_ring_mem != NULL) {
- contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
+ free(vq->vq_ring_mem, M_DEVBUF);
vq->vq_ring_size = 0;
vq->vq_ring_mem = NULL;
}