virtio, vhost, pc fixes for 2.4

The only notable thing here is the vhost-user multiqueue revert. We'll
work on making it stable in 2.5; reverting now means we won't have to
maintain bug-for-bug compatibility forever.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVrNpQAAoJECgfDbjSjVRp5EwH/jh1iiTG4iXDbSnxPJ/FWwJt
9hgd+GeNT9K5Dc1A+X6d80OdeaH5bjFNqfUXSrIsLKDWYtj01AU2+dgGS3j0/Uv1
FRrrWsnN5idcYuqlznk7X9Eu2mb7npGZnjF4PmDU5Hq5eRj/mgJbWvKYqMolLYtv
cuSHxhbA3dlUTIjfXccMF94kvFQUpfs2A3ip9osmCKE15fDS2zXKT3mIj/itnj3o
DFRwkmiKYJwwi9cl7F2svzYnqp00mAMaVJ3UrzYFZhPylzWQfIJCgu47PMfGsJAx
kPvY+IA3WEk0sMTVRNSWj61QNax4G/oM8d4ipnRpt3k0UzWg1XNTZkRIAg40j8A=
=SW7Q
-----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging

# gpg: Signature made Mon Jul 20 12:24:00 2015 BST using RSA key ID D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"

* remotes/mst/tags/for_upstream:
  virtio-net: remove virtio queues if the guest doesn't support multiqueue
  virtio-net: Flush incoming queues when DRIVER_OK is being set
  pci_add_capability: remove duplicate comments
  virtio-net: unbreak any layout
  Revert "vhost-user: add multi queue support"
  ich9: fix skipped vmstate_memhp_state subsection

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit f73ca73634
--- a/docs/specs/vhost-user.txt
+++ b/docs/specs/vhost-user.txt
@@ -127,11 +127,6 @@ in the ancillary data:
 If Master is unable to send the full message or receives a wrong reply it will
 close the connection. An optional reconnection mechanism can be implemented.
 
-Multi queue support
--------------------
-The protocol supports multiple queues by setting all index fields in the sent
-messages to a properly calculated value.
-
 Message types
 -------------
 
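For context on the removed section: under the multiqueue design being reverted, each queue pair contributed two rings, and messages carried a ring index computed from the pair number. A hypothetical sketch of that calculation (the helper name is illustrative, not from the spec):

    /* Illustrative only: queue pair `pair` owns two rings, so a message
     * addressing ring `ring` (0 = rx, 1 = tx) would carry this index. */
    static unsigned int vring_msg_index(unsigned int pair, unsigned int ring)
    {
        return pair * 2 + ring;
    }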
--- a/hw/acpi/ich9.c
+++ b/hw/acpi/ich9.c
@@ -206,9 +206,6 @@ const VMStateDescription vmstate_ich9_pm = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_memhp_state,
-        NULL
-    },
-    .subsections = (const VMStateDescription*[]) {
         &vmstate_tco_io_state,
         NULL
     }
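The bug fixed here is easy to miss: when a designated initializer names the same member twice, C keeps only the last one, so the first .subsections list, the one carrying vmstate_memhp_state, was silently discarded. A minimal standalone illustration (hypothetical struct, not QEMU code):

    #include <stdio.h>

    struct desc {
        const char *subsections;
    };

    /* The second initializer overrides the first (C99 6.7.8p19);
     * GCC only warns about this with -Woverride-init. */
    static const struct desc d = {
        .subsections = "memhp",   /* silently dropped */
        .subsections = "tco",
    };

    int main(void)
    {
        printf("%s\n", d.subsections);  /* prints "tco" */
        return 0;
    }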
--- a/hw/net/vhost_net.c
+++ b/hw/net/vhost_net.c
@@ -160,7 +160,6 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options)
 
     net->dev.nvqs = 2;
     net->dev.vqs = net->vqs;
-    net->dev.vq_index = net->nc->queue_index;
 
     r = vhost_dev_init(&net->dev, options->opaque,
                        options->backend_type);
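With multiqueue reverted, a vhost-net device always drives the first (and only) queue pair, so the explicit vq_index assignment goes away and the field keeps its zero-initialized default. The fields involved, abridged from QEMU's struct vhost_dev for orientation:

    /* Abridged sketch of the relevant struct vhost_dev members. */
    struct vhost_dev_abridged {
        struct vhost_virtqueue *vqs; /* backing array, net->vqs here */
        int nvqs;                    /* always 2: one rx ring, one tx ring */
        int vq_index;                /* base ring index; now left at 0 */
    };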
@@ -287,7 +286,7 @@ static void vhost_net_stop_one(struct vhost_net *net,
         for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
             const VhostOps *vhost_ops = net->dev.vhost_ops;
             int r = vhost_ops->vhost_call(&net->dev, VHOST_RESET_OWNER,
-                                          &file);
+                                          NULL);
             assert(r >= 0);
         }
     }
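VHOST_RESET_OWNER is a device-level request rather than a per-ring one, so no vhost_vring_file payload is needed and the call site now passes NULL (the vhost-user backend further down likewise stops attaching a vring-state payload). For comparison, a minimal sketch of the same request against the kernel backend, assuming an already-open /dev/vhost-net descriptor:

    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* VHOST_RESET_OWNER takes no argument at the ioctl level either. */
    static int reset_owner(int vhost_fd)
    {
        return ioctl(vhost_fd, VHOST_RESET_OWNER);
    }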
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -162,6 +162,8 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
     virtio_net_vhost_status(n, status);
 
     for (i = 0; i < n->max_queues; i++) {
+        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
+        bool queue_started;
         q = &n->vqs[i];
 
         if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
@@ -169,12 +171,18 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
         } else {
             queue_status = status;
         }
+        queue_started =
+            virtio_net_started(n, queue_status) && !n->vhost_started;
+
+        if (queue_started) {
+            qemu_flush_queued_packets(ncs);
+        }
 
         if (!q->tx_waiting) {
             continue;
         }
 
-        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
+        if (queue_started) {
             if (q->tx_timer) {
                 timer_mod(q->tx_timer,
                                qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
@@ -1142,7 +1150,8 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         ssize_t ret, len;
         unsigned int out_num = elem.out_num;
         struct iovec *out_sg = &elem.out_sg[0];
-        struct iovec sg[VIRTQUEUE_MAX_SIZE];
+        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1];
+        struct virtio_net_hdr_mrg_rxbuf mhdr;
 
         if (out_num < 1) {
             error_report("virtio-net header not in first element");
@@ -1150,13 +1159,25 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         if (n->has_vnet_hdr) {
-            if (out_sg[0].iov_len < n->guest_hdr_len) {
+            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
+                n->guest_hdr_len) {
                 error_report("virtio-net header incorrect");
                 exit(1);
             }
-            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
+            if (virtio_needs_swap(vdev)) {
+                virtio_net_hdr_swap(vdev, (void *) &mhdr);
+                sg2[0].iov_base = &mhdr;
+                sg2[0].iov_len = n->guest_hdr_len;
+                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
+                                   out_sg, out_num,
+                                   n->guest_hdr_len, -1);
+                if (out_num == VIRTQUEUE_MAX_SIZE) {
+                    goto drop;
+                }
+                out_num += 1;
+                out_sg = sg2;
+            }
         }
-
         /*
          * If host wants to see the guest header as is, we can
          * pass it on unchanged. Otherwise, copy just the parts
@@ -1186,7 +1207,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
         }
 
         len += ret;
-
+drop:
         virtqueue_push(q->tx_vq, &elem, 0);
         virtio_notify(vdev, q->tx_vq);
 
@@ -1306,9 +1327,86 @@ static void virtio_net_tx_bh(void *opaque)
     }
 }
 
+static void virtio_net_add_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+
+    n->vqs[index].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
+    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
+        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                              virtio_net_tx_timer,
+                                              &n->vqs[index]);
+    } else {
+        n->vqs[index].tx_vq =
+            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
+        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
+    }
+
+    n->vqs[index].tx_waiting = 0;
+    n->vqs[index].n = n;
+}
+
+static void virtio_net_del_queue(VirtIONet *n, int index)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    VirtIONetQueue *q = &n->vqs[index];
+    NetClientState *nc = qemu_get_subqueue(n->nic, index);
+
+    qemu_purge_queued_packets(nc);
+
+    virtio_del_queue(vdev, index * 2);
+    if (q->tx_timer) {
+        timer_del(q->tx_timer);
+        timer_free(q->tx_timer);
+    } else {
+        qemu_bh_delete(q->tx_bh);
+    }
+    virtio_del_queue(vdev, index * 2 + 1);
+}
+
+static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(n);
+    int old_num_queues = virtio_get_num_queues(vdev);
+    int new_num_queues = new_max_queues * 2 + 1;
+    int i;
+
+    assert(old_num_queues >= 3);
+    assert(old_num_queues % 2 == 1);
+
+    if (old_num_queues == new_num_queues) {
+        return;
+    }
+
+    /*
+     * We always need to remove and add ctrl vq if
+     * old_num_queues != new_num_queues. Remove ctrl_vq first,
+     * and then we only enter one of the following two loops.
+     */
+    virtio_del_queue(vdev, old_num_queues - 1);
+
+    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
+        /* new_num_queues < old_num_queues */
+        virtio_net_del_queue(n, i / 2);
+    }
+
+    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
+        /* new_num_queues > old_num_queues */
+        virtio_net_add_queue(n, i / 2);
+    }
+
+    /* add ctrl_vq last */
+    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
+}
+
 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
 {
+    int max = multiqueue ? n->max_queues : 1;
+
     n->multiqueue = multiqueue;
+    virtio_net_change_num_queues(n, max);
 
     virtio_net_set_queues(n);
 }
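The arithmetic in these helpers follows virtio-net's fixed virtqueue layout: queue pair i owns virtqueues 2*i (rx) and 2*i + 1 (tx), and the control queue always sits last, so n pairs occupy 2n + 1 virtqueues; hence new_max_queues * 2 + 1 and the step-by-two resize loops. A worked check of that layout:

    #include <assert.h>

    /* With 2 queue pairs the device exposes 5 virtqueues:
     * rx0 = 0, tx0 = 1, rx1 = 2, tx1 = 3, ctrl = 4. */
    int main(void)
    {
        int max_queues = 2;
        int num_queues = max_queues * 2 + 1;

        assert(num_queues == 5);
        for (int i = 0; i < max_queues; i++) {
            assert(i * 2 < num_queues - 1);      /* rx of pair i */
            assert(i * 2 + 1 < num_queues - 1);  /* tx of pair i */
        }
        assert(num_queues - 1 == 4);             /* ctrl vq is last */
        return 0;
    }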
@@ -1583,21 +1681,7 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp)
     }
 
     for (i = 0; i < n->max_queues; i++) {
-        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
-        if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
-            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
-                                              virtio_net_tx_timer,
-                                              &n->vqs[i]);
-        } else {
-            n->vqs[i].tx_vq =
-                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
-            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
-        }
-
-        n->vqs[i].tx_waiting = 0;
-        n->vqs[i].n = n;
+        virtio_net_add_queue(n, i);
     }
 
     n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
@@ -1651,7 +1735,7 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VirtIONet *n = VIRTIO_NET(dev);
-    int i;
+    int i, max_queues;
 
     /* This will stop vhost backend if appropriate. */
     virtio_net_set_status(vdev, 0);
@@ -1666,18 +1750,9 @@ static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
     g_free(n->mac_table.macs);
     g_free(n->vlans);
 
-    for (i = 0; i < n->max_queues; i++) {
-        VirtIONetQueue *q = &n->vqs[i];
-        NetClientState *nc = qemu_get_subqueue(n->nic, i);
-
-        qemu_purge_queued_packets(nc);
-
-        if (q->tx_timer) {
-            timer_del(q->tx_timer);
-            timer_free(q->tx_timer);
-        } else if (q->tx_bh) {
-            qemu_bh_delete(q->tx_bh);
-        }
+    max_queues = n->multiqueue ? n->max_queues : 1;
+    for (i = 0; i < max_queues; i++) {
+        virtio_net_del_queue(n, i);
     }
 
     timer_del(n->announce_timer);
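This relies on an invariant from the queue helpers above: when the guest never enabled multiqueue, virtio_net_set_multiqueue(n, 0) already shrank the device to a single pair, so unrealize must only delete the queues that still exist. In sketch form (illustrative helper name):

    #include <stdbool.h>

    /* Only vqs[0] exists unless multiqueue was negotiated. */
    static int queues_to_delete(bool multiqueue, int max_queues)
    {
        return multiqueue ? max_queues : 1;
    }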
--- a/hw/pci/pci.c
+++ b/hw/pci/pci.c
@@ -2101,12 +2101,10 @@ static void pci_del_option_rom(PCIDevice *pdev)
 }
 
 /*
- * if !offset
- * Reserve space and add capability to the linked list in pci config space
- *
  * if offset = 0,
  * Find and reserve space and add capability to the linked list
- * in pci config space */
+ * in pci config space
+ */
 int pci_add_capability(PCIDevice *pdev, uint8_t cap_id,
                        uint8_t offset, uint8_t size)
 {
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -210,12 +210,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
         break;
 
     case VHOST_SET_OWNER:
-        break;
-
     case VHOST_RESET_OWNER:
-        memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
-        msg.size = sizeof(m.state);
         break;
 
     case VHOST_SET_MEM_TABLE:
@@ -258,20 +253,17 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_NUM:
     case VHOST_SET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         break;
 
     case VHOST_GET_VRING_BASE:
         memcpy(&msg.state, arg, sizeof(struct vhost_vring_state));
-        msg.state.index += dev->vq_index;
         msg.size = sizeof(m.state);
         need_reply = 1;
         break;
 
     case VHOST_SET_VRING_ADDR:
         memcpy(&msg.addr, arg, sizeof(struct vhost_vring_addr));
-        msg.addr.index += dev->vq_index;
         msg.size = sizeof(m.addr);
         break;
 
@@ -279,7 +271,7 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
     case VHOST_SET_VRING_CALL:
     case VHOST_SET_VRING_ERR:
         file = arg;
-        msg.u64 = (file->index + dev->vq_index) & VHOST_USER_VRING_IDX_MASK;
+        msg.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
         msg.size = sizeof(m.u64);
         if (ioeventfd_enabled() && file->fd > 0) {
             fds[fd_num++] = file->fd;
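For the kick/call/err requests the ring index travels in the low bits of a u64; with the revert there is no per-device base to add before masking. A sketch of the encoding, using the mask values defined in QEMU's vhost-user.c (bit 8, the NOFD flag, tells the slave that no eventfd accompanies the message):

    #include <stdint.h>

    #define VHOST_USER_VRING_IDX_MASK   0xffULL
    #define VHOST_USER_VRING_NOFD_MASK  (0x1ULL << 8)

    /* Encode the u64 payload for SET_VRING_KICK/CALL/ERR. */
    static uint64_t encode_vring(unsigned int index, int fd)
    {
        uint64_t u64 = index & VHOST_USER_VRING_IDX_MASK;
        if (fd < 0) {
            u64 |= VHOST_USER_VRING_NOFD_MASK;  /* no fd attached */
        }
        return u64;
    }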
@@ -321,7 +313,6 @@ static int vhost_user_call(struct vhost_dev *dev, unsigned long int request,
                 error_report("Received bad msg size.");
                 return -1;
             }
-            msg.state.index -= dev->vq_index;
             memcpy(arg, &msg.state, sizeof(struct vhost_vring_state));
             break;
         default:
--- a/include/hw/virtio/virtio-access.h
+++ b/include/hw/virtio/virtio-access.h
@@ -143,6 +143,15 @@ static inline uint64_t virtio_ldq_p(VirtIODevice *vdev, const void *ptr)
     }
 }
 
+static inline bool virtio_needs_swap(VirtIODevice *vdev)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+    return virtio_access_is_big_endian(vdev) ? false : true;
+#else
+    return virtio_access_is_big_endian(vdev) ? true : false;
+#endif
+}
+
 static inline uint16_t virtio_tswap16(VirtIODevice *vdev, uint16_t s)
 {
 #ifdef HOST_WORDS_BIGENDIAN
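virtio_needs_swap() reduces to "device endianness differs from host endianness"; the two preprocessor arms only hard-code which side the host is on. An equivalent formulation as a sketch:

    #include <stdbool.h>

    /* Byte-swapping is needed exactly when host and device
     * byte orders disagree. */
    static inline bool needs_swap_sketch(bool host_big_endian,
                                         bool device_big_endian)
    {
        return host_big_endian != device_big_endian;
    }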
--- a/net/vhost-user.c
+++ b/net/vhost-user.c
@@ -120,39 +120,35 @@ static void net_vhost_user_event(void *opaque, int event)
     case CHR_EVENT_OPENED:
         vhost_user_start(s);
         net_vhost_link_down(s, false);
-        error_report("chardev \"%s\" went up", s->nc.info_str);
+        error_report("chardev \"%s\" went up", s->chr->label);
         break;
     case CHR_EVENT_CLOSED:
         net_vhost_link_down(s, true);
         vhost_user_stop(s);
-        error_report("chardev \"%s\" went down", s->nc.info_str);
+        error_report("chardev \"%s\" went down", s->chr->label);
         break;
     }
 }
 
 static int net_vhost_user_init(NetClientState *peer, const char *device,
-                               const char *name, CharDriverState *chr,
-                               uint32_t queues)
+                               const char *name, CharDriverState *chr)
 {
     NetClientState *nc;
     VhostUserState *s;
-    int i;
 
-    for (i = 0; i < queues; i++) {
-        nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
+    nc = qemu_new_net_client(&net_vhost_user_info, peer, device, name);
 
-        snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user%d to %s",
-                 i, chr->label);
+    snprintf(nc->info_str, sizeof(nc->info_str), "vhost-user to %s",
+             chr->label);
 
-        s = DO_UPCAST(VhostUserState, nc, nc);
+    s = DO_UPCAST(VhostUserState, nc, nc);
 
-        /* We don't provide a receive callback */
-        s->nc.receive_disabled = 1;
-        s->chr = chr;
-        s->nc.queue_index = i;
+    /* We don't provide a receive callback */
+    s->nc.receive_disabled = 1;
+    s->chr = chr;
 
-        qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
-    }
+    qemu_chr_add_handlers(s->chr, NULL, NULL, net_vhost_user_event, s);
+
 
     return 0;
 }
@@ -230,7 +226,6 @@ static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp)
 int net_init_vhost_user(const NetClientOptions *opts, const char *name,
                         NetClientState *peer, Error **errp)
 {
-    uint32_t queues;
     const NetdevVhostUserOptions *vhost_user_opts;
     CharDriverState *chr;
 
@@ -248,12 +243,6 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name,
         return -1;
     }
 
-    /* number of queues for multiqueue */
-    if (vhost_user_opts->has_queues) {
-        queues = vhost_user_opts->queues;
-    } else {
-        queues = 1;
-    }
 
-    return net_vhost_user_init(peer, "vhost_user", name, chr, queues);
+    return net_vhost_user_init(peer, "vhost_user", name, chr);
 }
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -2466,16 +2466,12 @@
 #
 # @vhostforce: #optional vhost on for non-MSIX virtio guests (default: false).
 #
-# @queues: #optional number of queues to be created for multiqueue vhost-user
-#          (default: 1) (Since 2.4)
-#
 # Since 2.1
 ##
 { 'struct': 'NetdevVhostUserOptions',
   'data': {
     'chardev':        'str',
-    '*vhostforce':    'bool',
-    '*queues':        'uint32' } }
+    '*vhostforce':    'bool' } }
 
 ##
 # @NetClientOptions
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -1963,14 +1963,13 @@ The hubport netdev lets you connect a NIC to a QEMU "vlan" instead of a single
 netdev.  @code{-net} and @code{-device} with parameter @option{vlan} create the
 required hub automatically.
 
-@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off][,queues=n]
+@item -netdev vhost-user,chardev=@var{id}[,vhostforce=on|off]
 
 Establish a vhost-user netdev, backed by a chardev @var{id}. The chardev should
 be a unix domain socket backed one. The vhost-user uses a specifically defined
 protocol to pass vhost ioctl replacement messages to an application on the other
 end of the socket. On non-MSIX guests, the feature can be forced with
-@var{vhostforce}. Use 'queues=@var{n}' to specify the number of queues to
-be created for multiqueue vhost-user.
+@var{vhostforce}.
 
 Example:
 @example