[pve-devel] Applied: [PATCH kvm] pull in some stable hotfixes

Wolfgang Bumiller w.bumiller at proxmox.com
Thu Aug 25 10:52:35 CEST 2016


---
 ...rtio-recalculate-vq-inuse-after-migration.patch |  52 +++++++++++
 ...o-decrement-vq-inuse-in-virtqueue_discard.patch |  34 +++++++
 ...008-iscsi-pass-SCSI-status-back-for-SG_IO.patch |  26 ++++++
 ...9-net-limit-allocation-in-nc_sendv_compat.patch |  37 ++++++++
 ...0010-ui-fix-refresh-of-VNC-server-surface.patch | 102 +++++++++++++++++++++
 debian/patches/series                              |   5 +
 6 files changed, 256 insertions(+)
 create mode 100644 debian/patches/extra/0006-virtio-recalculate-vq-inuse-after-migration.patch
 create mode 100644 debian/patches/extra/0007-virtio-decrement-vq-inuse-in-virtqueue_discard.patch
 create mode 100644 debian/patches/extra/0008-iscsi-pass-SCSI-status-back-for-SG_IO.patch
 create mode 100644 debian/patches/extra/0009-net-limit-allocation-in-nc_sendv_compat.patch
 create mode 100644 debian/patches/extra/0010-ui-fix-refresh-of-VNC-server-surface.patch

diff --git a/debian/patches/extra/0006-virtio-recalculate-vq-inuse-after-migration.patch b/debian/patches/extra/0006-virtio-recalculate-vq-inuse-after-migration.patch
new file mode 100644
index 0000000..8c5567e
--- /dev/null
+++ b/debian/patches/extra/0006-virtio-recalculate-vq-inuse-after-migration.patch
@@ -0,0 +1,52 @@
+From ed3112487835fb5c5ec685e15032641c25018f87 Mon Sep 17 00:00:00 2001
+From: Stefan Hajnoczi <stefanha at redhat.com>
+Date: Mon, 15 Aug 2016 13:54:15 +0100
+Subject: [PATCH 06/10] virtio: recalculate vq->inuse after migration
+
+The vq->inuse field is not migrated.  Many devices don't hold
+VirtQueueElements across migration, so it doesn't matter that
+vq->inuse starts at 0 on the destination QEMU.
+
+At least virtio-serial, virtio-blk, and virtio-balloon migrate while
+holding VirtQueueElements.  For these devices we need to recalculate
+vq->inuse upon load so the value is correct.
+
+Cc: qemu-stable at nongnu.org
+Signed-off-by: Stefan Hajnoczi <stefanha at redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst at redhat.com>
+Reviewed-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Reviewed-by: Michael S. Tsirkin <mst at redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
+---
+ hw/virtio/virtio.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
+index 111ad8e..486147b 100644
+--- a/hw/virtio/virtio.c
++++ b/hw/virtio/virtio.c
+@@ -1629,6 +1629,21 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
+             }
+             vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
+             vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
++
++            /*
++             * Some devices migrate VirtQueueElements that have been popped
++             * from the avail ring but not yet returned to the used ring.
++             */
++            vdev->vq[i].inuse = vdev->vq[i].last_avail_idx -
++                                vdev->vq[i].used_idx;
++            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
++                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
++                             "used_idx 0x%x",
++                             i, vdev->vq[i].vring.num,
++                             vdev->vq[i].last_avail_idx,
++                             vdev->vq[i].used_idx);
++                return -1;
++            }
+         }
+     }
+ 
+-- 
+2.1.4
+
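
For reference, the recalculation works because virtio ring indices are
free-running 16-bit counters, so the uint16_t subtraction is wrap-safe.
A minimal standalone sketch of the idea (ring_inuse and VQ_SIZE are
illustrative names, not QEMU API):

    #include <stdint.h>
    #include <stdio.h>

    #define VQ_SIZE 256u  /* vring.num: number of descriptors in the ring */

    /* Elements popped from the avail ring but not yet returned to the
     * used ring.  The uint16_t subtraction wraps correctly even after
     * the indices roll over 65535. */
    static int ring_inuse(uint16_t last_avail_idx, uint16_t used_idx)
    {
        uint16_t inuse = last_avail_idx - used_idx;
        if (inuse > VQ_SIZE) {
            return -1;  /* inconsistent migration state */
        }
        return inuse;
    }

    int main(void)
    {
        printf("%d\n", ring_inuse(3, 1));      /* 2 elements in flight */
        printf("%d\n", ring_inuse(2, 65534));  /* wrapped: still 4 */
        printf("%d\n", ring_inuse(1000, 1));   /* -1: exceeds ring size */
        return 0;
    }
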
diff --git a/debian/patches/extra/0007-virtio-decrement-vq-inuse-in-virtqueue_discard.patch b/debian/patches/extra/0007-virtio-decrement-vq-inuse-in-virtqueue_discard.patch
new file mode 100644
index 0000000..5ad2487
--- /dev/null
+++ b/debian/patches/extra/0007-virtio-decrement-vq-inuse-in-virtqueue_discard.patch
@@ -0,0 +1,34 @@
+From e8bc5606ad11e053a7c9ca64ed1aeaed18be8e4e Mon Sep 17 00:00:00 2001
+From: Stefan Hajnoczi <stefanha at redhat.com>
+Date: Mon, 15 Aug 2016 13:54:16 +0100
+Subject: [PATCH 07/10] virtio: decrement vq->inuse in virtqueue_discard()
+
+virtqueue_discard() moves vq->last_avail_idx back so the element can be
+popped again.  It's necessary to decrement vq->inuse to avoid "leaking"
+the element count.
+
+Cc: qemu-stable at nongnu.org
+Signed-off-by: Stefan Hajnoczi <stefanha at redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst at redhat.com>
+Reviewed-by: Cornelia Huck <cornelia.huck at de.ibm.com>
+Reviewed-by: Michael S. Tsirkin <mst at redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst at redhat.com>
+---
+ hw/virtio/virtio.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
+index 486147b..787849a 100644
+--- a/hw/virtio/virtio.c
++++ b/hw/virtio/virtio.c
+@@ -267,6 +267,7 @@ void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
+                        unsigned int len)
+ {
+     vq->last_avail_idx--;
++    vq->inuse--;
+     virtqueue_unmap_sg(vq, elem, len);
+ }
+ 
+-- 
+2.1.4
+
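
A toy model of the bookkeeping this fixes: vq->inuse must count exactly
the elements popped but not yet returned, so the un-pop path has to
decrement it as well (illustrative names, not QEMU API):

    #include <assert.h>
    #include <stdint.h>

    struct toy_vq {
        uint16_t last_avail_idx;  /* next avail-ring entry to pop */
        uint16_t used_idx;        /* entries returned to the guest */
        unsigned int inuse;       /* popped but not yet returned */
    };

    static void toy_pop(struct toy_vq *vq)  { vq->last_avail_idx++; vq->inuse++; }
    static void toy_fill(struct toy_vq *vq) { vq->used_idx++; vq->inuse--; }

    static void toy_discard(struct toy_vq *vq)
    {
        vq->last_avail_idx--;  /* element can be popped again later */
        vq->inuse--;           /* the fix: without this, inuse leaks */
    }

    int main(void)
    {
        struct toy_vq vq = {0};
        toy_pop(&vq);
        toy_discard(&vq);
        assert(vq.inuse == 0);  /* invariant: inuse == last_avail_idx - used_idx */
        toy_pop(&vq);
        toy_fill(&vq);
        assert(vq.inuse == 0);
        return 0;
    }
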
diff --git a/debian/patches/extra/0008-iscsi-pass-SCSI-status-back-for-SG_IO.patch b/debian/patches/extra/0008-iscsi-pass-SCSI-status-back-for-SG_IO.patch
new file mode 100644
index 0000000..2acfa61
--- /dev/null
+++ b/debian/patches/extra/0008-iscsi-pass-SCSI-status-back-for-SG_IO.patch
@@ -0,0 +1,26 @@
+From b3d69a0d7dc3945ff134e69593b5fcec447e8389 Mon Sep 17 00:00:00 2001
+From: Vadim Rozenfeld <vrozenfe at redhat.com>
+Date: Fri, 13 May 2016 13:03:22 +0200
+Subject: [PATCH 08/10] iscsi: pass SCSI status back for SG_IO
+
+Signed-off-by: Vadim Rozenfeld <vrozenfe at redhat.com>
+Signed-off-by: Paolo Bonzini <pbonzini at redhat.com>
+---
+ block/iscsi.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/block/iscsi.c b/block/iscsi.c
+index 0466c30..1f6a0e5 100644
+--- a/block/iscsi.c
++++ b/block/iscsi.c
+@@ -768,6 +768,7 @@ iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
+     acb->ioh->driver_status = 0;
+     acb->ioh->host_status   = 0;
+     acb->ioh->resid         = 0;
++    acb->ioh->status        = status;
+ 
+ #define SG_ERR_DRIVER_SENSE    0x08
+ 
+-- 
+2.1.4
+
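
The visible effect is on the caller's side: an SG_IO request now gets
the real SCSI status byte back in its sg_io_hdr instead of 0. A minimal
sketch using the plain Linux SG_IO interface (device path and CDB are
illustrative):

    #include <fcntl.h>
    #include <scsi/sg.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned char cdb[6] = { 0x00 };  /* TEST UNIT READY */
        unsigned char sense[32];
        struct sg_io_hdr io;
        int fd = open("/dev/sg0", O_RDWR);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        memset(&io, 0, sizeof(io));
        io.interface_id    = 'S';
        io.cmd_len         = sizeof(cdb);
        io.cmdp            = cdb;
        io.dxfer_direction = SG_DXFER_NONE;
        io.sbp             = sense;
        io.mx_sb_len       = sizeof(sense);
        io.timeout         = 5000;  /* milliseconds */

        if (ioctl(fd, SG_IO, &io) < 0) {
            perror("SG_IO");
            close(fd);
            return 1;
        }
        /* Without the fix, QEMU's iscsi backend left io.status at 0 even
         * when the target returned e.g. CHECK CONDITION (0x02). */
        printf("scsi status=0x%02x host=0x%x driver=0x%x\n",
               (unsigned)io.status, (unsigned)io.host_status,
               (unsigned)io.driver_status);
        close(fd);
        return 0;
    }
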
diff --git a/debian/patches/extra/0009-net-limit-allocation-in-nc_sendv_compat.patch b/debian/patches/extra/0009-net-limit-allocation-in-nc_sendv_compat.patch
new file mode 100644
index 0000000..881e34a
--- /dev/null
+++ b/debian/patches/extra/0009-net-limit-allocation-in-nc_sendv_compat.patch
@@ -0,0 +1,37 @@
+From 1419429813905c95e2648516a8a23ad43e2c7297 Mon Sep 17 00:00:00 2001
+From: Peter Lieven <pl at kamp.de>
+Date: Thu, 30 Jun 2016 11:49:40 +0200
+Subject: [PATCH 09/10] net: limit allocation in nc_sendv_compat
+
+We only need to allocate enough memory to hold the packet, which
+might be less than NET_BUFSIZE. Additionally, fail early if the
+packet is larger than NET_BUFSIZE.
+
+Signed-off-by: Peter Lieven <pl at kamp.de>
+---
+ net/net.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/net/net.c b/net/net.c
+index 2b9de86..af36a2a 100644
+--- a/net/net.c
++++ b/net/net.c
+@@ -692,9 +692,13 @@ static ssize_t nc_sendv_compat(NetClientState *nc, const struct iovec *iov,
+         buffer = iov[0].iov_base;
+         offset = iov[0].iov_len;
+     } else {
+-        buf = g_new(uint8_t, NET_BUFSIZE);
++        offset = iov_size(iov, iovcnt);
++        if (offset > NET_BUFSIZE) {
++            return -1;
++        }
++        buf = g_malloc(offset);
+         buffer = buf;
+-        offset = iov_to_buf(iov, iovcnt, 0, buf, NET_BUFSIZE);
++        offset = iov_to_buf(iov, iovcnt, 0, buf, offset);
+     }
+ 
+     if (flags & QEMU_NET_PACKET_FLAG_RAW && nc->info->receive_raw) {
+-- 
+2.1.4
+
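
The same strategy as a standalone sketch: size the buffer from the iovec
total and reject oversized packets up front (flatten_iov is a made-up
helper standing in for QEMU's iov_size()/iov_to_buf(), and this
NET_BUFSIZE value is illustrative):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    #define NET_BUFSIZE (64 * 1024)  /* illustrative cap */

    static uint8_t *flatten_iov(const struct iovec *iov, int iovcnt,
                                size_t *out_len)
    {
        size_t total = 0;
        for (int i = 0; i < iovcnt; i++) {
            total += iov[i].iov_len;
        }
        if (total > NET_BUFSIZE) {
            return NULL;  /* fail early instead of truncating */
        }
        uint8_t *buf = malloc(total);  /* packet-sized, not NET_BUFSIZE */
        if (buf == NULL) {
            return NULL;
        }
        size_t off = 0;
        for (int i = 0; i < iovcnt; i++) {
            memcpy(buf + off, iov[i].iov_base, iov[i].iov_len);
            off += iov[i].iov_len;
        }
        *out_len = total;
        return buf;
    }

    int main(void)
    {
        char hdr[] = "abc", body[] = "defg";
        struct iovec iov[2] = {
            { .iov_base = hdr,  .iov_len = 3 },
            { .iov_base = body, .iov_len = 4 },
        };
        size_t len = 0;
        uint8_t *pkt = flatten_iov(iov, 2, &len);
        int ok = (pkt != NULL && len == 7);
        free(pkt);
        return ok ? 0 : 1;
    }
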
diff --git a/debian/patches/extra/0010-ui-fix-refresh-of-VNC-server-surface.patch b/debian/patches/extra/0010-ui-fix-refresh-of-VNC-server-surface.patch
new file mode 100644
index 0000000..fd2ad03
--- /dev/null
+++ b/debian/patches/extra/0010-ui-fix-refresh-of-VNC-server-surface.patch
@@ -0,0 +1,102 @@
+From e74eda6892bede3f22ce449af32fe941147396d8 Mon Sep 17 00:00:00 2001
+From: "Daniel P. Berrange" <berrange at redhat.com>
+Date: Tue, 16 Aug 2016 17:30:32 +0100
+Subject: [PATCH 10/10] ui: fix refresh of VNC server surface
+
+In a previous commit
+
+  commit c7628bff4138ce906a3620d12e0820c1cf6c140d
+  Author: Gerd Hoffmann <kraxel at redhat.com>
+  Date:   Fri Oct 30 12:10:09 2015 +0100
+
+    vnc: only alloc server surface with clients connected
+
+the VNC server was changed so that the 'vd->server' pixman
+image was only allocated when a client is connected.
+
+Since then, if a client disconnects and then reconnects to
+the VNC server, all they will see is a black screen until
+they do something that triggers a refresh. On a graphical
+desktop this is rarely noticed, since many things are going
+on that cause a refresh. On a plain-text console it is very
+obvious, since nothing refreshes frequently.
+
+The problem is that the VNC server didn't update the guest
+dirty bitmap, so it still believes its server image is in
+sync with the guest contents.
+
+To fix this, we must explicitly mark the entire guest desktop
+as dirty after re-creating the server surface. Move this
+logic into vnc_update_server_surface() so it is guaranteed
+to be called in all code paths that re-create the surface,
+instead of only in vnc_dpy_switch().
+
+Signed-off-by: Daniel P. Berrange <berrange at redhat.com>
+---
+ ui/vnc.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/ui/vnc.c b/ui/vnc.c
+index 66e2163..de145cd 100644
+--- a/ui/vnc.c
++++ b/ui/vnc.c
+@@ -806,6 +806,8 @@ void *vnc_server_fb_ptr(VncDisplay *vd, int x, int y)
+ 
+ static void vnc_update_server_surface(VncDisplay *vd)
+ {
++    int width, height;
++
+     qemu_pixman_image_unref(vd->server);
+     vd->server = NULL;
+ 
+@@ -813,10 +815,15 @@ static void vnc_update_server_surface(VncDisplay *vd)
+         return;
+     }
+ 
++    width = vnc_width(vd);
++    height = vnc_height(vd);
+     vd->server = pixman_image_create_bits(VNC_SERVER_FB_FORMAT,
+-                                          vnc_width(vd),
+-                                          vnc_height(vd),
++                                          width, height,
+                                           NULL, 0);
++
++    memset(vd->guest.dirty, 0x00, sizeof(vd->guest.dirty));
++    vnc_set_area_dirty(vd->guest.dirty, vd, 0, 0,
++                       width, height);
+ }
+ 
+ static void vnc_dpy_switch(DisplayChangeListener *dcl,
+@@ -824,7 +831,6 @@ static void vnc_dpy_switch(DisplayChangeListener *dcl,
+ {
+     VncDisplay *vd = container_of(dcl, VncDisplay, dcl);
+     VncState *vs;
+-    int width, height;
+ 
+     vnc_abort_display_jobs(vd);
+     vd->ds = surface;
+@@ -836,11 +842,6 @@ static void vnc_dpy_switch(DisplayChangeListener *dcl,
+     qemu_pixman_image_unref(vd->guest.fb);
+     vd->guest.fb = pixman_image_ref(surface->image);
+     vd->guest.format = surface->format;
+-    width = vnc_width(vd);
+-    height = vnc_height(vd);
+-    memset(vd->guest.dirty, 0x00, sizeof(vd->guest.dirty));
+-    vnc_set_area_dirty(vd->guest.dirty, vd, 0, 0,
+-                       width, height);
+ 
+     QTAILQ_FOREACH(vs, &vd->clients, next) {
+         vnc_colordepth(vs);
+@@ -850,7 +851,8 @@ static void vnc_dpy_switch(DisplayChangeListener *dcl,
+         }
+         memset(vs->dirty, 0x00, sizeof(vs->dirty));
+         vnc_set_area_dirty(vs->dirty, vd, 0, 0,
+-                           width, height);
++                           vnc_width(vd),
++                           vnc_height(vd));
+     }
+ }
+ 
+-- 
+2.1.4
+
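
The pattern here is to hoist the "mark everything dirty" invariant into
the one function every surface re-creation goes through, so no caller
can forget it. A toy sketch of that structure (names are illustrative,
not the real ui/vnc.c types):

    #include <stdbool.h>
    #include <string.h>

    struct toy_display {
        bool have_clients;
        bool server_allocated;     /* stands in for vd->server */
        unsigned char dirty[256];  /* guest dirty bitmap */
    };

    /* Re-create the server surface.  The dirty-bitmap reset lives here,
     * in the single function all re-creation paths share, so a
     * reconnecting client can never inherit a stale "all clean" bitmap. */
    static void toy_update_server_surface(struct toy_display *d)
    {
        d->server_allocated = false;
        if (!d->have_clients) {
            return;  /* surface only exists while clients are connected */
        }
        d->server_allocated = true;
        /* A fresh surface holds none of the guest contents yet, so mark
         * the whole guest area dirty to force a full refresh. */
        memset(d->dirty, 0xff, sizeof(d->dirty));
    }

    int main(void)
    {
        struct toy_display d = { .have_clients = true };
        toy_update_server_surface(&d);  /* e.g. on client reconnect */
        return d.dirty[0] == 0xff ? 0 : 1;
    }
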
diff --git a/debian/patches/series b/debian/patches/series
index 8aeddda..972f109 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -62,3 +62,8 @@ extra/0002-net-vmxnet3-check-for-device_active-before-write.patch
 extra/0003-net-vmxnet-use-g_new-for-pkt-initialisation.patch
 extra/0004-net-vmxnet-check-IP-header-length.patch
 extra/0005-net-vmxnet-initialise-local-tx-descriptor.patch
+extra/0006-virtio-recalculate-vq-inuse-after-migration.patch
+extra/0007-virtio-decrement-vq-inuse-in-virtqueue_discard.patch
+extra/0008-iscsi-pass-SCSI-status-back-for-SG_IO.patch
+extra/0009-net-limit-allocation-in-nc_sendv_compat.patch
+extra/0010-ui-fix-refresh-of-VNC-server-surface.patch
-- 
2.1.4
