[pve-devel] [PATCH qemu] cherry-pick stable fixes for 7.2

Fiona Ebner <f.ebner@proxmox.com>
Tue Jan 10 10:34:25 CET 2023


Two fixes for virtio-mem and one for vIOMMU. Neither feature is exposed
in PVE's qemu-server yet, but both are planned to be added.

Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
---

@Thomas: the migration optimization I mentioned off-list seems to apply
only to 'background snapshot' and not to live migration, so I'm not
picking it up.
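
For reviewers who want a quick feel for the first virtio-mem fix below:
a minimal, standalone C sketch (not QEMU code; the region/block sizes
are made up for illustration) of why the section offset has to be
divided by block_size rather than bitmap_size when computing the bit
index:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical sizes, only for illustration */
    const uint64_t region_size = 4ULL << 30;                /* 4 GiB virtio-mem region */
    const uint64_t block_size  = 2ULL << 20;                /* 2 MiB blocks */
    const uint64_t bitmap_size = region_size / block_size;  /* 2048 bits, one per block */

    const uint64_t offset_within_region = 512ULL << 20;     /* section starts at 512 MiB */

    /* old code divided by bitmap_size: the index can land far past the bitmap's end */
    uint64_t wrong_bit = offset_within_region / bitmap_size;    /* 262144 */
    /* fixed code divides by block_size: one bit per block */
    uint64_t right_bit = offset_within_region / block_size;     /* 256 */

    printf("wrong: %" PRIu64 ", correct: %" PRIu64 " (bitmap has %" PRIu64 " bits)\n",
           wrong_bit, right_bit, bitmap_size);
    return 0;
}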

 ...he-bitmap-index-of-the-section-offse.patch |  44 ++++++
 ...he-iterator-variable-in-a-vmem-rdl_l.patch |  36 +++++
 ...ty-bitmap-syncing-when-vIOMMU-is-ena.patch | 141 ++++++++++++++++++
 debian/patches/series                         |   3 +
 4 files changed, 224 insertions(+)
 create mode 100644 debian/patches/extra/0003-virtio-mem-Fix-the-bitmap-index-of-the-section-offse.patch
 create mode 100644 debian/patches/extra/0004-virtio-mem-Fix-the-iterator-variable-in-a-vmem-rdl_l.patch
 create mode 100644 debian/patches/extra/0005-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch

diff --git a/debian/patches/extra/0003-virtio-mem-Fix-the-bitmap-index-of-the-section-offse.patch b/debian/patches/extra/0003-virtio-mem-Fix-the-bitmap-index-of-the-section-offse.patch
new file mode 100644
index 0000000..b54c0cc
--- /dev/null
+++ b/debian/patches/extra/0003-virtio-mem-Fix-the-bitmap-index-of-the-section-offse.patch
@@ -0,0 +1,44 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Chenyi Qiang <chenyi.qiang@intel.com>
+Date: Fri, 16 Dec 2022 14:22:31 +0800
+Subject: [PATCH] virtio-mem: Fix the bitmap index of the section offset
+
+vmem->bitmap indexes the memory region of the virtio-mem backend at a
+granularity of block_size. To calculate the index of target section offset,
+the block_size should be divided instead of the bitmap_size.
+
+Fixes: 2044969f0b ("virtio-mem: Implement RamDiscardManager interface")
+Signed-off-by: Chenyi Qiang <chenyi.qiang@intel.com>
+Message-Id: <20221216062231.11181-1-chenyi.qiang@intel.com>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Cc: qemu-stable@nongnu.org
+Signed-off-by: David Hildenbrand <david@redhat.com>
+(cherry-picked from commit b11cf32e07a2f7ff0d171b89497381a04c9d07e0)
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+---
+ hw/virtio/virtio-mem.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
+index ed170def48..e19ee817fe 100644
+--- a/hw/virtio/virtio-mem.c
++++ b/hw/virtio/virtio-mem.c
+@@ -235,7 +235,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem,
+     uint64_t offset, size;
+     int ret = 0;
+ 
+-    first_bit = s->offset_within_region / vmem->bitmap_size;
++    first_bit = s->offset_within_region / vmem->block_size;
+     first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
+     while (first_bit < vmem->bitmap_size) {
+         MemoryRegionSection tmp = *s;
+@@ -267,7 +267,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem,
+     uint64_t offset, size;
+     int ret = 0;
+ 
+-    first_bit = s->offset_within_region / vmem->bitmap_size;
++    first_bit = s->offset_within_region / vmem->block_size;
+     first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit);
+     while (first_bit < vmem->bitmap_size) {
+         MemoryRegionSection tmp = *s;
diff --git a/debian/patches/extra/0004-virtio-mem-Fix-the-iterator-variable-in-a-vmem-rdl_l.patch b/debian/patches/extra/0004-virtio-mem-Fix-the-iterator-variable-in-a-vmem-rdl_l.patch
new file mode 100644
index 0000000..c303094
--- /dev/null
+++ b/debian/patches/extra/0004-virtio-mem-Fix-the-iterator-variable-in-a-vmem-rdl_l.patch
@@ -0,0 +1,36 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Chenyi Qiang <chenyi.qiang@intel.com>
+Date: Wed, 28 Dec 2022 17:03:12 +0800
+Subject: [PATCH] virtio-mem: Fix the iterator variable in a vmem->rdl_list
+ loop
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+It should be the variable rdl2 to revert the already-notified listeners.
+
+Fixes: 2044969f0b ("virtio-mem: Implement RamDiscardManager interface")
+Signed-off-by: Chenyi Qiang <chenyi.qiang@intel.com>
+Message-Id: <20221228090312.17276-1-chenyi.qiang@intel.com>
+Cc: qemu-stable@nongnu.org
+Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+(cherry-picked from commit 29f1b328e3b767cba2661920a8470738469b9e36)
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+---
+ hw/virtio/virtio-mem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c
+index e19ee817fe..56db586c89 100644
+--- a/hw/virtio/virtio-mem.c
++++ b/hw/virtio/virtio-mem.c
+@@ -341,7 +341,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset,
+     if (ret) {
+         /* Notify all already-notified listeners. */
+         QLIST_FOREACH(rdl2, &vmem->rdl_list, next) {
+-            MemoryRegionSection tmp = *rdl->section;
++            MemoryRegionSection tmp = *rdl2->section;
+ 
+             if (rdl2 == rdl) {
+                 break;
diff --git a/debian/patches/extra/0005-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch b/debian/patches/extra/0005-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch
new file mode 100644
index 0000000..b72b3da
--- /dev/null
+++ b/debian/patches/extra/0005-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch
@@ -0,0 +1,141 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Fri, 16 Dec 2022 11:35:52 +0800
+Subject: [PATCH] vhost: fix vq dirty bitmap syncing when vIOMMU is enabled
+
+When vIOMMU is enabled, the vq->used_phys is actually the IOVA not
+GPA. So we need to translate it to GPA before the syncing otherwise we
+may hit the following crash since IOVA could be out of the scope of
+the GPA log size. This could be noted when using virtio-IOMMU with
+vhost using 1G memory.
+
+Fixes: c471ad0e9bd46 ("vhost_net: device IOTLB support")
+Cc: qemu-stable@nongnu.org
+Tested-by: Lei Yang <leiyang@redhat.com>
+Reported-by: Yalan Zhang <yalzhang@redhat.com>
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Message-Id: <20221216033552.77087-1-jasowang@redhat.com>
+Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+(cherry-picked from commit 345cc1cbcbce2bab00abc2b88338d7d89c702d6b)
+Signed-off-by: Fiona Ebner <f.ebner@proxmox.com>
+---
+ hw/virtio/vhost.c | 84 ++++++++++++++++++++++++++++++++++++-----------
+ 1 file changed, 64 insertions(+), 20 deletions(-)
+
+diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
+index 7fb008bc9e..fdcd1a8fdf 100644
+--- a/hw/virtio/vhost.c
++++ b/hw/virtio/vhost.c
+@@ -20,6 +20,7 @@
+ #include "qemu/range.h"
+ #include "qemu/error-report.h"
+ #include "qemu/memfd.h"
++#include "qemu/log.h"
+ #include "standard-headers/linux/vhost_types.h"
+ #include "hw/virtio/virtio-bus.h"
+ #include "hw/virtio/virtio-access.h"
+@@ -106,6 +107,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev,
+     }
+ }
+ 
++static bool vhost_dev_has_iommu(struct vhost_dev *dev)
++{
++    VirtIODevice *vdev = dev->vdev;
++
++    /*
++     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
++     * incremental memory mapping API via IOTLB API. For platform that
++     * does not have IOMMU, there's no need to enable this feature
++     * which may cause unnecessary IOTLB miss/update transactions.
++     */
++    if (vdev) {
++        return virtio_bus_device_iommu_enabled(vdev) &&
++            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
++    } else {
++        return false;
++    }
++}
++
+ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
+                                    MemoryRegionSection *section,
+                                    hwaddr first,
+@@ -137,8 +156,51 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev,
+             continue;
+         }
+ 
+-        vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys,
+-                              range_get_last(vq->used_phys, vq->used_size));
++        if (vhost_dev_has_iommu(dev)) {
++            IOMMUTLBEntry iotlb;
++            hwaddr used_phys = vq->used_phys, used_size = vq->used_size;
++            hwaddr phys, s, offset;
++
++            while (used_size) {
++                rcu_read_lock();
++                iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as,
++                                                      used_phys,
++                                                      true,
++                                                      MEMTXATTRS_UNSPECIFIED);
++                rcu_read_unlock();
++
++                if (!iotlb.target_as) {
++                    qemu_log_mask(LOG_GUEST_ERROR, "translation "
++                                  "failure for used_iova %"PRIx64"\n",
++                                  used_phys);
++                    return -EINVAL;
++                }
++
++                offset = used_phys & iotlb.addr_mask;
++                phys = iotlb.translated_addr + offset;
++
++                /*
++                 * Distance from start of used ring until last byte of
++                 * IOMMU page.
++                 */
++                s = iotlb.addr_mask - offset;
++                /*
++                 * Size of used ring, or of the part of it until end
++                 * of IOMMU page. To avoid zero result, do the adding
++                 * outside of MIN().
++                 */
++                s = MIN(s, used_size - 1) + 1;
++
++                vhost_dev_sync_region(dev, section, start_addr, end_addr, phys,
++                                      range_get_last(phys, s));
++                used_size -= s;
++                used_phys += s;
++            }
++        } else {
++            vhost_dev_sync_region(dev, section, start_addr,
++                                  end_addr, vq->used_phys,
++                                  range_get_last(vq->used_phys, vq->used_size));
++        }
+     }
+     return 0;
+ }
+@@ -306,24 +368,6 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
+     dev->log_size = size;
+ }
+ 
+-static bool vhost_dev_has_iommu(struct vhost_dev *dev)
+-{
+-    VirtIODevice *vdev = dev->vdev;
+-
+-    /*
+-     * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support
+-     * incremental memory mapping API via IOTLB API. For platform that
+-     * does not have IOMMU, there's no need to enable this feature
+-     * which may cause unnecessary IOTLB miss/update transactions.
+-     */
+-    if (vdev) {
+-        return virtio_bus_device_iommu_enabled(vdev) &&
+-            virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM);
+-    } else {
+-        return false;
+-    }
+-}
+-
+ static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr,
+                               hwaddr *plen, bool is_write)
+ {
diff --git a/debian/patches/series b/debian/patches/series
index 191ba29..dc3bebd 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -1,5 +1,8 @@
 extra/0001-monitor-qmp-fix-race-with-clients-disconnecting-earl.patch
 extra/0002-init-daemonize-defuse-PID-file-resolve-error.patch
+extra/0003-virtio-mem-Fix-the-bitmap-index-of-the-section-offse.patch
+extra/0004-virtio-mem-Fix-the-iterator-variable-in-a-vmem-rdl_l.patch
+extra/0005-vhost-fix-vq-dirty-bitmap-syncing-when-vIOMMU-is-ena.patch
 bitmap-mirror/0001-drive-mirror-add-support-for-sync-bitmap-mode-never.patch
 bitmap-mirror/0002-drive-mirror-add-support-for-conditional-and-always-.patch
 bitmap-mirror/0003-mirror-add-check-for-bitmap-mode-without-bitmap.patch
-- 
2.30.2