[pve-devel] [PATCH qemu-server 08/22] drive: move storage_allows_io_uring_default() and drive_uses_cache_direct() helpers to drive module
Fiona Ebner
f.ebner at proxmox.com
Thu Jun 12 16:02:39 CEST 2025
Suggested-by: Alexandre Derumier <alexandre.derumier at groupe-cyllene.com>
Signed-off-by: Fiona Ebner <f.ebner at proxmox.com>
---
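A rough usage sketch for reviewers (not part of the patch; the $drive and
$scfg hashes below are made-up stand-ins for what parse_drive() and
PVE::Storage::storage_config() would return), showing how a call site can
pick the aio mode via the relocated helpers, roughly mirroring what
print_drive_commandline_full() does when no explicit aio= is set:

    use strict;
    use warnings;
    use PVE::QemuServer::Drive qw(storage_allows_io_uring_default);

    # hypothetical example inputs
    my $drive = { file => 'local-lvm:vm-100-disk-0', cache => 'writeback' };
    my $scfg = { type => 'lvm' };

    # cache=off/none/directsync means the image gets opened with O_DIRECT;
    # drive_uses_cache_direct() is not exported, so call it fully qualified
    my $cache_direct = PVE::QemuServer::Drive::drive_uses_cache_direct($drive, $scfg);

    # io_uring only by default where the storage type is known to be safe,
    # otherwise fall back to native (needs O_DIRECT) or threads
    my $aio =
        storage_allows_io_uring_default($scfg, $cache_direct) ? 'io_uring'
        : $cache_direct ? 'native'
        : 'threads';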
PVE/QemuServer.pm | 46 +++++++++++------------------------------
PVE/QemuServer/Drive.pm | 33 +++++++++++++++++++++++++++++
2 files changed, 45 insertions(+), 34 deletions(-)
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 616afd7c..24b791e8 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -55,7 +55,16 @@ use PVE::QemuServer::Helpers qw(config_aware_timeout min_version kvm_user_versio
use PVE::QemuServer::Cloudinit;
use PVE::QemuServer::CGroup;
use PVE::QemuServer::CPUConfig qw(print_cpu_device get_cpu_options get_cpu_bitness is_native_arch get_amd_sev_object get_amd_sev_type);
-use PVE::QemuServer::Drive qw(is_valid_drivename checked_volume_format drive_is_cloudinit drive_is_cdrom drive_is_read_only parse_drive print_drive);
+use PVE::QemuServer::Drive qw(
+    is_valid_drivename
+    checked_volume_format
+    drive_is_cloudinit
+    drive_is_cdrom
+    drive_is_read_only
+    parse_drive
+    print_drive
+    storage_allows_io_uring_default
+);
use PVE::QemuServer::Machine;
use PVE::QemuServer::Memory qw(get_current_memory);
use PVE::QemuServer::MetaInfo;
@@ -1411,37 +1420,6 @@ sub get_initiator_name {
return $initiator;
}
-my sub storage_allows_io_uring_default {
-    my ($scfg, $cache_direct) = @_;
-
-    # io_uring with cache mode writeback or writethrough on krbd will hang...
-    return if $scfg && $scfg->{type} eq 'rbd' && $scfg->{krbd} && !$cache_direct;
-
-    # io_uring with cache mode writeback or writethrough on LVM will hang, without cache only
-    # sometimes, just plain disable...
-    return if $scfg && $scfg->{type} eq 'lvm';
-
-    # io_uring causes problems when used with CIFS since kernel 5.15
-    # Some discussion: https://www.spinics.net/lists/linux-cifs/msg26734.html
-    return if $scfg && $scfg->{type} eq 'cifs';
-
-    return 1;
-}
-
-my sub drive_uses_cache_direct {
-    my ($drive, $scfg) = @_;
-
-    my $cache_direct = 0;
-
-    if (my $cache = $drive->{cache}) {
-        $cache_direct = $cache =~ /^(?:off|none|directsync)$/;
-    } elsif (!drive_is_cdrom($drive) && !($scfg && $scfg->{type} eq 'btrfs' && !$scfg->{nocow})) {
-        $cache_direct = 1;
-    }
-
-    return $cache_direct;
-}
-
sub print_drive_commandline_full {
my ($storecfg, $vmid, $drive, $live_restore_name) = @_;
@@ -1503,7 +1481,7 @@ sub print_drive_commandline_full {
$opts .= ",format=$format";
}
-    my $cache_direct = drive_uses_cache_direct($drive, $scfg);
+    my $cache_direct = PVE::QemuServer::Drive::drive_uses_cache_direct($drive, $scfg);
$opts .= ",cache=none" if !$drive->{cache} && $cache_direct;
@@ -8415,7 +8393,7 @@ my sub clone_disk_check_io_uring {
my $src_scfg = PVE::Storage::storage_config($storecfg, $src_storeid);
my $dst_scfg = PVE::Storage::storage_config($storecfg, $dst_storeid);
-    my $cache_direct = drive_uses_cache_direct($src_drive, $src_scfg);
+    my $cache_direct = PVE::QemuServer::Drive::drive_uses_cache_direct($src_drive, $src_scfg);
my $src_uses_io_uring;
if ($src_drive->{aio}) {
diff --git a/PVE/QemuServer/Drive.pm b/PVE/QemuServer/Drive.pm
index c7fd29e3..7caa5502 100644
--- a/PVE/QemuServer/Drive.pm
+++ b/PVE/QemuServer/Drive.pm
@@ -24,6 +24,7 @@ drive_is_read_only
get_scsi_devicetype
parse_drive
print_drive
+storage_allows_io_uring_default
);
our $QEMU_FORMAT_RE = qr/raw|qcow|qcow2|qed|vmdk|cloop/;
@@ -983,4 +984,36 @@ sub get_scsi_devicetype {
return $devicetype;
}
+
+sub storage_allows_io_uring_default {
+    my ($scfg, $cache_direct) = @_;
+
+    # io_uring with cache mode writeback or writethrough on krbd will hang...
+    return if $scfg && $scfg->{type} eq 'rbd' && $scfg->{krbd} && !$cache_direct;
+
+    # io_uring with cache mode writeback or writethrough on LVM will hang, without cache only
+    # sometimes, just plain disable...
+    return if $scfg && $scfg->{type} eq 'lvm';
+
+    # io_uring causes problems when used with CIFS since kernel 5.15
+    # Some discussion: https://www.spinics.net/lists/linux-cifs/msg26734.html
+    return if $scfg && $scfg->{type} eq 'cifs';
+
+    return 1;
+}
+
+sub drive_uses_cache_direct {
+    my ($drive, $scfg) = @_;
+
+    my $cache_direct = 0;
+
+    if (my $cache = $drive->{cache}) {
+        $cache_direct = $cache =~ /^(?:off|none|directsync)$/;
+    } elsif (!drive_is_cdrom($drive) && !($scfg && $scfg->{type} eq 'btrfs' && !$scfg->{nocow})) {
+        $cache_direct = 1;
+    }
+
+    return $cache_direct;
+}
+
1;
--
2.39.5