[pve-devel] [PATCH qemu-server 4/4] cleanup: error messages
Wolfgang Bumiller
w.bumiller at proxmox.com
Thu Jan 5 10:09:29 CET 2017
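Unify the wording of user-visible messages: fix grammar and capitalization,
give the block job progress output a consistent "$job:" prefix, and terminate
die() messages with a trailing newline so perl does not append the internal
code location to errors shown to the user.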
---
PVE/API2/Qemu.pm | 4 ++--
PVE/QemuMigrate.pm | 2 +-
PVE/QemuServer.pm | 16 ++++++++--------
3 files changed, 11 insertions(+), 11 deletions(-)
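Note (kept below the '---' so it does not end up in the commit message): the
trailing "\n" added to the die() messages is deliberate. In Perl, a die message
ending in a newline is printed verbatim, while one without it has the script
name and line number appended, which leaks code internals into user-facing
errors. A minimal illustrative sketch (the file name and line number in the
comments depend on where the snippet is saved):

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Without a trailing newline, die() appends the code location:
    eval { die "forking socat tunnel failed" };
    print $@;    # forking socat tunnel failed at example.pl line 6.

    # With a trailing newline, the message is printed verbatim:
    eval { die "forking socat tunnel failed\n" };
    print $@;    # forking socat tunnel failed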
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index e48bf6d..288a9cd 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -1643,7 +1643,7 @@ __PACKAGE__->register_method({
},
machine => get_standard_option('pve-qm-machine'),
targetstorage => {
- description => "Target migration storage . (1 = same storeid than original)",
+ description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
type => 'string',
optional => 1
}
@@ -2753,7 +2753,7 @@ __PACKAGE__->register_method({
my $vmid = extract_param($param, 'vmid');
- raise_param_exc({ targetstorage => "Live Storage migration can only be done online" })
+ raise_param_exc({ targetstorage => "Live storage migration can only be done online." })
if !$param->{online} && $param->{targetstorage};
raise_param_exc({ force => "Only root may use this option." })
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index ed1bef6..a5de8d0 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -596,7 +596,7 @@ sub phase2 {
$self->{storage_migration_jobs} = {};
$self->log('info', "starting storage migration");
- die "the number of destination local disk is not equal to number of source local disk"
+ die "The number of local disks does not match between the source and the destination.\n"
if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
foreach my $drive (keys %{$self->{target_drive}}){
my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index c2fa20b..3c9d30a 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5923,7 +5923,7 @@ sub qemu_drive_mirror {
my $pid = fork();
if (!defined($pid)) {
- die "forking socat tunnel failed";
+ die "forking socat tunnel failed\n";
} elsif ($pid == 0) {
exec(@$cmd);
warn "exec failed: $!\n";
@@ -5992,7 +5992,7 @@ sub qemu_drive_mirror_monitor {
next;
}
- die "$job: mirroring has been cancelled. Maybe do you have bad sectors?" if !defined($running_mirror_jobs->{$job});
+ die "$job: mirroring has been cancelled\n" if !defined($running_mirror_jobs->{$job});
my $busy = $running_mirror_jobs->{$job}->{busy};
my $ready = $running_mirror_jobs->{$job}->{ready};
@@ -6038,14 +6038,14 @@ sub qemu_drive_mirror_monitor {
foreach my $job (keys %$jobs) {
# try to switch the disk if source and destination are on the same guest
- print "$job : Try to complete block job\n";
+ print "$job: Completing block job...\n";
eval { vm_mon_cmd($vmid, "block-job-complete", device => $job) };
if ($@ =~ m/cannot be completed/) {
- print "$job : block job cannot be complete. Try again \n";
+ print "$job: Block job cannot be completed, try again.\n";
$err_complete++;
}else {
- print "$job : complete ok : flushing pending writes\n";
+ print "$job: Completed successfully.\n";
$jobs->{$job}->{complete} = 1;
eval { qemu_blockjobs_finish_tunnel($vmid, $job, $jobs->{$job}->{pid}) } ;
}
@@ -6068,7 +6068,7 @@ sub qemu_blockjobs_cancel {
my ($vmid, $jobs) = @_;
foreach my $job (keys %$jobs) {
- print "$job: try to cancel block job\n";
+ print "$job: Cancelling block job\n";
eval { vm_mon_cmd($vmid, "block-job-cancel", device => $job); };
$jobs->{$job}->{cancel} = 1;
}
@@ -6083,8 +6083,8 @@ sub qemu_blockjobs_cancel {
foreach my $job (keys %$jobs) {
- if(defined($jobs->{$job}->{cancel}) && !defined($running_jobs->{$job})) {
- print "$job : finished\n";
+ if (defined($jobs->{$job}->{cancel}) && !defined($running_jobs->{$job})) {
+ print "$job: Done.\n";
eval { qemu_blockjobs_finish_tunnel($vmid, $job, $jobs->{$job}->{pid}) } ;
delete $jobs->{$job};
}
--
2.1.4