[pve-devel] [PATCH qemu 4/4] savevm-async: add debug timing prints

Stefan Reiter s.reiter at proxmox.com
Wed May 27 11:33:22 CEST 2020


Signed-off-by: Stefan Reiter <s.reiter at proxmox.com>
---

This doesn't have to be applied, but I thought I'd send it along anyway,
since it helped me greatly in testing patch 3.
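
For reference, each hunk adds the same pattern: grab a millisecond
timestamp from the realtime clock before the section of interest, then
print the delta after it. Below is a minimal standalone sketch of that
pattern; it uses POSIX clock_gettime() as a stand-in for QEMU's
qemu_clock_get_ms(QEMU_CLOCK_REALTIME), and the DPRINTF macro here is a
hypothetical placeholder for the one savevm-async.c already defines.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    /* hypothetical stand-in for savevm-async.c's DPRINTF */
    #define DPRINTF(fmt, ...) \
        do { printf("savevm-async: " fmt, ## __VA_ARGS__); } while (0)

    /* millisecond wall-clock time, mirroring
     * qemu_clock_get_ms(QEMU_CLOCK_REALTIME) */
    static int64_t clock_get_ms(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_REALTIME, &ts);
        return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    int main(void)
    {
        int64_t start_time = clock_get_ms();

        /* ... section being timed ... */

        DPRINTF("timing: section took %" PRId64 " ms\n",
                clock_get_ms() - start_time);
        return 0;
    }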

 savevm-async.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/savevm-async.c b/savevm-async.c
index 4ce83a0691..8848884593 100644
--- a/savevm-async.c
+++ b/savevm-async.c
@@ -202,6 +202,8 @@ static void process_savevm_finalize(void *opaque)
     AioContext *iohandler_ctx = iohandler_get_aio_context();
     MigrationState *ms = migrate_get_current();
 
+    int64_t start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
     qemu_bh_delete(snap_state.finalize_bh);
     snap_state.finalize_bh = NULL;
     snap_state.co = NULL;
@@ -226,6 +228,8 @@ static void process_savevm_finalize(void *opaque)
     }
 
     DPRINTF("state saving complete\n");
+    DPRINTF("timing: process_savevm_finalize (state saving) took %" PRId64 " ms\n",
+        qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
 
     /* clear migration state */
     migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP,
@@ -247,6 +251,9 @@ static void process_savevm_finalize(void *opaque)
         vm_start();
         snap_state.saved_vm_running = false;
     }
+
+    DPRINTF("timing: process_savevm_finalize (full) took %" PRId64 " ms\n",
+        qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
 }
 
 static void coroutine_fn process_savevm_co(void *opaque)
@@ -256,6 +263,8 @@ static void coroutine_fn process_savevm_co(void *opaque)
     BdrvNextIterator it;
     BlockDriverState *bs = NULL;
 
+    int64_t start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+
     ret = qemu_file_get_error(snap_state.file);
     if (ret < 0) {
         save_snapshot_error("qemu_savevm_state_setup failed");
@@ -290,11 +299,15 @@ static void coroutine_fn process_savevm_co(void *opaque)
         }
     }
 
+    DPRINTF("timing: process_savevm_co took %" PRId64 " ms\n",
+        qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time);
+
     /* If a drive runs in an IOThread we can flush it async, and only
      * need to sync-flush whatever IO happens between now and
      * vm_stop_force_state. bdrv_next can only be called from main AioContext,
      * so move there now and after every flush.
      */
+    int64_t start_time_flush = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
     aio_co_reschedule_self(qemu_get_aio_context());
     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
         /* target has BDRV_O_NO_FLUSH, no sense calling bdrv_flush on it */
@@ -311,6 +324,9 @@ static void coroutine_fn process_savevm_co(void *opaque)
         }
     }
 
+    DPRINTF("timing: async flushing took %" PRId64 " ms\n",
+        qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - start_time_flush);
+
     qemu_bh_schedule(snap_state.finalize_bh);
 }
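
Note that these prints are compiled out by default: DPRINTF in
savevm-async.c expands to a no-op unless the file's debug define is
enabled. Assuming the usual QEMU-style gating (treat the exact define
name as an assumption on my part), enabling it looks like:

    /* near the top of savevm-async.c */
    #define DEBUG_SAVEVM_STATE

    #ifdef DEBUG_SAVEVM_STATE
    #define DPRINTF(fmt, ...) \
        do { printf("savevm-async: " fmt, ## __VA_ARGS__); } while (0)
    #else
    #define DPRINTF(fmt, ...) do { } while (0)
    #endif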
 
-- 
2.20.1