migration: split common postcopy out of ram postcopy
Split the common postcopy code out of the RAM-specific postcopy code.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
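The motivation: "postcopy" currently means "postcopy RAM", but other entities (postcopy dirty bitmaps, which the new comment below mentions) will want to run in the postcopy phase without RAM postcopy being enabled. So the patch introduces migrate_postcopy() as the "any postcopy entity is enabled" predicate, keeps migrate_postcopy_ram() for RAM-only actions, and makes the POSTCOPY_ADVISE command variable-length so a RAM-less postcopy can advertise itself with an empty payload.

Below is a minimal standalone sketch of the intended layering. migrate_dirty_bitmaps() stands in for a future non-RAM postcopy entity; it is an assumption modeled on the follow-up series, not part of this patch:

#include <stdbool.h>
#include <stdio.h>

/* Toy capability flags standing in for s->enabled_capabilities[...]. */
static bool cap_postcopy_ram;
static bool cap_dirty_bitmaps;              /* hypothetical future entity */

static bool migrate_postcopy_ram(void)  { return cap_postcopy_ram; }
static bool migrate_dirty_bitmaps(void) { return cap_dirty_bitmaps; }

/* "Is any postcopy entity enabled?" -- gates the protocol-level steps
 * (ADVISE, LISTEN, RUN, the switch to the postcopy phase). */
static bool migrate_postcopy(void)
{
    return migrate_postcopy_ram() || migrate_dirty_bitmaps();
}

static void postcopy_start_model(void)
{
    if (!migrate_postcopy()) {
        return;                             /* pure precopy migration */
    }
    puts("common: send LISTEN + RUN");      /* done for every entity */
    if (migrate_postcopy_ram()) {
        puts("RAM only: discard bitmap, debug pings");
    }
}

int main(void)
{
    cap_dirty_bitmaps = true;               /* bitmap-only postcopy */
    postcopy_start_model();
    return 0;
}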
commit 58110f0acb
parent 86e1167e9a
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1443,6 +1443,11 @@ bool migrate_postcopy_ram(void)
     return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
 }
 
+bool migrate_postcopy(void)
+{
+    return migrate_postcopy_ram();
+}
+
 bool migrate_auto_converge(void)
 {
     MigrationState *s;
@@ -1826,9 +1831,11 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * need to tell the destination to throw any pages it's already received
      * that are dirty
      */
-    if (ram_postcopy_send_discard_bitmap(ms)) {
-        error_report("postcopy send discard bitmap failed");
-        goto fail;
+    if (migrate_postcopy_ram()) {
+        if (ram_postcopy_send_discard_bitmap(ms)) {
+            error_report("postcopy send discard bitmap failed");
+            goto fail;
+        }
     }
 
     /*
@@ -1837,8 +1844,10 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
      * wrap their state up here
      */
     qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
-    /* Ping just for debugging, helps line traces up */
-    qemu_savevm_send_ping(ms->to_dst_file, 2);
+    if (migrate_postcopy_ram()) {
+        /* Ping just for debugging, helps line traces up */
+        qemu_savevm_send_ping(ms->to_dst_file, 2);
+    }
 
     /*
      * While loading the device state we may trigger page transfer
@@ -1863,7 +1872,9 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
     qemu_savevm_send_postcopy_listen(fb);
 
     qemu_savevm_state_complete_precopy(fb, false, false);
-    qemu_savevm_send_ping(fb, 3);
+    if (migrate_postcopy_ram()) {
+        qemu_savevm_send_ping(fb, 3);
+    }
 
     qemu_savevm_send_postcopy_run(fb);
 
@@ -1898,11 +1909,13 @@ static int postcopy_start(MigrationState *ms, bool *old_vm_running)
 
     qemu_mutex_unlock_iothread();
 
-    /*
-     * Although this ping is just for debug, it could potentially be
-     * used for getting a better measurement of downtime at the source.
-     */
-    qemu_savevm_send_ping(ms->to_dst_file, 4);
+    if (migrate_postcopy_ram()) {
+        /*
+         * Although this ping is just for debug, it could potentially be
+         * used for getting a better measurement of downtime at the source.
+         */
+        qemu_savevm_send_ping(ms->to_dst_file, 4);
+    }
 
     if (migrate_release_ram()) {
         ram_postcopy_migrated_memory_release(ms);
@@ -2080,7 +2093,7 @@ static void *migration_thread(void *opaque)
         qemu_savevm_send_ping(s->to_dst_file, 1);
     }
 
-    if (migrate_postcopy_ram()) {
+    if (migrate_postcopy()) {
         /*
          * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
@@ -2113,7 +2126,7 @@ static void *migration_thread(void *opaque)
             if (pending_size && pending_size >= threshold_size) {
                 /* Still a significant amount to transfer */
 
-                if (migrate_postcopy_ram() &&
+                if (migrate_postcopy() &&
                     s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                     pend_nonpost <= threshold_size &&
                     atomic_read(&s->start_postcopy)) {
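For reference, the switch-over test touched by the last hunk combines four conditions. A condensed restatement (the parameter names are mine; threshold_size is derived from the bandwidth and downtime limits earlier in migration_thread()):

#include <stdbool.h>
#include <stdint.h>

/* Condensed restatement of the postcopy trigger in migration_thread():
 * switch to postcopy only if some postcopy entity is enabled, we are not
 * already in the postcopy phase, the non-postcopiable remainder fits in
 * the downtime budget, and the user issued migrate-start-postcopy. */
bool should_start_postcopy(bool postcopy_enabled, bool postcopy_active,
                           uint64_t pend_nonpost, uint64_t threshold_size,
                           bool start_requested)
{
    return postcopy_enabled && !postcopy_active &&
           pend_nonpost <= threshold_size && start_requested;
}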
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -169,6 +169,8 @@ bool migration_is_blocked(Error **errp);
 bool migration_in_postcopy(void);
 MigrationState *migrate_get_current(void);
 
+bool migrate_postcopy(void);
+
 bool migrate_release_ram(void);
 bool migrate_postcopy_ram(void);
 bool migrate_zero_blocks(void);
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -89,7 +89,7 @@ static struct mig_cmd_args {
     [MIG_CMD_INVALID]          = { .len = -1, .name = "INVALID" },
     [MIG_CMD_OPEN_RETURN_PATH] = { .len =  0, .name = "OPEN_RETURN_PATH" },
     [MIG_CMD_PING]             = { .len = sizeof(uint32_t), .name = "PING" },
-    [MIG_CMD_POSTCOPY_ADVISE]  = { .len = 16, .name = "POSTCOPY_ADVISE" },
+    [MIG_CMD_POSTCOPY_ADVISE]  = { .len = -1, .name = "POSTCOPY_ADVISE" },
     [MIG_CMD_POSTCOPY_LISTEN]  = { .len =  0, .name = "POSTCOPY_LISTEN" },
     [MIG_CMD_POSTCOPY_RUN]     = { .len =  0, .name = "POSTCOPY_RUN" },
     [MIG_CMD_POSTCOPY_RAM_DISCARD] = {
@@ -98,6 +98,23 @@ static struct mig_cmd_args {
     [MIG_CMD_MAX]              = { .len = -1, .name = "MAX" },
 };
 
+/* Note for MIG_CMD_POSTCOPY_ADVISE:
+ * The format of arguments depends on the postcopy mode:
+ * - postcopy RAM only
+ *   uint64_t host page size
+ *   uint64_t target page size
+ *
+ * - postcopy RAM and postcopy dirty bitmaps
+ *   format is the same as for postcopy RAM only
+ *
+ * - postcopy dirty bitmaps only
+ *   Nothing. Command length field is 0.
+ *
+ * Be careful: adding a new postcopy entity with some other parameters should
+ * not break format self-description ability. A good way is to introduce some
+ * generic extendable format with an exception for the two old entities.
+ */
+
 static int announce_self_create(uint8_t *buf,
                                 uint8_t *mac_addr)
 {
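Because mig_cmd_args now marks POSTCOPY_ADVISE as variable-length (.len = -1), the receiver has to use the on-wire length field to tell the encodings apart, exactly as the note above describes. A standalone sketch of that dispatch; parse_postcopy_advise() and its flat-buffer interface are illustrative, not the QEMU API:

#include <stddef.h>
#include <stdint.h>

/* Illustrative decoder for the POSTCOPY_ADVISE payload: dispatch on the
 * command length.  Returns 0 on success, -1 on an unknown length. */
int parse_postcopy_advise(const uint8_t *buf, uint16_t len,
                          uint64_t *host_ps, uint64_t *target_ps)
{
    if (len == 0) {
        /* Postcopy dirty bitmaps only: no page-size payload. */
        *host_ps = *target_ps = 0;
        return 0;
    }
    if (len == 2 * sizeof(uint64_t)) {
        /* Postcopy RAM (with or without bitmaps): two be64 values. */
        uint64_t v[2] = { 0, 0 };
        for (size_t i = 0; i < 2; i++) {
            for (size_t b = 0; b < 8; b++) {
                v[i] = (v[i] << 8) | buf[i * 8 + b];
            }
        }
        *host_ps = v[0];
        *target_ps = v[1];
        return 0;
    }
    return -1;  /* a new entity must keep the format self-describing */
}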
@@ -861,12 +878,17 @@ int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
 /* Send prior to any postcopy transfer */
 void qemu_savevm_send_postcopy_advise(QEMUFile *f)
 {
-    uint64_t tmp[2];
-    tmp[0] = cpu_to_be64(ram_pagesize_summary());
-    tmp[1] = cpu_to_be64(qemu_target_page_size());
+    if (migrate_postcopy_ram()) {
+        uint64_t tmp[2];
+        tmp[0] = cpu_to_be64(ram_pagesize_summary());
+        tmp[1] = cpu_to_be64(qemu_target_page_size());
 
-    trace_qemu_savevm_send_postcopy_advise();
-    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 16, (uint8_t *)tmp);
+        trace_qemu_savevm_send_postcopy_advise();
+        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE,
+                                 16, (uint8_t *)tmp);
+    } else {
+        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL);
+    }
 }
 
 /* Sent prior to starting the destination running in postcopy, discard pages
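Both branches of qemu_savevm_send_postcopy_advise() emit the same command framing and differ only in the length field and payload. A sketch of that framing as I remember it from savevm.c (one marker byte, be16 command number, be16 length, then the payload); the concrete QEMU_VM_COMMAND and command-number values are assumptions here:

#include <stdint.h>
#include <string.h>

enum {
    QEMU_VM_COMMAND         = 0x08, /* assumed marker byte */
    MIG_CMD_POSTCOPY_ADVISE = 3,    /* assumed command number */
};

/* Illustrative re-encoding of qemu_savevm_command_send(): the RAM case
 * sends len = 16 plus two be64 page sizes, the RAM-less case len = 0. */
size_t encode_command(uint8_t *out, uint16_t cmd,
                      const uint8_t *data, uint16_t len)
{
    size_t n = 0;
    out[n++] = QEMU_VM_COMMAND;
    out[n++] = (uint8_t)(cmd >> 8);
    out[n++] = (uint8_t)(cmd & 0xff);
    out[n++] = (uint8_t)(len >> 8);
    out[n++] = (uint8_t)(len & 0xff);
    if (len) {
        memcpy(out + n, data, len); /* no payload when len == 0 */
    }
    return n + len;
}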
@@ -1354,6 +1376,10 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis)
         return -1;
     }
 
+    if (!migrate_postcopy_ram()) {
+        return 0;
+    }
+
     if (!postcopy_ram_supported_by_host()) {
         postcopy_state_set(POSTCOPY_INCOMING_NONE);
         return -1;
@@ -1564,7 +1590,9 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
          * A rare case, we entered listen without having to do any discards,
          * so do the setup that's normally done at the time of the 1st discard.
          */
-        postcopy_ram_prepare_discard(mis);
+        if (migrate_postcopy_ram()) {
+            postcopy_ram_prepare_discard(mis);
+        }
     }
 
     /*
@@ -1572,8 +1600,10 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
      * However, at this point the CPU shouldn't be running, and the IO
      * shouldn't be doing anything yet so don't actually expect requests
      */
-    if (postcopy_ram_enable_notify(mis)) {
-        return -1;
+    if (migrate_postcopy_ram()) {
+        if (postcopy_ram_enable_notify(mis)) {
+            return -1;
+        }
     }
 
     if (mis->have_listen_thread) {