migration: extend migration_bitmap

Previously, if we hotplug a device (e.g. device_add e1000) while migration is in progress on the source side, QEMU adds a new RAM block but migration_bitmap is not extended. In this case, migration_bitmap overflows and QEMU aborts unexpectedly.

Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
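To make the failure mode concrete: the dirty bitmap is allocated once, sized for the RAM present when migration starts, so the pages of a block hot-added later index past its end. The following is a minimal, self-contained C sketch of that arithmetic, not QEMU code: bitmap_new() here is a plain-stdlib stand-in for QEMU's helper, and the sizes and offsets are invented for illustration.

#include <stdio.h>
#include <stdlib.h>

#define TARGET_PAGE_BITS 12                   /* 4 KiB pages */
#define BITS_PER_LONG    (8 * sizeof(unsigned long))

/* Stand-in for QEMU's bitmap_new(): one bit per guest page. */
static unsigned long *bitmap_new(size_t nbits)
{
    return calloc((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG,
                  sizeof(unsigned long));
}

int main(void)
{
    /* The dirty bitmap is sized once, for the RAM present when
     * migration starts (say 1 GiB here). */
    size_t old_ram_pages = (1UL << 30) >> TARGET_PAGE_BITS;
    unsigned long *migration_bitmap = bitmap_new(old_ram_pages);

    /* A RAM block hot-added during migration is assigned ram_addr
     * space beyond that, so its pages index past the allocation. */
    size_t hotplug_page = ((1UL << 30) + 0x20000) >> TARGET_PAGE_BITS;

    if (hotplug_page >= old_ram_pages) {
        /* Without migration_bitmap_extend(), set_bit(hotplug_page, ...)
         * would write past the end of migration_bitmap. */
        printf("page %zu lies outside the %zu-page bitmap\n",
               hotplug_page, old_ram_pages);
    }

    free(migration_bitmap);
    return 0;
}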
commit dd63169766
parent 2ff64038a5

 exec.c | 5 +++++
exec.c
@@ -1414,6 +1414,11 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
         }
     }
 
+    new_ram_size = MAX(old_ram_size,
+              (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS);
+    if (new_ram_size > old_ram_size) {
+        migration_bitmap_extend(old_ram_size, new_ram_size);
+    }
     /* Keep the list sorted from biggest to smallest block.  Unlike QTAILQ,
      * QLIST (which has an RCU-friendly variant) does not have insertion at
      * tail, so save the last element in last_block.

@@ -365,4 +365,7 @@ static inline bool cpu_can_do_io(CPUState *cpu)
     return cpu->can_do_io != 0;
 }
 
+#if !defined(CONFIG_USER_ONLY)
+void migration_bitmap_extend(ram_addr_t old, ram_addr_t new);
+#endif
 #endif

@@ -222,6 +222,7 @@ static RAMBlock *last_seen_block;
 static RAMBlock *last_sent_block;
 static ram_addr_t last_offset;
 static unsigned long *migration_bitmap;
+static QemuMutex migration_bitmap_mutex;
 static uint64_t migration_dirty_pages;
 static uint32_t last_version;
 static bool ram_bulk_stage;

@@ -569,11 +570,13 @@ static void migration_bitmap_sync(void)
     trace_migration_bitmap_sync_start();
     address_space_sync_dirty_bitmap(&address_space_memory);
 
+    qemu_mutex_lock(&migration_bitmap_mutex);
     rcu_read_lock();
     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
         migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
     }
     rcu_read_unlock();
+    qemu_mutex_unlock(&migration_bitmap_mutex);
 
     trace_migration_bitmap_sync_end(migration_dirty_pages
                                     - num_dirty_pages_init);

@@ -1062,6 +1065,30 @@ static void reset_ram_globals(void)
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
 
+void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
+{
+    /* called in qemu main thread, so there is
+     * no writing race against this migration_bitmap
+     */
+    if (migration_bitmap) {
+        unsigned long *old_bitmap = migration_bitmap, *bitmap;
+        bitmap = bitmap_new(new);
+
+        /* prevent migration_bitmap content from being set bit
+         * by migration_bitmap_sync_range() at the same time.
+         * it is safe to migration if migration_bitmap is cleared bit
+         * at the same time.
+         */
+        qemu_mutex_lock(&migration_bitmap_mutex);
+        bitmap_copy(bitmap, old_bitmap, old);
+        bitmap_set(bitmap, old, new - old);
+        atomic_rcu_set(&migration_bitmap, bitmap);
+        qemu_mutex_unlock(&migration_bitmap_mutex);
+        migration_dirty_pages += new - old;
+        synchronize_rcu();
+        g_free(old_bitmap);
+    }
+}
+
 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
  * long-running RCU critical section.  When rcu-reclaims in the code

@@ -1078,6 +1105,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     dirty_rate_high_cnt = 0;
     bitmap_sync_count = 0;
     migration_bitmap_sync_init();
+    qemu_mutex_init(&migration_bitmap_mutex);
 
     if (migrate_use_xbzrle()) {
         XBZRLE_cache_lock();
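The heart of the patch is migration_bitmap_extend()'s grow-copy-publish sequence: allocate a larger bitmap, copy the old contents, mark every newly covered page dirty so it gets sent, publish the new pointer with atomic_rcu_set(), and free the old buffer only after synchronize_rcu(), so a reader that picked up the old pointer stays valid until a grace period passes, while the mutex keeps the copy from racing with migration_bitmap_sync_range() setting bits. Below is a minimal, self-contained C11 sketch of the same pattern, with assumptions spelled out: pthread, stdatomic and a hand-rolled loop stand in for QEMU's QemuMutex, atomic_rcu_set(), synchronize_rcu() and bitmap helpers, and dirty_bitmap_extend() plus the sizes in main() are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define BITMAP_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *_Atomic dirty_bitmap;           /* published pointer */
static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t dirty_pages;

/* Grow the dirty bitmap from 'old' to 'new' pages, marking the newly
 * covered range dirty so those pages will be sent.  The mutex keeps the
 * copy from racing with a sync path that sets bits; the atomic store
 * publishes the new bitmap to readers that never take the lock. */
static void dirty_bitmap_extend(size_t old, size_t new)
{
    unsigned long *old_map = atomic_load(&dirty_bitmap);
    unsigned long *new_map = calloc(BITMAP_LONGS(new), sizeof(unsigned long));

    pthread_mutex_lock(&bitmap_mutex);
    memcpy(new_map, old_map, BITMAP_LONGS(old) * sizeof(unsigned long));
    for (size_t i = old; i < new; i++) {               /* bitmap_set(old, new - old) */
        new_map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }
    atomic_store(&dirty_bitmap, new_map);              /* publish */
    pthread_mutex_unlock(&bitmap_mutex);

    dirty_pages += new - old;                          /* new pages must all be sent */
    /* QEMU calls synchronize_rcu() here so no reader still holds the old
     * pointer; this sketch has no concurrent readers, so free directly. */
    free(old_map);
}

int main(void)
{
    size_t old = 256 * 1024, new = 512 * 1024;         /* 1 GiB -> 2 GiB of 4 KiB pages */

    atomic_store(&dirty_bitmap, calloc(BITMAP_LONGS(old), sizeof(unsigned long)));
    dirty_bitmap_extend(old, new);
    printf("dirty pages after extend: %zu\n", dirty_pages);
    free(atomic_load(&dirty_bitmap));
    return 0;
}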