Fix qemu_wait_io_event processing in io-thread mode
When checking for I/O events in the tcg CPU loop, make sure that we call
qemu_wait_io_event_common for all CPUs, not only the current one. Otherwise,
pause_all_vcpus may lock up, or run_on_cpu requests may starve. While at it,
rename qemu_wait_io_event to qemu_tcg_wait_io_event and drop its argument
list, which it no longer uses.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
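The lock-up the message describes is easiest to see in isolation. Below is a
minimal, self-contained C sketch (not QEMU code; vcpu_t, service_one,
service_all and pause_all are hypothetical names) of the pattern involved: a
single loop thread services stop requests for a whole list of CPU contexts,
and a pause-all caller blocks until every context has acknowledged. If the
loop thread only ever services the current context, the acknowledgements for
all the other contexts never arrive and the pause-all caller hangs.

/*
 * Illustrative sketch only, not QEMU code. One loop thread drives several
 * vCPU contexts; a controller thread waits until every context has
 * acknowledged a stop request.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct vcpu {
    bool stop;          /* set by the controller, like env->stop */
    bool stopped;       /* acknowledgement, like env->stopped */
    struct vcpu *next;
} vcpu_t;

static vcpu_t *first_vcpu;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pause_cond = PTHREAD_COND_INITIALIZER;

/* Analogue of qemu_wait_io_event_common() for one context: acknowledge a
 * pending stop request (QEMU also flushes queued run_on_cpu work here). */
static void service_one(vcpu_t *cpu)
{
    if (cpu->stop) {
        cpu->stop = false;
        cpu->stopped = true;
        pthread_cond_broadcast(&pause_cond);
    }
}

/* The fixed wait path: after waking up, walk the whole list, as
 * qemu_tcg_wait_io_event() now does. Servicing only the current CPU would
 * leave every other context's stop flag unacknowledged forever, because
 * the single TCG thread is the only thread that ever services them. */
static void service_all(void)   /* caller holds `lock` */
{
    for (vcpu_t *cpu = first_vcpu; cpu != NULL; cpu = cpu->next) {
        service_one(cpu);
    }
}

/* Controller side, analogous to pause_all_vcpus(): request a stop
 * everywhere, then block until every context has acknowledged. */
static void pause_all(void)
{
    pthread_mutex_lock(&lock);
    for (vcpu_t *cpu = first_vcpu; cpu != NULL; cpu = cpu->next) {
        cpu->stop = true;
    }
    for (;;) {
        bool all_stopped = true;
        for (vcpu_t *cpu = first_vcpu; cpu != NULL; cpu = cpu->next) {
            all_stopped = all_stopped && cpu->stopped;
        }
        if (all_stopped) {
            break;
        }
        pthread_cond_wait(&pause_cond, &lock);
    }
    pthread_mutex_unlock(&lock);
}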
parent c629a4bc97
commit 6cabe1f303

cpus.c: 11 changes (8 additions, 3 deletions)
@@ -403,10 +403,12 @@ static void qemu_wait_io_event_common(CPUState *env)
     flush_queued_work(env);
 }
 
-static void qemu_wait_io_event(CPUState *env)
+static void qemu_tcg_wait_io_event(void)
 {
+    CPUState *env;
+
     while (!tcg_has_work())
-        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);
+        qemu_cond_timedwait(tcg_halt_cond, &qemu_global_mutex, 1000);
 
     qemu_mutex_unlock(&qemu_global_mutex);
 
@@ -419,7 +421,10 @@ static void qemu_wait_io_event(CPUState *env)
     qemu_mutex_unlock(&qemu_fair_mutex);
 
     qemu_mutex_lock(&qemu_global_mutex);
+
+    for (env = first_cpu; env != NULL; env = env->next_cpu) {
         qemu_wait_io_event_common(env);
+    }
 }
 
 static void qemu_kvm_eat_signal(CPUState *env, int timeout)
@@ -504,7 +509,7 @@ static void *tcg_cpu_thread_fn(void *arg)
 
     while (1) {
         tcg_cpu_exec();
-        qemu_wait_io_event(cur_cpu);
+        qemu_tcg_wait_io_event();
     }
 
     return NULL;
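For reference, the per-CPU helper that the new loop invokes is only partially
visible in the hunk context above. A rough reconstruction, inferred from the
flush_queued_work() call shown in the first hunk and from the pause_all_vcpus
behaviour the commit message describes (the exact body in the tree may
differ):

static void qemu_wait_io_event_common(CPUState *env)
{
    if (env->stop) {                         /* pause request pending? */
        env->stop = 0;
        env->stopped = 1;                    /* acknowledge it */
        qemu_cond_signal(&qemu_pause_cond);  /* wake the pausing thread */
    }
    flush_queued_work(env);                  /* run queued run_on_cpu jobs */
}

This is why iterating over all CPUs matters: pause_all_vcpus blocks until
env->stopped is set for every CPU, and in io-thread TCG mode this helper,
called only from the single TCG thread, is the only place that sets it.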