tcg: Prepare safe access to tb_flushed out of tb_lock
Ensure atomicity and ordering of the CPU's 'tb_flushed' accesses, in
preparation for translation block lookup out of 'tb_lock'.

In user mode emulation this field can only be touched from another
thread by tb_flush(), so the only accesses that need to be sequentially
atomic are:
 * the single write in tb_flush();
 * reads/writes performed out of 'tb_lock'.

In the future, before MTTCG can be enabled in system mode, tb_flush()
must be made safe, and this field will become unnecessary.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <20160715175852.30749-5-sergey.fedorov@linaro.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
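To make the pairing described above concrete, here is a minimal,
self-contained model of the intended access pattern, using plain C11
atomics as stand-ins for QEMU's atomic_mb_set()/atomic_mb_read()
helpers from include/qemu/atomic.h. The function names flush_side()
and lookup_side() are illustrative only, not QEMU code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tb_flushed;

static void flush_side(void)
{
    /* Models the single write done by tb_flush() in user mode.  A
     * sequentially consistent store (like atomic_mb_set()) orders it
     * after the preceding tb_jmp_cache clears. */
    atomic_store(&tb_flushed, true);
}

static bool lookup_side(void)
{
    /* Models a read made out of 'tb_lock'.  A sequentially consistent
     * load (like atomic_mb_read()) guarantees the flush is seen before
     * acting on possibly stale TB pointers. */
    if (atomic_load(&tb_flushed)) {
        atomic_store(&tb_flushed, false);
        return false;               /* flush seen: skip tb_add_jump() */
    }
    return true;                    /* safe to patch the calling TB */
}

int main(void)
{
    flush_side();
    printf("may chain: %d\n", lookup_side());  /* 0: flush observed */
    printf("may chain: %d\n", lookup_side());  /* 1: flag consumed */
    return 0;
}

A seq-cst store/load pair is the conservative choice here; as the
message notes, once tb_flush() is made safe for system-mode MTTCG the
field disappears entirely.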
parent 89a16b1e42
commit 118b07308a

 cpu-exec.c | 14
diff --git a/cpu-exec.c b/cpu-exec.c
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -338,13 +338,6 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
-    if (cpu->tb_flushed) {
-        /* Ensure that no TB jump will be modified as the
-         * translation buffer has been flushed.
-         */
-        last_tb = NULL;
-        cpu->tb_flushed = false;
-    }
 #ifndef CONFIG_USER_ONLY
     /* We don't take care of direct jumps when address mapping changes in
      * system emulation. So it's not safe to make a direct jump to a TB
@@ -356,8 +349,13 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
 #endif
     /* See if we can patch the calling TB. */
     if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        tb_add_jump(last_tb, tb_exit, tb);
+        /* Check if translation buffer has been flushed */
+        if (cpu->tb_flushed) {
+            cpu->tb_flushed = false;
+        } else {
+            tb_add_jump(last_tb, tb_exit, tb);
+        }
     }
     tb_unlock();
     return tb;
 }
@@ -617,7 +615,7 @@ int cpu_exec(CPUState *cpu)
                 break;
             }
 
-            cpu->tb_flushed = false; /* reset before first TB lookup */
+            atomic_mb_set(&cpu->tb_flushed, false); /* reset before first TB lookup */
             for(;;) {
                 cpu_handle_interrupt(cpu, &last_tb);
                 tb = tb_find_fast(cpu, last_tb, tb_exit);
diff --git a/translate-all.c b/translate-all.c
--- a/translate-all.c
+++ b/translate-all.c
@@ -848,7 +848,6 @@ void tb_flush(CPUState *cpu)
         > tcg_ctx.code_gen_buffer_size) {
         cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
-    tcg_ctx.tb_ctx.nb_tbs = 0;
 
     CPU_FOREACH(cpu) {
         int i;
@@ -856,9 +855,10 @@ void tb_flush(CPUState *cpu)
         for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
             atomic_set(&cpu->tb_jmp_cache[i], NULL);
         }
-        cpu->tb_flushed = true;
+        atomic_mb_set(&cpu->tb_flushed, true);
     }
 
+    tcg_ctx.tb_ctx.nb_tbs = 0;
     qht_reset_size(&tcg_ctx.tb_ctx.htable, CODE_GEN_HTABLE_SIZE);
     page_flush_tb();
 
			|||||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user