tcg: Clean up from 'next_tb'
The value returned from tcg_qemu_tb_exec() is the value passed to the corresponding tcg_gen_exit_tb() at translation time of the last TB attempted to execute. It is a little confusing to store it in a variable named 'next_tb'. In fact, it is a combination of a 4-byte aligned pointer and additional information in its two least significant bits. Break it down right away into two variables named 'last_tb' and 'tb_exit', which are a pointer to the last TB attempted to execute and the TB exit reason, respectively. This simplifies the code and improves its readability.

Correct a misleading documentation comment for tcg_qemu_tb_exec() and fix logging in cpu_tb_exec(). Also rename a misleading 'next_tb' in another couple of places.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
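To make the rename concrete, here is a minimal sketch of the new decoding step. It mirrors (but is not copied verbatim from) the cpu_tb_exec() change in the diff below; TB_EXIT_MASK covers the two least significant bits of the 4-byte aligned TB pointer:

    /* Sketch: split the opaque value returned by tcg_qemu_tb_exec()
     * into the TB pointer and the exit reason. */
    uintptr_t ret = tcg_qemu_tb_exec(env, tb_ptr);
    TranslationBlock *last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    int tb_exit = ret & TB_EXIT_MASK;   /* 0..3, see the tcg.h comment below */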
commit 819af24b9c
parent 7687bf52e5

cpu-exec.c | 59
| @@ -136,7 +136,9 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu) | ||||
| static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb) | ||||
| { | ||||
|     CPUArchState *env = cpu->env_ptr; | ||||
|     uintptr_t next_tb; | ||||
|     uintptr_t ret; | ||||
|     TranslationBlock *last_tb; | ||||
|     int tb_exit; | ||||
|     uint8_t *tb_ptr = itb->tc_ptr; | ||||
|  | ||||
|     qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc, | ||||
| @@ -160,36 +162,37 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb) | ||||
| #endif /* DEBUG_DISAS */ | ||||
|  | ||||
|     cpu->can_do_io = !use_icount; | ||||
|     next_tb = tcg_qemu_tb_exec(env, tb_ptr); | ||||
|     ret = tcg_qemu_tb_exec(env, tb_ptr); | ||||
|     cpu->can_do_io = 1; | ||||
|     trace_exec_tb_exit((void *) (next_tb & ~TB_EXIT_MASK), | ||||
|                        next_tb & TB_EXIT_MASK); | ||||
|     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); | ||||
|     tb_exit = ret & TB_EXIT_MASK; | ||||
|     trace_exec_tb_exit(last_tb, tb_exit); | ||||
|  | ||||
|     if ((next_tb & TB_EXIT_MASK) > TB_EXIT_IDX1) { | ||||
|     if (tb_exit > TB_EXIT_IDX1) { | ||||
|         /* We didn't start executing this TB (eg because the instruction | ||||
|          * counter hit zero); we must restore the guest PC to the address | ||||
|          * of the start of the TB. | ||||
|          */ | ||||
|         CPUClass *cc = CPU_GET_CLASS(cpu); | ||||
|         TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); | ||||
|         qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc, | ||||
|         qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc, | ||||
|                                "Stopped execution of TB chain before %p [" | ||||
|                                TARGET_FMT_lx "] %s\n", | ||||
|                                itb->tc_ptr, itb->pc, lookup_symbol(itb->pc)); | ||||
|                                last_tb->tc_ptr, last_tb->pc, | ||||
|                                lookup_symbol(last_tb->pc)); | ||||
|         if (cc->synchronize_from_tb) { | ||||
|             cc->synchronize_from_tb(cpu, tb); | ||||
|             cc->synchronize_from_tb(cpu, last_tb); | ||||
|         } else { | ||||
|             assert(cc->set_pc); | ||||
|             cc->set_pc(cpu, tb->pc); | ||||
|             cc->set_pc(cpu, last_tb->pc); | ||||
|         } | ||||
|     } | ||||
|     if ((next_tb & TB_EXIT_MASK) == TB_EXIT_REQUESTED) { | ||||
|     if (tb_exit == TB_EXIT_REQUESTED) { | ||||
|         /* We were asked to stop executing TBs (probably a pending | ||||
|          * interrupt. We've now stopped, so clear the flag. | ||||
|          */ | ||||
|         cpu->tcg_exit_req = 0; | ||||
|     } | ||||
|     return next_tb; | ||||
|     return ret; | ||||
| } | ||||
|  | ||||
| #ifndef CONFIG_USER_ONLY | ||||
| @@ -358,8 +361,8 @@ int cpu_exec(CPUState *cpu) | ||||
|     CPUArchState *env = &x86_cpu->env; | ||||
| #endif | ||||
|     int ret, interrupt_request; | ||||
|     TranslationBlock *tb; | ||||
|     uintptr_t next_tb; | ||||
|     TranslationBlock *tb, *last_tb; | ||||
|     int tb_exit = 0; | ||||
|     SyncClocks sc; | ||||
|  | ||||
|     /* replay_interrupt may need current_cpu */ | ||||
| @@ -442,7 +445,7 @@ int cpu_exec(CPUState *cpu) | ||||
| #endif | ||||
|             } | ||||
|  | ||||
|             next_tb = 0; /* force lookup of first TB */ | ||||
|             last_tb = NULL; /* forget the last executed TB after exception */ | ||||
|             for(;;) { | ||||
|                 interrupt_request = cpu->interrupt_request; | ||||
|                 if (unlikely(interrupt_request)) { | ||||
| @@ -487,7 +490,7 @@ int cpu_exec(CPUState *cpu) | ||||
|                     else { | ||||
|                         replay_interrupt(); | ||||
|                         if (cc->cpu_exec_interrupt(cpu, interrupt_request)) { | ||||
|                             next_tb = 0; | ||||
|                             last_tb = NULL; | ||||
|                         } | ||||
|                     } | ||||
|                     /* Don't use the cached interrupt_request value, | ||||
| @@ -496,7 +499,7 @@ int cpu_exec(CPUState *cpu) | ||||
|                         cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; | ||||
|                         /* ensure that no TB jump will be modified as | ||||
|                            the program flow was changed */ | ||||
|                         next_tb = 0; | ||||
|                         last_tb = NULL; | ||||
|                     } | ||||
|                 } | ||||
|                 if (unlikely(cpu->exit_request | ||||
| @@ -513,22 +516,24 @@ int cpu_exec(CPUState *cpu) | ||||
|                     /* as some TB could have been invalidated because | ||||
|                        of memory exceptions while generating the code, we | ||||
|                        must recompute the hash index here */ | ||||
|                     next_tb = 0; | ||||
|                     last_tb = NULL; | ||||
|                     tcg_ctx.tb_ctx.tb_invalidated_flag = 0; | ||||
|                 } | ||||
|                 /* See if we can patch the calling TB. */ | ||||
|                 if (next_tb != 0 && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { | ||||
|                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK), | ||||
|                                 next_tb & TB_EXIT_MASK, tb); | ||||
|                 if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { | ||||
|                     tb_add_jump(last_tb, tb_exit, tb); | ||||
|                 } | ||||
|                 tb_unlock(); | ||||
|                 if (likely(!cpu->exit_request)) { | ||||
|                     uintptr_t ret; | ||||
|                     trace_exec_tb(tb, tb->pc); | ||||
|                     /* execute the generated code */ | ||||
|                     cpu->current_tb = tb; | ||||
|                     next_tb = cpu_tb_exec(cpu, tb); | ||||
|                     ret = cpu_tb_exec(cpu, tb); | ||||
|                     cpu->current_tb = NULL; | ||||
|                     switch (next_tb & TB_EXIT_MASK) { | ||||
|                     last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK); | ||||
|                     tb_exit = ret & TB_EXIT_MASK; | ||||
|                     switch (tb_exit) { | ||||
|                     case TB_EXIT_REQUESTED: | ||||
|                         /* Something asked us to stop executing | ||||
|                          * chained TBs; just continue round the main | ||||
| @@ -541,7 +546,7 @@ int cpu_exec(CPUState *cpu) | ||||
|                          * or cpu->interrupt_request. | ||||
|                          */ | ||||
|                         smp_rmb(); | ||||
|                         next_tb = 0; | ||||
|                         last_tb = NULL; | ||||
|                         break; | ||||
|                     case TB_EXIT_ICOUNT_EXPIRED: | ||||
|                     { | ||||
| @@ -559,12 +564,12 @@ int cpu_exec(CPUState *cpu) | ||||
|                         } else { | ||||
|                             if (insns_left > 0) { | ||||
|                                 /* Execute remaining instructions.  */ | ||||
|                                 tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK); | ||||
|                                 cpu_exec_nocache(cpu, insns_left, tb, false); | ||||
|                                 cpu_exec_nocache(cpu, insns_left, | ||||
|                                                  last_tb, false); | ||||
|                                 align_clocks(&sc, cpu); | ||||
|                             } | ||||
|                             cpu->exception_index = EXCP_INTERRUPT; | ||||
|                             next_tb = 0; | ||||
|                             last_tb = NULL; | ||||
|                             cpu_loop_exit(cpu); | ||||
|                         } | ||||
|                         break; | ||||
|  | ||||
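For reference, the exit-reason values that the new tb_exit variable takes in the switch above are reproduced here as this editor understands them from the TB_EXIT_* definitions in this version's tcg.h; they are not part of the patch itself:

    /* Exit-reason encoding assumed by the code above (believed to match
     * the TB_EXIT_* definitions in tcg.h of this QEMU version). */
    #define TB_EXIT_MASK           3
    #define TB_EXIT_IDX0           0  /* left the TB via goto_tb slot 0 */
    #define TB_EXIT_IDX1           1  /* left the TB via goto_tb slot 1 */
    #define TB_EXIT_ICOUNT_EXPIRED 2  /* icount would hit zero mid-TB */
    #define TB_EXIT_REQUESTED      3  /* cpu->tcg_exit_req was set */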
							
								
								
									
tcg/tcg.h | 19
| @@ -925,7 +925,7 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi) | ||||
|  | ||||
| /** | ||||
|  * tcg_qemu_tb_exec: | ||||
|  * @env: CPUArchState * for the CPU | ||||
|  * @env: pointer to CPUArchState for the CPU | ||||
|  * @tb_ptr: address of generated code for the TB to execute | ||||
|  * | ||||
|  * Start executing code from a given translation block. | ||||
| @@ -936,30 +936,31 @@ static inline unsigned get_mmuidx(TCGMemOpIdx oi) | ||||
|  * which has not yet been directly linked, or an asynchronous | ||||
|  * event such as an interrupt needs handling. | ||||
|  * | ||||
|  * The return value is a pointer to the next TB to execute | ||||
|  * (if known; otherwise zero). This pointer is assumed to be | ||||
|  * 4-aligned, and the bottom two bits are used to return further | ||||
|  * information: | ||||
|  * Return: The return value is the value passed to the corresponding | ||||
|  * tcg_gen_exit_tb() at translation time of the last TB attempted to execute. | ||||
|  * The value is either zero or a 4-byte aligned pointer to that TB combined | ||||
|  * with additional information in its two least significant bits. The | ||||
|  * additional information is encoded as follows: | ||||
|  *  0, 1: the link between this TB and the next is via the specified | ||||
|  *        TB index (0 or 1). That is, we left the TB via (the equivalent | ||||
|  *        of) "goto_tb <index>". The main loop uses this to determine | ||||
|  *        how to link the TB just executed to the next. | ||||
|  *  2:    we are using instruction counting code generation, and we | ||||
|  *        did not start executing this TB because the instruction counter | ||||
|  *        would hit zero midway through it. In this case the next-TB pointer | ||||
|  *        would hit zero midway through it. In this case the pointer | ||||
|  *        returned is the TB we were about to execute, and the caller must | ||||
|  *        arrange to execute the remaining count of instructions. | ||||
|  *  3:    we stopped because the CPU's exit_request flag was set | ||||
|  *        (usually meaning that there is an interrupt that needs to be | ||||
|  *        handled). The next-TB pointer returned is the TB we were | ||||
|  *        about to execute when we noticed the pending exit request. | ||||
|  *        handled). The pointer returned is the TB we were about to execute | ||||
|  *        when we noticed the pending exit request. | ||||
|  * | ||||
|  * If the bottom two bits indicate an exit-via-index then the CPU | ||||
|  * state is correctly synchronised and ready for execution of the next | ||||
|  * TB (and in particular the guest PC is the address to execute next). | ||||
|  * Otherwise, we gave up on execution of this TB before it started, and | ||||
|  * the caller must fix up the CPU state by calling the CPU's | ||||
|  * synchronize_from_tb() method with the next-TB pointer we return (falling | ||||
|  * synchronize_from_tb() method with the TB pointer we return (falling | ||||
|  * back to calling the CPU's set_pc method with tb->pc if no | ||||
|  * synchronize_from_tb() method exists). | ||||
|  * | ||||
|  | ||||
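As a complement to the consumer-side documentation above, here is a hedged sketch of the producer side: a target front end typically ends a TB with tcg_gen_exit_tb(), passing the TB pointer plus the goto_tb slot index, which is exactly the combined value the comment describes. The sketch is not taken from any particular target; gen_set_pc_im() stands in for a hypothetical per-target helper, and the usual same-page check before taking the direct-jump path is omitted:

    /* Sketch: producing the combined value at translation time. */
    static void gen_goto_tb(DisasContext *dc, int slot, target_ulong dest)
    {
        tcg_gen_goto_tb(slot);                     /* direct-jump placeholder (slot 0 or 1) */
        gen_set_pc_im(dc, dest);                   /* hypothetical helper: set the guest PC */
        tcg_gen_exit_tb((uintptr_t)dc->tb + slot); /* low bits carry the slot index */
    }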
							
								
								
									
tci.c | 6
| @@ -467,7 +467,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) | ||||
| { | ||||
|     long tcg_temps[CPU_TEMP_BUF_NLONGS]; | ||||
|     uintptr_t sp_value = (uintptr_t)(tcg_temps + CPU_TEMP_BUF_NLONGS); | ||||
|     uintptr_t next_tb = 0; | ||||
|     uintptr_t ret = 0; | ||||
|  | ||||
|     tci_reg[TCG_AREG0] = (tcg_target_ulong)env; | ||||
|     tci_reg[TCG_REG_CALL_STACK] = sp_value; | ||||
| @@ -1085,7 +1085,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) | ||||
|             /* QEMU specific operations. */ | ||||
|  | ||||
|         case INDEX_op_exit_tb: | ||||
|             next_tb = *(uint64_t *)tb_ptr; | ||||
|             ret = *(uint64_t *)tb_ptr; | ||||
|             goto exit; | ||||
|             break; | ||||
|         case INDEX_op_goto_tb: | ||||
| @@ -1243,5 +1243,5 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) | ||||
|         tci_assert(tb_ptr == old_code_ptr + op_size); | ||||
|     } | ||||
| exit: | ||||
|     return next_tb; | ||||
|     return ret; | ||||
| } | ||||
|  | ||||

trace-events | 2
| @@ -1614,7 +1614,7 @@ kvm_failed_spr_get(int str, const char *msg) "Warning: Unable to retrieve SPR %d | ||||
| # cpu-exec.c | ||||
| disable exec_tb(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR | ||||
| disable exec_tb_nocache(void *tb, uintptr_t pc) "tb:%p pc=0x%"PRIxPTR | ||||
| disable exec_tb_exit(void *next_tb, unsigned int flags) "tb:%p flags=%x" | ||||
| disable exec_tb_exit(void *last_tb, unsigned int flags) "tb:%p flags=%x" | ||||
|  | ||||
| # translate-all.c | ||||
| translate_block(void *tb, uintptr_t pc, uint8_t *tb_code) "tb:%p, pc:0x%"PRIxPTR", tb_code:%p" | ||||
|  | ||||