Per-memop alignment
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20150514' into staging

Per-memop alignment

# gpg: Signature made Thu May 14 20:17:27 2015 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-tcg-20150514:
  tcg: Add MO_ALIGN, MO_UNALN
  tcg: Push merged memop+mmu_idx parameter to softmmu routines
  tcg: Merge memop and mmu_idx parameters to qemu_ld/st

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
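All three patches revolve around TCGMemOpIdx, a single value that packs the memory operation (a TCGMemOp, which now also carries the MO_ALIGN/MO_UNALN alignment request) together with the softmmu MMU index, so one operand can be pushed through qemu_ld/st and into the helpers. As a quick orientation for the diff below, here is a minimal standalone sketch of that packing; the real definitions live in tcg/tcg.h, and the exact bit layout shown here is a reconstruction for illustration rather than a quote from the patch (the MO_UB value and the demo in main() are purely illustrative).

    /* Minimal standalone sketch of the TCGMemOpIdx packing used throughout
     * this pull request.  In QEMU the real helpers live in tcg/tcg.h; the
     * layout below (memop in the upper bits, mmu_idx in the low four bits)
     * is a reconstruction for illustration only. */
    #include <assert.h>
    #include <stdio.h>

    typedef int TCGMemOp;          /* MO_* flags: size, sign, endian, alignment */
    typedef unsigned TCGMemOpIdx;  /* TCGMemOp and mmu_idx packed into one word */

    static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
    {
        assert(idx <= 15);         /* the mmu_idx has to fit in the low nibble */
        return (op << 4) | idx;
    }

    static inline TCGMemOp get_memop(TCGMemOpIdx oi)
    {
        return oi >> 4;
    }

    static inline unsigned get_mmuidx(TCGMemOpIdx oi)
    {
        return oi & 15;
    }

    int main(void)
    {
        TCGMemOpIdx oi = make_memop_idx(0 /* e.g. MO_UB */, 2);
        printf("memop=%d mmu_idx=%u\n", get_memop(oi), get_mmuidx(oi));
        return 0;
    }

Going by the function names, the hunks that follow are the softmmu_template.h load/store helpers, the ARM dc_zva helper, and the aarch64, arm and i386 TCG backends, each converted to pass the packed value instead of a bare mmu_idx.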
This commit is contained in: commit 99e7627a70
@@ -168,9 +168,10 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 #ifdef SOFTMMU_CODE_ACCESS
 static __attribute__((unused))
 #endif
-WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
-                            uintptr_t retaddr)
+WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     uintptr_t haddr;
@@ -182,12 +183,11 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
-#endif
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -218,16 +218,16 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
-#endif
+        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
            Undo that for the recursion.  */
-        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        res1 = helper_le_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
+        res2 = helper_le_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Little-endian combine.  */
@@ -236,12 +236,11 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     }
 
     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
-#endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
@@ -256,9 +255,10 @@ WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
 #ifdef SOFTMMU_CODE_ACCESS
 static __attribute__((unused))
 #endif
-WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
-                            uintptr_t retaddr)
+WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
+                            TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
     uintptr_t haddr;
@@ -270,12 +270,11 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
          != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
         }
-#endif
         if (!VICTIM_TLB_HIT(ADDR_READ)) {
             tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                      mmu_idx, retaddr);
@@ -306,16 +305,16 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
         DATA_TYPE res1, res2;
         unsigned shift;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                  mmu_idx, retaddr);
-#endif
+        }
         addr1 = addr & ~(DATA_SIZE - 1);
         addr2 = addr1 + DATA_SIZE;
         /* Note the adjustment at the beginning of the function.
            Undo that for the recursion.  */
-        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
-        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
+        res1 = helper_be_ld_name(env, addr1, oi, retaddr + GETPC_ADJ);
+        res2 = helper_be_ld_name(env, addr2, oi, retaddr + GETPC_ADJ);
         shift = (addr & (DATA_SIZE - 1)) * 8;
 
         /* Big-endian combine.  */
@@ -324,12 +323,11 @@ WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
     }
 
     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                              mmu_idx, retaddr);
     }
-#endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
@@ -341,7 +339,8 @@ DATA_TYPE
 glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          int mmu_idx)
 {
-    return helper_te_ld_name (env, addr, mmu_idx, GETRA());
+    TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
+    return helper_te_ld_name (env, addr, oi, GETRA());
 }
 
 #ifndef SOFTMMU_CODE_ACCESS
@@ -350,16 +349,16 @@ glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
    avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
 #if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
 WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
-                             int mmu_idx, uintptr_t retaddr)
+                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
+    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
 }
 
 # if DATA_SIZE > 1
 WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
-                             int mmu_idx, uintptr_t retaddr)
+                             TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
+    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
 }
 # endif
 #endif
@@ -386,8 +385,9 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
 }
 
 void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
-                       int mmu_idx, uintptr_t retaddr)
+                       TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     uintptr_t haddr;
@@ -398,12 +398,11 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
-#endif
         if (!VICTIM_TLB_HIT(addr_write)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -431,10 +430,10 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
-#endif
+        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache.  */
@@ -444,18 +443,17 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             /* Note the adjustment at the beginning of the function.
                Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            mmu_idx, retaddr + GETPC_ADJ);
+                                            oi, retaddr + GETPC_ADJ);
         }
         return;
     }
 
     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
-#endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
 #if DATA_SIZE == 1
@@ -467,8 +465,9 @@ void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
 
 #if DATA_SIZE > 1
 void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
-                       int mmu_idx, uintptr_t retaddr)
+                       TCGMemOpIdx oi, uintptr_t retaddr)
 {
+    unsigned mmu_idx = get_mmuidx(oi);
     int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
     target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
     uintptr_t haddr;
@@ -479,12 +478,11 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
     /* If the TLB entry is for a different page, reload and try again.  */
     if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
-#ifdef ALIGNED_ONLY
-        if ((addr & (DATA_SIZE - 1)) != 0) {
+        if ((addr & (DATA_SIZE - 1)) != 0
+            && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
         }
-#endif
         if (!VICTIM_TLB_HIT(addr_write)) {
             tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
         }
@@ -512,10 +510,10 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                      >= TARGET_PAGE_SIZE)) {
         int i;
     do_unaligned_access:
-#ifdef ALIGNED_ONLY
+        if ((get_memop(oi) & MO_AMASK) == MO_ALIGN) {
             cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                                  mmu_idx, retaddr);
-#endif
+        }
         /* XXX: not efficient, but simple */
         /* Note: relies on the fact that tlb_fill() does not remove the
          * previous page from the TLB cache.  */
@@ -525,18 +523,17 @@ void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
             /* Note the adjustment at the beginning of the function.
                Undo that for the recursion.  */
             glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
-                                            mmu_idx, retaddr + GETPC_ADJ);
+                                            oi, retaddr + GETPC_ADJ);
         }
         return;
     }
 
     /* Handle aligned access or unaligned access in the same page.  */
-#ifdef ALIGNED_ONLY
-    if ((addr & (DATA_SIZE - 1)) != 0) {
+    if ((addr & (DATA_SIZE - 1)) != 0
+        && (get_memop(oi) & MO_AMASK) == MO_ALIGN) {
         cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                              mmu_idx, retaddr);
     }
-#endif
 
     haddr = addr + env->tlb_table[mmu_idx][index].addend;
     glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
@@ -547,7 +544,8 @@ void
 glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                          DATA_TYPE val, int mmu_idx)
 {
-    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
+    TCGMemOpIdx oi = make_memop_idx(SHIFT, mmu_idx);
+    helper_te_st_name(env, addr, val, oi, GETRA());
 }
 
 #endif /* !defined(SOFTMMU_CODE_ACCESS) */
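In the load/store helpers above, the compile-time #ifdef ALIGNED_ONLY guard becomes a run-time test on the memop's alignment bits, so each individual access can ask for (or opt out of) alignment checking. The MO_ALIGN/MO_UNALN flags themselves come from the first patch in the series, which is not shown in this excerpt; the sketch below is a reconstruction of those definitions plus the new test, with the helper function name invented purely for illustration.

    /* Reconstructed sketch of the MO_ALIGN/MO_UNALN flags added by
     * "tcg: Add MO_ALIGN, MO_UNALN" -- see tcg/tcg.h for the authoritative
     * definitions.  The default is chosen so that targets that used to
     * define ALIGNED_ONLY keep their old behaviour. */
    #include <stdbool.h>
    #include <stdint.h>

    enum {
        MO_AMASK = 16,          /* mask for the alignment-request bit */
    #ifdef ALIGNED_ONLY
        MO_ALIGN = 0,           /* strict targets check by default ... */
        MO_UNALN = MO_AMASK,    /* ... and MO_UNALN opts a memop out */
    #else
        MO_ALIGN = MO_AMASK,    /* other targets only check when asked */
        MO_UNALN = 0,
    #endif
    };

    /* Illustrative helper (not in QEMU): the condition that replaces the
     * old "#ifdef ALIGNED_ONLY" blocks in the softmmu helpers above. */
    static inline bool unaligned_access_should_fault(uint64_t addr,
                                                     int data_size, int memop)
    {
        return (addr & (data_size - 1)) != 0
            && (memop & MO_AMASK) == MO_ALIGN;
    }

This is what lets the same helper serve both targets that always require alignment and individual operations that request it, which is the "per-memop alignment" of the pull-request title.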
@@ -6013,13 +6013,15 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
         void *hostaddr[maxidx];
         int try, i;
+        unsigned mmu_idx = cpu_mmu_index(env);
+        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
 
         for (try = 0; try < 2; try++) {
 
             for (i = 0; i < maxidx; i++) {
                 hostaddr[i] = tlb_vaddr_to_host(env,
                                                 vaddr + TARGET_PAGE_SIZE * i,
-                                                1, cpu_mmu_index(env));
+                                                1, mmu_idx);
                 if (!hostaddr[i]) {
                     break;
                 }
@@ -6040,12 +6042,12 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
              * this purpose use the actual register value passed to us
              * so that we get the fault address right.
              */
-            helper_ret_stb_mmu(env, vaddr_in, 0, cpu_mmu_index(env), GETRA());
+            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETRA());
             /* Now we can populate the other TLB entries, if any */
             for (i = 0; i < maxidx; i++) {
                 uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                 if (va != (vaddr_in & TARGET_PAGE_MASK)) {
-                    helper_ret_stb_mmu(env, va, 0, cpu_mmu_index(env), GETRA());
+                    helper_ret_stb_mmu(env, va, 0, oi, GETRA());
                 }
             }
         }
@@ -6062,7 +6064,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
          *    bounce buffer was in use
          */
         for (i = 0; i < blocklen; i++) {
-            helper_ret_stb_mmu(env, vaddr + i, 0, cpu_mmu_index(env), GETRA());
+            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETRA());
         }
     }
 #else
@@ -959,7 +959,7 @@ static inline void tcg_out_addsub2(TCGContext *s, int ext, TCGReg rl,
 
 #ifdef CONFIG_SOFTMMU
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
- *                                     int mmu_idx, uintptr_t ra)
+ *                                     TCGMemOpIdx oi, uintptr_t ra)
  */
 static void * const qemu_ld_helpers[16] = {
     [MO_UB]   = helper_ret_ldub_mmu,
@@ -972,7 +972,8 @@ static void * const qemu_ld_helpers[16] = {
 };
 
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
- *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
+ *                                     uintxx_t val, TCGMemOpIdx oi,
+ *                                     uintptr_t ra)
  */
 static void * const qemu_st_helpers[16] = {
     [MO_UB]   = helper_ret_stb_mmu,
@@ -993,14 +994,15 @@ static inline void tcg_out_adr(TCGContext *s, TCGReg rd, void *target)
 
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGMemOp size = opc & MO_SIZE;
 
     reloc_pc19(lb->label_ptr[0], s->code_ptr);
 
-    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
+    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
-    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi);
     tcg_out_adr(s, TCG_REG_X3, lb->raddr);
     tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);
     if (opc & MO_SIGN) {
@@ -1014,33 +1016,32 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGMemOp size = opc & MO_SIZE;
 
     reloc_pc19(lb->label_ptr[0], s->code_ptr);
 
-    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_X0, TCG_AREG0);
+    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X0, TCG_AREG0);
     tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg);
     tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg);
-    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi);
     tcg_out_adr(s, TCG_REG_X4, lb->raddr);
     tcg_out_call(s, qemu_st_helpers[opc]);
     tcg_out_goto(s, lb->raddr);
 }
 
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                 TCGType ext, TCGReg data_reg, TCGReg addr_reg,
-                                int mem_index, tcg_insn_unit *raddr,
-                                tcg_insn_unit *label_ptr)
+                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
-    label->opc = opc;
+    label->oi = oi;
     label->type = ext;
     label->datalo_reg = data_reg;
     label->addrlo_reg = addr_reg;
-    label->mem_index = mem_index;
     label->raddr = raddr;
     label->label_ptr[0] = label_ptr;
 }
@@ -1197,16 +1198,18 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp memop,
 }
 
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
-                            TCGMemOp memop, TCGType ext, int mem_index)
+                            TCGMemOpIdx oi, TCGType ext)
 {
+    TCGMemOp memop = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
+    unsigned mem_index = get_mmuidx(oi);
     TCGMemOp s_bits = memop & MO_SIZE;
     tcg_insn_unit *label_ptr;
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_REG_X1);
-    add_qemu_ldst_label(s, true, memop, ext, data_reg, addr_reg,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg,
                            GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
@@ -1214,16 +1217,18 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
 }
 
 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
-                            TCGMemOp memop, int mem_index)
+                            TCGMemOpIdx oi)
 {
+    TCGMemOp memop = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
+    unsigned mem_index = get_mmuidx(oi);
     TCGMemOp s_bits = memop & MO_SIZE;
     tcg_insn_unit *label_ptr;
 
     tcg_out_tlb_read(s, addr_reg, s_bits, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_REG_X1);
-    add_qemu_ldst_label(s, false, memop, s_bits == MO_64, data_reg, addr_reg,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, s_bits == MO_64, data_reg, addr_reg,
+                        s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg,
                            GUEST_BASE ? TCG_REG_GUEST_BASE : TCG_REG_XZR);
@@ -1515,11 +1520,11 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 
     case INDEX_op_qemu_ld_i32:
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, ext, args[3]);
+        tcg_out_qemu_ld(s, a0, a1, a2, ext);
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, REG0(0), a1, a2, args[3]);
+        tcg_out_qemu_st(s, REG0(0), a1, a2);
         break;
 
     case INDEX_op_bswap64_i64:
@@ -1221,20 +1221,19 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                 TCGReg datalo, TCGReg datahi, TCGReg addrlo,
-                                TCGReg addrhi, int mem_index,
-                                tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
+                                TCGReg addrhi, tcg_insn_unit *raddr,
+                                tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);
 
     label->is_ld = is_ld;
-    label->opc = opc;
+    label->oi = oi;
     label->datalo_reg = datalo;
     label->datahi_reg = datahi;
     label->addrlo_reg = addrlo;
     label->addrhi_reg = addrhi;
-    label->mem_index = mem_index;
     label->raddr = raddr;
     label->label_ptr[0] = label_ptr;
 }
@@ -1242,7 +1241,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);
     void *func;
 
     reloc_pc24(lb->label_ptr[0], s->code_ptr);
@@ -1253,7 +1253,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     } else {
         argreg = tcg_out_arg_reg32(s, argreg, lb->addrlo_reg);
     }
-    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
+    argreg = tcg_out_arg_imm32(s, argreg, oi);
     argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
 
     /* For armv6 we can use the canonical unsigned helpers and minimize
@@ -1302,7 +1302,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg argreg, datalo, datahi;
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);
 
     reloc_pc24(lb->label_ptr[0], s->code_ptr);
 
@@ -1332,7 +1333,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         break;
     }
 
-    argreg = tcg_out_arg_imm32(s, argreg, lb->mem_index);
+    argreg = tcg_out_arg_imm32(s, argreg, oi);
     argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
 
     /* Tail-call to the helper, which will return to the fast path.  */
@@ -1463,6 +1464,7 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, TCGMemOp opc,
 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 {
     TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
+    TCGMemOpIdx oi;
     TCGMemOp opc;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -1474,10 +1476,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     datahi = (is64 ? *args++ : 0);
     addrlo = *args++;
     addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);
 
 #ifdef CONFIG_SOFTMMU
-    mem_index = *args;
+    mem_index = get_mmuidx(oi);
     addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 1);
 
     /* This a conditional BL only to load a pointer within this opcode into LR
@@ -1487,8 +1490,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 
     tcg_out_qemu_ld_index(s, opc, datalo, datahi, addrlo, addend);
 
-    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
+                        s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     if (GUEST_BASE) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
@@ -1592,6 +1595,7 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, TCGMemOp opc,
 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 {
     TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
+    TCGMemOpIdx oi;
    TCGMemOp opc;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -1603,10 +1607,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     datahi = (is64 ? *args++ : 0);
     addrlo = *args++;
     addrhi = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);
 
 #ifdef CONFIG_SOFTMMU
-    mem_index = *args;
+    mem_index = get_mmuidx(oi);
     addend = tcg_out_tlb_read(s, addrlo, addrhi, opc & MO_SIZE, mem_index, 0);
 
     tcg_out_qemu_st_index(s, COND_EQ, opc, datalo, datahi, addrlo, addend);
@@ -1615,8 +1620,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     label_ptr = s->code_ptr;
     tcg_out_bl_noaddr(s, COND_NE);
 
-    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
+                        s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     if (GUEST_BASE) {
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP, GUEST_BASE);
@@ -1244,21 +1244,20 @@ static inline void tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
  * Record the context of a call to the out of line helper code for the slow path
  * for a load or store, so that we can later generate the correct helper code
  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                 TCGReg datalo, TCGReg datahi,
                                 TCGReg addrlo, TCGReg addrhi,
-                                int mem_index, tcg_insn_unit *raddr,
+                                tcg_insn_unit *raddr,
                                 tcg_insn_unit **label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);

     label->is_ld = is_ld;
-    label->opc = opc;
+    label->oi = oi;
     label->datalo_reg = datalo;
     label->datahi_reg = datahi;
     label->addrlo_reg = addrlo;
     label->addrhi_reg = addrhi;
-    label->mem_index = mem_index;
     label->raddr = raddr;
     label->label_ptr[0] = label_ptr[0];
     if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
@@ -1271,7 +1270,8 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
  */
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOp opc = l->opc;
+    TCGMemOpIdx oi = l->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGReg data_reg;
     tcg_insn_unit **label_ptr = &l->label_ptr[0];

@@ -1295,15 +1295,14 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
             ofs += 4;
         }

-        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
         ofs += 4;

-        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
+        tcg_out_sti(s, TCG_TYPE_PTR, TCG_REG_ESP, ofs, (uintptr_t)l->raddr);
     } else {
         tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
         /* The second argument is already loaded with addrlo.  */
-        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
-                     l->mem_index);
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
         tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
                      (uintptr_t)l->raddr);
     }
@@ -1354,7 +1353,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
  */
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOp opc = l->opc;
+    TCGMemOpIdx oi = l->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     tcg_insn_unit **label_ptr = &l->label_ptr[0];
     TCGReg retaddr;
@@ -1387,19 +1387,18 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
             ofs += 4;
         }

-        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, oi);
         ofs += 4;

         retaddr = TCG_REG_EAX;
-        tcg_out_movi(s, TCG_TYPE_I32, retaddr, (uintptr_t)l->raddr);
-        tcg_out_st(s, TCG_TYPE_I32, retaddr, TCG_REG_ESP, ofs);
+        tcg_out_movi(s, TCG_TYPE_PTR, retaddr, (uintptr_t)l->raddr);
+        tcg_out_st(s, TCG_TYPE_PTR, retaddr, TCG_REG_ESP, ofs);
     } else {
         tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
         /* The second argument is already loaded with addrlo.  */
         tcg_out_mov(s, (s_bits == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                     tcg_target_call_iarg_regs[2], l->datalo_reg);
-        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
-                     l->mem_index);
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], oi);

         if (ARRAY_SIZE(tcg_target_call_iarg_regs) > 4) {
             retaddr = tcg_target_call_iarg_regs[4];
@@ -1531,6 +1530,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
 {
     TCGReg datalo, datahi, addrlo;
     TCGReg addrhi __attribute__((unused));
+    TCGMemOpIdx oi;
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     int mem_index;
@@ -1542,10 +1542,11 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
     addrlo = *args++;
     addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);

 #if defined(CONFIG_SOFTMMU)
-    mem_index = *args++;
+    mem_index = get_mmuidx(oi);
     s_bits = opc & MO_SIZE;

     tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
@@ -1555,8 +1556,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
     tcg_out_qemu_ld_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

     /* Record the current context of a load into ldst label */
-    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
+                        s->code_ptr, label_ptr);
 #else
     {
         int32_t offset = GUEST_BASE;
@@ -1662,6 +1663,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
 {
     TCGReg datalo, datahi, addrlo;
     TCGReg addrhi __attribute__((unused));
+    TCGMemOpIdx oi;
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     int mem_index;
@@ -1673,10 +1675,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     datahi = (TCG_TARGET_REG_BITS == 32 && is64 ? *args++ : 0);
     addrlo = *args++;
     addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);

 #if defined(CONFIG_SOFTMMU)
-    mem_index = *args++;
+    mem_index = get_mmuidx(oi);
     s_bits = opc & MO_SIZE;

     tcg_out_tlb_load(s, addrlo, addrhi, mem_index, s_bits,
@@ -1686,8 +1689,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
     tcg_out_qemu_st_direct(s, datalo, datahi, TCG_REG_L1, 0, 0, opc);

     /* Record the current context of a store into ldst label */
-    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
+                        s->code_ptr, label_ptr);
 #else
     {
         int32_t offset = GUEST_BASE;

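Every backend in this series splits the combined operand back apart with get_memop() and get_mmuidx(); the helpers themselves are declared in tcg.h rather than in these hunks. A minimal sketch of the intended packing, assuming the soft-MMU index occupies the low four bits (illustrative only, not quoted from the patch):

    /* Sketch: pack a TCGMemOp and a soft-MMU index into a single operand. */
    typedef uint32_t TCGMemOpIdx;

    static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
    {
        return (op << 4) | idx;   /* assumes idx fits in the low 4 bits */
    }

    static inline TCGMemOp get_memop(TCGMemOpIdx oi)
    {
        return oi >> 4;           /* size, sign, endianness, alignment bits */
    }

    static inline unsigned get_mmuidx(TCGMemOpIdx oi)
    {
        return oi & 15;           /* soft-MMU index */
    }
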
@@ -1634,14 +1634,16 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
         OPC_LD1_M1, OPC_LD2_M1, OPC_LD4_M1, OPC_LD8_M1
     };
     int addr_reg, data_reg, mem_index;
+    TCGMemOpIdx oi;
     TCGMemOp opc, s_bits;
     uint64_t fin1, fin2;
     tcg_insn_unit *label_ptr;

     data_reg = args[0];
     addr_reg = args[1];
-    opc = args[2];
-    mem_index = args[3];
+    oi = args[2];
+    opc = get_memop(oi);
+    mem_index = get_mmuidx(oi);
     s_bits = opc & MO_SIZE;

     /* Read the TLB entry */
@@ -1669,7 +1671,7 @@ static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
                    tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
                    tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
                                TCG_REG_R2, TCG_REG_R57),
-                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, mem_index));
+                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R58, oi));
     label_ptr = s->code_ptr;
     tcg_out_bundle(s, miB,
                    tcg_opc_m1 (TCG_REG_P6, opc_ld_m1[s_bits],
@@ -1696,13 +1698,15 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
     TCGReg addr_reg, data_reg;
     int mem_index;
     uint64_t pre1, pre2;
+    TCGMemOpIdx oi;
     TCGMemOp opc, s_bits;
     tcg_insn_unit *label_ptr;

     data_reg = args[0];
     addr_reg = args[1];
-    opc = args[2];
-    mem_index = args[3];
+    oi = args[2];
+    opc = get_memop(oi);
+    mem_index = get_mmuidx(oi);
     s_bits = opc & MO_SIZE;

     /* Note that we always use LE helper functions, so the bswap insns
@@ -1731,7 +1735,7 @@ static inline void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
                    tcg_opc_mov_a(TCG_REG_P7, TCG_REG_R56, TCG_AREG0),
                    tcg_opc_a1 (TCG_REG_P6, OPC_ADD_A1, TCG_REG_R2,
                                TCG_REG_R2, TCG_REG_R57),
-                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, mem_index));
+                   tcg_opc_movi_a(TCG_REG_P7, TCG_REG_R59, oi));
     label_ptr = s->code_ptr;
     tcg_out_bundle(s, miB,
                    tcg_opc_m4 (TCG_REG_P6, opc_st_m4[s_bits],

@@ -990,21 +990,19 @@ static void tcg_out_tlb_load(TCGContext *s, TCGReg base, TCGReg addrl,
     tcg_out_opc_reg(s, OPC_ADDU, base, TCG_REG_A0, addrl);
 }

-static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                 TCGReg datalo, TCGReg datahi,
                                 TCGReg addrlo, TCGReg addrhi,
-                                int mem_index, void *raddr,
-                                tcg_insn_unit *label_ptr[2])
+                                void *raddr, tcg_insn_unit *label_ptr[2])
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);

     label->is_ld = is_ld;
-    label->opc = opc;
+    label->oi = oi;
     label->datalo_reg = datalo;
     label->datahi_reg = datahi;
     label->addrlo_reg = addrlo;
     label->addrhi_reg = addrhi;
-    label->mem_index = mem_index;
     label->raddr = raddr;
     label->label_ptr[0] = label_ptr[0];
     if (TARGET_LONG_BITS == 64) {
@@ -1014,7 +1012,8 @@ static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOp opc,

 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOp opc = l->opc;
+    TCGMemOpIdx oi = l->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGReg v0;
     int i;

@@ -1030,7 +1029,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     } else {
         i = tcg_out_call_iarg_reg(s, i, l->addrlo_reg);
     }
-    i = tcg_out_call_iarg_imm(s, i, l->mem_index);
+    i = tcg_out_call_iarg_imm(s, i, oi);
     i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr);
     tcg_out_call_int(s, qemu_ld_helpers[opc], false);
     /* delay slot */
@@ -1056,7 +1055,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)

 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    TCGMemOp opc = l->opc;
+    TCGMemOpIdx oi = l->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     int i;

@@ -1088,7 +1088,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     default:
         tcg_abort();
     }
-    i = tcg_out_call_iarg_imm(s, i, l->mem_index);
+    i = tcg_out_call_iarg_imm(s, i, oi);

     /* Tail call to the store helper.  Thus force the return address
        computation to take place in the return address register.  */
@@ -1150,6 +1150,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh __attribute__((unused));
     TCGReg data_regl, data_regh;
+    TCGMemOpIdx oi;
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[2];
@@ -1164,17 +1165,18 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     data_regh = (is_64 ? *args++ : 0);
     addr_regl = *args++;
     addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);

 #if defined(CONFIG_SOFTMMU)
-    mem_index = *args;
+    mem_index = get_mmuidx(oi);
     s_bits = opc & MO_SIZE;

     tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                      s_bits, label_ptr, 1);
     tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc);
-    add_qemu_ldst_label(s, 1, opc, data_regl, data_regh, addr_regl, addr_regh,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, 1, oi, data_regl, data_regh, addr_regl, addr_regh,
+                        s->code_ptr, label_ptr);
 #else
     if (GUEST_BASE == 0 && data_regl != addr_regl) {
         base = addr_regl;
@@ -1279,6 +1281,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg addr_regl, addr_regh __attribute__((unused));
     TCGReg data_regl, data_regh, base;
+    TCGMemOpIdx oi;
     TCGMemOp opc;
 #if defined(CONFIG_SOFTMMU)
     tcg_insn_unit *label_ptr[2];
@@ -1290,10 +1293,11 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     data_regh = (is_64 ? *args++ : 0);
     addr_regl = *args++;
     addr_regh = (TARGET_LONG_BITS == 64 ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);

 #if defined(CONFIG_SOFTMMU)
-    mem_index = *args;
+    mem_index = get_mmuidx(oi);
     s_bits = opc & 3;

     /* Note that we eliminated the helper's address argument,
@@ -1302,8 +1306,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     tcg_out_tlb_load(s, base, addr_regl, addr_regh, mem_index,
                      s_bits, label_ptr, 0);
     tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
-    add_qemu_ldst_label(s, 0, opc, data_regl, data_regh, addr_regl, addr_regh,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, 0, oi, data_regl, data_regh, addr_regl, addr_regh,
+                        s->code_ptr, label_ptr);
 #else
     if (GUEST_BASE == 0) {
         base = addr_regl;

@@ -918,7 +918,8 @@ static void tcg_constant_folding(TCGContext *s)

         CASE_OP_32_64(qemu_ld):
             {
-                TCGMemOp mop = args[nb_oargs + nb_iargs];
+                TCGMemOpIdx oi = args[nb_oargs + nb_iargs];
+                TCGMemOp mop = get_memop(oi);
                 if (!(mop & MO_SIGN)) {
                     mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                 }

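The mask expression in the qemu_ld case is easier to check with a concrete size. For a 16-bit unsigned load, mop & MO_SIZE is 1, and the arithmetic works out as follows (worked example only, not part of the patch):

    /* 8 << 1 = 16 bits; 2ULL << (16 - 1) = 0x10000; minus 1 = 0xffff. */
    uint64_t mask = (2ULL << ((8 << 1) - 1)) - 1;   /* 0xffff for a 16-bit load */
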
@@ -1451,28 +1451,27 @@ static TCGReg tcg_out_tlb_read(TCGContext *s, TCGMemOp s_bits,
 /* Record the context of a call to the out of line helper code for the slow
    path for a load or store, so that we can later generate the correct
    helper code.  */
-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
                                 TCGReg datalo_reg, TCGReg datahi_reg,
                                 TCGReg addrlo_reg, TCGReg addrhi_reg,
-                                int mem_index, tcg_insn_unit *raddr,
-                                tcg_insn_unit *lptr)
+                                tcg_insn_unit *raddr, tcg_insn_unit *lptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);

     label->is_ld = is_ld;
-    label->opc = opc;
+    label->oi = oi;
     label->datalo_reg = datalo_reg;
     label->datahi_reg = datahi_reg;
     label->addrlo_reg = addrlo_reg;
     label->addrhi_reg = addrhi_reg;
-    label->mem_index = mem_index;
     label->raddr = raddr;
     label->label_ptr[0] = lptr;
 }

 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGReg hi, lo, arg = TCG_REG_R3;

     reloc_pc14(lb->label_ptr[0], s->code_ptr);
@@ -1493,7 +1492,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
     }

-    tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
     tcg_out32(s, MFSPR | RT(arg) | LR);

     tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]);
@@ -1515,7 +1514,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)

 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);
     TCGMemOp s_bits = opc & MO_SIZE;
     TCGReg hi, lo, arg = TCG_REG_R3;

@@ -1562,7 +1562,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
         }
     }

-    tcg_out_movi(s, TCG_TYPE_I32, arg++, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
     tcg_out32(s, MFSPR | RT(arg) | LR);

     tcg_out_call(s, qemu_st_helpers[opc]);
@@ -1575,6 +1575,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg datalo, datahi, addrlo, rbase;
     TCGReg addrhi __attribute__((unused));
+    TCGMemOpIdx oi;
     TCGMemOp opc, s_bits;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -1585,11 +1586,12 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
     addrlo = *args++;
     addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);
     s_bits = opc & MO_SIZE;

 #ifdef CONFIG_SOFTMMU
-    mem_index = *args;
+    mem_index = get_mmuidx(oi);
     addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, true);

     /* Load a pointer into the current opcode w/conditional branch-link. */
@@ -1639,8 +1641,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
     }

 #ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, true, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
+                        s->code_ptr, label_ptr);
 #endif
 }

@@ -1648,6 +1650,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
 {
     TCGReg datalo, datahi, addrlo, rbase;
     TCGReg addrhi __attribute__((unused));
+    TCGMemOpIdx oi;
     TCGMemOp opc, s_bits;
 #ifdef CONFIG_SOFTMMU
     int mem_index;
@@ -1658,11 +1661,12 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
     addrlo = *args++;
     addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
-    opc = *args++;
+    oi = *args++;
+    opc = get_memop(oi);
     s_bits = opc & MO_SIZE;

 #ifdef CONFIG_SOFTMMU
-    mem_index = *args;
+    mem_index = get_mmuidx(oi);
     addrlo = tcg_out_tlb_read(s, s_bits, addrlo, addrhi, mem_index, false);

     /* Load a pointer into the current opcode w/conditional branch-link. */
@@ -1704,8 +1708,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
     }

 #ifdef CONFIG_SOFTMMU
-    add_qemu_ldst_label(s, false, opc, datalo, datahi, addrlo, addrhi,
-                        mem_index, s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
+                        s->code_ptr, label_ptr);
 #endif
 }

@@ -1544,17 +1544,16 @@ static TCGReg tcg_out_tlb_read(TCGContext* s, TCGReg addr_reg, TCGMemOp opc,
     return addr_reg;
 }

-static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
-                                TCGReg data, TCGReg addr, int mem_index,
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOpIdx oi,
+                                TCGReg data, TCGReg addr,
                                 tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
 {
     TCGLabelQemuLdst *label = new_ldst_label(s);

     label->is_ld = is_ld;
-    label->opc = opc;
+    label->oi = oi;
     label->datalo_reg = data;
     label->addrlo_reg = addr;
-    label->mem_index = mem_index;
     label->raddr = raddr;
     label->label_ptr[0] = label_ptr;
 }
@@ -1563,7 +1562,8 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg addr_reg = lb->addrlo_reg;
     TCGReg data_reg = lb->datalo_reg;
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);

     patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

@@ -1571,7 +1571,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     if (TARGET_LONG_BITS == 64) {
         tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, addr_reg);
     }
-    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi);
     tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr);
     tcg_out_call(s, qemu_ld_helpers[opc]);
     tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2);
@@ -1583,7 +1583,8 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
 {
     TCGReg addr_reg = lb->addrlo_reg;
     TCGReg data_reg = lb->datalo_reg;
-    TCGMemOp opc = lb->opc;
+    TCGMemOpIdx oi = lb->oi;
+    TCGMemOp opc = get_memop(oi);

     patch_reloc(lb->label_ptr[0], R_390_PC16DBL, (intptr_t)s->code_ptr, -2);

@@ -1607,7 +1608,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     default:
         tcg_abort();
     }
-    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi);
     tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr);
     tcg_out_call(s, qemu_st_helpers[opc]);

@@ -1632,9 +1633,11 @@ static void tcg_prepare_user_ldst(TCGContext *s, TCGReg *addr_reg,
 #endif /* CONFIG_SOFTMMU */

 static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
-                            TCGMemOp opc, int mem_index)
+                            TCGMemOpIdx oi)
 {
+    TCGMemOp opc = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
+    unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
     TCGReg base_reg;

@@ -1645,8 +1648,7 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,

     tcg_out_qemu_ld_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

-    add_qemu_ldst_label(s, 1, opc, data_reg, addr_reg, mem_index,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, 1, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
 #else
     TCGReg index_reg;
     tcg_target_long disp;
@@ -1657,9 +1659,11 @@ static void tcg_out_qemu_ld(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
 }

 static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,
-                            TCGMemOp opc, int mem_index)
+                            TCGMemOpIdx oi)
 {
+    TCGMemOp opc = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
+    unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
     TCGReg base_reg;

@@ -1670,8 +1674,7 @@ static void tcg_out_qemu_st(TCGContext* s, TCGReg data_reg, TCGReg addr_reg,

     tcg_out_qemu_st_direct(s, opc, data_reg, base_reg, TCG_REG_R2, 0);

-    add_qemu_ldst_label(s, 0, opc, data_reg, addr_reg, mem_index,
-                        s->code_ptr, label_ptr);
+    add_qemu_ldst_label(s, 0, oi, data_reg, addr_reg, s->code_ptr, label_ptr);
 #else
     TCGReg index_reg;
     tcg_target_long disp;
@@ -1920,11 +1923,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_qemu_ld_i32:
         /* ??? Technically we can use a non-extending instruction.  */
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3]);
+        tcg_out_qemu_ld(s, args[0], args[1], args[2]);
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, args[0], args[1], args[2], args[3]);
+        tcg_out_qemu_st(s, args[0], args[1], args[2]);
         break;

     case INDEX_op_ld16s_i64:

@@ -915,7 +915,7 @@ static void build_trampolines(TCGContext *s)
             } else {
                 ra += 1;
             }
-            /* Skip the mem_index argument.  */
+            /* Skip the oi argument.  */
             ra += 1;
         }

@@ -1070,9 +1070,11 @@ static const int qemu_st_opc[16] = {
 };

 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
-                            TCGMemOp memop, int memi, bool is_64)
+                            TCGMemOpIdx oi, bool is_64)
 {
+    TCGMemOp memop = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
+    unsigned memi = get_mmuidx(oi);
     TCGMemOp s_bits = memop & MO_SIZE;
     TCGReg addrz, param;
     tcg_insn_unit *func;
@@ -1111,7 +1113,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
     assert(func != NULL);
     tcg_out_call_nodelay(s, func);
     /* delay slot */
-    tcg_out_movi(s, TCG_TYPE_I32, param, memi);
+    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

     /* Recall that all of the helpers return 64-bit results.
        Which complicates things for sparcv8plus.  */
@@ -1150,9 +1152,11 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
 }

 static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
-                            TCGMemOp memop, int memi)
+                            TCGMemOpIdx oi)
 {
+    TCGMemOp memop = get_memop(oi);
 #ifdef CONFIG_SOFTMMU
+    unsigned memi = get_mmuidx(oi);
     TCGMemOp s_bits = memop & MO_SIZE;
     TCGReg addrz, param;
     tcg_insn_unit *func;
@@ -1188,7 +1192,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
     assert(func != NULL);
     tcg_out_call_nodelay(s, func);
     /* delay slot */
-    tcg_out_movi(s, TCG_TYPE_REG, param, memi);
+    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

     *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
 #else
@@ -1363,14 +1367,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;

     case INDEX_op_qemu_ld_i32:
-        tcg_out_qemu_ld(s, a0, a1, a2, args[3], false);
+        tcg_out_qemu_ld(s, a0, a1, a2, false);
         break;
     case INDEX_op_qemu_ld_i64:
-        tcg_out_qemu_ld(s, a0, a1, a2, args[3], true);
+        tcg_out_qemu_ld(s, a0, a1, a2, true);
         break;
     case INDEX_op_qemu_st_i32:
     case INDEX_op_qemu_st_i64:
-        tcg_out_qemu_st(s, a0, a1, a2, args[3]);
+        tcg_out_qemu_st(s, a0, a1, a2);
         break;

     case INDEX_op_ld32s_i64:

@@ -24,13 +24,12 @@

 typedef struct TCGLabelQemuLdst {
     bool is_ld;             /* qemu_ld: true, qemu_st: false */
-    TCGMemOp opc;
+    TCGMemOpIdx oi;
     TCGType type;           /* result type of a load */
     TCGReg addrlo_reg;      /* reg index for low word of guest virtual addr */
     TCGReg addrhi_reg;      /* reg index for high word of guest virtual addr */
     TCGReg datalo_reg;      /* reg index for low word to be loaded or stored */
     TCGReg datahi_reg;      /* reg index for high word to be loaded or stored */
-    int mem_index;          /* soft MMU memory index */
     tcg_insn_unit *raddr;   /* gen code addr of the next IR of qemu_ld/st IR */
     tcg_insn_unit *label_ptr[2]; /* label pointers to be updated */
     struct TCGLabelQemuLdst *next;
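With mem_index dropped from TCGLabelQemuLdst, a backend slow path recovers everything it needs from the stored oi. A sketch of the common prologue pattern used by the hunks above (illustrative fragment, assuming the accessor sketch given earlier):

    /* Sketch: typical qemu_ld/st slow-path prologue after this change. */
    TCGMemOpIdx oi = lb->oi;            /* packed memop + soft-MMU index */
    TCGMemOp opc = get_memop(oi);       /* what to load/store and how */
    unsigned mmu_idx = get_mmuidx(oi);  /* only if the backend needs it separately */
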
tcg/tcg-op.c (22 changed lines)

@@ -1873,15 +1873,14 @@ static inline TCGMemOp tcg_canonicalize_memop(TCGMemOp op, bool is64, bool st)
 static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
                          TCGMemOp memop, TCGArg idx)
 {
+    TCGMemOpIdx oi = make_memop_idx(memop, idx);
 #if TARGET_LONG_BITS == 32
-    tcg_gen_op4ii_i32(opc, val, addr, memop, idx);
+    tcg_gen_op3i_i32(opc, val, addr, oi);
 #else
     if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_op5ii_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr),
-                          memop, idx);
+        tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi);
     } else {
-        tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr),
-                    memop, idx);
+        tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I32(val), GET_TCGV_I64(addr), oi);
     }
 #endif
 }
@@ -1889,20 +1888,19 @@ static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr,
 static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr,
                          TCGMemOp memop, TCGArg idx)
 {
+    TCGMemOpIdx oi = make_memop_idx(memop, idx);
 #if TARGET_LONG_BITS == 32
     if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_op5ii_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
-                          addr, memop, idx);
+        tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi);
     } else {
-        tcg_gen_op4(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr),
-                    memop, idx);
+        tcg_gen_op3(&tcg_ctx, opc, GET_TCGV_I64(val), GET_TCGV_I32(addr), oi);
     }
 #else
     if (TCG_TARGET_REG_BITS == 32) {
-        tcg_gen_op6ii_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
-                          TCGV_LOW(addr), TCGV_HIGH(addr), memop, idx);
+        tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val),
+                         TCGV_LOW(addr), TCGV_HIGH(addr), oi);
     } else {
-        tcg_gen_op4ii_i64(opc, val, addr, memop, idx);
+        tcg_gen_op3i_i64(opc, val, addr, oi);
     }
 #endif
 }

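gen_ldst_i32() and gen_ldst_i64() now emit a single constant operand where there were two, so the scheme only works if the packing is lossless. A small illustrative check of that invariant (not part of the patch; MO_TEUL is the target-endian 32-bit memop):

    /* Packing then unpacking must round-trip for every memop/index pair. */
    TCGMemOpIdx oi = make_memop_idx(MO_TEUL, 1);
    assert(get_memop(oi) == MO_TEUL);
    assert(get_mmuidx(oi) == 1);
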
@@ -179,13 +179,13 @@ DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_END)
 #define TLADDR_ARGS    (TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? 1 : 2)
 #define DATA64_ARGS  (TCG_TARGET_REG_BITS == 64 ? 1 : 2)

-DEF(qemu_ld_i32, 1, TLADDR_ARGS, 2,
+DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 2,
+DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
-DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 2,
+DEF(qemu_ld_i64, DATA64_ARGS, TLADDR_ARGS, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)
-DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 2,
+DEF(qemu_st_i64, 0, TLADDR_ARGS + DATA64_ARGS, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS | TCG_OPF_64BIT)

 #undef TLADDR_ARGS
 | 
				
			||||||
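(For reference, the DEF() fields here are name, output args, input args, constant args, flags; the change above simply reflects that the two constant arguments memop and idx collapse into the one combined TCGMemOpIdx constant.)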
tcg/tcg.c
@ -1071,12 +1071,18 @@ void tcg_dump_ops(TCGContext *s)
             case INDEX_op_qemu_st_i32:
             case INDEX_op_qemu_ld_i64:
             case INDEX_op_qemu_st_i64:
-                if (args[k] < ARRAY_SIZE(ldst_name) && ldst_name[args[k]]) {
-                    qemu_log(",%s", ldst_name[args[k++]]);
-                } else {
-                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
-                }
-                i = 1;
+                {
+                    TCGMemOpIdx oi = args[k++];
+                    TCGMemOp op = get_memop(oi);
+                    unsigned ix = get_mmuidx(oi);
+
+                    if (op < ARRAY_SIZE(ldst_name) && ldst_name[op]) {
+                        qemu_log(",%s,%u", ldst_name[op], ix);
+                    } else {
+                        qemu_log(",$0x%x,%u", op, ix);
+                    }
+                    i = 1;
+                }
                 break;
             default:
                 i = 0;
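The practical effect on the op dump is that a qemu_ld/st op now prints both the memop name and the MMU index decoded from the combined constant, e.g. something like ",leul,1" where the old code printed only the memop name (or a raw hex constant when the memop had no name).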
tcg/tcg.h
@ -241,6 +241,19 @@ typedef enum TCGMemOp {
     MO_TE    = MO_LE,
 #endif
 
+    /* MO_UNALN accesses are never checked for alignment.
+       MO_ALIGN accesses will result in a call to the CPU's
+       do_unaligned_access hook if the guest address is not aligned.
+       The default depends on whether the target CPU defines ALIGNED_ONLY.  */
+    MO_AMASK = 16,
+#ifdef ALIGNED_ONLY
+    MO_ALIGN = 0,
+    MO_UNALN = MO_AMASK,
+#else
+    MO_ALIGN = MO_AMASK,
+    MO_UNALN = 0,
+#endif
+
     /* Combinations of the above, for ease of use.  */
     MO_UB    = MO_8,
     MO_UW    = MO_16,
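A usage sketch for the new flags, assuming the standard tcg-op.h front-end API (val, addr and mem_idx are illustrative, not names from this patch): a front end can force or suppress the alignment check per access, independently of whether the target defines ALIGNED_ONLY:

    /* Check alignment for this access even on targets that normally allow
     * unaligned loads; a misaligned guest address then reaches the CPU's
     * do_unaligned_access hook.  */
    tcg_gen_qemu_ld_i32(val, addr, mem_idx, MO_TEUL | MO_ALIGN);

    /* Conversely, explicitly allow an unaligned access on an ALIGNED_ONLY
     * target.  */
    tcg_gen_qemu_ld_i32(val, addr, mem_idx, MO_TEUL | MO_UNALN);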
@ -826,6 +839,44 @@ static inline size_t tcg_current_code_size(TCGContext *s)
     return tcg_ptr_byte_diff(s->code_ptr, s->code_buf);
 }
 
+/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
+typedef uint32_t TCGMemOpIdx;
+
+/**
+ * make_memop_idx
+ * @op: memory operation
+ * @idx: mmu index
+ *
+ * Encode these values into a single parameter.
+ */
+static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
+{
+    tcg_debug_assert(idx <= 15);
+    return (op << 4) | idx;
+}
+
+/**
+ * get_memop
+ * @oi: combined op/idx parameter
+ *
+ * Extract the memory operation from the combined value.
+ */
+static inline TCGMemOp get_memop(TCGMemOpIdx oi)
+{
+    return oi >> 4;
+}
+
+/**
+ * get_mmuidx
+ * @oi: combined op/idx parameter
+ *
+ * Extract the mmu index from the combined value.
+ */
+static inline unsigned get_mmuidx(TCGMemOpIdx oi)
+{
+    return oi & 15;
+}
+
 /**
  * tcg_qemu_tb_exec:
  * @env: CPUArchState * for the CPU
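A small round-trip sketch of the encoding, assuming the tcg.h definitions above are in scope (the concrete memop and index values are illustrative):

    TCGMemOp memop = MO_TEUL | MO_ALIGN;              /* any TCGMemOp */
    unsigned mem_idx = 1;                             /* must satisfy idx <= 15 */
    TCGMemOpIdx oi = make_memop_idx(memop, mem_idx);  /* == (memop << 4) | 1 */

    /* get_memop(oi)  == memop    -- the memop lives in bits 4 and up   */
    /* get_mmuidx(oi) == mem_idx  -- the MMU index lives in bits 0..3   */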
@ -889,46 +940,46 @@ void tcg_register_jit(void *buf, size_t buf_size);
 #ifdef CONFIG_SOFTMMU
 /* Value zero-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
-                                     int mmu_idx, uintptr_t retaddr);
+                                     TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           int mmu_idx, uintptr_t retaddr);
+                           TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
-                           int mmu_idx, uintptr_t retaddr);
+                           TCGMemOpIdx oi, uintptr_t retaddr);
 
 /* Value sign-extended to tcg register size.  */
 tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
-                                     int mmu_idx, uintptr_t retaddr);
+                                     TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
-                                    int mmu_idx, uintptr_t retaddr);
+                                    TCGMemOpIdx oi, uintptr_t retaddr);
 
 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
-                        int mmu_idx, uintptr_t retaddr);
+                        TCGMemOpIdx oi, uintptr_t retaddr);
 void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       int mmu_idx, uintptr_t retaddr);
+                       TCGMemOpIdx oi, uintptr_t retaddr);
 void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       int mmu_idx, uintptr_t retaddr);
+                       TCGMemOpIdx oi, uintptr_t retaddr);
 void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       int mmu_idx, uintptr_t retaddr);
+                       TCGMemOpIdx oi, uintptr_t retaddr);
 void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
-                       int mmu_idx, uintptr_t retaddr);
+                       TCGMemOpIdx oi, uintptr_t retaddr);
 void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
-                       int mmu_idx, uintptr_t retaddr);
+                       TCGMemOpIdx oi, uintptr_t retaddr);
 void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
-                       int mmu_idx, uintptr_t retaddr);
+                       TCGMemOpIdx oi, uintptr_t retaddr);
 
 /* Temporary aliases until backends are converted.  */
 #ifdef TARGET_WORDS_BIGENDIAN
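A hedged sketch of a call site: code that previously passed a bare MMU index to these helpers now packs the memop in as well (env, guest_addr, mem_idx and retaddr are illustrative names, not part of this patch):

    /* Slow-path 32-bit little-endian load through the softmmu helper.  */
    TCGMemOpIdx oi = make_memop_idx(MO_LEUL, mem_idx);
    tcg_target_ulong val = helper_le_ldul_mmu(env, guest_addr, oi, retaddr);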
@ -763,9 +763,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
             tcg_out_r(s, *args++);
         }
         tcg_out_i(s, *args++);
-#ifdef CONFIG_SOFTMMU
-        tcg_out_i(s, *args);
-#endif
         break;
     case INDEX_op_qemu_ld_i64:
         tcg_out_r(s, *args++);
@ -777,9 +774,6 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
             tcg_out_r(s, *args++);
         }
         tcg_out_i(s, *args++);
-#ifdef CONFIG_SOFTMMU
-        tcg_out_i(s, *args);
-#endif
         break;
     case INDEX_op_qemu_st_i32:
         tcg_out_r(s, *args++);
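In other words, the bytecode stream now carries a single immediate per qemu_ld/st op: the combined TCGMemOpIdx replaces the separate memop immediate plus the CONFIG_SOFTMMU-only mmu_idx immediate, which is why the extra tcg_out_i() call disappears here and why the interpreter in tci.c below reads one value back and splits it with get_memop()/get_mmuidx().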
tci.c
@ -420,35 +420,34 @@ static bool tci_compare64(uint64_t u0, uint64_t u1, TCGCond condition)
 }
 
 #ifdef CONFIG_SOFTMMU
-# define mmuidx          tci_read_i(&tb_ptr)
 # define qemu_ld_ub \
-    helper_ret_ldub_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_ret_ldub_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_ld_leuw \
-    helper_le_lduw_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_le_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_ld_leul \
-    helper_le_ldul_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_le_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_ld_leq \
-    helper_le_ldq_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_le_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_ld_beuw \
-    helper_be_lduw_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_be_lduw_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_ld_beul \
-    helper_be_ldul_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_be_ldul_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_ld_beq \
-    helper_be_ldq_mmu(env, taddr, mmuidx, (uintptr_t)tb_ptr)
+    helper_be_ldq_mmu(env, taddr, oi, (uintptr_t)tb_ptr)
 # define qemu_st_b(X) \
-    helper_ret_stb_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_ret_stb_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 # define qemu_st_lew(X) \
-    helper_le_stw_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_le_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 # define qemu_st_lel(X) \
-    helper_le_stl_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_le_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 # define qemu_st_leq(X) \
-    helper_le_stq_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_le_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 # define qemu_st_bew(X) \
-    helper_be_stw_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_be_stw_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 # define qemu_st_bel(X) \
-    helper_be_stl_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_be_stl_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 # define qemu_st_beq(X) \
-    helper_be_stq_mmu(env, taddr, X, mmuidx, (uintptr_t)tb_ptr)
+    helper_be_stq_mmu(env, taddr, X, oi, (uintptr_t)tb_ptr)
 #else
 # define qemu_ld_ub      ldub_p(g2h(taddr))
 # define qemu_ld_leuw    lduw_le_p(g2h(taddr))
@ -496,7 +495,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
 #if TCG_TARGET_REG_BITS == 32
         uint64_t v64;
 #endif
-        TCGMemOp memop;
+        TCGMemOpIdx oi;
 
 #if defined(GETPC)
         tci_tb_ptr = (uintptr_t)tb_ptr;
@ -1107,8 +1106,8 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
         case INDEX_op_qemu_ld_i32:
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
-            memop = tci_read_i(&tb_ptr);
-            switch (memop) {
+            oi = tci_read_i(&tb_ptr);
+            switch (get_memop(oi)) {
             case MO_UB:
                 tmp32 = qemu_ld_ub;
                 break;
@ -1144,8 +1143,8 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
                 t1 = *tb_ptr++;
             }
             taddr = tci_read_ulong(&tb_ptr);
-            memop = tci_read_i(&tb_ptr);
-            switch (memop) {
+            oi = tci_read_i(&tb_ptr);
+            switch (get_memop(oi)) {
             case MO_UB:
                 tmp64 = qemu_ld_ub;
                 break;
@ -1193,8 +1192,8 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
         case INDEX_op_qemu_st_i32:
             t0 = tci_read_r(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
-            memop = tci_read_i(&tb_ptr);
-            switch (memop) {
+            oi = tci_read_i(&tb_ptr);
+            switch (get_memop(oi)) {
             case MO_UB:
                 qemu_st_b(t0);
                 break;
@ -1217,8 +1216,8 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
         case INDEX_op_qemu_st_i64:
             tmp64 = tci_read_r64(&tb_ptr);
             taddr = tci_read_ulong(&tb_ptr);
-            memop = tci_read_i(&tb_ptr);
-            switch (memop) {
+            oi = tci_read_i(&tb_ptr);
+            switch (get_memop(oi)) {
             case MO_UB:
                 qemu_st_b(tmp64);
                 break;