
/*
 * MicroBlaze helper routines.
 *
 * Copyright (c) 2009 Edgar E. Iglesias <edgar.iglesias@gmail.com>
 * Copyright (c) 2009-2012 PetaLogix Qld Pty Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/host-utils.h"
#include "exec/log.h"

#ifndef CONFIG_USER_ONLY
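/*
 * Derive the "secure" memory attribute for an access: instruction fetches
 * go out on the instruction-side AXI port and everything else on the
 * data-side port, and each port is treated as secure unless the CPU's
 * corresponding ns_axi_* (non-secure) property is set.
 */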
static bool mb_cpu_access_is_secure(MicroBlazeCPU *cpu,
                                    MMUAccessType access_type)
{
    if (access_type == MMU_INST_FETCH) {
        return !cpu->ns_axi_ip;
    } else {
        return !cpu->ns_axi_dp;
    }
}

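/*
 * TCG tlb_fill hook: install a translation for 'address' or raise an MMU
 * fault.  Returns true once a page has been entered into the QEMU TLB,
 * returns false on a miss when 'probe' is set, and otherwise exits the
 * CPU loop with EXCP_MMU pending, so it does not return on a real fault.
 */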
bool mb_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                     MMUAccessType access_type, int mmu_idx,
                     bool probe, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    MicroBlazeMMULookup lu;
    unsigned int hit;
    int prot;
    MemTxAttrs attrs = {};

    attrs.secure = mb_cpu_access_is_secure(cpu, access_type);

    if (mmu_idx == MMU_NOMMU_IDX) {
        /* MMU disabled or not available. */
        address &= TARGET_PAGE_MASK;
        prot = PAGE_BITS;
        tlb_set_page_with_attrs(cs, address, address, attrs, prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    hit = mmu_translate(cpu, &lu, address, access_type, mmu_idx);
    if (likely(hit)) {
        uint32_t vaddr = address & TARGET_PAGE_MASK;
        uint32_t paddr = lu.paddr + vaddr - lu.vaddr;

        qemu_log_mask(CPU_LOG_MMU, "MMU map mmu=%d v=%x p=%x prot=%x\n",
                      mmu_idx, vaddr, paddr, lu.prot);
        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, lu.prot, mmu_idx,
                                TARGET_PAGE_SIZE);
        return true;
    }

    /* TLB miss. */
    if (probe) {
        return false;
    }

    qemu_log_mask(CPU_LOG_MMU, "mmu=%d miss v=%" VADDR_PRIx "\n",
                  mmu_idx, address);

    env->ear = address;
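    /*
     * Encode the fault cause in ESR.  The magic numbers follow the
     * MicroBlaze ESR exception-cause (EC) encoding: 16/17 are the
     * data/instruction storage (protection) faults and 18/19 the
     * data/instruction TLB misses; bit 10 is the "S" flag marking
     * a store access.
     */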
    switch (lu.err) {
    case ERR_PROT:
        env->esr = access_type == MMU_INST_FETCH ? 17 : 16;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    case ERR_MISS:
        env->esr = access_type == MMU_INST_FETCH ? 19 : 18;
        env->esr |= (access_type == MMU_DATA_STORE) << 10;
        break;
    default:
        abort();
    }

    if (cs->exception_index == EXCP_MMU) {
        cpu_abort(cs, "recursive faults\n");
    }

    /* TLB miss. */
    cs->exception_index = EXCP_MMU;
    cpu_loop_exit_restore(cs, retaddr);
}

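/*
 * Exception/interrupt entry.  Each cause saves its return address in the
 * register MicroBlaze reserves for it (r17 for hardware and MMU
 * exceptions, r14 for interrupts, r16 for breaks), sets the matching MSR
 * progress bit (EIP or BIP), and vectors to the fixed offset from
 * cfg.base_vectors.  For MMU faults inside a branch delay slot or imm
 * prefix, r17 is wound back so the whole sequence is re-executed.
 */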
void mb_cpu_do_interrupt(CPUState *cs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    CPUMBState *env = &cpu->env;
    uint32_t t, msr = mb_cpu_read_msr(env);
    bool set_esr;

    /* IMM flag cannot propagate across a branch and into the dslot. */
    assert((env->iflags & (D_FLAG | IMM_FLAG)) != (D_FLAG | IMM_FLAG));
    /* BIMM flag cannot be set without D_FLAG. */
    assert((env->iflags & (D_FLAG | BIMM_FLAG)) != BIMM_FLAG);
    /* RTI flags are private to translate. */
    assert(!(env->iflags & (DRTI_FLAG | DRTE_FLAG | DRTB_FLAG)));

    switch (cs->exception_index) {
    case EXCP_HW_EXCP:
        if (!(cpu->cfg.pvr_regs[0] & PVR0_USE_EXC_MASK)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Exception raised on system without exceptions!\n");
            return;
        }

        qemu_log_mask(CPU_LOG_INT,
                      "INT: HWE at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->regs[17] = env->pc + 4;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_MMU:
        qemu_log_mask(CPU_LOG_INT,
                      "INT: MMU at pc=%08x msr=%08x "
                      "ear=%" PRIx64 " iflags=%x\n",
                      env->pc, msr, env->ear, env->iflags);

        /* Exception breaks branch + dslot sequence? */
        set_esr = true;
        env->esr &= ~D_FLAG;
        if (env->iflags & D_FLAG) {
            env->esr |= D_FLAG;
            env->btr = env->btarget;
            /* Reexecute the branch. */
            env->regs[17] = env->pc - (env->iflags & BIMM_FLAG ? 8 : 4);
        } else if (env->iflags & IMM_FLAG) {
            /* Reexecute the imm. */
            env->regs[17] = env->pc - 4;
        } else {
            env->regs[17] = env->pc;
        }

        /* Exception in progress. */
        msr |= MSR_EIP;
        env->pc = cpu->cfg.base_vectors + 0x20;
        break;

    case EXCP_IRQ:
        assert(!(msr & (MSR_EIP | MSR_BIP)));
        assert(msr & MSR_IE);
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: DEV at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Disable interrupts. */
        msr &= ~MSR_IE;
        env->regs[14] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x10;
        break;

    case EXCP_HW_BREAK:
        assert(!(env->iflags & (D_FLAG | IMM_FLAG)));

        qemu_log_mask(CPU_LOG_INT,
                      "INT: BRK at pc=%08x msr=%08x iflags=%x\n",
                      env->pc, msr, env->iflags);
        set_esr = false;

        /* Break in progress. */
        msr |= MSR_BIP;
        env->regs[16] = env->pc;
        env->pc = cpu->cfg.base_vectors + 0x18;
        break;

    default:
        cpu_abort(cs, "unhandled exception type=%d\n", cs->exception_index);
        /* not reached */
    }

    /* Save previous mode, disable mmu, disable user-mode. */
    t = (msr & (MSR_VM | MSR_UM)) << 1;
    msr &= ~(MSR_VMS | MSR_UMS | MSR_VM | MSR_UM);
    msr |= t;
    mb_cpu_write_msr(env, msr);

    env->res_addr = RES_ADDR_NONE;
    env->iflags = 0;

    if (!set_esr) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x\n", env->pc, msr);
    } else if (env->esr & D_FLAG) {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x btr=%08x\n",
                      env->pc, msr, env->esr, env->btr);
    } else {
        qemu_log_mask(CPU_LOG_INT,
                      " to pc=%08x msr=%08x esr=%04x\n",
                      env->pc, msr, env->esr);
    }
}

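/*
 * Debug-side address translation used by the gdbstub and monitor.  It
 * walks the MMU without side effects and, unlike the tlb_fill path,
 * reports a miss by returning physical address 0 rather than raising
 * an exception.
 */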
hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
                                        MemTxAttrs *attrs)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    target_ulong vaddr, paddr = 0;
    MicroBlazeMMULookup lu;
    int mmu_idx = cpu_mmu_index(cs, false);
    unsigned int hit;

    /* Caller doesn't initialize */
    *attrs = (MemTxAttrs) {};
    attrs->secure = mb_cpu_access_is_secure(cpu, MMU_DATA_LOAD);

    if (mmu_idx != MMU_NOMMU_IDX) {
        hit = mmu_translate(cpu, &lu, addr, 0, 0);
        if (hit) {
            vaddr = addr & TARGET_PAGE_MASK;
            paddr = lu.paddr + vaddr - lu.vaddr;
        } else {
            paddr = 0; /* ???. */
        }
    } else {
        paddr = addr & TARGET_PAGE_MASK;
    }

    return paddr;
}

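/*
 * Poll for a pending hardware interrupt.  One is taken only when MSR.IE
 * is set, no exception or break is already in progress (EIP/BIP clear),
 * and the CPU is not midway through a delay-slot or imm-prefix sequence;
 * these are the same conditions the EXCP_IRQ case above asserts.
 */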
bool mb_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUMBState *env = cpu_env(cs);

    if ((interrupt_request & CPU_INTERRUPT_HARD)
        && (env->msr & MSR_IE)
        && !(env->msr & (MSR_EIP | MSR_BIP))
        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
        cs->exception_index = EXCP_IRQ;
        mb_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

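/*
 * Alignment-fault hook.  cpu_restore_state() first recovers the guest pc
 * and iflags of the faulting instruction; the ESS bits that the
 * translator stashed in iflags (access size, store flag and target
 * register, per the MicroBlaze ESR layout) are then folded into ESR so
 * the guest handler can decode the access, and EXCP_HW_EXCP is raised.
 */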
void mb_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                MMUAccessType access_type,
                                int mmu_idx, uintptr_t retaddr)
{
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs);
    uint32_t esr, iflags;

    /* Recover the pc and iflags from the corresponding insn_start. */
    cpu_restore_state(cs, retaddr);
    iflags = cpu->env.iflags;

    qemu_log_mask(CPU_LOG_INT,
                  "Unaligned access addr=" TARGET_FMT_lx " pc=%x iflags=%x\n",
                  (target_ulong)addr, cpu->env.pc, iflags);

    esr = ESR_EC_UNALIGNED_DATA;
    if (likely(iflags & ESR_ESS_FLAG)) {
        esr |= iflags & ESR_ESS_MASK;
    } else {
        qemu_log_mask(LOG_UNIMP, "Unaligned access without ESR_ESS_FLAG\n");
    }

    cpu->env.ear = addr;
    cpu->env.esr = esr;
    cs->exception_index = EXCP_HW_EXCP;
    cpu_loop_exit(cs);
}