
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch CPU helpers for qemu
 *
 * Copyright (c) 2024 Loongson Technology Corporation Limited
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internals.h"
#include "cpu-csr.h"
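/*
 * Translation flow implemented below: get_physical_address() first
 * honours direct-address mode (CRMD.DA), then the direct map windows
 * (CSR.DMW0..3), then falls back to a TLB lookup via
 * loongarch_tlb_search() and loongarch_map_tlb_entry().
 */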
static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical,
                                   int *prot, target_ulong address,
                                   int access_type, int index, int mmu_idx)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t plv = mmu_idx;
    uint64_t tlb_entry, tlb_ppn;
    uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv;

    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
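    /*
     * Each entry maps an adjacent even/odd page pair; bit PS of the
     * virtual address selects the half. E.g. with 16KiB pages
     * (tlb_ps = 14), bit 14 picks tlb_entry0 (even) or tlb_entry1 (odd).
     */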
    n = (address >> tlb_ps) & 0x1; /* Odd or even */

    tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0;
    tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V);
    tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D);
    tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV);
    if (is_la64(env)) {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN);
        tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX);
        tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR);
        tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV);
    } else {
        tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN);
        tlb_nx = 0;
        tlb_nr = 0;
        tlb_rplv = 0;
    }
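    /*
     * E.g. for tlb_ps = 14 (16KiB pages) the mask below is ~0x3: PPN
     * bits [1:0] map to address bits [13:12], which fall inside the
     * page and may carry software bits, so they must not leak into the
     * physical frame number.
     */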
    /* Remove software bits between bit 12 and bit PS */
    tlb_ppn = tlb_ppn & ~((0x1UL << (tlb_ps - 12)) - 1);

    /* Check access rights */
    if (!tlb_v) {
        return TLBRET_INVALID;
    }

    if (access_type == MMU_INST_FETCH && tlb_nx) {
        return TLBRET_XI;
    }

    if (access_type == MMU_DATA_LOAD && tlb_nr) {
        return TLBRET_RI;
    }

    if (((tlb_rplv == 0) && (plv > tlb_plv)) ||
        ((tlb_rplv == 1) && (plv != tlb_plv))) {
        return TLBRET_PE;
    }

    if ((access_type == MMU_DATA_STORE) && !tlb_d) {
        return TLBRET_DIRTY;
    }
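    /*
     * Compose the physical address: PPN shifted into place, plus the
     * low tlb_ps bits of the virtual address as the in-page offset.
     */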
    *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) |
                (address & MAKE_64BIT_MASK(0, tlb_ps));
    *prot = PAGE_READ;
    if (tlb_d) {
        *prot |= PAGE_WRITE;
    }
    if (!tlb_nx) {
        *prot |= PAGE_EXEC;
    }
    return TLBRET_MATCH;
}
/*
 * One TLB entry holds an adjacent odd/even page pair, so the VPN used
 * for comparison is the virtual page number divided by 2: bits [47:15]
 * of the address for a 16KiB page. The VPPN field in the TLB entry
 * holds bits [47:13] (virt_vpn = vaddr[47:13]), so it must be shifted
 * before comparing.
 */
bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr,
                          int *index)
{
    LoongArchTLB *tlb;
    uint16_t csr_asid, tlb_asid, stlb_idx;
    uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps;
    int i, compare_shift;
    uint64_t vpn, tlb_vppn;

    csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1);
    stlb_idx = vpn & 0xff; /* VA[22:15] <==> STLB set index for 16KiB page */
    compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
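    /*
     * Search the LOONGARCH_STLB entries as 256 sets of 8 ways, indexed
     * by stlb_idx. E.g. for 16KiB pages (stlb_ps = 14): vpn = vaddr >> 15
     * and compare_shift = 15 - 13 = 2, so tlb_vppn >> 2 yields
     * vaddr[47:15] for the comparison below.
     */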
    /* Search STLB */
    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb[i * 256 + stlb_idx];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i * 256 + stlb_idx;
                return true;
            }
        }
    }
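    /*
     * The MTLB is fully associative and each entry carries its own page
     * size, so vpn and compare_shift are recomputed per entry.
     */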
    /* Search MTLB */
    for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; ++i) {
        tlb = &env->tlb[i];
        tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);
        if (tlb_e) {
            tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
            tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
            compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;
            vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
            if ((tlb_g == 1 || tlb_asid == csr_asid) &&
                (vpn == (tlb_vppn >> compare_shift))) {
                *index = i;
                return true;
            }
        }
    }
    return false;
}
static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical,
                                 int *prot, target_ulong address,
                                 MMUAccessType access_type, int mmu_idx)
{
    int index, match;

    match = loongarch_tlb_search(env, address, &index);
    if (match) {
        return loongarch_map_tlb_entry(env, physical, prot,
                                       address, access_type, index, mmu_idx);
    }

    return TLBRET_NOMATCH;
}
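/*
 * A direct map window translates by substituting the VSEG part of the
 * virtual address: on LA64 the physical address is the VA with the tag
 * bits masked off; on LA32 the bits above R_CSR_DMW_32_VSEG_SHIFT are
 * replaced with the window's PSEG field.
 */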
static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va,
                        target_ulong dmw)
{
    if (is_la64(env)) {
        return va & TARGET_VIRT_MASK;
    } else {
        uint32_t pseg = FIELD_EX32(dmw, CSR_DMW_32, PSEG);
        return (va & MAKE_64BIT_MASK(0, R_CSR_DMW_32_VSEG_SHIFT)) |
               (pseg << R_CSR_DMW_32_VSEG_SHIFT);
    }
}
int get_physical_address(CPULoongArchState *env, hwaddr *physical,
                         int *prot, target_ulong address,
                         MMUAccessType access_type, int mmu_idx)
{
    int user_mode = mmu_idx == MMU_USER_IDX;
    int kernel_mode = mmu_idx == MMU_KERNEL_IDX;
    uint32_t plv, base_c, base_v;
    int64_t addr_high;
    uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA);
    uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG);
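    /*
     * CRMD.DA = 1 with CRMD.PG = 0 selects direct address translation:
     * the virtual address is used as the physical address unchanged.
     */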
    /* Check PG and DA */
    if (da & !pg) {
        *physical = address & TARGET_PHYS_MASK;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }
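    /*
     * Build a mask matching the DMW PLV enable bits: bit 0 (PLV0) for
     * kernel mode, bit R_CSR_DMW_PLV3_SHIFT (PLV3) for user mode. A
     * window matches only if its enable bit for the current PLV is set.
     */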
    plv = kernel_mode | (user_mode << R_CSR_DMW_PLV3_SHIFT);
    if (is_la64(env)) {
        base_v = address >> R_CSR_DMW_64_VSEG_SHIFT;
    } else {
        base_v = address >> R_CSR_DMW_32_VSEG_SHIFT;
    }
    /* Check direct map window */
    for (int i = 0; i < 4; i++) {
        if (is_la64(env)) {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_64, VSEG);
        } else {
            base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG);
        }
        if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) {
            *physical = dmw_va2pa(env, address, env->CSR_DMW[i]);
            *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            return TLBRET_MATCH;
        }
    }
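    /*
     * E.g. with TARGET_VIRT_ADDR_SPACE_BITS = 48, bits [63:48] must be
     * all zeros or all ones; otherwise the address is rejected before
     * the TLB is consulted.
     */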
    /* Check valid extension */
    addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16);
    if (!(addr_high == 0 || addr_high == -1)) {
        return TLBRET_BADADDR;
    }

    /* Mapped address */
    return loongarch_map_address(env, physical, prot, address,
                                 access_type, mmu_idx);
}
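/*
 * Debug helper (e.g. for gdbstub physical-address queries): returns the
 * translated physical address, or -1 if there is no valid mapping.
 */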
hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    CPULoongArchState *env = cpu_env(cs);
    hwaddr phys_addr;
    int prot;

    if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD,
                             cpu_mmu_index(cs, false)) != 0) {
        return -1;
    }
    return phys_addr;
}