
* Update to QEMU v9.0.0 --------- Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Fabiano Rosas <farosas@suse.de> Signed-off-by: Peter Xu <peterx@redhat.com> Signed-off-by: Thomas Huth <thuth@redhat.com> Signed-off-by: Cédric Le Goater <clg@redhat.com> Signed-off-by: Zheyu Ma <zheyuma97@gmail.com> Signed-off-by: Ido Plat <ido.plat@ibm.com> Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com> Signed-off-by: Markus Armbruster <armbru@redhat.com> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> Signed-off-by: David Hildenbrand <david@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com> Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com> Signed-off-by: Fiona Ebner <f.ebner@proxmox.com> Signed-off-by: Gregory Price <gregory.price@memverge.com> Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> Signed-off-by: Lorenz Brun <lorenz@brun.one> Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com> Signed-off-by: Arnaud Minier <arnaud.minier@telecom-paris.fr> Signed-off-by: Inès Varhol <ines.varhol@telecom-paris.fr> Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu> Signed-off-by: Igor Mammedov <imammedo@redhat.com> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Signed-off-by: Sven Schnelle <svens@stackframe.org> Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Helge Deller <deller@gmx.de> Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Benjamin Gray <bgray@linux.ibm.com> Signed-off-by: Avihai Horon <avihaih@nvidia.com> Signed-off-by: Michael Tokarev <mjt@tls.msk.ru> Signed-off-by: Joonas Kankaala 
<joonas.a.kankaala@gmail.com> Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> Signed-off-by: Stefan Weil <sw@weilnetz.de> Signed-off-by: Zhao Liu <zhao1.liu@intel.com> Signed-off-by: Glenn Miles <milesg@linux.ibm.com> Signed-off-by: Oleg Sviridov <oleg.sviridov@red-soft.ru> Signed-off-by: Artem Chernyshev <artem.chernyshev@red-soft.ru> Signed-off-by: Yajun Wu <yajunw@nvidia.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Signed-off-by: Pierre-Clément Tosi <ptosi@google.com> Signed-off-by: Lei Wang <lei4.wang@intel.com> Signed-off-by: Wei Wang <wei.w.wang@intel.com> Signed-off-by: Martin Hundebøll <martin@geanix.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org> Signed-off-by: Wafer <wafer@jaguarmicro.com> Signed-off-by: Yuxue Liu <yuxue.liu@jaguarmicro.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Signed-off-by: Nguyen Dinh Phi <phind.uet@gmail.com> Signed-off-by: Zack Buhman <zack@buhman.org> Signed-off-by: Keith Packard <keithp@keithp.com> Signed-off-by: Yuquan Wang wangyuquan1236@phytium.com.cn Signed-off-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com> Signed-off-by: Cindy Lu <lulu@redhat.com> Co-authored-by: Peter Maydell <peter.maydell@linaro.org> Co-authored-by: Fabiano Rosas <farosas@suse.de> Co-authored-by: Peter Xu <peterx@redhat.com> Co-authored-by: Thomas Huth <thuth@redhat.com> Co-authored-by: Cédric Le Goater <clg@redhat.com> Co-authored-by: Zheyu Ma <zheyuma97@gmail.com> Co-authored-by: Ido Plat <ido.plat@ibm.com> Co-authored-by: Ilya Leoshkevich <iii@linux.ibm.com> Co-authored-by: Markus Armbruster <armbru@redhat.com> Co-authored-by: Marc-André Lureau <marcandre.lureau@redhat.com> Co-authored-by: Paolo Bonzini <pbonzini@redhat.com> Co-authored-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> Co-authored-by: David Hildenbrand <david@redhat.com> 
Co-authored-by: Kevin Wolf <kwolf@redhat.com> Co-authored-by: Stefan Reiter <s.reiter@proxmox.com> Co-authored-by: Fiona Ebner <f.ebner@proxmox.com> Co-authored-by: Gregory Price <gregory.price@memverge.com> Co-authored-by: Lorenz Brun <lorenz@brun.one> Co-authored-by: Yao Xingtao <yaoxt.fnst@fujitsu.com> Co-authored-by: Philippe Mathieu-Daudé <philmd@linaro.org> Co-authored-by: Arnaud Minier <arnaud.minier@telecom-paris.fr> Co-authored-by: BALATON Zoltan <balaton@eik.bme.hu> Co-authored-by: Igor Mammedov <imammedo@redhat.com> Co-authored-by: Akihiko Odaki <akihiko.odaki@daynix.com> Co-authored-by: Richard Henderson <richard.henderson@linaro.org> Co-authored-by: Sven Schnelle <svens@stackframe.org> Co-authored-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> Co-authored-by: Helge Deller <deller@kernel.org> Co-authored-by: Harsh Prateek Bora <harshpb@linux.ibm.com> Co-authored-by: Benjamin Gray <bgray@linux.ibm.com> Co-authored-by: Nicholas Piggin <npiggin@gmail.com> Co-authored-by: Avihai Horon <avihaih@nvidia.com> Co-authored-by: Michael Tokarev <mjt@tls.msk.ru> Co-authored-by: Joonas Kankaala <joonas.a.kankaala@gmail.com> Co-authored-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> Co-authored-by: Stefan Weil <sw@weilnetz.de> Co-authored-by: Dayu Liu <liu.dayu@zte.com.cn> Co-authored-by: Zhao Liu <zhao1.liu@intel.com> Co-authored-by: Glenn Miles <milesg@linux.vnet.ibm.com> Co-authored-by: Artem Chernyshev <artem.chernyshev@red-soft.ru> Co-authored-by: Yajun Wu <yajunw@nvidia.com> Co-authored-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Co-authored-by: Pierre-Clément Tosi <ptosi@google.com> Co-authored-by: Wei Wang <wei.w.wang@intel.com> Co-authored-by: Martin Hundebøll <martin@geanix.com> Co-authored-by: Michael S. 
Tsirkin <mst@redhat.com> Co-authored-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org> Co-authored-by: Wafer <wafer@jaguarmicro.com> Co-authored-by: lyx634449800 <yuxue.liu@jaguarmicro.com> Co-authored-by: Gerd Hoffmann <kraxel@redhat.com> Co-authored-by: Nguyen Dinh Phi <phind.uet@gmail.com> Co-authored-by: Zack Buhman <zack@buhman.org> Co-authored-by: Keith Packard <keithp@keithp.com> Co-authored-by: Yuquan Wang <wangyuquan1236@phytium.com.cn> Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com> Co-authored-by: Cindy Lu <lulu@redhat.com>
618 lines
20 KiB
C
618 lines
20 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* QEMU LoongArch TLB helpers
|
|
*
|
|
* Copyright (c) 2021 Loongson Technology Corporation Limited
|
|
*
|
|
*/
|
|
|
|
#include "qemu/osdep.h"
|
|
#include "qemu/guest-random.h"
|
|
|
|
#include "cpu.h"
|
|
#include "internals.h"
|
|
#include "exec/helper-proto.h"
|
|
#include "exec/exec-all.h"
|
|
#include "exec/cpu_ldst.h"
|
|
#include "exec/log.h"
|
|
#include "cpu-csr.h"
|
|
|
|
/*
 * Look up the page-walk directory base/width configuration for @level.
 *
 * Levels 1 and 2 come from CSR.PWCL, levels 3 and 4 from CSR.PWCH.
 * Any other value (level 0 is passed by the LDPTE path) falls back to
 * the last-level page-table base/width fields of CSR.PWCL.
 */
static void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base,
                               uint64_t *dir_width, target_ulong level)
{
    if (level == 1) {
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR1_WIDTH);
    } else if (level == 2) {
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, DIR2_WIDTH);
    } else if (level == 3) {
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR3_WIDTH);
    } else if (level == 4) {
        *dir_base = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_BASE);
        *dir_width = FIELD_EX64(env->CSR_PWCH, CSR_PWCH, DIR4_WIDTH);
    } else {
        /* level may be zero for ldpte */
        *dir_base = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
        *dir_width = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    }
}
|
|
|
|
static void raise_mmu_exception(CPULoongArchState *env, target_ulong address,
|
|
MMUAccessType access_type, int tlb_error)
|
|
{
|
|
CPUState *cs = env_cpu(env);
|
|
|
|
switch (tlb_error) {
|
|
default:
|
|
case TLBRET_BADADDR:
|
|
cs->exception_index = access_type == MMU_INST_FETCH
|
|
? EXCCODE_ADEF : EXCCODE_ADEM;
|
|
break;
|
|
case TLBRET_NOMATCH:
|
|
/* No TLB match for a mapped address */
|
|
if (access_type == MMU_DATA_LOAD) {
|
|
cs->exception_index = EXCCODE_PIL;
|
|
} else if (access_type == MMU_DATA_STORE) {
|
|
cs->exception_index = EXCCODE_PIS;
|
|
} else if (access_type == MMU_INST_FETCH) {
|
|
cs->exception_index = EXCCODE_PIF;
|
|
}
|
|
env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR, 1);
|
|
break;
|
|
case TLBRET_INVALID:
|
|
/* TLB match with no valid bit */
|
|
if (access_type == MMU_DATA_LOAD) {
|
|
cs->exception_index = EXCCODE_PIL;
|
|
} else if (access_type == MMU_DATA_STORE) {
|
|
cs->exception_index = EXCCODE_PIS;
|
|
} else if (access_type == MMU_INST_FETCH) {
|
|
cs->exception_index = EXCCODE_PIF;
|
|
}
|
|
break;
|
|
case TLBRET_DIRTY:
|
|
/* TLB match but 'D' bit is cleared */
|
|
cs->exception_index = EXCCODE_PME;
|
|
break;
|
|
case TLBRET_XI:
|
|
/* Execute-Inhibit Exception */
|
|
cs->exception_index = EXCCODE_PNX;
|
|
break;
|
|
case TLBRET_RI:
|
|
/* Read-Inhibit Exception */
|
|
cs->exception_index = EXCCODE_PNR;
|
|
break;
|
|
case TLBRET_PE:
|
|
/* Privileged Exception */
|
|
cs->exception_index = EXCCODE_PPI;
|
|
break;
|
|
}
|
|
|
|
if (tlb_error == TLBRET_NOMATCH) {
|
|
env->CSR_TLBRBADV = address;
|
|
if (is_la64(env)) {
|
|
env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_64,
|
|
VPPN, extract64(address, 13, 35));
|
|
} else {
|
|
env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI_32,
|
|
VPPN, extract64(address, 13, 19));
|
|
}
|
|
} else {
|
|
if (!FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) {
|
|
env->CSR_BADV = address;
|
|
}
|
|
env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
|
|
}
|
|
}
|
|
|
|
/*
 * Flush QEMU's cached translations for the even/odd pair of guest
 * pages described by TLB entry @index.  Each half is flushed only if
 * its valid bit is set.
 */
static void invalidate_tlb_entry(CPULoongArchState *env, int index)
{
    target_ulong addr, mask, pagesize;
    uint8_t tlb_ps;
    LoongArchTLB *tlb = &env->tlb[index];

    int mmu_idx = cpu_mmu_index(env_cpu(env), false);
    uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V);
    uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V);
    uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);

    /* MTLB entries carry a per-entry PS; STLB entries share CSR_STLBPS */
    if (index >= LOONGARCH_STLB) {
        tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
    } else {
        tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    }
    pagesize = MAKE_64BIT_MASK(tlb_ps, 1);   /* size of one page */
    mask = MAKE_64BIT_MASK(0, tlb_ps + 1);   /* span of the page pair */

    if (tlb_v0) {
        /* Even page: the base address of the pair */
        addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask;
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }

    if (tlb_v1) {
        /*
         * Odd page: pair base plus one page.  Note the previous
         * "(vppn << shift) & pagesize" reduced to either 0 or pagesize,
         * discarding the upper address bits, so the flush targeted the
         * wrong range and stale odd-page translations could survive.
         */
        addr = ((tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask) | pagesize;
        tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize,
                                  mmu_idx, TARGET_LONG_BITS);
    }
}
|
|
|
|
/*
 * Flush cached translations for TLB entry @index, but only when the
 * entry is visible to the current context: global entries always are,
 * non-global entries only when their ASID matches CSR_ASID.
 */
static void invalidate_tlb(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint16_t csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
    uint16_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

    if (tlb_g || tlb_asid == csr_asid) {
        invalidate_tlb_entry(env, index);
    }
}
|
|
|
|
/*
 * Write TLB entry @index from the CSR state.  When a TLB refill is in
 * progress (CSR_TLBRERA.ISTLBR set) the TLBR* CSRs are the source;
 * otherwise the regular TLBEHI/TLBELO/TLBIDX CSRs are used.
 */
static void fill_tlb_entry(CPULoongArchState *env, int index)
{
    LoongArchTLB *tlb = &env->tlb[index];
    uint64_t entry_lo0, entry_lo1, vppn;
    uint16_t asid;
    uint8_t ps;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        ps = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
        vppn = is_la64(env)
               ? FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_64, VPPN)
               : FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI_32, VPPN);
        entry_lo0 = env->CSR_TLBRELO0;
        entry_lo1 = env->CSR_TLBRELO1;
    } else {
        ps = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
        vppn = is_la64(env)
               ? FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_64, VPPN)
               : FIELD_EX64(env->CSR_TLBEHI, CSR_TLBEHI_32, VPPN);
        entry_lo0 = env->CSR_TLBELO0;
        entry_lo1 = env->CSR_TLBELO1;
    }

    if (ps == 0) {
        qemu_log_mask(CPU_LOG_MMU, "page size is 0\n");
    }

    /* Only MTLB has the ps fields */
    if (index >= LOONGARCH_STLB) {
        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, ps);
    }

    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, vppn);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1);
    asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, ASID, asid);

    tlb->tlb_entry0 = entry_lo0;
    tlb->tlb_entry1 = entry_lo1;
}
|
|
|
|
/* Return an random value between low and high */
|
|
static uint32_t get_random_tlb(uint32_t low, uint32_t high)
|
|
{
|
|
uint32_t val;
|
|
|
|
qemu_guest_getrandom_nofail(&val, sizeof(val));
|
|
return val % (high - low + 1) + low;
|
|
}
|
|
|
|
/*
 * TLBSRCH: search the TLB for the EHI value and report the result in
 * CSR_TLBIDX (INDEX on a hit, NE=1 on a miss).  During a TLB refill
 * the search key comes from CSR_TLBREHI instead of CSR_TLBEHI.
 */
void helper_tlbsrch(CPULoongArchState *env)
{
    uint64_t ehi;
    int index;

    ehi = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)
          ? env->CSR_TLBREHI : env->CSR_TLBEHI;

    if (loongarch_tlb_search(env, ehi, &index)) {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX, index);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
    } else {
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
    }
}
|
|
|
|
/*
 * TLBRD: read the TLB entry selected by CSR_TLBIDX.INDEX back into the
 * TLBEHI/TLBELO/ASID CSRs.  An entry with E clear zeroes those CSRs
 * and reports NE=1 instead.
 */
void helper_tlbrd(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    LoongArchTLB *tlb = &env->tlb[index];
    uint8_t tlb_ps, tlb_e;

    /* MTLB entries carry a per-entry PS; STLB entries share CSR_STLBPS */
    tlb_ps = index >= LOONGARCH_STLB
             ? FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS)
             : FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
    tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E);

    if (!tlb_e) {
        /* Invalid entry: flag NE and clear the read-back CSRs */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 1);
        env->CSR_ASID = FIELD_DP64(env->CSR_ASID, CSR_ASID, ASID, 0);
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, PS, 0);
    } else {
        /* Valid entry: publish PS, VPPN and both entry-lo words */
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX, NE, 0);
        env->CSR_TLBIDX = FIELD_DP64(env->CSR_TLBIDX, CSR_TLBIDX,
                                     PS, (tlb_ps & 0x3f));
        env->CSR_TLBEHI = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN) <<
                          R_TLB_MISC_VPPN_SHIFT;
        env->CSR_TLBELO0 = tlb->tlb_entry0;
        env->CSR_TLBELO1 = tlb->tlb_entry1;
    }
}
|
|
|
|
/*
 * TLBWR: write the CSR state into the entry selected by
 * CSR_TLBIDX.INDEX.  With NE set the entry is simply invalidated.
 */
void helper_tlbwr(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);

    /* Drop cached translations for the entry being overwritten */
    invalidate_tlb(env, index);

    if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) {
        LoongArchTLB *tlb = &env->tlb[index];

        tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        return;
    }

    fill_tlb_entry(env, index);
}
|
|
|
|
/*
 * TLBFILL: write the CSR state into a TLB entry of the emulator's
 * choosing.  Pages whose size matches CSR_STLBPS go into the STLB
 * (random way, index derived from the address); all others go into a
 * random MTLB slot.
 */
void helper_tlbfill(CPULoongArchState *env)
{
    uint64_t entryhi;
    uint16_t pagesize, stlb_ps;
    int index;

    if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) {
        entryhi = env->CSR_TLBREHI;
        pagesize = FIELD_EX64(env->CSR_TLBREHI, CSR_TLBREHI, PS);
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, PS);
    }

    stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);

    if (pagesize == stlb_ps) {
        uint64_t address;
        int set, line;

        /* Keep only the VPPN bits of the address */
        address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT);

        /* Choose one of the 8 ways at random */
        set = get_random_tlb(0, 7);

        /* Line within the chosen way, in [0, 255] */
        line = (address >> (stlb_ps + 1)) & 0xff;

        index = set * 256 + line;
    } else {
        /* Page size differs from the STLB's: must use the MTLB */
        index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1);
    }

    invalidate_tlb(env, index);
    fill_tlb_entry(env, index);
}
|
|
|
|
/*
 * TLBCLR: invalidate non-global entries matching the current ASID.
 * An STLB index clears one line across all 8 ways; an MTLB index
 * clears the whole MTLB.  QEMU's translation cache is then flushed.
 */
void helper_tlbclr(CPULoongArchState *env)
{
    uint16_t csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID);
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    int i;

    if (index < LOONGARCH_STLB) {
        /* STLB: the indexed line in each of the 8 ways */
        for (i = 0; i < 8; i++) {
            LoongArchTLB *tlb = &env->tlb[i * 256 + (index % 256)];
            uint16_t entry_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            uint16_t entry_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if (!entry_g && entry_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* MTLB: every entry */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            LoongArchTLB *tlb = &env->tlb[i];
            uint16_t entry_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
            uint16_t entry_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);

            if (!entry_g && entry_asid == csr_asid) {
                tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
            }
        }
    }

    tlb_flush(env_cpu(env));
}
|
|
|
|
/*
 * TLBFLUSH: invalidate entries unconditionally (no ASID/G check).
 * An STLB index clears one line across all 8 ways; an MTLB index
 * clears the whole MTLB.  QEMU's translation cache is then flushed.
 */
void helper_tlbflush(CPULoongArchState *env)
{
    int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX);
    int i;

    if (index < LOONGARCH_STLB) {
        /* STLB: the indexed line in each of the 8 ways */
        for (i = 0; i < 8; i++) {
            LoongArchTLB *tlb = &env->tlb[i * 256 + (index % 256)];

            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    } else if (index < LOONGARCH_TLB_MAX) {
        /* MTLB: every entry */
        for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) {
            LoongArchTLB *tlb = &env->tlb[i];

            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }

    tlb_flush(env_cpu(env));
}
|
|
|
|
/* INVTLB op 0/1: invalidate every TLB entry and flush translations. */
void helper_invtlb_all(CPULoongArchState *env)
{
    int i;

    for (i = 0; i < LOONGARCH_TLB_MAX; i++) {
        env->tlb[i].tlb_misc = FIELD_DP64(env->tlb[i].tlb_misc,
                                          TLB_MISC, E, 0);
    }
    tlb_flush(env_cpu(env));
}
|
|
|
|
/* INVTLB op 2/3: invalidate every entry whose G bit equals @g. */
void helper_invtlb_all_g(CPULoongArchState *env, uint32_t g)
{
    int i;

    for (i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];

        if (FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G) == g) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}
|
|
|
|
/* INVTLB op 4: invalidate all non-global entries belonging to @asid. */
void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info)
{
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;
    int i;

    for (i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t entry_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t entry_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);

        if (!entry_g && entry_asid == asid) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}
|
|
|
|
/*
 * INVTLB: invalidate the non-global entry matching both @asid and the
 * page containing @addr, then flush translations.
 */
void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info,
                             target_ulong addr)
{
    /* Use the named field mask instead of magic 0x3ff (same 10-bit ASID) */
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        /* MTLB entries carry a per-entry PS; STLB entries use CSR_STLBPS */
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        /* NOTE(review): assumes tlb_ps + 1 >= VPPN shift — confirm */
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if (!tlb_g && (tlb_asid == asid) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}
|
|
|
|
/*
 * INVTLB: invalidate the entry for the page containing @addr when it
 * is either global or belongs to @asid, then flush translations.
 */
void helper_invtlb_page_asid_or_g(CPULoongArchState *env,
                                  target_ulong info, target_ulong addr)
{
    /* Use the named field mask instead of magic 0x3ff (same 10-bit ASID) */
    uint16_t asid = info & R_CSR_ASID_ASID_MASK;

    for (int i = 0; i < LOONGARCH_TLB_MAX; i++) {
        LoongArchTLB *tlb = &env->tlb[i];
        uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G);
        uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID);
        uint64_t vpn, tlb_vppn;
        uint8_t tlb_ps, compare_shift;

        /* MTLB entries carry a per-entry PS; STLB entries use CSR_STLBPS */
        if (i >= LOONGARCH_STLB) {
            tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS);
        } else {
            tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS);
        }
        tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN);
        vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1);
        /* NOTE(review): assumes tlb_ps + 1 >= VPPN shift — confirm */
        compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT;

        if ((tlb_g || (tlb_asid == asid)) &&
            (vpn == (tlb_vppn >> compare_shift))) {
            tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0);
        }
    }
    tlb_flush(env_cpu(env));
}
|
|
|
|
bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|
MMUAccessType access_type, int mmu_idx,
|
|
bool probe, uintptr_t retaddr)
|
|
{
|
|
CPULoongArchState *env = cpu_env(cs);
|
|
hwaddr physical;
|
|
int prot;
|
|
int ret;
|
|
|
|
/* Data access */
|
|
ret = get_physical_address(env, &physical, &prot, address,
|
|
access_type, mmu_idx);
|
|
|
|
if (ret == TLBRET_MATCH) {
|
|
tlb_set_page(cs, address & TARGET_PAGE_MASK,
|
|
physical & TARGET_PAGE_MASK, prot,
|
|
mmu_idx, TARGET_PAGE_SIZE);
|
|
qemu_log_mask(CPU_LOG_MMU,
|
|
"%s address=%" VADDR_PRIx " physical " HWADDR_FMT_plx
|
|
" prot %d\n", __func__, address, physical, prot);
|
|
return true;
|
|
} else {
|
|
qemu_log_mask(CPU_LOG_MMU,
|
|
"%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
|
|
ret);
|
|
}
|
|
if (probe) {
|
|
return false;
|
|
}
|
|
raise_mmu_exception(env, address, access_type, ret);
|
|
cpu_loop_exit_restore(cs, retaddr);
|
|
}
|
|
|
|
/*
 * LDDIR: walk one directory level of the page table for the address in
 * CSR_TLBRBADV and return the next-level entry loaded from physical
 * memory.  A huge-page leaf is returned as-is (tagged with the level it
 * was found at).  @mem_idx is unreferenced here.
 */
target_ulong helper_lddir(CPULoongArchState *env, target_ulong base,
                          target_ulong level, uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong badvaddr, index, phys, ret;
    int shift;
    uint64_t dir_base, dir_width;

    /* Only directory levels 1-4 are valid */
    if (unlikely((level == 0) || (level > 4))) {
        /* Fix typo in guest-error message: "Attepted" -> "Attempted" */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Attempted LDDIR with level %"PRId64"\n", level);
        return base;
    }

    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        if (unlikely(level == 4)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Attempted use of level 4 huge page\n");
        }

        /* Record the level the huge page was found at, only once */
        if (FIELD_EX64(base, TLBENTRY, LEVEL)) {
            return base;
        } else {
            return FIELD_DP64(base, TLBENTRY, LEVEL, level);
        }
    }

    badvaddr = env->CSR_TLBRBADV;
    base = base & TARGET_PHYS_MASK;

    /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
    shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
    shift = (shift + 1) * 3;

    get_dir_base_width(env, &dir_base, &dir_width, level);
    /* 1ULL: avoid UB if the configured width reaches int's shift limit */
    index = (badvaddr >> dir_base) & ((1ULL << dir_width) - 1);
    phys = base | index << shift;
    ret = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
    return ret;
}
|
|
|
|
/*
 * LDPTE: load the even/odd PTE selected by @odd into CSR_TLBRELO0/1
 * for the refill address in CSR_TLBRBADV, and record the resulting
 * page size in CSR_TLBREHI.PS.  @mem_idx is unreferenced here.
 */
void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
    CPUState *cs = env_cpu(env);
    target_ulong phys, tmp0, ptindex, ptoffset0, ptoffset1, ps, badv;
    int shift;
    uint64_t ptbase = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTBASE);
    uint64_t ptwidth = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTWIDTH);
    uint64_t dir_base, dir_width;

    /*
     * The parameter "base" has only two types:
     * either a page table base address (its HUGE bit, bit 6, is 0),
     * or a huge page entry (its HUGE bit is 1).
     */
    base = base & TARGET_PHYS_MASK;
    if (FIELD_EX64(base, TLBENTRY, HUGE)) {
        /*
         * Huge-page leaf: derive the page size from the directory level
         * it was found at, then clear the LEVEL and HUGE bookkeeping
         * bits and move the HGLOBAL bit into the architectural G bit.
         */
        get_dir_base_width(env, &dir_base, &dir_width,
                           FIELD_EX64(base, TLBENTRY, LEVEL));

        base = FIELD_DP64(base, TLBENTRY, LEVEL, 0);
        base = FIELD_DP64(base, TLBENTRY, HUGE, 0);
        if (FIELD_EX64(base, TLBENTRY, HGLOBAL)) {
            base = FIELD_DP64(base, TLBENTRY, HGLOBAL, 0);
            base = FIELD_DP64(base, TLBENTRY, G, 1);
        }

        /* ps is one less than the full huge-page width: see below */
        ps = dir_base + dir_width - 1;
        /*
         * Huge pages are evenly split into parity pages
         * when loaded into the tlb,
         * so the tlb page size needs to be divided by 2.
         */
        tmp0 = base;
        if (odd) {
            /* Odd half starts one (half-)page above the even half */
            tmp0 += MAKE_64BIT_MASK(ps, 1);
        }
    } else {
        /* 0:64bit, 1:128bit, 2:192bit, 3:256bit */
        shift = FIELD_EX64(env->CSR_PWCL, CSR_PWCL, PTEWIDTH);
        shift = (shift + 1) * 3;
        badv = env->CSR_TLBRBADV;

        /*
         * NOTE(review): "(1 << ptwidth)" is an int shift — UB if the
         * configured PTWIDTH ever reaches 31; confirm the field's range.
         */
        ptindex = (badv >> ptbase) & ((1 << ptwidth) - 1);
        ptindex = ptindex & ~0x1; /* clear bit 0 to address the even PTE */
        ptoffset0 = ptindex << shift;
        ptoffset1 = (ptindex + 1) << shift;

        /* Load the requested half of the even/odd PTE pair */
        phys = base | (odd ? ptoffset1 : ptoffset0);
        tmp0 = ldq_phys(cs->as, phys) & TARGET_PHYS_MASK;
        ps = ptbase;
    }

    if (odd) {
        env->CSR_TLBRELO1 = tmp0;
    } else {
        env->CSR_TLBRELO0 = tmp0;
    }
    env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps);
}
|