
* Update to QEMU v9.0.0 --------- Signed-off-by: Peter Maydell <peter.maydell@linaro.org> Signed-off-by: Fabiano Rosas <farosas@suse.de> Signed-off-by: Peter Xu <peterx@redhat.com> Signed-off-by: Thomas Huth <thuth@redhat.com> Signed-off-by: Cédric Le Goater <clg@redhat.com> Signed-off-by: Zheyu Ma <zheyuma97@gmail.com> Signed-off-by: Ido Plat <ido.plat@ibm.com> Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com> Signed-off-by: Markus Armbruster <armbru@redhat.com> Signed-off-by: Marc-André Lureau <marcandre.lureau@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> Signed-off-by: David Hildenbrand <david@redhat.com> Signed-off-by: Kevin Wolf <kwolf@redhat.com> Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com> Signed-off-by: Fiona Ebner <f.ebner@proxmox.com> Signed-off-by: Gregory Price <gregory.price@memverge.com> Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org> Signed-off-by: Lorenz Brun <lorenz@brun.one> Signed-off-by: Yao Xingtao <yaoxt.fnst@fujitsu.com> Signed-off-by: Arnaud Minier <arnaud.minier@telecom-paris.fr> Signed-off-by: Inès Varhol <ines.varhol@telecom-paris.fr> Signed-off-by: BALATON Zoltan <balaton@eik.bme.hu> Signed-off-by: Igor Mammedov <imammedo@redhat.com> Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com> Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Signed-off-by: Sven Schnelle <svens@stackframe.org> Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> Signed-off-by: Christian Schoenebeck <qemu_oss@crudebyte.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Helge Deller <deller@gmx.de> Signed-off-by: Nicholas Piggin <npiggin@gmail.com> Signed-off-by: Benjamin Gray <bgray@linux.ibm.com> Signed-off-by: Avihai Horon <avihaih@nvidia.com> Signed-off-by: Michael Tokarev <mjt@tls.msk.ru> Signed-off-by: Joonas Kankaala 
<joonas.a.kankaala@gmail.com> Signed-off-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> Signed-off-by: Stefan Weil <sw@weilnetz.de> Signed-off-by: Zhao Liu <zhao1.liu@intel.com> Signed-off-by: Glenn Miles <milesg@linux.ibm.com> Signed-off-by: Oleg Sviridov <oleg.sviridov@red-soft.ru> Signed-off-by: Artem Chernyshev <artem.chernyshev@red-soft.ru> Signed-off-by: Yajun Wu <yajunw@nvidia.com> Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com> Signed-off-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Signed-off-by: Pierre-Clément Tosi <ptosi@google.com> Signed-off-by: Lei Wang <lei4.wang@intel.com> Signed-off-by: Wei Wang <wei.w.wang@intel.com> Signed-off-by: Martin Hundebøll <martin@geanix.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com> Signed-off-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org> Signed-off-by: Wafer <wafer@jaguarmicro.com> Signed-off-by: Yuxue Liu <yuxue.liu@jaguarmicro.com> Signed-off-by: Gerd Hoffmann <kraxel@redhat.com> Signed-off-by: Nguyen Dinh Phi <phind.uet@gmail.com> Signed-off-by: Zack Buhman <zack@buhman.org> Signed-off-by: Keith Packard <keithp@keithp.com> Signed-off-by: Yuquan Wang wangyuquan1236@phytium.com.cn Signed-off-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com> Signed-off-by: Cindy Lu <lulu@redhat.com> Co-authored-by: Peter Maydell <peter.maydell@linaro.org> Co-authored-by: Fabiano Rosas <farosas@suse.de> Co-authored-by: Peter Xu <peterx@redhat.com> Co-authored-by: Thomas Huth <thuth@redhat.com> Co-authored-by: Cédric Le Goater <clg@redhat.com> Co-authored-by: Zheyu Ma <zheyuma97@gmail.com> Co-authored-by: Ido Plat <ido.plat@ibm.com> Co-authored-by: Ilya Leoshkevich <iii@linux.ibm.com> Co-authored-by: Markus Armbruster <armbru@redhat.com> Co-authored-by: Marc-André Lureau <marcandre.lureau@redhat.com> Co-authored-by: Paolo Bonzini <pbonzini@redhat.com> Co-authored-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru> Co-authored-by: David Hildenbrand <david@redhat.com> 
Co-authored-by: Kevin Wolf <kwolf@redhat.com> Co-authored-by: Stefan Reiter <s.reiter@proxmox.com> Co-authored-by: Fiona Ebner <f.ebner@proxmox.com> Co-authored-by: Gregory Price <gregory.price@memverge.com> Co-authored-by: Lorenz Brun <lorenz@brun.one> Co-authored-by: Yao Xingtao <yaoxt.fnst@fujitsu.com> Co-authored-by: Philippe Mathieu-Daudé <philmd@linaro.org> Co-authored-by: Arnaud Minier <arnaud.minier@telecom-paris.fr> Co-authored-by: BALATON Zoltan <balaton@eik.bme.hu> Co-authored-by: Igor Mammedov <imammedo@redhat.com> Co-authored-by: Akihiko Odaki <akihiko.odaki@daynix.com> Co-authored-by: Richard Henderson <richard.henderson@linaro.org> Co-authored-by: Sven Schnelle <svens@stackframe.org> Co-authored-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com> Co-authored-by: Helge Deller <deller@kernel.org> Co-authored-by: Harsh Prateek Bora <harshpb@linux.ibm.com> Co-authored-by: Benjamin Gray <bgray@linux.ibm.com> Co-authored-by: Nicholas Piggin <npiggin@gmail.com> Co-authored-by: Avihai Horon <avihaih@nvidia.com> Co-authored-by: Michael Tokarev <mjt@tls.msk.ru> Co-authored-by: Joonas Kankaala <joonas.a.kankaala@gmail.com> Co-authored-by: Marcin Juszkiewicz <marcin.juszkiewicz@linaro.org> Co-authored-by: Stefan Weil <sw@weilnetz.de> Co-authored-by: Dayu Liu <liu.dayu@zte.com.cn> Co-authored-by: Zhao Liu <zhao1.liu@intel.com> Co-authored-by: Glenn Miles <milesg@linux.vnet.ibm.com> Co-authored-by: Artem Chernyshev <artem.chernyshev@red-soft.ru> Co-authored-by: Yajun Wu <yajunw@nvidia.com> Co-authored-by: Mark Cave-Ayland <mark.cave-ayland@ilande.co.uk> Co-authored-by: Pierre-Clément Tosi <ptosi@google.com> Co-authored-by: Wei Wang <wei.w.wang@intel.com> Co-authored-by: Martin Hundebøll <martin@geanix.com> Co-authored-by: Michael S. 
Tsirkin <mst@redhat.com> Co-authored-by: Manos Pitsidianakis <manos.pitsidianakis@linaro.org> Co-authored-by: Wafer <wafer@jaguarmicro.com> Co-authored-by: lyx634449800 <yuxue.liu@jaguarmicro.com> Co-authored-by: Gerd Hoffmann <kraxel@redhat.com> Co-authored-by: Nguyen Dinh Phi <phind.uet@gmail.com> Co-authored-by: Zack Buhman <zack@buhman.org> Co-authored-by: Keith Packard <keithp@keithp.com> Co-authored-by: Yuquan Wang <wangyuquan1236@phytium.com.cn> Co-authored-by: Matheus Tavares Bernardino <quic_mathbern@quicinc.com> Co-authored-by: Cindy Lu <lulu@redhat.com>
455 lines
14 KiB
C
455 lines
14 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * LoongArch emulation of Linux signals
 *
 * Copyright (c) 2021 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "qemu.h"
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/loongarch/internals.h"
#include "target/loongarch/vec.h"
/* Provides the sizeof_* / offsetof_* constants checked by the build asserts. */
#include "vdso-asmoffset.h"

/* FP context was used */
#define SC_USED_FP (1 << 0)
/*
 * Guest-visible sigcontext; the layout must match the Linux kernel's
 * LoongArch struct sigcontext (verified by the build asserts below).
 */
struct target_sigcontext {
    abi_ulong sc_pc;          /* program counter at signal delivery */
    abi_ulong sc_regs[32];    /* general-purpose registers r0..r31 */
    abi_uint sc_flags;        /* SC_USED_FP etc. */
    /* Variable-length extension records are appended here, 16-aligned. */
    abi_ulong sc_extcontext[0] QEMU_ALIGNED(16);
};
/* Keep this layout in lock-step with the offsets baked into the vDSO. */
QEMU_BUILD_BUG_ON(sizeof(struct target_sigcontext) != sizeof_sigcontext);
QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_pc)
                  != offsetof_sigcontext_pc);
QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_regs)
                  != offsetof_sigcontext_gr);
/* Scalar FP extension record ("FPU\1" magic), kernel-ABI layout. */
#define FPU_CTX_MAGIC 0x46505501
#define FPU_CTX_ALIGN 8
struct target_fpu_context {
    abi_ulong regs[32];   /* f0..f31, one 64-bit word each */
    abi_ulong fcc;        /* all condition-code registers packed in one word */
    abi_uint fcsr;        /* FP control and status register */
} QEMU_ALIGNED(FPU_CTX_ALIGN);

QEMU_BUILD_BUG_ON(offsetof(struct target_fpu_context, regs)
                  != offsetof_fpucontext_fr);
/* LSX (128-bit SIMD) extension record, kernel-ABI layout. */
#define LSX_CTX_MAGIC 0x53580001
#define LSX_CTX_ALIGN 16
struct target_lsx_context {
    abi_ulong regs[2 * 32];   /* 32 vector registers, two 64-bit words each */
    abi_ulong fcc;            /* packed condition-code registers */
    abi_uint fcsr;            /* FP control and status register */
} QEMU_ALIGNED(LSX_CTX_ALIGN);
/* LASX (256-bit SIMD) extension record, kernel-ABI layout. */
#define LASX_CTX_MAGIC 0x41535801
#define LASX_CTX_ALIGN 32
struct target_lasx_context {
    abi_ulong regs[4 * 32];   /* 32 vector registers, four 64-bit words each */
    abi_ulong fcc;            /* packed condition-code registers */
    abi_uint fcsr;            /* FP control and status register */
} QEMU_ALIGNED(LASX_CTX_ALIGN);
/*
 * Header preceding every extension record in sc_extcontext.
 * A magic of 0 terminates the list.
 */
#define CONTEXT_INFO_ALIGN 16
struct target_sctx_info {
    abi_uint magic;     /* FPU/LSX/LASX_CTX_MAGIC, or 0 for the end record */
    abi_uint size;      /* total record size, header and padding included */
    abi_ulong padding;
} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct target_sctx_info) != sizeof_sctx_info);
/*
 * Guest ucontext as pushed on the signal stack; mirrors the kernel's
 * LoongArch struct ucontext.
 */
struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ptr tuc_link;
    target_stack_t tuc_stack;
    target_sigset_t tuc_sigmask;
    /* Pad the sigmask area out to 1024 bits, as the kernel ABI reserves. */
    uint8_t __unused[1024 / 8 - sizeof(target_sigset_t)];
    /* Must be last: extension records follow the enclosing frame. */
    struct target_sigcontext tuc_mcontext;
};
/* The complete rt signal frame placed on the guest stack. */
struct target_rt_sigframe {
    struct target_siginfo rs_info;
    struct target_ucontext rs_uc;
};

/* Frame size and mcontext offset must agree with the vDSO's constants. */
QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe)
                  != sizeof_rt_sigframe);
QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, rs_uc.tuc_mcontext)
                  != offsetof_sigcontext);
/*
 * These two structures are not present in guest memory, are private
 * to the signal implementation, but are largely copied from the
 * kernel's signal implementation.
 */
struct ctx_layout {
    void *haddr;         /* host address of the record once the frame is locked */
    abi_ptr gaddr;       /* guest address of the record */
    unsigned int size;   /* bytes consumed, header and alignment padding included */
};
/* Bookkeeping for all extension records of one signal frame. */
struct extctx_layout {
    unsigned long size;      /* total bytes of all extension records */
    unsigned int flags;      /* copied into sc_flags (SC_USED_FP) */
    struct ctx_layout fpu;   /* at most one of fpu/lsx/lasx is populated */
    struct ctx_layout lsx;
    struct ctx_layout lasx;
    struct ctx_layout end;   /* terminating record (magic == 0) */
};
/*
 * Carve one extension record (header + payload) out of the stack,
 * growing downward from orig_sp.  Records gaddr/size in *sctx and
 * accumulates the total in extctx->size.  Returns the new stack top.
 */
static abi_ptr extframe_alloc(struct extctx_layout *extctx,
                              struct ctx_layout *sctx, unsigned size,
                              unsigned align, abi_ptr orig_sp)
{
    /* Reserve the record header plus its payload, then align down. */
    abi_ptr new_sp = orig_sp - (sizeof(struct target_sctx_info) + size);
    unsigned eff_align = MAX(align, CONTEXT_INFO_ALIGN);
    unsigned used;

    new_sp = ROUND_DOWN(new_sp, eff_align);
    sctx->gaddr = new_sp;

    /* Account for everything consumed, alignment padding included. */
    used = orig_sp - new_sp;
    sctx->size = used;
    extctx->size += used;

    return new_sp;
}
/*
 * Lay out the extension records below sp and return the new stack top.
 * Only guest addresses are chosen here; host pointers are filled in by
 * the caller after the frame has been locked.
 */
static abi_ptr setup_extcontext(CPULoongArchState *env,
                                struct extctx_layout *extctx, abi_ptr sp)
{
    memset(extctx, 0, sizeof(*extctx));

    /* The frame grows down, so the terminating "end" record comes first. */
    sp = extframe_alloc(extctx, &extctx->end, 0, CONTEXT_INFO_ALIGN, sp);

    /* For qemu, there is no lazy fp context switch, so fp always present. */
    extctx->flags = SC_USED_FP;

    /* The widest enabled vector extension wins: LASX > LSX > scalar FP. */
    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
        return extframe_alloc(extctx, &extctx->lasx,
                              sizeof(struct target_lasx_context),
                              LASX_CTX_ALIGN, sp);
    }
    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
        return extframe_alloc(extctx, &extctx->lsx,
                              sizeof(struct target_lsx_context),
                              LSX_CTX_ALIGN, sp);
    }
    return extframe_alloc(extctx, &extctx->fpu,
                          sizeof(struct target_fpu_context),
                          FPU_CTX_ALIGN, sp);
}
/*
 * Fill in the guest sigcontext and the extension records that follow it.
 * The haddr fields of *extctx must already point into the locked frame.
 */
static void setup_sigframe(CPULoongArchState *env,
                           struct target_sigcontext *sc,
                           struct extctx_layout *extctx)
{
    struct target_sctx_info *info;
    int i;

    __put_user(extctx->flags, &sc->sc_flags);
    __put_user(env->pc, &sc->sc_pc);
    /* r0 is hardwired to zero; store 0 rather than reading env state. */
    __put_user(0, &sc->sc_regs[0]);
    for (i = 1; i < 32; ++i) {
        __put_user(env->gpr[i], &sc->sc_regs[i]);
    }

    /*
     * Set extension context
     */

    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
        /* LASX enabled: save all four 64-bit lanes of each vector reg. */
        struct target_lasx_context *lasx_ctx;
        info = extctx->lasx.haddr;

        __put_user(LASX_CTX_MAGIC, &info->magic);
        __put_user(extctx->lasx.size, &info->size);

        /* Payload starts immediately after the record header. */
        lasx_ctx = (struct target_lasx_context *)(info + 1);

        for (i = 0; i < 32; ++i) {
            __put_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
            __put_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
            __put_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
            __put_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
        }
        __put_user(read_fcc(env), &lasx_ctx->fcc);
        __put_user(env->fcsr0, &lasx_ctx->fcsr);
    } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
        /* LSX enabled: save the two low 64-bit lanes of each vector reg. */
        struct target_lsx_context *lsx_ctx;
        info = extctx->lsx.haddr;

        __put_user(LSX_CTX_MAGIC, &info->magic);
        __put_user(extctx->lsx.size, &info->size);

        lsx_ctx = (struct target_lsx_context *)(info + 1);

        for (i = 0; i < 32; ++i) {
            __put_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
            __put_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
        }
        __put_user(read_fcc(env), &lsx_ctx->fcc);
        __put_user(env->fcsr0, &lsx_ctx->fcsr);
    } else {
        /* Scalar FP only. */
        struct target_fpu_context *fpu_ctx;
        info = extctx->fpu.haddr;

        __put_user(FPU_CTX_MAGIC, &info->magic);
        __put_user(extctx->fpu.size, &info->size);

        fpu_ctx = (struct target_fpu_context *)(info + 1);

        for (i = 0; i < 32; ++i) {
            __put_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
        }
        __put_user(read_fcc(env), &fpu_ctx->fcc);
        __put_user(env->fcsr0, &fpu_ctx->fcsr);
    }

    /*
     * Set end context
     */
    info = extctx->end.haddr;
    __put_user(0, &info->magic);
    __put_user(0, &info->size);
}
/*
 * Walk the extension records starting at guest address @frame, recording
 * each recognized one in *extctx.  Returns true when a valid end record
 * (magic == 0) is reached, false on read failure, unknown magic, or a
 * record too small for its declared type.
 */
static bool parse_extcontext(struct extctx_layout *extctx, abi_ptr frame)
{
    memset(extctx, 0, sizeof(*extctx));

    for (;;) {
        abi_uint magic, size;
        struct ctx_layout *slot;
        size_t min_size;

        if (get_user_u32(magic, frame) || get_user_u32(size, frame + 4)) {
            return false;
        }

        if (magic == 0) {
            /* END record: terminate the walk successfully. */
            extctx->end.gaddr = frame;
            extctx->end.size = size;
            extctx->size += size;
            return true;
        }

        /* Map the magic to its slot and the minimum plausible size. */
        if (magic == FPU_CTX_MAGIC) {
            slot = &extctx->fpu;
            min_size = sizeof(struct target_sctx_info)
                       + sizeof(struct target_fpu_context);
        } else if (magic == LSX_CTX_MAGIC) {
            slot = &extctx->lsx;
            min_size = sizeof(struct target_sctx_info)
                       + sizeof(struct target_lsx_context);
        } else if (magic == LASX_CTX_MAGIC) {
            slot = &extctx->lasx;
            min_size = sizeof(struct target_sctx_info)
                       + sizeof(struct target_lasx_context);
        } else {
            return false;
        }

        if (size < min_size) {
            return false;
        }
        slot->gaddr = frame;
        slot->size = size;
        extctx->size += size;

        frame += size;
    }
}
/*
 * Restore CPU state from the sigcontext and from whichever extension
 * record the caller resolved (exactly one haddr is non-NULL, or none).
 */
static void restore_sigframe(CPULoongArchState *env,
                             struct target_sigcontext *sc,
                             struct extctx_layout *extctx)
{
    int i;
    abi_ulong fcc;

    __get_user(env->pc, &sc->sc_pc);
    /* Skip r0: it is hardwired to zero. */
    for (i = 1; i < 32; ++i) {
        __get_user(env->gpr[i], &sc->sc_regs[i]);
    }

    if (extctx->lasx.haddr) {
        /* Payload follows the record header. */
        struct target_lasx_context *lasx_ctx =
            extctx->lasx.haddr + sizeof(struct target_sctx_info);

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.UD(0), &lasx_ctx->regs[4 * i]);
            __get_user(env->fpr[i].vreg.UD(1), &lasx_ctx->regs[4 * i + 1]);
            __get_user(env->fpr[i].vreg.UD(2), &lasx_ctx->regs[4 * i + 2]);
            __get_user(env->fpr[i].vreg.UD(3), &lasx_ctx->regs[4 * i + 3]);
        }
        __get_user(fcc, &lasx_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &lasx_ctx->fcsr);
        /* Re-derive cached FP status from the restored fcsr0. */
        restore_fp_status(env);
    } else if (extctx->lsx.haddr) {
        struct target_lsx_context *lsx_ctx =
            extctx->lsx.haddr + sizeof(struct target_sctx_info);

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.UD(0), &lsx_ctx->regs[2 * i]);
            __get_user(env->fpr[i].vreg.UD(1), &lsx_ctx->regs[2 * i + 1]);
        }
        __get_user(fcc, &lsx_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &lsx_ctx->fcsr);
        restore_fp_status(env);
    } else if (extctx->fpu.haddr) {
        struct target_fpu_context *fpu_ctx =
            extctx->fpu.haddr + sizeof(struct target_sctx_info);

        for (i = 0; i < 32; ++i) {
            __get_user(env->fpr[i].vreg.UD(0), &fpu_ctx->regs[i]);
        }
        __get_user(fcc, &fpu_ctx->fcc);
        write_fcc(env, fcc);
        __get_user(env->fcsr0, &fpu_ctx->fcsr);
        restore_fp_status(env);
    }
}
/*
|
|
* Determine which stack to use.
|
|
*/
|
|
static abi_ptr get_sigframe(struct target_sigaction *ka,
|
|
CPULoongArchState *env,
|
|
struct extctx_layout *extctx)
|
|
{
|
|
abi_ulong sp;
|
|
|
|
sp = target_sigsp(get_sp_from_cpustate(env), ka);
|
|
sp = ROUND_DOWN(sp, 16);
|
|
sp = setup_extcontext(env, extctx, sp);
|
|
sp -= sizeof(struct target_rt_sigframe);
|
|
|
|
assert(QEMU_IS_ALIGNED(sp, 16));
|
|
|
|
return sp;
|
|
}
|
|
|
|
/*
 * Build the rt signal frame on the guest stack and redirect execution to
 * the handler.  If the frame cannot be mapped, the guest gets SIGSEGV.
 */
void setup_rt_frame(int sig, struct target_sigaction *ka,
                    target_siginfo_t *info,
                    target_sigset_t *set, CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ptr frame_addr;
    int i;

    frame_addr = get_sigframe(ka, env, &extctx);
    trace_user_setup_rt_frame(env, frame_addr);

    /* Lock the frame together with the extension records below it. */
    frame = lock_user(VERIFY_WRITE, frame_addr,
                      sizeof(*frame) + extctx.size, 0);
    if (!frame) {
        force_sigsegv(sig);
        return;
    }

    /*
     * Translate the guest addresses chosen by setup_extcontext() into
     * host pointers inside the locked region.
     */
    if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE)) {
        extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
        extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
    } else if (FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE)) {
        extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
        extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
    } else {
        extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
        extctx.end.haddr = (void *)frame + (extctx.end.gaddr - frame_addr);
    }

    frame->rs_info = *info;

    __put_user(0, &frame->rs_uc.tuc_flags);
    __put_user(0, &frame->rs_uc.tuc_link);
    target_save_altstack(&frame->rs_uc.tuc_stack, env);

    setup_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    for (i = 0; i < TARGET_NSIG_WORDS; i++) {
        __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]);
    }

    /* Handler arguments: a0 = signo, a1 = &siginfo, a2 = &ucontext. */
    env->gpr[4] = sig;
    env->gpr[5] = frame_addr + offsetof(struct target_rt_sigframe, rs_info);
    env->gpr[6] = frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
    env->gpr[3] = frame_addr;            /* sp -> frame */
    env->gpr[1] = default_rt_sigreturn;  /* ra -> sigreturn trampoline */

    env->pc = ka->_sa_handler;
    unlock_user(frame, frame_addr, sizeof(*frame) + extctx.size);
}
/*
 * Implement rt_sigreturn: restore signal mask, CPU state and alternate
 * stack from the frame that the guest sp still points at.  A malformed
 * frame raises SIGSEGV.
 */
long do_rt_sigreturn(CPULoongArchState *env)
{
    struct target_rt_sigframe *frame;
    struct extctx_layout extctx;
    abi_ulong frame_addr;
    sigset_t blocked;

    frame_addr = env->gpr[3];   /* sp register */
    trace_user_do_rt_sigreturn(env, frame_addr);

    /* Walk the extension records that follow the frame proper. */
    if (!parse_extcontext(&extctx, frame_addr + sizeof(*frame))) {
        goto badframe;
    }

    frame = lock_user(VERIFY_READ, frame_addr,
                      sizeof(*frame) + extctx.size, 1);
    if (!frame) {
        goto badframe;
    }

    /* Resolve whichever extension record is present to a host pointer. */
    if (extctx.lasx.gaddr) {
        extctx.lasx.haddr = (void *)frame + (extctx.lasx.gaddr - frame_addr);
    } else if (extctx.lsx.gaddr) {
        extctx.lsx.haddr = (void *)frame + (extctx.lsx.gaddr - frame_addr);
    } else if (extctx.fpu.gaddr) {
        extctx.fpu.haddr = (void *)frame + (extctx.fpu.gaddr - frame_addr);
    }

    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
    set_sigmask(&blocked);

    restore_sigframe(env, &frame->rs_uc.tuc_mcontext, &extctx);

    target_restore_altstack(&frame->rs_uc.tuc_stack, env);

    unlock_user(frame, frame_addr, 0);
    return -QEMU_ESIGRETURN;

badframe:
    force_sig(TARGET_SIGSEGV);
    return -QEMU_ESIGRETURN;
}
/*
 * Install the two-instruction sigreturn trampoline on the dedicated
 * page: load the rt_sigreturn syscall number (0x8b) into a7, then trap.
 */
void setup_sigtramp(abi_ulong sigtramp_page)
{
    uint32_t *insns = lock_user(VERIFY_WRITE, sigtramp_page, 8, 0);

    assert(insns != NULL);

    __put_user(0x03822c0b, &insns[0]); /* ori a7, zero, 0x8b */
    __put_user(0x002b0000, &insns[1]); /* syscall 0 */

    default_rt_sigreturn = sigtramp_page;
    unlock_user(insns, sigtramp_page, 8);
}