
* Update to QEMU v9.0.0
232 lines · 5.6 KiB · C

/*
 * Misc Sparc helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/timer.h"
#include "qemu/host-utils.h"
#include "exec/helper-proto.h"

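/*
 * Raise trap 'tt'.  'ra' is the host return address inside the calling
 * helper (pass GETPC()); it is used to restore the guest CPU state to
 * the faulting instruction before leaving the cpu loop.
 */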
void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = tt;
    cpu_loop_exit_restore(cs, ra);
}

void helper_raise_exception(CPUSPARCState *env, int tt)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

void helper_debug(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = EXCP_DEBUG;
    cpu_loop_exit(cs);
}

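/*
 * sparc64 %tick/%stick access.  In system emulation these forward to the
 * CPUTimer code; a non-privileged read traps with TT_PRIV_INSN while the
 * timer's NPT (non-privileged trap) bit is set.
 */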
#ifdef TARGET_SPARC64
void helper_tick_set_count(void *opaque, uint64_t count)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_count(opaque, count);
#endif
}

uint64_t helper_tick_get_count(CPUSPARCState *env, void *opaque, int mem_idx)
{
#if !defined(CONFIG_USER_ONLY)
    CPUTimer *timer = opaque;

    if (timer->npt && mem_idx < MMU_KERNEL_IDX) {
        cpu_raise_exception_ra(env, TT_PRIV_INSN, GETPC());
    }

    return cpu_tick_get_count(timer);
#else
    /* In user-mode, QEMU_CLOCK_VIRTUAL doesn't exist.
       Just pass through the host cpu clock ticks. */
    return cpu_get_host_ticks();
#endif
}

void helper_tick_set_limit(void *opaque, uint64_t limit)
{
#if !defined(CONFIG_USER_ONLY)
    cpu_tick_set_limit(opaque, limit);
#endif
}
#endif

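/*
 * 32-bit unsigned divide: the dividend is Y in the upper 32 bits plus the
 * low 32 bits of 'a'; the divisor is the low 32 bits of 'b'.  The quotient
 * is returned in the low half of the result; on overflow it saturates to
 * UINT32_MAX and the upper half is set so the caller can raise V.  A zero
 * divisor raises the division-by-zero trap.
 */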
uint64_t helper_udiv(CPUSPARCState *env, target_ulong a, target_ulong b)
{
    uint64_t a64 = (uint32_t)a | ((uint64_t)env->y << 32);
    uint32_t b32 = b;
    uint32_t r;

    if (b32 == 0) {
        cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
    }

    a64 /= b32;
    r = a64;
    if (unlikely(a64 > UINT32_MAX)) {
        return -1; /* r = UINT32_MAX, v = 1 */
    }
    return r;
}

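/*
 * 32-bit signed divide, same operand layout as helper_udiv above.  On
 * overflow the quotient saturates to INT32_MIN/INT32_MAX and the upper
 * half of the result is set to signal V.
 */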
uint64_t helper_sdiv(CPUSPARCState *env, target_ulong a, target_ulong b)
{
    int64_t a64 = (uint32_t)a | ((uint64_t)env->y << 32);
    int32_t b32 = b;
    int32_t r;

    if (b32 == 0) {
        cpu_raise_exception_ra(env, TT_DIV_ZERO, GETPC());
    }

    if (unlikely(a64 == INT64_MIN)) {
        /*
         * Special case INT64_MIN / -1 is required to avoid trap on x86 host.
         * However, with a dividend of INT64_MIN, there is no 32-bit divisor
         * which can yield a 32-bit result:
         *    INT64_MIN / INT32_MIN =  0x1_0000_0000
         *    INT64_MIN / INT32_MAX = -0x1_0000_0002
         * Therefore we know we must overflow and saturate.
         */
        return (uint32_t)(b32 < 0 ? INT32_MAX : INT32_MIN) | (-1ull << 32);
    }

    a64 /= b;
    r = a64;
    if (unlikely(r != a64)) {
        return (uint32_t)(a64 < 0 ? INT32_MIN : INT32_MAX) | (-1ull << 32);
    }
    return (uint32_t)r;
}

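/*
 * TADDccTV: tagged add, trapping on tag mismatch or overflow.  TT_TOVF is
 * raised if either operand has one of its two tag bits set or if the
 * 32-bit addition overflows; otherwise the condition codes are updated
 * and the sum is returned.
 */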
target_ulong helper_taddcctv(CPUSPARCState *env, target_ulong src1,
                             target_ulong src2)
{
    target_ulong dst, v;

    /* Tag overflow occurs if either input has bits 0 or 1 set. */
    if ((src1 | src2) & 3) {
        goto tag_overflow;
    }

    dst = src1 + src2;

    /* Tag overflow occurs if the addition overflows. */
    v = ~(src1 ^ src2) & (src1 ^ dst);
    if (v & (1u << 31)) {
        goto tag_overflow;
    }

    /* Only modify the CC after any exceptions have been generated. */
    env->cc_V = v;
    env->cc_N = dst;
    env->icc_Z = dst;
#ifdef TARGET_SPARC64
    env->xcc_Z = dst;
    env->icc_C = dst ^ src1 ^ src2;
    env->xcc_C = dst < src1;
#else
    env->icc_C = dst < src1;
#endif

    return dst;

 tag_overflow:
    cpu_raise_exception_ra(env, TT_TOVF, GETPC());
}

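/*
 * TSUBccTV: tagged subtract, the counterpart of TADDccTV above; TT_TOVF is
 * raised on a set tag bit or on 32-bit subtraction overflow.
 */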
target_ulong helper_tsubcctv(CPUSPARCState *env, target_ulong src1,
                             target_ulong src2)
{
    target_ulong dst, v;

    /* Tag overflow occurs if either input has bits 0 or 1 set. */
    if ((src1 | src2) & 3) {
        goto tag_overflow;
    }

    dst = src1 - src2;

    /* Tag overflow occurs if the subtraction overflows. */
    v = (src1 ^ src2) & (src1 ^ dst);
    if (v & (1u << 31)) {
        goto tag_overflow;
    }

    /* Only modify the CC after any exceptions have been generated. */
    env->cc_V = v;
    env->cc_N = dst;
    env->icc_Z = dst;
#ifdef TARGET_SPARC64
    env->xcc_Z = dst;
    env->icc_C = dst ^ src1 ^ src2;
    env->xcc_C = src1 < src2;
#else
    env->icc_C = src1 < src2;
#endif

    return dst;

 tag_overflow:
    cpu_raise_exception_ra(env, TT_TOVF, GETPC());
}

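/*
 * sparc32-only helpers.
 *
 * helper_power_down halts the CPU until the next interrupt (used for the
 * LEON power-down feature); the PC is advanced first so that execution
 * resumes at the instruction following the power-down.
 */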
#ifndef TARGET_SPARC64
void helper_power_down(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);

    cs->halted = 1;
    cs->exception_index = EXCP_HLT;
    env->pc = env->npc;
    env->npc = env->pc + 4;
    cpu_loop_exit(cs);
}

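/*
 * Read %asr17 (LEON configuration register): the number of register
 * windows, the V8 mul/div bit and this CPU's index.  Only a subset of
 * the fields is modelled, see the TODO in the body.
 */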
target_ulong helper_rdasr17(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong val;

    /*
     * TODO: There are many more fields to be filled,
     * some of which are writable.
     */
    val = env->def.nwindows - 1;    /* [4:0] NWIN */
    val |= 1 << 8;                  /* [8] V8 */
    val |= (cs->cpu_index) << 28;   /* [31:28] INDEX */

    return val;
}
#endif