diff --git a/src/core/jit/register_mapping.cpp b/src/core/jit/register_mapping.cpp
index 7a5634cb7..946f47182 100644
--- a/src/core/jit/register_mapping.cpp
+++ b/src/core/jit/register_mapping.cpp
@@ -2,6 +2,7 @@
 // SPDX-License-Identifier: GPL-2.0-or-later
 
 #include
+#include "arm64_codegen.h"
 #include "common/assert.h"
 #include "register_mapping.h"
 
@@ -116,8 +117,152 @@ bool RegisterMapper::IsRegisterSpilled(X86_64Register x86_reg) const {
     return spilled_registers[index];
 }
 
-void RegisterMapper::SaveAllRegisters() {}
+void RegisterMapper::SaveRegister(Arm64CodeGenerator& codegen, X86_64Register x86_reg,
+                                  RegisterContext* ctx) {
+    if (!ctx) {
+        return;
+    }
 
-void RegisterMapper::RestoreAllRegisters() {}
+    int arm64_reg = MapX86_64ToArm64(x86_reg);
+    if (arm64_reg == INVALID_MAPPING) {
+        return;
+    }
+
+    size_t index = static_cast<size_t>(x86_reg);
+    if (IsXmmRegister(x86_reg)) {
+        int vreg = MapX86_64XmmToArm64Neon(x86_reg);
+        if (vreg != INVALID_MAPPING) {
+            codegen.movz(SCRATCH_REG,
+                         reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) & 0xFFFF);
+            codegen.movk(SCRATCH_REG,
+                         (reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) >> 16) & 0xFFFF, 16);
+            codegen.movk(SCRATCH_REG,
+                         (reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) >> 32) & 0xFFFF, 32);
+            codegen.movk(SCRATCH_REG,
+                         (reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) >> 48) & 0xFFFF, 48);
+            codegen.str_v(vreg, SCRATCH_REG, 0);
+        }
+    } else if (x86_reg == X86_64Register::FLAGS) {
+        codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->flags) & 0xFFFF);
+        codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->flags) >> 16) & 0xFFFF, 16);
+        codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->flags) >> 32) & 0xFFFF, 32);
+        codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->flags) >> 48) & 0xFFFF, 48);
+        codegen.str(arm64_reg, SCRATCH_REG, 0);
+    } else if (x86_reg == X86_64Register::RSP || x86_reg == X86_64Register::RBP) {
+        if (arm64_reg == STACK_POINTER) {
+            codegen.mov(SCRATCH_REG, STACK_POINTER);
+            codegen.movz(SCRATCH_REG2, reinterpret_cast<u64>(&ctx->rsp) & 0xFFFF);
+            codegen.movk(SCRATCH_REG2, (reinterpret_cast<u64>(&ctx->rsp) >> 16) & 0xFFFF, 16);
+            codegen.movk(SCRATCH_REG2, (reinterpret_cast<u64>(&ctx->rsp) >> 32) & 0xFFFF, 32);
+            codegen.movk(SCRATCH_REG2, (reinterpret_cast<u64>(&ctx->rsp) >> 48) & 0xFFFF, 48);
+            codegen.str(SCRATCH_REG, SCRATCH_REG2, 0);
+        } else {
+            codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->rbp) & 0xFFFF);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rbp) >> 16) & 0xFFFF, 16);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rbp) >> 32) & 0xFFFF, 32);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rbp) >> 48) & 0xFFFF, 48);
+            codegen.str(arm64_reg, SCRATCH_REG, 0);
+        }
+    } else {
+        if (index < 16) {
+            codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->gp_regs[index]) & 0xFFFF);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->gp_regs[index]) >> 16) & 0xFFFF,
+                         16);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->gp_regs[index]) >> 32) & 0xFFFF,
+                         32);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->gp_regs[index]) >> 48) & 0xFFFF,
+                         48);
+            codegen.str(arm64_reg, SCRATCH_REG, 0);
+        }
+    }
+}
+
+void RegisterMapper::RestoreRegister(Arm64CodeGenerator& codegen, X86_64Register x86_reg,
+                                     RegisterContext* ctx) {
+    if (!ctx) {
+        return;
+    }
+
+    int arm64_reg = MapX86_64ToArm64(x86_reg);
+    if (arm64_reg == INVALID_MAPPING) {
+        return;
+    }
+
+    size_t index = static_cast<size_t>(x86_reg);
+    if (IsXmmRegister(x86_reg)) {
+        int vreg = MapX86_64XmmToArm64Neon(x86_reg);
+        if (vreg != INVALID_MAPPING) {
+            codegen.movz(SCRATCH_REG,
+                         reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) & 0xFFFF);
+            codegen.movk(SCRATCH_REG,
+                         (reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) >> 16) & 0xFFFF, 16);
+            codegen.movk(SCRATCH_REG,
+                         (reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) >> 32) & 0xFFFF, 32);
+            codegen.movk(SCRATCH_REG,
+                         (reinterpret_cast<u64>(&ctx->xmm_regs[index - 16][0]) >> 48) & 0xFFFF, 48);
+            codegen.ldr_v(vreg, SCRATCH_REG, 0);
+        }
+    } else if (x86_reg == X86_64Register::FLAGS) {
+        codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->flags) & 0xFFFF);
+        codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->flags) >> 16) & 0xFFFF, 16);
+        codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->flags) >> 32) & 0xFFFF, 32);
+        codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->flags) >> 48) & 0xFFFF, 48);
+        codegen.ldr(arm64_reg, SCRATCH_REG, 0);
+    } else if (x86_reg == X86_64Register::RSP || x86_reg == X86_64Register::RBP) {
+        if (arm64_reg == STACK_POINTER) {
+            codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->rsp) & 0xFFFF);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rsp) >> 16) & 0xFFFF, 16);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rsp) >> 32) & 0xFFFF, 32);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rsp) >> 48) & 0xFFFF, 48);
+            codegen.ldr(SCRATCH_REG2, SCRATCH_REG, 0);
+            codegen.mov(STACK_POINTER, SCRATCH_REG2);
+        } else {
+            codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->rbp) & 0xFFFF);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rbp) >> 16) & 0xFFFF, 16);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rbp) >> 32) & 0xFFFF, 32);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->rbp) >> 48) & 0xFFFF, 48);
+            codegen.ldr(arm64_reg, SCRATCH_REG, 0);
+        }
+    } else {
+        if (index < 16) {
+            codegen.movz(SCRATCH_REG, reinterpret_cast<u64>(&ctx->gp_regs[index]) & 0xFFFF);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->gp_regs[index]) >> 16) & 0xFFFF,
+                         16);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->gp_regs[index]) >> 32) & 0xFFFF,
+                         32);
+            codegen.movk(SCRATCH_REG, (reinterpret_cast<u64>(&ctx->gp_regs[index]) >> 48) & 0xFFFF,
+                         48);
+            codegen.ldr(arm64_reg, SCRATCH_REG, 0);
+        }
+    }
+}
+
+void RegisterMapper::SaveAllRegisters(Arm64CodeGenerator& codegen, RegisterContext* ctx) {
+    if (!ctx) {
+        return;
+    }
+
+    for (int i = 0; i < 16; i++) {
+        SaveRegister(codegen, static_cast<X86_64Register>(i), ctx);
+    }
+    for (int i = 16; i < 32; i++) {
+        SaveRegister(codegen, static_cast<X86_64Register>(i), ctx);
+    }
+    SaveRegister(codegen, X86_64Register::FLAGS, ctx);
+}
+
+void RegisterMapper::RestoreAllRegisters(Arm64CodeGenerator& codegen, RegisterContext* ctx) {
+    if (!ctx) {
+        return;
+    }
+
+    RestoreRegister(codegen, X86_64Register::FLAGS, ctx);
+    for (int i = 16; i < 32; i++) {
+        RestoreRegister(codegen, static_cast<X86_64Register>(i), ctx);
+    }
+    for (int i = 0; i < 16; i++) {
+        RestoreRegister(codegen, static_cast<X86_64Register>(i), ctx);
+    }
+}
 
 } // namespace Core::Jit
diff --git a/src/core/jit/register_mapping.h b/src/core/jit/register_mapping.h
index 80e1caab7..c6d9cf540 100644
--- a/src/core/jit/register_mapping.h
+++ b/src/core/jit/register_mapping.h
@@ -5,6 +5,7 @@
 
 #include
 #include "common/types.h"
+#include "core/jit/arm64_codegen.h"
 
 namespace Core::Jit {
 
@@ -97,6 +98,14 @@ enum class Arm64Register : u8 {
     COUNT = 48
 };
 
+struct RegisterContext {
+    u64 gp_regs[16];
+    u64 xmm_regs[16][2];
+    u64 flags;
+    u64 rsp;
+    u64 rbp;
+};
+
 class RegisterMapper {
 public:
     RegisterMapper();
@@ -109,8 +118,10 @@ public:
     void ReloadRegister(X86_64Register x86_reg);
     bool IsRegisterSpilled(X86_64Register x86_reg) const;
 
-    void SaveAllRegisters();
-    void RestoreAllRegisters();
+    void SaveAllRegisters(Arm64CodeGenerator& codegen, RegisterContext* ctx);
+    void RestoreAllRegisters(Arm64CodeGenerator& codegen, RegisterContext* ctx);
+    void SaveRegister(Arm64CodeGenerator& codegen, X86_64Register x86_reg, RegisterContext* ctx);
+    void RestoreRegister(Arm64CodeGenerator& codegen, X86_64Register x86_reg, RegisterContext* ctx);
 
     static constexpr int SCRATCH_REG = 9;
     static constexpr int SCRATCH_REG2 = 10;
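---
Usage note (not part of the patch): a minimal sketch of how the new save/restore
pair is meant to bracket a call out of JITted code into the runtime. Everything
below other than SaveAllRegisters/RestoreAllRegisters and RegisterContext is an
assumption for illustration; in particular, EmitRuntimeCall is a hypothetical
helper, and Arm64CodeGenerator is assumed to gain a branch-with-link emitter
(spelled bl() here), which this change does not add.

    #include "core/jit/register_mapping.h"

    namespace Core::Jit {

    // Hypothetical call-out thunk, sketched against this patch's API.
    void EmitRuntimeCall(Arm64CodeGenerator& codegen, RegisterMapper& mapper,
                         RegisterContext* ctx, const void* runtime_fn) {
        // Spill the full guest register file (16 GP registers, 16 XMM
        // registers, then FLAGS) so the callee may clobber any host register.
        mapper.SaveAllRegisters(codegen, ctx);
        codegen.bl(runtime_fn); // assumed emitter; not introduced by this patch
        // Reload in the mirrored group order: FLAGS first, then XMM, then GP.
        mapper.RestoreAllRegisters(codegen, ctx);
    }

    } // namespace Core::Jit

Note that RestoreAllRegisters deliberately reverses SaveAllRegisters' group
order, and that every per-register access materializes the context address
through SCRATCH_REG (x9) or SCRATCH_REG2 (x10), so neither scratch register may
hold live guest state across these sequences.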