diff --git a/FEXCore/Source/Interface/Core/CPUBackend.h b/FEXCore/Source/Interface/Core/CPUBackend.h
index c7265dc5a4..8fc2de2688 100644
--- a/FEXCore/Source/Interface/Core/CPUBackend.h
+++ b/FEXCore/Source/Interface/Core/CPUBackend.h
@@ -115,7 +115,7 @@ namespace CPU {
     uint16_t HostPCOffset;

    // How much to offset the RIP from the previous entry.
-    uint16_t GuestRIPOffset;
+    int16_t GuestRIPOffset;
  };

  /**
diff --git a/FEXCore/Source/Interface/Core/JIT/JIT.cpp b/FEXCore/Source/Interface/Core/JIT/JIT.cpp
index 5f5d7580e2..344241b7f7 100644
--- a/FEXCore/Source/Interface/Core/JIT/JIT.cpp
+++ b/FEXCore/Source/Interface/Core/JIT/JIT.cpp
@@ -35,6 +35,7 @@ desc: Main glue logic of the arm64 splatter backend
 #include <FEXCore/Utils/MathUtils.h>
 #include <FEXCore/Utils/Profiler.h>
 #include <FEXCore/Utils/Threads.h>
+#include <limits>

 static constexpr size_t INITIAL_CODE_SIZE = 1024 * 1024 * 16;
 // We don't want to move above 128MB atm because that means we will have to encode longer jumps
@@ -875,6 +876,11 @@ CPUBackend::CompiledCode Arm64JITCore::CompileCode(uint64_t Entry, uint64_t Size
     for (size_t i = 0; i < DebugData->GuestOpcodes.size(); i++) {
       const auto& GuestOpcode = DebugData->GuestOpcodes[i];
       auto& RIPEntry = JITRIPEntries[i];
+      uint64_t HostPCOffset = GuestOpcode.HostEntryOffset - CurrentPCOffset;
+      int64_t GuestRIPOffset = GuestOpcode.GuestEntryOffset - CurrentRIPOffset;
+      LOGMAN_THROW_AA_FMT(HostPCOffset <= std::numeric_limits<uint16_t>::max(), "PC offset too large");
+      LOGMAN_THROW_AA_FMT(GuestRIPOffset >= std::numeric_limits<int16_t>::min(), "RIP offset too small");
+      LOGMAN_THROW_AA_FMT(GuestRIPOffset <= std::numeric_limits<int16_t>::max(), "RIP offset too large");
       RIPEntry.HostPCOffset = GuestOpcode.HostEntryOffset - CurrentPCOffset;
       RIPEntry.GuestRIPOffset = GuestOpcode.GuestEntryOffset - CurrentRIPOffset;
       CurrentPCOffset = GuestOpcode.HostEntryOffset;