// SPDX-License-Identifier: MIT
/*
$info$
tags: frontend|x86-meta-blocks
desc: Extracts instruction & block meta info, frontend multiblock logic
$end_info$
*/

#include "Interface/Context/Context.h"
#include "Interface/Core/Frontend.h"
#include "Interface/Core/X86Tables/X86Tables.h"
#include "Interface/Core/LookupCache.h"

#include <array>
#include <algorithm>
#include <cstring>
#include <FEXCore/Config/Config.h>
#include <FEXCore/Core/X86Enums.h>
#include <FEXCore/HLE/SyscallHandler.h>
#include <FEXCore/Utils/Allocator.h>
#include <FEXCore/Utils/LogManager.h>
#include <FEXCore/Utils/Profiler.h>
#include <FEXCore/Utils/Telemetry.h>
#include <FEXCore/Utils/TypeDefines.h>
#include <FEXCore/Debug/InternalThreadState.h>
#include <FEXCore/fextl/set.h>

namespace FEXCore::Frontend {
#include "Interface/Core/VSyscall/VSyscall.inc"

using namespace FEXCore::X86Tables;

// Maps a ModRM register field (plus its one-bit REX extension) to the internal
// register enum. Handles XMM registers, MMX registers (which ignore REX), and
// the legacy high-byte GPR aliases (AH/CH/DH/BH) used when no REX is present.
// Returns REG_INVALID when the combined encoding equals InvalidOffset.
static uint32_t MapModRMToReg(uint8_t REX, uint8_t bits, bool HighBits, bool HasREX, bool HasXMM, bool HasMM, uint8_t InvalidOffset = 16) {
  // Without a REX prefix, 8-bit encodings 4-7 select the high byte of the
  // first four registers (AH/CH/DH/BH) rather than SPL/BPL/SIL/DIL.
  static constexpr std::array<uint32_t, 16> High8BitMapping {
    FEXCore::X86State::REG_RAX, FEXCore::X86State::REG_RCX, FEXCore::X86State::REG_RDX, FEXCore::X86State::REG_RBX,
    FEXCore::X86State::REG_RAX, FEXCore::X86State::REG_RCX, FEXCore::X86State::REG_RDX, FEXCore::X86State::REG_RBX,
    FEXCore::X86State::REG_R8,  FEXCore::X86State::REG_R9,  FEXCore::X86State::REG_R10, FEXCore::X86State::REG_R11,
    FEXCore::X86State::REG_R12, FEXCore::X86State::REG_R13, FEXCore::X86State::REG_R14, FEXCore::X86State::REG_R15,
  };

  const uint8_t Combined = static_cast<uint8_t>((REX << 3) | bits);

  if (Combined == InvalidOffset) {
    return FEXCore::X86State::REG_INVALID;
  }

  if (HasXMM) {
    return FEXCore::X86State::REG_XMM_0 + Combined;
  }

  if (HasMM) {
    // MMX registers ignore the REX extension bit entirely.
    return FEXCore::X86State::REG_MM_0 + bits;
  }

  if (HighBits && !HasREX) {
    return High8BitMapping[Combined];
  }

  return FEXCore::X86State::REG_RAX + Combined;
}

// Maps a VEX.vvvv register field to the internal register enum, selecting the
// XMM register bank or the GPR bank depending on the instruction class.
static uint32_t MapVEXToReg(uint8_t vvvv, bool HasXMM) {
  const uint32_t Bank = HasXMM ? FEXCore::X86State::REG_XMM_0 : FEXCore::X86State::REG_RAX;
  return Bank + vvvv;
}

// Constructs the frontend decoder for a guest thread.
// Selects the X87 op table (reduced-precision F64 vs full F80) from config,
// and the VEX op tables from host features: the full tables when both AVX and
// SVE256 are supported, the AVX128 variants with AVX alone, and none at all
// (VEXTable stays null, rejecting VEX-encoded instructions) otherwise.
Decoder::Decoder(FEXCore::Core::InternalThreadState* Thread)
  : Thread {Thread}
  , CTX {static_cast<FEXCore::Context::ContextImpl*>(Thread->CTX)}
  , OSABI {CTX->SyscallHandler ? CTX->SyscallHandler->GetOSABI() : FEXCore::HLE::SyscallOSABI::OS_UNKNOWN}
  , PoolObject {CTX->FrontendAllocator, sizeof(FEXCore::X86Tables::DecodedInst) * DefaultDecodedBufferSize} {

  FEX_CONFIG_OPT(ReducedPrecision, X87REDUCEDPRECISION);
  if (ReducedPrecision) {
    X87Table = &FEXCore::X86Tables::X87F64Ops;
  } else {
    X87Table = &FEXCore::X86Tables::X87F80Ops;
  }

  if (CTX->HostFeatures.SupportsAVX && CTX->HostFeatures.SupportsSVE256) {
    VEXTable = &FEXCore::X86Tables::VEXTableOps;
    VEXTableGroup = &FEXCore::X86Tables::VEXTableGroupOps;
  } else if (CTX->HostFeatures.SupportsAVX) {
    VEXTable = &FEXCore::X86Tables::VEXTableOps_AVX128;
    VEXTableGroup = &FEXCore::X86Tables::VEXTableGroupOps_AVX128;
  }
}

// Verifies that the byte range [Address, Address + Size) is entirely covered
// by guest-executable mappings. Queries the syscall handler per mapping and
// caches the most recent result in ExecutableRangeBase/End/Writable so
// subsequent byte reads in the same range avoid re-querying.
// Returns false as soon as any part of the range is not executable.
bool Decoder::CheckRangeExecutable(uint64_t Address, uint64_t Size) {
  // Loop while some part of the requested range falls outside the cached mapping.
  while (Address < ExecutableRangeBase || Address + Size > ExecutableRangeEnd) {
    auto RangeInfo = CTX->SyscallHandler->QueryGuestExecutableRange(Thread, Address);
    ExecutableRangeBase = RangeInfo.Base;
    ExecutableRangeEnd = RangeInfo.Base + RangeInfo.Size;
    ExecutableRangeWritable = RangeInfo.Writable;

    // Zero-sized result: Address isn't inside any executable mapping.
    if (RangeInfo.Size == 0) {
      return false;
    }

    // If the request extends past this mapping, advance to the start of the
    // next mapping and re-check the remainder on the next iteration.
    uint64_t RangeRemainingSize = ExecutableRangeEnd - Address;
    if (Size > RangeRemainingSize) {
      Size -= RangeRemainingSize;
      Address += RangeRemainingSize;
    }
  }

  return true;
}

// Reads the next byte of the current instruction from the stream, recording
// it in the Instruction buffer and advancing InstructionSize.
// If the byte lies in non-executable memory, sets HitNonExecutableRange and
// returns 0; the main decode loop sees the flag and rolls the instruction back.
uint8_t Decoder::ReadByte() {
  LOGMAN_THROW_A_FMT(InstructionSize < MAX_INST_SIZE, "Max instruction size exceeded!");
  std::optional<uint8_t> Byte = PeekByte(0);
  if (!Byte) {
    HitNonExecutableRange = true;
    // Pretend we read 0, the main decode loop will see HitNonExecutableRange and rollback the instruction.
    return 0;
  }

  Instruction[InstructionSize] = *Byte;
  InstructionSize++;
  return *Byte;
}

// Peeks at the instruction byte `Offset` bytes past the current decode cursor
// without consuming it. Returns std::nullopt when the byte lies outside any
// guest-executable mapping.
std::optional<uint8_t> Decoder::PeekByte(uint8_t Offset) {
  const size_t StreamIndex = InstructionSize + Offset;
  const uint64_t ByteAddress = reinterpret_cast<uint64_t>(InstStream + StreamIndex);

  if (!CheckRangeExecutable(ByteAddress, 1)) {
    return std::nullopt;
  }

  return InstStream[StreamIndex];
}

// Reads a little-endian literal of Size bytes (1..8) starting at the current
// decode cursor. On a non-executable range, returns 0 and sets
// HitNonExecutableRange so the entire instruction gets rolled back.
// Always consumes Size bytes from the stream.
uint64_t Decoder::ReadData(uint8_t Size) {
  LOGMAN_THROW_A_FMT(Size != 0 && Size <= sizeof(uint64_t), "Unknown data size to read");

  uint64_t Res = 0;
  uint64_t Address = reinterpret_cast<uint64_t>(InstStream + InstructionSize);
  if (CheckRangeExecutable(Address, Size)) {
    std::memcpy(&Res, &InstStream[InstructionSize], Size);
  } else {
    HitNonExecutableRange = true;
    // See PeekByte, this specific case may cause some executable memory to read as 0 but it doesn't matter as the entire instruction will be rolled back anyway.
    Res = 0;
  }


#if defined(ASSERTIONS_ENABLED) && ASSERTIONS_ENABLED
  // Assertion builds consume the bytes through ReadByte so the Instruction
  // buffer and the max-instruction-size assertion stay exercised; release
  // builds just advance the cursor.
  for (size_t i = 0; i < Size; ++i) {
    ReadByte();
  }
#else
  SkipBytes(Size);
#endif

  return Res;
}

// Decodes a 16-bit addressing-mode ModRM byte (only called when mod != 0b11)
// into a SIB-style operand: a base/index register pair selected from the
// classical 16-bit encodings plus an optional sign-extended displacement.
void Decoder::DecodeModRM_16(X86Tables::DecodedOperand* Operand, X86Tables::ModRMDecoded ModRM) {
  // 16bit modrm behaves similar to SIB but encoded directly in modrm
  // mod != 0b11 case
  // RM    | Result
  // ===============
  // 0b000 | [BX + SI]
  // 0b001 | [BX + DI]
  // 0b010 | [BP + SI]
  // 0b011 | [BP + DI]
  // 0b100 | [SI]
  // 0b101 | [DI]
  // 0b110 | {[BP], disp16}
  // 0b111 | [BX]
  // if mod = 0b00
  //    0b110 = disp16
  // if mod = 0b01
  //    All encodings gain 8bit displacement
  //    0b110 = [BP] + disp8
  // if mod = 0b10
  //    All encodings gain 16bit displacement
  //    0b110 = [BP] + disp16
  uint32_t Literal {};
  uint8_t DisplacementSize {};
  if ((ModRM.mod == 0 && ModRM.rm == 0b110) || ModRM.mod == 0b10) {
    DisplacementSize = 2;
  } else if (ModRM.mod == 0b01) {
    DisplacementSize = 1;
  }
  if (DisplacementSize) {
    Literal = ReadData(DisplacementSize);
    if (DisplacementSize == 1) {
      // 8-bit displacements are sign-extended.
      Literal = static_cast<int8_t>(Literal);
    }
  }

  Operand->Type = DecodedOperand::OpType::SIB;
  Operand->Data.SIB.Scale = 1;
  Operand->Data.SIB.Offset = Literal;

  // Only called when ModRM.mod != 0b11
  struct Encodings {
    uint8_t Base;
    uint8_t Index;
  };
  // Indexed by (mod << 3) | rm: three 8-entry groups, one per mod value 0-2.
  constexpr static std::array<Encodings, 24> Lookup = {{
    // Mod = 0b00
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_RSI},
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_RDI},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_RSI},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_RDI},
    {FEXCore::X86State::REG_RSI, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RDI, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_INVALID, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_INVALID},
    // Mod = 0b01
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_RSI},
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_RDI},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_RSI},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_RDI},
    {FEXCore::X86State::REG_RSI, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RDI, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_INVALID},
    // Mod = 0b10
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_RSI},
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_RDI},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_RSI},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_RDI},
    {FEXCore::X86State::REG_RSI, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RDI, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RBP, FEXCore::X86State::REG_INVALID},
    {FEXCore::X86State::REG_RBX, FEXCore::X86State::REG_INVALID},
  }};

  uint8_t LookupIndex = ModRM.mod << 3 | ModRM.rm;
  auto it = Lookup[LookupIndex];
  Operand->Data.SIB.Base = it.Base;
  Operand->Data.SIB.Index = it.Index;
}

// Decodes a 32/64-bit addressing-mode ModRM byte into one of four operand
// forms: SIB (including VSIB for gather-style instructions), RIP-relative,
// direct [reg] addressing, or [reg + displacement].
// Consumes the SIB byte and any displacement bytes from the stream as needed.
void Decoder::DecodeModRM_64(X86Tables::DecodedOperand* Operand, X86Tables::ModRMDecoded ModRM) {
  uint8_t Displacement {};
  // Do we have an offset?
  if (ModRM.mod == 0b01) {
    Displacement = 1;
  } else if (ModRM.mod == 0b10) {
    Displacement = 4;
  } else if (ModRM.mod == 0 && ModRM.rm == 0b101) {
    Displacement = 4;
  }

  // Calculate SIB
  bool HasSIB = ((ModRM.mod != 0b11) && (ModRM.rm == 0b100));

  if (HasSIB) {
    FEXCore::X86Tables::SIBDecoded SIB;
    if (DecodeInst->Flags & DecodeFlags::FLAG_DECODED_SIB) {
      SIB.Hex = DecodeInst->SIB;
    } else {
      // Haven't yet grabbed SIB, pull it now
      DecodeInst->SIB = ReadByte();
      SIB.Hex = DecodeInst->SIB;
      DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_SIB;
    }

    // If the SIB base is 0b101, aka BP or R13 then we have a 32bit displacement
    if (ModRM.mod == 0b00 && ModRM.rm == 0b100 && SIB.base == 0b101) {
      Displacement = 4;
    }

    // SIB
    Operand->Type = DecodedOperand::OpType::SIB;
    Operand->Data.SIB.Scale = 1 << SIB.scale;

    // The invalid encoding types are described at Table 1-12. "promoted nsigned is always non-zero"
    {
      // If we have a VSIB byte (as opposed to SIB), then the index register is a vector.
      // DecodeInst->TableInfo may be null in the case of 3DNow! ModRM decoding.
      const bool IsIndexVector = DecodeInst->TableInfo && (DecodeInst->TableInfo->Flags & InstFlags::FLAGS_VEX_VSIB) != 0;
      uint8_t InvalidSIBIndex = 0b100; ///< SIB Index where there is no register encoding.
      if (IsIndexVector) {
        DecodeInst->Flags |= X86Tables::DecodeFlags::FLAG_VSIB_BYTE;
        InvalidSIBIndex = ~0; ///< No Invalid SIB Index with Index Vectors.
      }

      const uint8_t IndexREX = (DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_X) != 0 ? 1 : 0;
      const uint8_t BaseREX = (DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_B) != 0 ? 1 : 0;

      // With mod == 0, base 0b101 means "no base register, disp32 only".
      Operand->Data.SIB.Index = MapModRMToReg(IndexREX, SIB.index, false, false, IsIndexVector, false, InvalidSIBIndex);
      Operand->Data.SIB.Base = MapModRMToReg(BaseREX, SIB.base, false, false, false, false, ModRM.mod == 0 ? 0b101 : 16);
    }

    LOGMAN_THROW_A_FMT(Displacement <= 4, "Number of bytes should be <= 4 for literal src");

    if (Displacement) {
      uint64_t Literal = ReadData(Displacement);
      if (Displacement == 1) {
        // 8-bit displacements are sign-extended.
        Literal = static_cast<int8_t>(Literal);
      }
      Operand->Data.SIB.Offset = Literal;
    }
  } else if (ModRM.mod == 0) {
    // Explained in Table 1-14. "Operand Addressing Using ModRM and SIB Bytes"
    if (ModRM.rm == 0b101) {
      // 32bit Displacement
      const uint32_t Literal = ReadData(4);

      Operand->Type = DecodedOperand::OpType::RIPRelative;
      Operand->Data.RIPLiteral.Value.u = Literal;
    } else {
      // Register-direct addressing
      Operand->Type = DecodedOperand::OpType::GPRDirect;
      Operand->Data.GPR.GPR = MapModRMToReg(DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_B ? 1 : 0, ModRM.rm, false, false, false, false);
    }
  } else {
    // mod == 0b01 or 0b10: [reg + disp8/disp32]
    uint8_t DisplacementSize = ModRM.mod == 1 ? 1 : 4;
    uint32_t Literal = ReadData(DisplacementSize);
    if (DisplacementSize == 1) {
      Literal = static_cast<int8_t>(Literal);
    }

    Operand->Type = DecodedOperand::OpType::GPRIndirect;
    Operand->Data.GPRIndirect.GPR = MapModRMToReg(DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_B ? 1 : 0, ModRM.rm, false, false, false, false);
    Operand->Data.GPRIndirect.Displacement = Literal;
  }
}

// Decodes the remainder of an instruction once its opcode maps to a concrete
// table entry: operand sizes (dst/src), hardcoded register operands, ModRM
// operands, VEX sources, and any trailing literal/immediate data.
// Returns false for invalid/unknown encodings so the caller can reject the
// instruction.
bool Decoder::NormalOp(const FEXCore::X86Tables::X86InstInfo* Info, uint16_t Op, DecodedHeader Options) {
  if (Info->Type == FEXCore::X86Tables::TYPE_ARCH_DISPATCHER) [[unlikely]] {
    // Dispatcher Op.
    // TODO: Move this in to `NormalOpHeader`, Dispatch tables have a bug currently where some subtables don't inherit flags correctly.
    // Can be seen by running FEX asm tests if this is removed.
    return NormalOp(&Info->OpcodeDispatcher.Indirect[BlockInfo.Is64BitMode ? 1 : 0], Op);
  }

  DecodeInst->OP = Op;
  DecodeInst->TableInfo = Info;

  if (Info->Type == FEXCore::X86Tables::TYPE_UNKNOWN) {
    return false;
  }

  if (Info->Type == FEXCore::X86Tables::TYPE_INVALID) {
    return false;
  }

  LOGMAN_THROW_A_FMT(!(Info->Type >= FEXCore::X86Tables::TYPE_GROUP_1 && Info->Type <= FEXCore::X86Tables::TYPE_GROUP_P), "Group Ops "
                                                                                                                          "should have "
                                                                                                                          "been decoded "
                                                                                                                          "before this!");

  uint8_t DestSize {};
  // Widening: REX.W/VEX.W or a decoded widening-size prefix in 64-bit mode.
  const bool HasWideningDisplacement =
    (FEXCore::X86Tables::DecodeFlags::GetOpAddr(DecodeInst->Flags, 0) & FEXCore::X86Tables::DecodeFlags::FLAG_WIDENING_SIZE_LAST) != 0 ||
    (Options.w && BlockInfo.Is64BitMode);
  // Narrowing: 0x66 operand-size override was the last size-affecting prefix.
  const bool HasNarrowingDisplacement =
    (FEXCore::X86Tables::DecodeFlags::GetOpAddr(DecodeInst->Flags, 0) & FEXCore::X86Tables::DecodeFlags::FLAG_OPERAND_SIZE_LAST) != 0;

  const bool HasXMMFlags = (Info->Flags & InstFlags::FLAGS_XMM_FLAGS) != 0;
  bool HasXMMSrc =
    HasXMMFlags && !HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_SRC_GPR) && !HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_MMX_SRC);
  bool HasXMMDst =
    HasXMMFlags && !HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_DST_GPR) && !HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_MMX_DST);
  bool HasMMSrc =
    HasXMMFlags && !HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_SRC_GPR) && HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_MMX_SRC);
  bool HasMMDst =
    HasXMMFlags && !HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_DST_GPR) && HAS_XMM_SUBFLAG(Info->Flags, InstFlags::FLAGS_SF_MMX_DST);

  // Is ModRM present via explicit instruction encoded or REX?
  const bool HasMODRM = !!(Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_MODRM);

  const bool HasREX = !!(DecodeInst->Flags & DecodeFlags::FLAG_REX_PREFIX);
  const bool Has16BitAddressing = !BlockInfo.Is64BitMode && DecodeInst->Flags & DecodeFlags::FLAG_ADDRESS_SIZE;

  // Reject encodings that require a specific W bit value.
  if (Options.w && (Info->Flags & InstFlags::FLAGS_REX_W_0)) {
    return false;
  } else if (!Options.w && (Info->Flags & InstFlags::FLAGS_REX_W_1)) {
    return false;
  }

  // Reject encodings that require a specific VEX.L bit value.
  if (Options.L && (Info->Flags & InstFlags::FLAGS_VEX_L_0)) {
    return false;
  } else if (!Options.L && (Info->Flags & InstFlags::FLAGS_VEX_L_1)) {
    return false;
  }

  const bool UseVEXL = Options.L && !(Info->Flags & InstFlags::FLAGS_VEX_L_IGNORE);

  // This is used for ModRM register modification
  // For both modrm.reg and modrm.rm(when mod == 0b11) when value is >= 0b100
  // then it changes from expected registers to the high 8bits of the lower registers
  // Bit annoying to support
  // In the case of no modrm (REX in byte situation) then it is unaffected
  bool Is8BitSrc {};
  bool Is8BitDest {};

  // If we require ModRM and haven't decoded it yet, do it now
  // Some instructions have to read modrm upfront, others do it later
  if (HasMODRM && !(DecodeInst->Flags & DecodeFlags::FLAG_DECODED_MODRM)) {
    DecodeInst->ModRM = ReadByte();
    DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_MODRM;
  }

  // New instruction size decoding
  {
    // Decode destinations first
    const auto DstSizeFlag = FEXCore::X86Tables::InstFlags::GetSizeDstFlags(Info->Flags);
    const auto SrcSizeFlag = FEXCore::X86Tables::InstFlags::GetSizeSrcFlags(Info->Flags);

    if (DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_8BIT) {
      DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_8BIT);
      DestSize = 1;
      Is8BitDest = true;
    } else if (DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_16BIT) {
      DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_16BIT);
      DestSize = 2;
    } else if (DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_128BIT) {
      // VEX.L promotes a 128-bit operation to 256-bit.
      if (UseVEXL) {
        DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_256BIT);
        DestSize = 32;
      } else {
        DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_128BIT);
        DestSize = 16;
      }
    } else if (DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_256BIT) {
      DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_256BIT);
      DestSize = 32;
    } else if (HasNarrowingDisplacement &&
               (DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_DEF || DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_64BITDEF)) {
      // See table 1-2. Operand-Size Overrides for this decoding
      // If the default operating mode is 32bit and we have the operand size flag then the operating size drops to 16bit
      DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_16BIT);
      DestSize = 2;
    } else if ((HasXMMDst || HasMMDst || BlockInfo.Is64BitMode) && (HasWideningDisplacement || DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_64BIT ||
                                                                    DstSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_64BITDEF)) {
      DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_64BIT);
      DestSize = 8;
    } else {
      DecodeInst->Flags |= DecodeFlags::GenSizeDstSize(DecodeFlags::SIZE_32BIT);
      DestSize = 4;
    }

    // Decode sources
    if (SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_8BIT) {
      DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_8BIT);
      Is8BitSrc = true;
    } else if (SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_16BIT) {
      DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_16BIT);
    } else if (SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_128BIT) {
      if (UseVEXL) {
        DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_256BIT);
      } else {
        DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_128BIT);
      }
    } else if (SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_256BIT) {
      DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_256BIT);
    } else if (HasNarrowingDisplacement &&
               (SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_DEF || SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_64BITDEF)) {
      // See table 1-2. Operand-Size Overrides for this decoding
      // If the default operating mode is 32bit and we have the operand size flag then the operating size drops to 16bit
      DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_16BIT);
    } else if ((HasXMMSrc || HasMMSrc || BlockInfo.Is64BitMode) && (HasWideningDisplacement || SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_64BIT ||
                                                                    SrcSizeFlag == FEXCore::X86Tables::InstFlags::SIZE_64BITDEF)) {
      DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_64BIT);
    } else {
      DecodeInst->Flags |= DecodeFlags::GenSizeSrcSize(DecodeFlags::SIZE_32BIT);
    }
  }

  auto* CurrentDest = &DecodeInst->Dest;

  if (HAS_NON_XMM_SUBFLAG(Info->Flags, FEXCore::X86Tables::InstFlags::FLAGS_SF_DST_RAX) ||
      HAS_NON_XMM_SUBFLAG(Info->Flags, FEXCore::X86Tables::InstFlags::FLAGS_SF_DST_RDX)) {
    // Some instructions hardcode their destination as RAX
    CurrentDest->Type = DecodedOperand::OpType::GPR;
    CurrentDest->Data.GPR.HighBits = false;
    CurrentDest->Data.GPR.GPR =
      HAS_NON_XMM_SUBFLAG(Info->Flags, FEXCore::X86Tables::InstFlags::FLAGS_SF_DST_RAX) ? FEXCore::X86State::REG_RAX : FEXCore::X86State::REG_RDX;
    CurrentDest = &DecodeInst->Src[0];
  } else if (HAS_NON_XMM_SUBFLAG(Info->Flags, FEXCore::X86Tables::InstFlags::FLAGS_SF_REX_IN_BYTE)) {
    LOGMAN_THROW_A_FMT(!HasMODRM, "This instruction shouldn't have ModRM!");

    // If the REX is in the byte that means the lower nibble of the OP contains the destination GPR
    // This also means that the destination is always a GPR on these ones
    // ADDITIONALLY:
    // If there is a REX prefix then that allows extended GPR usage
    CurrentDest->Type = DecodedOperand::OpType::GPR;
    DecodeInst->Dest.Data.GPR.HighBits = (Is8BitDest && !HasREX && (Op & 0b111) >= 0b100);
    CurrentDest->Data.GPR.GPR =
      MapModRMToReg(DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_B ? 1 : 0, Op & 0b111, Is8BitDest, HasREX, false, false);

    if (CurrentDest->Data.GPR.GPR == FEXCore::X86State::REG_INVALID) {
      return false;
    }
  }

  // Number of trailing immediate/literal bytes this encoding carries.
  uint8_t Bytes = Info->MoreBytes;

  if ((Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_DISPLACE_SIZE_MUL_2) && HasWideningDisplacement) {
    Bytes <<= 1;
  }
  if ((Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_DISPLACE_SIZE_DIV_2) && HasNarrowingDisplacement) {
    Bytes >>= 1;
  }

  if ((Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_MEM_OFFSET) && (DecodeInst->Flags & DecodeFlags::FLAG_ADDRESS_SIZE)) {
    // If we have a memory offset and have the address size override then divide it just like narrowing displacement
    Bytes >>= 1;
  }

  // Decodes the ModRM-based operand pair: the modrm.reg operand is always a
  // register; the modrm.rm operand is a register for mod == 0b11 or a memory
  // operand otherwise. Returns false on invalid mod/reg constraints.
  auto ModRMOperand = [&](FEXCore::X86Tables::DecodedOperand& GPR, FEXCore::X86Tables::DecodedOperand& NonGPR, bool HasXMMGPR,
                          bool HasXMMNonGPR, bool HasMMGPR, bool HasMMNonGPR, bool GPR8Bit, bool NonGPR8Bit) {
    FEXCore::X86Tables::ModRMDecoded ModRM;
    ModRM.Hex = DecodeInst->ModRM;

    if (ModRM.reg != 0b000 && (Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SF_MOD_ZERO_REG)) {
      return false;
    }

    if (ModRM.mod == 0b11 && (Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SF_MOD_MEM_ONLY)) {
      return false;
    }

    if (ModRM.mod != 0b11 && (Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SF_MOD_REG_ONLY)) {
      return false;
    }

    // Decode the GPR source first
    GPR.Type = DecodedOperand::OpType::GPR;
    GPR.Data.GPR.HighBits = (GPR8Bit && ModRM.reg >= 0b100 && !HasREX);
    GPR.Data.GPR.GPR = MapModRMToReg(DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_R ? 1 : 0, ModRM.reg, GPR8Bit, HasREX, HasXMMGPR, HasMMGPR);

    if (GPR.Data.GPR.GPR == FEXCore::X86State::REG_INVALID) {
      return false;
    }

    // ModRM.mod == 0b11 == Register
    // ModRM.Mod != 0b11 == Register-direct addressing
    if (ModRM.mod == 0b11) {
      NonGPR.Type = DecodedOperand::OpType::GPR;
      NonGPR.Data.GPR.HighBits = (NonGPR8Bit && ModRM.rm >= 0b100 && !HasREX);
      NonGPR.Data.GPR.GPR =
        MapModRMToReg(DecodeInst->Flags & DecodeFlags::FLAG_REX_XGPR_B ? 1 : 0, ModRM.rm, NonGPR8Bit, HasREX, HasXMMNonGPR, HasMMNonGPR);
      if (NonGPR.Data.GPR.GPR == FEXCore::X86State::REG_INVALID) {
        return false;
      }
    } else {
      // Only decode if we haven't pre-decoded
      if (NonGPR.IsNone()) {
        // Dispatch to 16-bit or 32/64-bit memory operand decoding.
        auto Disp = DecodeModRMs_Disp[Has16BitAddressing];
        (this->*Disp)(&NonGPR, ModRM);
      }
    }

    return true;
  };

  size_t CurrentSrc = 0;

  const auto VEXOperand = Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_VEX_SRC_MASK;
  if (VEXOperand == FEXCore::X86Tables::InstFlags::FLAGS_VEX_NO_OPERAND && Options.vvvv) {
    return false;
  }

  if (VEXOperand == FEXCore::X86Tables::InstFlags::FLAGS_VEX_1ST_SRC) {
    DecodeInst->Src[CurrentSrc].Type = DecodedOperand::OpType::GPR;
    DecodeInst->Src[CurrentSrc].Data.GPR.HighBits = false;

    // If we have XMM flags at all, then SRC 1 cannot be a GPR. The only case where
    // this is possible is with BMI1 and BMI2 instructions (which are all GPR-based
    // and don't use XMM flags)
    DecodeInst->Src[CurrentSrc].Data.GPR.GPR = MapVEXToReg(Options.vvvv, HasXMMFlags);

    ++CurrentSrc;
  }

  if (Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_MODRM) {
    if (Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SF_MOD_DST) {
      // modrm.reg is the source, modrm.rm is the destination.
      if (!ModRMOperand(DecodeInst->Src[CurrentSrc], DecodeInst->Dest, HasXMMSrc, HasXMMDst, HasMMSrc, HasMMDst, Is8BitSrc, Is8BitDest)) {
        return false;
      }
    } else {
      if (!ModRMOperand(DecodeInst->Dest, DecodeInst->Src[CurrentSrc], HasXMMDst, HasXMMSrc, HasMMDst, HasMMSrc, Is8BitDest, Is8BitSrc)) {
        return false;
      }
    }
    ++CurrentSrc;
  }

  if (VEXOperand == FEXCore::X86Tables::InstFlags::FLAGS_VEX_2ND_SRC) {
    DecodeInst->Src[CurrentSrc].Type = DecodedOperand::OpType::GPR;
    DecodeInst->Src[CurrentSrc].Data.GPR.HighBits = false;
    DecodeInst->Src[CurrentSrc].Data.GPR.GPR = MapVEXToReg(Options.vvvv, HasXMMSrc);
    ++CurrentSrc;
  }

  if (HAS_NON_XMM_SUBFLAG(Info->Flags, FEXCore::X86Tables::InstFlags::FLAGS_SF_SRC_RAX)) {
    DecodeInst->Src[CurrentSrc].Type = DecodedOperand::OpType::GPR;
    DecodeInst->Src[CurrentSrc].Data.GPR.HighBits = false;
    DecodeInst->Src[CurrentSrc].Data.GPR.GPR = FEXCore::X86State::REG_RAX;
    ++CurrentSrc;
  } else if (HAS_NON_XMM_SUBFLAG(Info->Flags, FEXCore::X86Tables::InstFlags::FLAGS_SF_SRC_RCX)) {
    DecodeInst->Src[CurrentSrc].Type = DecodedOperand::OpType::GPR;
    DecodeInst->Src[CurrentSrc].Data.GPR.HighBits = false;
    DecodeInst->Src[CurrentSrc].Data.GPR.GPR = FEXCore::X86State::REG_RCX;
    ++CurrentSrc;
  }

  if (VEXOperand == FEXCore::X86Tables::InstFlags::FLAGS_VEX_DST) {
    CurrentDest->Type = DecodedOperand::OpType::GPR;
    CurrentDest->Data.GPR.HighBits = false;
    CurrentDest->Data.GPR.GPR = MapVEXToReg(Options.vvvv, HasXMMDst);
  }

  // Read the trailing immediate/literal data, if any.
  if (Bytes != 0) {
    LOGMAN_THROW_A_FMT(Bytes <= 8, "Number of bytes should be <= 8 for literal src");

    DecodeInst->Src[CurrentSrc].Data.Literal.Size = Bytes;

    uint64_t Literal = ReadData(Bytes);

    if ((Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SRC_SEXT) || (DecodeFlags::GetSizeDstFlags(DecodeInst->Flags) == DecodeFlags::SIZE_64BIT &&
                                                                          Info->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SRC_SEXT64BIT)) {
      if (Bytes == 1) {
        Literal = static_cast<int8_t>(Literal);
      } else if (Bytes == 2) {
        Literal = static_cast<int16_t>(Literal);
      } else {
        Literal = static_cast<int32_t>(Literal);
      }
      DecodeInst->Src[CurrentSrc].Data.Literal.Size = DestSize;
      DecodeInst->Src[CurrentSrc].Data.Literal.SignExtend = true;
    }

    DecodeInst->Src[CurrentSrc].Type = DecodedOperand::OpType::Literal;
    DecodeInst->Src[CurrentSrc].Data.Literal.Value = Literal;
    ++CurrentSrc;

    // 8-byte immediates get split into two 4-byte literal sources; the second
    // holds the upper half.
    if (Bytes == 8) [[unlikely]] {
      DecodeInst->Src[CurrentSrc].Data.Literal.Size = 4;
      DecodeInst->Src[CurrentSrc].Type = DecodedOperand::OpType::Literal;
      DecodeInst->Src[CurrentSrc].Data.Literal.Value = Literal >> 32;
    }

    Bytes = 0;
  }

  LOGMAN_THROW_A_FMT(Bytes == 0, "Inst at 0x{:x}: 0x{:04x} '{}' Had an instruction of size {} with {} remaining", DecodeInst->PC,
                     DecodeInst->OP, DecodeInst->TableInfo->Name ?: "UND", InstructionSize, Bytes);
  DecodeInst->InstSize = InstructionSize;
  return true;
}

// Resolves an opcode's table entry before operand decoding: dispatches through
// primary/secondary group tables, the secondary ModRM table, X87 tables, and
// VEX tables (including VEX group tables), then hands off to NormalOp.
// Returns false for unknown, invalid, or unsupported (e.g. EVEX) encodings.
bool Decoder::NormalOpHeader(const FEXCore::X86Tables::X86InstInfo* Info, uint16_t Op) {
  DecodeInst->OPRaw = DecodeInst->OP = Op;
  DecodeInst->TableInfo = Info;

  if (Info->Type == FEXCore::X86Tables::TYPE_UNKNOWN) {
    return false;
  }

  if (Info->Type == FEXCore::X86Tables::TYPE_INVALID) {
    return false;
  }

  LOGMAN_THROW_A_FMT(Info->Type != FEXCore::X86Tables::TYPE_REX_PREFIX, "REX PREFIX should have been decoded before this!");

  // A normal instruction is the most likely.
  if (Info->Type == FEXCore::X86Tables::TYPE_INST) [[likely]] {
    return NormalOp(Info, Op);
  } else if (Info->Type == FEXCore::X86Tables::TYPE_ARCH_DISPATCHER) [[unlikely]] {
    // Dispatcher Op.
    return NormalOp(&Info->OpcodeDispatcher.Indirect[BlockInfo.Is64BitMode ? 1 : 0], Op);
  } else if (Info->Type >= FEXCore::X86Tables::TYPE_GROUP_1 && Info->Type <= FEXCore::X86Tables::TYPE_GROUP_11) {
    // Primary-table group: opcode is refined by modrm.reg.
    uint8_t ModRMByte = ReadByte();
    DecodeInst->ModRM = ModRMByte;
    DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_MODRM;

    FEXCore::X86Tables::ModRMDecoded ModRM;
    ModRM.Hex = DecodeInst->ModRM;

#define OPD(group, prefix, Reg) (((group - FEXCore::X86Tables::TYPE_GROUP_1) << 6) | (prefix) << 3 | (Reg))
    Op = OPD(Info->Type, Info->MoreBytes, ModRM.reg);
    return NormalOp(&PrimaryInstGroupOps[Op], Op);
#undef OPD
  } else if (Info->Type >= FEXCore::X86Tables::TYPE_GROUP_6 && Info->Type <= FEXCore::X86Tables::TYPE_GROUP_P) {
    // Secondary-table group: opcode is refined by the last escape prefix
    // (none/F3/66/F2) and modrm.reg.
#define OPD(group, prefix, Reg) (((group - FEXCore::X86Tables::TYPE_GROUP_6) << 5) | (prefix) << 3 | (Reg))
    constexpr uint16_t PF_NONE = 0;
    constexpr uint16_t PF_F3 = 1;
    constexpr uint16_t PF_66 = 2;
    constexpr uint16_t PF_F2 = 3;

    uint16_t PrefixType = PF_NONE;
    if (LastEscapePrefix == 0xF3) {
      PrefixType = PF_F3;
    } else if (LastEscapePrefix == 0xF2) {
      PrefixType = PF_F2;
    } else if (LastEscapePrefix == 0x66) {
      PrefixType = PF_66;
    }

    // We have ModRM
    uint8_t ModRMByte = ReadByte();
    DecodeInst->ModRM = ModRMByte;
    DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_MODRM;

    FEXCore::X86Tables::ModRMDecoded ModRM;
    ModRM.Hex = DecodeInst->ModRM;

    uint16_t LocalOp = OPD(Info->Type, PrefixType, ModRM.reg);
    const FEXCore::X86Tables::X86InstInfo* LocalInfo = &SecondInstGroupOps[LocalOp];
#undef OPD
    if (LocalInfo->Type == FEXCore::X86Tables::TYPE_SECOND_GROUP_MODRM && ModRM.mod == 0b11) {
      // Everything in this group is privileged instructions aside from XGETBV
      constexpr std::array<uint8_t, 8> RegToField = {
        255, 0, 1, 2, 255, 255, 255, 3,
      };
      uint8_t Field = RegToField[ModRM.reg];
      if (Field == 255) {
        return false;
      }

      LocalOp = (Field << 3) | ModRM.rm;
      return NormalOp(&SecondModRMTableOps[LocalOp], LocalOp);
    } else {
      return NormalOp(&SecondInstGroupOps[LocalOp], LocalOp);
    }
  } else if (Info->Type == FEXCore::X86Tables::TYPE_X87_TABLE_PREFIX) {
    // We have ModRM
    uint8_t ModRMByte = ReadByte();
    DecodeInst->ModRM = ModRMByte;
    DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_MODRM;

    // X87 ops are indexed by the D8-DF opcode byte combined with the full
    // ModRM byte.
    uint16_t X87Op = ((Op - 0xD8) << 8) | ModRMByte;
    return NormalOp(&(*X87Table)[X87Op], X87Op);
  } else if (Info->Type == FEXCore::X86Tables::TYPE_VEX_TABLE_PREFIX) {
    if (!VEXTable) {
      // AVX not enabled.
      return false;
    }

    uint16_t map_select = 1;
    uint16_t pp = 0;
    const uint8_t Byte1 = ReadByte();
    DecodedHeader options {};

    // VEX stores inverted REX bits; a clear top bit means REX.R is set,
    // which is only valid in 64-bit mode.
    if ((Byte1 & 0b10000000) == 0) {
      if (!BlockInfo.Is64BitMode) {
        return false;
      }

      DecodeInst->Flags |= DecodeFlags::FLAG_REX_XGPR_R;
    }

    if (Op == 0xC5) { // Two byte VEX
      pp = Byte1 & 0b11;
      options.vvvv = 15 - ((Byte1 & 0b01111000) >> 3);
      options.L = (Byte1 & 0b100) != 0;
    } else { // 0xC4 = Three byte VEX
      const uint8_t Byte2 = ReadByte();
      pp = Byte2 & 0b11;
      map_select = Byte1 & 0b11111;
      options.vvvv = 15 - ((Byte2 & 0b01111000) >> 3);
      options.w = (Byte2 & 0b10000000) != 0;
      options.L = (Byte2 & 0b100) != 0;
      if ((Byte1 & 0b01000000) == 0) {
        if (!BlockInfo.Is64BitMode) {
          return false;
        }
        DecodeInst->Flags |= DecodeFlags::FLAG_REX_XGPR_X;
      }
      if (BlockInfo.Is64BitMode && (Byte1 & 0b00100000) == 0) {
        DecodeInst->Flags |= DecodeFlags::FLAG_REX_XGPR_B;
      }
      if (options.w) {
        DecodeInst->Flags |= DecodeFlags::FLAG_OPTION_AVX_W;
      }
      // Only map_select 1-3 (0F, 0F38, 0F3A) are defined for VEX.
      if (!(map_select >= 1 && map_select <= 3)) {
        return false;
      }
    }

    uint16_t VEXOp = ReadByte();
#define OPD(map_select, pp, opcode) (((map_select - 1) << 10) | (pp << 8) | (opcode))
    Op = OPD(map_select, pp, VEXOp);
#undef OPD

    const FEXCore::X86Tables::X86InstInfo* LocalInfo = &(*VEXTable)[Op];

    if (LocalInfo->Type >= FEXCore::X86Tables::TYPE_VEX_GROUP_12 && LocalInfo->Type <= FEXCore::X86Tables::TYPE_VEX_GROUP_17) {
      // We have ModRM
      uint8_t ModRMByte = ReadByte();
      DecodeInst->ModRM = ModRMByte;
      DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_MODRM;

      FEXCore::X86Tables::ModRMDecoded ModRM;
      ModRM.Hex = DecodeInst->ModRM;

#define OPD(group, pp, opcode) (((group - TYPE_VEX_GROUP_12) << 4) | (pp << 3) | (opcode))
      Op = OPD(LocalInfo->Type, pp, ModRM.reg);
#undef OPD
      return NormalOp(&(*VEXTableGroup)[Op], Op, options);
    } else {
      return NormalOp(LocalInfo, Op, options);
    }
  } else if (Info->Type == FEXCore::X86Tables::TYPE_GROUP_EVEX) {
    FEXCORE_TELEMETRY_SET(TYPE_USES_EVEX_OPS, 1);
    // EVEX unsupported
    return false;
  }

  LOGMAN_MSG_A_FMT("Invalid instruction decoding type");
  FEX_UNREACHABLE;
}

// Decodes a single x86 instruction starting at guest address PC into the next
// slot of DecodedBuffer. Consumes legacy/REX prefixes in a loop, then hands
// off to the appropriate opcode table handler. Returns false on any decode
// failure (oversized instruction, unsupported encoding, etc).
bool Decoder::DecodeInstructionImpl(uint64_t PC) {
  // Reset per-instruction decode state.
  InstructionSize = 0;
  LastEscapePrefix = 0;
  Instruction.fill(0);

  DecodeInst = &DecodedBuffer[DecodedSize];
  memset(DecodeInst, 0, sizeof(DecodedInst));
  DecodeInst->PC = PC;

  for (;;) {
    // x86 instructions are architecturally capped at MAX_INST_SIZE bytes;
    // anything longer is an invalid encoding.
    if (InstructionSize >= MAX_INST_SIZE) {
      return false;
    }
    uint8_t Op = ReadByte();
    switch (Op) {
    case 0x0F: { // Escape Op
      uint8_t EscapeOp = ReadByte();
      switch (EscapeOp) {
      case 0x0F:
        [[unlikely]] { // 3DNow!
          // 3DNow! Instruction Encoding: 0F 0F [ModRM] [SIB] [Displacement] [Opcode]
          // Decode ModRM
          uint8_t ModRMByte = ReadByte();
          DecodeInst->ModRM = ModRMByte;
          DecodeInst->Flags |= DecodeFlags::FLAG_DECODED_MODRM;

          FEXCore::X86Tables::ModRMDecoded ModRM;
          ModRM.Hex = DecodeInst->ModRM;

          const bool Has16BitAddressing = !BlockInfo.Is64BitMode && DecodeInst->Flags & DecodeFlags::FLAG_ADDRESS_SIZE;

          // All 3DNow! instructions have the second argument as the rm handler
          // We need to decode it upfront to get the displacement out of the way
          if (ModRM.mod != 0b11) {
            auto Disp = DecodeModRMs_Disp[Has16BitAddressing];
            (this->*Disp)(&DecodeInst->Src[0], ModRM);
          }

          // Take a peek at the op just past the displacement
          uint8_t LocalOp = ReadByte();
          return NormalOpHeader(&FEXCore::X86Tables::DDDNowOps[LocalOp], LocalOp);
          break;
        }
      case 0x38: { // F38 Table!
        // The 0F38 table is subdivided by mandatory prefix; encode the active
        // prefixes into the upper byte of the table index.
        constexpr uint16_t PF_38_NONE = 0;
        constexpr uint16_t PF_38_66 = (1U << 0);
        constexpr uint16_t PF_38_F2 = (1U << 1);
        constexpr uint16_t PF_38_F3 = (1U << 2);

        uint16_t Prefix = PF_38_NONE;
        if (DecodeInst->Flags & DecodeFlags::FLAG_OPERAND_SIZE) {
          Prefix |= PF_38_66;
        }
        if (DecodeInst->Flags & DecodeFlags::FLAG_REPNE_PREFIX) {
          Prefix |= PF_38_F2;
        }
        if (DecodeInst->Flags & DecodeFlags::FLAG_REP_PREFIX) {
          Prefix |= PF_38_F3;
        }

        uint16_t LocalOp = (Prefix << 8) | ReadByte();

        bool NoOverlay66 = (FEXCore::X86Tables::H0F38TableOps[LocalOp].Flags & InstFlags::FLAGS_NO_OVERLAY66) != 0;
        if (LastEscapePrefix == 0x66 && NoOverlay66) { // Operand Size
          // Remove prefix so it doesn't affect calculations.
          // This is only an escape prefix rather than modifier now
          DecodeInst->Flags &= ~DecodeFlags::FLAG_OPERAND_SIZE;
          DecodeFlags::PopOpAddrIf(&DecodeInst->Flags, DecodeFlags::FLAG_OPERAND_SIZE_LAST);
        }

        return NormalOpHeader(&FEXCore::X86Tables::H0F38TableOps[LocalOp], LocalOp);
        break;
      }
      case 0x3A: { // F3A Table!
        // 0F3A table selection also folds in REX.W (unlike 0F38).
        constexpr uint16_t PF_3A_NONE = 0;
        constexpr uint16_t PF_3A_66 = (1 << 0);
        constexpr uint16_t PF_3A_REX = (1 << 1);

        uint16_t Prefix = PF_3A_NONE;
        if (LastEscapePrefix == 0x66) { // Operand Size
          Prefix = PF_3A_66;
        }

        if (DecodeInst->Flags & DecodeFlags::FLAG_REX_WIDENING) {
          Prefix |= PF_3A_REX;
        }

        uint16_t LocalOp = (Prefix << 8) | ReadByte();
        return NormalOpHeader(&FEXCore::X86Tables::H0F3ATableOps[LocalOp], LocalOp);
        break;
      }
      default:
        [[likely]] { // Two byte table!
          // x86-64 abuses three legacy prefixes to extend the table encodings
          // 0x66 - Operand Size prefix
          // 0xF2 - REPNE prefix
          // 0xF3 - REP prefix
          // If any of these three prefixes are used then it falls down the subtable
          // Additionally: If you hit repeat of different prefixes then only the LAST one before this one works for subtable selection
          bool NoOverlay = (FEXCore::X86Tables::SecondBaseOps[EscapeOp].Flags & InstFlags::FLAGS_NO_OVERLAY) != 0;
          bool NoOverlay66 = (FEXCore::X86Tables::SecondBaseOps[EscapeOp].Flags & InstFlags::FLAGS_NO_OVERLAY66) != 0;

          if (NoOverlay) { // This section of the table ignores prefix extension
            return NormalOpHeader(&FEXCore::X86Tables::SecondBaseOps[EscapeOp], EscapeOp);
          } else if (LastEscapePrefix == 0xF3) { // REP
            // Remove prefix so it doesn't affect calculations.
            // This is only an escape prefix rather than modifier now
            DecodeInst->Flags &= ~DecodeFlags::FLAG_REP_PREFIX;
            return NormalOpHeader(&FEXCore::X86Tables::RepModOps[EscapeOp], EscapeOp);
          } else if (LastEscapePrefix == 0xF2) { // REPNE
            // Remove prefix so it doesn't affect calculations.
            // This is only an escape prefix rather than modifier now
            DecodeInst->Flags &= ~DecodeFlags::FLAG_REPNE_PREFIX;
            return NormalOpHeader(&FEXCore::X86Tables::RepNEModOps[EscapeOp], EscapeOp);
          } else if (LastEscapePrefix == 0x66 && !NoOverlay66) { // Operand Size
            // Remove prefix so it doesn't affect calculations.
            // This is only an escape prefix rather than modifier now
            DecodeInst->Flags &= ~DecodeFlags::FLAG_OPERAND_SIZE;
            DecodeFlags::PopOpAddrIf(&DecodeInst->Flags, DecodeFlags::FLAG_OPERAND_SIZE_LAST);
            return NormalOpHeader(&FEXCore::X86Tables::OpSizeModOps[EscapeOp], EscapeOp);
          } else {
            return NormalOpHeader(&FEXCore::X86Tables::SecondBaseOps[EscapeOp], EscapeOp);
          }
          break;
        }
      }
      break;
    }
    case 0x66: // Operand Size prefix
      DecodeInst->Flags |= DecodeFlags::FLAG_OPERAND_SIZE;
      LastEscapePrefix = Op;
      DecodeFlags::PushOpAddr(&DecodeInst->Flags, DecodeFlags::FLAG_OPERAND_SIZE_LAST);
      break;
    case 0x67: // Address Size override prefix
      DecodeInst->Flags |= DecodeFlags::FLAG_ADDRESS_SIZE;
      break;
    case 0x26: // ES legacy prefix
      // ES/CS/SS/DS segment overrides are ignored entirely in 64-bit mode.
      if (!BlockInfo.Is64BitMode) {
        DecodeInst->Flags = (DecodeInst->Flags & ~FEXCore::X86Tables::DecodeFlags::FLAG_SEGMENTS) | DecodeFlags::FLAG_ES_PREFIX;
      }
      break;
    case 0x2E: // CS legacy prefix
      if (!BlockInfo.Is64BitMode) {
        DecodeInst->Flags = (DecodeInst->Flags & ~FEXCore::X86Tables::DecodeFlags::FLAG_SEGMENTS) | DecodeFlags::FLAG_CS_PREFIX;
      }
      break;
    case 0x36: // SS legacy prefix
      if (!BlockInfo.Is64BitMode) {
        DecodeInst->Flags = (DecodeInst->Flags & ~FEXCore::X86Tables::DecodeFlags::FLAG_SEGMENTS) | DecodeFlags::FLAG_SS_PREFIX;
      }
      break;
    case 0x3E: // DS legacy prefix
      if (!BlockInfo.Is64BitMode) {
        DecodeInst->Flags = (DecodeInst->Flags & ~FEXCore::X86Tables::DecodeFlags::FLAG_SEGMENTS) | DecodeFlags::FLAG_DS_PREFIX;
      }
      break;
    case 0xF0: // LOCK prefix
      DecodeInst->Flags |= DecodeFlags::FLAG_LOCK;
      break;
    case 0xF2: // REPNE prefix
      DecodeInst->Flags |= DecodeFlags::FLAG_REPNE_PREFIX;
      LastEscapePrefix = Op;
      break;
    case 0xF3: // REP prefix
      DecodeInst->Flags |= DecodeFlags::FLAG_REP_PREFIX;
      LastEscapePrefix = Op;
      break;
    case 0x64: // FS prefix
      // FS/GS overrides are honored in both 32-bit and 64-bit modes.
      DecodeInst->Flags = (DecodeInst->Flags & ~FEXCore::X86Tables::DecodeFlags::FLAG_SEGMENTS) | DecodeFlags::FLAG_FS_PREFIX;
      break;
    case 0x65: // GS prefix
      DecodeInst->Flags = (DecodeInst->Flags & ~FEXCore::X86Tables::DecodeFlags::FLAG_SEGMENTS) | DecodeFlags::FLAG_GS_PREFIX;
      break;
    default:
      [[likely]] { // Default base table
        const X86InstInfo* Info = &FEXCore::X86Tables::BaseOps[Op];
        if (Info->Type == FEXCore::X86Tables::TYPE_ARCH_DISPATCHER) {
          // Opcode behaves differently between 32-bit and 64-bit modes;
          // pick the mode-specific entry.
          Info = &Info->OpcodeDispatcher.Indirect[BlockInfo.Is64BitMode ? 1 : 0];
        }

        if (Info->Type == FEXCore::X86Tables::TYPE_REX_PREFIX) {
          DecodeInst->Flags |= DecodeFlags::FLAG_REX_PREFIX;

          // Widening displacement
          if (Op & 0b1000) {
            DecodeInst->Flags |= DecodeFlags::FLAG_REX_WIDENING;
            DecodeFlags::PushOpAddr(&DecodeInst->Flags, DecodeFlags::FLAG_WIDENING_SIZE_LAST);
          }

          // XGPR_B bit set
          if (Op & 0b0001) {
            DecodeInst->Flags |= DecodeFlags::FLAG_REX_XGPR_B;
          }

          // XGPR_X bit set
          if (Op & 0b0010) {
            DecodeInst->Flags |= DecodeFlags::FLAG_REX_XGPR_X;
          }

          // XGPR_R bit set
          if (Op & 0b0100) {
            DecodeInst->Flags |= DecodeFlags::FLAG_REX_XGPR_R;
          }
        } else {
          return NormalOpHeader(Info, Op);
        }

        break;
      }
    }
  }

  // NOTE(review): the loop above only ever exits via return (all breaks belong
  // to the switch), so this tail appears unreachable — confirm before relying
  // on the Dest.IsGPR() rejection below.
  if (DecodeInst->Dest.IsGPR()) {
    return false;
  }

  return true;
}

// Decodes one instruction at PC and classifies the result. On failure the
// decoded slot is left as an invalid instruction so the core can raise SIGILL
// if it is ever executed.
Decoder::DecodedBlockStatus Decoder::DecodeInstruction(uint64_t PC) {
  // Cleared up-front; DecodeInstructionImpl sets this if it tries to read
  // non-executable memory while fetching bytes.
  HitNonExecutableRange = false;
  const bool DecodedOk = DecodeInstructionImpl(PC);

  if (!DecodedOk || HitNonExecutableRange) [[unlikely]] {
    // We don't know the table or the true instruction size at this point.
    DecodeInst->TableInfo = nullptr;

    DecodedBlockStatus Status = DecodedBlockStatus::NOEXEC_INST;
    if (!DecodedOk) {
      Status = DecodedBlockStatus::INVALID_INST;
    } else if (DecodeInst->InstSize) {
      // Some bytes were consumed before hitting the non-executable range.
      Status = DecodedBlockStatus::PARTIAL_DECODE_INST;
    }

    DecodeInst->InstSize = 0;
    return Status;
  }

  if (!DecodeInst->TableInfo || (DecodeInst->TableInfo->Type == TYPE_INST && !DecodeInst->TableInfo->OpcodeDispatcher.OpDispatch)) {
    // Decoding itself succeeded but there is no dispatcher for the
    // instruction; claim invalid instruction.
    return DecodedBlockStatus::INVALID_INST;
  }

  if (CTX->AreMonoHacksActive()) {
    // Unity uses a standard SPSC ringbuffer with cached read/write pointers and
    // thread waiting flags at these displacements, consistent between 32-bit
    // and 64-bit Unity versions from 2015 onwards. Force TSO on matching
    // MOV load/store forms.
    const auto KnownAtomicDisp = [](uint64_t Disp) {
      switch (Disp) {
      case 0x80:
      case 0x84:
      case 0xC0:
      case 0xC4: return true;
      default: return false;
      }
    };

    const bool LoadMatch = DecodeInst->OP == 0x8b && DecodeInst->Src[0].IsGPRIndirect() &&
                           KnownAtomicDisp(DecodeInst->Src[0].Data.GPRIndirect.Displacement);
    const bool StoreMatch = DecodeInst->OP == 0x89 && DecodeInst->Dest.IsGPRIndirect() &&
                            KnownAtomicDisp(DecodeInst->Dest.Data.GPRIndirect.Displacement);

    if (LoadMatch || StoreMatch) {
      DecodeInst->Flags |= X86Tables::DecodeFlags::FLAG_FORCE_TSO;
    }
  }

  return DecodedBlockStatus::SUCCESS;
}

// Inspects the just-decoded RIP-setting instruction and, when multiblock is
// enabled, registers its target(s) with the multiblock walker if they fall in
// an acceptable range. Targets outside the range are recorded as external
// branches instead.
void Decoder::BranchTargetInMultiblockRange() {
  if (!CTX->Config.Multiblock) {
    return;
  }

  const auto GPRSize = GetGPROpSize();
  const uint64_t InstEnd = DecodeInst->PC + DecodeInst->InstSize;

  if (DecodeInst->TableInfo->Flags & FEXCore::X86Tables::InstFlags::FLAGS_CALL) {
    if (ExecutableRangeWritable && CTX->AreMonoHacksActive()) {
      // Mono generated code often contains noreturn calls with garbage following them, and calls are always backpatched
      // after CIL compilation leading to n recompiles for a multiblock with n calls. Choose to minimize stutters over
      // raw performance and disable tracking past calls for mono generated code.
      return;
    }

    // The return address is both a branch target and a block entry point.
    AddBranchTarget(InstEnd);
    BlockInfo.EntryPoints.emplace(InstEnd);
    return;
  }

  uint64_t TargetRIP = 0;
  bool Conditional = true;

  // Calls were handled above; classify the remaining RIP-setters by opcode.
  const auto Opcode = DecodeInst->OP;
  if ((Opcode >= 0x70 && Opcode <= 0x7F) || (Opcode >= 0x80 && Opcode <= 0x8F)) {
    // Conditional jumps: the source is a literal, so the target is
    // PC + InstSize + displacement.
    TargetRIP = InstEnd + DecodeInst->Src[0].Literal();
  } else if (Opcode == 0xE9 || Opcode == 0xEB) {
    // Unconditional JMP (rel32 / rel8).
    TargetRIP = InstEnd + DecodeInst->Src[0].Literal();
    Conditional = false;
  } else {
    // RET and anything else contributes no multiblock target.
    return;
  }

  if (GPRSize == IR::OpSize::i32Bit) {
    // 32-bit guests wrap addresses that go above 32 bits.
    TargetRIP &= 0xFFFFFFFFU;
  }

  if (Conditional) {
    // The fallthrough past a conditional branch is also a target.
    AddBranchTarget(InstEnd);
  }

  // Accept the target only if it is x86 code within the symbol ranges.
  // Distant forward branches are forbidden so the generated code better matches
  // the guest code layout, avoiding massive (range-wise) blocks in highly
  // fragmented guest code. Such branches are often not-taken branches to
  // garbage in obfuscated code.
  constexpr uint64_t MAX_FORWARD_BRANCH_DIST = FEXCore::Utils::FEX_PAGE_SIZE * 4;
  const uint64_t ForwardLimit = std::min(InstEnd + MAX_FORWARD_BRANCH_DIST, SymbolMaxAddress);
  bool ValidMultiblockMember = TargetRIP >= SymbolMinAddress && TargetRIP < ForwardLimit;

#ifdef _M_ARM_64EC
  ValidMultiblockMember = ValidMultiblockMember && !RtlIsEcCode(TargetRIP);
#endif

  if (!ValidMultiblockMember) {
    if (ExternalBranches) {
      ExternalBranches->insert(TargetRIP);
    }
    return;
  }

  if (Conditional) {
    // Track the conditional-branch extents before registering the target.
    MaxCondBranchForward = std::max(MaxCondBranchForward, TargetRIP);
    MaxCondBranchBackwards = std::min(MaxCondBranchBackwards, TargetRIP);
  }

  AddBranchTarget(TargetRIP);
}

// Detects mono METHOD_JUMP tailcall sites. While the mono call backpatching
// block can easily be detected due to it being the only one to contain
// SMC-faulting atomics, the tailcall jump backpatcher has changed several
// times across versions and can be partially inlined. So instead detect the
// tailcall site itself and force full non-signal-based SMC detection for that
// single block.
bool Decoder::IsBranchMonoTailcall(uint64_t NumInstructions) const {
  // Only jitted (writable) code is of interest.
  if (!ExecutableRangeWritable) {
    return false;
  }

  // See mini-{amd64,x86}.c in the mono codebase, specifically where METHOD_JUMP patches are emitted.
  if (GetGPROpSize() == IR::OpSize::i32Bit) {
    // 32-bit pattern:
    //   LEAVE
    //   <none> / NOP / MOV EAX, EAX / LEA EBP, [EBP+0]
    //   JMP imm32
    if (DecodeInst->OP != 0xE9 || NumInstructions < 2) {
      return false;
    }

    const auto Prev = std::prev(DecodeInst);
    if (Prev->OP == 0xC9) {
      // LEAVE directly precedes the JMP.
      return true;
    }

    // Otherwise exactly one filler instruction may sit between LEAVE and JMP.
    if (NumInstructions < 3 || std::prev(Prev)->OP != 0xC9) {
      return false;
    }

    const bool FillerIsNop = Prev->OP == 0x90;
    const bool FillerIsMovEaxEax = Prev->OP == 0x8B && Prev->ModRM == 0xC0;
    const bool FillerIsLeaEbp0 = Prev->OP == 0x8D && Prev->ModRM == 0x6D && Prev->Src[1].IsLiteral() && Prev->Src[1].Literal() == 0;
    return FillerIsNop || FillerIsMovEaxEax || FillerIsLeaEbp0;
  }

  // 64-bit patterns: an indirect JMP through a register (FF /4).
  FEXCore::X86Tables::ModRMDecoded ModRM;
  ModRM.Hex = DecodeInst->ModRM;
  if (DecodeInst->OPRaw != 0xFF || ModRM.reg != 4 || !DecodeInst->Src[0].IsGPR()) {
    return false;
  }

  const auto JumpReg = DecodeInst->Src[0].Data.GPR.GPR;
  if (JumpReg == FEXCore::X86State::REG_RAX) {
    // Found in versions of mono from 2024 onwards - matches:
    //   REX.W JMP rax
    // Require exactly the REX prefix + widening bits and no XGPR extensions.
    const auto RexBits = DecodeInst->Flags & (DecodeFlags::FLAG_REX_PREFIX | DecodeFlags::FLAG_REX_WIDENING | DecodeFlags::FLAG_REX_XGPR_B |
                                              DecodeFlags::FLAG_REX_XGPR_X | DecodeFlags::FLAG_REX_XGPR_R);
    return RexBits == (DecodeFlags::FLAG_REX_PREFIX | DecodeFlags::FLAG_REX_WIDENING);
  }

  if (JumpReg == FEXCore::X86State::REG_R11 && NumInstructions > 1) {
    // Found in older versions of mono - matches:
    //   MOV r11, imm64
    //   JMP r11
    const auto Prev = std::prev(DecodeInst);
    return Prev->OP == 0xBB && Prev->Dest.IsGPR() && Prev->Dest.Data.GPR.GPR == FEXCore::X86State::REG_R11;
  }

  return false;
}

// Returns true when decoding may continue past the current instruction within
// the same block: the instruction neither ends the block nor sets RIP, or it
// is the optimizable CALL-next-instruction GOT pattern.
bool Decoder::InstCanContinue() const {
  const uint64_t NextRIP = DecodeInst->PC + DecodeInst->InstSize;

  // Stop if the following instruction would begin exactly where another
  // block starts.
  if (NextRIP == NextBlockStartAddress) {
    return false;
  }

  const auto StopFlags = FEXCore::X86Tables::InstFlags::FLAGS_BLOCK_END | FEXCore::X86Tables::InstFlags::FLAGS_SETS_RIP;
  if ((DecodeInst->TableInfo->Flags & StopFlags) == 0) {
    return true;
  }

  if (DecodeInst->OP == 0xE8) { // CALL with an immediate target
    uint64_t TargetRIP = NextRIP + DecodeInst->Src[0].Literal();

    if (GetGPROpSize() == IR::OpSize::i32Bit) {
      // 32-bit guests wrap addresses that go above 32 bits.
      TargetRIP &= 0xFFFFFFFFU;
    }

    if (TargetRIP == NextRIP) {
      // The call targets the instruction just after itself: a GOT/PIC base
      // calculation. The OpDispatcher implementation optimizes this out, so
      // the block can keep going.
      return true;
    }
  }

  return false;
}

// Registers Target as a branch destination for the multiblock walker.
// If Target lands inside an already-decoded block, that block is split at the
// matching instruction boundary; if Target is misaligned with instruction
// boundaries, the branch is simply left as an edge out of the multiblock.
void Decoder::AddBranchTarget(uint64_t Target) {
  // Already decoded (or already handled by the split path below): nothing to do.
  if (VisitedBlocks.contains(Target)) {
    return;
  }

  // BlockInfo.Blocks is kept sorted by Entry; find the first block at or
  // after Target.
  auto BlockSuccIt = std::lower_bound(BlockInfo.Blocks.begin(), BlockInfo.Blocks.end(), Target,
                                      [](const auto& a, uint64_t Address) { return a.Entry < Address; });

  // An exact Entry match would have been caught by VisitedBlocks above.
  LOGMAN_THROW_A_FMT(BlockSuccIt == BlockInfo.Blocks.end() || BlockSuccIt->Entry != Target, "unexpected");

  if (BlockSuccIt != BlockInfo.Blocks.begin()) {
    // The predecessor block is the only one that could contain Target.
    auto BlockIt = std::prev(BlockSuccIt);
    if (BlockIt->Entry + BlockIt->Size > Target) {
      uint64_t SplitIdx = 0;
      uint64_t SplitAddr = BlockIt->Entry;
      // Find the instruction boundary of the split
      for (; SplitIdx < BlockIt->NumInstructions && SplitAddr < Target; SplitIdx++) {
        SplitAddr += BlockIt->DecodedInstructions[SplitIdx].InstSize;
      }
      uint64_t SplitOffset = SplitAddr - BlockIt->Entry;

      // Target > Entry here, so at least one instruction must precede the split.
      LOGMAN_THROW_A_FMT(SplitIdx != 0, "unexpected");

      if (SplitAddr == Target) {
        // Split at the boundary
        // The tail block aliases the original block's decoded instruction
        // storage; no instructions are copied.
        DecodedBlocks SplitBlock {
          .Entry = SplitAddr,
          .Size = BlockIt->Size - SplitOffset,
          .NumInstructions = BlockIt->NumInstructions - SplitIdx,
          .DecodedInstructions = BlockIt->DecodedInstructions + SplitIdx,
          .BlockStatus = BlockIt->BlockStatus,
        };

        // Shrink the head block to end at the split point.
        BlockIt->Size = SplitOffset;
        BlockIt->NumInstructions = SplitIdx;

        // NOTE: insert invalidates BlockIt/BlockSuccIt; neither is used afterwards.
        BlockInfo.Blocks.insert(BlockSuccIt, SplitBlock);
      } // else misaligned, leave as a branch out of the block

      // If we split a block then the target has already been visited as part of that, if it was
      // misaligned the jump will just leave the multiblock, mark it as visited to avoid running
      // this code path again and just bail out early.
      VisitedBlocks.insert(Target);
      return;
    }
  }

  CurrentBlockTargets.insert(Target);
  // A forward target below the current limit shrinks where the current block
  // must stop decoding.
  if (Target >= DecodeInst->PC + DecodeInst->InstSize && Target < NextBlockStartAddress) {
    NextBlockStartAddress = Target;
  }
}

// Translates a guest RIP to a host fetch pointer, redirecting reads of the
// Linux x86-64 VSyscall page to our embedded replacement code. That page
// doesn't exist on AArch64 and on x86_64 hosts it is emulated with faults on
// a region mapped with --xp permissions.
//   Offset     0: vgettimeofday
//   Offset 0x400: vtime
//   Offset 0x800: vgetcpu
const uint8_t* Decoder::AdjustAddrForSpecialRegion(const uint8_t* _InstStream, uint64_t EntryPoint, uint64_t RIP) {
  constexpr uint64_t VSyscall_Base = 0xFFFF'FFFF'FF60'0000ULL;
  constexpr uint64_t VSyscall_End = VSyscall_Base + 0x1000;

  const bool InVSyscallPage = RIP >= VSyscall_Base && RIP < VSyscall_End;
  if (InVSyscallPage && OSABI == FEXCore::HLE::SyscallOSABI::OS_LINUX64) {
    // Serve bytes from the embedded VSyscall implementation instead.
    return VSyscallData + (RIP - VSyscall_Base);
  }

  // Normal case: rebase the stream pointer so it addresses RIP.
  return _InstStream - EntryPoint + RIP;
}

// Top-level multiblock decode driver. Starting at PC, decodes instructions
// into DecodedBuffer and builds BlockInfo: a sorted list of decoded blocks,
// branch targets discovered along the way, entry points, and the set of code
// pages touched. Decoding stops at MaxInst instructions (or the configured
// default when MaxInst == 0).
void Decoder::DecodeInstructionsAtEntry(FEXCore::Core::InternalThreadState* Thread, const uint8_t* _InstStream, uint64_t PC, uint64_t MaxInst) {
  FEXCORE_PROFILE_SCOPED("DecodeInstructions");
  BlockInfo.TotalInstructionCount = 0;
  BlockInfo.Blocks.clear();
  VisitedBlocks.clear();
  // Reset internal state management
  DecodedSize = 0;
  MaxCondBranchForward = 0;
  MaxCondBranchBackwards = ~0ULL;
  DecodedBuffer = PoolObject.ReownOrClaimBuffer();

  // Decode operating mode from thread's CS segment.
  const auto CSSegment = Core::CPUState::GetSegmentFromIndex(Thread->CurrentFrame->State, Thread->CurrentFrame->State.cs_idx);
  BlockInfo.Is64BitMode = CSSegment->L == 1;
  LOGMAN_THROW_A_FMT(BlockInfo.Is64BitMode == CTX->Config.Is64BitMode, "Expected operating mode to not change at runtime!");

  // XXX: Load symbol data
  SymbolAvailable = false;
  EntryPoint = PC;
  BlockInfo.EntryPoints = {PC};
  InstStream = _InstStream;

  uint64_t TotalInstructions {};

  // If we don't have symbols available then we become a bit optimistic about multiblock ranges
  if (!SymbolAvailable) {
    // If we don't have a symbol available then assume all branches are valid for multiblock
    SymbolMaxAddress = SectionMaxAddress;
    SymbolMinAddress = EntryPoint;
  }

  DecodedMinAddress = EntryPoint;
  DecodedMaxAddress = EntryPoint;

  // Entry is a jump target
  BlocksToDecode = {PC};

  uint64_t CurrentCodePage = PC & FEXCore::Utils::FEX_PAGE_MASK;

  BlockInfo.CodePages = {CurrentCodePage};

  if (MaxInst == 0) {
    MaxInst = CTX->Config.MaxInstPerBlock;
  }

  bool EntryBlock {true};
  bool FinalInstruction {false};

  // Outer loop: drain the worklist of block start addresses, decoding each
  // block until a stop condition is hit.
  while (!FinalInstruction && !BlocksToDecode.empty()) {
    // BlocksToDecode is ordered; always pick the lowest pending address.
    auto BlockDecodeIt = BlocksToDecode.begin();
    uint64_t RIPToDecode = *BlockDecodeIt;
    BlocksToDecode.erase(BlockDecodeIt);
    VisitedBlocks.emplace(RIPToDecode);

    auto BlockSuccIt = std::lower_bound(BlockInfo.Blocks.begin(), BlockInfo.Blocks.end(), RIPToDecode,
                                        [](const auto& a, uint64_t Address) { return a.Entry < Address; });

    LOGMAN_THROW_A_FMT(BlockSuccIt == BlockInfo.Blocks.end() || BlockSuccIt->Entry != RIPToDecode, "unexpected");

    // This block may not decode past the nearest pending block start or the
    // next already-decoded block, whichever comes first.
    NextBlockStartAddress = ~0ULL;
    if (!BlocksToDecode.empty()) {
      // We just erased the lowest, the front is then the second lowest
      NextBlockStartAddress = *BlocksToDecode.begin();
    }
    if (BlockSuccIt != BlockInfo.Blocks.end() && BlockSuccIt->Entry < NextBlockStartAddress) {
      NextBlockStartAddress = BlockSuccIt->Entry;
    }
    LOGMAN_THROW_A_FMT(NextBlockStartAddress > RIPToDecode, "unexpected");

    // Insert the block now so it can be looked up and split if necessary on a backward edge
    auto BlockIt = BlockInfo.Blocks.emplace(BlockSuccIt);

    BlockIt->Entry = RIPToDecode;
    BlockIt->Size = 0;
    BlockIt->IsEntryPoint = EntryBlock;

    uint64_t PCOffset = 0;
    uint64_t BlockStartOffset = DecodedSize;
    bool EraseBlock = true; // Unset once the block contains an instruction

    BlockIt->DecodedInstructions = &DecodedBuffer[BlockStartOffset];
    BlockIt->NumInstructions = 0;

    // Do a bit of pointer math to figure out where we are in code
    InstStream = AdjustAddrForSpecialRegion(_InstStream, EntryPoint, RIPToDecode);

    // Inner loop: decode one instruction at a time until the block ends.
    while (1) {
      InstructionSize = 0;

      // MAX_INST_SIZE assumes worst case
      auto OpAddress = RIPToDecode + PCOffset;
      auto OpMaxAddress = OpAddress + MAX_INST_SIZE;

      auto OpMinPage = OpAddress & FEXCore::Utils::FEX_PAGE_MASK;
      auto OpMaxPage = OpMaxAddress & FEXCore::Utils::FEX_PAGE_MASK;

      if (!EntryBlock && OpMinPage == OpMaxPage && PeekByte(0).value_or(0) == 0 && PeekByte(1).value_or(0) == 0) [[unlikely]] {
        // End the multiblock early if we hit 2 consecutive null bytes (add [rax], al) in the same page with the
        // assumption we are most likely trying to explore garbage code.
        break;
      }

      // Track every code page the (worst-case sized) instruction may touch.
      if (OpMinPage != CurrentCodePage) {
        CurrentCodePage = OpMinPage;
        BlockInfo.CodePages.insert(CurrentCodePage);
      }

      if (OpMaxPage != CurrentCodePage) {
        CurrentCodePage = OpMaxPage;
        BlockInfo.CodePages.insert(CurrentCodePage);
      }

      BlockIt->BlockStatus = DecodeInstruction(OpAddress);
      uint64_t OpEndAddress = OpAddress + DecodeInst->InstSize;

      DecodedMinAddress = std::min(DecodedMinAddress, OpAddress);
      DecodedMaxAddress = std::max(DecodedMaxAddress, OpEndAddress);

      if (OpEndAddress > NextBlockStartAddress) {
        // This instruction would overlap with another so skip adding it to the multiblock
        break;
      }

      EraseBlock = false; // Block contains at least one valid instruction, so unset erase
      ++TotalInstructions;
      ++DecodedSize;
      ++BlockIt->NumInstructions;
      BlockIt->Size += DecodeInst->InstSize;

      // Can not continue this block at all on invalid instruction
      if (BlockIt->BlockStatus != DecodedBlockStatus::SUCCESS) [[unlikely]] {
        if (!EntryBlock) {
          // In multiblock configurations, we can early terminate any non-entrypoint blocks with the expectation that this won't get hit.
          // Improves compile-times.
          // Just need to undo additions that this block decoding has caused.
          TotalInstructions -= BlockIt->NumInstructions;
          DecodedSize = BlockStartOffset;
          InstStream -= PCOffset;
          EraseBlock = true;
        } else {
          LogMan::Msg::EFmt("{} instruction in entry block: {:X}",
                            BlockIt->BlockStatus == DecodedBlockStatus::INVALID_INST ? "Invalid" :
                            BlockIt->BlockStatus == DecodedBlockStatus::NOEXEC_INST  ? "NoExec" :
                                                                                       "PartialDecode",
                            OpAddress);
        }
        break;
      }

      // Check if we need to end the entire multiblock
      FinalInstruction = DecodedSize >= MaxInst || DecodedSize >= DefaultDecodedBufferSize || TotalInstructions >= MaxInst;
      if (FinalInstruction) {
        break;
      }

      if (!InstCanContinue()) {
        if (DecodeInst->TableInfo->Flags & FEXCore::X86Tables::InstFlags::FLAGS_SETS_RIP) {
          // If we have multiblock enabled
          // If the branch target is within our multiblock range then we can keep going on
          // We don't want to short circuit this since we want to calculate our ranges still
          // NOTE: This will invalidate BlockIt, this is fine as we immediately break from the loop and EraseBlock cannot be true
          BlockIt->ForceFullSMCDetection = CTX->AreMonoHacksActive() && IsBranchMonoTailcall(BlockIt->NumInstructions);
          BranchTargetInMultiblockRange();
        }

        break;
      }

      PCOffset += DecodeInst->InstSize;
      InstStream += DecodeInst->InstSize;
    }

    // NOTE: BlockIt is only valid here in the EraseBlock case
    if (EraseBlock) {
      BlockInfo.Blocks.erase(BlockIt);
    } else {
      // Newly discovered targets feed back into the worklist.
      BlocksToDecode.merge(CurrentBlockTargets);
    }

    CurrentBlockTargets.clear();
    EntryBlock = false;
  }

  BlockInfo.TotalInstructionCount = TotalInstructions;

  // Entry-point status may have changed as blocks were split; recompute from
  // the final EntryPoints set.
  for (auto& Block : BlockInfo.Blocks) {
    Block.IsEntryPoint = BlockInfo.EntryPoints.contains(Block.Entry);
  }
}

} // namespace FEXCore::Frontend
