/******************************************************************************
 * The MIT License (MIT)
 *
 * Copyright (c) 2020-2025 Baldur Karlsson
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 ******************************************************************************/

#pragma once

#include "api/replay/rdcarray.h"
#include "maths/vec.h"
#include "shaders/controlflow.h"
#include "spirv_common.h"
#include "spirv_processor.h"

// Assertion helpers for the debug simulation. In release builds these expand
// to a statement that evaluates the arguments (via (void), so side effects and
// unused-variable warnings are preserved/suppressed) but performs no check.
// In non-release builds they forward to the real RDCASSERT macros.
#if ENABLED(RDOC_RELEASE)
#define SPIRV_DEBUG_RDCASSERT(...) \
  do                               \
  {                                \
    (void)(__VA_ARGS__);           \
  } while((void)0, 0)
#define SPIRV_DEBUG_RDCASSERTEQUAL(...) \
  do                                    \
  {                                     \
    (void)(__VA_ARGS__);                \
  } while((void)0, 0)
#else
#define SPIRV_DEBUG_RDCASSERT(...) RDCASSERTMSG("", __VA_ARGS__)
#define SPIRV_DEBUG_RDCASSERTEQUAL(a, b) RDCASSERTEQUAL(a, b)
#endif

struct SPIRVInterfaceAccess;
struct SPIRVPatchData;

namespace rdcspv
{
struct ImageOperandsAndParamDatas;

// Which texture channel a gather operation reads (passed through to
// DebugAPIWrapper::QueueSampleGather).
enum class GatherChannel : uint8_t
{
  Red = 0,
  Green = 1,
  Blue = 2,
  Alpha = 3,
};

// Per-thread properties queried from the API wrapper via
// DebugAPIWrapper::GetThreadProperty(). These mirror the per-lane members on
// ThreadState (helperInvocation, quadId, quadLaneIndex, elected, subgroupId).
enum class ThreadProperty : uint32_t
{
  Helper,        // helper invocation flag
  QuadId,        // ID of the pixel quad the thread belongs to
  QuadLane,      // lane index within the quad
  Active,        // active lane flag
  Elected,       // elected-lane flag (see SubgroupLaneData::elect)
  SubgroupId,    // index of the thread's subgroup
  Count,         // number of properties - keep last (used by ITERABLE_OPERATORS)
};

ITERABLE_OPERATORS(ThreadProperty);

// Result of an operation that may need to round-trip through the device,
// e.g. pointer reads/writes and texel reads/writes.
enum class DeviceOpResult : uint32_t
{
  Unknown,        // not yet determined
  Succeeded,      // operation completed
  Failed,         // operation failed
  NeedsDevice,    // operation could not complete here and must run on the device thread
};

// Atomically replaces *var with newVal via a compare-exchange loop.
// The retry path uses the value returned by CmpExch32 as the next expected
// value instead of re-reading *var with a plain (non-atomic) load — this
// avoids a racy read of concurrently-modified memory and saves a redundant
// load per retry. The initial plain read is only a hint for the first attempt.
inline void AtomicStore(int32_t *var, int32_t newVal)
{
  int32_t expected = *var;
  int32_t observed;
  // CmpExch32 returns the value actually observed in *var; loop until it
  // matched our expectation, i.e. the exchange succeeded
  while((observed = Atomic::CmpExch32(var, expected, newVal)) != expected)
    expected = observed;
}

// Atomically reads *var. Implemented as a compare-exchange of 0 with 0: when
// the value is 0 it is swapped for an identical 0, otherwise the exchange
// fails — in both cases the return value is an atomic snapshot of *var.
inline int32_t AtomicLoad(int32_t *var)
{
  const int32_t snapshot = Atomic::CmpExch32(var, 0, 0);
  return snapshot;
}

// Atomically reads *var (const overload). CmpExch32 needs a mutable pointer,
// so we const_cast rather than using the previous C-style cast — const_cast
// makes the const-removal explicit and greppable. The 0-for-0 exchange leaves
// the value logically unmodified in all cases.
inline int32_t AtomicLoad(const int32_t *var)
{
  return Atomic::CmpExch32(const_cast<int32_t *>(var), 0, 0);
}

struct ThreadState;

// Abstract interface the debugger uses to talk to the graphics API backend.
// The simulation calls through this to read/write API-side resources (buffers,
// textures, pointers), fetch shader inputs, and queue GPU-executed operations.
class DebugAPIWrapper
{
public:
  virtual ~DebugAPIWrapper() {}
  // queue a debug message to surface to the user
  virtual void AddDebugMessage(MessageCategory c, MessageSeverity sv, MessageSource src, rdcstr d) = 0;

  virtual GraphicsAPI GetGraphicsAPI() = 0;
  // whether the simulation should run multi-threaded
  virtual bool SimulateThreaded() = 0;
  virtual ResourceId GetShaderID() = 0;

  // byte length of the buffer bound at 'bind'
  virtual uint64_t GetBufferLength(const ShaderBindIndex &bind) = 0;

  // read an input-attachment/location value into 'var'
  virtual void ReadLocationValue(int32_t location, ShaderVariable &var) = 0;

  // read/write a byte range of a bound buffer
  virtual void ReadBufferValue(const ShaderBindIndex &bind, uint64_t offset, uint64_t byteSize,
                               void *dst) = 0;
  virtual void WriteBufferValue(const ShaderBindIndex &bind, uint64_t offset, uint64_t byteSize,
                                const void *src) = 0;

  // read/write raw memory via a physical buffer device address
  virtual void ReadAddress(uint64_t address, uint64_t byteSize, void *dst) = 0;
  virtual void WriteAddress(uint64_t address, uint64_t byteSize, const void *src) = 0;

  // read/write a single texel of a bound image. May return NeedsDevice if the
  // operation has to run on the device thread.
  virtual DeviceOpResult ReadTexel(const ShaderBindIndex &imageBind, const ShaderVariable &coord,
                                   uint32_t sample, ShaderVariable &output) = 0;
  virtual DeviceOpResult WriteTexel(const ShaderBindIndex &imageBind, const ShaderVariable &coord,
                                    uint32_t sample, const ShaderVariable &value) = 0;

  // fill a shader input (builtin or location/component) for the given thread
  virtual void FillInputValue(ShaderVariable &var, ShaderBuiltin builtin, uint32_t threadIndex,
                              uint32_t location, uint32_t component) = 0;

  // query a per-thread property (see ThreadProperty)
  virtual uint32_t GetThreadProperty(uint32_t threadIndex, ThreadProperty prop) = 0;
  // whether image/buffer contents are already cached CPU-side (no device round-trip needed)
  virtual bool IsImageCached(const ShaderBindIndex &bind) = 0;
  virtual bool IsBufferCached(const ShaderBindIndex &bind) = 0;
  virtual bool IsBufferCached(uint64_t address) = 0;

  // bitfield describing the sampled texture: base component type in the low
  // bits, plus flags for buffer/subpass textures
  enum TextureType
  {
    Float_Texture = 0x00,

    UInt_Texture = 0x01,
    SInt_Texture = 0x02,

    Buffer_Texture = 0x10,
    Subpass_Texture = 0x20,
  };

  // queue a sample/gather operation to be executed on the GPU. 'hasResult' and
  // 'output' report whether a result was produced synchronously.
  virtual bool QueueSampleGather(ThreadState &lane, Op opcode, TextureType texType,
                                 const ShaderBindIndex &imageBind,
                                 const ShaderBindIndex &samplerBind, const ShaderVariable &uv,
                                 const ShaderVariable &ddxCalc, const ShaderVariable &ddyCalc,
                                 const ShaderVariable &compare, GatherChannel gatherChannel,
                                 const rdcspv::ImageOperandsAndParamDatas &operands,
                                 ShaderVariable &output, bool &hasResult) = 0;
  // queue a GLSL.std.450 math op to be executed on the GPU
  virtual bool QueueCalculateMathOp(GLSLstd450 op, const rdcarray<ShaderVariable> &params) = 0;
  // collect results of previously queued operations
  virtual bool GetQueuedResults(rdcarray<ShaderVariable *> &mathOpResults,
                                rdcarray<ShaderVariable *> &sampleGatherResults) = 0;
  // whether more operations can be queued before results must be drained
  virtual bool QueuedOpsHasSpace() = 0;
};

// things we need to readback once per hit thread
struct ResultDataBase
{
  // output position of the hit — NOTE(review): presumably SV_Position/FragCoord
  // equivalent, confirm against the readback shader
  Vec4f pos;

  uint32_t prim;      // primitive ID of the hit
  uint32_t sample;    // sample index of the hit
  uint32_t view;      // multiview view index
  uint32_t valid;     // non-zero if this hit record was written

  float ddxDerivCheck;      // sentinel derivative used to validate quad layout
  uint32_t quadLaneIndex;   // lane index within the pixel quad
  uint32_t laneIndex;       // lane index within the subgroup/workgroup
  uint32_t subgroupSize;    // size of the subgroup

  // 128-bit ballots across the subgroup
  uint32_t globalBallot[4];
  uint32_t electBallot[4];
  uint32_t helperBallot[4];

  uint32_t numSubgroups;    // may be packed oddly so we don't assume we can calculate
  uint32_t padding[3];      // keep the struct 16-byte aligned for the trailing lane data

  // LaneData lanes[N]
  // each LaneData is prefixed by the subgroup struct below if needed, and then the stage struct unconditionally
};

// things we need per-lane with subgroups active, before any per-stage data
// things we need per-lane with subgroups active, before any per-stage data
struct SubgroupLaneData
{
  uint32_t elect;       // for OpGroupNonUniformElect, if we don't have ballot
  uint32_t isActive;    // per lane active mask
  uint32_t padding[2];  // pad to 16 bytes
};

// per-lane data read back for vertex shader debugging
struct VertexLaneData
{
  uint32_t inst;    // allow/expect instance to vary across subgroup just in case
  uint32_t vert;    // vertex id (either auto-generated or index)
  uint32_t view;    // multiview view (if used)
  uint32_t padding;
};

// per-lane data read back for pixel shader debugging
struct PixelLaneData
{
  Vec4f fragCoord;      // per-lane coord
  uint32_t isHelper;    // per-lane helper bit
  uint32_t quadId;    // the per-quad ID shared among all 4 threads, to differentiate between quads.
                      // is the laneIndex of the top-left thread (with an offset, so we can see 0 as invalid)
  uint32_t quadLaneIndex;    // the quadLaneIndex for quad-neighbours, in case we are fetching a subgroup
  uint32_t padding;
};

// per-lane data read back for compute shader debugging
struct ComputeLaneData
{
  uint32_t threadid[3];    // per-lane thread id (in case it's not trivial)
  uint32_t subIdxInGroup;  // subgroup index within the workgroup
};

// CPU implementation of a single extended-set instruction. The uint32_t is
// presumably the instruction number within the set — verify at call sites.
typedef ShaderVariable (*ExtInstImpl)(ThreadState &, uint32_t, const rdcarray<Id> &);

// Dispatch table for one imported extended instruction set (e.g. GLSL.std.450,
// populated by ConfigureGLSLStd450 below).
struct ExtInstDispatcher
{
  rdcstr name;                        // name of the imported set
  bool skippedNonsemantic = false;    // set if this is a non-semantic set we skip over
  rdcarray<rdcstr> names;             // per-instruction names
  rdcarray<ExtInstImpl> functions;    // per-instruction implementations, parallel to names
};

void ConfigureGLSLStd450(ExtInstDispatcher &extinst);

// State shared by every lane in the debugged workgroup. The arrays here are
// allocated once at BeginDebug time; resource *contents* may change but the
// variables themselves do not move.
struct GlobalState
{
public:
  GlobalState() {}
  // allocated storage for opaque uniform blocks, does not change over the course of debugging
  rdcarray<ShaderVariable> constantBlocks;

  // workgroup private variables
  rdcarray<ShaderVariable> workgroups;

  // resources may be read-write but the variable itself doesn't change
  rdcarray<ShaderVariable> readOnlyResources;
  rdcarray<ShaderVariable> readWriteResources;
  rdcarray<ShaderVariable> samplers;

  // dispatch tables for imported extended instruction sets
  SparseIdMap<ExtInstDispatcher> extInsts;

  // initialise to 0: the user-provided default constructor above does not set
  // this, so it was previously left indeterminate.
  // NOTE(review): presumably an emulated shader clock value — confirm usage.
  uint64_t clock = 0;
};

// One entry in a thread's call stack. Heap-allocated and never copied (copying
// is deleted below) so that pointers into 'locals' remain stable.
struct StackFrame
{
  StackFrame() = default;
  // the function this frame is executing
  Id function;
  // instruction index of the OpFunctionCall that entered this frame (~0U for
  // the entry point frame)
  uint32_t funcCallInstruction = ~0U;

  // allocated storage for locals
  rdcarray<ShaderVariable> locals;

  // list of Ids we created, either variables/function parameters in this function, or IDs created
  // in this function. When we return from this frame they will be emptied.
  // This prevents a use-after-free with ShaderVariableChanges if we re-enter the same function
  // and want to show the previous value of an id
  rdcarray<Id> idsCreated;

  // as a hack for scoping without proper debug info, we track locals from their first use
  rdcarray<Id> localsUsed;

  // the thread's live list before the function was entered
  rdcarray<Id> live;

  // the last block we were in and the current block, for OpPhis
  Id lastBlock, curBlock;

private:
  // disallow copying to ensure the locals we allocate never move around
  StackFrame(const StackFrame &o) = delete;
  StackFrame &operator=(const StackFrame &o) = delete;
};

// A GLSL.std.450 math operation queued to be calculated on the GPU instead of
// simulated on the CPU (see ThreadState::QueueMathOp / Debugger::QueueGpuMathOp).
struct GpuMathOperation
{
  // index in the workgroup of the lane that queued this op
  uint32_t workgroupIndex = 0;
  // which extended math instruction to run
  GLSLstd450 op;
  // copies of the operand values at queue time
  rdcarray<ShaderVariable> paramVars;
  // destination for the result once the GPU op completes. Initialised to NULL
  // for consistency with GpuSampleGatherOperation::result — previously this
  // was left indeterminate on default construction.
  ShaderVariable *result = NULL;
};

// A sample/gather operation queued to be executed on the GPU (see
// ThreadState::QueueSampleGather / Debugger::QueueGpuSampleGatherOp).
struct GpuSampleGatherOperation
{
  uint32_t workgroupIndex;                // lane that queued this op
  Op opcode;                              // the SPIR-V image opcode
  DebugAPIWrapper::TextureType texType;   // component type / texture kind flags
  ShaderBindIndex imageBind;              // bound image to sample from
  ShaderBindIndex samplerBind;            // bound sampler (if separate)
  ShaderVariable uv;                      // texture co-ordinates
  ShaderVariable ddxCalc;                 // explicit/calculated x derivatives
  ShaderVariable ddyCalc;                 // explicit/calculated y derivatives
  ShaderVariable compare;                 // depth-comparison reference value
  GatherChannel gatherChannel;            // channel for gather ops
  ImageOperandsAndParamDatas operands;    // extra image operands (bias, lod, offset, ...)
  ShaderVariable *result = NULL;          // destination for the result when it arrives
};

// Bitmask of optional features used by the shader being debugged.
enum class ShaderFeatures : uint32_t
{
  None = 0,
  Derivatives = 1 << 0,    // shader computes derivatives (requires quad neighbours)
};

BITMASK_OPERATORS(ShaderFeatures);

class Debugger;

// The complete execution state of one simulated lane (shader invocation).
// One ThreadState exists per lane in the debugged workgroup; the atomic_*
// members at the bottom co-ordinate with the device thread when the
// simulation runs multi-threaded.
struct ThreadState
{
  ThreadState(Debugger &debug, const GlobalState &globalState, ShaderStage stage,
              ShaderFeatures shaderFeatures);
  ~ThreadState();

  // begin execution at the shader entry point. useDebugState controls whether
  // ShaderDebugState changes are recorded.
  void EnterEntryPoint(bool useDebugState);
  // execute up to 'steps' instructions; the rest of the workgroup is passed in
  // for cross-lane operations (derivatives, subgroup ops).
  void StepNext(bool useDebugState, const uint32_t steps, const rdcarray<ThreadState> &workgroup);

  // derivative direction (d/dx or d/dy)
  enum DerivDir
  {
    DDX,
    DDY
  };
  // derivative precision variant
  enum DerivType
  {
    Coarse,
    Fine
  };

  // calculate the derivative of id 'val' across the quad, reading the
  // neighbouring lanes out of 'workgroup'
  ShaderVariable CalcDeriv(DerivDir dir, DerivType type, const rdcarray<ThreadState> &workgroup,
                           Id val);

  // fill 'funcs' with the function Ids of the current callstack
  void FillCallstack(rdcarray<Id> &funcs);

  // whether this thread has finished executing (defined in the .cpp)
  bool Finished() const;

  // instruction indices for the instruction being executed and the next one
  uint32_t currentInstruction;
  uint32_t nextInstruction;

  // per-lane active mask as seen from this thread
  rdcarray<bool> activeMask;

  // shared workgroup-wide state, and the owning debugger
  const GlobalState &global;
  Debugger &debugger;

  // thread-local inputs/outputs. This array does not change over the course of debugging
  rdcarray<ShaderVariable> inputs, outputs;

  // thread-local private variables
  rdcarray<ShaderVariable> privates;

  // This must be a thread safe container
  // every ID's variable, if a pointer it may be pointing at a ShaderVariable stored elsewhere
  DenseIdMap<ShaderVariable> ids;

  // for any allocated variables, a list of 'extra' pointers pointing to it. By default the actual
  // storage of allocated variables is not directly accessible (it's stored in e.g. inputs, outputs,
  // global constants, stack frame variables, etc). The ID for the allocating OpVariable is replaced
  // with a pointer pointing to that storage. However more pointers can be generated with
  // OpAccessChain etc, and these pointers must be listed as changed whenever the underlying Id
  // changes (and vice-versa - a change via any of those pointers must update all other pointers).
  SparseIdMap<rdcarray<Id>> pointersForId;

  // NOTE(review): 'gsm' presumably refers to groupshared/workgroup memory —
  // confirm against the implementation file.
  SparseIdMap<ShaderVariable> gsmPointers;
  struct GSMIndex
  {
    int32_t global;
    int32_t local;
  };
  rdcarray<GSMIndex> gsmIndexes;

  // accessors for the private control-flow state below
  bool IsDiverged() const { return diverged; };
  const rdcarray<uint32_t> &GetEnteredPoints() const { return enteredPoints; }
  uint32_t GetConvergenceInstruction() const { return convergenceInstruction; }
  uint32_t GetFunctionReturnPoint() const { return functionReturnPoint; }

  // value returned from the last function return, and whether it's valid
  ShaderVariable returnValue;
  bool hasReturnValueData;
  rdcarray<StackFrame *> callstack;

  // the list of IDs that are currently valid and live
  rdcarray<Id> live;

  // last instruction index that wrote each Id
  std::map<Id, uint32_t> lastWrite;

  // quad ID (arbitrary, just used to find neighbours for derivatives)
  uint32_t quadId = 0;
  // index in the pixel quad (relative to the active lane)
  uint32_t quadLaneIndex = ~0U;
  // the lane indices of our quad neighbours
  uint32_t quadNeighbours[4] = {~0U, ~0U, ~0U, ~0U};
  // index in the workgroup
  uint32_t workgroupIndex = 0;
  // index in the subgroup
  uint32_t subgroupId = 0;
  bool helperInvocation = false;
  bool dead = true;
  bool elected = false;

  // fetch the current value of an Id (resolving pointers as needed)
  const ShaderVariable &GetSrc(Id id) const;
  // read/write through a pointer Id; may need a device round-trip
  DeviceOpResult WritePointerValue(Id pointer, const ShaderVariable &val);
  DeviceOpResult ReadPointerValue(bool atomic, Id pointer, ShaderVariable &ret);

  void DebugBreak();

  // lifecycle of a result produced asynchronously (GPU op or device thread):
  // Pending -> Ready once the result arrives, then consumed via GetPendingResult
  enum class PendingResultStatus : int32_t
  {
    Unknown,
    Pending,
    Ready,
    Stepped,
  };

  // queue a GPU math op / sample-gather op whose result will be written back
  // to this lane asynchronously
  void QueueMathOp(GLSLstd450 op, const rdcarray<ShaderVariable> &paramVars,
                   const ShaderVariable &result);
  void QueueSampleGather(Op opcode, DebugAPIWrapper::TextureType texType,
                         const ShaderBindIndex &imageBind, const ShaderBindIndex &samplerBind,
                         const ShaderVariable &uv, const ShaderVariable &ddxCalc,
                         const ShaderVariable &ddyCalc, const ShaderVariable &compare,
                         GatherChannel gatherChannel, const ImageOperandsAndParamDatas &operands,
                         const ShaderVariable &result);

  bool IsPendingResultPending() const
  {
    return GetPendingResultStatus() == PendingResultStatus::Pending;
  }
  bool IsPendingResultReady() const
  {
    return GetPendingResultStatus() == PendingResultStatus::Ready;
  }
  void SetPendingResultUnknown() { SetPendingResultStatus(PendingResultStatus::Unknown); }
  // mark the pending result as arrived - must currently be Pending
  void SetPendingResultReady()
  {
    SPIRV_DEBUG_RDCASSERTEQUAL(GetPendingResultStatus(), PendingResultStatus::Pending);
    SetPendingResultStatus(PendingResultStatus::Ready);
  }
  // fetch the arrived result - must be Ready
  const ShaderVariable &GetPendingResult() const
  {
    SPIRV_DEBUG_RDCASSERTEQUAL(GetPendingResultStatus(), PendingResultStatus::Ready);
    return pendingResultData;
  }
  // mark a simulation step as queued, clearing all per-step 'needs' flags
  void SetStepQueued()
  {
    AtomicStore(&atomic_isSimulationStepActive, 1);
    AtomicStore(&atomic_stepNeedsGpuSampleGatherOp, 0);
    AtomicStore(&atomic_stepNeedsGpuMathOp, 0);
    AtomicStore(&atomic_stepNeedsDeviceThread, 0);
  }
  // per-step flags: the current step is blocked until the named operation runs
  void SetStepNeedsGpuSampleGatherOp()
  {
    AtomicStore(&atomic_stepNeedsGpuSampleGatherOp, 1);
    SetPendingResultStatus(PendingResultStatus::Pending);
  }
  bool StepNeedsGpuSampleGatherOp() const
  {
    return (AtomicLoad(&atomic_stepNeedsGpuSampleGatherOp) == 1);
  }
  void SetStepNeedsGpuMathOp()
  {
    AtomicStore(&atomic_stepNeedsGpuMathOp, 1);
    SetPendingResultStatus(PendingResultStatus::Pending);
  }
  bool StepNeedsGpuMathOp() const { return (AtomicLoad(&atomic_stepNeedsGpuMathOp) == 1); }
  void SetStepNeedsDeviceThread()
  {
    AtomicStore(&atomic_stepNeedsDeviceThread, 1);
    SetPendingResultStatus(PendingResultStatus::Pending);
  }
  bool StepNeedsDeviceThread() const { return (AtomicLoad(&atomic_stepNeedsDeviceThread) == 1); }
  // fetch the queued op - only valid while the matching 'needs' flag is set
  // and the pending result hasn't arrived yet
  const GpuMathOperation &GetQueuedGpuMathOp() const
  {
    SPIRV_DEBUG_RDCASSERT(AtomicLoad(&atomic_stepNeedsGpuMathOp));
    SPIRV_DEBUG_RDCASSERT(IsPendingResultPending());
    return queuedGpuMathOp;
  }
  const GpuSampleGatherOperation &GetQueuedGpuSampleGatherOp() const
  {
    SPIRV_DEBUG_RDCASSERT(AtomicLoad(&atomic_stepNeedsGpuSampleGatherOp));
    SPIRV_DEBUG_RDCASSERT(IsPendingResultPending());
    return queuedGpuSampleGatherOp;
  }

  bool CanRunAnotherStep() const;

  // set/query whether a simulation step is in flight for this lane
  void SetSimulationStepCompleted() { AtomicStore(&atomic_isSimulationStepActive, 0); }
  bool IsSimulationStepActive() const { return (AtomicLoad(&atomic_isSimulationStepActive) == 1); }

  // reset the recorded per-step debug state
  void ClearPendingDebugState()
  {
    pendingDebugState.changes.clear();
    pendingDebugState.flags = ShaderEvents::NoEvent;
    pendingDebugState.nextInstruction = 0;
  }
  const ShaderDebugState &GetPendingDebugState() const { return pendingDebugState; }

private:
  void EnterFunction(const rdcarray<Id> &arguments);
  void SetDst(Id id, const ShaderVariable &val);
  bool SetLive(Id id);
  void ProcessScopeChange(const rdcarray<Id> &oldLive, const rdcarray<Id> &newLive);
  void JumpToLabel(Id target);
  bool ReferencePointer(Id id);

  void SkipIgnoredInstructions();
  void SetConvergencePoint(Id block);

  void ExecuteMemoryBarrier(Id semanticsId);
  static bool WorkgroupIsDiverged(const rdcarray<ThreadState> &workgroup);

  // pending-result status, stored as an int32 so it can be accessed atomically
  PendingResultStatus GetPendingResultStatus() const
  {
    return (PendingResultStatus)AtomicLoad(&atomic_pendingResultStatus);
  }

  void SetPendingResultStatus(PendingResultStatus status)
  {
    AtomicStore(&atomic_pendingResultStatus, (int32_t)status);
  }

  ShaderFeatures features;
  DerivType defaultDeriveType;
  ShaderDebugState pendingDebugState;
  bool hasDebugState = false;
  uint32_t stepIndex = 0;
  // storage for the asynchronously-delivered result and the queued op details
  ShaderVariable pendingResultData;
  GpuMathOperation queuedGpuMathOp;
  GpuSampleGatherOperation queuedGpuSampleGatherOp;

  // Control Flow state variables
  // true if executed an operation which could trigger divergence
  bool diverged;
  // list of potential convergence points that were entered in a single step (used for tracking thread convergence)
  rdcarray<uint32_t> enteredPoints;
  // the id of the merge block that the last branch targetted
  uint32_t convergenceInstruction;
  // the instruction after a function call is defined to be a convergence point
  uint32_t functionReturnPoint;

  // These need to be accessed using atomics
  int32_t atomic_pendingResultStatus = (int32_t)PendingResultStatus::Unknown;
  int32_t atomic_stepNeedsGpuSampleGatherOp = 0;
  int32_t atomic_stepNeedsGpuMathOp = 0;
  int32_t atomic_stepNeedsDeviceThread = 0;
  int32_t atomic_isSimulationStepActive = 0;
};

// Kind of a lexical scope in the shader's source-level debug info.
enum class DebugScope
{
  CompilationUnit,
  Composite,
  Function,
  Block,
};

// Source-level type description reconstructed from debug info.
struct TypeData
{
  VarType type = VarType::Unknown;
  uint32_t vecSize = 0, matSize = 0;    // vector/matrix dimensions, 0 if not vector/matrix
  bool colMajorMat = false;             // matrix majorness

  Id baseType;    // element type for arrays/vectors/matrices/pointers
  rdcarray<uint32_t> arrayDimensions;
  rdcarray<rdcpair<rdcstr, Id>> structMembers;    // (name, type) per struct member
  rdcarray<uint32_t> memberOffsets;               // byte offsets, parallel to structMembers
};

// A mapping from a source-level local variable (or a sub-element of one,
// selected by 'indexes') to the debug variable holding its value.
// instIndex and isDeclare now have default initializers: operator< reads
// members field-by-field, and previously a default-constructed mapping
// compared with indeterminate values. NSDMIs keep this an aggregate in C++14+.
struct LocalMapping
{
  // strict weak ordering over (sourceVar, indexes, debugVar)
  bool operator<(const LocalMapping &o) const
  {
    if(sourceVar != o.sourceVar)
      return sourceVar < o.sourceVar;
    if(indexes != o.indexes)
      return indexes < o.indexes;
    return debugVar < o.debugVar;
  }

  bool isSourceSupersetOf(const LocalMapping &o) const
  {
    // this mapping is a superset of the other if:

    // it's the same source var
    if(sourceVar != o.sourceVar)
      return false;

    // it contains the same or fewer indices
    if(o.indexes.size() < indexes.size())
      return false;

    // the common prefix of indexes is identical
    for(size_t i = 0; i < indexes.size(); i++)
      if(indexes[i] != o.indexes[i])
        return false;

    // if all those conditions are true, we either map to the same index (indexes size is the same -
    // likely case) or else we cover a whole sub-tree where the other only covers a leaf (other has
    // more indexes - unlikely but possible)
    return true;
  }

  // instruction index where this mapping takes effect
  uint32_t instIndex = 0;
  // the source-level variable being mapped
  Id sourceVar;
  // the debug variable holding the value
  Id debugVar;
  // whether this came from a Declare (as opposed to a Value) debug op
  bool isDeclare = false;
  // path of member/element indices into the source variable
  rdcarray<uint32_t> indexes;
};

// One lexical scope from the debug info, forming a tree via 'parent'.
struct ScopeData
{
  DebugScope type;      // what kind of scope this is
  ScopeData *parent;    // enclosing scope, NULL at the root
  uint32_t line;        // source line the scope starts at
  uint32_t column;      // source column the scope starts at
  int32_t fileIndex;    // index of the source file
  size_t end;           // offset where the scope ends

  rdcstr name;

  // source-level locals declared in this scope
  rdcarray<Id> locals;

  // mappings from source locals to debug variables, active in this scope
  rdcarray<LocalMapping> localMappings;

  // returns true if 'check' is this scope or any enclosing scope, by walking
  // up the parent chain from here to the root
  bool HasAncestor(const ScopeData *check) const
  {
    for(const ScopeData *node = this; node != NULL; node = node->parent)
    {
      if(node == check)
        return true;
    }

    return false;
  }
};

// One level of inlining from the debug info: the scope that was inlined and
// the enclosing inline level (presumably NULL at the outermost level - verify).
struct InlineData
{
  ScopeData *scope;
  InlineData *parent;
};

// Debug info for a single source-level local variable.
struct LocalData
{
  rdcstr name;         // source-level name
  ScopeData *scope;    // scope the variable is declared in
  TypeData *type;      // source-level type
};

Id ParseRawName(const rdcstr &name);
rdcstr GetRawName(Id id);

// A debug message captured during simulation, queued so it can be flushed
// later (see Debugger::queuedDebugMessages / ProcessQueuedDebugMessages).
struct DebugMessage
{
  MessageCategory cat;
  MessageSeverity sev;
  MessageSource src;
  rdcstr desc;
};

// How Debugger::StepThread advances a lane: a single step, multiple steps,
// or queueing multiple steps (presumably for asynchronous execution - verify).
enum class StepThreadMode
{
  RUN_SINGLE_STEP,
  RUN_MULTIPLE_STEPS,
  QUEUE_MULTIPLE_STEPS
};

// The SPIR-V shader debugger: parses the module (as a Processor), owns the
// simulated workgroup of ThreadStates and the shared GlobalState, and drives
// stepping - including the queues that marshal GPU-executed ops and
// device-thread work when simulating multi-threaded.
class Debugger : public Processor, public ShaderDebugger
{
public:
  Debugger();
  ~Debugger();
  virtual void Parse(const rdcarray<uint32_t> &spirvWords);
  // set up the simulation: allocates the workgroup, fills inputs/globals via
  // the API wrapper, and returns the trace to be filled in by ContinueDebug
  ShaderDebugTrace *BeginDebug(DebugAPIWrapper *apiWrapper, const ShaderStage stage,
                               const rdcstr &entryPoint, const rdcarray<SpecConstant> &specInfo,
                               const std::map<size_t, uint32_t> &instructionLines,
                               const SPIRVPatchData &patchData, uint32_t activeIndex,
                               uint32_t threadsInWorkgroup, uint32_t threadsInSubgroup);

  // run the simulation further, returning the per-step state changes
  rdcarray<ShaderDebugState> ContinueDebug();

  // conversions between instruction indices, iterators, and Ids
  ConstIter GetIterForInstruction(uint32_t inst) const;
  uint32_t GetInstructionForIter(ConstIter it) const;
  uint32_t GetInstructionForFunction(Id id) const;
  uint32_t GetInstructionForLabel(Id id) const;
  const DataType &GetType(Id typeId) const;
  const DataType &GetTypeForId(Id ssaId) const;
  const Decorations &GetDecorations(Id typeId) const;
  bool IsDebugExtInstSet(Id id) const;
  bool HasDebugInfo() const { return m_DebugInfo.valid; }
  bool InDebugScope(uint32_t inst) const;
  rdcstr GetHumanName(Id id) const;
  void AllocateVariable(Id id, Id typeId, ShaderVariable &outVar) const;

  // pointer helpers: reading/writing through pointer ShaderVariables, and
  // constructing/inspecting the pointers themselves
  DeviceOpResult ReadFromPointer(const ShaderVariable &ptr, ShaderVariable &ret) const;
  DeviceOpResult GetPointerValue(const ShaderVariable &v, ShaderVariable &ret) const;
  uint64_t GetPointerByteOffset(const ShaderVariable &ptr) const;
  DebugAPIWrapper::TextureType GetTextureType(const ShaderVariable &img) const;
  ShaderVariable MakePointerVariable(Id id, const ShaderVariable *v, uint8_t scalar0 = 0xff,
                                     uint8_t scalar1 = 0xff) const;
  ShaderVariable MakeTypedPointer(uint64_t value, const DataType &type) const;
  Id GetPointerBaseId(const ShaderVariable &v) const;
  uint32_t GetPointerArrayStride(const ShaderVariable &ptr) const;
  bool IsOpaquePointer(const ShaderVariable &v) const;
  bool IsPhysicalPointer(const ShaderVariable &v) const;

  bool ArePointersAndEqual(const ShaderVariable &a, const ShaderVariable &b) const;
  DeviceOpResult WriteThroughPointer(ShaderVariable &ptr, const ShaderVariable &val) const;
  ShaderVariable MakeCompositePointer(const ShaderVariable &base, Id id,
                                      rdcarray<uint32_t> &indices) const;

  DebugAPIWrapper *GetAPIWrapper() const;
  uint32_t GetNumInstructions() const { return (uint32_t)instructionOffsets.size(); }
  // NOTE(review): returns the whole GlobalState by value - an expensive copy
  // of several arrays. Confirm callers don't rely on a snapshot before
  // considering a const-reference return.
  GlobalState GetGlobal() const { return global; }
  const rdcarray<Id> &GetLiveGlobals() const { return liveGlobals; }
  ThreadState &GetActiveLane() { return workgroup[activeLaneIndex]; }
  const ThreadState &GetActiveLane() const { return workgroup[activeLaneIndex]; }
  uint32_t GetSubgroupSize() const { return subgroupSize; }

  // mark the given lane as having a GPU op queued (see queuedGpuMathOps etc)
  void QueueGpuMathOp(uint32_t lane);
  void QueueGpuSampleGatherOp(uint32_t lane);

  // the thread allowed to talk to the device when simulating multi-threaded
  uint64_t GetDeviceThreadID() const { return deviceThreadID; }
  bool IsDeviceThread() const { return Threading::GetCurrentID() == GetDeviceThreadID(); }
  void AddDebugMessage(MessageCategory cat, MessageSeverity sev, MessageSource src, rdcstr desc) const;
  void FillInputValue(ShaderVariable &var, ShaderBuiltin builtin, uint32_t threadIndex) const;
  // texel access, forwarded to the API wrapper (may require the device thread)
  DeviceOpResult ReadTexel(const ShaderBindIndex &imageBind, const ShaderVariable &coord,
                           uint32_t sample, ShaderVariable &output) const;
  DeviceOpResult WriteTexel(const ShaderBindIndex &imageBind, const ShaderVariable &coord,
                            uint32_t sample, const ShaderVariable &input) const;
  DeviceOpResult GetBufferLength(const ShaderBindIndex &bind, uint64_t &bufferLen) const;

  // lock serialising atomic memory operations across simulation threads
  Threading::CriticalSection &GetAtomicMemoryLock() const { return atomicMemoryLock; }
private:
  virtual void PreParse(uint32_t maxId);
  virtual void PostParse();
  virtual void RegisterOp(Iter it);

  void SetDebugTypeMember(const OpShaderDbg &member, TypeData &resultType, size_t memberIndex);

  // walk a type recursively, invoking 'callback' per leaf; optionally
  // allocating storage as it goes (the 'allocate' template parameter)
  template <typename ShaderVarType, bool allocate>
  uint32_t WalkVariable(const Decorations &curDecorations, const DataType &type,
                        uint64_t offsetOrLocation, bool locationUniform, ShaderVarType &var,
                        const rdcstr &accessSuffix,
                        std::function<void(ShaderVarType &, const Decorations &, const DataType &,
                                           uint64_t, const rdcstr &)>
                            callback) const;
  void SetStructArrayNames(ShaderVariable &c, const DataType *typeWalk,
                           const rdcarray<SpecConstant> &specInfo);

  void MakeSignatureNames(const rdcarray<SPIRVInterfaceAccess> &sigList, rdcarray<rdcstr> &sigNames);

  void FillCallstack(ThreadState &thread, ShaderDebugState &state) const;
  void FillDebugSourceVars(rdcarray<InstructionSourceInfo> &instInfo) const;
  void FillDefaultSourceVars(rdcarray<InstructionSourceInfo> &instInfo) const;

  /////////////////////////////////////////////////////////
  // debug data

  DebugAPIWrapper *apiWrapper = NULL;

  // shared state and the per-lane states of the simulated workgroup
  GlobalState global;
  rdcarray<ThreadState> workgroup;

  Id convergeBlock;

  uint32_t activeLaneIndex = 0;
  uint32_t subgroupSize = 0;
  ShaderStage stage;

  // total steps executed so far across the debug session
  int steps = 0;

  /////////////////////////////////////////////////////////
  // parsed data

  // OpMemberName data: name of member 'member' in struct 'id'
  struct MemberName
  {
    Id id;
    uint32_t member;
    rdcstr name;
  };

  DenseIdMap<rdcstr> strings;
  rdcarray<MemberName> memberNames;
  std::map<ShaderEntryPoint, Id> entryLookup;

  // per-Id (first use, last use) range, for liveness
  DenseIdMap<rdcpair<size_t, size_t>> idLiveRange;

  SparseIdMap<size_t> m_Files;
  LineColumnInfo m_CurLineCol;
  rdcarray<InstructionSourceInfo> m_InstInfo;

  // instruction index of each OpLabel
  SparseIdMap<uint32_t> labelInstruction;

  // two-way mapping between pointee type Ids and compact pointer-type indices
  SparseIdMap<uint16_t> idToPointerType;
  rdcarray<rdcspv::Id> pointerTypeToId;

  // the live mutable global variables, to initialise a stack frame's live list
  rdcarray<Id> liveGlobals;

  // per-function data gathered at parse time
  struct Function
  {
    size_t begin = 0;
    rdcarray<Id> parameters;
    rdcarray<Id> variables;
  };

  SparseIdMap<Function> functions;
  Function *curFunction = NULL;

  // byte offset of every instruction in the module, indexed by instruction
  rdcarray<size_t> instructionOffsets;

  // lazily-generated human-readable names (guarded by dynamicNamesLock)
  mutable std::set<rdcstr> usedNames;
  mutable Threading::RWLock dynamicNamesLock;
  mutable std::map<Id, rdcstr> dynamicNames;

  // everything parsed out of source-level debug info ext inst sets
  struct
  {
    bool valid = false;

    // member-processing deferred until all types are known
    rdcarray<std::function<void()>> deferredMembers;

    SparseIdMap<TypeData> types;
    SparseIdMap<ScopeData> scopes;
    SparseIdMap<InlineData> inlined;
    ScopeData *curScope = NULL;
    InlineData *curInline = NULL;

    rdcarray<rdcpair<const ScopeData *, LocalMapping>> pendingMappings;

    rdcarray<Id> globals;
    rdcarray<Id> constants;

    SparseIdMap<LocalData> locals;

    SparseIdMap<int32_t> sources;
    SparseIdMap<rdcstr> filenames;

    SparseIdMap<Id> funcToDebugFunc;

    // instruction offset -> scope/inline info active there
    std::map<size_t, ScopeData *> lineScope;
    std::map<size_t, InlineData *> lineInline;

    rdcarray<LocalMapping> activeLocalMappings;
  } m_DebugInfo;

  rdcshaders::ControlFlow controlFlow;

  const ScopeData *GetScope(size_t offset) const;

  void ClampScalars(const ShaderVariable &var, uint8_t &scalar0) const;
  void ClampScalars(const ShaderVariable &var, uint8_t &scalar0, uint8_t &scalar1) const;

  // device-thread work queue: steps that must run on the device thread
  void QueueDeviceThreadStep(uint32_t lane);
  void ProcessQueuedDeviceThreadSteps();

  // multi-threaded stepping machinery
  void QueueJob(uint32_t lane);
  void StepThread(uint32_t lane, StepThreadMode stepMode);
  void InternalStepThread(uint32_t lane);
  void SimulationJobHelper();

  // draining of the various cross-thread queues
  void ProcessQueuedDebugMessages();
  void ProcessQueuedOps();
  void ProcessQueuedGpuMathOps();
  void ProcessQueuedGpuSampleGatherOps();
  void SyncPendingGpuOps();
  void SyncPendingLanes();

  mutable Threading::CriticalSection atomicMemoryLock;
  mutable Threading::CriticalSection queuedDebugMessagesLock;
  mutable rdcarray<DebugMessage> queuedDebugMessages;
  // per-lane flags: which lanes have each kind of work queued
  rdcarray<bool> queuedGpuMathOps;
  rdcarray<bool> queuedGpuSampleGatherOps;
  rdcarray<bool> queuedDeviceThreadSteps;
  rdcarray<ShaderDebugState> *shaderChangesReturn;
  rdcarray<int32_t> queuedJobs;

  bool retireIDs = true;
  ShaderDebugState activeDebugState;
  rdcarray<bool> pendingLanes;
  rdcarray<ShaderVariable *> pendingGpuMathsOpsResults;
  rdcarray<ShaderVariable *> pendingGpuSampleGatherOpsResults;

  uint64_t deviceThreadID;
  int32_t atomic_simulationFinished;
  // whether the simulation is running multi-threaded
  bool mtSimulation;
};

// this does a 'safe' value assignment, by doing parallel depth-first iteration of both variables
// and only copying the value itself. This ensures we don't change any locations that might be
// pointed to. Assignments should only ever be between compatible types so this should be safe.
void AssignValue(ShaderVariable &dst, const ShaderVariable &src);

};    // namespace rdcspv

DECLARE_REFLECTION_ENUM(rdcspv::ThreadState::PendingResultStatus);
DECLARE_REFLECTION_ENUM(rdcspv::StepThreadMode);
DECLARE_REFLECTION_ENUM(rdcspv::DeviceOpResult);
