//=- SeqAccessesInference.cpp - Infer SeqAccess Chain From Memory Transfers -=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Transform sequential accesses intrinsics based on access analysis.
//
// The intrinsics are for lowering to burst on m_axi.
//
// For OpenCL, it's also done on bram for widening the sequence of accesses.
//
//===----------------------------------------------------------------------===//

#include "reflow/Memory/SeqAccessesInference.h"
#include "reflow/Memory/SeqAccessesInfo.h"
#include "reflow/Options.h"
#include "reflow/Predicate/IntrinsicInst.h"
#include "reflow/ReflowConfig.h"
#include "reflow/SCEV/ScalarEvolutionCanon.h"
#include "reflow/SPIR/Kernel.h"
#include "reflow/Support/Metadata/AttributeMD.h"
#include "reflow/Support/Metadata/InterfaceIntrinsic.h"
#include "reflow/TransformUtils/MiscUtil.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/XILINXFunctionInfoUtils.h"
#include "llvm/Analysis/XILINXLoopInfoUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/XILINXFPGAIntrinsicInst.h"
#include "llvm/PassSupport.h"
#include "llvm/Transforms/Scalar.h"
#include <utility>

#define DEBUG_TYPE "reflow-sequential-accesses-inference"
#define DIAG_TYPE "reflow-burst-inference"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace PatternMatch;

static cl::opt<bool> InferSeqAccessesOnLayoutTransformedArray(
    "reflow-infer-seqaccesses-on-layout-transform-array",
    cl::desc("Burst accesses on array that has layout transformed pragma."),
    cl::Hidden, cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> InferInBoundGEP(
    "reflow-infer-inbound-gep",
    cl::desc("Infer in bound gep when infer sequential accesses."), cl::Hidden,
    cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableInferVectorizationOnMAXI(
    "reflow-seq-access-infer-enable-vectorization-on-maxi",
    cl::desc("Enable vectorization when stride is a power of 2 and the pointer "
             "is on maxi."),
    cl::Hidden, cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableSeqAccessesInDataFlowRegion(
    "reflow-seq-access-infer-enable-dataflow-region",
    cl::desc("Enable inferring sequential accesses in dataflow region."),
    cl::Hidden, cl::init(true), cl::cat(ReflowCategory));

// Give the two burst counters distinct descriptions so the -stats report is
// unambiguous (they previously shared the text "Number of bursts inferred").
STATISTIC(NumRegionBursts, "Number of region bursts inferred");
STATISTIC(NumLoopBursts, "Number of loop bursts inferred");
STATISTIC(NumAssumptionAccept,
          "Number of assumption from predicated SCEV accepted");
STATISTIC(NumAssumptionReject,
          "Number of assumption from predicated SCEV rejected");

namespace {
/// Legacy function pass that drives sequential-access (burst) inference; the
/// per-chain rewriting is carried out by SeqAccessesInferenceTransformer.
class SeqAccessesInference : public FunctionPass {
  // True when compiling OpenCL input (see the file header: for OpenCL the
  // inference is also applied on BRAM for widening).
  bool IsOpenCL;

public:
  static char ID;

  /// Maps an access instruction to the SCEV describing its pointer operand.
  using PtrSCEVMapTy = DenseMap<const Instruction *, const SCEV *>;

  SeqAccessesInference(bool IsOpenCL = false)
      : FunctionPass(ID), IsOpenCL(IsOpenCL) {
    initializeSeqAccessesInferencePass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<SeqAccessesInfoPass>();
    AU.addRequired<ScalarEvolutionCanonWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
    AU.setPreservesCFG();
  }

  /// Pass-specific skip predicate (defined out of line).
  /// NOTE(review): this shadows the base Pass::skipFunction — confirm the
  /// shadowing is intentional.
  bool skipFunction(const Function &F) const;
};

/// Rewrites one SeqAccess chain into the fpga_seq_* intrinsic form, either
/// over a straight-line region delimited by [Begin, End] or over a loop.
class SeqAccessesInferenceTransformer {
  using PtrSCEVMapTy = SeqAccessesInference::PtrSCEVMapTy;

  SeqAccess &SA;
  BasicBlock *Begin;
  BasicBlock *End;
  const DataLayout &DL;
  ScalarEvolution &SE;
  LoopInfo &LI;
  HLSIRBuilder HIB;
  SCEVExpander Expander;
  ReflowDiagnostic &RD;

  /// The sequential accesses that we're going to transform
  SetVector<Instruction *> Accesses;

  /// Record the sequential accesses pointer addrec
  PtrSCEVMapTy SCEVMap;

  bool IsOpenCL;

  /// Checks if the inferred sequential access chain may expose in dataflow
  /// region
  bool exposeInDataflowRegion(BasicBlock &BB, Instruction *FAI);

  /// Checks if \p L is a dataflow loop or contains dataflow loop
  bool isOrContainDataflowLoop(const Loop *L);

  /// Checks if the candidate region is a valid region to do sequential access
  /// inference
  bool isValidSeqAccessRegion();
  bool isValidSeqAccessRegion(Loop *L);

  /// Returns unique underlying object for \p P
  Value *getUniqueUnderlyingObject(Value *P);

  /// Checks if \p P is on legal interface for inferring sequential access chain
  bool hasLegalInterface(Value *P);

  ///  Checks if \p P is on legal interface that support vectorization
  bool supportVectorization(Value *P);

  /// Checks if \p P has a legal underlying object for inferring a sequential
  /// access chain
  bool hasLegalUnderlyingObject(Value *P);

  /// Infers in bound geps from original source gep
  void inferInboundGEPs(Value *V, unsigned MaxLookup = 6);

  /// Transform to sequential access chain for SeqAccess in a region
  Value *inferRegionStoreSeqAccess(Type *SizeTy, Type *DataTy, Value *StartAddr,
                                   Value *Size);
  Value *inferRegionLoadSeqAccess(Type *SizeTy, Type *DataTy, Value *StartAddr,
                                  Value *Size);
  Value *inferRegionSeqAccess(Type *PtrTy, Type *SizeTy, Type *DataTy,
                              const SCEV *Start, const SCEV *Len);

  /// Gets new index for the sequential access chain
  Value *getNewIndex(const Loop *L, const SCEV *PtrSCEV, Type *SizeTy,
                     unsigned ElemSizeInBytes, const SCEV *Start, Value *P);

  /// Aligns the Pointer addrec to the vectorized one.
  /// Returns <AlignedPtrAddRec, Offset>
  std::pair<const SCEVAddRecExpr *, const SCEV *>
  alignPtrAddRec(const Loop *L, const SCEVAddRecExpr *PtrAddRec,
                 unsigned ElemSizeInBytes, Type *PtrTy, Type *SizeTy,
                 unsigned SrcEltSize);

  /// Generates Value for start address of the SeqAccess
  Value *generateLoopSeqAccessStartAddr(const SCEV *Start, Type *PtrTy);

  /// Gets burst size
  Value *getSize(Value *Size, Value *Pred);

  /// Gets predicate terms of the sequential access \p SA
  Value *getSAPredicate();

  /// Transform to sequential access chain for SeqAccess of a loop
  Value *inferLoopStoreSeqAccess(PredicatedScalarEvolution &PSE, const Loop *L,
                                 Type *PtrTy, Type *SizeTy, Type *DataTy,
                                 Value *Size, unsigned ElemSizeInBytes,
                                 Value *UO);
  Value *inferLoopLoadSeqAccess(PredicatedScalarEvolution &PSE, const Loop *L,
                                Type *PtrTy, Type *SizeTy, Type *DataTy,
                                Value *Size, unsigned ElemSizeInBytes,
                                Value *UO);
  Value *inferLoopSeqAccess(Loop *L, Type *PtrTy, Type *SizeTy, Type *DataTy,
                            Value *UO, const SCEV *Len);

public:
  SeqAccessesInferenceTransformer(SeqAccess &SA, BasicBlock *Begin,
                                  BasicBlock *End, LLVMContext &Ctx,
                                  const DataLayout &DL, ScalarEvolution &SE,
                                  LoopInfo &LI, ReflowDiagnostic &RD,
                                  SetVector<Instruction *> Accesses,
                                  PtrSCEVMapTy SCEVMap, bool IsOpenCL)
      : SA(SA), Begin(Begin), End(End), DL(DL), SE(SE), LI(LI), HIB(Ctx, DL),
        Expander(SE, DL, DEBUG_TYPE), RD(RD),
        // The containers are taken by value; move them into the members
        // instead of paying for a second deep copy.
        Accesses(std::move(Accesses)), SCEVMap(std::move(SCEVMap)),
        IsOpenCL(IsOpenCL) {}

  /// Transform SA to sequential access chain
  Value *transform(const Loop *L);
};
} // namespace

// Register the pass with the legacy pass manager and declare the analyses it
// depends on so they are scheduled before this pass.
INITIALIZE_PASS_BEGIN(
    SeqAccessesInference, DEBUG_TYPE,
    "SeqAccessesInference - Infer Sequential accesses for HLS C/C++", false,
    false)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SeqAccessesInfoPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionCanonWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(
    SeqAccessesInference, DEBUG_TYPE,
    "SeqAccessesInference - Infer Sequential accesses for HLS C/C++", false,
    false)

// Unique identity for the pass; its address is the pass ID.
char SeqAccessesInference::ID = 0;

// Check if there's a side-effect pragma attached on \p V. A pragma with side
// effects means we might end up not respecting the user-specified pragma.
static bool hasNoSideEffectPragma(Value *V) {
  auto *Pragma = PragmaInst::getAnyPragmaOnDeclaration(V);
  // No pragma on the declaration at all: trivially side-effect free.
  if (!Pragma)
    return true;

  // partition/reshape with factor=1 do not change the layout, hence no side
  // effect.
  if (auto *Partition = dyn_cast<ArrayPartitionInst>(Pragma))
    return Partition->getFactor() == 1;
  if (auto *Reshape = dyn_cast<ArrayReshapeInst>(Pragma))
    return Reshape->getFactor() == 1;

  // Any other pragma is treated as having a side effect.
  return false;
}

/// Decide whether a SCEV assumption is acceptable. Only no-wrap predicates are
/// accepted, and only when the global config assumes addresses never wrap.
/// Updates the accept/reject statistics accordingly.
static bool checkPredicate(const SCEVPredicate &P) {
  assert(!isa<SCEVUnionPredicate>(P) && "Unexpected predicate type!");
  const bool Accepted = isa<SCEVWrapPredicate>(P) &&
                        ReflowConfig::GlobalConfig().AssumeNoAddrWrap;
  if (Accepted)
    ++NumAssumptionAccept;
  else
    ++NumAssumptionReject;
  return Accepted;
}

/// Returns the pointer operand of any supported access kind; aborts on an
/// unrecognized instruction.
static Value *getPointerOperand(Value *I) {
  if (auto *Load = dyn_cast<LoadInst>(I))
    return Load->getPointerOperand();
  if (auto *Store = dyn_cast<StoreInst>(I))
    return Store->getPointerOperand();
  if (auto *SeqBegin = dyn_cast<SeqBeginInst>(I))
    return SeqBegin->getPointerOperand();
  if (auto *SeqAcc = dyn_cast<SeqAccessInst>(I))
    return SeqAcc->getPointerOperand();
  if (auto *MaxiStore = dyn_cast<MAXIStoreInst>(I))
    return MaxiStore->getPointerOperand();

  llvm_unreachable("Unknown access instruction!");
}

/// Returns the stored value for store-like accesses; for everything else
/// (e.g. loads) the instruction itself produces the value.
static Value *getValueOperand(Value *I) {
  if (auto *Store = dyn_cast<StoreInst>(I))
    return Store->getValueOperand();
  if (auto *SeqStore = dyn_cast<SeqStoreInst>(I))
    return SeqStore->getValueOperand();
  if (auto *MaxiStore = dyn_cast<MAXIStoreInst>(I))
    return MaxiStore->getValueOperand();
  return I;
}

/// Type of the value moved by the access. A seq-begin intrinsic carries its
/// data type directly; otherwise use the value operand's type.
static Type *getValueType(Value *I) {
  if (auto *SeqBegin = dyn_cast<SeqBeginInst>(I))
    return SeqBegin->getDataType();
  return getValueOperand(I)->getType();
}

/// Allocation size in bytes of the value moved by the access.
static unsigned getValueSize(const DataLayout &DL, Value *I) {
  return DL.getTypeAllocSize(getValueType(I));
}

static unsigned getAlignment(const DataLayout &DL, Value *I) {
  if (auto *L = dyn_cast<LoadInst>(I))
    return L->getAlignment();
  if (auto *S = dyn_cast<StoreInst>(I))
    return S->getAlignment();
  if (auto S = dyn_cast<SeqBeginInst>(I))
    return getValueSize(DL, S);
  if (auto S = dyn_cast<MAXIStoreInst>(I)) {
    if (auto Align = S->getAlignment())
      return Align;
    return DL.getABITypeAlignment(getValueType(S));
  }

  llvm_unreachable("Unknown access instruction!");
}

/// The alignment required for the access: taken to be the allocation size of
/// the accessed value (i.e. natural alignment).
static unsigned getRequiredAlignment(const DataLayout &DL, Value *I) {
  return getValueSize(DL, I);
}

/// Strip a shl/lshr round-trip produced by size scaling, i.e. fold
///   %0 = shl i64 %iv, log2(size)
///   %1 = lshr i64 %0, log2(size)
/// back to %iv when \p SizeInBytes is a power of two.
static Value *simplifySizeScaling(Value *V, uint64_t SizeInBytes) {
  if (!llvm::isPowerOf2_64(SizeInBytes))
    return V;

  const uint64_t ShiftAmt = llvm::Log2_64(SizeInBytes);
  Value *Stripped;
  if (match(V, m_LShr(m_Shl(m_Value(Stripped), m_SpecificInt(ShiftAmt)),
                      m_SpecificInt(ShiftAmt))))
    return Stripped;

  return V;
}

/// Detect if the basic block might be exposed into a dataflow region. Return
/// true if any of below is true.
/// - It's in a loop, and the loop might be exposed into a dataflow region.
/// - It's not in a loop, but is in a dataflow function.
/// NOTE(review): \p FAI is currently unused in this function.
bool SeqAccessesInferenceTransformer::exposeInDataflowRegion(BasicBlock &BB,
                                                             Instruction *FAI) {
  if (auto *L = LI.getLoopFor(&BB)) {
    if (!mayExposeInDataFlowRegion(SE, L))
      return false;

    // Check if there's no chance that the loop L would disappear: a loop with
    // an uncomputable backedge-taken count cannot be proven to unroll away.
    PredicatedScalarEvolution PSE(SE, *L);
    const SCEV *BTC = PSE.getBackedgeTakenCount();
    if (isa<SCEVCouldNotCompute>(BTC))
      return false;

    // For a rotated loop, trip count = backedge-taken count + 1.
    const SCEV *LTC =
        isRotatedLoop(L) ? SE.getAddExpr(BTC, SE.getOne(BTC->getType())) : BTC;
    // Only a loop that is dataflow itself, runs exactly once, or may be fully
    // unrolled can expose BB into a dataflow region.
    if (!isDataFlow(L) && !(LTC->isOne()) && !mayFullyUnroll(L, LTC))
      return false;

    // NOTE: 3.1 dataflow is not yet ready to support region burst in a dataflow
    //       loop. Thus, we accept the region burst that's in a dataflow region
    //       under the flag, as long as it's not in a dataflow loop.
    if (EnableSeqAccessesInDataFlowRegion && !isDataFlow(L))
      return false;

    return true;
  }

  // Not in any loop: exposed only when the enabling flag is off and the
  // enclosing function itself is a dataflow function.
  if (!EnableSeqAccessesInDataFlowRegion && isDataFlow(BB.getParent()))
    return true;

  return false;
}

/// Walks down the unique-child chain of \p L and reports whether any loop on
/// that chain is a dataflow loop. A loop with more than one subloop stops the
/// walk (answer: false).
bool SeqAccessesInferenceTransformer::isOrContainDataflowLoop(const Loop *L) {
  for (;;) {
    if (isDataFlow(L))
      return true;

    // Stop at leaves and at loops with multiple children.
    if (L->empty() || L->getSubLoops().size() > 1)
      return false;

    L = L->getSubLoops()[0];
  }
}

/// Returns the single underlying object of \p P, or nullptr when the pointer
/// may alias several objects.
Value *SeqAccessesInferenceTransformer::getUniqueUnderlyingObject(Value *P) {
  // When \p P is not itself a pointer (e.g. the underlying is a select),
  // retry on the pointer operand of the chain's first access.
  if (!P->getType()->isPointerTy())
    return getUniqueUnderlyingObject(getPointerOperand(SA.getFirstAccess()));

  SmallVector<Value *, 2> Objects;
  auto Depth = ReflowConfig::GlobalConfig().AAMaxSearchDepth;
  GetUnderlyingObjects(P, Objects, DL, &LI, Depth);

  // Reject multiple underlying objects.
  if (Objects.size() != 1)
    return nullptr;
  return Objects.back();
}

/// Checks if \p P is on an interface that supports vectorized (widened)
/// sequential accesses: m_axi always does; for OpenCL, only the global
/// address space does.
bool SeqAccessesInferenceTransformer::supportVectorization(Value *P) {
  if (XlxInterfaceIntrinsic::IsMAXI(P, DL))
    return true;

  // NOTE(review): the original code singled out OCL_AS_Constant but returned
  // false for it anyway (the branch was dead); behavior is preserved here.
  // Confirm whether the constant address space was meant to be supported.
  if (IsOpenCL)
    return P->getType()->getPointerAddressSpace() == spir::OCL_AS_Global;

  return false;
}

/// Checks if \p P lives on an interface where a sequential access chain may
/// legally be inferred.
bool SeqAccessesInferenceTransformer::hasLegalInterface(Value *P) {
  // m_axi is always legal.
  if (XlxInterfaceIntrinsic::IsMAXI(P, DL))
    return true;

  // Config may restrict bursting to m_axi only.
  if (ReflowConfig::GlobalConfig().BurstOnlyOnMAXI)
    return false;

  unsigned AddrSpace = P->getType()->getPointerAddressSpace();
  if (AddrSpace == 0)
    return true;

  // Non-zero address spaces: for OpenCL only uniform address spaces qualify;
  // otherwise anything goes.
  return !IsOpenCL || spir::IsUniformAddressSpace(AddrSpace);
}

/// Checks whether \p Obj (alloca/global/malloc, or an argument whose real
/// underlying objects are all legal) permits sequential access inference.
bool SeqAccessesInferenceTransformer::hasLegalUnderlyingObject(Value *Obj) {
  // We don't infer sequential accesses when there's a non interface pragma
  // attached on the underlying object.
  // FIXME: For some of the pragmas, we can still infer sequential accesses.
  // For example, we can still infer sequential accesses as long as we can lower
  // the inferred sequential accesses to partitioned array. From the whole HLS
  // compilation perspective, we can always do array partition first then infer
  // the sequential accesses.
  // Here, we don't allow accesses on array with non factor=1 array transform
  // pragma to be inferred sequential accesses since later on, widening pass
  // might widen those inferred sequential accesses in a way which is not
  // respecting the user applied array transform pragmas. The restriction
  // can be removed when default doing partition/reshape on all kinds of array
  // in reflow.
  if (isa<AllocaInst>(Obj) || isa<GlobalVariable>(Obj) ||
      isa<MallocInst>(Obj)) {
    if (InferSeqAccessesOnLayoutTransformedArray)
      return true;

    return (!XlxPtrMD::HasAnyAttribute(Obj) && hasNoSideEffectPragma(Obj));
  }

  // Some of the pragmas will not be directly attached on the argument, but
  // the underlying alloca/GV/malloc, need to trace the callers for pragmas.
  if (auto *Arg = dyn_cast<Argument>(Obj)) {
    // Check if it's alias. An argument that has "unidentified" attr might be
    // alias.
    if (XlxInterfaceMD::HasInterface(Arg, "unidentified"))
      return false;

    // Check if there's any pragma applies in attribute format. Deprecated...
    // An argument that has "layout_transformed" attr is having pragma attached
    // to it in attribute format.
    if (!InferSeqAccessesOnLayoutTransformedArray &&
        XlxInterfaceMD::HasInterface(Arg, "layout_transformed"))
      return false;

    // Check if all the underlying objects are legal (skip the argument
    // itself, which GetRealUnderlyingObjects may include).
    SmallPtrSet<Value *, 2> Visited;
    SetVector<Value *> Set;
    reflow::GetRealUnderlyingObjects(Arg, DL, Visited, Set);
    for (auto V : Set) {
      if (V == Arg)
        continue;

      if (!hasLegalUnderlyingObject(V))
        return false;
    }

    return true;
  }

  // Anything else (unknown object kind) is rejected.
  return false;
}

/// Byte-enable mask for the store \p AI: MAXI stores carry an explicit mask;
/// every other store writes all bytes (all-ones mask of type \p BETy).
static Value *getStoreByteEnable(Instruction *AI, Type *BETy) {
  auto *MaxiStore = dyn_cast<MAXIStoreInst>(AI);
  if (!MaxiStore)
    return ConstantInt::getAllOnesValue(BETy);

  auto *Mask = MaxiStore->getByteEnable();
  assert(Mask->getType() == BETy && "Expect consistant byte enable type!");
  return Mask;
}

/// Materializes a region (straight-line) store burst: emits the
/// fpga_seq_store_begin/end pair and rewrites the i-th recorded access into a
/// seq_store at constant index i of the chain.
Value *SeqAccessesInferenceTransformer::inferRegionStoreSeqAccess(
    Type *SizeTy, Type *DataTy, Value *StartAddr, Value *Size) {
  // The whole chain is emitted at the end position of the SeqAccess.
  HIB.SetInsertPoint(SA.getEndPosition());
  auto SeqB =
      HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_store_begin, StartAddr, Size);

  // Byte-enable: one bit per byte of the store data type.
  auto BETy = HIB.getIntNTy(DL.getTypeStoreSize(DataTy));

  for (unsigned i = 0; i < Accesses.size(); i++) {
    auto Idx = ConstantInt::get(SizeTy, i);
    auto OldSt = Accesses[i];
    auto Data = getValueOperand(OldSt);
    // Widen/bitcast the stored value to the chain's data type.
    Data = HIB.CreateZExtOrBitCast(Data, DataTy);
    auto BE = getStoreByteEnable(OldSt, BETy);
    auto NewSt = HIB.CreateSeqStoreInst(Data, SeqB, Idx, BE);
    OldSt->replaceAllUsesWith(NewSt);
  }

  HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_store_end, SeqB, Size);

  return SeqB;
}

/// Materializes a region (straight-line) load burst: emits the
/// fpga_seq_load_begin/end pair and replaces the i-th recorded access with a
/// seq_load at constant index i of the chain.
Value *SeqAccessesInferenceTransformer::inferRegionLoadSeqAccess(
    Type *SizeTy, Type *DataTy, Value *StartAddr, Value *Size) {
  // Loads are emitted at the begin position so the data is available for the
  // original users.
  HIB.SetInsertPoint(SA.getBeginPosition());
  auto SeqB =
      HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_load_begin, StartAddr, Size);

  for (unsigned i = 0; i < Accesses.size(); i++) {
    auto Idx = ConstantInt::get(SizeTy, i);
    auto OldV = Accesses[i];
    auto Data = HIB.CreateSeqLoadInst(DataTy, SeqB, Idx);
    // Narrow/bitcast the loaded value back to the original access type.
    Data = HIB.CreateTruncOrBitCast(Data, OldV->getType());
    OldV->replaceAllUsesWith(Data);
  }

  HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_load_end, SeqB, Size);

  return SeqB;
}

/// Walks up the chain of expander-created GEP/bitcast instructions starting at
/// \p V and marks each GEP in-bounds when the original source GEP (the pointer
/// of the chain's first access) was in-bounds.
/// \p MaxLookup bounds the number of steps; 0 means unbounded.
void SeqAccessesInferenceTransformer::inferInboundGEPs(Value *V,
                                                       unsigned MaxLookup) {
  auto FGEP =
      dyn_cast<GEPOperator>(getPointerOperand(SA.getSeqAccessChainBegin()));
  if (!FGEP)
    return;

  bool IsInBound = FGEP->isInBounds();

  auto CurrI = dyn_cast_or_null<Instruction>(V);
  for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
    // Only touch instructions the SCEV expander itself inserted.
    if (!CurrI || !Expander.isInsertedInstruction(CurrI))
      return;

    if (auto GEP = dyn_cast<GetElementPtrInst>(CurrI)) {
      if (!GEP->isInBounds())
        GEP->setIsInBounds(IsInBound);
      CurrI = dyn_cast<Instruction>(GEP->getPointerOperand());
      continue;
    }

    // Look through bitcasts.
    if (auto BC = dyn_cast<BitCastInst>(CurrI)) {
      CurrI = dyn_cast<Instruction>(BC->getOperand(0));
      continue;
    }

    // Any other instruction ends the walk.
    return;
  }
}

/// A region is valid only when neither of its end blocks may be exposed into
/// a dataflow region.
bool SeqAccessesInferenceTransformer::isValidSeqAccessRegion() {
  auto ChainBegin = SA.getSeqAccessChainBegin();
  if (exposeInDataflowRegion(*Begin, ChainBegin))
    return false;
  return !exposeInDataflowRegion(*End, ChainBegin);
}

/// Drives region burst inference: expands the start address and length,
/// dispatches to the load/store variant, and emits diagnostics/statistics.
/// Returns the seq-begin value, or nullptr when the region is rejected.
Value *SeqAccessesInferenceTransformer::inferRegionSeqAccess(Type *PtrTy,
                                                             Type *SizeTy,
                                                             Type *DataTy,
                                                             const SCEV *Start,
                                                             const SCEV *Len) {
  if (!isValidSeqAccessRegion()) {
    RD.emitMayExposeInDataFlowRegion(SA.getSeqAccessChainBegin());
    return nullptr;
  }

  auto BP = SA.getBeginPosition();
  auto StartAddr = Expander.expandCodeFor(Start, PtrTy, BP);
  // SCEVExpander may have changed the address arithmetic to compute a value
  // which is beyond the end of the allocated object. This leads to an out of
  // bound gep generated. Assume it's in bound when the original gep is in
  // bound.
  if (InferInBoundGEP)
    inferInboundGEPs(StartAddr);

  auto Size = Expander.expandCodeFor(Len, SizeTy, BP);

  auto Dir = SA.getDirection();
  Value *SeqB = nullptr;
  if (Dir == SeqAccess::AccessDirection::LOAD)
    SeqB = inferRegionLoadSeqAccess(SizeTy, DataTy, StartAddr, Size);
  else
    SeqB = inferRegionStoreSeqAccess(SizeTy, DataTy, StartAddr, Size);

  if (!SeqB)
    return nullptr;

  // Report the inferred burst for user diagnostics and bump the statistic.
  RD.emitBurstInferred(SA.getAccesses().takeVector(), SeqB, Len,
                       LI.getLoopFor(Begin));
  ++NumRegionBursts;
  return SeqB;
}

/// Computes the element index of \p PtrSCEV relative to the chain start:
/// (PtrSCEV - Start) / ElemSizeInBytes, expanded to IR at \p P when it is an
/// instruction, otherwise at the loop header's first non-PHI.
Value *SeqAccessesInferenceTransformer::getNewIndex(
    const Loop *L, const SCEV *PtrSCEV, Type *SizeTy, unsigned ElemSizeInBytes,
    const SCEV *Start, Value *P) {
  const SCEV *Index = SE.getMinusSCEV(PtrSCEV, Start);
  // Byte offset -> element index (the division is expected to be exact).
  Index = SE.getUDivExactExpr(SE.getTruncateOrZeroExtend(Index, SizeTy),
                              SE.getConstant(SizeTy, ElemSizeInBytes, false));
  auto PosI = dyn_cast<Instruction>(P);
  Value *Iter = Expander.expandCodeFor(
      Index, SizeTy, PosI ? PosI : L->getHeader()->getFirstNonPHI());
  // Strip any shl/lshr round-trip the scaling may have produced.
  return simplifySizeScaling(Iter, ElemSizeInBytes);
}

/// Aligns the pointer addrec to the vectorized element size: rounds the start
/// offset down to a multiple of \p ElemSizeInBytes and rebuilds the addrec on
/// the aligned base. Returns the new addrec plus the residual offset measured
/// in source elements of size \p SrcEltSize.
std::pair<const SCEVAddRecExpr *, const SCEV *>
SeqAccessesInferenceTransformer::alignPtrAddRec(const Loop *L,
                                                const SCEVAddRecExpr *PtrAddRec,
                                                unsigned ElemSizeInBytes,
                                                Type *PtrTy, Type *SizeTy,
                                                unsigned SrcEltSize) {
  auto DstEltSize = SE.getConstant(SizeTy, ElemSizeInBytes);
  auto Start = PtrAddRec->getStart();
  auto BasePtr = SE.getPointerBase(Start);
  auto StartOffSet = SE.getMinusSCEV(Start, BasePtr);
  assert(PtrAddRec->getStepRecurrence(SE) == DstEltSize && "Unexpected step!");

  // Cast the pointer and align the offset.
  auto AlignedPtr = SE.getUnknown(
      Expander.expandCodeFor(BasePtr, PtrTy, Begin->getTerminator()));

  // Round the start offset down to a multiple of the destination element size.
  auto AlignedStartOffSet =
      SE.getMulExpr(SE.getUDivExpr(StartOffSet, DstEltSize), DstEltSize);

  auto NewPtrAddRec =
      SE.getAddRecExpr(SE.getAddExpr(AlignedPtr, AlignedStartOffSet),
                       DstEltSize, const_cast<Loop *>(L), SCEV::NoWrapMask);
  // Residual (bytes lost to alignment) converted to source-element units.
  auto Offset =
      SE.getUDivExactExpr(SE.getMinusSCEV(StartOffSet, AlignedStartOffSet),
                          SE.getConstant(SizeTy, SrcEltSize));

  return {cast<SCEVAddRecExpr>(NewPtrAddRec), Offset};
}

/// Expands \p Start to IR at the terminator of Begin and, when enabled, marks
/// the generated GEPs in-bounds (SCEVExpander may rewrite the address
/// arithmetic into a form that computes past the end of the allocated object,
/// yielding an out-of-bounds GEP; assume in-bounds when the original GEP was).
Value *SeqAccessesInferenceTransformer::generateLoopSeqAccessStartAddr(
    const SCEV *Start, Type *PtrTy) {
  Value *Addr = Expander.expandCodeFor(Start, PtrTy, Begin->getTerminator());
  if (InferInBoundGEP)
    inferInboundGEPs(Addr);
  return Addr;
}

/// Insertion point in \p End for the seq-end intrinsic: the first non-PHI
/// instruction, skipping past any seq-end intrinsics already emitted there.
static Instruction *getEndInsertionPoint(BasicBlock &End) {
  auto It = End.getFirstNonPHI()->getIterator();
  while (isa<SeqEndInst>(&*It))
    ++It;
  return &*It;
}

/// Gets SCEVAddRec for \p V under \p L context. When the plain SCEV is not an
/// addrec, walk down the unique-child loop chain containing \p FAI and, from
/// the innermost loop outward, try converting with predicates; each predicate
/// must pass checkPredicate, otherwise return nullptr.
static const SCEVAddRecExpr *getAsAddRec(PredicatedScalarEvolution &PSE,
                                         Value *V, Loop *L, Instruction *FAI) {
  const SCEV *E = PSE.getSCEV(V);

  if (auto *S = dyn_cast<SCEVAddRecExpr>(E))
    return S;

  // Collect the chain L -> unique child -> ... as long as each child contains
  // the first access instruction.
  SmallVector<Loop *, 2> Worklist;
  auto CandL = L;
  do {
    Worklist.emplace_back(CandL);
    if (CandL->empty())
      break;

    if (CandL->getSubLoops().size() > 1)
      break;

    auto Child = CandL->getSubLoops()[0];
    if (!Child->contains(FAI))
      break;

  } while ((CandL = CandL->getSubLoops()[0]));

  // Convert innermost-first, accumulating the result in E.
  const SCEVAddRecExpr *Add = nullptr;
  for (auto CandL : reverse(Worklist)) {
    if (!E)
      return nullptr;

    SmallPtrSet<const SCEVPredicate *, 8> Preds;
    Add = PSE.getSE()->convertSCEVToAddRecWithPredicates(E, CandL, Preds);
    E = Add;

    // All assumptions required for the conversion must be acceptable.
    if (!llvm::all_of(
            Preds, [](const SCEVPredicate *P) { return checkPredicate(*P); }))
      return nullptr;
  }

  return Add;
}

/// A loop is a valid sequential-access region when it cannot be exposed into
/// a dataflow region, or when dataflow-region inference is enabled and the
/// loop neither is nor contains a dataflow loop.
bool SeqAccessesInferenceTransformer::isValidSeqAccessRegion(Loop *L) {
  if (!mayExposeInDataFlowRegion(SE, L))
    return true;
  return EnableSeqAccessesInDataFlowRegion && !isOrContainDataflowLoop(L);
}

/// Burst size guarded by the predicate: \p Size when \p Pred holds (or is
/// absent), zero otherwise.
Value *SeqAccessesInferenceTransformer::getSize(Value *Size, Value *Pred) {
  if (Pred) {
    auto Zero = ConstantInt::get(Size->getType(), 0);
    return HIB.CreateSelect(Pred, Size, Zero);
  }
  return Size;
}

/// Builds the conjunction of the SeqAccess' predicate terms, or nullptr when
/// the access is unpredicated. Each recorded compare is inverted when its
/// recorded outcome is zero (i.e. the access runs on the false edge).
Value *SeqAccessesInferenceTransformer::getSAPredicate() {
  if (!SA.isPredicated())
    return nullptr;

  Value *Conjunction = nullptr;
  for (auto Pred : SA.getPreds()) {
    auto CmpI = cast<CmpInst>(Pred.first);
    auto Code = Pred.second->isZero() ? CmpI->getInversePredicate()
                                      : CmpI->getPredicate();
    Value *Term =
        HIB.CreateICmp(Code, CmpI->getOperand(0), CmpI->getOperand(1));
    Conjunction = Conjunction ? HIB.CreateAnd(Term, Conjunction) : Term;
  }

  return Conjunction;
}

/// Rewrites the store accesses of the SeqAccess inside loop \p L into a
/// sequential store chain (fpga_seq_store_begin / seq_store / seq_store_end).
/// Handles three shapes: a single store vectorized by Factor > 1, a chain
/// rooted at an existing seq-begin, and a plain chain of stores.
Value *SeqAccessesInferenceTransformer::inferLoopStoreSeqAccess(
    PredicatedScalarEvolution &PSE, const Loop *L, Type *PtrTy, Type *SizeTy,
    Type *DataTy, Value *Size, unsigned ElemSizeInBytes, Value *UO) {

  auto DataStoreBytes = DL.getTypeStoreSize(DataTy);
  // Byte-enable: one bit per byte of the (possibly widened) data type.
  auto BETy = HIB.getIntNTy(DataStoreBytes);

  HIB.SetInsertPoint(Begin->getTerminator());

  Value *SAPred = getSAPredicate();

  // Handle sequential access inference on top of vectorization.
  auto FAI = SA.getSeqAccessChainBegin();
  auto P = getPointerOperand(FAI);
  auto PtrSCEV = not_null(SCEVMap[FAI]);
  auto PtrAddRec = dyn_cast<SCEVAddRecExpr>(PtrSCEV);
  auto SrcElemSizeInBytes = getValueSize(DL, FAI);
  // Factor > 1 means the chain element is wider than the source access.
  auto Factor = (ElemSizeInBytes / SrcElemSizeInBytes);
  assert(llvm::isPowerOf2_64(Factor) && "Bad Factor!");
  if (Factor > 1) {
    if (!EnableInferVectorizationOnMAXI)
      return nullptr;

    if (!supportVectorization(UO))
      return nullptr;

    assert((SA.getSeqAccessChainSize() == 1) &&
           "Expect single access vectorization!");

    assert(!isa<MAXIStoreInst>(FAI) &&
           "Expect vectorize only non-byte enable store!");

    assert(PtrAddRec && "Expect address to be represented as SCEVAddRec!");

    // Align the addrec to the widened element; SrcOffset is the position of
    // the source element inside the widened one.
    const SCEV *SrcOffset = nullptr;
    std::tie(PtrAddRec, SrcOffset) = alignPtrAddRec(
        L, PtrAddRec, ElemSizeInBytes, PtrTy, SizeTy, SrcElemSizeInBytes);
    Value *Offset = Expander.expandCodeFor(SrcOffset, SizeTy, FAI);

    const SCEV *Start = PtrAddRec->getStart();
    auto StartAddr = generateLoopSeqAccessStartAddr(Start, PtrTy);
    auto SASize = getSize(Size, SAPred);
    auto SeqB = HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_store_begin,
                                      StartAddr, SASize);

    HIB.SetInsertPoint(SA.getEndPosition());

    // Insert the scalar into its lane of a zero vector of the widened type.
    auto Idx = getNewIndex(L, PtrAddRec, SizeTy, ElemSizeInBytes, Start, P);
    auto Data = getValueOperand(FAI);
    auto StoreTy = DataTy->getVectorElementType();
    Data = HIB.CreateBitCast(Data, StoreTy);
    Data =
        HIB.CreateInsertElement(Constant::getNullValue(DataTy), Data, Offset);

    // Shift the byteenable
    auto StoreBytes = DL.getTypeStoreSize(StoreTy);
    auto *BEMask = ConstantInt::get(
        BETy, APInt::getLowBitsSet(DataStoreBytes, StoreBytes));
    assert(llvm::isPowerOf2_64(StoreBytes) && "Bad type!");
    // Lane offset -> byte offset (lane * StoreBytes, StoreBytes is pow2).
    Offset = HIB.CreateShl(Offset, llvm::Log2_64(StoreBytes));
    auto BE = HIB.CreateShl(BEMask, HIB.CreateZExtOrTrunc(Offset, BETy));
    HIB.CreateSeqStoreInst(Data, SeqB, Idx, BE);

    HIB.SetInsertPoint(getEndInsertionPoint(*End));
    HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_store_end, SeqB, SASize);

    return SeqB;
  }

  auto SAStart = SA.getStart();
  DEBUG(dbgs() << "SeqAccess Start Addr: " << *SAStart << "\n");
  auto StartAddr = generateLoopSeqAccessStartAddr(SAStart, PtrTy);
  const SCEV *Start = PSE.getSCEV(StartAddr);
  auto SASize = getSize(Size, SAPred);
  auto SeqB = HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_store_begin, StartAddr,
                                    SASize);

  HIB.SetInsertPoint(getEndInsertionPoint(*End));
  HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_store_end, SeqB, SASize);

  // Chain rooted at an existing seq-begin: retarget its seq_stores to the new
  // chain and bias their indices by the distance from the new start.
  if (isa<SeqBeginInst>(FAI)) {
    auto Idx = getNewIndex(L, PtrSCEV, SizeTy, ElemSizeInBytes, Start, P);
    for (auto I : Accesses)
      if (auto S = dyn_cast<SeqStoreInst>(I)) {
        HIB.SetInsertPoint(S);
        S->replaceUsesOfWith(FAI, SeqB);
        auto NewIdx = HIB.CreateNUWAdd(S->getIndex(), Idx);
        S->updateIndex(NewIdx);
      }
    return SeqB;
  }

  HIB.SetInsertPoint(SA.getEndPosition());

  // Subscript Index
  for (unsigned i = 0; i < Accesses.size(); i++) {
    auto OldSt = Accesses[i];
    auto P = getPointerOperand(OldSt);
    auto PtrSCEV = not_null(SCEVMap[OldSt]);
    DEBUG(dbgs() << "Access \'" << *OldSt << "\':\n" << *PtrSCEV << "\n");
    auto Idx = getNewIndex(L, PtrSCEV, SizeTy, ElemSizeInBytes, Start, P);
    auto Data = getValueOperand(OldSt);
    Data = HIB.CreateZExtOrBitCast(Data, DataTy);
    auto BE = getStoreByteEnable(OldSt, BETy);
    HIB.CreateSeqStoreInst(Data, SeqB, Idx, BE);
  }

  return SeqB;
}

/// Rewrites the load accesses of the SeqAccess inside loop \p L into a
/// sequential load chain (fpga_seq_load_begin / seq_load / seq_load_end).
/// Handles a single load vectorized by Factor > 1 as well as a plain chain.
Value *SeqAccessesInferenceTransformer::inferLoopLoadSeqAccess(
    PredicatedScalarEvolution &PSE, const Loop *L, Type *PtrTy, Type *SizeTy,
    Type *DataTy, Value *Size, unsigned ElemSizeInBytes, Value *UO) {
  HIB.SetInsertPoint(Begin->getTerminator());

  Value *SAPred = getSAPredicate();

  // Handle sequential access inference on top of vectorization.
  auto FAI = SA.getSeqAccessChainBegin();
  auto PtrSCEV = not_null(SCEVMap[FAI]);
  auto PtrAddRec = dyn_cast<SCEVAddRecExpr>(PtrSCEV);
  auto SrcElemSizeInBytes = getValueSize(DL, FAI);
  // Factor > 1 means the chain element is wider than the source access.
  auto Factor = (ElemSizeInBytes / SrcElemSizeInBytes);
  assert(llvm::isPowerOf2_64(Factor) && "Bad Factor!");
  if (Factor > 1) {
    if (!EnableInferVectorizationOnMAXI)
      return nullptr;

    if (!supportVectorization(UO))
      return nullptr;

    assert((SA.getSeqAccessChainSize() == 1) && L->empty() &&
           "Expect only inner most single access vectorization!");

    assert(PtrAddRec && "Expect address to be represented as SCEVAddRec!");

    // Align the addrec to the widened element; SrcOffset is the lane of the
    // source element inside the widened one.
    const SCEV *SrcOffset = nullptr;
    std::tie(PtrAddRec, SrcOffset) = alignPtrAddRec(
        L, PtrAddRec, ElemSizeInBytes, PtrTy, SizeTy, SrcElemSizeInBytes);
    Value *Offset = Expander.expandCodeFor(SrcOffset, SizeTy, FAI);

    const SCEV *Start = PtrAddRec->getStart();
    auto StartAddr = generateLoopSeqAccessStartAddr(Start, PtrTy);
    auto SASize = getSize(Size, SAPred);
    auto SeqB = HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_load_begin, StartAddr,
                                      SASize);

    // Load the wide element and extract the requested lane at the original
    // access position.
    auto BP = SA.getBeginPosition();
    auto Idx = getNewIndex(L, PtrAddRec, SizeTy, ElemSizeInBytes, Start, BP);
    HIB.SetInsertPoint(FAI);
    auto Data = HIB.CreateSeqLoadInst(DataTy, SeqB, Idx);
    Data = HIB.CreateExtractElement(Data, Offset);
    Data = HIB.CreateBitCast(Data, getValueType(FAI));
    FAI->replaceAllUsesWith(Data);

    HIB.SetInsertPoint(getEndInsertionPoint(*End));
    HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_load_end, SeqB, SASize);

    return SeqB;
  }

  auto SAStart = SA.getStart();
  DEBUG(dbgs() << "SeqAccess Start Addr: " << *SAStart << "\n");
  auto StartAddr = generateLoopSeqAccessStartAddr(SAStart, PtrTy);
  const SCEV *Start = PSE.getSCEV(StartAddr);
  auto SASize = getSize(Size, SAPred);
  auto SeqB = HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_load_begin, StartAddr,
                                    SASize);

  auto P = SA.getBeginPosition();
  HIB.SetInsertPoint(P);
  // Subscript Index
  for (unsigned i = 0; i < Accesses.size(); i++) {
    auto OldV = Accesses[i];
    auto PtrSCEV = not_null(SCEVMap[OldV]);
    DEBUG(dbgs() << "Access \'" << *OldV << "\' SCEV:\n" << *PtrSCEV << "\n");
    auto Idx = getNewIndex(L, PtrSCEV, SizeTy, ElemSizeInBytes, Start, P);
    auto Data = HIB.CreateSeqLoadInst(DataTy, SeqB, Idx);
    Data = HIB.CreateTruncOrBitCast(Data, OldV->getType());
    OldV->replaceAllUsesWith(Data);
    // Keep subsequent expansions after the value just created.
    P = cast<Instruction>(Data);
  }

  HIB.SetInsertPoint(getEndInsertionPoint(*End));
  HIB.CreateSeqBeginEnd(Intrinsic::fpga_seq_load_end, SeqB, SASize);

  return SeqB;
}

// Dispatch loop-level sequential access inference by the chain's direction
// (load vs. store); on success, report the inferred burst and bump the stat.
Value *SeqAccessesInferenceTransformer::inferLoopSeqAccess(
    Loop *L, Type *PtrTy, Type *SizeTy, Type *DataTy, Value *UO,
    const SCEV *Len) {
  // Materialize the access length as a pointer-sized integer just before the
  // zone entry.
  auto SizeVal = Expander.expandCodeFor(Len, SizeTy, Begin->getTerminator());
  auto BytesPerElem = DL.getTypeAllocSize(DataTy);

  PredicatedScalarEvolution PSE(SE, *L);
  const bool IsLoad = SA.getDirection() == SeqAccess::AccessDirection::LOAD;
  Value *SeqB =
      IsLoad ? inferLoopLoadSeqAccess(PSE, L, PtrTy, SizeTy, DataTy, SizeVal,
                                      BytesPerElem, UO)
             : inferLoopStoreSeqAccess(PSE, L, PtrTy, SizeTy, DataTy, SizeVal,
                                       BytesPerElem, UO);
  if (!SeqB)
    return nullptr;

  // Record the successful burst inference for reporting and statistics.
  RD.emitBurstInferred(SA.getAccesses().takeVector(), SeqB, Len, L);
  ++NumLoopBursts;
  return SeqB;
}

// Entry point of the transformer: infer a sequential access for the chain,
// either loop-based (when L is given and the region is valid) or region-based.
//
// Returns the generated seq-begin value, or nullptr when inference is not
// possible (unknown/illegal underlying object, invalid region, or the
// direction-specific inference bails out).
Value *SeqAccessesInferenceTransformer::transform(const Loop *L) {
  // Resolve the unique underlying object behind the accessed pointer. Bail
  // out (with per-access diagnostics) when it is unknown, or when its
  // interface / object kind is not legal for sequential access inference.
  auto UO = getUniqueUnderlyingObject(SA.getUnderlyingObj());
  if (!UO || !hasLegalInterface(UO) || !hasLegalUnderlyingObject(UO)) {
    for (auto AI : SA.getAccesses())
      RD.emitUnknownUnderlyingObject(AI, getPointerOperand(AI));
    // UO may be null on this path, so guard the dereference in the debug
    // print (the previous code unconditionally printed *UO).
    DEBUG({
      if (UO)
        dbgs() << *UO << " ; Unknown underlying object.\n";
      else
        dbgs() << "<null> ; Unknown underlying object.\n";
    });
    return nullptr;
  }
  // NOTE: a second, identical hasLegalUnderlyingObject(UO) check used to
  // follow here; it was dead code because the condition above already
  // rejects illegal underlying objects.

  // Utilize access data store size type for memory sequential access inference.
  auto DataTy = SA.getAccessStoreTy();
  auto PtrTy =
      PointerType::get(DataTy, UO->getType()->getPointerAddressSpace());
  auto SizeTy = DL.getIntPtrType(PtrTy);

  auto Len = SA.getLen();
  if (L) {
    Loop *CandL = const_cast<Loop *>(L);
    // Reject regions that are not valid for sequential access inference; the
    // diagnostic indicates the access may be exposed in a dataflow region.
    if (!isValidSeqAccessRegion(CandL)) {
      RD.emitMayExposeInDataFlowRegion(SA.getSeqAccessChainBegin());
      return nullptr;
    }

    return inferLoopSeqAccess(CandL, PtrTy, SizeTy, DataTy, UO, Len);
  }

  // No candidate loop: infer a straight-line (region) sequential access.
  return inferRegionSeqAccess(PtrTy, SizeTy, DataTy, SA.getStart(), Len);
}

// Two SCEV expressions are considered equal when they are the identical node
// or when their difference folds to zero.
static bool isEqual(ScalarEvolution &SE, const SCEV *SA, const SCEV *SB) {
  return SA == SB || SE.getMinusSCEV(SA, SB)->isZero();
}

// Look up the SeqAccess an access instruction belongs to via its zone node.
// NOTE(review): the DataLayout parameter is currently unused; it is kept to
// preserve the existing call signature.
static SeqAccess *getSeqAccess(SeqAccessesInfo &SI, const Instruction *AI,
                               const DataLayout &DL) {
  return SI.getSeqAccessZoneNode(AI)->getSeqAccess(AI);
}

// Pick the instruction to attach a diagnostic to. For failures about
// extending an already-analyzed sequential access, redirect the diagnostic
// to the seq-begin instruction that replaced the chain head (if any);
// otherwise diagnose the original instruction itself.
static const Instruction *
getCurrentDiagInst(SeqAccessesInfo &SI, DiagFailureInfo *Diag,
                   const DataLayout &DL, const Instruction *I,
                   DenseMap<const Instruction *, Instruction *> &AccessesMap) {
  if (!Diag->isExtendAnalyzedSA())
    return I;

  if (auto SA = getSeqAccess(SI, I, DL))
    if (auto FAI = SA->getSeqAccessChainBegin()) {
      // Single hash lookup instead of count() followed by operator[].
      auto It = AccessesMap.find(FAI);
      if (It != AccessesMap.end())
        return It->second;
    }

  return I;
}

// Functions without a body, vararg functions, and FPGA wrapper functions are
// not candidates for sequential access inference.
bool SeqAccessesInference::skipFunction(const Function &F) const {
  const bool NoBody = F.isDeclaration();
  const bool VarArg = F.isVarArg();
  return NoBody || VarArg || F.hasFnAttribute("fpga.wrapper.func");
}

// An existing seq-begin intrinsic can be extended when the analyzed chain
// length differs from the size currently recorded on the intrinsic.
bool isExtendable(ScalarEvolution &SE, SeqAccess *SA, SeqBeginInst *SBI) {
  auto CurSize = SE.getSCEV(SBI->getSize());
  return !isEqual(SE, SA->getLen(), CurSize);
}

// Pass driver: (1) collect transformable sequential access chains and cache
// a SCEV per access address, (2) transform each chain into seq_* intrinsics,
// (3) emit the delayed diagnostics, (4) erase replaced instructions.
bool SeqAccessesInference::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Sequential access inference does not apply when targeting the CPU.
  if (ReflowConfig::GlobalConfig().XCLTarget == ReflowConfig::target_cpu)
    return false;

  auto &Ctx = F.getParent()->getContext();
  const auto &DL = F.getParent()->getDataLayout();
  auto &SI = getAnalysis<SeqAccessesInfoPass>().getSeqAccessesInfo();
  auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto &SEC = getAnalysis<ScalarEvolutionCanonWrapperPass>().getSEC();
  auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
  ReflowDiagnostic RD(DIAG_TYPE, ORE, DL);

  // Pass 1: walk the analyzed accesses, collect the chain heads to transform
  // into WorkList, and cache a (canonicalized or add-rec) SCEV per access
  // address in SCEVMap for index generation during transformation.
  PtrSCEVMapTy SCEVMap;
  SetVector<SeqAccess *> WorkList;
  for (auto I = SI.begin(), E = SI.end(); I != E; ++I) {
    auto AI = SI.getAccess(I);
    auto SA = getSeqAccess(SI, AI, DL);
    if (!SA) continue;
    auto FAI = SA->getSeqAccessChainBegin();

    // Collect all begin accesses into WorkList for later on inferring into
    // sequential access. Already-lowered seq-access intrinsics are skipped.
    if (isa<SeqAccessInst>(AI))
      continue;

    if (AI == FAI) {
      if (auto SBI = dyn_cast<SeqBeginInst>(FAI)) {
        // An existing seq-begin is only reprocessed when the analyzed chain
        // length differs from its recorded size (i.e. it can be extended).
        if (!isExtendable(SE, SA, SBI)) {
          continue;
        }
        WorkList.insert(SA);
      } else {
        WorkList.insert(SA);
      }
    }

    auto L = SI.getAccessInferredLoop(I);
    if (!L)
      continue;

    // Prefer the predicated add-rec form of the address; fall back to the
    // canonicalized plain SCEV when no add-rec can be derived.
    auto CandL = const_cast<Loop *>(L);
    PredicatedScalarEvolution PSE(SE, *CandL);
    auto P = getPointerOperand(const_cast<Instruction *>(AI));
    auto Addr = SE.getSCEV(P);
    DEBUG(dbgs() << "Access Addr: " << *Addr << '\n');
    auto PtrAddRec = getAsAddRec(PSE, P, CandL, FAI);
    const SCEV *S = PtrAddRec ? PtrAddRec : SEC.getCanonSCEV(Addr);
    DEBUG(dbgs() << "Access " << *AI << ":\nSCEV: " << *S << '\n');

    SCEVMap.insert({AI, S});
  }

  SetVector<Instruction *> DeadInstructions;
  // Maps an original chain-begin access to the seq-begin instruction that
  // replaced it; used to redirect diagnostics below.
  DenseMap<const Instruction *, Instruction *> AccessesMap;

  DEBUG(dbgs() << "Transforming " << SI.size() << " accesses...\n");

  bool Changed = false;
  // Pass 2: transform accesses into sequential access intrinsics from
  // sequential access analysis result.
  for (auto SA : WorkList) {
    auto FAI = SA->getSeqAccessChainBegin();
    DEBUG(dbgs() << "Transforming " << *FAI << "...\n");

    // Process the begin of the sequential access chain.
    auto ZN = SI.getSeqAccessZoneNode(FAI);
    auto Accesses = SA->getAccesses();
    auto SeqB = SeqAccessesInferenceTransformer(
                    *SA, ZN->getZoneStart(), ZN->getZoneEnd(), Ctx, DL, SE, LI,
                    RD, Accesses, SCEVMap, IsOpenCL)
                    .transform(SI.getAccessInferredLoop(FAI));
    if (!SeqB)
      continue;

    Changed |= true;
    AccessesMap.insert({FAI, cast<Instruction>(SeqB)});

    // When extending an existing seq-begin, retire the old begin/end pair;
    // otherwise retire the raw accesses replaced by seq intrinsics.
    if (auto SBI = dyn_cast<SeqBeginInst>(FAI)) {
      DeadInstructions.insert(cast<SeqEndInst>(SBI->user_back()));
      DeadInstructions.insert(SBI);
    } else
      DeadInstructions.insert(Accesses.begin(), Accesses.end());
  }

  // NOTE: Delay the diagnostics in transformation pass because of the burst
  //       report will be utilizing the access ID(gotten from instruction name
  //       or position) to understand the burst inference limitation.
  // Report the loop that can't be analyzed
  for (auto L : SI.getCouldNotAnalyzedLoops())
    RD.emitCouldNotAnalyzePattern(L);

  // Report the accesses that can't be static analyzed for sequential access
  // pattern or fail to extend into a longer one. Each failure kind maps to a
  // dedicated diagnostic emitter.
  for (auto I = SI.diag_begin(), E = SI.diag_end(); I != E; ++I) {
    auto FI = SI.getAccess(I);
    auto Diag = SI.getDiagFailureInfo(I);
    auto DI = const_cast<Instruction *>(
        getCurrentDiagInst(SI, Diag, DL, FI, AccessesMap));
    auto FT = Diag->getFailureType();
    assert(FT >= 0 && FT < Diag->getNumDiagKinds() &&
           "Unexpected diagnosed failure type!");
    switch (FT) {
    case DiagFailureInfo::AccessClobbered: {
      RD.emitAccessClobbered(DI, Diag->getSideEffectInst(),
         Diag->getDiagLoop());
      break;
    }
    case DiagFailureInfo::AccessInCondBranch: {
      RD.emitAccessInCondBranch(DI, Diag->getDiagLoop());
      break;
    }
    case DiagFailureInfo::CouldNotAnalyzePattern: {
      RD.emitCouldNotAnalyzePattern(DI, Diag->getDiagLoop());
      break;
    }
    case DiagFailureInfo::IncompatibleStride: {
      RD.emitIncompatibleStride(DI, Diag->getDiagLoop());
      break;
    }
    case DiagFailureInfo::SideEffect: {
      RD.emitSideEffectInst(Diag->getSideEffectInst(), DI);
      break;
    }
    case DiagFailureInfo::NonSimpleMemoryAccess: {
      RD.emitNonSimpleMemoryAccess(DI);
      break;
    }
    case DiagFailureInfo::InsufficientAlignment: {
      // Alignment is queried on the original access, not the redirected DI.
      auto AI = const_cast<Instruction *>(FI);
      RD.emitInsufficientAlignment(DI, getAlignment(DL, AI),
                                   getRequiredAlignment(DL, AI));
      break;
    }
    case DiagFailureInfo::UnsupportedAccessType: {
      auto Ty = getValueType(DI);
      RD.emitUnsupportedAccessType(DI, Ty->getScalarType());
      break;
    }
    case DiagFailureInfo::ExposeInDataflowRegion: {
      RD.emitMayExposeInDataFlowRegion(DI, Diag->getDiagLoop());
      break;
    }
    }
  }

  // Erase the accesses that succeeded in being transformed into sequential
  // access intrinsics.
  for (auto DI : DeadInstructions)
    DI->eraseFromParent();

  return Changed;
}

// Factory entry point for the pass. IsOpenCL selects the OpenCL mode, which
// also applies the inference on bram for widening sequences of accesses (see
// the file header).
Pass *llvm::createSeqAccessesInferencePass(bool IsOpenCL) {
  return new SeqAccessesInference(IsOpenCL);
}
