//===------- SeqAccessesInfo.cpp -- Sequential accesses analysis ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Utilize LLVM single entry single exit Region pass to analyze sequential
// accesses in a Function.
//
//===----------------------------------------------------------------------===//

#include "reflow/Memory/SeqAccessesInfo.h"
#include "reflow/Options.h"
#include "reflow/ReflowConfig.h"
#include "reflow/SCEV/SCEVAtIteration.h"
#include "reflow/SCEV/SCEVStep.h"
#include "reflow/SPIR/Kernel.h"
#include "reflow/Support/Metadata/AttributeMD.h"
#include "reflow/Support/Metadata/InterfaceIntrinsic.h"
#include "reflow/Support/PointerInfo.h"
#include "reflow/Support/PointerRewriter.h"
#include "reflow/Support/Vectorization.h"
#include "reflow/TransformUtils/MiscUtil.h"

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/RegionIterator.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/XILINXFunctionInfoUtils.h"
#include "llvm/Analysis/XILINXLoopInfoUtils.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/XILINXFPGAIntrinsicInst.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "reflow-sequential-accesses-info"
#define PASS_DESCRIPTION "Analyze sequential accesses"

STATISTIC(NumSeqAccess, "The number of sequential accesses");
STATISTIC(NumAssumptionAccept,
          "Number of assumption from predicated SCEV accepted");
STATISTIC(NumAssumptionReject,
          "Number of assumption from predicated SCEV rejected");

using namespace llvm;

static cl::opt<bool> RunChecks("reflow-seq-access-info-checks",
                               cl::desc("Run on demand checks for widen burst"),
                               cl::Hidden, cl::init(false),
                               cl::cat(ReflowCategory));

// When set, SeqAccess::print also lists every instruction in the chain.
// Fixed typo in the user-visible help text: "infomation" -> "information".
static cl::opt<bool> EnableVerboseSeqAccessInfo(
    "reflow-seq-access-verbose-info",
    cl::desc("Print out verbose sequential access information"), cl::Hidden,
    cl::init(false), cl::cat(ReflowCategory));

static cl::opt<bool>
    EnableSADiag("reflow-seq-access-analyis-enable-diag",
                 cl::desc("Enable sequential access info diagnostics."),
                 cl::Hidden, cl::init(false), cl::cat(ReflowCategory));

// Amount of data (in bytes) inferred per step for a sequential access.
// Fixed typo in the user-visible help text: "chunck" -> "chunk".
static cl::opt<unsigned>
    ChunkDataSize("reflow-seq-access-chunk-data-size",
                  cl::desc("The chunk data size that each time we infer for a "
                           "sequential access."),
                  cl::Hidden, cl::init(512 * 256), cl::cat(ReflowCategory));

static cl::opt<unsigned> MaxPointerSelectionDepth(
    "reflow-max-pointer-selection-depth",
    cl::desc("The maximum depth of exploring pointer selection when evaluation "
             "consecutive access."),
    cl::Hidden, cl::init(3), cl::cat(ReflowCategory));

static cl::opt<bool> EnableVectorization(
    "reflow-seq-access-analyze-enable-vectorization",
    cl::desc("Enable vectorization when stride is a power of 2."), cl::Hidden,
    cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableLoopSeqAccessForAccessUnderPredicates(
    "reflow-loop-seq-access-for-access-under-preds",
    cl::desc("Enable analyzing accesses under predicates."), cl::Hidden,
    cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableLoopSeqAccessForAccessUnderModPredicate(
    "reflow-loop-seq-access-for-access-under-mod-pred",
    cl::desc("Enable analyzing accesses under a modulo predicate."), cl::Hidden,
    cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableLoopSeqAccessForAccessUnderInvariantPredicate(
    "reflow-loop-seq-access-for-access-under-invariant-pred",
    cl::desc("Enable analyzing accesses under a loop invariant predicate."),
    cl::Hidden, cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableSeqAccessOnImperfectLoopNest(
    "reflow-seq-access-on-imperfect-loop",
    cl::desc("Enable analyzing accesses on imperfect loop nest."), cl::Hidden,
    cl::init(true), cl::cat(ReflowCategory));

static cl::opt<bool> EnableSeqAccessInfoInDataFlowRegion(
    "reflow-seq-access-analysis-enable-dataflow-region",
    cl::desc("Enable analyzing sequential accesses in dataflow region."),
    cl::Hidden, cl::init(true), cl::cat(ReflowCategory));

// TODO: Move to ReflowConfig if we have the auto bundle inference(reassignment
//       when there are bundle conflicts) transformation in the future.
static cl::opt<bool> EnableAutoBundleInference(
    "reflow-seq-access-auto-bundle-inference",
    cl::desc("Enable automatically bundle inference."), cl::Hidden,
    cl::init(false), cl::cat(ReflowCategory));

// Returns the address operand of a supported memory-access instruction, or
// nullptr when \p I is not one of the recognized access kinds.
static Value *getPointerOperand(Value *I) {
  Value *Ptr = nullptr;
  if (auto *Load = dyn_cast<LoadInst>(I))
    Ptr = Load->getPointerOperand();
  else if (auto *Store = dyn_cast<StoreInst>(I))
    Ptr = Store->getPointerOperand();
  else if (auto *Begin = dyn_cast<SeqBeginInst>(I))
    Ptr = Begin->getPointerOperand();
  else if (auto *Access = dyn_cast<SeqAccessInst>(I))
    Ptr = Access->getPointerOperand();
  else if (auto *MStore = dyn_cast<MAXIStoreInst>(I))
    Ptr = MStore->getPointerOperand();

  return Ptr;
}

// Returns the address space of the pointer operand of a supported access
// instruction. For unsupported instructions returns -1, which wraps to the
// maximum unsigned value because of the unsigned return type.
// NOTE(review): unlike getPointerOperand above, SeqAccessInst is not handled
// here — confirm whether that is intentional.
static unsigned getPointerAddressSpace(Value *I) {
  if (auto L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (auto S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  if (auto S = dyn_cast<SeqBeginInst>(I))
    return S->getPointerAddressSpace();
  if (auto S = dyn_cast<MAXIStoreInst>(I))
    return S->getPointerAddressSpace();

  // Sentinel for "unknown": wraps to UINT_MAX.
  return -1;
}

// Returns the type of the data moved by access \p I: the stored operand's
// type for stores, the explicit data type for SeqBegin intrinsics, and the
// instruction's own type otherwise (e.g. loads).
static Type *getValueType(Value *I) {
  if (auto *Store = dyn_cast<StoreInst>(I))
    return Store->getValueOperand()->getType();
  if (auto *Begin = dyn_cast<SeqBeginInst>(I))
    return Begin->getDataType();
  if (auto *MStore = dyn_cast<MAXIStoreInst>(I))
    return MStore->getValueOperand()->getType();

  return I->getType();
}

// Returns true if access \p I is simple (not volatile/atomic). SeqBegin
// intrinsics are always considered simple.
static bool isSimpleAccess(Value *I) {
  if (auto L = dyn_cast<LoadInst>(I))
    return L->isSimple();
  if (auto S = dyn_cast<StoreInst>(I))
    return S->isSimple();
  // Use isa<> instead of binding an unused variable (silences -Wunused).
  if (isa<SeqBeginInst>(I))
    return true;
  if (auto S = dyn_cast<MAXIStoreInst>(I))
    return !S->isVolatile();

  llvm_unreachable("Unknown access instruction!");
}

// Returns true if access \p AI reads memory (load side of a chain).
static bool isLoadAccess(Value *AI) {
  // Parenthesize the whole disjunction: previously `&& "msg"` bound only to
  // the final isa<> term, so the message did not cover the full condition
  // (and triggers -Wparentheses). The asserted condition itself is unchanged.
  assert((isa<LoadInst>(AI) || isa<StoreInst>(AI) ||
          isa<FPGALoadStoreInst>(AI) || isa<SeqBeginInst>(AI) ||
          isa<SeqAccessInst>(AI)) &&
         "Expect an access instruction!");

  if (isa<LoadInst>(AI) || isa<FPGALoadInst>(AI))
    return true;

  if (auto SBI = dyn_cast<SeqBeginInst>(AI))
    return SBI->isLoad();

  if (auto SAI = dyn_cast<SeqAccessInst>(AI))
    return SAI->isLoad();

  return false;
}

// Size in bytes that one element accessed by \p I occupies in memory.
static unsigned getValueSize(const DataLayout &DL, Value *I) {
  return DL.getTypeAllocSize(getValueType(I));
}

// Returns the alignment (in bytes) of access \p I. SeqBegin accesses are
// treated as aligned to their value size; MAXI stores fall back to the ABI
// alignment of the stored type when no explicit alignment is recorded.
static unsigned getAlignment(const DataLayout &DL, Value *I) {
  if (auto *L = dyn_cast<LoadInst>(I))
    return L->getAlignment();
  if (auto *S = dyn_cast<StoreInst>(I))
    return S->getAlignment();
  if (auto S = dyn_cast<SeqBeginInst>(I))
    return getValueSize(DL, S);
  if (auto S = dyn_cast<MAXIStoreInst>(I)) {
    // A zero/absent alignment means "unspecified" — use the ABI default.
    if (auto Align = S->getAlignment())
      return Align;
    return DL.getABITypeAlignment(getValueType(S));
  }

  llvm_unreachable("Unknown access instruction!");
}

// The alignment needed for \p I to count as naturally aligned: its own
// value size in bytes.
static unsigned getRequiredAlignment(const DataLayout &DL, Value *I) {
  return getValueSize(DL, I);
}

// Returns true when \p I is at least naturally aligned. SeqBegin chains are
// treated as aligned by construction.
static bool isAlignedAccess(const DataLayout &DL, Value *I) {
  if (isa<SeqBeginInst>(I))
    return true;

  const unsigned Have = getAlignment(DL, I);
  const unsigned Need = getRequiredAlignment(DL, I);
  return Need <= Have;
}

static unsigned getNumOfSubRegions(Region *R) { return R->end() - R->begin(); }

// Returns the pointer base of \p E, or nullptr when SCEV only produced a
// zero base (i.e. no meaningful base pointer could be identified).
static const SCEV *getLegalBasePtr(ScalarEvolution *SE, const SCEV *E) {
  const SCEV *Base = SE->getPointerBase(E);
  return Base->isZero() ? nullptr : Base;
}

// Decides whether a runtime assumption generated by predicated SCEV may be
// accepted. Only no-wrap predicates are accepted, and only when the global
// AssumeNoAddrWrap configuration is on; everything else is rejected.
// Updates the accept/reject statistics either way.
static bool checkPredicate(const SCEVPredicate &P) {
  assert(!isa<SCEVUnionPredicate>(P) && "Unexpected predicate type!");
  if (isa<SCEVWrapPredicate>(P) &&
      ReflowConfig::GlobalConfig().AssumeNoAddrWrap) {
    ++NumAssumptionAccept;
    return true;
  }

  // TODO: print a message
  ++NumAssumptionReject;
  return false;
}

// Returns the stride (second operand) of \p S when it is an affine
// recurrence over exactly loop \p L; otherwise nullptr.
// TODO: SCEVPFD or SCEVStep could be used here to deal with all the
// SCEVAddRecExpr forms.
static const SCEV *extractStrideFromAddRec(const SCEVAddRecExpr *S,
                                           const Loop &L) {
  if (S->getLoop() != &L || !S->isAffine())
    return nullptr;

  return S->getOperand(1);
}

// Returns the SCEV of \p V as an affine add-recurrence attached to loop
// \p L (computed under predicated SCEV), or nullptr if it has another form.
static const SCEVAddRecExpr *getAffineAddRec(Value *V, Loop &L,
                                             ScalarEvolution &SE) {
  PredicatedScalarEvolution PSE(SE, L);
  auto *Rec = dyn_cast<SCEVAddRecExpr>(PSE.getSCEV(V));
  if (Rec && Rec->getLoop() == &L && Rec->isAffine())
    return Rec;

  return nullptr;
}

// FIXME: Can be removed when merging with upstream
// Conservative read query: calls use the explicit does-not-read attribute,
// all other instructions use the generic instruction-level predicate.
static bool MayReadMemory(Instruction *I) {
  if (auto *Call = dyn_cast<CallInst>(I))
    return !Call->doesNotReadMemory();

  return I->mayReadFromMemory();
}

// Builds a Zone key: a (start block, end block) range paired with its
// owning region node.
static Zone getZone(BasicBlock *BS, BasicBlock *BE,
                    SeqAccessesRegionNode *RNode) {
  return {{BS, BE}, RNode};
}

// Rebuilds the zone key from the boundaries a zone node recorded.
static Zone createZone(SeqAccessesZoneNode *SN) {
  BasicBlock *Start = SN->getZoneStart();
  BasicBlock *End = SN->getZoneEnd();
  return getZone(Start, End, SN->getParent());
}

// Maps loop \p L onto a zone bounded by its preheader and its unique exit
// block. Returns None when either boundary is missing.
static Optional<Zone> getZone(Loop &L, SeqAccessesRegionNode *RNode) {
  BasicBlock *Preheader = L.getLoopPreheader();
  BasicBlock *Exit = L.getUniqueExitBlock();
  if (!Preheader || !Exit)
    return None;

  return getZone(Preheader, Exit, RNode);
}

// Computes the key under which accesses through \p Ptr are grouped into a
// candidate chain: normally the underlying object, but the select condition
// when the underlying object is a select.
static ChainID getChainID(const DataLayout &DL, const Value *Ptr) {
  const Value *ObjPtr = reflow::GetUnderlyingObject(Ptr, DL);
  if (const auto *Sel = dyn_cast<SelectInst>(ObjPtr)) {
    // The select's themselves are distinct instructions even if they share the
    // same condition and evaluate to consecutive pointers for true and false
    // values of the condition. Therefore using the select's themselves for
    // grouping instructions would put consecutive accesses into different lists
    // and they won't be even checked for being consecutive.
    return Sel->getCondition();
  }
  return ObjPtr;
}

// Returns true when \p Entry carries an operand bundle that marks a latency
// region (i.e. the scope was created for a latency pragma).
static bool isLatencyPragma(ScopeEntry *Entry) {
  SmallVector<OperandBundleDef, 1> Bundles;
  Entry->getOperandBundlesAsDefs(Bundles);

  for (OperandBundleDef &B : Bundles) {
    if (LatencyRegionEntry::compatible(B))
      return true;
  }
  return false;
}

// Latency pragmas are modeled as scope markers; both the entry and the exit
// of such a scope are treated as side effecting for this analysis.
static bool hasSideEffect(Instruction *I) {
  if (auto *Entry = dyn_cast<ScopeEntry>(I))
    return isLatencyPragma(Entry);

  if (auto *Exit = dyn_cast<ScopeExit>(I))
    return isLatencyPragma(Exit->getEntry());

  return false;
}

/// Detect if the basic block might be exposed into a dataflow region. Return
/// true if any of below is true.
/// - It's in a loop, and the loop might be exposed into a dataflow region.
/// - It's not in a loop, but is in a dataflow function.
/// NOTE(review): when EnableSeqAccessInfoInDataFlowRegion is set, both paths
/// report "not exposed" unless the loop itself is a dataflow loop —
/// presumably because the analysis is then allowed inside dataflow regions;
/// confirm the intended polarity against the flag's description.
static bool exposeInDataflowRegion(LoopInfo &LI, ScalarEvolution &SE,
                                   BasicBlock &BB, Instruction *FAI) {
  if (auto *L = LI.getLoopFor(&BB)) {
    if (!mayExposeInDataFlowRegion(SE, L))
      return false;

    // Check if there's no chance that the loop L would disappear: its trip
    // count must be computable for the checks below.
    PredicatedScalarEvolution PSE(SE, *L);
    const SCEV *BTC = PSE.getBackedgeTakenCount();
    if (isa<SCEVCouldNotCompute>(BTC))
      return false;

    // Rotated loops execute one more iteration than the backedge count.
    const SCEV *LTC =
        isRotatedLoop(L) ? SE.getAddExpr(BTC, SE.getOne(BTC->getType())) : BTC;
    if (!isDataFlow(L) && !(LTC->isOne()) && !mayFullyUnroll(L, LTC))
      return false;

    // NOTE: 3.1 dataflow is not yet ready to support region burst in a dataflow
    //       loop. Thus, we accept the region burst that's in a dataflow region
    //       under the flag, as long as it's not in a dataflow loop.
    if (EnableSeqAccessInfoInDataFlowRegion && !isDataFlow(L))
      return false;

    return true;
  }

  // Not in a loop: exposed only when the enclosing function is dataflow and
  // the in-dataflow-region analysis is disabled.
  if (!EnableSeqAccessInfoInDataFlowRegion && isDataFlow(BB.getParent()))
    return true;

  return false;
}

//===----------------------------------------------------------------------===//
/// DiagFailureInfo implementation

// The access instruction whose analysis failed.
const Instruction *DiagFailureInfo::getFailureInst() { return AI; }

// The category of the recorded failure.
DiagFailureInfo::FailureType DiagFailureInfo::getFailureType() { return FT; }

// True if the failure happened while extending an already analyzed
// sequential access.
bool DiagFailureInfo::isExtendAnalyzedSA() { return ExtendAnalyzedSA; }

// The side-effecting instruction that blocked the analysis, if any.
Instruction *DiagFailureInfo::getSideEffectInst() { return SI; }

// The loop under analysis when the failure was recorded, if any.
Loop *DiagFailureInfo::getDiagLoop() { return L; }

//===----------------------------------------------------------------------===//
/// SeqAccess implementation

// Returns true if \p I is where this chain begins: for explicit SeqAccess
// intrinsics that is the pointer operand of the first access, otherwise it
// is the first access itself.
bool SeqAccess::isSeqAccessChainBegin(Instruction *I) const {
  auto *First = getFirstAccess();
  if (auto *SAI = dyn_cast<SeqAccessInst>(First))
    return getPointerOperand(SAI) == I;

  return First == I;
}

// True if \p I is one of the accesses forming this chain.
bool SeqAccess::contains(Instruction *I) const { return Accesses.count(I); }

// The underlying object (e.g. the array/pointer) the chain accesses.
Value *SeqAccess::getUnderlyingObj() const { return UObj; }

// LOAD or STORE direction of the whole chain.
SeqAccess::AccessDirection SeqAccess::getDirection() const { return Dir; }

// SCEV of the chain's start address.
const SCEV *SeqAccess::getStart() const { return Start; }

// SCEV of the chain's total access length.
const SCEV *SeqAccess::getLen() const { return Len; }

// All accesses in the chain. NOTE: returned by value — this copies the
// whole SetVector on each call.
SetVector<Instruction *> SeqAccess::getAccesses() const { return Accesses; }

// Number of accesses in the chain.
size_t SeqAccess::getSeqAccessChainSize() const { return Accesses.size(); }

// The first access of the chain in insertion order.
Instruction *SeqAccess::getFirstAccess() const { return Accesses[0]; }

// The (predicate, expected-value) pairs guarding this chain. Returned by
// value — copies the SetVector.
SetVector<std::pair<Value *, ConstantInt *>> SeqAccess::getPreds() const {
  return Preds;
}

// True when the chain only executes under at least one predicate.
bool SeqAccess::isPredicated() const { return !Preds.empty(); }

// The instruction where this chain begins: explicit SeqAccess intrinsics
// chain through their pointer operand, everything else begins at the first
// access itself.
Instruction *SeqAccess::getSeqAccessChainBegin() const {
  auto *First = getFirstAccess();
  auto *SAI = dyn_cast<SeqAccessInst>(First);
  return SAI ? cast<Instruction>(getPointerOperand(SAI)) : First;
}

Type *SeqAccess::getAccessTy() const {
  // Returns the access type analyzed from the underlying object. The result is
  // different from `getAccessStoreTy` for non-byte size type. For non-memory
  // related sequential access pattern inference, this is the one to go for
  // asking access type, for example, auto stream(FIFO) inference.
  return AccessTy;
}

Type *SeqAccess::getAccessStoreTy() const {
  // Rewrites iN type to iK type, where K is the store size of AccessTy in
  // bit. For vector and floating point type, returns the type directly since
  // these type themselves requires to match between type size and store size.
  // The result is different from `getAccessTy` for non-byte size type. For
  // memory related sequential access pattern inference, this is the one to go
  // for asking access type, for example, auto burst(MAXI) inference.
  return AccessTy->isIntegerTy()
             ? Type::getIntNTy(*Ctx, DL->getTypeStoreSizeInBits(AccessTy))
             : AccessTy;
}

// The instruction position at which the chain begins.
Instruction *SeqAccess::getBeginPosition() const { return BP; }

// The instruction position at which the chain ends.
Instruction *SeqAccess::getEndPosition() const { return EP; }

// Pretty-prints the chain as IR-style comment lines: guarding predicates
// (if any), direction, underlying object, start address and length, and —
// under the verbose flag — each member access.
void SeqAccess::print(raw_ostream &OS) const {
  if (isPredicated()) {
    OS << "; Under predicates:\n";
    for (const auto &Pred : getPreds()) {
      OS << ";   ";
      if (Pred.second->isZero())
        OS << "not ";
      OS << *Pred.first << '\n';
    }
  }

  const bool IsLoad = getDirection() == AccessDirection::LOAD;
  OS << "; Sequential " << (IsLoad ? "loads " : "stores ");

  OS << "on `" << getUnderlyingObj()->getName() << "` starts at ";
  getStart()->print(OS);
  OS << " with length of ";
  getLen()->print(OS);
  OS << '\n';

  if (EnableVerboseSeqAccessInfo) {
    OS << "; Including:\n";
    for (auto *AI : getAccesses())
      OS << ";   " << *AI << '\n';
  }
}
//===----------------------------------------------------------------------===//
/// SeqAccessesZoneNode implementation
// Records a newly inferred sequential access chain on \p Obj. The created
// SeqAccess is owned by this node and destroyed in clear()/remove().
void SeqAccessesZoneNode::addSeqAccess(
    Value *Obj, Type *Ty, ArrayRef<Instruction *> Chain,
    SeqAccess::AccessDirection Dir, const SCEV *Start, const SCEV *Len,
    Instruction *BP, Instruction *EP,
    ArrayRef<std::pair<Value *, ConstantInt *>> Preds) {
  SeqAccess *SA =
      new SeqAccess(Ctx, DL, Obj, Dir, Start, Len, Ty, BP, EP, Chain, Preds);

  // operator[] default-constructs the SetVector for a new key, so the
  // previous count()+insert() sequence was redundant.
  SeqAccesses[Obj].insert(SA);
}

// Drops \p SA from the chains recorded on its underlying object and
// destroys it. Uses a single find() instead of count()+operator[] (one map
// lookup instead of two). If the object was never registered, \p SA is left
// untouched — preserving the original behavior.
void SeqAccessesZoneNode::remove(SeqAccess *SA) {
  auto It = SeqAccesses.find(SA->getUnderlyingObj());
  if (It == SeqAccesses.end())
    return;

  It->second.remove(SA);
  delete SA;
}

// Destroys every SeqAccess owned by this node. Iterate by reference — the
// previous by-value loop copied each map entry, including its whole
// SetVector of chains, on every iteration.
void SeqAccessesZoneNode::clear() {
  for (auto &SAEntry : SeqAccesses)
    for (auto *SA : SAEntry.second)
      delete SA;
  SeqAccesses.clear();
}

// First basic block of the zone this node covers.
BasicBlock *SeqAccessesZoneNode::getZoneStart() const { return RS; }

// Last basic block of the zone this node covers.
BasicBlock *SeqAccessesZoneNode::getZoneEnd() const { return RE; }

// The region node this zone was inferred under.
SeqAccessesRegionNode *SeqAccessesZoneNode::getParent() const { return Parent; }

// Looks up the chain that contains access \p I (or whose chain begins at
// \p I) by first normalizing the pointer down to its chain ID.
SeqAccess *SeqAccessesZoneNode::getSeqAccess(const Instruction *I) {
  auto AI = const_cast<Instruction *>(I);
  auto P = getPointerOperand(AI);
  // SeqAccess intrinsics chain through a SeqBegin; unwrap one level to get
  // the real pointer.
  if (isa<SeqBeginInst>(P))
    P = getPointerOperand(P);

  auto ID = getChainID(*DL, P);
  if (SeqAccesses.count(ID) == 0)
    return nullptr;

  for (auto SA : SeqAccesses[ID]) {
    if (SA->contains(AI))
      return SA;

    if (SA->getSeqAccessChainBegin() == AI)
      return SA;
  }

  return nullptr;
}

// Returns the single chain inferred on \p UObj, or nullptr when none or
// more than one exists. A single find() replaces the previous
// count()/size()/front() triple lookup.
SeqAccess *SeqAccessesZoneNode::getUniqueSeqAccessOnUObj(const Value *UObj) {
  auto It = SeqAccesses.find(UObj);
  if (It == SeqAccesses.end())
    return nullptr;

  return It->second.size() == 1 ? It->second.front() : nullptr;
}

//===----------------------------------------------------------------------===//
/// SeqAccessesRegionNode implementation

// Takes ownership of \p SN and registers it under its zone key. Ownership
// is taken *before* the duplicate check so that SN is destroyed — instead
// of leaked, as previously — when a node for the same zone already exists.
void SeqAccessesRegionNode::addSeqAccessesNode(SeqAccessesZoneNode *SN) {
  std::unique_ptr<SeqAccessesZoneNode> USN(SN);

  auto NodeZone = createZone(SN);
  if (SeqAccessesZoneNodes.count(NodeZone))
    return;

  SeqAccessesZoneNodes[NodeZone] = std::move(USN);
}

// Releases every zone node owned by this region node.
void SeqAccessesRegionNode::clear() { SeqAccessesZoneNodes.clear(); }

// The SESE region this node corresponds to.
Region *SeqAccessesRegionNode::getRegion() const { return R; }

// Returns the zone node registered for \p NodeZone, or nullptr. Uses find()
// instead of operator[]: the subscript operator default-inserted an empty
// (null) entry for every zone that was merely queried, growing the map.
SeqAccessesZoneNode *SeqAccessesRegionNode::getZoneNode(Zone NodeZone) {
  auto It = SeqAccessesZoneNodes.find(NodeZone);
  return It == SeqAccessesZoneNodes.end() ? nullptr : It->second.get();
}

// Delegates the access lookup to the zone node covering \p NodeZone, if any.
SeqAccess *SeqAccessesRegionNode::getSeqAccess(const Zone *NodeZone,
                                               const Instruction *I) {
  auto *ZNode = getZoneNode(*NodeZone);
  return ZNode ? ZNode->getSeqAccess(I) : nullptr;
}

//===----------------------------------------------------------------------===//
/// SeqAccessesInfo implementation

void SeqAccessesInfo::updateStatistics() { ++NumSeqAccess; }

// Recursive DFS over CFG successors that records a post-order-derived index
// for each reachable block in TopoOrderedBBs: \p ID counts down, and a block
// receives its index when all of its successors have finished (so earlier-
// finishing blocks get larger indices). Returns the next free index.
// NOTE(review): recursion depth is proportional to CFG depth — could
// overflow the stack on pathological CFGs; confirm acceptable here.
int SeqAccessesInfo::topologicalSort(BasicBlock *BB, int ID,
                                     SetVector<BasicBlock *> &Visited) {
  if (Visited.count(BB))
    return ID;

  Visited.insert(BB);

  for (auto *SBB : successors(BB))
    ID = topologicalSort(SBB, ID, Visited);

  assert(ID > 0);
  TopoOrderedBBs.insert({BB, --ID});

  return ID;
}

// Analysis entry point for function \p F: number all blocks reachable from
// the entry (seeding the counter with the block count), then walk the
// region tree bottom-up.
void SeqAccessesInfo::calculate(Function &F) {
  DEBUG(dbgs() << "Calculating Function: " << F.getName() << '\n');
  SetVector<BasicBlock *> Visited;
  topologicalSort(&F.getEntryBlock(), F.size(), Visited);

  // Process the region in bottom-up fashion.
  process(*RI->getTopLevelRegion());
}

// Post-order walk over the region tree: every sub-region is processed
// before the region itself, so inner results are available to the outer
// calculation. (For a leaf region the loop is empty and we calculate
// directly, matching the previous explicit leaf check.)
void SeqAccessesInfo::process(Region &R) {
  for (auto &SubR : R)
    process(*SubR);

  calculate(R);
}

// Analyzes one SESE region: creates and registers its region node, gathers
// the blocks owned directly by this region (plus its exit), collects
// potentially wrapping / memory-referencing instructions, then infers
// straight-line (BB) chains, extends sub-region results, and finally infers
// loop-level chains for loops contained in the region.
void SeqAccessesInfo::calculate(Region &R) {
  DEBUG({
    dbgs() << "--- Calculating Region:\n";
    R.dump();
  });

  // The map owns the node; RNode stays valid as a raw handle below.
  SeqAccessesRegionNode *RNode = new SeqAccessesRegionNode(Ctx, DL, &R);
  std::unique_ptr<SeqAccessesRegionNode> URNode(RNode);
  SeqAccessesRegionMap.insert({&R, std::move(URNode)});

  std::vector<BasicBlock *> WorkList;
  for (auto *BB : R.blocks()) {
    // Skip unreachable
    if (!DT->isReachableFromEntry(BB))
      continue;

    // Only blocks whose innermost region is R; sub-region blocks were
    // handled when their own region was calculated.
    if (RI->getRegionFor(BB) == &R)
      WorkList.emplace_back(BB);
  }
  if (BasicBlock *ExitBB = R.getExit())
    WorkList.emplace_back(ExitBB);

  collectMayAccessesAndMayWrapsInRegion(WorkList, RNode);

  // Order the work list by the indices recorded in topologicalSort
  // (descending).
  std::sort(WorkList.begin(), WorkList.end(),
            [&](BasicBlock *BB0, BasicBlock *BB1) {
              return TopoOrderedBBs[BB0] > TopoOrderedBBs[BB1];
            });

  SetVector<Loop *> Loops;

  // Infer sequential accesses within BBs region.
  for (auto *BB : WorkList) {
    if (auto *L = LI->getLoopFor(BB)) {
      DEBUG(dbgs() << "Found Loop: " << *L << " in Region\n");
      Loops.insert(L);
    }

    if (VisitedBBs.count(BB))
      continue;

    DEBUG(dbgs() << "------ Calculating BB: " << BB->getName() << '\n');
    calculate(RNode, *BB);
    DEBUG(dbgs() << "------\n");
  }

  // Extend sequential accesses of sub regions.
  extend(RNode, R);

  // Infer sequential accesses within loop region.
  for (auto *L : Loops) {
    if (!R.contains(L))
      continue;

    LoopRegion[L] = &R;

    if (VisitedLoops.count(L))
      continue;

    DEBUG(dbgs() << "Visiting Loop: " << *L << "\n");
    VisitedLoops.insert(L);

    calculate(RNode, *L);
  }

  DEBUG(dbgs() << "---\n");
}

// Scans every instruction in \p WorkList's blocks and records on \p RNode:
// - MayWraps: urem/srem/and instructions whose results may wrap addresses,
// - MayLoadRefs / MayStoreRefs: instructions that may read / write memory.
void SeqAccessesInfo::collectMayAccessesAndMayWrapsInRegion(
    std::vector<BasicBlock *> &WorkList, SeqAccessesRegionNode *RNode) {
  for (auto BB : WorkList)
    for (auto &I : *BB) {
      if (auto BO = dyn_cast<BinaryOperator>(&I)) {
        auto Opcode = BO->getOpcode();
        // urem and srem is sources of overwrapping addresses for accesses over
        // loop. Even with reflow flag: AssumeNoAddrWrap, we should not infer
        // sequential access chain over the loop when urem/srem is contributed
        // to the consecutiveness.
        if (Opcode == Instruction::URem || Opcode == Instruction::SRem) {
          RNode->MayWraps.emplace_back(&I);
          continue;
        }

        if (Opcode == Instruction::And) {
          // urem iN %var, %n is equivalent to and iN %var, %n-1, where %n is
          // power of 2.
          RNode->MayWraps.emplace_back(&I);
        }
      }

      if (!I.mayReadOrWriteMemory())
        continue;

      if (MayReadMemory(&I))
        RNode->MayLoadRefs.emplace_back(&I);

      if (I.mayWriteToMemory())
        RNode->MayStoreRefs.emplace_back(&I);
    }
}

// Analyzes the maximal straight-line chain of blocks ending at \p BB and
// records the resulting zone node on \p RNode (which takes ownership of
// the heap-allocated ZoneNode via addSeqAccessesNode).
void SeqAccessesInfo::calculate(SeqAccessesRegionNode *RNode, BasicBlock &BB) {
  std::deque<BasicBlock *> WorkList;
  collectBBs(WorkList, BB);

  SeqAccessesZoneNode *ZoneNode =
      new SeqAccessesZoneNode(WorkList.front(), &BB, RNode);
  ReflowDiagnostic RD(DEBUG_TYPE, *ORE, *DL);
  BBsAnalyzer AAnalyzer(this, ZoneNode, Ctx, DL, LI, DT, AA, SE, SEC, RD,
                        EnableDiag);
  AAnalyzer.infer(WorkList);
  RNode->addSeqAccessesNode(ZoneNode);
}

// Grows a straight-line chain of blocks upwards from \p BB: keeps
// prepending the unique predecessor while that predecessor's unique
// successor is the current head (i.e. PredBB doms BB and BB pdoms PredBB),
// marking each collected block as visited.
void SeqAccessesInfo::collectBBs(std::deque<BasicBlock *> &WorkList,
                                 BasicBlock &BB) {
  BasicBlock *Head = &BB;
  WorkList.emplace_back(Head);
  VisitedBBs.insert(Head);

  for (BasicBlock *Pred = Head->getUniquePredecessor(); Pred;
       Pred = Head->getUniquePredecessor()) {
    if (VisitedBBs.count(Pred) || Pred->getUniqueSuccessor() != Head)
      break;

    Head = Pred;
    WorkList.emplace_front(Head);
    VisitedBBs.insert(Head);
  }
}

// Zone-level inference: gather candidate accesses in the block chain, then
// try to chain the collected loads and stores independently.
void BBsAnalyzer::infer(std::deque<BasicBlock *> &WorkList) {
  collectAccesses(WorkList);
  inferRefs(SN->LoadRefs);
  inferRefs(SN->StoreRefs);
}

// Walks the zone's blocks and partitions memory operations: explicit
// SeqBegin/SeqEnd pairs become ready-made SeqAccess entries on the zone
// node, while plain loads and (MAXI) stores are bucketed by chain ID for
// later inference.
void BBsAnalyzer::collectAccesses(std::deque<BasicBlock *> &WorkList) {
  for (auto BB : WorkList)
    for (auto &I : *BB) {
      if (!I.mayReadOrWriteMemory())
        continue;

      if (auto SBE = dyn_cast<SeqEndInst>(&I)) {
        auto SBI = cast<SeqBeginInst>(SBE->getBegin());
        // Skip if the begin and end are not in the same BB. This implies they
        // are not a BB burst
        if (SBE->getParent() != SBI->getParent())
          continue;

        const auto ID = getCandidateAccessID(SBI);
        if (!ID)
          continue;

        SmallVector<Instruction *, 8> Accesses;
        auto StartAddr = SE->getSCEV(getPointerOperand(SBI));
        auto ScevBurstSize = SE->getSCEV(SBI->getSize());

        // All users of the SeqBegin except the closing SeqEnd belong to the
        // chain.
        for (auto U : SBI->users()) {
          if (isa<SeqEndInst>(U))
            continue;

          Accesses.emplace_back(cast<Instruction>(U));
        }

        auto NodeZone = createZone(SN);
        SAI->addAccessZone(SBI, NodeZone);

        SN->addSeqAccess(const_cast<Value *>(ID), getValueType(SBI), Accesses,
                         isLoadAccess(SBI) ? SeqAccess::AccessDirection::LOAD
                                           : SeqAccess::AccessDirection::STORE,
                         StartAddr, ScevBurstSize, SBI, SBE);
        continue;
      }

      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (const auto ID = getCandidateAccessID(LI))
          SN->LoadRefs[ID].emplace_back(LI);
        continue;
      }

      if (isa<StoreInst>(I) || isa<MAXIStoreInst>(I))
        if (const auto ID = getCandidateAccessID(&I))
          SN->StoreRefs[ID].emplace_back(&I);
    }
}

// Collects the structural pieces the loop analyzer needs (dedicated exits,
// unique exit block, preheader, backedge-taken count, induction variable).
// Returns None when the loop is not in an analyzable shape. Fixes: the
// garbled first debug message ("canonical form exists" -> "dedicated
// exits", matching the check), missing spaces after `<< L`, and the local
// `LI` that shadowed the LoopInfo member.
Optional<ToBeAnalyzedLoopInfo>
SeqAccessesInfo::getToBeAnalyzedLoopInfo(Loop &L, ReflowDiagnostic &RD) {
  if (!L.hasDedicatedExits()) {
    DEBUG(dbgs() << "Ignoring " << L
                 << " as it does not have dedicated exits.");
    return None;
  }

  ToBeAnalyzedLoopInfo Info(L);

  Info.ExitBlock = L.getUniqueExitBlock();
  if (!Info.ExitBlock) {
    DEBUG(dbgs() << "Ignoring " << L
                 << " as it does not have a single exit block.");
    return None;
  }

  Info.Preheader = L.getLoopPreheader();
  if (!Info.Preheader) {
    DEBUG(dbgs() << "Ignoring " << L << " as it does not have a preheader.");
    return None;
  }

  PredicatedScalarEvolution PSE(*SE, L);
  Info.BTC = PSE.getBackedgeTakenCount();
  if (!Info.BTC || isa<SCEVCouldNotCompute>(Info.BTC)) {
    DEBUG(dbgs() << "Ignoring " << L
                 << " as it does not have a computable backedge taken count.");
    // Remember the failure so diagnostics are not emitted twice.
    if (EnableDiag)
      RD.emitCouldNotAnalyzePattern(&L);
    CouldNotAnalyzedLoops.insert(&L);
    return None;
  }

  Info.IndVar = getIndVarOrAuxiliaryIndVar(&L, *SE);
  return Info;
}

// Treats a loop as an "imperfect" nest when it has more than one immediate
// subloop. NOTE(review): a nest with one subloop plus sibling statements
// would not be flagged here — confirm this matches the intended definition.
static bool isImperfectLoopNest(Loop &L) {
  return !L.empty() && L.getSubLoops().size() > 1;
}

// Infers loop-level sequential accesses for \p L: validates the loop's
// shape, builds a zone node spanning preheader..exit, runs the loop
// analyzer on it, and hands ownership of the node to \p RNode.
void SeqAccessesInfo::calculate(SeqAccessesRegionNode *RNode, Loop &L) {
  ReflowDiagnostic RD(DEBUG_TYPE, *ORE, *DL);
  auto ALI = getToBeAnalyzedLoopInfo(L, RD);
  if (!ALI.hasValue())
    return;

  if (!EnableSeqAccessOnImperfectLoopNest && isImperfectLoopNest(ALI->L))
    return;

  SeqAccessesZoneNode *LoopNode =
      new SeqAccessesZoneNode(ALI->Preheader, ALI->ExitBlock, RNode);
  LoopAnalyzer AAnalyzer(this, LoopNode, Ctx, DL, DT, AA, SE, SEC, RD, LI, *ALI,
                         LPA, EnableDiag);
  AAnalyzer.infer();
  RNode->addSeqAccessesNode(LoopNode);
}

// Records the zone (and loop) an access was inferred in. Accesses already
// part of a sequential chain get their zone overwritten; fresh accesses are
// simply inserted.
void SeqAccessesInfo::addAccessZone(const Instruction *AI, Zone NodeZone,
                                    Loop *L) {
  if (!isContainedInSeqAccess(AI)) {
    AccessZone.insert({AI, {NodeZone, L}});
    return;
  }

  updateAccessZone(AI, NodeZone, L);
}

// Unconditionally overwrites the zone/loop recorded for access \p AI.
void SeqAccessesInfo::updateAccessZone(const Instruction *AI, Zone NodeZone,
                                       Loop *L) {
  AccessZone[AI] = {NodeZone, L};
}

// Records a diagnostic failure for access \p AI; only the first failure per
// access is kept (returns false when one already exists). The new
// DiagFailureInfo is heap allocated and stored in DiagFailureInfos.
// NOTE(review): deallocation of these entries is not visible in this chunk
// — confirm they are freed elsewhere.
bool SeqAccessesInfo::addDiagFailureInfo(const Instruction *AI,
                                         DiagFailureInfo::FailureType FT,
                                         bool ExtendAnalyzedSA, Instruction *SI,
                                         Loop *L) {
  if (DiagFailureInfos.count(AI))
    return false;

  auto DFI = new DiagFailureInfo(AI, FT, ExtendAnalyzedSA, SI, L);
  DiagFailureInfos.insert({AI, DFI});
  return true;
}

// Loop-level inference entry point: first register explicit sequential
// access info, then collect candidate accesses and try to chain the loads
// and the stores independently.
void LoopAnalyzer::infer() {
  addExplicitSAInfo();
  collectAccesses();
  inferRefs(SN->LoadRefs);
  inferRefs(SN->StoreRefs);
}

// Tries to extend each candidate access (grouped by chain ID) across the
// loop. Accesses that already belong to a zone-level chain are extended via
// that chain's begin; others are chained from scratch.
void LoopAnalyzer::inferRefs(InstListMap &AccessRefs) {
  for (const auto &Chain : AccessRefs)
    for (auto CI : Chain.second) {
      // Predicate cubes are per-access scratch state; reset between accesses.
      LPA->clearSetPredCubes();
      if (auto Z = SAI->getSeqAccessZone(CI)) {
        auto RNode = Z->second;
        auto ZNode = RNode->getZoneNode(*Z);
        assert(ZNode && "Can not find corresponding SeqAccessesZoneNode!");

        // Skip multiple accesses separated in different sequential access
        // chains.
        auto SA = ZNode->getUniqueSeqAccessOnUObj(Chain.first);
        if (!SA)
          continue;

        inferChain(SA->getSeqAccessChainBegin(), SA);
        continue;
      }

      inferChain(CI);
    }
}

// Records (and optionally reports) that \p AI could not be chained because
// it sits under a conditional branch inside \p L.
void LoopAnalyzer::addDiagFailureAccessInCondBranch(Instruction *AI, Loop *L) {
  DEBUG(dbgs() << *AI << " ; under conditional branch.\n");
  if (EnableDiag)
    RD.emitAccessInCondBranch(AI, L);

  SAI->addDiagFailureInfo(AI, DiagFailureInfo::AccessInCondBranch,
                          /*ExtendAnalyzedSA=*/SAI->isContainedInSeqAccess(AI),
                          /*SI=*/nullptr, L);
}

// For an access already belonging to an inferred chain, the chain starts at
// its zone's start block; otherwise fall back to the access's own block.
BasicBlock *LoopAnalyzer::getAccessBeginBB(Instruction *AI) {
  auto NodeZone = SAI->getSeqAccessZone(AI);
  if (NodeZone && NodeZone->second)
    return NodeZone->second->getZoneNode(*NodeZone)->getZoneStart();

  return AI->getParent();
}

// Attempts to extend access \p AI (optionally an already inferred chain
// \p SA) across the analyzed loop. Performs a sequence of legality checks —
// predicate shape, stride legality, interfering side effects / wrapping —
// bailing out with a diagnostic on each failure, then delegates to the
// typed inferChain overload.
void LoopAnalyzer::inferChain(Instruction *AI, SeqAccess *SA) {
  PredicatedScalarEvolution PSE(*SE, ALI.L);

  auto BB = getAccessBeginBB(AI);

  // Fold the chain's own (loop-variant) predicates into the per-BB
  // predicate state before querying it below.
  if (SA)
    for (auto Pred : SA->getPreds())
      if (!LoopPredicateAnalysis::isLoopInvariantPred(&ALI.L, Pred.first))
        LPA->intersectWith({BB, &ALI.L}, Pred.first, Pred.second);

  // Early exit for unreachable bb.
  if (LPA->isUnreachableBB(BB, &ALI.L))
    return;

  SetVector<std::pair<Value *, ConstantInt *>> LoopInvariantPreds;
  if (!LPA->getLoopInvariantPreds(BB, &ALI.L, LoopInvariantPreds) ||
      (!EnableLoopSeqAccessForAccessUnderInvariantPredicate &&
       !LoopInvariantPreds.empty())) {
    addDiagFailureAccessInCondBranch(AI, &ALI.L);
    return;
  }

  // Do not support multiple "induction variable urem constant" predicates
  SetVector<Value *> ModPreds;
  if (!LPA->getLoopIndVarModConstantPreds(BB, &ALI.L, ModPreds) ||
      ModPreds.size() > 1) {
    addDiagFailureAccessInCondBranch(AI, &ALI.L);
    return;
  }

  auto ModTerm = LPA->getSingleLoopIndVarModConstantTerm(BB, &ALI.L);
  if (!EnableLoopSeqAccessForAccessUnderModPredicate && ModTerm) {
    addDiagFailureAccessInCondBranch(AI, &ALI.L);
    return;
  }

  auto Interval =
      LPA->getIntervalFromLoopIndVarPreds(BB, &ALI.L);
  IntervalInfo *IndVarInterval =
      Interval.hasValue() ? Interval.getPointer() : nullptr;

  auto NumTerms = LPA->getNumTerms(BB, &ALI.L);
  if (!NumTerms.hasValue()) {
    addDiagFailureAccessInCondBranch(AI, &ALI.L);
    return;
  }

  // We'll need induction variable to figure out the start address later on if
  // the BB is not executed in all iterations or if the BB is not only guarded
  // by loop invariant predicates. Bail out if we don't have the information.
  if (NumTerms.getValue() != 0 &&
      NumTerms.getValue() != LoopInvariantPreds.size() &&
      (!IndVarInterval || !ALI.IndVar)) {
    addDiagFailureAccessInCondBranch(AI, &ALI.L);
    return;
  }

  // True when the loop executes exactly one iteration for this BB.
  bool IsSingleBTC = LPA->isExecutedInAllIterations(BB, &ALI.L) &&
                     ALI.BTC->isOne();
  if (!hasLegalStride(PSE, AI, SA, ModTerm, IsSingleBTC))
    return;

  MemoryLocation CLoc = reflow::GetUnderlyingLocation(AI, *DL, LI);
  if (hasSideEffectInstructionOrIsWrappingAccess(
          PSE, SN->getParent()->getRegion(), AI, CLoc, SA))
    return;

  inferChain(PSE, AI, getAccessElementType(AI), SA, IndVarInterval, ModTerm,
             LoopInvariantPreds.getArrayRef());
}

// Returns the vectorization factor (stride in number of elements) for the
// access \p I, or 1 when the access cannot be vectorized.
unsigned LoopAnalyzer::getStrideInNumElts(PredicatedScalarEvolution &PSE,
                                          Instruction *I, SeqAccess *SA,
                                          Value *ModTerm) {
  // Vectorization is not supported for accesses inside an existing
  // sequential access chain, nor for accesses under a modulo predicate.
  if (SA || ModTerm)
    return 1;

  // Vectorization also requires the pointer to evolve as an AddRec.
  auto Rec = getAsAddRec(PSE, getPointerOperand(I), SA);
  if (!Rec)
    return 1;

  auto StrideSCEV = not_null(extractStrideFromAddRec(Rec, ALI.L));
  auto StrideInBytes = cast<SCEVConstant>(StrideSCEV)->getAPInt().getZExtValue();
  auto EltSize = getValueSize(*DL, I);
  auto Factor = StrideInBytes / EltSize;
  assert(llvm::isPowerOf2_64(Factor) && "Bad stride!");
  return Factor;
}

// Computes the SCEV of the first address touched by the access chain.
// \p SA       when non-null, the chain's recorded start SCEV is used instead
//             of computing \p Ptr's SCEV.
// \p Interval the induction-variable interval guarding the access block
//             (consumed by getNumStepsIn; may be null).
// \p IsExeAllItrsOrOnlyGuardByInvariantPreds when true, the access runs in
//             every iteration (or is only guarded by loop-invariant
//             predicates), so the start address is the pointer at iteration 0.
// \p ModTerm  non-null when the access sits under an "indvar % C == R"
//             predicate; in that case the raw SCEV (not the AddRec) is used.
const SCEV *
LoopAnalyzer::getStartAddr(PredicatedScalarEvolution &PSE, SeqAccess *SA,
                           Value *Ptr, IntervalInfo *Interval,
                           bool IsExeAllItrsOrOnlyGuardByInvariantPreds,
                           Value *ModTerm) {
  auto E = SA ? SA->getStart() : PSE.getSCEV(Ptr);
  if (IsExeAllItrsOrOnlyGuardByInvariantPreds) {
    // Simple case: evaluate the pointer expression at iteration 0.
    SCEVAtLoopIterationRewriter Itr0(*SE, ALI.L, SE->getZero(Ptr->getType()));
    return Itr0.evaluateAtItr(E);
  }

  // A non-AddRec pointer is only expected for modulo-predicated accesses
  // (precondition checked by the callers; see the analysis before inferChain).
  auto PtrAddRec = getAsAddRec(PSE, Ptr, SA);
  if (!PtrAddRec)
    assert(ModTerm && "Expect access on non addrec pointer only for access "
                      "under modulo predicate!");

  // Returns the SCEV that the access pointer \p PtrAddRec evolves at iteration
  // `steps`.
  const SCEV *PtrSCEV = ModTerm ? E : PtrAddRec;
  auto *Steps =
      BBPredicateAnalysisInfo::getNumStepsIn(ALI.IndVar, &ALI.L, Interval, *SE);
  SCEVAtLoopIterationRewriter StepsItrs(*SE, ALI.L, Steps);
  auto *StartAddr = StepsItrs.evaluateAtItr(PtrSCEV);
  return SE->getTruncateOrZeroExtend(StartAddr, Ptr->getType());
}

// Returns true when A is provably signed-less-or-equal to B: either the two
// expressions are identical, or both are constants that compare accordingly.
static bool isKnownSignedLessEqual(const SCEV *A, const SCEV *B) {
  if (A == B)
    return true;

  auto *ConstLHS = dyn_cast<SCEVConstant>(A);
  auto *ConstRHS = dyn_cast<SCEVConstant>(B);
  return ConstLHS && ConstRHS &&
         ConstLHS->getAPInt().sle(ConstRHS->getAPInt());
}

// Computes the burst length for an access in block \p BB as a SCEV, or null
// when no profitable burst can be formed. When \p SA is given, the result is
// scaled by the chain's per-iteration length.
const SCEV *
LoopAnalyzer::getBurstSize(BasicBlock *BB, IntervalInfo *Interval,
                           bool IsExeAllItrsOrOnlyGuardByInvariantPreds,
                           Type *SizeTy, SeqAccess *SA, Value *ModTerm) {
  const SCEV *ScevBurstSize = nullptr;
  if (IsExeAllItrsOrOnlyGuardByInvariantPreds) {
    // The access executes once per iteration: the burst length is the loop
    // trip count (backedge-taken count, plus one for rotated loops).
    if (isRotatedLoop(&ALI.L)) {
      auto LCount = SE->getAddExpr(ALI.BTC, SE->getOne(ALI.BTC->getType()));
      ScevBurstSize = SE->getTruncateOrZeroExtend(LCount, SizeTy);
    } else {
      ScevBurstSize = SE->getTruncateOrZeroExtend(ALI.BTC, SizeTy);
      // Clamp to zero when positivity cannot be proven; the backend might
      // not support a negative burst length.
      if (!SE->isKnownPositive(ScevBurstSize))
        ScevBurstSize = SE->getSMaxExpr(ScevBurstSize, SE->getZero(SizeTy));
    }

    DEBUG(dbgs()
          << "Deduced burst length in SCEV from the number of loop iterations: "
          << *ScevBurstSize << '\n');
  } else {
    // The access only executes for induction-variable values in the interval
    // [Lower, Upper): derive the burst length from interval width and stride.
    auto Upper = Interval->getUpper();
    auto Lower = Interval->getLower();
    if (IntervalInfo::isEmpty(*SEC, Lower, Upper))
      return nullptr;

    const SCEV *Dist = SE->getMinusSCEV(Upper, Lower);
    auto IndVarSCEV = not_null(getAffineAddRec(ALI.IndVar, ALI.L, *SE));
    auto Stride = not_null(extractStrideFromAddRec(IndVarSCEV, ALI.L));
    Dist = SE->getTruncateOrSignExtend(Dist, SizeTy);

    if (ModTerm) {
      // Under a modulo predicate, the effective stride comes from the
      // induction variable's affine form recorded by the predicate analysis.
      const SCEV *Base;
      std::tie(Base, Stride) = LPA->getIndVarAffine(BB, &ALI.L);
      if (!Base || !Stride)
        return nullptr;
    }

    Stride = SE->getTruncateOrZeroExtend(Stride, Dist->getType());
    // Skip burst length 1
    if (isKnownSignedLessEqual(SEC->getCanonSCEV(Dist),
                               SEC->getCanonSCEV(Stride)))
      return nullptr;

    // If we can't succeed to prove the U - L' is greater than 1,
    // issue a burst length of 0.(BE might not support negative burst length)
    if (!SE->isKnownPositive(Dist))
      Dist = SE->getSMaxExpr(Dist, SE->getZero(Dist->getType()));

    // burst size(length) = udiv_ceil((U - L'), stride)
    ScevBurstSize = SE->getUDivCeilSCEV(Dist, Stride);
  }

  // Need to multiply by block size
  if (SA)
    ScevBurstSize = SE->getMulExpr(
        ScevBurstSize,
        SE->getTruncateOrZeroExtend(SA->getLen(), ScevBurstSize->getType()));

  return ScevBurstSize;
}

// Infers a sequential access (burst) over the current loop for access \p AI
// and records it on this loop's zone node, possibly replacing/extending an
// existing chain from a child loop.
// \p T is currently unused (the element type is re-derived from the pointer).
// \p SA is non-null when \p AI already begins an inferred chain; \p Interval
// is the induction-variable range guarding the access block; \p ModTerm is
// the "indvar % C == R" predicate term if any; \p LoopInvariantPreds are the
// loop-invariant predicates guarding the access.
void LoopAnalyzer::inferChain(
    PredicatedScalarEvolution &PSE, Instruction *AI, Type *T, SeqAccess *SA,
    IntervalInfo *Interval, Value *ModTerm,
    ArrayRef<std::pair<Value *, ConstantInt *>> LoopInvariantPreds) {
  if (!isValidSeqAccessRegion(AI)) {
    DEBUG({
      dbgs() << "Identified sequential access chain over loop:\n";
      ALI.L.dump();
      dbgs() << "is exposed in the dataflow region. Give up inference.\n";
    });

    if (EnableDiag)
      RD.emitMayExposeInDataFlowRegion(AI);
    bool ExistingSA = SAI->isContainedInSeqAccess(AI);
    SAI->addDiagFailureInfo(AI, DiagFailureInfo::ExposeInDataflowRegion,
                            ExistingSA, /*SI=*/nullptr, &ALI.L);
    return;
  }

  auto NodeZone = createZone(SN);

  auto Ptr = getPointerOperand(AI);

  auto PtrTy = cast<PointerType>(Ptr->getType());
  Type *SizeTy = DL->getIntPtrType(PtrTy);
  Type *Ty = PtrTy->getPointerElementType();
  auto BB = getAccessBeginBB(AI);

  // Re-derive whether the access runs in every iteration (or is only guarded
  // by loop-invariant predicates); the caller already validated NumTerms.
  auto NumTerms = LPA->getNumTerms(BB, &ALI.L);
  assert(NumTerms.hasValue() && "Expect analyzed terms!");
  bool IsExeAllItrsOrOnlyGuardByInvariantPreds =
      NumTerms.getValue() == 0 ||
      NumTerms.getValue() == LoopInvariantPreds.size();
  auto StartAddr = getStartAddr(
      PSE, SA, Ptr, Interval, IsExeAllItrsOrOnlyGuardByInvariantPreds, ModTerm);

  if (isa<SCEVCouldNotCompute>(StartAddr))
    return;

  unsigned Stride = getStrideInNumElts(PSE, AI, SA, ModTerm);
  if (Stride > 1) {
    assert(EnableVectorization && "Expect enable vectorization!");

    Ty = vectorization::ReplicateType(Ty, Stride, *DL);
    PtrTy = PointerType::get(Ty, PtrTy->getAddressSpace());

    // Can not represent the new start address in SCEV after change to
    // vectorized element type.
    StartAddr = nullptr;
  }

  auto ScevBurstSize =
      getBurstSize(BB, Interval, IsExeAllItrsOrOnlyGuardByInvariantPreds,
                   SizeTy, SA, ModTerm);
  if (!ScevBurstSize)
    return;

  Instruction *OldBeginPos = nullptr;
  Instruction *OldEndPos = nullptr;
  SeqAccessesZoneNode *OldN = nullptr;
  SeqAccess *OldSA = nullptr;
  SmallVector<Instruction *, 8> Accesses;
  SetVector<std::pair<Value *, ConstantInt *>> Preds(LoopInvariantPreds.begin(),
                                                     LoopInvariantPreds.end());
  if (auto OldNodeZone = SAI->getSeqAccessZone(AI)) {
    // The access already heads a chain from a child loop: migrate all of its
    // accesses and still-invariant predicates into the new, wider zone.
    auto RNode = OldNodeZone->second;
    OldN = RNode->getZoneNode(*OldNodeZone);
    // Check each pointer *before* dereferencing it (the original code called
    // OldN->getSeqAccess() first and asserted afterwards).
    assert(OldN &&
           "Expect consistent info between AccessZone map and SeqAccess node!");
    OldSA = OldN->getSeqAccess(AI);
    assert(OldSA &&
           "Expect consistent info between AccessZone map and SeqAccess node!");

    OldBeginPos = OldSA->getBeginPosition();
    OldEndPos = OldSA->getEndPosition();

    SAI->updateAccessZone(AI, NodeZone, &ALI.L);

    for (auto I : OldSA->getAccesses()) {
      SAI->updateAccessZone(I, NodeZone, &ALI.L);
      DEBUG({
        auto Z = SAI->getSeqAccessZone(I);
        dbgs() << "Access: " << *I << "\n"
               << "Updated to zone[" << Z->first.first->getName() << ", "
               << Z->first.second->getName() << "] equivalent to loop " << ALI.L
               << "\n";
      });
      Accesses.emplace_back(I);
    }

    // Keep only predicates from the old chain that remain loop invariant in
    // the current (outer) loop.
    for (auto OldPred : OldSA->getPreds())
      if (LoopPredicateAnalysis::isLoopInvariantPred(&ALI.L, OldPred.first))
        Preds.insert(OldPred);
  } else {
    SAI->addAccessZone(AI, NodeZone, &ALI.L);
    DEBUG(dbgs() << "Access: " << *AI << "\n");
    Accesses.emplace_back(AI);
  }

  SN->addSeqAccess(const_cast<Value *>(getChainID(*DL, Ptr)), Ty, Accesses,
                   isLoadAccess(AI) ? SeqAccess::AccessDirection::LOAD
                                    : SeqAccess::AccessDirection::STORE,
                   StartAddr, ScevBurstSize, OldBeginPos ? OldBeginPos : AI,
                   OldEndPos ? OldEndPos : AI, Preds.getArrayRef());

  // The migrated (child-loop) chain is now subsumed; drop it.
  if (OldN) {
    assert(OldSA && "Expect an old sequential access to be removed!");
    OldN->remove(OldSA);
  }

  DEBUG({
    dbgs() << "SeqAccessInfo: Infer sequential access:\nStart: ";
    if (StartAddr)
      dbgs() << *StartAddr << "\n";
    else
      dbgs() << "(need vectorization)\n";
    dbgs() << "Length: " << *ScevBurstSize << '\n';
  });

  SAI->updateStatistics();
}

// Returns true when any instruction in region \p R (or a sub-region) may
// clobber access \p AI at location \p CLoc, or when the access may wrap.
bool LoopAnalyzer::hasSideEffectInstructionOrIsWrappingAccess(
    PredicatedScalarEvolution &PSE, Region *R, Instruction *AI,
    MemoryLocation &CLoc, SeqAccess *SA) {

  // Recurse into every sub-region first.
  for (auto &SubR : *R)
    if (hasSideEffectInstructionOrIsWrappingAccess(PSE, &*SubR, AI, CLoc, SA))
      return true;

  // Then check the may-store/may-load instructions recorded for this region.
  auto RNode = SAI->getSeqAccessesRegionNode(R);
  if (hasSideEffectInstruction(PSE, AI, CLoc, RNode->MayStoreRefs, SA))
    return true;
  if (hasSideEffectInstruction(PSE, AI, CLoc, RNode->MayLoadRefs, SA))
    return true;

  return isWrappingAccess(AI, RNode->MayWraps);
}

// Returns true when instruction \p I accesses memory in the same direction
// (read vs. write) as access \p AI.
static bool areOnSameDirection(Instruction *I, Instruction *AI) {
  // Loads conflict with readers; stores conflict with writers.
  return isLoadAccess(AI) ? MayReadMemory(I) : I->mayWriteToMemory();
}

// Gets the unique underlying object of \p P, or null when \p P is not a
// pointer or resolves to more than one candidate object.
static Value *getUniqueUnderlyingObject(Value *P, const DataLayout &DL,
                                        LoopInfo *LI = nullptr) {
  if (!P->getType()->isPointerTy())
    return nullptr;

  SmallVector<Value *, 2> Candidates;
  GetUnderlyingObjects(P, Candidates, DL, LI,
                       ReflowConfig::GlobalConfig().AAMaxSearchDepth);

  // Reject multiple underlying objects.
  return Candidates.size() == 1 ? Candidates.back() : nullptr;
}

// Returns the MAXI bundle name for \p Ptr, or the empty string when the
// underlying object cannot be uniquely identified.
static StringRef getMAXIBundleOnPtr(Value &Ptr, const DataLayout &DL,
                                    LoopInfo *LI = nullptr) {
  if (auto *Obj = getUniqueUnderlyingObject(&Ptr, DL, LI))
    return XlxInterfaceIntrinsic::GetMAXIBundle(Obj, DL);
  return "";
}

// Returns true when \p Ptr may be on bundle \p Bundle. Conservatively
// answers true whenever either bundle name cannot be determined.
static bool areOnMAXIBundle(Value &Ptr, StringRef Bundle, const DataLayout &DL,
                            LoopInfo *LI = nullptr) {
  if (Bundle.empty())
    return true;

  auto PtrBundle = getMAXIBundleOnPtr(Ptr, DL, LI);
  return PtrBundle.empty() || Bundle == PtrBundle;
}

// Returns true when instruction \p SI may clobber (or conflict with) the
// burst candidate \p AI located at \p CLoc within loop \p L.
bool LoopAnalyzer::hasSideEffectInLoop(PredicatedScalarEvolution &PSE,
                                       Instruction *AI, MemoryLocation &CLoc,
                                       Instruction *SI, Loop &L) {
  if (hasSideEffect(SI))
    return true;

  auto Bundle = getMAXIBundleOnPtr(*getPointerOperand(AI), *DL, LI);

  // Do not extend sequential accesses on imperfect loop nest when there's a
  // high chance to introduce conflicting same direction bursts. Since the
  // legalization stage will cancel those conflicting same direction bursts,
  // this can lead to less burst at the end.
  if (isImperfectLoopNest(L)) {
    // Do not infer sequential accesses on imperfect loop nests for non maxi
    // interface.
    if (!ReflowConfig::GlobalConfig().BurstOnlyOnMAXI)
      return true;

    if (!EnableAutoBundleInference) {
      // When there's another same direction maxi access in this region, we
      // assume that it might be conflict. Thus, don't extend the sequential
      // accesses.
      if (auto CI = dyn_cast<CallInst>(SI)) {
        // Calls that may touch arbitrary memory are treated as conflicts.
        FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
        if (!AliasAnalysis::onlyAccessesInaccessibleOrArgMem(Behavior))
          return true;
      }

      // Scan SI's pointer operands for a same-direction MAXI (or, for
      // OpenCL, uniform-address-space) access on the same bundle.
      if (llvm::any_of(SI->operand_values(), [&](Value *Ptr) {
            if (!isa<PointerType>(Ptr->getType()))
              return false;

            if (XlxInterfaceIntrinsic::IsMAXI(Ptr, *DL) &&
                areOnSameDirection(SI, AI) &&
                areOnMAXIBundle(*Ptr, Bundle, *DL, LI))
              return true;

            if (SAI->isForOpenCL() && areOnSameDirection(SI, AI) &&
                spir::IsUniformAddressSpace(
                    Ptr->getType()->getPointerAddressSpace()))
              return true;
            return false;
          }))
        return true;
    }
  }

  ModRefInfo MRInfo = AA->getModRefInfo(SI, CLoc);

  if (!EnableAutoBundleInference) {
    // Same-bundle, same-direction MAXI accesses conflict on any mod OR ref.
    auto Ptr = getPointerOperand(SI);
    if (Ptr && XlxInterfaceIntrinsic::IsMAXI(Ptr, *DL) &&
        areOnSameDirection(SI, AI) && areOnMAXIBundle(*Ptr, Bundle, *DL, LI))
      return isModOrRefSet(MRInfo) && areAccessDataSetOverlap(PSE, SI, AI);
  }

  // Otherwise only true RAW/WAR hazards count: a load is clobbered by a
  // modifying SI, a store by a referencing SI — and only when the accessed
  // data sets may actually overlap.
  if (isLoadAccess(AI))
    return isModSet(MRInfo) && areAccessDataSetOverlap(PSE, SI, AI);

  // Check for stores
  return isRefSet(MRInfo) && areAccessDataSetOverlap(PSE, SI, AI);
}

// Returns true when any instruction in \p MayAccessRefs may clobber the
// burst candidate \p AI at location \p CLoc; emits a diagnostic on the first
// clobbering instruction found.
bool LoopAnalyzer::hasSideEffectInstruction(
    PredicatedScalarEvolution &PSE, Instruction *AI, MemoryLocation &CLoc,
    SmallVectorImpl<Instruction *> &MayAccessRefs, SeqAccess *SA) {
  for (auto SI : MayAccessRefs) {
    if (hasNoSideEffect(SI))
      continue;

    // Skip the access itself, and any member of its own chain: a chain
    // cannot clobber itself.
    if ((isa<LoadInst>(SI) || isa<StoreInst>(SI) || isa<MAXIStoreInst>(SI) ||
         isa<SeqBeginInst>(SI)) &&
        ((SA && is_contained(SA->getAccesses(), SI)) || (SI == AI)))
      continue;

    if (hasSideEffectInLoop(PSE, AI, CLoc, SI, ALI.L)) {
      DEBUG(dbgs() << "SeqAccessInfo: Found side-effecting operation: " << *SI
                   << '\n');
      if (EnableDiag)
        RD.emitAccessClobbered(SI, AI, &ALI.L);
      // NOTE: `SA ? true : false` replaced with the equivalent, idiomatic
      // null test.
      SAI->addDiagFailureInfo(AI, DiagFailureInfo::AccessClobbered,
                              /*ExistingSA=*/SA != nullptr, SI, &ALI.L);
      return true;
    }
  }

  return false;
}

// Returns true when the address of \p AI depends on a value from
// \p MayWraps that varies within the loop, i.e. the access may wrap.
bool LoopAnalyzer::isWrappingAccess(Instruction *AI,
                                    SmallVectorImpl<Instruction *> &MayWraps) {
  auto AddrSCEV = SE->getSCEV(getPointerOperand(AI));
  for (auto WrapInst : MayWraps) {
    auto WrapSCEV = SE->getSCEV(WrapInst);
    // Loop-invariant values cannot introduce per-iteration wrapping.
    if (SE->isLoopInvariant(WrapSCEV, &ALI.L))
      continue;

    if (!SE->hasOperand(AddrSCEV, WrapSCEV))
      continue;

    DEBUG(dbgs() << "SeqAccessInfo: Found wrapping access: " << *AI
                 << ". The wrapping is from \'" << *WrapInst << "\'.\n");
    ++NumAssumptionReject;
    return true;
  }

  return false;
}

// Returns true when the data sets accessed by \p LHS and \p RHS may overlap
// across the iterations of the current loop. Conservatively answers true
// whenever the address ranges cannot be computed or separated.
bool LoopAnalyzer::areAccessDataSetOverlap(PredicatedScalarEvolution &PSE,
                                           Instruction *LHS, Instruction *RHS) {
  auto LHSLoc = MemoryLocation::get(LHS);
  auto RHSLoc = MemoryLocation::get(RHS);
  if (!LHSLoc.Ptr || !RHSLoc.Ptr)
    return true;

  // Both pointers must evolve as AddRecs to reason about their ranges.
  auto *LHSAddRec = getAsAddRec(PSE, const_cast<Value *>(LHSLoc.Ptr));
  auto *RHSAddRec = getAsAddRec(PSE, const_cast<Value *>(RHSLoc.Ptr));
  if (!LHSAddRec || !RHSAddRec)
    return true;

  // [Start, End) of each access over the whole loop; End is the pointer value
  // evaluated at the parent loop's scope (i.e. after this loop finishes).
  auto &L = ALI.L;
  auto *LHSStart = LHSAddRec->getStart();
  auto *RHSStart = RHSAddRec->getStart();
  auto *LHSEnd = SE->getSCEVAtScope(LHSAddRec, L.getParentLoop());
  auto *RHSEnd = SE->getSCEVAtScope(RHSAddRec, L.getParentLoop());

  DEBUG(dbgs() << '[' << *LHSStart << ", " << *LHSEnd << ") \n"
               << '[' << *RHSStart << ", " << *RHSEnd << ") \n");
  // There is no overlap if LHSStart >= RHSEnd or RHSStart >= LHSEnd
  // FIXME: Make sure LHSEnd - LHSStart >= 0 and RHSEnd - RHSStart >= 0
  auto *Delta = SE->getMinusSCEV(LHSStart, RHSEnd);
  auto *Zero = SE->getZero(Delta->getType());
  DEBUG(dbgs() << "LHSStart - RHSEnd: " << *Delta << '\n');
  if (SE->isLoopEntryGuardedByCond(&L, ICmpInst::ICMP_SGE, Delta, Zero))
    return false;

  Delta = SE->getMinusSCEV(RHSStart, LHSEnd);
  DEBUG(dbgs() << "RHSStart - LHSEnd: " << *Delta << '\n');
  if (SE->isLoopEntryGuardedByCond(&L, ICmpInst::ICMP_SGE, Delta, Zero))
    return false;

  DEBUG(dbgs() << "Access dataset may overlap!\n\n");
  return true;
}

// Returns true when \p I is known to be harmless for burst inference.
bool LoopAnalyzer::hasNoSideEffect(Instruction *I) {
  // Instructions in the loop exit block run after the accesses we analyze.
  if (I->getParent() == ALI.ExitBlock)
    return true;

  // Plain memory accesses are exactly what can clobber a burst.
  if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<MAXIStoreInst>(I))
    return false;

  if (isa<SeqLoadInst>(I) || isa<SeqStoreInst>(I) || isa<SeqEndInst>(I))
    return true;

  if (isa<ScopeEntry>(I) || isa<ScopeExit>(I))
    return !hasSideEffect(I);

  // A handful of intrinsics are pure bookkeeping.
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::assume:
      return true;
    default:
      break;
    }
  }

  return false;
}

// Returns true when a sequential access chain over the current loop would
// not be illegally exposed in a dataflow region.
bool LoopAnalyzer::isValidSeqAccessRegion(Instruction *AI) {
  // Loops that cannot escape into a dataflow region are always fine.
  if (!mayExposeInDataFlowRegion(*SE, &ALI.L))
    return true;

  // Otherwise the chain is only acceptable when inference inside dataflow
  // regions is enabled and the loop itself is not a dataflow loop.
  if (!EnableSeqAccessInfoInDataFlowRegion || isDataFlow(&ALI.L))
    return false;

  DEBUG(dbgs() << ALI.L
               << " may be exposed in the dataflow region."
                  " But the identified sequential access chain"
                  " will not cross dataflow loop\n");
  return true;
}

// Returns true when \p BB executes on every loop iteration, i.e. it
// dominates the loop's exit block.
bool LoopAnalyzer::isExecutedInAllIterations(BasicBlock *BB) {
  bool DominatesExit = DT->dominates(BB, ALI.ExitBlock);
  return DominatesExit;
}

void LoopAnalyzer::addExplicitSAInfo() {
  for (auto &I : *ALI.ExitBlock) {
    auto SBE = dyn_cast<SeqEndInst>(&I);
    if (!SBE)
      continue;

    auto SBI = cast<SeqBeginInst>(SBE->getBegin());
    if (SBI->getParent() != ALI.Preheader)
      continue;

    const auto ID = getCandidateAccessID(SBI, LI, /*EnableUnalignBurst=*/false);
    if (!ID)
      continue;

    auto NodeZone = createZone(SN);
    SAI->addAccessZone(SBI, NodeZone, &ALI.L);
    auto StartAddr = SE->getSCEV(getPointerOperand(SBI));
    auto ScevBurstSize = SE->getSCEV(SBI->getSize());

    SmallVector<Instruction *, 8> Accesses;
    for (auto U : SBI->users()) {
      if (isa<SeqEndInst>(U))
        continue;

      Accesses.emplace_back(cast<Instruction>(U));
    }

    SN->addSeqAccess(const_cast<Value *>(ID), getValueType(SBI), Accesses,
                     SBI->isLoad() ? SeqAccess::AccessDirection::LOAD
                                   : SeqAccess::AccessDirection::STORE,
                     StartAddr, ScevBurstSize, SBI, SBE);
  }
}

// Collects burst-candidate accesses for the current loop in two phases:
// (1) single accesses/explicit chain begins in blocks directly belonging to
// this loop, and (2) chain heads inferred for the immediate child loops.
// Candidates are grouped by underlying-object ID into SN->LoadRefs /
// SN->StoreRefs for later chain inference.
void LoopAnalyzer::collectAccesses() {
  auto &L = ALI.L;

  // Collect single access or sequential access chain from the current loop.
  for (auto *BB : L.getBlocks()) {
    // Do not look at the BB that do not directly belong to the current loop
    if (LI->getLoopFor(BB) != &ALI.L)
      continue;

    for (auto &I : *BB) {
      if (!I.mayReadOrWriteMemory())
        continue;

      auto *SBI = dyn_cast<SeqBeginInst>(&I);
      auto *LAI = dyn_cast<LoadInst>(&I);

      // Only loads, stores, byte-enable stores, and explicit chain begins
      // can be burst candidates.
      if (!LAI && !SBI && !isa<StoreInst>(I) && !isa<MAXIStoreInst>(I))
        continue;

      if (!isExecutedInAllIterations(BB) &&
          !EnableLoopSeqAccessForAccessUnderPredicates) {
        addDiagFailureAccessInCondBranch(&I, &L);
        continue;
      }

      if (!isCandidateAccessInRegions(&I))
        continue;

      const auto ID =
          getCandidateAccessID(&I, LI, /*EnableUnalignBurst=*/false);
      if (!ID)
        continue;

      DEBUG(dbgs() << "Loop sequential accesses candidates:\n" << I << "\n");

      // Handle sequential accesses begins
      if (SBI) {
        if (SBI->isLoad()) {
          SN->LoadRefs[ID].emplace_back(SBI);
        } else {
          SN->StoreRefs[ID].emplace_back(SBI);
        }
        continue;
      }

      // Handle load
      if (LAI) {
        SN->LoadRefs[ID].emplace_back(LAI);
        continue;
      }

      // Handle stores/maxi.byteenable.store
      SN->StoreRefs[ID].emplace_back(&I);
    }
  }

  // Collect sequential access chain from the child loop.
  for (auto Child : L) {
    auto R = SAI->getLoopRegion(Child);
    auto RNode = SAI->getSeqAccessesRegionNode(R);
    auto ChildZone = getZone(*Child, RNode);
    if (!ChildZone.hasValue())
      continue;

    auto ChildSN = RNode->getZoneNode(*ChildZone);
    if (!ChildSN)
      continue;

    for (auto SAEntry : ChildSN->SeqAccesses) {
      auto *SA = SAEntry.second.front();
      auto I = SA->getSeqAccessChainBegin();

      // Skip the sequential access chain since it's extended to a longer one.
      auto *CurrentZone = SAI->getSeqAccessZone(I);
      if (*CurrentZone != *ChildZone)
        continue;

      if (!isExecutedInAllIterations(I->getParent()) &&
          !EnableLoopSeqAccessForAccessUnderPredicates) {
        addDiagFailureAccessInCondBranch(I, &L);
        continue;
      }

      if (SA->getDirection() == SeqAccess::AccessDirection::LOAD) {
        SN->LoadRefs[SA->getUnderlyingObj()].emplace_back(I);
        continue;
      }

      SN->StoreRefs[SA->getUnderlyingObj()].emplace_back(I);
    }
  }
}

// Returns the evolution of \p V (or, when \p SA is given, the chain's
// recorded start SCEV) as an AddRec over the current loop, possibly by
// committing new run-time predicates into \p PSE. Returns null when no
// AddRec form exists or a required predicate is not acceptable.
const SCEVAddRecExpr *LoopAnalyzer::getAsAddRec(PredicatedScalarEvolution &PSE,
                                                Value *V, SeqAccess *SA) {
  DEBUG(dbgs() << "Getting AddRec for " << *V << "...\n");
  const SCEV *E = SA ? SA->getStart() : PSE.getSCEV(V);
  if (!E)
    return nullptr;
  DEBUG(dbgs() << *V << " SCEV:  " << *E << '\n');

  // Already an AddRec: nothing more to do.
  if (auto *S = dyn_cast<SCEVAddRecExpr>(E)) {
    DEBUG(dbgs() << *V << " AddRec:  " << *E << '\n');
    return S;
  }

  // Do the rewrite in a shadow PSE without polluting the main PSE
  SmallPtrSet<const SCEVPredicate *, 8> Preds;
  auto *Add = SE->convertSCEVToAddRecWithPredicates(E, &ALI.L, Preds);

  if (Add == nullptr)
    return nullptr;

  // Only accept the conversion when every required predicate passes the
  // acceptance check; otherwise discard the result without touching PSE.
  if (!llvm::all_of(Preds,
                    [](const SCEVPredicate *P) { return checkPredicate(*P); }))
    return nullptr;

  // Incorporate the new predicate.
  for (auto *P : Preds)
    PSE.addPredicate(*P);

  DEBUG(dbgs() << *V << " AddRec:  " << *Add << '\n');
  return Add;
}

// Transform {(a, +, b)} /u x ---> a /u x + ({(a - a /u x), + , b} /u x)
static const SCEV *factorOut(ScalarEvolution *SE, Loop *L,
                             const SCEVUDivExpr *UDiv) {
  // Only affine, non-wrapping AddRec numerators can be factored safely.
  auto Num = dyn_cast<SCEVAddRecExpr>(UDiv->getLHS());
  if (!Num || !Num->isAffine() || !Num->hasNoUnsignedWrap())
    return UDiv;

  auto Den = UDiv->getRHS();
  if (!SE->isLoopInvariant(Den, L))
    return UDiv;

  // Split the AddRec start into the multiple of Den and the remainder.
  auto Start = Num->getStart();
  auto Quot = SE->getUDivExpr(Start, Den);
  auto Rem = SE->getMinusSCEV(Start, SE->getMulExpr(Quot, Den));
  auto Residual = SE->getAddRecExpr(Rem, Num->getStepRecurrence(*SE),
                                    Num->getLoop(), Num->getNoWrapFlags());
  return SE->getAddExpr(Quot, SE->getUDivExpr(Residual, Den));
}

// Simplify the computed distance between accesses pointers in iterations of a
// loop.
// Handle the canonicalized SCEV form for the computed distance: -A + B
static const SCEV *simplifyDist(ScalarEvolution *SE, ScalarEvolutionCanon *SEC,
                                Loop *L, const SCEV *Dist,
                                const SCEV *ElementSize) {
  // Only handle a two-operand add whose first operand is a negated product,
  // i.e. the canonical "-A + B" shape; anything else is returned unchanged.
  auto AddDist = dyn_cast<SCEVAddExpr>(Dist);
  if (!AddDist || AddDist->getNumOperands() > 2)
    return Dist;

  auto MinusOp =
      dyn_cast<SCEVMulExpr>(SEC->getCanonSCEV(AddDist->getOperand(0)));
  if (!MinusOp || MinusOp->getNumOperands() > 2)
    return Dist;

  auto C = dyn_cast<SCEVConstant>(MinusOp->getOperand(0));
  if (!C || !SE->isKnownNegative(C))
    return Dist;

  // Rewrite both terms as element-size-scaled udivs: Dist = (S1 - S0).
  auto S0 = SEC->getCanonSCEV(SE->getNegativeSCEV(MinusOp));
  auto S1 = SEC->getCanonSCEV(AddDist->getOperand(1));
  auto US0 = dyn_cast<SCEVUDivExpr>(
      SEC->getCanonSCEV(SE->getUDivExpr(S0, ElementSize)));
  auto US1 = dyn_cast<SCEVUDivExpr>(
      SEC->getCanonSCEV(SE->getUDivExpr(S1, ElementSize)));
  if (!US0 || !US1)
    return Dist;

  // Try to factor out the base from udiv to bring chances on eliminating the
  // common (affine /u x) part in the expression.
  auto FS0 = factorOut(SE, L, US0);
  auto FS1 = factorOut(SE, L, US1);
  return SEC->getCanonSCEV(
      SE->getMulExpr(SE->getMinusSCEV(FS1, FS0), ElementSize));
}

// Computes the distance (in bytes, as a SCEV) that the access pointer of
// \p I advances between two consecutive *executed* iterations of an access
// guarded by the modulo predicate \p ModTerm ("indvar % Divs == Rem").
// Returns null when the pattern cannot be analyzed.
const SCEV *LoopAnalyzer::getDistBetweenSteps(PredicatedScalarEvolution &PSE,
                                              Instruction *I, Value *ModTerm) {
  assert(ModTerm && "Expect modulo predicate!");

  // ModTerm: indvar % Divs == Rem
  ConstantInt *Divs, *Rem;
  std::tie(Divs, Rem) = LPA->getModTermInfo(I->getParent(), &ALI.L, ModTerm);
  if (!Divs || !Rem || Divs->isZero())
    return nullptr;

  const SCEV *E = SEC->getCanonSCEV(PSE.getSCEV(getPointerOperand(I)));
  if (!E || !getLegalBasePtr(SE, E))
    return nullptr;

  // Skips non constant stride on the loop.
  auto IndVarSCEV = not_null(getAffineAddRec(ALI.IndVar, ALI.L, *SE));
  auto IndVarStride = dyn_cast<SCEVConstant>(IndVarSCEV->getOperand(1));
  if (!IndVarStride)
    return nullptr;

  // Steps over to the next execute access address from any iteration K.
  auto StrideC = IndVarStride->getValue()->getZExtValue();
  auto Steps = LeastCommonMultiple64(Divs->getZExtValue(), StrideC) / StrideC;
  SCEVLoopStepRewriter DivsSteps(
      *SE, ALI.L, SE->getSCEV(ConstantInt::get(IndVarSCEV->getType(), Steps)));
  auto EAfterDivsSteps = DivsSteps.nextStep(E);
  if (isa<SCEVCouldNotCompute>(EAfterDivsSteps))
    return nullptr;

  // Drop pointer casts when address wrapping is assumed not to happen, so
  // the subtraction below can simplify.
  if (ReflowConfig::GlobalConfig().AssumeNoAddrWrap) {
    E = stripCastsOnPointer(E, *SE);
    EAfterDivsSteps = stripCastsOnPointer(EAfterDivsSteps, *SE);
  }

  auto ElementSize = getValueSize(*DL, I);
  auto SCEVEleSize = SE->getConstant(E->getType(), ElementSize);
  auto Dist = SEC->getCanonSCEV(
      DivsSteps.computeStepDist(E, SEC->getCanonSCEV(EAfterDivsSteps)));
  // NOTE: Since the udiv is going to block further simplification from SCEV
  //       utilities on the computed distance. Try to simplify it further.
  return simplifyDist(SE, SEC, &ALI.L, Dist, SCEVEleSize);
}

// Computes the distance (in bytes, as a SCEV) between the access address of
// \p I at iteration 0 and at iteration 1. Used for loops with a single
// backedge-taken count. Returns null when the pattern cannot be analyzed.
const SCEV *LoopAnalyzer::getDistBetweenOneStep(PredicatedScalarEvolution &PSE,
                                                Instruction *I, SeqAccess *SA) {
  const SCEV *E = SEC->getCanonSCEV(PSE.getSCEV(getPointerOperand(I)));
  if (!E || !getLegalBasePtr(SE, E))
    return nullptr;

  auto *Ty = E->getType();
  auto *One = SE->getOne(Ty);
  SCEVAtLoopIterationRewriter Itr0(*SE, ALI.L, SE->getZero(Ty));
  SCEVAtLoopIterationRewriter Itr1(*SE, ALI.L, One);

  auto *E0 = Itr0.evaluateAtItr(E);
  if (isa<SCEVCouldNotCompute>(E0))
    return nullptr;
  auto *E1 = Itr1.evaluateAtItr(E);
  if (isa<SCEVCouldNotCompute>(E1))
    return nullptr;
  // Canonicalize the values computed above; the original code redundantly
  // re-ran evaluateAtItr(E) here instead of reusing E0/E1.
  E0 = SEC->getCanonSCEV(E0);
  E1 = SEC->getCanonSCEV(E1);

  // Fast path: when iteration 1's address equals iteration 0's address plus
  // one burst (Len elements of EltSize bytes), the distance is exactly that.
  auto Len = SA ? SA->getLen() : One;
  auto EltSize = SE->getConstant(Ty, getValueSize(*DL, I));
  auto Delta = SE->getMulExpr(Len, EltSize, SCEV::FlagNUW);
  const SCEV *X = SEC->getCanonSCEV(SE->getAddExpr(E0, Delta));

  if (ReflowConfig::GlobalConfig().AssumeNoAddrWrap) {
    X = stripCastsOnPointer(X, *SE);
    E1 = stripCastsOnPointer(E1, *SE);
  }

  // TODO: Improve SCEVCanon to simplify minus.
  if (X == E1)
    return Delta;

  return SEC->getCanonSCEV(SE->getMinusSCEV(E1, E0));
}

// Returns the analyzable access pattern for \p I: a per-step distance for
// modulo-predicated or single-trip-count loops, otherwise the pointer's
// AddRec. Returns null when no valid pattern exists.
const SCEV *LoopAnalyzer::getValidAccessPattern(PredicatedScalarEvolution &PSE,
                                                Instruction *I, SeqAccess *SA,
                                                Value *ModTerm,
                                                bool IsSingleBTC) {
  // TODO: Unify and simplify the code with SCEVStep to support all kinds of
  //       SCEV.
  if (ModTerm)
    return getDistBetweenSteps(PSE, I, ModTerm);

  if (IsSingleBTC)
    return getDistBetweenOneStep(PSE, I, SA);

  auto Rec = getAsAddRec(PSE, getPointerOperand(I), SA);
  return (Rec && getLegalBasePtr(SE, Rec)) ? Rec : nullptr;
}

// Returns true when the access \p I has an analyzable pattern whose stride
// is legal for burst inference; emits diagnostics on failure.
bool LoopAnalyzer::hasLegalStride(PredicatedScalarEvolution &PSE,
                                  Instruction *I, SeqAccess *SA, Value *ModTerm,
                                  bool IsSingleBTC) {
  bool ExistingSA = SA != nullptr;
  auto ValidPattern = getValidAccessPattern(PSE, I, SA, ModTerm, IsSingleBTC);

  if (!ValidPattern) {
    DEBUG(dbgs() << *I << " ; PredicatedSCEV "
                 << *PSE.getSCEV(getPointerOperand(I))
                 << " unsupported scev form address or based pointer\n");
    if (EnableDiag)
      RD.emitCouldNotAnalyzePattern(I, &ALI.L);
    // NOTE: this diagnostic reports containment in an already-inferred
    // chain, which is not the same as SA being non-null. Renamed from
    // `ExistingSA` to avoid shadowing the distinct variable above.
    bool ContainedInSA = SAI->isContainedInSeqAccess(I);
    SAI->addDiagFailureInfo(I, DiagFailureInfo::CouldNotAnalyzePattern,
                            ContainedInSA, /*SI=*/nullptr, &ALI.L);
    return false;
  }

  // For modulo/single-BTC patterns the pattern *is* the stride; otherwise
  // extract the stride from the AddRec.
  auto ScStride =
      (ModTerm || IsSingleBTC)
          ? ValidPattern
          : extractStrideFromAddRec(cast<SCEVAddRecExpr>(ValidPattern), ALI.L);
  if (!ScStride || !hasLegalStride(ValidPattern, ScStride, I, SA)) {
    DEBUG(dbgs() << *I << " ; PredicatedSCEV "
                 << *PSE.getSCEV(getPointerOperand(I)) << " stride mismatch\n");
    if (EnableDiag)
      RD.emitIncompatibleStride(I, &ALI.L);
    SAI->addDiagFailureInfo(I, DiagFailureInfo::IncompatibleStride, ExistingSA,
                            /*SI=*/nullptr, &ALI.L);
    return false;
  }

  return true;
}

// Dispatches the stride-legality check: a chained access is validated
// against its chain, a lone access against its own pattern.
bool LoopAnalyzer::hasLegalStride(const SCEV *ValidPattern,
                                  const SCEV *ScStride, Instruction *I,
                                  SeqAccess *SA) {
  return SA ? seqAccessHasLegalStride(SA, ScStride)
            : singleAccessHasLegalStride(I, ScStride, ValidPattern);
}

// Returns true when the loop stride exactly covers one burst of the existing
// chain \p SA (i.e. stride == chain length * element size).
bool LoopAnalyzer::seqAccessHasLegalStride(SeqAccess *SA,
                                           const SCEV *ScStride) {
  auto ChainBegin = SA->getSeqAccessChainBegin();
  auto BurstNum = SA->getLen();
  if (!SE->isLoopInvariant(BurstNum, &ALI.L))
    return false;

  auto EltSize =
      SE->getConstant(BurstNum->getType(), getValueSize(*DL, ChainBegin));
  auto BurstSize = SE->getMulExpr(BurstNum, EltSize, SCEV::FlagNUW);
  assert(SE->isLoopInvariant(BurstSize, &ALI.L) &&
         "Burst size must be loop invariant!");
  DEBUG(dbgs() << "Stride: " << *ScStride << " vs Burst size: " << *BurstSize
               << '\n');
  return ScStride == BurstSize;
}

// Returns the max_widen_bitwidth attached to \p V, or the configured default
// (in bits) when none is set (-1 sentinel).
static uint32_t getMaxWidenSizeInBitsImpl(const DataLayout &DL, Value *V) {
  auto MaxV = XlxInterfaceIntrinsic::getMAXIMaxWidenBitWidth(V, DL);
  if (MaxV == -1)
    return ReflowConfig::GlobalConfig().MaxWidenSizeInBits * 8;
  return static_cast<uint32_t>(MaxV);
}

// TODO: Refactor when the max_widen_bitwidth info prop to each function.
uint32_t getMaxWidenSizeInBits(const DataLayout &DL, Value *V) {
  SmallPtrSet<Value *, 2> Visited;
  SetVector<Value *> Set;
  reflow::GetRealUnderlyingObjects(V, DL, Visited, Set);
  assert(!Set.empty() && "Expect underlying object!");

  auto UO = Set.back();
  if (isa<GlobalVariable>(UO))
    return getMaxWidenSizeInBitsImpl(DL, UO);

  // For c/c++, there are wrappers, find the corresponding argument.
  auto TopArg = dyn_cast<Argument>(UO);
  if (!TopArg)
    TopArg = reflow::getTheTopArgument(UO);

  // If we allow sequential accesses on bram/ap_memory, return the default
  // threshold.
  if (!TopArg)
    return ReflowConfig::GlobalConfig().MaxWidenSizeInBits;

  uint32_t MaxWidenInBits = getMaxWidenSizeInBitsImpl(DL, TopArg);

  if (RunChecks) {
    // Check if there are unexpected non-identical max_widen_bitwidth set.
    do {
      auto U = Set.pop_back_val();
      auto TA = dyn_cast<Argument>(U);
      if (!TA)
        TA = reflow::getTheTopArgument(U);
      Value *UO = TA ? TA : U;
      assert(MaxWidenInBits == getMaxWidenSizeInBitsImpl(DL, UO) &&
             "Expect only the same max_widen_bitwidth!");
    } while (!Set.empty());
  }

  return MaxWidenInBits;
}

// A stand-alone access is stride-legal when the constant stride equals the
// element size, or when it can be widened (vectorized): a power-of-two
// stride over a sufficiently aligned add-rec pointer, with the implied
// vector no wider than the max_widen threshold and the access executed in
// every iteration of the loop.
bool LoopAnalyzer::singleAccessHasLegalStride(Instruction *I,
                                              const SCEV *ScStride,
                                              const SCEV *ValidPattern) {
  const auto *ConstStride = dyn_cast<SCEVConstant>(ScStride);
  if (!ConstStride)
    return false;

  auto ElemBytes = getValueSize(*DL, I);
  const APInt &StrideVal = ConstStride->getAPInt();
  if (StrideVal == ElemBytes)
    return true;

  // Do not vectorize byte enable store.
  if (isa<MAXIStoreInst>(I))
    return false;

  // Do not vectorize for non power of 2 element type.
  Type *ElemTy = getAccessElementType(I);
  if (!isPowerOf2_32(DL->getTypeSizeInBits(ElemTy)))
    return false;

  // Only a power-of-two stride can be widened.
  if (!StrideVal.isPowerOf2())
    return false;

  // Do not vectorize non addrec accesses.
  if (!isa<SCEVAddRecExpr>(ValidPattern))
    return false;

  // Skip the unaligned pointers.
  const auto *Base = SE->getPointerBase(ValidPattern);
  if (!Base || SE->GetMinTrailingZeros(Base) < StrideVal.countTrailingZeros())
    return false;

  // Vectorization factor implied by the power-of-two stride.
  auto Factor = StrideVal.getZExtValue() / ElemBytes;

  // Get widen threshold. Do not vectorize more than widen threshold.
  uint32_t Threshold = getMaxWidenSizeInBits(*DL, getPointerOperand(I));
  bool CanVectorize = EnableVectorization &&
                      LPA->isExecutedInAllIterations(I->getParent(), &ALI.L) &&
                      (Factor * ElemBytes * 8) <= Threshold;
  if (CanVectorize)
    return llvm::isPowerOf2_64(Factor);
  return Factor == 1;
}

// Merge two candidate region-start blocks: when \p LMin dominates \p RMin,
// the dominated block is redundant and is dropped from the minimal set.
void SeqAccessesInfo::mergeSets(SetVector<const BasicBlock *> &MinSets,
                                const BasicBlock *LMin,
                                const BasicBlock *RMin) {
  if (DT->dominates(LMin, RMin))
    MinSets.remove(RMin);
}

// Divide-and-conquer over WorkMap[s..e]: recursively compute the minimal set
// of dominating region-boundary blocks for each half, then merge every
// cross-half pair in which one block dominates the other. The dominance
// query is oriented by topological order of the two blocks.
SetVector<const BasicBlock *>
SeqAccessesInfo::getSetsOfDomPostDomRegions(BBRegionMap &WorkMap, size_t s,
                                            size_t e) {
  assert((s <= e) && "Expect start lesser or equal to end!");

  // Base case: a single boundary block is its own minimal set.
  if (s == e) {
    SetVector<const BasicBlock *> Single;
    Single.insert((WorkMap.begin() + s)->first);
    return Single;
  }

  const auto Mid = (s + e + 1) / 2;
  auto LSets = getSetsOfDomPostDomRegions(WorkMap, s, Mid - 1);
  auto RSets = getSetsOfDomPostDomRegions(WorkMap, Mid, e);

  SetVector<const BasicBlock *> Merged;
  for (const auto *LMin : LSets) {
    Merged.insert(LMin);
    for (const auto *RMin : RSets) {
      Merged.insert(RMin);
      if (TopoOrderedBBs[LMin] < TopoOrderedBBs[RMin])
        mergeSets(Merged, LMin, RMin);
      else
        mergeSets(Merged, RMin, LMin);
    }
  }

  return Merged;
}

// Extend the analysis across the sub-regions of \p R: map region boundary
// blocks to their owning (sub-)region, compute the minimal set of dominating
// start blocks, then run a RegionAnalyzer over every multi-block zone found.
void SeqAccessesInfo::extend(SeqAccessesRegionNode *RNode, Region &R) {
  if (getNumOfSubRegions(&R) < 1)
    return;

  BBRegionMap WorkMap;
  WorkMap.insert({R.getEntry(), &R});
  for (auto &E : R) {
    auto Entry = E->getEntry();
    auto Exit = E->getExit();
    if (WorkMap.count(Exit) == 0)
      WorkMap.insert({Exit, &R});

    // A sub-region owns its own entry block: overwrite any earlier mapping
    // (e.g. the block was first seen as a sibling's exit).
    if (WorkMap.count(Entry)) {
      WorkMap[Entry] = &*E;
      continue;
    }
    // First sighting of this entry block. (The original code re-checked
    // count(Entry) == 0 here, which is always true after the branch above;
    // the redundant condition has been removed.)
    WorkMap.insert({Entry, &*E});
  }

  auto MinSets = getSetsOfDomPostDomRegions(WorkMap, 0, WorkMap.size() - 1);

  for (auto Min : MinSets) {
    std::deque<BasicBlock *> RWorkList;
    collectRegionBBs(WorkMap, RWorkList, *const_cast<BasicBlock *>(Min));

    // A single block cannot host a cross-block sequential access chain.
    if (RWorkList.size() <= 1)
      continue;

    // Infer the longest sequential access in a region.
    SeqAccessesZoneNode *ZoneNode =
        new SeqAccessesZoneNode(RWorkList.front(), RWorkList.back(), RNode);
    ReflowDiagnostic RD(DEBUG_TYPE, *ORE, *DL);
    RegionAnalyzer AAnalyzer(this, ZoneNode, Ctx, DL, LI, DT, AA, SE, SEC, RD,
                             OI, EnableDiag);
    AAnalyzer.infer(RWorkList);
    RNode->addSeqAccessesNode(ZoneNode);
  }
}

// Starting from \p BB, follow the chain of region exits recorded in
// \p WorkMap, appending each visited block to \p RWorkList. The walk stops
// at a region with no exit, at an exit not properly dominated by the current
// block, or at an exit that is not a known region boundary.
void SeqAccessesInfo::collectRegionBBs(BBRegionMap &WorkMap,
                                       std::deque<BasicBlock *> &RWorkList,
                                       BasicBlock &BB) {
  BasicBlock *Cur = &BB;
  RWorkList.emplace_back(Cur);

  while (true) {
    auto *Exit = WorkMap[Cur]->getExit();
    if (!Exit || !DT->properlyDominates(Cur, Exit) ||
        WorkMap.count(Exit) == 0)
      break;

    Cur = Exit;
    RWorkList.emplace_back(Cur);
  }
}

// Gather candidate loads and stores from every block of \p WorkList into the
// zone node's per-chain reference maps, keyed by chain ID. Accesses already
// folded into an inferred sequential access (other than chain heads) are
// skipped.
void RegionAnalyzer::collectAccesses(std::deque<BasicBlock *> &WorkList) {
  for (auto *BB : WorkList) {
    for (auto &I : *BB) {
      if (!I.mayReadOrWriteMemory())
        continue;

      const bool IsLoad = isa<LoadInst>(&I);
      const bool IsStore = isa<StoreInst>(&I) || isa<MAXIStoreInst>(&I);
      if (!IsLoad && !IsStore)
        continue;

      // Skip inferred sequential accesses.
      if (!isCandidateAccessInRegions(&I))
        continue;

      if (const auto ID = getCandidateAccessID(&I)) {
        if (IsLoad)
          SN->LoadRefs[ID].emplace_back(&I);
        else
          SN->StoreRefs[ID].emplace_back(&I);
      }
    }
  }
}

// An access is a candidate unless it already belongs to an inferred
// sequential access chain and is not that chain's head.
bool AnalyzerBase::isCandidateAccessInRegions(Instruction *I) {
  auto NodeZone = SAI->getSeqAccessZone(I);
  if (!NodeZone)
    return true;

  auto SA = NodeZone->second->getSeqAccess(NodeZone, I);
  return !SA || SA->isSeqAccessChainBegin(I);
}

// Run the inference pipeline over the zone: collect candidate accesses from
// all blocks in \p WorkList, then infer sequential chains among the loads
// and among the stores separately.
void RegionAnalyzer::infer(std::deque<BasicBlock *> &WorkList) {
  DEBUG({
    dbgs() << "------ Extending Region: ";
    SN->getParent()->getRegion()->dump();
  });
  collectAccesses(WorkList);
  inferRefs(SN->LoadRefs);
  inferRefs(SN->StoreRefs);
  DEBUG(dbgs() << "------\n");
}

// Attempt chain inference on every collected reference list that has at
// least two accesses; singleton lists cannot form a chain.
void RegionAnalyzer::inferRefs(InstListMap &AccessRefs) {
  for (const auto &Entry : AccessRefs) {
    const auto &Insts = Entry.second;
    if (Insts.size() < 2)
      continue;

    DEBUG(dbgs() << "SeqAccessInfo: Analyzing a chain of length "
                 << Insts.size() << ".\n");

    inferChain(Insts);
  }
}

// Split \p Accesses into maximal runs of consecutive accesses (as recorded
// by exploreConsecutiveChain) and attempt to infer a sequential access for
// each run.
void RegionAnalyzer::inferChain(ArrayRef<Instruction *> Accesses) {
  auto Size = Accesses.size();
  // ConsecutiveChain[i] is filled by exploreConsecutiveChain: the index of
  // the access following i in its run, or i itself as a run terminator.
  std::vector<int> ConsecutiveChain(Size, -1);
  exploreConsecutiveChain(Accesses, ConsecutiveChain);
  SmallPtrSet<Instruction *, 16> InstructionsProcessed;

  for (int i = 0, e = Size; i < e; ++i) {
    if (InstructionsProcessed.count(Accesses[i]))
      continue;

    SmallVector<Instruction *, 16> Operands;
    Operands.emplace_back(Accesses[i]);

    // exploreConsecutiveChain links each index only to its direct successor,
    // so following the run is a linear scan forward until a self-link (run
    // terminator) is hit.
    for (int j = i; j < e; ++j) {
      int ConIdx = ConsecutiveChain[j];

      if (ConIdx == j)
        break;

      Operands.emplace_back(Accesses[ConIdx]);
    }

    inferChain(Operands, &InstructionsProcessed);
  }
}

// For each adjacent pair of accesses decide whether the second directly
// follows the first in memory (taking the first access's sequential length
// into account). ConsecutiveChain[i] is set to i + 1 when the pair is
// consecutive, or to i itself as a run terminator.
void RegionAnalyzer::exploreConsecutiveChain(
    ArrayRef<Instruction *> Accesses, std::vector<int> &ConsecutiveChain) {
  auto Size = Accesses.size();
  // The last access never starts another pair; it always terminates a run.
  ConsecutiveChain[Size - 1] = Size - 1;

  for (unsigned i = 1; i < Size; ++i) {
    bool Consecutive = areConsecutiveAccesses(
        Accesses[i - 1], Accesses[i], SAI->getSeqAccessLen(Accesses[i - 1]));
    ConsecutiveChain[i - 1] =
        Consecutive ? static_cast<int>(i) : static_cast<int>(i - 1);
  }
}

// Validate a candidate chain against side effects and clobbering aliases,
// trim it to its longest legal prefix, and hand the survivors to the
// type-aware inference. Every instruction examined is recorded in
// \p InstructionsProcessed so it is not revisited.
void RegionAnalyzer::inferChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  if (Chain.size() < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return;
  }

  const unsigned StopChainIdx = getSeqAccessPrefixEndIdx(Chain);
  switch (StopChainIdx) {
  case 0:
    // There exists a side effect instruction, can't infer sequential
    // accesses.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return;
  case 1:
    // Failed after the first instruction. Discard it and try the smaller
    // chain.
    InstructionsProcessed->insert(Chain.front());
    return;
  default:
    break;
  }

  // Update Chain to the valid sequential accesses subchain.
  Chain = Chain.slice(0, StopChainIdx);
  InstructionsProcessed->insert(Chain.begin(), Chain.end());

  inferChain(Chain, getPointerOperand(Chain[0]), getAccessElementType(Chain));
}

// Decide whether \p I can safely be crossed while forming a sequential
// access chain, i.e. whether it is known not to clobber the chain.
bool AnalyzerBase::hasNoSideEffect(Instruction *I) {
  // Should be fine to cross seq as long as we don't modify the whole seq
  // accesses
  if (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<MAXIStoreInst>(I))
    return SAI->isContainedInSeqAccess(I);

  if (isa<SeqLoadInst>(I) || isa<SeqStoreInst>(I) || isa<SeqEndInst>(I))
    return true;

  if (isa<ScopeEntry>(I) || isa<ScopeExit>(I))
    return !hasSideEffect(I);

  if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    // Marker intrinsics with no memory effect of interest here.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::assume:
      return true;
    default:
      break;
    }
  }

  return false;
}

// A sequential access region is valid only when neither of its boundary
// blocks exposes \p AI inside a dataflow region.
bool AnalyzerBase::isValidSeqAccessRegion(Instruction *AI) {
  if (exposeInDataflowRegion(*LI, *SE, *SN->getZoneStart(), AI))
    return false;
  return !exposeInDataflowRegion(*LI, *SE, *SN->getZoneEnd(), AI);
}

// Scan \p MayAccessRefs for an instruction that would make inferring the
// chain starting at Chain[0] illegal. Chain members are collected into
// \p ChainInstrs; plain loads/stores that might interfere are deferred into
// \p MemoryInstrs for the later pairwise alias check; any other instruction
// with a conflicting mod/ref effect on the chain's location aborts
// immediately. Returns true (and emits diagnostics) when such a blocking
// side-effect instruction is found.
bool RegionAnalyzer::hasSideEffectInstruction(
    ArrayRef<Instruction *> Chain, MemoryLocation &CLoc,
    SmallVectorImpl<Instruction *> &MayAccessRefs,
    SetVector<Instruction *> &MemoryInstrs,
    SetVector<Instruction *> &ChainInstrs) {
  auto C0 = Chain[0];
  for (auto SI : MayAccessRefs) {
    if (isa<LoadInst>(SI) || isa<StoreInst>(SI) || isa<MAXIStoreInst>(SI)) {
      if (is_contained(Chain, SI)) {
        ChainInstrs.insert(SI);
        continue;
      }

      // Accesses already folded into a sequential access are safe to cross
      // (see hasNoSideEffect).
      if (hasNoSideEffect(SI))
        continue;

      // Only instructions ordered after the chain head (per the order info
      // OI) can interpose; others are ignored.
      if (!OI->dominates(C0, SI))
        continue;

      // Defer the precise alias decision to getSeqAccessPrefixEndIdx.
      MemoryInstrs.insert(SI);
      continue;
    }

    if (hasNoSideEffect(SI))
      continue;

    ModRefInfo MRInfo = AA->getModRefInfo(SI, CLoc);
    // Don't infer sequential accesses since it's not legal if any of below
    // happens.
    // - there's write between a Chain of loads.
    // - there's read or write between a Chain of stores.
    if (hasSideEffect(SI) || (isa<LoadInst>(C0) && isModSet(MRInfo)) ||
        ((isa<StoreInst>(C0) || isa<MAXIStoreInst>(C0)) &&
         isModOrRefSet(MRInfo))) {
      DEBUG(dbgs() << "SeqAccessInfo: Found side-effecting operation: " << *SI
                   << '\n');
      if (EnableDiag)
        RD.emitSideEffectInst(SI, C0);
      bool ExistingSA = SAI->isContainedInSeqAccess(C0);
      SAI->addDiagFailureInfo(C0, DiagFailureInfo::SideEffect, ExistingSA, SI);
      return true;
    }
  }

  return false;
}

// Region-recursive variant: descend into every sub-region of \p R first,
// then check the region's own may-store and may-load reference lists for
// instructions that block the chain.
bool RegionAnalyzer::hasSideEffectInstruction(
    ArrayRef<Instruction *> Chain, MemoryLocation &CLoc,
    SetVector<Instruction *> &MemoryInstrs,
    SetVector<Instruction *> &ChainInstrs, Region *R) {
  for (auto &Sub : *R)
    if (hasSideEffectInstruction(Chain, CLoc, MemoryInstrs, ChainInstrs,
                                 &*Sub))
      return true;

  auto RNode = SAI->getSeqAccessesRegionNode(R);
  if (hasSideEffectInstruction(Chain, CLoc, RNode->MayStoreRefs, MemoryInstrs,
                               ChainInstrs))
    return true;
  return hasSideEffectInstruction(Chain, CLoc, RNode->MayLoadRefs,
                                  MemoryInstrs, ChainInstrs);
}

// Return the length of the longest prefix of \p Chain that can legally form
// a sequential access: 0 when a blocking side-effect instruction exists in
// the enclosing region, otherwise the index of the first chain member that
// may alias one of the interposed memory instructions (or the full chain
// length when none does).
// NOTE(review): \p FromI and \p ToI are not referenced in this body —
// confirm whether they are still needed in the signature.
unsigned RegionAnalyzer::getSeqAccessPrefixEndIdx(ArrayRef<Instruction *> Chain,
                                                  Instruction *FromI,
                                                  Instruction *ToI) {
  MemoryLocation CLoc = reflow::GetUnderlyingLocation(Chain[0], *DL);
  SetVector<Instruction *> MemoryInstrs, ChainInstrs;
  auto R = SN->getParent()->getRegion();
  // Partition the region's may-access instructions into chain members
  // (ChainInstrs) and potential clobbers (MemoryInstrs); a hard side effect
  // disqualifies the whole chain.
  if (hasSideEffectInstruction(Chain, CLoc, MemoryInstrs, ChainInstrs, R))
    return 0;

  DEBUG({
    dbgs() << "Chain Inst:\n";
    for (auto I : Chain)
      dbgs() << *I << '\n';

    dbgs() << "ChainInstrs Inst:\n";
    for (auto I : ChainInstrs)
      dbgs() << *I << '\n';
  });

  auto ChainSize = Chain.size();
  assert(ChainSize == ChainInstrs.size() &&
         "All instructions in the Chain must exist in MayStoreRefs or "
         "MayLoadRefs");

  // Pairwise alias check: the first chain member that conflicts with an
  // interposed memory instruction truncates the chain at its index.
  for (unsigned i = 0, e = ChainSize; i < e; ++i) {
    auto CI = ChainInstrs[i];
    for (auto MI : MemoryInstrs) {
      // Two loads never conflict.
      if (isa<LoadInst>(MI) && isa<LoadInst>(CI))
        continue;

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't move the load pass the store for
      // sequential access (When generating the seq loads, the begin of the seq
      // load is inserted at the location of the first load in the chain).
      if ((isa<StoreInst>(MI) || isa<MAXIStoreInst>(MI)) && isa<LoadInst>(CI) &&
          OI->dominates(CI, MI))
        continue;

      // Same case, but in reverse.
      if (isa<LoadInst>(MI) && (isa<StoreInst>(CI) || isa<MAXIStoreInst>(CI)) &&
          OI->dominates(MI, CI))
        continue;

      if (!AA->isNoAlias(MemoryLocation::get(MI), MemoryLocation::get(CI))) {
        DEBUG({
          Value *Ptr0 = getPointerOperand(MI);
          Value *Ptr1 = getPointerOperand(CI);
          dbgs() << "SeqAccessInfo: Found alias.\n"
                    "        Aliasing instruction and pointer:\n"
                 << *MI << " aliases " << *Ptr0 << '\n'
                 << "        Aliased instruction and pointer:\n"
                 << *CI << " aliases " << *Ptr1 << '\n';
        });
        if (EnableDiag)
          RD.emitAccessClobbered(CI, MI);
        bool ExistingSA = SAI->isContainedInSeqAccess(CI);
        SAI->addDiagFailureInfo(CI, DiagFailureInfo::AccessClobbered,
                                ExistingSA, MI);
        return i;
      }
    }
  }

  return ChainSize;
}

// Compute the chain ID (grouping key) under which \p AccessInst is collected
// for sequential access inference, or null when the access cannot
// participate: non-simple accesses, unsupported element types, misaligned
// accesses (when unaligned bursts are disabled), and pointers whose
// interface cannot carry sequential accesses are all rejected with a
// diagnostic.
ChainID AnalyzerBase::getCandidateAccessID(Value *AccessInst, LoopInfo *LI,
                                           bool EnableUnalignBurst) {
  assert((isa<LoadInst>(AccessInst) || isa<StoreInst>(AccessInst) ||
          isa<SeqBeginInst>(AccessInst) || isa<MAXIStoreInst>(AccessInst)) &&
         "Expect load/store!");
  auto AI = cast<Instruction>(AccessInst);

  // Reject volatile/atomic-like accesses (whatever isSimpleAccess screens).
  if (!isSimpleAccess(AI)) {
    DEBUG(dbgs() << *AI << " ; non simple access.\n");
    if (EnableDiag)
      RD.emitNonSimpleMemoryAccess(AI);
    SAI->addDiagFailureInfo(AI, DiagFailureInfo::NonSimpleMemoryAccess);
    return nullptr;
  }

  // Reject pointer-typed values and scalar types that cannot form vectors.
  auto Ty = getValueType(AI);
  if (Ty->isPointerTy() ||
      !VectorType::isValidElementType(Ty->getScalarType())) {
    DEBUG(dbgs() << *AI << " ; unsupported access type.\n");
    if (EnableDiag)
      RD.emitUnsupportedAccessType(AI, Ty->getScalarType());
    SAI->addDiagFailureInfo(AI, DiagFailureInfo::UnsupportedAccessType);
    return nullptr;
  }

  // Alignment matters unless unaligned bursts are explicitly enabled.
  if (!EnableUnalignBurst && !isAlignedAccess(*DL, AI)) {
    DEBUG(dbgs() << *AI << " ; invalid alignment.\n");
    if (EnableDiag)
      RD.emitInsufficientAlignment(AI, getAlignment(*DL, AI),
                                   getRequiredAlignment(*DL, AI));
    bool ExistingSA = SAI->isContainedInSeqAccess(AI);
    SAI->addDiagFailureInfo(AI, DiagFailureInfo::InsufficientAlignment,
                            ExistingSA);
    return nullptr;
  }

  // The underlying interface must be able to carry sequential accesses.
  Value *Ptr = getPointerOperand(AI);
  if (!supportSequentialAccessInterface(Ptr, LI)) {
    if (EnableDiag)
      RD.emitUnknownUnderlyingObject(AI, Ptr);
    return nullptr;
  }

  return getChainID(*DL, Ptr);
}

// Decide whether the memory behind \p Ptr can carry a sequential access
// interface: MAXI always qualifies; otherwise the decision depends on
// configuration, address space, and (for OpenCL) address-space uniformity.
bool AnalyzerBase::supportSequentialAccessInterface(Value *Ptr, LoopInfo *LI) {
  auto UO = getUniqueUnderlyingObject(Ptr, *DL, LI);
  if (!UO)
    return false;

  if (XlxInterfaceIntrinsic::IsMAXI(UO, *DL))
    return true;

  // Configuration may restrict bursting to MAXI interfaces only.
  if (ReflowConfig::GlobalConfig().BurstOnlyOnMAXI)
    return false;

  const unsigned AS = UO->getType()->getPointerAddressSpace();
  if (AS == 0)
    return true;

  // For OpenCL only uniform address spaces qualify; otherwise accept.
  return !SAI->isForOpenCL() || spir::IsUniformAddressSpace(AS);
}

// Infer sequential chains for every collected reference list, processing the
// accesses in fixed-size chunks to bound the quadratic consecutive-chain
// search.
void BBsAnalyzer::inferRefs(InstListMap &AccessRefs) {
  for (const auto &Chain : AccessRefs) {
    unsigned Size = Chain.second.size();
    if (Size < 2)
      continue;

    DEBUG(dbgs() << "SeqAccessInfo: Analyzing a chain of length " << Size
                 << ".\n");

    // NOTE: Maybe utilize the max_read_burst_length, max_write_burst_length,
    //       and max_widen_bitwidth for threshold if the reordering is too
    //       expensive in practice
    auto InferChunkSize =
        ChunkDataSize / getValueSize(*DL, Chain.second.front());
    // Guard against a zero chunk size (element larger than ChunkDataSize),
    // which would otherwise make the loop below never advance (CI += 0).
    if (InferChunkSize == 0)
      InferChunkSize = 1;
    for (unsigned CI = 0, CE = Size; CI < CE; CI += InferChunkSize) {
      unsigned Len = std::min<unsigned>(CE - CI, InferChunkSize);
      ArrayRef<Instruction *> Chunk(&Chain.second[CI], Len);
      inferChain(Chunk);
    }
  }
}

// Cheap structural screen before the expensive SCEV-based consecutiveness
// computation: both pointers must be non-null, distinct, in the same address
// space, and point at power-of-two sized elements of identical store size
// (per scalar element as well, for vector types).
bool AnalyzerBase::isLegalToComputeConsecutiveness(Value *PtrA,
                                                   Value *PtrB) const {
  // Validate the pointers BEFORE dereferencing them: the original code read
  // the types first and only then performed the null check.
  if (!PtrA || !PtrB)
    return false;

  // Check that the address spaces match.
  unsigned ASA = PtrA->getType()->getPointerAddressSpace();
  unsigned ASB = PtrB->getType()->getPointerAddressSpace();
  if (ASA != ASB)
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  Type *PtrATy = PtrA->getType()->getPointerElementType();
  Type *PtrBTy = PtrB->getType()->getPointerElementType();

  // Do not bother with non-power of 2 size type.
  if (!isPowerOf2_64(DL->getTypeSizeInBits(PtrATy)) ||
      !isPowerOf2_64(DL->getTypeSizeInBits(PtrBTy)))
    return false;

  // Make sure that A and B are with same store size type. (For vector type,
  // make sure each element meets this requirement as well. )
  if (DL->getTypeStoreSize(PtrATy) != DL->getTypeStoreSize(PtrBTy) ||
      DL->getTypeStoreSize(PtrATy->getScalarType()) !=
          DL->getTypeStoreSize(PtrBTy->getScalarType()))
    return false;

  return true;
}

// Check whether access \p B touches the memory immediately following access
// \p A, i.e. the pointers differ by exactly one element store size (scaled
// by \p ALen when A is itself a sequential access of that length).
bool AnalyzerBase::areConsecutiveAccesses(Value *A, Value *B,
                                          const SCEV *ALen) const {
  Value *PtrA = getPointerOperand(A);
  Value *PtrB = getPointerOperand(B);
  if (!isLegalToComputeConsecutiveness(PtrA, PtrB))
    return false;

  const unsigned PtrBits =
      DL->getPointerSizeInBits(PtrA->getType()->getPointerAddressSpace());
  Type *ElemTy = PtrA->getType()->getPointerElementType();
  APInt ElemStoreSize(PtrBits, DL->getTypeStoreSize(ElemTy));
  return areConsecutivePointers(PtrA, PtrB, ElemStoreSize, /*Depth=*/0, ALen);
}

// Decide whether PtrB points exactly PtrDelta bytes after PtrA (PtrDelta is
// scaled by ALen when the first access is a sequential access of constant
// length). Strategy: strip constant in-bounds offsets, then compare the
// remaining base pointers — directly, via canonical SCEV addition, via the
// canonical SCEV difference, and finally via a pattern-matching fallback.
bool AnalyzerBase::areConsecutivePointers(Value *PtrA, Value *PtrB,
                                          APInt PtrDelta, unsigned Depth,
                                          const SCEV *ALen) const {
  // Check consecutiveness with
  // (PtrB - PtrA) + (OffsetB - OffsetA) = PtrDelta
  unsigned PtrBitWidth =
      DL->getPointerSizeInBits(PtrA->getType()->getPointerAddressSpace());
  APInt OffsetA(PtrBitWidth, 0), OffsetB(PtrBitWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(*DL, OffsetB);

  // A constant-length sequential access covers ALen elements, so the
  // expected delta is scaled accordingly.
  if (auto ALenConstant = dyn_cast_or_null<SCEVConstant>(ALen))
    PtrDelta *= ALenConstant->getAPInt();

  APInt OffsetDelta = OffsetB - OffsetA;

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == PtrDelta;

  // Compute the necessary base pointer delta to have the necessary final delta
  // equal to the size.
  APInt BaseDelta = PtrDelta - OffsetDelta;

  const SCEV *PtrSCEVA = SEC->getCanonSCEV(PtrA);
  const SCEV *PtrSCEVB = SEC->getCanonSCEV(PtrB);
  const SCEV *C = SE->getConstant(BaseDelta);
  const SCEV *X = SEC->getCanonSCEV(SE->getAddExpr(PtrSCEVA, C));

  // Under the no-address-wrap assumption, pointer casts carry no information
  // useful to the structural comparisons below, so strip them.
  if (ReflowConfig::GlobalConfig().AssumeNoAddrWrap) {
    PtrSCEVA = stripCastsOnPointer(PtrSCEVA, *SE);
    X = stripCastsOnPointer(X, *SE);
    PtrSCEVB = stripCastsOnPointer(PtrSCEVB, *SE);
  }

  // TODO: Improve SCEVCanon to simplify minus.
  if (X == PtrSCEVB)
    return true;

  // Get the minus scev, since if one of the pointers is factorized but the
  // other one is not, such as (C + (S * (A + B))) vs
  // (AS + BS), this will allow re-combining the expresions and getting the
  // simplified difference.
  const SCEV *Dist = SEC->getCanonSCEV(SE->getMinusSCEV(PtrSCEVB, PtrSCEVA));
  if (C == Dist)
    return true;

  // Sometimes even this doesn't work, because SCEV can't always see through
  // patterns that look like (gep (ext (add (shl X, C1), C2))). Try checking
  // things the hard way.
  return lookThroughComplexAddresses(PtrA, PtrB, BaseDelta, Depth);
}

// Fallback consecutiveness check for address patterns SCEV cannot simplify,
// e.g. (gep (ext (add (shl X, C1), C2))). Handles pointer selects, then GEPs
// that are identical except for their last index: it derives the index
// difference implied by PtrDelta, proves adding that difference to the
// smaller index cannot overflow (via wrap flags or known bits), and finally
// compares the canonical SCEVs of the two indices.
bool AnalyzerBase::lookThroughComplexAddresses(Value *PtrA, Value *PtrB,
                                               APInt PtrDelta,
                                               unsigned Depth) const {
  // Handle pointer selection.
  if (isa<SelectInst>(PtrA) && isa<SelectInst>(PtrB))
    return lookThroughSelects(PtrA, PtrB, PtrDelta, Depth);

  // Handle GEPs that are the same except for the last index.
  auto *GEPA = dyn_cast<GEPOperator>(PtrA);
  auto *GEPB = dyn_cast<GEPOperator>(PtrB);

  if (!GEPA || !GEPB)
    return false;

  // Same base pointer and same operand count are required for a
  // last-index-only difference.
  if (GEPA->getNumOperands() != GEPB->getNumOperands() ||
      GEPA->getPointerOperand() != GEPB->getPointerOperand())
    return false;

  // All indices except the last must be pairwise identical.
  gep_type_iterator GTIA = gep_type_begin(GEPA);
  gep_type_iterator GTIB = gep_type_begin(GEPB);
  for (unsigned I = 0, E = GEPA->getNumIndices() - 1; I < E; ++I) {
    if (GTIA.getOperand() != GTIB.getOperand())
      return false;
    ++GTIA;
    ++GTIB;
  }

  // The last indices must be same-opcode, same-type instructions.
  Instruction *OpA = dyn_cast<Instruction>(GTIA.getOperand());
  Instruction *OpB = dyn_cast<Instruction>(GTIB.getOperand());
  if (!OpA || !OpB || OpA->getOpcode() != OpB->getOpcode() ||
      OpA->getType() != OpB->getType())
    return false;

  // Negating INT_MIN would overflow; bail out instead.
  if (PtrDelta.isMinSignedValue())
    return false;
  // Normalize so the delta is positive, swapping the roles of A and B.
  if (PtrDelta.isNegative()) {
    PtrDelta.negate();
    std::swap(OpA, OpB);
  }

  // Only look through a ZExt/SExt.
  bool Signed = isa<SExtInst>(OpA);
  if (!Signed && !isa<ZExtInst>(OpA))
    return false;

  // At this point A could be a function parameter, i.e. not an instruction
  Value *ValA = OpA->getOperand(0);
  OpB = dyn_cast<Instruction>(OpB->getOperand(0));
  if (!OpB || ValA->getType() != OpB->getType())
    return false;

  // The byte delta must be a whole multiple of the indexed element size;
  // IdxDiff is the difference expressed in elements, widened to the
  // (pre-extension) index width.
  uint64_t Stride = DL->getTypeAllocSize(GTIA.getIndexedType());
  if (PtrDelta.urem(Stride))
    return false;
  unsigned IdxBitWidth = OpA->getType()->getScalarSizeInBits();
  APInt IdxDiff = PtrDelta.udiv(Stride).zextOrSelf(IdxBitWidth);

  // Now we need to prove that adding IdxDiff to ValA won't overflow.
  bool Safe = false;
  // True when \p I is an add carrying the wrap flag matching the extension
  // kind (nsw for sext, nuw for zext).
  auto CheckFlags = [](Instruction *I, bool Signed) {
    BinaryOperator *BinOpI = cast<BinaryOperator>(I);
    return (Signed && BinOpI->hasNoSignedWrap()) ||
           (!Signed && BinOpI->hasNoUnsignedWrap());
  };

  // First attempt: if OpB is an add with NSW/NUW, and OpB is IdxDiff added to
  // ValA, we're okay.
  if (OpB->getOpcode() == Instruction::Add &&
      isa<ConstantInt>(OpB->getOperand(1)) &&
      IdxDiff.sle(cast<ConstantInt>(OpB->getOperand(1))->getSExtValue()) &&
      CheckFlags(OpB, Signed))
    Safe = true;

  // Second attempt: If both OpA and OpB is an add with NSW/NUW and with
  // the same LHS operand, we can guarantee that the transformation is safe
  // if we can prove that OpA won't overflow when IdxDiff added to the RHS
  // of OpA.
  // For example:
  //  %tmp7 = add nsw i32 %tmp2, %v0
  //  %tmp8 = sext i32 %tmp7 to i64
  //  ...
  //  %tmp11 = add nsw i32 %v0, 1
  //  %tmp12 = add nsw i32 %tmp2, %tmp11
  //  %tmp13 = sext i32 %tmp12 to i64
  //
  //  Both %tmp7 and %tmp2 has the nsw flag and the first operand
  //  is %tmp2. It's guaranteed that adding 1 to %tmp7 won't overflow
  //  because %tmp11 adds 1 to %v0 and both %tmp11 and %tmp12 has the
  //  nsw flag.
  OpA = dyn_cast<Instruction>(ValA);
  if (!Safe && OpA && OpA->getOpcode() == Instruction::Add &&
      OpB->getOpcode() == Instruction::Add &&
      OpA->getOperand(0) == OpB->getOperand(0) && CheckFlags(OpA, Signed) &&
      CheckFlags(OpB, Signed)) {
    Value *RHSA = OpA->getOperand(1);
    Value *RHSB = OpB->getOperand(1);
    Instruction *OpRHSA = dyn_cast<Instruction>(RHSA);
    Instruction *OpRHSB = dyn_cast<Instruction>(RHSB);
    // Match `x +nsw/nuw y` and `x +nsw/nuw (y +nsw/nuw IdxDiff)`.
    if (OpRHSB && OpRHSB->getOpcode() == Instruction::Add &&
        CheckFlags(OpRHSB, Signed) && isa<ConstantInt>(OpRHSB->getOperand(1))) {
      int64_t CstVal = cast<ConstantInt>(OpRHSB->getOperand(1))->getSExtValue();
      if (OpRHSB->getOperand(0) == RHSA && IdxDiff.getSExtValue() == CstVal)
        Safe = true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw -Idx)` and `x +nsw/nuw (y +nsw/nuw x)`.
    if (OpRHSA && OpRHSA->getOpcode() == Instruction::Add &&
        CheckFlags(OpRHSA, Signed) && isa<ConstantInt>(OpRHSA->getOperand(1))) {
      int64_t CstVal = cast<ConstantInt>(OpRHSA->getOperand(1))->getSExtValue();
      if (OpRHSA->getOperand(0) == RHSB && IdxDiff.getSExtValue() == -CstVal)
        Safe = true;
    }
    // Match `x +nsw/nuw (y +nsw/nuw c)` and
    // `x +nsw/nuw (y +nsw/nuw (c + IdxDiff))`.
    if (OpRHSA && OpRHSB && OpRHSA->getOpcode() == Instruction::Add &&
        OpRHSB->getOpcode() == Instruction::Add && CheckFlags(OpRHSA, Signed) &&
        CheckFlags(OpRHSB, Signed) && isa<ConstantInt>(OpRHSA->getOperand(1)) &&
        isa<ConstantInt>(OpRHSB->getOperand(1))) {
      int64_t CstValA =
          cast<ConstantInt>(OpRHSA->getOperand(1))->getSExtValue();
      int64_t CstValB =
          cast<ConstantInt>(OpRHSB->getOperand(1))->getSExtValue();
      if (OpRHSA->getOperand(0) == OpRHSB->getOperand(0) &&
          IdxDiff.getSExtValue() == (CstValB - CstValA))
        Safe = true;
    }
  }

  // Third attempt:
  // If all set bits of IdxDiff or any higher order bit other than the sign bit
  // are known to be zero in ValA, we can add Diff to it while guaranteeing no
  // overflow of any sort.
  unsigned BitWidth = ValA->getType()->getScalarSizeInBits();
  if (!Safe) {
    if (!OpA)
      return false;
    KnownBits Bits(BitWidth);
    computeKnownBits(OpA, Bits, *DL, 0, nullptr, OpA, DT);
    APInt BitsAllowedToBeSet = Bits.Zero.zext(IdxDiff.getBitWidth());
    if (Signed)
      BitsAllowedToBeSet.clearBit(BitWidth - 1);
    if (BitsAllowedToBeSet.ult(IdxDiff))
      return false;
  }

  // Overflow is excluded; now check that the indices actually differ by
  // IdxDiff, either via canonical SCEV addition or via the difference.
  const SCEV *OffsetSCEVA = SEC->getCanonSCEV(ValA);
  const SCEV *OffsetSCEVB = SEC->getCanonSCEV(OpB);
  const SCEV *C = SE->getConstant(IdxDiff.trunc(BitWidth));
  const SCEV *X = SEC->getCanonSCEV(SE->getAddExpr(OffsetSCEVA, C));

  // TODO: Improve SCEVCanon to simplify minus.
  if (X == OffsetSCEVB)
    return true;

  const SCEV *Dist =
      SEC->getCanonSCEV(SE->getMinusSCEV(OffsetSCEVB, OffsetSCEVA));
  return C == Dist;
}

// Two selects over the same condition yield consecutive pointers iff both
// the true arms and the false arms are pairwise consecutive. The recursion
// through nested selects is depth-limited.
bool AnalyzerBase::lookThroughSelects(Value *PtrA, Value *PtrB, APInt PtrDelta,
                                      unsigned Depth) const {
  if (Depth++ == MaxPointerSelectionDepth)
    return false;

  auto *SelA = dyn_cast<SelectInst>(PtrA);
  auto *SelB = dyn_cast<SelectInst>(PtrB);
  if (!SelA || !SelB || SelA->getCondition() != SelB->getCondition())
    return false;

  if (!areConsecutivePointers(SelA->getTrueValue(), SelB->getTrueValue(),
                              PtrDelta, Depth))
    return false;
  return areConsecutivePointers(SelA->getFalseValue(), SelB->getFalseValue(),
                                PtrDelta, Depth);
}

// NOTE: Quadratic search! \p Accesses size is limited by option
// `reflow-seq-access-infer-chunk-size`
//
// \p ConsecutiveChain: Store the consecutive access through the index of
//                      \p Accesses. -1 is stored if there's no consecutive
//                      access in \p Accesses.
// \p Heads           : The start of the pair of consecutive accesses.
// \p Tails           : The end of the pair of consecutive accesses.
void BBsAnalyzer::exploreConsecutiveChain(ArrayRef<Instruction *> Accesses,
                                          std::vector<int> &ConsecutiveChain,
                                          SetVector<int> &Heads,
                                          SetVector<int> &Tails) {
  for (int i = 0, e = Accesses.size(); i < e; ++i)
    for (int j = e - 1; j >= 0; --j) {
      if (i == j)
        continue;

      if (!areConsecutiveAccesses(Accesses[i], Accesses[j]))
        continue;

      // Handle multiple equivalent accesses.
      if (ConsecutiveChain[i] != -1) {
        // If it would require reorder, use previously found one.
        if (j < i)
          continue;

        // If it's further, use previously found one.
        int CurDistance = std::abs(ConsecutiveChain[i] - i);
        int NewDistance = std::abs(j - i);
        if (NewDistance > CurDistance)
          continue;
      }

      // Record the pair: i heads a consecutive step whose tail is j; the
      // per-index link lets inferChain follow whole runs later.
      Heads.insert(i);
      Tails.insert(j);
      ConsecutiveChain[i] = j;
    }
}

// Build maximal consecutive runs from the pairwise Head/Tail links computed
// by exploreConsecutiveChain and try to infer a sequential access for each
// run. Runs are only started from Heads that no unprocessed chain can still
// reach, so the longest available run is always preferred.
void BBsAnalyzer::inferChain(ArrayRef<Instruction *> Accesses) {
  DEBUG(dbgs() << "SeqAccessInfo: Inferring sequential accesses on "
               << Accesses.size() << " accesses.\n");

  SetVector<int> Heads, Tails;
  std::vector<int> ConsecutiveChain(Accesses.size(), -1);
  exploreConsecutiveChain(Accesses, ConsecutiveChain, Heads, Tails);

  SmallPtrSet<Instruction *, 16> InstructionsProcessed;
  for (int Head : Heads) {
    if (InstructionsProcessed.count(Accesses[Head]))
      continue;

    // A Head that is also some unprocessed chain's Tail is reachable from an
    // earlier access, so starting here would split a longer chain.
    bool LongerChainExists = false;
    for (unsigned TIt = 0; TIt < Tails.size(); TIt++)
      if (Head == Tails[TIt] &&
          !InstructionsProcessed.count(Accesses[Heads[TIt]])) {
        // Existing a consecutive access to Head means there's a longer chain.
        LongerChainExists = true;
        break;
      }

    // Explore a longer chain from other Head if detected a longer chain
    // existed.
    if (LongerChainExists)
      continue;

    // We found an access that starts a chain(Head). Now, follow the chain and
    // collect thoses accesses in order in Operands.
    SmallVector<Instruction *, 16> Operands;
    int I = Head;
    while (I != -1 && (Tails.count(I) || Heads.count(I))) {
      if (InstructionsProcessed.count(Accesses[I]))
        break;
      Operands.emplace_back(Accesses[I]);
      I = ConsecutiveChain[I];
    }

    inferChain(Operands, &InstructionsProcessed);
  }
}

void BBsAnalyzer::inferChain(
    ArrayRef<Instruction *> Chain,
    SmallPtrSet<Instruction *, 16> *InstructionsProcessed) {
  // A sequential access needs at least two members.
  if (Chain.size() < 2) {
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return;
  }

  BasicBlock::iterator From, To;
  std::tie(From, To) = getBoundaryInstrs(Chain);

  // Length of the longest legal prefix of Chain that may be merged into one
  // sequential access (0 when a side-effecting instruction forbids any).
  unsigned PrefixLen = getSeqAccessPrefixEndIdx(Chain, &*From, &*To);
  switch (PrefixLen) {
  case 0:
    // There exists a side effect instruction, can't infer sequential accesses.
    InstructionsProcessed->insert(Chain.begin(), Chain.end());
    return;
  case 1:
    // Failed right after the first access: drop only that access so the
    // remaining, smaller chain can be retried.
    InstructionsProcessed->insert(Chain.front());
    return;
  default:
    break;
  }

  // Keep only the valid sequential-access prefix and mark it as consumed.
  ArrayRef<Instruction *> Prefix = Chain.slice(0, PrefixLen);
  InstructionsProcessed->insert(Prefix.begin(), Prefix.end());

  inferChain(Prefix, getPointerOperand(Prefix[0]), getAccessElementType(Prefix),
             &*From, &*To);
}

void BBsAnalyzer::inferChain(ArrayRef<Instruction *> Chain, Value *StartV,
                             Type *T, Instruction *BP, Instruction *EP) {
  // Every chain member must occupy the same number of bits as the element
  // type T, otherwise they cannot be merged into one burst access.
  assert(llvm::all_of(Chain,
                      [this, T](Value *V) {
                        return DL->getTypeSizeInBits(T) ==
                               DL->getTypeSizeInBits(getValueType(V));
                      }) &&
         "Cannot burst accesses with different type size!");

  auto First = Chain[0];
  if (!isValidSeqAccessRegion(First)) {
    DEBUG(dbgs() << "Identified sequential access chain starts with " << *First
                 << " is exposed in the dataflow region. Give up inference.\n");

    if (EnableDiag)
      RD.emitMayExposeInDataFlowRegion(First);
    bool AlreadyInSA = SAI->isContainedInSeqAccess(First);
    SAI->addDiagFailureInfo(First, DiagFailureInfo::ExposeInDataflowRegion,
                            AlreadyInSA);
    return;
  }

  // Register every access of the chain in a fresh zone of the current node.
  auto Zone = createZone(SN);
  for (auto Access : Chain) {
    DEBUG(dbgs() << "Access: " << *Access << '\n');
    SAI->addAccessZone(Access, Zone);
  }

  // Describe the burst: its start address and length as SCEVs.
  Type *SizeTy =
      DL->getIntPtrType(PointerType::get(T, getPointerAddressSpace(First)));
  auto BurstLen = SE->getConstant(
      cast<ConstantInt>(ConstantInt::get(SizeTy, Chain.size())));
  auto StartAddr = SE->getSCEV(StartV);
  auto Dir = isa<LoadInst>(First) ? SeqAccess::AccessDirection::LOAD
                                  : SeqAccess::AccessDirection::STORE;
  SN->addSeqAccess(const_cast<Value *>(getChainID(*DL, StartV)), T, Chain, Dir,
                   StartAddr, BurstLen, BP, EP);

  DEBUG(dbgs() << "SeqAccessInfo: Infer sequential access:\nStart: "
               << *StartAddr << "\nLength: " << *BurstLen << '\n');
  SAI->updateStatistics();
}

void RegionAnalyzer::inferChain(ArrayRef<Instruction *> Chain, Value *StartV,
                                Type *T, Instruction *BP, Instruction *EP) {
  // Every chain member must occupy the same number of bits as the element
  // type T, otherwise they cannot be merged into one burst access.
  assert(llvm::all_of(Chain,
                      [this, T](Value *V) {
                        return DL->getTypeSizeInBits(T) ==
                               DL->getTypeSizeInBits(getValueType(V));
                      }) &&
         "Cannot burst accesses with different type size!");

  auto FAI = Chain[0];
  if (!isValidSeqAccessRegion(FAI)) {
    DEBUG(dbgs() << "Identified sequential access chain starts with " << *FAI
                 << " is exposed in the dataflow region. Give up inference.\n");

    if (EnableDiag)
      RD.emitMayExposeInDataFlowRegion(FAI);
    bool ExistingSA = SAI->isContainedInSeqAccess(FAI);
    SAI->addDiagFailureInfo(FAI, DiagFailureInfo::ExposeInDataflowRegion,
                            ExistingSA);
    return;
  }

  // Collect the accesses of this chain, folding in any sequential access
  // previously inferred for a chain member so the new, larger access replaces
  // the old one.
  SmallVector<Instruction *, 8> Accesses;
  auto NodeZone = createZone(SN);
  for (auto AI : Chain) {
    DEBUG(dbgs() << " process Chain Access: " << *AI << '\n');

    auto OldNodeZone = SAI->getSeqAccessZone(AI);
    if (!OldNodeZone) {
      // AI is not part of an existing sequential access; adopt it directly.
      SAI->addAccessZone(AI, NodeZone);
      Accesses.emplace_back(AI);
      continue;
    }

    // AI already belongs to a sequential access in another zone: migrate all
    // members of that access into the new zone and drop the old access.
    auto RNode = OldNodeZone->second;
    auto OldN = RNode->getZoneNode(*OldNodeZone);
    // Check the zone node BEFORE dereferencing it; previously getSeqAccess()
    // was called first and the assert came after, so it could never guard the
    // dereference.
    assert(OldN &&
           "Expect consistent info between AccessZone map and SeqAccess node!");
    auto SA = OldN->getSeqAccess(AI);
    assert(SA &&
           "Expect consistent info between AccessZone map and SeqAccess node!");

    for (auto I : SA->getAccesses()) {
      SAI->updateAccessZone(I, NodeZone);
      Accesses.emplace_back(I);
    }

    OldN->remove(SA);
  }

  // Describe the merged burst: start address and total length as SCEVs.
  // Note: BP/EP are not used here; the merged access spans from the first to
  // the last collected access.
  Type *SizeTy =
      DL->getIntPtrType(PointerType::get(T, getPointerAddressSpace(FAI)));
  auto Size = ConstantInt::get(SizeTy, Accesses.size());
  auto ScevBurstSize = SE->getConstant(cast<ConstantInt>(Size));
  auto StartAddr = SE->getSCEV(StartV);
  SN->addSeqAccess(const_cast<Value *>(getChainID(*DL, StartV)), T, Accesses,
                   isa<LoadInst>(FAI) ? SeqAccess::AccessDirection::LOAD
                                      : SeqAccess::AccessDirection::STORE,
                   StartAddr, ScevBurstSize, Accesses.front(), Accesses.back());

  DEBUG(dbgs() << "SeqAccessInfo: Infer sequential access:\nStart: "
               << *StartAddr << "\nLength: " << *ScevBurstSize << '\n');
  SAI->updateStatistics();
}

Type *AnalyzerBase::getAccessElementType(ArrayRef<Instruction *> Chain) {
  // Prefer an integer element type: return the type of the first access that
  // is an int (or int vector), map the first pointer-typed access to an
  // equally sized integer, and otherwise fall back to the last access's type.
  Type *ElemTy = nullptr;

  for (auto *Access : Chain) {
    ElemTy = getValueType(Access);
    if (ElemTy->isIntOrIntVectorTy())
      break;

    if (ElemTy->isPtrOrPtrVectorTy()) {
      ElemTy = Type::getIntNTy(*Ctx, DL->getTypeSizeInBits(ElemTy));
      break;
    }
  }

  return ElemTy;
}

Type *AnalyzerBase::getAccessElementType(Instruction *CI) {
  // Pointer-typed accesses are mapped to an integer of the same bit width;
  // all other types (integer ones included) are returned unchanged.
  Type *Ty = getValueType(CI);
  if (Ty->isPtrOrPtrVectorTy())
    return Type::getIntNTy(*Ctx, DL->getTypeSizeInBits(Ty));

  return Ty;
}

std::pair<BasicBlock::iterator, BasicBlock::iterator>
BBsAnalyzer::getBoundaryInstrs(ArrayRef<Instruction *> Chain) {
  // Scan the single-successor block sequence [SN->RS, SN->RE] in program
  // order for the first and last instruction that belongs to Chain, and
  // return the half-open iterator range covering them.
  BasicBlock::iterator FirstInstr = Chain[0]->getIterator();
  BasicBlock::iterator LastInstr = Chain[0]->getIterator();
  unsigned NumFound = 0;
  auto *CurrBB = SN->RS;
  do {
    for (Instruction &I : *CurrBB) {
      if (!is_contained(Chain, &I))
        continue;

      ++NumFound;

      if (NumFound == 1)
        FirstInstr = I.getIterator();

      if (NumFound == Chain.size()) {
        LastInstr = I.getIterator();
        break;
      }
    }

    // Stop once every chain member has been located; scanning the remaining
    // blocks cannot change the boundaries.
    if (NumFound == Chain.size())
      break;

    if (CurrBB == SN->RE)
      break;

    CurrBB = CurrBB->getUniqueSuccessor();
  } while (CurrBB);

  // Range is [first, last).
  return std::make_pair(FirstInstr, ++LastInstr);
}

MemoryLocation AnalyzerBase::getChainMemoryLocation(Instruction *StartI,
                                                    Type *VT,
                                                    size_t ChainSize) {
  // Build a location covering the whole chain: ChainSize elements of VT
  // starting at StartI's pointer operand, tagged with StartI's AA metadata.
  AAMDNodes Tags;
  StartI->getAAMetadata(Tags);
  auto Len = DL->getTypeStoreSize(VT) * ChainSize;
  return MemoryLocation(getPointerOperand(StartI), Len, Tags);
}

unsigned BBsAnalyzer::getSeqAccessPrefixEndIdx(ArrayRef<Instruction *> Chain,
                                               Instruction *FromI,
                                               Instruction *ToI) {
  // Compute how many leading members of Chain may legally form one sequential
  // access. Scans the instruction range [FromI, ToI) across the
  // single-successor block sequence starting at SN->RS:
  //  - returns 0 when a side-effecting instruction (or one whose mod/ref
  //    conflicts with the chain's memory) lies in the range;
  //  - otherwise returns the length of the longest prefix whose members do
  //    not alias any intervening non-chain memory access.
  BasicBlock::iterator From = FromI->getIterator();
  BasicBlock::iterator To = ToI->getIterator();
  // Program-order positions of non-chain memory accesses in the range.
  MapVector<Instruction *, unsigned> MemoryInstrs;
  // Program-order positions of the chain members in the range.
  MapVector<Instruction *, unsigned> ChainInstrs;
  auto *C0 = Chain[0];
  size_t Size = Chain.size();
  MemoryLocation CLoc = getChainMemoryLocation(C0, getValueType(C0), Size);
  unsigned InstrIdx = 0;
  auto *CurrBB = SN->RS;
  do {
    for (auto I = From; (I != To) && (I != CurrBB->end()); ++I) {
      ++InstrIdx;
      auto CandInst = cast<Instruction>(&*I);
      if (hasNoSideEffect(CandInst))
        continue;

      // Plain memory accesses are collected for the alias check below,
      // partitioned into chain members and foreign accesses.
      if (isa<LoadInst>(CandInst) || isa<StoreInst>(CandInst) ||
          isa<MAXIStoreInst>(CandInst)) {
        if (!is_contained(Chain, CandInst)) {
          MemoryInstrs.insert({CandInst, InstrIdx});
          continue;
        }

        ChainInstrs.insert({CandInst, InstrIdx});
        continue;
      }

      ModRefInfo MRInfo = AA->getModRefInfo(CandInst, CLoc);
      // Don't infer sequential accesses since it's not legal if any of below
      // happens.
      // - there's write between a Chain of loads.
      // - there's read or write between a Chain of stores.
      if (hasSideEffect(CandInst) || (isa<LoadInst>(C0) && isModSet(MRInfo)) ||
          ((isa<StoreInst>(C0) || isa<MAXIStoreInst>(C0)) &&
           isModOrRefSet(MRInfo))) {
        // Stream *CandInst so the instruction text (not its pointer value)
        // is printed, matching every other DEBUG line in this file.
        DEBUG(dbgs() << "SeqAccessInfo: Found side-effecting operation: "
                     << *CandInst << '\n');
        if (EnableDiag)
          RD.emitSideEffectInst(CandInst, C0);
        bool ExistingSA = SAI->isContainedInSeqAccess(C0);
        SAI->addDiagFailureInfo(C0, DiagFailureInfo::SideEffect, ExistingSA,
                                CandInst);
        return 0;
      }
    }

    if (CurrBB == SN->RE)
      break;

    CurrBB = CurrBB->getUniqueSuccessor();
    From = CurrBB->begin();
  } while (CurrBB);

  assert(Size == ChainInstrs.size() &&
         "All instructions in the Chain must exist in [From, To).");

  // Walk the chain members in program order and stop at the first one that
  // aliases a foreign memory access it would have to be moved across.
  unsigned ChainIdx = 0;
  for (auto EntryChain : ChainInstrs) {
    Instruction *ChainInstrValue = EntryChain.first;
    unsigned ChainInstrIdx = EntryChain.second;

    for (auto EntryMem : MemoryInstrs) {
      Instruction *MemInstrValue = EntryMem.first;
      unsigned MemInstrIdx = EntryMem.second;

      // Two loads never conflict.
      if (isa<LoadInst>(MemInstrValue) && isa<LoadInst>(ChainInstrValue))
        continue;

      // We can ignore the alias as long as the load comes before the store,
      // because that means we won't move the load pass the store for
      // sequential access (When generating the seq loads, the begin of the seq
      // load is inserted at the location of the first load in the chain).
      if ((isa<StoreInst>(MemInstrValue) ||
           isa<MAXIStoreInst>(MemInstrValue)) &&
          isa<LoadInst>(ChainInstrValue) && ChainInstrIdx < MemInstrIdx)
        continue;

      // Same case, but in reverse.
      if (isa<LoadInst>(MemInstrValue) &&
          (isa<StoreInst>(ChainInstrValue) ||
           isa<MAXIStoreInst>(ChainInstrValue)) &&
          ChainInstrIdx > MemInstrIdx)
        continue;

      if (!AA->isNoAlias(MemoryLocation::get(MemInstrValue),
                         MemoryLocation::get(ChainInstrValue))) {
        DEBUG({
          Value *Ptr0 = getPointerOperand(MemInstrValue);
          Value *Ptr1 = getPointerOperand(ChainInstrValue);
          dbgs() << "SeqAccessInfo: Found alias.\n"
                    "        Aliasing instruction and pointer:\n"
                 << *MemInstrValue << " aliases " << *Ptr0 << '\n'
                 << "        Aliased instruction and pointer:\n"
                 << *ChainInstrValue << " aliases " << *Ptr1 << '\n';
        });
        if (EnableDiag)
          RD.emitAccessClobbered(ChainInstrValue, MemInstrValue);
        bool ExistingSA = SAI->isContainedInSeqAccess(ChainInstrValue);
        SAI->addDiagFailureInfo(ChainInstrValue,
                                DiagFailureInfo::AccessClobbered, ExistingSA,
                                MemInstrValue);
        return ChainIdx;
      }
    }
    ChainIdx++;
  }
  return Size;
}

void SeqAccessesInfo::print(raw_ostream &OS) const {
  // Nothing to annotate when no sequential access was inferred.
  if (!empty()) {
    SeqAccessesInfoWriter Writer(this, *SE);
    F->print(OS, &Writer);
  }
}

// (Re)bind all analysis inputs for F_ and run the calculation. The raw
// pointers are cached for the lifetime of this result, so the passed-in
// analyses must outlive any use of this SeqAccessesInfo.
void SeqAccessesInfo::recalculate(Function &F_, DominatorTree *DT_,
                                  PostDominatorTree *PDT_, LoopInfo *LI_,
                                  OrderedInstructions *OI_, RegionInfo *RI_,
                                  AliasAnalysis *AA_, ScalarEvolution *SE_,
                                  ScalarEvolutionCanon *SEC_,
                                  LoopPredicateAnalysis *LPA_,
                                  OptimizationRemarkEmitter *ORE_,
                                  bool EnableDiag_) {
  F = &F_;
  Ctx = &F_.getParent()->getContext();
  DL = &F_.getParent()->getDataLayout();
  DT = DT_;
  PDT = PDT_;
  LI = LI_;
  OI = OI_;
  RI = RI_;
  AA = AA_;
  SE = SE_;
  SEC = SEC_;
  LPA = LPA_;
  ORE = ORE_;
  // OpenCL mode: SPIR kernels, or functions with the SPIR calling convention.
  IsOpenCL =
      isa<spir::Kernel>(F_) || (F_.getCallingConv() == CallingConv::SPIR_FUNC);
  EnableDiag = EnableDiag_;

  calculate(F_);
}

SeqAccessesRegionNode *
SeqAccessesInfo::getSeqAccessesRegionNode(const Region *R) {
  // Note: operator[] default-constructs an empty (null) entry for regions
  // that were never recorded, matching the original behavior.
  auto &Node = SeqAccessesRegionMap[R];
  return Node.get();
}

const Region *SeqAccessesInfo::getLoopRegion(const Loop *L) {
  // Region recorded for L during calculation (null when none was mapped;
  // operator[] default-inserts a null entry, as in the original).
  auto *R = LoopRegion[L];
  return R;
}

bool SeqAccessesInfo::isContainedInSeqAccess(const Instruction *I) {
  // True when I was recorded as a member of some inferred sequential access.
  return AccessZone.find(I) != AccessZone.end();
}

Zone *SeqAccessesInfo::getSeqAccessZone(const Instruction *I) {
  // Zone recorded for I, or null when I is not part of any inferred
  // sequential access.
  auto It = AccessZone.find(I);
  return It == AccessZone.end() ? nullptr : &It->second.first;
}

SeqAccessesZoneNode *
SeqAccessesInfo::getSeqAccessZoneNode(const Instruction *I) {
  // Resolve I's zone first; without one there is no zone node either.
  auto Z = getSeqAccessZone(I);
  if (!Z)
    return nullptr;

  return Z->second->getZoneNode(*Z);
}

const Loop *SeqAccessesInfo::getAccessInferredLoop(const Instruction *I) {
  // Loop in which I's sequential access was inferred, or null when I is not
  // part of any sequential access.
  auto It = AccessZone.find(I);
  return It == AccessZone.end() ? nullptr : It->second.second;
}

const SCEV *SeqAccessesInfo::getSeqAccessLen(const Instruction *I) {
  // Length (as a SCEV) of the sequential access containing I, if any.
  if (auto NodeZone = getSeqAccessZone(I))
    if (auto SA = NodeZone->second->getSeqAccess(NodeZone, I))
      return SA->getLen();

  return nullptr;
}

// Whether the analyzed function was detected as OpenCL (SPIR kernel or
// SPIR calling convention) during recalculate().
bool SeqAccessesInfo::isForOpenCL() { return IsOpenCL; }

void SeqAccessesInfo::releaseMemory() {
  // DiagFailureInfos owns its values through raw pointers; free them before
  // dropping the map entries.
  for (auto &Entry : DiagFailureInfos)
    delete Entry.second;
  DiagFailureInfos.clear();

  // Drop all remaining per-function analysis state.
  AccessZone.clear();
  CouldNotAnalyzedLoops.clear();
  SeqAccessesRegionMap.clear();
  TopoOrderedBBs.clear();
  VisitedBBs.clear();
  VisitedLoops.clear();
  LoopRegion.clear();
}

// No invariants are checked for now; placeholder to satisfy the pass API.
void SeqAccessesInfo::verifyAnalysis() const {}

//===----------------------------------------------------------------------===//
// SeqAccessesInfoPass implementation
//

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug helper: print the annotated function to the debug stream.
LLVM_DUMP_METHOD void SeqAccessesInfo::dump() const { print(dbgs()); }
#endif

bool SeqAccessesInfoPass::skipFunction(const Function &F) const {
  // Skip declarations, vararg functions, and FPGA wrapper functions; none of
  // them carries accesses this analysis should look at.
  return F.isDeclaration() || F.isVarArg() ||
         F.hasFnAttribute("fpga.wrapper.func");
}

// Pass entry point: gather the required analyses and (re)compute the
// sequential-access information for F. Never mutates the IR (returns false).
bool SeqAccessesInfoPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  // Drop results left over from a previously analyzed function.
  releaseMemory();

  auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto PDT = &getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  OrderedInstructions OrderedInstrs(DT);
  auto RI = &getAnalysis<RegionInfoPass>().getRegionInfo();
  auto LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  // NOTE(review): RAAR/AA are stack-local while recalculate() caches the AA
  // pointer -- confirm no use of it outlives this call.
  ReflowAAResult RAAR(F.getParent()->getDataLayout(), LI);
  auto AA = createReflowAAResults(*this, RAAR);
  auto SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  auto SEC = &getAnalysis<ScalarEvolutionCanonWrapperPass>().getSEC();
  auto LPA = &getAnalysis<LoopPredicateAnalysisWrapperPass>()
                  .getPredicateAnalysisInfo();
  auto ORE = &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  SI.recalculate(F, DT, PDT, LI, &OrderedInstrs, RI, &AA, SE, SEC, LPA, ORE,
                 EnableSADiag);
  return false;
}

// Forward to the analysis result so per-function state is freed between runs.
void SeqAccessesInfoPass::releaseMemory() { SI.releaseMemory(); }

// Forward verification to the analysis result (currently a no-op there).
void SeqAccessesInfoPass::verifyAnalysis() const { SI.verifyAnalysis(); }

// Declare required and preserved analyses. This is an analysis-only pass, so
// the CFG (and DT/SE in particular) are preserved.
void SeqAccessesInfoPass::getAnalysisUsage(AnalysisUsage &AU) const {
  getReflowAAResultsAnalysisUsage(AU);
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<PostDominatorTreeWrapperPass>();
  AU.addRequired<RegionInfoPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<ScalarEvolutionWrapperPass>();
  AU.addRequired<ScalarEvolutionCanonWrapperPass>();
  AU.addRequired<LoopPredicateAnalysisWrapperPass>();
  AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  AU.addPreserved<ScalarEvolutionWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.setPreservesCFG();
}

// Print the annotated function for -analyze style output.
void SeqAccessesInfoPass::print(raw_ostream &OS, const Module *) const {
  SI.print(OS);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debug helper: dump the underlying analysis result.
void SeqAccessesInfoPass::dump() const { SI.dump(); }
#endif

void SeqAccessesInfoWriter::printLoopInfo(const Loop *L,
                                          formatted_raw_ostream &OS) const {
  OS << "loop<";
  L->getHeader()->printAsOperand(OS, false);
  OS << "> at depth " << L->getLoopDepth();

  if (!EnableVerboseSeqAccessInfo) {
    OS << '\n';
    return;
  }

  if (SE.hasLoopInvariantBackedgeTakenCount(L)) {
    const SCEV *BTC = SE.getBackedgeTakenCount(L);
    if (!isa<SCEVCouldNotCompute>(BTC)) {
      OS << " with exact backedge-taken count of ";
      BTC->print(OS);
    }
    OS << '\n';
  }
}

void SeqAccessesInfoWriter::printSeqAccessRegionInfo(
    const SeqAccess *SA, formatted_raw_ostream &OS) const {
  // Describe where the sequential access lives: its inferred loop when one is
  // known, otherwise the basic-block span of its begin/end positions.
  OS << "; Sequential access region: ";
  auto CSI = const_cast<SeqAccessesInfo *>(SI);
  if (auto L = CSI->getAccessInferredLoop(SA->getSeqAccessChainBegin())) {
    printLoopInfo(L, OS);
    return;
  }

  auto BeginBB = SA->getBeginPosition()->getParent();
  auto EndBB = SA->getEndPosition()->getParent();
  OS << BeginBB->getName();
  if (BeginBB != EndBB)
    OS << " -> " << EndBB->getName();
  OS << '\n';
}

// Unique identity used by LLVM's pass registration machinery.
char SeqAccessesInfoPass::ID = 0;

// Register the pass and its dependencies (cfg-only = true, analysis = true).
INITIALIZE_PASS_BEGIN(SeqAccessesInfoPass, DEBUG_TYPE, PASS_DESCRIPTION, true,
                      true)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionCanonWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPredicateAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(SeqAccessesInfoPass, DEBUG_TYPE, PASS_DESCRIPTION, true,
                    true)

// Factory for clients that schedule the analysis manually.
Pass *llvm::createSeqAccessesInfoPass() { return new SeqAccessesInfoPass(); }
