#include "reflow/SCEV/ScalarEvolutionCanon.h"
#include "reflow/Options.h"
#include "reflow/SCEV/SCEVZ3.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/Support/Debug.h"
#include <cassert>

#define DEBUG_TYPE "scalar-evolution-canon"

using namespace llvm;

// When set, emit SMT-LIB (z3) queries to stderr comparing each original SCEV
// with its canonicalized form so equivalence can be checked offline.
static cl::opt<bool> PrintZ3(
    "print-scev-z3-comparison",
    cl::desc("Print z3 format for comparing SCEV and canonicalized SCEV"),
    cl::Hidden, cl::cat(ReflowCategory));

// Recursion cap for CompareValueComplexity; beyond this depth two values are
// simply declared equally complex.
static cl::opt<unsigned> MaxValueCompareDepth(
    "reflow-scalar-evolution-max-value-compare-depth",
    cl::desc("Maximum depth of recursive value complexity comparisons"),
    cl::init(2), cl::Hidden, cl::cat(ReflowCategory));

// Recursion cap for CompareSCEVComplexity.
// NOTE(review): the flag string "reflow-scev-addops-inline-threshold" matches
// neither the variable name nor the description (looks like a copy-paste from
// an inline-threshold option).  Renaming it would change the command-line
// interface, so confirm no scripts depend on the current spelling first.
static cl::opt<unsigned> MaxSCEVCompareDepth(
    "reflow-scev-addops-inline-threshold",
    cl::desc("Maximum depth of recursive SCEV complexity comparisons"),
    cl::init(32), cl::Hidden, cl::cat(ReflowCategory));

void ScalarEvolutionCanonWrapperPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
  AU.addRequiredTransitive<LoopInfoWrapperPass>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.setPreservesAll();
}

/// Build the ScalarEvolutionCanon result for \p F.  Always returns false:
/// this pass only analyzes, it never mutates the IR.
bool ScalarEvolutionCanonWrapperPass::runOnFunction(Function &F) {
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  // NOTE(review): Z3WRITER is a stack local handed to the ScalarEvolutionCanon
  // constructor, yet SEC outlives this function.  If the constructor stores a
  // reference rather than a copy, SEC would hold a dangling reference —
  // confirm against the ScalarEvolutionCanon declaration.
  SCEVZ3Rewriter Z3WRITER(F, SE);
  SEC.reset(new ScalarEvolutionCanon(
      F, SE, getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
      getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), Z3WRITER));

  // Verify with z3 to see if the canonicalized SCEV and the original SCEV are
  // equivalent
  if (PrintZ3)
    SEC->runZ3Check();

  // For now, we did not change anything to the IR
  return false;
}

/// Drop the per-function ScalarEvolutionCanon result between runs.
/// TODO: Find out where we need to manually release the memory for the unique
/// pointer
void ScalarEvolutionCanonWrapperPass::releaseMemory() { SEC.reset(); }

/// Legacy-PM print hook; forwards to the analysis result.
/// NOTE(review): dereferences SEC unconditionally, so this assumes
/// runOnFunction has already populated it — confirm callers guarantee that.
void ScalarEvolutionCanonWrapperPass::print(raw_ostream &OS,
                                            const Module *) const {
  SEC->print(OS);
}

void ScalarEvolutionCanon::runZ3Check() {
  for (auto &I : instructions(F))
    if (SE.isSCEVable(I.getType())) {
      // Get the source SCEV
      const SCEV *ISCEV = SE.getSCEV(&I);
      // Canonicalize the source SCEV and get a better SCEV
      const SCEV *betterSCEV = canonicalize(ISCEV);
      checkEquivalence(betterSCEV, ISCEV);
    }
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

/// Compare the two values \p LV and \p RV in terms of their "complexity" where
/// "complexity" is a partial (and somewhat ad-hoc) relation used to order
/// operands in SCEV expressions.  \p EqCache is a set of pairs of values that
/// have been previously deemed to be "equally complex" by this routine.  It is
/// intended to avoid exponential time complexity in cases like:
///
///   %a = f(%x, %y)
///   %b = f(%a, %a)
///   %c = f(%b, %b)
///
///   %d = f(%x, %y)
///   %e = f(%d, %d)
///   %f = f(%e, %e)
///
///   CompareValueComplexity(%f, %c)
///
/// Since we do not continue running this routine on expression trees once we
/// have seen unequal values, there is no need to track them in the cache.
///
/// \returns negative, zero, or positive if \p LV is less complex than, as
/// complex as, or more complex than \p RV.
static int
CompareValueComplexity(EquivalenceClasses<const Value *> &EqCacheValue,
                       const LoopInfo *const LI, Value *LV, Value *RV,
                       unsigned Depth) {
  // Past the depth cap (or for values already proven equivalent), declare a
  // tie instead of recursing further.
  if (Depth > MaxValueCompareDepth || EqCacheValue.isEquivalent(LV, RV))
    return 0;

  // Order pointer values after integer values. This helps SCEVExpander form
  // GEPs.
  bool LIsPointer = LV->getType()->isPointerTy(),
       RIsPointer = RV->getType()->isPointerTy();
  if (LIsPointer != RIsPointer)
    return (int)LIsPointer - (int)RIsPointer;

  // Compare getValueID values.
  unsigned LID = LV->getValueID(), RID = RV->getValueID();
  if (LID != RID)
    return (int)LID - (int)RID;

  // Sort arguments by their position.
  if (const auto *LA = dyn_cast<Argument>(LV)) {
    // Same ValueID, so RV must also be an Argument.
    const auto *RA = cast<Argument>(RV);
    unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
    return (int)LArgNo - (int)RArgNo;
  }

  if (const auto *LGV = dyn_cast<GlobalValue>(LV)) {
    const auto *RGV = cast<GlobalValue>(RV);

    const auto IsGVNameSemantic = [&](const GlobalValue *GV) {
      auto LT = GV->getLinkage();
      return !(GlobalValue::isPrivateLinkage(LT) ||
               GlobalValue::isInternalLinkage(LT));
    };

    // Use the names to distinguish the two values, but only if the
    // names are semantically important.
    if (IsGVNameSemantic(LGV) && IsGVNameSemantic(RGV))
      return LGV->getName().compare(RGV->getName());
  }

  // For instructions, compare their loop depth, and their operand count.  This
  // is pretty loose.
  if (const auto *LInst = dyn_cast<Instruction>(LV)) {
    const auto *RInst = cast<Instruction>(RV);

    // Compare loop depths.
    const BasicBlock *LParent = LInst->getParent(),
                     *RParent = RInst->getParent();
    if (LParent != RParent) {
      unsigned LDepth = LI->getLoopDepth(LParent),
               RDepth = LI->getLoopDepth(RParent);
      if (LDepth != RDepth)
        return (int)LDepth - (int)RDepth;
    }

    // Compare the number of operands.
    unsigned LNumOps = LInst->getNumOperands(),
             RNumOps = RInst->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Same operand counts: compare operands lexicographically, recursing one
    // level deeper per operand pair.
    for (unsigned Idx : seq(0u, LNumOps)) {
      int Result =
          CompareValueComplexity(EqCacheValue, LI, LInst->getOperand(Idx),
                                 RInst->getOperand(Idx), Depth + 1);
      if (Result != 0)
        return Result;
    }
  }

  // Everything compared equal: remember the equivalence so future queries
  // short-circuit at the top of this function.
  EqCacheValue.unionSets(LV, RV);
  return 0;
}

// Return negative, zero, or positive, if LHS is less than, equal to, or greater
// than RHS, respectively. A three-way result allows recursive comparisons to be
// more efficient.
static int
CompareSCEVComplexity(EquivalenceClasses<const SCEV *> &EqCacheSCEV,
                      EquivalenceClasses<const Value *> &EqCacheValue,
                      const LoopInfo *const LI, const SCEV *LHS,
                      const SCEV *RHS, DominatorTree &DT, unsigned Depth = 0) {
  // Fast-path: SCEVs are uniqued so we can do a quick equality check.
  if (LHS == RHS)
    return 0;

  // Primarily, sort the SCEVs by their getSCEVType().
  unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
  if (LType != RType)
    return (int)LType - (int)RType;

  // Past the depth cap (or for known-equivalent SCEVs), declare a tie.
  if (Depth > MaxSCEVCompareDepth || EqCacheSCEV.isEquivalent(LHS, RHS))
    return 0;
  // Aside from the getSCEVType() ordering, the particular ordering
  // isn't very important except that it's beneficial to be consistent,
  // so that (a + b) and (b + a) don't end up as different expressions.
  switch (static_cast<SCEVTypes>(LType)) {
  case scUnknown: {
    const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
    const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

    // Delegate to the IR-value ordering for the wrapped values.
    int X = CompareValueComplexity(EqCacheValue, LI, LU->getValue(),
                                   RU->getValue(), Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scConstant: {
    const SCEVConstant *LC = cast<SCEVConstant>(LHS);
    const SCEVConstant *RC = cast<SCEVConstant>(RHS);

    // Compare constant values.
    const APInt &LA = LC->getAPInt();
    const APInt &RA = RC->getAPInt();
    unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
    if (LBitWidth != RBitWidth)
      return (int)LBitWidth - (int)RBitWidth;
    // Same width but LHS != RHS, and SCEVs are uniqued, so the APInts must
    // differ; an unsigned comparison yields a strict order.
    return LA.ult(RA) ? -1 : 1;
  }

  case scAddRecExpr: {
    const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
    const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);

    // There is always a dominance between two recs that are used by one SCEV,
    // so we can safely sort recs by loop header dominance. We require such
    // order in getAddExpr.
    const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
    if (LLoop != RLoop) {
      const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader();
      assert(LHead != RHead && "Two loops share the same header?");
      if (DT.dominates(LHead, RHead))
        return 1;
      else
        assert(DT.dominates(RHead, LHead) &&
               "No dominance between recurrences used by one SCEV?");
      return -1;
    }

    // Addrec complexity grows with operand count.
    unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LA->getNoWrapFlags() != RA->getNoWrapFlags())
      return (int)LA->getNoWrapFlags() - (int)RA->getNoWrapFlags();

    // Lexicographically compare.
    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LA->getOperand(i), RA->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scAddExpr:
  case scMulExpr:
  case scSMaxExpr:
  case scUMaxExpr: {
    const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
    const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);

    // Lexicographically compare n-ary expressions.
    unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
    if (LNumOps != RNumOps)
      return (int)LNumOps - (int)RNumOps;

    // Compare NoWrap flags.
    if (LC->getNoWrapFlags() != RC->getNoWrapFlags())
      return (int)LC->getNoWrapFlags() - (int)RC->getNoWrapFlags();

    for (unsigned i = 0; i != LNumOps; ++i) {
      int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                    LC->getOperand(i), RC->getOperand(i), DT,
                                    Depth + 1);
      if (X != 0)
        return X;
    }
    EqCacheSCEV.unionSets(LHS, RHS);
    return 0;
  }

  case scUDivExpr: {
    const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
    const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);

    // Lexicographically compare udiv expressions.
    int X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getLHS(),
                                  RC->getLHS(), DT, Depth + 1);
    if (X != 0)
      return X;
    X = CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getRHS(),
                              RC->getRHS(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scTruncate:
  case scZeroExtend:
  case scSignExtend: {
    const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
    const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);

    // Compare cast expressions by operand.
    int X =
        CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, LC->getOperand(),
                              RC->getOperand(), DT, Depth + 1);
    if (X == 0)
      EqCacheSCEV.unionSets(LHS, RHS);
    return X;
  }

  case scCouldNotCompute:
    llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
  }
  llvm_unreachable("Unknown SCEV kind!");
}

/// Given a list of SCEV objects, order them by their complexity, and group
/// objects of the same complexity together by value.  When this routine is
/// finished, we know that any duplicates in the vector are consecutive and that
/// complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops, LoopInfo *LI,
                              DominatorTree &DT) {
  if (Ops.size() < 2)
    return; // Noop

  EquivalenceClasses<const SCEV *> EqCacheSCEV;
  EquivalenceClasses<const Value *> EqCacheValue;
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
    if (CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI, RHS, LHS, DT) < 0)
      std::swap(LHS, RHS);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(),
                   [&](const SCEV *LHS, const SCEV *RHS) {
                     return CompareSCEVComplexity(EqCacheSCEV, EqCacheValue, LI,
                                                  LHS, RHS, DT) < 0;
                   });

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e - 2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i + 1; j != e && Ops[j]->getSCEVType() == Complexity;
         ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i + 1], Ops[j]);
        ++i; // no need to rescan it.
        if (i == e - 2)
          return; // Done!
      }
    }
  }
}

/// Constants are already in canonical form; return them unchanged.
const SCEV *ScalarEvolutionCanon::visitConstant(const SCEVConstant *S) {
  return S;
}

/// Distribute truncation over the operands of add, mul, and addrec:
///   trunc(x + y + ...) -> trunc(x) + trunc(y) + ...
///   trunc(x * y * ...) -> trunc(x) * trunc(y) * ...
///   trunc({a,+,b})     -> {trunc(a),+,trunc(b)}
/// These identities hold in two's-complement (modular) arithmetic.  Any other
/// operand kind is returned unchanged.
const SCEV *ScalarEvolutionCanon::visitTruncateExpr(const SCEVTruncateExpr *S) {
  const SCEV *Op = S->getOperand();
  Type *Ty = S->getType();
  // Use dbgs() rather than errs() for debug output, consistent with the rest
  // of this file.
  DEBUG(dbgs() << "Truncate Operand: " << *Op << "\n");

  // trunc (x + y + z + ...) ---> trunc (x) + trunc(y) + trunc(z) + ...
  if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *AddOp : SA->operands())
      Operands.push_back(SE.getTruncateExpr(AddOp, Ty));
    return SE.getAddExpr(Operands);
  }

  // trunc (x * y * z * ...) ---> trunc (x) * trunc(y) * trunc(z) * ...
  if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *MulOp : SM->operands())
      Operands.push_back(SE.getTruncateExpr(MulOp, Ty));
    return SE.getMulExpr(Operands);
  }

  // Truncation can introduce wrapping, so the narrowed recurrence must drop
  // the original no-wrap flags.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *RecOp : AddRec->operands())
      Operands.push_back(SE.getTruncateExpr(RecOp, Ty));
    return SE.getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
  }

  return S;
}

/// Push zero-extension inward where it is known to be lossless:
///   zext({a,+,b}<nuw>) -> {zext(a),+,zext(b)}  (nuw guarantees no wrap)
///   zext(a /u b)       -> zext(a) /u zext(b)   (unsigned division of the
///                                               widened operands is exact)
/// Otherwise, zero-extend the canonicalized operand as a whole.
const SCEV *
ScalarEvolutionCanon::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  const SCEV *betterSCEV = canonicalize(S->getOperand());
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(betterSCEV);
  auto *Ty = S->getType();
  // zext({a, b} nuw) -> {zext(a), zext(b)}
  if (AddRec && AddRec->hasNoUnsignedWrap()) {
    SmallVector<const SCEV *, 4> Operands;
    for (const SCEV *Op : AddRec->operands())
      Operands.push_back(SE.getZeroExtendExpr(Op, Ty));
    return SE.getAddRecExpr(Operands, AddRec->getLoop(),
                            AddRec->getNoWrapFlags());
  }

  // zext(a/b) -> zext(a) / zext(b)
  if (auto *UDiv = dyn_cast<SCEVUDivExpr>(betterSCEV)) {
    auto *LHS = canonicalize(UDiv->getLHS());
    auto *RHS = canonicalize(UDiv->getRHS());
    // (Removed an unused SmallVector local that the original declared here.)
    return SE.getUDivExpr(SE.getZeroExtendExpr(LHS, Ty),
                          SE.getZeroExtendExpr(RHS, Ty));
  }

  return SE.getZeroExtendExpr(betterSCEV, Ty);
}

/// Canonicalize the operand, then rebuild the sign-extension on top of it.
const SCEV *
ScalarEvolutionCanon::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  const SCEV *CanonOp = canonicalize(S->getOperand());
  return SE.getSignExtendExpr(CanonOp, S->getType());
}

/// Canonicalize each addend and let getAddExpr re-fold the sum, preserving
/// the original no-wrap flags.
const SCEV *ScalarEvolutionCanon::visitAddExpr(const SCEVAddExpr *S) {
  SmallVector<const SCEV *, 4> CanonOps;
  for (const SCEV *Op : S->operands())
    CanonOps.push_back(canonicalize(Op));
  return SE.getAddExpr(CanonOps, S->getNoWrapFlags());
}

/// Canonicalize a product: canonicalize each factor, sort by complexity, and
/// then distribute SCEVAddExpr factors through the product — expansion such
/// as C*(V1+V2) -> C*V1 + C*V2 that ScalarEvolution::getMulExpr does not
/// perform on its own.  The case analysis below enumerates the factor
/// orderings that GroupByComplexity can produce.
const SCEV *ScalarEvolutionCanon::visitMulExpr(const SCEVMulExpr *S) {
  SmallVector<const SCEV *, 4> Operands;
  for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i) {
    const SCEV *betterSCEV = canonicalize(S->getOperand(i));
    Operands.push_back(betterSCEV);
  }

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Operands, &LI, DT);

  // Handle SCEVMulExpr expansion that is not done by ScalarEvolution.
  // Case 1: SCEVConstant * (SCEVAddExpr Multiplication Chain)
  // Case 2: SCEVConstant * (Other SCEVs Chain) * (SCEVAddExpr Multiplication
  // Chain)
  // Case 3: SCEVConstant * (SCEVAddExpr Multiplication Chain) * (Other
  // SCEVs Chain)
  // Case 4: SCEVConstant * (Other SCEVs Chain) * (SCEVAddExpr
  // Multiplication Chain) * (Other SCEVs Chain)
  // Case 5: SCEVAddExpr Multiplication Chain
  // Case 6: (Other SCEVs Chain) * (SCEVAddExpr Multiplication Chain)
  // Case 7: (SCEVAddExpr Multiplication Chain) * (Other SCEVs Chain)
  // Case 8: ((Other SCEVs Chain) * SCEVAddExpr Multiplication
  // Chain) * (Other SCEVs Chain)
  // SCEVAddExpr Multiplication Chain: It contains at least 1 SCEVAddExpr.
  //                                   Something like this: SCEVAddExpr * ... *
  //                                   SCEVAddExpr
  // Other SCEVs Chain               : SCEVs that are not SCEVConstant and not
  // SCEVAddExpr Note: We do not need to consider multiple SCEVConstants case
  //                   since this should already be folded in Scalar Evolution.
  //                   And also special cases for SCEVConstant has already been
  //                   done. e.g. SCEVConstant has zero value.
  unsigned Idx = 0;
  SmallVector<const SCEV *, 4> NewOps;
  // Note: Idx++ consumes operand 0 even when it is not a constant; Idx is
  // re-zeroed after this if-block, so that is harmless.
  if (const auto *FC = dyn_cast<SCEVConstant>(Operands[Idx++])) {
    // Skip SCEVTruncateExpr, SCEVZeroExtendExpr, SCEVSignExtendExpr
    while (Idx < Operands.size() && Operands[Idx]->getSCEVType() < scAddExpr)
      ++Idx;

    if (Idx < Operands.size())
      if (const auto *MAdd = dyn_cast<SCEVAddExpr>(Operands[Idx])) {
        // Distribute the leading constant into the first add-expr factor.
        for (auto &AddOp : MAdd->operands())
          NewOps.push_back(SE.getMulExpr(FC, AddOp, SCEV::FlagAnyWrap));
        const SCEV *NewAddS = SE.getAddExpr(NewOps, SCEV::FlagAnyWrap);

        // C*(V1+V2+...+VN) -> C*V1 + C*V2 + ... +C*VN
        if (Operands.size() == 2)
          return NewAddS;
        // (C*V1 + C*V2 + ... +C*VN) * (V1+V2+...+VN) *... * (V1+V2+...+VN)
        // C*Others*(V1+V2+...+VN) -> Others*(C*V1 + C*V2 + ... +C*VN)
        Operands.erase(Operands.begin() + Idx);
        Operands.erase(Operands.begin());
        Operands.push_back(NewAddS);
        GroupByComplexity(Operands, &LI, DT);
      }
  }

  // Skip SCEVTruncateExpr, SCEVZeroExtendExpr, SCEVSignExtendExpr
  // Others*(V1+V2+...+VN) *... * (V1+V2+...+VN)
  Idx = 0;
  bool OthersBeforeAdd = false;
  while (Idx < Operands.size() && Operands[Idx]->getSCEVType() < scAddExpr) {
    OthersBeforeAdd = true;
    ++Idx;
  }

  if (Idx < Operands.size())
    if (const auto *FAdd = dyn_cast<SCEVAddExpr>(Operands[Idx])) {
      // (V1+V2+...+VN) *... * (V1+V2+...+VN)
      // Indices of factors that have been folded into the expanded sum and
      // must be removed from Operands afterwards.
      SmallVector<unsigned, 4> DeprecatedOps;

      // Only deal with the add expression
      const auto *LHSAdd = FAdd;
      const auto *CurrentExpandAdd = FAdd;
      DeprecatedOps.push_back(Idx++);

      // Repeatedly multiply the accumulated sum into each following add-expr
      // factor (every SCEVType below scMulExpr), expanding the product of
      // sums term by term.
      while (LHSAdd && Idx < Operands.size() &&
             Operands[Idx]->getSCEVType() < scMulExpr) {
        if (const auto *RHSAdd = dyn_cast<SCEVAddExpr>(Operands[Idx])) {
          SmallVector<const SCEV *, 4> LHSAddOps(LHSAdd->operands());
          SmallVector<const SCEV *, 4> RHSAddOps(RHSAdd->operands());
          SmallVector<const SCEV *, 4> TmpOps;
          // Cross-multiply every term of the two sums.
          for (auto &LAOp : LHSAddOps)
            for (auto &RAOp : RHSAddOps) {
              TmpOps.push_back(SE.getMulExpr(LAOp, RAOp, SCEV::FlagAnyWrap));
              DEBUG(dbgs() << "NewMulS: "
                           << *SE.getMulExpr(LAOp, RAOp, SCEV::FlagAnyWrap)
                           << "\n");
            }
          // If getAddExpr factors out what we would like to expand, we give up
          // doing expansion
          if ((LHSAdd = dyn_cast<SCEVAddExpr>(
                   SE.getAddExpr(TmpOps, SCEV::FlagAnyWrap)))) {
            const SCEV *NewAddS = canonicalize(LHSAdd);
            LHSAdd = dyn_cast<SCEVAddExpr>(NewAddS);
            CurrentExpandAdd = LHSAdd;
            DeprecatedOps.push_back(Idx);
          }
        }
        ++Idx;
      }

      // All SCEVs in Operands are SCEVAddExpr
      if (Idx == (Operands.size() - 1) && !OthersBeforeAdd &&
          Operands[Idx]->getSCEVType() == scAddExpr)
        return CurrentExpandAdd;

      // Update Operands list for potential chance of expanding with SCEVUnknown
      // (erase back-to-front so earlier indices stay valid).
      if (!DeprecatedOps.empty()) {
        for (auto OpId = DeprecatedOps.rbegin(); OpId != DeprecatedOps.rend();
             ++OpId)
          Operands.erase(Operands.begin() + *OpId);
        GroupByComplexity(Operands, &LI, DT);
      }

      // Skip SCEVMulExpr, SCEVUDivExpr, SCEVAddRecExpr, SCEVUMaxExpr,
      // SCEVSMaxExpr
      Idx = 0;
      while (Idx < Operands.size() && Operands[Idx]->getSCEVType() < scUnknown)
        ++Idx;

      // Only deal with SCEVUnknown
      DeprecatedOps.clear();
      while (Idx < Operands.size() &&
             Operands[Idx]->getSCEVType() < scCouldNotCompute) {
        SmallVector<const SCEV *, 4> LHSAddOps(CurrentExpandAdd->operands());
        SmallVector<const SCEV *, 4> TmpOps;
        // Distribute the unknown factor over each term of the expanded sum.
        for (auto &LAOp : LHSAddOps)
          TmpOps.push_back(
              SE.getMulExpr(LAOp, Operands[Idx], SCEV::FlagAnyWrap));
        if (const auto *NewAddS = dyn_cast<SCEVAddExpr>(
                SE.getAddExpr(TmpOps, SCEV::FlagAnyWrap))) {
          CurrentExpandAdd = NewAddS;
          DeprecatedOps.push_back(Idx);
        }
        ++Idx;
      }

      if (!DeprecatedOps.empty())
        for (auto OpId = DeprecatedOps.rbegin(); OpId != DeprecatedOps.rend();
             ++OpId)
          Operands.erase(Operands.begin() + *OpId);
      Operands.push_back(CurrentExpandAdd);

      // (U*V1+U*V2+...+U*VN)
      if (Operands.size() < 2)
        return CurrentExpandAdd;

      // Others * (V1+V2+...+VN) *... * (V1+V2+...+VN)
      // (V1+V2+...+VN) *... * (V1+V2+...+VN) * Others
      // Others * (V1+V2+...+VN) *... * (V1+V2+...+VN) * Others
      // And if we fail to expand the SCEVMulExpr at some point since the
      // getAddExpr factors out what we would like to expand, we give up
      // further expanding and return the SCEVMulExpr.
      return SE.getMulExpr(Operands, S->getNoWrapFlags());
    }

  return SE.getMulExpr(Operands, S->getNoWrapFlags());
}

/// For each operand V of the add/addrec \p LHS divided by the power-of-two
/// constant \p RHSC, append V /u RHSC to \p UDs and V %u RHSC to \p Mods.
/// Callers reassemble these into quotient and remainder expressions.
void ScalarEvolutionCanon::sinkUDivInAdd(SmallVectorImpl<const SCEV *> &UDs,
                                         SmallVectorImpl<const SCEV *> &Mods,
                                         const SCEVNAryExpr *LHS,
                                         const SCEVConstant *RHSC) {
  assert((isa<SCEVAddExpr>(LHS) || isa<SCEVAddRecExpr>(LHS)) &&
         "Expect SCEVAddExpr or SCEVAddRecExpr!");
  assert(RHSC->getAPInt().isPowerOf2() &&
         "Expect non-negative and power of 2 divisor!");
  for (const SCEV *AddOp : LHS->operands()) {
    UDs.emplace_back(SE.getUDivExpr(AddOp, RHSC));
    Mods.emplace_back(SE.getURemExpr(AddOp, RHSC));
  }
}

// Eliminate the same value(and the same value after negate) in dividend.
// That is, (a*b)/b ---> a, where `a` and `b` are SCEVConstant/SCEVUnknown.
// And (a*-c)/c ---> -a, where `c` is SCEVConstant.
// TODO: Extend to other SCEV expression
static const SCEV *simplifyMultiplyUDiv(ScalarEvolution &SE,
                                        const SCEVUDivExpr *S) {
  auto *RHS = S->getRHS();
  if (!isa<SCEVUnknown>(RHS) && !isa<SCEVConstant>(RHS))
    return S;

  auto *LHSM = dyn_cast<SCEVMulExpr>(S->getLHS());
  if (!LHSM)
    return S;

  // (a*b)/b ---> a
  // Collect every multiplication operand that is not the divisor itself.
  SmallVector<const SCEV *, 4> Operands;
  auto NumMulOps = LHSM->getNumOperands();
  for (unsigned i = 0; i != NumMulOps; ++i) {
    const SCEV *Op = LHSM->getOperand(i);
    if (RHS != Op)
      Operands.emplace_back(Op);
  }

  // Cancel only when the divisor occurred exactly once (size == NumMulOps-1)
  // and the product cannot wrap unsigned, which makes the cancellation exact.
  if (LHSM->hasNoUnsignedWrap() && !Operands.empty() &&
      (Operands.size() == (NumMulOps - 1)))
    return SE.getMulExpr(Operands);

  Operands.clear();
  auto NegRHS = SE.getNegativeSCEV(RHS);
  bool ElimNeg = false;

  // (a*-c)/c ---> -a
  // Here the divisor itself never appears as a factor (size stays NumMulOps),
  // but its negation does and is replaced by the constant -1.
  for (unsigned i = 0; i != NumMulOps; ++i) {
    const SCEV *Op = LHSM->getOperand(i);
    if (RHS != Op) {
      if (NegRHS == Op) {
        Op = SE.getConstant(Op->getType(), -1, /*isSigned=*/true);
        ElimNeg = true;
      }
      Operands.emplace_back(Op);
    }
  }

  // NOTE(review): this rewrite gates on nsw while the division being
  // simplified is unsigned (udiv) — confirm the signed no-wrap flag really
  // justifies the cancellation here.
  if (LHSM->hasNoSignedWrap() && !Operands.empty() &&
      (Operands.size() == NumMulOps) && ElimNeg)
    return SE.getMulExpr(Operands);

  return S;
}

/// Canonicalize an unsigned division.  Two families of rewrites:
///  1. Cancellation of matching factors via simplifyMultiplyUDiv
///     ((a*b)/b -> a and (a*-c)/c -> -a).
///  2. For a power-of-two constant divisor, distribute the division over the
///     operands of an add / addrec and recombine quotient and remainder
///     parts.
// TODO: Exponent constants can be introduced to do further simplification for
// SCEVUDivExpr
const SCEV *ScalarEvolutionCanon::visitUDivExpr(const SCEVUDivExpr *S) {
  const SCEV *RHS = S->getRHS();
  const SCEV *LHS = S->getLHS();

  // (The original bound the dyn_cast result to an unused variable; isa<> is
  // all that is needed here.)
  if (isa<SCEVUnknown>(RHS)) {
    const SCEV *M = simplifyMultiplyUDiv(SE, S);
    if (M != S)
      return canonicalize(M);

  } else if (const auto *RHSC = dyn_cast<SCEVConstant>(RHS)) {
    const SCEV *M = simplifyMultiplyUDiv(SE, S);
    if (M != S)
      return canonicalize(M);

    const APInt &C = RHSC->getAPInt();
    if (!C.isPowerOf2())
      return S;

    // Transform (a + b) /u d into
    // ((a /u d) + (b /u d)) %u (2^(n - log( d ))) + (((a % d) + (b % d)) /u d)
    // when d is proved to be power of 2
    uint64_t BW = getTypeSizeInBits(S->getType());
    // NOTE(review): the comment above calls for 2^(n - log2(d)), but
    // PowerOf2Ceil(BW - C.logBase2()) rounds the *exponent* up to a power of
    // two (e.g. BW=32, d=4 yields 32, not 2^30).  Behavior is preserved here;
    // confirm the intended modulus before changing it.
    uint64_t EmuOverFlow = PowerOf2Ceil(BW - C.logBase2());
    auto *EmuOverFlowSCEV = SE.getConstant(Type::getIntNTy(getContext(), BW),
                                           EmuOverFlow, false);
    if (const auto *LHSA = dyn_cast<SCEVAddExpr>(LHS)) {
      SmallVector<const SCEV *, 4> UDs;
      SmallVector<const SCEV *, 4> Mods;
      SmallVector<const SCEV *, 4> Operands;
      sinkUDivInAdd(UDs, Mods, LHSA, RHSC);

      auto *QPart = SE.getURemExpr(SE.getAddExpr(UDs), EmuOverFlowSCEV);
      auto *ModPart = SE.getUDivExpr(SE.getAddExpr(Mods), RHSC);
      // No progress: the remainder part folded back to the original udiv.
      if (ModPart == S)
        return S;

      Operands.emplace_back(QPart);
      Operands.emplace_back(ModPart);
      return SE.getAddExpr(Operands);
    }

    // Transform {a , b} /u d into
    // { (a /u d), (b /u d) } %u (2^(n - log( d ))) + ({(a % d), (b % d)} /u d)
    // when d is proved to be power of 2
    if (const auto *LHSA = dyn_cast<SCEVAddRecExpr>(LHS)) {
      SmallVector<const SCEV *, 4> UDs;
      SmallVector<const SCEV *, 4> Mods;
      SmallVector<const SCEV *, 4> Operands;
      sinkUDivInAdd(UDs, Mods, LHSA, RHSC);

      // The rebuilt addrecs are only valid if every piece is invariant in L.
      auto *L = LHSA->getLoop();
      for (auto *UD : UDs)
        if (!SE.isLoopInvariant(UD, L))
          return S;
      for (auto *Mod : Mods)
        if (!SE.isLoopInvariant(Mod, L))
          return S;

      auto *QPart = SE.getURemExpr(SE.getAddRecExpr(UDs, L, SCEV::FlagAnyWrap),
                                   EmuOverFlowSCEV);
      auto *ModPart = SE.getUDivExpr(
          SE.getAddRecExpr(Mods, L, SCEV::FlagAnyWrap), RHSC);

      // If modulo part is proven to be zero, return the quotient part is
      // enough.
      // TODO: Generalize this and add to scalar evolution directly.
      if (const auto *Mod = dyn_cast<SCEVUDivExpr>(ModPart))
        if (const auto *ModPartLHS = dyn_cast<SCEVAddRecExpr>(Mod->getLHS()))
          if (SE.isKnownNonNegative(ModPartLHS)
              && SE.getUnsignedRangeMax(ModPartLHS).ult(C))
            return QPart;

      if (ModPart == S)
        return S;

      // NOTE(review): the SCEVAddExpr path above pushes QPart (the %u-wrapped
      // quotient), but this path pushes the raw addrec and leaves QPart
      // unused on this branch — confirm which is intended.
      Operands.emplace_back(SE.getAddRecExpr(UDs, L, SCEV::FlagAnyWrap));
      Operands.emplace_back(ModPart);
      return SE.getAddExpr(Operands);
    }
  }
  return S;
}

/// Canonicalize each recurrence operand, then rebuild the addrec on the same
/// loop with the original wrap flags.
const SCEV *ScalarEvolutionCanon::visitAddRecExpr(const SCEVAddRecExpr *S) {
  SmallVector<const SCEV *, 4> CanonOps;
  for (const SCEV *Op : S->operands())
    CanonOps.push_back(canonicalize(Op));
  return SE.getAddRecExpr(CanonOps, S->getLoop(), S->getNoWrapFlags());
}

/// Canonicalize each operand of the signed max and rebuild it.
const SCEV *ScalarEvolutionCanon::visitSMaxExpr(const SCEVSMaxExpr *S) {
  SmallVector<const SCEV *, 4> CanonOps;
  for (const SCEV *Op : S->operands())
    CanonOps.push_back(canonicalize(Op));
  return SE.getSMaxExpr(CanonOps);
}

/// Canonicalize each operand of the unsigned max and rebuild it.
const SCEV *ScalarEvolutionCanon::visitUMaxExpr(const SCEVUMaxExpr *S) {
  SmallVector<const SCEV *, 4> CanonOps;
  for (const SCEV *Op : S->operands())
    CanonOps.push_back(canonicalize(Op));
  return SE.getUMaxExpr(CanonOps);
}

/// Opaque IR values have no structure to canonicalize; return as-is.
const SCEV *ScalarEvolutionCanon::visitUnknown(const SCEVUnknown *S) {
  return S;
}

/// Nothing to do for an uncomputable expression; propagate it unchanged.
const SCEV *
ScalarEvolutionCanon::visitCouldNotCompute(const SCEVCouldNotCompute *S) {
  return S;
}

/// Print an SMT-LIB (z3) script to stderr that checks whether \p betterSCEV
/// and \p S denote the same value: declare every bitvector variable, emit the
/// collected function definitions and assumptions, and assert the negated
/// equality — "unsat" from z3 then proves the two SCEVs are equivalent.
void ScalarEvolutionCanon::checkEquivalence(const SCEV *betterSCEV,
                                            const SCEV *S) const {
  DEBUG(dbgs() << "Z3 Check for\nSCEVCanon: " << *betterSCEV << "\nSCEV: " << *S << '\n');

  // Clear previous assumption for the new SCEV
  Z3WRITER.assumption.clear();

  std::string Betterz3 = Z3WRITER.convertToZ3(betterSCEV);
  std::string z3 = Z3WRITER.convertToZ3(S);

  // Do not support smax as an operand
  if (Z3WRITER.doNotSupport(Betterz3) || Z3WRITER.doNotSupport(z3)) {
    Z3WRITER.assumption.clear();
    DEBUG(dbgs() << "Do not support smax as an operand in z3 check\n");
    return;
  }

  // Declare variables used in z3
  errs() << "(push)\n";

  // Iterate by const reference throughout: the elements are std::strings and
  // the original by-value range loops copied one per iteration.
  for (const auto &Entry : Z3WRITER.context) {
    errs() << "(declare-const " + Entry.first + " (_ BitVec " +
                  std::to_string(Entry.second) + "))\n";
  }

  for (const auto &CtxAssume : Z3WRITER.ctxtAssumption)
    errs() << "(assert " + CtxAssume + ")\n";

  // Define functions used in z3
  for (const auto &FuncDef : Z3WRITER.functionDecAndDef)
    errs() << FuncDef + "\n";

  // Assumption used in z3
  for (const auto &Assume : Z3WRITER.assumption)
    errs() << "(assert " + Assume + ")\n";


  // Compare and check if the 2 equations are equivalent
  errs() << "(assert (not (= " << Betterz3 << " " << z3 << ")))\n";
  errs() << "(check-sat)\n";
  errs() << "(pop)\n";
}

/// Look up a previously computed canonical SCEV for \p V; returns null when
/// nothing has been cached yet.
const SCEV *ScalarEvolutionCanon::getExistingCanonSCEV(Value *V) {
  assert(SE.isSCEVable(V->getType()) && "Value is not SCEVable!");
  auto Entry = ValueExprMap.find_as(V);
  return Entry == ValueExprMap.end() ? nullptr : Entry->second;
}

/// Canonicalize \p S.  When the intersection of its unsigned and signed
/// ranges pins it to a single value, fold straight to that constant;
/// otherwise run the full canonicalizer.
const SCEV *ScalarEvolutionCanon::getCanonSCEV(const SCEV *S) {
  const auto Range =
      SE.getUnsignedRange(S).intersectWith(SE.getSignedRange(S));
  if (const auto *Single = Range.getSingleElement())
    return SE.getConstant(*Single);

  return canonicalize(S);
}

/// Canonical SCEV for an IR value, memoized in ValueExprMap.
const SCEV *ScalarEvolutionCanon::getCanonSCEV(Value *V) {
  assert(SE.isSCEVable(V->getType()) && "Value is not SCEVable!");

  // Serve from the cache when possible.
  if (const SCEV *Cached = getExistingCanonSCEV(V))
    return Cached;

  const SCEV *Canon = getCanonSCEV(SE.getSCEV(V));
  if (Canon)
    ValueExprMap.insert({V, Canon});
  return Canon;
}

/// Print every SCEVable, non-compare instruction together with its original
/// SCEV, its canonicalized SCEV, and the value ranges of each.
void ScalarEvolutionCanon::print(raw_ostream &OS) const {
  // getSCEV/getCanonSCEV may lazily create expressions, which conflicts with
  // the const qualifier.  Nothing observable is modified and this is only
  // invoked from within the pass, so casting away const is safe here.
  ScalarEvolutionCanon &Self = *const_cast<ScalarEvolutionCanon *>(this);

  // Append " U: <range> S: <range>" for any computable expression.
  auto PrintRanges = [&](const SCEV *Expr) {
    if (isa<SCEVCouldNotCompute>(Expr))
      return;
    OS << " U: ";
    SE.getUnsignedRange(Expr).print(OS);
    OS << " S: ";
    SE.getSignedRange(Expr).print(OS);
  };

  for (auto &I : instructions(F)) {
    if (!SE.isSCEVable(I.getType()) || isa<CmpInst>(I))
      continue;
    OS << I << '\n';
    const SCEV *Orig = SE.getSCEV(&I);
    const SCEV *Canon = Self.getCanonSCEV(Orig);
    OS << "SCEV      -->  " << *Orig;
    PrintRanges(Orig);
    OS << "\nSCEVCanon -->  " << *Canon;
    PrintRanges(Canon);
    OS << '\n';
  }
}

/// Factory used by the legacy pass manager to instantiate this pass.
Pass *llvm::createScalarEvolutionCanonWrapperPassPass() {
  return new ScalarEvolutionCanonWrapperPass();
}

// Legacy pass-manager registration: unique pass ID plus the dependency list
// that mirrors getAnalysisUsage above.
char ScalarEvolutionCanonWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(ScalarEvolutionCanonWrapperPass, "scalar-evolution-canon",
                      "Scalar Evolution Canonicalization", false, true)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(ScalarEvolutionCanonWrapperPass, "scalar-evolution-canon",
                    "Scalar Evolution Canonicalization", false, true)
