#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Async/IR/Async.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/IR/TypeRange.h"
#include "mlir/IR/ValueRange.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "sst/SST.h"

#include "crypto/Crypto.h"
#include "hyper/Hyper.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Casting.h"

#include <algorithm>
#include <iostream>
#include <unordered_map>
#include <vector>

using namespace mlir;
using namespace hyper;

//===----------------------------------------------------------------------===//
// Rewrite Pattern
//===----------------------------------------------------------------------===//

namespace {

#define BLOCK_SIZE 100 // fixed blocksize
#define GRID_SIZE 1    // fixed gridsize

class ForOpSSTDeviceSchedulePattern : public OpRewritePattern<hyper::ForOp> {
private:
  // NOTE(review): this helper is currently a no-op stub. The `mapper`
  // parameter is never read, and although an OpBuilder is constructed and its
  // insertion point is set to the start of `block`, no operations are ever
  // created with it, so calling this function has no observable effect.
  // Presumably it was intended to clone/rebuild the block's operations through
  // `mapper` — confirm intent, then either implement or remove it.
  void rebuild_block(IRMapping& mapper, Block& block){
    // Builder anchored at the top of `block`; unused beyond this point.
    OpBuilder builder(block.getParent()->getContext());
    builder.setInsertionPointToStart(&block);

  }
public:
  using OpRewritePattern<hyper::ForOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(hyper::ForOp op,
                                PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();
    auto ctx = rewriter.getContext();
    // operands of `hyper.for` op
    ValueRange memIns = op.getMemIn();
    ValueRange memOuts = op.getMemOut();
    Value upperBound = op.getUpperBound();
    Value lowerBound = op.getLowerBound();

    Value c0 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/0);
    Value c1 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/1);
    Value c8 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/8);
    Value c16 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/16);
    Value c24 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/24);
    Value c56 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/56);
    Value c64 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/64);
    Value c72 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/72);
    Value c104 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/104);
    Value c112 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/112);
    Value c120 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/120);
    Value c128 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/128);
    Value c136 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/136);
    Value c144 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/144);
    Value c152 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/152);

    Value gridSize =
        rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/GRID_SIZE);
    Value blockSize =
        rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/BLOCK_SIZE);

    // get information needed for splitting memref
    auto devices = op.getDevices();
    Value N = rewriter.create<arith::SubIOp>(loc, /*lhs=*/upperBound,
                                             /*rhs=*/lowerBound);
    Value Nfp = rewriter.create<arith::IndexCastOp>(
        loc, /*out=*/rewriter.getI32Type(), /*in=*/N);
    Nfp = rewriter.create<arith::SIToFPOp>(loc, /*out=*/rewriter.getF32Type(),
                                           /*in=*/Nfp);

    // args for sstsetarg
    Value nInput;
    Value nOutput;
    Value plainLen;
    Value plainLenIdx;
    Value devMemIn;
    Value devMemOut;
    Value keyBase;  /*for aes*/
    Value sBoxBase; /*for aes*/

    // We need to extract plainLen from a different op for the sha1 op
    enum CryptoOpType {
      OTHER,
      SHA1_CRACK,
      MD5_CRACK,
      SM3_CRACK,
      AES_MUTLI_CRACK,
    };

    enum CryptoCore {
      SHA1,
      MD5,
      SM3,
      AES
    };

    CryptoCore cryptoCore = SHA1;
    CryptoOpType cryptoOpType = OTHER;

    Value c0i8 = rewriter.create<arith::ConstantOp>(
        loc, /*value=*/rewriter.getIntegerAttr(rewriter.getI8Type(), 0));
    bool mulIMet = false;
    bool rankMet = false;
    ::mlir::Region &loopBody = op.getLoopBody();
    for (::mlir::Operation &innerop : loopBody.front()) {
      // This assume that the first arith::muli operation is calculating
      // plainLen. This is a bit hacky, but I don't know the best practice to
      // extract the plainLenIdx value here.
      if (auto mulIOp = dyn_cast<arith::MulIOp>(&innerop)) {
        if (!mulIMet) {
          mulIMet = true;
          plainLenIdx = mulIOp->getOperand(1);
          plainLen = rewriter.create<arith::IndexCastOp>(
              loc, rewriter.getI32Type(), plainLenIdx);
        }
        continue;
      }

      if (auto any = dyn_cast<crypto::MD5OP>(&innerop)) {
        cryptoCore = MD5;
        continue;
      }

      if (auto any = dyn_cast<crypto::SM3OP>(&innerop)) {
        cryptoCore = SM3;
        continue;
      }

      // aesmutli and aescrack have same sstsetarg
      if (auto any = dyn_cast<crypto::AESOP>(&innerop)) {
        cryptoOpType = AES_MUTLI_CRACK;
        cryptoCore = AES;
        continue;
      }

      // This assume that the first memref::rank operation is keyBase, the second memref::rank operation is sBoxBase.
      // This is a bit hacky, but I don't know the best practice to extract the keyBase and sBoxBase here.
      if (auto rankOp = dyn_cast<memref::RankOp>(&innerop)) {
        if (!rankMet) {
          rankMet = true;
          keyBase = rankOp->getOperand(0);
        }
        else {
          sBoxBase = rankOp->getOperand(0);
        }
      }

      // Check if the operation is the desired crypto::PadMessagePadone_OP
      if (auto padOp = dyn_cast<crypto::PadMessagePadone_OP>(&innerop)) {
        plainLen = padOp.getMessageLen();
        plainLenIdx = rewriter.create<arith::IndexCastOp>(
            loc, /*out=*/rewriter.getIndexType(), /*in=*/plainLen);
        continue;
      }

      // Check OP is crack type
      if (auto isCrackOp = dyn_cast<hyper::ReduceOp>(&innerop)) {
        switch (cryptoCore) {
          case SHA1:
            cryptoOpType = SHA1_CRACK;
            break;
          case MD5:
            cryptoOpType = MD5_CRACK;
            break;
          case SM3:
            cryptoOpType = SM3_CRACK;
            break;
          case AES:
            cryptoOpType = AES_MUTLI_CRACK;
            break;
          default:
            assert(false);
        }
      }
    }

    // index needed for splitting memref
    Value partStart = lowerBound;
    Value partEnd = lowerBound;

    // memory container for the "shared" memout
    SmallVector<Value> sharedMemOuts;
    // get the bool array of `isSharedMem` attribute to indicate whether the
    // corresponding memref is shared or not
    SmallVector<bool> isSharedMeminArray, isSharedMemoutArray;
    if (op.getIsSharedMem().has_value()) { // if `isSharedMem` attribute exists
      auto isSharedMem = op.getIsSharedMem().value();
      // construct the bool array for `memIns`
      for (size_t i = 0; i < memIns.size(); i++) {
        isSharedMeminArray.push_back(
            isSharedMem[i].dyn_cast<BoolAttr>().getValue());
      }
      // construct the bool array for `memOuts`
      for (size_t i = memIns.size(); i < isSharedMem.size(); i++) {
        bool isShared = isSharedMem[i].dyn_cast<BoolAttr>().getValue();
        if (isShared) {
          sharedMemOuts.push_back(memOuts[i - memIns.size()]);
        }
        isSharedMemoutArray.push_back(isShared);
      }
    }

    // memory container for the "reduce" operation
    SmallVector<SmallVector<Value>> resultsOfEachDev;
    // split memref and create new `scf.for` ops or `gpu.launch` ops for each
    // device
    for (size_t i = 0; i < devices.size(); i++) {
      // check validity of `devices` attr
      auto deviceInfo = devices[i];
      auto dictAttr = deviceInfo.dyn_cast_or_null<DictionaryAttr>();
      auto targetIdAttr = dictAttr.get("targetId");
      auto targetConfigAttr = dictAttr.get("targetConfig");
      auto dutyRatioAttr = dictAttr.get("dutyRatio");
      auto targetId = targetIdAttr.dyn_cast_or_null<StringAttr>().getValue();

      // don't generate related functions when dutyratio is 0
      auto dutyRatioFloatAttr = dutyRatioAttr.dyn_cast_or_null<FloatAttr>();
      float dutyRatiofl = dutyRatioFloatAttr.getValueAsDouble();
      if (dutyRatiofl == 0) {
        // jump out this cycle
        continue;
      }

      /// if any "shared" memOut exists, we should allocate each of them for
      /// each device on the host for the "reduce" operation
      SmallVector<Value> hostSharedMemOuts;
      for (auto outShared : llvm::zip(memOuts, isSharedMemoutArray)) {
        auto memOut = std::get<0>(outShared);
        auto isShared = std::get<1>(outShared);
        if (isShared) {
          Value outMemLen = rewriter.create<memref::DimOp>(
              loc, /*source=*/memOut, /*index=*/0);
          memref::AllocOp allocOp = rewriter.create<memref::AllocOp>(
              loc, /*memrefType=*/memOut.getType(),
              /*dynamicSizes=*/ValueRange{outMemLen});
          allocOp->setAttr("operandSegmentSizes",
                           rewriter.getDenseI32ArrayAttr({1, 0}));
          hostSharedMemOuts.push_back(allocOp.getResult());
          // Value alloc = rewriter.create<memref::AllocOp>(loc,
          // /*memrefType=*/memOut.getType(),
          // /*dynamicSizes=*/ValueRange{outMemLen});
          // hostSharedMemOuts.push_back(alloc);
        }
      }
      resultsOfEachDev.push_back(hostSharedMemOuts);

      // determine partEnd
      if (i == devices.size() - 1) {
        partEnd = upperBound;
      } else {
        Value dutyRatio = rewriter.create<arith::ConstantOp>(
            loc, /*result=*/rewriter.getF32Type(),
            /*value=*/dutyRatioAttr.dyn_cast<FloatAttr>());
        Value dutyValue =
            rewriter.create<arith::MulFOp>(loc, /*lhs=*/Nfp, /*rhs=*/dutyRatio);
        dutyValue = rewriter.create<arith::FPToSIOp>(
            loc, /*out=*/rewriter.getI32Type(), /*in=*/dutyValue);
        dutyValue = rewriter.create<arith::IndexCastOp>(
            loc, /*out=*/rewriter.getIndexType(), /*in=*/dutyValue);
        partEnd = rewriter.create<arith::AddIOp>(loc, /*lhs=*/partStart,
                                                 /*rhs=*/dutyValue);
      }

      // split memref
      Value partNum = rewriter.create<arith::SubIOp>(loc, /*lhs=*/partEnd,
                                                     /*rhs=*/partStart);
      // multiple memIn allocation
      SmallVector<Value> hostMemIns;
      SmallVector<Value> devMemIns;
      for (auto inShared : llvm::zip(memIns, isSharedMeminArray)) {
        auto memIn = std::get<0>(inShared);
        auto isShared = std::get<1>(inShared);

        // get the length of input memory
        Value inMemLen =
            rewriter.create<memref::DimOp>(loc, /*source=*/memIn, /*index=*/0);

        if (isShared) { // alloc exact the same memory size as host
          // allocate input memory on device
          Value devMemIn = rewriter.create<hyper::AllocOp>(
              loc, /*memrefType=*/memIn.getType(),
              /*dynamicSizes=*/ValueRange{inMemLen},
              /*symbolOperands=*/ValueRange{},
              /*device=*/rewriter.getStringAttr(targetId));
          devMemIns.push_back(devMemIn);
          hostMemIns.push_back(memIn);
        } else { // alloc part of memory splited from host
          nInput =
              rewriter.create<arith::DivUIOp>(loc, /*lhs=*/inMemLen, /*rhs=*/N);
          Value partSizeIn = rewriter.create<arith::MulIOp>(
              loc, /*lhs=*/partNum, /*rhs=*/nInput);
          Value partMemIn = rewriter.create<memref::ViewOp>(
              loc, /*resultType0=*/memIn.getType(), /*source=*/memIn,
              /*byte_shift=*/
              rewriter.create<arith::MulIOp>(loc, /*lhs=*/partStart,
                                             /*rhs=*/nInput),
              /*sizes=*/ValueRange{partSizeIn});
          hostMemIns.push_back(partMemIn);
          // allocate input memory on device
          devMemIn = rewriter.create<hyper::AllocOp>(
              loc, /*memrefType=*/memIn.getType(),
              /*dynamicSizes=*/ValueRange{partSizeIn},
              /*symbolOperands=*/ValueRange{},
              /*device=*/rewriter.getStringAttr(targetId));
          devMemIns.push_back(devMemIn);
        }
      }
      // multiple memOut allocation
      SmallVector<Value> hostUnsharedMemOuts;
      SmallVector<Value> devMemOuts;
      SmallVector<Value> devSharedMemOuts;
      SmallVector<Value> devUnsharedMemOuts;
      for (auto outShared : llvm::zip(memOuts, isSharedMemoutArray)) {
        auto memOut = std::get<0>(outShared);
        auto isShared = std::get<1>(outShared);

        Value outMemLen =
            rewriter.create<memref::DimOp>(loc, /*source=*/memOut, /*index=*/0);

        if (isShared) { // alloc exact the same memory size as host
          // allocate input memory on device
          Value devMemOut = rewriter.create<hyper::AllocOp>(
              loc, /*memrefType=*/memOut.getType(),
              /*dynamicSizes=*/ValueRange{outMemLen},
              /*symbolOperands=*/ValueRange{},
              /*device=*/rewriter.getStringAttr(targetId));
          devSharedMemOuts.push_back(devMemOut);
          devMemOuts.push_back(devMemOut);
        } else { // alloc part of memory splited from host
          nOutput = rewriter.create<arith::DivUIOp>(loc, /*lhs=*/outMemLen,
                                                    /*rhs=*/N);
          Value partSizeOut = rewriter.create<arith::MulIOp>(
              loc, /*lhs=*/partNum, /*rhs=*/nOutput);
          Value partMemOut = rewriter.create<memref::ViewOp>(
              loc, /*resultType0=*/memOut.getType(), /*source=*/memOut,
              /*byte_shift=*/
              rewriter.create<arith::MulIOp>(loc, /*lhs=*/partStart,
                                             /*rhs=*/nOutput),
              /*sizes=*/ValueRange{partSizeOut});
          hostUnsharedMemOuts.push_back(partMemOut);
          // allocate output memory on device
          devMemOut = rewriter.create<hyper::AllocOp>(
              loc, /*memrefType=*/memOut.getType(),
              /*dynamicSizes=*/ValueRange{partSizeOut},
              /*symbolOperands=*/ValueRange{},
              /*device=*/rewriter.getStringAttr(targetId));
          devUnsharedMemOuts.push_back(devMemOut);
          devMemOuts.push_back(devMemOut);
        }
      }

      // lower to cpu (scf.for)
      if (targetId.contains("cpu") || targetId.contains("cgra")) {
        // copy input data from cpu to cpu for each input memory
        for (auto hostDev : llvm::zip(hostMemIns, devMemIns)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }
        // copy shared memout data from cpu to cpu for initialization
        for (auto hostDev : llvm::zip(sharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }

        // create new `scf.for` op
        rewriter.create<scf::ForOp>(
            loc, /*lowerBound=*/c0, /*upperBound=*/partNum, /*step=*/c1,
            /*iterArgs=*/ValueRange{},
            [&](OpBuilder &builder, Location loc, Value ivs, ValueRange iargs) {
              Block &hyperLoopBodyBlock = op.getLoopBody().front();
              // value map from original `hyper.for` op to new `scf.for` op
              IRMapping mp;
              // map the induction variable
              mp.map(hyperLoopBodyBlock.getArgument(0), ivs);
              // map the input and output memory
              if (memIns.size() > 0) {
                for (size_t i = 0; i < memIns.size(); i++)
                  mp.map(hyperLoopBodyBlock.getArgument(i + 1), devMemIns[i]);
              }
              if (memOuts.size() > 0) {
                for (size_t i = 0; i < memOuts.size(); i++)
                  mp.map(hyperLoopBodyBlock.getArgument(i + 1 + memIns.size()),
                         devMemOuts[i]);
              }
              // clone operations in original `hyper.for` op to new `scf.for` op
              for (auto &&op_ : hyperLoopBodyBlock.getOperations()) {
                if (dyn_cast_or_null<hyper::YieldOp>(op_))
                  builder.create<scf::YieldOp>(loc, op_.getResults());
                else if (auto reduce = dyn_cast_or_null<hyper::ReduceOp>(
                             op_)) { // atomic reduce
                  // TODO: support multiple regions for multiple shared memouts
                  for (unsigned i = 0; i < reduce.getReductionRegions().size();
                       i++) {
                    Block &block = reduce.getReductionRegions()[i].front();
                    IRMapping rmap;
                    rmap.map(block.getArgument(0), devSharedMemOuts[i]); // lhs
                    rmap.map(block.getArgument(1),
                             mp.lookup(reduce->getOperand(i))); // rhs
                    for (auto &op__ : block.getOperations()) {
                      if (!dyn_cast_or_null<hyper::ReduceReturnOp>(
                              op__)) // skip `hyper.reduce.return` op
                        builder.insert(op__.clone(rmap));
                    }
                  }
                } else {
                  if(dyn_cast_or_null<crypto::PadMessagePadone_OP>(op_)){
                    if(targetId.contains("cgra"))
                    // do not copy for cgra
                    continue;
                  } 
                  auto clone = builder.insert(op_.clone(mp));
                  for (auto oldNew :
                       llvm::zip(op_.getResults(), clone->getResults())) {
                    mp.map(std::get<0>(oldNew), std::get<1>(oldNew));
                  }
                  if (dyn_cast_or_null<crypto::Sha1core_OP>(clone) || 
                      dyn_cast_or_null<crypto::MD5OP>(clone) || 
                      dyn_cast_or_null<crypto::SM3OP>(clone) || 
                      dyn_cast_or_null<crypto::AESOP>(clone) ) {
                    clone->setAttr("device", targetIdAttr);
                  }else if(dyn_cast_or_null<scf::ForOp>(clone)){
                    auto forop = dyn_cast_or_null<scf::ForOp>(clone);
#define SETATTR(opty) \
for(auto crypto_op : forop.getLoopBody().front().getOps<opty>()) { \
  crypto_op->setAttr("device", targetIdAttr); \
} 
                    SETATTR(crypto::Sha1core_OP)
                    SETATTR(crypto::MD5OP)
                    SETATTR(crypto::SM3OP)
                    SETATTR(crypto::AESOP)
#undef SETATTR
                  }
                }
              }
            });

        // copy output data from cpu to cpu for each output memory
        for (auto hostDev :
             llvm::zip(hostUnsharedMemOuts, devUnsharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }
        // copy shared memout data from cpu to cpu to get results
        for (auto hostDev : llvm::zip(hostSharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }
      } else if (targetId.contains("gpu")) { // lower to gpu (gpu.launch)
        // copy input data from cpu to gpu
        for (auto hostDev : llvm::zip(hostMemIns, devMemIns)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("cpu2gpu"));
        }
        // copy shared memout data from cpu to gpu for initialization
        for (auto hostDev : llvm::zip(sharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("cpu2gpu"));
        }

        // for aes on sst
        if (rankMet) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/keyBase, /*src=*/keyBase, /*copyDirection=*/rewriter.getStringAttr("cpu2gpu"));
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/sBoxBase, /*src=*/sBoxBase, /*copyDirection=*/rewriter.getStringAttr("cpu2gpu"));
        }
        // sst configure kernel
        rewriter.create<sst::ConfigureCallOp>(
            loc, /*bx=*/blockSize, /*by=*/c1, /*bz=*/c1, /*gx=*/gridSize,
            /*gy=*/c1, /*gz=*/c1, /*sharedMem=*/c0);

        // NOTICE: why kernel's last arg run into first?
        switch (cryptoOpType) {
          case OTHER:
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/partNum, /*offset=*/c0);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nInput, /*offset=*/c8);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nOutput, /*offset=*/c16);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[0], /*offset=*/c24);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemOuts[0], /*offset=*/c64);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/plainLen, /*offset=*/c104);
            break;
          case SHA1_CRACK:
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/partNum, /*offset=*/c0);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nInput, /*offset=*/c8);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[0], /*offset=*/c16);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/plainLenIdx, /*offset=*/c56);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/plainLen, /*offset=*/c64);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[1], /*offset=*/c72);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c0, /*offset=*/c112);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c0i8, /*offset=*/c120);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c1, /*offset=*/c128);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemOuts[0], /*offset=*/c136);
            break;
          case SM3_CRACK:
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/partNum, /*offset=*/c0);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nInput, /*offset=*/c8);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[0], /*offset=*/c16);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/plainLen, /*offset=*/c56);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[1], /*offset=*/c64);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c0, /*offset=*/c104);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c0i8, /*offset=*/c112);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c1, /*offset=*/c120);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemOuts[0], /*offset=*/c128);
            break;
          case MD5_CRACK:
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/partNum, /*offset=*/c0);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nInput, /*offset=*/c8);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[0], /*offset=*/c16);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/plainLen, /*offset=*/c56);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[1], /*offset=*/c64);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c0, /*offset=*/c104);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c0i8, /*offset=*/c112);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/c1, /*offset=*/c120);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemOuts[0], /*offset=*/c128);
            break;
          case AES_MUTLI_CRACK:
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/partNum, /*offset=*/c0);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nInput, /*offset=*/c8);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/nOutput, /*offset=*/c16);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemIns[0], /*offset=*/c24);   /*memref:input*/
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/devMemOuts[0], /*offset=*/c64);  /*memref:output*/
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/sBoxBase, /*offset=*/c104);      /*memref:sBoxBase*/
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/plainLen, /*offset=*/c144);
            rewriter.create<sst::SetupArgumentOp>(loc, /*arg=*/keyBase, /*offset=*/c152);       /*memref:keyBase*/
            break;
        }

        // sst launch kernel
        rewriter.create<sst::LaunchOp>(loc, /*funcId=*/c1);

        // sst memcpy from device to host (flag 1 means device to host)
        for (auto hostDev :
             llvm::zip(hostUnsharedMemOuts, devUnsharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("gpu2cpu"));
        }
        // copy shared memout data from gpu to cpu to get results
        for (auto hostDev : llvm::zip(hostSharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(
              loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev),
              /*copyDirection=*/rewriter.getStringAttr("gpu2cpu"));
        }
      }
      //  else if (targetId.contains("cgra")) {

      //   // copy input data from cpu to cpu for each input memory
      //   for (auto hostDev : llvm::zip(hostMemIns, devMemIns)) {
      //     rewriter.create<hyper::MemcpyOp>(
      //         loc, /*dst=*/std::get<1>(hostDev),
      //         /*src=*/std::get<0>(hostDev),
      //         /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
      //   }
      //   // copy shared memout data from cpu to cpu for initialization
      //   for (auto hostDev : llvm::zip(sharedMemOuts, devSharedMemOuts)) {
      //     rewriter.create<hyper::MemcpyOp>(
      //         loc, /*dst=*/std::get<1>(hostDev),
      //         /*src=*/std::get<0>(hostDev),
      //         /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
      //   }

      //   auto m = op->getParentOfType<ModuleOp>();
      //   std::string algoFuncName;
      //   switch (cryptoCore) {
      //   case SHA1:
      //     algoFuncName = "cgra_sha1_run";
      //     break;
      //   case MD5:
      //     algoFuncName = "cgra_md5_run";
      //     break;
      //   case SM3:
      //     algoFuncName = "cgra_sm3_run";
      //     break;
      //   case AES:
      //     algoFuncName = "cgra_aes_run";
      //     break;
      //   default:
      //     assert(false && "unsupported crypto op for cgra now.");
      //     break;
      //   }
      //   func::FuncOp existingFunc = m.lookupSymbol<func::FuncOp>(algoFuncName);

      //   // declare function if not exist
      //   if (!existingFunc) {
      //     mlir::FunctionType algoFuncType;
      //     auto dynI8MemrefType = MemRefType::get({mlir::ShapedType::kDynamic},
      //                                            rewriter.getI8Type());
      //     switch (cryptoCore) {
      //     case SHA1:
      //       // algoFuncType = rewriter.getFunctionType(
      //       //     mlir::TypeRange{dynI8MemrefType, dynI8MemrefType},
      //       //     mlir::TypeRange{});
      //       //     break;
      //     case MD5:
      //     case SM3:
      //       algoFuncType = rewriter.getFunctionType(
      //           mlir::TypeRange{dynI8MemrefType, dynI8MemrefType, rewriter.getI32Type()},
      //           mlir::TypeRange{});
      //       break;
      //     case AES:
      //       algoFuncType = rewriter.getFunctionType(
      //           mlir::TypeRange{dynI8MemrefType, dynI8MemrefType, rewriter.getI32Type(), dynI8MemrefType},
      //           mlir::TypeRange{});
      //       break;
      //     default:
      //       assert(false && "unsupport crypto for cgra now.");
      //     }
      //     auto algoFunc =
      //         mlir::func::FuncOp::create(loc, algoFuncName, algoFuncType);
      //     algoFunc.setVisibility(mlir::SymbolTable::Visibility::Private);
      //     m.push_back(algoFunc);
      //   }

      //   // create new `scf.for` op
      //   rewriter.create<scf::ForOp>(
      //       loc, /*lowerBound=*/c0, /*upperBound=*/partNum, /*step=*/c1,
      //       /*iterArgs=*/ValueRange{},
      //       [&](OpBuilder &builder, Location loc, Value ivs, ValueRange iargs) {
      //         Block &hyperLoopBodyBlock = op.getLoopBody().front();
      //         // value map from original `hyper.for` op to new `scf.for` op
      //         IRMapping mp;
      //         // map the induction variable
      //         mp.map(hyperLoopBodyBlock.getArgument(0), ivs);
      //         // map the input and output memory
      //         if (memIns.size() > 0) {
      //           for (size_t i = 0; i < memIns.size(); i++)
      //             mp.map(hyperLoopBodyBlock.getArgument(i + 1), devMemIns[i]);
      //         }
      //         if (memOuts.size() > 0) {
      //           for (size_t i = 0; i < memOuts.size(); i++)
      //             mp.map(hyperLoopBodyBlock.getArgument(i + 1 + memIns.size()),
      //                    devMemOuts[i]);
      //         }
      //         // clone operations in original `hyper.for` op to new `scf.for` op
      //         for (auto &&op_ : hyperLoopBodyBlock.getOperations()) {
      //           if (dyn_cast_or_null<hyper::YieldOp>(op_))
      //             builder.create<scf::YieldOp>(loc, op_.getResults());
      //           else if (dyn_cast_or_null<crypto::AESOP>(op_)) {
      //             auto crypto_aes = dyn_cast_or_null<crypto::AESOP>(op_);
      //             auto input = mp.lookup(crypto_aes.getInput());
      //             auto output = mp.lookup(crypto_aes.getOutput());
      //             auto plain_len = crypto_aes.getMsgLen();
      //             auto key = crypto_aes.getKey();
      //             auto dynI8MemrefType = MemRefType::get(
      //                 {mlir::ShapedType::kDynamic}, rewriter.getI8Type());
      //             if (output.getType() != dynI8MemrefType) {
      //               output = builder.create<memref::CastOp>(
      //                   loc, dynI8MemrefType, output);
      //             }
      //             builder.create<mlir::func::CallOp>(
      //                 loc, algoFuncName, mlir::TypeRange{},
      //                 mlir::ValueRange{input, output, plain_len, key});
      //           } else if (dyn_cast_or_null<crypto::MD5OP>(op_)) {
      //             auto crypto_md5 = dyn_cast_or_null<crypto::MD5OP>(op_);
      //             auto input = mp.lookup(crypto_md5.getInput());
      //             auto output = mp.lookup(crypto_md5.getOutput());
      //             auto plain_len = crypto_md5.getMsgLen();
      //             auto dynI8MemrefType = MemRefType::get(
      //                 {mlir::ShapedType::kDynamic}, rewriter.getI8Type());
      //             if (output.getType() != dynI8MemrefType) {
      //               output = builder.create<memref::CastOp>(
      //                   loc, dynI8MemrefType, output);
      //             }
      //             builder.create<mlir::func::CallOp>(
      //                 loc, algoFuncName, mlir::TypeRange{},
      //                 mlir::ValueRange{input, output, plain_len});
      //           } else if (dyn_cast_or_null<crypto::SM3OP>(op_)) {
      //             auto crypto_sm3 = dyn_cast_or_null<crypto::SM3OP>(op_);
      //             auto input = mp.lookup(crypto_sm3.getInput());
      //             auto output = mp.lookup(crypto_sm3.getOutput());
      //             auto plain_len = crypto_sm3.getMsgLen();
      //             auto dynI8MemrefType = MemRefType::get(
      //                 {mlir::ShapedType::kDynamic}, rewriter.getI8Type());
      //             if (output.getType() != dynI8MemrefType) {
      //               output = builder.create<memref::CastOp>(
      //                   loc, dynI8MemrefType, output);
      //             }
      //             builder.create<mlir::func::CallOp>(
      //                 loc, algoFuncName, mlir::TypeRange{},
      //                 mlir::ValueRange{input, output, plain_len});
      //           } else if (dyn_cast_or_null<scf::ForOp>(op_)) {
      //             auto nest_for = dyn_cast_or_null<scf::ForOp>(op_);

      //           } else if(dyn_cast_or_null<crypto::PadMessagePadone_OP>(op_)){
      //             // do not clone PadMessagePadone_OP
      //           }
      //           else if (dyn_cast_or_null<crypto::Sha1core_OP>(op_)) {
      //             auto crypto_sha1 = dyn_cast_or_null<crypto::Sha1core_OP>(op_);
      //             auto input = mp.lookup(crypto_sha1.getInput());
      //             auto output = mp.lookup(crypto_sha1.getOutput());
      //             auto padops = hyperLoopBodyBlock.getOps<crypto::PadMessagePadone_OP>();
      //             auto padop = *padops.begin();
      //             auto plain_len = padop.getMessageLen();
      //             auto dynI8MemrefType = MemRefType::get(
      //                 {mlir::ShapedType::kDynamic}, rewriter.getI8Type());
      //             if (output.getType() != dynI8MemrefType) {
      //               output = builder.create<memref::CastOp>(
      //                   loc, dynI8MemrefType, output);
      //             }
      //             builder.create<mlir::func::CallOp>(
      //                 loc, algoFuncName, mlir::TypeRange{},
      //                 mlir::ValueRange{input, output, plain_len});
      //           } else if (dyn_cast_or_null<hyper::ReduceOp>(
      //                          op_)) { // atomic reduce
      //             auto reduce = dyn_cast_or_null<hyper::ReduceOp>(op_);
      //             // TODO: support multiple regions for multiple shared memouts
      //             for (unsigned i = 0; i < reduce.getReductionRegions().size();
      //                  i++) {
      //               Block &block = reduce.getReductionRegions()[i].front();
      //               IRMapping rmap;
      //               rmap.map(block.getArgument(0), devSharedMemOuts[i]); // lhs
      //               rmap.map(block.getArgument(1),
      //                        mp.lookup(reduce->getOperand(i))); // rhs
      //               for (auto &op__ : block.getOperations()) {
      //                 if (!dyn_cast_or_null<hyper::ReduceReturnOp>(
      //                         op__)) // skip `hyper.reduce.return` op
      //                   builder.insert(op__.clone(rmap));
      //               }
      //             }
      //           }
      //           else {
      //             auto clone = builder.insert(op_.clone(mp));
      //             for (auto oldNew :
      //                  llvm::zip(op_.getResults(), clone->getResults())) {
      //               mp.map(std::get<0>(oldNew), std::get<1>(oldNew));
      //             }
      //           }
      //         }
      //       });

      //   // copy output data from cpu to cpu for each output memory
      //   for (auto hostDev :
      //        llvm::zip(hostUnsharedMemOuts, devUnsharedMemOuts)) {
      //     rewriter.create<hyper::MemcpyOp>(
      //         loc, /*dst=*/std::get<0>(hostDev),
      //         /*src=*/std::get<1>(hostDev),
      //         /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
      //   }
      //   // copy shared memout data from cpu to cpu to get results
      //   for (auto hostDev : llvm::zip(hostSharedMemOuts, devSharedMemOuts)) {
      //     rewriter.create<hyper::MemcpyOp>(
      //         loc, /*dst=*/std::get<0>(hostDev),
      //         /*src=*/std::get<1>(hostDev),
      //         /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
      //   }
      // } 
      else { // unsupported device
        assert(false && "unsupported device for hyper.for now.");
      }

      // move start and end index for next iteration
      partStart = partEnd;
    }

    // Fetch the `hyper.reduce` op if one exists (it co-exists with a "shared" memOut).
    hyper::ReduceOp reduceOp = nullptr;
    for (auto reduce : op.getOps<hyper::ReduceOp>()) {
      reduceOp = reduce; // 'hyper.reduce' op will occur at most once
      Block &block = reduceOp.getReductionRegions()[0].front();
    }

    // `flag` counts how many devices have been skipped so far (those with a
    // duty ratio of 0); it realigns the index into `resultsOfEachDev` below.
    int flag = 0;
    // reduce the results from each device if `hyper.reduce` op exists
    if (reduceOp) {
      for (size_t i = 0; i < devices.size(); i++) {
        // check validity of `devices` attr
        auto deviceInfo = devices[i];
        auto dictAttr = deviceInfo.dyn_cast_or_null<DictionaryAttr>();
        auto targetIdAttr = dictAttr.get("targetId");
        auto targetConfigAttr = dictAttr.get("targetConfig");
        auto dutyRatioAttr = dictAttr.get("dutyRatio");
        auto targetId = targetIdAttr.dyn_cast_or_null<StringAttr>().getValue();

        // Skip devices whose duty ratio is 0 — no work was scheduled on them.
        auto dutyRatioFloatAttr = dutyRatioAttr.dyn_cast_or_null<FloatAttr>();
        float dutyRatiofl = dutyRatioFloatAttr.getValueAsDouble();
        if (dutyRatiofl == 0) {
          // Skip this device and bump `flag` so indexing into `resultsOfEachDev` stays aligned.
          flag += 1;
          continue;
        }

        for (auto each : llvm::zip(resultsOfEachDev[i - flag], sharedMemOuts,
                                   reduceOp.getReductionRegions())) {
          // TODO: support multiple regions for multiple shared memouts
          Block &block = std::get<2>(each).front();
          IRMapping rmap;
          rmap.map(block.getArgument(0), std::get<1>(each)); // lhs
          rmap.map(block.getArgument(1), std::get<0>(each)); // rhs
          for (auto &op__ : block.getOperations()) {
            if (!dyn_cast_or_null<hyper::ReduceReturnOp>(
                    op__)) // skip `hyper.reduce.return` op
              rewriter.insert(op__.clone(rmap));
          }
          // deallocate memory restoring temporary results of each device
          rewriter.create<memref::DeallocOp>(loc, /*memref=*/std::get<0>(each));
        }
      }
    }

    rewriter.eraseOp(op);

    return success();
  }
};

} // end anonymous namespace

/// Populate `patterns` with the rewrites that lower `hyper.for` ops for SST
/// device scheduling.
void populateSSTDeviceScheduleConversionPatterns(RewritePatternSet &patterns) {
  MLIRContext *ctx = patterns.getContext();
  patterns.add<ForOpSSTDeviceSchedulePattern>(ctx);
}

//===----------------------------------------------------------------------===//
// SSTDeviceSchedulePass
//===----------------------------------------------------------------------===//

namespace {
/// Module pass that schedules `hyper.for` loops onto SST devices by applying
/// the SST device-schedule conversion patterns.
class SSTDeviceSchedulePass
    : public PassWrapper<SSTDeviceSchedulePass, OperationPass<ModuleOp>> {
public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(SSTDeviceSchedulePass)

  SSTDeviceSchedulePass() = default;
  // Pass cloning requires a copy constructor; there is no per-instance state
  // to carry over.
  SSTDeviceSchedulePass(const SSTDeviceSchedulePass &) {}

  StringRef getArgument() const final { return "sst-device-schedule"; }
  StringRef getDescription() const final { return "schedule on sst devices."; }

  void runOnOperation() override;

  /// Dialects whose operations this pass may create must be loaded up front.
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<arith::ArithDialect,
                    async::AsyncDialect,
                    crypto::CryptoDialect,
                    func::FuncDialect,
                    hyper::HyperDialect,
                    memref::MemRefDialect,
                    scf::SCFDialect,
                    sst::SSTDialect,
                    vector::VectorDialect>();
  }
};
} // end anonymous namespace.

void SSTDeviceSchedulePass::runOnOperation() {
  MLIRContext *context = &getContext();

  ConversionTarget target(*context);
  // clang-format off
  target.addLegalDialect<
    arith::ArithDialect,
    func::FuncDialect,
    vector::VectorDialect,
    memref::MemRefDialect,
    LLVM::LLVMDialect,
    sst::SSTDialect,
    crypto::CryptoDialect,
    async::AsyncDialect,
    scf::SCFDialect>();
  // clang-format on
  target.addLegalOp<ModuleOp, hyper::AllocOp, hyper::DeallocOp, hyper::MemcpyOp,
                    func::FuncOp, func::ReturnOp>();

  RewritePatternSet patterns(context);
  populateSSTDeviceScheduleConversionPatterns(patterns);

  auto moduleOp = getOperation();

  if (failed(applyPartialConversion(moduleOp, target, std::move(patterns))))
    signalPassFailure();
}

void registerSSTDeviceSchedulePass() {
  PassRegistration<SSTDeviceSchedulePass>();
}

/// Factory used when building pass pipelines programmatically; returns a
/// fresh instance of the SST device-schedule pass.
std::unique_ptr<Pass> createSSTDeviceSchedulePass() {
  auto pass = std::make_unique<SSTDeviceSchedulePass>();
  return pass;
}
