#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Vector/IR/VectorOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Pass/Pass.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/PassRegistry.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/Async/IR/Async.h"

#include "crypto/Crypto.h"
#include "hyper/Hyper.h"

#include <unordered_map>
#include <vector>
#include <algorithm>
#include <iostream>

using namespace mlir;
using namespace hyper;

//===----------------------------------------------------------------------===//
// Rewrite Pattern
//===----------------------------------------------------------------------===//

namespace {

#define BLOCK_SIZE 100 //fixed blocksize
#define GRID_SIZE 1 //fixed gridsize

/// Lowers a single `hyper.for` op into per-device loops.
///
/// The op's `devices` array attribute (one DictionaryAttr per device holding
/// "targetId", "targetConfig" and "dutyRatio") drives the split:
///   * the iteration space [lowerBound, upperBound) is partitioned into
///     contiguous slices [partStart, partEnd), each sized by the device's
///     dutyRatio; the last device absorbs any rounding remainder;
///   * targets whose id contains "cpu"/"cgra" get an `scf.for`; targets whose
///     id contains "gpu" get a `gpu.launch` with a strided inner loop;
///   * memrefs flagged non-shared in the optional `isSharedMem` attribute are
///     sliced proportionally via `memref.view`; shared memrefs are copied
///     whole to every device;
///   * per-device partial results for shared outputs are merged back on the
///     host by cloning the `hyper.reduce` region(s), then freed.
class ForOpDeviceSchedulePattern : public OpRewritePattern<hyper::ForOp>  {
public:
  using OpRewritePattern<hyper::ForOp>::OpRewritePattern;

  LogicalResult
  matchAndRewrite(hyper::ForOp op, PatternRewriter &rewriter) const override {
    auto loc = op.getLoc();
    auto ctx = rewriter.getContext();   // NOTE(review): currently unused
    // operands of `hyper.for` op
    ValueRange memIns = op.getMemIn();
    ValueRange memOuts = op.getMemOut();
    Value upperBound = op.getUpperBound();
    Value lowerBound = op.getLowerBound();

    // Common index constants reused below.
    Value c0 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/0);
    Value c1 = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/1);

    // Launch dimensions for the gpu path (fixed, see macros above).
    Value gridSize = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/GRID_SIZE);
    Value blockSize = rewriter.create<arith::ConstantIndexOp>(loc, /*value=*/BLOCK_SIZE);
    
    // get information needed for splitting memref
    auto devices = op.getDevices();
    // N = total trip count; Nfp = N as f32 so it can be scaled by dutyRatio.
    // NOTE(review): index -> i32 cast truncates trip counts above 2^31-1.
    Value N = rewriter.create<arith::SubIOp>(loc, /*lhs=*/upperBound, /*rhs=*/lowerBound);
    Value Nfp = rewriter.create<arith::IndexCastOp>(loc, /*out=*/rewriter.getI32Type(), /*in=*/N);
    Nfp = rewriter.create<arith::SIToFPOp>(loc, /*out=*/rewriter.getF32Type(), /*in=*/Nfp);
    // index needed for splitting memref; the window [partStart, partEnd)
    // slides forward once per device at the bottom of the device loop.
    Value partStart = lowerBound;
    Value partEnd = lowerBound;

    // memory container for the "shared" memout
    SmallVector<Value> sharedMemOuts;
    // get the bool array of `isSharedMem` attribute to indicate whether the corresponding memref is shared or not
    // Layout of `isSharedMem`: first memIns.size() flags for inputs, then one
    // flag per output. If the attribute is absent both arrays stay empty and
    // every memref is treated as non-shared by the zips below.
    SmallVector<bool> isSharedMeminArray, isSharedMemoutArray;
    if (op.getIsSharedMem().has_value()) {    // if `isSharedMem` attribute exists
      auto isSharedMem = op.getIsSharedMem().value();
      // construct the bool array for `memIns`
      for (int i = 0; i < memIns.size(); i++) {
        isSharedMeminArray.push_back(isSharedMem[i].dyn_cast<BoolAttr>().getValue());
      }
      // construct the bool array for `memOuts`
      for (int i = memIns.size(); i < isSharedMem.size(); i++) {
        bool isShared = isSharedMem[i].dyn_cast<BoolAttr>().getValue();
        if (isShared) {
          sharedMemOuts.push_back(memOuts[i - memIns.size()]);
        }
        isSharedMemoutArray.push_back(isShared);
      }
    }

    // memory container for the "reduce" operation
    // resultsOfEachDev[k] holds the host staging buffers of the k-th device
    // that actually participates (zero-duty devices are skipped, see `flag`
    // accounting in the reduce loop below).
    SmallVector<SmallVector<Value>> resultsOfEachDev;
    // split memref and create new `scf.for` ops or `gpu.launch` ops for each device
    for(auto i = 0; i < devices.size(); i++) {
      // check validity of `devices` attr
      // NOTE(review): the dyn_cast_or_null results are dereferenced without
      // null checks, so a malformed `devices` entry will crash the pass —
      // consider returning failure() instead.
      auto deviceInfo = devices[i];
      auto dictAttr = deviceInfo.dyn_cast_or_null<DictionaryAttr>();
      auto targetIdAttr = dictAttr.get("targetId");
      auto targetConfigAttr = dictAttr.get("targetConfig");
      auto dutyRatioAttr = dictAttr.get("dutyRatio");
      auto targetId = targetIdAttr.dyn_cast_or_null<StringAttr>().getValue();

      // don't generate related functions when dutyratio is 0
      auto dutyRatioFloatAttr = dutyRatioAttr.dyn_cast_or_null<FloatAttr>();
      float dutyRatiofl = dutyRatioFloatAttr.getValueAsDouble();   // NOTE(review): double->float narrowing
      if (dutyRatiofl == 0) {
        // jump out this cycle
        continue;
      }

      /// if any "shared" memOut exsits, we should allocate each of them for each device on the host
      /// for the "reduce" opreation
      /// NOTE(review): these host buffers are only deallocated in the reduce
      /// loop at the end; if a shared memOut exists without a `hyper.reduce`
      /// op they leak — confirm that contract is enforced upstream.
      SmallVector<Value> hostSharedMemOuts;
      for (auto outShared : llvm::zip(memOuts, isSharedMemoutArray)) {
        auto memOut = std::get<0>(outShared);
        auto isShared = std::get<1>(outShared);
        if (isShared) {
          Value outMemLen = rewriter.create<memref::DimOp>(loc, /*source=*/memOut, /*index=*/0);
          memref::AllocOp allocOp = rewriter.create<memref::AllocOp>(loc, /*memrefType=*/memOut.getType(), /*dynamicSizes=*/ValueRange{outMemLen}, /*alignment=*/rewriter.getI64IntegerAttr(0));
          // Manually set the operand segment sizes ({1 dynamic size, 0 symbol
          // operands}); presumably works around the AllocOp builder overload
          // used above — TODO confirm this is still required.
          allocOp->setAttr("operandSegmentSizes", rewriter.getDenseI32ArrayAttr({1, 0}));
          hostSharedMemOuts.push_back(allocOp.getResult());
          // Value alloc = rewriter.create<memref::AllocOp>(loc, /*memrefType=*/memOut.getType(), /*dynamicSizes=*/ValueRange{outMemLen});
          // hostSharedMemOuts.push_back(alloc);
        }
      }
      resultsOfEachDev.push_back(hostSharedMemOuts);

      // determine partEnd
      if(i == devices.size() - 1) {
        // Last device takes everything that is left so rounding errors in the
        // float dutyRatio arithmetic cannot drop iterations.
        partEnd = upperBound;
      } 
      else {
        // partEnd = partStart + round_toward_zero(N * dutyRatio)
        Value dutyRatio = rewriter.create<arith::ConstantOp>(loc, /*result=*/rewriter.getF32Type(), /*value=*/dutyRatioAttr.dyn_cast<FloatAttr>());
        Value dutyValue = rewriter.create<arith::MulFOp>(loc, /*lhs=*/Nfp, /*rhs=*/dutyRatio);
        dutyValue = rewriter.create<arith::FPToSIOp>(loc, /*out=*/rewriter.getI32Type(), /*in=*/dutyValue);
        dutyValue = rewriter.create<arith::IndexCastOp>(loc, /*out=*/rewriter.getIndexType(), /*in=*/dutyValue);
        partEnd = rewriter.create<arith::AddIOp>(loc, /*lhs=*/partStart, /*rhs=*/dutyValue);
      }

      // split memref
      // partNum = number of iterations assigned to this device.
      Value partNum = rewriter.create<arith::SubIOp>(loc, /*lhs=*/partEnd, /*rhs=*/partStart);
      // multimple memIn allocation
      SmallVector<Value> hostMemIns;
      SmallVector<Value> devMemIns;
      for (auto inShared : llvm::zip(memIns, isSharedMeminArray)) {
        auto memIn = std::get<0>(inShared);
        auto isShared = std::get<1>(inShared);

        // get the length of input memory
        Value inMemLen = rewriter.create<memref::DimOp>(loc, /*source=*/memIn, /*index=*/0);
        
        if (isShared) {   // alloc exact the same memory size as host
          // allocate input memory on device
          Value devMemIn = rewriter.create<hyper::AllocOp>(loc, /*memrefType=*/memIn.getType(), /*dynamicSizes=*/ValueRange{inMemLen}, /*symbolOperands=*/ValueRange{}, /*device=*/rewriter.getStringAttr(targetId));
          devMemIns.push_back(devMemIn);
          hostMemIns.push_back(memIn);
        } else {  // alloc part of memory splited from host
          // nInput = elements per iteration (len / trip count); the device's
          // slice is partNum * nInput elements starting at partStart * nInput.
          // NOTE(review): memref.view's shift operand is documented in bytes,
          // but partStart * nInput is an element count — correct only for
          // 1-byte element types; TODO confirm.
          Value nInput = rewriter.create<arith::DivUIOp>(loc, /*lhs=*/inMemLen, /*rhs=*/N);
          Value partSizeIn = rewriter.create<arith::MulIOp>(loc, /*lhs=*/partNum, /*rhs=*/nInput);
          Value partMemIn = rewriter.create<memref::ViewOp>(loc, /*resultType0=*/memIn.getType(), /*source=*/memIn, /*byte_shift=*/rewriter.create<arith::MulIOp>(loc, /*lhs=*/partStart, /*rhs=*/nInput), /*sizes=*/ValueRange{partSizeIn});
          hostMemIns.push_back(partMemIn);
          // allocate input memory on device
          Value devMemIn = rewriter.create<hyper::AllocOp>(loc, /*memrefType=*/memIn.getType(), /*dynamicSizes=*/ValueRange{partSizeIn}, /*symbolOperands=*/ValueRange{}, /*device=*/rewriter.getStringAttr(targetId));
          devMemIns.push_back(devMemIn);
        }
      }
      // multimple memOut allocation
      // Mirrors the memIn handling above: shared outputs are replicated in
      // full, non-shared outputs are sliced per device.
      SmallVector<Value> hostUnsharedMemOuts;
      SmallVector<Value> devMemOuts;
      SmallVector<Value> devSharedMemOuts;
      SmallVector<Value> devUnsharedMemOuts;
      for (auto outShared : llvm::zip(memOuts, isSharedMemoutArray)) {
        auto memOut = std::get<0>(outShared);
        auto isShared = std::get<1>(outShared);

        Value outMemLen = rewriter.create<memref::DimOp>(loc, /*source=*/memOut, /*index=*/0);

        if (isShared) {   // alloc exact the same memory size as host
          // allocate input memory on device
          Value devMemOut = rewriter.create<hyper::AllocOp>(loc, /*memrefType=*/memOut.getType(), /*dynamicSizes=*/ValueRange{outMemLen}, /*symbolOperands=*/ValueRange{}, /*device=*/rewriter.getStringAttr(targetId));
          devSharedMemOuts.push_back(devMemOut);
          devMemOuts.push_back(devMemOut);
        } else {  // alloc part of memory splited from host   
          // Same element-count slicing as the memIn case; see NOTE above
          // about memref.view's byte_shift units.
          Value nOutput = rewriter.create<arith::DivUIOp>(loc, /*lhs=*/outMemLen, /*rhs=*/N);
          Value partSizeOut = rewriter.create<arith::MulIOp>(loc, /*lhs=*/partNum, /*rhs=*/nOutput);
          Value partMemOut = rewriter.create<memref::ViewOp>(loc, /*resultType0=*/memOut.getType(), /*source=*/memOut, /*byte_shift=*/rewriter.create<arith::MulIOp>(loc, /*lhs=*/partStart, /*rhs=*/nOutput), /*sizes=*/ValueRange{partSizeOut});
          hostUnsharedMemOuts.push_back(partMemOut);
          // allocate output memory on device
          Value devMemOut = rewriter.create<hyper::AllocOp>(loc, /*memrefType=*/memOut.getType(), /*dynamicSizes=*/ValueRange{partSizeOut}, /*symbolOperands=*/ValueRange{}, /*device=*/rewriter.getStringAttr(targetId));
          devUnsharedMemOuts.push_back(devMemOut);
          devMemOuts.push_back(devMemOut);
        }
      }
      
      // lower to cpu (scf.for)
      if (targetId.contains("cpu") || targetId.contains("cgra")) {
        // copy input data from cpu to cpu for each input memory
        for (auto hostDev : llvm::zip(hostMemIns, devMemIns)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev), /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }
        // copy shared memout data from cpu to cpu for initialization
        for (auto hostDev : llvm::zip(sharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev), /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }

        // create new `scf.for` op   
        // The loop runs over the local range [0, partNum); the body builder
        // clones the `hyper.for` region with block arguments remapped:
        //   arg 0            -> induction variable
        //   args 1..k        -> device copies of memIns
        //   args k+1..k+m    -> device copies of memOuts
        rewriter.create<scf::ForOp>(loc, /*lowerBound=*/c0, /*upperBound=*/partNum, /*step=*/c1, /*iterArgs=*/ValueRange{}, [&](OpBuilder &builder, Location loc, Value ivs, ValueRange iargs) {
          Block &hyperLoopBodyBlock = op.getLoopBody().front();
          // value map from original `hyper.for` op to new `scf.for` op
          IRMapping mp;
          // map the induction variable
          mp.map(hyperLoopBodyBlock.getArgument(0), ivs);
          // map the input and output memory
          if (memIns.size() > 0) {
            for (size_t i = 0; i < memIns.size(); i++)
              mp.map(hyperLoopBodyBlock.getArgument(i + 1), devMemIns[i]);
          }
          if (memOuts.size() > 0) {
            for (size_t i = 0; i < memOuts.size(); i++)
              mp.map(hyperLoopBodyBlock.getArgument(i + 1 + memIns.size()), devMemOuts[i]);
          }
          // clone operations in original `hyper.for` op to new `scf.for` op
          for(auto&& op_ : hyperLoopBodyBlock.getOperations()) {
            if (dyn_cast_or_null<hyper::YieldOp>(op_))      
              // Replace the hyper terminator with the scf one.
              builder.create<scf::YieldOp>(loc, op_.getResults());
            else if (auto reduce = dyn_cast_or_null<hyper::ReduceOp>(op_)) {  // atomic reduce
              // Inline each reduction region body in place of the reduce op:
              // lhs = the device-local shared output, rhs = the mapped
              // reduce operand; `hyper.reduce.return` is dropped.
              // TODO: support multiple regions for multiple shared memouts
              for (unsigned i = 0; i < reduce.getReductionRegions().size(); i++) {
                Block &block = reduce.getReductionRegions()[i].front();
                IRMapping rmap;
                rmap.map(block.getArgument(0), devSharedMemOuts[i]);  // lhs
                rmap.map(block.getArgument(1), mp.lookup(reduce->getOperand(i)));    // rhs
                for (auto& op__ : block.getOperations()) {
                  if (!dyn_cast_or_null<hyper::ReduceReturnOp>(op__))   // skip `hyper.reduce.return` op
                    builder.insert(op__.clone(rmap));
                }
              }
            } else {
              // Clone and record result mappings so later ops see the clones.
              auto clone = builder.insert(op_.clone(mp));
              for (auto oldNew : llvm::zip(op_.getResults(), clone->getResults())) {
                mp.map(std::get<0>(oldNew), std::get<1>(oldNew));
              }
            }
          }
        });

        // copy output data from cpu to cpu for each output memory
        for (auto hostDev : llvm::zip(hostUnsharedMemOuts, devUnsharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev), /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }
        // copy shared memout data from cpu to cpu to get results
        for (auto hostDev : llvm::zip(hostSharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev), /*copyDirection=*/rewriter.getStringAttr("cpu2cpu"));
        }
      } 
      else if (targetId.contains("gpu")) {  // lower to gpu (gpu.launch)
        // copy input data from cpu to gpu
        for (auto hostDev : llvm::zip(hostMemIns, devMemIns)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev), /*copyDirection=*/rewriter.getStringAttr("cpu2gpu"));
        }
        // copy shared memout data from cpu to gpu for initialization
        for (auto hostDev : llvm::zip(sharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<1>(hostDev), /*src=*/std::get<0>(hostDev), /*copyDirection=*/rewriter.getStringAttr("cpu2gpu"));
        }

        // create new `gpu.launch` op
        // TODO: gridSizeX and blockSizeX should be set properly
        // TODO: the calculation logic inside `gpu.launch` op should be set properly
        gpu::LaunchOp launchOp = rewriter.create<gpu::LaunchOp>(
          loc, 
          /*gridSizeX=*/gridSize, /*gridSizeY=*/c1, /*gridSizeZ=*/c1,
          /*blockSizeX=*/blockSize, /*blockSizeY=*/c1, /*blockSizeZ=*/c1,
          /*dynamicSharedMemorySize*/nullptr, /*asyncTokenType=*/nullptr, /*asyncDependencies=*/ValueRange{});
        
        // clone operations in original `hyper.for` op to new `gpu.launch` op
        rewriter.setInsertionPointToStart(&launchOp.getBody().front());
        Block &hyperLoopBodyBlock = op.getLoopBody().front();

        // calculate the unique id for each thread
        // id = blockIdx.x * blockDim.x + threadIdx.x
        Value tIdX = rewriter.create<gpu::ThreadIdOp>(loc, /*dimension=*/::mlir::gpu::Dimension::x);
        Value blockIdxX = rewriter.create<gpu::BlockIdOp>(loc, /*dimension=*/::mlir::gpu::Dimension::x);
        Value tmpMul = rewriter.create<arith::MulIOp>(loc, /*lhs=*/blockIdxX, /*rhs=*/launchOp.getBlockSize().x);
        Value id = rewriter.create<arith::AddIOp>(loc, /*lhs=*/tmpMul, /*rhs=*/tIdX);
        // calculate stride
        // totalStride = gridDim.x * blockDim.x, i.e. a grid-stride loop.
        Value totalStride = rewriter.create<arith::MulIOp>(loc, /*lhs=*/launchOp.getGridSize().x, /*rhs=*/launchOp.getBlockSize().x);

        // messages to be calculated by current thread
        // Same region-cloning scheme as the cpu path above, but each thread
        // starts at its own `id` and strides by the total thread count.
        rewriter.create<scf::ForOp>(
          loc, /*lowerBound=*/id, /*upperBound=*/partNum, /*step=*/totalStride, /*iterArgs=*/std::nullopt,
          /*odsArg4=*/[&](OpBuilder &builder, Location loc, Value ivs, ValueRange iargs){
            Value curMsgIdx = ivs;
            // value map from original `hyper.for` op to new `gpu.launch` op
            IRMapping mp;
            mp.map(hyperLoopBodyBlock.getArgument(0), curMsgIdx);
            if (memIns.size() > 0) {
              for (size_t i = 0; i < memIns.size(); i++)
                mp.map(hyperLoopBodyBlock.getArgument(i + 1), devMemIns[i]);
            }
            if (memOuts.size() > 0) {
              for (size_t i = 0; i < memOuts.size(); i++)
                mp.map(hyperLoopBodyBlock.getArgument(i + 1 + memIns.size()), devMemOuts[i]);
            }
            // clone operations in original `hyper.for` op to new `gpu.launch` op
            for(auto&& op_ : hyperLoopBodyBlock.getOperations()) {
              if (dyn_cast<hyper::YieldOp>(op_))      
                builder.create<scf::YieldOp>(loc);
              else if (auto reduce = dyn_cast_or_null<hyper::ReduceOp>(op_)) {  // atomic reduce
                // TODO: support multiple regions for multiple shared memouts
                for (unsigned i = 0; i < reduce.getReductionRegions().size(); i++) {
                  Block &block = reduce.getReductionRegions()[i].front();
                  IRMapping rmap;
                  rmap.map(block.getArgument(0), devSharedMemOuts[i]);  // lhs
                  rmap.map(block.getArgument(1), mp.lookup(reduce->getOperand(i)));    // rhs
                  for (auto& op__ : block.getOperations()) {
                    if (!dyn_cast_or_null<hyper::ReduceReturnOp>(op__))   // skip `hyper.reduce.return` op
                      builder.insert(op__.clone(rmap));
                  }
                }
              } else {
                auto clone = builder.insert(op_.clone(mp));
                for (auto oldNew : llvm::zip(op_.getResults(), clone->getResults())) {
                  mp.map(std::get<0>(oldNew), std::get<1>(oldNew));
                }
              }
            }
        });

        // gpu.launch regions must end with gpu.terminator.
        rewriter.create<gpu::TerminatorOp>(loc);

        // set insertion point to the end of `gpu.launch` op
        rewriter.setInsertionPointAfter(launchOp);
        // copy output data from gpu to cpu
        for (auto hostDev : llvm::zip(hostUnsharedMemOuts, devUnsharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev), /*copyDirection=*/rewriter.getStringAttr("gpu2cpu"));
        }
        // copy shared memout data from gpu to cpu to get results
        for (auto hostDev : llvm::zip(hostSharedMemOuts, devSharedMemOuts)) {
          rewriter.create<hyper::MemcpyOp>(loc, /*dst=*/std::get<0>(hostDev), /*src=*/std::get<1>(hostDev), /*copyDirection=*/rewriter.getStringAttr("gpu2cpu"));
        }
      } else {  // unsupported device
        // NOTE(review): assert compiles out in release builds, letting an
        // unsupported target fall through silently — consider emitting a
        // diagnostic and returning failure() instead.
        assert(false && "unsupported device for hyper.for now.");
      }

      // deallocate memory on device
      for (auto devMemIn : devMemIns)
        rewriter.create<hyper::DeallocOp>(loc, /*memref=*/devMemIn, /*device=*/rewriter.getStringAttr(targetId));
      for (auto devMemOut : devMemOuts)
        rewriter.create<hyper::DeallocOp>(loc, /*memref=*/devMemOut, /*device=*/rewriter.getStringAttr(targetId));
      
      // move start and end index for next iteration
      partStart = partEnd;
    }

    // get the `hyper.reduce` op if exists (co-exist with "shared" memOut)
    hyper::ReduceOp reduceOp = nullptr;
    for (auto reduce : op.getOps<hyper::ReduceOp>()) {
      reduceOp = reduce;    // 'hyper.reduce' op will occur at most once
      Block &block = reduceOp.getReductionRegions()[0].front();   // NOTE(review): unused — dead local
    }

    // when jump out a cycle, flag add 1
    // `flag` counts skipped (zero-duty) devices so `i - flag` indexes
    // resultsOfEachDev, which only has entries for participating devices.
    int flag = 0;
    // reduce the results from each device if `hyper.reduce` op exists
    if (reduceOp) {
      for (int i = 0; i < devices.size(); i++) {
        // check validity of `devices` attr
        auto deviceInfo = devices[i];
        auto dictAttr = deviceInfo.dyn_cast_or_null<DictionaryAttr>();
        auto targetIdAttr = dictAttr.get("targetId");
        auto targetConfigAttr = dictAttr.get("targetConfig");
        auto dutyRatioAttr = dictAttr.get("dutyRatio");
        auto targetId = targetIdAttr.dyn_cast_or_null<StringAttr>().getValue();

        // don't generate related functions when dutyratio is 0
        auto dutyRatioFloatAttr = dutyRatioAttr.dyn_cast_or_null<FloatAttr>();
        float dutyRatiofl = dutyRatioFloatAttr.getValueAsDouble();
        if (dutyRatiofl == 0) {
          // jump out this cycle and flag + 1(meet resultsOfEachDev)
          flag += 1;
          continue;
        }

        // Fold this device's staged partial result into the real shared
        // memOut by cloning the reduction region on the host:
        // lhs = shared memOut, rhs = device's staging buffer.
        for (auto each : llvm::zip(resultsOfEachDev[i-flag], sharedMemOuts, reduceOp.getReductionRegions())){
          // TODO: support multiple regions for multiple shared memouts
          Block &block = std::get<2>(each).front();
          IRMapping rmap;
          rmap.map(block.getArgument(0), std::get<1>(each));  // lhs
          rmap.map(block.getArgument(1), std::get<0>(each));  // rhs
          for (auto& op__ : block.getOperations()) {
            if (!dyn_cast_or_null<hyper::ReduceReturnOp>(op__))   // skip `hyper.reduce.return` op
              rewriter.insert(op__.clone(rmap));
          }
          // deallocate memory restoring temporary results of each device
          rewriter.create<memref::DeallocOp>(loc, /*memref=*/std::get<0>(each));
        }
      }
    }

    // The original `hyper.for` has been fully expanded; remove it.
    rewriter.eraseOp(op);

    return success();
  }
};

} // end anonymous namespace

/// Collects the device-scheduling rewrite patterns into \p patterns.
void populateDeviceScheduleConversionPatterns(RewritePatternSet &patterns) {
  MLIRContext *context = patterns.getContext();
  patterns.add<ForOpDeviceSchedulePattern>(context);
}

//===----------------------------------------------------------------------===//
// DeviceSchedulePass
//===----------------------------------------------------------------------===//

namespace {
/// Module-level pass that rewrites every `hyper.for` op into per-device
/// loops (see ForOpDeviceSchedulePattern). Exposed on the command line as
/// `-device-schedule`.
class DeviceSchedulePass : public PassWrapper<DeviceSchedulePass, OperationPass<ModuleOp>> {
public:
  MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(DeviceSchedulePass)
  DeviceSchedulePass() = default;
  // Empty copy constructor: copies no state (common PassWrapper idiom —
  // presumably kept so the pass can be cloned; confirm if options are added).
  DeviceSchedulePass(const DeviceSchedulePass &) {}

  StringRef getArgument() const final { return "device-schedule"; }
  StringRef getDescription() const final { return "schedule on devices."; }

  void runOnOperation() override;

  // Pre-load every dialect whose ops the rewrite pattern may create.
  void getDependentDialects(DialectRegistry &registry) const override {
    // clang-format off
    registry.insert<
        func::FuncDialect,
        vector::VectorDialect,
        memref::MemRefDialect,
        gpu::GPUDialect,
        arith::ArithDialect,
        crypto::CryptoDialect,
        hyper::HyperDialect,
        async::AsyncDialect,
        scf::SCFDialect>();
    // clang-format on
  }
};
} // end anonymous namespace.

void DeviceSchedulePass::runOnOperation() {
  MLIRContext *context = &getContext();

  ConversionTarget target(*context);
  // clang-format off
  target.addLegalDialect<
    arith::ArithDialect,
    func::FuncDialect,
    vector::VectorDialect,
    memref::MemRefDialect,
    LLVM::LLVMDialect,
    gpu::GPUDialect,
    crypto::CryptoDialect,
    async::AsyncDialect,
    scf::SCFDialect>();
  // clang-format on
  target.addLegalOp<
    ModuleOp,
    hyper::AllocOp,
    hyper::DeallocOp,
    hyper::MemcpyOp,
    func::FuncOp,
    func::ReturnOp>();

  RewritePatternSet patterns(context);
  populateDeviceScheduleConversionPatterns(patterns);

  auto moduleOp = getOperation();

  if (failed(applyPartialConversion(moduleOp, target, std::move(patterns))))
    signalPassFailure();
}

/// Registers DeviceSchedulePass with the global pass registry
/// (command-line argument: "device-schedule").
void registerDeviceSchedulePass() {
  PassRegistration<DeviceSchedulePass>();
}

/// Factory for pipeline construction: returns a fresh DeviceSchedulePass.
std::unique_ptr<Pass> createDeviceSchedulePass() {
  return std::make_unique<DeviceSchedulePass>();
}
