#include <cuda_runtime.h>
#include <cute/tensor.hpp>


// Multistage-pipelined tiled GEMM kernel built on CuTe with SM80 cp.async.
//
// Computes one (kTileM x kTileN) output tile per CTA:
//   - IsGemm == true : accumulator starts at zero            (D = A * B)
//   - IsGemm == false: accumulator is initialized from Cptr  (D = A * B + C);
//                      the C tile is first staged through shared memory.
// Epilogue:
//   - IsCvtPrecision == false: the ComputeTypeC accumulator is staged through
//     shared memory and written back to Cptr.
//   - IsCvtPrecision == true : the accumulator is converted to OutType,
//     staged through shared memory, and written to Outptr.
//
// Expected launch: grid = (ceil_div(N, kTileN), ceil_div(M, kTileM)),
// block = Spec::kThreadNum, dynamic shared memory covering the regions laid
// out below. A is (M, K) K-major, B is (N, K) K-major, C/Out are (M, N)
// N-major. Requires SM80+ (cp.async, mma.sync atoms selected by Spec).
template <typename Spec, bool IsGemm, bool IsCvtPrecision>
__global__ __launch_bounds__(Spec::kThreadNum) void
pipelining(void* __restrict__ Cptr,
          const void* __restrict__ Aptr,
          const void* __restrict__ Bptr,
          int M, int N, int K,
          void* __restrict__ Outptr) {
  using namespace cute;

  using X = Underscore;
  using MMA_shape = typename Spec::MMA_shape;
  using OutType = typename Spec::OutType;
  using ComputeTypeA = typename Spec::ComputeTypeA;
  using ComputeTypeB = typename Spec::ComputeTypeB;
  using ComputeTypeC = typename Spec::ComputeTypeC;
  using SmemLayoutA = typename Spec::SmemLayoutA;
  using SmemLayoutB = typename Spec::SmemLayoutB;
  using SmemLayoutC = typename Spec::SmemLayoutC;
  using SmemLayoutO = typename Spec::SmemLayoutO;

  constexpr int kTileM = Spec::kTileM;
  constexpr int kTileN = Spec::kTileN;
  constexpr int kTileK = Spec::kTileK;
  constexpr int kShmSizeA = Spec::kShmSizeA;
  constexpr int kShmSizeB = Spec::kShmSizeB;
  constexpr int G2S_Stages = Spec::G2S_Stages;

  // Dynamic shared memory, partitioned manually:
  //   [ A stages | B stages | C tile (fused-C variant only) ]
  // The O buffer (and the plain-GEMM C buffer) reuses the start of smem;
  // that aliasing is safe because those buffers are only touched after
  // cp_async_wait<0>() + __syncthreads() at the end of the main loop.
  extern __shared__ __align__(1024) uint8_t smem[];

  uint8_t *Aptr_smem = smem;
  uint8_t *Bptr_smem = smem + kShmSizeA;
  uint8_t *Cptr_smem;
  if constexpr (!IsGemm) Cptr_smem = smem + kShmSizeA + kShmSizeB;
  else Cptr_smem = smem;
  uint8_t *Optr_smem = smem;

  int tid = threadIdx.x;
  int bidx = blockIdx.x;
  int bidy = blockIdx.y;

  Tensor mA = make_tensor(make_gmem_ptr((ComputeTypeA *)Aptr),
                          make_shape(M, K),
                          make_stride(K, Int<1>{}));  // (M, K)
  Tensor mB = make_tensor(make_gmem_ptr((ComputeTypeB *)Bptr),
                          make_shape(N, K),
                          make_stride(K, Int<1>{}));  // (N, K)
  Tensor mC = make_tensor(make_gmem_ptr((ComputeTypeC *)Cptr),
                          make_shape(M, N),
                          make_stride(N, Int<1>{}));  // (M, N)
  Tensor mO = make_tensor(make_gmem_ptr((OutType *)Outptr),
                          make_shape(M, N),
                          make_stride(N, Int<1>{}));  // (M, N)

  auto tiler = make_tile(Int<kTileM>{}, Int<kTileN>{}, Int<kTileK>{});
  auto coord = make_coord(bidy, bidx, _);

  Tensor gA = local_tile(mA, tiler, coord, Step<_1,  X, _1>{});  // (BLK_M, BLK_K, K_TILES)
  Tensor gB = local_tile(mB, tiler, coord, Step< X, _1, _1>{});  // (BLK_N, BLK_K, K_TILES)
  Tensor gC = local_tile(mC, tiler, coord, Step<_1, _1,  X>{});  // (BLK_M, BLK_N)
  Tensor gO = local_tile(mO, tiler, coord, Step<_1, _1,  X>{});  // (BLK_M, BLK_N)

  auto m_max_coord = M - size<0>(gA) * bidy;          // M - BLK_M * m_coord
  auto n_max_coord = N - size<0>(gB) * bidx;          // N - BLK_N * n_coord
  auto k_residue   = K - size<1>(gA) * size<2>(gA);   // K - BLK_K * k_coord_max (<= 0)

  // Shift gA/gB along K by (non-positive) k_residue so that the ragged,
  // partial K-tile becomes the FIRST tile. The prefetch below then predicates
  // only that first tile; all later K-tiles are full.
  gA = domain_offset(make_coord(0, k_residue, 0), gA);
  gB = domain_offset(make_coord(0, k_residue, 0), gB);

  Tensor sA = make_tensor(make_smem_ptr((ComputeTypeA *)Aptr_smem), SmemLayoutA{});  // (BLK_M, BLK_K, G2S_PIPE)
  Tensor sB = make_tensor(make_smem_ptr((ComputeTypeB *)Bptr_smem), SmemLayoutB{});  // (BLK_N, BLK_K, G2S_PIPE)
  Tensor sC = make_tensor(make_smem_ptr((ComputeTypeC *)Cptr_smem), SmemLayoutC{});  // (BLK_M, BLK_N)
  Tensor sO = make_tensor(make_smem_ptr((OutType      *)Optr_smem), SmemLayoutO{});  // (BLK_M, BLK_N)

  typename Spec::TiledMMA tiled_mma;
  ThrMMA thr_mma = tiled_mma.get_slice(tid);

  Tensor tCrA = thr_mma.partition_fragment_A(gA(_, _, 0));  // (MMA, MMA_M, MMA_K)
  Tensor tCrB = thr_mma.partition_fragment_B(gB(_, _, 0));  // (MMA, MMA_N, MMA_K)
  Tensor tCrC = thr_mma.partition_fragment_C(gC);           // (MMA, MMA_M, MMA_N)

  // Global->shared tiled copies (cp.async). Each issue of the loops below
  // moves one full (BLK_M, BLK_K) / (BLK_N, BLK_K) stage into shared memory.
  typename Spec::TiledCopyA_G2S g2s_tiled_copy_a;
  ThrCopy g2s_thr_copy_a = g2s_tiled_copy_a.get_slice(tid);
  Tensor tAgA_g2s = g2s_thr_copy_a.partition_S(gA);   // (ACPY, ACPY_M, ACPY_K, K_TILES)
  Tensor tAsA_g2s = g2s_thr_copy_a.partition_D(sA);   // (ACPY, ACPY_M, ACPY_K, G2S_PIPE)

  typename Spec::TiledCopyB_G2S g2s_tiled_copy_b;
  ThrCopy g2s_thr_copy_b = g2s_tiled_copy_b.get_slice(tid);
  Tensor tBgB_g2s = g2s_thr_copy_b.partition_S(gB);   // (BCPY, BCPY_N, BCPY_K, K_TILES)
  Tensor tBsB_g2s = g2s_thr_copy_b.partition_D(sB);   // (BCPY, BCPY_N, BCPY_K, G2S_PIPE)

  typename Spec::TiledCopyC_G2S g2s_tiled_copy_c;
  ThrCopy g2s_thr_copy_c = g2s_tiled_copy_c.get_slice(tid);
  Tensor tCgC_g2s = g2s_thr_copy_c.partition_S(gC);   // (CCPY, CCPY_M, CCPY_N)
  Tensor tCsC_g2s = g2s_thr_copy_c.partition_D(sC);   // (CCPY, CCPY_M, CCPY_N)

  //
  // PREDICATES
  //

  // Boundary predicates for the M/N edges of the problem. A and B only need
  // predication along their outer (M / N) mode, hence the stride-0 K mode.
  Tensor tApA_g2s = make_tensor<bool>(make_shape(size<1>(tAsA_g2s), size<2>(tAsA_g2s)), Stride<_1, _0>{});  // (ACPY_M, ACPY_K)
  Tensor tBpB_g2s = make_tensor<bool>(make_shape(size<1>(tBsB_g2s), size<2>(tBsB_g2s)), Stride<_1, _0>{});  // (BCPY_N, BCPY_K)
  Tensor tCpC_g2s = make_tensor<bool>(make_shape(size<1>(tCsC_g2s), size<2>(tCsC_g2s)), Stride<_1, Int<size<1>(tCsC_g2s)>>{});  // (CCPY_M, CCPY_N)

  // Coordinate (identity) tensors used to compare tile-local coordinates
  // against the residual extents computed above.
  Tensor cA = make_identity_tensor(make_shape(size<0>(sA), size<1>(sA)));    // (BLK_M,BLK_K) -> (blk_m,blk_k)
  Tensor cB = make_identity_tensor(make_shape(size<0>(sB), size<1>(sB)));    // (BLK_N,BLK_K) -> (blk_n,blk_k)
  Tensor cC = make_identity_tensor(make_shape(size<0>(sC), size<1>(sC)));    // (BLK_M,BLK_N) -> (blk_m,blk_n)

  Tensor tAcA_g2s = g2s_thr_copy_a.partition_S(cA);    // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
  Tensor tBcB_g2s = g2s_thr_copy_b.partition_S(cB);    // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k)
  Tensor tCcC_g2s = g2s_thr_copy_c.partition_S(cC);    // (CCPY,CCPY_M,CCPY_N) -> (blk_m,blk_n)

  #pragma unroll
  for (int m = 0; m < size<0>(tApA_g2s); ++m) {
    tApA_g2s(m,0) = get<0>(tAcA_g2s(0,m,0)) < m_max_coord;
  }
  #pragma unroll
  for (int n = 0; n < size<0>(tBpB_g2s); ++n) {
    tBpB_g2s(n,0) = get<0>(tBcB_g2s(0,n,0)) < n_max_coord;
  }
  #pragma unroll
  for (int m = 0; m < size<0>(tCpC_g2s); ++m) {
    #pragma unroll
    for (int n = 0; n < size<1>(tCpC_g2s); ++n) {
      tCpC_g2s(m,n) = elem_less(tCcC_g2s(0,m,n), make_coord(m_max_coord,n_max_coord));
    }
  }

  //
  // END PREDICATES
  //

  typename Spec::TiledCopyA_S2R s2r_tiled_copy_a;
  ThrCopy s2r_thr_copy_a = s2r_tiled_copy_a.get_slice(tid);
  Tensor tAsA_s2r = s2r_thr_copy_a.partition_S(sA);     // (CPY, CPY_M, CPY_K, PIPE)
  /// retile_D does not allocate new storage; it re-views the registers of
  /// tCrA so the shared->register copies below land directly in the MMA
  /// operand fragments.
  Tensor tArA_s2r = s2r_thr_copy_a.retile_D(tCrA);      // (CPY, CPY_M, CPY_K)

  typename Spec::TiledCopyB_S2R s2r_tiled_copy_b;
  ThrCopy s2r_thr_copy_b = s2r_tiled_copy_b.get_slice(tid);
  Tensor tBsB_s2r = s2r_thr_copy_b.partition_S(sB);     // (CPY, CPY_M, CPY_K, PIPE)
  Tensor tBrB_s2r = s2r_thr_copy_b.retile_D(tCrB);      // (CPY, CPY_M, CPY_K)

  typename Spec::TiledCopyC_S2R s2r_tiled_copy_c;
  ThrCopy s2r_thr_copy_c = s2r_tiled_copy_c.get_slice(tid);
  Tensor tCsC_s2r = s2r_thr_copy_c.partition_S(sC);     // (CPY, CPY_M, CPY_K)
  Tensor tCrC_s2r = s2r_thr_copy_c.retile_D(tCrC);      // (CPY, CPY_M, CPY_K)

  //
  // Prefetch
  //

  /// Fused-C variant: stage the C tile into shared memory first; it becomes
  /// the initial accumulator inside the main loop (ik == 0).
  if constexpr (!IsGemm) {
    clear(tCsC_g2s);
    copy_if(g2s_tiled_copy_c, tCpC_g2s, tCgC_g2s, tCsC_g2s);
  }

  int NTilesK = ceil_div(K, kTileK);

  clear(tAsA_g2s);
  clear(tBsB_g2s);

  // Prefetch K-tile 0. Because gA/gB were shifted by k_residue, the first
  // K-tile may contain padding; skip the copy slices that fall entirely in
  // the padded region.
  #pragma unroll
  for (int k = 0; k < size<2>(tAsA_g2s); ++k) {
    if (get<1>(tAcA_g2s(0,0,k)) >= -k_residue) {      // blk_k coord < residue_k (gA shifted)
      copy_if(g2s_tiled_copy_a, tApA_g2s(_, k), tAgA_g2s(_, _, k, 0), tAsA_g2s(_, _, k, 0));
    }
  }

  #pragma unroll
  for (int k = 0; k < size<2>(tBsB_g2s); ++k) {
    if (get<1>(tBcB_g2s(0,0,k)) >= -k_residue) {      // blk_k coord < residue_k (gB shifted)
      copy_if(g2s_tiled_copy_b, tBpB_g2s(_, k), tBgB_g2s(_, _, k, 0), tBsB_g2s(_, _, k, 0));
    }
  }

  cp_async_fence();

  // Prefetch K-tiles 1 .. G2S_Stages-2 (one cp.async commit group per stage).
  #pragma unroll
  for (int ik = 1; ik < G2S_Stages - 1; ++ik) {
    // Set all predicates to false if we are going to overshoot bounds
    if (ik == NTilesK) {
      clear(tApA_g2s);
      clear(tBpB_g2s);
    }

    copy_if(g2s_tiled_copy_a, tApA_g2s, tAgA_g2s(_, _, _, ik), tAsA_g2s(_, _, _, ik));
    copy_if(g2s_tiled_copy_b, tBpB_g2s, tBgB_g2s(_, _, _, ik), tBsB_g2s(_, _, _, ik));

    cp_async_fence();
  }

  // Allow at most G2S_Stages-2 commit groups to remain in flight, i.e. the
  // oldest prefetched stage has landed in shared memory.
  cp_async_wait<G2S_Stages - 2>();
  __syncthreads();

  //
  // MAINLOOP
  //

  int g2s_gmem_pipe = G2S_Stages - 1;   // next K-tile index to fetch from gmem
  int g2s_smem_pipe = G2S_Stages - 1;   // smem stage that fetch writes into
  int s2r_smem_pipe = 0;                // smem stage the MMA consumes

  for (int ik = 0; ik < NTilesK; ++ik) {

    // Synchronously copy the current (kTileM, kTileK) A stage and
    // (kTileN, kTileK) B stage from shared memory into registers.
    copy(s2r_tiled_copy_a, tAsA_s2r(_, _, _, s2r_smem_pipe), tArA_s2r);
    copy(s2r_tiled_copy_b, tBsB_s2r(_, _, _, s2r_smem_pipe), tBrB_s2r);

    {
      // Set all predicates to false if we are going to overshoot bounds
      if (g2s_gmem_pipe == NTilesK) {
        clear(tApA_g2s);
        clear(tBpB_g2s);
      }

      /// Multistage pipeline: after the smem->reg copies above have freed the
      /// consumer stage, kick off the next gmem->smem stage with cp.async.
      copy_if(g2s_tiled_copy_a, tApA_g2s, tAgA_g2s(_, _, _, g2s_gmem_pipe), tAsA_g2s(_, _, _, g2s_smem_pipe));
      copy_if(g2s_tiled_copy_b, tBpB_g2s, tBgB_g2s(_, _, _, g2s_gmem_pipe), tBsB_g2s(_, _, _, g2s_smem_pipe));

      cp_async_fence();
      ++g2s_gmem_pipe;
      ++g2s_smem_pipe;
      g2s_smem_pipe = (g2s_smem_pipe == G2S_Stages) ? 0 : g2s_smem_pipe;
    }

    if (ik == 0) {
      if constexpr (IsGemm) {
        clear(tCrC);  // Set the accumulators to zero
      } else {
        copy(s2r_tiled_copy_c, tCsC_s2r, tCrC_s2r);  // accumulator := C tile
      }
    }

    gemm(tiled_mma, tCrC, tCrA, tCrB, tCrC);

    cp_async_wait<G2S_Stages - 2>();
    __syncthreads();
    ++s2r_smem_pipe;
    s2r_smem_pipe = (s2r_smem_pipe == G2S_Stages) ? 0 : s2r_smem_pipe;

  }

  cp_async_wait<0>();
  __syncthreads();
  /// Epilogue: stage the result through shared memory, then write it out to
  /// global memory (reg -> smem -> gmem).
  if constexpr (!IsCvtPrecision) {
    typename Spec::TiledCopyC_R2S r2s_tiled_copy_c;
    ThrCopy r2s_thr_copy_c = r2s_tiled_copy_c.get_slice(tid);
    Tensor tCrC_r2s = r2s_thr_copy_c.retile_S(tCrC);    // (CPY, CPY_M, CPY_N)
    Tensor tCsC_r2s = r2s_thr_copy_c.partition_D(sC);   // (CPY, CPY_M, CPY_N)
    copy(r2s_tiled_copy_c, tCrC_r2s, tCsC_r2s);

    __syncthreads();

    typename Spec::TiledCopyC_S2G s2g_tiled_copy_c;
    ThrCopy s2g_thr_copy_c = s2g_tiled_copy_c.get_slice(tid);
    Tensor tCsC_s2g = s2g_thr_copy_c.partition_S(sC);   // (CPY, CPY_M, CPY_N)
    Tensor tCgC_s2g = s2g_thr_copy_c.partition_D(gC);   // (CPY, CPY_M, CPY_N)

    //
    // PREDICATES
    //

    Tensor tCpC_s2g = make_tensor<bool>(make_shape(size<1>(tCgC_s2g), size<2>(tCgC_s2g)), Stride<_1, Int<size<1>(tCgC_s2g)>>{});  // (CCPY_M, CCPY_N)
    Tensor tCcC_s2g = s2g_thr_copy_c.partition_S(cC);    // (CCPY,CCPY_M,CCPY_N) -> (blk_m,blk_n)

    #pragma unroll
    for (int m = 0; m < size<0>(tCpC_s2g); ++m) {
      #pragma unroll
      for (int n = 0; n < size<1>(tCpC_s2g); ++n) {
        tCpC_s2g(m,n) = elem_less(tCcC_s2g(0,m,n), make_coord(m_max_coord,n_max_coord));
      }
    }

    //
    // END PREDICATES
    //

    copy_if(s2g_tiled_copy_c, tCpC_s2g, tCsC_s2g, tCgC_s2g);

  } else {

    auto t = make_tensor_like<OutType>(tCrC);
    copy(tCrC, t);  // Convert precision

    typename Spec::TiledCopyO_R2S r2s_tiled_copy_o;
    ThrCopy r2s_thr_copy_o = r2s_tiled_copy_o.get_slice(tid);
    Tensor tOrC_r2s = r2s_thr_copy_o.retile_S(t);       // (CPY, CPY_M, CPY_N)
    Tensor tOsO_r2s = r2s_thr_copy_o.partition_D(sO);   // (CPY, CPY_M, CPY_N)
    copy(r2s_tiled_copy_o, tOrC_r2s, tOsO_r2s);

    __syncthreads();

    typename Spec::TiledCopyO_S2G s2g_tiled_copy_o;
    ThrCopy s2g_thr_copy_o = s2g_tiled_copy_o.get_slice(tid);
    Tensor tOsO_s2g = s2g_thr_copy_o.partition_S(sO);   // (CPY, CPY_M, CPY_N)
    Tensor tOgO_s2g = s2g_thr_copy_o.partition_D(gO);   // (CPY, CPY_M, CPY_N)

    //
    // PREDICATES
    //

    Tensor tOpO_s2g = make_tensor<bool>(make_shape(size<1>(tOgO_s2g), size<2>(tOgO_s2g)), Stride<_1, Int<size<1>(tOgO_s2g)>>{});  // (OCPY_M, OCPY_N)
    Tensor cO = make_identity_tensor(make_shape(size<0>(sO), size<1>(sO)));    // (BLK_M,BLK_N) -> (blk_m,blk_n)
    Tensor tOcO_s2g = s2g_thr_copy_o.partition_S(cO);    // (OCPY,OCPY_M,OCPY_N) -> (blk_m,blk_n)

    #pragma unroll
    for (int m = 0; m < size<0>(tOpO_s2g); ++m) {
      #pragma unroll
      for (int n = 0; n < size<1>(tOpO_s2g); ++n) {
        tOpO_s2g(m,n) = elem_less(tOcO_s2g(0,m,n), make_coord(m_max_coord,n_max_coord));
      }
    }

    //
    // END PREDICATES
    //

    copy_if(s2g_tiled_copy_o, tOpO_s2g, tOsO_s2g, tOgO_s2g);
  }
}

namespace spec {

using namespace cute;

// Compile-time configuration for the `pipelining` kernel: selects the SM80
// MMA atom from the dtype triple, builds the tiled MMA, the tiled copies for
// every data path (gmem<->smem<->registers), the swizzled shared-memory
// layouts, and the resulting shared-memory byte sizes.
//
// Template parameters:
//   OutType_           element type of the converted output (Outptr)
//   ComputeTypeA/B_    element types of the A / B operands
//   ComputeTypeC_      accumulator / C element type
//   kTileM/N/K_        CTA tile extents
//   G2S_Stages_        global->shared pipeline depth (must be >= 2)
template <typename OutType_, typename ComputeTypeA_, typename ComputeTypeB_, typename ComputeTypeC_,
          int kTileM_, int kTileN_, int kTileK_, int G2S_Stages_ = 3>
struct KernelSpec {
  using OutType = OutType_;
  using ComputeTypeA = ComputeTypeA_;
  using ComputeTypeB = ComputeTypeB_;
  using ComputeTypeC = ComputeTypeC_;

  static constexpr int kTileM = kTileM_;
  static constexpr int kTileN = kTileN_;
  static constexpr int kTileK = kTileK_;

  static constexpr int G2S_Stages = G2S_Stages_;
  static_assert(G2S_Stages >= 2, "G2S_Stages should not be less than 2.");

  // Pick the SM80 16x8x16 mma.sync atom matching the (A, B, C) dtype triple.
  // Unsupported combinations resolve to void and trip the static_assert below.
  using MMA_op = std::conditional_t<
    std::is_same_v<ComputeTypeA, cute::bfloat16_t> &&
    std::is_same_v<ComputeTypeB, cute::bfloat16_t> &&
    std::is_same_v<ComputeTypeC, float>,
    SM80_16x8x16_F32BF16BF16F32_TN,
    std::conditional_t<
      std::is_same_v<ComputeTypeA, cute::half_t> &&
      std::is_same_v<ComputeTypeB, cute::half_t> &&
      std::is_same_v<ComputeTypeC, cute::half_t>,
      SM80_16x8x16_F16F16F16F16_TN,
      std::conditional_t<
        std::is_same_v<ComputeTypeA, cute::half_t> &&
        std::is_same_v<ComputeTypeB, cute::half_t> &&
        std::is_same_v<ComputeTypeC, float>,
        SM80_16x8x16_F32F16F16F32_TN,
        void
      >
    >
  >;

  static_assert(!std::is_same_v<MMA_op, void>, "Unsupported MMA op!");

  using MMA_traits = MMA_Traits<MMA_op>;
  using MMA_atom = MMA_Atom<MMA_traits>;
  using MMA_shape = typename MMA_traits::Shape_MNK;

  // Atom replication across threads: 2 x 4 x 1 copies of the (warp-wide SM80)
  // atom, so size(TiledMMA) below yields the CTA thread count.
  static constexpr int kMmaThrExpandM = 2;
  static constexpr int kMmaThrExpandN = 4;
  static constexpr int kMmaThrExpandK = 1;

  // Value-level repetition (permutation tile): each thread group covers
  // 1 x 2 x 2 MMA-instruction tiles in M/N/K.
  static constexpr int kMmaValExpandM = 1;
  static constexpr int kMmaValExpandN = 2;
  static constexpr int kMmaValExpandK = 2;

  // Overall tile covered by one TiledMMA iteration.
  static constexpr int kMmaTileM = kMmaThrExpandM * kMmaValExpandM * get<0>(MMA_shape{});
  static constexpr int kMmaTileN = kMmaThrExpandN * kMmaValExpandN * get<1>(MMA_shape{});
  static constexpr int kMmaTileK = kMmaThrExpandK * kMmaValExpandK * get<2>(MMA_shape{});

  using MMAThrLayout = decltype(make_layout(make_shape(Int<kMmaThrExpandM>{},  // 2
                                                       Int<kMmaThrExpandN>{},  // 4
                                                       Int<kMmaThrExpandK>{}))); // 1
  using MMATileLayout = Tile<Int<kMmaTileM>,
                             Int<kMmaTileN>,
                             Int<kMmaTileK>>;

  using TiledMMA = decltype(make_tiled_mma(MMA_op{}, MMAThrLayout{}, MMATileLayout{}));

  // Global->shared copies use 16-byte cp.async with the cache-global hint.
  using Copy_G2S_op = SM80_CP_ASYNC_CACHEGLOBAL<cute::uint128_t>;

  // Shared->register copies use ldmatrix (x4) for 16-bit element types and
  // fall back to plain vectorized loads otherwise.
  using Copy_S2R_op_A = std::conditional_t<sizeof(ComputeTypeA) == 2, SM75_U32x4_LDSM_N, AutoVectorizingCopy>;
  using Copy_S2R_op_B = std::conditional_t<sizeof(ComputeTypeB) == 2, SM75_U32x4_LDSM_N, AutoVectorizingCopy>;
  using Copy_S2R_op_C = std::conditional_t<sizeof(ComputeTypeC) == 2, SM75_U32x4_LDSM_N, AutoVectorizingCopy>;

  using CopyA_G2S_atom = Copy_Atom<Copy_G2S_op, ComputeTypeA>;
  using CopyB_G2S_atom = Copy_Atom<Copy_G2S_op, ComputeTypeB>;
  using CopyC_G2S_atom = Copy_Atom<Copy_G2S_op, ComputeTypeC>;

  using CopyA_S2R_atom = Copy_Atom<Copy_S2R_op_A, ComputeTypeA>;
  using CopyB_S2R_atom = Copy_Atom<Copy_S2R_op_B, ComputeTypeB>;
  using CopyC_S2R_atom = Copy_Atom<Copy_S2R_op_C, ComputeTypeC>;

// NOTE(review): __CUDA_ARCH__ is only defined during device compilation, so
// this alias differs between the host and device passes of the same TU. It is
// only consumed by device code here, but confirm the divergence is intended.
#if (defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 900))
  using Copy_R2S_op = SM90_U32x4_STSM_N;   // stmatrix, requires SM90+
#else
  using Copy_R2S_op = AutoVectorizingCopy;
#endif

  // Shared->global stores: plain 16-byte vectorized copies.
  using Copy_S2G_op = UniversalCopy<cute::uint128_t>;

  using CopyC_R2S_atom = Copy_Atom<Copy_R2S_op, ComputeTypeC>;
  using CopyO_R2S_atom = Copy_Atom<Copy_R2S_op, OutType>;

  using CopyC_S2G_atom = Copy_Atom<Copy_S2G_op, ComputeTypeC>;
  using CopyO_S2G_atom = Copy_Atom<Copy_S2G_op, OutType>;

  // CTA thread count: one thread per TiledMMA thread.
  static constexpr int kThreadNum = size(TiledMMA{});
  static constexpr int kThreadsPerWarp = 32;
  static constexpr int kTileM_Copy = cute::min(kThreadsPerWarp, kTileM);
  static constexpr int kTileN_Copy = cute::min(kThreadsPerWarp, kTileN);

  // Elements each thread moves per copy instruction: up to 16 bytes worth,
  // capped so kThreadNum threads cover the whole tile.
  // Here we omit cases that `kAlignedCopyItems < 1`
  static constexpr int kAlignedCopyItemsA = cute::min(128 / 8 / sizeof(ComputeTypeA), kTileK * kTileM_Copy / kThreadNum);
  static constexpr int kAlignedCopyItemsB = cute::min(128 / 8 / sizeof(ComputeTypeB), kTileK * kTileN_Copy / kThreadNum);
  static constexpr int kAlignedCopyItemsC = cute::min(128 / 8 / sizeof(ComputeTypeC), kTileN * kTileM_Copy / kThreadNum);
  static constexpr int kAlignedCopyItemsO = cute::min(128 / 8 / sizeof(OutType), kTileN * kTileM_Copy / kThreadNum);

  // G2S tiled copies: kTile*_Copy threads along the outer (row) mode, the
  // remaining threads along the contiguous inner mode, each thread moving
  // kAlignedCopyItems* consecutive elements.
  using TiledCopyA_G2S = decltype(make_tiled_copy(CopyA_G2S_atom{},
                                      make_layout(make_shape(Int<kTileM_Copy>{}, Int<kThreadNum / kTileM_Copy>{}),
                                                  make_stride(Int<kThreadNum / kTileM_Copy>{}, Int<1>{})),
                                      make_layout(make_shape(Int<1>{}, Int<kAlignedCopyItemsA>{}))));
  using TiledCopyB_G2S = decltype(make_tiled_copy(CopyB_G2S_atom{},
                                      make_layout(make_shape(Int<kTileN_Copy>{}, Int<kThreadNum / kTileN_Copy>{}),
                                                  make_stride(Int<kThreadNum / kTileN_Copy>{}, Int<1>{})),
                                      make_layout(make_shape(Int<1>{}, Int<kAlignedCopyItemsB>{}))));
  using TiledCopyC_G2S = decltype(make_tiled_copy(CopyC_G2S_atom{},
                                      make_layout(make_shape(Int<kTileM_Copy>{}, Int<kThreadNum / kTileM_Copy>{}),
                                                  make_stride(Int<kThreadNum / kTileM_Copy>{}, Int<1>{})),
                                      make_layout(make_shape(Int<1>{}, Int<kAlignedCopyItemsC>{}))));

  // S2R copies matched to the TiledMMA operand/accumulator partitionings.
  using TiledCopyA_S2R = decltype(make_tiled_copy_A(CopyA_S2R_atom{}, TiledMMA{}));
  using TiledCopyB_S2R = decltype(make_tiled_copy_B(CopyB_S2R_atom{}, TiledMMA{}));
  using TiledCopyC_S2R = decltype(make_tiled_copy_C(CopyC_S2R_atom{}, TiledMMA{}));

  using TiledCopyC_R2S = decltype(make_tiled_copy_C(CopyC_R2S_atom{}, TiledMMA{}));
  using TiledCopyO_R2S = decltype(make_tiled_copy_C(CopyO_R2S_atom{}, TiledMMA{}));

  using TiledCopyC_S2G = decltype(make_tiled_copy(CopyC_S2G_atom{},
                                      make_layout(make_shape(Int<kTileM_Copy>{}, Int<kThreadNum / kTileM_Copy>{}),
                                                  make_stride(Int<kThreadNum / kTileM_Copy>{}, Int<1>{})),
                                      make_layout(make_shape(Int<1>{}, Int<kAlignedCopyItemsC>{}))));
  using TiledCopyO_S2G = decltype(make_tiled_copy(CopyO_S2G_atom{},
                                      make_layout(make_shape(Int<kTileM_Copy>{}, Int<kThreadNum / kTileM_Copy>{}),
                                                  make_stride(Int<kThreadNum / kTileM_Copy>{}, Int<1>{})),
                                      make_layout(make_shape(Int<1>{}, Int<kAlignedCopyItemsO>{}))));

  // Swizzled 8 x min(64, kTile*) shared-memory layout atoms (bank-conflict
  // avoidance for ldmatrix-style accesses).
  // NOTE(review): Swizzle<3, 3, 3> permutes a window of 8 rows x 64 elements;
  // if the inner extent is < 64 the swizzle and the layout atom disagree —
  // confirm kTileK and kTileN are >= 64 for configurations used in practice.
  using SmemLayoutAtomA = decltype(composition(
                                      Swizzle<3, 3, 3>{},
                                      make_layout(make_shape(Int<8>{}, Int<cute::min(64, kTileK)>{}),
                                                  make_stride(Int<cute::min(64, kTileK)>{}, Int<1>{}))));
  using SmemLayoutAtomB = decltype(composition(
                                      Swizzle<3, 3, 3>{},
                                      make_layout(make_shape(Int<8>{}, Int<cute::min(64, kTileK)>{}),
                                                  make_stride(Int<cute::min(64, kTileK)>{}, Int<1>{}))));
  using SmemLayoutAtomC = decltype(composition(
                                      Swizzle<3, 3, 3>{},
                                      make_layout(make_shape(Int<8>{}, Int<cute::min(64, kTileN)>{}),
                                                  make_stride(Int<cute::min(64, kTileN)>{}, Int<1>{}))));
  using SmemLayoutAtomO = decltype(composition(
                                      Swizzle<3, 3, 3>{},
                                      make_layout(make_shape(Int<8>{}, Int<cute::min(64, kTileN)>{}),
                                                  make_stride(Int<cute::min(64, kTileN)>{}, Int<1>{}))));
  // Full smem layouts: A/B carry a G2S_Stages pipeline mode; C/O are single tiles.
  using SmemLayoutA = decltype(tile_to_shape(SmemLayoutAtomA{},
                                             make_shape(Int<kTileM>{}, Int<kTileK>{}, Int<G2S_Stages>{})));
  using SmemLayoutB = decltype(tile_to_shape(SmemLayoutAtomB{},
                                             make_shape(Int<kTileN>{}, Int<kTileK>{}, Int<G2S_Stages>{})));
  using SmemLayoutC = decltype(tile_to_shape(SmemLayoutAtomC{},
                                             make_shape(Int<kTileM>{}, Int<kTileN>{})));
  using SmemLayoutO = decltype(tile_to_shape(SmemLayoutAtomO{},
                                             make_shape(Int<kTileM>{}, Int<kTileN>{})));

  // kTileM * kTileK * G2S_Stages * sizeof(ComputeTypeA)
  static constexpr int kShmSizeA = cosize_v<SmemLayoutA> * sizeof(ComputeTypeA);
  // kTileN * kTileK * G2S_Stages * sizeof(ComputeTypeB)
  static constexpr int kShmSizeB = cosize_v<SmemLayoutB> * sizeof(ComputeTypeB);
  // kTileM * kTileN * sizeof(ComputeTypeC)
  static constexpr int kShmSizeC = cosize_v<SmemLayoutC> * sizeof(ComputeTypeC);
  // kTileM * kTileN * sizeof(OutType)
  static constexpr int kShmSizeO = cosize_v<SmemLayoutO> * sizeof(OutType);

};

}  // namespace spec


// Abort the process with a diagnostic if torch tensor `T` does not have
// dtype `DTYPE`. Requires torch, <iostream> and <cstdlib> at the expansion
// site. Note: the trailing `;` after `while (0)` was removed so the macro
// behaves like a single statement (safe inside un-braced if/else).
#define CHECK_TORCH_TENSOR_DTYPE(T, DTYPE)                       \
  do {                                                           \
    if ((T).options().dtype() != (DTYPE)) {                      \
      std::cerr << "Tensor dtype mismatch! Expected: "           \
                << (DTYPE) << ", but got: "                      \
                << (T).options().dtype()                         \
                << " at " << __FILE__                            \
                << ":" << __LINE__ << std::endl;                 \
      std::exit(EXIT_FAILURE);                                   \
    }                                                            \
  } while (0)

// Abort the process with a diagnostic if torch tensor `T` is not exactly
// (M, N)-shaped. Same requirements and do/while(0) fix as above.
#define CHECK_TORCH_TENSOR_SHAPE(T, M, N)                        \
  do {                                                           \
    auto actual_shape = (T).sizes();                             \
    if (actual_shape != torch::IntArrayRef({M, N})) {            \
      std::cerr << "Tensor shape mismatch! Expected: "           \
                << torch::IntArrayRef({M, N})                    \
                << ", but got: " << actual_shape                 \
                << " at " << __FILE__                            \
                << ":" << __LINE__ << std::endl;                 \
      std::exit(EXIT_FAILURE);                                   \
    }                                                            \
  } while (0)

// Dispatch a runtime bool to a compile-time constant: invokes the trailing
// callable with `CONST_NAME` bound as a constexpr bool equal to COND, and
// returns the callable's result.
#define BOOL_SWITCH(COND, CONST_NAME, ...)      \
  [&] {                                         \
    if (COND) {                                 \
      constexpr static bool CONST_NAME = true;  \
      return __VA_ARGS__();                     \
    } else {                                    \
      constexpr static bool CONST_NAME = false; \
      return __VA_ARGS__();                     \
    }                                           \
  }()


using namespace cute;


// Host-side inspection harness for KernelSpec: prints the launch
// configuration the `pipelining` kernel would use and exercises several CuTe
// host-side layout/tensor utilities. The kernel itself is NOT launched here.
int main(int argc, char* argv[]) {

  constexpr int kTileM = 128;
  constexpr int kTileN = 128;
  constexpr int kTileK = 128;
  constexpr int G2S_Stages = 2;

  // Problem sizes deliberately not multiples of the tile sizes, to exercise
  // the kernel's predication paths.
  auto M = 1920;
  auto N = 1080;
  auto K = 1024;

  using OutType = float;
  using ComputeTypeA = cute::half_t;
  using ComputeTypeB = cute::half_t;
  using ComputeTypeC = float;


  using Spec = spec::KernelSpec<OutType, ComputeTypeA, ComputeTypeB, ComputeTypeC,
                   kTileM, kTileN, kTileK, G2S_Stages>;

  dim3 block = Spec::kThreadNum;
  dim3 grid(cute::ceil_div(N, Spec::kTileN), cute::ceil_div(M, Spec::kTileM));

  constexpr int kShmSizeA = Spec::kShmSizeA;
  constexpr int kShmSizeB = Spec::kShmSizeB;
  constexpr int kShmSizeC = Spec::kShmSizeC;
  constexpr int kShmSizeO = Spec::kShmSizeO;

  // Shared memory the kernel requires:
  //   - mainloop + fused-C path (!IsGemm): A stages + B stages + C tile
  //     (the kernel places Cptr_smem at smem + kShmSizeA + kShmSizeB);
  //     the plain-GEMM path needs only A + B since C reuses that region.
  //   - precision-converting epilogue: kShmSizeO (reuses the start of smem).
  // Take the max over both epilogue variants; the previous formula omitted
  // kShmSizeC and undersized the fused-C configuration.
  // NOTE(review): this exceeds the 48 KB default limit; an actual launch
  // needs cudaFuncSetAttribute(kernel,
  //   cudaFuncAttributeMaxDynamicSharedMemorySize, shm_size) first.
  int shm_size = (cute::max(kShmSizeA + kShmSizeB + kShmSizeC, kShmSizeO));

  printf("Block Size: (%d, %d, %d) | Grid Size: (%d, %d, %d) | Shared Memory Size: %d Bytes\n",
          block.x, block.y, block.z, grid.x, grid.y, grid.z, shm_size);


  using copy_a_g2s = typename Spec::TiledCopyA_G2S;
  using copy_b_g2s = typename Spec::TiledCopyB_G2S;
  using copy_c_g2s = typename Spec::TiledCopyC_G2S;

  using copy_a_s2r = typename Spec::TiledCopyA_S2R;
  using copy_b_s2r = typename Spec::TiledCopyB_S2R;
  using copy_c_s2r = typename Spec::TiledCopyC_S2R;

  // print_latex(copy_b_s2r{});

  // Owning host tensor with an explicit (gappy) static layout, to show how
  // size<I> reports the shape modes.
  auto tensor_test = cute::make_tensor<float>(cute::Shape<_2, _5, _7>{}, cute::Stride<_1, _2, _10>{});
  auto shape = cute::Shape<cute::_1, cute::_2, cute::_3>();

  printf("size(tensor_test): ");print(size(tensor_test));printf("\n");
  printf("size<0>(tensor_test): ");print(size<0>(tensor_test));printf("\n");
  printf("size<1>(tensor_test): ");print(size<1>(tensor_test));printf("\n");
  printf("size<2>(tensor_test): ");print(size<2>(tensor_test));printf("\n");

  // Coordinate ("identity") tensor: element (i, j) holds the coordinate
  // (i, j) itself — used by the kernel for boundary predication.
  auto identity_tensor = cute::make_identity_tensor(cute::Shape<_5, _5>{});
  print_tensor(identity_tensor);


  // Inspect the per-thread partitioning of the A gmem->smem tiled copy.
  auto tiled_copy = copy_a_g2s();
  auto thr_copy = tiled_copy.get_slice(0);
  print(thr_copy);

  constexpr int kTileNCount = 32;
  auto tensor = make_identity_tensor(cute::Shape<Int<kTileM>, Int<kTileN>>{});
  auto thr_tensor = thr_copy.partition_S(tensor);
  printf("thr_tensor: ");print_tensor(thr_tensor);
  auto value = thr_tensor(3,1,0);
  print(get<1>(value));
  printf("\n");

  // Inspect the register fragment thread 0 owns for the A operand.
  using TiledMMA = typename Spec::TiledMMA;
  auto tiled_mma = TiledMMA();
  auto thr_mma = tiled_mma.get_slice(0);

  auto tensor_tile = make_tensor<float>(cute::Shape<Int<kTileM>, Int<kTileN>, Int<kTileNCount>>{}); // [128, 128, 32]
  auto fragment_a = thr_mma.partition_fragment_A(tensor_tile(_, _, 0));
  printf("fragment_a: ");print(fragment_a);printf("\n");
  auto flag = cute::is_rmem<decltype(fragment_a)>();
  printf("fragment_a: %d\n", flag);

  printf("***\n");


  // tile_to_shape demo: replicate a 2x4 layout atom until it fills a
  // (16, 16, 8) shape.
  auto global_mem = make_layout(make_shape(_2{}, _4{}), make_stride(_1{}, _2{}));
  auto result = tile_to_shape(global_mem, Shape<_16,_16,_8>{});
  printf("result: ");print(result);printf("\n");

  // local_tile demo with a tiler LARGER than the tensor extents, showing how
  // the tile is clamped/handled for ragged shapes.
  auto gm = make_tensor<float>(cute::Shape<Int<30>, Int<20>, Int<30>>{});
  auto tiler = make_tile(Int<33>{}, Int<23>{}, Int<11>{});

  auto gm_tensor = local_tile(gm, tiler, make_coord(0, 0, _));
  printf("gm_tensor: ");print(gm_tensor);printf("\n");

  return 0;
}
