#ifndef MYAMG_SPBLAS_CUH
#define MYAMG_SPBLAS_CUH

#include <algorithm>
#include <cstddef>
#include <type_traits>
#include <unordered_map>
#include <vector>

namespace myamg {
    /**
     * CSR sparse matrix-vector product on the host: y = A * x.
     *
     * @param nRows number of rows of A
     * @param Ap    CSR row-pointer array, length nRows + 1
     * @param Ai    CSR column-index array, length Ap[nRows]
     * @param Av    CSR value array, length Ap[nRows]
     * @param x     dense input vector (length = number of columns of A)
     * @param y     dense output vector, length nRows; fully overwritten
     */
    template<typename Integer, typename Real>
    void spmvHost(const size_t nRows, const Integer *Ap, const Integer *Ai, const Real *Av, const Real *x, Real *y) {
        // Signed 64-bit loop index: OpenMP requires a signed integral loop
        // variable, and the original `auto i = 0` deduced `int`, which both
        // compares signed-vs-unsigned against size_t and would overflow for
        // nRows > INT_MAX.
#pragma omp parallel for
        for (long long i = 0; i < (long long) nRows; ++i) {
            Real temp = 0;
            for (auto j = Ap[i]; j < Ap[i + 1]; ++j) {
                temp += Av[j] * x[Ai[j]];
            }
            y[i] = temp;
        }
    }

    /**
     * CSR SpMV kernel, one thread per row: y = A * x.
     * Launch with at least nRows total threads; surplus threads exit on the
     * bounds check. Array contracts are the same as spmvHost.
     */
    template<typename Integer, typename Real>
    __global__ void spmvDevice(const size_t nRows, const Integer *Ap, const Integer *Ai, const Real *Av, const Real *x,
                               Real *y) {
        // Widen before multiplying: blockIdx.x * blockDim.x is evaluated in
        // 32-bit unsigned arithmetic and can wrap for very large grids before
        // the result is assigned to size_t.
        const size_t idx = (size_t) blockIdx.x * blockDim.x + threadIdx.x;
        if (idx < nRows) {
            Real temp = 0;
            for (auto j = Ap[idx]; j < Ap[idx + 1]; ++j) {
                temp += Av[j] * x[Ai[j]];
            }
            y[idx] = temp;
        }
    }

    /**
     * Transpose an n x m CSR matrix A into preallocated CSR arrays for
     * B = A^T (m x n); equivalent to converting A to CSC.
     *
     * @param Ap,Ai,Av input CSR arrays of A (Ap has length n + 1)
     * @param Bp,Bi,Bv output CSR arrays of B; Bp must have length m + 1,
     *                 Bi and Bv must have length nnz
     * @param n    number of rows of A
     * @param m    number of columns of A
     * @param nnz  number of stored entries (== Ap[n])
     */
    template<typename Integer, typename Real>
    void transposeHost(const Integer *Ap, const Integer *Ai, const Real *Av, Integer *Bp, Integer *Bi, Real *Bv,
                       const size_t n, const size_t m, const size_t nnz) {
        // size_t loop indices avoid the signed/unsigned comparisons and int
        // overflow the original `auto i = 0` loops had for very large m/nnz.
        // Pass 1: count entries per column of A (== entries per row of B).
        for (size_t i = 0; i <= m; ++i) {
            Bp[i] = 0;
        }
        for (size_t j = 0; j < nnz; ++j) {
            ++Bp[Ai[j] + 1];
        }

        // Pass 2: prefix sum turns per-row counts into row pointers of B.
        for (size_t i = 0; i < m; ++i) {
            Bp[i + 1] += Bp[i];
        }

        // Running insertion cursor per row of B, seeded from the row pointers.
        std::vector<Integer> counter(Bp, Bp + m);

        // Pass 3: scatter each entry A(i, col) into row `col` of B. Entries
        // within a row of B come out ordered by source row, so B is sorted.
        for (size_t i = 0; i < n; ++i) {
            for (auto j = Ap[i]; j < Ap[i + 1]; ++j) {
                const Integer col = Ai[j];
                const Integer dest = counter[col]++;

                Bi[dest] = (Integer) i;
                Bv[dest] = Av[j];
            }
        }
    }

    /**
     * Sequential host dot product: returns the sum over i of x[i] * y[i].
     * Returns 0 for n == 0.
     */
    template<typename Real>
    Real dotProductHost(const Real *x, const Real *y, const size_t n) {
        Real acc = 0;
        for (const Real *xEnd = x + n; x != xEnd; ++x, ++y) {
            acc += (*x) * (*y);
        }
        return acc;
    }

    /**
     * Full-warp butterfly sum reduction. After five xor-shuffle rounds every
     * lane holds the sum of all 32 lanes' inputs. Uses the full-warp mask, so
     * all 32 lanes of the warp must reach this call.
     */
    template<typename Real>
    __inline__ __device__ Real warpReduce(Real localSum) {
#pragma unroll
        for (int offset = 16; offset > 0; offset >>= 1) {
            localSum += __shfl_xor_sync(0xffffffff, localSum, offset);
        }
        return localSum;
    }

    /**
     * Grid-stride partial dot product: each block writes the partial sum of
     * globalX[i] * globalY[i] over its strided slice to
     * blockResults[blockIdx.x].
     *
     * Preconditions: blockDim.x is a multiple of 32 and <= 1024 (the final
     * pass assumes at most 32 warps per block), and (blockDim.x / 32) *
     * sizeof(Real) bytes of dynamic shared memory are supplied at launch.
     */
    template<typename Real>
    __global__ void dotProductReduceDevice(Real *blockResults, const Real *globalX, const Real *globalY,
                                           const size_t n) {
        // Declare dynamic shared memory as raw bytes: `extern __shared__ Real
        // sdata[]` gives the same shared symbol conflicting types when this
        // template is instantiated for both float and double in one
        // translation unit, which nvcc rejects.
        extern __shared__ unsigned char sharedBytes[];
        Real *sdata = reinterpret_cast<Real *>(sharedBytes);
        const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
        const unsigned int tid = threadIdx.x;
        const unsigned int warpIdx = tid / 32;
        const unsigned int laneIdx = tid % 32;

        // Grid-stride loop: any grid size covers all n elements.
        Real localSum = 0;
        for (size_t i = idx; i < n; i += (size_t) blockDim.x * gridDim.x) {
            localSum += globalX[i] * globalY[i];
        }

        // Warp-level reduction.
        localSum = warpReduce(localSum);

        // One partial per warp goes to shared memory.
        if (laneIdx == 0) {
            sdata[warpIdx] = localSum;
        }
        __syncthreads();

        // Final reduction of the per-warp partials by the first warp.
        if (warpIdx == 0) {
            localSum = (tid < blockDim.x / 32) ? sdata[laneIdx] : 0;
            localSum = warpReduce(localSum);

            // Lane 0 publishes this block's result to global memory.
            if (tid == 0) {
                blockResults[blockIdx.x] = localSum;
            }
        }
    }

    /**
     * Sum-reduce input[0..n) and write the block's total to result[0].
     * Intended as the second pass of a two-pass reduction, launched with a
     * single block (multiple blocks would all overwrite result[0]).
     *
     * Preconditions: blockDim.x is a multiple of 32 and <= 1024, and
     * (blockDim.x / 32) * sizeof(Real) bytes of dynamic shared memory are
     * supplied at launch. In-place use (result aliasing input) is safe for a
     * single block: all reads complete before the __syncthreads() that
     * precedes the final write.
     */
    template<typename Real>
    __global__ void sumReduceDevice(Real *result, const Real *input, const size_t n) {
        // Raw-byte shared declaration — see dotProductReduceDevice: a typed
        // extern __shared__ array clashes across template instantiations.
        extern __shared__ unsigned char sharedBytes[];
        Real *sdata = reinterpret_cast<Real *>(sharedBytes);
        const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
        const unsigned int tid = threadIdx.x;
        const unsigned int warpIdx = tid / 32;
        const unsigned int laneIdx = tid % 32;

        // Grid-stride accumulation of the input elements.
        Real localSum = 0;
        for (size_t i = idx; i < n; i += (size_t) blockDim.x * gridDim.x) {
            localSum += input[i];
        }

        // Warp-level reduction.
        localSum = warpReduce(localSum);

        // One partial per warp goes to shared memory.
        if (laneIdx == 0) {
            sdata[warpIdx] = localSum;
        }
        __syncthreads();

        // Final reduction of the per-warp partials by the first warp.
        if (warpIdx == 0) {
            localSum = (tid < blockDim.x / 32) ? sdata[laneIdx] : 0;
            localSum = warpReduce(localSum);

            if (tid == 0) {
                result[0] = localSum;
            }
        }
    }

    /**
     * Dot product of two device-resident length-n vectors, returned on the
     * host. Two-pass reduction: per-block partials, then a single-block sum.
     * Synchronous — the blocking cudaMemcpy at the end waits for both kernels.
     *
     * Note: allocates and frees a small temporary buffer on every call; reuse
     * a workspace if this sits on a hot path.
     *
     * @return the dot product; 0 when n == 0 or the temporary allocation fails.
     */
    template<typename Real>
    Real dotProductDevice(const Real *d_x, const Real *d_y, const size_t n) {
        const int blockSize = 512;
        // Clamp to [1, 1024]: at least one block so the launch is valid even
        // for n == 0 (the original computed 0 blocks there, an invalid launch
        // configuration), and at most 1024 partials for the second pass.
        const int numBlocks = std::max(1, std::min((int) ((n + blockSize - 1) / blockSize), 1024));

        Real *d_blockResults = nullptr;
        if (cudaMalloc(&d_blockResults, numBlocks * sizeof(Real)) != cudaSuccess) {
            // Original passed the uninitialized pointer straight to a kernel.
            return 0;
        }

        dotProductReduceDevice<<<numBlocks, blockSize, (blockSize / 32) * sizeof(Real)>>>(
            d_blockResults, d_x, d_y, n);

        // In-place second pass over the per-block partials.
        sumReduceDevice<<<1, blockSize, (blockSize / 32) * sizeof(Real)>>>(
            d_blockResults, d_blockResults, numBlocks);

        Real result = 0;
        cudaMemcpy(&result, d_blockResults, sizeof(Real), cudaMemcpyDeviceToHost);
        cudaFree(d_blockResults);

        return result;
    }

    // Load one value from global memory with the .cs (cache-streaming,
    // evict-first) hint so streaming reads do not displace reusable cache
    // lines. Dispatches at compile time to the PTX ld.global.cs.* instruction
    // matching T; unsupported types fail with a static_assert.
    // NOTE(review): for the 8/16-bit cases (char, unsigned char, short,
    // unsigned short) the "=r" output constraint names a 32-bit register
    // while `val` is narrower — confirm these instantiations compile and
    // behave as intended under nvcc before relying on them.
    template<typename T>
    __device__ __forceinline__ T loadGlobalWithCacheStreaming(const T *addr) {
        T val;
        if constexpr (std::is_same_v<T, float>) {
            asm volatile("ld.global.cs.f32 %0, [%1];" : "=f"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, double>) {
            asm volatile("ld.global.cs.f64 %0, [%1];" : "=d"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, int>) {
            asm volatile("ld.global.cs.s32 %0, [%1];" : "=r"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, unsigned int>) {
            asm volatile("ld.global.cs.u32 %0, [%1];" : "=r"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, char>) {
            asm volatile("ld.global.cs.s8 %0, [%1];" : "=r"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, unsigned char>) {
            asm volatile("ld.global.cs.u8 %0, [%1];" : "=r"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, short>) {
            asm volatile("ld.global.cs.s16 %0, [%1];" : "=r"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, unsigned short>) {
            asm volatile("ld.global.cs.u16 %0, [%1];" : "=r"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, long long>) {
            asm volatile("ld.global.cs.s64 %0, [%1];" : "=l"(val) : "l"(addr));
        } else if constexpr (std::is_same_v<T, unsigned long long>) {
            asm volatile("ld.global.cs.u64 %0, [%1];" : "=l"(val) : "l"(addr));
        } else {
            static_assert(sizeof(T) == 0, "Unsupported type for loadGlobalWithCacheStreaming");
        }
        return val;
    }

    // Store one value to global memory with the .cs (cache-streaming,
    // evict-first) hint, the write-side counterpart of
    // loadGlobalWithCacheStreaming. Dispatches at compile time to the PTX
    // st.global.cs.* instruction matching T; unsupported types fail with a
    // static_assert.
    // NOTE(review): as with the load variant, the 8/16-bit cases pass a
    // narrower-than-32-bit `val` through an "r" (32-bit register) constraint —
    // confirm these instantiations under nvcc before relying on them.
    template<typename T>
    __device__ __forceinline__ void storeGlobalWithCacheStreaming(T *addr, T val) {
        if constexpr (std::is_same_v<T, float>) {
            asm volatile("st.global.cs.f32 [%0], %1;" :: "l"(addr), "f"(val));
        } else if constexpr (std::is_same_v<T, double>) {
            asm volatile("st.global.cs.f64 [%0], %1;" :: "l"(addr), "d"(val));
        } else if constexpr (std::is_same_v<T, int>) {
            asm volatile("st.global.cs.s32 [%0], %1;" :: "l"(addr), "r"(val));
        } else if constexpr (std::is_same_v<T, unsigned int>) {
            asm volatile("st.global.cs.u32 [%0], %1;" :: "l"(addr), "r"(val));
        } else if constexpr (std::is_same_v<T, char>) {
            asm volatile("st.global.cs.s8 [%0], %1;" :: "l"(addr), "r"(val));
        } else if constexpr (std::is_same_v<T, unsigned char>) {
            asm volatile("st.global.cs.u8 [%0], %1;" :: "l"(addr), "r"(val));
        } else if constexpr (std::is_same_v<T, short>) {
            asm volatile("st.global.cs.s16 [%0], %1;" :: "l"(addr), "r"(val));
        } else if constexpr (std::is_same_v<T, unsigned short>) {
            asm volatile("st.global.cs.u16 [%0], %1;" :: "l"(addr), "r"(val));
        } else if constexpr (std::is_same_v<T, long long>) {
            asm volatile("st.global.cs.s64 [%0], %1;" :: "l"(addr), "l"(val));
        } else if constexpr (std::is_same_v<T, unsigned long long>) {
            asm volatile("st.global.cs.u64 [%0], %1;" :: "l"(addr), "l"(val));
        } else {
            static_assert(sizeof(T) == 0, "Unsupported type for storeGlobalWithCacheStreaming");
        }
    }

    // One m8n8k4 FP64 tensor-core MMA step via inline PTX: the warp
    // cooperatively computes an 8x8x4 tile of D = A*B + C, with this thread
    // contributing a 2-element accumulator slice (acc[0], acc[1]) and one
    // element each of the A (row-major) and B (col-major) fragments. The
    // accumulator is updated in place ("%0, %1" appear as both output and
    // addend operands). Requires SM80+ — f64 mma.sync was introduced on A100.
    // NOTE(review): fragA/fragB are non-const references but are only read
    // here — consider taking them by value or const reference; confirm no
    // caller relies on the reference.
    __device__ __forceinline__ void FP64m8n8k4MMA(double *acc, double &fragA, double &fragB) {
        asm volatile(
            "mma.sync.aligned.m8n8k4.row.col.f64.f64.f64.f64"
            " { %0, %1 }, "
            " { %2 }, "
            " { %3 }, "
            " { %0, %1 };"
            : "+d"(acc[0]), "+d"(acc[1]):
            "d"(fragA), "d"(fragB)
        );
    }

// Tile dimensions of the m8n8k4 FP64 tensor-core MMA instruction used above.
#define MMA_M 8
#define MMA_N 8
#define MMA_K 4

    /**
     * Round n up to the next multiple of `alignment` (alignment > 0).
     * Generalized from the original bit-trick `(n + a - 1) & ~(a - 1)`, which
     * silently returned wrong results for non-power-of-two alignments; this
     * form is correct for any positive alignment and yields identical results
     * for powers of two.
     */
    template<typename T>
    inline constexpr T alignUp(T n, T alignment) {
        return ((n + alignment - 1) / alignment) * alignment;
    }

    // Kernel for the tensor-core SpMV path; the name suggests it processes 8
    // columns per step (matching MMA_M/MMA_N = 8). Defined in the
    // corresponding .cu file. NOTE(review): the expected layout/padding of
    // `data` and `colIndex` is fixed by that implementation — confirm there
    // before launching this kernel directly.
    __global__ void spmv8cols(const double* data, const int* colIndex, const double* x, double* y);

    // Host entry point for the tensor-core-based SpMV with A given in CSR
    // form (Ap/Ai/Av); defined in the corresponding .cu file.
    void tensorSpMV(const int* Ap, const int* Ai, const double* Av, const double* x, double* y, unsigned int nRows);

}

#endif //MYAMG_SPBLAS_CUH
