#ifndef MYAMG_CSRMATRIXOPERATOR_CUH
#define MYAMG_CSRMATRIXOPERATOR_CUH
#include <CSRMatrix.cuh>
#include <iostream>
#include <../Blas/spblas.cuh>


namespace myamg {
    template<typename Integer, typename Real, typename Allocator>
    void matMulVec(const CSRMatrix<Integer, Real, Allocator> &A, const Vector<Real, Allocator> &x, Vector<Real, Allocator> &y) {
        if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
            spmvHost(A.numRows(), A.rowPtr(), A.colIndex(), A.values(), x.values(), y.values());
        } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {
            dim3 block(512);
            dim3 grid((A.numRows() + block.x - 1) / block.x);
            spmvDevice<<<grid, block>>>(A.numRows(), A.rowPtr(), A.colIndex(), A.values(), x.values(), y.values());
        } else {
            std::cerr << "MatMulVec: Unsupported allocator type" << std::endl;
        }
    }

    template<typename Real, typename Allocator>
    void vectorClear(Vector<Real, Allocator> &y) {
        if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
#pragma omp parallel for
            for (size_t i = 0; i < y.size(); i++) {
                y.values()[i] = static_cast<Real>(0);
            }
        } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {
            auto err = cudaMemset(y.values(), 0, y.size() * sizeof(Real));
            if (err != cudaSuccess) {
                std::cerr << "VectorClear: cudaMemset failed with error: " << cudaGetErrorString(err) << std::endl;
            }
        } else {
            std::cerr << "VectorClear: Unsupported allocator type" << std::endl;
        }
    }

    template<typename Integer, typename Real, typename Allocator>
    auto transposeMatrix(const CSRMatrix<Integer, Real, Allocator> &A) {
        const auto n = A.numRows();
        const auto m = A.numCols();
        const auto nnz = A.numNonzeros();
        CSRMatrix<Integer, Real, Allocator> At(m, n, nnz);
        if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
            transposeHost(A.rowPtr(), A.colIndex(), A.values(), At.rowPtr(), At.colIndex(), At.values(), n, m, nnz);
        } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {
            auto *Ap = new Integer[n + 1];
            auto *Ai = new Integer[nnz];
            auto *Av = new Real[nnz];
            auto *Bp = new Integer[m + 1];
            auto *Bi = new Integer[nnz];
            auto *Bv = new Real[nnz];
            cudaMemcpy(Ap, A.rowPtr(), (n + 1) * sizeof(Integer), cudaMemcpyDeviceToHost);
            cudaMemcpy(Ai, A.colIndex(), nnz * sizeof(Integer), cudaMemcpyDeviceToHost);
            cudaMemcpy(Av, A.values(), nnz * sizeof(Real), cudaMemcpyDeviceToHost);
            transposeHost(Ap, Ai, Av, Bp, Bi, Bv, n, m, nnz);
            cudaMemcpy(At.rowPtr(), Bp, (m + 1) * sizeof(Integer), cudaMemcpyHostToDevice);
            cudaMemcpy(At.colIndex(), Bi, nnz * sizeof(Integer), cudaMemcpyHostToDevice);
            cudaMemcpy(At.values(), Bv, nnz * sizeof(Real), cudaMemcpyHostToDevice);
            delete[] Ap;
            delete[] Ai;
            delete[] Av;
            delete[] Bp;
            delete[] Bi;
            delete[] Bv;
        } else {
            std::cerr << "TransposeMatrix: Unsupported allocator type" << std::endl;
        }
        return std::move(At);
    }

    template<typename Integer, typename Real, typename Allocator>
    void validateMatrix(const CSRMatrix<Integer, Real, Allocator> &A) {
        const auto n = A.numRows();
        const auto m = A.numCols();
        const auto nnz = A.numNonzeros();

        if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
            // 检查 1: rowPtr 的最后一项应当等于非零元数目
            if (A.rowPtr()[n] != nnz) {
                throw std::runtime_error("Invalid CSR matrix: rowPtr[numRows] != numNonzeros");
            }

            // 检查 2: colIndex 不应超过 numCols
            for (Integer i = 0; i < nnz; ++i) {
                if (A.colIndex()[i] >= m || A.colIndex()[i] < 0) {
                    throw std::runtime_error("Invalid CSR matrix: colIndex out of bounds");
                }
            }

            // 检查 3: 非零元数目符合 (rowPtr 的递增性)
            for (Integer i = 0; i <= n; ++i) {
                if (A.rowPtr()[i] < 0 || A.rowPtr()[i] > nnz) {
                    throw std::runtime_error("Invalid CSR matrix: rowPtr values out of range");
                }
                if (i > 0 && A.rowPtr()[i] < A.rowPtr()[i - 1]) {
                    throw std::runtime_error("Invalid CSR matrix: rowPtr is not monotonically increasing");
                }
            }
        } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {
            auto *Ap = new Integer[n + 1];
            auto *Ai = new Integer[nnz];

            cudaMemcpy(Ap, A.rowPtr(), (n + 1) * sizeof(Integer), cudaMemcpyDeviceToHost);
            cudaMemcpy(Ai, A.colIndex(), nnz * sizeof(Integer), cudaMemcpyDeviceToHost);

            // 检查 1: rowPtr 的最后一项应当等于非零元数目
            if (Ap[n] != nnz) {
                delete[] Ap;
                delete[] Ai;
                throw std::runtime_error("Invalid CSR matrix: rowPtr[numRows] != numNonzeros");
            }

            // 检查 2: colIndex 不应超过 numCols
            for (Integer i = 0; i < nnz; ++i) {
                if (Ai[i] >= m || Ai[i] < 0) {
                    delete[] Ap;
                    delete[] Ai;
                    throw std::runtime_error("Invalid CSR matrix: colIndex out of bounds");
                }
            }

            // 检查 3: 非零元数目符合 (rowPtr 的递增性)
            for (Integer i = 0; i <= n; ++i) {
                if (Ap[i] < 0 || Ap[i] > nnz) {
                    delete[] Ap;
                    delete[] Ai;
                    throw std::runtime_error("Invalid CSR matrix: rowPtr values out of range");
                }
                if (i > 0 && Ap[i] < Ap[i - 1]) {
                    delete[] Ap;
                    delete[] Ai;
                    throw std::runtime_error("Invalid CSR matrix: rowPtr is not monotonically increasing");
                }
            }

            delete[] Ap;
            delete[] Ai;
        } else {
            throw std::runtime_error("ValidateMatrix: Unsupported allocator type");
        }
    }

    template<typename Real, typename Allocator>
    Real vectorDotProduct(const Vector<Real, Allocator> &x, const Vector<Real, Allocator> &y) {
        if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
            return dotProductHost(x.values(), y.values(), x.size());
        } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {
            return dotProductDevice(x.values(), y.values(), x.size());
        } else {
            std::cerr << "VectorDotProduct: Unsupported allocator type" << std::endl;
            return static_cast<Real>(0);
        }
    }

    template<typename Allocator>
    struct TensorSpMVHandler {
        TensorSpMVHandler() : Ap(nullptr), Ai(nullptr), Av(nullptr), shortData(nullptr), shortColIndex(nullptr), shortResult(nullptr),
                              shortResultHost(nullptr), numShortRows(0), numLongRows(0), numVeryLongRows(0),
                              numShortRowsPadded(0), shortRowsMap(nullptr), longRowsMap(nullptr), veryLongRowsMap(nullptr) {}

        ~TensorSpMVHandler() {
            free();
        }

        void setupShortRows() {
            if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
                                // 处理short, 全部划分为2×8×4的块，不足者补零
                numShortRowsPadded = alignUp<unsigned int>(numShortRows, 8);
                cudaMalloc(&shortData, sizeof(double) * numShortRowsPadded * 8 * 8);
                cudaMalloc(&shortColIndex, sizeof(int) * numShortRowsPadded * 8 * 8);
                cudaMalloc(&shortResult, sizeof(double) * numShortRowsPadded);
                shortResultHost = new double[numShortRowsPadded];
                auto* shortDataHost = new double[numShortRowsPadded * 8 * 8];
                auto* shortColIndexHost = new int[numShortRowsPadded * 8 * 8];
                // 填充0
                std::memset(shortDataHost, 0, sizeof(double) * numShortRowsPadded * 8 * 8);
                std::memset(shortColIndexHost, 0, sizeof(int) * numShortRowsPadded * 8 * 8);
                // 遍历所有short行,填充数据，每八行为一个单位进行处理
                // 内存布局如下
                // |行0的前四列|行1的前四列|...|行7的前四列|行0的后四列|行1的后四列|...|行7的后四列|
#pragma omp parallel for
                for (auto i = 0; i < (numShortRowsPadded / 8); ++i) {
                    // 处理第i个8行块，offset是当前行块的第一行
                    const auto offset = i * 8;
                    // 处理8行块的8行
                    for (auto j = 0; j < 8; ++j) {
                        // 实际行号
                        const auto actualRow = j + offset;
                        if (actualRow >= numShortRows) {
                            break;
                        }
                        // 获取真实行号
                        auto realRow = shortRowsMap[actualRow];
                        // 处理该行的非零元
                        auto numNoneZero = Ap[realRow + 1] - Ap[realRow];
                        for (auto k = 0; k < 8; ++k) {
                            if (k >= numNoneZero) {
                                break;
                            }
                            // 填充数据， 前四列为一组，后四列为一组
                            const auto idx = i * 64 + (k >> 2) * 32 + j * 4 + (k & 3);
                            shortDataHost[idx] = Av[Ap[realRow] + k];
                            shortColIndexHost[idx] = Ai[Ap[realRow] + k];
                        }
                    }
                }
                // 复制到设备
                cudaMemcpy(shortData, shortDataHost, sizeof(double) * numShortRowsPadded * 8 * 8, cudaMemcpyHostToDevice);
                cudaMemcpy(shortColIndex, shortColIndexHost, sizeof(int) * numShortRowsPadded * 8 * 8, cudaMemcpyHostToDevice);
                // 释放host内存
                delete[] shortDataHost;
                delete[] shortColIndexHost;
            } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {

            } else {
                std::cerr << "TensorSpMVHandler::setupShortRows: Unsupported allocator type" << std::endl;
            }
        }

        void setup(const CSRMatrix<int, double, Allocator>& A) {
            if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
                // 统计各种行的数量
                // short: 1-8个非零元
                // long: 9~12个非零元
                // very long: >12个非零元
                Ap = const_cast<int*>(A.rowPtr());
                Ai = const_cast<int*>(A.colIndex());
                Av = const_cast<double*>(A.values());
                const auto nRows = A.numRows();
                for (unsigned int i = 0; i < nRows; ++i) {
                    const auto numNoneZero = Ap[i + 1] - Ap[i];
                    if (numNoneZero > 0) {
                        if (numNoneZero <= 8) {
                            ++numShortRows;
                        } else if (numNoneZero <= 12) {
                            ++numLongRows;
                        } else {
                            ++numVeryLongRows;
                        }
                    }
                }
                // 建立映射
                shortRowsMap = new unsigned int[numShortRows];
                longRowsMap = new unsigned int[numLongRows];
                veryLongRowsMap = new unsigned int[numVeryLongRows];
                unsigned int shortIdx = 0;
                unsigned int longIdx = 0;
                unsigned int veryLongIdx = 0;
                for (unsigned int i = 0; i < nRows; ++i) {
                    const auto numNoneZero = Ap[i + 1] - Ap[i];
                    if (numNoneZero > 0) {
                        if (numNoneZero <= 8) {
                            shortRowsMap[shortIdx++] = i;
                        } else if (numNoneZero <= 12) {
                            longRowsMap[longIdx++] = i;
                        } else {
                            veryLongRowsMap[veryLongIdx++] = i;
                        }
                    }
                }
                // 处理short行
                setupShortRows();
            } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {

            } else {
                std::cerr << "TensorSpMVHandler::setup: Unsupported allocator type" << std::endl;
            }
        }

        void free() {
            if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
                cudaFree(shortData);
                cudaFree(shortColIndex);
                cudaFree(shortResult);
                delete[] shortResultHost;
                delete[] shortRowsMap;
                delete[] longRowsMap;
                delete[] veryLongRowsMap;
            } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {

            } else {
                std::cerr << "TensorSpMVHandler::free: Unsupported allocator type" << std::endl;
            }
        }

        void apply(const Vector<double, Allocator>& x, Vector<double, Allocator>& y) {
            constexpr unsigned int blockSize = 512;
            constexpr unsigned int warpsPerBlock = blockSize / 32;
            if constexpr (AllocatorType<Allocator>::value == MemoryType::Host) {
                // 拷贝x
                double* xDevice;
                cudaMalloc(&xDevice, x.size() * sizeof(double));
                cudaMemcpy(xDevice, x.values(), x.size() * sizeof(double), cudaMemcpyHostToDevice);
                // 处理short行
                dim3 block(blockSize);
                dim3 grid(((numShortRowsPadded / 8) + warpsPerBlock - 1) / warpsPerBlock);
                spmv8cols<<<grid, block>>>(shortData, shortColIndex, xDevice, shortResult);
                // 拷贝结果回host
                cudaMemcpy(shortResultHost, shortResult, sizeof(double) * numShortRowsPadded, cudaMemcpyDeviceToHost);
                // 汇总结果到y
                for (unsigned int i = 0; i < numShortRows; ++i) {
                    const auto row = shortRowsMap[i];
                    y.values()[row] = shortResultHost[i];
                }
                // 处理long和very long行，目前直接计算
                // 测试结果正确性，long行和very long行直接在CPU上计算
#pragma omp parallel for
                for (auto i = 0; i < numLongRows; ++i) {
                    const auto realRow = longRowsMap[i];
                    double sum = 0.0;
                    for (auto j = Ap[realRow]; j < Ap[realRow + 1]; ++j) {
                        sum += Av[j] * x.values()[Ai[j]];
                    }
                    y.values()[realRow] = sum;
                }
#pragma omp parallel for
                for (auto i = 0; i < numVeryLongRows; ++i) {
                    const auto realRow = veryLongRowsMap[i];
                    double sum = 0.0;
                    for (auto j = Ap[realRow]; j < Ap[realRow + 1]; ++j) {
                        sum += Av[j] * x.values()[Ai[j]];
                    }
                    y.values()[realRow] = sum;
                }
            } else if constexpr (AllocatorType<Allocator>::value == MemoryType::Cuda) {

            } else {
                std::cerr << "Unsupported allocator type" << std::endl;
            }
        }

        int* Ap;
        int* Ai;
        double* Av;
        double* shortData;
        int* shortColIndex;
        double* shortResult;
        double* shortResultHost;
        unsigned int numShortRows;
        unsigned int numLongRows;
        unsigned int numVeryLongRows;
        unsigned int numShortRowsPadded;
        unsigned int* shortRowsMap;
        unsigned int* longRowsMap;
        unsigned int* veryLongRowsMap;
    };
}

#endif //MYAMG_CSRMATRIXOPERATOR_CUH
