#ifndef TENSOR_H
#define TENSOR_H
#include <algorithm>
#include <cmath>
#include <cstring>
#include <iomanip>
#include <iostream>
#include <memory>
#include <random>
#include <sstream>
#include <stdexcept>
#include <utility>
#include <vector>

#ifndef MAYBE_USE
#define MAYBE_USE  [[maybe_unused]] [[nodiscard]]
#define VOID_USE   [[maybe_unused]]
#endif

/// Truncating conversion of an arithmetic value to `int` (e.g. 3.9 -> 3, -2.7 -> -2).
/// Spelled-out standard attributes instead of the MAYBE_USE macro; `constexpr`
/// implies `inline`, so the old `inline` keyword is redundant and dropped.
template<typename T>
[[maybe_unused]] [[nodiscard]] constexpr int to_int(T value) noexcept {
    return static_cast<int>(value);
}


/// A 4-D tensor of doubles with NCHW layout (batch, channel, height, width).
/// Storage is a contiguous heap buffer; copies are deep, moves transfer the
/// buffer.  Element-wise ops require identical shapes; `operator*` performs a
/// batched per-channel matrix multiply (H x W) * (H' x W') with W == H'.
class Tensor {
public:
    /// Construct an N x C x H x W tensor with every element zero-initialized.
    Tensor(size_t N, size_t C, size_t H, size_t W)
        : m_N(N), m_C(C), m_H(H), m_W(W), m_size(N * C * H * W),
        // `new double[n]()` value-initializes to 0.0 — no memset needed.
        m_data(new double[m_size](), std::default_delete<double[]>()) {
    }

    /// Deep copy: allocates a fresh buffer and copies all elements.
    Tensor(const Tensor& other)
        : m_N(other.m_N), m_C(other.m_C), m_H(other.m_H), m_W(other.m_W), m_size(other.m_size),
        m_data(new double[m_size], std::default_delete<double[]>()) {
        std::copy(other.m_data.get(), other.m_data.get() + m_size, m_data.get());
    }

    /// Move: steals the buffer in O(1) and leaves `other` as an empty tensor.
    /// Declared because the user-provided copy operations suppress the
    /// implicit moves, which would otherwise silently degrade to deep copies.
    Tensor(Tensor&& other) noexcept
        : m_N(std::exchange(other.m_N, 0)),
        m_C(std::exchange(other.m_C, 0)),
        m_H(std::exchange(other.m_H, 0)),
        m_W(std::exchange(other.m_W, 0)),
        m_size(std::exchange(other.m_size, 0)),
        m_data(std::move(other.m_data)) {
    }

private:
    size_t m_N = 0;      // batch size
    size_t m_C = 0;      // channels
    size_t m_H = 0;      // rows
    size_t m_W = 0;      // columns
    size_t m_size = 0;   // N*C*H*W, cached element count
    // shared_ptr<double[]> selects the array deleter automatically in C++17;
    // the explicit default_delete is kept for clarity at the allocation sites.
    std::shared_ptr<double[]> m_data;

    /// Throws std::invalid_argument unless both tensors have identical NCHW shape.
    void check_same_shape(const Tensor& other) const {
        if (m_N != other.m_N || m_C != other.m_C || m_H != other.m_H || m_W != other.m_W) {
            throw std::invalid_argument("Tensors have different shapes");
        }
    }

    /// Throws unless (H x W) * (H' x W') is a valid per-channel matmul.
    /// The batch and channel counts must also match, otherwise the multiply
    /// loops would index past the end of the smaller tensor (previously this
    /// was unchecked and produced out-of-bounds reads).
    void check_multiplication_shape(const Tensor& other) const {
        if (m_N != other.m_N || m_C != other.m_C || m_W != other.m_H) {
            throw std::invalid_argument("Tensors have incompatible shapes for multiplication");
        }
    }

public:
    /// Copy-assign with the strong exception guarantee: build the copy first,
    /// then commit it via move assignment (copy-and-swap idiom).
    Tensor& operator=(const Tensor& other) {
        if (this != &other) {
            Tensor tmp(other);       // may throw; *this untouched if it does
            *this = std::move(tmp);  // noexcept commit
        }
        return *this;
    }

    /// Move-assign: O(1) buffer transfer; `other` is left empty (shape 0).
    Tensor& operator=(Tensor&& other) noexcept {
        if (this != &other) {
            m_N = std::exchange(other.m_N, 0);
            m_C = std::exchange(other.m_C, 0);
            m_H = std::exchange(other.m_H, 0);
            m_W = std::exchange(other.m_W, 0);
            m_size = std::exchange(other.m_size, 0);
            m_data = std::move(other.m_data);
        }
        return *this;
    }

    /// Unchecked element access at (n, c, h, w) — no bounds checking.
    [[maybe_unused]] [[nodiscard]] inline double& at(size_t n, size_t c, size_t h, size_t w) {
        size_t plane = m_H * m_W;  // elements per (n, c) plane
        return m_data.get()[n * m_C * plane + c * plane + h * m_W + w];
    }

    /// Const overload of unchecked element access.
    [[maybe_unused]] [[nodiscard]] inline const double& at(size_t n, size_t c, size_t h, size_t w) const {
        size_t plane = m_H * m_W;
        return m_data.get()[n * m_C * plane + c * plane + h * m_W + w];
    }

    // Raw buffer and shape accessors.  data() intentionally returns a mutable
    // pointer even on const tensors, matching the original interface.
    [[maybe_unused]] [[nodiscard]] double* data() const { return m_data.get(); }
    [[maybe_unused]] [[nodiscard]] size_t size() const { return m_size; }
    [[maybe_unused]] [[nodiscard]] size_t N() const { return m_N; }
    [[maybe_unused]] [[nodiscard]] size_t C() const { return m_C; }
    [[maybe_unused]] [[nodiscard]] size_t H() const { return m_H; }
    [[maybe_unused]] [[nodiscard]] size_t W() const { return m_W; }

    /// Element-wise sum; both tensors must share the same shape.
    Tensor operator+(const Tensor& other) const {
        check_same_shape(other);
        Tensor result(m_N, m_C, m_H, m_W);
        for (size_t i = 0; i < m_size; ++i) {
            result.m_data.get()[i] = m_data.get()[i] + other.m_data.get()[i];
        }
        return result;
    }

    /// Element-wise difference; both tensors must share the same shape.
    Tensor operator-(const Tensor& other) const {
        check_same_shape(other);
        Tensor result(m_N, m_C, m_H, m_W);
        for (size_t i = 0; i < m_size; ++i) {
            result.m_data.get()[i] = m_data.get()[i] - other.m_data.get()[i];
        }
        return result;
    }

    /// Batched per-channel matrix multiply: result[n][c] = this[n][c] * other[n][c].
    /// The N*C planes are flattened into one index so a single OpenMP loop
    /// parallelizes across all of them.
    Tensor operator*(const Tensor& other) const {
        check_multiplication_shape(other);
        Tensor result(m_N, m_C, m_H, other.m_W);

        const int combined_NC = static_cast<int>(m_N * m_C);
        const int rows = static_cast<int>(m_H);
        const int cols = static_cast<int>(other.m_W);
        const int inner = static_cast<int>(m_W);  // shared dimension (m_W == other.m_H)

#pragma omp parallel for
        for (int nc = 0; nc < combined_NC; ++nc) {
            const int n = nc / static_cast<int>(m_C);
            const int c = nc % static_cast<int>(m_C);
            for (int h = 0; h < rows; ++h) {
                for (int w = 0; w < cols; ++w) {
                    // Accumulate locally; one write per output cell.
                    double sum = 0;
                    for (int k = 0; k < inner; ++k) {
                        sum += at(n, c, h, k) * other.at(n, c, k, w);
                    }
                    result.at(n, c, h, w) = sum;
                }
            }
        }
        return result;
    }

    /// Element-wise quotient; no zero-divisor check (IEEE inf/NaN semantics).
    Tensor operator/(const Tensor& other) const {
        check_same_shape(other);
        Tensor result(m_N, m_C, m_H, m_W);
        for (size_t i = 0; i < m_size; ++i) {
            result.m_data.get()[i] = m_data.get()[i] / other.m_data.get()[i];
        }
        return result;
    }

    /// In-place element-wise sum.
    Tensor& operator+=(const Tensor& other) {
        check_same_shape(other);
        for (size_t i = 0; i < m_size; ++i) {
            m_data.get()[i] += other.m_data.get()[i];
        }
        return *this;
    }

    /// In-place element-wise difference.
    Tensor& operator-=(const Tensor& other) {
        check_same_shape(other);
        for (size_t i = 0; i < m_size; ++i) {
            m_data.get()[i] -= other.m_data.get()[i];
        }
        return *this;
    }

    /// In-place matrix multiply.  Delegates to operator* (the previous version
    /// duplicated the whole multiply loop body) and commits via move.
    Tensor& operator*=(const Tensor& other) {
        *this = *this * other;
        return *this;
    }

    /// In-place element-wise quotient.
    Tensor& operator/=(const Tensor& other) {
        check_same_shape(other);
        for (size_t i = 0; i < m_size; ++i) {
            m_data.get()[i] /= other.m_data.get()[i];
        }
        return *this;
    }

    /// Fill every element with a uniform random value in [low_, up_).
    /// The engine is seeded once per thread instead of constructing a
    /// std::random_device + std::mt19937 pair on every call.
    void TensorRandom(double low_ = 0.0, double up_ = 1.0) {
        static thread_local std::mt19937 gen{ std::random_device{}() };
        std::uniform_real_distribution<> dis(low_, up_);
        for (size_t i = 0; i < m_size; ++i) {
            m_data.get()[i] = dis(gen);
        }
    }

    /// Transpose the last two dimensions: (N, C, H, W) -> (N, C, W, H).
    [[maybe_unused]] [[nodiscard]] Tensor transpose() const {
        Tensor result(m_N, m_C, m_W, m_H);
        double* result_data = result.m_data.get();
        const double* data = m_data.get();

        const int single_channel = static_cast<int>(m_H * m_W);  // elements per plane
        const int third_channel = static_cast<int>(m_C) * single_channel;
        const int combined_NC = static_cast<int>(m_N * m_C);
        const int rows = static_cast<int>(m_H);
        const int cols = static_cast<int>(m_W);

#pragma omp parallel for
        for (int nc = 0; nc < combined_NC; ++nc) {
            const int n = nc / static_cast<int>(m_C);   // batch index of this plane
            const int c = nc % static_cast<int>(m_C);   // channel index of this plane
            const int c_offset = n * third_channel + c * single_channel;  // plane base offset

            for (int h = 0; h < rows; ++h) {
                for (int w = 0; w < cols; ++w) {
                    // Swap the roles of h and w within this plane.
                    result_data[nc * single_channel + w * rows + h] = data[c_offset + h * cols + w];
                }
            }
        }
        return result;
    }

    /// Print every (n, c) plane to stdout with fixed 8-decimal formatting.
    /// Output is assembled in a stringstream first so the whole tensor is
    /// written with a single stream insertion.
    [[maybe_unused]] void print() const {
        std::ostringstream oss;
        oss << std::fixed << std::setprecision(8);  // fixed-point, 8 decimals
        for (size_t n = 0; n < N(); ++n) {
            for (size_t c = 0; c < C(); ++c) {
                oss << "NC=[" << n << "][" << c << "]\n";
                for (size_t h = 0; h < H(); ++h) {
                    for (size_t w = 0; w < W(); ++w) {
                        oss << std::setw(8) << at(n, c, h, w) << " \t";
                    }
                    oss << '\n';  // same bytes as std::endl; no flush on a stringstream
                }
                oss << '\n';
            }
        }
        std::cout << oss.str();
    }

};

#endif // !TENSOR_H