#include <algorithm>
#include <cstdarg>
#include <functional>
#include <initializer_list>
#include <iostream>
#include <numeric>
#include <print>
#include <random>
#include <ranges>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>

template <typename T>
class Tensor {
private:
    std::vector<T> data;       // flat, contiguous, row-major element storage
    std::vector<int> shape;    // tensor shape, e.g. {2,3} = 2 rows x 3 cols; {} = 0-dim scalar
    std::vector<int> strides;  // per-dimension step into `data` (row-major)
    std::vector<T> grad;       // gradient buffer; always sized to data.size()

    // Every dimension must be a positive integer; throws invalid_argument otherwise.
    static void check_shape_validity(const std::vector<int>& s) {
        for (int dim : s) {
            if (dim <= 0) {
                throw std::invalid_argument("Tensor dimensions must be positive integers");
            }
        }
    }

    // Total element count; the empty (scalar) shape yields 1.
    static int compute_size(const std::vector<int>& s) {
        return std::accumulate(s.begin(), s.end(), 1, std::multiplies<int>());
    }

    // Row-major strides: for shape {2,3} the strides are {3,1}.
    static std::vector<int> compute_strides(const std::vector<int>& s) {
        int n = static_cast<int>(s.size());
        std::vector<int> res(n, 1);
        for (int i = n - 2; i >= 0; --i) {
            res[i] = res[i + 1] * s[i + 1];
        }
        return res;
    }

    // Map a multi-dimensional index to a flat offset into `data`.
    // Throws invalid_argument on rank mismatch, out_of_range on a bad index.
    int compute_offset(const std::vector<int>& indices) const {
        if (indices.size() != shape.size()) {
            throw std::invalid_argument("Indices count does not match tensor dimensions");
        }
        int offset = 0;
        for (size_t i = 0; i < indices.size(); ++i) {
            if (indices[i] < 0 || indices[i] >= shape[i]) {
                throw std::out_of_range("Index out of bounds");
            }
            offset += indices[i] * strides[i];
        }
        return offset;
    }

    // NumPy-style broadcast shape: align shapes at the trailing dimension;
    // each aligned pair must be equal or contain a 1. Missing leading
    // dimensions are treated as 1.
    static std::vector<int> compute_broadcast_shape(
        const std::vector<int>& shape_a,
        const std::vector<int>& shape_b)
    {
        size_t ndim = std::max(shape_a.size(), shape_b.size());
        std::vector<int> result_shape(ndim);

        for (size_t i = 1; i <= ndim; ++i) {
            int dim_a = (i <= shape_a.size()) ? shape_a[shape_a.size() - i] : 1;
            int dim_b = (i <= shape_b.size()) ? shape_b[shape_b.size() - i] : 1;

            if (dim_a != dim_b && dim_a != 1 && dim_b != 1) {
                throw std::invalid_argument("Incompatible shapes for broadcasting");
            }
            result_shape[ndim - i] = std::max(dim_a, dim_b);
        }
        return result_shape;
    }

    // Strides for reading a tensor of `original_shape` as if it had
    // `target_shape`: broadcast dimensions (size 1 -> size n) get stride 0
    // so the single value is re-read; leading padded dims also get stride 0.
    static std::vector<int> compute_broadcast_strides(
        const std::vector<int>& original_shape,
        const std::vector<int>& original_strides,
        const std::vector<int>& target_shape)
    {
        int original_rank = static_cast<int>(original_shape.size());
        int target_rank = static_cast<int>(target_shape.size());
        int diff = target_rank - original_rank;

        // Left-pad shape with 1s and strides with 0s to the target rank.
        std::vector<int> extended_shape(original_shape);
        extended_shape.insert(extended_shape.begin(), diff, 1);
        std::vector<int> extended_strides(original_strides);
        extended_strides.insert(extended_strides.begin(), diff, 0);

        std::vector<int> result_strides(target_rank);
        for (int i = 0; i < target_rank; ++i) {
            int orig_dim = extended_shape[i];
            int target_dim = target_shape[i];
            if (orig_dim == target_dim) {
                result_strides[i] = extended_strides[i]; // non-broadcast dim: keep stride
            }
            else if (orig_dim == 1) {
                result_strides[i] = 0; // broadcast dim: repeat the same value
            }
            else {
                throw std::logic_error("Invalid broadcast stride computation");
            }
        }
        return result_strides;
    }

    // Shared engine for the element-wise broadcasting operators.
    // `op(x, y)` combines one element of `a` with one element of `b`.
    template <typename Op>
    static Tensor<T> broadcast_elementwise(const Tensor<T>& a, const Tensor<T>& b, Op op) {
        auto out_shape = compute_broadcast_shape(a.shape, b.shape);
        Tensor<T> result(out_shape);

        auto a_strides = compute_broadcast_strides(a.shape, a.strides, out_shape);
        auto b_strides = compute_broadcast_strides(b.shape, b.strides, out_shape);

        int rank = static_cast<int>(out_shape.size());
        int total = compute_size(out_shape);
        std::vector<int> coords(rank);  // reused each iteration
        for (int i = 0; i < total; ++i) {
            // Decompose the linear output index into multi-dim coordinates.
            int temp = i;
            for (int j = rank - 1; j >= 0; --j) {
                coords[j] = temp % out_shape[j];
                temp /= out_shape[j];
            }
            // Re-project the coordinates through each operand's broadcast strides.
            int a_idx = 0, b_idx = 0;
            for (int j = 0; j < rank; ++j) {
                a_idx += coords[j] * a_strides[j];
                b_idx += coords[j] * b_strides[j];
            }
            result.data[i] = op(a.data[a_idx], b.data[b_idx]);
        }
        return result;
    }

    // Recursive pretty-printer: renders the sub-tensor rooted at `start_idx`
    // for dimension `depth` as "[...]" with ", " separators.
    std::string print_recursive(int depth, int start_idx) const {
        std::stringstream ss;
        ss << "[";
        const bool last_dim = (depth == static_cast<int>(shape.size()) - 1);
        for (int i = 0; i < shape[depth]; ++i) {
            int idx = start_idx + i * strides[depth];
            if (last_dim) {
                ss << data[idx];                        // innermost dim: raw elements
            }
            else {
                ss << print_recursive(depth + 1, idx);  // outer dim: recurse
            }
            if (i != shape[depth] - 1) ss << ", ";
        }
        ss << "]";
        return ss.str();
    }

public:
    // Construct a zero-initialized tensor of the given shape,
    // e.g. Tensor<float> t({2, 3}). Throws on non-positive dimensions.
    Tensor(const std::vector<int>& init_shape) : shape(init_shape) {
        check_shape_validity(shape);
        strides = compute_strides(shape);
        data.resize(compute_size(shape));
        grad.resize(data.size(), T{0});  // fix: grad must track data in every ctor
    }

    // Brace-list convenience form; delegates so grad handling stays in one place.
    Tensor(std::initializer_list<int> init_shape)
        : Tensor(std::vector<int>(init_shape)) {}

    // 0-dim (scalar) tensor holding one value. Intentionally non-explicit so
    // the scalar operator overloads can wrap plain values.
    Tensor(T scalar_value) : data(1, scalar_value), grad(1, T{0}) {}

    // Default: scalar zero.
    Tensor() : data(1, T{}), grad(1, T{0}) {}

    // Rule of Zero: the implicit copy/move operations are correct (they copy
    // ALL members, including grad, which the old hand-written copy ctor lost)
    // and keep the class movable.

    // --- shape queries ---
    const std::vector<int>& get_shape() const { return shape; }
    int ndim() const { return static_cast<int>(shape.size()); }  // number of dimensions
    size_t size() const { return data.size(); }                  // total element count
    bool is_scalar() const { return shape.empty(); }             // 0-dim tensor

    // --- raw data access ---
    const std::vector<T>& get_data() const { return data; }
    std::vector<T>& get_data() { return data; }

    // Replace the whole contents; the input must match the element count.
    void set_data(const std::vector<T>& input_data) {
        if (input_data.size() != data.size()) {
            throw std::invalid_argument("Input size does not match tensor size");
        }
        data = input_data;
    }

    void set_data(std::initializer_list<T> init_list) {
        if (init_list.size() != data.size()) {
            throw std::invalid_argument("Input size does not match tensor size");
        }
        std::copy(init_list.begin(), init_list.end(), data.begin());
    }

    // Bounds-checked multi-dimensional indexing via vector<int>.
    T& operator[](const std::vector<int>& indices) {
        return data[compute_offset(indices)];
    }

    const T& operator[](const std::vector<int>& indices) const {
        return data[compute_offset(indices)];
    }

    // Variadic convenience indexing, e.g. tensor.at(0, 1) for row 0, col 1.
    template <typename... Args>
    T& at(Args... args) {
        std::vector<int> indices = { static_cast<int>(args)... };
        return (*this)[indices];
    }

    template <typename... Args>
    const T& at(Args... args) const {
        std::vector<int> indices = { static_cast<int>(args)... };
        return (*this)[indices];
    }

    // Return a NEW tensor with the same elements viewed under `new_shape`.
    // The element data is deep-copied (vector copy); the result shares
    // nothing with *this. Throws if the element counts differ.
    Tensor<T> reshape(const std::vector<int>& new_shape) const {
        if (static_cast<size_t>(compute_size(new_shape)) != size()) {
            throw std::invalid_argument("Reshape shape has different element count");
        }
        Tensor<T> result(new_shape);
        result.data = this->data;  // deep copy of elements into the new layout
        return result;
    }

    Tensor<T> reshape(std::initializer_list<int> new_shape) const {
        return reshape(std::vector<int>(new_shape));
    }

    // Fill every element with `value`.
    void fill(T value) {
        std::fill(data.begin(), data.end(), value);
    }

    // Uniform-random initialization over [min, max] (integers) or
    // [min, max) (floating point). Non-floating T must be a type accepted
    // by std::uniform_int_distribution (short/int/long/long long + unsigned).
    void random(T min = T{0}, T max = T{1}) {
        std::random_device rd;
        std::mt19937 gen(rd());
        if constexpr (std::is_floating_point_v<T>) {
            std::uniform_real_distribution<T> dist(min, max);
            for (auto& x : data) x = dist(gen);
        }
        else {
            std::uniform_int_distribution<T> dist(min, max);
            for (auto& x : data) x = dist(gen);
        }
    }

    // Factory: tensor of the given shape filled with zeros.
    static Tensor<T> zeros(const std::vector<int>& shape) {
        Tensor<T> t(shape);
        t.fill(T{0});
        return t;
    }

    // Factory: tensor of the given shape filled with ones.
    static Tensor<T> ones(const std::vector<int>& shape) {
        Tensor<T> t(shape);
        t.fill(T{1});
        return t;
    }

    // Print "name = [..., ...]" followed by "shape: (...), size: N".
    // Uses iostream throughout (the previous version mixed std::print and
    // std::cout for the same output line).
    void print(const std::string& name = "") const {
        if (!name.empty()) std::cout << name << " = ";
        if (is_scalar()) {
            std::cout << data[0] << '\n';
            return;
        }
        std::cout << print_recursive(0, 0) << '\n';
        std::cout << "shape: (";
        for (size_t i = 0; i < shape.size(); ++i) {
            std::cout << shape[i];
            if (i != shape.size() - 1) std::cout << ", ";
        }
        std::cout << "), size: " << size() << '\n';
    }

    // --- element-wise broadcasting arithmetic (NumPy semantics) ---
    friend Tensor<T> operator+(const Tensor<T>& a, const Tensor<T>& b) {
        return broadcast_elementwise(a, b, [](T x, T y) { return x + y; });
    }

    friend Tensor<T> operator-(const Tensor<T>& a, const Tensor<T>& b) {
        return broadcast_elementwise(a, b, [](T x, T y) { return x - y; });
    }

    friend Tensor<T> operator*(const Tensor<T>& a, const Tensor<T>& b) {
        return broadcast_elementwise(a, b, [](T x, T y) { return x * y; });
    }

    // Division throws runtime_error on any zero divisor element.
    friend Tensor<T> operator/(const Tensor<T>& a, const Tensor<T>& b) {
        return broadcast_elementwise(a, b, [](T x, T y) -> T {
            if (y == T{0}) {
                throw std::runtime_error("Division by zero");
            }
            return x / y;
        });
    }

    // 2-D matrix multiplication: (m x k) . (k x n) -> (m x n).
    // Both operands must be rank-2 and the inner dimensions must agree.
    // Direct flat indexing is valid because data is always stored
    // contiguously in row-major order.
    Tensor<T> matmul(const Tensor<T>& other) const {
        if (this->ndim() != 2 || other.ndim() != 2) {
            throw std::invalid_argument("matmul requires 2D tensors");
        }
        int a_rows = this->shape[0];
        int a_cols = this->shape[1];
        int b_rows = other.shape[0];
        int b_cols = other.shape[1];
        if (a_cols != b_rows) {
            throw std::invalid_argument("matmul: columns of A must equal rows of B");
        }

        Tensor<T> result({ a_rows, b_cols });
        auto& result_data = result.get_data();

        // Naive triple loop — fine for small matrices / testing.
        for (int i = 0; i < a_rows; ++i) {            // result row
            for (int j = 0; j < b_cols; ++j) {        // result column
                T sum = T{0};
                for (int k = 0; k < a_cols; ++k) {    // inner-product accumulation
                    T a_val = this->data[i * a_cols + k];   // A(i,k) at i*a_cols + k
                    T b_val = other.data[k * b_cols + j];   // B(k,j) at k*b_cols + j
                    sum += a_val * b_val;
                }
                result_data[i * b_cols + j] = sum;
            }
        }
        return result;
    }

    // Element-wise map: returns a same-shape tensor with f applied to
    // every element; *this is untouched.
    template <typename F>
    Tensor<T> apply(F&& f) const {
        Tensor<T> result(shape);
        auto& result_data = result.get_data();
        for (size_t i = 0; i < data.size(); ++i) {
            result_data[i] = f(data[i]);
        }
        return result;
    }

    // --- scalar on the right: wrap the scalar as a 0-dim tensor and broadcast ---
    friend Tensor<T> operator+(const Tensor<T>& a, T scalar) {
        return a + Tensor<T>(scalar);
    }
    friend Tensor<T> operator-(const Tensor<T>& a, T scalar) {
        return a - Tensor<T>(scalar);
    }
    friend Tensor<T> operator*(const Tensor<T>& a, T scalar) {
        return a * Tensor<T>(scalar);
    }
    friend Tensor<T> operator/(const Tensor<T>& a, T scalar) {
        return a / Tensor<T>(scalar);
    }

    // --- scalar on the left ---
    friend Tensor<T> operator+(T scalar, const Tensor<T>& a) {
        return Tensor<T>(scalar) + a;
    }
    friend Tensor<T> operator-(T scalar, const Tensor<T>& a) {
        return Tensor<T>(scalar) - a;
    }
    friend Tensor<T> operator*(T scalar, const Tensor<T>& a) {
        return Tensor<T>(scalar) * a;
    }
    friend Tensor<T> operator/(T scalar, const Tensor<T>& a) {
        return Tensor<T>(scalar) / a;
    }

    // Gradient buffer access (sized to data.size() by every constructor).
    std::vector<T>& get_grad() { return grad; }
    const std::vector<T>& get_grad() const { return grad; }
};

