#pragma once

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <initializer_list>
#include <memory>
#include <ostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>
#include <vector>

#include <log.h>

namespace ldl
{
namespace core
{
/**
 * @brief A simple n-dimensional tensor container with shared underlying storage.
 *
 * Elements live in a shared std::vector<T>. operator[] returns *views* that
 * share the same buffer (offset via m_start_position), while clone() and the
 * arithmetic operators produce independent deep copies. Not thread-safe.
 */
template<typename T>
class TensorData
{
public:
    /// @brief Render a shape vector as "[d0, d1, ...]" for error messages.
    static std::string shapeToString(const std::vector<int64_t>& shape) {
        std::ostringstream oss;
        oss << "[";
        for (size_t i = 0; i < shape.size(); ++i) {
            if (i != 0) oss << ", ";
            oss << shape[i];
        }
        oss << "]";
        return oss.str();
    }

    /// @brief Construct a 1-D tensor from a flat initializer list.
    TensorData(std::initializer_list<T> init_list) {
        m_shape = {static_cast<int64_t>(init_list.size())};
        m_size = static_cast<int64_t>(init_list.size());
        m_start_position = 0;
        m_data_ptr = std::make_shared<std::vector<T>>(init_list);
    }

    /**
     * @brief Construct a 2-D tensor from a nested initializer list.
     * @throws std::runtime_error if the rows have different lengths.
     * @note An empty outer list yields a valid, empty [0, 0] tensor.
     *       (Fix: the previous version dereferenced init_list.begin() without
     *       checking for emptiness — undefined behavior on an empty list.)
     */
    template<typename U>
    TensorData(std::initializer_list<std::initializer_list<U>> init_list) {
        const size_t first_dim = init_list.size();
        // Guard against an empty outer list before touching begin().
        const size_t second_dim = (first_dim == 0) ? 0 : init_list.begin()->size();

        std::vector<T> flat_data;
        flat_data.reserve(first_dim * second_dim);
        for (const auto& sublist : init_list) {
            if (sublist.size() != second_dim) {
                throw std::runtime_error("Inconsistent dimensions in initializer list");
            }
            for (const auto& item : sublist) {
                flat_data.push_back(static_cast<T>(item));
            }
        }

        m_shape = {static_cast<int64_t>(first_dim), static_cast<int64_t>(second_dim)};
        m_size = static_cast<int64_t>(flat_data.size());
        m_start_position = 0;
        m_data_ptr = std::make_shared<std::vector<T>>(std::move(flat_data));
    }

    /**
     * @brief Construct a tensor of the given shape from (a prefix of) data.
     * @throws std::runtime_error if data has fewer elements than the shape requires.
     * @note Extra trailing elements in data are deliberately ignored
     *       (only the first shape-product elements are copied).
     */
    TensorData(const std::vector<int64_t> &shape, const std::vector<T> &data)
    {
        int64_t size = 1;
        for (auto dim : shape) {
            size *= dim;
        }
        if (static_cast<int64_t>(data.size()) < size) {
            throw std::runtime_error(
                "Input data size (" + std::to_string(data.size()) +
                ") is smaller than the required size (" + std::to_string(size) +
                ") for the given shape " + shapeToString(shape)
            );
        }
        m_data_ptr = std::make_shared<std::vector<T>>(data.begin(), data.begin() + size);
        m_shape = shape;
        m_size = size;
        m_start_position = 0;
    }

    /// @brief Construct a tensor of the given shape filled with a single value.
    TensorData(const std::vector<int64_t> &shape, T data)
    {
        int64_t size = 1;
        for (auto dim : shape) {
            size *= dim;
        }
        m_data_ptr = std::make_shared<std::vector<T>>(size, data);
        m_shape = shape;
        m_size = size;
        m_start_position = 0;
    }

    /// @brief Default-construct an empty tensor (no storage, size 0).
    TensorData() = default;

    /// @brief Total number of elements in this tensor (view).
    /// @note Fix: now const so it is callable on const tensors, consistent
    ///       with shape()/ndim().
    int64_t size() const
    {
        return m_size;
    }

    /// @brief Shape accessor for lvalues; returns a reference, no copy.
    const std::vector<int64_t>& shape() const &
    {
        return m_shape;
    }

    /// @brief Shape accessor for rvalues; moves the shape out.
    std::vector<int64_t> shape() &&
    {
        return std::move(m_shape);
    }

    /**
     * @brief Index along the first dimension, returning a view that shares storage.
     * @throws std::out_of_range if index is negative or >= shape[0]
     *         (fix: negative indices were previously unchecked and produced
     *         an out-of-bounds view).
     */
    TensorData<T> operator[](int64_t index) const
    {
        if (index < 0 || index >= m_shape.at(0)) {
            throw std::out_of_range("Index " + std::to_string(index) +
                              " is out of bounds with size " +
                              std::to_string(m_shape.at(0)));
        }
        auto stride = m_size / m_shape.at(0);
        // Drop the leading dimension; a 1-D tensor degenerates to shape [1].
        std::vector<int64_t> shape(m_shape.begin() + 1, m_shape.end());
        if (m_shape.size() == 1) {
            shape.push_back(1);
        }
        return TensorData<T>(shape, stride, m_start_position + index * stride, m_data_ptr);
    }

    /// @brief Number of dimensions (rank).
    int64_t ndim() const
    {
        return static_cast<int64_t>(m_shape.size());
    }

    /**
     * @brief Recursively print tensor contents to os in nested-bracket form.
     *
     * 1-D tensors print their slice of the shared buffer directly; higher
     * ranks recurse through operator[] views.
     */
    void dump(std::ostream& os, const TensorData<T>& tensor_data) const
    {
        os << "[";
        if (tensor_data.ndim() == 1)
        {
            auto dim = tensor_data.m_shape.at(0) + tensor_data.m_start_position;
            for (int64_t index = tensor_data.m_start_position; index < dim; index++)
            {
                os << (*tensor_data.m_data_ptr)[index];
                if (index != (dim - 1))
                    os << ",";
            }
        }
        else
        {
            auto dim = tensor_data.m_shape.at(0);
            for (int64_t index = 0; index < dim; index++)
            {
                dump(os, tensor_data[index]);
                if (index != (dim - 1))
                {
                    os << ",\n";
                }
                // Blank separator line between sub-blocks of rank-3+ tensors.
                if ((tensor_data.m_shape.size() > 2) && (dim != index + 1))
                {
                    os << "\n";
                }
            }
        }
        os << "]";
    }

    /// @brief Stream a human-readable representation: data plus shape suffix.
    friend std::ostream& operator<<(std::ostream& os, const TensorData<T>& tensor_data)
    {
        os << "Tensor(\n";
        tensor_data.dump(os, tensor_data);
        os << ",shape=(";
        for (size_t i = 0; i < tensor_data.m_shape.size(); ++i) {
            if (i != 0) os << ",";
            os << tensor_data.m_shape[i];
        }
        os << "))";
        return os;
    }

    /**
     * @brief Assign a scalar value; only valid on single-element views
     *        (as produced by fully indexing a tensor, e.g. t[i][j] = v).
     * @throws std::runtime_error on non-scalar tensors.
     * @return *this, to allow chained assignment (a = b = c).
     * @note Fix: returns a reference instead of a full copy, per the
     *       canonical assignment-operator signature.
     */
    TensorData<T>& operator=(const T& value)
    {
        if (ndim() != 1 || size() != 1) {
            throw std::runtime_error("Assignment is only supported for scalar tensors (ndim=1, size=1)");
        }
        // Write through the shared buffer so the parent tensor sees the change.
        (*m_data_ptr)[m_start_position] = value;
        return *this;
    }

    /**
     * @brief Copy assignment with deep-copy semantics.
     *
     * If this tensor is empty it adopts other's shape and deep-copies its
     * data; otherwise the shapes must match and other's elements are copied
     * in place (the existing buffer is reused).
     * @throws std::runtime_error on shape mismatch.
     * @note Fix: returns a reference instead of a full copy.
     */
    TensorData<T>& operator=(const TensorData<T>& other)
    {
        // 1. Self-assignment is a no-op.
        if (this == &other) {
            return *this;
        }

        // 2. Empty destination: adopt other's shape and deep-copy its data.
        if ((other.m_size != 0) && (m_size == 0)) {
            m_shape = other.m_shape;
            m_size = other.m_size;
            m_start_position = 0;
            m_data_ptr = std::make_shared<std::vector<T>>(
                other.m_data_ptr->begin() + other.m_start_position,
                other.m_data_ptr->begin() + other.m_start_position + other.m_size);
            return *this;
        }

        // 3. Shapes must match for in-place assignment.
        if (m_shape != other.m_shape) {
            throw std::runtime_error(
                "Cannot assign tensors with different shapes. " +
                shapeToString(m_shape) + " vs " + shapeToString(other.m_shape)
            );
        }

        // 4. Deep copy into the existing buffer (non-empty tensors only).
        if (other.m_size > 0) {
            std::copy(
                other.m_data_ptr->begin() + other.m_start_position,
                other.m_data_ptr->begin() + other.m_start_position + other.m_size,
                m_data_ptr->begin() + m_start_position
            );
        }

        return *this;
    }

    /// @brief Deep copy: independent storage, compacted to start at offset 0.
    /// @note Fix: cloning a default-constructed tensor no longer dereferences
    ///       a null data pointer.
    TensorData<T> clone() const
    {
        TensorData<T> other;
        other.m_shape = m_shape;
        other.m_size = m_size;
        other.m_start_position = 0;
        if (m_data_ptr) {
            other.m_data_ptr = std::make_shared<std::vector<T>>(
                m_data_ptr->begin() + m_start_position,
                m_data_ptr->begin() + m_start_position + m_size);
        } else {
            other.m_data_ptr = std::make_shared<std::vector<T>>();
        }
        return other;
    }

    /**
     * @brief Element-wise multiplication into a new tensor.
     * @throws std::runtime_error on shape mismatch.
     */
    TensorData<T> operator*(const TensorData<T>& other) const
    {
        if (m_shape != other.m_shape) {
            throw std::runtime_error(
                "Cannot multiply tensors with different shapes. " +
                shapeToString(m_shape) + " vs " + shapeToString(other.m_shape)
            );
        }

        auto result = this->clone();
        int64_t lhs_index = m_start_position;
        int64_t rhs_index = other.m_start_position;
        for (auto it = result.m_data_ptr->begin() + result.m_start_position;
             it != result.m_data_ptr->begin() + result.m_start_position + result.m_size; ++it) {
            *it = m_data_ptr->at(lhs_index) * other.m_data_ptr->at(rhs_index);
            lhs_index++;
            rhs_index++;
        }
        return result;
    }

    /**
     * @brief Element-wise addition into a new tensor.
     * @throws std::runtime_error on shape mismatch.
     * @note Fix: the error message previously said "multiply" (copy-paste bug).
     */
    TensorData<T> operator+(const TensorData<T>& other) const
    {
        if (m_shape != other.m_shape) {
            throw std::runtime_error(
                "Cannot add tensors with different shapes. " +
                shapeToString(m_shape) + " vs " + shapeToString(other.m_shape)
            );
        }

        auto result = this->clone();
        int64_t lhs_index = m_start_position;
        int64_t rhs_index = other.m_start_position;
        for (auto it = result.m_data_ptr->begin() + result.m_start_position;
             it != result.m_data_ptr->begin() + result.m_start_position + result.m_size; ++it) {
            *it = m_data_ptr->at(lhs_index) + other.m_data_ptr->at(rhs_index);
            lhs_index++;
            rhs_index++;
        }
        return result;
    }

    /**
     * @brief In-place element-wise addition. An empty destination instead
     *        adopts a deep copy of other (accumulator initialization).
     * @throws std::runtime_error on shape mismatch.
     * @note Fix: returns a reference instead of a full copy; error message
     *       previously said "multiply".
     */
    TensorData<T>& operator+=(const TensorData<T>& other)
    {
        // Empty accumulator: initialize with a deep copy of other.
        if ((other.m_size != 0) && (m_size == 0)) {
            m_shape = other.m_shape;
            m_size = other.m_size;
            m_start_position = 0;
            m_data_ptr = std::make_shared<std::vector<T>>(
                other.m_data_ptr->begin() + other.m_start_position,
                other.m_data_ptr->begin() + other.m_start_position + other.m_size);
            return *this;
        }

        if (m_shape != other.m_shape) {
            throw std::runtime_error(
                "Cannot add tensors with different shapes. " +
                shapeToString(m_shape) + " vs " + shapeToString(other.m_shape)
            );
        }

        int64_t rhs_index = other.m_start_position;
        for (auto it = m_data_ptr->begin() + m_start_position;
             it != m_data_ptr->begin() + m_start_position + m_size; ++it) {
            *it += other.m_data_ptr->at(rhs_index);
            rhs_index++;
        }
        return *this;
    }

    /// @brief Return a new tensor with std::exp applied element-wise.
    /// @note Fix: now const — it only reads *this (via clone()).
    TensorData<T> exp() const
    {
        auto other = this->clone();
        for (auto it = other.m_data_ptr->begin() + other.m_start_position;
             it != other.m_data_ptr->begin() + other.m_start_position + other.m_size; ++it) {
            *it = std::exp(*it);
        }
        return other;
    }

private:
    /// @brief View constructor used by operator[]: shares data_ptr, no copy.
    TensorData(std::vector<int64_t> shape, int64_t size, int64_t start_position, std::shared_ptr<std::vector<T>> data_ptr)
    {
        m_shape = std::move(shape);
        m_size = size;
        m_start_position = start_position;
        m_data_ptr = std::move(data_ptr);
    }

private:
    std::shared_ptr<std::vector<T>> m_data_ptr{};  // shared storage buffer (null for default-constructed)
    std::vector<int64_t> m_shape{};                // dimension sizes
    int64_t m_start_position{};                    // offset of this view into the buffer
    int64_t m_size{};                              // element count of this view
};
}  // namespace core
}  // namespace ldl
