#include <cstring>
#include <numeric>
#include <stdexcept>
#include <thread>
#include <sstream>

#include "tinyndarray.h"

namespace tinyndarray {

// Mutable iterator positioned at the first element.
NdArray::Iter NdArray::begin() {
    return NdArray::Iter(this, 0);
}

// Mutable iterator positioned one past the last element.
NdArray::Iter NdArray::end() {
    const size_t n_elems = m_sub->size;
    return Iter(this, n_elems);
}

// Read-only iterator at the first element. The const_cast is required by
// ConstIter's constructor signature; presumably ConstIter never writes
// through the pointer — verify against its definition in the header.
NdArray::ConstIter NdArray::begin() const {
    auto* self = const_cast<NdArray*>(this);
    return ConstIter(self, 0);
}

// Read-only iterator one past the last element (see begin() const for the
// const_cast rationale).
NdArray::ConstIter NdArray::end() const {
    auto* self = const_cast<NdArray*>(this);
    return ConstIter(self, m_sub->size);
}

const std::vector<size_t>& NdArray::strides() const {
    return m_sub->strides;
}

// Computes the element count described by `shape` (product of all extents).
// An empty shape denotes an empty array and yields 0.
size_t NdArray::calculate_size(const Shape& shape) {
    if (shape.empty()) {
        return 0;
    }
    // Seed the accumulation with a size_t: the previous plain `1` (an int)
    // made std::accumulate carry the running product in an int, narrowing the
    // size_t result of std::multiplies<size_t> on every step and overflowing
    // for large shapes.
    return std::accumulate(shape.begin(), shape.end(), static_cast<size_t>(1),
                           std::multiplies<size_t>());
}

size_t NdArray::calculate_offset(const Index& index) const {
    if (index.size() != m_sub->shape.size()) {
        throw std::runtime_error("Index dimension mismatch");
    }

    size_t offset = 0;
    size_t stride = 1;
    const size_t ndim = m_sub->shape.size();

    for (size_t i = 0; i < ndim; ++i) {
        const size_t dim = ndim - 1 - i;
        const int idx = (index[dim] >= 0) ? index[dim] : m_sub->shape[dim] + index[dim];

        if (idx < 0 || idx >= m_sub->shape[dim]) {
            throw std::out_of_range("Index out of range");
        }

        offset += stride * static_cast<size_t>(idx);
        stride *= m_sub->shape[dim];
    }

    return offset;
}

// Constructor implementations.

// Default constructor: an empty FP32 array (zero elements, empty shape).
NdArray::NdArray() : m_sub(std::make_shared<Substance>(FP32, 0, Shape{})) {}

// Allocates an uninitialized array with the given element type and shape.
NdArray::NdArray(Dtype dtype, const Shape& shape)
    : m_sub(std::make_shared<Substance>(dtype, calculate_size(shape), shape)) {}

// Allocates an array and sets every element to `fill_value`
// (converted to `dtype` inside fill()).
NdArray::NdArray(Dtype dtype, const Shape& shape, float32_t fill_value) : NdArray(dtype, shape) {
    fill(fill_value);
}

// Allocates an array of the given dtype/shape and deep-copies `external_data`
// into the new buffer. The source must hold at least size * itemsize bytes.
// Throws std::invalid_argument when the source is null but bytes are needed.
NdArray::NdArray(Dtype dtype, const Shape& shape, const void* external_data)
    : m_sub(std::make_shared<Substance>(dtype, calculate_size(shape), shape)) {
    const size_t total_bytes = m_sub->size * get_type_size(dtype);
    if (total_bytes == 0) {
        return;  // empty array: nothing to copy
    }
    if (external_data == nullptr) {
        // memcpy from a null source is undefined behavior; fail loudly instead.
        throw std::invalid_argument("NdArray: external_data must not be null");
    }
    std::memcpy(m_sub->data.get(), external_data, total_bytes);
}

// Wraps (own_data == false) or deep-copies (own_data == true) a caller-provided
// buffer. When wrapping, the caller keeps ownership and must keep the buffer
// alive for the lifetime of this array.
NdArray::NdArray(Dtype dtype, const Shape& shape, void* external_data, bool own_data) {
    const size_t n_elems = calculate_size(shape);
    if (own_data) {
        // Build the owned copy up front. The previous code first constructed
        // a Substance wrapping `external_data` and then replaced it with a
        // second one — a wasted construction, and the temporary could tear
        // down the caller's buffer if Substance ever defaulted to owning it.
        const size_t total_bytes = n_elems * get_type_size(dtype);
        void* owned = new char[total_bytes];
        std::memcpy(owned, external_data, total_bytes);
        m_sub = std::make_shared<Substance>(dtype, n_elems, shape, owned);
        m_sub->owns_data = true;
    } else {
        m_sub = std::make_shared<Substance>(dtype, n_elems, shape, external_data);
    }
}

// Constructors taking nested std::initializer_list literals, e.g.
// NdArray{{1.f, 2.f}, {3.f, 4.f}}. CheckNListShape derives the shape from the
// nesting and CopyNListElems flattens the elements into the new buffer.
#define DEF_NDARRAY_INIT_LIST(ctype, TYPE, depth)                                                   \
    NdArray::NdArray(NumList<ctype, depth> init_list) : NdArray(TYPE, CheckNListShape(init_list)) { \
        CopyNListElems(init_list, static_cast<ctype*>(m_sub->data.get()));                          \
    }

// Expands the constructor above for every supported nesting depth (0..9).
#define DEF_ALL_NDARRAY_INIT_LIST(ctype, TYPE) \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 0)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 1)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 2)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 3)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 4)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 5)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 6)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 7)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 8)      \
    DEF_NDARRAY_INIT_LIST(ctype, TYPE, 9)

// Emit the initializer-list constructors for the supported element types.
DEF_ALL_NDARRAY_INIT_LIST(float32_t, FP32)
DEF_ALL_NDARRAY_INIT_LIST(int, INT32)
// Static factory methods.

// Returns an uninitialized array of the requested dtype and shape.
NdArray NdArray::Empty(Dtype dtype, const Shape& shape) {
    NdArray arr(dtype, shape);
    return arr;
}

// Returns an array of the requested dtype/shape with every element set to 0.
NdArray NdArray::Zeros(Dtype dtype, const Shape& shape) {
    NdArray zeros(dtype, shape);
    zeros.fill(0);
    return zeros;
}

// Returns an array of the requested dtype/shape with every element set to 1.
NdArray NdArray::Ones(Dtype dtype, const Shape& shape) {
    NdArray ones(dtype, shape);
    ones.fill(1);
    return ones;
}

// Basic attributes.

// Identifies the underlying buffer by its address; arrays that share the
// same storage report the same id.
uintptr_t NdArray::id() const {
    const void* buf = m_sub->data.get();
    return reinterpret_cast<uintptr_t>(buf);
}

bool NdArray::empty() const {
    return m_sub->size == 0;
}

size_t NdArray::size() const {
    return m_sub->size;
}

// The array's dimensions.
const Shape& NdArray::shape() const {
    const auto& sub = *m_sub;
    return sub.shape;
}

// Extent of a single axis; vector::at throws std::out_of_range for an
// invalid axis.
const int& NdArray::shape(int axis) const {
    const auto& dims = m_sub->shape;
    return dims.at(axis);
}

int NdArray::itemsize() const {
    return get_type_size(m_sub->dtype);
}

size_t NdArray::ndim() const {
    return m_sub->shape.size();
}

// Element type descriptor.
Dtype NdArray::dtype() const {
    const auto& sub = *m_sub;
    return sub.dtype;
}

void* NdArray::data_ptr() {
    return m_sub->data.get();
}

const void* NdArray::data_ptr() const {
    return m_sub->data.get();
}

// Renders the array as text via the stream output operator
// (same result as `os << arr`).
std::string NdArray::to_string() const {
    std::ostringstream out;
    out << *this;
    return out.str();
}

// 类型转换
template <typename T>
T NdArray::as_scalar() const {
    if (size() != 1) throw std::runtime_error("Only size-1 arrays can be converted to scalar");
    return static_cast<T>(ElementProxy(m_sub->data.get(), m_sub->dtype));
}

// 显式实例化
template float32_t NdArray::as_scalar<float32_t>() const;
template int NdArray::as_scalar<int>() const;
template float64_t NdArray::as_scalar<float64_t>() const;

// Fill.
// Sets every element to `value`, converted to the array's dtype by
// std::fill_n's assignment. Throws std::runtime_error for dtypes without a
// fill implementation.
template <typename T>
void NdArray::fill(T value) {
    // The previous version computed `type_size` and `total_bytes` locals that
    // were never read — dead code removed.
    void* buf = m_sub->data.get();
    const size_t n = m_sub->size;

    switch (m_sub->dtype.tag) {
        case FP32: std::fill_n(static_cast<float32_t*>(buf), n, value); break;
        case INT32: std::fill_n(static_cast<int*>(buf), n, value); break;
        case FP64: std::fill_n(static_cast<float64_t*>(buf), n, value); break;
        default: throw std::runtime_error("Unsupported data type for fill");
    }
}
// Deep copy.
// Returns a new array with its own buffer holding a byte-for-byte copy of
// this array's elements.
NdArray NdArray::copy() const {
    NdArray clone(m_sub->dtype, m_sub->shape);
    const size_t n_bytes = m_sub->size * get_type_size(m_sub->dtype);
    std::memcpy(clone.m_sub->data.get(), m_sub->data.get(), n_bytes);
    return clone;
}

// Resize.
// Resizes the array in place. When the element count is unchanged only the
// shape is swapped (cheap). Otherwise a new buffer is allocated: the
// overlapping prefix is preserved and any newly added tail is zero-filled.
void NdArray::resize(const Shape& new_shape) {
    const size_t new_size = calculate_size(new_shape);
    if (new_size == m_sub->size) {
        m_sub->shape = new_shape;
        return;
    }

    // Allocate the replacement storage.
    auto new_sub = std::make_shared<Substance>(m_sub->dtype, new_size, new_shape);
    const size_t type_size = get_type_size(m_sub->dtype);
    const size_t copy_bytes = std::min(m_sub->size, new_size) * type_size;

    // Guard the copy: memcpy with a null source is undefined behavior even
    // for zero bytes, and an empty array may have no buffer at all.
    if (copy_bytes > 0) {
        std::memcpy(new_sub->data.get(), m_sub->data.get(), copy_bytes);
    }

    // Zero-initialize the grown region so no uninitialized bytes leak out.
    if (new_size > m_sub->size) {
        char* tail = static_cast<char*>(new_sub->data.get()) + m_sub->size * type_size;
        std::memset(tail, 0, (new_size - m_sub->size) * type_size);
    }

    m_sub = new_sub;
}

// Element access.
// Flat (row-major) element access with bounds checking; returns a proxy
// that reads/writes the element through the array's dtype.
ElementProxy NdArray::operator[](size_t index) {
    if (index >= m_sub->size) {
        throw std::out_of_range("Index out of range");
    }
    char* base = reinterpret_cast<char*>(m_sub->data.get());
    const size_t byte_offset = index * get_type_size(m_sub->dtype);
    return ElementProxy(base + byte_offset, m_sub->dtype);
}

// Flat (row-major) read-only element access with bounds checking.
ConstElementProxy NdArray::operator[](size_t index) const {
    if (index >= m_sub->size) {
        throw std::out_of_range("Index out of range");
    }
    // Stay const-correct: the original reinterpret_cast to a mutable char*
    // discarded constness for no benefit, and static_cast is the proper
    // cast from void*.
    const char* base = static_cast<const char*>(m_sub->data.get());
    const void* elem_ptr = base + index * get_type_size(m_sub->dtype);
    return ConstElementProxy(elem_ptr, m_sub->dtype);
}

// Multi-dimensional element access; validates the index against the shape
// before converting it to a flat offset.
ElementProxy NdArray::operator[](const Index& index) {
    if (is_index_in_range(index, shape())) {
        return (*this)[calculate_offset(index)];
    }
    throw std::out_of_range(std::string("Index ") + vector_int_to_string(index) + " out of range " +
                            vector_int_to_string(shape()));
}

// Read-only multi-dimensional element access; validates the index against
// the shape before converting it to a flat offset.
ConstElementProxy NdArray::operator[](const Index& index) const {
    if (is_index_in_range(index, shape())) {
        return (*this)[calculate_offset(index)];
    }
    throw std::out_of_range(std::string("Index ") + vector_int_to_string(index) + " out of range " +
                            vector_int_to_string(shape()));
}

// Variadic convenience accessor: arr(i, j, k) == arr[{i, j, k}].
template <typename... I>
ElementProxy NdArray::operator()(I... indices) {
    Index idx = {static_cast<int>(indices)...};
    return (*this)[idx];
}

// Shape operations.
// Returns an array holding the same elements arranged in `new_shape`; the
// element count must be preserved.
//
// NOTE: the result owns an independent copy of the data (not a view). The
// previous implementation copied the shared Substance pointer and then
// wrote the new shape through it, which silently reshaped *this* array —
// and every other view of the same storage — and left the cached strides
// inconsistent with the new shape.
NdArray NdArray::reshape(const Shape& new_shape) const {
    const size_t new_size = calculate_size(new_shape);
    if (new_size != m_sub->size) {
        throw std::runtime_error("Total size must remain the same for reshape");
    }

    // Constructing from the raw buffer allocates a fresh Substance,
    // deep-copies the elements, and derives the metadata from `new_shape`.
    return NdArray(m_sub->dtype, new_shape, static_cast<const void*>(m_sub->data.get()));
}

// Returns a one-dimensional array with all elements of this one.
NdArray NdArray::flatten() const {
    const Shape flat{static_cast<int>(m_sub->size)};
    return reshape(flat);
}

// Returns a one-dimensional array (identical to flatten() here).
NdArray NdArray::ravel() const {
    const Shape flat{static_cast<int>(m_sub->size)};
    return reshape(flat);
}

// Scalar conversion operators. Each delegates to as_scalar(), which throws
// std::runtime_error unless size() == 1.
NdArray::operator float32_t() const {
    return as_scalar<float32_t>();
}
NdArray::operator int() const {
    return as_scalar<int>();
}
NdArray::operator float64_t() const {
    return as_scalar<float64_t>();
}

// Type conversion.
// Returns a copy of the array converted element-wise to `new_dtype`.
// FP32 <-> QINT8 conversions go through the dtype's linear quantization
// parameters. Throws std::runtime_error for unsupported source dtypes or
// unsupported source/destination pairs — the previous implementation
// silently left the destination buffer uninitialized in those cases.
NdArray NdArray::astype(Dtype new_dtype) const {
    if (m_sub->dtype == new_dtype) {
        return *this;  // already the requested type; no conversion needed
    }

    NdArray result(new_dtype, shape());
    const size_t n = this->size();

    const char* src_data = static_cast<const char*>(data_ptr());
    char* dst_data = static_cast<char*>(result.data_ptr());
    // Element sizes are loop-invariant; hoist them out of the per-element loop.
    const size_t src_step = get_type_size(m_sub->dtype);
    const size_t dst_step = get_type_size(new_dtype);

#define ASSIGN_VALUE(TYPE, VALUE) *reinterpret_cast<TYPE*>(dst_data) = static_cast<TYPE>(VALUE)
    for (size_t i = 0; i < n; i++) {
        switch (m_sub->dtype.tag) {
            case FP32: {
                const float32_t value = *reinterpret_cast<const float32_t*>(src_data);
                switch (new_dtype.tag) {
                    case INT32: ASSIGN_VALUE(int32_t, value); break;
                    case FP64: ASSIGN_VALUE(float64_t, value); break;
                    case FP16: ASSIGN_VALUE(float16_t, value); break;
                    case QINT8:
                        *reinterpret_cast<int8_t*>(dst_data) =
                                quantize_linear<int8_t>(value, new_dtype.get_quant_info());
                        break;  // was missing: silently fell through to the next label
                    default: throw std::runtime_error("Unsupported conversion from FP32");
                }
                break;
            }
            case INT32: {
                const int value = *reinterpret_cast<const int*>(src_data);
                switch (new_dtype.tag) {
                    case FP32: ASSIGN_VALUE(float32_t, value); break;
                    case FP64: ASSIGN_VALUE(float64_t, value); break;
                    default: throw std::runtime_error("Unsupported conversion from INT32");
                }
                break;
            }
            case FP64: {
                const float64_t value = *reinterpret_cast<const float64_t*>(src_data);
                switch (new_dtype.tag) {
                    case FP32: ASSIGN_VALUE(float32_t, value); break;
                    case INT32: ASSIGN_VALUE(int32_t, value); break;
                    default: throw std::runtime_error("Unsupported conversion from FP64");
                }
                break;
            }
            case FP16: {
                const float16_t value = *reinterpret_cast<const float16_t*>(src_data);
                switch (new_dtype.tag) {
                    case FP32: ASSIGN_VALUE(float32_t, value); break;
                    case FP64: ASSIGN_VALUE(float64_t, value); break;
                    case INT32: ASSIGN_VALUE(int32_t, value); break;
                    default: throw std::runtime_error("Unsupported conversion from FP16");
                }
                break;
            }
            case QINT8: {
                const int8_t value = *reinterpret_cast<const int8_t*>(src_data);
                switch (new_dtype.tag) {
                    case FP32:
                        *reinterpret_cast<float32_t*>(dst_data) =
                                dequantize_linear<int8_t>(value, m_sub->dtype.get_quant_info());
                        break;
                    default: throw std::runtime_error("Unsupported conversion from QINT8");
                }
                break;
            }
            default: throw std::runtime_error("Unsupported data type for conversion");
        }

        src_data += src_step;
        dst_data += dst_step;
    }
#undef ASSIGN_VALUE

    return result;
}

}  // namespace tinyndarray