#pragma once
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <sstream>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "string_utils.h"
#include "macro.h"


// Where a tensor's backing memory lives.
enum class Device {
    CPU_PINNED,  // pinned (page-locked) host memory — name implies faster H2D/D2H copies
    CPU,         // ordinary (pageable) host memory
    GPU          // device memory
};

// Element type carried by a Tensor.
// The C++-type -> tag mapping is defined by getTensorType<T>() below.
enum class DataType {
    FP32,        // float
    FP16,        // half
    INT8,        // int8_t
    INT32,       // int32_t
    BOOL,        // bool
    BYTES,       // char (raw bytes)
    UNSUPPORTED  // any other C++ type
};

// Map a C++ element type (optionally const-qualified) to its DataType tag.
// Any type not listed below yields DataType::UNSUPPORTED.
template<typename T>
DataType inline getTensorType() {
    // Strip const once so T and const T map to the same tag, instead of
    // testing both spellings in every branch.
    using U = typename std::remove_const<T>::type;
    if (std::is_same<U, float>::value) {
        return DataType::FP32;
    }
    if (std::is_same<U, half>::value) {
        return DataType::FP16;
    }
    if (std::is_same<U, int8_t>::value) {
        return DataType::INT8;
    }
    if (std::is_same<U, int32_t>::value) {
        return DataType::INT32;
    }
    if (std::is_same<U, bool>::value) {
        return DataType::BOOL;
    }
    if (std::is_same<U, char>::value) {
        return DataType::BYTES;
    }
    return DataType::UNSUPPORTED;
}


// Forward declaration: lets Tensor::as<T>() downcast to the typed wrapper
// defined further below.
template<typename T>
struct TensorWrapper;

// Tensor 是不涉及具体数据类型的，因此不需要模版类
// Type-erased tensor descriptor: device placement, element type and shape.
// Holds no data pointer itself; TensorWrapper<T> adds the typed buffer.
struct Tensor {
    Device device;
    DataType dtype;
    std::vector<int> shape;

    Tensor() = default;
    Tensor(const Device device_,
            const DataType dtype_,
            const std::vector<int>& shape_):
            device(device_), dtype(dtype_), shape(shape_) {}

    // Virtual dtor: this class is a polymorphic base (virtual size()/toString()),
    // so deleting a derived TensorWrapper through Tensor* must be well-defined.
    virtual ~Tensor() = default;

    // Number of elements (product of all dims); 0 for an empty shape.
    virtual int size() const {
        if (shape.empty()) {
            return 0;
        }
        return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
    }

    // Unchecked downcast to the typed wrapper. Caller must ensure the dynamic
    // type really is TensorWrapper<T> (e.g. by inspecting dtype first).
    template<typename T>
    TensorWrapper<T>* as(){
        return static_cast<TensorWrapper<T>*>(this);
    }

    std::string DeviceString() const {
        static const std::unordered_map<Device, std::string> device2string {
            {Device::CPU_PINNED, "CPU_PINNED"},
            {Device::CPU, "CPU"},
            {Device::GPU, "GPU"}
        };
        return device2string.at(device);
    }

    std::string DataTypeString() const {
        // Covers every DataType enumerator so .at() cannot throw
        // std::out_of_range for a valid dtype (BOOL/BYTES/UNSUPPORTED
        // were previously missing).
        static const std::unordered_map<DataType, std::string> type2string{
            {DataType::FP32, "FP32"},
            {DataType::FP16, "FP16"},
            {DataType::INT8, "INT8"},
            {DataType::INT32, "INT32"},
            {DataType::BOOL, "BOOL"},
            {DataType::BYTES, "BYTES"},
            {DataType::UNSUPPORTED, "UNSUPPORTED"}
        };
        return type2string.at(dtype);
    }

    virtual std::string toString() const {
        std::string device_str = DeviceString();
        std::string datatype_str = DataTypeString();
        // Format-string typo fixed: "where=%sm type" -> "where=%s, type".
        return fmtstr("Tensor[where=%s, type=%s, shape=%s]",
                device_str.c_str(),
                datatype_str.c_str(),
                vec2str(shape).c_str());
    }
};

// Tensor plus a typed, non-owning pointer to its elements.
template<typename T>
struct TensorWrapper : public Tensor {
public:
    T *data;

    // Shape-only constructor: no buffer attached yet. data is explicitly
    // nulled — it used to be left uninitialized, making the nullptr check
    // in size() read an indeterminate pointer (UB).
    TensorWrapper(const Device device, const DataType dtype, const std::vector<int> shape):
        Tensor(device, dtype, shape),
        data(nullptr) {}

    TensorWrapper(const Device device, const DataType dtype, const std::vector<int> shape, T *data_):
        Tensor(device, dtype, shape),
        data(data_) {
            // Guard against a mismatch between the template argument T and
            // the declared dtype tag.
            DataType in_dtype = getTensorType<T>();
            LLM_CHECK_WITH_INFO(in_dtype == dtype, "when build TensorWrapper, the passed in data type should be the same as dtype in params");
        }

    // Element count; 0 when no buffer is attached or the shape is empty.
    virtual int size() const override {
        if (data == nullptr || shape.empty()) {
            return 0;
        }
        return std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
    }

    // Read one element; only valid for host-resident (CPU) tensors.
    inline T getVal(int id) const {
        LLM_CHECK(device == Device::CPU);
        return data[id];
    }

    // Read the first element; only valid for host-resident (CPU) tensors.
    inline T getVal() const {
        LLM_CHECK(device == Device::CPU);
        return getVal(0);
    }

    inline T* getPtr() const {
        return data;
    }

    // Correctly-spelled accessor.
    inline T* getPtrByOffset(int offset) const {
        return data + offset;
    }

    // Backward-compatible alias for existing callers of the misspelled name.
    inline T* gePtrByOffset(int offset) const {
        return getPtrByOffset(offset);
    }

    virtual std::string toString() const override {
        std::string device_str = DeviceString();
        std::string datatype_str = DataTypeString();
        return fmtstr("TensorWrapper[where=%s, type=%s, shape=%s, data=%p]",
                    device_str.c_str(),
                    datatype_str.c_str(),
                    vec2str(shape).c_str(),
                    data);
    }
};

// String-keyed registry of non-owning Tensor pointers, used to pass named
// inputs/outputs around. Does NOT own or free the tensors it holds.
struct TensorMap {
    std::unordered_map<std::string, Tensor*> tensor_map_;

    TensorMap() = default;

    // Only tensors reporting size() > 0 are inserted; an invalid entry is
    // reported through LLM_CHECK_WITH_INFO.
    TensorMap(std::initializer_list<std::pair<std::string, Tensor*>> tensor_map) {
        for (auto &pair : tensor_map) {
            if (isValid(pair.second)) {
                insert(pair.first, pair.second);
            } else {
                // Typo fixed in the message: "skippinginsert" -> "skipping insert".
                LLM_CHECK_WITH_INFO(isValid(pair.second), fmtstr("%s is not a valid tensor, skipping insert into TensorMap", pair.first.c_str()));
            }
        }
    }

    TensorMap(std::unordered_map<std::string, Tensor*> tensor_map) {
        for (const auto &kv : tensor_map) {
            if (isValid(kv.second)) {
                insert(kv.first, kv.second);
            }
            // NOTE(review): invalid tensors are silently dropped here, unlike
            // the initializer_list ctor above — confirm this is intentional.
        }
    }

    // Pointers are non-owning, so there is nothing to free.
    ~TensorMap() {
        tensor_map_.clear();
    }

    // A tensor is usable only if it has at least one element.
    inline bool isValid(const Tensor* tensor) {
        return tensor->size() > 0;
    }

    // Insert or overwrite the entry for key.
    inline void insert(const std::string &key, Tensor *value) {
        tensor_map_[key] = value;
    }

    // Insert only if the key is absent (std::unordered_map::insert semantics).
    inline void insert(std::pair<std::string, Tensor*> p) {
        tensor_map_.insert(p);
    }

    inline size_t size() const {
        return tensor_map_.size();
    }

    inline bool isExist(const std::string &key) const {
        return tensor_map_.find(key) != tensor_map_.end();
    }

    // Checked lookup: aborts via LLM_CHECK_WITH_INFO when the key is missing.
    inline Tensor* at(const std::string &key) {
        LLM_CHECK_WITH_INFO(isExist(key), fmtstr("Cannot find a tensor of name %s in the tensor map (keys: %s)", key.c_str(), vec2str(keys()).c_str()));
        return tensor_map_.at(key);
    }

    inline Tensor* operator[](const std::string &key) {
        LLM_CHECK_WITH_INFO(isExist(key), fmtstr("Cannot find a tensor of name %s in the tensor map (keys: %s)", key.c_str(), vec2str(keys()).c_str()));
        return tensor_map_[key];
    }

    // All keys currently stored (unordered, matching the underlying map).
    std::vector<std::string> keys() const {
        std::vector<std::string> key_names;
        key_names.reserve(tensor_map_.size());
        for (const auto &kv : tensor_map_) {
            key_names.push_back(kv.first);
        }
        return key_names;
    }

    // Render as "[ key: <tensor>, ... ]".
    std::string toString() {
        std::stringstream ss;
        ss << "[ ";
        // Bug fix: key_names was previously declared but never populated,
        // so indexing it below was undefined behavior for any non-empty map.
        std::vector<std::string> key_names = keys();
        for (size_t i = 0; i < key_names.size(); ++i) {
            ss << key_names[i] << ": " << at(key_names[i])->toString();
            if (i < key_names.size() - 1) {
                ss << ", ";
            }
        }
        ss << "]";
        return ss.str();
    }
};