#include "my_tensor.hpp"
#include "cuda_tools.hpp"

namespace MTsr
{
    // Byte width of a single element of the given dtype.
    // Returns -1 for unsupported types — callers MUST check before using
    // the result in a size computation.
    static int data_type_size(DataType dt){
		switch (dt) {
			case DataType::Float32: return sizeof(float);
			case DataType::UInt8: return sizeof(uint8_t);
			default: {
				// Explicit cast: passing an enum (especially a scoped enum)
				// through C varargs for %d is only safe as a plain int.
				INFOE("Not support dtype: %d", static_cast<int>(dt));
				return -1;
			}
		}
	}

    // Human-readable name of a DataHead value (for logging/debugging).
    static const char* data_head_string(DataHead dh){
		switch(dh){
			case DataHead::Init: return "Init";
			case DataHead::Device: return "Device";
			case DataHead::Host: return "Host";
			// Fixed typo: "Unknow" -> "Unknown".
			default: return "Unknown";
		}
	}
    
    // Human-readable name of a DataType value (for logging/debugging).
    static const char* data_type_string(DataType dt){
		switch(dt){
			case DataType::Float32: return "Float32";
			case DataType::UInt8: return "UInt8";
			// Fixed typo: "Unknow" -> "Unknown".
			default: return "Unknown";
		}
	}

    // Construct an empty tensor bound to `device_id` with element type
    // `dtype`. No shape is set and no memory is allocated until a later
    // new_dims_mem() call provides dimensions.
    MyTensor::MyTensor(int device_id, DataType dtype)
        : MMem::MixMemory(device_id), dtype_(dtype)
    {
    }

    // Construct a 4-D (NCHW) tensor on `device_id` and immediately
    // allocate its backing storage — on the GPU when `default_gpu` is
    // true, otherwise on the host.
    MyTensor::MyTensor(int device_id, DataType dtype, int n, int c, int h, int w, bool default_gpu)
        : MMem::MixMemory(device_id), dtype_(dtype)
    {
        shape_ = {n, c, h, w};
        new_mem(default_gpu);
    }

    // Construct a tensor of arbitrary rank on `device_id` and allocate
    // its backing storage on the side selected by `default_gpu`.
    MyTensor::MyTensor(int device_id, DataType dtype, const std::vector<int>& dims, bool default_gpu)
        : MMem::MixMemory(device_id), dtype_(dtype)
    {
        shape_ = dims;
        new_mem(default_gpu);
    }

    // Reset bookkeeping on destruction. The actual host/device buffers
    // are presumably released by the MixMemory base destructor, which
    // runs after this body — TODO confirm in MixMemory.
    MyTensor::~MyTensor()
    {
        head_ = DataHead::Init;
        shape_.clear();
    }

    void MyTensor::release_tensor()
    {
        this->release_all();
        this->shape_.clear();
        this->head_ = DataHead::Init;
    }

    void MyTensor::new_dims_mem(int n, int c, int h, int w, bool default_gpu)
    {
        this->shape_ = std::vector<int>{n, c, h, w};
        this->new_mem(default_gpu);
    }

    void MyTensor::new_dims_mem(const std::vector<int>& dims, bool default_gpu)
    {
        this->shape_ = dims;
        this->new_mem(default_gpu);
    }

    void MyTensor::new_mem(bool default_gpu)
    {
        size_t data_size = this->numel() * data_type_size(this->dtype_);
        if (default_gpu)
        {
            this->new_gpu_mem(data_size);
            this->head_ = DataHead::Device;
        }
        else
        {
            this->new_cpu_mem(data_size);
            this->head_ = DataHead::Host;
        }
    }

    // Total element count: the product of all dims, or 0 for a tensor
    // with no shape set.
    int MyTensor::numel() const
    {
        if (shape_.empty())
            return 0;

        // Range-for replaces the original `int i < shape_.size()` loop,
        // which mixed signed and unsigned in the comparison.
        int value = 1;
        for (const int dim : shape_)
            value *= dim;
        return value;
    }

    // Row-major (C-order) linear offset of a multi-dimensional index,
    // computed Horner-style: accumulate the index for each dim, then
    // scale by the next dim's extent. Dims beyond `size` are treated as
    // index 0, so a partial index addresses the start of that slice.
    // `size` must not exceed the tensor's rank.
    int MyTensor::offset_array(size_t size, const int* index_array) const
    {
		Assert(size <= shape_.size());
		int value = 0;
		// size_t index fixes the original signed/unsigned comparisons
		// against size and shape_.size().
		for(size_t i = 0; i < shape_.size(); ++i)
        {
			if(i < size)
				value += index_array[i];

			if(i + 1 < shape_.size())
				value *= shape_[i+1];
		}
		return value;
	}

	int MyTensor::offset_array(const std::vector<int>& index_array) const
    {
		return offset_array(index_array.size(), index_array.data());
	}
}