#include "tensor.h"

#include <cuda_runtime.h>

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <numeric>


// Returns the size in bytes of a single element of the given data type,
// or 0 for an unrecognized type (which makes downstream allocations and
// copies degenerate to zero-byte operations).
size_t getDataTypeSize(DataType data_type) {
    if (data_type == DataType::FLOAT32) {
        return sizeof(float);
    }
    if (data_type == DataType::INT32) {
        return sizeof(int);
    }
    // Unknown / unsupported type.
    return 0;
}

// Allocates a tensor backed by uninitialized host (CPU) memory.
//
// dims:      extent of each dimension; the element count is their PRODUCT.
// data_type: element type, used to size the allocation.
// layout:    stored verbatim on the returned tensor.
//
// Returns a Tensor whose data pointer is owned by the caller (release with
// freeTensor). data may be NULL if malloc fails or the type size is 0.
//
// NOTE(review): the Tensor field `num_dims` is used throughout this file as
// the total element count, not the rank -- confirm against tensor.h users.
Tensor allocateTensorOnCPU(std::vector<int> dims, DataType data_type, DataLayout layout) {
    Tensor tensor;
    tensor.layout = layout;
    tensor.data_type = data_type;
    tensor.is_gpu = false;
    tensor.dims = dims;
    // BUGFIX: the element count is the product of the dimensions, not their
    // sum (the previous accumulate with init 0 summed them, under-allocating
    // every multi-dimensional tensor). Init 1 also makes an empty dims vector
    // a scalar (one element) rather than zero elements.
    int num_elements = std::accumulate(dims.begin(), dims.end(), 1,
                                       std::multiplies<int>());
    tensor.num_dims = num_elements;
    tensor.data = malloc((size_t)num_elements * getDataTypeSize(data_type));
    return tensor;
}

// Allocates a tensor backed by uninitialized device (GPU) memory.
//
// dims:      extent of each dimension; the element count is their PRODUCT.
// data_type: element type, used to size the allocation.
// layout:    stored verbatim on the returned tensor.
//
// Returns a Tensor whose data pointer is owned by the caller (release with
// freeTensor). On cudaMalloc failure the error is reported to stderr and
// data is NULL.
Tensor allocateTensorOnGPU(std::vector<int> dims, DataType data_type, DataLayout layout) {
    Tensor tensor;
    tensor.layout = layout;
    tensor.data_type = data_type;
    tensor.is_gpu = true;
    tensor.dims = dims;

    // BUGFIX: element count is the product of the dimensions, not their sum
    // (previous code summed via accumulate with init 0). Also qualify
    // std::accumulate explicitly instead of relying on ADL.
    int num_elements = std::accumulate(dims.begin(), dims.end(), 1,
                                       std::multiplies<int>());
    tensor.num_dims = num_elements;
    tensor.data = nullptr;
    // Check the cudaMalloc result instead of silently ignoring it; a sticky
    // CUDA error here would otherwise surface mysteriously at a later call.
    cudaError_t err = cudaMalloc(&tensor.data,
                                 (size_t)num_elements * getDataTypeSize(data_type));
    if (err != cudaSuccess) {
        fprintf(stderr, "allocateTensorOnGPU: cudaMalloc failed: %s\n",
                cudaGetErrorString(err));
        tensor.data = nullptr;
    }
    return tensor;
}

// Releases the buffer owned by a tensor, dispatching to the allocator that
// produced it: cudaFree for device memory, free for host memory. Safe on a
// NULL data pointer (both free and cudaFree accept NULL).
void freeTensor(Tensor tensor) {
    if (!tensor.is_gpu) {
        free(tensor.data);
        return;
    }
    cudaFree(tensor.data);
}
// Copies src's buffer into dst's buffer, choosing the cudaMemcpy direction
// from the two tensors' locations. Despite the name, all four directions are
// handled (previously same-space copies -- host->host and device->device --
// were a silent no-op, which is a correctness trap for callers).
//
// The byte count is derived from src only; assumes src and dst were
// allocated with the same element count and data type -- TODO confirm
// callers guarantee this.
void copyToGPU(Tensor src, Tensor dst) {
    size_t bytes = (size_t)src.num_dims * getDataTypeSize(src.data_type);

    cudaMemcpyKind kind;
    if (src.is_gpu && dst.is_gpu) {
        kind = cudaMemcpyDeviceToDevice;
    } else if (src.is_gpu) {
        kind = cudaMemcpyDeviceToHost;
    } else if (dst.is_gpu) {
        kind = cudaMemcpyHostToDevice;
    } else {
        kind = cudaMemcpyHostToHost;
    }

    // Report failures instead of swallowing them; an ignored sticky error
    // would make every later CUDA call fail mysteriously.
    cudaError_t err = cudaMemcpy(dst.data, src.data, bytes, kind);
    if (err != cudaSuccess) {
        fprintf(stderr, "copyToGPU: cudaMemcpy failed: %s\n",
                cudaGetErrorString(err));
    }
}