#include <iostream>
#include <ctime>
#include <cstdlib>
#include <string>
#include <cuda_runtime.h>
#include <cublas_v2.h>

using namespace std;


// Minimal tensor wrapper owning a flat float buffer that lives either in
// host ("cpu") or device ("cuda") memory, as recorded by `device`.
class Tensor{
    public:
        Tensor(int dim, int *size, int seed, string device);
        Tensor(int dim, int *size, float *data, string device); // construct from caller-supplied data
        string device; // where `data` lives: "cpu" or "cuda"
        int dim; // number of dimensions
        int *size; // shape array, length == dim
        float *data; // flat element buffer; host or device pointer depending on `device`
        
        void shape(); // print the tensor shape
        void freeTensor(); // release the tensor's buffers
        void cpu(); // move data from GPU to host
        void cuda(); // move data from host to GPU
        void print(); // print the tensor contents

    private:
        float *get_tensor(int dim, int *shape,int seed, string device); // generate a random tensor buffer
        float *turn_to_gpu(float *h_arr, int size); // copy host buffer to device (frees the host copy)
        float *turn_to_cpu(float *d_arr, int size); // copy device buffer to host (frees the device copy)
        float *turn(int dim, int *shape, float *data); // upload caller-supplied data to the device; the caller keeps ownership of the host array and must free it
};

// Generate a buffer of random floats in [-1, 1).
// For device "cpu" the returned pointer is heap-allocated host memory; for
// "cuda" it is device memory. Returns nullptr for an unknown device string
// (the original fell off the end with a bare `return;`, which is ill-formed
// in a non-void function).
float *Tensor::get_tensor(int dim, int *shape, int seed, string device) {
    int size = 1;
    for(int i = 0; i < dim; i++){
        size *= shape[i];
    }
    srand((int)time(NULL) + seed);

    // rand() is host-only, so always fill a host buffer first.
    float *h_arr = new float[size];
    for (int i = 0; i < size; i++) {
        // Map rand()'s [0, RAND_MAX] onto [-1, 1].
        h_arr[i] = (float)rand() / RAND_MAX * 2.0f - 1.0f;
    }

    if(device == "cpu"){
        return h_arr;
    }
    if(device == "cuda"){
        float *d_arr;
        cudaMalloc((void **)&d_arr, sizeof(float) * size);
        cudaMemcpy(d_arr, h_arr, sizeof(float) * size, cudaMemcpyHostToDevice);
        delete[] h_arr; // the device now holds the only copy
        return d_arr;
    }

    delete[] h_arr; // unknown device: don't leak the staging buffer
    return nullptr;
}


/*      下面的两个转换方法只能在转换Tensor类型的时候使用                 */
// Move a host buffer into newly-allocated device memory. The host array is
// freed here, so the caller must not use the input pointer afterwards.
float *Tensor::turn_to_gpu(float *h_arr, int size){
    const size_t bytes = sizeof(float) * size;
    float *d_arr;
    cudaMalloc((void **)&d_arr, bytes);
    cudaMemcpy(d_arr, h_arr, bytes, cudaMemcpyHostToDevice);
    delete[] h_arr;
    return d_arr;
}

// Move a device buffer into newly-allocated host memory. The device array
// is freed here, so the caller must not use the input pointer afterwards.
float *Tensor::turn_to_cpu(float *d_arr, int size){
    const size_t bytes = sizeof(float) * size;
    float *h_arr = new float[size];
    cudaMemcpy(h_arr, d_arr, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_arr);
    return h_arr;
}

/********************************** */

// Upload caller-supplied host data into a fresh device buffer. Unlike
// turn_to_gpu, the host array is NOT freed: the caller keeps ownership
// and must release it manually.
float *Tensor::turn(int dim, int *shape, float *data){
    int count = 1;
    for(int d = 0; d < dim; d++){
        count *= shape[d];
    }
    float *d_arr;
    cudaMalloc((void **)&d_arr, sizeof(float) * count);
    cudaMemcpy(d_arr, data, sizeof(float) * count, cudaMemcpyHostToDevice);
    return d_arr;
}


// Constructor: build a tensor filled with random values in [-1, 1),
// allocated on the requested device.
Tensor::Tensor(int dim, int *size, int seed, string device)
    : device(device), dim(dim), size(size) {
    this->data = get_tensor(dim, size, seed, device);
}

// Constructor: wrap caller-supplied host data. For "cpu" the tensor takes
// the pointer as-is; for "cuda" the values are copied to the device and the
// caller keeps (and must free) the host array.
Tensor::Tensor(int dim, int *size, float *data, string device)
    : device(device), dim(dim), size(size) {
    if(device == "cuda"){
        this->data = turn(dim, size, data);
    }
    else if(device == "cpu"){
        this->data = data;
    }
}

// Print the tensor shape as "(d0,d1,...)".
// Generalized: the original only handled dim 1 and dim 2 and silently
// printed nothing for any other dimensionality; this loop handles all dims
// while producing identical output for the 1-D and 2-D cases.
void Tensor::shape(){
    cout << "(";
    for(int i = 0; i < this->dim; i++){
        if(i > 0){
            cout << ",";
        }
        cout << this->size[i];
    }
    cout << ")" << endl;
}

// Move the tensor's data from device to host memory; no-op when the
// tensor is not on "cuda".
void Tensor::cpu(){
    if(this->device != "cuda"){
        return; // already host-resident
    }
    int count = 1;
    for(int i = 0; i < this->dim; i++){
        count *= this->size[i];
    }
    this->data = turn_to_cpu(this->data, count);
    this->device = "cpu";
}

// Move the tensor's data from host to device memory; no-op when the
// tensor is not on "cpu".
void Tensor::cuda(){
    if(this->device != "cpu"){
        return; // already device-resident
    }
    int count = 1;
    for(int i = 0; i < this->dim; i++){
        count *= this->size[i];
    }
    this->data = turn_to_gpu(this->data, count);
    this->device = "cuda";
}

// Release the data buffer (host or device, per `device`) and the shape
// array. The pointers are nulled afterwards so an accidental second call
// or stale read fails loudly instead of double-freeing (the original left
// both pointers dangling).
void Tensor::freeTensor(){
    if(this->device == "cpu"){
        delete[] this->data;
    }
    else if(this->device == "cuda"){
        cudaFree(this->data);
    }
    delete[] this->size;
    this->data = nullptr;
    this->size = nullptr;
}

// Print the tensor contents: 1-D on a single line, 2-D one row per line.
// Device tensors are staged through a temporary host buffer.
// Bug fix: the row break used `i % size[1] == 0`, which emitted a newline
// after the FIRST element of every row; it now breaks after each
// size[1]-th element, i.e. at the true end of a row.
void Tensor::print(){
    int s = 1;
    for(int i = 0; i < this->dim; i++){
        s *= this->size[i];
    }

    // Stage device data on the host so both devices share one print path.
    float *h_arr = this->data;
    bool staged = false;
    if(this->device == "cuda"){
        h_arr = new float[s];
        cudaMemcpy(h_arr, this->data, sizeof(float) * s, cudaMemcpyDeviceToHost);
        staged = true;
    }

    if(this->dim == 1){
        for(int i = 0; i < s; i++){
            cout << h_arr[i] << " ";
        }
        cout << endl;
    }else if(this->dim == 2){
        for(int i = 0; i < s; i++){
            cout << h_arr[i] << " ";
            if((i + 1) % this->size[1] == 0){ // last column of the row
                cout << endl;
            }
        }
    }

    if(staged){
        delete[] h_arr;
    }
}

/* Build a tensor of all ones on the requested device.
   Leak fix: for device "cuda" the constructor copies the values to the
   device via Tensor::turn and leaves ownership of the host staging buffer
   with the caller — the original never freed it. */
Tensor oneTensor(int dim, int *size, string device){
    int s = 1;
    for(int i = 0; i < dim; i++){
        s *= size[i];
    }

    float *data = new float[s];
    for(int i = 0; i < s; i++){
        data[i] = 1.0f;
    }

    Tensor t(dim, size, data, device);
    if(device == "cuda"){
        delete[] data; // device holds its own copy; release the host staging buffer
    }
    return t;
}

/* Build a tensor of all zeros on the requested device.
   Leak fix: same as oneTensor — for "cuda" the host staging buffer was
   never freed after the constructor copied it to the device. */
Tensor zeroTensor(int dim, int *size, string device){
    int s = 1;
    for(int i = 0; i < dim; i++){
        s *= size[i];
    }

    float *data = new float[s];
    for(int i = 0; i < s; i++){
        data[i] = 0.0f;
    }

    Tensor t(dim, size, data, device);
    if(device == "cuda"){
        delete[] data; // device holds its own copy; release the host staging buffer
    }
    return t;
}

// Shape checks
// Element-wise ops (add/sub) need identical shapes: returns 1 when the two
// tensors match in rank and every extent, -1 otherwise.
int shape_equal(Tensor &a, Tensor &b){
    if(a.dim != b.dim){
        return -1;
    }
    for(int d = 0; d < a.dim; d++){
        if(a.size[d] != b.size[d]){
            return -1;
        }
    }
    return 1;
}

// Matrix-multiplication shape check: returns 1 when a @ b is well-formed,
// -1 otherwise.
// Fixes vs. the original: the mixed 1-D/2-D branches came AFTER the
// `dim1 != dim2` early return and so were unreachable dead code (removed);
// and a 1-D / 1-D pair (dot product) never had its lengths compared, which
// is now checked.
int shape_mul(Tensor &a, Tensor &b){
    if(a.dim != b.dim){
        return -1;
    }
    if(a.dim == 1){
        // dot product: vector lengths must match
        if(a.size[0] != b.size[0]){
            return -1;
        }
    }
    if(a.dim == 2){
        // (m,k) @ (k,n): inner dimensions must match
        if(a.size[1] != b.size[0]){
            return -1;
        }
    }
    return 1;
}

// Device check: 1 when both tensors live on the same device, -1 otherwise.
int device_equal(Tensor &a, Tensor &b){
    return (a.device == b.device) ? 1 : -1;
}
// CUDA kernels
// Element-wise addition: c[i] = a[i] + b[i] for every i < size.
// Expects a 1-D launch covering at least `size` threads; extras exit early.
__global__ void t_add(float *a, float *b, float *c, int size){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= size){
        return;
    }
    c[idx] = a[idx] + b[idx];
}

// Element-wise subtraction: c[i] = a[i] - b[i] for every i < size.
// Expects a 1-D launch covering at least `size` threads; extras exit early.
__global__ void t_sub(float *a, float *b, float *c, int size){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if(idx >= size){
        return;
    }
    c[idx] = a[idx] - b[idx];
}

// Element-wise addition; both tensors must share a device and a shape.
// Fixes vs. the original:
//  - the result gets its OWN copy of the shape array (sharing a.size meant
//    freeing both tensors double-freed it);
//  - the kernel launch uses a computed grid instead of <<<1, s>>> — one
//    block is capped at 1024 threads, so larger tensors were silently left
//    unwritten;
//  - the device result is attached via the pass-through ("cpu") constructor
//    branch and relabeled, because the "cuda" branch treats its input as a
//    HOST pointer and would re-copy (and leak) the device buffer;
//  - the ill-formed bare `return;` fallthrough is gone.
Tensor tensor_add(Tensor &a, Tensor &b){
    if(device_equal(a,b) < 0){
        string tip = "There were two different devices, between cuda and cpu!";
        cout << tip << endl;
        exit(0);
    }
    if(shape_equal(a,b) < 0){
        string tip = "The shape of two tensors are not equal!";
        cout << tip << endl;
        exit(0);
    }
    int s = 1;
    for(int i = 0; i < a.dim; i++){
        s *= a.size[i];
    }

    // Independent shape array so each tensor owns its own memory.
    int *out_size = new int[a.dim];
    for(int i = 0; i < a.dim; i++){
        out_size[i] = a.size[i];
    }

    if(a.device == "cpu"){
        float *arr = new float[s];
        for(int i = 0; i < s; i++){
            arr[i] = a.data[i] + b.data[i];
        }
        return Tensor(a.dim, out_size, arr, a.device);
    }

    // "cuda" path — device_equal guarantees both operands live here.
    float *d_c;
    cudaMalloc((void **)&d_c, sizeof(float) * s);
    int threads = 256;
    int blocks = (s + threads - 1) / threads; // ceil-div covers every element
    t_add<<<blocks, threads>>>(a.data, b.data, d_c, s);
    // Attach the already-device pointer without another host->device copy.
    Tensor out(a.dim, out_size, d_c, "cpu");
    out.device = "cuda";
    return out;
}

// Element-wise subtraction (a - b); both tensors must share a device and a
// shape. The original was left unfinished: the shape error never exited,
// nothing was computed, and the non-void function returned nothing
// (undefined behavior). This mirrors tensor_add using the t_sub kernel.
Tensor tensor_sub(Tensor &a, Tensor &b){
    if(device_equal(a,b) < 0){
        string tip = "There were two different devices, between cuda and cpu!";
        cout << tip << endl;
        exit(0);
    }
    if(shape_equal(a,b) < 0){
        string tip = "The shape of two tensors are not equal!";
        cout << tip << endl;
        exit(0);
    }
    int s = 1;
    for(int i = 0; i < a.dim; i++){
        s *= a.size[i];
    }

    // Independent shape array so each tensor owns its own memory.
    int *out_size = new int[a.dim];
    for(int i = 0; i < a.dim; i++){
        out_size[i] = a.size[i];
    }

    if(a.device == "cpu"){
        float *arr = new float[s];
        for(int i = 0; i < s; i++){
            arr[i] = a.data[i] - b.data[i];
        }
        return Tensor(a.dim, out_size, arr, a.device);
    }

    // "cuda" path — device_equal guarantees both operands live here.
    float *d_c;
    cudaMalloc((void **)&d_c, sizeof(float) * s);
    int threads = 256;
    int blocks = (s + threads - 1) / threads; // ceil-div covers every element
    t_sub<<<blocks, threads>>>(a.data, b.data, d_c, s);
    // Attach the already-device pointer without another host->device copy.
    Tensor out(a.dim, out_size, d_c, "cpu");
    out.device = "cuda";
    return out;
}
// test

// Smoke test. The original requested a 100000 x 100000 tensor: the element
// count (1e10) overflows the `int` accumulator in get_tensor and would
// need ~40 GB of memory, so the demo now uses a small shape.
int main(){
    int *size = new int[2];
    size[0] = 4;
    size[1] = 5;
    Tensor t(2, size, 1, "cuda");
    t.shape();

    t.freeTensor();
    return 0;
}