#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <random>
#include <string>
#include <thread>
#include <vector>

// cpu only

using namespace std;


// A minimal dense tensor: a flat float buffer plus shape metadata.
// Only the "cpu" device is implemented in this build; the cuda members
// below are kept as commented-out stubs.
class Tensor{
    public:
        Tensor(int dim, int *size, int seed, string device);
        Tensor(int dim, int *size, float *data, string device); // construct from caller-supplied data
        string device; // device tag: "cpu" or "cuda"
        int dim; // tensor rank (number of dimensions)
        int *size; // shape: extent of each dimension (length == dim)
        float *data; // flat element buffer (cpu or gpu memory, per `device`)
        
        void shape(); // print the tensor shape, e.g. "(1,10)"
        void freeTensor(); // release data and size buffers (no destructor defined; call manually)
        // void cpu(); // copy gpu data back to cpu
        // void cuda(); // copy cpu data to gpu
        void print(); // print the tensor contents
        // operator overloads: element-wise + and -, matrix *
        friend Tensor operator+(Tensor &t1, Tensor &t2); 
        friend Tensor operator-(Tensor &t1, Tensor &t2);
        friend Tensor operator*(Tensor &t1, Tensor &t2);

    private:
        // float *get_tensor(int dim, int *shape,int seed, string device); // build a random tensor buffer
        // float *turn_to_gpu(float *h_arr, int size); // copy cpu data to gpu
        // float *turn_to_cpu(float *d_arr, int size); // copy gpu data to cpu
        // float *turn(int dim, int *shape, float *data); // upload user data to cuda; caller frees the original array
};

// Random data generation: fill a buffer with uniform floats in [-1, 1),
// splitting the work across hardware threads.
/***************************************************/
// Work descriptor handed to each generator thread.
typedef struct {
    int start;   // first index (inclusive)
    int end;     // last index (exclusive)
    int seed;    // base seed shared by all chunks
    float *arr;  // output buffer
}Mes;

// Fill arr[start, end) with uniform floats in [-1, 1).
// Uses a per-chunk std::mt19937 instead of srand()/rand(): the C RNG keeps
// global state, so concurrent srand/rand calls from several threads are a
// data race. Seeding from (seed, start) also makes the output reproducible
// for a given seed, which the original time(NULL) mixing prevented.
void rand_gen(Mes *mes) {
    std::mt19937 gen((unsigned)mes->seed * 2654435761u + (unsigned)mes->start);
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    for (int i = mes->start; i < mes->end; i++) {
        mes->arr[i] = dist(gen);
    }
}

// Allocate and return a new float[size] of uniform values in [-1, 1).
// Caller owns the returned buffer (delete[]). The original divided by
// thread_num without guarding against hardware_concurrency() returning 0.
float * rand_arr(int size, int seed) {
    float *arr = new float[size];
    if (size <= 0) {
        return arr;
    }

    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num <= 0) thread_num = 1;       // hardware_concurrency may report 0
    if (thread_num > size) thread_num = size;  // at most one thread per element

    std::vector<Mes> mes(thread_num);
    std::vector<std::thread> threads;
    threads.reserve(thread_num);

    // split [0, size) into near-equal chunks; the first `rem` chunks get one extra
    int base = size / thread_num;
    int rem = size % thread_num;
    int start = 0;
    for (int i = 0; i < thread_num; i++) {
        int len = base + (i < rem ? 1 : 0);
        mes[i] = Mes{start, start + len, seed, arr};
        threads.emplace_back(rand_gen, &mes[i]);
        start += len;
    }
    for (auto &t : threads) {
        t.join();
    }

    return arr;
}

/***************************************************/

// Constructor: build a tensor of the given shape filled with random data.
// Takes ownership of `size`; `seed` drives the random generator.
Tensor::Tensor(int dim, int *size, int seed, string device){
    this->device = device;
    this->dim = dim;
    this->size = size;

    // element count is the product of every extent
    int total = 1;
    for (int i = 0; i < dim; ++i) {
        total *= size[i];
    }

    this->data = rand_arr(total, seed);
}

// Constructor: wrap caller-supplied data. Takes ownership of `size` and
// (on cpu) of `data`.
Tensor::Tensor(int dim, int *size, float *data, string device){
    this->device = device;
    this->dim = dim;
    this->size = size;
    if(device == "cpu"){
        this->data = data;
    }
    else{
        // cuda upload not implemented; keep the host pointer so this->data
        // is never left uninitialized (the original left it unset for any
        // device other than "cpu", UB on first read)
        // this->data = turn(dim, size, data);
        this->data = data;
    }
}

// Print the tensor shape as "(d0,d1,...)". Generalized from the original,
// which only handled dim 1 and 2 (output for those cases is unchanged).
void Tensor::shape(){
    cout << "(";
    for(int i = 0; i < this->dim; i++){
        if(i > 0){
            cout << ",";
        }
        cout << this->size[i];
    }
    cout << ")" << endl;
}

// Release the tensor's buffers. Only the cpu path is active in this build.
// The pointers are nulled afterwards so a repeated call (or a stale read)
// is harmless rather than a double-free.
// NOTE(review): tensors that share a `size` pointer (e.g. results built
// from an operand's size array) would still double-free across objects —
// confirm ownership at every construction site.
void Tensor::freeTensor(){
    if(this->device == "cpu"){
        delete[] this->data;
        delete[] this->size;
        this->data = nullptr;
        this->size = nullptr;
    }
    else if(this->device == "cuda"){
        // cudaFree(this->data);
        // delete[] this->size;
    }
}

// Print the tensor contents: one line for a 1-D tensor, one line per row
// for a 2-D tensor. Other ranks print nothing (matching shape()'s limits
// in the original layout). The cuda path is a disabled stub.
void Tensor::print(){
    // flattened element count (only needed by the disabled cuda copy-back)
    int total = 1;
    for (int i = 0; i < this->dim; ++i) {
        total *= this->size[i];
    }

    if (this->device == "cpu") {
        if (this->dim == 1) {
            for (int i = 0; i < this->size[0]; ++i) {
                cout << this->data[i] << " ";
            }
            cout << endl;
        } else if (this->dim == 2) {
            // row-major: element (r, c) lives at r * cols + c
            int cols = this->size[1];
            for (int r = 0; r < this->size[0]; ++r) {
                for (int c = 0; c < cols; ++c) {
                    cout << this->data[r * cols + c] << " ";
                }
                cout << endl;
            }
        }
    } else if (this->device == "cuda") {
        // cuda copy-back + print intentionally disabled in the cpu-only build:
        // float *h_arr = new float[total];
        // cudaMemcpy(h_arr, this->data, sizeof(float) * total, cudaMemcpyDeviceToHost);
        // ... print h_arr ...
        // delete[] h_arr;
    }
}

/* Build a tensor of the given shape filled with ones.
   Takes ownership of `size`; allocates the data buffer, which the
   resulting Tensor owns. */
Tensor oneTensor(int dim, int *size, string device){
    // total element count
    int s = 1;
    for(int i = 0; i < dim; i++){
        s *= size[i];
    }

    float *data = new float[s];
    std::fill_n(data, s, 1.0f);  // stdlib fill instead of a hand-rolled loop

    return Tensor(dim, size, data, device);
}

/* Build a tensor of the given shape filled with zeros.
   Takes ownership of `size`; allocates the data buffer, which the
   resulting Tensor owns. */
Tensor zeroTensor(int dim, int *size, string device){
    // total element count
    int s = 1;
    for(int i = 0; i < dim; i++){
        s *= size[i];
    }

    // value-initialization zeroes the whole buffer (no manual loop needed)
    float *data = new float[s]();

    return Tensor(dim, size, data, device);
}

// Shape checks
// Add/sub require identical shapes: same rank and the same extent on
// every axis. Returns 1 when the shapes match, -1 otherwise.
int shape_equal(Tensor &a, Tensor &b){
    if (a.dim != b.dim) {
        return -1;
    }
    for (int i = 0; i < a.dim; ++i) {
        if (a.size[i] != b.size[i]) {
            return -1;
        }
    }
    return 1;
}

// Multiplication shape check: 1 when the two tensors can be matrix
// multiplied, -1 otherwise. The original's (dim1==1 && dim2==2) and
// (dim1==2 && dim2==1) branches were unreachable after the dim1 != dim2
// test and have been removed.
int shape_mul(Tensor &a, Tensor &b){
    if(a.dim != b.dim){
        return -1;
    }
    // 2-D case: inner dimensions must agree
    if(a.dim == 2 && a.size[1] != b.size[0]){
        return -1;
    }
    // NOTE(review): two 1-D tensors pass without a length check, matching
    // the original behavior — confirm whether that is intended.
    return 1;
}

// Device check: 1 when both tensors live on the same device, -1 otherwise.
int device_equal(Tensor &a, Tensor &b){
    return (a.device == b.device) ? 1 : -1;
}


/*  Threaded element-wise add/sub and matrix multiply  */
/**************************************************/

// Work descriptor handed to each worker thread.
typedef struct{
    int start;   // first flat output index (inclusive)
    int end;     // last flat output index (exclusive)
    int k;       // matmul: inner dimension; add/sub: 0 = add, nonzero = subtract
    float *a;    // left operand
    float *b;    // right operand
    float *c;    // output buffer
    int n;       // matmul: output column count (0 means square, i.e. n == k)
}Pot;

// Compute chunk `i`'s [start, end) range when `total` items are split as
// evenly as possible across `chunks` workers (first chunks get one extra).
static void chunk_range(int total, int chunks, int i, int *start, int *end) {
    int base = total / chunks;
    int rem = total % chunks;
    *start = base * i + (i < rem ? i : rem);
    *end = *start + base + (i < rem ? 1 : 0);
}

// Element-wise worker: c[i] = a[i] + b[i] when k == 0, a[i] - b[i] otherwise.
// The original compared the int tag against NULL (an int/pointer mismatch);
// the tag is plain 0 / nonzero.
void add_t(Pot * pot){
    float *a = pot->a;
    float *b = pot->b;
    float *c = pot->c;

    if (pot->k == 0) {
        for (int i = pot->start; i < pot->end; i++) {
            c[i] = a[i] + b[i];
        }
    } else {
        for (int i = pot->start; i < pot->end; i++) {
            c[i] = a[i] - b[i];
        }
    }
}

// Matmul worker: for every flat output index i in [start, end),
// C[i] = dot(row of A, column of B) with A (m x k), B (k x n), C (m x n).
// Two fixes vs. the original:
//  - row/col were derived with k, which is only correct when B is square
//    (n == k); Pot::n now carries the true column count (0 falls back to k);
//  - the sum is accumulated locally and assigned, instead of `+=` into a
//    buffer the callers never zero-initialized.
void mul_t(Pot *p){
    int k = p->k;
    int n = (p->n != 0) ? p->n : k;
    float *A = p->a;
    float *B = p->b;
    float *C = p->c;

    for (int i = p->start; i < p->end; i++) {
        int row = i / n;
        int col = i % n;
        float sum = 0.0f;
        for (int j = 0; j < k; j++) {
            sum += A[row * k + j] * B[j * n + col];
        }
        C[i] = sum;
    }
}

// Shared launcher: split `size` work items across the hardware threads and
// run `fn` on each chunk; `k` and `n` are forwarded into every Pot.
// Guards against hardware_concurrency() returning 0 (the original divided
// by it unchecked) and never spawns more threads than items.
static void launch_pots(void (*fn)(Pot *), float *a, float *b, float *c,
                        int size, int k, int n) {
    if (size <= 0) {
        return;
    }
    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num <= 0) thread_num = 1;
    if (thread_num > size) thread_num = size;

    std::vector<Pot> pots(thread_num);
    std::vector<std::thread> threads;
    threads.reserve(thread_num);

    for (int i = 0; i < thread_num; i++) {
        int start, end;
        chunk_range(size, thread_num, i, &start, &end);
        pots[i] = Pot{start, end, k, a, b, c, n};
        threads.emplace_back(fn, &pots[i]);
    }
    for (auto &t : threads) {
        t.join();
    }
}

// Element-wise add (tag == 0) or subtract (tag != 0) of two length-`size`
// arrays into `c`. (Addition doesn't need k, so the field doubles as the tag.)
void add(float *a, float *b, float *c, int size, int tag){
    launch_pots(add_t, a, b, c, size, tag, 0);
}

// C = A * B with A (m x k), B (k x n), C (m x n), parallelized over the
// m*n output elements. `c` does not need to be zero-initialized.
void degmm(int m, int n, int k, float *a, float *b, float *c){
    launch_pots(mul_t, a, b, c, m * n, k, n);
}

/****************************************************/

// Arithmetic entry points
// Element-wise addition of two cpu tensors. Exits the process on device or
// shape mismatch (original behavior). Fixes vs. the original: the non-cpu
// path no longer falls off the end of a value-returning function (UB), the
// add() tag is 0 instead of NULL, and the result owns a copy of the shape
// instead of sharing a.size (which double-freed when both were released).
Tensor tensor_add(Tensor &a, Tensor &b){
    if(device_equal(a,b) < 0){
        string tip = "There were two different devices, between cuda and cpu!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    if(shape_equal(a,b) < 0){
        string tip = "The shape of two tensors are not equal!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    if(a.device != "cpu"){
        cout << "Only the cpu backend is implemented!" << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }

    // total element count
    int s = 1;
    for(int i = 0; i < a.dim; i++){
        s *= a.size[i];
    }

    // result gets its own copy of the shape array
    int *size = new int[a.dim];
    for(int i = 0; i < a.dim; i++){
        size[i] = a.size[i];
    }

    float *arr = new float[s];
    add(a.data, b.data, arr, s, 0); // tag 0 selects addition
    return Tensor(a.dim, size, arr, a.device);
}

// Element-wise subtraction of two cpu tensors (a - b). Exits the process on
// device or shape mismatch (original behavior). Fixes vs. the original: the
// non-cpu path no longer falls off the end of a value-returning function
// (UB), and the result owns a copy of the shape instead of sharing a.size
// (which double-freed when both tensors were released).
Tensor tensor_sub(Tensor &a, Tensor &b){
    if(device_equal(a,b) < 0){
        string tip = "There were two different devices, between cuda and cpu!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    if(shape_equal(a,b) < 0){
        string tip = "The shape of two tensors are not equal!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    if(a.device != "cpu"){
        cout << "Only the cpu backend is implemented!" << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }

    // total element count
    int s = 1;
    for(int i = 0; i < a.dim; i++){
        s *= a.size[i];
    }

    // result gets its own copy of the shape array
    int *size = new int[a.dim];
    for(int i = 0; i < a.dim; i++){
        size[i] = a.size[i];
    }

    float *arr = new float[s];
    add(a.data, b.data, arr, s, 1); // nonzero tag selects subtraction
    return Tensor(a.dim, size, arr, a.device);
}

// Matrix multiply two 2-D cpu tensors: (m x k) * (k x n) -> (m x n).
// Exits the process on device/rank/shape mismatch (original behavior).
// Fixes vs. the original: the non-cpu path no longer falls off the end of
// a value-returning function (UB), and the output buffer is value-
// initialized — the original worker accumulated with += into an
// uninitialized buffer, producing garbage results.
Tensor tensor_matmul(Tensor &a, Tensor &b){
    if(device_equal(a,b) < 0){
        string tip = "There were two different devices, between cuda and cpu!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    if(a.dim != 2 || b.dim != 2){
        string tip = "This function must run with dim > 1!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    // inner dimensions must agree
    if (a.size[1] != b.size[0]){
        string tip = "The shape of a is not mat shape of b!";
        cout << tip << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }
    if(a.device != "cpu"){
        cout << "Only the cpu backend is implemented!" << endl;
        a.freeTensor();
        b.freeTensor();
        exit(0);
    }

    int s = a.size[0] * b.size[1];
    int *size = new int[2];
    size[0] = a.size[0];
    size[1] = b.size[1];

    float *arr = new float[s](); // zeroed: safe whether the worker adds or assigns
    degmm(a.size[0], b.size[1], b.size[0], a.data, b.data, arr);
    return Tensor(a.dim, size, arr, a.device);
}

// Element-wise addition; validation and error handling live in tensor_add.
Tensor operator+(Tensor &a, Tensor &b){
    return tensor_add(a,b);
}

// Element-wise subtraction; see tensor_sub.
Tensor operator-(Tensor &a, Tensor &b){
    return tensor_sub(a,b);
}

// 2-D matrix multiplication; see tensor_matmul.
Tensor operator*(Tensor &a, Tensor &b){
    return tensor_matmul(a,b);
}

// Smoke test: build a random 1x10 cpu tensor, print its shape and contents.
int main(){
    int *shape = new int[2];
    shape[0] = 1;
    shape[1] = 10;

    Tensor t(2, shape, 1, "cpu");
    t.shape();
    t.print();
    t.freeTensor();

    return 0;
}