#include <cstdlib>
#include <ctime>
#include <iostream>
#include <random>
#include <string>
#include <thread>
#include <vector>
using namespace std;

// Describes a tensor's shape: a dimension count plus a raw array holding the
// extent of each dimension. NOTE: the dims array is stored by pointer, never
// copied — the Shape aliases (and, per freeTensor(), is assumed to own) the
// caller's buffer.
class Shape {
public:
    Shape();
    Shape(int dim, int *shape);
    int dim;     // number of dimensions
    int *shape;  // per-dimension extents (array of length `dim`)
    void set_shape(int dim, int *shape);
    int *get_shape();
};

// Default-construct an empty shape: zero dimensions, no dims array.
Shape::Shape() {
    dim = 0;
    shape = nullptr; // was `shape = 0` — use nullptr for pointer values
}

// Construct a shape from a dimension count and a dims array.
// The array is aliased, not copied — the caller's pointer is stored directly.
Shape::Shape(int n_dims, int *extents) {
    dim = n_dims;
    shape = extents;
}

void Shape::set_shape(int dim, int *shape) {
    this->dim = dim;
    this->shape = shape;
}

int *Shape::get_shape() {
    return this->shape;
}

// A dense tensor: a Shape plus a flat float buffer, tagged with the device
// that owns the buffer (-1 = CPU, otherwise a GPU index). Buffers are held
// by raw pointer; freeTensor() releases them explicitly.
class Tensor{
    public:
        Tensor();
        Tensor(Shape *shape, int seed, int device);
        Tensor(Shape *shape, float *data, int device); // construct from caller-supplied data
        int device; // device tag: CPU is -1, otherwise a GPU index; CPU is the default
        Shape *shape; // tensor shape
        float *data; // element buffer; may live in CPU or GPU memory
        
        Shape *get_shape(); // tensor shape accessor
        void print_shape(); // print the tensor shape
        void freeTensor(); // release memory owned by this tensor
        void reshape(Shape *shape); // change the tensor's shape
        void t(); // transpose in place (mutates this tensor)
        Tensor *T(); // transpose into a newly allocated tensor
        void cpu(); // convert GPU data to CPU data
        void cuda(); // convert CPU data to GPU data
        void print_data(); // print the tensor contents

};


// Work item for one random-fill worker: fill arr[start, end) using `seed`.
typedef struct {
    int start;
    int end;
    int seed;
    float *arr;
}Mes;

// Fill mes->arr[mes->start, mes->end) with uniform random floats in [-1, 1).
// Each worker owns a private std::mt19937 seeded from (seed, start), so:
//  * concurrent workers never share RNG state — the original called
//    srand()/rand() from every thread, which is a data race, and
//  * results are reproducible for a given seed (the original mixed in
//    time(NULL), defeating the seed parameter entirely).
void rand_gen(Mes *mes) {
    std::mt19937 rng(static_cast<unsigned int>(mes->seed) +
                     static_cast<unsigned int>(mes->start));
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    for (int i = mes->start; i < mes->end; i++) {
        mes->arr[i] = dist(rng);
    }
}

// Allocate and return a new float[size] filled with uniform values in [-1, 1),
// splitting the work across hardware threads. The caller owns the returned
// buffer and must release it with delete[].
float * rand_arr(int size, int seed) {
    float *arr = new float[size];
    if (size <= 0) {
        return arr; // nothing to fill
    }
    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num <= 0) {
        thread_num = 1; // hardware_concurrency() may legitimately return 0
    }
    if (thread_num > size) {
        thread_num = size; // never hand a worker an empty range
    }
    std::vector<std::thread> threads(thread_num);
    std::vector<Mes> mes(thread_num);
    // Balanced partition: block i covers [size*i/t, size*(i+1)/t). Blocks are
    // contiguous, disjoint, and cover [0, size) exactly even when t does not
    // divide size — replacing the original's three-way special-casing (which
    // also divided by zero for thread_num == 1 on the non-divisible path).
    for (int i = 0; i < thread_num; i++) {
        int start = (int)((long long)size * i / thread_num);
        int end = (int)((long long)size * (i + 1) / thread_num);
        mes[i] = Mes{start, end, seed, arr};
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i] = std::thread(rand_gen, &mes[i]);
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i].join();
    }
    return arr;
}

// Work item for one transpose worker: copy a[start, end) into b transposed.
typedef struct{
    int start;
    int end;
    int m;    // rows of a
    int n;    // cols of a
    float *a; // source, m x n, row-major
    float *b; // destination, n x m, row-major
}Pot_t;

// Transpose worker: for each flat index in [start, end) of the m x n source
// `a`, write the element to its transposed position in the n x m dest `b`.
void transpose_t(Pot_t *p){
    for(int idx = p->start; idx < p->end; idx++){
        int row = idx / p->n;
        int col = idx % p->n;
        p->b[col * p->m + row] = p->a[idx];
    }
}

// b = transpose(a), where a is m x n row-major and b is n x m row-major.
// The copy is split across hardware threads; a and b must not overlap.
void transpose(float *a, float *b, int m, int n){
    int size = m * n;
    if (size <= 0) {
        return; // empty matrix: nothing to do
    }
    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num <= 0) {
        thread_num = 1; // hardware_concurrency() may legitimately return 0
    }
    if (thread_num > size) {
        thread_num = size; // never hand a worker an empty range
    }
    std::vector<std::thread> threads(thread_num);
    std::vector<Pot_t> mes(thread_num);
    // Balanced partition: block i covers [size*i/t, size*(i+1)/t) — contiguous,
    // disjoint, and covering [0, size) even when t does not divide size. This
    // replaces the original three-way special-casing, which divided by zero
    // when thread_num was 1 on the non-divisible path.
    for (int i = 0; i < thread_num; i++) {
        int start = (int)((long long)size * i / thread_num);
        int end = (int)((long long)size * (i + 1) / thread_num);
        mes[i] = Pot_t{start, end, m, n, a, b};
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i] = std::thread(transpose_t, &mes[i]);
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i].join();
    }
}

// Total number of elements described by `shape` (product of all extents).
int get_size_nums(Shape *shape){
    int total = 1;
    for (int d = 0; d < shape->dim; d++){
        total *= shape->shape[d];
    }
    return total;
}

// Default constructor: an empty CPU tensor with null shape/data. The original
// left every member uninitialized, and T()'s error path hands such an object
// back to the caller — reading those members was undefined behavior.
Tensor::Tensor(){
    this->device = -1;
    this->shape = nullptr;
    this->data = nullptr;
}

// Build a tensor of the given shape filled with uniform random values in
// [-1, 1], seeded with `seed`. device == -1 selects the CPU; any other value
// selects a GPU (not implemented yet).
Tensor::Tensor(Shape *shape, int seed, int device){
    this->shape = shape;
    this->device = device;
    if (device == -1){
        int s = get_size_nums(shape);
        this->data = rand_arr(s, seed);
    }else{
        // gpu: not implemented — keep data null so no member is left
        // indeterminate (the original set nothing at all on this path)
        this->data = nullptr;
    }
}

// Build a tensor that wraps a caller-supplied data buffer (not copied).
// device == -1 selects the CPU; any other value selects a GPU (not
// implemented yet).
Tensor::Tensor(Shape *shape, float *data, int device){
    // Always initialize every member: the original left all of them
    // indeterminate on the GPU path.
    this->shape = shape;
    this->device = device;
    this->data = data;
    if (device != -1){
        // gpu: upload not implemented yet
    }
}

// Accessor for the tensor's Shape (still owned by this Tensor).
Shape *Tensor::get_shape(){
    return shape;
}

// Release everything this CPU tensor owns: the data buffer, the Shape's dims
// array, and the Shape object itself. GPU tensors are currently a no-op.
// Note: the Tensor object itself is NOT deleted here — the caller does that.
void Tensor::freeTensor(){
    if (this->device != -1){
        // gpu: not implemented
        return;
    }
    delete[] this->shape->shape;
    delete[] this->data;
    delete this->shape;
}

// Replace this tensor's shape with `shape`; the element counts must match.
// On a mismatch the tensor is left completely unchanged — the original
// printed the error message but swapped the shape in anyway, silently
// corrupting the tensor's metadata.
void Tensor::reshape(Shape *shape){
    Shape *old_shape = this->shape;
    int o = get_size_nums(old_shape);
    int n = get_size_nums(shape);
    if (o != n){
        std::string msg = "reshape error: the size of old shape and new shape is not equal!";
        // throw std::runtime_error(msg);
        std::cout << msg << std::endl;
        return; // keep the old shape intact
    }
    this->shape = shape;
    // Free the whole old Shape, including its dims array: freeTensor() treats
    // the Shape as owning that array, and the original leaked it here.
    delete[] old_shape->shape;
    delete old_shape;
}

// In-place transpose — 2-D tensors (matrices) only. Swaps the two shape
// extents and replaces `data` with the transposed buffer.
void Tensor::t(){
    Shape *shape = this->shape;
    if (shape->dim != 2){
        std::string msg = "t error: the dim of shape is not 2!";
        // throw std::runtime_error(msg);
        std::cout << msg << std::endl;
        return; // bail out: the original fell through and transposed anyway
    }
    // Capture the ORIGINAL (rows, cols) before mutating the shape: transpose()
    // needs the source layout. The original swapped the extents first and then
    // passed the swapped values, which scrambled the data instead of
    // transposing it.
    int rows = shape->shape[0];
    int cols = shape->shape[1];
    shape->shape[0] = cols;
    shape->shape[1] = rows;

    if (this->device == -1){
        int s = rows * cols;
        float *new_data = new float[s];
        float *old_data = this->data;
        transpose(old_data, new_data, rows, cols);
        this->data = new_data;
        delete[] old_data;
    }else{
        // gpu: not implemented
    }
}



// Transpose into a NEW tensor, leaving this one untouched — 2-D tensors only.
// Returns a heap-allocated Tensor the caller owns (freeTensor() + delete).
// On a dim error, returns an empty tensor with null shape/data rather than
// one with indeterminate members.
Tensor *Tensor::T(){
    Shape *shape = this->shape;
    int s = get_size_nums(shape);
    Tensor *new_tensor = new Tensor();
    if (shape->dim != 2){
        std::string msg = "t error: the dim of shape is not 2!";
        // throw std::runtime_error(msg);
        std::cout << msg << std::endl;
        // Explicitly zero the members so the caller never reads garbage.
        new_tensor->shape = nullptr;
        new_tensor->data = nullptr;
        new_tensor->device = this->device;
        return new_tensor;
    }

    // New shape is the old one with the extents swapped.
    int *ml = new int[2];
    ml[0] = shape->shape[1];
    ml[1] = shape->shape[0];
    Shape *new_shape = new Shape(2, ml);

    new_tensor->shape = new_shape;
    new_tensor->device = this->device;
    if (this->device == -1){
        float *new_data = new float[s];
        transpose(this->data, new_data, shape->shape[0], shape->shape[1]);
        new_tensor->data = new_data;
    }else{
        // gpu: not implemented — return shape metadata with no data. The
        // original fell off the end of the function on this path, which is
        // undefined behavior for a value-returning function.
        new_tensor->data = nullptr;
    }
    return new_tensor;
}


// Stub: intended to copy this tensor's data from GPU memory back to the host
// (see the class declaration). TODO(review): not implemented yet.
void Tensor::cpu(){
    // 
}

// Stub: intended to copy this tensor's data from the host to GPU memory
// (see the class declaration). TODO(review): not implemented yet.
void Tensor::cuda(){

    //
}

// Print the tensor's dimensionality and shape, e.g. "shape: (2, 3)".
// Iterates over all dims — the original hard-coded shape[0] and shape[1],
// reading past the end of the extents array for 1-D shapes.
void Tensor::print_shape(){
    Shape *shape = this->shape;
    std::cout << "dim: " << shape->dim << std::endl;
    std::cout << "shape: (";
    for (int i = 0; i < shape->dim; i++){
        if (i > 0) std::cout << ", ";
        std::cout << shape->shape[i];
    }
    std::cout << ")" << std::endl;
}

void Tensor::print_data(){
    Shape *shape = this->shape;
    int s = get_size_nums(shape);
    if (this->device == -1){
        if (shape->dim == 1){
            for (int i = 0; i < s; i++){
                std::cout << this->data[i] << " ";
            }
            std::cout << std::endl;
        }else if(shape->dim == 2){
            for (int i = 0; i < shape->shape[0]; i++){
                for (int j = 0; j < shape->shape[1]; j++){
                    std::cout << this->data[i * shape->shape[1] + j] << " ";
                }
                std::cout << std::endl;
            }
        }
    }else{

        //
    }
}

// Demo: build a 2x3 tensor, transpose it into a new tensor, print, clean up.
int main(){
    // Heap-allocate the dims array and the data buffer: freeTensor() calls
    // delete[] on both, which was undefined behavior for the stack arrays the
    // original passed in.
    int *dims = new int[2]{2, 3};
    Shape *s1 = new Shape(2, dims);
    float *data = new float[6]{1, 2, 3, 4, 5, 6};
    Tensor *t1 = new Tensor(s1, data, -1);

    Tensor *t2 = t1->T();
    t2->print_data();

    t1->freeTensor();
    t2->freeTensor();
    delete t1;
    delete t2;
    return 0;
}