#include <iostream>
#include <thread>
#include <vector>
#include "tensor.h"
#include "conv2d.h"

// Zero-pad a matrix: copy the (m - 2*padding) x (n - 2*padding) input into the
// centre of an m x n output and fill the surrounding border with zeros.
//   padding - border width (must be > 0)
//   input   - source matrix, row stride k (k = number of input columns)
//   output  - destination matrix of size m x n
void padding(int padding, float *input, int k, float *output, int m, int n) {
    if (padding <= 0){
        std::cout << "padding must be greater than 0!" << std::endl;
        return; // output is left untouched, as in the original contract
    }
    for (int r = 0; r < m; r++) {
        for (int c = 0; c < n; c++) {
            // Interior cells map back to the input; border cells become 0.
            bool interior = (r >= padding) && (r < m - padding)
                         && (c >= padding) && (c < n - padding);
            output[r * n + c] = interior ? input[(r - padding) * k + (c - padding)] : 0;
        }
    }
}

// Multi-threaded padding: each worker pads a contiguous range of channels.
// NOTE: member order matters — callers build Gms with aggregate initialization.
typedef struct{
    int start;    // first channel index (inclusive)
    int end;      // last channel index (exclusive)
    int z;        // element count of one input channel plane
    int new_z;    // element count of one padded output channel plane
    int padding;  // border width
    // padded output plane height and width
    int m;
    int n;
    // input plane width (row stride)
    int k;
    float *input;
    float *output;

}Gms;

// Worker: zero-pad every channel in [p->start, p->end).
void padding_t(Gms *p){
    int pad = p->padding;
    for(int c = p->start; c < p->end; c++){
        const float *src = p->input + c * p->z;
        float *dst = p->output + c * p->new_z;
        for (int r = 0; r < p->m; r++) {
            for (int q = 0; q < p->n; q++) {
                // Interior cells copy from the source plane; border cells are 0.
                bool interior = (r >= pad) && (r < p->m - pad)
                             && (q >= pad) && (q < p->n - pad);
                dst[r * p->n + q] = interior ? src[(r - pad) * p->k + (q - pad)] : 0;
            }
        }
    }
}

// Zero-pad a 3D tensor (channel, h, w) by `pad` on every side of each plane,
// splitting the channels across hardware threads.
// Returns a new (channel, h + 2*pad, w + 2*pad) tensor, or NULL on bad input.
//
// Fixes vs. the original:
//  - the output dimensions were hard-coded to wx+2 / wy+2 (only correct for
//    pad == 1); they now use wx + 2*pad / wy + 2*pad;
//  - the per-thread Gms.start was passed as `i` instead of the chunk start
//    (blocksdim * i), so threads overlapped and some channels were skipped;
//  - hardware_concurrency() may return 0, and channel/(thread_num-1) divided
//    by zero when only one hardware thread was reported.
Tensor *fun_padding(Tensor *x, int pad){
    if (pad <= 0){
        std::cout << "padding must be greater than 0!" << std::endl;
        return (Tensor *)NULL;
    }
    if (x->shape->dim != 3){
        std::cout << "input dim must 3!" << std::endl;
        return (Tensor *)NULL;
    }

    // Original tensor shape.
    Shape *t_shape = x->shape;
    int channel = t_shape->shape[0];
    int wx = t_shape->shape[1];
    int wy = t_shape->shape[2];
    int new_wx = wx + 2 * pad;
    int new_wy = wy + 2 * pad;
    int z = wx * wy;             // elements per input plane
    int new_z = new_wx * new_wy; // elements per padded plane
    float *out = new float[new_z * channel];
    float *in = x->data;

    // Thread setup: clamp to [1, channel] so every thread gets work and the
    // chunking below never divides by zero.
    int thread_num = (int)std::thread::hardware_concurrency();
    if (thread_num <= 0) thread_num = 1;
    if (thread_num > channel) thread_num = channel;

    std::vector<std::thread> threads(thread_num);
    std::vector<Gms> mes(thread_num);

    // Distribute channels as evenly as possible: the first `extra` threads
    // take one extra channel.
    int base = channel / thread_num;
    int extra = channel % thread_num;
    int start = 0;
    for (int i = 0; i < thread_num; i++) {
        int end = start + base + (i < extra ? 1 : 0);
        mes[i] = Gms{start, end, z, new_z, pad, new_wx, new_wy, wy, in, out};
        start = end;
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i] = std::thread(padding_t, &mes[i]);
    }
    for (int i = 0; i < thread_num; i++) {
        threads[i].join();
    }

    // Wrap the padded buffer in a new tensor.
    int *ml = new int[3];
    ml[0] = channel;
    ml[1] = new_wx;
    ml[2] = new_wy;

    Shape *new_shape = new Shape(3,ml);
    Tensor *t = new Tensor(new_shape, out, -1);
    return t;
}

// 2D convolution (cross-correlation) over a (in_channel, m, n) input with
// (in_channel*out_channel, k, k) kernels. Returns a (out_channel, o1, o2)
// tensor, or NULL on bad input.
//
// Fixes vs. the original:
//  - `padding <= 0` was rejected, which made the `padding == 0` branch below
//    unreachable — a plain unpadded convolution was impossible; only negative
//    padding is invalid;
//  - the input row stride used the UNPADDED width n while indexing into the
//    padded tensor t, reading the wrong elements whenever padding was applied;
//  - the temporary padded tensor was leaked.
Tensor *fun_conv2d(Tensor *x, int in_channel,int out_channel, Tensor *kernel, int stride, int padding){
    if (padding < 0){
        std::cout << "padding must be non-negative!" << std::endl;
        return (Tensor *)NULL;
    }
    if (x->shape->dim != 3){
        std::cout << "input dim must 3!" << std::endl;
        return (Tensor *)NULL;
    }

    // Pad the input if requested, otherwise convolve x directly.
    Tensor *t;
    if (padding != 0){
        t = fun_padding(x, padding);
        if (t == (Tensor *)NULL) return (Tensor *)NULL;
    }else{
        t = x;
    }

    Shape *shape = x->shape;
    int m = shape->shape[1];
    int n = shape->shape[2];

    int kernel_size = kernel->shape->shape[1];

    // Output spatial dimensions.
    int o1 = (m - kernel_size + 2 * padding) / stride + 1;
    int o2 = (n - kernel_size + 2 * padding) / stride + 1;

    // Output tensor: (out_channel, o1, o2), zero-initialised.
    int *fm = new int[3];
    fm[0] = out_channel;
    fm[1] = o1;
    fm[2] = o2;

    Shape *kf = new Shape(3,fm);
    Tensor *output = new Tensor(kf,0,-1);

    int mmk = kernel_size * kernel_size;   // elements per kernel plane
    int tn  = t->shape->shape[2];          // row stride of the (possibly padded) input
    int mlk = t->shape->shape[1] * tn;     // elements per (padded) input plane
    int mkk = o1 * o2;                     // elements per output plane

    for (int f = 0; f < out_channel; f++){
        for (int p = 0; p < in_channel; p++){
            int z = f * in_channel + p;    // flat kernel index for the (f, p) pair
            for (int i = 0; i < o1; i++) {
                for (int j = 0; j < o2; j++) {
                    // Correlate the kernel with the window whose top-left
                    // corner is (i*stride, j*stride) in the padded input.
                    for (int row = 0; row < kernel_size; row++) {
                        for (int col = 0; col < kernel_size; col++) {
                            output->data[f*mkk + i*o2 + j] +=
                                t->data[p*mlk + (i*stride + row)*tn + (j*stride + col)] *
                                kernel->data[z*mmk + row*kernel_size + col];
                        }
                    }
                }
            }
        }
    }
    // Release the temporary padded tensor (the original leaked it).
    if (t != x) tensorFree(t);
    return output;
}

// Gradient of the convolution kernels (dW): correlates each input channel
// plane with the corresponding output-gradient plane.
// NOTE(review): `kernel_size` is taken from grad->shape->shape[1], i.e. the
// output gradient acts as the sliding "kernel" here. `padding` enters the
// o1/o2 size formula but the input itself is NOT padded in this function —
// TODO confirm this matches how the forward pass was padded.
Tensor *conv2d_backwordw(Tensor *grad, Tensor *input, int in_channel, int out_channel, int stride, int padding){
    int kernel_size = grad->shape->shape[1];
    int m = input->shape->shape[1];
    int n = input->shape->shape[2];
    // Spatial size of each weight-gradient plane.
    int o1 = (int)((m - kernel_size + 2 * padding) / stride) + 1;
    int o2 = (int)((n - kernel_size + 2 * padding) / stride) + 1;
    int mlk = m * n;                      // elements per input plane
    int mkk = o1 * o2;                    // elements per output (dW) plane
    int mmk = kernel_size * kernel_size;  // elements per grad plane

    // Output tensor: one o1 x o2 plane per (out_channel, in_channel) pair.
    int *fm = new int[3];
    fm[0] = in_channel * out_channel;
    fm[1] = o1;
    fm[2] = o2;

    Shape *kf = new Shape(3,fm);
    Tensor *output = new Tensor(kf,0,-1);
    
    for (int f = 0; f < out_channel; f++){
        for (int p = 0 ;p < in_channel; p++){ 
            int z = f * in_channel + p; // flat plane index for the (f, p) pair
            int p1 = 0;
            int p2 = 0; // top-left corner of the current input window
            for (int i = 0; i < o1; i++) {
                for (int j = 0; j < o2; j++) {
                    // Correlate grad plane f against the input window at (p1, p2).
                    int row = 0;
                    int col = 0;
                    for (int k = p1; k < p1 + kernel_size; k++) {
                        for (int l = p2; l < p2 + kernel_size; l++) {
                            output->data[z*mkk+ i*o2 + j] += input->data[ p*mlk + k*n + l] * grad->data[f*mmk + row*kernel_size + col];
                            col++;
                        }
                        col = 0;
                        row++;
                    }
                    col = 0;
                    row = 0;
                p2 += stride;
                }
            p2 = 0;
            p1 += stride;   
            }
            p1 = 0;
            p2 = 0;
        }
    }
    return output;
}

// Rotate each of the k (m x n) planes of `input` by 180 degrees into `output`.
// Fix: the original ignored `k` and rotated only the first m*n elements, so
// for multi-plane kernels the remaining output elements were left
// uninitialized. Plane 0's result is unchanged.
void sgrt(float *input, float *output, int m, int n, int k) {
    int size = m * n;
    for (int c = 0; c < k; c++) {
        const float *src = input + c * size;
        float *dst = output + c * size;
        // Reversing the flattened plane is a 180-degree rotation.
        for (int i = 0; i < size; i++) {
            dst[i] = src[size - 1 - i];
        }
    }
}

// Gradient w.r.t. the layer input (dX): convolve the output gradient with the
// 180-degree-rotated kernel, padded by kernel_size - 1 (full convolution).
//
// Fix: the original computed the padded rotated kernel `sw2` and then freed it
// WITHOUT using it — the unpadded `sw` was passed to fun_conv2d instead. The
// padded tensor is now actually used. NOTE(review): padding the kernel rather
// than the gradient is unusual for a full convolution — TODO verify the
// backward math against the forward pass.
Tensor *conv2d_backwordat(Tensor *grad, Tensor *kernel, int in_channel, int out_channel, int stride, int padding){
    Shape *k_shape = kernel->shape;
    int k_nums = k_shape->shape[0] * k_shape->shape[1] * k_shape->shape[2];
    // Rotate every kernel plane by 180 degrees.
    float *tk_data = new float[k_nums];
    sgrt(kernel->data, tk_data, k_shape->shape[1], k_shape->shape[2], k_shape->shape[0]);
    int *ml = new int[3];
    ml[0] = k_shape->shape[0];
    ml[1] = k_shape->shape[1];
    ml[2] = k_shape->shape[2];
    Shape *l = new Shape(3, ml);
    Tensor *sw = new Tensor(l, tk_data, -1);
    // Pad by kernel_size - 1; fun_padding returns NULL when the pad is 0
    // (kernel_size == 1), in which case the unpadded tensor is used as-is.
    Tensor *sw2 = fun_padding(sw,(k_shape->shape[1] - 1));
    Tensor *rot = (sw2 != (Tensor *)NULL) ? sw2 : sw;
    Tensor *output = fun_conv2d(grad, out_channel, in_channel, rot, stride, padding);
    tensorFree(sw);
    if (sw2 != (Tensor *)NULL) tensorFree(sw2);
    return output;
}

// Conv2d layer constructor: records the hyper-parameters and allocates the
// weight tensor with shape (out_channel * in_channel, kernel_size, kernel_size).
Conv2d::Conv2d(int in_channel, int out_channel, int kernel_size, int padding, int stride, int device, int require_grad){
    this->require_bias = 0; // bias is not supported by this layer
    this->device = device;
    this->require_grad = require_grad;
    this->in_channel = in_channel;
    this->out_channel = out_channel;
    this->kernel_size = kernel_size;
    this->stride = stride;
    this->padding = padding;
    // One kernel_size x kernel_size plane per (out, in) channel pair.
    int *ml = new int[3];
    ml[0] = out_channel * in_channel;
    ml[1] = kernel_size;
    ml[2] = kernel_size;
    Shape *l = new Shape(3, ml);
    // NOTE(review): Tensor(l, 0, device) presumably zero-fills the weights —
    // zero-initialised filters produce degenerate gradients; confirm whether
    // the Tensor ctor randomises, otherwise add random initialisation.
    this->weight = new Tensor(l, 0, device);
}

// Forward pass: cache the input (needed by backward) and run the convolution.
Tensor *Conv2d::forward(Tensor *x){
    this->input = x;
    Tensor *out = fun_conv2d(x, this->in_channel, this->out_channel, this->weight, this->stride, this->padding);
    return out;
}

// Backward pass: compute and cache the weight gradient and the gradient
// propagated to the layer input.
void Conv2d::backward(Tensor *grad){
    Tensor *dw = conv2d_backwordw(grad, this->input, this->in_channel, this->out_channel, this->stride, this->padding);
    Tensor *dx = conv2d_backwordat(grad, this->weight, this->in_channel, this->out_channel, this->stride, this->padding);
    this->weight_grad = dw;
    this->grad_at = dx;
}

void Conv2d::update(float lr){
    Tensor *sw = tensor_mul(lr, this->weight_grad);
    Tensor *temp = this->weight;
    this->weight = tensor_sub(temp, sw);
    tensorFree(sw);
    tensorFree(temp);
}

// Release every tensor held by this layer (the calls are independent, so the
// order is not significant).
void Conv2d::mFree(){
    tensorFree(this->grad_at);
    tensorFree(this->weight_grad);
    tensorFree(this->weight);
    tensorFree(this->input);
}