#include "linear.h"
#include "tensor.h"
#include "pmath.h"
#include <iostream>


// Number of significant dimensions of a tensor shape.
// Trailing axes of extent 1 are treated as absent:
// scalar -> 0, vector -> 1, matrix -> 2, 3-D tensor -> 3.
int get_dim(Shape shape){
    if (shape.z != 1) return 3;
    if (shape.y != 1) return 2;
    if (shape.x != 1) return 1;
    return 0;
}

// Fully-connected forward pass (batch-first layout only):
//   output = input @ weight + bias, with bias broadcast over the batch.
// Allocates and returns a new Tensor; intermediates are freed here.
Tensor *linear_forword(Tensor *input, Tensor *weight, Tensor *bias){
    int input_dim = get_dim(input->shape);
    // The batch size lives in the highest significant axis of the input
    // shape. Initialize to 1 so a dim-0 (scalar) input no longer leaves
    // batch_size uninitialized (previously undefined behaviour).
    int batch_size = 1;
    if (input_dim == 1){
        batch_size = input->shape.x;
    } else if (input_dim == 2){
        batch_size = input->shape.y;
    } else if (input_dim == 3){
        batch_size = input->shape.z;
    }
    // cps() appears to replicate the bias batch_size times along axis 0 so
    // it can be added elementwise -- assumption from usage; confirm in pmath.h.
    Tensor *all_bias = cps(bias, 0, batch_size);
    Tensor *sw = tensor_matmul(input, weight);
    Tensor *output = tensor_add(sw, all_bias);
    tensorFree(all_bias);
    tensorFree(sw);
    return output;
}

// Gradient w.r.t. the weight matrix: dW = input^T @ grad.
// Returns a newly allocated Tensor; the transposed temporary is freed here.
Tensor *linear_backwardw(Tensor *input, Tensor *grad){
    Tensor *input_t = input->T();
    Tensor *weight_grad = tensor_matmul(input_t, grad);
    tensorFree(input_t);
    return weight_grad;
}

// Gradient w.r.t. the bias: the upstream gradient averaged over axis 0
// (the batch axis). `input` is not needed for this derivative; it is kept
// only so the signature mirrors the other backward helpers.
Tensor *linear_backwardb(Tensor *input, Tensor *grad){
    (void)input;  // intentionally unused
    return tensor_mean(grad, 0);
}

// Gradient propagated to the previous layer: dX = grad @ weight^T.
// Returns a newly allocated Tensor; the transposed temporary is freed here.
Tensor *linear_backwardat(Tensor *weight, Tensor *grad){
    Tensor *weight_t = weight->T();
    Tensor *grad_at = tensor_matmul(grad, weight_t);
    tensorFree(weight_t);
    return grad_at;
}

// SGD step: param <- param - lr * param_grad (bias only when require_bias == 1).
//
// BUG(review): `weight` and `bias` are pointer values passed BY VALUE, so the
// reassignments below change only the local copies while the caller's tensors
// are freed -- the caller (Linear::update) is left holding dangling pointers.
// Fixing this properly requires a Tensor** / Tensor*& signature (and a matching
// change in linear.h) or an in-place subtraction; flagged rather than changed
// here to keep the declared interface intact.
void *linear_update(Tensor *weight_grad, Tensor *weight, Tensor *bias_grad, Tensor *bias, float lr, int require_bias){
    if (require_bias == 1) {
        Tensor *temp = bias;
        Tensor *sw = tensor_mul(lr, bias_grad);
        bias = tensor_sub(temp, sw);
        tensorFree(sw);
        tensorFree(temp);
    }
    Tensor *temp = weight;
    Tensor *sw = tensor_mul(lr, weight_grad);
    weight = tensor_sub(temp, sw);
    tensorFree(sw);
    tensorFree(temp);
    // Declared as returning `void *` but the original flowed off the end
    // without a return statement -- undefined behaviour in C++ ([stmt.return]).
    return nullptr;
}

// Linear layer: y = x @ W + b, with W of shape (in_features, out_features)
// and b of shape (out_features). `require_bias` selects a learnable bias;
// otherwise a fixed all-zero bias tensor is used so forward() can add it
// unconditionally. (Parameter `rquire_bias` was a typo; renamed locally --
// C++ calls are positional, so call sites are unaffected.)
Linear::Linear(int in_features, int out_features, int require_bias, int require_grad) {
    this->in_features = in_features;
    this->out_features = out_features;
    this->require_bias = require_bias;
    this->require_grad = require_grad;
    // Removed `this->device = device;`: no `device` parameter exists, so it
    // read the very member being assigned -- a self-assignment no-op that
    // left `device` effectively uninitialized either way.
    this->weight = new Tensor(Shape{in_features, out_features, 1}, 0);
    if (require_bias == 1) {
        this->bias = new Tensor(Shape{out_features, 1, 1}, 0);
    } else {
        this->bias = zeros(Shape{out_features, 1, 1});
    }
}

// Forward pass. Caches the input tensor so backward() can compute gradients.
Tensor *Linear::forward(Tensor *x) {
    this->input = x;
    return linear_forword(x, this->weight, this->bias);
}

// Compute and cache all three gradients from the upstream gradient `grad`.
void Linear::backward(Tensor *grad) {
    weight_grad = linear_backwardw(input, grad);  // dW = x^T @ grad
    bias_grad = linear_backwardb(input, grad);    // db = mean(grad, axis 0)
    grad_at = linear_backwardat(weight, grad);    // dX, handed to prev layer
}


void Linear::update(float lr){
    linear_update(this->weight_grad, this->weight, this->bias_grad, this->bias, lr, this->require_bias);
}

// 该方法适用于非手动分配内存
void Linear::mFree(){
    tensorFree(this->weight);
    tensorFree(this->bias);
    tensorFree(this->input);
    tensorFree(this->weight_grad);
    tensorFree(this->bias_grad);
    tensorFree(this->grad_at);
}