#include "vector/linear.h"

#include "log.h"
#include "vector/normal.h"

/// Constructs a fully-connected layer mapping d inputs to h outputs.
/// Weights are drawn from N(0, 0.1); biases start at zero.
/// @param d input feature dimension (rows of the weight matrix)
/// @param h output feature dimension (columns of the weight matrix)
Linear::Linear(int64_t d, int64_t h) : input_(), weights_(d,std::vector<double>(h)),
    weights_gradient_(d, std::vector<double>(h)), bias_(h), bias_gradient_(h)
{
    // Fill each weight row with Gaussian noise (mean 0.0, stddev 0.1).
    for(auto &row:weights_)
    {
        row = Normal::vector(row.size(), 0.0, 0.1);
    }
    // No explicit bias zeroing needed: bias_(h) in the init list
    // value-initializes every element to 0.0 (std::vector guarantee).
    // Gradients likewise start zeroed until backward() runs.
    // LogInfo() << "weights: " << weights_;
    // LogInfo() << "bias: " << bias_;
}

void Linear::forward(const std::vector<std::vector<double>> &input, std::vector<std::vector<double>> &output)
{
    input_ = input;
    auto bias_tmp = std::vector<std::vector<double>>(input_.size(), bias_);
    output = Tensor::dot(input_, weights_);
    output = Tensor::add(output, bias_tmp);
}

/// Backpropagates through the layer and applies one SGD step in place.
/// @param output_gradient dL/dY, shape (batch, h)
/// @return dL/dX, shape (batch, d) — same shape as the cached input, so it
///         can be fed directly into the previous layer's backward()
std::vector<std::vector<double>> Linear::backward(const std::vector<std::vector<double>> &output_gradient)
{
    // output_gradient:(batch, h) input_:(batch, d)
    const double lr = 0.01;  // NOTE(review): hard-coded learning rate; consider making it configurable.
    // dX = dY * W^T, shape (batch, d). This must use the *pre-update*
    // weights, and must match the input shape to chain layers correctly.
    // (Previously computed as W * dY^T, which yielded the transpose,
    // shape (d, batch), breaking layer chaining unless batch == d.)
    auto input_gradient = Tensor::dot(output_gradient, Tensor::transpose(weights_));
    // dW = X^T * dY, shape (d, h).
    weights_gradient_ = Tensor::dot(Tensor::transpose(input_), output_gradient);
    weights_ = Tensor::minus(weights_, Tensor::multiply(weights_gradient_, lr));
    // db: presumably Tensor::sum reduces over the batch axis — TODO confirm.
    bias_gradient_ = Tensor::sum(output_gradient);
    bias_ = Tensor::minus(bias_, Tensor::multiply(bias_gradient_, lr));
    return input_gradient;
}

std::vector<std::vector<double>> Linear::get_weights()
{
    return weights_;
}

void Linear::set_weights(const std::vector<std::vector<double>> &weights)
{
    weights_ = weights;
}

std::vector<double> Linear::get_bias()
{
    return bias_;
}

void Linear::set_bias(const std::vector<double> &bias)
{
    bias_ = bias;
}

std::vector<std::vector<double>> Linear::get_weights_gradient()
{
    return weights_gradient_;
}

std::vector<double> Linear::get_bias_gradient()
{
    return bias_gradient_;
}