#include <math.h>
#include <stdlib.h>

#include "gemm.h"
#include "linear_layer.h"
#include "activations.h"
#include "convolutional_layer.h"

/*
 * Build a fully connected (linear) layer.
 *
 * batch            number of samples processed per forward call
 * inputs           input feature count per sample
 * outputs          output feature count per sample
 * activation       elementwise activation applied after the affine transform
 * batch_normalize  nonzero to request batch normalization
 *                  (NOTE(review): the batchnorm forward/backward paths are
 *                  still stubbed out in this file)
 * adam             accepted for interface parity; no Adam state is allocated
 *                  for this layer type yet
 *
 * Returns the initialized layer by value. All buffers are calloc'd
 * (zero-initialized); weights get He-style initialization below.
 */
linear_layer make_linear_layer(int batch, int inputs, int outputs, ACTIVATION activation, int batch_normalize, int adam) {
    int i;
    linear_layer l = { 0 };
    l.type = LINEAR;

    /* A linear layer has no spatial extent. */
    l.h = 1;
    l.w = 1;
    l.c = 1;
    l.batch = batch;
    l.inputs = inputs;
    l.outputs = outputs;
    /* BUG FIX: the batch_normalize flag was accepted but never stored, so
     * forward/backward always took the plain-bias path no matter what the
     * caller asked for. */
    l.batch_normalize = batch_normalize;
    (void) adam; /* intentionally unused for now; silences -Wunused-parameter */

    l.delta = calloc(batch * outputs, sizeof(float));
    l.output = calloc(batch * outputs, sizeof(float));

    l.weight_updates = calloc(inputs * outputs, sizeof(float));

    l.weights = calloc(outputs * inputs, sizeof(float));
    l.biases = calloc(outputs, sizeof(float));
    l.bias_updates = calloc(outputs, sizeof(float));

    l.forward = forward_linear_layer;
    l.backward = backward_linear_layer;
    l.update = update_linear_layer;

    /* He initialization: scale ~ sqrt(2 / fan_in), uniform in [-scale, scale]. */
    // float scale = 1./sqrt(inputs);
    float scale = sqrt(2./inputs);
    for (i = 0; i < outputs * inputs; ++i) {
        l.weights[i] = scale * rand_uniform(-1, 1);
    }
    /* Biases are already zero from calloc; no explicit zeroing loop needed. */

#ifdef GPU
    l.forward_gpu = forward_linear_layer_gpu;
    l.backward_gpu = backward_linear_layer_gpu;
#endif // GPU
    l.activation = activation;
    return l;
}

/*
 * Forward pass: output = activation(input * weights^T + biases).
 *
 * net.input is (batch x inputs) row-major, weights are (outputs x inputs)
 * row-major, output is (batch x outputs). Always returns 0.
 */
int forward_linear_layer(linear_layer layer, network net) {
    fill_cpu(layer.outputs * layer.batch, 0, layer.output, 1);
    int m = layer.batch;
    int k = layer.inputs;
    int n = layer.outputs;

    float *a = net.input;
    float *b = layer.weights;
    float *c = layer.output;
    /* C += A * B^T : (m x k) * (k x n) -> (m x n) */
    gemm(0, 1, m, n, k, 1, a, k, b, k, 1, c, n);

    if (layer.batch_normalize) {
        //forward_batchnorm_layer(layer, net);
    } else {
        /* BUG FIX: the bias count must be layer.outputs. layer.n is never
         * set for linear layers (zeroed in make_linear_layer), so biases
         * were never added; backward_bias below already uses layer.outputs. */
        add_bias(layer.output, layer.biases, layer.batch, layer.outputs, 1);
    }

    activate_array(layer.output, layer.outputs * layer.batch, layer.activation);
    return 0;
}

/*
 * Backward pass: accumulate bias and weight gradients from layer.delta and
 * propagate the error into net.delta (when the previous layer wants it).
 * Always returns 0.
 */
int backward_linear_layer(linear_layer layer, network net) {
    /* Chain rule through the activation: delta *= f'(output). */
    gradient_array(layer.output, layer.outputs * layer.batch, layer.activation, layer.delta);

    if (layer.batch_normalize) {
        // backward_batchnorm_layer(layer, net);
    } else {
        backward_bias(layer.bias_updates, layer.delta, layer.batch, layer.outputs, 1);
    }

    /* BUG FIX: the weight-gradient and input-gradient GEMMs were missing —
     * the function stopped after the bias gradients, so the layer neither
     * learned its weights nor propagated error to earlier layers. */

    /* weight_updates += delta^T * input :
     * (outputs x batch) * (batch x inputs) -> (outputs x inputs) */
    int m = layer.outputs;
    int k = layer.batch;
    int n = layer.inputs;
    float *a = layer.delta;
    float *b = net.input;
    float *c = layer.weight_updates;
    gemm(1, 0, m, n, k, 1, a, m, b, n, 1, c, n);

    /* net.delta += delta * weights :
     * (batch x outputs) * (outputs x inputs) -> (batch x inputs) */
    m = layer.batch;
    k = layer.outputs;
    n = layer.inputs;
    a = layer.delta;
    b = layer.weights;
    c = net.delta;
    /* net.delta may be NULL for the first layer of the network. */
    if (c) gemm(0, 0, m, n, k, 1, a, k, b, n, 1, c, n);

    return 0;
}

/*
 * SGD-with-momentum parameter update for biases and weights, with L2 weight
 * decay folded into the weight gradient. Always returns 0.
 */
int update_linear_layer(linear_layer l, update_args a) {
    float learning_rate = a.learning_rate * l.learning_rate_scale;
    float momentum = a.momentum;
    float decay = a.decay;
    int batch = a.batch;

    /* biases += lr/batch * bias_updates, then decay the update by momentum. */
    axpy_cpu(l.outputs, learning_rate / batch, l.bias_updates, 1, l.biases, 1);
    scal_cpu(l.outputs, momentum, l.bias_updates, 1);

    if (l.batch_normalize) {
        /* NOTE(review): batchnorm scale/rolling-stat updates are not
         * implemented for this layer yet. */
    }

    /* weight_updates -= decay*batch * weights (L2 regularization), then
     * weights += lr/batch * weight_updates. */
    axpy_cpu(l.inputs * l.outputs, -decay * batch, l.weights, 1, l.weight_updates, 1);
    axpy_cpu(l.inputs * l.outputs, learning_rate / batch, l.weight_updates, 1, l.weights, 1);
    /* BUG FIX: momentum was applied to bias_updates but never to
     * weight_updates, so weight gradients accumulated without decay across
     * iterations instead of forming a momentum term. */
    scal_cpu(l.inputs * l.outputs, momentum, l.weight_updates, 1);

    /* BUG FIX: the function is declared int but had no return statement —
     * undefined behavior if a caller reads the result. */
    return 0;
}