#include "activation_layer_TA.h"
#include "darknet_TA.h"
#include "activations_TA.h"
#include <tee_internal_api.h>
#include <tee_internal_api_extensions.h>



/* Scratch-buffer size in bytes needed by this layer: one float per output. */
static size_t get_workspace_size(layer_TA l)
{
    size_t bytes = sizeof(float);
    bytes *= (size_t)l.outputs;
    return bytes;
}
/*
 * Build an activation layer that applies `activation` element-wise.
 *
 * batch:      number of samples processed together
 * inputs:     elements per sample (outputs == inputs for this layer type)
 * activation: which non-linearity the forward pass applies
 *
 * Returns the initialized layer by value. The heap buffers l.output and
 * l.delta are owned by the caller (freed when the layer is torn down).
 */
activation_layer_TA tt_make_activation_layer(int batch, int inputs, ACTIVATION_TA activation)
{
    activation_layer_TA l = { 0 };
    l.type = ACTIVE_TA;

    l.inputs = inputs;
    l.outputs = inputs;
    l.batch = batch;

    /* Fix: element size is sizeof(float), not sizeof(float*); the old code
     * allocated pointer-sized slots (8 vs 4 bytes per element on LP64),
     * silently wasting memory. Cast the count to size_t so batch*inputs
     * is not computed in (overflow-prone) int arithmetic. */
    l.output = calloc((size_t)batch * inputs, sizeof(float));
    l.delta  = calloc((size_t)batch * inputs, sizeof(float));

    l.forward_TA = tt_forward_activation_layer_TA_new;
    l.backward_TA = tt_backward_activation_layer_TA_new;
    l.activation = activation;
    l.workspace_size = get_workspace_size(l);
    return l;
}

/* Strided copy: Y[i*INCY] = X[i*INCX] for i in [0, N). */
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    const float *src = X;
    float *dst = Y;
    int remaining = N;
    while (remaining-- > 0) {
        *dst = *src;
        src += INCX;
        dst += INCY;
    }
}
/* Forward pass specialized for ReLU: copy the layer input into l.output,
 * then apply the ReLU kernel in place. */
void tt_forward_relu_layer_TA_new(activation_layer_TA l,network_TA net)
{
    const int count = l.outputs * l.batch;
    for (int i = 0; i < count; ++i) {
        l.output[i] = net.input[i];
    }
    tt_relu_array_activate_TA(l.output, count);
}



/* Generic forward pass: copy the layer input into l.output, then apply
 * the configured activation in place via activate_array_TA. */
void tt_forward_activation_layer_TA_new(activation_layer_TA l, network_TA net)
{
    const int count = l.outputs * l.batch;
    for (int i = 0; i < count; ++i) {
        l.output[i] = net.input[i];
    }
    activate_array_TA(l.output, count, l.activation);
}

/* Backward pass: gradient_array_TA updates l.delta using the activation's
 * derivative evaluated at l.output (presumably an in-place multiply, as in
 * upstream darknet — confirm in activations_TA), then the result is copied
 * out to net.delta for the previous layer. */
void tt_backward_activation_layer_TA_new(activation_layer_TA l, network_TA net)
{
    const int count = l.outputs * l.batch;
    gradient_array_TA(l.output, count, l.activation, l.delta);
    for (int i = 0; i < count; ++i) {
        net.delta[i] = l.delta[i];
    }
}
