package com.example.harmonet.harmtorch;

public class RNN implements Layer{
    // Single-layer Elman RNN cell: h_t = act(W_ih * x_t + b_ih + W_hh * h_(t-1) + b_hh).
    int input_size, hidden_size, num_layers;   // num_layers is stored but forward() applies only one layer
    String nonlinearity;                       // "tanh" or "relu"
    boolean bias;                              // whether b_ih / b_hh are added
    float[] h_cur, h_last;                     // h_last = h_(t-1); h_cur is currently unused
    float[] h_0, b_ih, b_hh;                   // h_0 is currently unused — init() loads the initial state directly into h_last
    float[][] W_ih, W_hh;                      // input-to-hidden (h x in) and hidden-to-hidden (h x h) weights

    /**
     * Builds an RNN layer description; weights are supplied later via {@link #init(float[])}.
     *
     * @param input  input feature size
     * @param hidden hidden state size
     * @param num_ls number of stacked layers (stored only; forward() runs a single layer)
     * @param nonlin activation name, must be "tanh" or "relu"
     * @param bia    whether to add the bias terms b_ih and b_hh
     * @throws Exception if nonlin is neither "tanh" nor "relu"
     */
    public RNN(int input, int hidden, int num_ls, String nonlin, boolean bia)throws Exception{
        // 5 params: input_size, hidden_size, num_layers, nonlinearity, bias.
        input_size = input;
        hidden_size = hidden;
        num_layers = num_ls;
        // BUG FIX: the original used ||, which is true for every possible string
        // (a value can never equal both), so the constructor ALWAYS threw.
        // Reject only when nonlin matches NEITHER accepted value.
        if( !nonlin.equals("tanh") && !nonlin.equals("relu") ){
            throw new Exception("RNN Layer: parameter 'nonlinearity' must be tanh or relu.");
        }
        nonlinearity = nonlin;
        bias = bia;
    }

    /**
     * One recurrent step: consumes input x_t (length input_size), returns h_t
     * (length hidden_size), and stores h_t as h_(t-1) for the next call.
     * Requires {@link #init(float[])} to have been called first.
     */
    @Override
    public Tensor forward(Tensor in){
        Tensor out = new Tensor(hidden_size);
        for(int i = 0; i < hidden_size; i++){
            // BUG FIX: the accumulator must be reset for every hidden unit.
            // The original declared it once before the loop, so each unit's
            // pre-activation leaked into the next one's sum.
            float term = 0;
            for(int j = 0; j < input_size; j++){
                term += W_ih[i][j] * in.tensor()[j];
            }
            for(int j = 0; j < hidden_size; j++){
                term += W_hh[i][j] * h_last[j];
            }
            if(bias){
                term += b_ih[i] + b_hh[i];
            }
            // tanh or ReLU, as selected at construction time.
            out.tensor()[i] = nonlinearity.equals("tanh")
                    ? (float)Math.tanh(term)
                    : Math.max(term, 0f);
        }

        h_last = out.tensor().clone();       // update h_(t-1); cloned so later writes to out don't alias the state

        return out;
    }

    /**
     * Number of floats {@link #init(float[])} consumes:
     * h_0 (h) + b_ih (h) + b_hh (h) + W_ih (h*in) + W_hh (h*h).
     */
    @Override
    public int getParam(){
        // BUG FIX: the original returned 3*h + 2*h*in, which miscounts W_hh
        // (hidden x hidden, not hidden x input) whenever input_size != hidden_size,
        // making init() read out of bounds or leave parameters unconsumed.
        return 3*hidden_size + hidden_size*input_size + hidden_size*hidden_size;
    }

    /**
     * Loads a flat parameter vector, in order: initial hidden state, b_ih,
     * b_hh, W_ih (row-major h x in), W_hh (row-major h x h).
     * par must contain at least {@link #getParam()} floats.
     */
    @Override
    public void init(float[] par){
        // h_0, b_ih, b_hh, W_ih, W_hh
        int curPos = 0;

        // Initial hidden state is stored directly as h_(t-1) for the first step.
        h_last = new float[hidden_size];
        for(int i = 0; i < hidden_size; i++){
            h_last[i] = par[curPos++];
        }

        b_ih = new float[hidden_size];
        for(int i = 0; i < hidden_size; i++){
            b_ih[i] = par[curPos++];
        }

        b_hh = new float[hidden_size];
        for(int i = 0; i < hidden_size; i++){
            b_hh[i] = par[curPos++];
        }

        W_ih = new float[hidden_size][input_size];
        for(int i = 0; i < hidden_size; i++){
            for(int j = 0; j < input_size; j++){
                W_ih[i][j] = par[curPos++];
            }
        }

        W_hh = new float[hidden_size][hidden_size];
        for(int i = 0; i < hidden_size; i++){
            for(int j = 0; j < hidden_size; j++){
                W_hh[i][j] = par[curPos++];
            }
        }

    }
}
