/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package jmodcog2.fwd;

import jmodcog2.fwd.config.SupervisedNetConfig;

/**
 * Abstract base for networks trained by supervised backpropagation.
 * Extends {@link UntrainedNet} with per-node error buffers and per-weight
 * training state: an adaptive learning rate ({@code mu}) steered by a
 * running average of recent weight updates ({@code dwa}).
 *
 * @author Mitch
 */
public abstract class SupervisedNet extends UntrainedNet {

    // Per-sample error buffers at the net's boundaries.
    protected final float[] output_err;   // error injected at the output layer
    protected final float[] input_err;    // error propagated back out of the input layer
    //
    // Learning-rate adaptation gain, read from config; used only in _mu1().
    protected final float Q;
    // One-element time-step counters. Wrapped in arrays so they can be
    // transferred to the execution device with put() (see incrTShared()).
    protected final int t_shared[] = new int[]{0};
    protected final int t_unshared[] = new int[]{0};
    //
    // Backpropagated error term, one slot per node in the whole net.
    protected final float[] error;
    // Per-connection adaptive learning rate (mu) and running average of
    // recent weight updates (dwa) — maintained by _mu1()/_dwa1().
    protected final float[] mu;
    protected final float[] dwa;
    // Same two quantities for the bias weights.
    protected final float[] bias_mu;
    protected final float[] bias_dwa;

    /**
     * Allocates all training-state buffers from the configuration's counts
     * and forwards the config to the superclass.
     *
     * @param config supplies Q and the node / weight / bias totals
     */
    protected SupervisedNet(SupervisedNetConfig config) {
        super(config);
        this.output_err = new float[output_size];
        this.input_err = new float[input_size];
        this.Q = config.getQ();
        this.error = new float[config.totalNumNodes()];
        this.mu = new float[config.totalNumWeights()];
        this.dwa = new float[config.totalNumWeights()];
        this.bias_mu = new float[config.totalNumBias()];
        this.bias_dwa = new float[config.totalNumBias()];

    }

    /**
     * One backward work item for the "shared" layout: the item identified by
     * (node_j, conn_n) either back-propagates a single incoming connection of
     * node_j in layer_j, or — when conn_n is past the last connection slot —
     * updates node_j's bias instead.
     *
     * @param layer_j downstream (target) layer index; must be >= 1
     * @param node_j  node within layer_j being processed
     * @param conn_n  connection slot; conn_n >= num_conns[layer_j - 1] selects
     *                the bias branch
     * @param t       time step fed to the running-average update
     */
    protected void _back_shared(byte layer_j, int node_j, int conn_n, int t) {
        //
        if (conn_n < num_conns[layer_j - 1]) {
            if (shared_conn) {
                int node_i = 0;
                int i = 0;
                int j = 0;
                // Resolve the upstream node feeding this connection slot,
                // then the flat indices of both endpoints.
                node_i = _conn_src(conn_n, layer_j, node_j);
                i = _node_index(layer_j - 1, node_i);
                j = _node_index(layer_j, node_j);
                // Accumulate this connection's contribution to the upstream
                // node's error.
                // NOTE(review): '+=' here is only safe if work items that
                // target the same upstream node i never run concurrently —
                // confirm the execution model serializes them.
                error[i] += _bp(i, j,
                        _conn_index(layer_j - 1, node_i, node_j),
                        t);
            }
        } else {
            if (biased && shared_bias) {
                _bp_bias(_node_index(layer_j, node_j),
                        _bias_index(layer_j, node_j),
                        t);
            }
        }

    }

    /**
     * One backward work item for the "unshared" layout: item node_i of layer_i
     * sums the error flowing back from every node it feeds in layer_i + 1 and
     * updates those connections. The extra item node_i == num_nodes[layer_i]
     * handles all bias updates of layer_i + 1.
     *
     * @param layer_i upstream (source) layer index
     * @param node_i  node within layer_i, or num_nodes[layer_i] for the
     *                bias-update item
     * @param t       time step fed to the running-average update
     */
    protected void _back_unshared(byte layer_i, int node_i, int t) {
        //
        int ci = 0;
        int j = 0;
        int layer_size = num_nodes[layer_i];
        //
        if (node_i < layer_size) {
            if (!shared_conn) {
                float err_i = 0;
                int i = 0;
                //
                i = _node_index(layer_i, node_i);
                // Sum error contributions over all outgoing connections.
                for (int node_j = 0; node_j < num_nodes[layer_i + 1]; node_j++) {
                    ci = _conn_index(layer_i, node_i, node_j);
                    if (ci >= 0) { // negative index means "no such connection"
                        j = _node_index(layer_i + 1, node_j);
                        err_i += _bp(i, j, ci, t);
                    }
                }
                error[i] = err_i;
            }
        }
        if (node_i == layer_size) {
            // Dedicated work item: update every bias of the downstream layer.
            if (biased && !shared_bias) {
                for (int node_j = 0; node_j < num_nodes[layer_i + 1]; node_j++) {
                    ci = _bias_index(layer_i + 1, node_j);
                    j = _node_index(layer_i + 1, node_j);
                    _bp_bias(j, ci, t);
                }
            }
        }
    }

    /**
     * Core backprop update for one connection: computes the downstream node's
     * local delta, applies the weight change, refreshes the running update
     * average (dwa) and the adaptive rate (mu), and returns this connection's
     * contribution to the upstream node's error.
     *
     * @param ni_index   flat index of the upstream node.
     *                   NOTE(review): this parameter is never used in the
     *                   body — dw is scaled by nodes[nj_index], whereas
     *                   classic backprop scales by the *upstream* activation
     *                   nodes[ni_index]. Confirm this is intentional.
     * @param nj_index   flat index of the downstream node
     * @param conn_index flat index of the connection's weight
     * @param t          time step for the running-average update
     * @return weight * delta, the error passed back to the upstream node
     *         (computed from the weight value BEFORE this update)
     */
    protected final float _bp(int ni_index, int nj_index, int conn_index, int t) {
        float x = 0;
        float e = 0;
        float delta = 0;
        //
        float dwa0 = 0;
        float mu0 = 0;
        float dwa1 = 0;
        float mu1 = 0;
        float dw = 0;
        //
        x = nodes[nj_index];
        e = error[nj_index];
        // delta = downstream error times activation derivative at x.
        delta = e * _df(x);


        dwa0 = dwa[conn_index];
        mu0 = mu[conn_index];
        //
        dw = mu0 * delta * x;
        dwa1 = _dwa1(dw, dwa0, t);
        mu1 = _mu1(mu0, dw, dwa0, dwa1);

        // Error contribution uses the pre-update weight.
        float ce = conn_weights[conn_index] * delta;
        conn_weights[conn_index] += dw;
        dwa[conn_index] = dwa1;
        mu[conn_index] = mu1;
        return ce;
    }

    /**
     * Bias counterpart of {@link #_bp}: same delta / dwa / mu bookkeeping,
     * applied to one bias weight. Returns nothing because a bias has no
     * upstream node to pass error back to.
     *
     * @param nj_index   flat index of the node owning the bias
     * @param bias_index flat index of the bias weight
     * @param t          time step for the running-average update
     */
    protected final void _bp_bias(int nj_index, int bias_index, int t) {
        float x = 0;
        float e = 0;
        float delta = 0;
        //
        float dwa0 = 0;
        float mu0 = 0;
        float dwa1 = 0;
        float mu1 = 0;
        float dw = 0;
        //
        x = nodes[nj_index];
        e = error[nj_index];
        delta = e * _df(x);


        dwa0 = bias_dwa[bias_index];
        mu0 = bias_mu[bias_index];
        //
        // NOTE(review): dw is scaled by the node's own activation x; the
        // classic bias update uses input 1 (i.e. dw = mu0 * delta). Confirm.
        dw = mu0 * delta * x;
        dwa1 = _dwa1(dw, dwa0, t);
        mu1 = _mu1(mu0, dw, dwa0, dwa1);

        conn_biases[bias_index] += dw;
        bias_dwa[bias_index] = dwa1;
        bias_mu[bias_index] = mu1;
    }

    /** Zeroes one slot of the per-node error buffer. */
    protected void _clear_err(int index) {
        error[index] = 0;
    }

    /**
     * Copies one output-error value into the error slot of the corresponding
     * output-layer node. nodes_index0[num_layers - 1] is taken as the flat
     * offset of the output layer's first node.
     */
    protected void _cpy_err_in(int index) {
        int ni = 0;
        ni = nodes_index0[num_layers - 1] + index;
        error[ni] = output_err[index];
    }

    /**
     * Copies one input-layer error value out to input_err. Assumes the input
     * layer's nodes start at flat index 0.
     */
    protected void _cpy_err_out(int index) {
        input_err[index] = error[index];
    }

    /**
     * Next adaptive learning rate: scales mu0 by a factor of at least 0.5
     * derived from how the new update relates to the running average.
     * NOTE(review): divides by dwa1 * dwa1 — Inf/NaN when the running
     * average is zero (e.g. at t = 0 with zero-initialized dwa). Confirm
     * callers guarantee dwa1 != 0. 'max' is presumed inherited from the
     * kernel superclass.
     */
    protected float _mu1(float mu0, float dw, float dwa0, float dwa1) {
        return mu0 * max(0.5f, Q * dw * (dwa0 / (dwa1 * dwa1)));
    }

    /**
     * Next running average of weight updates: with ex = 1 / (t0 + 1) this is
     * the incremental-mean update dwa1 = dwa0 + (dw - dwa0) / (t0 + 1).
     */
    protected float _dwa1(float dw, float dwa0, int t0) {
        float ex = 1f / (t0 + 1);
        return (dw * ex) + (dwa0 * (1f - ex));
    }
    // Kernel mode identifiers dispatched in _run(); values continue the
    // superclass's mode numbering (0x0010 = 16 .. 0x0013 = 19).
    static final int CPY_ERR_IN = 0x0010;
    static final int CPY_ERR_OUT = 0x0011;
    static final int BP_SHARED = 0x0012;
    static final int BP_UNSHARED = 0x0013;
    //

    /**
     * Sets the output-layer error and transfers it to the device buffer.
     *
     * @param err must have exactly output_size elements
     * @return true if the error was accepted; false on a size mismatch
     *         (the buffer is left untouched)
     */
    protected final boolean setOutputErr(float[] err) {
        if (err.length == output_size) {
            System.arraycopy(err, 0, output_err, 0, output_size);
            put(output_err);
            return true;
        }
        return false;
    }

    /**
     * Fetches the back-propagated input error from the device buffer.
     * Note: returns the internal array, not a copy.
     */
    public float[] getInputErr() {
        get(input_err);
        return input_err;
    }

    /** Advances the shared-layout time counter and re-transfers it. */
    protected void incrTShared(int t) {
        t_shared[0] += t;
        put(t_shared);
    }

    /** Advances the unshared-layout time counter and re-transfers it. */
    protected void incrTUnshared(int t) {
        t_unshared[0] += t;
        put(t_unshared);
    }

    /**
     * Runs one full backward pass:
     * 1) copy output_err into the output layer's error slots,
     * 2) sweep layers back-to-front with the shared and/or unshared kernels
     *    (both loops run when conn and bias layouts differ),
     * 3) copy the input layer's error out to input_err.
     * NOTE(review): the unshared loop runs execute(num_nodes[i]); if global
     * ids range over 0..n-1, the node_i == layer_size bias branch in
     * _back_unshared is never reached — execute(num_nodes[i] + 1) may be
     * intended. Confirm against the superclass's execute() semantics.
     */
    protected void backprop() {
        setMode(CPY_ERR_IN);
        execute(output_size);
        if (shared_conn || shared_bias) {
            for (byte j = (byte) (num_layers - 1); j > 0; j--) {
                setMode(BP_SHARED);
                setLayer(j);
                // One pass per connection slot, one work item per node.
                execute(num_conns[j - 1], num_nodes[j]);
                incrTShared(num_nodes[j]);
            }
        }
        if ((!shared_conn) || (!shared_bias)) {
            for (byte i = (byte) (num_layers - 2); i >= 0; i--) {
                setMode(BP_UNSHARED);
                setLayer(i);
                execute(num_nodes[i]);
                incrTUnshared(1);
            }
        }
        setMode(CPY_ERR_OUT);
        execute(input_size);
    }

    /**
     * Kernel dispatch: routes the backward modes added by this class; all
     * other modes are handled by the superclass (called first).
     */
    @Override
    protected void _run(int mode) {
        super._run(mode);
        if (mode == BP_SHARED) {
            // pass id selects conn slot offset; global id selects the node;
            // t is the base counter plus the per-pass offset.
            _back_shared(LAYER[0], getPassId(), getGlobalId(0), t_shared[0] + getPassId());
        }
        if (mode == BP_UNSHARED) {
            this._back_unshared(LAYER[0], getGlobalId(), t_unshared[0]);
        }
    }
}
