package FALCON;

/**
 * A small fully-connected feed-forward neural network (4 layers:
 * input, two hidden layers, output) with sigmoid activations, a
 * softmax policy head, and momentum-based gradient descent.
 *
 * Maps a state vector of size {@code numState} to an action
 * probability vector of size {@code numAction}.
 */
public class NN {

    // Experience-tuple layout constants (indices into a 3-slot record).
    final int numSpace = 3;
    final int CURSTATE = 0;
    final int ACTION = 1;
    final int REWARD = 2;
    final int numState;   // width of the state (input) vector
    final int numAction;  // width of the action (output) vector
    final int numReward;  // reward field count (fixed at 2 by the constructor)


    /////////////////////////////////////////////////////////////////////////////////
    // Network layers. Layer[0] is the input layer, Layer[NUM_LAYERS-1] the output.
    // NOTE(review): this field shadows the Layer type name; kept for compatibility.
    Layer[] Layer;
    public int NUM_LAYERS = 4;
    public int x;        // input layer width (= numState)
    public int H1 = 36;  // first hidden layer width
    public int H2 = 72;  // second hidden layer width
    public int Y;        // output layer width (= numAction)


    /** Allocates the layers: x -> H1 -> H2 -> Y. */
    public void GenerateNetwork() {
        Layer = new Layer[NUM_LAYERS];
        Layer[0] = new Layer(0, x);
        Layer[1] = new Layer(x, H1);
        Layer[2] = new Layer(H1, H2);
        Layer[3] = new Layer(H2, Y);
    }

    ////////////////////////////////////////////////////////////////
    /**
     * Returns a uniformly distributed random double in [Low, High).
     * (The original comment claimed a Gaussian distribution, but
     * Math.random() is uniform.)
     */
    public double RandomEqualREAL(double Low, double High) {
        return Math.random() * (High - Low) + Low;
    }

    /** Initializes every weight to a uniform random value in [-0.5, 0.5). */
    public void RandomWeights() {
        // Layer 0 is the input layer and has no incoming weights.
        for (int l = 1; l < NUM_LAYERS; l++) {
            for (int i = 0; i < Layer[l].H; i++) {
                for (int j = 0; j < Layer[l - 1].H; j++) {
                    Layer[l].W[i][j] = RandomEqualREAL(-0.5, 0.5);
                }
            }
        }
    }
/////////////////////////////////////////////////////////////////////////////////


    /**
     * Builds the network and randomizes its weights.
     *
     * @param numState  size of the input (state) vector
     * @param numAction size of the output (action) vector
     */
    public NN(int numState, int numAction) {
        this.numState = numState;
        x = numState;
        this.numAction = numAction;
        Y = numAction;
        this.numReward = 2;
        pol = new double[numAction];

        GenerateNetwork(); // build the layer structure
        RandomWeights();   // randomize each layer's weights
    }


    //////////////////////////////////////////////////////////////////////////////////////////////////////

    ////////////////////////////////////////////////////////////////////


    /** Copies the input vector into the input layer's outputs (layer 0 is identity). */
    void SetInput(double[] Input) {
        for (int i = 0; i < Layer[0].H; i++) {
            Layer[0].Output[i] = Input[i];
        }
    }

    ///////////////////////////////////////////////////
    /**
     * Forward-propagates activations from layer {@code Lower} to {@code Upper}:
     * Output = sigmoid(alphe * W * X). For Upper == 0 the raw sum is kept, but
     * in practice PropagateNet only ever calls this with Upper >= 1.
     */
    public void PropagateLayer(int Lower, int Upper) {
        for (int i = 0; i < Layer[Upper].H; i++) {
            double Sum = 0;
            for (int j = 0; j < Layer[Lower].H; j++) {
                Sum += Layer[Upper].W[i][j] * Layer[Lower].Output[j]; // W * X
            }
            if (Upper >= 1) {
                Layer[Upper].Output[i] = 1 / (1 + Math.exp(-this.alphe * Sum)); // sigmoid
            } else {
                Layer[Upper].Output[i] = Sum;
            }
        }
    }

    /** Runs a full forward pass from the input layer to the output layer. */
    public void PropagateNet() {
        for (int l = 0; l < NUM_LAYERS - 1; l++) {
            PropagateLayer(l, l + 1);
        }
    }
    /////////////////////////////////////////////////////


    /** Copies the final layer's activations into {@code Output}. */
    public void GetOutput(double[] Output) {
        for (int i = 0; i < Layer[NUM_LAYERS - 1].H; i++) {
            Output[i] = Layer[NUM_LAYERS - 1].Output[i];
        }
    }
    /////////////////////////////////////////////////////////////

    /** Softmax action-probability vector produced by policy()/detect(). */
    double[] pol;

    /**
     * Computes softmax(Output) into {@code pol}. The maximum is subtracted
     * before exponentiating for numerical stability; this does not change
     * the resulting probabilities mathematically.
     */
    public void softmax(double[] Output) {
        double max = Output[0];
        for (int i = 1; i < Y; i++) {
            if (Output[i] > max) {
                max = Output[i];
            }
        }
        double sum = 0.0;
        double[] exp = new double[Y];
        for (int i = 0; i < Y; i++) {
            exp[i] = Math.exp(Output[i] - max);
            sum += exp[i];
        }
        for (int i = 0; i < Y; i++) {
            pol[i] = exp[i] / sum;
        }
    }


    ///////////////////////////////////////////////////////////////////////////////////////
    /**
     * Forward pass + softmax: returns (and stores in {@code pol}) the
     * action probability distribution for the given input.
     */
    public double[] policy(double[] Input) {
        double[] Output = new double[numAction];
        SetInput(Input);
        PropagateNet();
        GetOutput(Output);
        softmax(Output);
        return pol;
    }

    ////////////////////////////////////////////////////////////////
    /**
     * Index of the largest entry in {@code pol}; returns -1 if every
     * entry is <= 0 (cannot happen after softmax).
     */
    public int argmax() {
        int loc = -1;
        double max = 0;
        for (int i = 0; i < Y; i++) {
            if (max < pol[i]) {
                max = pol[i];
                loc = i;
            }
        }
        return loc;
    }

    //////////////////////////////////////////////////////
    /** Greedy action selection: the most probable action for the input. */
    public int detect(double[] Input) {
        policy(Input); // forward pass + softmax into pol
        return argmax();
    }


    ///////////////////////////////////////////////////////////////////////////
    /**
     * Derivative of the cross-entropy-style loss -log(pol) with respect to
     * output unit {@code i}, given target class {@code y}: the target unit
     * carries an extra -1 term.
     */
    public double lossError(double output, int i, int y) {
        if (i == y) {
            return -1 + 1 / Math.exp(output);
        }
        return 1 / Math.exp(output);
    }
    ///////////////////////////////////////////////////////////////

    /** Sigmoid steepness; also appears as a factor in the derivative terms. */
    double alphe = 1.0;

    /**
     * Backpropagation step for the output layer: stores
     * alphe * out * (1 - out) * dLoss (sigmoid derivative times loss gradient)
     * into dydwx for each output unit.
     */
    public void ComputeOutputError(int y) {
        int last = NUM_LAYERS - 1;
        for (int i = 0; i < Layer[last].H; i++) {
            double output = Layer[last].Output[i];
            double doutput = lossError(output, i, y);
            Layer[last].dydwx[i] = alphe * output * (1 - output) * doutput;
        }
    }

    /**
     * Backpropagates the error from layer {@code Upper} to {@code Lower}:
     * dydwx(lower) = alphe * out * (1 - out) * sum_j W[j][i] * dydwx(upper)[j].
     */
    public void BackpropagateLayer(int Upper, int Lower) {
        for (int i = 0; i < Layer[Lower].H; i++) {
            double output = Layer[Lower].Output[i];
            double dydx = 0;
            for (int j = 0; j < Layer[Upper].H; j++) {
                dydx += Layer[Upper].W[j][i] * Layer[Upper].dydwx[j];
            }
            Layer[Lower].dydwx[i] = this.alphe * output * (1 - output) * dydx;
        }
    }

    /** Full backward pass; stops before layer 0 (the input layer has no error). */
    public void BackpropagateNet() {
        for (int l = NUM_LAYERS - 1; l > 1; l--) {
            BackpropagateLayer(l, l - 1);
        }
    }
    ///////////////////////////////////////////////////////////

    /** Learning rate for the fresh gradient term. */
    double Eta = 0.25;
    /** Momentum coefficient applied to the previous step's gradient. */
    double Alpha = 0.5;

    /**
     * Gradient-descent weight update with momentum, scaled by {@code Value}:
     * W += Value * (Eta * grad + Alpha * prevGrad), then prevGrad is refreshed.
     */
    public void AdjustWeights(double Value) {
        for (int l = 1; l < NUM_LAYERS; l++) {
            for (int i = 0; i < Layer[l].H; i++) {
                for (int j = 0; j < Layer[l - 1].H; j++) {
                    double X = Layer[l - 1].Output[j];
                    double dydwx = Layer[l].dydwx[i];
                    double dydw = Layer[l].dydw[i][j]; // previous step's gradient
                    Layer[l].W[i][j] += Value * (this.Eta * dydwx * X + this.Alpha * dydw);
                    Layer[l].dydw[i][j] = this.Eta * dydwx * X; // remember for momentum
                }
            }
        }
    }

    ////////////////////////////////////////////////////////////////////

    /**
     * One training step: forward pass on {@code Input}, backpropagate the
     * error against target class {@code y}, and update weights scaled by
     * {@code Value} (e.g. a reward signal).
     */
    public void learn(double[] Input, int y, double Value) {
        SetInput(Input);
        PropagateNet();
        ComputeOutputError(y);
        BackpropagateNet();
        AdjustWeights(Value);
    }

}
