package FALCON;

import Bots.Buffer;

/**
 * A fixed-topology multilayer perceptron (input -> H1 -> H2 -> output) used as a
 * Q-value function approximator for reinforcement learning. Supports forward
 * propagation, softmax action selection with an epsilon-greedy schedule, and
 * Q-learning weight updates via backpropagation with momentum.
 *
 * <p>Not thread-safe: propagation state is stored in the shared {@code Layer}
 * activations, and the exploration rate {@code QEpsilon} is static mutable state.
 */
public class MLP {

    // Experience-tuple slot layout constants. Unused inside this class but kept
    // (package-visible, final) for compatibility with other FALCON components.
    final int numSpace = 3;
    final int CURSTATE = 0;
    final int ACTION = 1;
    final int REWARD = 2;
    final int numState;   // width of the state vector = input-layer size
    final int numAction;  // number of discrete actions = output-layer size
    final int numReward;  // reward vector size (hard-coded to 2 in the constructor)


    /////////////////////////////////////////////////////////////////////////////////
    // NOTE(review): this field deliberately shadows the Layer type name; renaming
    // it would break package-visible accesses elsewhere, so it is kept as-is.
    Layer[] Layer;
    public int NUM_LAYERS = 4;  // input + two hidden + output
    public int x;               // input width (= numState)
    public int H1 = 36;         // first hidden layer width
    public int H2 = 72;         // second hidden layer width
    public int Y;               // output width (= numAction)


    /** Allocates the four layers: input(x) -> H1 -> H2 -> output(Y). */
    public void GenerateNetwork() {
        Layer = new Layer[NUM_LAYERS];
        Layer[0] = new Layer(0, x);   // input layer has no incoming weights
        Layer[1] = new Layer(x, H1);
        Layer[2] = new Layer(H1, H2);
        Layer[3] = new Layer(H2, Y);
    }

    ////////////////////////////////////////////////////////////////
    /**
     * Returns a uniformly distributed random double in [Low, High).
     * (The original comment claimed a Gaussian distribution, but
     * {@code Math.random()} is uniform — the name is kept for compatibility.)
     *
     * @param Low  inclusive lower bound
     * @param High exclusive upper bound
     */
    public double RandomEqualREAL(double Low, double High) {
        return Math.random() * (High - Low) + Low;
    }

    /** Initializes every weight of layers 1..NUM_LAYERS-1 uniformly in [-0.5, 0.5). */
    public void RandomWeights() {
        for (int l = 1; l < NUM_LAYERS; l++) {
            for (int i = 0; i < Layer[l].H; i++) {
                for (int j = 0; j < Layer[l - 1].H; j++) {
                    Layer[l].W[i][j] = RandomEqualREAL(-0.5, 0.5);
                }
            }
        }
    }
/////////////////////////////////////////////////////////////////////////////////


    /**
     * Builds and randomly initializes the network.
     *
     * @param agent_num unused here; kept for interface compatibility
     * @param numState  state-vector length (input width)
     * @param numAction number of actions (output width)
     * @param rw        unused here; kept for interface compatibility
     * @param name      unused here; kept for interface compatibility
     */
    public MLP(int agent_num, int numState, int numAction, boolean rw, String name) {
        this.numState = numState;
        x = numState;
        this.numAction = numAction;
        Y = numAction;
        this.numReward = 2;
        pol = new double[numAction];

        GenerateNetwork();  // build topology
        RandomWeights();    // random initial weights per layer
    }


    //////////////////////////////////////////////////////////////////////////////////////////////////////

    ////////////////////////////////////////////////////////////////////


    /**
     * Loads a state vector into the input layer's outputs.
     * Assumes {@code Stat.length >= Layer[0].H} — TODO confirm at call sites.
     */
    void SetStat(double[] Stat) {
        for (int i = 0; i < Layer[0].H; i++) {
            Layer[0].Output[i] = Stat[i];
        }
    }

    ///////////////////////////////////////////////////
    /**
     * Forward-propagates activations from layer {@code Lower} to layer {@code Upper}:
     * Output = sigmoid(alphe * W * x) for hidden/output layers, or the raw weighted
     * sum for layer 0 (the layer-0 branch is unreachable via {@link #PropagateNet},
     * which always passes Upper >= 1; it is kept because this method is public).
     */
    public void PropagateLayer(int Lower, int Upper) {
        for (int i = 0; i < Layer[Upper].H; i++) {
            double Sum = 0;
            for (int j = 0; j < Layer[Lower].H; j++) {
                Sum += Layer[Upper].W[i][j] * Layer[Lower].Output[j];  // W * x
            }
            if (Upper >= 1) {
                // Logistic sigmoid with slope alphe.
                Layer[Upper].Output[i] = 1 / (1 + Math.exp(-this.alphe * Sum));
            } else {
                Layer[Upper].Output[i] = Sum;
            }
        }
    }

    /** Full forward pass: propagate through every consecutive pair of layers. */
    public void PropagateNet() {
        for (int l = 0; l < NUM_LAYERS - 1; l++) {
            PropagateLayer(l, l + 1);
        }
    }
    /////////////////////////////////////////////////////


    /**
     * Copies the output layer's activations into {@code Output}.
     * Assumes {@code Output.length >= Layer[NUM_LAYERS-1].H}.
     */
    public void GetOutput(double[] Output) {
        for (int i = 0; i < Layer[NUM_LAYERS - 1].H; i++) {
            Output[i] = Layer[NUM_LAYERS - 1].Output[i];
        }
    }
    /////////////////////////////////////////////////////////////

    /** Action-probability vector produced by {@link #softmax}; length numAction. */
    public double[] pol;

    /** Fills {@code pol} with softmax probabilities of the first Y entries of Output. */
    public void softmax(double[] Output) {
        double[] tmp = new double[Y];
        double sum = 0.0;
        for (int i = 0; i < Y; i++) {
            tmp[i] = Math.exp(Output[i]);
            sum += tmp[i];
        }
        for (int i = 0; i < Y; i++) {
            pol[i] = tmp[i] / sum;
        }
    }

    /**
     * Masked softmax: probabilities are computed only over actions whose
     * {@code vact[i]} flag is true; invalid actions get probability 0.
     * If no action is valid, {@code pol} is set to all zeros instead of
     * producing NaN (the original divided by a zero sum in that case).
     */
    public void softmax(double[] Output, boolean[] vact) {
        double[] tmp = new double[Y];
        double sum = 0.0;
        for (int i = 0; i < Y; i++) {
            if (vact[i]) {
                tmp[i] = Math.exp(Output[i]);
                sum += tmp[i];
            }
        }
        for (int i = 0; i < Y; i++) {
            // Guard sum > 0: avoids 0/0 -> NaN when every action is masked out.
            pol[i] = (vact[i] && sum > 0) ? tmp[i] / sum : 0;
        }
    }

    /////////////////////////////////////////////////////////////////
    /**
     * Returns the absolute TD error |y - Q(s,a)| for an experience tuple,
     * where y = r for terminal transitions, else r + gamma * max_a' Q(s',a').
     */
    public double eval(Buffer buff) {
        double y;
        if (buff.end) {
            y = buff.r;  // terminal: target is the raw reward
        } else {
            y = MaxQValue(buff.next_stat, buff.r);  // bootstrapped target
        }
        double QValue = Q_Function(buff.stat, buff.action);  // current estimate
        return Math.abs(y - QValue);
    }

    /**
     * Q(s,a) for a one-hot action vector {@code act}; returns the output of the
     * first index where act[i] == 1, or -1 if no index is set.
     */
    public double Q_Function(double[] Stat, double[] act) {
        double[] Output = new double[numAction];
        SetStat(Stat);
        PropagateNet();
        GetOutput(Output);
        for (int i = 0; i < numAction; i++) {
            if (act[i] == 1) return Output[i];
        }
        return -1;
    }

    /** Q(s,a) for an action index. */
    public double Q_Function(double[] Stat, int action) {
        double[] Output = new double[numAction];
        SetStat(Stat);
        PropagateNet();
        GetOutput(Output);
        return Output[action];
    }

    /** Q(s, ·): the full vector of action values for state {@code Stat}. */
    public double[] Q_Function(double[] Stat) {
        double[] Output = new double[numAction];
        SetStat(Stat);
        PropagateNet();
        GetOutput(Output);
        return Output;
    }

    ///////////////////////////////////////////////////////////////////////////////////////
    // Epsilon-greedy schedule: QEpsilon decays by QEpsilonDecay per Policy() call
    // down to a floor of 0.01. Static, so the schedule is shared across instances.
    public static double QEpsilonDecay = 0.00050;
    public static double QEpsilon = 0.50000;

    /**
     * Epsilon-greedy action selection restricted to valid actions.
     * With probability QEpsilon picks uniformly among actions with vact[i] == true;
     * otherwise samples from the masked softmax of the Q-values.
     *
     * @return the selected action index, or -1 if no action is valid
     */
    public int Policy(double[] Stat, boolean[] vact) {
        if (QEpsilon > 0.01) QEpsilon -= QEpsilonDecay;

        if (Math.random() < QEpsilon) {
            // Explore: uniform choice over valid actions. The original used
            // unbounded rejection sampling, which hangs when no action is valid;
            // this bounded pick has the same distribution and returns -1 instead.
            int validCount = 0;
            for (int i = 0; i < numAction; i++) {
                if (vact[i]) validCount++;
            }
            if (validCount == 0) return -1;
            int pick = (int) (Math.random() * validCount);
            for (int i = 0; i < numAction; i++) {
                if (vact[i]) {
                    if (pick == 0) return i;
                    pick--;
                }
            }
            return -1;  // unreachable
        }

        // Exploit: sample an action from the masked softmax policy.
        double[] qValues = new double[numAction];
        SetStat(Stat);
        PropagateNet();
        GetOutput(qValues);
        softmax(qValues, vact);
        double p = Math.random();
        int selectedAction = -1;
        for (int i = 0; i < numAction; i++) {
            p -= pol[i];
            if (p <= 0) {
                selectedAction = i;
                break;
            }
        }
        // Floating-point round-off (or an all-zero pol) can leave p > 0 after the
        // loop; fall back to the most probable action instead of returning -1.
        if (selectedAction == -1) selectedAction = argmax();
        return selectedAction;
    }

    ////////////////////////////////////////////////////////////////
    /**
     * Index of the largest strictly-positive entry of {@code pol},
     * or -1 when every entry is <= 0.
     */
    public int argmax() {
        int loc = -1;
        double max = 0;
        for (int i = 0; i < Y; i++) {
            if (max < pol[i]) {
                max = pol[i];
                loc = i;
            }
        }
        return loc;
    }

    //////////////////////////////////////////////////////
    /** Greedy inference: forward pass, softmax, then argmax over the policy. */
    public int detect(double[] Stat) {
        double[] Output = new double[numAction];
        SetStat(Stat);
        PropagateNet();
        GetOutput(Output);
        softmax(Output);
        return argmax();
    }


    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // Backpropagation.
    /**
     * Derivative of the cross-entropy-style loss -log(pol) w.r.t. output unit i:
     * exp(-Q) - 1 for the taken action, exp(-Q) otherwise.
     * (An MSE alternative, r[i] - r, was present in the original as a comment.)
     */
    public double lossError(int action, double Qvalue, int i) {
        if (i == action) {
            return -1 + Math.exp(-Qvalue);
        }
        return Math.exp(-Qvalue);
    }
    ///////////////////////////////////////////////////////////////

    double alphe = 1.0;  // sigmoid slope, also multiplies the sigmoid derivative

    /**
     * Output-layer error: dydwx[i] = alphe * y * (1 - y) * dLoss/dy,
     * i.e. the sigmoid derivative times the loss gradient.
     */
    public void ComputeOutputLossError(int action) {
        for (int i = 0; i < Layer[NUM_LAYERS - 1].H; i++) {
            double Qvalue = Layer[NUM_LAYERS - 1].Output[i];
            double doutput = lossError(action, Qvalue, i);
            Layer[NUM_LAYERS - 1].dydwx[i] = alphe * Qvalue * (1 - Qvalue) * doutput;
        }
    }

    /**
     * Propagates error from layer {@code Upper} down to layer {@code Lower}:
     * dydwx[i] = alphe * y * (1 - y) * sum_j W[j][i] * upperError[j].
     */
    public void BackpropagateLayer(int Upper, int Lower) {
        for (int i = 0; i < Layer[Lower].H; i++) {
            double r = Layer[Lower].Output[i];
            double dydx = 0;
            for (int j = 0; j < Layer[Upper].H; j++) {
                dydx += Layer[Upper].W[j][i] * Layer[Upper].dydwx[j];  // chain rule through W
            }
            Layer[Lower].dydwx[i] = this.alphe * r * (1 - r) * dydx;  // sigmoid derivative
        }
    }

    /** Backward pass down to layer 1 (layer 0 is the input; it gets no error term). */
    public void BackpropagateNet() {
        for (int l = NUM_LAYERS - 1; l > 1; l--) {
            BackpropagateLayer(l, l - 1);
        }
    }
    ///////////////////////////////////////////////////////////

    double Eta = 0.25;   // learning rate
    double Alpha = 0.5;  // momentum coefficient on the previous gradient step

    /**
     * Gradient step with momentum, scaled by {@code Qvalue}:
     * W += Qvalue * (Eta * error * input + Alpha * previousStep),
     * then the previous-step cache (dydw) is refreshed.
     */
    public void AdjustWeights(double Qvalue) {
        for (int l = 1; l < NUM_LAYERS; l++) {
            for (int i = 0; i < Layer[l].H; i++) {
                for (int j = 0; j < Layer[l - 1].H; j++) {
                    double X = Layer[l - 1].Output[j];
                    double dydwx = Layer[l].dydwx[i];
                    double dydw = Layer[l].dydw[i][j];
                    Layer[l].W[i][j] += Qvalue * (this.Eta * dydwx * X + this.Alpha * dydw);
                    Layer[l].dydw[i][j] = this.Eta * dydwx * X;  // cache for momentum
                }
            }
        }
    }

    ////////////////////////////////////////////////////////////////////
    public double QGamma = 0.1;  // discount factor

    /**
     * Soft (log-mean-exp) backup target: r + gamma * log(mean_a exp(Q(s',a))).
     */
    public double SoftMaxQValue(double[] next_stat, double r) {
        double sum_soft_Q = 0.0;
        for (int i = 0; i < numAction; i++) {
            sum_soft_Q += Math.exp(Q_Function(next_stat, i));
        }
        sum_soft_Q /= numAction;
        double softmax_Q = Math.log(sum_soft_Q);
        return r + QGamma * softmax_Q;
    }

    /** Standard Q-learning target: r + gamma * max_a Q(s',a). */
    public double MaxQValue(double[] next_stat, double r) {
        double max_Q = -Double.MAX_VALUE;
        for (int i = 0; i < numAction; i++) {
            double tmp_Q = Q_Function(next_stat, i);
            if (max_Q < tmp_Q) {
                max_Q = tmp_Q;
            }
        }
        return r + QGamma * max_Q;
    }

    public double QAlpha = 0.5;  // Q-learning step size

    /**
     * Q-learning update, weighted by {@code w} (importance/priority weight):
     * Q <- Q - QAlpha * w * (Q - y), clamped to [0, 1].
     */
    public double QValue_Update(double QValue, double y, double w) {
        QValue = QValue - QAlpha * w * (QValue - y);
        if (QValue < 0) QValue = 0;
        if (QValue > 1) QValue = 1;
        return QValue;
    }

    ///////////////////////////////////////////////////////////////////////
    /**
     * One training step on an experience tuple: forward pass, loss gradient,
     * backward pass, TD-target computation, then a weight update scaled by the
     * updated Q-value. Side effect: buff.p is set to |TD error| (replay priority).
     *
     * @param buff experience tuple (stat, action, r, next_stat, end, w, p)
     * @param p    unused here; kept for interface compatibility
     */
    public void Q_Function_update(Buffer buff, double[] p) {
        SetStat(buff.stat);
        PropagateNet();
        ComputeOutputLossError(buff.action);
        BackpropagateNet();

        double y;
        if (buff.end) {
            y = buff.r;  // terminal transition: target is the reward
        } else {
            y = MaxQValue(buff.next_stat, buff.r);  // bootstrapped max-Q target
        }

        double QValue = Q_Function(buff.stat, buff.action);  // current estimate
        buff.p = Math.abs(y - QValue);                       // store TD error as priority
        QValue = QValue_Update(QValue, y, buff.w);           // Q-learning step

        AdjustWeights(QValue);
    }

}
