package AGENTS;

import java.io.IOException;

/**
 * Q-learning agent whose Q-function is approximated by a 4-layer
 * backpropagation network (input -> H1 -> H2 -> single Q output).
 * Exploration is epsilon-greedy with a decaying epsilon.
 */
public class AGENTS_BPN {

    // ----- experience-tuple layout: (state, action, reward) -----
    final int numSpace = 3;
    final int CURSTATE = 0;
    final int ACTION = 1;
    final int REWARD = 2;
    final int numState;   // size of the state vector (network input width)
    final int numAction;  // number of discrete actions
    final int numReward;  // reward field count (fixed to 2 in the constructor)

    final int complementCoding = 1;

    // operating modes (kept for compatibility with sibling agent classes)
    final int PERFORM = 0;
    final int LEARN = 1;
    final int INSERT = 2;

    public boolean detect_loop = false;
    public boolean look_ahead = false;

    public boolean end_state;

    /* A bpn_xor */

    // ----- training statistics (written elsewhere; kept for compatibility) -----
    double MinTestError;
    boolean train_flag = true;

    double Mean;

    double TrainError;
    double TrainErrorPredictingMean;
    double TestError;
    double TestErrorPredictingMean;

    public static double minQEpsilon = 0.00500; // lower bound on the exploration rate
    public static double initialQ = 0.5;        // default Q-value for unseen states

    public int agentid;

    // scratch Q-values used by the learning cycle
    public double this_Q;
    public double max_Q;
    public double new_Q;

    /////////////////////////////////////////////////////////////////////////////////
    // Network topology.
    // NOTE(review): this field keeps its legacy name "Layer", which shadows the
    // Layer type; it is package-visible, so renaming it could break callers.
    Layer[] Layer;
    public int NUM_LAYERS = 4;
    public int x;        // input width (= numState)
    public int H1 = 36;  // first hidden layer width
    public int H2 = 72;  // second hidden layer width
    public int y;        // = numAction (kept for compatibility; NOT the output width)
    public int Y;        // output width (1: a single Q-value)

    /** Allocates the four network layers according to the configured widths. */
    public void GenerateNetwork() {
        Layer = new Layer[NUM_LAYERS];
        Layer[0] = new Layer(0, x);   // input layer: no incoming weights
        Layer[1] = new Layer(x, H1);
        Layer[2] = new Layer(H1, H2);
        Layer[3] = new Layer(H2, Y);
    }

    ////////////////////////////////////////////////////////////////
    /**
     * Returns a uniformly distributed random double in [Low, High).
     * (The original comment claimed a Gaussian; Math.random() is uniform.)
     */
    public double RandomEqualREAL(double Low, double High) {
        return Math.random() * (High - Low) + Low;
    }

    /** Initializes every weight to a uniform random value in [-0.5, 0.5). */
    public void RandomWeights() {
        for (int l = 1; l < NUM_LAYERS; l++) {
            for (int i = 0; i < Layer[l].H; i++) {
                for (int j = 0; j < Layer[l - 1].H; j++) {
                    Layer[l].W[i][j] = RandomEqualREAL(-0.5, 0.5);
                }
            }
        }
    }
/////////////////////////////////////////////////////////////////////////////////


    /**
     * Builds the agent and its network.
     *
     * @param num       agent identifier
     * @param numState  state vector length (network input width)
     * @param numAction number of discrete actions
     * @throws IOException kept for interface compatibility with sibling agents
     */
    public AGENTS_BPN(int num, int numState, int numAction) throws IOException {
        agentid = num;
        this.numState = numState;
        x = numState;
        this.numAction = numAction;
        y = numAction;
        Y = 1; // the network predicts a single Q-value

        this.numReward = 2;

        Input = new double[numState];
        act = new double[numAction];

        Output = new double[Y];
        target_output = new double[Y];
        pol = new double[Y];

        GenerateNetwork(); // build the layer structure
        RandomWeights();   // randomize every layer's weights

        end_state = false;
    }


    //////////////////////////////////////////////////////////////////////////////////////////////////////
    public static double QEpsilonDecay = 0.00050; // per-step epsilon decay
    public static double QEpsilon = 0.50000;      // current exploration rate

    /**
     * Resets the exploration parameters to their defaults.
     * NOTE(review): both arguments are currently ignored — confirm with callers.
     */
    public void setParameters(int AVTYPE, boolean immediateReward) {
        QEpsilonDecay = 0.00050;
        QEpsilon = 0.50000;
    }


    /////////////////////////////////////////////////////////
    double[] Input;         // current state vector fed to the network
    double[] act;           // one-hot encoding of the chosen action
    double[] target_output; // supervised target for the output layer

    /** Copies the environment state into the network input buffer. */
    public void setState(double[] stat) {
        for (int i = 0; i < numState; i++) {
            Input[i] = stat[i];
        }
    }

    /** One-hot-encodes the chosen action into {@code act}. */
    public void setAction(int action) {
        for (int i = 0; i < numAction; i++)
            act[i] = 0;
        act[action] = 1.0;
    }

    /** Stores the reward as the training target for the single output unit. */
    public void setReward(double r) {
        target_output[0] = r;
    }

    /** Records the target action index. */
    public void setact(int action) {
        target_loc = action;
    }
    ////////////////////////////////////////////////////////////////////


    // ----- forward pass -----

    /** Loads the input vector into layer 0 (the input layer only forwards values). */
    void SetInput(double[] Input) {
        for (int i = 0; i < Layer[0].H; i++) {
            Layer[0].Output[i] = Input[i];
        }
    }

    ///////////////////////////////////////////////////
    /**
     * Propagates activations from layer {@code Lower} to layer {@code Upper}:
     * Output = sigmoid(alphe * W * x). PropagateNet only ever calls this with
     * Upper >= 1, so the linear branch is effectively unreachable there; it is
     * kept because this method is public.
     */
    public void PropagateLayer(int Lower, int Upper) {
        int i, j;
        double Sum;

        for (i = 0; i < Layer[Upper].H; i++) {
            Sum = 0;
            for (j = 0; j < Layer[Lower].H; j++)
                Sum += Layer[Upper].W[i][j] * Layer[Lower].Output[j]; // W * x
            if (Upper >= 1) // hidden/output layers: sigmoid activation
                Layer[Upper].Output[i] = 1 / (1 + Math.exp(-this.alphe * Sum));
            else
                Layer[Upper].Output[i] = Sum;
        }
    }

    /** Runs a full forward pass through all layers. */
    public void PropagateNet() {
        for (int l = 0; l < NUM_LAYERS - 1; l++) {
            PropagateLayer(l, l + 1);
        }
    }
    /////////////////////////////////////////////////////


    /** Copies the last layer's activations into {@code Output}. */
    public void GetOutput(double[] Output) {
        for (int i = 0; i < Layer[NUM_LAYERS - 1].H; i++) {
            Output[i] = Layer[NUM_LAYERS - 1].Output[i];
        }
    }

    /////////////////////////////////////////////////////////////
    double[] pol; // softmax policy over the output units (currently unused in PERFORM)

    /** Computes a softmax over {@code Output} into {@code pol}. */
    public void softmax() {
        double[] tmp = new double[Y];
        double sum = 0;
        for (int i = 0; i < Y; i++) {
            tmp[i] = Math.pow(Math.E, Output[i]);
            sum += tmp[i];
        }
        for (int i = 0; i < Y; i++) {
            pol[i] = tmp[i] / sum;
        }
    }

    ////////////////////////////////////////////////////////////////
    /**
     * Index of the largest strictly-positive entry of {@code pol},
     * or -1 when every entry is <= 0 (max starts at 0).
     */
    public int argmax(double[] pol) {
        int loc = -1;
        double max = 0;
        for (int i = 0; i < Y; i++) {
            if (max < pol[i]) {
                max = pol[i];
                loc = i;
            }
        }
        return loc;
    }

    /////////////////////////////////////////////////////////////
    double[] Output;
    int loc;
    int target_loc;

    /**
     * Forward pass on the current {@code Input}; returns the predicted Q-value
     * (the single output unit).
     */
    public double BPN_PERFORM() {
        SetInput(Input);
        PropagateNet();
        GetOutput(Output);

        //softmax();
        //loc=argmax(pol);

        return Output[0];
    }


    ////////////////////////////////////////
    // ----- backward pass -----

    double alphe = 1.0; // sigmoid steepness

    /**
     * Output-layer error: dydwx = alphe * out * (1 - out) * (target - out),
     * i.e. the sigmoid derivative times the MSE error term.
     */
    public void ComputeOutputError(double[] target_output) {
        int i;
        double output, doutput;

        for (i = 0; i < Layer[NUM_LAYERS - 1].H; i++) {
            output = Layer[NUM_LAYERS - 1].Output[i];
            doutput = target_output[i] - output; // MSE error term (target - prediction)

            Layer[NUM_LAYERS - 1].dydwx[i] = alphe * output * (1 - output) * doutput;
        }
    }

    /** Propagates the error from layer {@code Upper} back into layer {@code Lower}. */
    public void BackpropagateLayer(int Upper, int Lower) {
        int i, j;
        double output, dydx;

        for (i = 0; i < Layer[Lower].H; i++) {
            output = Layer[Lower].Output[i];
            dydx = 0;
            for (j = 0; j < Layer[Upper].H; j++) {
                dydx += Layer[Upper].W[j][i] * Layer[Upper].dydwx[j]; // sum over upper units
            }
            Layer[Lower].dydwx[i] = this.alphe * output * (1 - output) * dydx; // chain rule via sigmoid'
        }
    }

    /** Backpropagates the error through all hidden layers (layer 0 is input only). */
    public void BackpropagateNet() {
        for (int l = NUM_LAYERS - 1; l > 1; l--) {
            BackpropagateLayer(l, l - 1);
        }
    }
    ///////////////////////////////////////////////////////////

    double Eta = 0.25;  // learning rate
    double Alpha = 0.5; // momentum coefficient

    /** Gradient-descent weight update with momentum. */
    public void AdjustWeights() {
        int l, i, j;
        double X, dydwx, dydw;
        for (l = 1; l < NUM_LAYERS; l++) {
            for (i = 0; i < Layer[l].H; i++) {
                for (j = 0; j < Layer[l - 1].H; j++) {
                    X = Layer[l - 1].Output[j];
                    dydwx = Layer[l].dydwx[i];
                    dydw = Layer[l].dydw[i][j];              // previous step's gradient
                    Layer[l].W[i][j] += this.Eta * dydwx * X + this.Alpha * dydw;
                    Layer[l].dydw[i][j] = this.Eta * dydwx * X; // store for next momentum term
                }
            }
        }
    }

    ////////////////////////////////////////////////////////////////////

    /**
     * One supervised training step on the current (Input, target_output) pair:
     * forward pass, output error, backpropagation, weight update.
     */
    public void BPN_LEARN() {
        SetInput(Input);
        PropagateNet();
        GetOutput(Output);

        ComputeOutputError(target_output);
        BackpropagateNet();
        AdjustWeights();
    }

    ///////////////////////////////////////////////////////////////////////////////////

    /**
     * Epsilon-greedy action selection: evaluates the Q-value of every action by
     * toggling its indicator bit in the state, then either explores uniformly
     * (probability QEpsilon) or exploits the max-Q action, breaking ties randomly.
     *
     * NOTE(review): assumes the action indicator bits live at
     * state[28 .. 28 + numAction - 1] — confirm against the environment encoding.
     *
     * @param state environment state vector; temporarily mutated and restored
     * @return the selected action index
     */
    public int Policy(double[] state) {

        double[] qValues = new double[numAction];
        int selectedAction = -1;
        int[] validActions = new int[numAction];
        int maxVA = 0;

        for (int i = 0; i < numAction; i++) {
            state[28 + i] = 1;
            setState(state);
            // BUGFIX: the predicted Q-value was previously discarded, leaving
            // qValues all zero so the greedy branch always chose action 0.
            qValues[i] = BPN_PERFORM();
            state[28 + i] = 0;
            validActions[maxVA] = i;
            maxVA++;
        }

        // epsilon-greedy exploration
        if (Math.random() < QEpsilon) {
            int randomIndex = (int) (Math.random() * maxVA);
            selectedAction = validActions[randomIndex];
        } else {
            double maxQ = -Double.MAX_VALUE;
            int[] doubleValues = new int[qValues.length]; // candidates tied for max Q
            int maxDV = 0;
            for (int vAction = 0; vAction < maxVA; vAction++) {
                int action = validActions[vAction];
                if (qValues[action] > maxQ) {
                    selectedAction = action;
                    maxQ = qValues[action];
                    maxDV = 0; // new maximum: reset the tie list
                    doubleValues[maxDV] = selectedAction;
                } else if (qValues[action] == maxQ) {
                    maxDV++;
                    doubleValues[maxDV] = action;
                }
            }
            if (maxDV > 0) { // ties: pick one uniformly
                int randomIndex = (int) (Math.random() * (maxDV + 1));
                selectedAction = doubleValues[randomIndex];
            }
        }

        return selectedAction;
    }


    /////////////////////////////////////////////////////////////////////
    /**
     * Maximum Q-value over all actions in the current state (0 for terminal states).
     *
     * NOTE(review): setAction only fills {@code act}, which never reaches the
     * network input, so every iteration evaluates the same Q — verify whether
     * the action was meant to be encoded into {@code Input} as in Policy().
     */
    public double getMaxQValue(Boolean isdead) {
        double max_Q = 0.0;
        if (isdead) {
            max_Q = 0.0;
        } else {
            for (int i = 0; i < numAction; i++) { // Q-learning max over actions
                setAction(i);
                double tmp_Q = BPN_PERFORM();
                if (tmp_Q > max_Q) max_Q = tmp_Q;
            }
        }
        return max_Q;
    }

    // ----- Q-learning update -----
    public double QAlpha = 0.5; // Q-learning step size
    public double QGamma = 0.1; // discount factor

    /**
     * Standard Q-learning update, clamped to [0, 1] to match the sigmoid
     * output range of the network:
     * newQ = Q + alpha * (r + gamma * maxQ' - Q).
     */
    public double getNewQValue(double r) {
        double new_Q = this_Q + QAlpha * (r + QGamma * max_Q - this_Q);
        if (new_Q < 0) new_Q = 0;
        if (new_Q > 1) new_Q = 1;
        return new_Q;
    }


}

