package com.chl.deepmind;

import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;

/**
 * @author 陈宏亮
 */
public class MyDeep {

    public double[][] points;             // output value of every node, per layer
    public double[][] pointErrs;          // back-propagated error (delta) of every node, per layer
    public double[][][] pointWeights;     // pointWeights[l][j][i]: weight from node j of layer l to node i of layer l+1; extra row [nodeCount] holds the bias
    public double[][][] pointWeightDelta; // momentum term for each weight, same shape as pointWeights
    public int layerLen;                  // number of layers
    public double mobp;                   // momentum coefficient
    public double rate;                   // learning rate
    public Random random = new Random();

    /**
     * Builds the network topology and randomly initialises all weights
     * (including the bias row) in [0, 1).
     *
     * @param layerPoints node count per layer, e.g. {2, 10, 1} = 2 inputs, one
     *                    hidden layer of 10 nodes, 1 output
     * @param rate        learning rate
     * @param mobp        momentum coefficient
     */
    public MyDeep(int[] layerPoints, double rate, double mobp) {
        this.mobp = mobp;
        this.rate = rate;
        layerLen = layerPoints.length;
        points = new double[layerLen][];
        pointErrs = new double[layerLen][];
        pointWeights = new double[layerLen][][];
        pointWeightDelta = new double[layerLen][][];
        for (int l = 0; l < layerLen; l++) {
            int pointNum = layerPoints[l];
            points[l] = new double[pointNum];
            pointErrs[l] = new double[pointNum];
            if (l + 1 < layerLen) {
                int nextNum = layerPoints[l + 1];
                // one extra row (index pointNum) stores the bias weights feeding layer l+1
                pointWeights[l] = new double[pointNum + 1][nextNum];
                pointWeightDelta[l] = new double[pointNum + 1][nextNum];
                for (int j = 0; j <= pointNum; j++) {      // j: node (or bias row) in layer l
                    for (int i = 0; i < nextNum; i++) {    // i: node in layer l+1
                        pointWeights[l][j][i] = random.nextDouble(); // random initial weight
                    }
                }
            }
        }
    }

    /**
     * Forward pass: propagates the input layer by layer using a sigmoid
     * activation and returns the output layer's values.
     *
     * @param in input vector; must have at least as many elements as layer 0
     * @return the array backing the last layer (not a copy — callers that keep
     *         results across calls must copy it themselves)
     */
    public double[] computeOut(double[] in) {
        // Copy the input once, instead of re-assigning it for every node of layer 1
        // as the original inner loop did.
        System.arraycopy(in, 0, points[0], 0, points[0].length);
        for (int l = 1; l < layerLen; l++) {
            double[] layer = points[l];                      // current layer
            double[] prevLayer = points[l - 1];              // previous layer
            double[][] prevLayerWeights = pointWeights[l - 1]; // weights feeding this layer
            for (int j = 0; j < layer.length; j++) {
                double z = prevLayerWeights[prevLayer.length][j]; // start with the bias
                for (int i = 0; i < prevLayer.length; i++) {
                    z += prevLayerWeights[i][j] * prevLayer[i];   // weighted sum of previous layer
                }
                layer[j] = 1 / (1 + Math.exp(-z)); // sigmoid activation
            }
        }
        return points[points.length - 1];
    }

    /**
     * Backward pass: computes each node's error from the output layer down and
     * adjusts all weights (with momentum). Must be called after
     * {@link #computeOut(double[])} so {@code points} holds the current outputs.
     *
     * @param tar target output vector, one value per output node
     */
    public void updateWeight(double[] tar) {
        int l = points.length - 1; // current layer index, starting at the output layer
        for (int j = 0; j < pointErrs[l].length; j++) {
            // output-layer delta: f'(o) * (target - output), with f' = o * (1 - o) for sigmoid
            pointErrs[l][j] = points[l][j] * (1 - points[l][j]) * (tar[j] - points[l][j]);
        }

        while (l-- > 0) {
            for (int j = 0; j < pointErrs[l].length; j++) {
                double z = 0.0; // accumulated error flowing back into node j of layer l
                for (int i = 0; i < pointErrs[l + 1].length; i++) {
                    // BUG FIX: the original `z = z + l > 0 ? ... : 0` parsed as
                    // `(z + l > 0) ? ... : 0` because `+` binds tighter than `?:`,
                    // so the error was overwritten on every iteration instead of
                    // accumulated across the next layer's nodes.
                    if (l > 0) { // no error needed for the input layer
                        z += pointErrs[l + 1][i] * pointWeights[l][j][i]; // uses pre-update weight
                    }
                    // momentum-smoothed weight adjustment for node j -> next-layer node i
                    pointWeightDelta[l][j][i] = mobp * pointWeightDelta[l][j][i] + rate * pointErrs[l + 1][i] * points[l][j];
                    pointWeights[l][j][i] += pointWeightDelta[l][j][i];
                    if (j == pointErrs[l].length - 1) {
                        // bias row sits at index j + 1 (== node count of this layer)
                        pointWeightDelta[l][j + 1][i] = mobp * pointWeightDelta[l][j + 1][i] + rate * pointErrs[l + 1][i];
                        pointWeights[l][j + 1][i] += pointWeightDelta[l][j + 1][i];
                    }
                }
                pointErrs[l][j] = z * points[l][j] * (1 - points[l][j]); // this node's delta
            }
        }
    }

    /**
     * One training step: forward pass on {@code in}, then back-propagation
     * towards {@code tar}.
     */
    public void train(double[] in, double[] tar) {
        computeOut(in);
        updateWeight(tar);
    }

    /**
     * Trains on freshly generated random samples (100 epochs of 50 000 samples
     * per round) until {@link #validate()} reports a perfect score.
     * NOTE(review): may loop for a long time if the network never reaches 100%
     * accuracy on the random validation set.
     */
    public void trains() {
        do {
            // 100 epochs x 50 000 samples per validation round
            for (int n = 0; n < 100; n++) {
                Map<String, Object> trainData = getTrainData(50000);
                double[] target = (double[]) trainData.get("target");
                double[][] data = (double[][]) trainData.get("data");
                for (int i = 0; i < data.length; i++) {
                    train(data[i], new double[]{target[i]});
                }
            }
        } while (!validate());
    }

    /**
     * Classifies a fresh random sample set and reports how many predictions
     * match the target (threshold 0.5 on the single output node).
     *
     * @return true only if every sample was classified correctly
     */
    public boolean validate() {
        Map<String, Object> trainData = getTrainData(50000);
        double[][] data = (double[][]) trainData.get("data");
        double[] target = (double[]) trainData.get("target");
        int okNum = 0;
        for (int i = 0; i < data.length; i++) {
            double[] out = computeOut(data[i]);
            if (target[i] == (out[0] > 0.5 ? 1 : 0)) {
                okNum++;
            }
        }
        System.out.println(data.length + "个，正确" + okNum);
        return okNum == data.length;
    }

    /**
     * Generates {@code n} random 2-D points in [0, 400) x [0, 400); the label
     * is 1 when y > 200, otherwise 0.
     *
     * @param n sample count
     * @return map with keys "data" (double[n][2]) and "target" (double[n])
     */
    public Map<String, Object> getTrainData(int n) {
        Map<String, Object> trainData = new HashMap<String, Object>();
        double[][] data = new double[n][2];
        double[] target = new double[n];
        for (int i = 0; i < n; i++) {
            int x = random.nextInt(400);
            int y = random.nextInt(400);
            data[i][0] = x;
            data[i][1] = y;
            target[i] = (y > 200 ? 1 : 0);
        }
        trainData.put("target", target);
        trainData.put("data", data);
        return trainData;
    }

    /** Prints one sample, its raw network output, and the thresholded class. */
    public static void print(double[] data, double[] result) {
        System.out.println(Arrays.toString(data) + "-" + Arrays.toString(result) + ",判断结果：" + (result[0] > 0.5 ? "红正方" : "蓝三角"));
    }

    /** Prints every sample with its target, raw output, class, and whether the class matches. */
    public static void print(double[][] data, double[][] target, double[][] out) {
        for (int i = 0; i < data.length; i++) {
            System.out.println("数据：" + Arrays.toString(data[i])
                    + "，目标结果：" + Arrays.toString(target[i])
                    + "，训练结果：" + Arrays.toString(out[i])
                    + ",判断结果：" + (out[i][0] > 0.5 ? "红正方" : "蓝三角")
                    + "-" + ((out[i][0] > 0.5) == (target[i][0] > 0.5)));
        }
    }
    public static int a;

    public static void main(String[] args) {
        System.out.println("a:" + a);
        // Network layout {2, 1}: 2 input nodes, 1 output node, no hidden layer.
        // Second argument is the learning rate, third the momentum coefficient.
        MyDeep bp = new MyDeep(new int[]{2, 1}, 0.001, 0.5);

        bp.trains();

        // Predict the class of a previously unseen point with the trained network.
        double[] x = new double[]{300, 298};
        double[] result = bp.computeOut(x);
        print(x, result);
    }

}
