package com.rongji.dfish.misc.nn;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.util.*;

public class FOJ1000 {

    /**
     * Loads a CSV data set for training/testing.
     *
     * <p>Each non-blank line must contain at least 8 comma-separated columns:
     * column 0 is a row id (ignored), columns 1-4 are the four numeric input
     * features, and columns 5-7 are the one-hot class label (0/1 integers).
     *
     * @param filePath path to the CSV file
     * @return map from one-hot label array (key) to feature vector (value)
     * @throws IOException if the file cannot be read; when the file does not
     *                     exist, the absolute path is printed before rethrowing
     *                     so a wrong relative path is easy to diagnose
     */
    public static Map<int[], double[]> loadData(String filePath) throws IOException {
        File file = new File(filePath);
        Map<int[], double[]> rs = new HashMap<>();
        // try-with-resources guarantees the reader is closed (the previous
        // version leaked it), and readLine() copes with files of any size and
        // with both \r\n and \n line endings (the previous version read at
        // most 8192 chars and only split on \r\n).
        try (BufferedReader reader = new BufferedReader(new FileReader(file))) {
            String line;
            while ((line = reader.readLine()) != null) {
                line = line.trim();
                if (line.isEmpty()) {
                    continue; // skip blank lines, e.g. a trailing newline
                }
                String[] data = line.split(",");

                double[] input = new double[4];
                input[0] = Double.parseDouble(data[1]);
                input[1] = Double.parseDouble(data[2]);
                input[2] = Double.parseDouble(data[3]);
                input[3] = Double.parseDouble(data[4]);

                int[] right = new int[3];
                right[0] = Integer.parseInt(data[5]);
                right[1] = Integer.parseInt(data[6]);
                right[2] = Integer.parseInt(data[7]);

                rs.put(right, input);
            }
        } catch (FileNotFoundException ex) {
            // Show where we actually looked before propagating the error.
            System.out.println(file.getAbsolutePath());
            throw ex;
        }
        return rs;
    }

    public static void main(String[] args) throws IOException {
        Map<int[], double[]> trainData = loadData("dfish-misc/src/main/java/com/rongji/dfish/misc/nn/iris2.csv");

        int inputNum = 4; // input vector dimension: 4 features per sample
        int hiddenNum = 8; // number of hidden-layer neurons
        int outputNum = 3; // output vector dimension: one per class
        int loopCount = 20; // number of training epochs
        double learnRate = 0.5; // weight-update step size; larger steps learn faster but are more prone to local optima
        int weightInitValue = 0; // initial value for both weight matrices
        int levelInitValue = 0; // initial value for neuron output, error, and bias vectors

        double[][] input2HiddenWeight = new double[inputNum][hiddenNum]; // input <-> hidden weight matrix
        double[][] hidden2OutputWeight = new double[hiddenNum][outputNum]; // hidden <-> output weight matrix
        double[] hiddenOLevel = new double[hiddenNum]; // hidden-layer neuron outputs
        double[] outputOLevel = new double[outputNum]; // output-layer neuron outputs
        double[] thetaHidden = new double[hiddenNum]; // hidden-layer neuron biases
        double[] thetaOutput = new double[outputNum]; // output-layer neuron biases
        double[] eHidden = new double[hiddenNum]; // hidden-layer neuron errors
        double[] eOutput = new double[outputNum]; // output-layer neuron errors
        BPNN nn = new BPNN(
                inputNum,
                hiddenNum,
                outputNum,
                loopCount,
                learnRate,
                weightInitValue,
                levelInitValue,
                input2HiddenWeight,
                hidden2OutputWeight,
                hiddenOLevel,
                outputOLevel,
                thetaHidden,
                thetaOutput,
                eHidden,
                eOutput);
        nn.initParams();

        // Training procedure, per epoch and per sample:
        //   1. fetch the next training sample
        //   2. forward pass: compute each neuron's output layer by layer up to the output layer
        //   3. backward pass: compute each neuron's error, from the output layer back to the hidden layer
        //   4. update: adjust every weight matrix and bias vector from the outputs and errors of steps 2-3
        for (int i = 0; i < loopCount; i++) {
            for (Map.Entry<int[], double[]> entry : trainData.entrySet()) {
                int[] right = entry.getKey();
                double[] input = entry.getValue();

                nn.forwardCompute(input);
                nn.eCompute(right);
                nn.update(input);
            }
        }

        Map<int[], double[]> testData = loadData("dfish-misc/src/main/java/com/rongji/dfish/misc/nn/iris3.csv");

        // Measure accuracy on a held-out test set that was never trained on.
        int rightNum = 0;
        int errorNum = 0;
        for (Map.Entry<int[], double[]> entry : testData.entrySet()) {
            if (nn.predict(entry.getValue(), entry.getKey())) {
                rightNum++;
            } else {
                errorNum++;
            }
        }

        System.out.println(rightNum * 1d / testData.size());
    }

}

/**
 * A minimal three-layer (input / hidden / output) back-propagation neural
 * network using the sigmoid activation function throughout.
 *
 * <p>All state arrays are supplied by the caller and mutated in place by
 * {@link #forwardCompute}, {@link #eCompute} and {@link #update}.
 * Not thread-safe.
 */
class BPNN {
    private int inputNum;
    private int hiddenNum;
    private int outputNum;
    private int loopCount;
    private double learnRate;
    private int weightInitValue;
    private int levelInitValue;
    private double[][] input2HiddenWeight;
    private double[][] hidden2OutputWeight;
    private double[] hiddenOLevel;
    private double[] outputOLevel;
    private double[] thetaHidden;
    private double[] thetaOutput;
    private double[] eHidden;
    private double[] eOutput;

    public BPNN(int inputNum, int hiddenNum, int outputNum, int loopCount, double learnRate, int weightInitValue, int levelInitValue, double[][] input2HiddenWeight, double[][] hidden2OutputWeight, double[] hiddenOLevel, double[] outputOLevel, double[] thetaHidden, double[] thetaOutput, double[] eHidden, double[] eOutput) {
        this.inputNum = inputNum;
        this.hiddenNum = hiddenNum;
        this.outputNum = outputNum;
        this.loopCount = loopCount;
        this.learnRate = learnRate;
        this.weightInitValue = weightInitValue;
        this.levelInitValue = levelInitValue;
        this.input2HiddenWeight = input2HiddenWeight;
        this.hidden2OutputWeight = hidden2OutputWeight;
        this.hiddenOLevel = hiddenOLevel;
        this.outputOLevel = outputOLevel;
        this.thetaHidden = thetaHidden;
        this.thetaOutput = thetaOutput;
        this.eHidden = eHidden;
        this.eOutput = eOutput;
    }

    /**
     * Resets every output/bias/error vector to {@code levelInitValue} and
     * every weight matrix entry to {@code weightInitValue}.
     */
    public void initParams() {
        Arrays.fill(hiddenOLevel, levelInitValue);
        Arrays.fill(thetaHidden, levelInitValue);
        Arrays.fill(eHidden, levelInitValue);
        Arrays.fill(outputOLevel, levelInitValue);
        Arrays.fill(thetaOutput, levelInitValue);
        Arrays.fill(eOutput, levelInitValue);
        for (double[] row : input2HiddenWeight) {
            Arrays.fill(row, weightInitValue);
        }
        for (double[] row : hidden2OutputWeight) {
            Arrays.fill(row, weightInitValue);
        }
    }

    // Sigmoid activation; its range is the open interval (0, 1).
    private static double sigmoid(double x) {
        return 1d / (1d + Math.exp(-1d * x));
    }

    /**
     * Forward pass: propagates {@code input} through the hidden layer into
     * {@code hiddenOLevel}, then through the output layer into
     * {@code outputOLevel}.
     */
    public void forwardCompute(double[] input) {
        // Input layer -> hidden layer: weighted sum plus bias, then sigmoid.
        for (int h = 0; h < hiddenNum; h++) {
            double acc = 0;
            for (int in = 0; in < inputNum; in++) {
                acc += input2HiddenWeight[in][h] * input[in];
            }
            hiddenOLevel[h] = sigmoid(acc + thetaHidden[h]);
        }
        // Hidden layer -> output layer: same scheme.
        for (int out = 0; out < outputNum; out++) {
            double acc = 0;
            for (int h = 0; h < hiddenNum; h++) {
                acc += hidden2OutputWeight[h][out] * hiddenOLevel[h];
            }
            outputOLevel[out] = sigmoid(acc + thetaOutput[out]);
        }
    }

    /**
     * Backward pass: fills {@code eOutput} and {@code eHidden} with the
     * error terms (sigmoid derivative times back-propagated difference)
     * against the one-hot target {@code right}.
     */
    public void eCompute(int[] right) {
        // Output layer: delta = o * (1 - o) * (target - o).
        for (int out = 0; out < outputOLevel.length; out++) {
            double o = outputOLevel[out];
            eOutput[out] = o * (1 - o) * (right[out] - o);
        }
        // Hidden layer: delta = h * (1 - h) * sum of downstream weighted deltas.
        for (int h = 0; h < hiddenOLevel.length; h++) {
            double backSum = 0d;
            for (int out = 0; out < outputOLevel.length; out++) {
                backSum += hidden2OutputWeight[h][out] * eOutput[out];
            }

            eHidden[h] = hiddenOLevel[h] * (1 - hiddenOLevel[h]) * backSum;
        }
    }

    /**
     * Gradient step: nudges both weight matrices and both bias vectors by
     * {@code learnRate} times the errors computed by {@link #eCompute}.
     */
    public void update(double[] input) {
        // Hidden -> output weights.
        for (int h = 0; h < hidden2OutputWeight.length; h++) {
            for (int out = 0; out < hidden2OutputWeight[h].length; out++) {
                hidden2OutputWeight[h][out] += learnRate * hiddenOLevel[h] * eOutput[out];
            }
        }
        // Input -> hidden weights.
        for (int in = 0; in < input2HiddenWeight.length; in++) {
            for (int h = 0; h < input2HiddenWeight[in].length; h++) {
                input2HiddenWeight[in][h] += learnRate * input[in] * eHidden[h];
            }
        }
        // Hidden-layer biases.
        for (int h = 0; h < thetaHidden.length; h++) {
            thetaHidden[h] += (learnRate * eHidden[h]);
        }
        // Output-layer biases.
        for (int out = 0; out < thetaOutput.length; out++) {
            thetaOutput[out] += (learnRate * eOutput[out]);
        }
    }

    /**
     * Classifies {@code input} and reports whether the strongest output
     * neuron matches the one-hot label {@code right} (first 1 wins; ties in
     * the outputs resolve to the earliest index).
     */
    public boolean predict(double[] input, int[] right) {
        forwardCompute(input);
        double best = -1;
        int bestIndex = -1;
        for (int out = 0; out < outputOLevel.length; out++) {
            if (outputOLevel[out] > best) {
                bestIndex = out;
                best = outputOLevel[out];
            }
        }

        int labelIndex = -1;
        for (int out = 0; out < right.length; out++) {
            if (right[out] == 1) {
                labelIndex = out;
                break;
            }
        }

        return labelIndex == bestIndex;
    }

    /**
     * Runs a forward pass and returns a defensive copy of the raw output
     * vector.
     */
    public double[] predict(double[] input) {
        forwardCompute(input);
        return outputOLevel.clone();
    }

}
