package com.demo.test.utils;

import org.deeplearning4j.nn.api.OptimizationAlgorithm;
import org.deeplearning4j.nn.conf.GradientNormalization;
import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.Updater;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.GravesLSTM;
import org.deeplearning4j.nn.conf.layers.LSTM;
import org.deeplearning4j.nn.conf.layers.RnnOutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.deeplearning4j.nn.weights.WeightInit;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.dataset.DataSet;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class LSTMUtils {

    /** Historical electrical load samples (demo data, one reading per period). */
    public static double[] load = {100, 120, 110, 130, 140, 120, 150, 160, 140, 130};
    /** Temperature readings aligned index-for-index with {@link #load}. */
    public static double[] temperature = {25, 24, 26, 28, 30, 29, 31, 33, 32, 30};
    /** Humidity readings aligned index-for-index with {@link #load}. */
    public static double[] humidity = {60, 62, 58, 55, 57, 59, 61, 60, 58, 56};

    /**
     * Min-max normalizes {@code data} into the range [0, 1].
     *
     * @param data raw input series; not modified
     * @return a new array where each element is {@code (x - min) / (max - min)};
     *         an empty array for empty input, and an all-zero array when the
     *         input is constant (the previous implementation divided by zero
     *         in that case, yielding NaN for every element)
     */
    public static double[] normalizeData(double[] data) {
        if (data.length == 0) {
            return new double[0];
        }
        // Build the NDArray once instead of three times as before.
        INDArray arr = Nd4j.create(data);
        double max = Nd4j.max(arr).getDouble(0);
        double min = Nd4j.min(arr).getDouble(0);
        double range = max - min;
        if (range == 0.0) {
            // Constant series: map every value to 0 rather than NaN/Infinity.
            return new double[data.length];
        }
        return arr.sub(min).div(range).toDoubleVector();
    }

    /**
     * Builds the training set as a shuffled list of {@link DataSet} examples.
     * <p>
     * Each sample consists of the (load, temperature, humidity) triple at one
     * index, and the toy target is simply the sum of the three features.
     * <p>
     * NOTE(review): each sample's single observation is replicated across all
     * {@code numTimeSteps} timesteps, so every timestep of a sequence carries
     * identical data — confirm this is the intended toy encoding. The raw
     * (un-normalized) values are used here even though {@link #normalizeData}
     * exists; verify whether training data should be normalized first.
     *
     * @return shuffled list of single-example DataSets, shape
     *         [samples, features=3, timeSteps=10] / [samples, 1, 10]
     */
    public static List<DataSet> dataSet() {
        // Assemble the flat feature and target lists.
        List<Double> inputs = new ArrayList<>();
        List<Double> outputs = new ArrayList<>();
        for (int i = 0; i < load.length; i++) {
            double input1 = load[i];
            double input2 = temperature[i];
            double input3 = humidity[i];

            inputs.add(input1);
            inputs.add(input2);
            inputs.add(input3);
            outputs.add(input1 + input2 + input3);
        }

        int numInputs = 3;     // features per timestep
        int numOutputs = 1;    // predicted values per timestep
        int numTimeSteps = 10; // sequence length

        // Convert the flat lists to rank-3 NDArrays: [sample, feature, timestep].
        INDArray inputArray = Nd4j.create(inputs.size() / numInputs, numInputs, numTimeSteps);
        INDArray outputArray = Nd4j.create(outputs.size() / numOutputs, numOutputs, numTimeSteps);
        for (int i = 0; i < inputs.size() / numInputs; i++) {
            for (int j = 0; j < numTimeSteps; j++) {
                for (int k = 0; k < numInputs; k++) {
                    inputArray.putScalar(new int[]{i, k, j}, inputs.get(i * numInputs + k));
                }
                outputArray.putScalar(new int[]{i, 0, j}, outputs.get(i * numOutputs));
            }
        }
        DataSet dataSet = new DataSet(inputArray, outputArray);
        List<DataSet> dataSets = dataSet.asList();
        // Seeded shuffle for reproducibility, consistent with .seed(123) used
        // by both network configurations (was unseeded and nondeterministic).
        Collections.shuffle(dataSets, new java.util.Random(123));

        return dataSets;
    }

    /**
     * Creates a single-LSTM-layer network configuration:
     * LSTM(tanh) -> RnnOutputLayer(MSE, identity), Adam(0.01), Xavier init,
     * element-wise gradient clipping at 0.5.
     *
     * @param numInputs     number of input features per timestep
     * @param lstmLayerSize hidden size of the LSTM layer
     * @param numOutputs    number of output values per timestep
     * @return the built {@link MultiLayerConfiguration}
     */
    public static MultiLayerConfiguration createModel(int numInputs, int lstmLayerSize, int numOutputs) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(123)
                .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
                .weightInit(WeightInit.XAVIER)
                .updater(new Adam(0.01))
                .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
                .gradientNormalizationThreshold(0.5)
                .list()
                .layer(0, new LSTM.Builder().nIn(numInputs).nOut(lstmLayerSize).activation(Activation.TANH).build())
                .layer(1, new RnnOutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).nIn(lstmLayerSize).nOut(numOutputs).build())
                .build();
        return conf;
    }

    /**
     * Creates a deeper network configuration:
     * GravesLSTM(rectified tanh) -> Dense(ReLU) -> RnnOutputLayer(MSE, identity),
     * Xavier init, seed 123. No explicit updater is set, so the framework
     * default applies.
     *
     * @param numInputs      number of input features per timestep
     * @param numOutputs     number of output values per timestep
     * @param numHiddenNodes hidden size shared by the LSTM and dense layers
     * @return the built {@link MultiLayerConfiguration}
     */
    public static MultiLayerConfiguration createModeNew(int numInputs, int numOutputs, int numHiddenNodes) {
        MultiLayerConfiguration config = new NeuralNetConfiguration.Builder()
                // Random seed for weight/bias initialization (reproducibility).
                .seed(123)
                // Same initialization strategy (Xavier) for every layer.
                .weightInit(WeightInit.XAVIER)
                // list() starts the layer-by-layer definition of the network.
                .list()
                // Recurrent layer: nIn = feature count, nOut = hidden units.
                .layer(0, new GravesLSTM.Builder().nIn(numInputs).nOut(numHiddenNodes).activation(Activation.RECTIFIEDTANH).build())
                // Fully connected layer between the LSTM and the output layer.
                .layer(1, new DenseLayer.Builder().nIn(numHiddenNodes).nOut(numHiddenNodes).activation(Activation.RELU).build())
                // Regression output layer: MSE loss with identity activation.
                .layer(2, new RnnOutputLayer.Builder().nIn(numHiddenNodes).nOut(numOutputs).lossFunction(LossFunctions.LossFunction.MSE).activation(Activation.IDENTITY).build())
                .build();

        return config;
    }

    /**
     * Runs the model one step at a time over future weather data and returns
     * the predicted load values.
     * <p>
     * NOTE(review): {@code normalizedFutureHumidity} is passed TWICE as the
     * second and third feature rows — the training features were
     * (load, temperature, humidity), so the third row presumably should be a
     * future-load/other series; confirm against the caller. Also note the
     * 16-step input here vs. the 10-step sequences used in training, and
     * verify that the {@code transpose()}d [timeSteps x features] layout is
     * what {@code rnnTimeStep} expects for this model (it commonly takes
     * [features x timeSteps] for a single example) — TODO confirm.
     *
     * @param model a trained network with 3 input features
     * @return the model's raw output flattened to a double array
     */
    public static double[] predictedData(MultiLayerNetwork model) {
        // Temperature forecast for the prediction horizon.
        double[] futureTemperature = {28.5, 29.5, 30.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5, 31.5};
        // Humidity forecast for the prediction horizon.
        double[] futureHumidity = {57.5, 58.5, 59.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 60.5, 61.5, 70.5, 90.5, 10.5, 60.5, 60.5};

        double[] normalizedFutureTemperature = LSTMUtils.normalizeData(futureTemperature);
        double[] normalizedFutureHumidity = LSTMUtils.normalizeData(futureHumidity);

        // Input features (see NOTE(review) above about the duplicated row).
        INDArray inputFeatures = Nd4j.create(new double[][]{normalizedFutureTemperature, normalizedFutureHumidity, normalizedFutureHumidity}).transpose();
        INDArray indArray = model.rnnTimeStep(inputFeatures);

        double[] predictedLoad = indArray.toDoubleVector();
        return predictedLoad;
    }
}
