#ifndef FCNN_FCNN_H
#define FCNN_FCNN_H

#include "../GeneticAlgorithm/Utils/GlobalCppRandomEngine.h"
#include <iostream>
#include <cmath>

namespace FCNN {

    using namespace std;
    using GeneticAlgorithm::Utils::GlobalCppRandomEngine;

    /**
     * Error type thrown (by pointer, per this file's convention) to report
     * invalid arguments and out-of-range accesses.
     */
    class Exception {

    public:

        /**
         * Construct the exception and echo the message to stdout.
         *
         * The pointer is stored as-is (not copied), so it must outlive the
         * exception object; every call site in this file passes a string
         * literal, which has static storage duration.
         *
         * @param const char* message human-readable error text
         */
        Exception(const char* message) : message(message) {
            std::cout << message << std::endl;
        }

        /**
         * Return the message this exception was constructed with, so that
         * catch sites can inspect it instead of relying on the stdout echo.
         *
         * @return const char*
         */
        const char* getMessage() const {
            return this->message;
        }

    private:

        /** @var const char* stored error text (not owned, not freed) */
        const char* message;

    };

    /**
     * A single fully-connected neuron: weighted sum of its inputs plus a
     * bias, passed through tanh. Trainable with the Adam optimizer.
     */
    class Cell {

    public:

        /**
         * Construct a cell.
         *
         * Weights and bias are initialized uniformly in [-0.1, 0.1] from the
         * shared global random engine; all gradient accumulators and Adam
         * moment estimates start at zero.
         *
         * @param unsigned long inputNumber number of inputs, must be >= 1
         */
        Cell(unsigned long inputNumber) {
            uniform_real_distribution<long double> formateRandomNumberRange(-0.1, 0.1);
            if (inputNumber < 1) {
                throw new Exception("inputNumber < 1");
            }
            this->inputValues = new long double[inputNumber];
            this->weights = new long double[inputNumber];
            this->weightGradients = new long double[inputNumber];
            this->accumulativeWeightGradients = new long double[inputNumber];
            // m and v carry one extra slot (index inputNumber) for the bias.
            this->m = new long double[inputNumber + 1];
            this->v = new long double[inputNumber + 1];
            for (unsigned long i = 0; i < inputNumber; i++) {
                this->weights[i] = formateRandomNumberRange(GlobalCppRandomEngine::engine);
                this->weightGradients[i] = 0.0;
                this->accumulativeWeightGradients[i] = 0.0;
                this->m[i] = 0.0;
                this->v[i] = 0.0;
            }
            this->m[inputNumber] = 0.0;
            this->v[inputNumber] = 0.0;
            this->inputNumber = inputNumber;
            this->bias = formateRandomNumberRange(GlobalCppRandomEngine::engine);
        }

        // The cell owns six raw arrays; a default copy would alias them and
        // double-free in the destructor, so copying is disabled.
        Cell(const Cell&) = delete;
        Cell& operator=(const Cell&) = delete;

        ~Cell() {
            // delete[] on nullptr is a no-op, so no guards are needed.
            delete[] this->inputValues;
            delete[] this->weights;
            delete[] this->weightGradients;
            delete[] this->accumulativeWeightGradients;
            delete[] this->m;
            delete[] this->v;
        }

        /**
         * Read inputs from an externally owned array instead of the
         * internal buffer.
         *
         * The cell only reads through this pointer, never writes; the
         * internal buffer is released while the external pointer is in use.
         *
         * @param long double* inputPointer external input array, must hold
         *        at least inputNumber values and outlive its use here
         * @return void
         */
        void useInputPointer(long double* inputPointer) {
            this->inputPointer = inputPointer;
            if (nullptr != this->inputValues) {
                delete[] this->inputValues;
                this->inputValues = nullptr;
            }
        }

        /**
         * Stop using the external input pointer and switch back to an
         * internal input buffer.
         *
         * Note: the re-allocated buffer is uninitialized; call setInput()
         * for every position before calculateOutput().
         *
         * @return void
         */
        void doNotUseInputPointer() {
            this->inputPointer = nullptr;
            if (nullptr == this->inputValues) {
                this->inputValues = new long double[this->inputNumber];
            }
        }

        /**
         * Set one input value in the internal buffer.
         *
         * @param unsigned long i input position
         * @param long double value input value
         * @return void
         */
        void setInput(unsigned long i, long double value) {
            if (i >= this->inputNumber) {
                throw new Exception("The position of the input value you want to set is out of range.");
            }
            // Guard: after useInputPointer() the internal buffer is gone;
            // writing through it would dereference nullptr.
            if (nullptr == this->inputValues) {
                throw new Exception("setInput() cannot be used while an external input pointer is in use.");
            }
            this->inputValues[i] = value;
        }

        /**
         * Gradient of the loss with respect to input i, i.e.
         * gradient * activation'(sum) * weights[i]. Valid after
         * calculateOutput() and setGradient() have been called.
         *
         * @param unsigned long i input position
         * @return long double
         */
        long double getInputGradient(unsigned long i) {
            if (i >= this->inputNumber) {
                throw new Exception("The position of the input value you want to get is out of range.");
            }
            return this->gradient * this->dActivityFunction(this->sum) * this->weights[i];
        }

        /**
         * Forward pass: compute and cache the weighted sum and activation.
         *
         * @return void
         */
        void calculateOutput() {
            long double sum = this->bias;
            // Prefer the external input pointer when one is set.
            long double* inputs = this->inputPointer;
            if (nullptr == inputs) {
                inputs = this->inputValues;
            }
            for (unsigned long i = 0; i < this->inputNumber; i++) {
                sum += (inputs[i] * this->weights[i]);
            }
            this->y = this->activityFunction(sum);
            this->sum = sum; // cached for the backward pass
        }

        /**
         * Return the activation computed by the last calculateOutput().
         *
         * @return long double
         */
        long double getOutput() {
            return this->y;
        }

        /**
         * Set the partial derivative of the loss with respect to this
         * cell's output (provided by the layer above or the loss itself).
         *
         * @param long double yEffect dLoss/dOutput
         * @return void
         */
        void setGradient(long double yEffect) {
            this->gradient = yEffect;
        }

        /**
         * Backward pass: compute the gradients of all trainable parameters
         * (weights and bias) from the cached sum and the output gradient.
         * Does not compute input gradients (see getInputGradient()).
         *
         * @return void
         */
        void computeWeightGradients() {
            long double* inputs = this->inputPointer;
            if (nullptr == inputs) {
                inputs = this->inputValues;
            }
            // Common factor dLoss/dSum, shared by every weight and the bias.
            long double tmp = this->gradient * this->dActivityFunction(this->sum);
            for (unsigned long i = 0; i < this->inputNumber; i++) {
                this->weightGradients[i] = tmp * inputs[i];
            }
            this->biasGradient = tmp;
        }

        /**
         * Add the most recently computed gradients into the accumulators
         * (e.g. to average over a mini-batch before fit()).
         *
         * @return void
         */
        void accumulativeGradients() {
            for (unsigned long i = 0; i < this->inputNumber; i++) {
                this->accumulativeWeightGradients[i] += this->weightGradients[i];
            }
            this->accumulativeBiasGradient += this->biasGradient;
        }

        /**
         * One Adam update step using the accumulated gradients; updates the
         * weights and the bias in place.
         *
         * @param long double learningRate step size
         * @return void
         */
        void fit(long double learningRate) {
            long double step;
            long double mhat, vhat;
            // Adam bias correction uses the running step count t:
            // mhat = m / (1 - beta1^t), vhat = v / (1 - beta2^t).
            // (The previous code always divided by (1 - beta1) and
            // (1 - beta2), which is only correct at t = 1 and over-scales
            // the moment estimates on every later step.)
            this->adamStep++;
            long double correction1 = 1.0 - pow(this->beta1, (long double)this->adamStep);
            long double correction2 = 1.0 - pow(this->beta2, (long double)this->adamStep);
            for (unsigned long i = 0; i < this->inputNumber; i++) {
                step = this->accumulativeWeightGradients[i];
                this->m[i] = this->beta1 * this->m[i] + (1.0 - this->beta1) * step;
                this->v[i] = this->beta2 * this->v[i] + (1.0 - this->beta2) * step * step;
                mhat = this->m[i] / correction1;
                vhat = this->v[i] / correction2;
                this->weights[i] -= (learningRate * mhat / (sqrt(vhat) + this->epsilon));
            }
            step = this->accumulativeBiasGradient;
            this->m[this->inputNumber] = this->beta1 * this->m[this->inputNumber] + (1.0 - this->beta1) * step;
            this->v[this->inputNumber] = this->beta2 * this->v[this->inputNumber] + (1.0 - this->beta2) * step * step;
            mhat = this->m[this->inputNumber] / correction1;
            vhat = this->v[this->inputNumber] / correction2;
            this->bias -= (learningRate * mhat / (sqrt(vhat) + this->epsilon));
        }

        /**
         * Reset the accumulated gradients to zero (call after fit()).
         *
         * @return void
         */
        void cleanAccumulativeGradients() {
            for (unsigned long i = 0; i < this->inputNumber; i++) {
                this->accumulativeWeightGradients[i] = 0.0;
            }
            this->accumulativeBiasGradient = 0.0;
        }

        /**
         * Print the weights and bias to stdout (comma-separated weights).
         *
         * @return void
         */
        void printWeigthAndBias() {
            cout << "  w=" << endl << "    ";
            for (unsigned long i = 0; i < this->inputNumber; i++) {
                cout << this->weights[i];
                if (i < this->inputNumber - 1) {
                    cout << ",";
                }
            }
            cout << endl << "  b=" << endl << "    " << this->bias << endl;
        }

        /**
         * Return one trainable parameter: weights[i] for i < inputNumber,
         * the bias for i == inputNumber.
         *
         * @param unsigned long i parameter offset
         * @return long double
         */
        long double getTrainableValue(unsigned long i) {
            if (i > this->inputNumber) {
                throw new Exception("Your offset value is too large to return a trainable parameter value.");
            }
            if (i < this->inputNumber) {
                return this->weights[i];
            }
            return this->bias;
        }

        /**
         * Set one trainable parameter: weights[i] for i < inputNumber,
         * the bias for i == inputNumber.
         *
         * @param unsigned long i parameter offset
         * @param long double x new value
         * @return void
         */
        void setTrainableValue(unsigned long i, long double x) {
            if (i > this->inputNumber) {
                throw new Exception("Your offset value is too large to set a trainable parameter value.");
            }
            if (i < this->inputNumber) {
                this->weights[i] = x;
            } else {
                this->bias = x;
            }
        }

        /**
         * Return one accumulated gradient, indexed like getTrainableValue():
         * weight gradients first, the bias gradient at i == inputNumber.
         *
         * @param unsigned long i parameter offset
         * @return long double
         */
        long double getGradient(unsigned long i) {
            if (i > this->inputNumber) {
                throw new Exception("Your offset value is too large to return a trainable parameter value.");
            }
            if (i < this->inputNumber) {
                return this->accumulativeWeightGradients[i];
            }
            return this->accumulativeBiasGradient;
        }

        /**
         * Number of trainable parameters (weights plus one bias).
         *
         * @return unsigned long
         */
        unsigned long getTrainableValueNumber() {
            return this->inputNumber + 1;
        }

    private:

        /**
         * External input array replacing the internal one; nullptr when
         * not in use.
         *
         * @var long double*
         */
        long double* inputPointer = nullptr;

        /**
         * Internally owned input array; nullptr while an external pointer
         * is in use.
         *
         * @var long double*
         */
        long double* inputValues = nullptr;

        /**
         * Weight array, one entry per input.
         *
         * @var long double*
         */
        long double* weights;

        /**
         * Bias value.
         *
         * @var long double
         */
        long double bias;

        /**
         * Cached weighted sum (including bias) from the last forward pass.
         *
         * @var long double
         */
        long double sum = 0.0;

        /**
         * Cached activation from the last forward pass.
         *
         * @var long double
         */
        long double y = 0.0;

        /**
         * Number of inputs.
         *
         * @var unsigned long
         */
        unsigned long inputNumber;

        /**
         * dLoss/dOutput for this cell, supplied externally via setGradient().
         *
         * @var long double
         */
        long double gradient = 0.0;

        /**
         * Per-weight gradients from the last computeWeightGradients().
         *
         * @var long double*
         */
        long double* weightGradients;

        /**
         * Accumulated per-weight gradients.
         *
         * @var long double*
         */
        long double* accumulativeWeightGradients;

        /**
         * Bias gradient from the last computeWeightGradients().
         *
         * @var long double
         */
        long double biasGradient = 0.0;

        /**
         * Accumulated bias gradient.
         *
         * @var long double
         */
        long double accumulativeBiasGradient = 0.0;

        /**
         * Activation function (tanh).
         *
         * @param long double x weighted sum
         * @return long double
         */
        long double activityFunction(long double x) {
            return tanh(x);
        }

        /**
         * Derivative of the activation function: 1 - tanh(x)^2.
         *
         * @param long double x weighted sum
         * @return long double
         */
        long double dActivityFunction(long double x) {
            long double t = tanh(x);
            return 1.0 - t * t;
        }

        /** @var long double Adam first-moment decay rate */
        long double beta1 = 0.9;

        /** @var long double Adam second-moment decay rate */
        long double beta2 = 0.999;

        /** @var long double Adam denominator fuzz term */
        long double epsilon = 1E-8;

        /** @var long double* Adam first-moment estimates (last slot: bias) */
        long double* m = nullptr;

        /** @var long double* Adam second-moment estimates (last slot: bias) */
        long double* v = nullptr;

        /** @var unsigned long Adam time step t, incremented once per fit() */
        unsigned long adamStep = 0;

    };

    /**
     * Fully-connected feed-forward neural network built from Cell objects.
     *
     * Indexing convention: inputs[i] holds the values flowing into layer i
     * (inputs[0] is the network input, inputs[layerNumber-1] the network
     * output); cells[l] holds the cells of layer l + 1.
     */
    class FCNNModel {

    public:

        /**
         * Construct the network.
         *
         * @param unsigned long layerTotal number of layers including the
         *        input layer, must be >= 2
         * @param const unsigned long layerCell[] cell count per layer,
         *        each entry must be >= 1
         */
        FCNNModel(unsigned long layerTotal, const unsigned long layerCell[]) {
            if (layerTotal < 2) {
                throw new Exception("layerTotal must >= 2");
            }
            for (unsigned long i = 0; i < layerTotal; i++) {
                if (layerCell[i] < 1) {
                    throw new Exception("Layer cell number < 1 !!!");
                }
            }
            this->inputs = new long double*[layerTotal];
            this->inputs[0] = new long double[layerCell[0]];
            this->cells = new Cell**[layerTotal - 1];
            for (unsigned long i = 1; i < layerTotal; i++) {
                this->inputs[i] = new long double[layerCell[i]];
                this->cells[i - 1] = new Cell*[layerCell[i]];
                for (unsigned long j = 0; j < layerCell[i]; j++) {
                    this->cells[i - 1][j] = new Cell(layerCell[i - 1]);
                    // Each cell reads its inputs directly from the previous
                    // layer's value array — no per-cell input copies.
                    this->cells[i - 1][j]->useInputPointer(this->inputs[i - 1]);
                }
            }
            this->layerCellInfo = new unsigned long[layerTotal];
            for (unsigned long i = 0; i < layerTotal; i++) {
                this->layerCellInfo[i] = layerCell[i];
            }
            this->layerNumber = layerTotal;
        }

        // The model owns every cell and value array; a default copy would
        // alias them and double-free in the destructor, so copying is
        // disabled.
        FCNNModel(const FCNNModel&) = delete;
        FCNNModel& operator=(const FCNNModel&) = delete;

        ~FCNNModel() {
            delete[] this->inputs[0];
            for (unsigned long i = 1; i < this->layerNumber; i++) {
                delete[] this->inputs[i];
                for (unsigned long j = 0; j < this->layerCellInfo[i]; j++) {
                    delete this->cells[i - 1][j];
                }
                delete[] this->cells[i - 1];
            }
            delete[] this->inputs;
            delete[] this->cells;
            delete[] this->layerCellInfo;
        }

        /**
         * Set one network input value.
         *
         * @param unsigned long i input position
         * @param long double value input value
         * @return void
         */
        void setInput(unsigned long i, long double value) {
            if (i >= this->layerCellInfo[0]) {
                throw new Exception("The position of the input value you want to set(in layer 0) is out of range.");
            }
            this->inputs[0][i] = value;
        }

        /**
         * Get one previously set network input value.
         *
         * @param unsigned long i input position
         * @return long double
         */
        long double getInput(unsigned long i) {
            if (i >= this->layerCellInfo[0]) {
                throw new Exception("The position of the input value you want to get(in layer 0) is out of range.");
            }
            return this->inputs[0][i];
        }

        /**
         * Forward pass through every layer, copying each layer's outputs
         * into the next layer's input array.
         *
         * @return void
         */
        void calculateOutput() {
            for (unsigned long i = 0; i < this->layerNumber - 1; i++) {
                for (unsigned long j = 0; j < this->layerCellInfo[i + 1]; j++) {
                    this->cells[i][j]->calculateOutput();
                    this->inputs[i + 1][j] = this->cells[i][j]->getOutput();
                }
            }
        }

        /**
         * Copy the final layer's outputs into the given array, which must
         * hold at least getOutputNumber() values.
         *
         * @param long double* target destination array
         * @return void
         */
        void copyOutput(long double* target) {
            for (unsigned long i = 0; i < this->layerCellInfo[this->layerNumber - 1]; i++) {
                target[i] = this->inputs[this->layerNumber - 1][i];
            }
        }

        /**
         * Get one network output value.
         *
         * @param unsigned long i output position
         * @return long double
         */
        long double getOutput(unsigned long i) {
            if (i >= this->layerCellInfo[this->layerNumber - 1]) {
                throw new Exception("The position of the output value you want to get is out of range.");
            }
            return this->inputs[this->layerNumber - 1][i];
        }

        /**
         * Set the loss gradient for one output cell (dLoss/dOutput_i),
         * which seeds backpropagation.
         *
         * @param unsigned long i output position
         * @param long double gradient dLoss/dOutput_i
         * @return void
         */
        void setGradient(unsigned long i, long double gradient) {
            if (i >= this->layerCellInfo[this->layerNumber - 1]) {
                throw new Exception("The position of the output-gradient you want to set is out of range.");
            }
            this->cells[this->layerNumber - 2][i]->setGradient(gradient);
        }

        /**
         * Backpropagate output gradients toward the first hidden layer.
         * Requires calculateOutput() and setGradient() to have been called.
         *
         * @return void
         */
        void gradientBackPropagate() {
            long double gradient = 0.0;
            // Walk layers from the output side down; cells[l] holds the
            // cells of layer l + 1, hence the -2 / -3 offsets below.
            for (unsigned long i = this->layerNumber; i > 2; i--) {
                for (unsigned long j = 0; j < this->layerCellInfo[i - 2]; j++) {
                    gradient = 0.0;
                    for (unsigned long k = 0; k < this->layerCellInfo[i - 1]; k++) {
                        gradient += this->cells[i - 2][k]->getInputGradient(j);
                    }
                    this->cells[i - 3][j]->setGradient(gradient);
                }
            }
        }

        /**
         * Compute weight and bias gradients for every cell in the network.
         *
         * @return void
         */
        void computeWeightGradients() {
            for (unsigned long i = this->layerNumber - 1; i > 0; i--) {
                for (unsigned long j = 0; j < this->layerCellInfo[i]; j++) {
                    this->cells[i - 1][j]->computeWeightGradients();
                }
            }
        }

        /**
         * Gradient of the loss with respect to network input i: the sum of
         * that input's gradient over all first-hidden-layer cells.
         *
         * @param unsigned long i input position
         * @return long double
         */
        long double getInputGradient(unsigned long i) {
            long double gradient = 0.0;
            if (i >= this->layerCellInfo[0]) {
                throw new Exception("The position of the InputGradient you want to get is out of range.");
            }
            for (unsigned long j = 0; j < this->layerCellInfo[1]; j++) {
                gradient += this->cells[0][j]->getInputGradient(i);
            }
            return gradient;
        }

        /**
         * Accumulate the most recently computed gradients in every cell.
         *
         * @return void
         */
        void accumulativeGradients() {
            for (unsigned long i = this->layerNumber - 1; i > 0; i--) {
                for (unsigned long j = 0; j < this->layerCellInfo[i]; j++) {
                    this->cells[i - 1][j]->accumulativeGradients();
                }
            }
        }

        /**
         * Apply one optimizer step to every cell using the accumulated
         * gradients; updates all weights and biases.
         *
         * @param long double learningRate step size
         * @return void
         */
        void fit(long double learningRate) {
            for (unsigned long i = this->layerNumber - 1; i > 0; i--) {
                for (unsigned long j = 0; j < this->layerCellInfo[i]; j++) {
                    this->cells[i - 1][j]->fit(learningRate);
                }
            }
        }

        /**
         * Reset the accumulated gradients of every cell to zero.
         *
         * @return void
         */
        void cleanAccumulativeGradients() {
            for (unsigned long i = this->layerNumber - 1; i > 0; i--) {
                for (unsigned long j = 0; j < this->layerCellInfo[i]; j++) {
                    this->cells[i - 1][j]->cleanAccumulativeGradients();
                }
            }
        }

        /**
         * Print all weights and biases to stdout, cell by cell.
         *
         * @return void
         */
        void printAllWeightsAndBias() {
            for (unsigned long l = 0; l < this->layerNumber - 1; l++) {
                for (unsigned long i = 0; i < this->layerCellInfo[l + 1]; i++) {
                    this->cells[l][i]->printWeigthAndBias();
                }
            }
        }

        /**
         * Print every trainable parameter to stdout, comma separated, with
         * a newline after the very last one.
         *
         * @return void
         */
        void printAllTrainableValue() {
            for (unsigned long l = 0; l < this->layerNumber - 1; l++) {
                for (unsigned long i = 0; i < this->layerCellInfo[l + 1]; i++) {
                    for (unsigned long k = 0; k < this->cells[l][i]->getTrainableValueNumber(); k++) {
                        cout << this->cells[l][i]->getTrainableValue(k);
                        if (l != this->layerNumber - 2 || i != this->layerCellInfo[l + 1] - 1 || k != this->cells[l][i]->getTrainableValueNumber() - 1) {
                            cout << ",";
                        } else {
                            cout << endl;
                        }
                    }
                }
            }
        }

        /**
         * Print every accumulated gradient to stdout, comma separated, with
         * a newline after the very last one.
         *
         * @return void
         */
        void printAllGradients() {
            for (unsigned long l = 0; l < this->layerNumber - 1; l++) {
                for (unsigned long i = 0; i < this->layerCellInfo[l + 1]; i++) {
                    for (unsigned long k = 0; k < this->cells[l][i]->getTrainableValueNumber(); k++) {
                        cout << this->cells[l][i]->getGradient(k);
                        if (l != this->layerNumber - 2 || i != this->layerCellInfo[l + 1] - 1 || k != this->cells[l][i]->getTrainableValueNumber() - 1) {
                            cout << ",";
                        } else {
                            cout << endl;
                        }
                    }
                }
            }
        }

        /**
         * Total number of trainable parameters across all cells.
         *
         * @return unsigned long
         */
        unsigned long getTrainableValueNumber() {
            unsigned long sum = 0;
            for (unsigned long l = 0; l < this->layerNumber - 1; l++) {
                for (unsigned long i = 0; i < this->layerCellInfo[l + 1]; i++) {
                    sum += this->cells[l][i]->getTrainableValueNumber();
                }
            }
            return sum;
        }

        /**
         * Set the trainable parameter at a network-wide flat offset.
         *
         * @param unsigned long offset flat parameter offset
         * @param long double value new value
         * @return bool true when the offset was found, false otherwise
         */
        bool setTrainableValue(unsigned long offset, long double value) {
            unsigned long pass = 0;
            for (unsigned long l = 0; l < this->layerNumber - 1; l++) {
                for (unsigned long i = 0; i < this->layerCellInfo[l + 1]; i++) {
                    // Match offset within [pass, pass + cellParamCount).
                    // (Was "offset + 1 >= pass", which also admits
                    // offset == pass - 1 and would underflow offset - pass.)
                    if (offset >= pass && offset < pass + this->cells[l][i]->getTrainableValueNumber()) {
                        this->cells[l][i]->setTrainableValue(offset - pass, value);
                        return true;
                    }
                    pass += this->cells[l][i]->getTrainableValueNumber();
                }
            }
            return false;
        }

        /**
         * Get the trainable parameter at a network-wide flat offset.
         *
         * @param unsigned long offset flat parameter offset
         * @return long double
         */
        long double getTrainableValue(unsigned long offset) {
            unsigned long pass = 0;
            for (unsigned long l = 0; l < this->layerNumber - 1; l++) {
                for (unsigned long i = 0; i < this->layerCellInfo[l + 1]; i++) {
                    // Match offset within [pass, pass + cellParamCount);
                    // see setTrainableValue() for the corrected condition.
                    if (offset >= pass && offset < pass + this->cells[l][i]->getTrainableValueNumber()) {
                        return this->cells[l][i]->getTrainableValue(offset - pass);
                    }
                    pass += this->cells[l][i]->getTrainableValueNumber();
                }
            }
            throw new Exception("no value return!");
            return 0.0;
        }

        /**
         * Number of cells in the input layer.
         *
         * @return unsigned long
         */
        unsigned long getInputNumber() {
            return this->layerCellInfo[0];
        }

        /**
         * Number of cells in the output layer.
         *
         * @return unsigned long
         */
        unsigned long getOutputNumber() {
            return this->layerCellInfo[this->layerNumber - 1];
        }

    private:

        /**
         * Cells arranged per layer; cells[l][j] is cell j of layer l + 1.
         *
         * @var Cell*** two-dimensional array of owned Cell pointers
         */
        Cell*** cells;

        /**
         * Cell count per layer, including the input layer.
         *
         * @var unsigned long*
         */
        unsigned long* layerCellInfo;

        /**
         * Number of layers, including the input layer.
         *
         * @var unsigned long
         */
        unsigned long layerNumber;

        /**
         * Per-layer value arrays; inputs[i] feeds layer i, and the last
         * entry holds the network output.
         *
         * @var long double**
         */
        long double** inputs;
    };

    /**
     * Applies an FCNNModel recurrently over a sequence to predict each next
     * value from the previous ones.
     *
     * Usage example:
     *
     * unsigned long layers = 3;
     * unsigned long network[] = {2, 2, 2};
     * unsigned long lengthOfSequence = 3;
     * long double sequence[] = {0.5, 0.5, 0.6};
     * FCNNModel fcnn = FCNNModel(layers, network);
     * SequenceNetwork seqNet = SequenceNetwork(&fcnn, sequence, lengthOfSequence);
     * cout << "Error=" << seqNet.getError() << endl;
     */
    class SequenceNetwork {

    public:

        /**
         * Build the sequence wrapper around an existing FCNNModel.
         *
         * The sequence values are copied, so the caller's buffer may be
         * released afterwards; the FCNNModel is only borrowed and must
         * outlive this object.
         *
         * @param FCNNModel* fcnn the model; its input count must equal its
         *        output count so outputs can be fed back as inputs
         * @param const long double* sequenceValues the sequence values
         * @param unsigned long lengthOfSequence sequence length, must be >= 2
         */
        SequenceNetwork(FCNNModel* fcnn, const long double* sequenceValues, unsigned long lengthOfSequence) {
            if (lengthOfSequence < 2) {
                throw new Exception("lengthOfSequence must > 1");
            }
            if (fcnn->getInputNumber() != fcnn->getOutputNumber()) {
                throw new Exception("Error in create SequenceNetwork, FCNNModel inputNumber!=outputNumber.");
            }
            this->sequenceValues = new long double[lengthOfSequence];
            for (unsigned long i = 0; i < lengthOfSequence; i++) {
                this->sequenceValues[i] = sequenceValues[i];
            }
            this->lengthOfSequence = lengthOfSequence;
            this->fcnn = fcnn;
        }

        // Owns the sequence copy; disable copying to avoid a double-free.
        SequenceNetwork(const SequenceNetwork&) = delete;
        SequenceNetwork& operator=(const SequenceNetwork&) = delete;

        /**
         * Release the owned sequence copy.
         */
        ~SequenceNetwork() {
            delete[] sequenceValues;
        }

        /**
         * Run the network over the sequence and return the mean absolute
         * error between its predictions and the given sequence.
         *
         * @return long double
         */
        long double getError() {
            long double difference;
            long double sum = 0;
            long double error = 0;
            unsigned long inputNumber = this->fcnn->getInputNumber();
            // First step: input 0 is the first sequence value, the rest 0.
            this->fcnn->setInput(0, this->sequenceValues[0]);
            for (unsigned long offset = 1; offset < inputNumber; offset++) {
                this->fcnn->setInput(offset, 0);
            }
            this->fcnn->calculateOutput();
            difference = this->fcnn->getOutput(0) - this->sequenceValues[1];
            sum += abs(difference);
            // Subsequent steps: input 0 is teacher-forced with the true
            // previous sequence value, while inputs 1..inputNumber-1 carry
            // the network's previous outputs. The feedback loop must start
            // at offset 1 — starting at 0 (as the code previously did)
            // immediately overwrote the teacher-forced value with the
            // network's own output 0.
            for (unsigned long i = 2; i < this->lengthOfSequence; i++) {
                this->fcnn->setInput(0, this->sequenceValues[i - 1]);
                for (unsigned long offset = 1; offset < inputNumber; offset++) {
                    this->fcnn->setInput(offset, this->fcnn->getOutput(offset));
                }
                this->fcnn->calculateOutput();
                difference = this->fcnn->getOutput(0) - this->sequenceValues[i];
                sum += abs(difference);
            }
            error = sum / (long double)(this->lengthOfSequence - 1);
            return error;
        }

        /**
         * Print the network's step-by-step outputs for the sequence to
         * stdout; intended for debugging.
         *
         * @return void
         */
        void dump() {
            // TODO merge the shared stepping logic with getError().
            cout << "Output:" << sequenceValues[0] << ",";

            auto inputNumber = fcnn->getInputNumber();
            fcnn->setInput(0, sequenceValues[0]);
            for (unsigned long offset = 1; offset < inputNumber; offset++) {
                fcnn->setInput(offset, 0);
            }
            fcnn->calculateOutput();
            cout << fcnn->getOutput(0);

            // Same stepping scheme as getError(): teacher-force input 0,
            // feed outputs back into inputs 1..inputNumber-1 only.
            for (unsigned long i = 2; i < lengthOfSequence; i++) {
                fcnn->setInput(0, sequenceValues[i - 1]);
                for (unsigned long offset = 1; offset < inputNumber; offset++) {
                    fcnn->setInput(offset, fcnn->getOutput(offset));
                }
                fcnn->calculateOutput();
                cout << "," << fcnn->getOutput(0);
            }
            cout << endl;
        }

    private:

        /**
         * @var long double* owned copy of the input sequence
         */
        long double* sequenceValues;

        /**
         * @var unsigned long sequence length
         */
        unsigned long lengthOfSequence;

        /**
         * @var FCNNModel* borrowed network used to process the sequence
         */
        FCNNModel* fcnn;
    };

}

#endif
