package ink.mint.convolution;

import ink.mint.activation.Activateable;
import ink.mint.activation.ActivationFunction;
import ink.mint.activation.ActivationType;
import ink.mint.loss.LossFunction;
import ink.mint.loss.LossType;
import ink.mint.loss.Lossable;
import ink.mint.matrix.MatrixExtensions;
import ink.mint.neural.Layer;
import ink.mint.neural.NeuralNetwork;

import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;

public class ConvolutionNeuralNetwork implements Serializable {

    private static final long serialVersionUID = 1L;
    // NOTE: most fields are transient — only the layer configuration and the
    // fully connected network are intended to survive serialization.
    transient ConvolutionLayer convolutionLayer; // layer currently being processed
    List<ConvolutionLayer> convolutionLayerList; // ordered convolution/pooling layers
    transient List<ConvolutionNeural> convolutionNeuralList; // neurals of the layer in work
    transient Activateable activateable; // activation function strategy
    NeuralNetwork neuralNetwork; // fully connected tail of the CNN
    transient List<Layer> layerList; // layers of the fully connected network
    transient MatrixExtensions matrix; // matrix helper routines
    transient Lossable lossable; // loss function strategy
    transient double[][][] input; // whole data set: [sample][row][col]
    transient double[][][] in = new double[1][28][28]; // single-sample buffer (28x28, e.g. MNIST)
    transient double[] Y; // expected labels, one per sample
    transient double[][][] poolingMap; // pooled output of the last processed layer
    transient double[] convolutionNeuralNetworkOutputVector; // flattened conv output
    transient double[] fullyConnectedLayerOutput; // output of the fully connected network

    transient double learningRate; // shared learning rate for conv and FC parts

    /** Creates an empty network with default activation, loss and matrix helpers. */
    public ConvolutionNeuralNetwork() {
        convolutionLayerList = new ArrayList<>();
        matrix = new MatrixExtensions();
        neuralNetwork = new NeuralNetwork();
        layerList = new ArrayList<>();
        activateable = new ActivationFunction();
        lossable = new LossFunction();
    }

    /** Appends a convolution layer; layers run in insertion order. */
    public ConvolutionNeuralNetwork addConvolutionLayer(ConvolutionLayer convolutionLayer) {
        convolutionLayerList.add(convolutionLayer);
        return this;
    }

    /** Sets the full training/evaluation data set: [sample][row][col]. */
    public ConvolutionNeuralNetwork input(double[][][] input) {
        this.input = input;
        return this;
    }

    /** Sets expected labels and forwards them to the fully connected network. */
    public ConvolutionNeuralNetwork setY(double[] Y) {
        this.Y = Y;
        neuralNetwork.setY(Y);
        return this;
    }

    /**
     * Forward pass through all convolution layers: each layer consumes the
     * previous layer's pooled output; the first layer reads the sample buffer.
     *
     * @param isTrain true while training (affects convolution internals)
     * @return this network (fluent)
     */
    public ConvolutionNeuralNetwork createConvolutionNeuralNetwork(boolean isTrain) {
        double[][][] layerInput = in; // first layer is fed from the sample buffer
        for (ConvolutionLayer layer : convolutionLayerList) {
            convolutionLayer = layer; // keep the field tracking the current layer
            layer.input(layerInput);
            layer.convolution(learningRate, isTrain);
            layer.activate();
            layer.pooling();
            poolingMap = layer.ConvolutionLayerOutput();
            layerInput = poolingMap; // next layer reads this layer's pooled maps
        }
        return this;
    }

    /**
     * Flattens each pooled map of the last convolution layer into one row of
     * the returned matrix: [neural][flattened pooling map].
     */
    private double[][] convolutionNeuralNetworkOutput() {
        ConvolutionLayer lastLayer = convolutionLayerList.get(convolutionLayerList.size() - 1);
        List<ConvolutionNeural> neurals = lastLayer.getConvolutionNeuralList();
        double[][] firstPooling = neurals.get(0).poolingMap;
        int flatLength = firstPooling.length * firstPooling[0].length;
        int neuralCount = lastLayer.getConvolutionNeuralNumber();
        double[][] flattened = new double[neuralCount][flatLength];
        for (int n = 0; n < neuralCount; n++) {
            flattened[n] = matrix.matrixToVector(neurals.get(n).getPoolingMap());
        }
        return flattened;
    }

    /** Flattens the last layer's pooled maps into one vector and feeds the FC network. */
    private ConvolutionNeuralNetwork convolutionNeuralNetworkOutputVector() {
        double[][] end = convolutionNeuralNetworkOutput();
        this.convolutionNeuralNetworkOutputVector = matrix.matrixToVector(end);
        neuralNetwork.setInput(convolutionNeuralNetworkOutputVector);
        return this;
    }

    /** Appends a layer to the fully connected part of the network. */
    public ConvolutionNeuralNetwork addFullyConnectedLayer(Layer layer) {
        neuralNetwork.addLayer(layer);
        return this;
    }

    /**
     * Flattens the convolution output into the fully connected network and
     * builds/evaluates that network.
     *
     * @param isTrain true while training
     * @return this network (fluent)
     */
    public ConvolutionNeuralNetwork createFullyConnectedLayer(boolean isTrain) {
        // convolutionNeuralNetworkOutputVector() already pushes the flattened
        // vector into neuralNetwork via setInput, so the previously duplicated
        // setInput call here was removed.
        convolutionNeuralNetworkOutputVector();
        neuralNetwork.createNetwork(isTrain);
        return this;
    }

    /** Selects the loss function used by the fully connected network. */
    public ConvolutionNeuralNetwork setLossType(LossType lossType) {
        neuralNetwork.setLossType(lossType);
        return this;
    }

    // Running evaluation statistics, updated by output(int).
    transient int correct = 0;
    transient double correctRate;
    transient int mistake;

    /**
     * Runs the fully connected forward pass for sample {@code index}, updates
     * the running accuracy counters and prints a progress line.
     *
     * @param index sample index into Y
     * @return this network (fluent)
     */
    public ConvolutionNeuralNetwork output(int index) {
        fullyConnectedLayerOutput = neuralNetwork.output();
        int maxIndex = matrix.maxVectorIndex(fullyConnectedLayerOutput);

        LossType lossType = neuralNetwork.getLossType();

        // NOTE(review): the loss is computed from the predicted class index,
        // not from the raw output vector — confirm LossSolve expects that.
        double lossValue = lossable.LossSolve(Y[index], maxIndex, lossType);

        if (maxIndex == Y[index]) {
            correct++;

        } else {
            mistake++;
        }
        // BUGFIX: was integer division (correct / (correct + mistake)), which
        // truncates to 0 (or 1) — cast to double for a real ratio.
        correctRate = (double) correct / (correct + mistake);
        System.out.println("第 " + index + " 个 " + "网络输出:  " + maxIndex + "  " + " 期望：" + Y[index] + " | 正确数： " + correct + " | 错误数： " + mistake + " | 损失: " + lossValue + " | 正确率： " + correctRate);

        return this;
    }

    /**
     * One backward pass for sample {@code index}: back-propagates through the
     * fully connected network first, then through the convolution layers.
     */
    public ConvolutionNeuralNetwork gradientDescent(int index) {
        layerList = neuralNetwork.getLayerList();
        // Output dimensionality = neuron count of the last fully connected layer.
        int dimensionality = layerList.get(layerList.size() - 1).getNeuralNumber();
        neuralNetwork.setLearningRate(learningRate);
        // Expected output as a one-hot encoding of the integer label.
        neuralNetwork.expectations(matrix.oneHotEncoding((int) Y[index], dimensionality));
        neuralNetwork.lossFunction();
        neuralNetwork.output(index);
        neuralNetwork.gradientDescent();
        convolutionGradientDescent();
        return this;
    }

    /**
     * Delta of the fully connected network's input vector, i.e. the gradient
     * flowing back into the flattened convolution output.
     * NOTE(review): computes W^T * deltaWeight and then multiplies by the layer
     * input as a column matrix — confirm this matches MatrixExtensions' shape
     * conventions for the chain rule.
     */
    public double[] getFullyConnectedLayerInputDelta() {
        Layer layer = layerList.get(0); // first FC layer, fed by the conv output
        double[][] deltaWeight = layer.getDeltaWeight();
        double[][] weight = layer.getWeight();
        double[][] delta = matrix.multi(matrix.transpose(weight), deltaWeight);
        double[] xLayer = layer.getInput();
        double[][] deltaE = matrix.multi(delta, matrix.vectorToColumnMatrix(xLayer));
        return matrix.matrixToVector(deltaE);
    }

    /**
     * Up-samples the pooling deltas of layer {@code layerIndex} through the
     * activation derivative: delta = poolingMapDelta ⊙ f'(featureMap).
     * Also caches the derivative map on each neural for later use.
     */
    public ConvolutionNeuralNetwork upSampleDelta(int layerIndex) {
        convolutionLayer = convolutionLayerList.get(layerIndex);
        convolutionNeuralList = convolutionLayer.getConvolutionNeuralList();
        for (ConvolutionNeural neural : convolutionNeuralList) {
            double[][] featureMap = neural.getFeatureMap();
            ActivationType activationType = neural.getActivationType();
            double[][] derivationActivateMap =
                    activateable.derivationActivationSolve(featureMap, activationType);
            double[][] upSampled =
                    matrix.elementProduct(neural.getPoolingMapDelta(), derivationActivateMap);
            neural.setUpSampleDelta(upSampled);
            neural.setDerivationActivateMap(derivationActivateMap);
        }
        return this;
    }

    /**
     * Transposed-convolution step of back-propagation: pushes each neural's
     * up-sampled delta back through its 180°-rotated kernel to produce the
     * delta w.r.t. that neural's input.
     */
    public ConvolutionNeuralNetwork deconvolution(int layerIndex) {
        ConvolutionLayer layer = convolutionLayerList.get(layerIndex);
        for (ConvolutionNeural neural : layer.getConvolutionNeuralList()) {
            double[][] upSampleDelta = neural.getUpSampleDelta();
            double[][] flippedKernel = matrix.rot180Matrix(neural.getKernel());

            // Reuse the forward pass's stride and padding.
            int stride = neural.getKernelStride();
            int padding = neural.getConvolutionPadding();

            // Inverse of the convolution output-size formula:
            // target = (out - 1) * stride + kernel - 2 * padding
            int targetSize = (upSampleDelta.length - 1) * stride + flippedKernel.length - 2 * padding;

            double[][] inputDelta =
                    matrix.deconvolution(upSampleDelta, flippedKernel, stride, padding, targetSize);
            neural.setInputDelta(inputDelta);
        }
        return this;
    }


    // Computes kernel and bias gradients for one convolution neural.
    // NOTE(review): this helper is not invoked anywhere in this file — confirm
    // whether deconvolution(int) was meant to call it, or remove it.
    private void updateKernelGradient(ConvolutionNeural neural, double[][] delta) {
        double[][] input = neural.getInput();
        int stride = neural.getKernelStride();
        int padding = neural.getConvolutionPadding();

        // Kernel gradient: valid convolution of the layer input with the delta map.
        double[][] kernelDelta = matrix.validConvolution(
                input,
                delta,
                stride,
                padding
        );

        // Bias gradient: sum of all delta entries.
        double biasDelta = matrix.sum(delta);

        neural.setKernelDelta(kernelDelta);
        neural.setBiasDelta(biasDelta);
    }


//    public ConvolutionNeuralNetwork deconvolution(int layerIndex) {
//        convolutionLayer = convolutionLayerList.get(layerIndex);
//        convolutionNeuralList = convolutionLayer.getConvolutionNeuralList();
//        int cnl = convolutionNeuralList.size();
//        double[][] kernel;
//        double[][] derivationActivateMap;
//        double[][] upSampleDelta;
//        double[][] rot180Kernel;
//        double[][] input;
//        double[][] inputDelta;
//        double[][] featureMapDelta;
//        double[][] result;
//        double[][] kernelDelta;
//        double[][] rot180A;
//        double biasDelta;
//        for (int i = 0; i < cnl; i++) {
//
//
//            int convolutionPadding = convolutionNeuralList.get(i).convolutionPadding;
//            input = convolutionNeuralList.get(i).getInput();
//            kernel = convolutionNeuralList.get(i).getKernel();
//            derivationActivateMap = convolutionNeuralList.get(i).getDerivationActivateMap();
//            upSampleDelta = convolutionNeuralList.get(i).getUpSampleDelta();
//            rot180Kernel = matrix.rot180Matrix(kernel);
//
//            // ConvolutionNeuralNetwork.java 的deconvolution方法
////            inputDelta = matrix.featureMap(upSampleDelta, rot180Kernel, 1, kernel.length-1);
////            // 应使用正确的步长和填充参数：
//            int deconvStride = convolutionNeuralList.get(i).kernelStride; // ✅使用原步长
//            int deconvPadding = convolutionNeuralList.get(i).convolutionPadding; // ✅原填充
//            inputDelta = matrix.featureMap(input, rot180Kernel, deconvStride, deconvPadding);
//
//
//
////            inputDelta = matrix.featureMap(upSampleDelta, rot180Kernel, 1, kernel.length - 1);
//            rot180A = matrix.rot180Matrix(upSampleDelta);
//            featureMapDelta = matrix.elementProduct(matrix.addPaddingMatrix(inputDelta,deconvPadding,deconvPadding), derivationActivateMap);
//            kernelDelta = matrix.featureMap(rot180A, featureMapDelta, 1, convolutionPadding);
//            biasDelta = matrix.sum(featureMapDelta);
//            convolutionNeuralList.get(i).setKernelDelta(kernelDelta);
//            convolutionNeuralList.get(i).setDerivationActivateMapDelta(featureMapDelta);
//            convolutionNeuralList.get(i).setBiasDelta(biasDelta);
//        }
//        return this;
//    }

    /**
     * Seeds the convolutional backward pass: scatters the fully connected
     * network's input delta back onto the pooling maps of the LAST convolution
     * layer, one delta row per neural.
     *
     * BUGFIX: the previous version looped every delta row over EVERY neural,
     * re-assigning each neural's poolingMapDelta on every pass, so only the
     * last row survived. Row i now maps to neural i, matching how
     * fullyConnectedLayerInputDeltaToMatrix() splits the vector per neural.
     */
    private ConvolutionNeuralNetwork endMaxPoolingDelta() {
        double[][] deltaRows = fullyConnectedLayerInputDeltaToMatrix();
        convolutionLayer = convolutionLayerList.get(convolutionLayerList.size() - 1);
        convolutionNeuralList = convolutionLayer.getConvolutionNeuralList();
        // Guard against a row/neural count mismatch.
        int n = Math.min(deltaRows.length, convolutionNeuralList.size());
        for (int i = 0; i < n; i++) {
            ConvolutionNeural neural = convolutionNeuralList.get(i);
            double[][] poolingMapDelta = setActivateMapDelta(
                    neural.getActivateMap(),
                    neural.getPoolingMaxIndex(),
                    deltaRows[i]);
            neural.setPoolingMapDelta(poolingMapDelta);
        }
        return this;
    }

    /**
     * Propagates deltas from layer {@code layerIndex + 1} back to the pooling
     * maps of layer {@code layerIndex}. The next layer's neurals are assigned
     * round-robin to this layer's neurals and their contributions summed.
     */
    private ConvolutionNeuralNetwork maxPoolingDelta(int layerIndex) {
        convolutionLayer = convolutionLayerList.get(layerIndex);
        convolutionNeuralList = convolutionLayer.getConvolutionNeuralList();
        int cnl = convolutionNeuralList.size();
        ConvolutionLayer nextConvolutionLayer;
        nextConvolutionLayer = convolutionLayerList.get(layerIndex + 1);
        List<ConvolutionNeural> nextConvolutionNeuralList = nextConvolutionLayer.getConvolutionNeuralList();
        int ncnl = nextConvolutionNeuralList.size();
        double[][] nextDerivationActivateMapDelta;
        double[][] result = new double[0][];

        // Per-neural delta accumulators, sized lazily inside the loop.
        double[][][] temp = new double[cnl][][];


        int count = 0;
        count = ncnl / cnl;
        // Number of round-robin passes over this layer's neurals.
        int runk = ncnl % cnl > 0 ? count + 1 : count;
        int index = 0;
        double[][] activateMap;

        int[][] maxIndex;
        double[][] kernel = nextConvolutionNeuralList.get(0).getKernel(); // NOTE(review): unused local
        for (int i = 0; i < runk; i++) {
            for (int j = 0; j < cnl; j++) {
                if (index >= ncnl) {
                    break;
                }
                nextDerivationActivateMapDelta = nextConvolutionNeuralList.get(index++).getDerivationActivateMap();
                activateMap = convolutionNeuralList.get(j).getActivateMap();
                // NOTE(review): this re-zeroes ALL accumulators on every (i, j)
                // iteration, wiping sums collected on earlier passes — it looks
                // like it should run once before the loops. Confirm and fix.
                for (int k = 0; k < cnl; k++) {
                    temp[k] = new double[activateMap.length][activateMap[0].length]; // sized from the actual map dimensions
                }
                int poolingPadding = convolutionNeuralList.get(j).poolingPadding;
                maxIndex = convolutionNeuralList.get(j).getPoolingMaxIndex();
                result = setActivateMapDelta(activateMap, maxIndex, nextDerivationActivateMapDelta, poolingPadding);
                if (i == 0) {
                    temp[j] = result;
                } else {
                    for (int k = 0; k < result.length; k++) {
                        for (int v = 0; v < result[0].length; v++) {
                            temp[j][k][v] = result[k][v] + temp[j][k][v];
                        }
                    }
                }
            }
        }
        for (int j = 0; j < cnl; j++) {
            convolutionNeuralList.get(j).setPoolingMapDelta(temp[j]);
        }
        return this;
    }



    /**
     * Scatters a flat delta vector back onto an activation-map-sized matrix at
     * the positions recorded as max-pooling winners; every other cell is zero.
     */
    private double[][] setActivateMapDelta(double[][] activateMap, int[][] maxIndexDelta, double[] fullyConnectedLayerInputDelta) {
        double[][] scattered = new double[activateMap.length][activateMap[0].length];
        for (int k = 0; k < maxIndexDelta.length; k++) {
            int row = maxIndexDelta[k][0];
            int col = maxIndexDelta[k][1];
            scattered[row][col] = fullyConnectedLayerInputDelta[k];
        }
        return scattered;
    }

//    private double[][] setActivateMapDelta(double[][] activateMap, int[][] maxIndexDelta, double[][] inputDelta, int poolingPadding) {
//        int ar = activateMap.length;
//        int ac = activateMap[0].length;
//        int maxR = maxIndexDelta.length;
//        inputDelta = matrix.addPaddingMatrix(inputDelta, poolingPadding, poolingPadding);
//        double[][] result = new double[ar][ac];
//        int indexR = 0;
//        int indexC = 0;
//        int inR = inputDelta.length;
//        int inC = inputDelta[0].length;
//        int index = 0;
//        for (int j = 0; j < inR; j++) {
//            for (int k = 0; k < inC; k++) {
//                indexR = maxIndexDelta[index][0];
//                indexC = maxIndexDelta[index][1];
//                result[indexR][indexC] = inputDelta[j][k];
//                index++;
//            }
//        }
//        return result;
//    }

    // Improved pooling back-propagation: scatters inputDelta values back to
    // the positions recorded as max-pooling winners, accumulating collisions.
    // NOTE(review): the poolingPadding parameter is accepted but never used
    // here (the older commented-out version padded inputDelta with it) —
    // confirm whether padding should still be applied.
    private double[][] setActivateMapDelta(double[][] activateMap,
                                           int[][] maxIndexDelta,
                                           double[][] inputDelta,
                                           int poolingPadding) {
        int ar = activateMap.length;
        int ac = activateMap[0].length;
        double[][] result = new double[ar][ac]; // sized to the activation map

        int index = 0;
        for (int j = 0; j < inputDelta.length; j++) {
            for (int k = 0; k < inputDelta[0].length; k++) {
                if (index >= maxIndexDelta.length) break;

                int x = maxIndexDelta[index][0];
                int y = maxIndexDelta[index][1];

                // Bounds guard before scattering.
                if (x < ar && y < ac) {
                    result[x][y] += inputDelta[j][k];
                }
                index++;
            }
        }
        return result;
    }



    /**
     * Full convolutional backward pass: seed the last layer from the fully
     * connected delta, then walk the layers back to front. The pooling delta
     * of the last layer is already set by endMaxPoolingDelta(), so
     * maxPoolingDelta runs only for the earlier layers.
     */
    public ConvolutionNeuralNetwork convolutionGradientDescent() {
        endMaxPoolingDelta();
        int last = convolutionLayerList.size() - 1;
        for (int layer = last; layer >= 0; layer--) {
            if (layer != last) {
                maxPoolingDelta(layer);
            }
            upSampleDelta(layer);
            deconvolution(layer);
        }
        return this;
    }

    /**
     * Splits the fully connected input-delta vector into one row per
     * convolution neural of the last layer.
     */
    public double[][] fullyConnectedLayerInputDeltaToMatrix() {
        double[] fullyConnectedLayerInputDelta = getFullyConnectedLayerInputDelta();
        convolutionLayer = convolutionLayerList.get(convolutionLayerList.size() - 1);
        int convolutionNeuralNumber = convolutionLayer.getConvolutionNeuralNumber();
        double[][][] convolutionLayerOutput = convolutionLayer.ConvolutionLayerOutput();
        // NOTE(review): cr is the pooled map's ROW count, not rows*cols —
        // confirm vectorDismember expects that rather than the flat map size.
        int cr = convolutionLayerOutput[0].length;
        return matrix.vectorDismember(fullyConnectedLayerInputDelta, convolutionNeuralNumber, cr);
    }

    // Number of samples in Y, cached by train()/run().
    int yr;

    /**
     * Trains for the default two epochs (preserves the previous hard-coded
     * behaviour).
     */
    public ConvolutionNeuralNetwork train() {
        return train(2);
    }

    /**
     * Trains the network: for every sample, a forward pass through the
     * convolution and fully connected parts followed by one gradient descent
     * step, repeated {@code epochs} times.
     *
     * @param epochs number of full passes over the data set
     * @return this network (fluent)
     */
    public ConvolutionNeuralNetwork train(int epochs) {
        yr = Y.length;
        for (int epoch = 0; epoch < epochs; epoch++) {
            for (int j = 0; j < yr; j++) {
                this.in[0] = input[j];
                createConvolutionNeuralNetwork(true);
                createFullyConnectedLayer(true);
                gradientDescent(j);
            }
        }
        return this;
    }

    /**
     * Inference pass over the whole data set: forward through the convolution
     * and fully connected parts, then let the FC network report each output.
     */
    public void run() {
        yr = Y.length;
        for (int sample = 0; sample < yr; sample++) {
            this.in[0] = input[sample];
            createConvolutionNeuralNetwork(false);
            createFullyConnectedLayer(false);
            neuralNetwork.output(sample);
        }
    }

    // --- Plain accessors -------------------------------------------------

    public ConvolutionLayer getConvolutionLayer() {
        return convolutionLayer;
    }

    public void setConvolutionLayer(ConvolutionLayer convolutionLayer) {
        this.convolutionLayer = convolutionLayer;
    }

    public List<ConvolutionLayer> getConvolutionLayerList() {
        return convolutionLayerList;
    }

    public void setConvolutionLayerList(List<ConvolutionLayer> convolutionLayerList) {
        this.convolutionLayerList = convolutionLayerList;
    }

    public List<ConvolutionNeural> getConvolutionNeuralList() {
        return convolutionNeuralList;
    }

    public void setConvolutionNeuralList(List<ConvolutionNeural> convolutionNeuralList) {
        this.convolutionNeuralList = convolutionNeuralList;
    }

    public Activateable getActivateable() {
        return activateable;
    }

    public void setActivateable(Activateable activateable) {
        this.activateable = activateable;
    }

    public NeuralNetwork getNeuralNetwork() {
        return neuralNetwork;
    }

    public void setNeuralNetwork(NeuralNetwork neuralNetwork) {
        this.neuralNetwork = neuralNetwork;
    }

    public List<Layer> getLayerList() {
        return layerList;
    }

    public void setLayerList(List<Layer> layerList) {
        this.layerList = layerList;
    }

    public MatrixExtensions getMatrix() {
        return matrix;
    }

    public void setMatrix(MatrixExtensions matrix) {
        this.matrix = matrix;
    }

    public Lossable getLossable() {
        return lossable;
    }

    public void setLossable(Lossable lossable) {
        this.lossable = lossable;
    }

    public double getLearningRate() {
        return learningRate;
    }

    // Propagates the rate to the fully connected network as well, so both
    // halves of the model always train with the same learning rate.
    public void setLearningRate(double learningRate) {
        neuralNetwork.setLearningRate(learningRate);
        this.learningRate = learningRate;
    }
}
