package enhanced.neural.network.controller;

import cn.hutool.core.io.FileUtil;
import cn.hutool.json.JSONUtil;
import enhanced.neural.network.model.ModelParameters;
import lombok.extern.slf4j.Slf4j;
import enhanced.neural.network.dto.CommonResponse;
import enhanced.neural.network.dto.ImageData;
import enhanced.neural.network.dto.PredictionResultDTO;
import enhanced.neural.network.dto.TrainingResultDTO;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.PostMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;

import javax.imageio.ImageIO;
import java.awt.image.BufferedImage;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

@Slf4j
@RequestMapping("/api/nn")
@RestController
public class EnhancedNeuralNetworkController {

    /** Floor applied to per-feature std so constant pixels (e.g. blank MNIST borders) never divide by zero. */
    private static final double STD_EPSILON = 1e-8;

    // Most recently trained (or loaded) model.
    // NOTE(review): this controller is a singleton; trainedModel/dataMean/dataStd are shared
    // mutable state with no synchronization — concurrent /train and /predict requests may
    // observe partially updated state. Confirm whether that is acceptable for this service.
    private EnhancedNeuralNetwork trainedModel;
    // Per-feature standardization statistics computed from the training split.
    private double[] dataMean;
    private double[] dataStd;

    /**
     * Trains the neural network on an MNIST-style dataset laid out as
     * {@code <datasetPath>/training} and {@code <datasetPath>/test}, evaluates it on the
     * test split, persists the model parameters (including the normalization statistics
     * that {@code /predict} needs to reload it), and keeps the model in memory.
     *
     * @param datasetPath root directory containing "training" and "test" subdirectories
     * @return training metrics and the absolute path of the saved model file
     */
    @PostMapping("/train")
    public CommonResponse<TrainingResultDTO> train(@RequestParam("datasetPath") String datasetPath) {
        try {
            // 1. Dataset paths.
            String trainPath = datasetPath + File.separator + "training";
            String testPath = datasetPath + File.separator + "test";

            // 2. Load and preprocess images. Random augmentation is applied to the
            //    training set only (loadMNISTDataset keys off the dataset type).
            List<ImageData> trainData = loadMNISTDataset(trainPath, "training");
            List<ImageData> testData = loadMNISTDataset(testPath, "test");

            if (trainData.isEmpty() || testData.isEmpty()) {
                return CommonResponse.error(HttpStatus.BAD_REQUEST.value() + "", "没有加载到足够的数据", HttpStatus.BAD_REQUEST.getReasonPhrase());
            }

            // 3. Split a validation set off the training data (90% train / 10% validation).
            //    The fixed seed keeps the split reproducible across runs.
            Collections.shuffle(trainData, new Random(48));
            int trainSize = (int) (trainData.size() * 0.9);
            int valSize = trainData.size() - trainSize;

            List<ImageData> trainSubset = trainData.subList(0, trainSize);
            List<ImageData> valData = trainData.subList(trainSize, trainData.size());
            // NOTE(review): augmentation happens before this split, so validation samples
            // are augmented too; ideally only trainSubset would be — confirm intent.

            // 4. Convert to the flat format the network consumes (28x28 = 784 pixels).
            double[][] X_train = toFeatureMatrix(trainSubset);
            int[] y_train = toLabels(trainSubset);
            double[][] X_val = toFeatureMatrix(valData);
            int[] y_val = toLabels(valData);
            double[][] X_test = toFeatureMatrix(testData);
            int[] y_test = toLabels(testData);

            // 5. Standardize using statistics from the training split only, then apply the
            //    same statistics to the validation and test data (no leakage).
            log.info("正在进行数据标准化...");
            dataMean = calculateMean(X_train);
            dataStd = calculateStd(X_train, dataMean);

            normalizeData(X_train, dataMean, dataStd);
            normalizeData(X_val, dataMean, dataStd);
            normalizeData(X_test, dataMean, dataStd);

            log.info("数据标准化完成");

            // 6. Build and train the network (784 -> 256 -> 128 -> 10).
            EnhancedNeuralNetwork nn = new EnhancedNeuralNetwork(784, 256, 128, 10);

            int maxEpochs = 25;
            int batchSize = 32;

            log.info("开始训练神经网络...");
            long startTime = System.currentTimeMillis();

            nn.train(X_train, y_train, X_val, y_val, maxEpochs, batchSize);

            long trainingTime = System.currentTimeMillis() - startTime;
            log.info("神经网络训练完成，耗时: {} ms", trainingTime);

            // 7. Evaluate on the held-out test set.
            double accuracy = nn.evaluateAccuracy(X_test, y_test);
            log.info("测试集准确率: {}%", accuracy * 100);

            // Keep the trained model in memory for subsequent /predict calls.
            trainedModel = nn;

            // 8. Persist the model parameters. The normalization statistics MUST be stored
            //    alongside the weights: /predict reads them back via getDataMean()/getDataStd(),
            //    and without them a reloaded model silently skips standardization.
            //    (Assumes ModelParameters exposes setters matching its getters — Lombok-style.)
            ModelParameters modelParams = nn.toModelParameters();
            modelParams.setDataMean(dataMean);
            modelParams.setDataStd(dataStd);
            // TODO(review): hard-coded absolute Windows path — move to configuration.
            String modelPath = "e:\\project\\enhanced-neural-network\\models\\mnist_model_" + System.currentTimeMillis() + ".txt";
            File file = FileUtil.writeUtf8String(JSONUtil.toJsonStr(modelParams), modelPath);
            log.info("模型参数已保存到: {}", file.getAbsolutePath());

            // 9. Assemble the training-result DTO.
            TrainingResultDTO result = new TrainingResultDTO();
            result.setModelPath(file.getAbsolutePath());
            result.setAccuracy(accuracy);
            result.setTrainingTime(trainingTime);
            result.setTrainSamples(trainSubset.size());
            result.setValSamples(valSize);
            result.setTestSamples(testData.size());
            result.setEpochs(maxEpochs);
            result.setBatchSize(batchSize);
            result.setNetworkStructure(nn.getInputSize() + " -> " + nn.getHiddenSize1() + " -> " + nn.getHiddenSize2() + " -> " + nn.getOutputSize());

            return CommonResponse.success(result);
        } catch (Exception e) {
            log.error("神经网络训练失败", e);
            return CommonResponse.error(HttpStatus.INTERNAL_SERVER_ERROR.value() + "", "训练失败: " + e.getMessage(), HttpStatus.INTERNAL_SERVER_ERROR.getReasonPhrase());
        }
    }

    /**
     * Predicts the digit (0-9) in the uploaded image using the model currently in memory,
     * or — if none has been trained in this process — a model loaded from {@code modelFilePath}.
     *
     * @param imageFile     image to classify (scaled to 28x28 grayscale)
     * @param modelFilePath optional path to a previously saved model parameter file
     * @return predicted label, the per-class probability distribution, and confidence
     */
    @PostMapping("/predict")
    public CommonResponse<PredictionResultDTO> predict(@RequestParam("image") MultipartFile imageFile, @RequestParam(name = "modelPath", required = false) String modelFilePath) {
        try {
            // Lazily load a persisted model when none is cached in this process.
            if (trainedModel == null) {
                if (modelFilePath == null) {
                    return CommonResponse.error(HttpStatus.BAD_REQUEST.value() + "", "请提供训练好的模型文件路径", HttpStatus.BAD_REQUEST.getReasonPhrase());
                }
                if (imageFile == null) {
                    return CommonResponse.error(HttpStatus.BAD_REQUEST.value() + "", "请提供要测试的图像文件", HttpStatus.BAD_REQUEST.getReasonPhrase());
                }
                // Restore the model together with the normalization statistics it was trained with.
                ModelParameters modelParams = JSONUtil.toBean(FileUtil.readUtf8String(modelFilePath), ModelParameters.class);
                trainedModel = EnhancedNeuralNetwork.fromModelParameters(modelParams);
                dataMean = modelParams.getDataMean();
                dataStd = modelParams.getDataStd();
            }

            // 1. Read and preprocess the image. Augmentation is disabled for inference,
            //    so repeated predictions on the same image are deterministic.
            BufferedImage img = ImageIO.read(imageFile.getInputStream());
            if (img == null) {
                return CommonResponse.error(HttpStatus.BAD_REQUEST.value() + "", "无法读取图像文件", HttpStatus.BAD_REQUEST.getReasonPhrase());
            }

            double[] features = preprocessImageOptimized(img, 28, 28, false);

            // 2. Apply the same standardization used during training. Skipped only when
            //    the loaded model file carries no statistics (legacy files).
            if (dataMean != null && dataStd != null) {
                for (int i = 0; i < features.length; i++) {
                    features[i] = (features[i] - dataMean[i]) / dataStd[i];
                }
            }

            // 3. Predict the digit and fetch the full class-probability distribution.
            log.info("正在预测...");
            int prediction = trainedModel.predict(features);
            double[] probabilities = trainedModel.forward(features, false);
            log.info("预测概率分布: {}", Arrays.toString(probabilities));

            // 4. Assemble the prediction-result DTO.
            PredictionResultDTO result = new PredictionResultDTO();
            // Predicted digit (0-9).
            result.setPredictedLabel(prediction);
            // Probability for each digit; the model's softmax output.
            result.setProbabilities(probabilities);
            // Confidence = probability assigned to the predicted class.
            result.setConfidence(probabilities[prediction]);
            result.setImageName(imageFile.getOriginalFilename());

            return CommonResponse.success(result);
        } catch (Exception e) {
            log.error("图像预测失败", e);
            return CommonResponse.error(HttpStatus.INTERNAL_SERVER_ERROR.value() + "", "预测失败: " + e.getMessage(), HttpStatus.INTERNAL_SERVER_ERROR.getReasonPhrase());
        }
    }

    /**
     * Loads an MNIST-style dataset from a directory of {@code *_<label>.jpg|png} files.
     * The label is parsed from the last underscore-separated token of the file name.
     * Random augmentation is applied only when {@code datasetType} is {@code "training"};
     * previously every dataset — including the test set — was augmented, which corrupted
     * evaluation.
     *
     * @param datasetPath directory containing the image files
     * @param datasetType "training" or "test" — controls augmentation and log output
     * @return loaded samples; an empty list when the directory is missing or empty
     */
    private List<ImageData> loadMNISTDataset(String datasetPath, String datasetType) {
        log.info("正在加载 {} 数据集...", datasetType);
        long startTime = System.currentTimeMillis();

        File datasetDir = new File(datasetPath);

        if (!datasetDir.exists() || !datasetDir.isDirectory()) {
            log.error("目录不存在: {}", datasetDir.getAbsolutePath());
            return new ArrayList<>();
        }

        File[] imageFiles = datasetDir.listFiles((dir, name) ->
                name.toLowerCase().endsWith(".jpg") ||
                        name.toLowerCase().endsWith(".png"));

        if (imageFiles == null || imageFiles.length == 0) {
            log.error("目录中没有图片: {}", datasetDir.getAbsolutePath());
            return new ArrayList<>();
        }

        log.info("找到 {} 个图像文件，开始处理...", imageFiles.length);

        // Only distort training samples; evaluation data must stay pristine.
        boolean applyAugmentation = "training".equals(datasetType);

        AtomicInteger processedCount = new AtomicInteger(0);
        List<ImageData> data = Arrays.stream(imageFiles)
                .parallel()
                .map(imgFile -> {
                    try {
                        // Expected file-name pattern: <prefix>_<index>_<label>.<ext>
                        String filename = imgFile.getName();
                        String[] parts = filename.split("_");

                        if (parts.length < 3) {
                            return null; // unrecognized name — skip silently
                        }

                        int label;
                        try {
                            String labelPart = parts[parts.length - 1];
                            labelPart = labelPart.substring(0, labelPart.lastIndexOf('.'));
                            label = Integer.parseInt(labelPart);
                        } catch (NumberFormatException e) {
                            return null; // non-numeric label — skip
                        }

                        BufferedImage img = ImageIO.read(imgFile);
                        if (img == null) {
                            return null; // unreadable image — skip
                        }

                        double[] features = preprocessImageOptimized(img, 28, 28, applyAugmentation);

                        int current = processedCount.incrementAndGet();
                        if (current % 1000 == 0) {
                            log.info("已处理 {}/{} 个文件", current, imageFiles.length);
                        }

                        return new ImageData(features, label, filename);
                    } catch (IOException e) {
                        log.warn("处理图像文件失败: {}", imgFile.getName(), e);
                        return null;
                    }
                })
                .filter(Objects::nonNull)
                .collect(Collectors.toList());

        long endTime = System.currentTimeMillis();
        log.info("已加载 {} 数据集: {} 个样本，耗时: {} ms", datasetType, data.size(), (endTime - startTime));

        return data;
    }

    /**
     * Scales the image to {@code width x height} grayscale and converts it to a feature
     * vector in [0, 1], inverted so background maps to 0 and ink to 1.
     * When {@code applyAugmentation} is true, a random rotation/scale/shear/translation
     * is applied first.
     *
     * <p>Fix: the original implementation ignored {@code applyAugmentation} entirely and
     * randomly distorted every image — including prediction input.</p>
     */
    private double[] preprocessImageOptimized(BufferedImage img, int width, int height, boolean applyAugmentation) {
        BufferedImage resizedImage = applyAugmentation
                ? augmentImage(img, width, height)
                : plainResize(img, width, height);

        // Flatten to a feature vector: 1.0 - gray/255 inverts light background to ~0.
        double[] features = new double[width * height];
        int[] pixels = new int[width * height];
        resizedImage.getRaster().getPixels(0, 0, width, height, pixels);

        for (int i = 0; i < pixels.length; i++) {
            features[i] = 1.0 - (pixels[i] / 255.0);
        }

        return features;
    }

    /** Deterministic bilinear resize to the target grayscale size (used for inference). */
    private BufferedImage plainResize(BufferedImage img, int width, int height) {
        BufferedImage resized = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
        java.awt.Graphics2D g2d = resized.createGraphics();
        g2d.setRenderingHint(java.awt.RenderingHints.KEY_INTERPOLATION,
                java.awt.RenderingHints.VALUE_INTERPOLATION_BILINEAR);
        g2d.drawImage(img, 0, 0, width, height, null);
        g2d.dispose();
        return resized;
    }

    /**
     * Applies random training-time augmentation — rotation (±15°), scale (0.9–1.1),
     * shear (±0.1) and translation (±2 px) — then crops back to the target size.
     */
    private BufferedImage augmentImage(BufferedImage img, int width, int height) {
        Random random = new Random();
        double angle = (random.nextDouble() - 0.5) * 30;   // -15° .. 15°
        double scale = 0.9 + random.nextDouble() * 0.2;    // 0.9 .. 1.1
        int scaledWidth = (int) (width * scale);
        int scaledHeight = (int) (height * scale);
        // Render on a slightly larger canvas so rotation does not clip the digit.
        int rotationWidth = (int) (width * 1.2);
        int rotationHeight = (int) (height * 1.2);
        BufferedImage rotatedImage = new BufferedImage(rotationWidth, rotationHeight, BufferedImage.TYPE_BYTE_GRAY);

        java.awt.Graphics2D g2d = rotatedImage.createGraphics();
        g2d.setRenderingHint(java.awt.RenderingHints.KEY_INTERPOLATION,
                java.awt.RenderingHints.VALUE_INTERPOLATION_BILINEAR);
        g2d.setRenderingHint(java.awt.RenderingHints.KEY_RENDERING,
                java.awt.RenderingHints.VALUE_RENDER_QUALITY);
        g2d.setRenderingHint(java.awt.RenderingHints.KEY_ANTIALIASING,
                java.awt.RenderingHints.VALUE_ANTIALIAS_ON);

        // Shear and rotate about the canvas center, then draw the scaled, jittered image.
        g2d.translate(rotationWidth / 2, rotationHeight / 2);
        double shearX = (random.nextDouble() - 0.5) * 0.2; // -0.1 .. 0.1
        double shearY = (random.nextDouble() - 0.5) * 0.2;
        g2d.shear(shearX, shearY);
        g2d.rotate(Math.toRadians(angle));
        int dx = random.nextInt(5) - 2;                    // -2 .. 2 px
        int dy = random.nextInt(5) - 2;
        g2d.drawImage(img, -scaledWidth / 2 + dx, -scaledHeight / 2 + dy, scaledWidth, scaledHeight, null);
        g2d.dispose();

        // Crop the enlarged canvas back to the requested size.
        BufferedImage cropped = new BufferedImage(width, height, BufferedImage.TYPE_BYTE_GRAY);
        java.awt.Graphics2D cropG2d = cropped.createGraphics();
        int x = (rotationWidth - width) / 2;
        int y = (rotationHeight - height) / 2;
        cropG2d.drawImage(rotatedImage, 0, 0, width, height, x, y, x + width, y + height, null);
        cropG2d.dispose();
        return cropped;
    }

    /** Stacks the samples' feature vectors into a row-per-sample matrix (no copying). */
    private double[][] toFeatureMatrix(List<ImageData> samples) {
        double[][] X = new double[samples.size()][];
        for (int i = 0; i < samples.size(); i++) {
            X[i] = samples.get(i).features;
        }
        return X;
    }

    /** Collects the samples' integer labels into an array. */
    private int[] toLabels(List<ImageData> samples) {
        int[] y = new int[samples.size()];
        for (int i = 0; i < samples.size(); i++) {
            y[i] = samples.get(i).label;
        }
        return y;
    }

    /** Per-feature mean over all samples. */
    private double[] calculateMean(double[][] data) {
        int numFeatures = data[0].length;
        double[] mean = new double[numFeatures];
        for (int i = 0; i < numFeatures; i++) {
            double sum = 0;
            for (double[] datum : data) {
                sum += datum[i];
            }
            mean[i] = sum / data.length;
        }
        return mean;
    }

    /**
     * Per-feature population standard deviation, floored at {@link #STD_EPSILON}.
     * Fix: constant features (e.g. always-blank border pixels in MNIST) previously
     * yielded std = 0 and produced NaN/Infinity after normalization.
     */
    private double[] calculateStd(double[][] data, double[] mean) {
        int numFeatures = data[0].length;
        double[] std = new double[numFeatures];
        for (int i = 0; i < numFeatures; i++) {
            double sumSquaredDiff = 0;
            for (double[] datum : data) {
                double diff = datum[i] - mean[i];
                sumSquaredDiff += diff * diff;
            }
            std[i] = Math.max(Math.sqrt(sumSquaredDiff / data.length), STD_EPSILON);
        }
        return std;
    }

    /** In-place feature-wise z-score normalization: x -> (x - mean) / std. */
    private void normalizeData(double[][] data, double[] mean, double[] std) {
        for (int i = 0; i < data.length; i++) {
            for (int j = 0; j < data[i].length; j++) {
                data[i][j] = (data[i][j] - mean[j]) / std[j];
            }
        }
    }
}
