package org.dxy.trigger;

import javax.swing.Timer;
import javax.swing.*;
import java.awt.*;
import java.awt.event.*;
import java.util.*;
import java.util.List;

import org.deeplearning4j.nn.conf.MultiLayerConfiguration;
import org.deeplearning4j.nn.conf.NeuralNetConfiguration;
import org.deeplearning4j.nn.conf.layers.DenseLayer;
import org.deeplearning4j.nn.conf.layers.OutputLayer;
import org.deeplearning4j.nn.multilayer.MultiLayerNetwork;
import org.nd4j.linalg.activations.Activation;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.linalg.factory.Nd4jBackend;
import org.nd4j.linalg.learning.config.Adam;
import org.nd4j.linalg.lossfunctions.LossFunctions;

/**
 * 基于深度Q学习的智能贪吃蛇游戏
 * 
 * 功能特性：
 * - 使用深度Q网络(DQN)进行强化学习训练
 * - 支持GPU/CPU自动后端选择
 * - 智能奖励机制，鼓励蛇变得更长
 * - 实时显示训练状态和性能指标
 * 
 * @author dxy
 * @createTime 2025/7/18 23:17
 */
public class SnakeGameWithDQN extends JPanel implements ActionListener {
    
    // ==================== Basic game configuration ====================
    private final int WIDTH = 600;                    // playfield width in pixels
    private final int HEIGHT = 600;                   // playfield height in pixels
    private final int UNIT_SIZE = 20;                 // pixel size of one grid cell
    private final int GAME_UNITS = (WIDTH * HEIGHT) / (UNIT_SIZE * UNIT_SIZE); // total number of grid cells
    private final int DELAY = 80;                     // game tick interval (ms); slightly fast to gather more training data
    
    // ==================== Game state ====================
    private final ArrayList<Integer> snakeX = new ArrayList<>();  // x coordinates of snake segments (index 0 = head)
    private final ArrayList<Integer> snakeY = new ArrayList<>();  // y coordinates of snake segments (index 0 = head)
    private int foodX;                                // food x coordinate
    private int foodY;                                // food y coordinate
    private int score;                                // current score
    private char direction = 'R';                     // current heading (U=up / D=down / L=left / R=right)
    private boolean running = false;                  // whether a game is currently in progress
    private boolean foodEaten = false;                // true right after eating food (growth applied on next move)
    private Timer timer;                              // game-loop timer (javax.swing.Timer)
    private Random random;                            // RNG for spawning and exploration
    
    // ==================== DQN deep-learning components ====================
    private MultiLayerNetwork model;                  // deep Q-network
    private Deque<Transition> replayMemory = new LinkedList<>(); // experience replay buffer
    private final int REPLAY_MEMORY_SIZE = 20000;      // replay buffer capacity (larger for GPU mode)
    private final double GAMMA = 0.95;                // discount factor for future rewards
    private double epsilon = 0.9;                     // ε-greedy exploration rate (initial value)
    private final double EPSILON_MIN = 0.05;          // exploration floor; beyond it, mostly exploit learned policy
    private final double EPSILON_DECAY = 0.998;       // per-decay multiplier (deliberately slow decay)
    private final int BATCH_SIZE = 128;               // training minibatch size (larger for GPU efficiency)
    
    // ==================== Training statistics ====================
    private int gameCount = 0;                        // number of games played
    private double totalReward = 0.0;                 // cumulative reward of the current game
    private final Deque<Double> recentRewards = new LinkedList<>(); // rewards of the most recent games
    private final int REWARD_WINDOW = 20;             // size of the reward-averaging window
    private final double DISTANCE_REWARD_SCALE = 3.0; // scale factor for distance-based reward shaping
    private Deque<Character> recentDirections = new LinkedList<>(); // most recent movement directions (bounded elsewhere)
    private int consecutiveFoodMoves = 0;             // consecutive steps moving toward the food
    private int maxScore = 0;                         // best score seen so far

    /**
     * Constructor — sets up the Swing panel, detects the compute backend,
     * builds the DQN model, and starts the first game.
     */
    public SnakeGameWithDQN() {
        // Configure panel size, background, and keyboard focus
        setPreferredSize(new Dimension(WIDTH, HEIGHT));
        setBackground(Color.black);
        setFocusable(true);
        addKeyListener(new MyKeyAdapter());
        
        // Detect and configure the ND4J backend (GPU/CPU) — logging only
        detectAndConfigureBackend();
        
        // Build and initialise the deep Q-network
        initializeDQN();
        
        // Start the first game (also starts the Swing timer loop)
        startGame();
    }
    
    /**
     * Detects the active ND4J compute backend (GPU/CUDA vs CPU) and logs the
     * result. Purely informational: it never changes configuration, and any
     * failure falls through to the default backend.
     */
    private void detectAndConfigureBackend() {
        try {
            // Query the currently loaded ND4J backend
            Nd4jBackend backend = Nd4j.getBackend();
            String backendName = backend.getClass().getSimpleName();
            System.out.println("🔧 ND4J计算后端检测: " + backendName);
            
            // Heuristic: treat any backend whose class name mentions cuda/gpu as GPU
            boolean isGPUBackend = backendName.toLowerCase().contains("cuda") || 
                                 backendName.toLowerCase().contains("gpu");
            
            if (isGPUBackend) {
                System.out.println("✅ 成功检测到GPU计算后端");
                try {
                    // Number of GPU devices visible to ND4J
                    int deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
                    System.out.println("📊 系统可用GPU设备数量: " + deviceCount);
                    
                    if (deviceCount > 0) {
                        System.out.println("🚀 GPU加速深度学习训练已启用");
                        System.out.println("⚡ 预期训练速度将显著提升");
                        
                        // Best-effort: report which device this thread is bound to
                        try {
                            Integer currentDevice = Nd4j.getAffinityManager().getDeviceForCurrentThread();
                            System.out.println("💻 当前训练线程使用GPU设备ID: " + currentDevice);
                        } catch (Exception deviceEx) {
                            System.out.println("💻 使用系统默认GPU设备配置");
                        }
                    } else {
                        System.out.println("⚠️ GPU后端已加载但检测不到可用设备");
                        System.out.println("🔍 请检查CUDA驱动和GPU硬件状态");
                    }
                } catch (Exception deviceEx) {
                    System.out.println("⚠️ 无法获取GPU设备详细信息: " + deviceEx.getMessage());
                    System.out.println("🔄 将继续使用GPU后端的默认配置");
                }
            } else {
                System.out.println("ℹ️ CPU计算后端已激活");
                System.out.println("💡 如需GPU加速，请安装CUDA和相应的ND4J-CUDA依赖");
            }
            
        } catch (Exception e) {
            // Non-fatal: log and continue with whatever backend ND4J chose
            System.err.println("❌ 计算后端检测过程发生异常: " + e.getMessage());
            System.err.println("🔄 系统将继续使用默认后端配置...");
            e.printStackTrace();
        }
    }

    /**
     * Builds and initialises the deep Q-network.
     * Tries a larger architecture first (16 -> 256 -> 256 -> 128 -> 4, lr 0.001);
     * on failure falls back to a lighter CPU-friendly one
     * (16 -> 128 -> 128 -> 64 -> 4, lr 0.0005).
     *
     * @throws RuntimeException if neither model can be initialised
     */
    private void initializeDQN() {
        try {
            System.out.println("🧠 开始构建深度Q网络(DQN)神经网络模型...");
            
            // Preferred (GPU-sized) architecture; wider layers for more capacity.
            model = buildNetwork(0.001, 256, 256, 128);
            
            System.out.println("✅ GPU优化DQN模型构建成功");
            System.out.println("📊 神经网络总参数数量: " + model.numParams());
            System.out.println("🏗️ 网络架构: 16 -> 256 -> 256 -> 128 -> 4");
            System.out.println("⚙️ 优化器: Adam (学习率: 0.001)");
            
        } catch (Exception e) {
            System.err.println("❌ GPU优化DQN模型初始化失败: " + e.getMessage());
            System.err.println("🔍 错误详情: ");
            e.printStackTrace();
            
            // Fallback: lighter architecture with a lower learning rate.
            System.out.println("🔄 正在切换到CPU优化的轻量级DQN模型...");
            try {
                model = buildNetwork(0.0005, 128, 128, 64);
                
                System.out.println("✅ CPU优化DQN模型构建成功");
                System.out.println("📊 轻量级网络总参数数量: " + model.numParams());
                System.out.println("🏗️ 网络架构: 16 -> 128 -> 128 -> 64 -> 4");
                System.out.println("⚙️ 优化器: Adam (学习率: 0.0005)");
                
            } catch (Exception cpuEx) {
                System.err.println("❌ CPU回退模型也初始化失败: " + cpuEx.getMessage());
                throw new RuntimeException("无法初始化任何DQN模型", cpuEx);
            }
        }
    }

    /**
     * Builds and initialises a 4-layer MLP Q-network:
     * 16 inputs (game-state features) -> h1 -> h2 -> h3 -> 4 Q-values (U/D/L/R),
     * ReLU hidden activations, identity output, MSE loss, Adam optimiser.
     * Extracted so the GPU and CPU paths no longer duplicate the whole builder.
     *
     * @param learningRate Adam learning rate
     * @param h1 first hidden layer width
     * @param h2 second hidden layer width
     * @param h3 third hidden layer width
     * @return the initialised network
     */
    private MultiLayerNetwork buildNetwork(double learningRate, int h1, int h2, int h3) {
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(123)                          // fixed seed for reproducible training
                .updater(new Adam(learningRate))
                .list()
                // Input layer: 16 state features (danger flags, direction, food position, distances)
                .layer(new DenseLayer.Builder()
                        .nIn(16).nOut(h1)
                        .activation(Activation.RELU)    // ReLU avoids vanishing gradients
                        .build())
                // Deep feature-extraction layer
                .layer(new DenseLayer.Builder()
                        .nIn(h1).nOut(h2)
                        .activation(Activation.RELU)
                        .build())
                // Policy-integration layer
                .layer(new DenseLayer.Builder()
                        .nIn(h2).nOut(h3)
                        .activation(Activation.RELU)
                        .build())
                // Output layer: one Q-value per action — up(0), down(1), left(2), right(3)
                .layer(new OutputLayer.Builder(LossFunctions.LossFunction.MSE)
                        .nIn(h3).nOut(4)
                        .activation(Activation.IDENTITY)  // linear output: raw Q-values
                        .build())
                .build();
        MultiLayerNetwork net = new MultiLayerNetwork(conf);
        net.init();
        return net;
    }

    /**
     * Starts a new game: resets snake/score/direction, spawns food, restarts
     * the timer, and updates training statistics.
     *
     * Bug fixes vs the original:
     * - The "last game" summary was printed AFTER {@code score} and
     *   {@code totalReward} were reset, so it always reported 0 / 0.00.
     *   The previous values are now captured before resetting.
     * - {@code random} was initialised only after it had already been needed,
     *   so game 1 silently fell back to a fixed start position/direction.
     *   The RNG is now created first, so every game is randomised as intended.
     */
    public void startGame() {
        // Initialise the RNG before first use (see bug note above)
        if (random == null) {
            random = new Random();
        }
        
        // Capture the finished game's results before they are reset below
        int lastScore = score;
        double lastReward = totalReward;
        
        // Clear the snake body
        snakeX.clear();
        snakeY.clear();
        
        // Random start position, kept away from the edges (cells 5..14),
        // so the AI does not overfit to one fixed starting point
        int startX = (random.nextInt(10) + 5) * UNIT_SIZE;
        int startY = (random.nextInt(10) + 5) * UNIT_SIZE;
        snakeX.add(startX);
        snakeY.add(startY);
        
        // Reset core game state
        score = 0;
        running = true;
        foodEaten = false;
        
        // Random initial direction for training diversity
        char[] directions = {'U', 'D', 'L', 'R'};
        direction = directions[random.nextInt(4)];
        
        // Reset AI bookkeeping
        recentDirections.clear();
        consecutiveFoodMoves = 0;
        
        // Spawn the first food
        spawnFood();
        
        // Restart the game-loop timer
        if (timer != null) {
            timer.stop();
        }
        timer = new Timer(DELAY, this);
        timer.start();
        
        gameCount++;
        
        // Record the previous game's total reward (game 1 has no predecessor)
        if (gameCount > 1) {
            recentRewards.add(lastReward);
            if (recentRewards.size() > REWARD_WINDOW) {
                recentRewards.removeFirst();
            }
        }
        
        totalReward = 0.0;                  // reset for the new game
        
        // Log the new game and training status
        System.out.println("🎮 开始第" + gameCount + "局游戏");
        System.out.println("📍 起始位置: (" + startX + ", " + startY + "), 初始方向: " + direction);
        System.out.println("📊 训练状态 - 探索率: " + String.format("%.3f", epsilon) + 
                          ", 历史最高分: " + maxScore);
        if (gameCount > 1) {
            System.out.println("🏆 上局表现 - 得分: " + lastScore + ", 总奖励: " + String.format("%.2f", lastReward));
        }
    }

    /**
     * Places a new food item on a random grid-aligned cell that does not
     * overlap the snake. Gives up after a bounded number of attempts so a very
     * long snake cannot cause an infinite loop.
     */
    public void spawnFood() {
        final int MAX_ATTEMPTS = 100;  // cap to avoid looping forever on a crowded board
        int attempts = 0;
        boolean placed = false;
        
        while (!placed) {
            attempts++;
            placed = true;
            
            // Random cell, multiplied by UNIT_SIZE to stay grid-aligned
            foodX = random.nextInt(WIDTH / UNIT_SIZE) * UNIT_SIZE;
            foodY = random.nextInt(HEIGHT / UNIT_SIZE) * UNIT_SIZE;
            
            // Defensive bounds check (should always pass given the formula above)
            if (foodX < 0 || foodX >= WIDTH || foodY < 0 || foodY >= HEIGHT) {
                placed = false;
                System.out.println("⚠️ 食物坐标超出边界，重新生成...");
                continue;
            }
            
            // Reject a cell occupied by any snake segment
            for (int i = 0; i < snakeX.size(); i++) {
                if (foodX == snakeX.get(i) && foodY == snakeY.get(i)) {
                    placed = false;
                    break;
                }
            }
            
            // Give up after too many attempts and accept the current cell
            if (attempts >= MAX_ATTEMPTS) {
                System.out.println("⚠️ 食物生成尝试次数过多，使用当前位置");
                break;
            }
        }
        
        // Log the spawn result
        System.out.println("🍎 新食物已生成");
        System.out.println("📍 食物坐标: (" + foodX + ", " + foodY + ")");
        System.out.println("🎯 生成尝试次数: " + attempts);
        
        // Report the initial head-to-food Manhattan distance (reward reference)
        if (!snakeX.isEmpty()) {
            double initialDistance = Math.abs(snakeX.get(0) - foodX) + Math.abs(snakeY.get(0) - foodY);
            System.out.println("📏 蛇头到食物初始距离: " + String.format("%.1f", initialDistance));
        }
    }

    /**
     * Swing paint callback: clears the panel via the superclass, then
     * delegates all rendering to {@link #draw(Graphics)}.
     */
    @Override
    public void paintComponent(Graphics g) {
        super.paintComponent(g);
        draw(g);
    }

    /**
     * Renders the whole scene: food, snake, score and AI-training metrics,
     * compute-backend/model status, and the control hint at the bottom.
     *
     * @param g graphics context supplied by {@link #paintComponent(Graphics)}
     */
    public void draw(Graphics g) {
        // Food: a red disc for visibility
        g.setColor(Color.red);
        g.fillOval(foodX, foodY, UNIT_SIZE, UNIT_SIZE);
        
        // Orange highlight ring around the food
        g.setColor(Color.orange);
        g.drawOval(foodX - 1, foodY - 1, UNIT_SIZE + 2, UNIT_SIZE + 2);

        // Snake: bright green head, body darker with distance from the head
        for (int i = 0; i < snakeX.size(); i++) {
            if (i == 0) {
                // Head — bright green marks the current position
                g.setColor(Color.green);
            } else {
                // Body — fades toward dark green further from the head
                int colorIntensity = Math.max(50, 150 - i * 5);  // dimmer when farther
                g.setColor(new Color(0, colorIntensity, 0));
            }
            g.fillRect(snakeX.get(i), snakeY.get(i), UNIT_SIZE, UNIT_SIZE);
            
            // Dark-gray outline per segment for contrast
            g.setColor(Color.darkGray);
            g.drawRect(snakeX.get(i), snakeY.get(i), UNIT_SIZE, UNIT_SIZE);
        }

        // HUD text
        g.setColor(Color.white);
        g.setFont(new Font("微软雅黑", Font.BOLD, 16));
        
        // Line 1: score / best score / game count
        g.drawString("当前得分: " + score + " | 最高分: " + maxScore + " | 游戏局数: " + gameCount, 10, 25);
        
        // Line 2: snake length and exploration rate
        g.drawString("蛇身长度: " + snakeX.size() + " | 探索率: " + String.format("%.3f", epsilon), 10, 50);
        
        // Line 3: reward metrics (windowed average + current game)
        double avgReward = recentRewards.isEmpty() ? 0.0 : 
                          recentRewards.stream().mapToDouble(Double::doubleValue).average().getAsDouble();
        g.drawString("平均奖励: " + String.format("%.2f", avgReward) + " | 当前局奖励: " + String.format("%.2f", totalReward), 10, 75);
        
        // Line 4: environment awareness (free neighbours / safe path — path only checked for long snakes)
        g.drawString("周围空间: " + getFreeSpaceCount() + "/4 | 安全路径: " + 
                    (snakeX.size() > 10 ? (hasSafePath() ? "存在" : "无") : "检测中"), 10, 100);
        
        // Line 5: movement status
        g.drawString("当前方向: " + getDirectionName(direction) + " | 连续向食物移动: " + consecutiveFoodMoves + "步", 10, 125);
        
        // Line 6: Manhattan distance to food + replay-buffer usage
        if (!snakeX.isEmpty()) {
            double distanceToFood = Math.abs(snakeX.get(0) - foodX) + Math.abs(snakeY.get(0) - foodY);
            g.drawString("到食物距离: " + String.format("%.1f", distanceToFood) + " | 经验池: " + replayMemory.size() + "/" + REPLAY_MEMORY_SIZE, 10, 150);
        }
        
        // Backend and model status lines
        try {
            String backend = Nd4j.getBackend().getClass().getSimpleName();
            boolean isGPU = backend.toLowerCase().contains("cuda") || backend.toLowerCase().contains("gpu");
            
            // Green for GPU, yellow for CPU
            g.setColor(isGPU ? Color.GREEN : Color.YELLOW);
            
            int deviceCount = 0;
            try {
                deviceCount = Nd4j.getAffinityManager().getNumberOfDevices();
            } catch (Exception ex) {
                deviceCount = isGPU ? 1 : 0; // assume one device if GPU backend but count unavailable
            }
            
            g.drawString("计算后端: " + (isGPU ? "GPU加速" : "CPU计算") + " | 设备数: " + deviceCount, 10, 175);
            
            // Model line (cyan): device label is inferred from backend type, not queried
            if (model != null && model.numParams() > 0) {
                g.setColor(Color.CYAN);
                String deviceInfo = "未知";
                try {
                    if (model.params() != null && model.params().data() != null) {
                        // NOTE(review): this only confirms params exist; device label follows backend type
                        deviceInfo = isGPU ? "GPU" : "CPU";
                    }
                } catch (Exception ex) {
                    deviceInfo = isGPU ? "GPU" : "CPU";
                }
                g.drawString("DQN模型: " + deviceInfo + "设备 | 参数量: " + String.format("%,d", model.numParams()), 10, 200);
            }
            
        } catch (Exception e) {
            // Backend query failed: show the error in red instead
            g.setColor(Color.RED);
            g.drawString("后端状态: " + e.getMessage(), 10, 175);
        }
        
        // Bottom control hint
        g.setColor(Color.LIGHT_GRAY);
        g.setFont(new Font("微软雅黑", Font.PLAIN, 12));
        g.drawString("操作提示: 空格键/R键重新开始游戏", 10, HEIGHT - 20);
    }
    
    /**
     * Maps a direction code (U/D/L/R) to its Chinese display name;
     * any other code yields "未知" (unknown).
     */
    private String getDirectionName(char dir) {
        if (dir == 'U') {
            return "上";
        }
        if (dir == 'D') {
            return "下";
        }
        if (dir == 'L') {
            return "左";
        }
        if (dir == 'R') {
            return "右";
        }
        return "未知";
    }

    /**
     * Advances the snake one cell in the current direction.
     * If food was just eaten, a duplicate tail segment is appended first so the
     * body grows by one cell after the shift.
     */
    public void move() {
        if (foodEaten) {
            int tail = snakeX.size() - 1;
            snakeX.add(snakeX.get(tail));
            snakeY.add(snakeY.get(tail));
            foodEaten = false;
        }

        // Shift each segment into its predecessor's cell, tail first
        for (int seg = snakeX.size() - 1; seg > 0; seg--) {
            snakeX.set(seg, snakeX.get(seg - 1));
            snakeY.set(seg, snakeY.get(seg - 1));
        }

        // Move the head one cell along the current heading
        if (direction == 'U') {
            snakeY.set(0, snakeY.get(0) - UNIT_SIZE);
        } else if (direction == 'D') {
            snakeY.set(0, snakeY.get(0) + UNIT_SIZE);
        } else if (direction == 'L') {
            snakeX.set(0, snakeX.get(0) - UNIT_SIZE);
        } else if (direction == 'R') {
            snakeX.set(0, snakeX.get(0) + UNIT_SIZE);
        }
    }

    /**
     * Encodes the current game situation as the 16-feature vector fed to the
     * DQN: danger flags in the four directions (0-3), one-hot current
     * direction (4-7), food-relative-position flags (8-11), normalized
     * Manhattan distance to the food (12), normalized snake length (13),
     * free-neighbour ratio around the head (14), and normalized distance to
     * the nearest wall (15). All features are in [0, 1].
     *
     * @return a new 16-element state vector
     */
    public double[] getState() {
        double[] state = new double[16];
        int headX = snakeX.get(0);
        int headY = snakeY.get(0);

        // Danger detection in 4 directions (wall or body one cell away)
        state[0] = isDanger(headX, headY - UNIT_SIZE) ? 1 : 0; // Up
        state[1] = isDanger(headX, headY + UNIT_SIZE) ? 1 : 0; // Down
        state[2] = isDanger(headX - UNIT_SIZE, headY) ? 1 : 0; // Left
        state[3] = isDanger(headX + UNIT_SIZE, headY) ? 1 : 0; // Right

        // Current direction (one-hot)
        state[4] = direction == 'U' ? 1 : 0;
        state[5] = direction == 'D' ? 1 : 0;
        state[6] = direction == 'L' ? 1 : 0;
        state[7] = direction == 'R' ? 1 : 0;

        // Food direction (relative to head)
        state[8] = foodX < headX ? 1 : 0; // Food is left
        state[9] = foodX > headX ? 1 : 0; // Food is right
        state[10] = foodY < headY ? 1 : 0; // Food is up
        state[11] = foodY > headY ? 1 : 0; // Food is down

        // Manhattan distance to food, normalized by board perimeter half
        double distance = Math.abs(headX - foodX) + Math.abs(headY - foodY);
        state[12] = distance / (WIDTH + HEIGHT); // Normalized distance

        // Snake length as a fraction of the total board cells
        state[13] = (double) snakeX.size() / GAME_UNITS;

        // Fraction of the head's 4 neighbours that are safe
        state[14] = (double) getFreeSpaceCount() / 4.0;

        // Distance to the nearest wall, normalized to roughly [0, 1]
        double minWallDistance = Math.min(Math.min(headX, WIDTH - headX), Math.min(headY, HEIGHT - headY));
        state[15] = minWallDistance / (Math.max(WIDTH, HEIGHT) / 2.0);

        return state;
    }

    /**
     * Reports whether the cell at (x, y) is lethal: outside the board, or
     * occupied by any snake segment other than the head.
     */
    private boolean isDanger(int x, int y) {
        boolean outsideBoard = x < 0 || x >= WIDTH || y < 0 || y >= HEIGHT;
        if (outsideBoard) {
            return true;
        }
        // Segment 0 is the head itself, so start from 1
        for (int seg = 1; seg < snakeX.size(); seg++) {
            if (x == snakeX.get(seg) && y == snakeY.get(seg)) {
                return true;
            }
        }
        return false;
    }

    /**
     * If the head is on the food cell: bump the score, flag pending growth
     * (applied on the next move()), spawn new food, and reset the
     * consecutive-food-move counter.
     */
    public void checkFood() {
        boolean headOnFood = snakeX.get(0) == foodX && snakeY.get(0) == foodY;
        if (!headOnFood) {
            return;
        }
        score++;
        foodEaten = true;
        // Growth is deferred to move(), hence size() + 1 in the log line
        System.out.println("🎉 吃到食物! 得分: " + score + ", 蛇身长度: " + (snakeX.size() + 1));
        spawnFood();
        consecutiveFoodMoves = 0; // 吃到食物后重置连续移动计数
    }

    /**
     * Checks for fatal conditions after a move and returns the associated
     * penalty (negative) or 0.0 for a normal move. Three cases:
     * wall hit (-100), self-bite (-150), and — only for snakes longer than
     * 10 segments — being trapped with no safe path to the food (-80).
     * On any of them the game ends and a new one starts immediately.
     *
     * @return the penalty for this step, or 0.0 if no collision occurred
     */
    public double checkCollision() {
        int headX = snakeX.get(0);
        int headY = snakeY.get(0);

        // Wall collision
        if (headX < 0 || headX >= WIDTH || headY < 0 || headY >= HEIGHT) {
            return endGame("撞墙了", -100.0);
        }

        // Self collision (head against any body segment)
        for (int i = 1; i < snakeX.size(); i++) {
            if (headX == snakeX.get(i) && headY == snakeY.get(i)) {
                return endGame("咬到自己了", -150.0);
            }
        }

        // Only check for dead-ends once the snake is long enough for it to matter
        if (snakeX.size() > 10 && !hasSafePath()) {
            return endGame("陷入死胡同", -80.0);
        }

        return 0.0; // No penalty for normal moves
    }

    /**
     * Shared game-over handling (was duplicated across the three collision
     * branches): stops the game, logs the reason and penalty, records a new
     * best score if applicable, and immediately starts the next game.
     *
     * @param reason  human-readable cause shown in the log
     * @param penalty negative reward to return to the caller
     * @return {@code penalty}, for convenient tail-returning
     */
    private double endGame(String reason, double penalty) {
        running = false;
        System.out.println("💥 游戏结束: " + reason + ", 惩罚: " + penalty);
        if (score > maxScore) {
            maxScore = score;
            System.out.println("🏆 新的最高分: " + maxScore);
        }
        startGame();
        return penalty;
    }

    /**
     * Manhattan distance from (headX, headY) to the food, floored at 1.0 so
     * callers can safely divide by it.
     */
    private double getDistanceToFood(int headX, int headY) {
        double manhattan = Math.abs(headX - foodX) + Math.abs(headY - foodY);
        return manhattan < 1.0 ? 1.0 : manhattan;
    }

    /**
     * Breadth-first search over the grid from the head's cell to the food's
     * cell, treating every body segment (head excluded) as an obstacle.
     * Returns false immediately if head or food coordinates are off-board.
     */
    private boolean hasSafePath() {
        int headX = snakeX.get(0);
        int headY = snakeY.get(0);
        // Reject invalid coordinates up front
        if (headX < 0 || headX >= WIDTH || headY < 0 || headY >= HEIGHT
                || foodX < 0 || foodX >= WIDTH || foodY < 0 || foodY >= HEIGHT) {
            System.out.println("⚠️ 坐标无效: 蛇头 (" + headX + ", " + headY + "), 食物 (" + foodX + ", " + foodY + ")");
            return false;
        }

        int cols = WIDTH / UNIT_SIZE;
        int rows = HEIGHT / UNIT_SIZE;
        // One grid doubling as both "visited" and "blocked by body":
        // body cells are pre-marked so BFS never enters them.
        boolean[][] seenOrBlocked = new boolean[cols][rows];
        for (int i = 1; i < snakeX.size(); i++) {
            seenOrBlocked[snakeX.get(i) / UNIT_SIZE][snakeY.get(i) / UNIT_SIZE] = true;
        }

        Queue<int[]> frontier = new LinkedList<>();
        int startCol = headX / UNIT_SIZE;
        int startRow = headY / UNIT_SIZE;
        frontier.add(new int[]{startCol, startRow});
        seenOrBlocked[startCol][startRow] = true;

        int[][] steps = {{-1, 0}, {1, 0}, {0, -1}, {0, 1}}; // left, right, up, down
        while (!frontier.isEmpty()) {
            int[] cell = frontier.poll();
            // Found the food's cell: a safe path exists
            if (cell[0] * UNIT_SIZE == foodX && cell[1] * UNIT_SIZE == foodY) {
                System.out.println("✅ 找到通往食物的安全路径: (" + foodX + ", " + foodY + ")");
                return true;
            }
            for (int[] step : steps) {
                int nc = cell[0] + step[0];
                int nr = cell[1] + step[1];
                if (nc >= 0 && nc < cols && nr >= 0 && nr < rows && !seenOrBlocked[nc][nr]) {
                    seenOrBlocked[nc][nr] = true;
                    frontier.add(new int[]{nc, nr});
                }
            }
        }
        System.out.println("❌ 无法找到通往食物的安全路径: (" + foodX + ", " + foodY + ")");
        return false;
    }

    /**
     * Counts how many of the head's four orthogonal neighbour cells are safe
     * (not a wall and not a body segment). Result is in [0, 4].
     */
    private int getFreeSpaceCount() {
        int hx = snakeX.get(0);
        int hy = snakeY.get(0);
        int[] dx = {0, 0, -UNIT_SIZE, UNIT_SIZE};
        int[] dy = {-UNIT_SIZE, UNIT_SIZE, 0, 0};
        int free = 0;
        for (int k = 0; k < dx.length; k++) {
            if (!isDanger(hx + dx[k], hy + dy[k])) {
                free++;
            }
        }
        return free;
    }

    /**
     * Simulates applying {@code action} (0=U, 1=D, 2=L, 3=R) for one step and
     * reports whether the head would end up strictly closer (Manhattan) to the
     * food. A reversal request keeps the current heading, matching game rules;
     * a step off the board counts as "not closer".
     */
    private boolean isPredictiveCloser(int action) {
        int hx = snakeX.get(0);
        int hy = snakeY.get(0);
        double before = getDistanceToFood(hx, hy);

        // Resolve the effective heading: reversals are ignored
        char simulated = direction;
        if (action == 0 && direction != 'D') {
            simulated = 'U';
        } else if (action == 1 && direction != 'U') {
            simulated = 'D';
        } else if (action == 2 && direction != 'R') {
            simulated = 'L';
        } else if (action == 3 && direction != 'L') {
            simulated = 'R';
        }

        // Project the head one cell along the simulated heading
        int nx = hx;
        int ny = hy;
        if (simulated == 'U') {
            ny -= UNIT_SIZE;
        } else if (simulated == 'D') {
            ny += UNIT_SIZE;
        } else if (simulated == 'L') {
            nx -= UNIT_SIZE;
        } else if (simulated == 'R') {
            nx += UNIT_SIZE;
        }

        if (nx < 0 || nx >= WIDTH || ny < 0 || ny >= HEIGHT) {
            return false;
        }
        return getDistanceToFood(nx, ny) < before;
    }

    /**
     * True if {@code newDirection} is the exact opposite of the most recently
     * recorded direction (an immediate reversal). At least two recorded
     * directions are required, matching the original guard.
     */
    private boolean isBacktracking(char newDirection) {
        if (recentDirections.size() < 2) {
            return false;
        }
        // peekLast() reads the newest entry directly; the original copied the
        // entire deque into an ArrayList just to index its last element.
        char last = recentDirections.peekLast();
        return (newDirection == 'U' && last == 'D')
                || (newDirection == 'D' && last == 'U')
                || (newDirection == 'L' && last == 'R')
                || (newDirection == 'R' && last == 'L');
    }

    /**
     * ε-greedy action selection. With probability ε, explores with a "smart"
     * random move restricted to actions whose next cell is not an immediate
     * death (falling back to uniform random if none is safe); otherwise
     * exploits the network by taking the argmax-Q action for {@code state}.
     *
     * @param state 16-feature state vector (see getState())
     * @return action index 0-3 (U/D/L/R)
     */
    private int chooseAction(double[] state) {
        if (random.nextDouble() < epsilon) {
            // Exploration branch: collect the actions that do not step into danger
            List<Integer> safeActions = new ArrayList<>();
            int hx = snakeX.get(0);
            int hy = snakeY.get(0);

            for (int candidate = 0; candidate < 4; candidate++) {
                // Resolve the effective heading (reversals keep the current one)
                char tryDir = direction;
                if (candidate == 0 && direction != 'D') {
                    tryDir = 'U';
                } else if (candidate == 1 && direction != 'U') {
                    tryDir = 'D';
                } else if (candidate == 2 && direction != 'R') {
                    tryDir = 'L';
                } else if (candidate == 3 && direction != 'L') {
                    tryDir = 'R';
                }

                // Project the head one cell along that heading
                int nx = hx;
                int ny = hy;
                if (tryDir == 'U') {
                    ny -= UNIT_SIZE;
                } else if (tryDir == 'D') {
                    ny += UNIT_SIZE;
                } else if (tryDir == 'L') {
                    nx -= UNIT_SIZE;
                } else if (tryDir == 'R') {
                    nx += UNIT_SIZE;
                }

                if (!isDanger(nx, ny)) {
                    safeActions.add(candidate);
                }
            }

            int action = safeActions.isEmpty()
                    ? random.nextInt(4)
                    : safeActions.get(random.nextInt(safeActions.size()));
            System.out.println("🎲 智能随机动作: " + action);
            return action;
        }

        // Exploitation branch: argmax over the network's predicted Q-values
        INDArray input = Nd4j.create(state).reshape(1, 16);
        INDArray output = model.output(input);
        int action = output.argMax(1).getInt(0);
        System.out.println("🧠 神经网络选择动作: " + action + " (Q值: " + output + ")");
        return action;
    }

    /**
     * Records one transition into the replay buffer and, once it holds at
     * least one minibatch, performs a standard DQN update:
     * target = r for terminal steps, else r + GAMMA * max_a' Q(s', a').
     * Also decays ε (every 5th game) toward EPSILON_MIN.
     *
     * Fixes vs the original:
     * - The replay deque was copied into a fresh ArrayList INSIDE the sampling
     *   loop, costing O(BATCH_SIZE * memorySize) per step; it is now
     *   snapshotted once.
     * - Next-state Q-values were computed with one forward pass per sample;
     *   they are now computed in a single batched pass.
     * - Removed the unused local `states` double[][] array.
     *
     * @param state     observed state before the action
     * @param action    action index taken (0-3)
     * @param reward    immediate reward received
     * @param nextState observed state after the action
     * @param done      whether the episode ended on this transition
     */
    private void trainDQN(double[] state, int action, double reward, double[] nextState, boolean done) {
        totalReward += reward;
        replayMemory.add(new Transition(state, action, reward, nextState, done));
        if (replayMemory.size() > REPLAY_MEMORY_SIZE) {
            replayMemory.removeFirst();
        }

        // Train every step once the buffer holds a full minibatch
        if (replayMemory.size() >= BATCH_SIZE) {
            long startTime = System.nanoTime();

            try {
                // Snapshot the deque ONCE for O(1) random access while sampling
                List<Transition> memory = new ArrayList<>(replayMemory);
                List<Transition> batch = new ArrayList<>(BATCH_SIZE);
                for (int i = 0; i < BATCH_SIZE; i++) {
                    batch.add(memory.get(random.nextInt(memory.size())));
                }

                // Assemble current- and next-state batches (next-state rows are
                // left as zeros for terminal transitions — their Q-values are
                // never read below)
                INDArray statesBatch = Nd4j.zeros(BATCH_SIZE, 16);
                INDArray nextStatesBatch = Nd4j.zeros(BATCH_SIZE, 16);
                for (int i = 0; i < BATCH_SIZE; i++) {
                    Transition t = batch.get(i);
                    statesBatch.putRow(i, Nd4j.create(t.state));
                    if (!t.done) {
                        nextStatesBatch.putRow(i, Nd4j.create(t.nextState));
                    }
                }

                // Two batched forward passes instead of 1 + BATCH_SIZE single ones
                INDArray currentQBatch = model.output(statesBatch);
                INDArray nextQBatch = model.output(nextStatesBatch);

                double[][] targets = new double[BATCH_SIZE][4];
                for (int i = 0; i < BATCH_SIZE; i++) {
                    Transition t = batch.get(i);
                    INDArray target = currentQBatch.getRow(i).dup();
                    if (t.done) {
                        // Terminal: no future reward to bootstrap from
                        target.putScalar(t.action, t.reward);
                    } else {
                        double maxNextQ = nextQBatch.getRow(i).maxNumber().doubleValue();
                        target.putScalar(t.action, t.reward + GAMMA * maxNextQ);
                    }
                    targets[i] = target.toDoubleVector();
                }

                // Single batched gradient step
                model.fit(statesBatch, Nd4j.create(targets));

                double trainingTime = (System.nanoTime() - startTime) / 1_000_000.0; // milliseconds
                if (gameCount % 50 == 0) {
                    System.out.println("⏱️ 训练耗时: " + String.format("%.2f", trainingTime) + "毫秒 | " +
                                     "📊 经验池使用: " + replayMemory.size() + "/" + REPLAY_MEMORY_SIZE);
                }

            } catch (Exception ex) {
                System.err.println("❌ 训练过程出错: " + ex.getMessage());
                ex.printStackTrace();
            }
        }

        // Slow ε decay for sustained exploration, only every 5th game
        if (epsilon > EPSILON_MIN && gameCount % 5 == 0) {
            epsilon *= EPSILON_DECAY;
        }
    }

    /**
     * Per-tick game-loop callback, fired by the Swing Timer every DELAY ms.
     *
     * While the game is running, each tick:
     *  1. reads the current state vector and asks the policy for an action;
     *  2. applies the action as a direction change (direct 180° reversals are
     *     ignored, matching the keyboard handler);
     *  3. moves the snake and accumulates a shaped reward (survival bonus,
     *     backtracking penalty, distance-to-food shaping, food bonus, free-space
     *     bonus, safe-path penalty, length bonus);
     *  4. if the move ended the game, REPLACES the shaped reward with the
     *     collision reward;
     *  5. hands the transition (state, action, reward, nextState, done) to
     *     trainDQN and then updates the food.
     *
     * @param e the Timer event (only used as the tick trigger)
     */
    @Override
    public void actionPerformed(ActionEvent e) {
        if (running) {
            double[] state = getState();
            int action = chooseAction(state);
            int oldHeadX = snakeX.get(0);
            int oldHeadY = snakeY.get(0);
            double oldDistance = getDistanceToFood(oldHeadX, oldHeadY);

            // Translate the action index (0=U, 1=D, 2=L, 3=R) into a direction
            // change; a move directly opposite the current direction is a no-op.
            char oldDirection = direction;
            switch (action) {
                case 0:
                    if (direction != 'D') direction = 'U';
                    break;
                case 1:
                    if (direction != 'U') direction = 'D';
                    break;
                case 2:
                    if (direction != 'R') direction = 'L';
                    break;
                case 3:
                    if (direction != 'L') direction = 'R';
                    break;
            }
            // Track only effective turns; recentDirections keeps the last two,
            // presumably consumed by isBacktracking() — confirm in its definition.
            if (direction != oldDirection) {
                System.out.println("Action chosen: " + action + " -> Direction: " + direction);
                recentDirections.add(direction);
                if (recentDirections.size() > 2) recentDirections.removeFirst();
            }

            double reward = 0.1; // Small positive reward for surviving

            // Reduce backtracking penalty
            if (isBacktracking(direction)) {
                reward -= 2.0;
                System.out.println("Penalty: -2.0 for backtracking");
            }

            // Reward for moving toward food
            if (isPredictiveCloser(action)) {
                reward += 1.0;
                System.out.println("Reward: +1.0 for predictive move closer to food");
            }

            // move() advances the snake; foodEaten is presumably set inside
            // move() when the head lands on the food — verify against move().
            move();
            double newDistance = getDistanceToFood(snakeX.get(0), snakeY.get(0));

            if (!foodEaten) {
                // Distance shaping: reward shrinking distance, with a streak
                // bonus after 3 consecutive approaching moves; the divisor floor
                // (20 vs 40) makes the approach reward larger than the penalty.
                if (newDistance < oldDistance) {
                    double distanceReward = DISTANCE_REWARD_SCALE / Math.max(newDistance, 20.0);
                    reward += distanceReward;
                    System.out.println("Reward: +" + String.format("%.2f", distanceReward) + " for moving closer to food");
                    consecutiveFoodMoves++;
                    if (consecutiveFoodMoves >= 3) {
                        reward += 2.0;
                        System.out.println("Reward: +2.0 for consecutive moves toward food");
                    }
                } else if (newDistance > oldDistance) {
                    double distancePenalty = DISTANCE_REWARD_SCALE / Math.max(newDistance, 40.0);
                    reward -= distancePenalty;
                    System.out.println("Penalty: -" + String.format("%.2f", distancePenalty) + " for moving away from food");
                    consecutiveFoodMoves = 0;
                } else {
                    consecutiveFoodMoves = 0;
                }
            } else {
                // Big reward for eating food, bonus for longer snake
                double foodReward = 50.0 + (snakeX.size() * 2.0);
                reward += foodReward;
                System.out.println("Reward: +" + String.format("%.2f", foodReward) + " for eating food (length bonus included)");
                consecutiveFoodMoves = 0;
            }

            // Reward for maintaining free space, but don't penalize too much for tight spaces
            int freeSpace = getFreeSpaceCount();
            if (freeSpace >= 3) {
                reward += 0.3 * freeSpace;
                System.out.println("Reward: +" + String.format("%.2f", 0.3 * freeSpace) + " for maintaining free space");
            } else if (freeSpace == 0) {
                reward -= 5.0; // Only penalize when completely trapped
                System.out.println("Penalty: -5.0 for being completely trapped");
            }

            // Only check safe path for longer snakes and reduce penalty
            if (snakeX.size() > 10 && !hasSafePath()) {
                reward -= 1.0;
                System.out.println("Penalty: -1.0 for no safe path to food (long snake)");
            }

            // Bonus for achieving longer lengths
            if (snakeX.size() > 5) {
                double lengthBonus = (snakeX.size() - 5) * 0.1;
                reward += lengthBonus;
                System.out.println("Length bonus: +" + String.format("%.2f", lengthBonus) + " for snake length " + snakeX.size());
            }

            double[] nextState = getState();
            // NOTE(review): checkCollision() appears to flip `running` to false
            // on death and return the terminal reward — confirm. When that
            // happens the entire shaped reward above is discarded, not added to.
            double collisionReward = checkCollision();
            if (!running) {
                reward = collisionReward;
            }
            // done flag = !running, i.e. the episode ended on this step.
            trainDQN(state, action, reward, nextState, !running);
            checkFood();
        }
        repaint();
    }

    /**
     * Keyboard controller for manual play and restarts.
     *
     * Arrow keys steer the snake; a direct 180° reversal relative to the
     * current direction is ignored. SPACE or R restarts the game, but only
     * when it is not currently running.
     */
    private class MyKeyAdapter extends KeyAdapter {
        @Override
        public void keyPressed(KeyEvent e) {
            int key = e.getKeyCode();
            if (key == KeyEvent.VK_UP) {
                if (direction != 'D') direction = 'U';
            } else if (key == KeyEvent.VK_DOWN) {
                if (direction != 'U') direction = 'D';
            } else if (key == KeyEvent.VK_LEFT) {
                if (direction != 'R') direction = 'L';
            } else if (key == KeyEvent.VK_RIGHT) {
                if (direction != 'L') direction = 'R';
            } else if (key == KeyEvent.VK_SPACE || key == KeyEvent.VK_R) {
                // Both keys act as "restart" and are no-ops mid-game.
                if (!running) startGame();
            }
        }
    }

    private class Transition {
        double[] state;
        int action;
        double reward;
        double[] nextState;
        boolean done;

        Transition(double[] state, int action, double reward, double[] nextState, boolean done) {
            this.state = state;
            this.action = action;
            this.reward = reward;
            this.nextState = nextState;
            this.done = done;
        }
    }

    /**
     * Application entry point.
     *
     * ND4J is left to auto-select its backend: if GPU dependencies are on the
     * classpath and CUDA is usable it runs on the GPU, otherwise it falls back
     * to the CPU. No backend is forced here.
     *
     * All Swing components are created and shown on the Event Dispatch Thread
     * via {@link SwingUtilities#invokeLater}, as required by Swing's
     * single-thread rule (the previous version built the UI on the main
     * thread). If DQN initialization fails, the error is logged and reported
     * in a dialog instead of crashing silently.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        System.out.println("Starting Snake Game with DQN...");
        System.out.println("Detecting best available backend (GPU/CPU)...");

        SwingUtilities.invokeLater(() -> {
            JFrame frame = new JFrame("Snake Game with DQN - Auto Backend");

            try {
                SnakeGameWithDQN game = new SnakeGameWithDQN();
                frame.add(game);
                frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE);
                frame.pack();
                frame.setLocationRelativeTo(null);
                frame.setVisible(true);

                System.out.println("Game started successfully!");

            } catch (Exception e) {
                System.err.println("Failed to start game: " + e.getMessage());
                e.printStackTrace();

                JOptionPane.showMessageDialog(frame,
                    "Failed to initialize DQN backend.\n" +
                    "Error: " + e.getMessage() + "\n\n" +
                    "Please check your ND4J dependencies.",
                    "Initialization Error",
                    JOptionPane.ERROR_MESSAGE);
            }
        });
    }
}