import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import random
from collections import deque
import os
from snake_game import Snake, Food, GRID_WIDTH, GRID_HEIGHT, WALL_PASS
from path_solver import HamiltonianCycle, ShortestPathSolver, HybridDecisionSystem

# Fix all RNG seeds so training runs are reproducible.
SEED = 42
random.seed(SEED)
np.random.seed(SEED)
torch.manual_seed(SEED)
if torch.cuda.is_available():
    # Plain `if` instead of a conditional expression used only for its
    # side effect; seeds the CUDA RNG as well.
    torch.cuda.manual_seed(SEED)

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Q-network used by the DQN agent
class DQN(nn.Module):
    """Three-layer fully connected Q-network.

    Maps a state vector of length ``input_size`` to one Q-value per
    action (``output_size`` outputs, no final activation).

    Attribute names fc1/fc2/fc3 are part of the checkpoint format
    (state_dict keys) and must not be renamed.
    """

    def __init__(self, input_size, output_size):
        super().__init__()
        hidden = 128
        self.fc1 = nn.Linear(input_size, hidden)
        self.fc2 = nn.Linear(hidden, hidden)
        self.fc3 = nn.Linear(hidden, output_size)

    def forward(self, x):
        """Return raw Q-values for each action given state batch ``x``."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)

# Experience replay buffer
class ReplayBuffer:
    """Fixed-capacity FIFO store of transitions for experience replay.

    Each transition is a (state, action, reward, next_state, done)
    tuple; once ``capacity`` is reached the oldest entries are dropped.
    """

    def __init__(self, capacity):
        self.buffer = deque(maxlen=capacity)

    def add(self, state, action, reward, next_state, done):
        """Append one transition, evicting the oldest if full."""
        self.buffer.append((state, action, reward, next_state, done))

    def sample(self, batch_size):
        """Draw ``batch_size`` random transitions and return them as
        five parallel tuples (states, actions, rewards, next_states, dones)."""
        picked = random.sample(self.buffer, batch_size)
        return tuple(zip(*picked))

    def __len__(self):
        return len(self.buffer)

# DQN agent
class DQNAgent:
    """Deep Q-learning agent for the snake game.

    Combines an epsilon-greedy DQN (policy + target network with
    experience replay) with Hamiltonian-cycle / shortest-path hints
    from the hybrid decision system, which are folded into the state
    vector produced by get_state().
    """

    def __init__(self, state_size, action_size, learning_rate=0.001, gamma=0.99, epsilon=1.0, epsilon_min=0.01, epsilon_decay=0.995, buffer_size=10000, batch_size=64):
        self.state_size = state_size
        self.action_size = action_size
        self.gamma = gamma  # discount factor
        self.epsilon = epsilon  # exploration rate
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_decay
        self.batch_size = batch_size
        self.learning_rate = learning_rate

        # Policy network is trained every step; the target network is a
        # periodically synced copy used to compute stable TD targets.
        self.policy_net = DQN(state_size, action_size).to(device)
        self.target_net = DQN(state_size, action_size).to(device)
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()

        self.optimizer = optim.Adam(self.policy_net.parameters(), lr=learning_rate)
        self.memory = ReplayBuffer(buffer_size)
        self.update_count = 0  # counts replay() calls, drives target-net sync

        # Path solver providing Hamiltonian / shortest-path features.
        self.hybrid_system = HybridDecisionSystem(GRID_WIDTH, GRID_HEIGHT, WALL_PASS)

    def get_state(self, snake, food):
        """Encode the environment as a 22-dim binary feature vector.

        Features: current direction (4), food direction (4), adjacent
        danger (4), next Hamiltonian step direction (4), safe
        shortest-path directions (4), shortcut benefit/safety (2).
        """
        # Snake head position
        head_x, head_y = snake.get_head_position()

        # Food position
        food_x, food_y = food.position

        # Food offset relative to the head
        food_dir_x = food_x - head_x
        food_dir_y = food_y - head_y

        # With wall wrap-around enabled, going the other way around the
        # grid may be shorter, so fold offsets into the shorter arc.
        if WALL_PASS:
            if abs(food_dir_x) > GRID_WIDTH // 2:
                food_dir_x = -np.sign(food_dir_x) * (GRID_WIDTH - abs(food_dir_x))
            if abs(food_dir_y) > GRID_HEIGHT // 2:
                food_dir_y = -np.sign(food_dir_y) * (GRID_HEIGHT - abs(food_dir_y))

        # Current movement direction
        dir_x, dir_y = snake.direction

        # Danger flags for the four neighbouring cells (wall or body):
        # [up, right, down, left]
        danger = [0, 0, 0, 0]

        # Up
        check_x, check_y = head_x, head_y - 1
        if not WALL_PASS and (check_y < 0 or (check_x, check_y) in snake.positions):
            danger[0] = 1
        elif WALL_PASS and (check_x, check_y % GRID_HEIGHT) in snake.positions:
            danger[0] = 1

        # Right
        check_x, check_y = head_x + 1, head_y
        if not WALL_PASS and (check_x >= GRID_WIDTH or (check_x, check_y) in snake.positions):
            danger[1] = 1
        elif WALL_PASS and ((check_x % GRID_WIDTH), check_y) in snake.positions:
            danger[1] = 1

        # Down
        check_x, check_y = head_x, head_y + 1
        if not WALL_PASS and (check_y >= GRID_HEIGHT or (check_x, check_y) in snake.positions):
            danger[2] = 1
        elif WALL_PASS and (check_x, check_y % GRID_HEIGHT) in snake.positions:
            danger[2] = 1

        # Left
        check_x, check_y = head_x - 1, head_y
        if not WALL_PASS and (check_x < 0 or (check_x, check_y) in snake.positions):
            danger[3] = 1
        elif WALL_PASS and ((check_x % GRID_WIDTH), check_y) in snake.positions:
            danger[3] = 1

        # Hamiltonian-cycle and shortest-path hints from the hybrid system
        state_info = self.hybrid_system.get_state_info(snake, food)

        # Direction of the next cell on the Hamiltonian cycle
        next_hamiltonian = state_info['hamiltonian_next']
        ham_dir = [0, 0, 0, 0]  # [up, right, down, left]
        if next_hamiltonian[1] < head_y or (WALL_PASS and next_hamiltonian[1] % GRID_HEIGHT < head_y % GRID_HEIGHT):
            ham_dir[0] = 1  # up
        elif next_hamiltonian[0] > head_x or (WALL_PASS and next_hamiltonian[0] % GRID_WIDTH > head_x % GRID_WIDTH):
            ham_dir[1] = 1  # right
        elif next_hamiltonian[1] > head_y or (WALL_PASS and next_hamiltonian[1] % GRID_HEIGHT > head_y % GRID_HEIGHT):
            ham_dir[2] = 1  # down
        elif next_hamiltonian[0] < head_x or (WALL_PASS and next_hamiltonian[0] % GRID_WIDTH < head_x % GRID_WIDTH):
            ham_dir[3] = 1  # left

        # Directions that are both safe and on the shortest path to food
        shortest_path_safe = [0, 0, 0, 0]  # [up, right, down, left]
        for i, (is_safe, is_on_hamiltonian, is_on_shortest_path) in enumerate(state_info['direction_safety']):
            if is_safe and is_on_shortest_path:
                shortest_path_safe[i] = 1

        # Assemble the state vector
        state = [
            # Current movement direction
            dir_x == 0 and dir_y == -1,  # up
            dir_x == 1 and dir_y == 0,   # right
            dir_x == 0 and dir_y == 1,   # down
            dir_x == -1 and dir_y == 0,  # left

            # Food direction relative to the head
            food_dir_y < 0,  # food is above
            food_dir_x > 0,  # food is to the right
            food_dir_y > 0,  # food is below
            food_dir_x < 0,  # food is to the left

            # Immediate danger
            danger[0],  # danger above
            danger[1],  # danger to the right
            danger[2],  # danger below
            danger[3],  # danger to the left

            # Next step on the Hamiltonian cycle
            ham_dir[0],  # Hamiltonian points up
            ham_dir[1],  # Hamiltonian points right
            ham_dir[2],  # Hamiltonian points down
            ham_dir[3],  # Hamiltonian points left

            # Safe shortest-path directions
            shortest_path_safe[0],  # up is a safe shortest path
            shortest_path_safe[1],  # right is a safe shortest path
            shortest_path_safe[2],  # down is a safe shortest path
            shortest_path_safe[3],  # left is a safe shortest path

            # Shortcut information
            state_info['shortcut_benefit'] > 5,  # taking the shortcut saves a lot
            state_info['is_shortcut_safe'],      # the shortcut is safe
        ]

        return np.array(state, dtype=int)

    def act(self, state, training=True):
        """Select an action index for *state*.

        Epsilon-greedy during training; pure greedy (argmax over
        Q-values) when ``training=False``.
        """
        if training and np.random.rand() <= self.epsilon:
            return random.randrange(self.action_size)

        with torch.no_grad():
            state_tensor = torch.FloatTensor(state).unsqueeze(0).to(device)
            q_values = self.policy_net(state_tensor)
            return q_values.argmax().item()

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.add(state, action, reward, next_state, done)

    def replay(self):
        """Run one gradient step on a sampled minibatch.

        Returns the scalar loss, or None if the buffer does not yet
        hold a full batch. Also decays epsilon and syncs the target
        network every 100 updates.
        """
        if len(self.memory) < self.batch_size:
            return

        # Sample a minibatch from the replay buffer
        states, actions, rewards, next_states, dones = self.memory.sample(self.batch_size)

        # Convert to tensors
        states = torch.FloatTensor(np.array(states)).to(device)
        actions = torch.LongTensor(actions).to(device)
        rewards = torch.FloatTensor(rewards).to(device)
        next_states = torch.FloatTensor(np.array(next_states)).to(device)
        dones = torch.FloatTensor(dones).to(device)

        # Current Q-values and bootstrapped TD targets; (1 - dones)
        # zeroes the bootstrap term for terminal transitions.
        q_values = self.policy_net(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        next_q_values = self.target_net(next_states).max(1)[0]
        expected_q_values = rewards + self.gamma * next_q_values * (1 - dones)

        # Compute loss and update the policy network
        loss = F.mse_loss(q_values, expected_q_values.detach())
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Decay the exploration rate
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        # Periodically sync the target network with the policy network
        self.update_count += 1
        if self.update_count % 100 == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())

        return loss.item()

    def save(self, filename):
        """Persist both networks, optimizer state and epsilon to *filename*."""
        torch.save({
            'policy_net': self.policy_net.state_dict(),
            'target_net': self.target_net.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'epsilon': self.epsilon
        }, filename)
        # Fixed: the message previously had no placeholder and never
        # showed the actual path.
        print(f"模型已保存到 {filename}")

    def load(self, filename):
        """Restore a checkpoint written by save(); return True on success."""
        if os.path.isfile(filename):
            # map_location keeps a GPU-saved checkpoint loadable on a
            # CPU-only host (and vice versa).
            checkpoint = torch.load(filename, map_location=device)
            self.policy_net.load_state_dict(checkpoint['policy_net'])
            self.target_net.load_state_dict(checkpoint['target_net'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.epsilon = checkpoint['epsilon']
            print(f"模型已从 {filename} 加载")
            return True
        return False

# Train the DQN model
def train_dqn(episodes=1000, max_steps=1000, render=False):
    """Train the DQN agent on the snake environment.

    Args:
        episodes: number of training episodes.
        max_steps: step cap per episode.
        render: accepted for interface compatibility; not used here.

    Returns:
        (agent, scores): the trained agent and the per-episode scores.
    """
    # 22 features: direction(4) + food(4) + danger(4) + hamiltonian(4)
    # + shortest-path-safe(4) + shortcut info(2) — must match get_state().
    state_size = 22
    action_size = 4  # up, right, down, left
    agent = DQNAgent(state_size, action_size)

    # Resume from a previous checkpoint if one exists
    model_path = 'snake_dqn_model.pth'
    agent.load(model_path)

    # Map an action index to a grid direction: 0=up, 1=right, 2=down, 3=left
    action_to_direction = {0: (0, -1), 1: (1, 0), 2: (0, 1), 3: (-1, 0)}

    scores = []
    max_score = 0

    for episode in range(episodes):
        # Reset the environment
        snake = Snake()
        food = Food()
        score = 0
        steps_without_food = 0

        state = agent.get_state(snake, food)
        for step in range(max_steps):
            # Agent picks an action and we apply it
            action = agent.act(state)
            direction = action_to_direction[action]
            snake.change_direction(direction)
            move_result = snake.move()

            # Reward: +10 for eating, small step penalty otherwise
            if snake.get_head_position() == food.position:
                snake.grow()
                food.randomize_position(snake.positions)
                score += 1
                reward = 10
                steps_without_food = 0
            else:
                steps_without_food += 1
                reward = -0.1

            # Episode ends on collision or after starving for too long
            done = False
            if not move_result or steps_without_food > 100 * snake.length:
                done = True
                reward = -10 if not move_result else -1

            # NOTE: the former per-step hybrid_system.get_state_info()
            # call and the direction math it fed were removed — all of
            # their consumers (reward shaping) were commented out, and
            # get_state() below queries the hybrid system anyway.

            # Observe the new state and store the transition
            next_state = agent.get_state(snake, food)
            agent.remember(state, action, reward, next_state, done)
            state = next_state

            # Learn once the buffer holds more than one batch
            if len(agent.memory) > agent.batch_size:
                agent.replay()

            if done:
                break

        # Track scores and report progress
        scores.append(score)
        avg_score = np.mean(scores[-100:]) if len(scores) >= 100 else np.mean(scores)
        print(f"Episode: {episode+1}/{episodes}, Score: {score}, Average Score: {avg_score:.2f}, Epsilon: {agent.epsilon:.4f}")

        # Checkpoint whenever a new best score is reached
        if score > max_score:
            max_score = score
            agent.save(model_path)

    return agent, scores

# Play the game with the trained model
def play_game(model_path='snake_dqn_model.pth', episodes=5, speed=10):
    """Run the trained agent with pygame rendering.

    model_path: checkpoint file produced by DQNAgent.save().
    episodes:   number of games to play back-to-back.
    speed:      frames per second passed to the pygame clock.
    """
    # Lazy imports keep pygame optional for training-only runs
    import pygame
    import time
    import sys
    from snake_game import render_ai_game
    
    # Make sure pygame has been initialized
    if not pygame.get_init():
        pygame.init()
    
    # Initialize the agent; state size must match the one used in training
    state_size = 22
    action_size = 4
    agent = DQNAgent(state_size, action_size)
    
    # Load the trained model; bail out if it is missing
    if not agent.load(model_path):
        print(f"找不到模型文件 {model_path}，请先训练模型")
        return
    
    # Evaluation mode: no exploration, pure exploitation of the learned policy
    agent.epsilon = 0.0
    
    # Clock object controls the game speed
    clock = pygame.time.Clock()
    
    try:
        for episode in range(episodes):
            # Reset the environment
            snake = Snake()
            food = Food()
            score = 0
            steps = 0
            
            print(f"开始游戏 {episode+1}")
            
            while True:
                # Handle pygame events (window close aborts everything)
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        pygame.quit()
                        print("游戏被用户中断")
                        return
                
                # Current state
                state = agent.get_state(snake, food)
                
                # DQN picks an action (greedy, since epsilon is 0)
                dqn_action = agent.act(state, training=False)
                
                # Convert the DQN action to a direction.
                # NOTE(review): dqn_direction is computed but never used below;
                # the hybrid system receives the raw action index instead.
                if dqn_action == 0:  # up
                    dqn_direction = (0, -1)
                elif dqn_action == 1:  # right
                    dqn_direction = (1, 0)
                elif dqn_action == 2:  # down
                    dqn_direction = (0, 1)
                else:  # left
                    dqn_direction = (-1, 0)
                
                # The hybrid decision system makes the final call
                final_direction = agent.hybrid_system.decide_action(snake, food, dqn_action)
                
                # Decision info used for the on-screen display
                state_info = agent.hybrid_system.get_state_info(snake, food)
                head_position = snake.get_head_position()
                next_hamiltonian = state_info['hamiltonian_next']
                
                # Determine whether the final move follows the Hamiltonian path
                ham_dx = next_hamiltonian[0] - head_position[0]
                ham_dy = next_hamiltonian[1] - head_position[1]
                # Wall wrap-around: a jump larger than 1 means the step
                # crosses the grid edge, so flip the sign to the unit step
                if WALL_PASS:
                    if abs(ham_dx) > 1:
                        ham_dx = -1 if ham_dx > 0 else 1
                    if abs(ham_dy) > 1:
                        ham_dy = -1 if ham_dy > 0 else 1
                ham_direction = (ham_dx, ham_dy)
                
                using_hamiltonian = final_direction == ham_direction
                using_shortcut = state_info['is_shortcut_safe'] and not using_hamiltonian
                
                # Apply the final decision
                snake.change_direction(final_direction)
                move_result = snake.move()
                
                # Check whether food was eaten
                if snake.get_head_position() == food.position:
                    snake.grow()
                    food.randomize_position(snake.positions)
                    score += 1
                
                steps += 1
                
                # Render the frame with a description of the decision taken
                decision_type = "Hamiltonian安全路径" if using_hamiltonian else "DQN捷径" if using_shortcut else "DQN决策"
                ai_info = f"AI思考: DQN选择{'上' if dqn_action == 0 else '右' if dqn_action == 1 else '下' if dqn_action == 2 else '左'}, 最终使用{decision_type}"
                render_ai_game(snake, food, score, episode+1, steps, ai_info)
                
                # Control the game speed
                clock.tick(speed)
                
                # Game over on collision or once the step cap is reached
                if not move_result or steps > 1000:
                    print(f"游戏 {episode+1} 结束，得分: {score}, 步数: {steps}")
                    # Pause briefly so the player can see the final state
                    time.sleep(2)
                    break
    except Exception as e:
        print(f"游戏发生错误: {e}")
    finally:
        print("所有游戏结束")
        # Make sure pygame is shut down before returning
        if pygame.get_init():
            pygame.quit()

# Entry point
def main():
    """Run the trained model in play mode (training is disabled)."""
    # Training (currently disabled):
    # print("开始训练DQN模型...")
    # agent, scores = train_dqn(episodes=20)

    # Play with the trained model
    print("\n使用训练好的模型玩游戏...")
    play_game()


if __name__ == "__main__":
    main()