import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import sys
import os

# Add the project root directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from agents.base_agent import BaseAgent
from collections import deque, namedtuple
import random

# A single transition (s, a, r, s', done) stored in the replay buffer.
Experience = namedtuple('Experience', ['state', 'action', 'reward', 'next_state', 'done'])

class DQN(nn.Module):
    """Fully connected Q-value network.

    Maps a state vector to one Q-value per discrete action: three hidden
    layers with ReLU activations followed by a linear output layer.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Return Q-values with shape (..., output_size) for input states x."""
        for hidden in (self.fc1, self.fc2, self.fc3):
            x = F.relu(hidden(x))
        # Output layer is linear: Q-values may be any real number.
        return self.fc4(x)

class DQNAgent(BaseAgent):
    """Deep Q-Network agent with epsilon-greedy exploration.

    Uses an experience-replay buffer and a target network that is hard-synced
    with the online Q-network every `target_update_freq` learning steps.
    """

    def __init__(self, action_space, observation_space,
                 lr=1e-3, gamma=0.99, epsilon=1.0, epsilon_decay=0.995,
                 epsilon_min=0.01, buffer_size=10000, batch_size=32,
                 target_update_freq=1000, hidden_size=128):
        """
        Args:
            action_space: discrete action space (must provide `.n` and `.sample()`).
            observation_space: observation space (must provide `.shape[0]`).
            lr: Adam learning rate.
            gamma: discount factor for future rewards.
            epsilon, epsilon_decay, epsilon_min: epsilon-greedy schedule —
                epsilon is multiplied by epsilon_decay after each update until
                it reaches epsilon_min.
            buffer_size: replay buffer capacity (oldest entries evicted first).
            batch_size: minibatch size for each learning update.
            target_update_freq: number of learn() updates between target syncs.
            hidden_size: hidden-layer width of both Q-networks.
        """
        super().__init__(action_space)

        self.observation_space = observation_space
        self.lr = lr
        self.gamma = gamma
        self.epsilon = epsilon
        self.epsilon_decay = epsilon_decay
        self.epsilon_min = epsilon_min
        self.batch_size = batch_size
        self.target_update_freq = target_update_freq
        self.hidden_size = hidden_size

        # Train on GPU when available.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Online and target networks share one architecture.
        self.q_network = DQN(observation_space.shape[0], hidden_size, action_space.n).to(self.device)
        self.target_network = DQN(observation_space.shape[0], hidden_size, action_space.n).to(self.device)
        self.optimizer = optim.Adam(self.q_network.parameters(), lr=lr)

        # Start with the target network identical to the online network; it is
        # only ever updated by hard copies, never by gradient steps, so keep it
        # in eval mode.
        self.target_network.load_state_dict(self.q_network.state_dict())
        self.target_network.eval()

        # Experience replay buffer.
        self.memory = deque(maxlen=buffer_size)

        # Counts learn() updates to schedule target-network syncs.
        self.step_count = 0

    def _to_tensor(self, x):
        """Return `x` as a detached float32 tensor on `self.device`.

        Fixes two issues in the original conversion logic: tensors that were
        not numpy arrays were never moved to `self.device` (device-mismatch
        crash when mixing CPU tensors with CUDA data), and tensors were stored
        in the replay buffer without `detach()`, which could keep autograd
        graphs alive.
        """
        if isinstance(x, np.ndarray):
            return torch.as_tensor(x, dtype=torch.float32, device=self.device)
        return x.detach().to(self.device, dtype=torch.float32)

    def predict(self, observation):
        """Select an action epsilon-greedily from the online Q-network."""
        observation = self._to_tensor(observation)

        # Explore with probability epsilon.
        if random.random() < self.epsilon:
            return self.action_space.sample()

        # Exploit: greedy action w.r.t. current Q-estimates.
        with torch.no_grad():
            q_values = self.q_network(observation)
        return q_values.argmax().item()

    def learn(self, observation, action, reward, next_observation, done):
        """Store one transition and run a single DQN update when possible."""
        self.memory.append(Experience(self._to_tensor(observation), action,
                                      reward, self._to_tensor(next_observation), done))

        # Wait until at least one full minibatch is available.
        if len(self.memory) < self.batch_size:
            return

        # Sample a minibatch of transitions uniformly at random.
        batch = random.sample(self.memory, self.batch_size)
        states = torch.stack([e.state for e in batch])
        actions = torch.as_tensor([e.action for e in batch], dtype=torch.long, device=self.device)
        rewards = torch.as_tensor([e.reward for e in batch], dtype=torch.float32, device=self.device)
        next_states = torch.stack([e.next_state for e in batch])
        dones = torch.as_tensor([e.done for e in batch], dtype=torch.bool, device=self.device)

        # Q(s, a) for the actions actually taken.
        current_q_values = self.q_network(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # Bootstrapped TD target; terminal transitions contribute reward only.
        with torch.no_grad():
            next_q_values = self.target_network(next_states).max(1)[0]
            target_q_values = rewards + (self.gamma * next_q_values * ~dones)

        loss = F.mse_loss(current_q_values, target_q_values)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

        # Anneal the exploration rate.
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay

        # Periodically hard-sync the target network with the online network.
        self.step_count += 1
        if self.step_count % self.target_update_freq == 0:
            self.target_network.load_state_dict(self.q_network.state_dict())

    def save(self, path):
        """Serialize both networks, the optimizer state, and epsilon to `path`."""
        torch.save({
            'q_network_state_dict': self.q_network.state_dict(),
            'target_network_state_dict': self.target_network.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'epsilon': self.epsilon
        }, path)

    def load(self, path):
        """Restore the state produced by `save`, remapping onto the current device."""
        checkpoint = torch.load(path, map_location=self.device)
        self.q_network.load_state_dict(checkpoint['q_network_state_dict'])
        self.target_network.load_state_dict(checkpoint['target_network_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.epsilon = checkpoint['epsilon']