import torch
from collections import deque


class DQNTrainer:
    """DQN trainer — owns the network-update (training-step) logic.

    Collaborators (injected, duck-typed):
        config:          hyper-parameter namespace (dqn_batch_size, gamma,
                         double_dqn, max_q_value, grad_clip_norm,
                         use_prioritized_replay, priority_beta_start/end, ...).
        network_manager: provides policy_net, target_net, criterion,
                         optimizer, step_scheduler() and update_target_network().
        replay_buffer:   supports __len__, sample(batch_size, beta) and, for
                         prioritized replay, update_priorities(indices, prios).
        device:          torch.device all batch tensors are moved to.
    """

    def __init__(self, config, network_manager, replay_buffer, device):
        self.config = config
        self.network_manager = network_manager
        self.replay_buffer = replay_buffer
        self.device = device

        # Rolling training statistics, bounded so memory stays constant.
        self.loss_history = deque(maxlen=1000)
        self.q_value_history = deque(maxlen=1000)
        self.grad_norm_history = deque(maxlen=1000)

    def update(self, steps_done):
        """Sample a batch from the replay buffer and run one training step.

        Args:
            steps_done: global step counter; drives LR scheduling and the
                prioritized-replay beta annealing.

        Returns:
            (loss, mean_q) as Python floats, or (None, None) when the buffer
            does not yet hold a full batch.
        """
        if len(self.replay_buffer) < self.config.dqn_batch_size:
            return None, None

        # Sample a batch; steps_done lets beta anneal with real progress.
        batch_data, weights, indices = self._sample_batch(steps_done)

        return self._train_with_batch(batch_data, weights, indices, steps_done)

    def update_with_batch(self, batch, steps_done):
        """Run one training step on an externally supplied batch.

        Args:
            batch: tuple (batch_data, [weights, [indices]]); the trailing
                elements are optional and may be None.
            steps_done: global step counter (see update()).

        Returns:
            (loss, mean_q) as Python floats, or (None, None) for a missing or
            empty batch.
        """
        # Reject a missing or empty batch up front.
        if not batch or len(batch[0]) == 0:
            return None, None

        # Unpack the batch; weights/indices are optional extras.
        batch_data = batch[0]
        weights = batch[1] if len(batch) > 1 and batch[1] is not None else None
        indices = batch[2] if len(batch) > 2 and batch[2] is not None else None

        return self._train_with_batch(batch_data, weights, indices, steps_done)

    def _train_with_batch(self, batch_data, weights, indices, steps_done):
        """Shared training core: loss, backprop, target sync, bookkeeping."""
        loss, current_q_values, td_errors = self._compute_loss(batch_data, weights)

        grad_norm = self._backward_pass(loss)

        # LR scheduling and target-network sync policy are delegated to the
        # network manager.
        self.network_manager.step_scheduler(steps_done)
        self.network_manager.update_target_network()

        # Refresh replay priorities (no-op when indices is None).
        self._update_priorities(indices, td_errors)

        self._record_statistics(loss, current_q_values, grad_norm)

        return loss.item(), current_q_values.mean().item()

    def _sample_batch(self, steps_done=None):
        """Sample a training batch with a progress-annealed beta."""
        beta = self._compute_beta(steps_done)
        return self.replay_buffer.sample(self.config.dqn_batch_size, beta)

    def _compute_loss(self, batch_data, weights):
        """Compute the (optionally importance-weighted) TD loss.

        Args:
            batch_data: (states, actions, rewards, next_states, dones).
            weights: optional per-sample importance-sampling weights.

        Returns:
            (loss, current_q_values, td_errors); td_errors feed priority
            updates for prioritized replay.
        """
        states, actions, rewards, next_states, dones = batch_data

        # Move everything to the training device once, up front.
        states = torch.as_tensor(states, dtype=torch.float32, device=self.device)
        actions = torch.as_tensor(actions, dtype=torch.int64, device=self.device)
        rewards = torch.as_tensor(rewards, dtype=torch.float32, device=self.device)
        next_states = torch.as_tensor(next_states, dtype=torch.float32, device=self.device)
        dones = torch.as_tensor(dones, dtype=torch.float32, device=self.device)

        # Q(s, a) for the actions actually taken -> shape (batch, 1).
        current_q_values = self.network_manager.policy_net(states).gather(1, actions.unsqueeze(1))

        target_q_values = self._compute_target_q_values(next_states, rewards, dones)

        # Per-sample TD error, kept as a tensor for priority updates.
        td_errors = target_q_values - current_q_values

        if weights is not None:
            # NOTE(review): per-sample weighting assumes the criterion uses
            # reduction='none' — confirm in network_manager.
            weights = torch.as_tensor(weights, dtype=torch.float32, device=self.device).unsqueeze(1)
            loss = (weights * self.network_manager.criterion(current_q_values, target_q_values)).mean()
        else:
            loss = self.network_manager.criterion(current_q_values, target_q_values).mean()

        return loss, current_q_values, td_errors

    def _compute_target_q_values(self, next_states, rewards, dones):
        """Compute bootstrapped target Q values (no gradient flows through)."""
        with torch.no_grad():
            if self.config.double_dqn:
                # Double DQN: policy net selects the action, target net
                # evaluates it (reduces overestimation bias).
                next_actions = self.network_manager.policy_net(next_states).max(1)[1].unsqueeze(1)
                next_q_values = self.network_manager.target_net(next_states).gather(1, next_actions)
            else:
                # Vanilla DQN: target net both selects and evaluates.
                next_q_values = self.network_manager.target_net(next_states).max(1)[0].unsqueeze(1)

            # Clamp bootstrapped values to keep targets numerically bounded.
            next_q_values = torch.clamp(next_q_values,
                                        -self.config.max_q_value,
                                        self.config.max_q_value)

            # Terminal transitions (done == 1) contribute no bootstrap term.
            target_q_values = rewards.unsqueeze(1) + \
                              (1 - dones.unsqueeze(1)) * self.config.gamma * next_q_values

        return target_q_values

    def _backward_pass(self, loss):
        """Backpropagate, clip gradients, and step the optimizer.

        Returns:
            Total gradient norm (before clipping) as reported by
            torch.nn.utils.clip_grad_norm_.
        """
        self.network_manager.optimizer.zero_grad()
        loss.backward()

        # Clip the global gradient norm to stabilize training.
        grad_norm = torch.nn.utils.clip_grad_norm_(
            self.network_manager.policy_net.parameters(),
            self.config.grad_clip_norm
        )

        self.network_manager.optimizer.step()
        return grad_norm

    def _update_priorities(self, indices, td_errors):
        """Push |TD error| back into the prioritized replay buffer."""
        if indices is not None:
            td_errors_abs = torch.abs(td_errors).detach().cpu().numpy()
            self.replay_buffer.update_priorities(indices, td_errors_abs.flatten())

    def _record_statistics(self, loss, current_q_values, grad_norm):
        """Append this step's scalar metrics to the rolling histories."""
        self.loss_history.append(loss.item())
        self.q_value_history.append(current_q_values.mean().item())
        # float() handles both tensor and plain-float returns from
        # clip_grad_norm_ across torch versions.
        self.grad_norm_history.append(float(grad_norm))

    def _compute_beta(self, steps_done=None):
        """Compute the prioritized-replay importance-sampling exponent beta.

        Beta anneals linearly from priority_beta_start to priority_beta_end
        over config.priority_beta_anneal_steps steps (default 100000). The
        previous implementation hard-coded progress to 0.5; passing the real
        steps_done restores proper annealing.

        Args:
            steps_done: current global step; if None, falls back to the
                historical fixed progress of 0.5 for backward compatibility.
        """
        if not self.config.use_prioritized_replay:
            return 1.0

        anneal_steps = getattr(self.config, 'priority_beta_anneal_steps', 100000)
        if steps_done is None:
            progress = 0.5  # legacy fallback when no step count is supplied
        else:
            progress = min(1.0, steps_done / anneal_steps)

        return self.config.priority_beta_start + \
            progress * (self.config.priority_beta_end - self.config.priority_beta_start)

    def get_statistics(self, training_stats_batch_freq):
        """Summarize recent training statistics.

        Args:
            training_stats_batch_freq: number of most-recent samples to
                average over.

        Returns:
            dict with avg/max/min loss, avg Q value, and avg/max grad norm;
            a metric's keys are omitted when no data has been recorded yet.
        """
        stats = {}

        if self.loss_history:
            recent_losses = list(self.loss_history)[-training_stats_batch_freq:]
            stats['avg_loss'] = sum(recent_losses) / len(recent_losses)
            stats['max_loss'] = max(self.loss_history)
            stats['min_loss'] = min(self.loss_history)

        if self.q_value_history:
            recent_q_values = list(self.q_value_history)[-training_stats_batch_freq:]
            stats['avg_q_value'] = sum(recent_q_values) / len(recent_q_values)

        if self.grad_norm_history:
            recent_grad_norms = list(self.grad_norm_history)[-training_stats_batch_freq:]
            stats['avg_grad_norm'] = sum(recent_grad_norms) / len(recent_grad_norms)
            stats['max_grad_norm'] = max(self.grad_norm_history)

        return stats