"""
麻将模型训练器
"""
import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from tqdm import tqdm

from sichuanmajiang.model.base_model import BaseModel


class MahjongDataset(Dataset):
    """
    Dataset wrapping mahjong training samples.

    Every sample carries a state feature array; the optional per-sample
    fields (action, reward, return, advantage, value target) are included
    in ``__getitem__``'s output only when they were supplied at
    construction time, so the same dataset class serves policy learning,
    value learning, and actor-critic training.
    """

    def __init__(self,
                 states: List[np.ndarray],
                 actions: Optional[List[int]] = None,
                 rewards: Optional[List[float]] = None,
                 returns: Optional[List[float]] = None,
                 advantages: Optional[List[float]] = None,
                 value_targets: Optional[List[float]] = None):
        """
        Args:
            states: per-sample state feature arrays.
            actions: action indices (for policy learning).
            rewards: rewards (for reinforcement learning).
            returns: returns (for policy gradients).
            advantages: advantage estimates (for actor-critic).
            value_targets: value regression targets (for value learning).
        """
        def to_array(seq):
            # Optional fields stay None so __getitem__ can skip them.
            return None if seq is None else np.array(seq)

        self.states = np.array(states)
        self.actions = to_array(actions)
        self.rewards = to_array(rewards)
        self.returns = to_array(returns)
        self.advantages = to_array(advantages)
        self.value_targets = to_array(value_targets)

    def __len__(self) -> int:
        return len(self.states)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        sample = {'state': torch.FloatTensor(self.states[idx])}

        # (output key, backing array, tensor constructor) for each optional
        # scalar field; indexing [0] yields a 0-d tensor, matching the
        # shape training code expects after batching.
        scalar_fields = (
            ('action', self.actions, torch.LongTensor),
            ('reward', self.rewards, torch.FloatTensor),
            ('return', self.returns, torch.FloatTensor),
            ('advantage', self.advantages, torch.FloatTensor),
            ('value_target', self.value_targets, torch.FloatTensor),
        )
        for key, values, make_tensor in scalar_fields:
            if values is not None:
                sample[key] = make_tensor([values[idx]])[0]

        return sample


class ModelTrainer:
    """
    Base class for model trainers.

    Owns the model/device/optimizer/scheduler plumbing plus a generic
    training loop with optional validation, early stopping, LR scheduling,
    and checkpointing. Subclasses implement `_train_epoch` and `_evaluate`.
    """

    def __init__(self,
                 model: BaseModel,
                 device: str = 'cuda' if torch.cuda.is_available() else 'cpu'):
        """
        Args:
            model: model to train; moved to `device` immediately.
            device: torch device string. NOTE: the default is evaluated
                once at import time, not per call.
        """
        self.model = model
        self.device = device
        self.model.to(self.device)
        self.optimizer = None
        self.scheduler = None

    def set_optimizer(self,
                      optimizer_type: str = 'adam',
                      lr: float = 1e-4,
                      weight_decay: float = 0.0,
                      **kwargs) -> None:
        """
        Configure the optimizer.

        Args:
            optimizer_type: optimizer type ('adam', 'sgd', 'rmsprop').
            lr: learning rate.
            weight_decay: L2 weight decay.
            **kwargs: extra optimizer keyword arguments (e.g. `momentum`
                for SGD, `betas` for Adam).

        Raises:
            ValueError: for an unsupported `optimizer_type`.
        """
        if optimizer_type == 'adam':
            self.optimizer = optim.Adam(
                self.model.parameters(),
                lr=lr,
                weight_decay=weight_decay,
                **kwargs
            )
        elif optimizer_type == 'sgd':
            # Bug fix: pop `momentum` out of kwargs before passing it
            # explicitly — previously a caller supplying momentum=... hit
            # "got multiple values for keyword argument 'momentum'".
            momentum = kwargs.pop('momentum', 0.9)
            self.optimizer = optim.SGD(
                self.model.parameters(),
                lr=lr,
                weight_decay=weight_decay,
                momentum=momentum,
                **kwargs
            )
        elif optimizer_type == 'rmsprop':
            self.optimizer = optim.RMSprop(
                self.model.parameters(),
                lr=lr,
                weight_decay=weight_decay,
                **kwargs
            )
        else:
            raise ValueError(f"不支持的优化器类型: {optimizer_type}")

    def set_scheduler(self,
                      scheduler_type: str = 'none',
                      **kwargs) -> None:
        """
        Configure the learning-rate scheduler.

        Args:
            scheduler_type: scheduler type ('none', 'step',
                'reduce_on_plateau'). 'none' (or an unset optimizer)
                clears the scheduler.
            **kwargs: scheduler parameters (step_size/gamma for 'step';
                mode/factor/patience for 'reduce_on_plateau').

        Raises:
            ValueError: for an unsupported `scheduler_type`.
        """
        if scheduler_type == 'none' or self.optimizer is None:
            self.scheduler = None
            return

        if scheduler_type == 'step':
            self.scheduler = optim.lr_scheduler.StepLR(
                self.optimizer,
                step_size=kwargs.get('step_size', 10),
                gamma=kwargs.get('gamma', 0.1)
            )
        elif scheduler_type == 'reduce_on_plateau':
            self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                mode=kwargs.get('mode', 'min'),
                factor=kwargs.get('factor', 0.1),
                patience=kwargs.get('patience', 5)
            )
        else:
            raise ValueError(f"不支持的调度器类型: {scheduler_type}")

    def save_model(self,
                   path: str,
                   metadata: Optional[Dict] = None) -> None:
        """
        Save a checkpoint (model + optimizer state + metadata).

        Args:
            path: destination file path.
            metadata: extra metadata stored alongside the weights.
        """
        # Bug fix: os.path.dirname returns '' for a bare filename and
        # os.makedirs('') raises FileNotFoundError — only create the
        # directory when there is one.
        dir_name = os.path.dirname(path)
        if dir_name:
            os.makedirs(dir_name, exist_ok=True)

        checkpoint = {
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict() if self.optimizer else None,
            'metadata': metadata or {}
        }

        torch.save(checkpoint, path)

    def load_model(self, path: str) -> Dict:
        """
        Load a checkpoint saved by `save_model`.

        Args:
            path: checkpoint file path.

        Returns:
            The checkpoint's metadata dict ({} when absent).
        """
        checkpoint = torch.load(path, map_location=self.device)
        self.model.load_state_dict(checkpoint['model_state_dict'])

        # .get() tolerates checkpoints written without optimizer state.
        optimizer_state = checkpoint.get('optimizer_state_dict')
        if self.optimizer and optimizer_state:
            self.optimizer.load_state_dict(optimizer_state)

        return checkpoint.get('metadata', {})

    def train(self,
              train_data: MahjongDataset,
              val_data: Optional[MahjongDataset] = None,
              epochs: int = 10,
              batch_size: int = 64,
              shuffle: bool = True,
              early_stopping_patience: int = 10,
              log_interval: int = 100,
              save_dir: Optional[str] = None) -> Dict:
        """
        Generic training loop.

        Subclasses must implement `_train_epoch` and `_evaluate`. When
        `val_data` is given, tracks the best validation loss for early
        stopping and (if `save_dir` is set) checkpoints the best model;
        the latest model is always checkpointed each epoch.

        Returns:
            History dict with 'train_loss' and 'val_loss' lists.
        """
        if self.optimizer is None:
            self.set_optimizer()

        train_loader = DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=shuffle
        )

        # Explicit None-check: an empty dataset is falsy but still valid.
        val_loader = DataLoader(
            val_data,
            batch_size=batch_size,
            shuffle=False
        ) if val_data is not None else None

        # Training history returned to the caller.
        history = {
            'train_loss': [],
            'val_loss': []
        }

        best_val_loss = float('inf')
        patience_counter = 0

        for epoch in range(epochs):
            start_time = time.time()

            # Train for one epoch.
            train_loss = self._train_epoch(train_loader, log_interval)
            history['train_loss'].append(train_loss)

            # Validation + early stopping.
            if val_loader:
                val_loss = self._evaluate(val_loader)
                history['val_loss'].append(val_loss)

                if val_loss < best_val_loss:
                    best_val_loss = val_loss
                    patience_counter = 0

                    # Checkpoint the best model so far.
                    if save_dir:
                        self.save_model(
                            os.path.join(save_dir, 'best_model.pt'),
                            {'epoch': epoch, 'val_loss': val_loss}
                        )
                else:
                    patience_counter += 1
                    if patience_counter >= early_stopping_patience:
                        print(f"早停在 epoch {epoch+1}")
                        break

                print(f"Epoch {epoch+1}/{epochs}, "
                      f"Train Loss: {train_loss:.4f}, "
                      f"Val Loss: {val_loss:.4f}, "
                      f"Time: {time.time() - start_time:.2f}s")
            else:
                print(f"Epoch {epoch+1}/{epochs}, "
                      f"Train Loss: {train_loss:.4f}, "
                      f"Time: {time.time() - start_time:.2f}s")

            # Step the LR scheduler (plateau schedulers need the metric).
            if self.scheduler:
                if isinstance(self.scheduler, optim.lr_scheduler.ReduceLROnPlateau):
                    self.scheduler.step(val_loss if val_loader else train_loss)
                else:
                    self.scheduler.step()

            # Always checkpoint the latest model.
            if save_dir:
                self.save_model(
                    os.path.join(save_dir, 'latest_model.pt'),
                    {'epoch': epoch, 'train_loss': train_loss}
                )

        return history

    def _train_epoch(self,
                     data_loader: DataLoader,
                     log_interval: int) -> float:
        """
        Train one epoch and return the mean batch loss.
        Subclasses must implement this.
        """
        raise NotImplementedError

    def _evaluate(self, data_loader: DataLoader) -> float:
        """
        Evaluate the model and return the mean batch loss.
        Subclasses must implement this.
        """
        raise NotImplementedError


class PolicyTrainer(ModelTrainer):
    """
    Trainer for policy models.

    Assumes the model outputs action probabilities (post-softmax), so the
    loss is applied to their logarithm. Actor-Critic models (detected via
    an `actor_layers` attribute) are assumed to return a (policy, value)
    tuple; only the policy head is used here.
    """

    # Keeps log() finite when a probability is exactly 0 (matches the
    # epsilon used for the entropy term in ActorCriticTrainer).
    _LOG_EPS = 1e-8

    def __init__(self, model: BaseModel, **kwargs):
        super().__init__(model, **kwargs)
        self.loss_fn = nn.CrossEntropyLoss()

    def _train_epoch(self, data_loader: DataLoader, log_interval: int) -> float:
        """Train one epoch; returns the mean batch loss."""
        self.model.train()
        total_loss = 0.0

        for i, batch in enumerate(tqdm(data_loader)):
            states = batch['state'].to(self.device)
            actions = batch['action'].to(self.device)

            # Forward pass; Actor-Critic models return (policy, value).
            if hasattr(self.model, 'actor_layers'):
                policies, _ = self.model(states)
            else:
                policies = self.model(states)

            # Bug fix: add epsilon before log() — a zero probability would
            # otherwise yield -inf and propagate NaN gradients.
            loss = self.loss_fn(torch.log(policies + self._LOG_EPS), actions)

            # Backward pass with gradient clipping for stability.
            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            self.optimizer.step()

            total_loss += loss.item()

            if (i + 1) % log_interval == 0:
                avg_loss = total_loss / (i + 1)
                tqdm.write(f"Batch {i+1}/{len(data_loader)}, Loss: {avg_loss:.4f}")

        return total_loss / len(data_loader)

    def _evaluate(self, data_loader: DataLoader) -> float:
        """Compute the mean loss over a validation loader (no gradients)."""
        self.model.eval()
        total_loss = 0.0

        with torch.no_grad():
            for batch in data_loader:
                states = batch['state'].to(self.device)
                actions = batch['action'].to(self.device)

                # Forward pass; Actor-Critic models return (policy, value).
                if hasattr(self.model, 'actor_layers'):
                    policies, _ = self.model(states)
                else:
                    policies = self.model(states)

                # Same epsilon guard as in training.
                loss = self.loss_fn(torch.log(policies + self._LOG_EPS), actions)
                total_loss += loss.item()

        return total_loss / len(data_loader)


class ValueTrainer(ModelTrainer):
    """
    Trainer for value models.

    Regresses the model's value output against `value_target` with MSE.
    Actor-Critic models (detected via a `critic_layers` attribute) are
    assumed to return a (policy, value) tuple; only the value head is
    trained here.
    """

    def __init__(self, model: BaseModel, **kwargs):
        super().__init__(model, **kwargs)
        self.loss_fn = nn.MSELoss()

    def _train_epoch(self, data_loader: DataLoader, log_interval: int) -> float:
        """Train one epoch; returns the mean batch loss."""
        self.model.train()
        total_loss = 0.0

        for i, batch in enumerate(tqdm(data_loader)):
            states = batch['state'].to(self.device)
            value_targets = batch['value_target'].to(self.device)

            # Forward pass; Actor-Critic models return (policy, value).
            if hasattr(self.model, 'critic_layers'):
                _, values = self.model(states)
            else:
                values = self.model(states)

            # Bug fix: squeeze(-1) instead of squeeze() — a batch of size 1
            # would otherwise collapse to a 0-d scalar and silently
            # broadcast against the (1,)-shaped target.
            loss = self.loss_fn(values.squeeze(-1), value_targets)

            # Backward pass with gradient clipping for stability.
            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            self.optimizer.step()

            total_loss += loss.item()

            if (i + 1) % log_interval == 0:
                avg_loss = total_loss / (i + 1)
                tqdm.write(f"Batch {i+1}/{len(data_loader)}, Loss: {avg_loss:.4f}")

        return total_loss / len(data_loader)

    def _evaluate(self, data_loader: DataLoader) -> float:
        """Compute the mean loss over a validation loader (no gradients)."""
        self.model.eval()
        total_loss = 0.0

        with torch.no_grad():
            for batch in data_loader:
                states = batch['state'].to(self.device)
                value_targets = batch['value_target'].to(self.device)

                # Forward pass; Actor-Critic models return (policy, value).
                if hasattr(self.model, 'critic_layers'):
                    _, values = self.model(states)
                else:
                    values = self.model(states)

                # Same shape-safe squeeze as in training.
                loss = self.loss_fn(values.squeeze(-1), value_targets)
                total_loss += loss.item()

        return total_loss / len(data_loader)


class ActorCriticTrainer(ModelTrainer):
    """
    Trainer for Actor-Critic models.

    Combines a return/advantage-weighted policy loss, an MSE value loss
    (weighted by `value_loss_weight`), and an entropy bonus (weighted by
    `entropy_weight`). The model must return a (policy, value) tuple where
    the policy is a probability distribution over actions.
    """

    # Keeps log() finite when a probability is exactly 0.
    _LOG_EPS = 1e-8

    def __init__(self, model: BaseModel, **kwargs):
        # Bug fix: pop trainer-specific hyperparameters BEFORE forwarding
        # kwargs to the base class — ModelTrainer.__init__ only accepts
        # `device`, so leaking them through **kwargs raised TypeError.
        self.value_loss_weight = kwargs.pop('value_loss_weight', 0.5)
        self.entropy_weight = kwargs.pop('entropy_weight', 0.01)
        super().__init__(model, **kwargs)
        self.policy_loss_fn = nn.CrossEntropyLoss(reduction='none')
        self.value_loss_fn = nn.MSELoss()

    def _train_epoch(self, data_loader: DataLoader, log_interval: int) -> float:
        """Train one epoch; returns the mean batch combined loss."""
        self.model.train()
        total_loss = 0.0

        for i, batch in enumerate(tqdm(data_loader)):
            states = batch['state'].to(self.device)
            actions = batch['action'].to(self.device)
            returns = batch['return'].to(self.device)
            # Fall back to raw returns when advantages were not supplied.
            advantages = batch['advantage'].to(self.device) if 'advantage' in batch else returns

            # Forward pass.
            policies, values = self.model(states)

            # Policy loss weighted per-sample by the advantage (or return).
            # Bug fix: epsilon before log() avoids -inf / NaN gradients
            # (consistent with the entropy term below).
            policy_loss = (self.policy_loss_fn(
                torch.log(policies + self._LOG_EPS), actions) * advantages).mean()

            # Value loss. Bug fix: squeeze(-1) instead of squeeze() — a
            # batch of size 1 would otherwise collapse to a 0-d scalar and
            # broadcast against the (1,)-shaped target.
            value_loss = self.value_loss_fn(values.squeeze(-1), returns)

            # Entropy regularization (encourages exploration).
            entropy = -(policies * torch.log(policies + self._LOG_EPS)).sum(dim=1).mean()

            # Combined objective.
            loss = policy_loss + self.value_loss_weight * value_loss - self.entropy_weight * entropy

            # Backward pass with gradient clipping for stability.
            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)
            self.optimizer.step()

            total_loss += loss.item()

            if (i + 1) % log_interval == 0:
                avg_loss = total_loss / (i + 1)
                tqdm.write(f"Batch {i+1}/{len(data_loader)}, Loss: {avg_loss:.4f}, "
                          f"Policy Loss: {policy_loss.item():.4f}, "
                          f"Value Loss: {value_loss.item():.4f}")

        return total_loss / len(data_loader)

    def _evaluate(self, data_loader: DataLoader) -> float:
        """Compute the mean combined loss over a validation loader."""
        self.model.eval()
        total_loss = 0.0

        with torch.no_grad():
            for batch in data_loader:
                states = batch['state'].to(self.device)
                actions = batch['action'].to(self.device)
                returns = batch['return'].to(self.device)

                # Forward pass.
                policies, values = self.model(states)

                # NOTE: evaluation weights the policy loss by raw returns
                # (no advantage fallback here, unlike training).
                policy_loss = (self.policy_loss_fn(
                    torch.log(policies + self._LOG_EPS), actions) * returns).mean()

                # Shape-safe value loss (see _train_epoch).
                value_loss = self.value_loss_fn(values.squeeze(-1), returns)

                # Entropy term.
                entropy = -(policies * torch.log(policies + self._LOG_EPS)).sum(dim=1).mean()

                # Combined objective.
                loss = policy_loss + self.value_loss_weight * value_loss - self.entropy_weight * entropy
                total_loss += loss.item()

        return total_loss / len(data_loader)


def create_trainer(model_type: str,
                   model: BaseModel,
                   **kwargs) -> ModelTrainer:
    """
    Factory function for trainer instances.

    Args:
        model_type: model type — 'policy', 'dueling_dqn', or 'attention'
            (policy trainer), 'value', or 'actor_critic'.
        model: the model the trainer will own.
        **kwargs: forwarded to the trainer constructor.

    Returns:
        The trainer instance matching `model_type`.

    Raises:
        ValueError: for an unrecognized `model_type`.
    """
    trainer_by_type = {
        'policy': PolicyTrainer,
        'dueling_dqn': PolicyTrainer,
        'attention': PolicyTrainer,
        'value': ValueTrainer,
        'actor_critic': ActorCriticTrainer,
    }
    trainer_cls = trainer_by_type.get(model_type)
    if trainer_cls is None:
        raise ValueError(f"不支持的模型类型: {model_type}")
    return trainer_cls(model, **kwargs)