import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from typing import Optional, Union
import pandas as pd
import logging
from qtorch.core import ModelConfig,ReturnsDataset

# Configure module-level logging
logger = logging.getLogger(__name__)

class ModelTrainer:
    """Device-aware trainer for a PyTorch forecasting model.

    Picks the best available device (CUDA > MPS > CPU), builds an Adam
    optimizer with per-submodule learning rates, and trains with optional
    mixed precision.
    """

    # Gradient scaler for CUDA mixed precision (None on MPS/CPU).
    scaler: Optional[torch.amp.GradScaler] = None
    # DataLoader / precision settings chosen by _detect_device().
    optim_config: dict[str, Union[int, bool]] = {}

    def __init__(self, model: nn.Module, config: Optional[ModelConfig] = None) -> None:
        """
        Initialize the model trainer.

        Args:
            model (nn.Module): the PyTorch model to train. Assumed to expose
                ``lstm``, ``transformer``, ``time_fusion`` and ``output_head``
                sub-modules -- TODO confirm against the model definition.
            config (ModelConfig, optional): training hyper-parameters;
                defaults to ``ModelConfig()``.
        """
        # NOTE(review): the original __init__ body was duplicated verbatim
        # (device re-detected, optimizer rebuilt); the duplicate is removed.
        self.device = self._detect_device()
        self.model = model.to(self.device)

        self.config = config or ModelConfig()
        self.criterion = nn.MSELoss()

        # Per-submodule learning rates: damp the LSTM, boost the transformer;
        # remaining groups inherit the base rate passed to Adam below.
        param_groups = [
            {'params': model.lstm.parameters(), 'lr': self.config.learning_rate * 0.5},  # type: ignore
            {'params': model.transformer.parameters(), 'lr': self.config.learning_rate * 1.2},  # type: ignore
            {'params': model.time_fusion.parameters()},  # type: ignore
            {'params': model.output_head.parameters()}  # type: ignore
        ]
        self.optimizer = optim.Adam(
            param_groups,
            lr=self.config.learning_rate,
            weight_decay=self.config.weight_decay
        )

        # Mixed-precision components depend on the device type.
        self.autocast_device: Optional[str] = None
        if self.device.type == 'cuda':
            self.scaler = torch.amp.GradScaler(enabled=True)
            self.autocast_device = 'cuda'
        elif self.device.type == 'mps':
            # MPS has no native GradScaler support; use autocast only.
            self.scaler = None
            self.autocast_device = 'mps'
        else:
            self.scaler = None

    def _detect_device(self) -> torch.device:
        """Select the best available compute device and store platform-specific
        DataLoader/precision settings in ``self.optim_config``.

        Returns:
            torch.device: the device subsequent training should run on.
        """
        device = torch.device("cpu")
        # Conservative defaults so train() never hits a KeyError on plain CPU
        # (the original left optim_config empty when MKL-DNN was unavailable).
        optim_config: dict[str, Union[int, bool]] = {
            "num_workers": 0,
            "pin_memory": False,
            "mixed_precision": False,
        }

        if torch.cuda.is_available():
            device = torch.device("cuda")
            # Auto-tune cuDNN kernels; pays off for fixed input shapes.
            torch.backends.cudnn.benchmark = True
            optim_config.update({
                "num_workers": 4,
                "pin_memory": True,
                "mixed_precision": True
            })
            logger.info(f"Using CUDA device {torch.cuda.get_device_name(device)}")
            logger.info(f"Available VRAM: {torch.cuda.get_device_properties(device).total_memory / 1024**3:.1f}GB")

        elif hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
            # Apple Silicon: cap memory so the OS keeps some headroom.
            device = torch.device("mps")
            torch.mps.set_per_process_memory_fraction(0.85)
            optim_config.update({
                "num_workers": 2,
                "pin_memory": False,
                "mixed_precision": True
            })
            logger.info("Using Apple MPS with automatic device optimization")

        else:
            # CPU fallback; defaults above already cover the no-MKL-DNN case.
            if torch.backends.mkldnn.is_available():
                torch.backends.mkldnn.enabled = True
                logger.info("Using CPU with MKL-DNN acceleration")
            else:
                logger.warning("Using standard CPU without hardware acceleration")

        # Stash for train() to configure its DataLoader / AMP behavior.
        self.optim_config = optim_config
        return device

    def train(self, data: pd.DataFrame, window_size: int = 30, epochs: int = 100, batch_size: int = 64):
        """
        Train the model.

        Args:
            data (pd.DataFrame): preprocessed data (must contain a `returns` column).
            window_size (int): length of the sliding time-series window.
            epochs (int): number of training epochs.
            batch_size (int): mini-batch size.
        """
        dataset = ReturnsDataset(data, window_size=window_size)
        # DataLoader tuned with the platform settings from _detect_device().
        num_workers = int(self.optim_config['num_workers'])
        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=True,
            num_workers=num_workers,
            pin_memory=bool(self.optim_config['pin_memory']),
            persistent_workers=num_workers > 0
        )

        # Reuse the scaler configured in __init__ (CUDA only); a disabled
        # scaler keeps the update path below uniform on CPU/MPS.  The
        # original built a fresh enabled scaler here, contradicting __init__.
        scaler = self.scaler if self.scaler is not None else torch.amp.GradScaler(enabled=False)

        from tqdm import tqdm  # type: ignore

        self.model.train()
        epoch_iterator = tqdm(range(epochs), desc='Epochs', unit='epoch')
        for epoch in epoch_iterator:
            total_loss = 0.0
            self.model.train()

            batch_iterator = tqdm(dataloader, desc='Batches', leave=False, unit='batch')
            for X, y in batch_iterator:
                # Transfer and grad reset happen outside autocast; autocast
                # should wrap only the forward pass and loss computation.
                X = X.unsqueeze(-1).to(self.device, non_blocking=True)
                y = y.to(self.device, non_blocking=True)
                self.optimizer.zero_grad(set_to_none=True)

                with torch.amp.autocast(
                    device_type=self.device.type,
                    enabled=bool(self.optim_config['mixed_precision'])
                ):
                    outputs = self.model(X)
                    # squeeze(-1) (not squeeze()) so a trailing batch of
                    # size 1 keeps its batch dimension.
                    loss = self.criterion(outputs.squeeze(-1), y)

                # Scaled backward/step on CUDA; plain backward/step otherwise.
                if scaler.is_enabled():
                    scaler.scale(loss).backward()
                    scaler.step(self.optimizer)
                    scaler.update()
                else:
                    loss.backward()
                    self.optimizer.step()

                total_loss += loss.item()

            # Report average loss every 10 epochs via the module logger.
            if (epoch + 1) % 10 == 0:
                avg_loss = total_loss / max(len(dataloader), 1)
                logger.info(f"Epoch [{epoch+1}/{epochs}], Loss: {avg_loss:.6f}")