import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from .utils import (
    config, logger, EarlyStopping, TrainingLogger, 
    ModelCheckpoint, setup_device, count_parameters
)
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data import DataLoader
from torch.optim import Adam
from torch.optim.lr_scheduler import CosineAnnealingLR
import wandb  # 用于实验跟踪
from contextlib import nullcontext

class LabelSmoothingCrossEntropy(nn.Module):
    """Cross-entropy loss with label smoothing.

    Combines the standard per-sample negative log-likelihood with a
    uniform-over-classes term:
        loss = (1 - smoothing) * NLL(true class) + smoothing * mean(-log p)
    averaged over the batch.
    """

    def __init__(self, smoothing=0.1):
        super().__init__()
        self.smoothing = smoothing

    def forward(self, x, target):
        """Return the smoothed loss for logits ``x`` and integer ``target``."""
        log_probs = F.log_softmax(x, dim=-1)
        flat_target = target.long().view(-1)
        # Negative log-likelihood of the true class, one value per sample.
        true_class_nll = -log_probs.gather(dim=-1, index=flat_target.unsqueeze(1)).squeeze(1)
        # Uniform component: average negative log-prob over all classes.
        uniform_nll = -log_probs.mean(dim=-1)
        per_sample = (1.0 - self.smoothing) * true_class_nll + self.smoothing * uniform_nll
        return per_sample.mean()

def _train_one_epoch(model, train_loader, criterion, optimizer, scaler, device,
                     epoch, num_epochs):
    """Run one training epoch and return the average per-batch loss.

    Logs label-range diagnostics and tensor shapes on the first batch, and
    clamps out-of-range labels on EVERY batch so they cannot trigger CUDA
    device-side asserts.
    """
    model.train()
    running_loss = 0.0
    # Assumes the model's final classifier layer defines the class count
    # (same access pattern used throughout this file).
    num_classes = model.classifier[-1].out_features
    pbar = tqdm(train_loader, desc=f'Epoch {epoch+1}/{num_epochs} [Train]')

    for batch_idx, batch in enumerate(pbar):
        optimizer.zero_grad()

        features = batch['features'].to(device)
        # Moved to device for parity with the original loop even though the
        # forward pass below only consumes `features`.
        expression = batch['expression'].to(device)
        labels = batch['label'].long().view(-1).to(device)

        # First-batch diagnostics: label range and tensor shapes.
        if batch_idx == 0:
            min_label = labels.min().item()
            max_label = labels.max().item()
            logger.info(f"标签范围: min={min_label}, max={max_label}, 模型类别数: {num_classes}")

            if max_label >= num_classes:
                logger.error(f"错误: 最大标签值 {max_label} 超出了模型类别数 {num_classes}，这会导致CUDA错误")
                logger.error(f"请检查数据集中的标签或增加模型的num_classes参数")
            if min_label < 0:
                logger.error(f"错误: 标签值不能为负数，发现最小值为 {min_label}")
            if max_label >= num_classes or min_label < 0:
                logger.warning(f"已将标签值限制在 0-{num_classes-1} 范围内以继续训练")

            logger.info(f"训练批次形状: features={features.shape}, labels={labels.shape}")

        # Bug fix: the original clamped only the first batch, so out-of-range
        # labels in any later batch would still crash with a device-side
        # assert. Clamping is a no-op when labels are already in range.
        labels = torch.clamp(labels, 0, num_classes - 1)

        # Automatic mixed precision for the forward pass.
        with torch.amp.autocast(device_type='cuda' if torch.cuda.is_available() else 'cpu'):
            try:
                outputs = model(features)
                loss = criterion(outputs, labels)
            except RuntimeError as e:
                logger.error(f"训练时出错: {str(e)}")
                logger.error(f"features.shape={features.shape}")
                # Re-raise to abort training.
                raise

        if scaler is not None:
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
        else:
            loss.backward()
            optimizer.step()

        running_loss += loss.item()
        pbar.set_postfix({'loss': loss.item()})

    return running_loss / len(train_loader)


def _validate_one_epoch(model, val_loader, criterion, device, epoch, num_epochs):
    """Run validation and return (avg_loss, predictions, labels).

    Best-effort: a failing batch is logged and skipped rather than aborting
    the whole validation pass (preserves the original behavior).
    """
    model.eval()
    running_loss = 0.0
    all_predictions = []
    all_labels = []
    num_classes = model.classifier[-1].out_features

    with torch.no_grad():
        pbar = tqdm(val_loader, desc=f'Epoch {epoch+1}/{num_epochs} [Val]')
        for batch_idx, batch in enumerate(pbar):
            try:
                features = batch["features"].to(device)
                # Fetched for parity with training; forward uses `features` only.
                expressions = batch["expression"].to(device)
                labels = batch["label"].long().view(-1).to(device)

                if batch_idx == 0:
                    min_label = labels.min().item()
                    max_label = labels.max().item()
                    logger.info(f"验证标签范围: min={min_label}, max={max_label}, 模型类别数: {num_classes}")
                    if max_label >= num_classes or min_label < 0:
                        logger.warning(f"验证集标签超出范围，将进行调整")
                    logger.info(f"验证批次形状: features={features.shape}, labels={labels.shape}")

                # Bug fix: clamp every batch (original clamped only the first).
                labels = torch.clamp(labels, 0, num_classes - 1)

                with torch.amp.autocast(device_type='cuda' if torch.cuda.is_available() else 'cpu'):
                    try:
                        outputs = model(features)
                        loss = criterion(outputs, labels)
                    except RuntimeError as e:
                        logger.error(f"验证时出错: {str(e)}")
                        logger.error(f"features.shape={features.shape}")
                        raise

                running_loss += loss.item()

                _, predicted = torch.max(outputs, 1)
                all_predictions.extend(predicted.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                pbar.set_postfix({'loss': loss.item()})

            except Exception as e:
                # Deliberate best-effort: skip the batch but record why.
                logger.error(f"验证批次出错: {str(e)}")
                continue

    return running_loss / len(val_loader), all_predictions, all_labels


def train_tmb_model(model, train_loader, val_loader, num_epochs=None, learning_rate=None):
    """Main training entry point for the TMB model.

    Args:
        model: TMB model instance; `model.classifier[-1].out_features` is read
            to determine the number of classes.
        train_loader: training DataLoader yielding dicts with 'features',
            'expression' and 'label' tensors.
        val_loader: validation DataLoader with the same batch layout.
        num_epochs: number of epochs; falls back to the config value when None.
        learning_rate: learning rate; falls back to the config value when None.

    Returns:
        The trained model (weights as of the last epoch run; per-epoch
        checkpoints are saved via ModelCheckpoint).
    """
    # Fall back to config values when arguments are omitted.
    num_epochs = num_epochs or config['training_params']['num_epochs']
    learning_rate = learning_rate or config['training_params']['learning_rate']

    device = setup_device()
    model.to(device)

    # Training utilities: early stopping, metric logging, checkpointing.
    early_stopping = EarlyStopping(
        patience=config['training_params']['patience'],
        min_delta=config['training_params']['min_delta']
    )
    training_logger = TrainingLogger()
    checkpoint = ModelCheckpoint()

    # Label-smoothed cross-entropy regularizes over-confident predictions.
    criterion = LabelSmoothingCrossEntropy(smoothing=0.1)
    optimizer = torch.optim.Adam(
        model.parameters(),
        lr=learning_rate,
        weight_decay=config['training_params']['weight_decay']
    )
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=3
    )

    # Gradient scaling for mixed precision is only meaningful on CUDA.
    scaler = torch.cuda.amp.GradScaler() if torch.cuda.is_available() else None

    logger.info(f"Model Parameters: {count_parameters(model):,}")

    # Force single-process data loading.
    # NOTE(review): mutating num_workers after DataLoader construction relies
    # on the attribute being re-read at iteration time — confirm intended.
    train_loader.num_workers = 0
    val_loader.num_workers = 0

    for epoch in range(num_epochs):
        avg_train_loss = _train_one_epoch(
            model, train_loader, criterion, optimizer, scaler, device,
            epoch, num_epochs
        )
        avg_val_loss, val_predictions, val_labels = _validate_one_epoch(
            model, val_loader, criterion, device, epoch, num_epochs
        )

        # Weighted-average classification metrics on the validation split.
        metrics = {
            'accuracy': accuracy_score(val_labels, val_predictions),
            'precision': precision_score(val_labels, val_predictions, average='weighted'),
            'recall': recall_score(val_labels, val_predictions, average='weighted'),
            'f1': f1_score(val_labels, val_predictions, average='weighted')
        }

        # ReduceLROnPlateau steps on the monitored (validation) loss.
        scheduler.step(avg_val_loss)

        training_logger.log_epoch(epoch, avg_train_loss, avg_val_loss, metrics)
        checkpoint.save_checkpoint(model, epoch, avg_val_loss, optimizer)

        # Bug fix: log this epoch's results BEFORE the early-stopping break so
        # the final epoch's metrics are not silently dropped.
        logger.info(f"\nEpoch {epoch+1}/{num_epochs}")
        logger.info(f"Train Loss: {avg_train_loss:.4f}, Val Loss: {avg_val_loss:.4f}")
        for metric_name, value in metrics.items():
            logger.info(f"{metric_name}: {value:.4f}")

        if early_stopping(avg_val_loss):
            logger.info("Early stopping triggered")
            break

    # Plot the loss/metric curves collected during training.
    training_logger.plot_losses()
    training_logger.plot_metrics()

    return model

def train_with_cross_validation(model_class, dataset, n_splits=5):
    """
    Train the model with K-fold cross-validation.

    Args:
        model_class: model class, instantiated fresh for each fold from
            config['model_params']
        dataset: the full dataset (indexable; yields dict batches)
        n_splits: number of folds

    Returns:
        cross_validator.results: the accumulated per-fold metrics.

    NOTE(review): `CrossValidator` is not among this module's visible imports —
    confirm it is provided by `.utils` or elsewhere, otherwise this raises
    NameError at call time.
    """
    cross_validator = CrossValidator(n_splits=n_splits)
    device = setup_device()

    for fold, (train_idx, val_idx) in enumerate(cross_validator.split(dataset)):
        logger.info(f"\nTraining Fold {fold+1}/{n_splits}")

        # Per-fold loaders over the same dataset via index samplers.
        train_sampler = torch.utils.data.SubsetRandomSampler(train_idx)
        val_sampler = torch.utils.data.SubsetRandomSampler(val_idx)

        train_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=config['data_params']['batch_size'],
            sampler=train_sampler
        )
        val_loader = torch.utils.data.DataLoader(
            dataset,
            batch_size=config['data_params']['batch_size'],
            sampler=val_sampler
        )

        # Fresh model instance per fold so folds do not share weights.
        model = model_class(**config['model_params']).to(device)

        # Train this fold.
        model = train_tmb_model(model, train_loader, val_loader)

        # Evaluate on the fold's validation split.
        # NOTE(review): this forward call passes (sequences, histone_marks,
        # expressions) while train_tmb_model calls model(features) — confirm
        # which signature the model actually exposes; one of the two call
        # sites looks stale.
        model.eval()
        val_predictions = []
        val_labels = []

        with torch.no_grad():
            for batch in val_loader:
                sequences = batch["sequence"].to(device)
                histone_marks = {k: v.to(device) for k, v in batch["histone_marks"].items()}
                expressions = batch["expression"].to(device)
                labels = batch["label"].to(device)

                outputs = model(sequences, histone_marks, expressions)
                _, predicted = torch.max(outputs, 1)
                val_predictions.extend(predicted.cpu().numpy())
                val_labels.extend(labels.cpu().numpy())

        # Weighted-average metrics for this fold.
        metrics = {
            'accuracy': accuracy_score(val_labels, val_predictions),
            'precision': precision_score(val_labels, val_predictions, average='weighted'),
            'recall': recall_score(val_labels, val_predictions, average='weighted'),
            'f1': f1_score(val_labels, val_predictions, average='weighted')
        }

        cross_validator.log_fold_results(metrics)

    return cross_validator.results 

def train_model(model, train_loader, val_loader, num_epochs=100, 
                learning_rate=1e-4, device='cuda'):
    """Train a model with plain cross-entropy, logging the run to wandb.

    Args:
        model: module whose forward takes (sequence_features, histone_features).
        train_loader: DataLoader yielding dicts with 'sequence_features',
            'histone_features' and 'labels' tensors.
        val_loader: validation DataLoader with the same batch layout.
        num_epochs: number of epochs (also the cosine-annealing period T_max).
        learning_rate: Adam learning rate.
        device: device string the model and batches are moved to.

    Returns:
        The model with its last-epoch weights; the lowest-validation-loss
        weights are saved to 'best_model.pth'.
    """
    # Experiment tracking.
    wandb.init(project="chromatin-state", name="transformer-cnn-moe")

    # Optimizer and learning-rate schedule.
    optimizer = Adam(model.parameters(), lr=learning_rate)
    scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs)

    model = model.to(device)
    best_val_loss = float('inf')

    try:
        for epoch in range(num_epochs):
            # ---- training phase ----
            model.train()
            train_loss = 0.0
            train_correct = 0
            train_total = 0

            with tqdm(train_loader, desc=f'Epoch {epoch+1}/{num_epochs}') as pbar:
                for batch_count, batch in enumerate(pbar, start=1):
                    sequence_features = batch['sequence_features'].to(device)
                    histone_features = batch['histone_features'].to(device)
                    labels = batch['labels'].to(device)

                    # Forward / backward.
                    outputs = model(sequence_features, histone_features)
                    loss = F.cross_entropy(outputs, labels)

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                    # Running statistics.
                    train_loss += loss.item()
                    _, predicted = outputs.max(1)
                    train_total += labels.size(0)
                    train_correct += predicted.eq(labels).sum().item()

                    # Bug fix: the original displayed train_loss/train_total,
                    # dividing a sum of per-batch MEAN losses by the SAMPLE
                    # count (wrong by ~batch_size). Show the running mean over
                    # batches instead.
                    pbar.set_postfix({
                        'loss': f'{train_loss/batch_count:.4f}',
                        'acc': f'{100.*train_correct/train_total:.2f}%'
                    })

            # ---- validation phase ----
            model.eval()
            val_loss = 0.0
            val_correct = 0
            val_total = 0

            with torch.no_grad():
                for batch in val_loader:
                    sequence_features = batch['sequence_features'].to(device)
                    histone_features = batch['histone_features'].to(device)
                    labels = batch['labels'].to(device)

                    outputs = model(sequence_features, histone_features)
                    loss = F.cross_entropy(outputs, labels)

                    val_loss += loss.item()
                    _, predicted = outputs.max(1)
                    val_total += labels.size(0)
                    val_correct += predicted.eq(labels).sum().item()

            # Epoch-level metrics (losses averaged over batches).
            epoch_train_loss = train_loss / len(train_loader)
            epoch_train_acc = 100. * train_correct / train_total
            epoch_val_loss = val_loss / len(val_loader)
            epoch_val_acc = 100. * val_correct / val_total

            wandb.log({
                'train_loss': epoch_train_loss,
                'train_acc': epoch_train_acc,
                'val_loss': epoch_val_loss,
                'val_acc': epoch_val_acc
            })

            # Keep the best checkpoint by validation loss.
            if epoch_val_loss < best_val_loss:
                best_val_loss = epoch_val_loss
                torch.save(model.state_dict(), 'best_model.pth')

            # Cosine schedule steps once per epoch.
            scheduler.step()
    finally:
        # Bug fix: close the wandb run even when training raises, so the run
        # is not left dangling in the tracker.
        wandb.finish()

    return model