import os
import math
import random
import shutil
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from matplotlib import pyplot as plt
import torch.cuda.amp as amp

from data_processing import RNATrainDataset, featurize
from model import RNAModel
from utils import seeding
import heapq
# Pin training to the first GPU and let the CUDA caching allocator grow
# segments on demand (reduces fragmentation-driven OOMs).
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# Fix: the value previously had a trailing space ('expandable_segments:True ')
# which the allocator's config parser does not recognize, silently disabling
# the option.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

# Label-smoothed cross-entropy loss, defined inline in the training script.
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy with label smoothing over a small closed vocabulary.

    The target distribution puts ``1 - smoothing`` mass on the true class and
    spreads ``smoothing`` uniformly over the remaining ``classes - 1`` classes.

    Fix vs. the original: ``ignore_index`` was accepted but never used — a
    target equal to -100 crashed ``scatter_`` (negative index) and ignored
    positions were still counted in the mean. Ignored positions are now
    excluded from the loss. Behavior is unchanged when no target equals
    ``ignore_index``.

    Args:
        classes: number of output classes (4 nucleotides by default).
        smoothing: total probability mass moved off the true class.
        ignore_index: target value whose positions are excluded from the loss.
    """

    def __init__(self, classes=4, smoothing=0.2, ignore_index=-100):
        super().__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.classes = classes
        self.ignore_index = ignore_index

    def forward(self, pred, target):
        """Return the mean smoothed NLL over non-ignored positions.

        Args:
            pred: raw logits of shape (N, classes).
            target: class indices of shape (N,), may contain ``ignore_index``.
        """
        log_probs = pred.log_softmax(dim=-1)
        with torch.no_grad():
            # Uniform off-class mass, then place the confidence on the target.
            true_dist = torch.full_like(log_probs, self.smoothing / (self.classes - 1))
            valid = target != self.ignore_index
            # Clamp ignored targets to 0 so scatter_ never sees a negative index;
            # those rows are dropped from the reduction below anyway.
            safe_target = target.masked_fill(~valid, 0)
            true_dist.scatter_(1, safe_target.unsqueeze(1), self.confidence)
        per_position = torch.sum(-true_dist * log_probs, dim=-1)
        if valid.any():
            return per_position[valid].mean()
        # All positions ignored: return a zero that keeps the graph intact.
        return per_position.sum() * 0.0

class BalancedLengthBatchSampler(torch.utils.data.Sampler):
    """Batch sampler that balances total sequence length across batches.

    The subset's samples are partitioned into ``ceil(N / fake_batch_size)``
    batches with a greedy longest-processing-time heuristic so each batch
    carries a roughly equal total length. Batches are ordered longest-first;
    with ``shuffle=True`` the stored batch order is re-shuffled in place at
    the start of every iteration.

    Args:
        subset: a torch ``Subset`` whose ``.dataset`` exposes
            ``get_lengths_by_indices`` (an RNATrainDataset).
        fake_batch_size: nominal number of samples per batch; only used to
            derive the number of batches.
        shuffle: randomize batch order on each epoch.
    """

    def __init__(self, subset, fake_batch_size, shuffle=False):
        super().__init__()
        source = subset.dataset  # underlying RNATrainDataset
        sample_lengths = source.get_lengths_by_indices(subset.indices)
        self.total_steps = math.ceil(len(sample_lengths) / fake_batch_size)
        self.batches = self.split_list_into_groups_with_indices(
            sample_lengths, self.total_steps)
        # Heaviest batches first so memory pressure shows up immediately.
        self.batches.sort(
            key=lambda grp: sum(source.get_lengths_by_indices(grp)),
            reverse=True)
        self.shuffle = shuffle

    @staticmethod
    def split_list_into_groups_with_indices(nums, n):
        """Partition the indices of ``nums`` into ``n`` groups of near-equal sum.

        Greedy LPT heuristic: visit values in descending order (stable on
        ties) and always append to the currently lightest group, tracked with
        a min-heap of ``(group_sum, group_index)`` pairs.

        Args:
            nums: list of integer lengths.
            n: number of groups.

        Returns:
            A list of ``n`` lists of original indices (empty list if n <= 0).
        """
        if n <= 0:
            return []
        if n == 1:
            return [list(range(len(nums)))]

        # (value, original index), heaviest first; sort is stable so equal
        # values keep their original order.
        order = sorted(((value, pos) for pos, value in enumerate(nums)),
                       key=lambda pair: pair[0], reverse=True)

        groups = [[] for _ in range(n)]
        # Each group has exactly one live heap entry at all times, so a plain
        # pop/push cycle suffices (ties resolve to the lowest group index).
        heap = [(0, gi) for gi in range(n)]
        heapq.heapify(heap)

        for value, pos in order:
            running, gi = heapq.heappop(heap)
            groups[gi].append(pos)
            heapq.heappush(heap, (running + value, gi))

        return groups

    def __iter__(self):
        if self.shuffle:
            random.shuffle(self.batches)
        yield from self.batches

    def __len__(self):
        return self.total_steps

# Nucleotide vocabulary and its index -> base-letter lookup.
tag_list = list("AUCG")
idx2tag = dict(enumerate(tag_list))

def train(train_data_path="./saisdata"):
    """Train an RNAModel with AMP, label smoothing, and early stopping.

    Pipeline: load RNATrainDataset from ``train_data_path``, split 90/10 into
    train/valid, train with AdamW + LabelSmoothingLoss under CUDA mixed
    precision, reduce the LR when validation recovery plateaus, stop after 10
    epochs without improvement, then plot loss/recovery curves.

    Args:
        train_data_path: directory handed to RNATrainDataset
            (presumably the competition data layout — TODO confirm).

    Side effects:
        Writes checkpoints to ./models/ (best model to ./best.pt in the CWD),
        and training curves to ./plots/training_curves.png.
    """
    # Initial setup. The rmtree cleanup is deliberately disabled so previous
    # checkpoints survive a restart.
    # if os.path.exists('./models'):
    #     shutil.rmtree('./models')
    os.makedirs('./models', exist_ok=True)
    os.makedirs('./plots', exist_ok=True)
    
    seeding(42)
    # Speed knobs: cudnn autotune + TF32 matmuls (slightly reduced precision).
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    batch_size = 16
    
    # Data loading: 90/10 random train/valid split.
    dataset = RNATrainDataset(train_data_path)
    train_dataset, valid_dataset = random_split(
        dataset, 
        lengths=[int(0.9 * len(dataset)), len(dataset) - int(0.9 * len(dataset))])
    
    train_loader = DataLoader(
        train_dataset,
        batch_sampler=BalancedLengthBatchSampler(train_dataset, fake_batch_size=batch_size),
        collate_fn=featurize,
        num_workers=4,  # extra workers to keep the GPU fed
        pin_memory=True,
        persistent_workers=True
    )
    
    valid_loader = DataLoader(
        valid_dataset,
        batch_sampler=BalancedLengthBatchSampler(valid_dataset, fake_batch_size=batch_size),
        collate_fn=featurize,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True
    )

    # Model and optimizer.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = RNAModel().to(device)
    
    # AdamW decouples weight decay from the gradient update.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=5e-4,
        weight_decay=0.01  # mild regularization
    )
    
    # Halve the LR after 3 epochs without validation-recovery improvement.
    # NOTE(review): `verbose=` is deprecated in newer torch releases — confirm
    # the installed version still accepts it.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=0.5, patience=3, verbose=True
    )
    
    criterion = LabelSmoothingLoss()
    scaler = amp.GradScaler()  # mixed-precision loss scaling
    # NOTE(review): torch.cuda.amp is deprecated in favor of torch.amp with
    # device_type='cuda' — consider migrating.

    # Training loop with early stopping on validation recovery.
    best_valid_recovery = 0
    num_epoch_no_improve = 0
    max_patience = 10
    train_logs = []
    
    for epoch in range(1, 300):  # up to 299 epochs; early stopping usually ends sooner
        # ---- Training phase ----
        model.train()
        epoch_loss = 0
        train_pbar = tqdm(train_loader, desc=f'Epoch {epoch} [Train]')
        
        for batch in train_pbar:
            # featurize yields (features, mask, per-sample lengths, names,
            # flattened target sequence) — exact shapes defined in
            # data_processing; verify there.
            X, mask, lengths, names, S = batch
            X = X.to(device, non_blocking=True)
            S = S.to(device, non_blocking=True)
            mask = mask.to(device, non_blocking=True)
            
            optimizer.zero_grad()
            
            # Forward/loss under autocast; backward through the scaler.
            with amp.autocast():
                logits = model(X, mask)
                loss = criterion(logits, S)
            
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()
            
            epoch_loss += loss.item()
            train_pbar.set_postfix({'loss': loss.item()})

        epoch_loss /= len(train_loader)
        
        # ---- Validation phase ----
        model.eval()
        valid_loss = 0
        recovery_list = []       # per-batch token-level recovery
        recovery_list_seq = []   # per-sequence recovery (macro average)
        
        with torch.no_grad():
            valid_pbar = tqdm(valid_loader, desc=f'Epoch {epoch} [Valid]')
            for batch in valid_pbar:
                X, mask, lengths, names, S = batch
                X = X.to(device, non_blocking=True)
                S = S.to(device, non_blocking=True)
                mask = mask.to(device, non_blocking=True)
                
                logits = model(X, mask)
                loss = criterion(logits, S)
                valid_loss += loss.item()
                
                # Token-level recovery over the whole (flattened) batch.
                preds = logits.argmax(dim=-1)
                recovery = torch.eq(preds, S).float().mean().item()
                recovery_list.append(recovery)
                valid_pbar.set_postfix({'recovery': recovery})

                # Earlier numpy-based splitting, kept for reference:
                # split_indices = np.cumsum(lengths)[:-1]
                # batch_preds = np.split(preds, split_indices, axis=0)
                # batch_gt = np.split(S, split_indices, axis=0)

                # Split the flat predictions/targets back into per-sequence
                # chunks using the recorded lengths.
                batch_preds = torch.split(preds, lengths.tolist(), dim=0)
                batch_gt = torch.split(S, lengths.tolist(), dim=0)
                 
                # Per-sequence recovery rate.
                for batch_seq, gt_seq in zip(batch_preds, batch_gt):
                    recovery = (batch_seq==gt_seq).sum() / len(gt_seq)
                    recovery_list_seq.append(recovery.cpu().item())

        valid_recovery = np.mean(recovery_list)
        valid_recovery_seq = np.mean(recovery_list_seq)

        valid_loss /= len(valid_loader)
        
        # Log this epoch.
        train_logs.append((epoch_loss, valid_loss, valid_recovery, valid_recovery_seq))
        print(f'Epoch {epoch}: Train Loss={epoch_loss:.4f}, Valid Loss={valid_loss:.4f}, Recovery={valid_recovery:.4f},  Recovery Seq={valid_recovery_seq:.4f}')
        
        # Checkpoints: periodic snapshot every 5 epochs plus a rolling last.pt.
        if epoch % 5 == 0:
            torch.save(model.state_dict(), f'./models/epoch_{epoch}.pt')
        torch.save(model.state_dict(), f'./models/last.pt')
        
        # LR scheduling and early stopping both key off validation recovery.
        scheduler.step(valid_recovery)
        
        if valid_recovery > best_valid_recovery:
            best_valid_recovery = valid_recovery
            num_epoch_no_improve = 0
            # NOTE(review): best checkpoint lands in the CWD, not ./models —
            # confirm this asymmetry is intentional.
            torch.save(model.state_dict(), 'best.pt')
        else:
            num_epoch_no_improve += 1
            if num_epoch_no_improve >= max_patience:
                print(f'Early stopping at epoch {epoch}')
                break
        # Release cached GPU memory between epochs (VRAM is tight).
        torch.cuda.empty_cache()

    # Plot training curves: loss (left) and recovery rate (right).
    train_loss, valid_loss, valid_recovery, _ = zip(*train_logs)
    epoch_index = np.arange(1, len(train_logs) + 1)
    
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(epoch_index, train_loss, label='Train Loss')
    plt.plot(epoch_index, valid_loss, label='Valid Loss')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    
    plt.subplot(1, 2, 2)
    plt.plot(epoch_index, valid_recovery, label='Recovery Rate')
    plt.xlabel('Epoch')
    plt.ylabel('Recovery Rate')
    plt.legend()
    
    plt.tight_layout()
    plt.savefig('./plots/training_curves.png')
    plt.close()

# Script entry point: run a full training session with default data path.
if __name__ == '__main__':
    train()
# Historical results log (validation recovery):
# 63 epoches 0.8483
# 0.8488 with tf32
# best record: 0.8553
# Epoch 184: Train Loss=0.9149, Valid Loss=0.9051, Recovery=0.8514,  Recovery Seq=0.6177
