import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from matplotlib import pyplot as plt
import math
import random
import shutil
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
# from matplotlib import pyplot as plt
import torch.cuda.amp as amp

from dataset import RNADatasetV2, prepare_rna_batch
from model import ModelConfig, RNAModel
from utils import seeding
import heapq
import torch.nn.functional as F


# Also added in the training script
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy loss with label smoothing.

    The target distribution puts ``1 - smoothing`` probability mass on the
    true class and spreads ``smoothing`` uniformly over the remaining
    ``classes - 1`` classes.

    Args:
        classes: number of classes (size of the last dim of ``pred``).
        smoothing: total probability mass moved off the true class.
        ignore_index: target value whose positions are excluded from the loss.
    """

    def __init__(self, classes=4, smoothing=0.2, ignore_index=-100):  # increased smoothing factor
        super().__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.classes = classes
        self.ignore_index = ignore_index

    def forward(self, pred, target):
        """Compute the smoothed loss.

        Args:
            pred: raw logits of shape (N, classes).
            target: class indices of shape (N,); entries equal to
                ``ignore_index`` are skipped.

        Returns:
            Scalar mean loss over the non-ignored positions.
        """
        pred = pred.log_softmax(dim=-1)
        # Fix: the original stored ignore_index but never applied it, so a
        # target of -100 would make scatter_ fail. Drop ignored rows first.
        valid = target != self.ignore_index
        pred = pred[valid]
        target = target[valid]
        if target.numel() == 0:
            # No valid positions: return a zero that stays in the autograd graph.
            return pred.sum() * 0.0
        with torch.no_grad():
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.classes - 1))
            # .data was unnecessary (we are already inside no_grad).
            true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-true_dist * pred, dim=-1))

class BalancedLengthBatchSampler(torch.utils.data.Sampler):
    """Batch sampler that balances total sequence length across batches.

    Samples are greedily distributed over a fixed number of batches so every
    batch has a roughly equal sum of sequence lengths, evening out per-step
    compute when sequences are padded to the batch maximum.

    Args:
        subset: a ``torch.utils.data.Subset`` whose ``.dataset`` exposes
            ``get_lengths_by_indices`` (project-specific API).
        fake_batch_size: nominal batch size used only to derive the number of
            batches, ``ceil(len(subset) / fake_batch_size)``; actual batch
            sizes vary so that length sums match.
        shuffle: if True, the batch order is re-shuffled on each iteration.
    """

    def __init__(self, subset, fake_batch_size, shuffle=False):
        super().__init__()
        dataset = subset.dataset
        lengths = dataset.get_lengths_by_indices(subset.indices)
        self.total_steps = math.ceil(len(lengths) / fake_batch_size)
        self.batches = self.split_list_into_groups_with_indices(lengths, self.total_steps)
        # Longest batches first, so any memory problem surfaces immediately.
        self.batches.sort(key=lambda b: sum(dataset.get_lengths_by_indices(b)), reverse=True)
        self.shuffle = shuffle

    @staticmethod
    def split_list_into_groups_with_indices(nums, n):
        """Partition the indices of ``nums`` into ``n`` groups of near-equal sum.

        Greedy "longest processing time" heuristic: sort the values in
        descending order and always place the next value into the group with
        the smallest current sum (tracked with a min-heap).

        Args:
            nums: list of numbers (sequence lengths).
            n: number of groups.

        Returns:
            A list of ``n`` index lists into ``nums`` (empty list if
            ``n <= 0``; a single group holding all indices if ``n == 1``).
        """
        if n <= 0:
            return []
        if n == 1:
            return [list(range(len(nums)))]

        # (original index, value) pairs, largest values first. Sort is stable,
        # so ties keep their original index order (same as the original code).
        indexed_nums = sorted(enumerate(nums), key=lambda p: p[1], reverse=True)

        groups = [[] for _ in range(n)]  # one list of indices per batch
        # Min-heap of (current group sum, group index). Each group always has
        # exactly one live heap entry (pop -> push), so entries can never be
        # stale; the original's validity-check loop was unreachable dead code.
        heap = [(0, i) for i in range(n)]
        heapq.heapify(heap)

        for idx, num in indexed_nums:
            total, group_idx = heapq.heappop(heap)
            groups[group_idx].append(idx)
            heapq.heappush(heap, (total + num, group_idx))

        return groups

    def __iter__(self):
        """Yield one list of dataset indices per batch."""
        batches = self.batches
        if self.shuffle:
            # In-place shuffle: the batch order is re-randomized each epoch.
            random.shuffle(batches)
        yield from batches

    def __len__(self):
        return self.total_steps

def eval(train_data_path="./saisdata", ckpt=None):
    """Evaluate a trained RNAModel checkpoint on the held-out validation split.

    Args:
        train_data_path: directory handed to ``RNADatasetV2``.
        ckpt: path to the state_dict to load; if None, falls back to the
            hard-coded best checkpoint below.

    Prints the mean validation loss, batch-level token recovery and
    per-sequence recovery. (NOTE: the name shadows the builtin ``eval``;
    kept for compatibility with existing callers.)
    """
    # Output directories.
    # NOTE(review): wiping ./models during *evaluation* looks copied from the
    # training script — confirm this is intended before running on real runs.
    if os.path.exists('./models'):
        shutil.rmtree('./models')
    os.makedirs('./models', exist_ok=True)
    os.makedirs('./plots', exist_ok=True)

    seeding(42)
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True
    batch_size = 8

    # Data: deterministic (seeded) 90/10 split; only the validation side is
    # used here. The original also built a train DataLoader that was never
    # iterated — removed.
    dataset = RNADatasetV2(train_data_path)
    train_size = int(0.9 * len(dataset))
    train_dataset, valid_dataset = random_split(
        dataset,
        lengths=[train_size, len(dataset) - train_size])

    valid_loader = DataLoader(
        valid_dataset,
        batch_sampler=BalancedLengthBatchSampler(valid_dataset, fake_batch_size=batch_size),
        collate_fn=prepare_rna_batch,
        num_workers=4,
        pin_memory=True,
        persistent_workers=True
    )

    # Model (the original constructed it twice).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = RNAModel(ModelConfig()).to(device)

    # Fix: the original unconditionally overwrote the ``ckpt`` argument with a
    # hard-coded path, silently ignoring the caller's choice. The argument now
    # wins; the hard-coded path is only a fallback.
    if ckpt is None:
        ckpt = '/workspace/sais_medicine/code/weights_4/best.pt'  # train: 0.6816 valid_seq: 0.6729 val_token: 0.8403
        # ckpt = '/workspace/sais_medicine/code/weights_4/last.pt' # train: 0.6895 valid: 0.6654
    state_dict = torch.load(ckpt, map_location=device)
    model.load_state_dict(state_dict)

    criterion = LabelSmoothingLoss()  # the original instantiated this twice
    model.eval()

    valid_loss = 0.0
    recovery_list = []       # token-level accuracy, one entry per batch
    recovery_list_seq = []   # accuracy per individual sequence
    with torch.no_grad():
        for batch in tqdm(valid_loader):
            X, S, mask, lengths, names = batch
            X = X.to(device, non_blocking=True)
            S = S.to(device, non_blocking=True)
            mask = mask.to(device, non_blocking=True)

            logits, _ = model(X, mask)
            # Keep labels only at valid (unpadded) positions; the model is
            # assumed to return logits already flattened the same way —
            # TODO confirm against RNAModel.forward.
            S = torch.masked_select(S, (mask == 1))
            loss = criterion(logits, S)
            valid_loss += loss.item()

            preds = logits.argmax(dim=-1)
            # Fix: the original appended the same batch recovery twice
            # (harmless for the mean, but misleading).
            recovery_list.append(torch.eq(preds, S).float().mean().item())

            # Per-sequence recovery: walk the flattened predictions by length.
            start_idx = 0
            for length in lengths:
                end_idx = start_idx + length.item()
                sample = preds[start_idx:end_idx]
                gt_S = S[start_idx:end_idx]
                recovery = (sample == gt_S).sum() / len(sample)
                recovery_list_seq.append(recovery.cpu().numpy())
                start_idx = end_idx

    valid_loss /= len(valid_loader)
    valid_recovery = np.mean(recovery_list)
    valid_recovery_seq = np.mean(recovery_list_seq)

    print(f'Valid Loss={valid_loss:.4f}, Recovery={valid_recovery:.4f}, Recovery_seq={valid_recovery_seq:.4f}')


if __name__ == '__main__':
    # Entry point: evaluate a checkpoint on the held-out validation split.
    eval(train_data_path="./saisdata", ckpt='./code/weights_4/last.pt')

# 63 epoches 0.8483
# 0.8488 with tf32
# best record: 0.8553
# Epoch 99: Train Loss=0.9252, Valid Loss=0.9086, Recovery=0.8486
