import os
import sys
import tree
import torch
import os
import math
import random
import shutil
import numpy as np
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, random_split
from tqdm import tqdm
from matplotlib import pyplot as plt
import torch.amp as amp
import heapq

# Pin training to GPU 0 and allow the CUDA caching allocator to grow segments,
# which reduces fragmentation-related OOMs on long runs.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'


# Make sibling modules importable regardless of the current working directory.
root_dir = os.path.dirname(os.path.abspath(__file__))
print(root_dir)
sys.path.append(root_dir)
from GVPmodel import GVPTransCond, geo_batch
from inference_ribodiffusion import get_config
from RNADataset import RNADatasetV3, collate_fn

class LabelSmoothingLoss(nn.Module):
    """Cross-entropy loss with label smoothing.

    The target class receives probability ``1 - smoothing`` and the remaining
    mass is spread uniformly over the other ``classes - 1`` classes.

    Args:
        classes: number of output classes (4 nucleotides by default).
        smoothing: total probability mass moved off the target class.
        ignore_index: target value whose positions are excluded from the loss.
    """

    def __init__(self, classes=4, smoothing=0.2, ignore_index=-100):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.classes = classes
        self.ignore_index = ignore_index

    def forward(self, pred, target):
        """Return the scalar smoothed loss.

        Args:
            pred: raw logits of shape (..., classes).
            target: integer class indices of shape (...).

        Returns:
            Mean loss over positions where ``target != ignore_index``.
        """
        pred = pred.log_softmax(dim=-1)
        with torch.no_grad():
            # Uniform off-target mass, then `confidence` on the target class.
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.classes - 1))
            # BUG FIX: ignore_index was stored but never used — a target of
            # -100 would previously crash scatter_ (negative index) and
            # ignored positions leaked into the mean. Mask them out.
            ignore_mask = target.eq(self.ignore_index)
            safe_target = target.masked_fill(ignore_mask, 0)
            true_dist.scatter_(dim=-1, index=safe_target.unsqueeze(-1),
                               value=self.confidence)
        per_pos = torch.sum(-true_dist * pred, dim=-1)
        per_pos = per_pos.masked_fill(ignore_mask, 0.0)
        # Average over valid positions only; clamp avoids 0/0 when the
        # whole batch is ignored.
        n_valid = (~ignore_mask).sum().clamp(min=1)
        return per_pos.sum() / n_valid

def _batch_to_device(batch, device):
    """Move every tensor (or list of tensors) in *batch* to *device*, in place.

    Returns the same dict for convenient chaining.
    """
    for key, value in batch.items():
        if isinstance(value, list):
            batch[key] = [v.to(device) for v in value]
        else:
            batch[key] = value.to(device)
    return batch


def train(train_data_path="./saisdata"):
    """Train GVPTransCond on the RNA dataset with mixed precision.

    Saves periodic checkpoints under ./models/, the latest weights to
    ./models/last.pt, and the best-recovery weights to ./best.pt.
    Stops early after `max_patience` epochs without recovery improvement.

    Args:
        train_data_path: directory containing the training data.
    """
    os.makedirs('./models', exist_ok=True)
    os.makedirs('./plots', exist_ok=True)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)

    # Reproducibility and throughput knobs.
    seed = 42
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True
    torch.backends.cuda.matmul.allow_tf32 = True

    config = get_config()
    model = GVPTransCond(config).to(device)

    # BUG FIX: train_data_path was accepted but ignored — a hard-coded
    # absolute path ("/data/slz/sais_medicine/saisdata") was used instead.
    dataset = RNADatasetV3(data_path=train_data_path, is_train=True)
    n_train = int(0.9 * len(dataset))
    train_dataset, valid_dataset = random_split(
        dataset, lengths=[n_train, len(dataset) - n_train])
    train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True,
                              collate_fn=collate_fn, num_workers=8)
    # Validation order does not affect the metrics, so no shuffling.
    valid_loader = DataLoader(valid_dataset, batch_size=1, shuffle=False,
                              collate_fn=collate_fn, num_workers=8)

    # AdamW with weight decay for regularization.
    optimizer = optim.AdamW(model.parameters(), lr=5e-4, weight_decay=0.01)
    # Halve the LR after 3 epochs without a recovery improvement.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='max', factor=0.5, patience=3)

    criterion = LabelSmoothingLoss()
    scaler = amp.GradScaler('cuda')  # mixed-precision loss scaling

    best_valid_recovery = 0
    num_epoch_no_improve = 0
    max_patience = 5
    train_logs = []

    for epoch in range(1, 300):  # at most 299 epochs (early stopping below)
        # ---- training phase ----
        model.train()
        epoch_loss = 0
        train_pbar = tqdm(train_loader, desc=f'Epoch {epoch} [Train]')
        for batch in train_pbar:
            batch = _batch_to_device(batch, device)
            # batch_size is 1, so 'seq' holds a single ground-truth sequence.
            gt = batch['seq'][0].unsqueeze(0)

            optimizer.zero_grad()
            with amp.autocast('cuda'):  # mixed-precision forward pass
                logits = model(batch)
                loss = criterion(logits, gt)
            scaler.scale(loss).backward()
            scaler.step(optimizer)
            scaler.update()

            epoch_loss += loss.item()
            train_pbar.set_postfix({'loss': loss.item()})
        epoch_loss /= len(train_loader)

        # ---- validation phase ----
        model.eval()
        valid_loss = 0
        recovery_list = []
        with torch.no_grad():
            valid_pbar = tqdm(valid_loader, desc=f'Epoch {epoch} [Valid]')
            for batch in valid_pbar:
                batch = _batch_to_device(batch, device)
                gt = batch['seq'][0].unsqueeze(0)
                logits = model(batch)
                valid_loss += criterion(logits, gt).item()

                # Recovery = fraction of positions predicted correctly.
                preds = logits.argmax(dim=-1)
                recovery = torch.eq(preds, gt).float().mean().item()
                recovery_list.append(recovery)
                valid_pbar.set_postfix({'recovery': recovery})

        valid_recovery = np.mean(recovery_list)
        valid_recovery_seq = 0  # per-sequence recovery not computed yet
        valid_loss /= len(valid_loader)

        # Log and report epoch metrics.
        train_logs.append((epoch_loss, valid_loss, valid_recovery, valid_recovery_seq))
        print(f'Epoch {epoch}: Train Loss={epoch_loss:.4f}, Valid Loss={valid_loss:.4f}, Recovery={valid_recovery:.4f},  Recovery Seq={valid_recovery_seq:.4f}')

        # Checkpointing: every 5 epochs plus a rolling "last" snapshot.
        if epoch % 5 == 0:
            torch.save(model.state_dict(), f'./models/epoch_{epoch}.pt')
        torch.save(model.state_dict(), f'./models/last.pt')

        # LR schedule and early stopping, both driven by validation recovery.
        scheduler.step(valid_recovery)
        if valid_recovery > best_valid_recovery:
            best_valid_recovery = valid_recovery
            num_epoch_no_improve = 0
            torch.save(model.state_dict(), 'best.pt')
        else:
            num_epoch_no_improve += 1
            if num_epoch_no_improve >= max_patience:
                print(f'Early stopping at epoch {epoch}')
                break

        # Release cached CUDA memory between epochs (GPU memory is tight).
        torch.cuda.empty_cache()

if __name__ == "__main__":
    # Script entry point: train with the default data directory.
    train()

    
    
    