import argparse
import functools
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from model import RNAModel
from data_processing import RNAPredictDataset, featurize, idx2tag
from utils import seeding
def predict(predict_data_path, output_dir):
    """Run ensemble inference over a prediction dataset and write a submission CSV.

    Loads every checkpoint found in a fixed list of paths, averages the
    models' logits per batch, converts the argmax indices to tag strings via
    ``idx2tag``, and writes ``<output_dir>/submit.csv`` with columns
    ``pdb_id`` and ``seq``.

    Args:
        predict_data_path: Path passed to ``RNAPredictDataset``.
        output_dir: Directory for ``submit.csv`` (created if missing).

    Raises:
        ValueError: If none of the expected checkpoint files exist.
    """
    # Deterministic setup; benchmark mode picks the fastest cuDNN kernels
    # for the (fixed) input shapes.
    seeding(42)
    torch.backends.cudnn.benchmark = True

    # Data loading (no labels at prediction time).
    dataset = RNAPredictDataset(predict_data_path)
    test_loader = DataLoader(
        dataset,
        batch_size=8,
        collate_fn=functools.partial(featurize, has_label=False),
        num_workers=4,
        pin_memory=True,
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Model ensemble: load every checkpoint that exists on disk.
    # (Fix: the original also built an extra unused RNAModel here, wasting
    # memory, whose name was then shadowed by the ensemble loop variable.)
    model_paths = ['./best.pt', './models/epoch_40.pt', './models/epoch_45.pt', './models/epoch_50.pt']  # example paths
    models = []
    for path in model_paths:
        if os.path.exists(path):
            m = RNAModel().to(device)
            m.load_state_dict(torch.load(path, map_location=device))
            m.eval()
            models.append(m)

    if not models:
        raise ValueError("No model checkpoints found!")

    names_list = []
    predictions = []

    # Mixed precision only makes sense on CUDA; torch.cuda.amp.autocast is
    # deprecated in favor of torch.autocast(device_type=...).
    use_amp = device.type == "cuda"

    with torch.no_grad():
        for batch in tqdm(test_loader, desc="Predicting"):
            X, mask, lengths, names = batch
            X = X.to(device, non_blocking=True)
            mask = mask.to(device, non_blocking=True)

            # Ensemble: average logits across models. Cast each output to
            # float32 before accumulating so the average is not computed in
            # half precision under autocast.
            logits = None
            for m in models:
                with torch.autocast(device_type=device.type, enabled=use_amp):
                    out = m(X, mask)
                out = out.float()
                logits = out if logits is None else logits + out
            logits /= len(models)

            # Per-position class indices; assumes model output is packed
            # along dim 0 so cumulative lengths delimit samples — TODO
            # confirm against featurize/RNAModel.
            preds = logits.argmax(dim=-1).cpu().numpy()
            split_indices = np.cumsum(lengths)[:-1]
            batch_preds = np.split(preds, split_indices, axis=0)

            # Map index predictions to tag characters per sample.
            batch_seqs = ["".join(idx2tag[idx] for idx in p) for p in batch_preds]

            names_list.extend(names)
            predictions.extend(batch_seqs)

    # Persist results.
    os.makedirs(output_dir, exist_ok=True)
    result_df = pd.DataFrame({"pdb_id": names_list, "seq": predictions})
    result_df.to_csv(os.path.join(output_dir, "submit.csv"), index=False)

if __name__ == '__main__':
    # CLI entry point; defaults match the competition's expected layout.
    cli = argparse.ArgumentParser(description="Run RNA sequence prediction.")
    cli.add_argument("--predict_data_path", type=str, default="./saisdata")
    cli.add_argument("--output_dir", type=str, default="./saisresult")
    opts = cli.parse_args()
    predict(opts.predict_data_path, opts.output_dir)