import os
import sys
import csv
import torch
import argparse
import numpy as np
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
from typing import List
from torch.optim import Adam
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader, random_split
from Bio import SeqIO  # pip install biopython

# Directory containing this script. It is appended to sys.path BEFORE the
# project-local imports below so the sibling modules (rna_model, dataset,
# utils) resolve regardless of the working directory the script is run from.
root_dir = os.path.dirname(os.path.abspath(__file__))
print(root_dir)
sys.path.append(root_dir)
from rna_model import ModelConfig, RNAModel
from dataset import RNADatasetV2, data_preprocess, prepare_rna_batch
from utils import seeding

# --root_dir: root of the data tree; defaults to the parent of this
# script's directory.
parser = argparse.ArgumentParser()
parser.add_argument("--root_dir", type=str, default=os.path.dirname(root_dir))
args = parser.parse_args()

seeding(42)  # fix all RNG seeds so runs are reproducible

# Checkpoint and input-data locations.
ckpt_path: str = f'{root_dir}/weights_4/best.pt'
data_root_dir = args.root_dir
# data_dir = f"{data_root_dir}/saistraindata"
data_dir = f"{data_root_dir}/saisdata"
# NOTE(review): npy_dir is not referenced below — presumably the coords layout
# is consumed inside RNADatasetV2; confirm before removing.
npy_dir = f'{data_dir}/coords'
# data_save_path = f'{data_root_dir}/outputs/test_data.csv'
# data_preprocess(data_save_path, f'{npy_dir}/seqs')

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Output CSVs: model predictions (submission) and ground-truth sequences.
output = f"{data_root_dir}/saisresult/submit_bak.csv"
gt_output = f"{data_root_dir}/saisresult/output_gt.csv"
os.makedirs(f"{data_root_dir}/saisresult", exist_ok=True)


################################################
# Test dataset and loader.
# shuffle=False: this is inference — shuffling gains nothing and (with
# shuffle=True, as originally written) made the submission row order
# non-deterministic across runs. Per-row content is keyed by pdb_id either way.
dataset = RNADatasetV2(data_path=data_dir)
dataloader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=0, collate_fn=prepare_rna_batch)


# Build the model and load the trained checkpoint.
model_config = ModelConfig()
model = RNAModel(model_config).to(device)
# print(model)
# Load model weights (weights_only=True restricts torch.load to tensors,
# avoiding arbitrary-object unpickling from the checkpoint file).
state_dict = torch.load(ckpt_path, map_location=device, weights_only=True)
model.load_state_dict(state_dict)
model.eval()  # disable dropout / batch-norm updates for inference

# ---- Inference over the whole test set -------------------------------------
recovery_list = []      # per-batch token-level recovery rates
recovery_list_seq = []  # per-sequence recovery rates
predictions = []        # predicted sequences (submission rows)
data_gt = []            # ground-truth sequences (for sanity checking)
valid_loss = 0          # loss is not computed at inference; kept for the log line
# Class index -> nucleotide letter (inverse of the A/U/C/G -> 0..3 encoding).
BASE_MAP = {0: 'A', 1: 'U', 2: 'C', 3: 'G'}
with torch.no_grad():
    for batch in tqdm(dataloader):
        X, S, mask, lengths, names = batch
        X = X.to(device)
        S = S.to(device)
        mask = mask.to(device)
        # Flatten the labels down to valid (unpadded) positions only so S
        # aligns one-to-one with the model's flat per-position predictions.
        S = torch.masked_select(S, (mask == 1))
        logits, _ = model(X, mask)
        probs = F.softmax(logits, dim=-1)
        samples = probs.argmax(dim=-1)
        # Batch-level recovery: fraction of correctly predicted bases.
        recovery = torch.eq(samples, S).float().mean().item()
        recovery_list.append(recovery)
        # Split the flat prediction vector back into individual sequences.
        start_idx = 0
        for i, length in enumerate(lengths):
            end_idx = start_idx + length.item()
            sample = samples[start_idx:end_idx]
            gt_S = S[start_idx:end_idx]
            seq_recovery = (sample == gt_S).sum() / len(sample)
            recovery_list_seq.append(seq_recovery.cpu().numpy())
            pdb_id = os.path.splitext(names[i])[0]
            predictions.append({
                "pdb_id": pdb_id,
                "seq": "".join(BASE_MAP[v] for v in sample.cpu().numpy()),
            })
            data_gt.append({
                "pdb_id": pdb_id,
                "seq": "".join(BASE_MAP[v] for v in gt_S.cpu().numpy()),
            })
            # BUG FIX: the original never advanced start_idx, so every
            # sequence after the first one in a batch was sliced from
            # offset 0 and produced wrong predictions/ground truth.
            start_idx = end_idx

    # Save results.
    pd.DataFrame(predictions).to_csv(
        output,
        index=False,
        columns=["pdb_id", "seq"]
    )
    pd.DataFrame(data_gt).to_csv(
        gt_output,
        index=False,
        columns=["pdb_id", "seq"]
    )
    print(f"预测结果已保存到 {output}")

    # valid_loss /= len(valid_loader)
    valid_recovery = np.mean(recovery_list)
    valid_recovery_seq = np.mean(recovery_list_seq)
    # Log the summary metrics.
    print(f'Valid Loss={valid_loss:.4f}, Recovery={valid_recovery:.4f}, Recovery_seq={valid_recovery_seq:.4f}')

def print_csv(file_path):
    """Print every row of a CSV file, one parsed row (as a list) per line.

    Args:
        file_path: Path to the CSV file to dump.
    """
    # newline='' is required by the csv module so quoted fields containing
    # embedded newlines are parsed correctly; utf-8 makes decoding explicit
    # instead of depending on the platform default encoding.
    with open(file_path, 'r', newline='', encoding='utf-8') as csv_file:
        for row in csv.reader(csv_file):
            print(row)

# print_csv(output)
