import os
import sys
import csv
import torch
import argparse
import numpy as np
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
from typing import List
from torch.optim import Adam
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from Bio import SeqIO  # pip install biopython
from torch.utils.data import DataLoader, random_split


# Directory containing this script; appended to sys.path so the sibling
# modules (utils, rna_model, dataset) can be imported directly.
root_dir = os.path.abspath(os.path.dirname(__file__))
print(root_dir)
sys.path.append(root_dir)
from utils import seeding
from rna_model import ModelConfig, RNAModel
from dataset import RNADatasetV2, data_preprocess, prepare_rna_batch


# Command-line interface: the data root defaults to the parent of this script's
# directory so the script works without arguments in the expected layout.
parser = argparse.ArgumentParser()
parser.add_argument("--root_dir", type=str, default=os.path.dirname(root_dir))
args = parser.parse_args()

seeding(42)  # fixed seed for reproducible evaluation

# Checkpoint and data locations.
ckpt_path: str = f'{root_dir}/weights_4/best.pt'
data_root_dir = args.root_dir
# data_dir = f"{data_root_dir}/saistraindata"
data_dir = f"{data_root_dir}/saisdata"
# npy_dir = f'{data_dir}/coords'
# seq_dir =  f'{data_dir}/seqs'


device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Output CSV for the submission; make sure its directory exists up front.
output = f"{data_root_dir}/saisresult/submit_eval.csv"
os.makedirs(f"{data_root_dir}/saisresult", exist_ok=True)


################################################

# Also added in the training script
class LabelSmoothingLoss(nn.Module):
    """Cross-entropy loss with label smoothing.

    The target class receives probability ``1 - smoothing``; the remaining
    ``smoothing`` mass is spread uniformly over the other ``classes - 1``
    classes (note: this differs from ``F.cross_entropy(label_smoothing=...)``,
    which also puts smoothing mass on the target class).

    Args:
        classes: number of output classes (4 RNA bases by default).
        smoothing: total probability mass moved off the target class.
        ignore_index: target value whose positions are excluded from the loss.
    """

    def __init__(self, classes=4, smoothing=0.2, ignore_index=-100):  # larger smoothing coefficient
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.classes = classes
        self.ignore_index = ignore_index

    def forward(self, pred, target):
        """Return the mean smoothed NLL of ``pred`` (N, C) against ``target`` (N,).

        Bug fix: the original stored ``ignore_index`` but never applied it, so
        a target of -100 would be passed to ``scatter_`` and crash (or, for a
        valid-looking index, silently be counted). Such positions are now
        masked out before the loss is computed.
        """
        valid = target != self.ignore_index
        if not valid.any():
            # No valid targets: zero loss that still participates in autograd.
            return pred.sum() * 0.0
        pred = pred[valid]
        target = target[valid]

        log_probs = pred.log_softmax(dim=-1)
        with torch.no_grad():
            # Smoothed one-hot target distribution.
            true_dist = torch.full_like(log_probs, self.smoothing / (self.classes - 1))
            true_dist.scatter_(1, target.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-true_dist * log_probs, dim=-1))


# Build the dataset and a 90/10 train/valid split (split sizes must sum to
# len(dataset), hence the explicit remainder for the validation part).
dataset = RNADatasetV2(data_path=data_dir)
n_train = int(0.9 * len(dataset))
train_dataset, valid_dataset = random_split(dataset, lengths=[n_train, len(dataset) - n_train])

# NOTE(review): inference below iterates the *full* dataset loader; the valid
# loader is only referenced for averaging — confirm this is intended.
dataloader = DataLoader(dataset, batch_size=8, shuffle=False, num_workers=0, collate_fn=prepare_rna_batch)
valid_loader = DataLoader(valid_dataset, batch_size=8, shuffle=False, num_workers=0, collate_fn=prepare_rna_batch)


# Model construction and checkpoint loading.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_config = ModelConfig()
model = RNAModel(model_config).to(device)
# Load model weights. Bug fix: the original hard-coded the absolute path
# '/workspace/sais_medicine/code/weights_4/best.pt', ignoring the portable
# `ckpt_path` derived from root_dir above; use that so the script runs
# outside the original workspace.
# best.pt: train 0.6816, valid_seq 0.6729, val_token 0.8403
# alternative f'{root_dir}/weights_4/last.pt': train 0.6895, valid 0.6654
state_dict = torch.load(ckpt_path, map_location=device)
model.load_state_dict(state_dict)


# Run inference over the whole dataset, collecting per-batch token recovery,
# per-sequence recovery, and the predicted sequences for submission.
predictions = []
BASE_MAP = {'A': 0, 'U': 1, 'C': 2, 'G': 3}
BASE_MAP = {v: k for k, v in BASE_MAP.items()}  # invert: class index -> base letter
recovery_list = []
recovery_list_seq = []
model.eval()
criterion = LabelSmoothingLoss()
valid_loss = 0
with torch.no_grad():
    for batch in tqdm(dataloader):
        X, S, mask, lengths, names = batch
        X = X.to(device, non_blocking=True)
        S = S.to(device, non_blocking=True)
        mask = mask.to(device, non_blocking=True)

        logits, _ = model(X, mask)
        S = torch.masked_select(S, (mask == 1))  # keep labels at valid positions only
        loss = criterion(logits, S)
        valid_loss += loss.item()

        preds = logits.argmax(dim=-1)
        # Bug fix: the original computed and appended this recovery twice per
        # batch (exact duplicate statements), double-counting every batch in
        # recovery_list; compute it once.
        recovery = torch.eq(preds, S).float().mean().item()
        recovery_list.append(recovery)

        # Split the flat per-token predictions back into per-sequence chunks
        # using the original sequence lengths.
        start_idx = 0
        for i, length in enumerate(lengths):
            end_idx = start_idx + length.item()
            sample = preds[start_idx: end_idx]
            gt_S = S[start_idx: end_idx]
            recovery = (sample == gt_S).sum() / len(sample)
            recovery_list_seq.append(recovery.cpu().numpy())
            start_idx = end_idx

            predictions.append({
                "pdb_id": os.path.splitext(names[i])[0],
                "seq": "".join([BASE_MAP[v] for v in sample.cpu().numpy()])
            })

# Bug fix: the loop above accumulates loss over `dataloader` (the full
# dataset), but the original divided by len(valid_loader), reporting a
# wrongly scaled mean loss; divide by the number of batches actually seen.
valid_loss /= len(dataloader)
valid_recovery = np.mean(recovery_list)
valid_recovery_seq = np.mean(recovery_list_seq)

# Save predictions as the submission CSV.
pd.DataFrame(predictions).to_csv(
    output,
    index=False,
    columns=["pdb_id", "seq"]
)
print(f"预测结果已保存到 {output}")

# Log summary metrics.
print(f'Valid Loss={valid_loss:.4f}, Recovery={valid_recovery:.4f}, Recovery_seq={valid_recovery_seq:.4f}')