import sys
import os
import torch
import pandas as pd
from torch.nn.utils.rnn import pad_sequence

# ====== Path configuration ======
# Resolve the project root (three directory levels up from this file) and
# prepend the project's code directories to sys.path so that the local
# modules imported below can be found regardless of the working directory.
current_path = os.path.abspath(__file__)
project_root = os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
sys.path.insert(0, os.path.join(project_root, "code", "train"))
sys.path.insert(0, os.path.join(project_root, "code"))
sys.path.insert(0, project_root)

# ====== Model imports (resolved via the sys.path entries added above) ======
from config import Config
from train import SemanticMatchModel


def predict():
    """Run inference on the test set and write one probability per line.

    Loads the trained ``SemanticMatchModel`` checkpoint from
    ``user_data/model_data/model.pth``, batches the padded test query
    pairs, and writes the model's output probabilities — one value per
    input row, in input order — to ``prediction_result/result.tsv``.
    """
    # Prefer GPU when available.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Restore the trained weights.
    # NOTE(review): checkpoints are pickle files; on torch >= 1.13 prefer
    # torch.load(..., weights_only=True) to avoid arbitrary-code execution
    # if the checkpoint could ever come from an untrusted source.
    model = SemanticMatchModel().to(device)
    model_path = os.path.join(project_root, "user_data", "model_data", "model.pth")
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()

    # Load the raw test pairs (tab-separated; each field is a
    # space-delimited token-id string). keep_default_na=False keeps
    # missing/empty fields as "" instead of float NaN, which would
    # otherwise crash ids_str.split() below.
    test_path = os.path.join(project_root, "tcdata", "oppo_breeno_round1_data", "test.tsv")
    test_df = pd.read_csv(
        test_path,
        sep="\t",
        names=["query1", "query2"],
        dtype=str,
        keep_default_na=False,
    )

    def filter_and_convert(ids_str):
        """Parse a space-separated token-id string into a 1-D LongTensor.

        Ids >= Config.VOCAB_SIZE would index past the embedding table, so
        they are dropped. A fully-filtered (or empty) query is mapped to a
        single padding token so no zero-length sequence ever reaches the
        model.
        """
        filtered = [i for i in map(int, ids_str.split()) if i < Config.VOCAB_SIZE]
        if not filtered:
            filtered = [Config.PADDING_IDX]
        return torch.tensor(filtered, dtype=torch.long)

    test_df["query1_ids"] = test_df["query1"].apply(filter_and_convert)
    test_df["query2_ids"] = test_df["query2"].apply(filter_and_convert)

    class TestDataset(torch.utils.data.Dataset):
        """Wraps the pre-tokenized query pairs for batched inference."""

        def __init__(self, df):
            self.query1 = df["query1_ids"].tolist()
            self.query2 = df["query2_ids"].tolist()

        def __len__(self):
            return len(self.query1)

        def __getitem__(self, idx):
            return {"query1_ids": self.query1[idx], "query2_ids": self.query2[idx]}

        @staticmethod
        def collate_fn(batch):
            # Right-pad both sides to the longest sequence in the batch.
            q1 = pad_sequence([x["query1_ids"] for x in batch], batch_first=True, padding_value=Config.PADDING_IDX)
            q2 = pad_sequence([x["query2_ids"] for x in batch], batch_first=True, padding_value=Config.PADDING_IDX)
            return {"query1_ids": q1, "query2_ids": q2}

    test_loader = torch.utils.data.DataLoader(
        TestDataset(test_df),
        batch_size=Config.BATCH_SIZE,
        collate_fn=TestDataset.collate_fn,
    )

    # Inference. reshape(-1) flattens each batch's output to 1-D, which —
    # unlike squeeze() — also handles a final batch of size 1 without a
    # scalar special case.
    probs = []
    with torch.no_grad():
        for batch in test_loader:
            q1 = batch["query1_ids"].to(device)
            q2 = batch["query2_ids"].to(device)
            probs.extend(model(q1, q2).reshape(-1).tolist())

    # Write one probability per line, matching the input row order.
    output_dir = os.path.join(project_root, "prediction_result")
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "result.tsv")

    with open(output_path, "w", encoding="utf-8") as f:
        f.writelines(f"{prob}\n" for prob in probs)

    print(f"预测结果已保存到: {output_path}")


if __name__ == "__main__":
    # Script entry point: run inference only when executed directly,
    # not when the module is imported.
    predict()