import csv
import os

import pandas as pd
import torch
from tqdm import tqdm
from transformers import BertTokenizer, BertForSequenceClassification

def load_data(path):
    """Read a headerless tab-separated file and return its first two columns.

    Each row is expected to be a sentence pair (query1, query2).

    Args:
        path: Path to the TSV file.

    Returns:
        list[list[str]]: One ``[q1, q2]`` pair per input row.
    """
    df = pd.read_csv(
        path,
        sep='\t',
        header=None,
        quoting=csv.QUOTE_NONE,   # raw TSV: treat quote chars as literal text
        keep_default_na=False,    # keep empty cells / "NA" as strings, not NaN
        dtype=str,                # downstream tokenizer requires str inputs
    )
    return df[[0, 1]].values.tolist()

def predict(model_path, test_path, output_path):
    """Run binary sentence-pair classification and write P(label=1) per row.

    Loads a fine-tuned MacBERT checkpoint, scores every (q1, q2) pair in the
    test file, and writes one probability per line (6 decimal places) to
    ``output_path``.

    Args:
        model_path: Path to the saved ``state_dict`` (.bin) of the trained model.
        test_path: Headerless TSV file of sentence pairs.
        output_path: Destination text file; one probability per line.
    """
    print(f"加载模型: {model_path}")
    print(f"读取测试集: {test_path}")
    print(f"预测结果输出到: {output_path}")

    # Single source of truth for device placement (was duplicated per-tensor).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    tokenizer = BertTokenizer.from_pretrained('hfl/chinese-macbert-base')
    model = BertForSequenceClassification.from_pretrained('hfl/chinese-macbert-base', num_labels=2)

    # Load the fine-tuned weights on top of the pretrained architecture.
    model.load_state_dict(torch.load(model_path, map_location=device))
    model.eval()
    model = model.to(device)

    pairs = load_data(test_path)
    results = []

    for q1, q2 in tqdm(pairs, desc="Predicting"):
        encoded = tokenizer(q1, q2, padding='max_length', truncation=True, max_length=64, return_tensors='pt')
        encoded = {k: v.to(device) for k, v in encoded.items()}

        with torch.no_grad():
            logits = model(**encoded)
        # Probability of the positive class (label index 1).
        prob = torch.softmax(logits.logits, dim=1)[:, 1].item()
        results.append(prob)

    # Ensure the output directory exists. Guard against dirname('') when the
    # path has no directory component — os.makedirs('') raises.
    out_dir = os.path.dirname(output_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)

    # Write one probability per line, fixed to 6 decimal places.
    with open(output_path, 'w', encoding='utf-8') as f:
        for p in results:
            f.write(f"{p:.6f}\n")

if __name__ == "__main__":
    # Paths are fixed for the competition directory layout.
    MODEL_PATH = '../../user_data/model_data/model.bin'
    TEST_PATH = '../../tcdata/oppo_breeno_round1_data/testB.tsv'
    OUTPUT_PATH = '../../prediction_result/result.tsv'

    predict(MODEL_PATH, TEST_PATH, OUTPUT_PATH)
