import torch
from transformers import BertTokenizer
from model import BertSpamClassifier
from cot_utils import generate_cot

PRETRAINED_MODEL = 'bert-base-uncased'  # HuggingFace model id used for both tokenizer and encoder
MAX_LEN = 128  # max token length for the combined "text [SEP] cot" input
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the tokenizer and the fine-tuned spam-classifier checkpoint.
tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL)
model = BertSpamClassifier(PRETRAINED_MODEL)
# NOTE(review): torch.load without weights_only=True unpickles arbitrary objects;
# if 'spam_bert_cot.pth' is a plain state_dict, prefer weights_only=True.
model.load_state_dict(torch.load('spam_bert_cot.pth', map_location=DEVICE))
model.to(DEVICE)
model.eval()  # disable dropout etc. for inference

# Prediction entry point
def predict_spam(text):
    """Classify a single SMS message as spam or ham.

    The message is augmented with a generated chain-of-thought (CoT)
    rationale, and the pair "text [SEP] cot" is fed to the fine-tuned
    BERT classifier.

    Args:
        text: Raw SMS message to classify.

    Returns:
        A ``(label, cot)`` tuple where ``label`` is ``'spam'`` or ``'ham'``
        and ``cot`` is the generated reasoning string.
    """
    cot = generate_cot(text)
    combined = ' [SEP] '.join((text, cot))
    enc = tokenizer(
        combined,
        truncation=True,
        padding='max_length',
        max_length=MAX_LEN,
        return_tensors='pt',
    )
    with torch.no_grad():
        logits = model(
            enc['input_ids'].to(DEVICE),
            enc['attention_mask'].to(DEVICE),
        )
    # Class index 1 is spam, 0 is ham.
    predicted_class = int(logits.argmax(dim=1))
    return ('spam' if predicted_class == 1 else 'ham'), cot

if __name__ == "__main__":
    # 示例
    text = input("请输入短信内容：")
    label, cot = predict_spam(text)
    print(f"预测结果：{label}")
    print(f"思维链推理：{cot}") 