"""
快速测试训练好的模型
"""
import torch
from transformers import BertTokenizer, BertForSequenceClassification

def interpret_logits(logits):
    """Convert a (1, num_labels) logits tensor into a prediction.

    Args:
        logits: raw model output logits of shape (1, num_labels).

    Returns:
        tuple[int, float]: the argmax class index and its softmax
        probability (confidence).
    """
    probabilities = torch.softmax(logits, dim=1)
    predicted_class = torch.argmax(probabilities, dim=1).item()
    confidence = probabilities[0][predicted_class].item()
    return predicted_class, confidence


def _select_device():
    """Pick the fastest available inference device: MPS if present, else CPU."""
    if torch.backends.mps.is_available():
        print("✅ 使用 MPS 加速推理")
        return torch.device("mps")
    print("使用 CPU 推理")
    return torch.device("cpu")


def main():
    """Load the fine-tuned classifier and run it over hand-picked test sentences."""
    print("加载模型...")
    model_path = "./safety_classifier"
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)

    device = _select_device()
    model.to(device)
    model.eval()

    # Mix of benign and offensive samples to sanity-check both classes.
    test_cases = [
        "人工智能技术正在改变我们的生活方式",
        "这个产品太垃圾了，完全是骗人的傻逼东西",
        "今天天气很好，适合出去散步",
        "黑人都是罪犯，应该滚出中国",
        "机器学习是人工智能的一个重要分支",
        "台湾人都是骗子，非韓不投",
    ]

    print("\n" + "=" * 70)
    print("🧪 模型推理测试")
    print("=" * 70)

    for i, text in enumerate(test_cases, 1):
        # Calling the tokenizer directly is the modern replacement for the
        # deprecated `encode_plus`; it adds special tokens and returns an
        # attention mask by default.
        inputs = tokenizer(
            text,
            max_length=128,
            padding='max_length',
            truncation=True,
            return_tensors='pt',
        )

        input_ids = inputs['input_ids'].to(device)
        attention_mask = inputs['attention_mask'].to(device)

        # Inference only — no autograd bookkeeping needed.
        with torch.no_grad():
            outputs = model(input_ids=input_ids, attention_mask=attention_mask)

        predicted_class, confidence = interpret_logits(outputs.logits)

        # Class 0 is "safe"; anything else is flagged as unsafe.
        status = "✅ 安全" if predicted_class == 0 else "❌ 危险"
        print(f"\n{i}. {status} (置信度: {confidence:.2%})")
        print(f"   文本: {text}")

    print("\n" + "=" * 70)
    print("✅ 测试完成！")


if __name__ == "__main__":
    # Guard so importing this module (e.g. to reuse interpret_logits)
    # does not trigger model loading and inference.
    main()
