from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

def load_model_and_tokenizer(
    model_path="test_trainer",
    tokenizer_name="google-bert/bert-base-cased",
    num_labels=5,
):
    """Load a fine-tuned classification model and its tokenizer.

    Args:
        model_path: Directory containing the fine-tuned model weights.
        tokenizer_name: Hub name or local path of the tokenizer to load.
            Defaults to the base checkpoint the model was fine-tuned from,
            since the fine-tuning output directory may not contain
            tokenizer files.
        num_labels: Number of classification labels. Defaults to 5
            (Yelp ratings are 1-5 stars).

    Returns:
        tuple: (model, tokenizer) — the loaded sequence-classification
        model and its matching tokenizer.
    """
    # Tokenizer is loaded from the base checkpoint by default (see note
    # on tokenizer_name above).
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)

    # Load the fine-tuned model; torch_dtype="auto" uses the dtype
    # stored in the checkpoint instead of forcing float32.
    model = AutoModelForSequenceClassification.from_pretrained(
        model_path,
        num_labels=num_labels,
        torch_dtype="auto",
    )
    return model, tokenizer

def predict(text, model, tokenizer):
    """Score a single review text with the classification model.

    Args:
        text: Review text to classify.
        model: Sequence-classification model (returns an output object
            with a ``logits`` attribute).
        tokenizer: Tokenizer matching the model.

    Returns:
        tuple: (predicted star rating as an int in 1-5,
        list of per-class probabilities for the first input)
    """
    # Encode the text into model-ready tensors.
    encoded = tokenizer(text, padding=True, truncation=True, return_tensors="pt")

    # Forward pass with gradient tracking disabled (inference only).
    with torch.no_grad():
        logits = model(**encoded).logits

    # Softmax over the label dimension gives per-class probabilities.
    probs = torch.nn.functional.softmax(logits, dim=-1)
    # Class indices are 0-based; star ratings start at 1.
    star = probs.argmax(dim=-1).item() + 1

    return star, probs[0].tolist()

def main():
    """Load the fine-tuned model and score one sample review."""
    model, tokenizer = load_model_and_tokenizer()

    # Single hard-coded positive review used as a smoke test.
    sample = "The food was amazing and the service was excellent!"
    rating, probs = predict(sample, model, tokenizer)

    print(f"输入文本: {sample}")
    print(f"预测评分: {rating}星")
    print(f"各星级概率: {[f'{p:.3f}' for p in probs]}")

if __name__ == "__main__":
    main()