# sentiment_analyzer.py
import torch
from transformers import BertTokenizer
from torch import nn
from transformers import BertModel


# 1. 模型定义（需与训练时一致）
class BertBiLSTM(nn.Module):
    """BERT encoder -> 2-layer bidirectional LSTM -> binary classifier.

    The architecture must match the one used at training time exactly,
    otherwise the saved ``state_dict`` will fail to load.
    """

    def __init__(self, bert_path=r"C:\Users\dell\Desktop\PythonProject\大作业\chinese\models--bert-base-chinese\snapshots\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f"):
        """
        Args:
            bert_path: directory (or hub id) of the pretrained
                bert-base-chinese weights. Defaults to the original
                hard-coded local snapshot for backward compatibility;
                pass an explicit path to run on another machine.
        """
        super().__init__()
        self.bert = BertModel.from_pretrained(bert_path)
        self.lstm = nn.LSTM(
            input_size=768,   # bert-base hidden size
            hidden_size=256,
            num_layers=2,
            batch_first=True,
            bidirectional=True
        )
        self.dropout = nn.Dropout(0.3)
        # Bidirectional LSTM => 256 * 2 = 512 features in, 2 classes out.
        self.classifier = nn.Linear(512, 2)

    def forward(self, input_ids, attention_mask):
        """Return raw (unnormalized) class logits of shape (batch, 2).

        NOTE(review): the classifier reads the LSTM output at the LAST
        time step (index -1). With padding="max_length" that position is
        usually a [PAD] token. This is kept because it must match the
        training-time code, but pooling the last *unpadded* step (via
        attention_mask) would likely be more informative — confirm
        against the training script before changing.
        """
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        sequence_output = outputs[0]  # (batch, seq_len, 768) token embeddings
        lstm_output, _ = self.lstm(sequence_output)
        lstm_output = self.dropout(lstm_output[:, -1, :])
        return self.classifier(lstm_output)


# 2. 初始化分析器
class SentimentAnalyzer:
    """Loads a trained BertBiLSTM checkpoint and scores Chinese text."""

    def __init__(self, model_path, bert_path=r"C:\Users\dell\Desktop\PythonProject\大作业\chinese\models--bert-base-chinese\snapshots\c30a6ed22ab4564dc1e3b2ecbf6e766b0611a33f"):
        """
        Args:
            model_path: path to the fine-tuned state_dict checkpoint
                (e.g. ``best_model.pt``).
            bert_path: pretrained bert-base-chinese tokenizer directory.
                Defaults to the original hard-coded local snapshot for
                backward compatibility.
        """
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.tokenizer = BertTokenizer.from_pretrained(bert_path)
        self.model = BertBiLSTM().to(self.device)
        # NOTE(review): torch.load unpickles arbitrary objects — only load
        # checkpoints from trusted sources (consider weights_only=True on
        # torch >= 2.0).
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))
        self.model.eval()

    def predict(self, text):
        """Predict the sentiment polarity of a single text.

        Args:
            text: the input string to classify.

        Returns:
            dict with keys:
                "label": 0 (negative) or 1 (positive)
                "negative_prob": probability of the negative class
                "positive_prob": probability of the positive class
                "sentiment": "负面" or "正面"
        """
        encoding = self.tokenizer(
            text,
            max_length=128,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )
        with torch.no_grad():
            inputs = encoding["input_ids"].to(self.device)
            masks = encoding["attention_mask"].to(self.device)
            outputs = self.model(inputs, masks)
            probs = torch.softmax(outputs, dim=1).cpu().numpy()[0]

        # Derive the label once from the probabilities already moved to the
        # CPU, instead of calling argmax twice on the device tensor.
        label = int(probs.argmax())
        return {
            "label": label,
            "negative_prob": float(probs[0]),
            "positive_prob": float(probs[1]),
            "sentiment": "负面" if label == 0 else "正面"
        }


# 3. 使用示例
if __name__ == "__main__":
    # Initialize the analyzer (argument is the trained checkpoint path).
    analyzer = SentimentAnalyzer("best_model.pt")  # replace with your model path

    # Interactive prediction loop.
    print("情感分析系统已启动（输入'exit'退出）")
    while True:
        try:
            text = input("\n请输入要分析的文本: ")
        except (EOFError, KeyboardInterrupt):
            # Exit cleanly on Ctrl-D / Ctrl-C instead of a traceback.
            break

        text = text.strip()
        if text.lower() == "exit":
            break
        if not text:
            # Skip blank lines instead of scoring an empty string.
            continue

        result = analyzer.predict(text)
        print(f"\n分析结果: {result['sentiment']}")
        print(f"- 负面概率: {result['negative_prob']:.2%}")
        print(f"- 正面概率: {result['positive_prob']:.2%}")
        print(f"- 置信度: {'高' if max(result['negative_prob'], result['positive_prob']) > 0.9 else '中'}")