import torch
from transformers import BertTokenizer, BertModel
import numpy as np
import torch.nn as nn


# Load the model and tokenizer
class SentimentPredictor:
    """Sentiment classifier: a pretrained BERT encoder plus a linear head.

    Maps an input text to one of 8 sentiment labels (returned as Chinese
    strings, e.g. "开心"/"伤心").
    """

    def __init__(self, model_path, bert_path, max_seq_len=30):
        """
        Args:
            model_path: path to a saved state_dict that contains at least
                'fc.weight' / 'fc.bias', and optionally fine-tuned encoder
                weights under a 'bert.' prefix.
            bert_path: directory of the pretrained bert-base-chinese model.
            max_seq_len: maximum token length; longer inputs are truncated.
        """
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.tokenizer = BertTokenizer.from_pretrained(bert_path)
        self.bert = BertModel.from_pretrained(bert_path)
        self.max_seq_len = max_seq_len
        self.LABEL_MAP = {0: "开心", 1: "伤心", 2: "厌恶", 3: "惊讶",
                          4: "生气", 5: "关心", 6: "疑问", 7: "平静"}

        # Custom classification head on top of BERT's pooled [CLS] output.
        self.fc = nn.Linear(self.bert.config.hidden_size, 8)

        # Restore the trained classifier head from the checkpoint.
        state_dict = torch.load(model_path, map_location=self.device)
        self.fc.load_state_dict({'weight': state_dict['fc.weight'],
                                 'bias': state_dict['fc.bias']})

        # Bug fix: also restore fine-tuned encoder weights when the
        # checkpoint contains them. Previously only the fc head was loaded,
        # so a fine-tuned BERT was silently replaced by the stock
        # pretrained weights. No-op for checkpoints without 'bert.' keys.
        bert_weights = {k[len("bert."):]: v
                        for k, v in state_dict.items()
                        if k.startswith("bert.")}
        if bert_weights:
            self.bert.load_state_dict(bert_weights, strict=False)

        self.bert.to(self.device)
        self.fc.to(self.device)
        # Inference only: disable dropout etc.
        self.bert.eval()
        self.fc.eval()

    def predict(self, text):
        """Return the predicted sentiment label (str) for *text*."""
        encoding = self.tokenizer(
            text,
            max_length=self.max_seq_len,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )
        input_ids = encoding["input_ids"].to(self.device)
        attention_mask = encoding["attention_mask"].to(self.device)

        with torch.no_grad():
            outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
            # Use the pooled [CLS] representation as the sentence embedding.
            pooled_output = outputs.pooler_output
            logits = self.fc(pooled_output)
            pred_id = torch.argmax(logits, dim=1).item()
        return self.LABEL_MAP[pred_id]


# Interactive test driver. Model construction is guarded behind __main__ so
# that importing this module does not load models or touch the filesystem
# as a side effect.
if __name__ == "__main__":
    # Initialize the predictor (replace with your own model paths).
    predictor = SentimentPredictor(
        model_path=r"params\1bert.pth",
        bert_path=r"D:\AI\model\bert-base-chinese"
    )
    while True:
        text = input("请输入文本（输入q退出）：")
        if text == "q":
            break
        print("预测结果：", predictor.predict(text))