import torch
from transformers import BertTokenizer, BertModel

# Shared BERT encoder, loaded once at import time from a local checkpoint path.
# NOTE(review): this is a module-level global used directly by Model.forward,
# not a registered submodule — its weights are not in Model's state_dict and
# are not moved by Model.to(device). Confirm the hard-coded local path is
# valid on the deployment machine.
pretrained = BertModel.from_pretrained(r'F:/bert-base-chinese')


class Model(torch.nn.Module):
    """Classification head on top of the module-level BERT encoder.

    The encoder is the module global ``pretrained`` (not a submodule), so
    this module's ``state_dict`` contains only the ``fc`` head weights.
    """

    def __init__(self, hidden_size=768, num_classes=2):
        """Build the linear classification head.

        Args:
            hidden_size: dimensionality of the encoder's hidden states
                (768 for bert-base models; parameterized for reuse).
            num_classes: number of output classes (default 2, matching the
                original hard-coded head — adjust for your dataset).
        """
        super().__init__()
        self.fc = torch.nn.Linear(hidden_size, num_classes)

    def forward(self, input_ids, attention_mask, token_type_ids):
        """Return class probabilities of shape (batch, num_classes)."""
        # Encode with the shared BERT backbone; out[0] is the last hidden
        # state with shape (batch, seq_len, hidden_size).
        out = pretrained(input_ids=input_ids,
                         attention_mask=attention_mask,
                         token_type_ids=token_type_ids)

        out = out[0]
        # Classify from the [CLS] token representation (sequence position 0).
        out = self.fc(out[:, 0])
        # Softmax so callers receive probabilities rather than raw logits.
        out = out.softmax(dim=1)
        return out


class BertTextClassifier:
    """Static wrapper around the fine-tuned BERT classifier.

    Loads the trained head weights and the tokenizer once, at class
    definition (import) time, and exposes a single ``predict`` entry point.
    """

    device = torch.device('cpu')
    model = Model()
    model.load_state_dict(torch.load(r'./model/bert_cold_model/best_model.pth', map_location=device))
    model = model.to(device)
    # Inference mode: disables dropout/batch-norm training behavior in the head.
    model.eval()
    tokenizer = BertTokenizer.from_pretrained(r'F:/bert-base-chinese')

    @staticmethod
    def predict(text):
        """Classify one text string and return the predicted class index.

        Args:
            text: the input string; capped at 500 characters before tokenizing.

        Returns:
            int: argmax class index (0 or 1 for the 2-class head).

        Raises:
            ValueError: if ``text`` is None, or the classifier components
                were not initialized.
        """
        # Validate BEFORE touching the input: the original sliced `text`
        # first, so a None input raised TypeError instead of the intended
        # ValueError below.
        if text is None:
            raise ValueError("Please provide text for prediction.")
        if BertTextClassifier.model is None or BertTextClassifier.tokenizer is None or BertTextClassifier.device is None:
            raise ValueError("Model, tokenizer, and device must be initialized before making predictions.")
        # Cheap character-level cap; the tokenizer additionally truncates to
        # the model's max sequence length via truncation=True.
        if len(text) > 500:
            text = text[:500]
        inputs = BertTextClassifier.tokenizer(text, return_tensors="pt", padding=True, truncation=True)
        input_ids = inputs['input_ids'].to(BertTextClassifier.device)
        attention_mask = inputs['attention_mask'].to(BertTextClassifier.device)
        token_type_ids = inputs['token_type_ids'].to(BertTextClassifier.device)
        # No gradients needed for inference.
        with torch.no_grad():
            out = BertTextClassifier.model(input_ids=input_ids, attention_mask=attention_mask,
                                           token_type_ids=token_type_ids)
        # Probabilities -> scalar class index for the single input text.
        out = out.argmax(dim=1)
        return out.item()
