import torch
from openprompt.data_utils import InputExample
from openprompt.plms import load_plm
from openprompt.prompts import ManualTemplate, ManualVerbalizer
from openprompt import PromptDataLoader, PromptForClassification
import numpy as np


class BertTextClassifier:
    """Prompt-based sarcasm classifier built on a Chinese BERT PLM via OpenPrompt.

    All heavy resources (PLM, template, verbalizer, fine-tuned checkpoint) are
    loaded once at class-definition time; ``predict``/``batch_predict`` reuse
    them for inference only (no gradients, eval mode).

    Labels: 0 = not sarcasm ("否"), 1 = sarcasm ("是").
    """

    # NOTE(review): hard-coded local paths — consider moving to config/env
    # variables so the class is portable across machines.
    PLM_PATH = 'F:/bert-base-chinese'
    CHECKPOINT_PATH = r'C:\Users\xiaot\Desktop\ToSarcasm\save\5\best_model.std'
    MAX_SEQ_LENGTH = 256

    plm, tokenizer, model_config, WrapperClass = load_plm('bert', PLM_PATH)

    # Prompt reads: "<text_b> 是对 <text_a> 的讽刺吗？<mask>"
    # ("Is <text_b> sarcasm about <text_a>? <mask>"); the verbalizer maps the
    # mask prediction to 否 (no) / 是 (yes).
    prompt_dict = {
        'main': {
            'template': '{"placeholder":"text_b"}是对{"placeholder":"text_a"}的讽刺吗？{"mask"}',
            'label_words': [['否'], ['是']],
        },
    }

    promptTemplate = ManualTemplate(
        text=prompt_dict['main']['template'],
        tokenizer=tokenizer,
    )

    promptVerbalizer = ManualVerbalizer(
        label_words=prompt_dict['main']['label_words'],
        tokenizer=tokenizer,
    )

    promptModel = PromptForClassification(
        template=promptTemplate,
        plm=plm,
        verbalizer=promptVerbalizer,
        freeze_plm=False,
    )

    # Single-expression device selection (replaces the old ''/if/else dance
    # that re-wrapped an already-constructed torch.device).
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = promptModel
    model.to(device)
    # strict=False tolerates key mismatches between checkpoint and model.
    # NOTE(review): this silently ignores missing/unexpected keys — confirm
    # the checkpoint actually matches this architecture.
    model.load_state_dict(
        torch.load(CHECKPOINT_PATH, map_location=device),
        strict=False,
    )
    model.eval()  # inference mode from the start

    @staticmethod
    def _make_loader(dataset):
        """Build a deterministic (no-shuffle, batch_size=1, quiet) PromptDataLoader.

        Shared by ``predict`` and ``batch_predict`` so their loader settings
        cannot drift apart.
        """
        return PromptDataLoader(
            dataset=dataset,
            tokenizer=BertTextClassifier.tokenizer,
            template=BertTextClassifier.promptTemplate,
            tokenizer_wrapper_class=BertTextClassifier.WrapperClass,
            batch_size=1,
            max_seq_length=BertTextClassifier.MAX_SEQ_LENGTH,
            shuffle=False,
            verbose=False,  # suppress per-call tokenization progress output
        )

    @staticmethod
    def predict(topic, text):
        """Classify one (topic, text) pair.

        Args:
            topic: the subject being commented on (fills ``text_a``).
            text: the comment to classify (fills ``text_b``).

        Returns:
            int: 0 (not sarcasm) or 1 (sarcasm).
        """
        example = InputExample(
            guid=0,
            text_a=topic,
            text_b=text,
            label=0,  # placeholder; the label is unused at inference time
        )
        data_loader = BertTextClassifier._make_loader([example])

        pred_label = 0  # safe default in case the loader yields no batch
        BertTextClassifier.model.eval()
        with torch.no_grad():
            for batch in data_loader:
                batch = batch.to(BertTextClassifier.device)
                logits = BertTextClassifier.model(batch)
                pred_label = torch.argmax(logits, dim=-1).item()
        return pred_label

    @staticmethod
    def batch_predict(dataset):
        """Classify a sequence of ``InputExample`` instances.

        Args:
            dataset: iterable of ``openprompt.data_utils.InputExample``.

        Returns:
            np.ndarray: 1-D array of predicted labels (0/1), one per example;
            empty array for an empty dataset.
        """
        data_loader = BertTextClassifier._make_loader(dataset)
        y_pred = []

        BertTextClassifier.model.eval()
        with torch.no_grad():
            for batch in data_loader:
                batch = batch.to(BertTextClassifier.device)
                logits = BertTextClassifier.model(batch)
                y_pred.append(torch.argmax(logits, dim=-1).cpu().numpy())

        # Guard the empty case: np.concatenate([]) would raise ValueError.
        if not y_pred:
            return np.array([], dtype=np.int64)
        return np.concatenate(y_pred, axis=0)
