"""
文本主题分类: ClueAI/PromptCLUE，只开放了PromptCLUE-base版本，当前最新PromptCLUE-base-v1.5
"""
import os
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

from threading import RLock
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch


class TextClassifier(object):
    """Thread-safe singleton wrapper around a PromptCLUE T5 model for
    zero-shot Chinese news-topic classification.

    The expensive tokenizer/model load happens exactly once in ``__new__``;
    every later instantiation returns the same cached instance.
    """

    lock = RLock()

    def __new__(cls, *args, **kwargs):
        # Locked singleton construction: the RLock serializes first-time
        # initialization so the model is loaded only once across threads.
        with TextClassifier.lock:
            if not hasattr(TextClassifier, "_instance"):
                TextClassifier._instance = object.__new__(cls)
                # NOTE(review): hard-coded local checkpoint path; the module
                # docstring refers to ClueAI/PromptCLUE — confirm this path
                # matches the deployment environment.
                repo_id = "/root/autodl-tmp/PromptCLUE-base-v1-5"
                cls.tokenizer = T5Tokenizer.from_pretrained(repo_id)
                model = T5ForConditionalGeneration.from_pretrained(repo_id)
                # Fall back to CPU so the service still starts on hosts
                # without a GPU (the original hard-coded 'cuda' would crash).
                device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
                model.to(device)
                model.eval()  # inference only: disable dropout etc.
                cls.device = device
                cls.model = model
        return TextClassifier._instance

    def __init__(self):
        # Candidate topic labels offered to the model inside the prompt.
        self.class_names = ['故事', '文化', '娱乐', '体育', '财经', '房产', '汽车', '教育', '科技', '军事', '旅游', '国际', '股票', '农业', '游戏', '公司制度', '合同条款', '法律']

    def news_prompt(self, text: str) -> str:
        """Build the PromptCLUE classification prompt for *text*."""
        prompt = f'''分类任务：
        {text}
        选项：{','.join(self.class_names)}
        答案：'''
        return prompt

    def predict(self, text, sample=False, top_p=0.8):
        """Classify *text* into one of ``self.class_names``.

        Args:
            text: input text to classify.
            sample: if True, use nucleus sampling (more diverse output);
                otherwise use deterministic beam search.
            top_p: nucleus-sampling cumulative-probability threshold in
                (0, 1]; larger values yield more diverse generations
                (only used when ``sample`` is True).

        Returns:
            The decoded label string produced by the model.
        """
        # Newlines are folded into '_' before prompting and restored in the
        # output so the prompt stays single-line per segment.
        text = text.replace("\n", "_")
        prompt = self.news_prompt(text)
        encoding = self.tokenizer(text=[prompt], truncation=True, padding=True,
                                  max_length=768, return_tensors="pt").to(self.device)
        # Gradients are never needed at inference time; no_grad avoids
        # building the autograd graph and saves memory.
        with torch.no_grad():
            if not sample:
                out = self.model.generate(**encoding, return_dict_in_generate=True,
                                          output_scores=False, max_length=128,
                                          num_beams=4, length_penalty=0.6)
            else:
                out = self.model.generate(**encoding, return_dict_in_generate=True,
                                          output_scores=False, max_length=64,
                                          do_sample=True, top_p=top_p)
        out_text = self.tokenizer.batch_decode(out["sequences"], skip_special_tokens=True)
        return out_text[0].replace("_", "\n")


if __name__ == '__main__':
    # Smoke test: classify one sample question when run as a script.
    clf = TextClassifier()
    answer = clf.predict("如果日本沉没，中国会接收日本难民吗？")
    print(answer)


