import datetime
import re

from transformers import BertTokenizer, BertForSequenceClassification
import torch


class ColdUtil:
    """Offensive-language classifier backed by a local BERT/RoBERTa
    sequence-classification checkpoint (COLD).

    The tokenizer and model are loaded once at class-definition time and
    shared by every call; the model is put in eval mode immediately since
    this class only performs inference.
    """

    # Local checkpoint directory; must contain both tokenizer files and
    # model weights. NOTE(review): hard-coded Windows path — consider
    # making this configurable.
    model_path = r'F:\roberta-base-cold'
    tokenizer = BertTokenizer.from_pretrained(model_path)
    model = BertForSequenceClassification.from_pretrained(model_path)
    model.eval()

    # BERT-style models accept at most 512 tokens. Truncating by *tokens*
    # via the tokenizer is correct; the previous 500-*character* slice was
    # only an approximation and could still overflow the model's limit.
    MAX_LENGTH = 512

    @staticmethod
    def classify_text(text):
        """Classify a single text.

        :param text: input string (truncated to 512 tokens if longer)
        :return: predicted label id as a plain int (argmax over logits)
        """
        model_input = ColdUtil.tokenizer(
            text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=ColdUtil.MAX_LENGTH,
        )
        # no_grad: inference only — skip building the autograd graph.
        with torch.no_grad():
            model_output = ColdUtil.model(**model_input, return_dict=False)
        prediction = torch.argmax(model_output[0].cpu(), dim=-1)
        return prediction.item()

    @staticmethod
    def classify_text_batch(texts):
        """Classify a list of texts in a single batched forward pass.

        Previously this looped and ran one forward pass per text; padding
        plus the attention mask makes one batched pass equivalent and far
        faster.

        :param texts: iterable of input strings
        :return: list of predicted label ids (ints), in input order
        """
        texts = list(texts)
        if not texts:
            return []
        model_input = ColdUtil.tokenizer(
            texts,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=ColdUtil.MAX_LENGTH,
        )
        with torch.no_grad():
            model_output = ColdUtil.model(**model_input, return_dict=False)
        predictions = torch.argmax(model_output[0].cpu(), dim=-1)
        return predictions.tolist()

