from importlib import import_module

import torch
from torch import nn

from utils import build_iterator, get_time_dif

PAD, CLS = '[PAD]', '[CLS]'  # special tokens: [PAD] pads sequences; [CLS] is BERT's sentence-summary token
# Maps model class-id outputs (argmax over logits) to human-readable
# Chinese moderation-category names.
label_mapping = {
    0: '赌博诈骗',  # gambling / fraud
    1: '未成年不良信息',  # content harmful to minors
    2: '色情低俗',  # pornographic / vulgar
    3: '设计侵犯版权、名誉、肖像、隐私、商标权等',  # infringement of copyright, reputation, likeness, privacy, trademark, etc. -- NOTE(review): '设计' looks like a typo for '涉及' ("involving"); confirm against training labels before changing
    4: '不实信息',  # misinformation
    5: '血腥暴力',  # gore / violence
    6: '封建迷信'  # superstition
}


def preprocess_text(config, contents, pad_size=32):
    """Tokenize raw strings into padded (token_ids, seq_len, mask) triples
    and wrap them in an inference iterator.

    Args:
        config: model config exposing a BERT-style ``tokenizer``.
        contents: iterable of raw text strings.
        pad_size: fixed sequence length after padding (default 32).

    Returns:
        The batch iterator produced by ``build_iterator`` (inference mode,
        batch size 1).
    """
    samples = []
    for text in contents:
        # Prepend [CLS] and truncate so the sequence fits in pad_size.
        # NOTE(review): [:pad_size-2] reserves two slots but only [CLS] is
        # added (no [SEP]), leaving one position always padded — presumably
        # mirrors the training-time preprocessing; confirm before changing.
        tokens = [CLS] + config.tokenizer.tokenize(text)[:pad_size - 2]
        n_real = len(tokens)
        pad_len = pad_size - n_real
        attention_mask = [1] * n_real + [0] * pad_len
        ids = config.tokenizer.convert_tokens_to_ids(tokens) + [0] * pad_len
        samples.append((ids, n_real, attention_mask))
    return build_iterator(samples, config, mode='infer', batchsize_for_infer=1)


def classify_text(contents, dataset='TikTok', model_name='ERNIE'):
    """Classify texts into content-moderation categories.

    Loads the trained checkpoint at ``config.save_path`` for the given
    dataset/model pair, runs inference over ``contents``, and maps each
    argmax class id through ``label_mapping``.

    Args:
        contents: list of raw text strings to classify.
        dataset: dataset directory name the Config was built for
            (default 'TikTok', matching the original hard-coded value).
        model_name: module under ``models`` providing Config/Model
            (default 'ERNIE').

    Returns:
        A flat list of human-readable label strings, one per input text.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    x = import_module(f'models.{model_name}')
    config = x.Config(dataset)
    model = x.Model(config).to(device)
    # Load weights before any DataParallel wrap so state-dict keys match.
    model.load_state_dict(torch.load(config.save_path, map_location=device))
    if torch.cuda.device_count() > 1:
        print("我要开始并行了!")
        model = nn.DataParallel(model)
    contents_iterator = preprocess_text(config, contents)
    predicted_labels = []
    model.eval()
    with torch.no_grad():
        for batch in contents_iterator:
            output = model(batch)
            # Some models return (logits, ...) tuples; keep only the logits.
            # (The original fed the whole tuple to torch.argmax, which would
            # raise TypeError if that branch ever fired.)
            if isinstance(output, tuple):
                output = output[0]
            label_ids = torch.argmax(output, dim=1).cpu().tolist()
            predicted_labels.extend(label_mapping[i] for i in label_ids)
    return predicted_labels


def main():
    """Smoke-test classification over sample TikTok captions.

    The sample list intentionally repeats one caption pair 11 times
    (carried over from the original data) to exercise batching.
    """
    contents = [
        "几分钟看完日本血浆片《东京残酷警察》，重口味的血腥暴力圣宴！",
        "全程核能，都闪开，燕双鹰要开始秀了！ # 燕双鹰 # 开挂 # 爆笑解说 # 文西与阿漆 # 搞笑 # 四川方言",
    ] + [
        "水果手机把俄罗斯开除地球了",
        "芬兰政府正式决定申请加入北约",
    ] * 11
    predicted_labels = classify_text(contents)
    print(predicted_labels)


# Guard the entry point so importing this module does not trigger
# model loading and inference as a side effect.
if __name__ == "__main__":
    main()
