#!/usr/bin/python
# -*- coding: UTF-8 -*-

import os
import sys
import time
import torch
import torch.nn as nn
import copy
from transformers import BertModel, BertTokenizer, BertConfig
PAD, CLS = '[PAD]', '[CLS]'  # padding token; [CLS] is BERT's sequence-summary token

# Pre-load the tokenizer once at import time so every inference instance shares it.
this_path = os.path.dirname(__file__)
tokenizer = BertTokenizer.from_pretrained(os.path.join(this_path, 'bert_pretrained'))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pad_size = 64  # fixed token length per sentence (short inputs padded, long ones truncated); larger = slower


class Model(nn.Module):
    """BERT encoder with a binary classification head on the pooled [CLS] output."""

    def __init__(self):
        super(Model, self).__init__()
        config_path = os.path.join(this_path, "bert_pretrained/config.json")
        self.bert = BertModel(config=BertConfig.from_json_file(config_path))
        # Keep every encoder weight trainable (requires_grad defaults to True;
        # harmless for pure inference since the model is put in eval mode by callers).
        for p in self.bert.parameters():
            p.requires_grad = True
        # hidden_size 768 -> 2 classes
        self.fc = nn.Linear(768, 2)

    def forward(self, x):
        """Classify a batch.

        x is a (token_ids, seq_len, attention_mask) tuple; x[1] (seq_len) is
        unused here.  attention_mask has the same shape as token_ids, with 0
        on padding positions, e.g. [1, 1, 1, 1, 0, 0].
        Returns raw logits of shape (batch, 2).
        """
        token_ids, attention_mask = x[0], x[2]
        outputs = self.bert(token_ids, attention_mask=attention_mask, return_dict=True)
        return self.fc(outputs.pooler_output)

# Inference helper: turns raw sentences into padded tensor batches for Model.
class roBertaInferClass():
    def __init__(self, pad_size):
        self.pad_size = pad_size          # fixed sequence length (pad short, truncate long)
        self.device = device
        self.softmax = nn.Softmax(dim=1)  # normalize over the class dimension
        self.tokenizer = tokenizer

    def getbatch(self, ques_list, label):
        """Tokenize each sentence, prepend [CLS], then pad/truncate to pad_size.

        Returns a list of (token_ids, label, seq_len, mask) tuples, where mask
        is 1 on real tokens and 0 on padding, and seq_len is the pre-pad
        length capped at pad_size.

        NOTE(review): for long inputs the tail slice (ids[-pad_size:]) drops
        the leading [CLS] token — confirm the checkpoints were trained with
        this truncation scheme before changing it.
        """
        batch = []
        for sentence in ques_list:
            tokens = [CLS] + self.tokenizer.tokenize(sentence.strip())
            ids = self.tokenizer.convert_tokens_to_ids(tokens)
            n = len(ids)
            if n < self.pad_size:
                pad_len = self.pad_size - n
                mask = [1] * n + [0] * pad_len
                ids = ids + [0] * pad_len
                seq_len = n
            else:
                mask = [1] * self.pad_size
                ids = ids[-self.pad_size:]  # keep the tail (ids[:self.pad_size] would keep the head)
                seq_len = self.pad_size
            batch.append((ids, int(label), seq_len, mask))
        return batch

    def _to_tensor(self, datas):
        """Stack (ids, label, seq_len, mask) tuples into LongTensors on self.device."""
        def column(i):
            return torch.LongTensor([row[i] for row in datas]).to(self.device)

        x, y = column(0), column(1)
        seq_len = column(2)  # pre-pad length (capped at pad_size)
        mask = column(3)
        return (x, seq_len, mask), y

    def get_tensor(self, input_list):
        """Convenience: raw sentences -> ((x, seq_len, mask), y) with dummy label 0."""
        return self._to_tensor(self.getbatch(input_list, 0))


def load_roberta_model(model_path="", filter_model_list=None):
    """Load the requested safety-filter checkpoints from `model_path`.

    Args:
        model_path: directory holding roberta_wwm_<name>.ckpt files.
        filter_model_list: subset of ["politic", "porn", "insult", "violence"];
            any name other than politic/porn/insult loads the violence
            checkpoint (dispatch preserved from the original `else` branch).

    Returns:
        (True, [model_politic, model_porn, model_insult, model_violence]) on
        success — models that were not requested stay None — or
        (False, <Chinese error message>) on failure.
    """
    print("start to load the model weight")
    if not os.path.exists(model_path):
        return False, "安全审核模型路径不存在，请重新输入"
    if filter_model_list is None:  # avoid the mutable-default-argument pitfall
        filter_model_list = []

    base_model = Model().to(device)
    # Previously these placeholders were bound to the builtin `any`; None is
    # the correct "not loaded" marker and lets callers test `is None`.
    loaded = {"politic": None, "porn": None, "insult": None, "violence": None}

    try:
        for name in filter_model_list:
            key = name if name in ("politic", "porn", "insult") else "violence"
            ckpt_path = os.path.join(model_path, "roberta_wwm_" + key + ".ckpt")
            # deepcopy for EVERY checkpoint — the original loaded the violence
            # weights straight into the shared base model, aliasing its weights.
            model = copy.deepcopy(base_model)
            model.load_state_dict(torch.load(ckpt_path, map_location=device))
            model.eval()
            loaded[key] = model
    except Exception as e:
        print(e)
        return False, "安全审核模型加载失败，请检查模型是否存在"

    print("model load success")
    return True, [loaded["politic"], loaded["porn"], loaded["insult"], loaded["violence"]]


def load_roberta_model_single(model_path="", filter_model_name=""):
    """Load one safety-filter checkpoint (roberta_wwm_<name>.ckpt) and return it in eval mode.

    Raises:
        ValueError: when the checkpoint is missing or fails to load.
    """
    print("start to load the filter model : " + filter_model_name)

    model = Model().to(device)
    try:
        ckpt = os.path.join(model_path, "roberta_wwm_" + filter_model_name + ".ckpt")
        model.load_state_dict(torch.load(ckpt, map_location=device))
        model.eval()
    except Exception as e:
        print(e)
        raise ValueError("安全审核模型加载失败，请检查模型是否存在")

    print("model load success")
    return model


# Score input sentences with `model`; list in, list out.
def evaluate_score(input_list, model, threshold=0.8):
    """Run the binary safety classifier on a batch of sentences.

    Args:
        input_list: list of raw text strings.
        model: a loaded `Model` already in eval mode.
        threshold: class-1 probability above which a sentence is flagged.

    Returns:
        (evaluate_result, score_result): parallel lists — True means the input
        contains sensitive content; score_result holds the class-1 probabilities.
    """
    inferclass = roBertaInferClass(pad_size)
    input_tensor = inferclass.get_tensor(input_list)
    # Pure inference: run under no_grad instead of touching the legacy
    # `.data` attribute — same values, no autograd graph built.
    with torch.no_grad():
        outputs = model(input_tensor[0])
        logit = inferclass.softmax(outputs).tolist()

    score_result = [row[1] for row in logit]  # probability of the "sensitive" class
    evaluate_result = [score > threshold for score in score_result]
    return evaluate_result, score_result


if __name__ == "__main__":
    # Interactive smoke test: load all four filters, then score typed sentences.
    t0 = time.time()
    flg, result = load_roberta_model(model_path='.', filter_model_list=["politic", "porn", "insult", "violence"])
    t1 = time.time()
    # Bail out on failure: `result` is an error string then, and the original
    # code crashed on the list-unpacking below with a confusing error.
    if not flg:
        print(result)
        sys.exit(1)
    [model_politic, model_porn, model_insult, model_violence] = result
    print('load model time: ', t1 - t0)
    while True:
        text = input("\n请输入检测语句：")
        if text.strip().lower() == "exit":
            print("结束！")
            break
        # Per-query timer: the original measured from the end of model loading,
        # so the reported time grew over the whole session.
        t_query = time.time()
        input_list = [text]
        for tag, mdl in (("politic", model_politic), ("porn", model_porn),
                         ("insult", model_insult), ("violence", model_violence)):
            print(tag + ":", evaluate_score(input_list, mdl, threshold=0.8))
        print('finish time: ', time.time() - t_query)
