# encoding: utf-8

from typing import List, Dict, Any

import torch
import numpy as np
from torch.nn.functional import softmax
from transformers import BertTokenizer, BertConfig, BertModel
from torch.utils import data


def get_confidence(outputs):
    """Return the maximum softmax probability along the last dimension.

    Args:
        outputs: raw (unnormalized) logits from a model head, e.g.
            ``outputs = some_model(input)``. Any shape; softmax/max are
            taken over the last dimension.

    Returns:
        numpy array (or 0-d numpy scalar for 1-D input) of max probabilities.
    """
    probabilities = softmax(outputs, dim=-1)
    # .detach() so this also works on tensors that still require grad;
    # original code raised RuntimeError in that case. Value is unchanged.
    return torch.max(probabilities, dim=-1).values.detach().numpy()


# --- module-level configuration -------------------------------------------
# Local copy of the pretrained Chinese RoBERTa encoder (hf hub id below).
pretrained = "D:/codes/nlp_about/pretrained_model/hfl_chinese-roberta-wwm-ext"
# pretrained = "hfl/chinese-roberta-wwm-ext"
# Fine-tuned checkpoint ("0.8511" presumably a validation metric — TODO confirm).
model_path = "./staplesc4c6/staplesc4c6_model_2_0.8511.pt"
# One label per line; these define the index->name mapping for each head.
label1_path = "./staplesc4c6/staplesc4c6_c1.txt"
label2_path = "./staplesc4c6/staplesc4c6_c4.txt"
# Filled in by load_labels() below; shared by BrandPredictor.get_label().
c1_labels = []
c2_labels = []


def load_labels(label1_path: str, label2_path: str) -> None:
    """Populate the module-level ``c1_labels`` / ``c2_labels`` lists.

    Each file holds one label per line; blank or whitespace-only lines are
    skipped and surrounding whitespace is stripped.

    NOTE(review): files are opened with the platform default encoding; if the
    label files are UTF-8 Chinese text this should be
    ``open(path, "r", encoding="utf-8")`` — confirm against the actual files.

    Args:
        label1_path: path to the head-1 label file.
        label2_path: path to the head-2 label file.
    """

    def _read(path: str) -> List[str]:
        # One label per line; ``line.strip()`` is falsy for blank lines,
        # so the redundant ``line and line.strip()`` check was dropped.
        with open(path, "r") as f:
            return [line.strip() for line in f if line.strip()]

    c1_labels.extend(_read(label1_path))
    c2_labels.extend(_read(label2_path))


# Import-time side effects: fill the label lists and load the shared BERT
# encoder/config once so BrandPredictor instances can reuse them.
load_labels(label1_path, label2_path)

bert_model = BertModel.from_pretrained(pretrained)
bert_config = BertConfig.from_pretrained(pretrained)


# dataloader
# dataloader
class DataGen(data.Dataset):
    """Minimal Dataset yielding ``(token_ids, label1, label2)`` numpy triples."""

    def __init__(self, data, label1, label2):
        self.data = data
        self.label1 = label1
        self.label2 = label2

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        tokens = np.array(self.data[index])
        first_label = np.array(self.label1[index])
        second_label = np.array(self.label2[index])
        return tokens, first_label, second_label


class Model(torch.nn.Module):
    """BERT encoder with two classification heads.

    Head 1 is a two-layer MLP over the pooled output; head 2 funnels the
    shared features through widths 4x -> 2x -> 1x of ``num2_class``.
    Submodule attribute names are part of the checkpoint contract
    (``load_state_dict``) and must not change.
    """

    def __init__(self, bert_model, bert_config, num1_class: int, num2_class: int):
        super(Model, self).__init__()
        # Funnel widths for head 2.
        wide = num2_class * 4
        mid = int(wide / 2)
        narrow = int(mid / 2)
        assert num2_class > 1, f"num2_class must gt 1, but got {num2_class}"
        self.bert_model = bert_model
        self.dropout = torch.nn.Dropout(0.4)
        self.n1fc1 = torch.nn.Linear(bert_config.hidden_size, bert_config.hidden_size)
        self.n1fc2 = torch.nn.Linear(bert_config.hidden_size, num1_class)
        self.n2fc2 = torch.nn.Linear(bert_config.hidden_size, wide)
        self.n2fc3 = torch.nn.Linear(wide, mid)
        self.n2fc4 = torch.nn.Linear(mid, narrow)
        self.n2fc5 = torch.nn.Linear(narrow, num2_class)
        self.relu = torch.nn.ReLU()
        self.selu = torch.nn.SELU()

    def forward(self, token_ids):
        # Pooled ([CLS]) representation -> shared projection.
        pooled = self.bert_model(token_ids)[1]
        shared = self.n1fc1(self.dropout(pooled))
        # Head 1: ReLU + dropout + linear.
        head1 = self.n1fc2(self.dropout(self.relu(shared)))
        # Head 2: SELU funnel.
        h = self.dropout(self.selu(self.n2fc2(shared)))
        h = self.dropout(self.selu(self.n2fc3(h)))
        h = self.selu(self.n2fc4(h))
        head2 = self.n2fc5(h)
        return head1, head2


class ModelSmall(torch.nn.Module):
    """Lighter two-head variant: head 2 is a single linear layer.

    Submodule attribute names match the saved checkpoint and must not change.
    """

    def __init__(self, bert_model, bert_config, num1_class: int, num2_class: int = 1000):
        super(ModelSmall, self).__init__()

        self.bert_model = bert_model
        self.dropout = torch.nn.Dropout(0.4)
        self.n1fc1 = torch.nn.Linear(bert_config.hidden_size, bert_config.hidden_size)
        self.n1fc2 = torch.nn.Linear(bert_config.hidden_size, num1_class)
        self.n2fc2 = torch.nn.Linear(bert_config.hidden_size, num2_class)

        self.relu = torch.nn.ReLU()
        self.selu = torch.nn.SELU()

    def forward(self, token_ids):
        # Pooled ([CLS]) representation -> shared projection.
        pooled = self.bert_model(token_ids)[1]
        shared = self.n1fc1(self.dropout(pooled))
        # Head 1: ReLU + dropout + linear; head 2: plain linear on shared.
        head1 = self.n1fc2(self.dropout(self.relu(shared)))
        head2 = self.n2fc2(shared)
        return head1, head2


class BrandPredictor(object):
    """Predicts a (label1, label2) category pair for product-title strings
    using the fine-tuned ``ModelSmall`` checkpoint.

    Relies on the module-level ``bert_model`` / ``bert_config`` /
    ``pretrained`` objects loaded at import time.
    """

    def __init__(self, model_path: str = None, c1_labels: List[str] = None, c2_labels: List[str] = None):
        """Load the checkpoint and tokenizer.

        Args:
            model_path: path to the fine-tuned ``state_dict`` (.pt file).
            c1_labels: index->name mapping for head 1 (defaults to empty list).
            c2_labels: index->name mapping for head 2 (defaults to empty list).
        """
        self.model_path = model_path
        # None-sentinel instead of mutable `= []` defaults, which would be
        # shared across all instances.
        self.c1_labels = c1_labels if c1_labels is not None else []
        self.c2_labels = c2_labels if c2_labels is not None else []

        # Load the pretrained encoder + fine-tuned classification heads.
        self.model = ModelSmall(bert_model=bert_model, bert_config=bert_config, num1_class=len(self.c1_labels),
                                num2_class=len(self.c2_labels))
        # map_location forces CPU loading; moved to GPU below if available.
        self.model.load_state_dict(torch.load(self.model_path, map_location=lambda storage, loc: storage))
        self.device = torch.device("cuda:0") if torch.cuda.is_available() else "cpu"
        self.model.to(self.device)
        self.model.eval()
        print(self.device)

        self.tokenizer = BertTokenizer.from_pretrained(pretrained)

    @staticmethod
    def get_label(c1, c2):
        """Map names -> indices (registering unseen names) or indices -> names.

        NOTE(review): uses the module-level ``c1_labels`` / ``c2_labels``
        lists, not ``self`` — in practice these are the same objects passed
        to ``__init__`` in ``__main__``, but confirm for other callers.
        """
        if isinstance(c1, str):
            if c1 not in c1_labels:
                c1_labels.append(c1)
            if c2 not in c2_labels:
                c2_labels.append(c2)
            return c1_labels.index(c1), c2_labels.index(c2)
        else:
            return c1_labels[c1], c2_labels[c2]

    def get_train_test_data(self, string, max_length=64):
        """Tokenize one string or a list of strings.

        All internal whitespace is removed before encoding; sequences are
        padded/truncated to ``max_length``. Labels are dummy 1s (inference
        only). Returns ``(token_id_lists, label1s, label2s)``.
        """
        if isinstance(string, str):
            texts = [string]
        elif isinstance(string, list):
            texts = string
        else:
            texts = []
        token_lists, label1, label2 = [], [], []
        for one_string in texts:
            one_string = "".join(one_string.split())
            ids = self.tokenizer.encode(one_string.strip(), max_length=max_length, truncation="longest_first",
                                        padding="max_length")
            token_lists.append(ids)
            label1.append(1)
            label2.append(1)
        return token_lists, label1, label2

    def predict(self, text: str = "", threshold: float = 0.0) -> List[Dict[str, Any]]:
        """Classify a single text; returns a one-element list with a dict of
        text, labels, and confidences. Labels below ``threshold`` are None.
        """
        item = dict()
        item['text'] = text
        x_train, c1_label, c2_label = self.get_train_test_data(text)
        loader = data.DataLoader(DataGen(x_train, c1_label, c2_label), batch_size=8)
        with torch.no_grad():
            for token_ids, _, _ in loader:
                token_ids = token_ids.to(self.device).long()
                out1, out2 = self.model(token_ids)
                for t1 in out1.cpu():
                    confidence = float(get_confidence(t1))
                    # Second index is a dummy; only c1 is used here.
                    c1, _ = self.get_label(int(np.argmax(t1)), 1)
                    print(f"{text}-->c1:{c1}-->confidence:{confidence}")
                    item["label1"] = c1 if confidence > threshold else None
                    item["label1_confidence"] = round(confidence, 4)
                for t2 in out2.cpu():
                    confidence = float(get_confidence(t2))
                    _, c2 = self.get_label(1, int(np.argmax(t2)))
                    print(f"{text}-->c2:{c2}-->confidence:{confidence}")
                    item["label2"] = c2 if confidence > threshold else None
                    item["label2_confidence"] = round(confidence, 4)
        return [item]

    def predict_batch(self, strings: List[str], threshold: float = 0.0) -> List[Dict[str, Any]]:
        """Classify a list of texts; returns one result dict per input.

        BUG FIX: the original built ``item`` in the head-1 loop but appended
        it only in the head-2 loop, so every batch appended the SAME dict
        once per row and all but the last head-1 result were lost. Iterating
        both heads together gives each input its own dict.
        """
        x_train, c1_label, c2_label = self.get_train_test_data(strings)
        loader = data.DataLoader(DataGen(x_train, c1_label, c2_label), batch_size=8)

        result = []
        with torch.no_grad():
            for token_ids, _, _ in loader:
                token_ids = token_ids.to(self.device).long()
                out1, out2 = self.model(token_ids)
                for t1, t2 in zip(out1.cpu(), out2.cpu()):
                    item = {}
                    confidence1 = float(get_confidence(t1))
                    c1, _ = self.get_label(int(np.argmax(t1)), 1)
                    item["label1"] = c1 if confidence1 > threshold else None
                    item["label1_confidence"] = round(confidence1, 4)
                    confidence2 = float(get_confidence(t2))
                    _, c2 = self.get_label(1, int(np.argmax(t2)))
                    item["label2"] = c2 if confidence2 > threshold else None
                    item["label2_confidence"] = round(confidence2, 4)
                    result.append(item)

        for item, string in zip(result, strings):
            item["text"] = string
        return result


if __name__ == '__main__':
    # Smoke test: classify one product title with the trained checkpoint.
    # (Use predictor.predict_batch([text, text], threshold=0.01) for batches.)
    text = "汰渍 洁净薰香薰衣草香氛洗衣粉 1.65KG"

    predictor = BrandPredictor(model_path=model_path, c1_labels=c1_labels, c2_labels=c2_labels)

    result = predictor.predict(text)
    print(result)
