import torch
from torch import nn
from nezha.modeling.modeling import NeZhaPreTrainedModel, NeZhaModel
import jieba
import os
from fish_tool import logs
from transformers.configuration_utils import PretrainedConfig
from loader.vocab import Vocab
from fish_tool.ai import torch_tool

# Words (from jieba segmentation) to skip when extracting rationale/interpretation
# candidates — mostly generic nouns and connectives that carry no sentiment signal.
ignore_words = {
    '也', '服务', '就是', '这家', '整体', '这个', '一家', '这', '装修',
    '水果', '葡萄', '成人教育', '珠江', '寸', '房间', '有所', '机器',
    '虽然', '但是', '其实',
    '本书', '地理位置', '朋友', '大家', '设施', '模具',
    '快捷键', '儿子', '书', '酒店', '法语'
}
# Tokens to skip at the tokenizer level: BERT special tokens (both bracketed and
# bare forms), punctuation, and high-frequency function words/pronouns.
ignore_tokens = {
    '[CLS]', '[UNK]', '[SEP]', '[MASK]', '[PAD]', 'CLS', 'UNK', 'SEP', 'MASK', 'PAD', '，', '！', '[', ']',
    '的', '你', '我', '他', '她', '们', '；'
}


def get_word_indexs22(tokens):
    """Align ``tokens`` against the words produced by ``jieba.cut``.

    Returns a list of lists — one entry per segmented word, holding the
    indices (into ``tokens``) of the tokens that compose that word.

    A token may span several characters and so may a word, and neither side
    is guaranteed to be the longer one, so two cursors advance over the two
    sequences until their accumulated text matches (see the while-loop).

    Raises:
        ValueError: if the token text and the word text diverge (equal
            length but different content), which should not happen when
            ``words`` is a re-segmentation of ``''.join(tokens)``.
    """
    if not tokens:
        # jieba.cut('') yields nothing; without this guard words[word_i]
        # below would raise IndexError on an empty input.
        return []
    txt = ''.join(tokens)
    words = list(jieba.cut(txt))
    word_indexs = []
    token_i, word_i = -1, 0
    token_txt, word_txt = '', words[word_i]
    word_index = []
    while 1:
        if token_txt == word_txt:
            # Current word fully matched: record the token indices it covers
            # and move on to the next word.
            word_indexs.append(word_index)
            word_i += 1
            if word_i >= len(words):
                break
            token_txt, word_txt, word_index = '', words[word_i], []
        elif len(token_txt) < len(word_txt):
            # Token side is behind: consume the next token.
            token_i += 1
            token_txt += tokens[token_i]
            word_index.append(token_i)
        elif len(token_txt) > len(word_txt):
            # Word side is behind: merge the next word in.
            word_i += 1
            word_txt += words[word_i]
        else:
            # Same length but different text: the two segmentations diverged.
            logs.print(f'tokens={tokens}\nwords={words}  \ntoken_txt={token_txt} word_txt={word_txt} token_i={token_i} word_i={word_i}')
            raise ValueError('xxx')
    return word_indexs


def get_word_indexs(tokens):
    """Align ``tokens`` against the (word, entity-type) segments of a
    paddlenlp NER Taskflow.

    Returns a list of dicts, one per segmented word, each with keys:
        - 'indexs': indices (into ``tokens``) of the tokens in that word
        - 'etypes': the entity type recorded for each consumed token/word
        - 'tokens': the token strings themselves (filled in at the end)

    Same two-cursor alignment as ``get_word_indexs22``: tokens and words
    can both span several characters, so both sides accumulate text until
    they match.

    Raises:
        ValueError: if the token text and the word text diverge (equal
            length but different content).
    """
    if not tokens:
        # An empty input would make word_segs empty and word_segs[0] below
        # raise IndexError.
        return []
    txt = ''.join(tokens)
    from paddlenlp import Taskflow

    # Building a Taskflow loads a full NER model — far too expensive to do
    # per call.  Create it once and cache it on the function object.
    seg1 = getattr(get_word_indexs, '_ner_taskflow', None)
    if seg1 is None:
        seg1 = Taskflow('ner')
        get_word_indexs._ner_taskflow = seg1
    word_segs = seg1(txt)
    word_infos = []
    token_i, word_i = -1, 0
    token_txt, (word_txt, etype) = '', word_segs[word_i]
    word_info = {'indexs': [], 'etypes': []}
    while 1:
        if token_txt == word_txt:
            # Current word fully matched: store it and advance to the next.
            word_infos.append(word_info)
            word_i += 1
            if word_i >= len(word_segs):
                break
            token_txt, (word_txt, etype) = '', word_segs[word_i]
            word_info = {'indexs': [], 'etypes': []}
        elif len(token_txt) < len(word_txt):
            # Token side is behind: consume the next token.
            token_i += 1
            token_txt += tokens[token_i]
            word_info['indexs'].append(token_i)
            word_info['etypes'].append(etype)
        elif len(token_txt) > len(word_txt):
            # Word side is behind: merge the next segment in.
            word_i += 1
            word_txt += word_segs[word_i][0]
            word_info['etypes'].append(word_segs[word_i][1])
        else:
            # Same length but different text: the two segmentations diverged.
            logs.print(f'tokens={tokens}\nword_segs={word_segs}  \ntoken_txt={token_txt} word_txt={word_txt} token_i={token_i} word_i={word_i}')
            raise ValueError('xxx')
    for w in word_infos:
        w['tokens'] = [tokens[t] for t in w['indexs']]
    return word_infos


class NeZhaForSequenceClassification(nn.Module):
    """NeZha encoder with two heads: a sequence-level classifier over the
    pooled output and a per-token 2-way "interpretation" head that predicts
    which tokens form the rationale for the label.
    """

    def __init__(self, config):
        super().__init__()
        # Load the domain dictionary so jieba segments project terms consistently.
        jieba.load_userdict(os.path.join(os.path.dirname(__file__), "jiebaDict.txt"))
        self.loss_fct = nn.CrossEntropyLoss()
        self.loss_int = nn.CrossEntropyLoss()
        self.num_labels = config.num_labels
        self.vocab = Vocab(config.pre_model_dir)
        bert_config = PretrainedConfig.from_pretrained(config.pre_model_dir)
        bert_config.output_hidden_states = False
        bert_config.output_attentions = True
        self.bert = NeZhaModel.from_pretrained(config.pre_model_dir, config=bert_config)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        self.interpret_line = nn.Linear(config.hidden_size, 2)
        torch_tool.init_model_weights(self.interpret_line)

    def forward(self, input_ids=None, token_type_ids=None, labels=None, rationale=None, show=False, txts=None, data_ids=None, **tmp):
        """Run the encoder and both heads.

        Returns a dict with 'logits', 'pooled_out', 'interprets' and
        'interpret_logit'; in training mode a 'loss' entry is added,
        summing the classification and (optional) rationale losses.
        """
        # Token id 0 is treated as padding when building the attention mask.
        mask = torch.ne(input_ids, 0)
        # encoder_out ~ (bsz, seq_len, dim); pooled_out ~ (bsz, dim);
        # all_attentions: one (bsz, head, seq_len, seq_len) tensor per layer.
        encoder_out, pooled_out, all_attentions = self.bert(input_ids, attention_mask=mask, token_type_ids=token_type_ids)
        logits = self.classifier(pooled_out)
        interpret_logit = self.interpret_line(encoder_out)
        # Positions predicted as rationale; shift by -1 to undo the CLS offset.
        interprets = [
            {'rationale': [pos - 1 for pos, flag in enumerate(row) if flag and pos > 0]}
            for row in interpret_logit.argmax(dim=-1).tolist()
        ]
        if not self.training:
            # At inference time attach the predicted label (and sample id, if given).
            for idx, pred in enumerate(logits.argmax(dim=-1).tolist()):
                interprets[idx]['label'] = pred
                if data_ids:
                    interprets[idx]['id'] = data_ids[idx]
        out = {'logits': logits, 'pooled_out': pooled_out, 'interprets': interprets, 'interpret_logit': interpret_logit}
        if self.training:
            loss = 0
            if isinstance(labels, torch.Tensor):
                loss = loss + self.loss_fct(logits, labels)
            if isinstance(rationale, torch.Tensor):
                loss = loss + self.loss_int(interpret_logit.view(-1, 2), rationale.view(-1))
            out['loss'] = loss
        return out