import torch
from torch import nn
from nezha.modeling.modeling import NeZhaPreTrainedModel, NeZhaModel
import jieba
import os
from fish_tool import logs
from transformers.configuration_utils import PretrainedConfig
from loader.vocab import Vocab

# Whole words (as produced by the segmenter) that are never useful as rationale
# evidence — generic nouns, connectives, etc.  NOTE: these are runtime data used
# by `word_is_ignore`; membership is checked on the joined token string.
ignore_words = {
    '也', '服务', '就是', '这家', '整体', '这个', '一家', '这', '装修',
    '水果', '葡萄', '成人教育', '珠江', '寸', '房间', '有所', '机器',
    '虽然', '但是', '其实',
    '本书', '地理位置', '朋友', '大家', '设施', '模具',
    '快捷键', '儿子', '书', '酒店', '法语'
}
# Individual tokens that are filtered out of rationales: special tokens (with and
# without brackets), punctuation, and high-frequency function words/pronouns.
ignore_tokens = {
    '[CLS]', '[UNK]', '[SEP]', '[MASK]', '[PAD]', 'CLS', 'UNK', 'SEP', 'MASK', 'PAD', '，', '！', '[', ']',
    '的', '你', '我', '他', '她', '们', '；'
}


def get_word_indexs22(tokens):
    """Align ``tokens`` with a jieba segmentation of their concatenation.

    Returns a list with one entry per jieba word: the list of token indexes
    that make up that word.  Because a single token may span several
    characters and a jieba word may span several tokens, two cursors are
    advanced over the two sequences until their accumulated texts match.

    Args:
        tokens: list of (sub)token strings; ``''.join(tokens)`` is re-segmented.

    Returns:
        list[list[int]]: token-index groups, one per segmented word.

    Raises:
        ValueError: when the two segmentations cannot be aligned (equal-length
            but different accumulated texts, or the word stream is exhausted
            while text remains unmatched).
    """
    txt = ''.join(tokens)
    words = list(jieba.cut(txt))
    if not words:
        # Empty input: nothing to align (the original crashed on words[0]).
        return []
    word_indexs = []
    token_i, word_i = -1, 0
    token_txt, word_txt = '', words[word_i]
    word_index = []
    while 1:
        if token_txt == word_txt:
            # Current word fully matched; emit its token indexes and advance.
            word_indexs.append(word_index)
            word_i += 1
            if word_i >= len(words):
                break
            token_txt, word_txt, word_index = '', words[word_i], []
        elif len(token_txt) < len(word_txt):
            # Token side is behind: consume one more token.
            token_i += 1
            token_txt += tokens[token_i]
            word_index.append(token_i)
        elif len(token_txt) > len(word_txt):
            # Word side is behind (a token straddles a word boundary):
            # merge the next word into the current one.
            word_i += 1
            if word_i >= len(words):
                # Ran out of words while text is still unmatched — report it
                # instead of raising a bare IndexError.
                logs.print(f'tokens={tokens}\nwords={words}  \ntoken_txt={token_txt} word_txt={word_txt} token_i={token_i} word_i={word_i}')
                raise ValueError('xxx')
            word_txt += words[word_i]
        else:
            # Same length but different text: tokens and jieba disagree.
            logs.print(f'tokens={tokens}\nwords={words}  \ntoken_txt={token_txt} word_txt={word_txt} token_i={token_i} word_i={word_i}')
            raise ValueError('xxx')
    return word_indexs


def get_word_indexs(tokens):
    """Align ``tokens`` with a paddlenlp NER segmentation of their concatenation.

    Like :func:`get_word_indexs22` but segments with paddlenlp's ``Taskflow('ner')``,
    which yields ``(word, entity_type)`` pairs.  Returns one dict per segment with:
    ``indexs`` (token indexes covering the word), ``etypes`` (entity type recorded
    per consumed token) and ``tokens`` (the token strings themselves).

    Raises:
        ValueError: when token and word segmentations cannot be aligned.
    """
    txt = ''.join(tokens)
    # Build the Taskflow pipeline once and cache it on the function: constructing
    # Taskflow('ner') loads a full model and is far too expensive to repeat per call.
    if getattr(get_word_indexs, '_ner', None) is None:
        from paddlenlp import Taskflow  # heavy optional dependency, imported lazily
        get_word_indexs._ner = Taskflow('ner')
    word_segs = get_word_indexs._ner(txt)
    if not word_segs:
        # Empty input: nothing to align (the original crashed on word_segs[0]).
        return []
    word_infos = []
    token_i, word_i = -1, 0
    token_txt, (word_txt, etype) = '', word_segs[word_i]
    word_info = {'indexs': [], 'etypes': []}
    while 1:
        if token_txt == word_txt:
            # Current segment fully matched; emit and advance to the next one.
            word_infos.append(word_info)
            word_i += 1
            if word_i >= len(word_segs):
                break
            token_txt, (word_txt, etype) = '', word_segs[word_i]
            word_info = {'indexs': [], 'etypes': []}
        elif len(token_txt) < len(word_txt):
            # Token side is behind: consume one more token, tagging it with the
            # current segment's entity type.
            token_i += 1
            token_txt += tokens[token_i]
            word_info['indexs'].append(token_i)
            word_info['etypes'].append(etype)
        elif len(token_txt) > len(word_txt):
            # Word side is behind: merge the next segment into the current word.
            word_i += 1
            if word_i >= len(word_segs):
                logs.print(f'tokens={tokens}\nword_segs={word_segs}  \ntoken_txt={token_txt} word_txt={word_txt} token_i={token_i} word_i={word_i}')
                raise ValueError('xxx')
            word_txt += word_segs[word_i][0]
            # NOTE(review): this appends an etype without a matching index, so
            # 'etypes' can grow longer than 'indexs' for merged segments — the
            # zip() in decode_interpret_word then silently drops the extras.
            # Preserved as-is; confirm before changing.
            word_info['etypes'].append(word_segs[word_i][1])
        else:
            # Same length but different text: the two segmentations disagree.
            logs.print(f'tokens={tokens}\nword_segs={word_segs}  \ntoken_txt={token_txt} word_txt={word_txt} token_i={token_i} word_i={word_i}')
            raise ValueError('xxx')
    for w in word_infos:
        w['tokens'] = [tokens[t] for t in w['indexs']]
    return word_infos


class NeZhaForSequenceClassification(nn.Module):
    """NeZha encoder + linear head for sequence classification.

    Besides logits, ``forward`` can optionally compute attention-based
    rationales ("interpretations"); the strategy is chosen by ``interpret_tag``
    through the ``tag__fn`` dispatch table.
    """

    def __init__(self, config):
        super().__init__()
        # Extend jieba's vocabulary with the dictionary shipped next to this file.
        jieba.load_userdict(os.path.join(os.path.dirname(__file__), "jiebaDict.txt"))
        self.num_labels = config.num_labels
        self.vocab = Vocab(config.pre_model_dir)
        bert_config = PretrainedConfig.from_pretrained(config.pre_model_dir)
        bert_config.output_hidden_states = False
        # Attentions are required by the interpret* methods below.
        bert_config.output_attentions = True
        self.bert = NeZhaModel.from_pretrained(config.pre_model_dir, config=bert_config)
        self.classifier = nn.Linear(config.hidden_size, self.num_labels)
        # interpret_tag value -> attention-interpretation strategy.
        self.tag__fn = {
            1: self.interpret1,
            2: self.interpret2,
            3: self.interpret3,
        }

    def forward(self, input_ids=None, token_type_ids=None, labels=None, interpret_tag=0, show=False, txts=None, data_ids=None, **tmp):
        """Classify; optionally attach rationales and (when training) the loss.

        Returns a dict with 'logits', 'pooled_out', optionally 'interprets'
        (when interpret_tag is truthy) and 'loss' (when self.training).
        """
        # Non-zero ids are real tokens — assumes padding id is 0 (TODO confirm).
        attention_mask = torch.ne(input_ids, 0)
        # encoder_out ~ (bsz, seq_len, dim); pooled_out ~ (bsz, dim);
        # all_attentions: one tensor per layer, each (bsz, head, seq_len, seq_len).
        encoder_out, pooled_out, all_attentions = self.bert(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
        logits = self.classifier(pooled_out)
        out = {'logits': logits, 'pooled_out': pooled_out}
        if interpret_tag:
            fn = self.tag__fn[interpret_tag]
            word_infos = fn(all_attentions, input_ids)
            out['interprets'] = self.decode_interpret_word(logits, word_infos, txts, data_ids=data_ids, show=show)
        if self.training:
            loss_fct = nn.CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            out['loss'] = loss
        return out

    def interpret1(self, all_attentions, *tmp):
        """Sum attention over all layers and heads, then take the [CLS] row.

        Evaluation on the project benchmark:
        score	accuracy	F1	MAP
        0.53197	0.88218	0.33377	0.57814
        """
        head_attn = all_attentions[0]
        for attn2 in all_attentions[1:]:
            head_attn = head_attn + attn2
        attn = head_attn.sum(dim=1)
        # Normalize by 144 = 12 layers * 12 heads — assumes a 12x12 model
        # (TODO confirm; not derived from the inputs).  -> (bsz, seq_len, seq_len)
        attn = attn / 144
        seq_attn = attn[:, 0, :]
        return seq_attn

    def interpret2(self, all_attentions, *tmp):
        """Use only the last attention layer, averaged over heads.

        Evaluation on the project benchmark:
        score	accuracy	F1	MAP
        0.53053	0.88218	0.37214	0.49567
        """
        head_attn = all_attentions[-1]
        attn = head_attn.sum(dim=1)
        attn = attn / 12  # 12 heads assumed (TODO confirm)  -> (bsz, seq_len, seq_len)
        seq_attn = attn[:, 0, :]
        return seq_attn

    def interpret3(self, all_attentions, input_ids, *tmp):
        """Last-layer attention, aggregated per segmented word (bsz=1, no padding).

        Segments the token sequence with :func:`get_word_indexs` and assigns each
        word the mean [CLS]-attention over its tokens, stored as 'prob'.

        Evaluation on the project benchmark:
        score	accuracy	F1	MAP
        0.53197	0.88218	0.33377	0.57814
        """
        head_attn = all_attentions[-1]
        attn = head_attn.sum(dim=1)
        attn = attn / 12  # 12 heads assumed (TODO confirm)  -> (bsz, seq_len, seq_len)
        input_ids = input_ids.tolist()
        seq_attn = attn[:, 0, :]  # (bsz, seq_len): attention from the [CLS] position
        b = 0  # batch index (only bsz=1 supported)
        attn = seq_attn[b]  # (seq_len,)
        tokens = [self.vocab.trans_id_to_token(t) for t in input_ids[b]]
        seg_word_infos = get_word_indexs(tokens)
        for word_info in seg_word_infos:
            start = word_info['indexs'][0]
            end = word_info['indexs'][-1]
            # Mean attention over the word's token span.
            prob = attn[start:end + 1].mean(dim=0).item()
            word_info['prob'] = prob
        return seg_word_infos

    def decode_interpret(self, input_ids, logits, seq_attn, txts, data_ids=None, show=True):
        """Pick up to 3 highest-attention, non-ignored token positions per sample.

        Returns a list of {'label', 'rationale'[, 'id']} dicts, one per batch item.
        """
        bsz, seq_len = input_ids.size()
        pred = logits.argmax(dim=-1).tolist()
        input_ids = input_ids.tolist()
        # Token positions ordered by descending attention.
        probs, indexs = torch.sort(seq_attn, descending=True, dim=-1)
        probs, indexs = probs.tolist(), indexs.tolist()
        out = []
        for b in range(bsz):
            if show:
                print('=' * 50)
                raw_txt = ''.join(txts[b])
                print(f'{pred[b]} | {raw_txt}')
                ids = [input_ids[b][index] for index in indexs[b]]
                tokens = [self.vocab.id_to_token(t) for t in ids]
                # BUG FIX: the original rebound the `txts` parameter here, so for
                # bsz > 1 later iterations read txts[b] from this debug list.
                token_strs = [f'{token}|{prob:0.2f}' for prob, token in zip(probs[b], tokens) if token not in ignore_tokens]
                txt = ' '.join(token_strs)
                print(txt)
            rationale = []
            for i in range(seq_len):
                index = indexs[b][i]
                token = self.vocab.id_to_token(input_ids[b][index])
                if token not in ignore_tokens:
                    rationale.append(index)
                if len(rationale) >= 3:
                    break
            if show:
                tokens = [self.vocab.id_to_token(input_ids[b][index]) for index in rationale]
                txt = ' '.join(tokens)
                print(txt)
            out.append({'label': pred[b], 'rationale': rationale})
            if data_ids:
                out[-1]['id'] = data_ids[b]
        return out

    def word_is_ignore(self, tokens):
        """True when the joined word is in ignore_words, or every token is ignorable."""
        word = ''.join(tokens)
        if word in ignore_words:
            return True
        for t in tokens:
            if t not in ignore_tokens:
                return False
        return True

    def decode_interpret_word(self, logits, word_infos, txts, data_ids=None, show=True):
        """Build a word-level rationale from scored word_infos (bsz=1 only).

        Words are taken in descending 'prob' order until >= 4 token positions are
        collected; tokens whose entity type is in ``use_etypes`` are always
        prepended.  Returns [{'label', 'rationale'[, 'id']}].
        """
        # Entity types that are force-included in the rationale.
        use_etypes = ['否定词', '修饰词', '疾病损伤类']
        seg_ids = []

        b = 0  # batch index (only bsz=1 supported)
        pred = logits.argmax(dim=-1).tolist()
        # Highest-attention words first.
        word_infos.sort(key=lambda x: x['prob'], reverse=True)
        rationale = []
        rationale_words = []
        for info in word_infos:
            for etype, index, token in zip(info['etypes'], info['indexs'], info['tokens']):
                if etype in use_etypes:
                    logs.print(f'{etype}-->{token}')
                    seg_ids.append(index)

            if not self.word_is_ignore(info['tokens']):
                rationale.extend(info['indexs'])
                rationale_words.append(''.join(info['tokens']))
            if len(rationale) >= 4:
                break

        # Prepend force-selected entity tokens, avoiding duplicates; reversed()
        # keeps their original relative order after repeated insert(0, ...).
        for seg_id in reversed(seg_ids):
            if seg_id not in rationale:
                rationale.insert(0, seg_id)

        out = []
        if show:
            logs.print('=' * 20)
            raw_txt = ''.join(txts[b])
            logs.print(f'{raw_txt} | '
                       f'\n情感={pred[b]} | 证据={rationale_words}')
        out.append({'label': pred[b], 'rationale': rationale})
        if data_ids:
            out[b]['id'] = data_ids[b]

        return out
