import numpy as np
from transformers import AutoTokenizer

from PET.pet_config import ProjectConfig


class HardTemplate(object):
    """Parse a hard prompt template such as ``'这是一条{MASK}评论：{textA}'``
    and fill it with concrete values to produce model-ready encodings.

    Placeholders are written as ``{name}``. ``MASK`` is a special placeholder
    whose replacement text is repeated ``mask_length`` times when the
    template is filled (so the model has one slot per character to predict).
    """

    def __init__(self, prompt: str):
        """
        Args:
            prompt: template string containing ``{...}`` placeholders.

        Raises:
            ValueError: if ``prompt`` contains an unmatched '{' or '}'.
        """
        self.prompt = prompt
        # Ordered sequence of literal characters and placeholder names.
        self.inputs_list = []
        # Placeholder names discovered in the prompt; 'MASK' is always treated
        # as a placeholder even if absent from the template.
        self.custom_tokens = set(['MASK'])
        self.prompt_analysis()

    def prompt_analysis(self):
        """Split ``self.prompt`` into literal characters and placeholder names.

        Literal text is stored character by character in ``self.inputs_list``;
        each ``{name}`` contributes the whole name as one element and is also
        recorded in ``self.custom_tokens``.

        Raises:
            ValueError: on an unmatched '{' or '}' in the prompt.
        """
        idx = 0
        length = len(self.prompt)
        while idx < length:
            if self.prompt[idx] not in ['{', '}']:
                self.inputs_list.append(self.prompt[idx])
            str_part = ''
            if self.prompt[idx] == '{':
                idx += 1
                # Collect the placeholder name up to the closing '}'.
                while idx < length and self.prompt[idx] != '}':
                    str_part += self.prompt[idx]
                    idx += 1
                if idx >= length:
                    # Bug fix: an unclosed '{' previously escaped as a raw
                    # IndexError instead of a descriptive ValueError.
                    raise ValueError("Unmatched bracket '{', check your prompt.")
            elif self.prompt[idx] == '}':
                raise ValueError("Unmatched bracket '}', check your prompt.")
            if str_part:
                self.inputs_list.append(str_part)
                self.custom_tokens.add(str_part)
            idx += 1

    def __call__(self, inputs_dict: dict, tokenizer, mask_length, max_seq_len=512):
        """Fill the template with ``inputs_dict`` and tokenize the result.

        Args:
            inputs_dict: maps every placeholder name in the template to its
                replacement text. The 'MASK' entry (typically ``'[MASK]'``)
                is repeated ``mask_length`` times; a missing key raises
                KeyError.
            tokenizer: HuggingFace-style tokenizer — callable, and providing
                ``convert_ids_to_tokens`` / ``convert_tokens_to_ids``.
            mask_length: number of mask tokens to insert for 'MASK'.
            max_seq_len: truncation / padding length for the encoding.

        Returns:
            dict with keys 'text' (decoded token string), 'input_ids',
            'token_type_ids', 'attention_mask', and 'mask_position'
            (indices of the mask token inside 'input_ids').
        """
        outputs = {
            'text': '',
            'input_ids': [],
            'token_type_ids': [],
            'attention_mask': [],
            'mask_position': []
        }

        # Rebuild the prompt with placeholders substituted, e.g.
        # '这是一条[MASK][MASK]评论：包装不错，苹果挺甜的，个头也大。'
        str_formated = ''
        for value in self.inputs_list:
            if value in self.custom_tokens:
                if value == 'MASK':
                    str_formated += inputs_dict[value] * mask_length
                else:
                    str_formated += inputs_dict[value]
            else:
                str_formated += value

        encoded = tokenizer(text=str_formated, truncation=True, max_length=max_seq_len, padding='max_length')

        outputs['input_ids'] = encoded['input_ids']
        outputs['token_type_ids'] = encoded['token_type_ids']
        outputs['attention_mask'] = encoded['attention_mask']
        # Human-readable form of exactly what the model sees, e.g.
        # '[CLS]这是一条[MASK][MASK]评论：…[SEP][PAD][PAD]'.
        text = ''.join(tokenizer.convert_ids_to_tokens(encoded['input_ids']))
        outputs['text'] = text
        # Locate every mask token (id 103 for BERT-style vocabularies) so the
        # caller knows which positions to predict.
        mask_token_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]
        mask_position = np.where(np.array(outputs['input_ids']) == mask_token_id)[0].tolist()
        outputs['mask_position'] = mask_position
        return outputs


if __name__ == '__main__':
    # Parse the prompt once and show how it was split into
    # literal characters and placeholder names.
    template = HardTemplate(prompt='这是一条{MASK}评论：{textA}')
    print(template.inputs_list)

    config = ProjectConfig()
    tokenizer = AutoTokenizer.from_pretrained(config.pre_model)

    # Fill the template and encode it with the pretrained tokenizer.
    result = template(
        inputs_dict={'textA': '包装不错，苹果挺甜的，个头也大。', 'MASK': '[MASK]'},
        tokenizer=tokenizer,
        mask_length=2,
        max_seq_len=30,
    )
    print(result)

    # The printed encoding answers, for the model:
    #  - input_ids:       the vocabulary index of every token it sees
    #  - token_type_ids:  sentence A vs. B (only used in BERT pair tasks)
    #  - attention_mask:  which positions are real tokens vs. padding
    #  - mask_position:   where the [MASK] tokens to be predicted sit
