# -*- coding: utf-8 -*-
# Combine a hand-written prompt template with the input text.

from rich import print # 终端层次显示
from transformers import AutoTokenizer
import numpy as np
import sys
sys.path.append("..")
from pet_config import *

class HardTemplate(object):
    """Fill a hard (hand-written) prompt template with per-example values.

    A template is a plain string with ``{field}`` placeholders, e.g.
    ``'这是一条{MASK}评论：{textA}'``. ``prompt_analysis`` splits it into a
    sequence of literal characters and field names; calling the instance
    substitutes values from an inputs dict, tokenizes the result, and records
    the positions of the ``[MASK]`` tokens.
    """

    def __init__(self, prompt: str):
        """
        Args:
            prompt: template string containing ``{field}`` placeholders.

        Raises:
            ValueError: if the template has unbalanced braces.
        """
        self.prompt = prompt
        # Ordered template pieces: literal single characters and field names.
        self.inputs_list = []
        # Names of all placeholders seen; 'MASK' is always considered one.
        self.custom_tokens = set(['MASK'])
        self.prompt_analysis()

    def prompt_analysis(self):
        """Parse ``self.prompt`` into ``inputs_list`` and ``custom_tokens``.

        Raises:
            ValueError: on a stray '}' or an unclosed '{'.
        """
        idx = 0
        while idx < len(self.prompt):
            str_part = ''
            ch = self.prompt[idx]
            if ch == '{':
                # Collect the field name up to the matching closing brace.
                idx += 1
                while idx < len(self.prompt) and self.prompt[idx] != '}':
                    str_part += self.prompt[idx]
                    idx += 1
                if idx >= len(self.prompt):
                    # Fix: an unclosed '{' used to fall off the end of the
                    # string and crash with IndexError; fail with a clear error.
                    raise ValueError("Unclosed bracket '{', check your prompt")
            elif ch == '}':
                # Fix: the message used to blame '{' for a stray '}'.
                raise ValueError("Unmatched bracket '}', check your prompt")
            else:
                # Literal template text is stored character by character.
                self.inputs_list.append(ch)

            if str_part:
                self.inputs_list.append(str_part)
                self.custom_tokens.add(str_part)
            idx += 1

    def __call__(self,
                 inputs_dict: dict,
                 tokenizer,
                 mask_length,
                 max_seq_len=512):
        """Render the template for one example and encode it.

        Args:
            inputs_dict: maps each placeholder name to its value.
                ``inputs_dict['MASK']`` is the mask token string (e.g.
                '[MASK]') and is repeated ``mask_length`` times.
            tokenizer: a HuggingFace tokenizer; assumed to emit
                ``token_type_ids`` (BERT-style) — TODO confirm for other models.
            mask_length: number of consecutive mask tokens to insert.
            max_seq_len: sequences are truncated and padded to this length.

        Returns:
            dict with keys 'text', 'input_ids', 'token_type_ids',
            'attention_mask' and 'mask_position' (indices of [MASK] tokens
            in the padded sequence).
        """
        outputs = {
            'text': '',
            'input_ids': [],
            'token_type_ids': [],
            'attention_mask': [],
            'mask_position': []
        }

        # Substitute each template piece: placeholders come from inputs_dict,
        # literal characters pass through unchanged.
        str_formated = ''
        for value in self.inputs_list:
            if value in self.custom_tokens:
                if value == 'MASK':
                    # Repeat the mask token to reserve mask_length positions.
                    str_formated += inputs_dict[value] * mask_length
                else:
                    str_formated += inputs_dict[value]
            else:
                str_formated += value

        encoded = tokenizer(text=str_formated,
                            truncation=True,
                            max_length=max_seq_len,
                            padding='max_length')

        outputs['input_ids'] = encoded['input_ids']
        outputs['token_type_ids'] = encoded['token_type_ids']
        outputs['attention_mask'] = encoded['attention_mask']
        # Human-readable form of the final (truncated/padded) sequence.
        outputs['text'] = ''.join(tokenizer.convert_ids_to_tokens(encoded['input_ids']))
        # Locate every [MASK] position so the caller can read predictions there.
        mask_token_id = tokenizer.convert_tokens_to_ids(['[MASK]'])[0]
        input_ids = np.array(outputs['input_ids'])
        outputs['mask_position'] = np.where(input_ids == mask_token_id)[0].tolist()
        return outputs


def _demo():
    """Manual smoke test: render a sample review through HardTemplate."""
    config = ProjectConfig()
    tokenizer = AutoTokenizer.from_pretrained(config.pre_model)

    template = HardTemplate(prompt='这是一条{MASK}评论：{textA}')
    print(template.inputs_list)
    print(template.custom_tokens)

    encoded = template(
        inputs_dict={'textA': '包装不错，苹果挺甜，个头也大', 'MASK': '[MASK]'},
        tokenizer=tokenizer,
        max_seq_len=30,
        mask_length=2)
    print(encoded)

    # Spot-check round-tripping between ids and tokens.
    print(tokenizer.convert_ids_to_tokens([3819, 3352, 3819, 3352]))
    print(tokenizer.convert_tokens_to_ids(['网', '球']))


if __name__ == '__main__':
    _demo()
