# 将样本数据转换为模型接受的输入数据

import torch
import numpy as np
from rich import print
from datasets import load_dataset
from transformers import AutoTokenizer
from p_tuning_config import *
from functools import partial

# Module-level P-Tuning config; supplies the default number of pseudo (prompt)
# tokens used by convert_examples below. (Imported via `from p_tuning_config import *`.)
pt = p_tuning_config()
def convert_examples(examples: dict,
                     tokenizer,
                     max_seq_length: int,
                     max_label_len: int,
                     p_embedding_num=None,
                     train_mode=True,
                     return_tensor=False) -> dict:
    """Convert raw text samples into P-Tuning model inputs.

    In train mode each entry of ``examples['text']`` is expected to be
    ``"<label>\\t<content>"``; otherwise just ``"<content>"``.  The final
    sequence is laid out as::

        [unused1..k] [CLS] [MASK]*max_label_len <content tokens> ... <tail>

    Args:
        examples: dict with a 'text' key holding the raw example strings.
        tokenizer: HuggingFace-style tokenizer (callable, with
            ``convert_tokens_to_ids`` and ``pad_token_id``).
        max_seq_length: fixed total length of each encoded sequence.
        max_label_len: number of [MASK] slots reserved for the label.
        p_embedding_num: number of pseudo (prompt) tokens; when None the
            value from the module-level p_tuning_config is used (resolved
            lazily at call time instead of at import time).
        train_mode: when True, parse and encode the label as well.
        return_tensor: when True return torch.LongTensor values, else numpy arrays.

    Returns:
        dict with keys 'input_ids', 'attention_mask', 'mask_positions',
        'mask_label' (and 'token_type_ids' if the tokenizer emits them).
    """
    if p_embedding_num is None:
        p_embedding_num = pt.p_embedding_num

    tokenized_output = {
        'input_ids': [],
        'attention_mask': [],
        'mask_positions': [],
        'mask_label': []
    }

    # Loop-invariant id lists, hoisted out of the per-example loop.
    # Bug fix: the original used 'MASK' / 'unused{}' (no brackets); those are
    # not in a BERT vocab, so convert_tokens_to_ids mapped them all to [UNK].
    # The bracketed forms are the actual special/placeholder tokens.
    mask_tokens = ['[MASK]'] * max_label_len
    mask_ids = tokenizer.convert_tokens_to_ids(mask_tokens)
    p_tokens = ['[unused{}]'.format(i + 1) for i in range(p_embedding_num)]  # pseudo (prompt) tokens
    p_tokens_ids = tokenizer.convert_tokens_to_ids(p_tokens)
    start_mask_position = 1  # insert the [MASK] block right after [CLS]

    for example in examples['text']:
        try:
            if train_mode:
                label, content = example.strip().split('\t', 1)
            else:
                content = example.strip()

            encoded_inputs = tokenizer(
                text=content,
                truncation=True,
                max_length=max_seq_length,
                padding='max_length')
        except ValueError:
            # Malformed line (e.g. missing '\t' separator in train mode) -- skip it.
            continue

        input_ids = encoded_inputs['input_ids']

        # Drop the final token, trim to leave room for prompt + mask tokens,
        # then splice the [MASK] block in right after [CLS].
        tmp_input_ids = input_ids[:-1]
        tmp_input_ids = tmp_input_ids[:max_seq_length - len(mask_ids) - len(p_tokens_ids) - 1]
        tmp_input_ids = (tmp_input_ids[:start_mask_position]
                         + mask_ids
                         + tmp_input_ids[start_mask_position:])

        # Re-attach the original final token and prepend the prompt tokens.
        input_ids = tmp_input_ids + [input_ids[-1]]
        input_ids = p_tokens_ids + input_ids

        # Absolute positions of the [MASK] slots in the final sequence.
        mask_positions = [len(p_tokens_ids) + start_mask_position + i
                          for i in range(max_label_len)]

        tokenized_output['input_ids'].append(input_ids)

        if 'token_type_ids' in encoded_inputs:
            tokenized_output.setdefault('token_type_ids', []).append(
                encoded_inputs['token_type_ids'])
        # NOTE(review): attention_mask comes from the pre-splice encoding; its
        # length matches, but its 1s do not cover the prepended prompt tokens —
        # confirm downstream model expects this.
        tokenized_output['attention_mask'].append(encoded_inputs['attention_mask'])
        tokenized_output['mask_positions'].append(mask_positions)

        if train_mode:
            # Encode the label, strip [CLS]/[SEP], then truncate/pad to max_label_len.
            mask_labels = tokenizer(text=label)['input_ids'][1:-1]
            mask_labels = mask_labels[:max_label_len]
            mask_labels += [tokenizer.pad_token_id] * (max_label_len - len(mask_labels))
            tokenized_output['mask_label'].append(mask_labels)

    for k, v in tokenized_output.items():
        tokenized_output[k] = torch.LongTensor(v) if return_tensor else np.array(v)

    return tokenized_output







