from PET.data_handle.template import HardTemplate
from PET.pet_config import ProjectConfig
from transformers import AutoTokenizer
from datasets import load_dataset
from functools import partial
from PET.data_handle.data_dataset import convert_example
from torch.utils.data import DataLoader
from transformers import default_data_collator  # default_data_collator作用，转换为tensor数据类型


def get_data(pc):
    """Build train/dev DataLoaders for prompt-based (PET) fine-tuning.

    Args:
        pc: project config object; must provide ``prompt_file``,
            ``pre_model``, ``train_path``, ``dev_path``, ``max_seq_len``,
            ``max_label_len`` and ``batch_size``.

    Returns:
        tuple: ``(train_dataloader, dev_dataloader)`` yielding batches of
        tensors (keys produced by ``convert_example``, e.g. 'input_ids',
        'token_type_ids', 'attention_mask', 'mask_positions', 'mask_labels').
    """
    # Read the first line of the prompt file as the hard-prompt template.
    # Context manager guarantees the file handle is closed (the original
    # left it dangling); readline() avoids loading the whole file.
    with open(pc.prompt_file, 'r', encoding='utf8') as f:
        prompt = f.readline().strip()

    # Template parser for the hard prompt.
    hard_template = HardTemplate(prompt=prompt)

    # Tokenizer matching the pretrained backbone.
    tokenizer = AutoTokenizer.from_pretrained(pc.pre_model)

    # Load train/dev splits as raw text lines.
    dataset = load_dataset('text', data_files={'train': pc.train_path, 'dev': pc.dev_path})

    # Bind the static conversion arguments once, then map over both splits.
    convert_fun = partial(convert_example, tokenizer=tokenizer, max_seq_len=pc.max_seq_len,
                          max_label_len=pc.max_label_len, hard_template=hard_template)
    dataset = dataset.map(convert_fun, batched=True)

    train_dataset = dataset['train']
    dev_dataset = dataset['dev']

    # default_data_collator stacks the mapped features into torch tensors.
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=pc.batch_size,
                                  shuffle=True,
                                  collate_fn=default_data_collator)
    dev_dataloader = DataLoader(dev_dataset,
                                batch_size=pc.batch_size,
                                collate_fn=default_data_collator)
    return train_dataloader, dev_dataloader


if __name__ == '__main__':
    # Smoke test: point the project config at local data and inspect one batch.
    pc = ProjectConfig()
    for attr, path in [
        ('pre_model', '../../pre_model/bert-base-chinese'),
        ('train_path', '../data/train.txt'),
        ('dev_path', '../data/dev.txt'),
        ('prompt_file', '../data/prompt.txt'),
        ('verbalizer', '../data/verbalizer.txt'),
    ]:
        setattr(pc, attr, path)

    train_dataloader, dev_dataloader = get_data(pc)
    print('len(train_dataloader):', len(train_dataloader))  # 63/8 =8
    print('len(dev_dataloader):', len(dev_dataloader))  # 590/8 = 74

    # Pull a single batch instead of looping and breaking.
    value = next(iter(train_dataloader))
    print(value)
    print(value.keys())
    print("value['input_ids'].shape:", value['input_ids'].shape)
    print("value['token_type_ids'].shape:", value['token_type_ids'].shape)
    print("value['attention_mask'].shape:", value['attention_mask'].shape)
    print("value['mask_positions'].shape:", value['mask_positions'].shape)
    print("value['mask_labels'].shape:", value['mask_labels'].shape)

    # Observed output (batch_size=8, max_seq_len=512):
    #   dict_keys(['input_ids', 'token_type_ids', 'attention_mask',
    #              'mask_positions', 'mask_labels'])
    #   input_ids / token_type_ids / attention_mask: torch.Size([8, 512])
    #   mask_positions / mask_labels:                torch.Size([8, 2])
