# coding: UTF-8
from torch.utils.data import DataLoader
from transformers import default_data_collator
from data_preprocess import *
from pet_config import *

# Module-level singletons shared by get_data():
# project configuration (paths, batch size, sequence-length limits) ...
pc = ProjectConfig()
# ... and the tokenizer matching the configured pretrained model.
# NOTE(review): AutoTokenizer arrives via a wildcard import above — confirm
# which module actually provides it (data_preprocess or pet_config).
tokenizer = AutoTokenizer.from_pretrained(pc.pre_model)


def get_data():
    """Build the train and dev DataLoaders for PET-style training.

    Reads the hard-prompt template from ``pc.prompt_file`` (first line only),
    loads the raw text train/dev datasets, converts each example with
    ``convert_example`` (tokenization + template filling), and wraps the
    results in DataLoaders.

    Returns:
        tuple[DataLoader, DataLoader]: ``(train_loader, dev_loader)``; the
        train loader shuffles, the dev loader does not.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original `open(...).readlines()[0]` leaked the handle).
    with open(pc.prompt_file, 'r', encoding='utf-8') as f:
        prompt = f.readline().strip()
    hard_template = HardTemplate(prompt=prompt)

    dataset = load_dataset('text', data_files={'train': pc.train_path,
                                               'dev': pc.dev_path})

    # Bind the static conversion arguments once; dataset.map supplies the
    # batched examples.
    new_func = partial(convert_example,
                       tokenizer=tokenizer,
                       hard_template=hard_template,
                       max_seq_len=pc.max_len,
                       max_label_len=pc.max_label_len)
    dataset = dataset.map(new_func, batched=True)

    train_dataset = dataset['train']
    dev_dataset = dataset['dev']

    # default_data_collator stacks the already-tokenized features into tensors.
    train_loader = DataLoader(train_dataset, batch_size=pc.batch_size,
                              shuffle=True, collate_fn=default_data_collator)
    dev_loader = DataLoader(dev_dataset, batch_size=pc.batch_size,
                            collate_fn=default_data_collator)

    return train_loader, dev_loader

if __name__ == '__main__':
    # Smoke test: build both loaders and report how many batches each yields.
    loaders = get_data()
    print(len(loaders[0]), len(loaders[1]))



