from torch.utils.data import DataLoader
from transformers import default_data_collator
from data_handle.data_preprocess import *
from p_tuning_config import *


# Module-level singletons created once at import time and shared by get_data():
# the p-tuning configuration object and the pretrained tokenizer it names.
pt = p_tuning_config()  # hyper-parameters and data paths (from p_tuning_config module)
tokenizer = AutoTokenizer.from_pretrained(pt.pre_model)  # tokenizer for the configured base model

def get_data():
    """Build and return ``(train_loader, dev_loader)``.

    Loads the raw text files given by ``pt.train_path`` / ``pt.dev_path``,
    converts every example with :func:`convert_examples` (batched map using
    the module-level tokenizer and the lengths from the p-tuning config),
    then wraps each split in a ``DataLoader`` with ``default_data_collator``.
    Only the train loader shuffles.
    """
    raw = load_dataset('text', data_files={'train': pt.train_path,
                                           'dev': pt.dev_path})

    # Bind the fixed conversion arguments once; map() supplies the batches.
    convert_fn = partial(convert_examples,
                         tokenizer=tokenizer,
                         max_seq_length=pt.max_len,
                         max_label_len=pt.max_label_len,
                         p_embedding_num=pt.p_embedding_num)
    processed = raw.map(convert_fn, batched=True)

    # Same collator and batch size for both splits; shuffle train only.
    loaders = {}
    for split, shuffle in (('train', True), ('dev', False)):
        loaders[split] = DataLoader(processed[split],
                                    batch_size=pt.batch_size,
                                    shuffle=shuffle,
                                    collate_fn=default_data_collator)

    return loaders['train'], loaders['dev']