from datasets import load_dataset, ClassLabel
from transformers import AutoTokenizer
from configuration import config


def process_data():
    """Load the raw TSV splits, clean them, encode labels and text, and save
    each processed split to disk.

    Reads ``train.txt`` / ``test.txt`` / ``valid.txt`` (tab-separated, with
    ``text_a`` and ``label`` columns) from ``config.RAW_DATA_DIR``, drops rows
    with missing text or label, casts ``label`` to a ``ClassLabel``, tokenizes
    ``text_a`` with bert-base-chinese to fixed length ``config.SEQ_LEN``, and
    writes the processed splits under ``config.PROCESSED_DATA_DIR``.

    Side effects: reads from disk, downloads nothing (tokenizer is local),
    writes three dataset directories. Returns ``None``.
    """
    # 1. Load the three raw splits as a DatasetDict.
    dataset_dict = load_dataset('csv', data_files={
        'train': str(config.RAW_DATA_DIR / 'train.txt'),
        'test': str(config.RAW_DATA_DIR / 'test.txt'),
        'valid': str(config.RAW_DATA_DIR / 'valid.txt')
    }, delimiter='\t')

    # 2. Drop rows with a missing text or label (malformed TSV lines).
    dataset_dict = dataset_dict.filter(lambda x: x['text_a'] is not None and x['label'] is not None)

    # 3. Build the label vocabulary from ALL splits, not just train: if
    # test/valid contained a label unseen in train, cast_column would fail.
    # Sorting makes the label -> id mapping deterministic across runs.
    all_labels = set()
    for split in dataset_dict:
        all_labels.update(dataset_dict[split].unique('label'))
    dataset_dict = dataset_dict.cast_column('label', ClassLabel(names=sorted(all_labels)))  # 30 classes

    # 4. Load the tokenizer from the local pretrained-model directory.
    tokenizer = AutoTokenizer.from_pretrained(config.PRETRAINED_DIR / 'bert-base-chinese')

    # NOTE: max tokenized length measured on train was 50, hence SEQ_LEN is
    # safe to use as a fixed padding length without losing content.

    def tokenize(example):
        # Pad/truncate every example to exactly config.SEQ_LEN tokens so the
        # saved dataset has uniform-length tensors.
        encoded = tokenizer(
            example['text_a'],
            truncation=True,
            padding='max_length',
            max_length=config.SEQ_LEN
        )

        example['input_ids'] = encoded['input_ids']
        example['attention_mask'] = encoded['attention_mask']
        return example

    # batched=True: tokenize whole batches at once (much faster);
    # raw text is no longer needed after encoding, so drop the column.
    dataset_dict = dataset_dict.map(tokenize, batched=True, remove_columns=['text_a'])

    # 5. Persist each processed split to its own directory.
    dataset_dict['train'].save_to_disk(str(config.PROCESSED_DATA_DIR / 'train'))  # 88824 rows
    dataset_dict['test'].save_to_disk(str(config.PROCESSED_DATA_DIR / 'test'))  # 22207 rows
    dataset_dict['valid'].save_to_disk(str(config.PROCESSED_DATA_DIR / 'valid'))  # 22207 rows
