from datasets import load_dataset, ClassLabel
from transformers import AutoTokenizer
from config import RAW_DATA_DIR, PROCESSED_DATA_DIR, PRETRAINED_DIR, SEQ_LEN, RANDOM_SEED


def process():
    """Preprocess the online-shopping review corpus for BERT fine-tuning.

    Loads the raw CSV, drops malformed rows, makes a stratified 80/20
    train/test split, tokenizes every review with the bert-base-chinese
    tokenizer, and saves both splits under PROCESSED_DATA_DIR.
    """
    # 1. Load the raw CSV; load_dataset exposes it as a single 'train' split.
    csv_path = RAW_DATA_DIR / 'online_shopping_10_cats.csv'
    ds = load_dataset('csv', data_files=str(csv_path))['train']

    # 2. Data cleaning: keep only rows with a non-blank review and a binary label.
    def is_valid(row):
        review = row['review']
        return review is not None and review.strip() != '' and row['label'] in [0, 1]

    ds = ds.filter(is_valid)

    # 3. Split the dataset; stratify_by_column requires a ClassLabel column.
    ds = ds.cast_column('label', ClassLabel(num_classes=2))
    splits = ds.train_test_split(test_size=0.2, stratify_by_column='label', seed=RANDOM_SEED)

    # 4. Tokenizer for the pretrained Chinese BERT checkpoint.
    tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_DIR / 'bert-base-chinese')

    def encode_batch(batch):
        """Tokenize a batch of reviews, padding/truncating to SEQ_LEN."""
        encoded = tokenizer(
            batch['review'],
            truncation=True,
            max_length=SEQ_LEN,
            padding='max_length'
        )
        # Keep only the fields the model consumes.
        return {key: encoded[key] for key in ('input_ids', 'attention_mask')}

    splits = splits.map(encode_batch, batched=True)  # 62773

    # Raw text and category columns are no longer needed after tokenization.
    splits = splits.remove_columns(['review', 'cat'])

    # 5. Persist each split to disk for later training/evaluation.
    splits['train'].save_to_disk(str(PROCESSED_DATA_DIR / 'train'))  # 50218
    splits['test'].save_to_disk(str(PROCESSED_DATA_DIR / 'test'))  # 12555


# Run the preprocessing pipeline when executed as a script.
if __name__ == '__main__':
    process()
