|
from datasets import load_dataset |
|
import re |
|
import random |
|
|
|
def split_into_paragraphs(text):
    """Break *text* into stripped, non-empty paragraph chunks.

    A paragraph boundary is either a blank line (``\\n\\n``) or the start
    of a markdown-style heading (a line beginning with ``#``), matched as
    a zero-width lookahead so the heading stays with its own chunk.
    """
    chunks = re.split(r'\n\n|(?=^#)', text, flags=re.MULTILINE)
    stripped = (chunk.strip() for chunk in chunks)
    return [chunk for chunk in stripped if chunk]
|
|
|
def create_input_output_pairs(example):
    """Split an example's paragraph list into an input/target text pair.

    The split point is chosen randomly near the midpoint (midpoint +/- 1)
    so the model sees slightly varied context lengths. Both sides are
    guaranteed non-empty whenever there are at least two paragraphs.

    Args:
        example: mapping with a 'paragraphs' key holding a list of strings.

    Returns:
        dict with 'inputs' (leading paragraphs joined by a space) and
        'targets' (remaining paragraphs joined by a space).
    """
    paragraphs = example['paragraphs']
    n_paragraphs = len(paragraphs)

    # Degenerate cases: with fewer than two paragraphs no real split
    # exists — put whatever there is into inputs and leave targets empty.
    if n_paragraphs < 2:
        return {
            'inputs': ' '.join(paragraphs),
            'targets': ''
        }

    # Jitter the split point around the midpoint, then clamp into
    # [1, n_paragraphs - 1] so neither side ends up empty (the original
    # randint(mid - 1, mid + 1) could overshoot for n_paragraphs == 2).
    mid = n_paragraphs // 2
    n_input = max(1, min(random.randint(mid - 1, mid + 1), n_paragraphs - 1))

    return {
        'inputs': ' '.join(paragraphs[:n_input]),
        'targets': ' '.join(paragraphs[n_input:])
    }
|
|
|
def preprocess_dataset(dataset_name, text_column='text'):
    """Load a dataset and convert each record into an inputs/targets pair.

    Args:
        dataset_name: name or path understood by ``datasets.load_dataset``.
        text_column: column holding the raw text to split (default 'text').

    Returns:
        The mapped dataset with 'inputs' and 'targets' columns; the
        original text column and the intermediate 'paragraphs' column
        are dropped along the way.
    """
    raw = load_dataset(dataset_name)

    # Pass 1: replace the raw text column with its paragraph list.
    with_paragraphs = raw.map(
        lambda ex: {'paragraphs': split_into_paragraphs(ex[text_column])},
        remove_columns=[text_column]
    )

    # Pass 2: collapse each paragraph list into an inputs/targets pair.
    return with_paragraphs.map(
        create_input_output_pairs,
        remove_columns=['paragraphs']
    )
|
|
|
|
|
if __name__ == "__main__":
    # Hub id (or local path) of the dataset to preprocess — placeholder;
    # replace before running.
    dataset_name = 'your_dataset'

    processed = preprocess_dataset(dataset_name)

    # Peek at a handful of training examples as a sanity check.
    print(processed['train'][:5])

    # Persist the processed dataset for downstream use.
    processed.save_to_disk("preprocessed_dataset")
|
|