"""dataset-preprocessor.py: preprocessing script for the goodwiki-text2text-completion dataset."""
import random
import re

from datasets import load_dataset


def split_into_paragraphs(text):
    """Split text into paragraphs on blank lines or markdown headers."""
    # Split on double newlines, or just before a line starting with '#'
    # (the lookahead keeps each header attached to the section it opens).
    paragraphs = re.split(r'\n\n|(?=^#)', text, flags=re.MULTILINE)
    return [p.strip() for p in paragraphs if p.strip()]
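
# Illustrative behavior: a markdown header starts a new paragraph even without
# a preceding blank line, e.g.
#   split_into_paragraphs("Intro text.\n# Heading\nBody.")
#   -> ['Intro text.', '# Heading\nBody.']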


def create_input_output_pairs(example):
    """Split a paragraph list into an input prefix and a target completion."""
    paragraphs = example['paragraphs']
    n_paragraphs = len(paragraphs)
    # Randomly select about half of the paragraphs for the input, clamped so
    # the targets stay non-empty whenever there is more than one paragraph.
    half = n_paragraphs // 2
    n_input = max(1, random.randint(half - 1, half + 1))
    n_input = min(n_input, max(1, n_paragraphs - 1))
    input_paragraphs = paragraphs[:n_input]
    output_paragraphs = paragraphs[n_input:]
    return {
        'inputs': ' '.join(input_paragraphs),
        'targets': ' '.join(output_paragraphs),
    }
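
# Note: the split point is sampled independently for each example; with four
# paragraphs, for instance, the input takes the first 1-3 and the targets get
# the rest. Call random.seed(...) before preprocessing for reproducible pairs.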


def preprocess_dataset(dataset_name, text_column='text'):
    """Load a dataset and turn each document into an (inputs, targets) pair."""
    dataset = load_dataset(dataset_name)
    # Split each document's text into paragraphs
    dataset = dataset.map(
        lambda example: {'paragraphs': split_into_paragraphs(example[text_column])},
        remove_columns=[text_column]
    )
    # Create input-output pairs and drop the intermediate column
    preprocessed_dataset = dataset.map(
        create_input_output_pairs,
        remove_columns=['paragraphs']
    )
    return preprocessed_dataset


# Usage example
if __name__ == "__main__":
    # Replace 'your_dataset' with the actual dataset name
    dataset_name = 'your_dataset'
    preprocessed_dataset = preprocess_dataset(dataset_name)
    # Print a few preprocessed examples from the train split
    print(preprocessed_dataset['train'][:5])
    # Save the preprocessed dataset to disk
    preprocessed_dataset.save_to_disk("preprocessed_dataset")
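    # To reload later without re-running the preprocessing, use
    # datasets.load_from_disk:
    #   from datasets import load_from_disk
    #   preprocessed_dataset = load_from_disk("preprocessed_dataset")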