#!/usr/bin/env python3

'''
Download and preprocess the CCMatrix English-Basque parallel corpus:
keep sentence pairs with at least 40 characters on both sides, select the
top and bottom 10% of the filtered pairs by score, shuffle the result, and
push it to the Hugging Face Hub.
'''

from datasets import load_dataset, concatenate_datasets

def main():
    dataset = load_dataset('xezpeleta/ccmatrix', 'en-eu', split='train', trust_remote_code=True)
    # Inspect a sample record:
    # print(next(iter(dataset)))
    # {'id': 0, 'score': 1.2498578, 'translation': {'en': "He stands to God's word, and God's worship.", 'eu': 'Jaungoikoa goratzera bideratuta egongo da eta Jaungoikoa bere borondatea betez goratzen da.'}}

    # Filter sentences with 40 characters or more in both English and Basque
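    # (Dataset.filter also accepts num_proc to parallelise this step on a large corpus.)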
    filtered_dataset = dataset.filter(
        lambda example: len(example['translation']['en']) >= 40
        and len(example['translation']['eu']) >= 40
    )

    # Sort the dataset based on the "score" column - DISABLED (the dataset is already sorted by score)
    #sorted_dataset = dataset.sort("score", reverse=True)

    # Calculate how many samples make up 10% of the filtered dataset
    num_samples = len(filtered_dataset)
    top_10_percent = int(num_samples * 0.1)

    # Get the top 10% (highest scores) and the last 10% (lowest scores);
    # the dataset is already sorted by score, so both are simple slices
    top_10_samples = filtered_dataset.select(range(top_10_percent))
    last_10_samples = filtered_dataset.select(range(num_samples - top_10_percent, num_samples))

    # Combine the top and last 10% samples
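    # Sanity check: both slices come from the same dataset, so their schemas must match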
    assert top_10_samples.features.type == last_10_samples.features.type
    resulting_dataset = concatenate_datasets([top_10_samples, last_10_samples])

    # Shuffle the dataset
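    # (a fixed seed, e.g. shuffle(seed=42), would make the shuffle reproducible)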
    resulting_dataset = resulting_dataset.shuffle()

    # Generate train and eval
    #resulting_dataset = resulting_dataset.train_test_split(test_size=0.1)

    # Save the dataset
    #resulting_dataset.to_json("ccmatrix_eng_eus_filtered.jsonl")

    # Upload the dataset to HF
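    # Note: push_to_hub assumes you are authenticated with the Hugging Face Hub,
    # e.g. via `huggingface-cli login` or the HF_TOKEN environment variable.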
    resulting_dataset.push_to_hub("xezpeleta/ccmatrix_eng_eus_filtered")

if __name__ == '__main__':
    main()
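
# Once pushed, the filtered corpus can be loaded back with, for example:
#   from datasets import load_dataset
#   ds = load_dataset("xezpeleta/ccmatrix_eng_eus_filtered", split="train")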