Datasets:
Xabi Ezpeleta
committed on
Commit
•
312d05b
1
Parent(s):
6711d2f
add ccmatrix_filtered generation scripts
Browse files
eng-eus/ccmatrix/ccmatrix_generate_txts.py
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#! /usr/bin/env python3

'''
Download the dataset xezpeleta/ccmatrix_eng_eus_filtered from Hugging Face.

Each record has the following structure:

{
    'id': 0, 'score': 1.2498578, 'translation':
    {
        'en': "He stands to God's word, and God's worship.",
        'eu': 'Jaungoikoa goratzera bideratuta egongo da eta Jaungoikoa bere borondatea betez goratzen da.'
    }
}

Then create the following parallel text files (one sentence per line):
- ccmatrix_filtered.en-eu.en
- ccmatrix_filtered.en-eu.eu
'''

from datasets import load_dataset


def main() -> None:
    """Stream the train split and write aligned English/Basque text files.

    Line N of the .en file corresponds to line N of the .eu file, since
    both files are written in lockstep from the same record.
    """
    dataset = load_dataset('xezpeleta/ccmatrix_eng_eus_filtered')

    # encoding='utf-8' is explicit: Basque text contains non-ASCII
    # characters and would break under a non-UTF-8 locale default.
    with open('ccmatrix_filtered.en-eu.en', 'w', encoding='utf-8') as en_file, \
         open('ccmatrix_filtered.en-eu.eu', 'w', encoding='utf-8') as eu_file:
        for data in dataset['train']:
            en_file.write(data['translation']['en'] + '\n')
            eu_file.write(data['translation']['eu'] + '\n')


if __name__ == '__main__':
    main()
eng-eus/ccmatrix/get_ccmatrix_eng_eus.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#! /usr/bin/env python3

'''
Download and preprocess the CCMatrix English-Basque parallel corpus.

Keeps only pairs where both sides have at least 40 characters, selects
the top 10% and the last 10% of the (score-sorted) filtered data,
shuffles the result, and pushes it to the Hugging Face Hub.
'''

from datasets import load_dataset, concatenate_datasets
import pandas as pd
import json


def main() -> None:
    """Build and publish xezpeleta/ccmatrix_eng_eus_filtered."""
    dataset = load_dataset('xezpeleta/ccmatrix', 'en-eu', split='train', trust_remote_code=True)
    # Example record:
    # {'id': 0, 'score': 1.2498578, 'translation': {'en': "He stands to God's word, and God's worship.", 'eu': 'Jaungoikoa goratzera bideratuta egongo da eta Jaungoikoa bere borondatea betez goratzen da.'}}

    # Keep sentence pairs with 40+ characters in both English and Basque.
    filtered_dataset = dataset.filter(
        lambda example: len(example['translation']['en']) >= 40
        and len(example['translation']['eu']) >= 40
    )

    # Sorting by "score" is not needed: the dataset is already sorted by
    # score (descending), so index 0 is the highest-scored pair.
    #sorted_dataset = dataset.sort("score", reverse=True)

    # Index boundaries for the top 10% and the last 10% of the samples.
    num_samples = len(filtered_dataset)
    top_10_percent = int(num_samples * 0.1)
    last_10_start = int(num_samples * 0.9)

    # Top 10% = highest-scored pairs; last 10% = lowest-scored pairs.
    # BUG FIX: the original selected range(num_samples - int(n * 0.9), int(n * 0.9)),
    # i.e. the middle ~80% of the data, which contradicts the stated intent
    # of taking the top and last 10%.
    top_10_samples = filtered_dataset.select(range(top_10_percent))
    last_10_samples = filtered_dataset.select(range(last_10_start, num_samples))

    # Both slices come from the same dataset, so their schemas must match
    # before concatenation.
    assert top_10_samples.features.type == last_10_samples.features.type
    resulting_dataset = concatenate_datasets([top_10_samples, last_10_samples])

    # Shuffle so high- and low-score samples are interleaved.
    resulting_dataset = resulting_dataset.shuffle()

    # Generate train and eval splits - DISABLED
    #resulting_dataset = resulting_dataset.train_test_split(test_size=0.1)

    # Save the dataset locally - DISABLED
    #resulting_dataset.to_json("ccmatrix_eng_eus_filtered.jsonl")

    # Upload the dataset to the Hugging Face Hub.
    resulting_dataset.push_to_hub("xezpeleta/ccmatrix_eng_eus_filtered")


if __name__ == '__main__':
    main()
eng-eus/ccmatrix/requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
datasets
|