holylovenia committed on
Commit 4880225
1 Parent(s): 8199987

Upload roots_vi_ted.py with huggingface_hub

Files changed (1)
  1. roots_vi_ted.py +128 -0
roots_vi_ted.py ADDED
@@ -0,0 +1,128 @@
from pathlib import Path
from typing import Dict, List, Tuple

import datasets
import pandas as pd

from seacrowd.utils import schemas
from seacrowd.utils.configs import SEACrowdConfig
from seacrowd.utils.constants import Licenses, Tasks

_CITATION = """
@inproceedings{DBLP:conf/nips/LaurenconSWAMSW22,
  author={Hugo Laurençon and Lucile Saulnier and Thomas Wang and Christopher Akiki and Albert Villanova del Moral and
  Teven Le Scao and Leandro von Werra and Chenghao Mou and Eduardo González Ponferrada and Huu Nguyen and Jörg Frohberg
  and Mario Sasko and Quentin Lhoest and Angelina McMillan-Major and Gérard Dupont and Stella Biderman and Anna Rogers
  and Loubna Ben Allal and Francesco De Toni and Giada Pistilli and Olivier Nguyen and Somaieh Nikpoor and Maraim Masoud
  and Pierre Colombo and Javier de la Rosa and Paulo Villegas and Tristan Thrush and Shayne Longpre and Sebastian Nagel
  and Leon Weber and Manuel Muñoz and Jian Zhu and Daniel van Strien and Zaid Alyafeai and Khalid Almubarak and Minh
  Chien Vu and Itziar Gonzalez-Dios and Aitor Soroa and Kyle Lo and Manan Dey and Pedro Ortiz Suarez and Aaron Gokaslan
  and Shamik Bose and David Ifeoluwa Adelani and Long Phan and Hieu Tran and Ian Yu and Suhas Pai and Jenny Chim and
  Violette Lepercq and Suzana Ilic and Margaret Mitchell and Alexandra Sasha Luccioni and Yacine Jernite},
  title={The BigScience ROOTS Corpus: A 1.6TB Composite Multilingual Dataset},
  year={2022},
  cdate={1640995200000},
  url={http://papers.nips.cc/paper_files/paper/2022/hash/ce9e92e3de2372a4b93353eb7f3dc0bd-Abstract-Datasets_and_Benchmarks.html},
  booktitle={NeurIPS},
}
"""

_DATASETNAME = "roots_vi_ted"

_DESCRIPTION = """
ROOTS_vi_ted is the Vietnamese subset of the ted_talks_iwslt dataset, a collection of original TED
talks and their translated versions. Translations are available in 109+ languages, though the
distribution is not uniform. Before using this dataloader, please accept the acknowledgement at
https://huggingface.co/datasets/bigscience-data/roots_vi_ted_talks_iwslt and log in with huggingface-cli login for authentication.
"""

_HOMEPAGE = "https://huggingface.co/datasets/bigscience-data/roots_vi_ted_talks_iwslt"

_LANGUAGES = ["vie"]

_LICENSE = Licenses.CC_BY_NC_ND_4_0.value

_LOCAL = False

_URLS = {_DATASETNAME: {"train": "https://huggingface.co/datasets/bigscience-data/roots_vi_ted_talks_iwslt/resolve/main/data/train-00000-of-00001.parquet?download=true"}}
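# The source parquet above sits in a gated repository, so dl_manager needs an
# authenticated Hugging Face token (see the login note in _DESCRIPTION).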

_SUPPORTED_TASKS = [Tasks.SELF_SUPERVISED_PRETRAINING]

_SOURCE_VERSION = "1.0.0"

_SEACROWD_VERSION = "2024.06.20"


class RootsViTedDataset(datasets.GeneratorBasedBuilder):
    """RootsViTed is the Vietnamese subset of the ted_talks_iwslt dataset."""

    SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
    SEACROWD_VERSION = datasets.Version(_SEACROWD_VERSION)

    BUILDER_CONFIGS = [
        SEACrowdConfig(
            name="roots_vi_ted_source",
            version=SOURCE_VERSION,
            description="roots_vi_ted source schema",
            schema="source",
            subset_id="roots_vi_ted",
        ),
        SEACrowdConfig(
            name="roots_vi_ted_seacrowd_ssp",
            version=SEACROWD_VERSION,
            description="roots_vi_ted SEACrowd schema",
            schema="seacrowd_ssp",
            subset_id="roots_vi_ted",
        ),
    ]

    DEFAULT_CONFIG_NAME = "roots_vi_ted_source"
    def _info(self) -> datasets.DatasetInfo:
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "meta": datasets.Value("string"),
                }
            )

        elif self.config.schema == "seacrowd_ssp":
            features = schemas.self_supervised_pretraining.features

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

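    # Only a train split is defined upstream (see _URLS), so only TRAIN is generated below.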
    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        """Returns SplitGenerators."""
        urls = _URLS[_DATASETNAME]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_dir, "split": "train"},
            ),
        ]

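    # `filepath` is the dict returned by download_and_extract above, so
    # filepath[split] resolves to the local path of the downloaded parquet.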
    def _generate_examples(self, filepath: Path, split: str) -> Tuple[int, Dict]:
        if self.config.schema == "source":
            df = pd.read_parquet(filepath[split])
            for i, row in df.iterrows():
                yield i, {
                    "text": row["text"],
                    "meta": row["meta"],
                }

        elif self.config.schema == "seacrowd_ssp":
            df = pd.read_parquet(filepath[split])
            for i, row in df.iterrows():
                yield i, {
                    "id": str(i),
                    "text": row["text"],
                }
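
For reference, a minimal loading sketch follows. It assumes the seacrowd package and its dependencies are installed, the acknowledgement on the dataset page has been accepted, and this script is saved locally as roots_vi_ted.py; the config name comes from BUILDER_CONFIGS above.

from huggingface_hub import login
import datasets

login()  # or run `huggingface-cli login` beforehand; the source parquet is gated

# trust_remote_code is required by recent versions of the datasets library
# when loading from a local builder script.
ds = datasets.load_dataset(
    "roots_vi_ted.py",
    name="roots_vi_ted_seacrowd_ssp",
    split="train",
    trust_remote_code=True,
)
print(ds[0]["id"], ds[0]["text"][:100])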