Datasets: Muennighoff / xP3x-sample

Commit 3d8cd0a (parent 2e41ddf), committed by Muennighoff
Create xp3x_sample.py

xp3x_sample.py ADDED (+110 -0)
@@ -0,0 +1,110 @@
+"""xP3x-sample"""
+
+import json
+
+import datasets
+
+
+logger = datasets.logging.get_logger(__name__)
+
+
+_CITATION = """\
+@misc{muennighoff2022crosslingual,
+      title={Crosslingual Generalization through Multitask Finetuning},
+      author={Niklas Muennighoff and Thomas Wang and Lintang Sutawika and Adam Roberts and Stella Biderman and Teven Le Scao and M Saiful Bari and Sheng Shen and Zheng-Xin Yong and Hailey Schoelkopf and Xiangru Tang and Dragomir Radev and Alham Fikri Aji and Khalid Almubarak and Samuel Albanie and Zaid Alyafeai and Albert Webson and Edward Raff and Colin Raffel},
+      year={2022},
+      eprint={2211.01786},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+"""
+
+_DESCRIPTION = """\
+A sample of xP3x (Crosslingual Public Pool of Prompts eXtended), a multilingual \
+collection of prompted datasets for multitask finetuning and evaluation of crosslingual generalization.
+"""
+_DS = [
+    "apps.jsonl",
+    "bisect.jsonl",
+    "BiSECT.jsonl",
+    "c3.jsonl",
+    "cmrc2018.jsonl",
+    "codecomplex.jsonl",
+    "code_contests.jsonl",
+    "code_docstring_corpus.jsonl",
+    "csl.jsonl",
+    "drcd.jsonl",
+    "flores.jsonl",
+    "github_jupyter_text_code_pairs.jsonl",
+    "great_code.jsonl",
+    "mbpp.jsonl",
+    "mlqa.jsonl",
+    "multi_eurlex.jsonl",
+    "neural_code_search.jsonl",
+    "paws_x.jsonl",
+    "python_state_changes.jsonl",
+    "tatoeba_mt.jsonl",
+    "tnews.jsonl",
+    "tydiqa_goldp.jsonl",
+    "tydiqa_primary.jsonl",
+    "wiki_lingua.jsonl",
+    "xcopa.jsonl",
+    "xlcost_text_to_code.jsonl",
+    "xlsum.jsonl",
+    "xlwic.jsonl",
+    "xnli.jsonl",
+    "xquad.jsonl",
+]
+_LICENSE = "Apache License 2.0"
+_URL = "https://huggingface.co/datasets/Muennighoff/xP3x-sample/raw/main/{ds}"
+_VERSION = datasets.Version("1.1.0", "")
+
+
+class xP3x_sample(datasets.GeneratorBasedBuilder):
+    """xP3x-sample"""
+
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(
+            name=ds.replace(".jsonl", ""),
+            description=f"xP3x-sample {ds.replace('.jsonl', '')}",
+            version=_VERSION,
+        )
+        for ds in _DS
+    ]
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "inputs": datasets.Value("string"),
+                    "targets": datasets.Value("string"),
+                    "language": datasets.Value("string"),
+                    "split": datasets.Value("string"),
+                    "template": datasets.Value("string"),
+                    "dataset": datasets.Value("string"),
+                    "config": datasets.Value("string"),
+                }
+            ),
+            supervised_keys=None,
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        # Config names drop the ".jsonl" suffix, so re-append it when building the URL.
+        downloaded_files = dl_manager.download(_URL.format(ds=self.config.name + ".jsonl"))
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={"filepath": downloaded_files},
+            )
+        ]
+
+    def _generate_examples(self, filepath):
+        """This function returns the examples in the raw (text) form."""
+        logger.info("Generating examples from = %s", filepath)
+
+        with open(filepath, encoding="utf-8") as f:
+            for id_, row in enumerate(f):
+                data = json.loads(row)
+                yield id_, data
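
For reference, a minimal usage sketch (not part of the commit; the "flores" config name is taken from the _DS list above, and recent versions of the datasets library require trust_remote_code=True to run hub loading scripts):

from datasets import load_dataset

# Load one config of the sample; the script exposes a single "test" split.
ds = load_dataset("Muennighoff/xP3x-sample", "flores", trust_remote_code=True)

# Each row is one JSON line following the schema declared in _info():
# inputs, targets, language, split, template, dataset, config
print(ds["test"][0]["inputs"])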