system HF staff committed on
Commit
954824e
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
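
These rules route large binary artifacts (archives, model weights, Arrow/Parquet data, TensorBoard event files) through Git LFS, so the repository itself stores only small pointer stubs. As a rough illustration only (Python's fnmatch is not an exact model of gitattributes globbing: it lets * cross / and gives ** no special meaning), a minimal sketch of matching a path against a few of these patterns:

    from fnmatch import fnmatch

    # A few of the patterns from the .gitattributes above (list abbreviated).
    # NOTE: fnmatch only approximates gitattributes semantics; in real
    # gitattributes, "*" does not match "/" and "**" matches nested dirs.
    LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.zip", "*tfevents*"]

    def matched_patterns(path):
        """Return the LFS patterns that a repository path matches."""
        return [p for p in LFS_PATTERNS if fnmatch(path, p)]

    print(matched_patterns("dummy/plain_text/1.0.0/dummy_data.zip"))  # ['*.zip']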
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"plain_text": {"description": "Piaf is a reading comprehension dataset. This version, published in February 2020, contains 3835 questions on French Wikipedia.\n", "citation": "@InProceedings{keraron-EtAl:2020:LREC,\n author = {Keraron, Rachel and Lancrenon, Guillaume and Bras, Mathilde and Allary, Fr\u00e9d\u00e9ric and Moyse, Gilles and Scialom, Thomas and Soriano-Morales, Edmundo-Pavel and Staiano, Jacopo},\n title = {Project PIAF: Building a Native French Question-Answering Dataset},\n booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},\n month = {May},\n year = {2020},\n address = {Marseille, France},\n publisher = {European Language Resources Association},\n pages = {5483--5492},\n abstract = {Motivated by the lack of data for non-English languages, in particular for the evaluation of downstream tasks such as Question Answering, we present a participatory effort to collect a native French Question Answering Dataset. Furthermore, we describe and publicly release the annotation tool developed for our collection effort, along with the data obtained and preliminary baselines.},\n url = {https://www.aclweb.org/anthology/2020.lrec-1.673}\n}\n", "homepage": "https://piaf.etalab.studio", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "piaf", "config_name": "plain_text", "version": {"version_str": "1.0.0", "description": "", "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3337705, "num_examples": 3835, "dataset_name": "piaf"}}, "download_checksums": {"https://github.com/etalab-ia/piaf-code/raw/master/piaf-v1.0.json": {"num_bytes": 1370384, "checksum": "008229ccefa0195d7e809d777d33149cab03433059c9477bdbadb4838a277cd2"}}, "download_size": 1370384, "dataset_size": 3337705, "size_in_bytes": 4708089}}
dummy/plain_text/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f3c038bf8d5ce829fa28d4cdf2bbf63793427a1785b53ee234880d2be64fe2ae
+ size 664
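
The zip itself lives in Git LFS, so the repository holds only this three-line pointer: the spec version, the SHA-256 object id, and the file size in bytes. A minimal sketch of reading such a pointer (parse_lfs_pointer is a hypothetical helper, not part of any library):

    def parse_lfs_pointer(text):
        """Parse a Git LFS pointer file into a {key: value} dict."""
        # Each pointer line is "key value", e.g. "size 664".
        return dict(line.split(" ", 1) for line in text.strip().splitlines())

    pointer = """\
    version https://git-lfs.github.com/spec/v1
    oid sha256:f3c038bf8d5ce829fa28d4cdf2bbf63793427a1785b53ee234880d2be64fe2ae
    size 664
    """
    info = parse_lfs_pointer(pointer)
    print(info["oid"], int(info["size"]))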
piaf.py ADDED
@@ -0,0 +1,134 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """PIAF Question Answering Dataset"""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import logging
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @InProceedings{keraron-EtAl:2020:LREC,
+   author = {Keraron, Rachel and Lancrenon, Guillaume and Bras, Mathilde and Allary, Frédéric and Moyse, Gilles and Scialom, Thomas and Soriano-Morales, Edmundo-Pavel and Staiano, Jacopo},
+   title = {Project PIAF: Building a Native French Question-Answering Dataset},
+   booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
+   month = {May},
+   year = {2020},
+   address = {Marseille, France},
+   publisher = {European Language Resources Association},
+   pages = {5483--5492},
+   abstract = {Motivated by the lack of data for non-English languages, in particular for the evaluation of downstream tasks such as Question Answering, we present a participatory effort to collect a native French Question Answering Dataset. Furthermore, we describe and publicly release the annotation tool developed for our collection effort, along with the data obtained and preliminary baselines.},
+   url = {https://www.aclweb.org/anthology/2020.lrec-1.673}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Piaf is a reading comprehension \
+ dataset. This version, published in February 2020, contains 3835 questions on French Wikipedia.
+ """
+
+
+ class PiafConfig(datasets.BuilderConfig):
+     """BuilderConfig for PIAF."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for PIAF.
+
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(PiafConfig, self).__init__(**kwargs)
+
+
+ class Piaf(datasets.GeneratorBasedBuilder):
+     """The Piaf Question Answering Dataset. Version 1.0."""
+
+     _URL = "https://github.com/etalab-ia/piaf-code/raw/master/"
+     _TRAINING_FILE = "piaf-v1.0.json"
+
+     BUILDER_CONFIGS = [
+         PiafConfig(
+             name="plain_text",
+             version=datasets.Version("1.0.0", ""),
+             description="Plain text",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "text": datasets.Value("string"),
+                             "answer_start": datasets.Value("int32"),
+                         }
+                     ),
+                 }
+             ),
+             # No default supervised_keys (as we have to pass both question
+             # and context as input).
+             supervised_keys=None,
+             homepage="https://piaf.etalab.studio",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         urls_to_download = {"train": os.path.join(self._URL, self._TRAINING_FILE)}
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         logging.info("generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             dataset = json.load(f)
+             for article in dataset["data"]:
+                 title = article.get("title", "").strip()
+                 for paragraph in article["paragraphs"]:
+                     context = paragraph["context"].strip()
+                     for qa in paragraph["qas"]:
+                         question = qa["question"].strip()
+                         id_ = qa["id"]
+
+                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                         answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                         # Features currently used are "context", "question", and "answers".
+                         # Others are extracted here for the ease of future expansions.
+                         yield id_, {
+                             "title": title,
+                             "context": context,
+                             "question": question,
+                             "id": id_,
+                             "answers": {
+                                 "answer_start": answer_starts,
+                                 "text": answers,
+                             },
+                         }
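
With the script published, the dataset loads by name through the datasets library. A minimal usage sketch, assuming a datasets >= 1.0.0 install and that the dataset id resolves to this script; the config name, split, and field names all come from the builder above:

    from datasets import load_dataset

    # "plain_text" is the only config defined in BUILDER_CONFIGS above.
    dataset = load_dataset("piaf", "plain_text")

    train = dataset["train"]  # only a train split is generated
    print(len(train))         # expected 3835 examples
    example = train[0]
    print(example["question"])
    print(example["answers"]["text"], example["answers"]["answer_start"])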