kamelliao committed on
Commit f4f0d7e
1 Parent(s): ca2facd

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -53,3 +53,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ data/dev.json filter=lfs diff=lfs merge=lfs -text
+ data/id_aliases.json filter=lfs diff=lfs merge=lfs -text
+ data/test.json filter=lfs diff=lfs merge=lfs -text
+ data/train.json filter=lfs diff=lfs merge=lfs -text
2wikimultihopqa.py ADDED
@@ -0,0 +1,128 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """2WikiMultihopQA: a multi-hop QA dataset for comprehensive evaluation of reasoning steps."""
+
+
+ import json
+ import textwrap
+
+ import datasets
+
+
+ _CITATION = """
+ @inproceedings{xanh2020_2wikimultihop,
+     title = "Constructing A Multi-hop {QA} Dataset for Comprehensive Evaluation of Reasoning Steps",
+     author = "Ho, Xanh and
+       Duong Nguyen, Anh-Khoa and
+       Sugawara, Saku and
+       Aizawa, Akiko",
+     booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
+     month = dec,
+     year = "2020",
+     address = "Barcelona, Spain (Online)",
+     publisher = "International Committee on Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.coling-main.580",
+     pages = "6609--6625",
+ }
+ """
+
+ _DESCRIPTION = """\
+ """
+
+ # Data files live next to this script in the repository's data/ folder.
+ _URL_BASE = "data"
+
+
+ class TwowikimultihopQA(datasets.GeneratorBasedBuilder):
+     """2WikiMultihopQA: a multi-hop QA dataset for comprehensive evaluation of reasoning steps."""
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answer": datasets.Value("string"),
+                     "type": datasets.Value("string"),
+                     "supporting_facts": datasets.features.Sequence(
+                         {
+                             "title": datasets.Value("string"),
+                             "sent_id": datasets.Value("int32"),
+                         }
+                     ),
+                     "context": datasets.features.Sequence(
+                         {
+                             "title": datasets.Value("string"),
+                             "sentences": datasets.features.Sequence(datasets.Value("string")),
+                         }
+                     ),
+                     "evidences": datasets.features.Sequence(
+                         datasets.features.Sequence(datasets.Value("string"))
+                     ),
+                     "entity_ids": datasets.Value("string"),
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/Alab-NII/2wikimultihop",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         """Returns SplitGenerators."""
+         paths = {
+             datasets.Split.TRAIN: f"{_URL_BASE}/train.json",
+             datasets.Split.VALIDATION: f"{_URL_BASE}/dev.json",
+             datasets.Split.TEST: f"{_URL_BASE}/test.json",
+         }
+
+         files = dl_manager.download(paths)
+
+         split_generators = []
+         for split in files:
+             split_generators.append(datasets.SplitGenerator(name=split, gen_kwargs={"data_file": files[split]}))
+
+         return split_generators
+
+     def _generate_examples(self, data_file):
+         """This function returns the examples."""
+         with open(data_file, encoding="utf-8") as f:
+             data = json.load(f)
+         for idx, example in enumerate(data):
+
+             # The test set is missing some keys; fill them in with None.
+             for k in ["answer", "type", "level"]:
+                 if k not in example.keys():
+                     example[k] = None
+
+             if "supporting_facts" not in example.keys():
+                 example["supporting_facts"] = []
+
+             yield idx, {
+                 "id": example["_id"],
+                 "question": example["question"],
+                 "answer": example["answer"],
+                 "type": example["type"],
+                 # Raw supporting facts and context are [title, value] pairs; convert them to dicts.
+                 "supporting_facts": [{"title": f[0], "sent_id": f[1]} for f in example["supporting_facts"]],
+                 "context": [{"title": f[0], "sentences": f[1]} for f in example["context"]],
+                 "evidences": example["evidences"],
+                 "entity_ids": example["entity_ids"],
+             }
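
Editor's note: for reference, a minimal usage sketch of the loading script above (not part of the commit). The repository id "kamelliao/2wikimultihopqa" is an assumption inferred from the committer name, and trust_remote_code=True is required for script-based datasets in recent releases of the datasets library; a local clone can instead be loaded by passing the path to 2wikimultihopqa.py.

    # Minimal usage sketch; the Hub id below is assumed, adjust as needed.
    from datasets import load_dataset

    ds = load_dataset("kamelliao/2wikimultihopqa", trust_remote_code=True)

    print(ds)                      # DatasetDict with train / validation / test splits
    example = ds["validation"][0]  # one multi-hop question
    print(example["question"], "->", example["answer"])
    # A Sequence of dicts is exposed as a dict of lists.
    print(example["supporting_facts"]["title"])
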
data/dev.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79f77ae104088ea8e25b1a65dbece768d45771194663bc5660ec9a98070dadf5
+ size 57614142
data/id_aliases.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f08ffcb6c2cefca9bdbe86b4248d6ad7a7743762d3f7264c14ff0bae85726fb6
+ size 17501406
data/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48b196d4ba8557343abb9bd1ad03566bc02762ecd734617ff910027c33821b04
+ size 53838398
data/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b318dbafbfed51a8029718fa59be8b616600cbff675a3b587694b28c5eedfc13
+ size 707810660
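
Editor's note: the three-line stubs added above are Git LFS pointer files; the JSON payloads themselves are stored on the LFS server and identified by the SHA-256 oid and byte size recorded in each pointer. As an illustration only (not part of the commit), a downloaded copy can be checked against its pointer with standard-library Python; the values below are taken from the data/dev.json pointer.

    import hashlib
    import os

    def verify_lfs_object(path, expected_sha256, expected_size):
        """Check a downloaded file against the oid/size recorded in its LFS pointer."""
        if os.path.getsize(path) != expected_size:
            return False
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                digest.update(chunk)
        return digest.hexdigest() == expected_sha256

    print(verify_lfs_object(
        "data/dev.json",
        "79f77ae104088ea8e25b1a65dbece768d45771194663bc5660ec9a98070dadf5",
        57614142,
    ))
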