Commit fc194d4 by nxphi47 (parent: d04a4d1)

Delete 2WikiMultihopQA

2WikiMultihopQA/2WikiMultihopQA.py DELETED
@@ -1,141 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- """
- Mirror of https://github.com/Alab-NII/2wikimultihop
- """
-
- import json
- import datasets
- import pandas as pd
-
- _DESCRIPTION = """\
- Mirror of https://github.com/Alab-NII/2wikimultihop
- """
-
- CITATION = """
- @inproceedings{xanh2020_2wikimultihop,
-     title = "Constructing A Multi-hop {QA} Dataset for Comprehensive Evaluation of Reasoning Steps",
-     author = "Ho, Xanh and
-       Duong Nguyen, Anh-Khoa and
-       Sugawara, Saku and
-       Aizawa, Akiko",
-     booktitle = "Proceedings of the 28th International Conference on Computational Linguistics",
-     month = dec,
-     year = "2020",
-     address = "Barcelona, Spain (Online)",
-     publisher = "International Committee on Computational Linguistics",
-     url = "https://www.aclweb.org/anthology/2020.coling-main.580",
-     pages = "6609--6625",
- }
- """
-
- DEV_URL = "https://huggingface.co/datasets/somebody-had-to-do-it/2WikiMultihopQA/resolve/main/dev.parquet?download=true"
- TRAIN_URL = "https://huggingface.co/datasets/somebody-had-to-do-it/2WikiMultihopQA/resolve/main/train.parquet?download=true"
- TEST_URL = "https://huggingface.co/datasets/somebody-had-to-do-it/2WikiMultihopQA/resolve/main/test.parquet?download=true"
-
-
- class Dataset2WikiMultihopQa(datasets.GeneratorBasedBuilder):
-     """Mirror of https://github.com/Alab-NII/2wikimultihop"""
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "_id": datasets.Value("string"),
-                     "type": datasets.Value("string"),
-                     "question": datasets.Value("string"),
-                     "context": datasets.features.Sequence(
-                         {
-                             "title": datasets.Value("string"),
-                             "content": datasets.features.Sequence(
-                                 datasets.Value("string")
-                             ),
-                         }
-                     ),
-                     "supporting_facts": datasets.features.Sequence(
-                         {
-                             "title": datasets.Value("string"),
-                             "sent_id": datasets.Value("int32"),
-                         }
-                     ),
-                     "evidences": datasets.features.Sequence(
-                         {
-                             "fact": datasets.Value("string"),
-                             "relation": datasets.Value("string"),
-                             "entity": datasets.Value("string"),
-                         }
-                     ),
-                     "answer": datasets.Value("string"),
-                 }
-             ),
-             supervised_keys=None,
-             homepage="https://github.com/Alab-NII/2wikimultihop",
-             citation=CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-
-         train_uri = dl_manager.download(TRAIN_URL)
-         dev_uri = dl_manager.download(DEV_URL)
-         test_uri = dl_manager.download(TEST_URL)
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_uri}
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split("dev"),
-                 gen_kwargs={"filepath": dev_uri, "split": "dev"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"filepath": test_uri},
-             ),
-         ]
-
-     def _generate_examples(self, filepath, *args, **kwargs):
-         """Yields examples from a Parquet file."""
-         # Read the Parquet file into a DataFrame
-         df = pd.read_parquet(filepath)
-
-         # Iterate over each row in the DataFrame
-         for idx, row in df.iterrows():
-             yield idx, {
-                 "_id": row["_id"],
-                 "type": row["type"],
-                 "question": row["question"],
-                 "context": [
-                     {"title": item[0], "content": item[1]}
-                     for item in json.loads(row["context"])
-                 ],
-                 "supporting_facts": [
-                     {"title": fact[0], "sent_id": fact[1]}
-                     for fact in json.loads(row["supporting_facts"])
-                 ],
-                 "evidences": [
-                     {
-                         "fact": evidence[0],
-                         "relation": evidence[1],
-                         "entity": evidence[2],
-                     }
-                     for evidence in json.loads(row["evidences"])
-                 ],
-                 "answer": row["answer"],
-             }
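For context on what this deletion removes: the file above is a script-based loader, which consumers would have pulled in through `datasets.load_dataset`. A minimal usage sketch, assuming the repo id from the URLs in the script and a `datasets` version recent enough to require `trust_remote_code` for community scripts (both assumptions, not part of this commit):

```python
# Minimal sketch of how the deleted loading script would have been used.
# Assumptions: the script is still served under this repo id, and the
# installed `datasets` version requires trust_remote_code for script datasets.
from datasets import load_dataset

ds = load_dataset("somebody-had-to-do-it/2WikiMultihopQA", trust_remote_code=True)

# Splits come from _split_generators: train, dev, test.
example = ds["train"][0]
print(example["question"], "->", example["answer"])

# A Sequence of dicts is surfaced by `datasets` as a dict of parallel lists:
print(example["supporting_facts"]["title"], example["supporting_facts"]["sent_id"])
```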
 
2WikiMultihopQA/README.md DELETED
@@ -1,13 +0,0 @@
- ---
- license: apache-2.0
- task_categories:
- - question-answering
- language:
- - en
- size_categories:
- - 100K<n<1M
- ---
-
- # 2WikiMultihopQA: A Dataset for Comprehensive Evaluation of Reasoning Steps
-
- Official mirror of <https://github.com/Alab-NII/2wikimultihop>
 
2WikiMultihopQA/convert_to_jsonl.py DELETED
@@ -1,11 +0,0 @@
- from pathlib import Path
- import pandas as pd
-
- file_names = ["./raw/dev.json", "./raw/test.json", "./raw/train.json"]
- for file_name in file_names:
-     # Read the JSON file into a DataFrame
-     df = pd.read_json(file_name)
-
-     # Convert the DataFrame to JSON Lines and write it alongside the source file
-     jsonl_path = "./raw/" + Path(file_name).stem + ".jsonl"
-     df.to_json(jsonl_path, orient="records", lines=True)
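This helper fans the raw `dev/test/train.json` files out to JSON Lines, which the Parquet conversion below consumes. A quick round-trip check, purely illustrative, with paths assumed from the `./raw/` layout above:

```python
# Sanity-check sketch: one JSON object per JSONL line, one per example.
# Paths assume the ./raw/ layout used by the conversion script above.
import json
import pandas as pd

df = pd.read_json("./raw/dev.json")
with open("./raw/dev.jsonl", encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

assert len(records) == len(df)
print(records[0]["question"])
```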
 
2WikiMultihopQA/convert_to_parquet.py DELETED
@@ -1,20 +0,0 @@
- from pathlib import Path
- import pandas as pd
- import json
-
- file_names = ["./raw/dev.jsonl", "./raw/test.jsonl", "./raw/train.jsonl"]
- for file_name in file_names:
-     df = pd.read_json(file_name, lines=True)
-
-     complex_columns = [
-         "context",
-         "supporting_facts",
-         "evidences",
-     ]
-     for col in complex_columns:
-         df[col] = df[col].apply(json.dumps)
-
-     parquet_file_path = Path(file_name).stem + ".parquet"
-     df.to_parquet(parquet_file_path)
-
-     print(f"Converted {file_name} to {parquet_file_path}")
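The `json.dumps` step sidesteps pandas' awkward handling of ragged nested columns by storing `context`, `supporting_facts`, and `evidences` as plain JSON strings, so any reader has to reverse it; that is exactly what `_generate_examples` in the loading script does. A standalone sketch of the decode side (file name assumed, produced by the script above):

```python
# Decode sketch: reverse the json.dumps serialization applied above.
# Assumes a local dev.parquet produced by the conversion script.
import json
import pandas as pd

df = pd.read_parquet("dev.parquet")
row = df.iloc[0]

context = json.loads(row["context"])                    # list of [title, sentences] pairs
supporting_facts = json.loads(row["supporting_facts"])  # list of [title, sent_id] pairs
evidences = json.loads(row["evidences"])                # list of [fact, relation, entity] triples

title, sentences = context[0]
print(title, "->", sentences[0])
```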
2WikiMultihopQA/dev.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c0d8b60b9026b728fb07ad74c5252a0f188f6942e8ba5c02df4dfa369502ea8d
- size 30056098
 
2WikiMultihopQA/test.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:1dc69c656c03837f12bb231e1adbd8a2dcdaaf632143a1ab9ebb98bd6452ede5
- size 28475341
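The two `.parquet` entries above are Git LFS pointer files, not the data itself: `version`, `oid`, and `size` describe the blob that LFS resolves on checkout. The same blobs are what the loading script downloads from the mirror repo via its `resolve/main/...` URLs. If a copy is still hosted, it could also be fetched with `huggingface_hub`; a sketch, where `REVISION` is a placeholder for a commit that still contains the file:

```python
# Sketch: resolve an LFS-backed file from the Hub instead of the pointer.
# REVISION is a hypothetical placeholder; the repo id comes from the
# download URLs in the deleted loading script.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="somebody-had-to-do-it/2WikiMultihopQA",
    filename="dev.parquet",
    repo_type="dataset",
    revision="REVISION",  # hypothetical pre-deletion commit
)
print(path)
```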