sagnikrayc commited on
Commit
8c21775
·
1 Parent(s): 2a8e5af

chore: add datasets

Browse files
Files changed (5) hide show
  1. .gitattributes +4 -0
  2. README.md +30 -0
  3. adversarial_hotpotqa.py +115 -0
  4. train.json +3 -0
  5. validation.json +3 -0
.gitattributes CHANGED
@@ -53,3 +53,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ hotpot_dev-addDoc.json filter=lfs diff=lfs merge=lfs -text
57
+ hotpot_train_addDoc.json filter=lfs diff=lfs merge=lfs -text
58
+ train.json filter=lfs diff=lfs merge=lfs -text
59
+ validation.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,33 @@
1
  ---
2
  license: afl-3.0
 
 
 
 
 
 
 
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
  license: afl-3.0
3
+ task_categories:
4
+ - question-answering
5
+ language:
6
+ - en
7
+ pretty_name: Adversarial-MultiHopQA
8
+ size_categories:
9
+ - 10K<n<100K
10
  ---
11
+
12
+ This dataset is from the paper: "Avoiding Reasoning Shortcuts: Adversarial Evaluation, Training, and Model Development for Multi-Hop QA" by Yichen Jiang and Mohit Bansal.
13
+
14
+ The dataset was created using the code provided in the [original GitHub repo](https://github.com/jiangycTarheel-zz/Adversarial-MultiHopQA).
15
+
16
+ This is the ACL citation for the paper:
17
+
18
+ ```
19
+ @inproceedings{jiang-bansal-2019-avoiding,
20
+ title = "Avoiding Reasoning Shortcuts: Adversarial Evaluation, Training, and Model Development for Multi-Hop {QA}",
21
+ author = "Jiang, Yichen and
22
+ Bansal, Mohit",
23
+ booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
24
+ month = jul,
25
+ year = "2019",
26
+ address = "Florence, Italy",
27
+ publisher = "Association for Computational Linguistics",
28
+ url = "https://aclanthology.org/P19-1262",
29
+ doi = "10.18653/v1/P19-1262",
30
+ pages = "2726--2736",
31
+ abstract = "Multi-hop question answering requires a model to connect multiple pieces of evidence scattered in a long context to answer the question. In this paper, we show that in the multi-hop HotpotQA (Yang et al., 2018) dataset, the examples often contain reasoning shortcuts through which models can directly locate the answer by word-matching the question with a sentence in the context. We demonstrate this issue by constructing adversarial documents that create contradicting answers to the shortcut but do not affect the validity of the original answer. The performance of strong baseline models drops significantly on our adversarial test, indicating that they are indeed exploiting the shortcuts rather than performing multi-hop reasoning. After adversarial training, the baseline{'}s performance improves but is still limited on the adversarial test. Hence, we use a control unit that dynamically attends to the question at different reasoning hops to guide the model{'}s multi-hop reasoning. We show that our 2-hop model trained on the regular data is more robust to the adversaries than the baseline. After adversarial training, it not only achieves significant improvements over its counterpart trained on regular data, but also outperforms the adversarially-trained baseline significantly. Finally, we sanity-check that these improvements are not obtained by exploiting potential new shortcuts in the adversarial data, but indeed due to robust multi-hop reasoning skills of the models.",
32
+ }
33
+ ```
adversarial_hotpotqa.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
# Lint as: python3
"""HotpotQA: A Dataset for Diverse, Explainable Multi-hop Question Answering."""


import json

import datasets


# BibTeX entry for the source paper (Jiang & Bansal, ACL 2019); surfaced to
# users via `DatasetInfo.citation` in `_info` below.
_CITATION = """
@inproceedings{jiang-bansal-2019-avoiding,
    title = "Avoiding Reasoning Shortcuts: Adversarial Evaluation, Training, and Model Development for Multi-Hop {QA}",
    author = "Jiang, Yichen and
      Bansal, Mohit",
    booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2019",
    address = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/P19-1262",
    doi = "10.18653/v1/P19-1262",
    pages = "2726--2736",
    abstract = "Multi-hop question answering requires a model to connect multiple pieces of evidence scattered in a long context to answer the question. In this paper, we show that in the multi-hop HotpotQA (Yang et al., 2018) dataset, the examples often contain reasoning shortcuts through which models can directly locate the answer by word-matching the question with a sentence in the context. We demonstrate this issue by constructing adversarial documents that create contradicting answers to the shortcut but do not affect the validity of the original answer. The performance of strong baseline models drops significantly on our adversarial test, indicating that they are indeed exploiting the shortcuts rather than performing multi-hop reasoning. After adversarial training, the baseline{'}s performance improves but is still limited on the adversarial test. Hence, we use a control unit that dynamically attends to the question at different reasoning hops to guide the model{'}s multi-hop reasoning. We show that our 2-hop model trained on the regular data is more robust to the adversaries than the baseline. After adversarial training, it not only achieves significant improvements over its counterpart trained on regular data, but also outperforms the adversarially-trained baseline significantly. Finally, we sanity-check that these improvements are not obtained by exploiting potential new shortcuts in the adversarial data, but indeed due to robust multi-hop reasoning skills of the models.",
}
"""

# Human-readable summary shown on the dataset card via `DatasetInfo.description`.
_DESCRIPTION = """\
This dataset is from the paper: "Avoiding Reasoning Shortcuts: Adversarial Evaluation, Training, and Model Development for
Multi-Hop QA" by Yichen Jiang and Mohit Bansal.

The dataset was created using the code provided in the repo: https://github.com/jiangycTarheel-zz/Adversarial-MultiHopQA.
"""

# Locations of the raw split files hosted on the Hugging Face Hub
# (stored as Git LFS objects in this repository).
TRAIN_JSON = "https://huggingface.co/datasets/sagnikrayc/adversarial_hotpotqa/resolve/main/train.json"
VALID_JSON = "https://huggingface.co/datasets/sagnikrayc/adversarial_hotpotqa/resolve/main/validation.json"
class AdvHotpotQA(datasets.GeneratorBasedBuilder):
    """Adversarial HotpotQA: multi-hop QA examples with adversarial distractor documents.

    Loads the train/validation JSON splits generated with the code from
    https://github.com/jiangycTarheel-zz/Adversarial-MultiHopQA.
    """

    def _info(self):
        """Return dataset metadata and the feature schema of each example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "_id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "type": datasets.Value("string"),
                    "level": datasets.Value("string"),
                    # Pairs of (paragraph title, sentence index) marking gold evidence.
                    "supporting_facts": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sent_id": datasets.Value("int32"),
                        }
                    ),
                    # Each context paragraph: its title and its list of sentences.
                    "context": datasets.features.Sequence(
                        {
                            "title": datasets.Value("string"),
                            "sentences": datasets.features.Sequence(datasets.Value("string")),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download both split files and return one SplitGenerator per split."""
        paths = {
            datasets.Split.TRAIN: TRAIN_JSON,
            datasets.Split.VALIDATION: VALID_JSON,
        }
        downloaded = dl_manager.download(paths)
        return [
            datasets.SplitGenerator(name=split, gen_kwargs={"data_file": data_file})
            for split, data_file in downloaded.items()
        ]

    def _generate_examples(self, data_file):
        """Yield (index, example) pairs from one JSON split file."""
        # Fix: the original used `json.load(open(data_file))`, which leaks the
        # file handle and relies on the platform default encoding. Use a
        # context manager with explicit UTF-8.
        with open(data_file, encoding="utf-8") as handle:
            records = json.load(handle)
        for idx, record in enumerate(records):
            # Test-style files may omit these keys; backfill so the yielded
            # dict always matches the declared feature schema.
            for key in ("answer", "type", "level"):
                record.setdefault(key, None)
            record.setdefault("supporting_facts", [])
            yield idx, {
                "_id": record["_id"],
                "question": record["question"],
                "answer": record["answer"],
                "type": record["type"],
                "level": record["level"],
                # Raw HotpotQA stores facts/context as [title, payload] lists;
                # convert to the keyed form declared in `_info`.
                "supporting_facts": [{"title": f[0], "sent_id": f[1]} for f in record["supporting_facts"]],
                "context": [{"title": c[0], "sentences": c[1]} for c in record["context"]],
            }
train.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22ffe0a76fcf9ae54e37ca33dc5946882cf5b5df978641551ea45d10fdd9e70f
3
+ size 499652968
validation.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:951efc9ef2b185fc26b6f5a8befc42d089c14943d251b08ec9dc75be112d4f0f
3
+ size 41330050