system HF staff committed on
Commit
da596cf
0 Parent(s):

Update files from the datasets library (from 1.2.0)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.2.0

Files changed (5)
  1. .gitattributes +27 -0
  2. README.md +149 -0
  3. dataset_infos.json +1 -0
  4. dummy/1.1.0/dummy_data.zip +3 -0
  5. hover.py +111 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
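These rules route every matching file through Git LFS. As a rough illustration only (Python's `fnmatch` does not reproduce exact gitattributes glob semantics, e.g. for `saved_model/**/*`), a sketch of how the simple patterns above select files:

```python
import fnmatch

# A subset of the simple patterns from the .gitattributes above.
LFS_PATTERNS = ["*.7z", "*.arrow", "*.bin", "*.h5", "*.zip", "*tfevents*"]

def tracked_by_lfs(filename):
    """Return True if the filename matches any of the LFS patterns."""
    return any(fnmatch.fnmatch(filename, pattern) for pattern in LFS_PATTERNS)

print(tracked_by_lfs("dummy_data.zip"))  # True, via *.zip
print(tracked_by_lfs("hover.py"))        # False, so stored as a regular file
```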
README.md ADDED
@@ -0,0 +1,149 @@
+ ---
+ annotations_creators:
+ - expert-generated
+ language_creators:
+ - expert-generated
+ - found
+ languages:
+ - en
+ licenses:
+ - cc-by-sa-4-0
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 10K<n<100K
+ source_datasets:
+ - original
+ task_categories:
+ - text-retrieval
+ task_ids:
+ - fact-checking-retrieval
+ ---
+
+ # Dataset Card for HoVer
+
+ ## Table of Contents
+ - [Dataset Description](#dataset-description)
+   - [Dataset Summary](#dataset-summary)
+   - [Supported Tasks](#supported-tasks-and-leaderboards)
+   - [Languages](#languages)
+ - [Dataset Structure](#dataset-structure)
+   - [Data Instances](#data-instances)
+   - [Data Fields](#data-fields)
+   - [Data Splits](#data-splits)
+ - [Dataset Creation](#dataset-creation)
+   - [Curation Rationale](#curation-rationale)
+   - [Source Data](#source-data)
+   - [Annotations](#annotations)
+   - [Personal and Sensitive Information](#personal-and-sensitive-information)
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
+   - [Social Impact of Dataset](#social-impact-of-dataset)
+   - [Discussion of Biases](#discussion-of-biases)
+   - [Other Known Limitations](#other-known-limitations)
+ - [Additional Information](#additional-information)
+   - [Dataset Curators](#dataset-curators)
+   - [Licensing Information](#licensing-information)
+   - [Citation Information](#citation-information)
+
+ ## Dataset Description
+
+ - **Homepage:** https://hover-nlp.github.io/
+ - **Repository:** https://github.com/hover-nlp/hover
+ - **Paper:** https://arxiv.org/abs/2011.03088
+ - **Leaderboard:** https://hover-nlp.github.io/
+ - **Point of Contact:** [More Information Needed]
+
+ ### Dataset Summary
+
+ [More Information Needed]
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ ### Languages
+
+ [More Information Needed]
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ A sample from the training set is provided below:
+
+ ```
+ {'id': 14856, 'uid': 'a0cf45ea-b5cd-4c4e-9ffa-73b39ebd78ce', 'claim': 'The park at which Tivolis Koncertsal is located opened on 15 August 1843.', 'supporting_facts': [{'key': 'Tivolis Koncertsal', 'value': 0}, {'key': 'Tivoli Gardens', 'value': 1}], 'label': 'SUPPORTED', 'num_hops': 2, 'hpqa_id': '5abca1a55542993a06baf937'}
+ ```
+
+ Please note that in the test set only the `id`, `uid` and `claim` fields are available. Labels are not available in the test set and are represented by -1.
+
+ ### Data Fields
+
+ [More Information Needed]
+
+ ### Data Splits
+
+ [More Information Needed]
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ ### Source Data
+
+ [More Information Needed]
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ ### Annotations
+
+ [More Information Needed]
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ ### Citation Information
+
+ [More Information Needed]
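Several sections of the card above are still marked [More Information Needed]. One way to recover the data fields and split sizes is to inspect the loaded dataset directly; a minimal sketch, assuming the `datasets` library and the `hover.py` builder added in this commit:

```python
from datasets import load_dataset

# Load all three splits via the hover builder script below.
hover = load_dataset("hover")

# Split sizes: train/validation/test.
for name, split in hover.items():
    print(name, split.num_rows)

# Field names and types, matching the features declared in hover.py.
print(hover["train"].features)
```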
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "HoVer is an open-domain, many-hop fact extraction and claim verification dataset built upon the Wikipedia corpus. The original 2-hop claims are adapted from question-answer pairs from HotpotQA. It is collected by a team of NLP researchers at UNC Chapel Hill and Verisk Analytics.\n", "citation": "@inproceedings{jiang2020hover,\n title={{HoVer}: A Dataset for Many-Hop Fact Extraction And Claim Verification},\n author={Yichen Jiang and Shikha Bordia and Zheng Zhong and Charles Dognin and Maneesh Singh and Mohit Bansal.},\n booktitle={Findings of the Conference on Empirical Methods in Natural Language Processing ({EMNLP})},\n year={2020}\n}\n", "homepage": "https://hover-nlp.github.io/", "license": "", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "uid": {"dtype": "string", "id": null, "_type": "Value"}, "claim": {"dtype": "string", "id": null, "_type": "Value"}, "supporting_facts": [{"key": {"dtype": "string", "id": null, "_type": "Value"}, "value": {"dtype": "int32", "id": null, "_type": "Value"}}], "label": {"num_classes": 2, "names": ["NOT_SUPPORTED", "SUPPORTED"], "names_file": null, "id": null, "_type": "ClassLabel"}, "num_hops": {"dtype": "int32", "id": null, "_type": "Value"}, "hpqa_id": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "hover", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 5532178, "num_examples": 18171, "dataset_name": "hover"}, "validation": {"name": "validation", "num_bytes": 1299252, "num_examples": 4000, "dataset_name": "hover"}, "test": {"name": "test", "num_bytes": 927513, "num_examples": 4000, "dataset_name": "hover"}}, "download_checksums": {"https://raw.githubusercontent.com/hover-nlp/hover/main/data/hover/hover_train_release_v1.1.json": {"num_bytes": 9205582, "checksum": "1f1cd57abd616fa00c70bdc575ce77c16fc6cf1a6cffd5ff87c208030a336bb6"}, "https://raw.githubusercontent.com/hover-nlp/hover/main/data/hover/hover_dev_release_v1.1.json": {"num_bytes": 2153439, "checksum": "67c14858f2d7fcdb96b6fe3d538ffcd6f76e3ba594aa2c0cd4359f601101e89d"}, "https://raw.githubusercontent.com/hover-nlp/hover/main/data/hover/hover_test_release_v1.1.json": {"num_bytes": 898814, "checksum": "c58e7fc59b4962213a5a6d41d746384ee88a7645e36cb3a439969cf762c8ec24"}}, "download_size": 12257835, "post_processing_size": null, "dataset_size": 7758943, "size_in_bytes": 20016778}}
dummy/1.1.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e4209fce73e0e85faa36cd2b5cfdb6065c57a1e314a9a31b196e7d620b866c70
+ size 2277
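The dummy archive is stored as a Git LFS pointer rather than raw bytes, since `*.zip` is LFS-tracked in `.gitattributes`. A small sketch that parses a pointer file of the three-line form shown above (the path is this repo's dummy file):

```python
def read_lfs_pointer(path):
    """Parse a Git LFS pointer file into a dict of its space-separated fields."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

pointer = read_lfs_pointer("dummy/1.1.0/dummy_data.zip")
print(pointer["oid"])   # sha256:e4209fce...
print(pointer["size"])  # 2277
```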
hover.py ADDED
@@ -0,0 +1,111 @@
+ # coding=utf-8
+ # Copyright 2020 HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ import json
+
+ import datasets
+
+
+ _DESCRIPTION = """\
+ HoVer is an open-domain, many-hop fact extraction and claim verification dataset built upon the Wikipedia corpus. The original 2-hop claims are adapted from question-answer pairs from HotpotQA. It is collected by a team of NLP researchers at UNC Chapel Hill and Verisk Analytics.
+ """
+ _HOMEPAGE_URL = "https://hover-nlp.github.io/"
+ _CITATION = """\
+ @inproceedings{jiang2020hover,
+     title={{HoVer}: A Dataset for Many-Hop Fact Extraction And Claim Verification},
+     author={Yichen Jiang and Shikha Bordia and Zheng Zhong and Charles Dognin and Maneesh Singh and Mohit Bansal},
+     booktitle={Findings of the Conference on Empirical Methods in Natural Language Processing ({EMNLP})},
+     year={2020}
+ }
+ """
+
+ _TRAIN_URL = "https://raw.githubusercontent.com/hover-nlp/hover/main/data/hover/hover_train_release_v1.1.json"
+ _VALID_URL = "https://raw.githubusercontent.com/hover-nlp/hover/main/data/hover/hover_dev_release_v1.1.json"
+ _TEST_URL = "https://raw.githubusercontent.com/hover-nlp/hover/main/data/hover/hover_test_release_v1.1.json"
+
+
+ class Hover(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.1.0")
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "uid": datasets.Value("string"),
+                     "claim": datasets.Value("string"),
+                     "supporting_facts": [
+                         {
+                             "key": datasets.Value("string"),
+                             "value": datasets.Value("int32"),
+                         }
+                     ],
+                     "label": datasets.ClassLabel(names=["NOT_SUPPORTED", "SUPPORTED"]),
+                     "num_hops": datasets.Value("int32"),
+                     "hpqa_id": datasets.Value("string"),
+                 },
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE_URL,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         train_path = dl_manager.download_and_extract(_TRAIN_URL)
+         valid_path = dl_manager.download_and_extract(_VALID_URL)
+         test_path = dl_manager.download_and_extract(_TEST_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"datapath": train_path, "datatype": "train"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={"datapath": valid_path, "datatype": "valid"},
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={"datapath": test_path, "datatype": "test"},
+             ),
+         ]
+
+     def _generate_examples(self, datapath, datatype):
+         with open(datapath, encoding="utf-8") as f:
+             data = json.load(f)
+
+         for sentence_counter, d in enumerate(data):
+             if datatype != "test":
+                 resp = {
+                     "id": sentence_counter,
+                     "uid": d["uid"],
+                     "claim": d["claim"],
+                     "supporting_facts": [{"key": x[0], "value": x[1]} for x in d["supporting_facts"]],
+                     "label": d["label"],
+                     "num_hops": d["num_hops"],
+                     "hpqa_id": d["hpqa_id"],
+                 }
+             else:
+                 resp = {
+                     "id": sentence_counter,
+                     "uid": d["uid"],
+                     "claim": d["claim"],
+                     "supporting_facts": [],
+                     "label": -1,
+                     "num_hops": -1,
+                     "hpqa_id": "None",
+                 }
+             yield sentence_counter, resp
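A short usage sketch for the builder above, assuming the script resolves as the `hover` dataset. Note how `_generate_examples` fills test examples with placeholders (`label == -1`, empty `supporting_facts`):

```python
from datasets import load_dataset

hover = load_dataset("hover")
example = hover["train"][0]

# ClassLabel stores labels as integers; int2str maps them back to names.
label_feature = hover["train"].features["label"]
print(example["claim"])
print(label_feature.int2str(example["label"]))  # "SUPPORTED" or "NOT_SUPPORTED"

# Test examples carry the placeholder values set in _generate_examples,
# so don't call int2str on them.
print(hover["test"][0]["label"])  # -1
```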