albertvillanova (HF staff) committed
Commit a6842a0
Parent: 54b9787

Convert dataset to Parquet (#6)


- Convert dataset to Parquet (fe721a2c92746e3082b43f6cc968636cc1bd198c)
- Add 'masked_medhop' config data files (83a39d6809957620f77a744d34fb36e38566ba4d)
- Add 'wikihop' config data files (95ff35c439fdd18848475cb8fec18d157aa2848c)
- Add 'masked_wikihop' config data files (b04c4143c89a15596c43cbbb2fe05f2c6defc626)
- Delete loading script (7276113ed572d98b650ec397771f3fe1d8af63c4)
- Delete data file (f2879469fdc15902ef29a34dc8c8cf7af239d5a3)
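With the Parquet shards and the card's `configs` mapping in place, the dataset loads without executing any script. A minimal loading sketch, assuming the Hub repository id is "qangaroo" (this repo's name); split sizes are taken from the updated card:

from datasets import load_dataset

# Each config now maps straight to its Parquet data files.
ds = load_dataset("qangaroo", "masked_medhop")
print(ds)                        # train: 1620 examples, validation: 342
print(ds["train"][0]["query"])   # features: query, supports, candidates, answer, id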

README.md CHANGED
@@ -1,10 +1,9 @@
 ---
 language:
 - en
-paperswithcode_id: null
 pretty_name: qangaroo
 dataset_info:
-- config_name: medhop
+- config_name: masked_medhop
   features:
   - name: query
     dtype: string
@@ -18,14 +17,14 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 93947725
+    num_bytes: 95813556
     num_examples: 1620
   - name: validation
-    num_bytes: 16463555
+    num_bytes: 16800542
     num_examples: 342
-  download_size: 339843061
-  dataset_size: 110411280
-- config_name: masked_medhop
+  download_size: 58801723
+  dataset_size: 112614098
+- config_name: masked_wikihop
   features:
   - name: query
     dtype: string
@@ -39,14 +38,14 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 95823986
-    num_examples: 1620
+    num_bytes: 348073986
+    num_examples: 43738
   - name: validation
-    num_bytes: 16802484
-    num_examples: 342
-  download_size: 339843061
-  dataset_size: 112626470
-- config_name: wikihop
+    num_bytes: 43663600
+    num_examples: 5129
+  download_size: 211302995
+  dataset_size: 391737586
+- config_name: medhop
   features:
   - name: query
     dtype: string
@@ -60,14 +59,14 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 325994029
-    num_examples: 43738
+    num_bytes: 93937294
+    num_examples: 1620
   - name: validation
-    num_bytes: 40869634
-    num_examples: 5129
-  download_size: 339843061
-  dataset_size: 366863663
-- config_name: masked_wikihop
+    num_bytes: 16461612
+    num_examples: 342
+  download_size: 57837760
+  dataset_size: 110398906
+- config_name: wikihop
   features:
   - name: query
     dtype: string
@@ -81,13 +80,38 @@ dataset_info:
     dtype: string
   splits:
   - name: train
-    num_bytes: 348290479
+    num_bytes: 325777822
     num_examples: 43738
   - name: validation
-    num_bytes: 43689810
+    num_bytes: 40843303
     num_examples: 5129
-  download_size: 339843061
-  dataset_size: 391980289
+  download_size: 202454962
+  dataset_size: 366621125
+configs:
+- config_name: masked_medhop
+  data_files:
+  - split: train
+    path: masked_medhop/train-*
+  - split: validation
+    path: masked_medhop/validation-*
+- config_name: masked_wikihop
+  data_files:
+  - split: train
+    path: masked_wikihop/train-*
+  - split: validation
+    path: masked_wikihop/validation-*
+- config_name: medhop
+  data_files:
+  - split: train
+    path: medhop/train-*
+  - split: validation
+    path: medhop/validation-*
+- config_name: wikihop
+  data_files:
+  - split: train
+    path: wikihop/train-*
+  - split: validation
+    path: wikihop/validation-*
 ---
 
 # Dataset Card for "qangaroo"
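The new `configs` block is what wires each builder config to its Parquet shards; the path globs resolve relative to the repository root. The same mapping can be reproduced by hand when the shards are stored locally. A sketch, assuming the files were downloaded preserving this repo's folder layout:

from datasets import load_dataset

# Point the generic parquet builder at the wikihop shards directly.
ds = load_dataset(
    "parquet",
    data_files={
        "train": "wikihop/train-*",
        "validation": "wikihop/validation-*",
    },
)
print(ds["validation"].num_rows)  # 5129, per the card above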
qangaroo_v1.1.zip → masked_medhop/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2f512869760cdad76a022a1465f025b486ae79dc5b8f0bf3ad901a4caf2d3050
-size 339843061
+oid sha256:88b2cb6463c9ba61430d4f2527b9f9dc07b1e94ea7fa783a3e95eef1045ded88
+size 50027308
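Each shard is tracked through Git LFS, so the repository stores only a pointer file recording the blob's SHA-256 and byte size. A small sketch for checking a downloaded shard against its pointer; the local path is illustrative, while the oid and size are copied from the diff above:

import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    """Return True if the local file matches the LFS pointer's sha256 oid and size."""
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

print(matches_lfs_pointer(
    "masked_medhop/train-00000-of-00001.parquet",  # illustrative local path
    oid="88b2cb6463c9ba61430d4f2527b9f9dc07b1e94ea7fa783a3e95eef1045ded88",
    size=50027308,
))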
masked_medhop/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a13e43d2ba22fe8470bd0ae8333bc53b4ff11bce2bce657a31ebee635663558a
+size 8774415
masked_wikihop/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b2c8882ae2beb592aee16bb33f8e996faea13f8ce9c2245a426ff66faaeb9fe
+size 187716381
masked_wikihop/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d955159145070559eb7cdd7c108c74924beceea1e435577a4e72ef0b576fc0fe
+size 23586614
medhop/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8eb9c480e5706b96c2084de7c1f788f8b533a100f056f7ca3af66d83ca555b0
+size 49190527
medhop/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1c8b0249a81eba3fd646c6589f56d56adb583e384967e8af969aca67e51e93d9
+size 8647233
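Since each split is now a plain Parquet file, it can also be read directly, without the datasets library. A sketch using pandas over huggingface_hub's hf:// filesystem; the hub path assumes this repo's id is "qangaroo":

# Requires: pip install pandas pyarrow huggingface_hub
import pandas as pd

df = pd.read_parquet("hf://datasets/qangaroo/medhop/validation-00000-of-00001.parquet")
print(df.columns.tolist())  # ['query', 'supports', 'candidates', 'answer', 'id']
print(len(df))              # 342 rows, matching the card's validation split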
qangaroo.py DELETED
@@ -1,126 +0,0 @@
-"""TODO(qangaroo): Add a description here."""
-
-
-import json
-import os
-
-import datasets
-
-
-# TODO(qangaroo): BibTeX citation
-
-_CITATION = """
-"""
-
-# TODO(qangaroo):
-_DESCRIPTION = """\
-We have created two new Reading Comprehension datasets focusing on multi-hop (alias multi-step) inference.
-
-Several pieces of information often jointly imply another fact. In multi-hop inference, a new fact is derived by combining facts via a chain of multiple steps.
-
-Our aim is to build Reading Comprehension methods that perform multi-hop inference on text, where individual facts are spread out across different documents.
-
-The two QAngaroo datasets provide a training and evaluation resource for such methods.
-"""
-
-_MEDHOP_DESCRIPTION = """\
-With the same format as WikiHop, this dataset is based on research paper abstracts from PubMed, and the queries are about interactions between pairs of drugs.
-The correct answer has to be inferred by combining information from a chain of reactions of drugs and proteins.
-"""
-_WIKIHOP_DESCRIPTION = """\
-This dataset is based on sets of Wikipedia articles, and the queries are about properties of entities.
-The correct answer has to be inferred by combining facts spread across multiple documents.
-"""
-
-_URL = "qangaroo_v1.1.zip"
-
-
-class QangarooConfig(datasets.BuilderConfig):
-    def __init__(self, data_dir, **kwargs):
-        """BuilderConfig for the qangaroo dataset.
-
-        Args:
-            data_dir: directory for the given dataset name
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(QangarooConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-        self.data_dir = data_dir
-
-
-class Qangaroo(datasets.GeneratorBasedBuilder):
-    """TODO(qangaroo): Short description of my dataset."""
-
-    # TODO(qangaroo): Set up version.
-    VERSION = datasets.Version("0.1.0")
-    BUILDER_CONFIGS = [
-        QangarooConfig(name="medhop", description=_MEDHOP_DESCRIPTION, data_dir="medhop"),
-        QangarooConfig(name="masked_medhop", description=_MEDHOP_DESCRIPTION, data_dir="medhop"),
-        QangarooConfig(name="wikihop", description=_WIKIHOP_DESCRIPTION, data_dir="wikihop"),
-        QangarooConfig(name="masked_wikihop", description=_WIKIHOP_DESCRIPTION, data_dir="wikihop"),
-    ]
-
-    def _info(self):
-        # TODO(qangaroo): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "query": datasets.Value("string"),
-                    "supports": datasets.features.Sequence(datasets.Value("string")),
-                    "candidates": datasets.features.Sequence(datasets.Value("string")),
-                    "answer": datasets.Value("string"),
-                    "id": datasets.Value("string"),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="http://qangaroo.cs.ucl.ac.uk/index.html",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(qangaroo): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        data_dir = os.path.join(dl_dir, "qangaroo_v1.1")
-        train_file = "train.masked.json" if "masked" in self.config.name else "train.json"
-        dev_file = "dev.masked.json" if "masked" in self.config.name else "dev.json"
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, self.config.data_dir, train_file)},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                # These kwargs will be passed to _generate_examples
-                gen_kwargs={"filepath": os.path.join(data_dir, self.config.data_dir, dev_file)},
-            ),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(qangaroo): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for example in data:
-                id_ = example["id"]
-                yield id_, {
-                    "id": example["id"],
-                    "query": example["query"],
-                    "supports": example["supports"],
-                    "candidates": example["candidates"],
-                    "answer": example["answer"],
-                }
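The deleted script did little more than unzip qangaroo_v1.1.zip and parse one JSON file per config and split. For anyone still working from the raw zip, the equivalent standalone parse is a few lines; this is a sketch following the script's own path logic, with the extraction directory as an assumption:

import json
import os

def load_qangaroo_split(root, config="medhop", split="train"):
    """Parse a raw qangaroo_v1.1 split the way the deleted script did."""
    folder = config.replace("masked_", "")  # masked configs share the unmasked folder
    stem = "dev" if split == "validation" else "train"
    name = f"{stem}.masked.json" if "masked" in config else f"{stem}.json"
    path = os.path.join(root, "qangaroo_v1.1", folder, name)
    with open(path, encoding="utf-8") as f:
        return json.load(f)  # list of dicts: id, query, supports, candidates, answer

examples = load_qangaroo_split("/tmp/qangaroo", "masked_medhop", "validation")  # assumed extraction dir
print(examples[0]["query"])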
wikihop/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e60c581c298c65870c465c440dc5e0a7b1a40b26e2027709bda3a4f8c1d30553
+size 179876427
wikihop/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50517348fd57fb40fddedba920c52ee8cabeffb9e0369bd76e0703ce76065845
+size 22578535