albertvillanova (HF staff) committed
Commit f659301
Parent: 334431e

Convert dataset to Parquet (#4)


- Convert dataset to Parquet (fa102ba3a84b6c58abd1ab51cd9177385024e8fe)
- Delete loading script (5bda394d5c9bd34bd443ff92eebb18356929d138)
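
Since this commit the dataset is served directly from the Parquet shards under `data/`, so `datasets.load_dataset` no longer has to execute a loading script. A minimal sketch, assuming the repository id is `qa_zre`:

```python
from datasets import load_dataset

# Loads all three splits from the Parquet files added in this commit.
dataset = load_dataset("qa_zre")
print(dataset)  # DatasetDict with train, validation, and test splits
```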

README.md CHANGED

```diff
@@ -9,7 +9,6 @@ license:
 - unknown
 multilinguality:
 - monolingual
-pretty_name: QaZre
 size_categories:
 - 1M<n<10M
 source_datasets:
@@ -17,7 +16,7 @@ source_datasets:
 task_categories:
 - question-answering
 task_ids: []
-paperswithcode_id: null
+pretty_name: QaZre
 tags:
 - zero-shot-relation-extraction
 dataset_info:
@@ -34,16 +33,25 @@ dataset_info:
     sequence: string
   splits:
   - name: test
-    num_bytes: 29410194
+    num_bytes: 29409906
     num_examples: 120000
   - name: validation
-    num_bytes: 1481430
+    num_bytes: 1481406
     num_examples: 6000
   - name: train
-    num_bytes: 2054954011
+    num_bytes: 2054933851
     num_examples: 8400000
-  download_size: 516061636
-  dataset_size: 2085845635
+  download_size: 1262253976
+  dataset_size: 2085825163
+configs:
+- config_name: default
+  data_files:
+  - split: test
+    path: data/test-*
+  - split: validation
+    path: data/validation-*
+  - split: train
+    path: data/train-*
 ---
 
 # Dataset Card for QaZre
```
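
The new `configs` block maps each split to its Parquet shards via glob patterns, and the updated split metadata can be checked without downloading any data. A minimal sketch with `datasets`, assuming the repository id is `qa_zre`:

```python
from datasets import load_dataset_builder

# Inspect the Parquet-backed split metadata without downloading the data.
builder = load_dataset_builder("qa_zre")
for split, info in builder.info.splits.items():
    print(split, info.num_examples)
# Expected per the updated card: test 120000, validation 6000, train 8400000
```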
data/test-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6dcd88f988e25fc84340605981ca4cee2431f46c6cd7bb8d71fb26a971cfa47
+size 16280465
```

data/train-00000-of-00005.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ef26fd38225787069f5808cc7ca435c624a2e053aeebd4dff3a44d5b25136f6
+size 249185400
```

data/train-00001-of-00005.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ecbe362d1e5b700a35a4f08ad8ffc5bb876f37532458fb5139ab69a2824bbcb
+size 248602563
```

data/train-00002-of-00005.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:012ca297cda181b54cbcc3166e4efddfdc4a8ada2335799a2861b6ea24b4ed1f
+size 247864412
```

data/train-00003-of-00005.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f98c6c0ad56597317df7d0ed865d7db368bd4fce92d2c789598d26c596e26740
+size 249805558
```

data/train-00004-of-00005.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:593cb80c479247bf10a92ef032f4580ce249c94b00d0249e84c665bdfc44cd55
+size 249681886
```

data/validation-00000-of-00001.parquet ADDED

```diff
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38a5c8d0b0deb3c4bea9d6293128189ae1f21de4846ffd0df520ef21bb9c6d11
+size 833692
```
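
Each file above is a git-LFS pointer, not the Parquet data itself; the `oid` and `size` lines identify the real object, which the Hub resolves on download. A sketch of fetching one shard and inspecting it directly, assuming the repository id is `qa_zre`:

```python
import pyarrow.parquet as pq
from huggingface_hub import hf_hub_download

# hf_hub_download resolves the LFS pointer to the actual Parquet file.
path = hf_hub_download(
    repo_id="qa_zre",
    filename="data/validation-00000-of-00001.parquet",
    repo_type="dataset",
)
table = pq.read_table(path)
print(table.num_rows)      # 6000, per the card metadata
print(table.column_names)  # relation, question, subject, context, answers
```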
qa_zre.py DELETED

```diff
@@ -1,99 +0,0 @@
-"""A dataset reducing relation extraction to simple reading comprehension questions"""
-
-import csv
-import os
-
-import datasets
-
-
-_CITATION = """\
-@inproceedings{levy-etal-2017-zero,
-    title = "Zero-Shot Relation Extraction via Reading Comprehension",
-    author = "Levy, Omer and
-      Seo, Minjoon and
-      Choi, Eunsol and
-      Zettlemoyer, Luke",
-    booktitle = "Proceedings of the 21st Conference on Computational Natural Language Learning ({C}o{NLL} 2017)",
-    month = aug,
-    year = "2017",
-    address = "Vancouver, Canada",
-    publisher = "Association for Computational Linguistics",
-    url = "https://www.aclweb.org/anthology/K17-1034",
-    doi = "10.18653/v1/K17-1034",
-    pages = "333--342",
-}
-"""
-
-_DESCRIPTION = """\
-A dataset reducing relation extraction to simple reading comprehension questions
-"""
-
-_DATA_URL = "http://nlp.cs.washington.edu/zeroshot/relation_splits.tar.bz2"
-
-
-class QaZre(datasets.GeneratorBasedBuilder):
-    """QA-ZRE: Reducing relation extraction to simple reading comprehension questions"""
-
-    VERSION = datasets.Version("0.1.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "relation": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "subject": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(datasets.Value("string")),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="http://nlp.cs.washington.edu/zeroshot",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        dl_dir = dl_manager.download_and_extract(_DATA_URL)
-        dl_dir = os.path.join(dl_dir, "relation_splits")
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "filepaths": [os.path.join(dl_dir, "test." + str(i)) for i in range(10)],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "filepaths": [os.path.join(dl_dir, "dev." + str(i)) for i in range(10)],
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepaths": [os.path.join(dl_dir, "train." + str(i)) for i in range(10)],
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepaths):
-        """Yields examples."""
-
-        for file_idx, filepath in enumerate(filepaths):
-            with open(filepath, encoding="utf-8") as f:
-                data = csv.reader(f, delimiter="\t")
-                for idx, row in enumerate(data):
-                    yield f"{file_idx}_{idx}", {
-                        "relation": row[0],
-                        "question": row[1],
-                        "subject": row[2],
-                        "context": row[3],
-                        "answers": row[4:],
-                    }
```
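
With the loading script removed, the row-to-feature mapping it implemented is worth keeping on record. The sketch below reproduces the parsing logic of the deleted `_generate_examples` as standalone Python, for anyone working from the original `relation_splits` TSV release; the file path in the usage comment is a placeholder.

```python
import csv

# Mirrors the deleted loading script: each tab-separated row is
# relation, question, subject, context, then zero or more answers.
def read_qa_zre(filepath):
    with open(filepath, encoding="utf-8") as f:
        for row in csv.reader(f, delimiter="\t"):
            yield {
                "relation": row[0],
                "question": row[1],
                "subject": row[2],
                "context": row[3],
                "answers": row[4:],
            }

# Usage (placeholder path into the extracted archive):
# first_example = next(read_qa_zre("relation_splits/test.0"))
```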