Giguru Scheuer committed on
Commit 0647863
1 Parent(s): 8205a6a
Files changed (1):
  1. canard_quretec.py +25 -10
canard_quretec.py CHANGED
@@ -21,7 +21,6 @@ import os
 import datasets
 
 
-# TODO: Add BibTeX citation
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
 @inproceedings{Elgohary:Peskov:Boyd-Graber-2019,
@@ -49,8 +48,23 @@ _LICENSE = "CC BY-SA 4.0"
 
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
+_URL = "https://drive.google.com/drive/folders/1e3s-V6VQqOKHrmn_kBStNsV0gGHPeJVf/"
 _URLs = {
-    'voskarides': "https://drive.google.com/drive/folders/1e3s-V6VQqOKHrmn_kBStNsV0gGHPeJVf",
+    'gold_supervision': {
+        'train': _URL+"train_gold_supervision.json",
+        'dev': _URL+"dev_gold_supervision.json",
+        'test': _URL+"test_gold_supervision.json"
+    },
+    'original_all': {
+        'train': _URL+"train_original_all.json",
+        'dev': _URL+"dev_original_all.json",
+        'test': _URL+"test_original_all.json"
+    },
+    'distant_supervision': {
+        'train': _URL+"train_distant_supervision.json",
+        'dev': _URL+"dev_distant_supervision.json",
+        'test': _URL+"test_distant_supervision.json"
+    }
 }
 
 
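For reference, every config in the new `_URLs` layout maps to the same three split keys, which `_split_generators` indexes further down. A quick sanity-check sketch (not part of the commit):

for config_name, split_urls in _URLs.items():
    # Every config must expose exactly the splits _split_generators expects.
    assert set(split_urls) == {'train', 'dev', 'test'}, config_name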
@@ -74,7 +88,8 @@ class CanardQuretec(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="gold_supervision", version=VERSION, description="Was used for training quretec with gold supervision"),
-        # datasets.BuilderConfig(name="original_all", version=VERSION, description="Was used for creating dataset statistics"),
+        datasets.BuilderConfig(name="original_all", version=VERSION, description="Was used for creating dataset statistics"),
+        datasets.BuilderConfig(name="distant_supervision", version=VERSION, description="Was used for training quretec with distant supervision"),
     ]
 
     # It's not mandatory to have a default configuration. Just use one if it make sense.
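With all three configs registered, each variant can be requested by name. A hypothetical usage sketch, assuming the script is loaded from a local `canard_quretec.py`:

import datasets

# The local script path is an assumption for illustration.
gold = datasets.load_dataset('./canard_quretec.py', 'gold_supervision')
distant = datasets.load_dataset('./canard_quretec.py', 'distant_supervision')
original = datasets.load_dataset('./canard_quretec.py', 'original_all')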
@@ -122,26 +137,26 @@ class CanardQuretec(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         my_urls = _URLs[self.config.name]
-        data_dir = dl_manager.download_and_extract(my_urls)
+        downloaded_files = dl_manager.download_and_extract(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={  # These kwargs will be passed to _generate_examples
-                    "filepath": os.path.join(data_dir, "train_gold_supervision.json"),
+                    "filepath": downloaded_files['train'],
                     "split": "train",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={  # These kwargs will be passed to _generate_examples
-                    "filepath": os.path.join(data_dir, "test_gold_supervision.json"),
+                    "filepath": downloaded_files['test'],
                     "split": "test"
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={  # These kwargs will be passed to _generate_examples
-                    "filepath": os.path.join(data_dir, "dev_gold_supervision.json"),
+                    "filepath": downloaded_files['dev'],
                     "split": "dev",
                 },
             ),
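As the inline comments note, each `gen_kwargs` dict is forwarded unchanged to `_generate_examples`, so its parameters must match those keys. A minimal sketch of the matching signature (body elided):

def _generate_examples(self, filepath, split):
    # `filepath` is a local cache path from downloaded_files[...];
    # `split` is the "train"/"test"/"dev" string set in gen_kwargs above.
    ...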
@@ -155,7 +170,7 @@ class CanardQuretec(datasets.GeneratorBasedBuilder):
         # The `key` is here for legacy reason (tfds) and is not important in itself.
 
         with open(filepath) as f:
-            data = json.load(f)
-            for id_, row in data:
+            data_array = json.load(f)
+            for id_, item_dict in data_array:
                 # if self.config.name == "first_domain":
-                yield id_, row
+                yield id_, item_dict
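Note that `for id_, item_dict in data_array:` unpacks each JSON element into a pair, which only works if the file stores (id, example) tuples. If the file were instead a flat array of example dicts, a defensive variant, under that assumption, would be:

import json

def _generate_examples(self, filepath, split):
    # Sketch assuming a flat JSON array of example dicts rather than
    # (id, example) pairs; enumerate supplies the integer keys.
    with open(filepath) as f:
        data_array = json.load(f)
    for id_, item_dict in enumerate(data_array):
        yield id_, item_dict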