Francisco Castillo committed on
Commit
bdc2a7b
1 Parent(s): cb52154
Files changed (1) hide show
  1. reviews_with_drift.py +46 -12
reviews_with_drift.py CHANGED
@@ -106,14 +106,13 @@ class NewDataset(datasets.GeneratorBasedBuilder):
106
  features=features, # Here we define them above because they are different between the two configurations
107
  # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
108
  # specify them. They'll be used if as_supervised=True in builder.as_dataset.
109
- # supervised_keys=("text", "label"),
110
- supervised_keys=None,
111
  # Homepage of the dataset for documentation
112
  # License for the dataset if available
113
  license=_LICENSE,
114
  # Citation for the dataset
115
  citation=_CITATION,
116
- # task_templates=[TextClassification(text_column="text", label_column="label")],
117
  )
118
 
119
  def _split_generators(self, dl_manager):
@@ -123,39 +122,74 @@ class NewDataset(datasets.GeneratorBasedBuilder):
123
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
124
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
125
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
126
- archive = dl_manager.download(_URLS)
127
  return [
128
  datasets.SplitGenerator(
129
  name=datasets.Split("training"),
130
  # These kwargs will be passed to _generate_examples
131
  gen_kwargs={
132
- "files": dl_manager.iter_archive(archive),
133
  "split": "training",
134
  },
135
  ),
136
  datasets.SplitGenerator(
137
- name=datasets.Split.VALIDATION,
138
  # These kwargs will be passed to _generate_examples
139
  gen_kwargs={
140
- "files": dl_manager.iter_archive(archive),
141
- "split": "validation",
142
  },
143
  ),
144
  datasets.SplitGenerator(
145
  name=datasets.Split("production"),
146
  # These kwargs will be passed to _generate_examples
147
  gen_kwargs={
148
- "files": dl_manager.iter_archive(archive),
149
  "split": "production",
150
  },
151
  ),
152
  ]
153
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
154
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
155
  def _generate_examples(self, filepath, split):
156
  # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
157
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
158
  with open(filepath, encoding="utf-8") as f:
159
- for key, row in enumerate(f):
160
- data = json.loads(row)
161
- yield key, {"text":data["text"]}
 
106
  features=features, # Here we define them above because they are different between the two configurations
107
  # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
108
  # specify them. They'll be used if as_supervised=True in builder.as_dataset.
109
+ supervised_keys=("text", "label"),
 
110
  # Homepage of the dataset for documentation
111
  # License for the dataset if available
112
  license=_LICENSE,
113
  # Citation for the dataset
114
  citation=_CITATION,
115
+ task_templates=[TextClassification(text_column="text", label_column="label")],
116
  )
117
 
118
  def _split_generators(self, dl_manager):
 
122
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
123
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
124
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
125
+ extracted_paths = dl_manager.download_and_extract(_URLS)
126
  return [
127
  datasets.SplitGenerator(
128
  name=datasets.Split("training"),
129
  # These kwargs will be passed to _generate_examples
130
  gen_kwargs={
131
+ "filepath": extracted_paths['training'],
132
  "split": "training",
133
  },
134
  ),
135
  datasets.SplitGenerator(
136
+ name=datasets.Split("validation"),
137
  # These kwargs will be passed to _generate_examples
138
  gen_kwargs={
139
+ "filepath": extracted_paths['validation'],
140
+ "split": "validation"
141
  },
142
  ),
143
  datasets.SplitGenerator(
144
  name=datasets.Split("production"),
145
  # These kwargs will be passed to _generate_examples
146
  gen_kwargs={
147
+ "filepath": extracted_paths['production'],
148
  "split": "production",
149
  },
150
  ),
151
  ]
152
 
153
+ # def _split_generators(self, dl_manager):
154
+ # # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
155
+ # # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
156
+
157
+ # # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
158
+ # # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
159
+ # # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
160
+ # archive = dl_manager.download(_URLS)
161
+ # return [
162
+ # datasets.SplitGenerator(
163
+ # name=datasets.Split("training"),
164
+ # # These kwargs will be passed to _generate_examples
165
+ # gen_kwargs={
166
+ # "files": dl_manager.iter_archive(archive),
167
+ # "split": "training",
168
+ # },
169
+ # ),
170
+ # datasets.SplitGenerator(
171
+ # name=datasets.Split.VALIDATION,
172
+ # # These kwargs will be passed to _generate_examples
173
+ # gen_kwargs={
174
+ # "files": dl_manager.iter_archive(archive),
175
+ # "split": "validation",
176
+ # },
177
+ # ),
178
+ # datasets.SplitGenerator(
179
+ # name=datasets.Split("production"),
180
+ # # These kwargs will be passed to _generate_examples
181
+ # gen_kwargs={
182
+ # "files": dl_manager.iter_archive(archive),
183
+ # "split": "production",
184
+ # },
185
+ # ),
186
+ # ]
187
+
188
  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
189
  def _generate_examples(self, filepath, split):
190
  # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
191
  # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
192
  with open(filepath, encoding="utf-8") as f:
193
+ yield {
194
+ "text": f.read().decode("utf-8")
195
+ }