Datasets
Tasks: Image Classification
Sub-tasks: multi-class-classification
Languages: English
Size: 10K<n<100K
License:
Francisco Castillo committed
Commit · e25e21d · 1 Parent(s): ce40f71
Clean up
fashion_mnist_label_drift.py CHANGED
@@ -16,10 +16,8 @@
 """IMDb movie revies dataset mixed with Trip Advisor Hotel Reviews to simulate drift accross time."""
 
 
-import pickle
-import os
-import pandas as pd
 import datasets
+import pickle
 
 
 
@@ -130,7 +128,6 @@ class FashionMNISTLabelDrift(datasets.GeneratorBasedBuilder):
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         extracted_paths = dl_manager.download_and_extract(_URLS)
-        print("EXTRACTED PATHS=",extracted_paths)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split("training"),
@@ -160,7 +157,6 @@ class FashionMNISTLabelDrift(datasets.GeneratorBasedBuilder):
     def _generate_examples(self, filepath):
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
-        print("FILEPATH=",filepath)
         with open(filepath, 'rb') as pkl_file:
             data = pickle.load(pkl_file, encoding='bytes')
             prediction_ts=data['prediction_ts']
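For orientation, a minimal sketch of the builder pattern the script follows after this clean-up. The class name, the download_and_extract(_URLS) call, the "training" split, the pickle loading, and the prediction_ts field all appear in the diff above; the _URLS value, the feature set, the gen_kwargs key, and the shape of the yielded examples are hypothetical stand-ins for illustration, not the script's actual definitions:

import pickle

import datasets

# Hypothetical placeholder; the real _URLS in the script maps split names to the pickled archives.
_URLS = {"training": "https://example.com/fashion_mnist_label_drift_training.pkl"}


class FashionMNISTLabelDrift(datasets.GeneratorBasedBuilder):
    def _info(self):
        # The real script defines richer features; only prediction_ts is visible
        # in this diff, so the feature set here is assumed.
        return datasets.DatasetInfo(
            features=datasets.Features({"prediction_ts": datasets.Value("float64")}),
        )

    def _split_generators(self, dl_manager):
        # download_and_extract mirrors the structure of _URLS and returns local
        # paths to the downloaded (and extracted) files instead of the URLs.
        extracted_paths = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split("training"),
                gen_kwargs={"filepath": extracted_paths["training"]},
            ),
        ]

    def _generate_examples(self, filepath):
        # Yield (key, example) pairs; the key only needs to be unique per example.
        with open(filepath, "rb") as pkl_file:
            data = pickle.load(pkl_file, encoding="bytes")
        prediction_ts = data["prediction_ts"]
        for idx, ts in enumerate(prediction_ts):
            yield idx, {"prediction_ts": float(ts)}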
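For completeness, a hedged example of consuming such a loading script; the script path is the file from this commit, and recent versions of datasets may additionally require passing trust_remote_code=True for script-based datasets:

from datasets import load_dataset

# "training" matches the custom split name declared via datasets.Split("training") in the script.
ds = load_dataset("fashion_mnist_label_drift.py", split="training")
print(ds[0])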