varora committed
Commit
78aa55e
1 Parent(s): 62d52d8

update hit.py

Files changed (1)
  1. hit.py +12 -8
hit.py CHANGED
@@ -74,6 +74,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # data = datasets.load_dataset('my_dataset', 'second_domain')
 
     def _info(self):
+        print("HELOOOOOOOOO")
         features = datasets.Features(
             {
                 "gender": datasets.Value("string"),
@@ -110,14 +111,14 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        rel_path = _PATHS[self.config.name]
-        print(os.path.join(rel_path, "train", "*.gz"))
+        data_urls = _PATHS[self.config.name]
+        archive_paths = dl_manager.download(data_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(rel_path, "train", "*.gz"),
+                    "filepath": os.path.join(data_urls, "train"),
                     "split": "train",
                 },
             ),
@@ -125,7 +126,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(rel_path, "val", "*.gz"),
+                    "filepath": os.path.join(data_urls, "val"),
                     "split": "validation",
                 },
             ),
@@ -133,7 +134,7 @@ class NewDataset(datasets.GeneratorBasedBuilder):
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "filepath": os.path.join(rel_path, "test", "*.gz"),
+                    "filepath": os.path.join(data_urls, "test"),
                     "split": "test"
                 },
             ),
@@ -144,9 +145,12 @@ class NewDataset(datasets.GeneratorBasedBuilder):
         # TODO: This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself, but must be unique for each example.
         # List all files in the path .gz
-        files = glob(filepath)
-        print(files)
-        for subject_path in files:
+        file_paths = []
+        for root, dirs, files in os.walk(filepath):
+            for file in files:
+                if file.endswith('.gz'):
+                    file_paths.append(file)
+        for subject_path in file_paths:
             with gzip.open(subject_path, 'rb') as f:
                 data = pickle.load(f)
                 key = data['subject_ID']
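
A note on the new `_generate_examples` loop: `os.walk` yields bare filenames in `files`, so `file_paths.append(file)` stores names without their directories, and the later `gzip.open(subject_path, 'rb')` would only succeed if the process happens to run from inside the walked folder. Relatedly, `_split_generators` now downloads the data into `archive_paths` but still builds `filepath` from the raw `data_urls`. Below is a minimal sketch of both fixes, not part of the commit: `iter_gz_files` and `generate_examples` are hypothetical helper names, and it assumes `_PATHS[self.config.name]` resolves to a directory with `train/`, `val/`, and `test/` subfolders of pickled `.gz` files.

    import gzip
    import os
    import pickle

    def iter_gz_files(split_dir):
        """Yield the full path of every .gz file under split_dir."""
        for root, _dirs, files in os.walk(split_dir):
            for name in files:
                if name.endswith(".gz"):
                    # Keep the directory prefix; appending the bare
                    # filename loses the path that gzip.open() needs.
                    yield os.path.join(root, name)

    def generate_examples(split_dir):
        """Yield (key, example) pairs the way _generate_examples does.

        Assumes each archive holds a pickled dict with a 'subject_ID'
        field, as in the committed code.
        """
        for subject_path in iter_gz_files(split_dir):
            with gzip.open(subject_path, "rb") as f:
                data = pickle.load(f)
            yield data["subject_ID"], data

    # On the _split_generators side, the local paths returned by
    # dl_manager.download(data_urls) -- not data_urls itself -- are
    # presumably what gen_kwargs should point at, e.g.
    #     "filepath": os.path.join(archive_paths, "train")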