Update new_dataset_script.py
Browse files- new_dataset_script.py +9 -8
new_dataset_script.py
CHANGED
@@ -133,15 +133,16 @@ class NewDataset(datasets.GeneratorBasedBuilder):
|
|
133 |
# Shuffle the list for random split
|
134 |
random.seed(42) # Set a random seed for reproducibility
|
135 |
random.shuffle(all_image_filenames)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
136 |
|
137 |
-
|
138 |
-
# Use all the data for the full configuration
|
139 |
-
train_files, val_files, test_files = self._split_files(all_image_filenames)
|
140 |
-
elif self.config.name == "small":
|
141 |
-
# Use only a subset of the data for the small configuration
|
142 |
-
small_subset = all_image_filenames[:1000] # For example, use only the first 1000 images
|
143 |
-
train_files, val_files, test_files = self._split_files(small_subset)
|
144 |
-
|
145 |
|
146 |
|
147 |
|
|
|
133 |
# Shuffle the list for random split
|
134 |
random.seed(42) # Set a random seed for reproducibility
|
135 |
random.shuffle(all_image_filenames)
|
136 |
+
|
137 |
+
num_files = len(all_image_filenames)
|
138 |
+
train_split_end = int(num_files * 0.7)
|
139 |
+
val_split_end = train_split_end + int(num_files * 0.15)
|
140 |
+
|
141 |
+
train_files = all_image_filenames[:train_split_end]
|
142 |
+
val_files = all_image_filenames[train_split_end:val_split_end]
|
143 |
+
test_files = all_image_filenames[val_split_end:]
|
144 |
|
145 |
+
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
146 |
|
147 |
|
148 |
|