Datasets:
Tasks:
Image Segmentation
Modalities:
Image
Formats:
parquet
Sub-tasks:
semantic-segmentation
Languages:
English
Size:
10K - 100K
License:
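Since the splits are published as parquet with Image-typed columns, they can be consumed directly through the datasets library. The sketch below is a minimal example, assuming a repo id of hassanjbara/ai4mars (a placeholder; the actual repository id is not shown on this page) and the split names produced by prepare.py further down (train, test_min1, test_min2, test_min3).

from datasets import load_dataset

REPO_ID = "hassanjbara/ai4mars"  # placeholder repo id, not confirmed by this page

# Image-typed columns decode lazily to PIL images on access.
train = load_dataset(REPO_ID, split="train")
test_min3 = load_dataset(REPO_ID, split="test_min3")

sample = train[0]
print(sample["image"].size)
print(sample["has_labels"], sample["has_masks"])
if sample["has_labels"]:
    print(sample["label_mask"].size)  # segmentation label mask, present only for labeled frames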
hassanjbara
committed on
Commit
•
93dbb79
1
Parent(s):
4ca0aeb
Create prepare.py
prepare.py +93 -0
prepare.py
ADDED
@@ -0,0 +1,93 @@
from datasets import Dataset, DatasetDict, Features, Image, Value
import os

# Script for preparing the dataset from a local directory.

def load_ai4mars_dataset(data_dir):
    # Define features
    features = Features({
        'image': Image(decode=True),
        'label_mask': Image(decode=True),
        'rover_mask': Image(decode=True),
        'range_mask': Image(decode=True),
        'has_masks': Value(dtype='bool'),
        'has_labels': Value(dtype='bool')
    })

    dataset_dict = {}
    train_data = {
        'image': [],
        'label_mask': [],
        'rover_mask': [],
        'range_mask': [],
        'has_masks': [],
        'has_labels': []
    }

    # Training data paths
    train_img_dir = os.path.join(data_dir, 'msl/images/edr')
    train_label_dir = os.path.join(data_dir, 'msl/labels/train')
    train_mxy_dir = os.path.join(data_dir, 'msl/images/mxy')
    train_range_dir = os.path.join(data_dir, 'msl/images/rng-30m')

    without_labels = 0
    without_masks = 0

    for img_name in os.listdir(train_img_dir):
        base_name = os.path.splitext(img_name)[0]
        img_path = os.path.join(train_img_dir, img_name)
        label_path = os.path.join(train_label_dir, f"{base_name}.png")
        rover_path = os.path.join(train_mxy_dir, f"{base_name}.png").replace('EDR', 'MXY')
        range_path = os.path.join(train_range_dir, f"{base_name}.png").replace('EDR', 'RNG')

        # Always add the image
        train_data['image'].append(img_path)

        # Check if label files exist
        has_labels = os.path.exists(label_path)
        has_masks = os.path.exists(rover_path) and os.path.exists(range_path)
        without_labels += 1 if not has_labels else 0
        without_masks += 1 if not has_masks else 0
        train_data['has_labels'].append(has_labels)
        train_data['has_masks'].append(has_masks)

        # Add paths if they exist, None if they don't
        train_data['label_mask'].append(label_path if os.path.exists(label_path) else None)
        train_data['rover_mask'].append(rover_path if os.path.exists(rover_path) else None)
        train_data['range_mask'].append(range_path if os.path.exists(range_path) else None)

    print(f"Training data without labels: {without_labels}")
    print(f"Training data without masks: {without_masks}")
    dataset_dict['train'] = Dataset.from_dict(train_data, features=features)

    # Load test data for each agreement level
    for agreement in ['min1', 'min2', 'min3']:
        test_data = {
            'image': [],
            'label_mask': [],
            'rover_mask': [],
            'range_mask': [],
            'has_masks': [],
            'has_labels': []
        }

        test_label_dir = os.path.join(data_dir, f'msl/labels/test/masked-gold-{agreement}-100agree')

        for label_name in os.listdir(test_label_dir):
            base_name = os.path.splitext(label_name)[0]
            img_path = os.path.join(data_dir, 'msl/images/edr', f"{base_name[:-len('_merged')]}.JPG")

            if os.path.exists(img_path):
                test_data['image'].append(img_path)
                test_data['label_mask'].append(os.path.join(test_label_dir, label_name))
                test_data['rover_mask'].append(os.path.join(train_mxy_dir, f"{base_name.replace('_merged', '').replace('EDR', 'MXY')}.png"))
                test_data['range_mask'].append(os.path.join(train_range_dir, f"{base_name.replace('_merged', '').replace('EDR', 'RNG')}.png"))
                test_data['has_labels'].append(True)
                test_data['has_masks'].append(True)

        dataset_dict[f'test_{agreement}'] = Dataset.from_dict(test_data, features=features)

    return DatasetDict(dataset_dict)

dataset = load_ai4mars_dataset("./ai4mars-dataset-merged-0.1")
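Once the script has run, the returned DatasetDict holds a train split plus one test split per annotator-agreement level (test_min1, test_min2, test_min3). A short usage sketch follows; the repo id in the push_to_hub call is a placeholder assumption, and pushing is what would produce the parquet shards listed in the card metadata above.

# Inspect the prepared splits and, optionally, publish them.
print(dataset)                       # train, test_min1, test_min2, test_min3
print(dataset["train"].features)

# Keep a local Arrow snapshot of the prepared data.
dataset.save_to_disk("./ai4mars_prepared")

# Uploading converts each split to parquet shards on the Hub.
# The repo id below is a placeholder, not taken from this commit.
# dataset.push_to_hub("hassanjbara/ai4mars")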