soniajoseph committed on
Commit
c70523c
1 Parent(s): 493ee46

Update README.md

Files changed (1)
  1. README.md +71 -0
README.md CHANGED
@@ -42,6 +42,77 @@ Finally `train.json`, `val.json`, `test.json` store box, label, score and path i
   "labels": ["bird", "dirt field", "vulture", "land"],
   "masks": ["masks/val_masks/ILSVRC2012_val_00000025_n01616318_00.png", "masks/val_masks/ILSVRC2012_val_00000025_n01616318_01.png", "masks/val_masks/ILSVRC2012_val_00000025_n01616318_02.png", "masks/val_masks/ILSVRC2012_val_00000025_n01616318_03.png"]
  }
  ```
+
+ You can use this dataloader for your patch-level labels. The patch size is a hyperparameter.
+
+ ```
+ import numpy as np
+ from torch.utils.data import Dataset
+ from torchvision import transforms
+
+
+ class PatchDataset(Dataset):
+     def __init__(self, dataset, patch_size=16, width=224, height=224):
+         """
+         dataset: a list of dictionaries, each corresponding to one image and its details
+         """
+         self.dataset = dataset
+         self.transform = transforms.Compose([
+             transforms.Resize((height, width)),  # Resize the image (Resize expects (h, w))
+             # transforms.Grayscale(num_output_channels=3),  # Optionally convert to 3-channel grayscale
+             transforms.ToTensor(),  # Convert the image to a tensor
+         ])
+         self.patch_size = patch_size
+         self.width = width
+         self.height = height
+
+     def __len__(self):
+         return len(self.dataset)
+
+     def __getitem__(self, idx):
+         item = self.dataset[idx]
+         image = self.transform(item['image'])
+         masks = item['masks']      # PIL mask images, assumed aligned with labels
+         labels = item['labels']
+
+         # Size of the reduced (patch-level) label grid
+         num_patches = self.width // self.patch_size
+         label_array = [[[] for _ in range(num_patches)] for _ in range(num_patches)]
+
+         for mask, label in zip(masks, labels):
+             # Resize the mask to the image resolution, binarize it, and reduce it to patch resolution
+             mask = mask.resize((self.width, self.height))
+             mask_array = np.array(mask) > 0
+             reduced_mask = self.reduce_mask(mask_array)
+
+             # Populate the label grid based on the reduced mask
+             for i in range(num_patches):
+                 for j in range(num_patches):
+                     if reduced_mask[i, j]:
+                         label_array[i][j].append(label)
+
+         # label_array is a list of lists of label lists; convert it to a
+         # tensor-friendly format if your training loop requires one
+         return image, label_array
+
+     def reduce_mask(self, mask):
+         """
+         Reduce the mask by dividing it into patches and checking whether there is
+         at least one True value within each patch.
+         """
+         # Patch-grid height and width
+         new_h = mask.shape[0] // self.patch_size
+         new_w = mask.shape[1] // self.patch_size
+
+         reduced_mask = np.zeros((new_h, new_w), dtype=bool)
+
+         for i in range(new_h):
+             for j in range(new_w):
+                 patch = mask[i*self.patch_size:(i+1)*self.patch_size, j*self.patch_size:(j+1)*self.patch_size]
+                 reduced_mask[i, j] = np.any(patch)  # True if any pixel in the patch is foreground
+
+         return reduced_mask
+ ```
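+
+ For example, you might build the `dataset` list and read out patch-level labels as below. This is a minimal sketch: it assumes `val.json` is a list of records shaped like the example above, that each record stores its image path under an `"image"` key (check the JSON for the exact key), and that all paths are relative to the dataset root.
+
+ ```
+ import json
+
+ import torch
+ from PIL import Image
+ from torch.utils.data import DataLoader
+
+ # Build the list of dictionaries expected by PatchDataset.
+ # NOTE: the "image" key is an assumption; use whatever key your JSON actually provides.
+ with open("val.json") as f:
+     records = json.load(f)
+
+ dataset = [{
+     "image": Image.open(rec["image"]).convert("RGB"),
+     "masks": [Image.open(p) for p in rec["masks"]],
+     "labels": rec["labels"],
+ } for rec in records]
+
+ patch_ds = PatchDataset(dataset, patch_size=16)
+
+ # label_array is a nested Python list, so the default collate_fn cannot batch it;
+ # stack only the image tensors and keep the label grids as a plain list.
+ def collate(batch):
+     images = torch.stack([image for image, _ in batch])
+     labels = [label_array for _, label_array in batch]
+     return images, labels
+
+ loader = DataLoader(patch_ds, batch_size=8, collate_fn=collate)
+ images, patch_labels = next(iter(loader))
+ print(images.shape)            # e.g. torch.Size([8, 3, 224, 224])
+ print(patch_labels[0][0][0])   # labels overlapping the top-left 16x16 patch of the first image
+ ```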
+
  ### Citation