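# Hydra/OmegaConf experiment configuration for language-conditioned slot attention
# (ocl framework) trained on Visual Genome / COCO webdataset shards.
# Note: ${lambda_fn:...}, ${torch_dtype:...}, ${when_testing:...}, ${mul:...},
# ${slice:...}, ${isqrt:...} and the timm_model_* interpolations are custom
# OmegaConf resolvers that the framework is assumed to register before composition.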
dataset:
use_epochs: false
num_workers: 4
batch_size: ${experiment.batch_size_per_gpu}
_target_: ocl.datasets.WebdatasetDataModule
train_shards: ${oc.env:DATASET_PREFIX}/vg_disjoint_coco/train/shard-{000000..001217}.tar
train_size: 118287
val_shards: ${oc.env:DATASET_PREFIX}/vg/val/shard-{000000..000037}.tar
val_size: 5000
test_shards: ${oc.env:DATASET_PREFIX}/vg/test/shard-{000000..000037}.tar
test_size: 40670
use_autopadding: true
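# Per-sample transforms. SelectConditioningInfoVG picks the ground-truth entities
# used for slot conditioning, and CopyFields duplicates instance_mask into
# instance_mask_v2 for the metrics that match against selected ground-truth
# instances. Images are converted to float32 and normalized with the standard
# ImageNet mean/std; integer masks are one-hot encoded and padded with empty
# masks. Eval additionally keeps all_bbox_centroids for the text-overlay
# visualization.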
eval_transforms:
03a_preprocessing:
_target_: ocl.transforms.Map
transform:
_target_: torchvision.transforms.Compose
transforms:
- _target_: ocl.preprocessing.SelectConditioningInfoVG
num_max_binds: ${experiment.num_slots}
num_slots: ${experiment.num_slots}
- _target_: ocl.preprocessing.CopyFields
mapping:
instance_mask: instance_mask_v2
fields:
- image
- instance_mask
- instance_bbox
- name
- bbox_centroids
- name_embedding
- selected_indices
- contrastive_loss_mask
- all_bbox_centroids
batch_transform: false
03c_preprocessing:
_target_: ocl.transforms.SimpleTransform
transforms:
image:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda image: image.copy()''}'
- _target_: torchvision.transforms.v2.ToImage
- _target_: torchvision.transforms.v2.ToDtype
dtype: ${torch_dtype:float32}
scale: true
- _target_: torchvision.transforms.v2.Normalize
mean:
- 0.485
- 0.456
- 0.406
std:
- 0.229
- 0.224
- 0.225
instance_mask:
_target_: torchvision.transforms.Compose
transforms:
- _target_: ocl.preprocessing.IntegerToOneHotMask
output_axis: -3
- _target_: ocl.preprocessing.AddEmptyMasksVG
- _target_: ocl.preprocessing.DenseMaskToTensor
instance_mask_v2:
_target_: torchvision.transforms.Compose
transforms:
- _target_: ocl.preprocessing.IntegerToOneHotMask
output_axis: -3
- _target_: ocl.preprocessing.AddEmptyMasksVG
- _target_: ocl.preprocessing.DenseMaskToTensor
batch_transform: false
train_transforms:
03a_preprocessing:
_target_: ocl.transforms.Map
transform:
_target_: torchvision.transforms.Compose
transforms:
- _target_: ocl.preprocessing.SelectConditioningInfoVG
num_max_binds: ${experiment.num_slots}
num_slots: ${experiment.num_slots}
- _target_: ocl.preprocessing.CopyFields
mapping:
instance_mask: instance_mask_v2
fields:
- image
- instance_mask
- instance_bbox
- name
- bbox_centroids
- name_embedding
- selected_indices
- contrastive_loss_mask
batch_transform: false
03b_preprocessing:
_target_: ocl.transforms.SimpleTransform
transforms:
image:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda image: image.copy()''}'
- _target_: torchvision.transforms.v2.ToImage
- _target_: torchvision.transforms.v2.ToDtype
dtype: ${torch_dtype:float32}
scale: true
- _target_: torchvision.transforms.v2.Normalize
mean:
- 0.485
- 0.456
- 0.406
std:
- 0.229
- 0.224
- 0.225
name_embedding:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda name_embedding: name_embedding.copy()''}'
- _target_: ocl.preprocessing.ToTensor
bbox_centroids:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda bbox_centroids: bbox_centroids.copy()''}'
- _target_: ocl.preprocessing.ToTensor
all_bbox_centroids:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda all_bbox_centroids: all_bbox_centroids.copy()''}'
- _target_: ocl.preprocessing.ToTensor
selected_indices:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda selected_indices: selected_indices.copy()''}'
- _target_: ocl.preprocessing.ToTensor
contrastive_loss_mask:
_target_: torchvision.transforms.Compose
transforms:
- '${lambda_fn:''lambda contrastive_loss_mask: contrastive_loss_mask.copy()''}'
- _target_: ocl.preprocessing.ToTensor
instance_mask:
_target_: torchvision.transforms.Compose
transforms:
- _target_: ocl.preprocessing.IntegerToOneHotMask
output_axis: -3
- _target_: ocl.preprocessing.AddEmptyMasksVG
- _target_: ocl.preprocessing.DenseMaskToTensor
instance_mask_v2:
_target_: torchvision.transforms.Compose
transforms:
- _target_: ocl.preprocessing.IntegerToOneHotMask
output_axis: -3
- _target_: ocl.preprocessing.AddEmptyMasksVG
- _target_: ocl.preprocessing.DenseMaskToTensor
batch_transform: false
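# Model graph. The routed.* wrappers are assumed to resolve *_path keys against
# the shared output dictionary, so data flows:
#   input.image -> feature_extractor (frozen DINOv2 ViT, level-12 features)
#   -> mapping (MLP) -> perceptual_grouping (slot attention, conditioned on
#   name embeddings via LangConditioning) -> object_decoder (patch decoder
#   reconstructing the feature map, with language-conditioned decoding).
# projector_slots and dual_embedding produce the paired slot/text embeddings
# consumed by the contrastive loss below.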
models:
feature_extractor:
_target_: routed.ocl.feature_extractors.TimmFeatureExtractor
model_name: ${experiment.timm_model}
pretrained: ${when_testing:false,true}
freeze: true
feature_level: 12
video_path: input.image
dynamic_img_size: true
mapping:
_target_: routed.ocl.mapping.MLPMapping
dim: ${experiment.feature_dim}
x_path: feature_extractor
conditioning:
_target_: routed.ocl.conditioning.LangConditioning
n_slots: ${experiment.num_slots}
object_dim: ${experiment.slot_dim}
dual_conditioning: false
name_embedding_path: input.name_embedding
batch_size_path: input.batch_size
mask_path: input.contrastive_loss_mask
perceptual_grouping:
_target_: routed.ocl.perceptual_grouping.SlotAttentionGrouping
feature_dim: ${.object_dim}
object_dim: ${experiment.slot_dim}
use_projection_bias: false
positional_embedding:
_target_: ocl.neural_networks.wrappers.Sequential
_args_:
- _target_: ocl.neural_networks.positional_embedding.DummyPositionEmbed
- _target_: ocl.neural_networks.build_two_layer_mlp
input_dim: ${experiment.feature_dim}
output_dim: ${....feature_dim}
hidden_dim: '${mul: ${experiment.feature_dim}, 2}'
initial_layer_norm: true
ff_mlp:
_target_: ocl.neural_networks.build_two_layer_mlp
input_dim: ${..object_dim}
output_dim: ${..object_dim}
hidden_dim: '${mul: ${..object_dim}, 4}'
initial_layer_norm: true
residual: true
feature_path: mapping
conditioning_path: conditioning
attn_aggregation:
_target_: routed.ocl.heads.PatchClipAttentionAggregationHead
dim: ${experiment.feature_dim}
attn_path: perceptual_grouping.feature_attributions
x_path: input.image
projector_slots:
_target_: routed.ocl.heads.SlotProjectorHead
dim: 512
embedding_dim: 512
slots_path: attn_aggregation
dual_embedding:
_target_: routed.ocl.heads.CLIPLangEmbeddingHead
embedding_dim: 512
names_batch_path: input.name
dec_conditioning:
_target_: routed.ocl.decoder_conditioning.EncodeLangConditioning
dim: ${experiment.slot_dim}
language_path: input.name_embedding
mask_path: input.contrastive_loss_mask
object_decoder:
_target_: routed.ocl.decoding.PatchDecoder
decoder:
_target_: ocl.neural_networks.build_mlp
_partial_: true
features:
- 2048
- 2048
- 2048
object_dim: ${experiment.slot_dim}
output_dim: ${experiment.feature_dim}
num_patches: ${experiment.num_patches}
object_features_path: perceptual_grouping.objects
image_path: input.image
conditioned: true
condition_info_path: dec_conditioning
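# Single AdamW optimizer with two parameter groups: the grouping/conditioning/
# decoder stack at the full learning rate (total_lr) and the mapping/projector
# heads at the smaller mapping_lr. The schedule warms up for 10k steps, then
# decays exponentially (rate 0.5 per 100k steps).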
optimizers:
opt0:
_target_: ocl.optimization.OptimizationWrapper
optimizer:
_target_: torch.optim.AdamW
_partial_: true
lr: ${experiment.total_lr}
lr_scheduler:
_target_: ocl.scheduling.exponential_decay_after_optional_warmup
_partial_: true
decay_rate: 0.5
decay_steps: 100000
warmup_steps: 10000
parameter_groups:
_target_: ocl.optimization.ParameterGroupCreator
param_groups:
grouping:
params:
- models.perceptual_grouping
- models.conditioning
- models.object_decoder
- models.dec_conditioning
lr: ${experiment.total_lr}
weight_decay: 0.0
encoder:
params:
- models.mapping
- models.projector_slots
lr: ${experiment.mapping_lr}
weight_decay: 0.0
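# Training objective: feature-space MSE reconstruction of the extractor features
# plus a diagonal contrastive loss (weight 0.2, temperature 0.1) aligning the
# projected slot embeddings with the CLIP language embeddings of the matched names.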
losses:
mse:
_target_: routed.ocl.losses.ReconstructionLoss
loss_type: mse
input_path: object_decoder.reconstruction
target_path: feature_extractor.features
contrastive_loss:
_target_: routed.ocl.losses.DiagonalContrastiveLoss
x1_path: projector_slots
x2_path: dual_embedding
contrastive_loss_mask_path: input.contrastive_loss_mask
temp: 0.1
batch_contrastive: true
weight: 0.2
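# Logged visualizations: denormalized input images, decoder masks, and predicted
# segmentations (with and without text/centroid overlays).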
visualizations:
input:
_target_: routed.ocl.visualizations.Image
n_instances: 32
denormalization:
_target_: ocl.preprocessing.Denormalize
mean:
- 0.485
- 0.456
- 0.406
std:
- 0.229
- 0.224
- 0.225
image_path: input.image
masks:
_target_: routed.ocl.visualizations.Mask
mask_path: object_decoder.masks_as_image
pred_segmentation:
_target_: routed.ocl.visualizations.Segmentation
denormalization:
_target_: ocl.preprocessing.Denormalize
mean:
- 0.485
- 0.456
- 0.406
std:
- 0.229
- 0.224
- 0.225
image_path: input.image
mask_path: object_decoder.masks_as_image
pred_segmentation_with_text:
_target_: routed.ocl.visualizations.SegmentationWithText
n_instances: 32
denormalization:
_target_: ocl.preprocessing.Denormalize
mean:
- 0.485
- 0.456
- 0.406
std:
- 0.229
- 0.224
- 0.225
image_path: input.image
mask_path: object_decoder.masks_as_image
gt_masks_path: input.instance_mask_v2
selected_indices_path: input.selected_indices
text_path: input.name
bbox_centroids_path: input.all_bbox_centroids
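# PyTorch Lightning trainer: 100k optimizer steps, validation every 5k steps,
# gradient clipping at 1.0, TensorBoard and Weights & Biases logging.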
trainer:
_target_: pytorch_lightning.trainer.trainer.Trainer
accelerator: auto
strategy: auto
devices: 1
num_nodes: 1
precision: null
logger:
- _target_: pytorch_lightning.loggers.TensorBoardLogger
save_dir: .
name: tb
version: ''
- _target_: pytorch_lightning.loggers.WandbLogger
project: ${slice:${hydra:runtime.choices.experiment},"/", 1}_${slice:${hydra:runtime.choices.experiment},"/", 2}
name: ${slice:${hydra:runtime.choices.experiment},"/","3:"}
log_model: false
callbacks: ${oc.dict.values:experiment.callbacks}
fast_dev_run: false
max_epochs: -1
min_epochs: null
max_steps: 100000
min_steps: null
max_time: null
limit_train_batches: null
limit_val_batches: null
limit_test_batches: null
limit_predict_batches: null
overfit_batches: 0.0
val_check_interval: 5000
check_val_every_n_epoch: null
num_sanity_val_steps: null
log_every_n_steps: 100
enable_checkpointing: null
enable_progress_bar: null
enable_model_summary: null
accumulate_grad_batches: 1
gradient_clip_val: 1.0
gradient_clip_algorithm: null
deterministic: null
benchmark: null
inference_mode: true
use_distributed_sampler: true
profiler: null
detect_anomaly: false
barebones: false
plugins: null
sync_batchnorm: false
reload_dataloaders_every_n_epochs: 0
default_root_dir: .
training_vis_frequency: 10000
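# Embedding-retrieval accuracies between slot and language embeddings:
# sc = slot-to-class, cs = class-to-slot, average = both directions
# (assumed semantics of EmbAccMetric's mode flag).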
training_metrics:
acc_sc:
_target_: routed.ocl.metrics.acc.EmbAccMetric
mode: sc
slot_emb_path: projector_slots
ctrl_emb_path: dual_embedding
mask_idx_path: input.contrastive_loss_mask
acc_cs:
_target_: routed.ocl.metrics.acc.EmbAccMetric
mode: cs
slot_emb_path: projector_slots
ctrl_emb_path: dual_embedding
mask_idx_path: input.contrastive_loss_mask
acc_avg:
_target_: routed.ocl.metrics.acc.EmbAccMetric
mode: average
slot_emb_path: projector_slots
ctrl_emb_path: dual_embedding
mask_idx_path: input.contrastive_loss_mask
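# Mask-quality metrics on the decoder masks: binding hits against the selected
# ground-truth instances, instance ARI, and unsupervised mask IoU (mBO) both
# against all instances and matched to the selected ground truth, plus the same
# retrieval accuracies as during training.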
evaluation_metrics:
binding_hits:
_target_: routed.ocl.metrics.BindingHits
prediction_path: object_decoder.masks_as_image
target_path: input.instance_mask_v2
selected_indices_path: input.selected_indices
use_threshold: false
matching: best_overlap
ignore_overlaps: false
instance_ari:
_target_: routed.ocl.metrics.ARIMetric
prediction_path: object_decoder.masks_as_image
target_path: input.instance_mask_v2
foreground: false
convert_target_one_hot: true
ignore_overlaps: true
instance_mbo:
_target_: routed.ocl.metrics.UnsupervisedMaskIoUMetric
prediction_path: object_decoder.masks_as_image
target_path: input.instance_mask
use_threshold: false
matching: best_overlap
ignore_overlaps: true
gt_matched_instance_mbo:
_target_: routed.ocl.metrics.UnsupervisedMaskIoUMetric
prediction_path: object_decoder.masks_as_image
target_path: input.instance_mask_v2
selected_indices_path: input.selected_indices
use_threshold: false
matching: best_overlap
ignore_overlaps: true
acc_sc:
_target_: routed.ocl.metrics.acc.EmbAccMetric
mode: sc
slot_emb_path: projector_slots
ctrl_emb_path: dual_embedding
mask_idx_path: input.contrastive_loss_mask
acc_cs:
_target_: routed.ocl.metrics.acc.EmbAccMetric
mode: cs
slot_emb_path: projector_slots
ctrl_emb_path: dual_embedding
mask_idx_path: input.contrastive_loss_mask
acc_avg:
_target_: routed.ocl.metrics.acc.EmbAccMetric
mode: average
slot_emb_path: projector_slots
ctrl_emb_path: dual_embedding
mask_idx_path: input.contrastive_loss_mask
load_checkpoint: null
load_checkpoint_partial: null
modules_to_load: null
trainable_models: null
seed: null
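# Experiment-level hyperparameters; the sections above reference these via
# ${experiment.*} interpolation.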
experiment:
callbacks: {}
checkpoint_every_n_steps: 1000
image_size: 224
mask_size: ${.image_size}
batch_size_per_gpu: 64
base_learning_rate: 0.0004
max_num_binds: 7
slot_dim: 256
num_slots: 7
timm_model: vit_small_patch14_dinov2.lvd142m
feature_dim: '${timm_model_dim: ${.timm_model}}'
num_patches: '${timm_model_num_patches: ${.timm_model}, ${.image_size}}'
num_patches_per_side: '${isqrt: ${.num_patches}}'
patch_size: '${timm_model_patch_size: ${.timm_model}}'
total_batch_size: '${mul: ${trainer.devices}, ${.batch_size_per_gpu}}'
total_lr: 0.0004
mapping_lr: '${mul: 0.05, ${.total_lr}}'
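# Resolved values for reference, assuming the timm_model_* resolvers report
# patch size 14 and feature dim 384 for vit_small_patch14_dinov2.lvd142m:
#   num_patches          = (224 / 14)^2 = 256
#   num_patches_per_side = isqrt(256)   = 16
#   total_batch_size     = 1 * 64       = 64
#   mapping_lr           = 0.05 * 4e-4  = 2e-5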