Upload weights, notebooks, sample images
- configs/config_inference.yaml +0 -29
- configs/decoder_pretrain_448.yaml +0 -307
- configs/decoder_pretrain_896.yaml +0 -295
- configs/figuremeout.yaml +0 -306
- configs/pretrain_tki.yaml +0 -306
configs/config_inference.yaml
DELETED
@@ -1,29 +0,0 @@
-# weights_path: run "unreflectanything download-weights" then use ~/.cache/unreflectanything/weights/ (or set path below)
-run: "gallant-bush-806"
-runs_dir: "/anvme/workspace/v120bb18-unreflectanything/runs"
-weights_path: "/anvme/workspace/v120bb18-unreflectanything/results/gallant-bush-806/models/full_model_weights.pt"
-
-model_config_path: "/anvme/workspace/v120bb18-unreflectanything/config_train.yaml"
-model_module: "models"
-input_dir: "/anvme/workspace/v120bb18-unreflectanything/benchmark/data/input"
-output_dir: "/anvme/workspace/v120bb18-unreflectanything/benchmark/data/OURS_L1"
-batch_size: 32
-num_workers: 4
-chunk_size: 8
-device: "cuda"
-image_extensions: [".png", ".jpg", ".jpeg"]
-resize_output: True
-brightness_threshold: 0.8
-inpaint_mask_dilation: 11
-monitor_usage: True
-
-# serene-terrain-817 : SoftTHR ablation - Rebuttal
-# smooth-vortex-816 : Dice ablation - Rebuttal
-# super-microwave-815 : TV ablation - Rebuttal
-# magic-brook-814 : DWConv ablation - Rebuttal
-# faithful-music-813 : Learned mask token ablation - Rebuttal
-# fresh-fire-811 : Positional encoding ablation - Rebuttal
-# clean-haze-809 : Spec ablation - Rebuttal
-# dainty-paper-808 : Seam ablation - Rebuttal
-# leafy-glade-807 : RGB ablation - Rebuttal
-# gallant-bush-806 : L1 ablation - Rebuttal
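The inference config above is flat YAML, so it can be consumed with a plain `yaml.safe_load`. A minimal loader sketch follows, assuming the file layout shown in the diff; `load_inference_config` and the cached-weights filename are illustrative assumptions, not the repository's actual API.

```python
# Hypothetical loader for configs/config_inference.yaml; helper names are assumptions.
from pathlib import Path

import yaml

def load_inference_config(path: str) -> dict:
    cfg = yaml.safe_load(Path(path).read_text())
    # The config comment points to ~/.cache/unreflectanything/weights/ when
    # weights_path is not set explicitly (the filename appended here is assumed).
    if not cfg.get("weights_path"):
        cfg["weights_path"] = str(
            Path.home() / ".cache/unreflectanything/weights/full_model_weights.pt"
        )
    # inpaint_mask_dilation must be odd so the dilation kernel has a center pixel.
    assert cfg["inpaint_mask_dilation"] % 2 == 1, "inpaint_mask_dilation must be odd"
    return cfg

cfg = load_inference_config("configs/config_inference.yaml")
print(cfg["run"], cfg["device"], cfg["batch_size"])
```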
configs/decoder_pretrain_448.yaml
DELETED
@@ -1,307 +0,0 @@
-### BASELINE: CONVERGES AFTER A LONG TIME
-
-parameters:
-
-  ### MODEL ARCHITECTURE
-  MODEL:
-    value:
-      MODEL_CLASS: "UnReflect_Model" # Main model class name (must match class in models.py)
-      MODEL_MODULE: "models" # Module name to import model classes from (default: "models")
-      RGB_ENCODER:
-        ENCODER: "facebook/dinov3-vitl16-pretrain-lvd1689m" # DINOv3 encoder model name (HuggingFace format)
-        IMAGE_SIZE: 448 # Input image size (height and width in pixels)
-        RETURN_SELECTED_LAYERS: [3, 6, 9, 12] # Transformer layer indices to extract features from (0-indexed)
-        RGB_ENCODER_LR: 0.0 # Learning rate for RGB encoder (0.0 = frozen, must be explicitly set)
-      DECODERS:
-        diffuse:
-          USE_FILM: False # Enable FiLM (Feature-wise Linear Modulation) conditioning in decoder
-          FEATURE_DIM: 1024 # Feature dimension for decoder (should match encoder output)
-          REASSEMBLE_OUT_CHANNELS: [768,1024,1536,2048] # Output channels for each decoder stage (DPT-style reassembly)
-          REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
-          READOUT_TYPE: "ignore" # Readout type for DPT decoder ("ignore", "project", etc.)
-          # FROM_PRETRAINED: "weights/rgb_decoder.pth" # Path to pretrained decoder weights (optional)
-          USE_BN: False # Use batch normalization in decoder
-          DROPOUT: 0.1 # Dropout rate in decoder layers
-          OUTPUT_IMAGE_SIZE: [448,448] # Output image resolution [height, width]
-          OUTPUT_CHANNELS: 3 # Number of output channels (3 for RGB diffuse image)
-          DECODER_LR: 1.0e-3 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
-          NUM_FUSION_BLOCKS_TRAINABLE: null # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
-          TRAIN_RGB_HEAD: True # Whether to train RGB head (true/false, null = train if DECODER_LR != 0)
-        highlight:
-          USE_FILM: False # Enable FiLM conditioning in highlight decoder
-          FEATURE_DIM: 1024 # Feature dimension for highlight decoder
-          REASSEMBLE_OUT_CHANNELS: [96,192,384,768] # Output channels for each decoder stage
-          REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
-          READOUT_TYPE: "ignore" # Readout type for DPT decoder
-          USE_BN: False # Use batch normalization in decoder
-          DROPOUT: 0.1 # Dropout rate in decoder layers
-          OUTPUT_IMAGE_SIZE: [448,448] # Output image resolution [height, width]
-          OUTPUT_CHANNELS: 1 # Number of output channels (1 for highlight mask)
-          DECODER_LR: 5.0e-4 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
-          NUM_FUSION_BLOCKS_TRAINABLE: null # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
-      TOKEN_INPAINTER:
-        TOKEN_INPAINTER_CLASS: "TokenInpainter_Prior" # Token inpainter class name
-        TOKEN_INPAINTER_MODULE: "token_inpainters" # Module name to import token inpainter from
-        FROM_PRETRAINED: "weights/token_inpainter.pth" # Path to pretrained token inpainter weights (optional)
-        TOKEN_INPAINTER_LR: 1.0e-5 # Learning rate for token inpainter (can differ from base LR)
-        DEPTH: 6 # Number of transformer blocks
-        HEADS: 16 # Number of attention heads
-        DROP: 0 # Dropout rate
-        USE_POSITIONAL_ENCODING: True # Enable 2D sinusoidal positional encodings
-        USE_FINAL_NORM: True # Enable final LayerNorm before output projection
-        USE_LOCAL_PRIOR: True # Blend local mean prior for masked seeds
-        LOCAL_PRIOR_WEIGHT: 0.5 # Weight for local prior blending (1.0 = only mask_token, 0.0 = only local mean)
-        LOCAL_PRIOR_KERNEL: 5 # Kernel size for local prior blending (> 1)
-        SEED_NOISE_STD: 0.02 # Standard deviation of noise added to masked seeds during training
-  INPAINT_MASK_DILATION:
-    value: 11 # Dilation kernel size (pixels) for inpaint mask - must be odd
-  USE_TORCH_COMPILE: # Enable PyTorch 2.0 torch.compile for faster training (experimental)
-    value: True
-
-  ### DATA
-  DATASETS:
-    value:
-      SCRREAM:
-        VAL_SCENES: ["scene10_full_00","scene11_full_00","scene044_full_00","scene04_reduced_00","scene04_reduced_01","scene04_reduced_02"] # List of validation scene names
-        TARGET_SIZE: [448,448] # Target image size [height, width] in pixels
-        RESIZE_MODE: "resize+crop" # Image resizing mode: "resize", "crop", "resize+crop", or "pad"
-        FEW_IMAGES: False # If True, load only first 10 images per scene (for quick debugging)
-        SAMPLE_EVERY_N: 4 # Load every Nth frame from each scene (1 = all frames, 4 = every 4th frame)
-        LOAD_RGB_ONLY: True # If True, ignore polarization data and load only RGB images
-
-      HOUSECAT6D:
-        VAL_SCENES: ["val_scene1","val_scene2"] # Validation scene names
-        TARGET_SIZE: [448,448] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        FEW_IMAGES: False # Load only first 10 images if True
-        SAMPLE_EVERY_N: 4 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-
-      CROMO:
-        TRAIN_SCENES: ["kitchen"] # Training scene names (list or string)
-        # VAL_SCENES: "station" # Validation scene names (optional)
-        TARGET_SIZE: [448,448] # Target image size [height, width]
-        RESIZE_MODE: "resize" # Image resizing mode
-        FEW_IMAGES: False # Load only first 10 images if True
-        SAMPLE_EVERY_N: 8 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-
-      PSD:
-        TRAIN_SCENES: "PSD_Train" # Training scene name (string or list)
-        VAL_SCENES: "PSD_Val" # Validation scene name (string or list)
-        TARGET_SIZE: [448,448] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        FEW_IMAGES: False # Load only first 10 images if True
-        SAMPLE_EVERY_N: 1 # Load every Nth frame (1 = all frames)
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-
-      SCARED:
-        VAL_SCENES: ["v22","v23","v24","v25","v26","v27","v28","v29","v30","v31","v32","v33","v34"] # Validation scene names
-        TARGET_SIZE: [448,448] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        SAMPLE_EVERY_N: 4 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-        FEW_IMAGES: False # Load only first 10 images if True
-        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing in dataset
-        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection (0-1)
-        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
-        HIGHLIGHT_RECT_SIZE: [1000, 1000] # Size of highlight rectangle region [height, width]
-        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
-        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
-
-      STEREOMIS_TRACKING:
-        VAL_SCENES: ["P2_2"] # Validation scene names
-        TARGET_SIZE: [448,448] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        SAMPLE_EVERY_N: 10 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-        FEW_IMAGES: False # Load only first 10 images if True
-        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
-        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
-        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
-        HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
-        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
-        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
-
-      CHOLEC80:
-        VAL_SCENES: ["val"] # Validation scene names
-        TARGET_SIZE: [448,448] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        SAMPLE_EVERY_N: 50 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-        FEW_IMAGES: False # Load only first 10 images if True
-        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
-        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
-        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
-        HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
-        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
-        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
-
-      # POLARGB:
-      #   TRAIN_SCENES: "train"
-      #   VAL_SCENES: "test"
-      #   TARGET_SIZE: [448,448]
-      #   RESIZE_MODE: "resize+crop"
-      #   SAMPLE_EVERY_N: 1
-      #   LOAD_RGB_ONLY: True
-
-  BATCH_SIZE: # Max batch size with img size 448 is 32
-    value: 16 # Number of samples per batch (adjust based on GPU memory)
-  NUM_WORKERS:
-    value: 12 # Number of data loading worker processes (0 = main process only)
-  SHUFFLE:
-    value: True # Shuffle training data each epoch (False for validation/test)
-  PIN_MEMORY:
-    value: True # Pin memory in DataLoader for faster GPU transfer (recommended: True)
-  PREFETCH_FACTOR:
-    value: 2 # Number of batches to prefetch per worker (higher = more memory usage)
-
-  ### HIGHLIGHTS
-  MOGE_MODEL:
-    value: "Ruicheng/moge-2-vits-normal" # MoGe model name for normal estimation (HuggingFace format)
-  SURFACE_ROUGHNESS:
-    value: 8.0 # Blinn-Phong surface roughness exponent (higher = sharper highlights)
-  INTENSITY:
-    value: 0.0 # Specular highlight intensity multiplier
-  LIGHT_DISTANCE_RANGE:
-    value: [0.0, 1] # Range for light source distance sampling [min, max] (normalized)
-  LIGHT_LEFT_RIGHT_ANGLE:
-    value: [0, 360] # Range for light source horizontal angle [min, max] in degrees
-  LIGHT_ABOVE_BELOW_ANGLE:
-    value: [0, 360] # Range for light source vertical angle [min, max] in degrees
-  DATASET_HIGHLIGHT_DILATION:
-    value: 25 # Dilation kernel size (pixels) for dataset highlight masks
-  DATASET_HIGHLIGHT_THRESHOLD:
-    value: 0.9 # Brightness/luminance threshold (0-1) for detecting highlights in dataset images
-  DATASET_HIGHLIGHT_USE_LUMINANCE:
-    value: True # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B) for dataset highlights; if False, use simple mean brightness
-  HIGHLIGHT_COLOR:
-    value: [1.0, 1.0, 1.0] # RGB color for synthetic highlights (normalized 0-1)
-  CLAMP_RECONSTRUCTION:
-    value: True # Clamp reconstructed images to [0, 1] range if True
-
-  ### OPTIMIZATION
-  LEARNING_RATE:
-    value: 1.0e-3 # Base learning rate for optimizer
-  WEIGHT_DECAY:
-    value: 0.0 # L2 regularization weight (0.0 = no weight decay)
-  EPOCHS:
-    value: 50 # Maximum number of training epochs
-  GRADIENT_ACCUMULATION_STEPS:
-    value: 1 # Number of steps to accumulate gradients before optimizer step (1 = no accumulation)
-  WARMUP:
-    value: 100 # Number of warmup steps for learning rate schedule (linear warmup from 0 to LR)
-  GRADIENT_CLIPPING_MAX_NORM:
-    value: 8 # Maximum gradient norm for clipping (set to -1 to disable clipping)
-  LR_SCHEDULER:
-    value:
-      ONPLATEAU: # ReduceLROnPlateau scheduler (reduces LR when validation metric plateaus)
-        PATIENCE: 5 # Number of epochs to wait before reducing LR
-        FACTOR: 0.1 # Factor by which LR is reduced (new_lr = old_lr * factor)
-      # COSINE: # CosineAnnealingLR scheduler (cosine annealing schedule)
-      #   N_PERIODS: 5 # Number of cosine periods over training
-      STEPWISE: # StepLR scheduler (reduces LR at fixed step intervals)
-        N_STEPS: 4 # Number of times to reduce LR during training
-        GAMMA: 0.5 # Factor by which LR is reduced at each step (new_lr = old_lr * gamma)
-      # EXPONENTIAL: # ExponentialLR scheduler (exponential decay)
-      #   GAMMA: 0.5 # Multiplicative factor for exponential decay
-
-  SWITCH_OPTIMIZER_EPOCH:
-    value: null # Epoch number to switch from bootstrap to refining optimizer (null = no switch)
-  OPTIMIZER_BOOTSTRAP_NAME:
-    value: "AdamW" # Optimizer name for initial training phase ("Adam", "SGD", etc.)
-  OPTIMIZER_REFINING_NAME:
-    value: "AdamW" # Optimizer name for refining phase (used after SWITCH_OPTIMIZER_EPOCH)
-  EARLY_STOPPING_PATIENCE:
-    value: 20 # Number of epochs without improvement before stopping training
-  SAVE_INTERVAL:
-    value: 1000 # Number of training steps between model checkpoints
-
-  DATASET_HIGHLIGHT_SUPERVISION_THRESHOLD:
-    value: 0.1 # Pixel highlights above this threshold (should be low) are excluded from supervision
-
-  ### LOSS WEIGHTS (relative to the total loss, NOT NORMALIZED LATER)
-  SPECULAR_LOSS_WEIGHT:
-    value: 1.0 # Weight for specular component reconstruction loss
-  DIFFUSE_LOSS_WEIGHT:
-    value: 1.0 # Weight for diffuse component reconstruction loss
-  HIGHLIGHT_LOSS_WEIGHT:
-    value: 1.0 # Weight for highlight mask regression loss
-  IMAGE_RECONSTRUCTION_LOSS_WEIGHT:
-    value: 0.0 # Weight for full image reconstruction loss
-  SATURATION_RING_LOSS_WEIGHT:
-    value: 0.5 # Weight for saturation ring consistency loss (around highlight regions)
-  RING_KERNEL_SIZE:
-    value: 11 # Kernel size (odd number) for saturation ring dilation around highlights
-  RING_VAR_WEIGHT:
-    value: 0.5 # Weight for variance matching in saturation ring loss (vs mean matching)
-  RING_TEXTURE_WEIGHT:
-    value: 0.0 # Weight for texture consistency term in saturation ring loss
-  HLREG_W_L1:
-    value: 1.0 # Weight for L1 loss in highlight regression
-  HLREG_USE_CHARB:
-    value: True # Use Charbonnier loss (smooth L1) instead of standard L1 if True
-  HLREG_W_DICE:
-    value: 0.2 # Weight for Dice loss in highlight regression (for mask overlap)
-  HLREG_W_SSIM:
-    value: 0.0 # Weight for SSIM loss in highlight regression
-  HLREG_W_GRAD:
-    value: 0.0 # Weight for gradient loss in highlight regression
-  HLREG_W_TV:
-    value: 0.0 # Weight for total variation loss in highlight regression
-  HLREG_BALANCE_MODE:
-    value: "auto" # Class balancing mode for highlight regression: 'none' | 'auto' | 'pos_weight'
-  HLREG_POS_WEIGHT:
-    value: 1.0 # Positive class weight (used only if BALANCE_MODE == 'pos_weight')
-  HLREG_FOCAL_GAMMA:
-    value: 2.0 # Focal loss gamma parameter (0.0 = standard BCE, 1.0-2.0 helps with gradient vanishing)
-
-  WEIGHT_TOKEN_INPAINT:
-    value: 0.0 # Weight for token-space inpainting loss (L1 + cosine similarity in feature space)
-  WEIGHT_CONTEXT_IDENTITY:
-    value: 0.0 # LEAVE AT 0.0: Weight for L1 loss on context (non-masked) regions (identity preservation)
-  WEIGHT_TV_IN_HOLE:
-    value: 0.0 # LEAVE AT 0.0: Weight for total variation loss inside masked/hole regions
-  RING_DILATE_KERNEL:
-    value: 17 # Dilation kernel size (odd number) for creating ring mask around highlights
-  WEIGHT_SEAM:
-    value: 0.5 # Weight for gradient matching loss on saturation ring
-  SEAM_USE_CHARB:
-    value: True # Use Charbonnier loss instead of L1 in seam loss (smooth L1 for boundary consistency)
-  SEAM_WEIGHT_GRAD:
-    value: 0.0 # Weight for gradient matching term inside seam loss (0.0 = disable gradient term)
-  TOKEN_FEAT_ALPHA:
-    value: 0.5 # Mixing factor for token feature loss: alpha * L1 + (1-alpha) * (1-cosine_sim)
-
-  ### DIFFUSE HIGHLIGHT PENALTY
-  WEIGHT_DIFFUSE_HIGHLIGHT_PENALTY:
-    value: 0.0 # Weight for penalty loss on highlights in diffuse decoder output (0.0 = disabled)
-  DIFFUSE_HL_THRESHOLD:
-    value: 0.8 # Brightness/luminance threshold for detecting highlights in diffuse (0.0-1.0)
-  DIFFUSE_HL_USE_CHARB:
-    value: True # Use Charbonnier loss instead of L1 for diffuse highlight penalty
-  DIFFUSE_HL_PENALTY_MODE:
-    value: "brightness" # Penalty mode: "brightness" (penalize brightness/luminance above threshold) or "pixel" (penalize RGB values directly)
-  DIFFUSE_HL_TARGET_BRIGHTNESS:
-    value: null # Target brightness/luminance for penalized pixels (null = use threshold value)
-  DIFFUSE_HL_USE_LUMINANCE:
-    value: True # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B); if False, use simple mean brightness
-
-  ### LOGGING, RESULTS AND WANDB
-  LOG_INTERVAL:
-    value: 1 # Number of training steps between console log outputs
-  WANDB_LOG_INTERVAL:
-    value: 1 # Number of training steps between WandB metric logs
-  IMAGE_LOG_INTERVAL:
-    value: 5 # Number of training steps between image logging to WandB
-  NO_WANDB:
-    value: False # Disable WandB logging if True (useful for local debugging)
-  MODEL_WATCHER_FREQ_WANDB:
-    value: 50 # Frequency (in steps) for logging model parameter histograms to WandB
-  WANDB_ENTITY:
-    value: "unreflect-anything" # WandB organization/entity name
-  WANDB_PROJECT:
-    value: "UnReflectAnything" # WandB project name
-  NOTES:
-    value: "896 Decoder Pretraining" # Notes/description for this training run
-
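The TOKEN_FEAT_ALPHA comment above documents the token feature loss as alpha * L1 + (1-alpha) * (1-cosine_sim). A short PyTorch sketch of that mixing, restricted to inpainted token positions, follows; the function signature and shapes are illustrative, not the repository's implementation.

```python
# Sketch of the token feature loss implied by TOKEN_FEAT_ALPHA (assumed shapes).
import torch
import torch.nn.functional as F

def token_feature_loss(pred, target, mask, alpha=0.5):
    """pred/target: (B, N, C) token features; mask: (B, N), 1 = inpainted token."""
    l1 = (pred - target).abs().mean(dim=-1)          # per-token L1 term
    cos = F.cosine_similarity(pred, target, dim=-1)  # per-token cosine similarity
    per_token = alpha * l1 + (1.0 - alpha) * (1.0 - cos)
    # Average only over the masked (inpainted) positions.
    return (per_token * mask).sum() / mask.sum().clamp(min=1)

# 784 tokens = (448 / 16)^2 for a ViT/16 encoder at IMAGE_SIZE 448.
loss = token_feature_loss(torch.randn(2, 784, 1024),
                          torch.randn(2, 784, 1024),
                          torch.randint(0, 2, (2, 784)).float())
```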
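This config also toggles Charbonnier variants of its pixel losses (HLREG_USE_CHARB, SEAM_USE_CHARB, DIFFUSE_HL_USE_CHARB). For reference, a standard Charbonnier penalty, sqrt(diff^2 + eps^2), is sketched below; the epsilon default is an assumption, not a value from the repository.

```python
# Generic Charbonnier (smooth-L1-like) penalty; eps is an assumed default.
import torch

def charbonnier(pred, target, eps=1e-3):
    diff = pred - target
    # Quadratic near zero (like L2), linear in the tails (like L1).
    return torch.sqrt(diff * diff + eps * eps).mean()

print(charbonnier(torch.zeros(4), torch.ones(4)))  # approx. 1.0 for unit error
```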
configs/decoder_pretrain_896.yaml
DELETED
@@ -1,295 +0,0 @@
-### BASELINE: CONVERGES AFTER A LONG TIME
-
-parameters:
-
-  ### MODEL ARCHITECTURE
-  MODEL:
-    value:
-      MODEL_CLASS: "UnReflect_Model" # Main model class name (must match class in models.py) # <<<<<<<<< DECODER PRETRAINING: NOT USING TOKEN INPAINTER (DIRECT FROM DINO)
-      MODEL_MODULE: "models" # Module name to import model classes from (default: "models")
-      RGB_ENCODER:
-        ENCODER: "facebook/dinov3-vitl16-pretrain-lvd1689m" # DINOv3 encoder model name (HuggingFace format)
-        IMAGE_SIZE: 896 # Input image size (height and width in pixels)
-        RETURN_SELECTED_LAYERS: [3, 6, 9, 12] # Transformer layer indices to extract features from (0-indexed)
-        RGB_ENCODER_LR: 0.0 # Learning rate for RGB encoder (0.0 = frozen, must be explicitly set)
-      DECODERS:
-        diffuse:
-          USE_FILM: False # Enable FiLM (Feature-wise Linear Modulation) conditioning in decoder
-          FEATURE_DIM: 1024 # Feature dimension for decoder (should match encoder output)
-          REASSEMBLE_OUT_CHANNELS: [768,1024,1536,2048] # Output channels for each decoder stage (DPT-style reassembly)
-          REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
-          READOUT_TYPE: "ignore" # Readout type for DPT decoder ("ignore", "project", etc.)
-          # FROM_PRETRAINED: "weights/rgb_decoder.pth" # Path to pretrained decoder weights (optional) # <<<<<<<<< DECODER PRETRAINING: NO WEIGHTS HERE
-          USE_BN: False # Use batch normalization in decoder
-          DROPOUT: 0.1 # Dropout rate in decoder layers
-          OUTPUT_IMAGE_SIZE: [896,896] # Output image resolution [height, width]
-          OUTPUT_CHANNELS: 3 # Number of output channels (3 for RGB diffuse image)
-          DECODER_LR: 5.0e-5 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
-          NUM_FUSION_BLOCKS_TRAINABLE: null # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
-          TRAIN_RGB_HEAD: True # Whether to train RGB head (true/false, null = train if DECODER_LR != 0)
-      TOKEN_INPAINTER:
-        TOKEN_INPAINTER_CLASS: "TokenInpainter_Prior" # Token inpainter class name
-        TOKEN_INPAINTER_MODULE: "token_inpainters" # Module name to import token inpainter from
-        FROM_PRETRAINED: "weights/token_inpainter.pth" # Path to pretrained token inpainter weights (optional)
-        TOKEN_INPAINTER_LR: 1.0e-5 # Learning rate for token inpainter (can differ from base LR)
-        DEPTH: 6 # Number of transformer blocks
-        HEADS: 16 # Number of attention heads
-        DROP: 0 # Dropout rate
-        USE_POSITIONAL_ENCODING: True # Enable 2D sinusoidal positional encodings
-        USE_FINAL_NORM: True # Enable final LayerNorm before output projection
-        USE_LOCAL_PRIOR: True # Blend local mean prior for masked seeds
-        LOCAL_PRIOR_WEIGHT: 0.5 # Weight for local prior blending (1.0 = only mask_token, 0.0 = only local mean)
-        LOCAL_PRIOR_KERNEL: 5 # Kernel size for local prior blending (> 1)
-        SEED_NOISE_STD: 0.02 # Standard deviation of noise added to masked seeds during training
-  INPAINT_MASK_DILATION:
-    value: 11 # Dilation kernel size (pixels) for inpaint mask - must be odd
-  USE_TORCH_COMPILE: # Enable PyTorch 2.0 torch.compile for faster training (experimental)
-    value: False
-
-  ### DATA
-  DATASETS:
-    value:
-      SCRREAM:
-        VAL_SCENES: ["scene10_full_00","scene11_full_00","scene044_full_00","scene04_reduced_00","scene04_reduced_01","scene04_reduced_02"] # List of validation scene names
-        TARGET_SIZE: [896,896] # Target image size [height, width] in pixels
-        RESIZE_MODE: "resize+crop" # Image resizing mode: "resize", "crop", "resize+crop", or "pad"
-        FEW_IMAGES: False # If True, load only first 10 images per scene (for quick debugging)
-        SAMPLE_EVERY_N: 4 # Load every Nth frame from each scene (1 = all frames, 4 = every 4th frame)
-        LOAD_RGB_ONLY: True # If True, ignore polarization data and load only RGB images
-
-      HOUSECAT6D:
-        VAL_SCENES: ["val_scene1","val_scene2"] # Validation scene names
-        TARGET_SIZE: [896,896] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        FEW_IMAGES: False # Load only first 10 images if True
-        SAMPLE_EVERY_N: 4 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-
-      CROMO:
-        TRAIN_SCENES: ["kitchen","station","facades"] # Training scene names (list or string)
-        # VAL_SCENES: "station" # Validation scene names (optional)
-        TARGET_SIZE: [896,896] # Target image size [height, width]
-        RESIZE_MODE: "resize" # Image resizing mode
-        FEW_IMAGES: False # Load only first 10 images if True
-        SAMPLE_EVERY_N: 4 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-
-      PSD:
-        TRAIN_SCENES: "PSD_Train" # Training scene name (string or list)
-        VAL_SCENES: "PSD_Val" # Validation scene name (string or list)
-        TARGET_SIZE: [896,896] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        FEW_IMAGES: False # Load only first 10 images if True
-        SAMPLE_EVERY_N: 1 # Load every Nth frame (1 = all frames)
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-
-      SCARED:
-        VAL_SCENES: ["v22","v23","v24","v25","v26","v27","v28","v29","v30","v31","v32","v33","v34"] # Validation scene names
-        TARGET_SIZE: [896,896] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        SAMPLE_EVERY_N: 4 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-        FEW_IMAGES: False # Load only first 10 images if True
-        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing in dataset
-        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection (0-1)
-        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
-        HIGHLIGHT_RECT_SIZE: [1000, 1000] # Size of highlight rectangle region [height, width]
-        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
-        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
-
-      STEREOMIS_TRACKING:
-        VAL_SCENES: ["P2_2"] # Validation scene names
-        TARGET_SIZE: [896,896] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        SAMPLE_EVERY_N: 5 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-        FEW_IMAGES: False # Load only first 10 images if True
-        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
-        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
-        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
-        HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
-        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
-        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
-
-      CHOLEC80:
-        VAL_SCENES: ["val"] # Validation scene names
-        TARGET_SIZE: [896,896] # Target image size [height, width]
-        RESIZE_MODE: "resize+crop" # Image resizing mode
-        SAMPLE_EVERY_N: 50 # Load every Nth frame
-        LOAD_RGB_ONLY: True # Ignore polarization data if True
-        FEW_IMAGES: False # Load only first 10 images if True
-        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
-        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
-        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
-        HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
-        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
-        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
-
-      # POLARGB:
-      #   TRAIN_SCENES: "train"
-      #   VAL_SCENES: "test"
-      #   TARGET_SIZE: [896,896]
-      #   RESIZE_MODE: "resize+crop"
-      #   SAMPLE_EVERY_N: 1
-      #   LOAD_RGB_ONLY: True
-
-  BATCH_SIZE: # Max batch size with img size 896x896 is 20
-    value: 20 # Number of samples per batch (adjust based on GPU memory)
-  NUM_WORKERS:
-    value: 12 # Number of data loading worker processes (0 = main process only)
-  SHUFFLE:
-    value: True # Shuffle training data each epoch (False for validation/test)
-  PIN_MEMORY:
-    value: True # Pin memory in DataLoader for faster GPU transfer (recommended: True)
-  PREFETCH_FACTOR:
-    value: 2 # Number of batches to prefetch per worker (higher = more memory usage)
-
-  ### HIGHLIGHTS
-  MOGE_MODEL:
-    value: "Ruicheng/moge-2-vits-normal" # MoGe model name for normal estimation (HuggingFace format)
-  SURFACE_ROUGHNESS:
-    value: 8.0 # Blinn-Phong surface roughness exponent (higher = sharper highlights)
-  INTENSITY:
-    value: 0.0 # Specular highlight intensity multiplier
-  LIGHT_DISTANCE_RANGE:
-    value: [0.0, 1] # Range for light source distance sampling [min, max] (normalized)
-  LIGHT_LEFT_RIGHT_ANGLE:
-    value: [0, 360] # Range for light source horizontal angle [min, max] in degrees
-  LIGHT_ABOVE_BELOW_ANGLE:
-    value: [0, 360] # Range for light source vertical angle [min, max] in degrees
-  DATASET_HIGHLIGHT_DILATION:
-    value: 25 # Dilation kernel size (pixels) for dataset highlight masks
-  DATASET_HIGHLIGHT_THRESHOLD:
-    value: 0.9 # Brightness/luminance threshold (0-1) for detecting highlights in dataset images
-  DATASET_HIGHLIGHT_USE_LUMINANCE:
-    value: True # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B) for dataset highlights; if False, use simple mean brightness
-  HIGHLIGHT_COLOR:
-    value: [1.0, 1.0, 1.0] # RGB color for synthetic highlights (normalized 0-1)
-  CLAMP_RECONSTRUCTION:
-    value: True # Clamp reconstructed images to [0, 1] range if True
-
-  ### OPTIMIZATION
-  EPOCHS:
-    value: 20 # Maximum number of training epochs
-  LEARNING_RATE:
-    value: 1.0e-4 # Base learning rate for optimizer
-  WEIGHT_DECAY:
-    value: 0.0 # L2 regularization weight (0.0 = no weight decay)
-  GRADIENT_ACCUMULATION_STEPS:
-    value: 1 # Number of steps to accumulate gradients before optimizer step (1 = no accumulation)
-  WARMUP:
-    value: 100 # Number of warmup steps for learning rate schedule (linear warmup from 0 to LR)
-  GRADIENT_CLIPPING_MAX_NORM:
-    value: 8 # Maximum gradient norm for clipping (set to -1 to disable clipping)
-  LR_SCHEDULER:
-    value:
-      ONPLATEAU: # ReduceLROnPlateau scheduler (reduces LR when validation metric plateaus)
-        PATIENCE: 5 # Number of epochs to wait before reducing LR
-        FACTOR: 0.1 # Factor by which LR is reduced (new_lr = old_lr * factor)
-      COSINE: # CosineAnnealingLR scheduler (cosine annealing schedule)
-        N_PERIODS: 1 # Number of cosine periods over training
-      # STEPWISE: # StepLR scheduler (reduces LR at fixed step intervals)
-      #   N_STEPS: 5 # Number of times to reduce LR during training
-      #   GAMMA: 0.25 # Factor by which LR is reduced at each step (new_lr = old_lr * gamma)
-      # EXPONENTIAL: # ExponentialLR scheduler (exponential decay)
-      #   GAMMA: 0.5 # Multiplicative factor for exponential decay
-
-  SWITCH_OPTIMIZER_EPOCH:
-    value: null # Epoch number to switch from bootstrap to refining optimizer (null = no switch)
-  OPTIMIZER_BOOTSTRAP_NAME:
-    value: "AdamW" # Optimizer name for initial training phase ("Adam", "SGD", etc.)
-  OPTIMIZER_REFINING_NAME:
-    value: "AdamW" # Optimizer name for refining phase (used after SWITCH_OPTIMIZER_EPOCH)
-  EARLY_STOPPING_PATIENCE:
-    value: 20 # Number of epochs without improvement before stopping training
-  SAVE_INTERVAL:
-    value: 1000 # Number of training steps between model checkpoints
-
-  DATASET_HIGHLIGHT_SUPERVISION_THRESHOLD:
-    value: 0.1 # Pixel highlights above this threshold (should be low) are excluded from supervision
-
-  ### LOSS WEIGHTS (relative to the total loss, NOT NORMALIZED LATER)
-  SPECULAR_LOSS_WEIGHT:
-    value: 0.0 # Weight for specular component reconstruction loss
-  DIFFUSE_LOSS_WEIGHT:
-    value: 1.0 # Weight for diffuse component reconstruction loss
-  HIGHLIGHT_LOSS_WEIGHT:
-    value: 0.0 # Weight for highlight mask regression loss
-  IMAGE_RECONSTRUCTION_LOSS_WEIGHT:
-    value: 0.0 # Weight for full image reconstruction loss
-  SATURATION_RING_LOSS_WEIGHT:
-    value: 0.0 # Weight for saturation ring consistency loss (around highlight regions)
-  RING_KERNEL_SIZE:
-    value: 11 # Kernel size (odd number) for saturation ring dilation around highlights
-  RING_VAR_WEIGHT:
-    value: 0.5 # Weight for variance matching in saturation ring loss (vs mean matching)
-  RING_TEXTURE_WEIGHT:
-    value: 0.0 # Weight for texture consistency term in saturation ring loss
-  HLREG_W_L1:
-    value: 1.0 # Weight for L1 loss in highlight regression
-  HLREG_USE_CHARB:
-    value: True # Use Charbonnier loss (smooth L1) instead of standard L1 if True
-  HLREG_W_DICE:
-    value: 0.2 # Weight for Dice loss in highlight regression (for mask overlap)
-  HLREG_W_SSIM:
-    value: 0.0 # Weight for SSIM loss in highlight regression
-  HLREG_W_GRAD:
-    value: 0.0 # Weight for gradient loss in highlight regression
-  HLREG_W_TV:
-    value: 0.0 # Weight for total variation loss in highlight regression
-  HLREG_BALANCE_MODE:
-    value: "auto" # Class balancing mode for highlight regression: 'none' | 'auto' | 'pos_weight'
-  HLREG_POS_WEIGHT:
-    value: 1.0 # Positive class weight (used only if BALANCE_MODE == 'pos_weight')
-  HLREG_FOCAL_GAMMA:
-    value: 2.0 # Focal loss gamma parameter (0.0 = standard BCE, 1.0-2.0 helps with gradient vanishing)
-
-  WEIGHT_TOKEN_INPAINT:
-    value: 0.0 # Weight for token-space inpainting loss (L1 + cosine similarity in feature space)
-  WEIGHT_CONTEXT_IDENTITY:
-    value: 0.0 # LEAVE AT 0.0: Weight for L1 loss on context (non-masked) regions (identity preservation)
-  WEIGHT_TV_IN_HOLE:
-    value: 0.0 # LEAVE AT 0.0: Weight for total variation loss inside masked/hole regions
-  RING_DILATE_KERNEL:
-    value: 17 # Dilation kernel size (odd number) for creating ring mask around highlights
-  WEIGHT_SEAM:
-    value: 0.0 # Weight for gradient matching loss on saturation ring
-  SEAM_USE_CHARB:
-    value: True # Use Charbonnier loss instead of L1 in seam loss (smooth L1 for boundary consistency)
-  SEAM_WEIGHT_GRAD:
-    value: 0.0 # Weight for gradient matching term inside seam loss (0.0 = disable gradient term)
-  TOKEN_FEAT_ALPHA:
-    value: 0.5 # Mixing factor for token feature loss: alpha * L1 + (1-alpha) * (1-cosine_sim)
-
-  ### DIFFUSE HIGHLIGHT PENALTY
-  WEIGHT_DIFFUSE_HIGHLIGHT_PENALTY:
-    value: 0.0 # Weight for penalty loss on highlights in diffuse decoder output (0.0 = disabled)
-  DIFFUSE_HL_THRESHOLD:
-    value: 0.8 # Brightness/luminance threshold for detecting highlights in diffuse (0.0-1.0)
-  DIFFUSE_HL_USE_CHARB:
-    value: True # Use Charbonnier loss instead of L1 for diffuse highlight penalty
-  DIFFUSE_HL_PENALTY_MODE:
-    value: "brightness" # Penalty mode: "brightness" (penalize brightness/luminance above threshold) or "pixel" (penalize RGB values directly)
-  DIFFUSE_HL_TARGET_BRIGHTNESS:
-    value: null # Target brightness/luminance for penalized pixels (null = use threshold value)
-  DIFFUSE_HL_USE_LUMINANCE:
-    value: True # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B); if False, use simple mean brightness
-
-  ### LOGGING, RESULTS AND WANDB
-  LOG_INTERVAL:
-    value: 1 # Number of training steps between console log outputs
-  WANDB_LOG_INTERVAL:
-    value: 1 # Number of training steps between WandB metric logs
-  IMAGE_LOG_INTERVAL:
-    value: 5 # Number of training steps between image logging to WandB
-  NO_WANDB:
-    value: False # Disable WandB logging if True (useful for local debugging)
-  MODEL_WATCHER_FREQ_WANDB:
-    value: 50 # Frequency (in steps) for logging model parameter histograms to WandB
-  WANDB_ENTITY:
-    value: "unreflect-anything" # WandB organization/entity name
-  WANDB_PROJECT:
-    value: "UnReflectAnything" # WandB project name
-  NOTES:
-    value: "896 Decoder Pretraining" # Notes/description for this training run
-
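Both pretraining configs gate dataset highlight masks on DATASET_HIGHLIGHT_THRESHOLD, DATASET_HIGHLIGHT_USE_LUMINANCE, and DATASET_HIGHLIGHT_DILATION. The sketch below shows one plausible reading of those three knobs; the luminance weights come from the config comments, while using max-pooling as binary dilation is an assumption rather than the repository's code.

```python
# Assumed implementation of the dataset highlight-mask options; not the repo's code.
import torch
import torch.nn.functional as F

def highlight_mask(rgb, threshold=0.9, dilation=25, use_luminance=True):
    """rgb: (B, 3, H, W) in [0, 1]; returns a binary mask of shape (B, 1, H, W)."""
    if use_luminance:
        # Perceptually weighted luminance, as in the config comments.
        w = torch.tensor([0.299, 0.587, 0.114], device=rgb.device).view(1, 3, 1, 1)
        brightness = (rgb * w).sum(dim=1, keepdim=True)
    else:
        brightness = rgb.mean(dim=1, keepdim=True)  # simple mean brightness
    mask = (brightness > threshold).float()
    # Binary dilation via max-pooling; an odd kernel keeps the mask centered.
    return F.max_pool2d(mask, kernel_size=dilation, stride=1, padding=dilation // 2)

mask = highlight_mask(torch.rand(1, 3, 896, 896))
```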
configs/figuremeout.yaml
DELETED
|
@@ -1,306 +0,0 @@
|
|
| 1 |
-
### BASELINE: CONVERGES AFTER LONG
|
| 2 |
-
|
| 3 |
-
parameters:
|
| 4 |
-
|
| 5 |
-
### MODEL ARCHITECTURE
|
| 6 |
-
MODEL:
|
| 7 |
-
value:
|
| 8 |
-
MODEL_CLASS: "UnReflect_Model " # Main model class name (must match class in models.py)
|
| 9 |
-
MODEL_MODULE: "models" # Module name to import model classes from (default: "models")
|
| 10 |
-
RGB_ENCODER:
|
| 11 |
-
ENCODER: "facebook/dinov3-vitl16-pretrain-lvd1689m" # DINOv3 encoder model name (HuggingFace format)
|
| 12 |
-
IMAGE_SIZE: 448 # Input image size (height and width in pixels)
|
| 13 |
-
RETURN_SELECTED_LAYERS: [3, 6, 9 , 12] # Transformer layer indices to extract features from (0-indexed)
|
| 14 |
-
RGB_ENCODER_LR: 0.0 # Learning rate for RGB encoder (0.0 = frozen, must be explicitly set)
|
| 15 |
-
DECODERS:
|
| 16 |
-
diffuse:
|
| 17 |
-
USE_FILM: False # Enable FiLM (Feature-wise Linear Modulation) conditioning in decoder
|
| 18 |
-
FEATURE_DIM: 1024 # Feature dimension for decoder (should match encoder output)
|
| 19 |
-
REASSEMBLE_OUT_CHANNELS: [768,1024,1536,2048] # Output channels for each decoder stage (DPT-style reassembly)
|
| 20 |
-
REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
|
| 21 |
-
READOUT_TYPE: "ignore" # Readout type for DPT decoder ("ignore", "project", etc.)
|
| 22 |
-
# FROM_PRETRAINED: "weights/rgb_decoder.pth" # Path to pretrained decoder weights (optional)
|
| 23 |
-
USE_BN: False # Use batch normalization in decoder
|
| 24 |
-
DROPOUT: 0.1 # Dropout rate in decoder layers
|
| 25 |
-
OUTPUT_IMAGE_SIZE: [448,448] # Output image resolution [height, width]
|
| 26 |
-
OUTPUT_CHANNELS: 3 # Number of output channels (3 for RGB diffuse image)
|
| 27 |
-
DECODER_LR: 1.0e-4 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
|
| 28 |
-
NUM_FUSION_BLOCKS_TRAINABLE: 1 # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
|
| 29 |
-
TRAIN_RGB_HEAD: True # Whether to train RGB head (true/false, null = train if DECODER_LR != 0)
|
| 30 |
-
highlight:
|
| 31 |
-
USE_FILM: False # Enable FiLM conditioning in highlight decoder
|
| 32 |
-
FEATURE_DIM: 1024 # Feature dimension for highlight decoder
|
| 33 |
-
REASSEMBLE_OUT_CHANNELS: [96,192,384,768] # Output channels for each decoder stage
|
| 34 |
-
REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
|
| 35 |
-
READOUT_TYPE: "ignore" # Readout type for DPT decoder
|
| 36 |
-
USE_BN: False # Use batch normalization in decoder
|
| 37 |
-
DROPOUT: 0.1 # Dropout rate in decoder layers
|
| 38 |
-
OUTPUT_IMAGE_SIZE: [448,448] # Output image resolution [height, width]
|
| 39 |
-
OUTPUT_CHANNELS: 1 # Number of output channels (1 for highlight mask)
|
| 40 |
-
DECODER_LR: 5.0e-4 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
|
| 41 |
-
NUM_FUSION_BLOCKS_TRAINABLE: null # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
|
| 42 |
-
TOKEN_INPAINTER:
|
| 43 |
-
TOKEN_INPAINTER_CLASS: "TokenInpainter_Naive" # Token inpainter class name
|
| 44 |
-
# TOKEN_INPAINTER_MODULE: "token_inpainters" # Module name to import token inpainter from
|
| 45 |
-
# FROM_PRETRAINED: "weights/token_inpainter.pth" # Path to pretrained token inpainter weights (optional)
|
| 46 |
-
TOKEN_INPAINTER_LR: 0.0 # Learning rate for token inpainter (can differ from base LR)
|
| 47 |
-
DEPTH: 6 # Number of transformer blocks
|
| 48 |
-
HEADS: 16 # Number of attention heads
|
| 49 |
-
DROP: 0 # Dropout rate
|
| 50 |
-
USE_POSITIONAL_ENCODING: True # Enable 2D sinusoidal positional encodings
|
| 51 |
-
USE_FINAL_NORM: True # Enable final LayerNorm before output projection
|
| 52 |
-
USE_LOCAL_PRIOR: True # Blend local mean prior for masked seeds
|
| 53 |
-
LOCAL_PRIOR_WEIGHT: 0.5 # Weight for local prior blending (1.0 = only mask_token, 0.0 = only local mean)
|
| 54 |
-
LOCAL_PRIOR_KERNEL: 5 # Kernel size for local prior blending (> 1)
|
| 55 |
-
SEED_NOISE_STD: 0.02 # Standard deviation of noise added to masked seeds during training
|
| 56 |
-
INPAINT_MASK_DILATION:
|
| 57 |
-
value: 1 # Dilation kernel size (pixels) for inpaint mask - Must be odd
|
| 58 |
-
USE_TORCH_COMPILE: # Enable PyTorch 2.0 torch.compile for faster training (experimental)
|
| 59 |
-
value: False
|
| 60 |
-
|
| 61 |
-
### DATA
|
| 62 |
-
DATASETS:
|
| 63 |
-
value:
|
| 64 |
-
SCRREAM:
|
| 65 |
-
VAL_SCENES: ["scene10_full_00","scene11_full_00","scene044_full_00","scene04_reduced_00","scene04_reduced_01","scene04_reduced_02"] # List of validation scene names
|
| 66 |
-
TARGET_SIZE: [448,448] # Target image size [height, width] in pixels
|
| 67 |
-
RESIZE_MODE: "resize+crop" # Image resizing mode: "resize", "crop", "resize+crop", or "pad"
|
| 68 |
-
FEW_IMAGES: False # If True, load only first 10 images per scene (for quick debugging)
|
| 69 |
-
SAMPLE_EVERY_N: 2 # Load every Nth frame from each scene (1 = all frames, 4 = every 4th frame)
|
| 70 |
-
LOAD_RGB_ONLY: True # If True, ignore polarization data and load only RGB images
|
| 71 |
-
|
| 72 |
-
HOUSECAT6D:
|
| 73 |
-
VAL_SCENES: ["val_scene1","val_scene2"] # Validation scene names
|
| 74 |
-
TARGET_SIZE: [448,448] # Target image size [height, width]
|
| 75 |
-
RESIZE_MODE: "resize+crop" # Image resizing mode
|
| 76 |
-
FEW_IMAGES: False # Load only first 10 images if True
|
| 77 |
-
SAMPLE_EVERY_N: 2 # Load every Nth frame
|
| 78 |
-
LOAD_RGB_ONLY: True # Ignore polarization data if True
|
| 79 |
-
|
| 80 |
-
CROMO:
|
| 81 |
-
TRAIN_SCENES: ["kitchen"] # Training scene names (list or string)
|
| 82 |
-
# VAL_SCENES: "station" # Validation scene names (optional)
|
| 83 |
-
TARGET_SIZE: [448,448] # Target image size [height, width]
|
| 84 |
-
RESIZE_MODE: "resize" # Image resizing mode
|
| 85 |
-
FEW_IMAGES: False # Load only first 10 images if True
|
| 86 |
-
SAMPLE_EVERY_N: 2 # Load every Nth frame
|
| 87 |
-
LOAD_RGB_ONLY: True # Ignore polarization data if True
|
| 88 |
-
|
| 89 |
-
PSD:
|
| 90 |
-
TRAIN_SCENES: "PSD_Train" # Training scene name (string or list)
|
| 91 |
-
VAL_SCENES: "PSD_Val" # Validation scene name (string or list)
|
| 92 |
-
TARGET_SIZE: [448,448] # Target image size [height, width]
|
| 93 |
-
RESIZE_MODE: "resize+crop" # Image resizing mode
|
| 94 |
-
FEW_IMAGES: False # Load only first 10 images if True
|
| 95 |
-
SAMPLE_EVERY_N: 1 # Load every Nth frame (1 = all frames)
|
| 96 |
-
LOAD_RGB_ONLY: True # Ignore polarization data if True
|
| 97 |
-
|
| 98 |
-
SCARED:
|
| 99 |
-
VAL_SCENES: ["v22","v23","v24","v25","v26","v27","v28","v29","v30","v31","v32","v33","v34"] # Validation scene names
|
| 100 |
-
TARGET_SIZE: [448,448] # Target image size [height, width]
|
| 101 |
-
RESIZE_MODE: "resize+crop" # Image resizing mode
|
| 102 |
-
SAMPLE_EVERY_N: 8 # Load every Nth frame
|
| 103 |
-
LOAD_RGB_ONLY: True # Ignore polarization data if True
|
| 104 |
-
FEW_IMAGES: False # Load only first 10 images if True
|
| 105 |
-
HIGHLIGHT_ENABLE: False # Enable highlight detection/processing in dataset
|
| 106 |
-
HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection (0-1)
|
| 107 |
-
HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
|
| 108 |
-
HIGHLIGHT_RECT_SIZE: [1000, 1000] # Size of highlight rectangle region [height, width]
|
| 109 |
-
HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
|
| 110 |
-
HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
|
| 111 |
-
|
| 112 |
-
STEREOMIS_TRACKING:
|
| 113 |
-
VAL_SCENES: ["P2_2"] # Validation scene names
|
| 114 |
-
TARGET_SIZE: [448,448] # Target image size [height, width]
|
| 115 |
-
RESIZE_MODE: "resize+crop" # Image resizing mode
|
| 116 |
-
SAMPLE_EVERY_N: 4 # Load every Nth frame
|
| 117 |
-
LOAD_RGB_ONLY: True # Ignore polarization data if True
|
| 118 |
-
FEW_IMAGES: False # Load only first 10 images if True
|
| 119 |
-
HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
|
| 120 |
-
HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
|
| 121 |
-
HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
|
| 122 |
-
HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
|
| 123 |
-
HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
|
| 124 |
-
HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
|
| 125 |
-
|
| 126 |
-
CHOLEC80:
|
| 127 |
-
VAL_SCENES: ["val"] # Validation scene names
|
| 128 |
-
TARGET_SIZE: [448,448] # Target image size [height, width]
|
| 129 |
-
RESIZE_MODE: "resize+crop" # Image resizing mode
|
| 130 |
-
SAMPLE_EVERY_N: 10 # Load every Nth frame
|
| 131 |
-
LOAD_RGB_ONLY: True # Ignore polarization data if True
|
| 132 |
-
FEW_IMAGES: False # Load only first 10 images if True
|
| 133 |
-
HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
|
| 134 |
-
HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
|
| 135 |
-
HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
|
| 136 |
-
HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
|
| 137 |
-
HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
|
| 138 |
-
HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True
|
| 139 |
-
|
| 140 |
-
# POLARGB:
|
| 141 |
-
# TRAIN_SCENES: "train"
|
| 142 |
-
# VAL_SCENES: "test"
|
| 143 |
-
# TARGET_SIZE: [448,448]
|
| 144 |
-
# RESIZE_MODE: "resize+crop"
|
| 145 |
-
# SAMPLE_EVERY_N: 1
|
| 146 |
-
# LOAD_RGB_ONLY: True
|
| 147 |
-
|
| 148 |
-
BATCH_SIZE: # Max batch size with img size 448 is 32
|
| 149 |
-
value: 32 # Number of samples per batch (adjust based on GPU memory)
|
| 150 |
-
NUM_WORKERS:
|
| 151 |
-
value: 8 # Number of data loading worker processes (0 = main process only)
|
| 152 |
-
SHUFFLE:
|
| 153 |
-
value: True # Shuffle training data each epoch (False for validation/test)
|
| 154 |
-
PIN_MEMORY:
|
| 155 |
-
value: True # Pin memory in DataLoader for faster GPU transfer (recommended: True)
|
| 156 |
-
PREFETCH_FACTOR:
|
| 157 |
-
value: 2 # Number of batches to prefetch per worker (higher = more memory usage)
|
| 158 |
-
|
| 159 |
-
### HIGHLIGHTS
|
| 160 |
-
MOGE_MODEL:
|
| 161 |
-
value: "Ruicheng/moge-2-vits-normal" # MoGe model name for normal estimation (HuggingFace format)
|
| 162 |
-
SURFACE_ROUGHNESS:
|
| 163 |
-
value: 8.0 # Blinn-Phong surface roughness exponent (higher = sharper highlights)
|
| 164 |
-
INTENSITY:
|
| 165 |
-
value: 0.0 # Specular highlight intensity multiplier
|
| 166 |
-
LIGHT_DISTANCE_RANGE:
|
| 167 |
-
value: [0.0, 1] # Range for light source distance sampling [min, max] (normalized)
|
| 168 |
-
LIGHT_LEFT_RIGHT_ANGLE:
|
| 169 |
-
value: [0, 360] # Range for light source horizontal angle [min, max] in degrees
|
| 170 |
-
LIGHT_ABOVE_BELOW_ANGLE:
|
| 171 |
-
value: [0, 360] # Range for light source vertical angle [min, max] in degrees
|
| 172 |
-
DATASET_HIGHLIGHT_DILATION:
|
| 173 |
-
value: 25 # Dilation kernel size (pixels) for dataset highlight masks
|
| 174 |
-
DATASET_HIGHLIGHT_THRESHOLD:
|
| 175 |
-
value: 0.9 # Brightness/luminance threshold (0-1) for detecting highlights in dataset images
|
| 176 |
-
DATASET_HIGHLIGHT_USE_LUMINANCE:
|
| 177 |
-
value: True # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B) for dataset highlights; if False, use simple mean brightness
|
| 178 |
-
HIGHLIGHT_COLOR:
|
| 179 |
-
value: [1.0, 1.0, 1.0] # RGB color for synthetic highlights (normalized 0-1)
|
| 180 |
-
CLAMP_RECONSTRUCTION:
|
| 181 |
-
value: True # Clamp reconstructed images to [0, 1] range if True
|
| 182 |
-
|
| 183 |
-
### OPTIMIZATION
|
| 184 |
-
LEARNING_RATE:
|
| 185 |
-
value: 1.0e-3 # Base learning rate for optimizer
|
| 186 |
-
WEIGHT_DECAY:
|
| 187 |
-
value: 0.0 # L2 regularization weight (0.0 = no weight decay)
|
| 188 |
-
EPOCHS:
|
| 189 |
-
value: 25 # Maximum number of training epochs
|
| 190 |
-
GRADIENT_ACCUMULATION_STEPS:
|
| 191 |
-
value: 1 # Number of steps to accumulate gradients before optimizer step (1 = no accumulation)
|
| 192 |
-
WARMUP:
|
| 193 |
-
value: 200 # Number of warmup steps for learning rate schedule (linear warmup from 0 to LR)
|
| 194 |
-
GRADIENT_CLIPPING_MAX_NORM:
|
| 195 |
-
value: 8 # Maximum gradient norm for clipping (set to -1 to disable clipping)
|
| 196 |
-
LR_SCHEDULER:
|
| 197 |
-
value:
|
| 198 |
-
ONPLATEAU: # ReduceLROnPlateau scheduler (reduces LR when validation metric plateaus)
|
| 199 |
-
PATIENCE: 5 # Number of epochs to wait before reducing LR
|
| 200 |
-
FACTOR: 0.1 # Factor by which LR is reduced (new_lr = old_lr * factor)
|
| 201 |
-
# COSINE: # CosineAnnealingLR scheduler (cosine annealing schedule)
|
| 202 |
-
# N_PERIODS: 5 # Number of cosine periods over training
|
| 203 |
-
STEPWISE: # StepLR scheduler (reduces LR at fixed step intervals)
|
| 204 |
-
N_STEPS: 4 # Number of times to reduce LR during training
|
| 205 |
-
GAMMA: 0.5 # Factor by which LR is reduced at each step (new_lr = old_lr * gamma)
|
| 206 |
-
# EXPONENTIAL: # ExponentialLR scheduler (exponential decay)
|
| 207 |
-
# GAMMA: 0.5 # Multiplicative factor for exponential decay
|
| 208 |
-
|
  SWITCH_OPTIMIZER_EPOCH:
    value: null # Epoch number to switch from bootstrap to refining optimizer (null = no switch)
  OPTIMIZER_BOOTSTRAP_NAME:
    value: "AdamW" # Optimizer name for initial training phase ("Adam", "SGD", etc.)
  OPTIMIZER_REFINING_NAME:
    value: "AdamW" # Optimizer name for refining phase (used after SWITCH_OPTIMIZER_EPOCH)
  EARLY_STOPPING_PATIENCE:
    value: 10 # Number of epochs without improvement before stopping training
  SAVE_INTERVAL:
    value: 1000 # Number of training steps between model checkpoints

  DATASET_HIGHLIGHT_SUPERVISION_THRESHOLD:
    value: 0.1 # Pixels whose highlight intensity exceeds this (deliberately low) threshold are excluded from supervision

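The SWITCH_OPTIMIZER_EPOCH and OPTIMIZER_*_NAME keys above suggest optimizers resolved by class name; a hedged sketch of how such a bootstrap-to-refining switch could look (resolving names via getattr on torch.optim is this sketch's assumption):

import torch

def build_optimizer(name, params, lr):
    # Resolve "AdamW", "Adam", "SGD", ... to the torch.optim class of that name
    return getattr(torch.optim, name)(params, lr=lr)

model = torch.nn.Linear(8, 8)
opt = build_optimizer("AdamW", model.parameters(), lr=1.0e-3)
switch_epoch = None  # SWITCH_OPTIMIZER_EPOCH: null means never switch
for epoch in range(25):
    if switch_epoch is not None and epoch == switch_epoch:
        opt = build_optimizer("AdamW", model.parameters(), lr=1.0e-3)
    # ... training steps ...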
  ### LOSS WEIGHTS (relative to the total loss, NOT NORMALIZED LATER)
  SPECULAR_LOSS_WEIGHT:
    value: 0.0 # Weight for specular component reconstruction loss
  DIFFUSE_LOSS_WEIGHT:
    value: 1.0 # Weight for diffuse component reconstruction loss
  HIGHLIGHT_LOSS_WEIGHT:
    value: 0.0 # Weight for highlight mask regression loss
  IMAGE_RECONSTRUCTION_LOSS_WEIGHT:
    value: 0.0 # Weight for full image reconstruction loss
  SATURATION_RING_LOSS_WEIGHT:
    value: 0.0 # Weight for saturation ring consistency loss (around highlight regions)
  RING_KERNEL_SIZE:
    value: 11 # Kernel size (odd number) for saturation ring dilation around highlights
  RING_VAR_WEIGHT:
    value: 0.5 # Weight for variance matching in saturation ring loss (vs mean matching)
  RING_TEXTURE_WEIGHT:
    value: 1.0 # Weight for texture consistency term in saturation ring loss
  HLREG_W_L1:
    value: 1.0 # Weight for L1 loss in highlight regression
  HLREG_USE_CHARB:
    value: True # Use Charbonnier loss (smooth L1) instead of standard L1 if True
  HLREG_W_DICE:
    value: 0.2 # Weight for Dice loss in highlight regression (for mask overlap)
  HLREG_W_SSIM:
    value: 0.0 # Weight for SSIM loss in highlight regression
  HLREG_W_GRAD:
    value: 0.0 # Weight for gradient loss in highlight regression
  HLREG_W_TV:
    value: 0.0 # Weight for total variation loss in highlight regression
  HLREG_BALANCE_MODE:
    value: "auto" # Class balancing mode for highlight regression: 'none' | 'auto' | 'pos_weight'
  HLREG_POS_WEIGHT:
    value: 1.0 # Positive class weight (used only if BALANCE_MODE == 'pos_weight')
  HLREG_FOCAL_GAMMA:
    value: 2.0 # Focal loss gamma parameter (0.0 = standard BCE, 1.0-2.0 helps with gradient vanishing)

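Charbonnier and Dice are standard components; a minimal sketch of both terms as they would combine under HLREG_W_L1 and HLREG_W_DICE (the epsilon values and tensor shapes are this sketch's assumptions):

import torch

def charbonnier(pred, target, eps=1e-6):
    # Smooth L1 variant: sqrt(diff^2 + eps^2), differentiable at zero
    return torch.sqrt((pred - target) ** 2 + eps ** 2).mean()

def dice_loss(pred, target, eps=1e-6):
    # Soft Dice on probabilities in [0, 1]: 1 - 2|P∩T| / (|P| + |T|)
    inter = (pred * target).sum()
    return 1.0 - (2.0 * inter + eps) / (pred.sum() + target.sum() + eps)

pred = torch.rand(2, 1, 64, 64)
target = (torch.rand(2, 1, 64, 64) > 0.9).float()
loss = 1.0 * charbonnier(pred, target) + 0.2 * dice_loss(pred, target)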
  WEIGHT_TOKEN_INPAINT:
    value: 0.0 # Weight for token-space inpainting loss (L1 + cosine similarity in feature space)
  WEIGHT_CONTEXT_IDENTITY:
    value: 0.0 # LEAVE AT 0.0: Weight for L1 loss on context (non-masked) regions (identity preservation)
  WEIGHT_TV_IN_HOLE:
    value: 0.0 # LEAVE AT 0.0: Weight for total variation loss inside masked/hole regions
  RING_DILATE_KERNEL:
    value: 17 # Dilation kernel size (odd number) for creating ring mask around highlights
  WEIGHT_SEAM:
    value: 0.5 # Weight for gradient matching loss on saturation ring
  SEAM_USE_CHARB:
    value: True # Use Charbonnier loss instead of L1 in seam loss (smooth L1 for boundary consistency)
  SEAM_WEIGHT_GRAD:
    value: 0.0 # Weight for gradient matching term inside seam loss (0.0 = disable gradient term)
  TOKEN_FEAT_ALPHA:
    value: 0.5 # Mixing factor for token feature loss: alpha * L1 + (1-alpha) * (1-cosine_sim)

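The TOKEN_FEAT_ALPHA comment spells out the mixing formula exactly; a direct, self-contained sketch (the tensor shapes are illustrative assumptions, not taken from the model):

import torch
import torch.nn.functional as F

def token_feature_loss(pred_tokens, target_tokens, alpha=0.5):
    # TOKEN_FEAT_ALPHA mixing: alpha * L1 + (1 - alpha) * (1 - cosine similarity)
    l1 = (pred_tokens - target_tokens).abs().mean()
    cos = F.cosine_similarity(pred_tokens, target_tokens, dim=-1).mean()
    return alpha * l1 + (1.0 - alpha) * (1.0 - cos)

pred = torch.randn(2, 3136, 1024)   # (batch, tokens, feature dim)
target = torch.randn(2, 3136, 1024)
print(token_feature_loss(pred, target))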
  ### DIFFUSE HIGHLIGHT PENALTY
  WEIGHT_DIFFUSE_HIGHLIGHT_PENALTY:
    value: 0.0 # Weight for penalty loss on highlights in diffuse decoder output (0.0 = disabled)
  DIFFUSE_HL_THRESHOLD:
    value: 0.8 # Brightness/luminance threshold for detecting highlights in diffuse (0.0-1.0)
  DIFFUSE_HL_USE_CHARB:
    value: True # Use Charbonnier loss instead of L1 for diffuse highlight penalty
  DIFFUSE_HL_PENALTY_MODE:
    value: "brightness" # Penalty mode: "brightness" (penalize brightness/luminance above threshold) or "pixel" (penalize RGB values directly)
  DIFFUSE_HL_TARGET_BRIGHTNESS:
    value: null # Target brightness/luminance for penalized pixels (null = use threshold value)
  DIFFUSE_HL_USE_LUMINANCE:
    value: False # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B); if False, use simple mean brightness

  ### LOGGING, RESULTS AND WANDB
  LOG_INTERVAL:
    value: 1 # Number of training steps between console log outputs
  WANDB_LOG_INTERVAL:
    value: 1 # Number of training steps between WandB metric logs
  IMAGE_LOG_INTERVAL:
    value: 5 # Number of training steps between image logging to WandB
  NO_WANDB:
    value: False # Disable WandB logging if True (useful for local debugging)
  MODEL_WATCHER_FREQ_WANDB:
    value: 50 # Frequency (in steps) for logging model parameter histograms to WandB
  WANDB_ENTITY:
    value: "unreflect-anything" # WandB organization/entity name
  WANDB_PROJECT:
    value: "UnReflectAnything" # WandB project name
  NOTES:
    value: "Final - TKI Learns, Decoder Learns faster" # Notes/description for this training run
configs/pretrain_tki.yaml
DELETED
@@ -1,306 +0,0 @@
### BASELINE: CONVERGES AFTER A LONG TIME

parameters:

  ### MODEL ARCHITECTURE
  MODEL:
    value:
      MODEL_CLASS: "UnReflect_Model_TokenInpainter" # Main model class name (must match class in models.py)
      MODEL_MODULE: "models" # Module name to import model classes from (default: "models")
      RGB_ENCODER:
        ENCODER: "facebook/dinov3-vitl16-pretrain-lvd1689m" # DINOv3 encoder model name (HuggingFace format)
        IMAGE_SIZE: 896 # Input image size (height and width in pixels)
        RETURN_SELECTED_LAYERS: [3, 6, 9, 12] # Transformer layer indices to extract features from (0-indexed)
        RGB_ENCODER_LR: 0.0 # Learning rate for RGB encoder (0.0 = frozen, must be explicitly set)
      DECODERS:
        diffuse:
          USE_FILM: False # Enable FiLM (Feature-wise Linear Modulation) conditioning in decoder
          FEATURE_DIM: 1024 # Feature dimension for decoder (should match encoder output)
          REASSEMBLE_OUT_CHANNELS: [768, 1024, 1536, 2048] # Output channels for each decoder stage (DPT-style reassembly)
          REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
          READOUT_TYPE: "ignore" # Readout type for DPT decoder ("ignore", "project", etc.)
          FROM_PRETRAINED: "weights/decoder_896.pth" # Path to pretrained decoder weights (optional)
          USE_BN: False # Use batch normalization in decoder
          DROPOUT: 0.1 # Dropout rate in decoder layers
          OUTPUT_IMAGE_SIZE: [896, 896] # Output image resolution [height, width]
          OUTPUT_CHANNELS: 3 # Number of output channels (3 for RGB diffuse image)
          DECODER_LR: 0.0 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
          NUM_FUSION_BLOCKS_TRAINABLE: 1 # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
          TRAIN_RGB_HEAD: True # Whether to train RGB head (true/false, null = train if DECODER_LR != 0)
        highlight:
          USE_FILM: False # Enable FiLM conditioning in highlight decoder
          FEATURE_DIM: 1024 # Feature dimension for highlight decoder
          REASSEMBLE_OUT_CHANNELS: [96, 192, 384, 768] # Output channels for each decoder stage
          REASSEMBLE_FACTORS: [4.0, 2.0, 1.0, 0.5] # Spatial upsampling factors for each stage
          READOUT_TYPE: "ignore" # Readout type for DPT decoder
          USE_BN: False # Use batch normalization in decoder
          DROPOUT: 0.1 # Dropout rate in decoder layers
          OUTPUT_IMAGE_SIZE: [896, 896] # Output image resolution [height, width]
          OUTPUT_CHANNELS: 1 # Number of output channels (1 for highlight mask)
          DECODER_LR: 5.0e-4 # Custom learning rate for decoder (0.0 = frozen, 1.0 = same as base LR)
          NUM_FUSION_BLOCKS_TRAINABLE: null # Number of fusion blocks to train (0-4, null = train all if DECODER_LR != 0)
      TOKEN_INPAINTER:
        TOKEN_INPAINTER_CLASS: "TokenInpainter_Prior" # Token inpainter class name
        TOKEN_INPAINTER_MODULE: "token_inpainters" # Module name to import token inpainter from
        # FROM_PRETRAINED: "weights/token_inpainter.pth" # Path to pretrained token inpainter weights (optional)
        TOKEN_INPAINTER_LR: 1.0e-4 # Learning rate for token inpainter (can differ from base LR)
        DEPTH: 6 # Number of transformer blocks
        HEADS: 16 # Number of attention heads
        DROP: 0 # Dropout rate
        USE_POSITIONAL_ENCODING: True # Enable 2D sinusoidal positional encodings
        USE_FINAL_NORM: True # Enable final LayerNorm before output projection
        USE_LOCAL_PRIOR: True # Blend local mean prior for masked seeds
        LOCAL_PRIOR_WEIGHT: 0.5 # Weight for local prior blending (1.0 = only mask_token, 0.0 = only local mean)
        LOCAL_PRIOR_KERNEL: 5 # Kernel size for local prior blending (> 1)
        SEED_NOISE_STD: 0.02 # Standard deviation of noise added to masked seeds during training
  INPAINT_MASK_DILATION:
    value: 1 # Dilation kernel size (pixels) for inpaint mask - must be odd
  USE_TORCH_COMPILE: # Enable PyTorch 2.0 torch.compile for faster training (experimental)
    value: False

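MODEL_CLASS/MODEL_MODULE (and their TOKEN_INPAINTER counterparts) describe instantiation by name; a hedged sketch of how such keys are typically resolved with importlib (the constructor signature is an assumption of this sketch):

import importlib

def build_from_config(module_name, class_name, **kwargs):
    # Resolve e.g. MODEL_MODULE="models", MODEL_CLASS="UnReflect_Model_TokenInpainter"
    module = importlib.import_module(module_name)
    cls = getattr(module, class_name)
    return cls(**kwargs)

# Hypothetical usage: model = build_from_config("models", "UnReflect_Model_TokenInpainter", config=cfg)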
  ### DATA
  DATASETS:
    value:
      SCRREAM:
        VAL_SCENES: ["scene10_full_00", "scene11_full_00", "scene044_full_00", "scene04_reduced_00", "scene04_reduced_01", "scene04_reduced_02"] # List of validation scene names
        TARGET_SIZE: [896, 896] # Target image size [height, width] in pixels
        RESIZE_MODE: "resize+crop" # Image resizing mode: "resize", "crop", "resize+crop", or "pad"
        FEW_IMAGES: False # If True, load only first 10 images per scene (for quick debugging)
        SAMPLE_EVERY_N: 2 # Load every Nth frame from each scene (1 = all frames, 4 = every 4th frame)
        LOAD_RGB_ONLY: True # If True, ignore polarization data and load only RGB images

      HOUSECAT6D:
        VAL_SCENES: ["val_scene1", "val_scene2"] # Validation scene names
        TARGET_SIZE: [896, 896] # Target image size [height, width]
        RESIZE_MODE: "resize+crop" # Image resizing mode
        FEW_IMAGES: False # Load only first 10 images if True
        SAMPLE_EVERY_N: 2 # Load every Nth frame
        LOAD_RGB_ONLY: True # Ignore polarization data if True

      CROMO:
        TRAIN_SCENES: ["kitchen"] # Training scene names (list or string)
        # VAL_SCENES: "station" # Validation scene names (optional)
        TARGET_SIZE: [896, 896] # Target image size [height, width]
        RESIZE_MODE: "resize" # Image resizing mode
        FEW_IMAGES: False # Load only first 10 images if True
        SAMPLE_EVERY_N: 2 # Load every Nth frame
        LOAD_RGB_ONLY: True # Ignore polarization data if True

      PSD:
        TRAIN_SCENES: "PSD_Train" # Training scene name (string or list)
        VAL_SCENES: "PSD_Val" # Validation scene name (string or list)
        TARGET_SIZE: [896, 896] # Target image size [height, width]
        RESIZE_MODE: "resize+crop" # Image resizing mode
        FEW_IMAGES: False # Load only first 10 images if True
        SAMPLE_EVERY_N: 1 # Load every Nth frame (1 = all frames)
        LOAD_RGB_ONLY: True # Ignore polarization data if True

      SCARED:
        VAL_SCENES: ["v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "v32", "v33", "v34"] # Validation scene names
        TARGET_SIZE: [896, 896] # Target image size [height, width]
        RESIZE_MODE: "resize+crop" # Image resizing mode
        SAMPLE_EVERY_N: 8 # Load every Nth frame
        LOAD_RGB_ONLY: True # Ignore polarization data if True
        FEW_IMAGES: False # Load only first 10 images if True
        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing in dataset
        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection (0-1)
        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
        HIGHLIGHT_RECT_SIZE: [1000, 1000] # Size of highlight rectangle region [height, width]
        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True

      STEREOMIS_TRACKING:
        VAL_SCENES: ["P2_2"] # Validation scene names
        TARGET_SIZE: [896, 896] # Target image size [height, width]
        RESIZE_MODE: "resize+crop" # Image resizing mode
        SAMPLE_EVERY_N: 4 # Load every Nth frame
        LOAD_RGB_ONLY: True # Ignore polarization data if True
        FEW_IMAGES: False # Load only first 10 images if True
        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
        HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True

      CHOLEC80:
        VAL_SCENES: ["val"] # Validation scene names
        TARGET_SIZE: [896, 896] # Target image size [height, width]
        RESIZE_MODE: "resize+crop" # Image resizing mode
        SAMPLE_EVERY_N: 10 # Load every Nth frame
        LOAD_RGB_ONLY: True # Ignore polarization data if True
        FEW_IMAGES: False # Load only first 10 images if True
        HIGHLIGHT_ENABLE: False # Enable highlight detection/processing
        HIGHLIGHT_BRIGHTNESS_THRESHOLD: 0.9 # Brightness threshold for highlight detection
        HIGHLIGHT_RETURN_MASK: True # Return highlight mask in dataset output
        HIGHLIGHT_RECT_SIZE: [800, 800] # Size of highlight rectangle region
        HIGHLIGHT_RETURN_RECT_AS_RGB: False # Return highlight rectangle as RGB if True
        HIGHLIGHT_RETURN_RECT: True # Return highlight rectangle region if True

      # POLARGB:
      #   TRAIN_SCENES: "train"
      #   VAL_SCENES: "test"
      #   TARGET_SIZE: [896, 896]
      #   RESIZE_MODE: "resize+crop"
      #   SAMPLE_EVERY_N: 1
      #   LOAD_RGB_ONLY: True

  BATCH_SIZE: # Max batch size with image size 896 is 32
    value: 16 # Number of samples per batch (adjust based on GPU memory)
  NUM_WORKERS:
    value: 8 # Number of data loading worker processes (0 = main process only)
  SHUFFLE:
    value: True # Shuffle training data each epoch (False for validation/test)
  PIN_MEMORY:
    value: True # Pin memory in DataLoader for faster GPU transfer (recommended: True)
  PREFETCH_FACTOR:
    value: 2 # Number of batches to prefetch per worker (higher = more memory usage)

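The loader keys map one-to-one onto torch.utils.data.DataLoader arguments; a minimal sketch with a placeholder dataset (tiny tensors stand in for the actual scene datasets):

import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.zeros(64, 3, 8, 8))  # placeholder dataset
loader = DataLoader(
    dataset,
    batch_size=16,       # BATCH_SIZE
    num_workers=8,       # NUM_WORKERS
    shuffle=True,        # SHUFFLE
    pin_memory=True,     # PIN_MEMORY
    prefetch_factor=2,   # PREFETCH_FACTOR (only valid when num_workers > 0)
)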
  ### HIGHLIGHTS
  MOGE_MODEL:
    value: "Ruicheng/moge-2-vits-normal" # MoGe model name for normal estimation (HuggingFace format)
  SURFACE_ROUGHNESS:
    value: 8.0 # Blinn-Phong surface roughness exponent (higher = sharper highlights)
  INTENSITY:
    value: 2.0 # Specular highlight intensity multiplier
  LIGHT_DISTANCE_RANGE:
    value: [0.0, 1.0] # Range for light source distance sampling [min, max] (normalized)
  LIGHT_LEFT_RIGHT_ANGLE:
    value: [0, 360] # Range for light source horizontal angle [min, max] in degrees
  LIGHT_ABOVE_BELOW_ANGLE:
    value: [0, 360] # Range for light source vertical angle [min, max] in degrees
  DATASET_HIGHLIGHT_DILATION:
    value: 25 # Dilation kernel size (pixels) for dataset highlight masks
  DATASET_HIGHLIGHT_THRESHOLD:
    value: 0.9 # Brightness/luminance threshold (0-1) for detecting highlights in dataset images
  DATASET_HIGHLIGHT_USE_LUMINANCE:
    value: True # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B) for dataset highlights; if False, use simple mean brightness
  HIGHLIGHT_COLOR:
    value: [1.0, 1.0, 1.0] # RGB color for synthetic highlights (normalized 0-1)
  CLAMP_RECONSTRUCTION:
    value: True # Clamp reconstructed images to [0, 1] range if True

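SURFACE_ROUGHNESS and INTENSITY parameterize a Blinn-Phong specular term over MoGe-estimated normals; a hedged per-pixel sketch of that term (the light/view geometry is simplified and assumed by this sketch, not taken from the repository):

import torch
import torch.nn.functional as F

def blinn_phong_specular(normals, light_dir, view_dir, roughness=8.0, intensity=2.0):
    # normals: (B, 3, H, W) unit normals; light_dir/view_dir: (3,) unit vectors
    half = F.normalize(light_dir + view_dir, dim=0).view(1, 3, 1, 1)
    n_dot_h = (normals * half).sum(dim=1, keepdim=True).clamp(min=0.0)
    # Higher roughness exponent -> tighter, sharper highlight lobe
    return intensity * n_dot_h ** roughness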
  ### OPTIMIZATION
  LEARNING_RATE:
    value: 1.0e-3 # Base learning rate for optimizer
  WEIGHT_DECAY:
    value: 0.0 # L2 regularization weight (0.0 = no weight decay)
  EPOCHS:
    value: 25 # Maximum number of training epochs
  GRADIENT_ACCUMULATION_STEPS:
    value: 1 # Number of steps to accumulate gradients before optimizer step (1 = no accumulation)
  WARMUP:
    value: 200 # Number of warmup steps for learning rate schedule (linear warmup from 0 to LR)
  GRADIENT_CLIPPING_MAX_NORM:
    value: 8 # Maximum gradient norm for clipping (set to -1 to disable clipping)
  LR_SCHEDULER:
    value:
      ONPLATEAU: # ReduceLROnPlateau scheduler (reduces LR when validation metric plateaus)
        PATIENCE: 5 # Number of epochs to wait before reducing LR
        FACTOR: 0.1 # Factor by which LR is reduced (new_lr = old_lr * factor)
      # COSINE: # CosineAnnealingLR scheduler (cosine annealing schedule)
      #   N_PERIODS: 5 # Number of cosine periods over training
      STEPWISE: # StepLR scheduler (reduces LR at fixed step intervals)
        N_STEPS: 4 # Number of times to reduce LR during training
        GAMMA: 0.5 # Factor by which LR is reduced at each step (new_lr = old_lr * gamma)
      # EXPONENTIAL: # ExponentialLR scheduler (exponential decay)
      #   GAMMA: 0.5 # Multiplicative factor for exponential decay

  SWITCH_OPTIMIZER_EPOCH:
    value: null # Epoch number to switch from bootstrap to refining optimizer (null = no switch)
  OPTIMIZER_BOOTSTRAP_NAME:
    value: "AdamW" # Optimizer name for initial training phase ("Adam", "SGD", etc.)
  OPTIMIZER_REFINING_NAME:
    value: "AdamW" # Optimizer name for refining phase (used after SWITCH_OPTIMIZER_EPOCH)
  EARLY_STOPPING_PATIENCE:
    value: 10 # Number of epochs without improvement before stopping training
  SAVE_INTERVAL:
    value: 1000 # Number of training steps between model checkpoints

  DATASET_HIGHLIGHT_SUPERVISION_THRESHOLD:
    value: 0.1 # Pixels whose highlight intensity exceeds this (deliberately low) threshold are excluded from supervision

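WARMUP, GRADIENT_ACCUMULATION_STEPS and GRADIENT_CLIPPING_MAX_NORM combine in the inner training loop roughly as follows; a sketch under the assumption of per-step linear warmup scaling (not the repository's exact loop):

import torch

model = torch.nn.Linear(8, 1)
opt = torch.optim.AdamW(model.parameters(), lr=1.0e-3)
accum, max_norm, warmup = 1, 8, 200

for step, batch in enumerate(torch.randn(400, 4, 8).unbind(0)):
    loss = model(batch).pow(2).mean() / accum  # scale loss for accumulation
    loss.backward()
    if (step + 1) % accum == 0:
        if max_norm > 0:  # GRADIENT_CLIPPING_MAX_NORM: -1 disables clipping
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        # Linear warmup: ramp LR from 0 to the base LR over the first WARMUP steps
        for g in opt.param_groups:
            g["lr"] = 1.0e-3 * min(1.0, (step + 1) / warmup)
        opt.step()
        opt.zero_grad()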
-
### LOSS WEIGHTS (relative to the total loss, NOT NORMALIZED LATER)
|
| 224 |
-
SPECULAR_LOSS_WEIGHT:
|
| 225 |
-
value: 0.0 # Weight for specular component reconstruction loss
|
| 226 |
-
DIFFUSE_LOSS_WEIGHT:
|
| 227 |
-
value: 0.0 # Weight for diffuse component reconstruction loss
|
| 228 |
-
HIGHLIGHT_LOSS_WEIGHT:
|
| 229 |
-
value: 0.0 # Weight for highlight mask regression loss
|
| 230 |
-
IMAGE_RECONSTRUCTION_LOSS_WEIGHT:
|
| 231 |
-
value: 0.0 # Weight for full image reconstruction loss
|
| 232 |
-
SATURATION_RING_LOSS_WEIGHT:
|
| 233 |
-
value: 0.0 # Weight for saturation ring consistency loss (around highlight regions)
|
| 234 |
-
RING_KERNEL_SIZE:
|
| 235 |
-
value: 11 # Kernel size (odd number) for saturation ring dilation around highlights
|
| 236 |
-
RING_VAR_WEIGHT:
|
| 237 |
-
value: 0.5 # Weight for variance matching in saturation ring loss (vs mean matching)
|
| 238 |
-
RING_TEXTURE_WEIGHT:
|
| 239 |
-
value: 1.0 # Weight for texture consistency term in saturation ring loss
|
| 240 |
-
HLREG_W_L1:
|
| 241 |
-
value: 1.0 # Weight for L1 loss in highlight regression
|
| 242 |
-
HLREG_USE_CHARB:
|
| 243 |
-
value: True # Use Charbonnier loss (smooth L1) instead of standard L1 if True
|
| 244 |
-
HLREG_W_DICE:
|
| 245 |
-
value: 0.2 # Weight for Dice loss in highlight regression (for mask overlap)
|
| 246 |
-
HLREG_W_SSIM:
|
| 247 |
-
value: 0.0 # Weight for SSIM loss in highlight regression
|
| 248 |
-
HLREG_W_GRAD:
|
| 249 |
-
value: 0.0 # Weight for gradient loss in highlight regression
|
| 250 |
-
HLREG_W_TV:
|
| 251 |
-
value: 0.0 # Weight for total variation loss in highlight regression
|
| 252 |
-
HLREG_BALANCE_MODE:
|
| 253 |
-
value: "auto" # Class balancing mode for highlight regression: 'none' | 'auto' | 'pos_weight'
|
| 254 |
-
HLREG_POS_WEIGHT:
|
| 255 |
-
value: 1.0 # Positive class weight (used only if BALANCE_MODE == 'pos_weight')
|
| 256 |
-
HLREG_FOCAL_GAMMA:
|
| 257 |
-
value: 2.0 # Focal loss gamma parameter (0.0 = standard BCE, 1.0-2.0 helps with gradient vanishing)
|
| 258 |
-
|
| 259 |
-
WEIGHT_TOKEN_INPAINT:
|
| 260 |
-
value: 1.0 # Weight for token-space inpainting loss (L1 + cosine similarity in feature space)
|
| 261 |
-
WEIGHT_CONTEXT_IDENTITY:
|
| 262 |
-
value: 0.0 # LEAVE TO 0.0: Weight for L1 loss on context (non-masked) regions (identity preservation)
|
| 263 |
-
WEIGHT_TV_IN_HOLE:
|
| 264 |
-
value: 0.0 # LEAVE TO 0.0: Weight for total variation loss inside masked/hole regions
|
| 265 |
-
RING_DILATE_KERNEL:
|
| 266 |
-
value: 17 # Dilation kernel size (odd number) for creating ring mask around highlights
|
| 267 |
-
WEIGHT_SEAM:
|
| 268 |
-
value: 0.5 # Weight for gradient matching loss on saturation ring
|
| 269 |
-
SEAM_USE_CHARB:
|
| 270 |
-
value: True # Use Charbonnier loss instead of L1 in seam loss (smooth L1 for boundary consistency)
|
| 271 |
-
SEAM_WEIGHT_GRAD:
|
| 272 |
-
value: 0.0 # Weight for gradient matching term inside seam loss (0.0 = disable gradient term)
|
| 273 |
-
TOKEN_FEAT_ALPHA:
|
| 274 |
-
value: 0.5 # Mixing factor for token feature loss: alpha * L1 + (1-alpha) * (1-cosine_sim)
|
| 275 |
-
|
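HLREG_BALANCE_MODE and HLREG_FOCAL_GAMMA suggest a focal, class-balanced BCE on the highlight mask; a hedged sketch of one common formulation (the 'auto' pos_weight estimate from the batch is this sketch's assumption, not the repository's definition):

import torch
import torch.nn.functional as F

def focal_bce(logits, target, gamma=2.0, balance_mode="auto", pos_weight=1.0):
    if balance_mode == "auto":
        # Estimate the positive-class weight from the batch: neg/pos pixel ratio
        pos = target.sum().clamp(min=1.0)
        pos_weight = (target.numel() - pos) / pos
    pw = torch.as_tensor(pos_weight) if balance_mode != "none" else None
    bce = F.binary_cross_entropy_with_logits(
        logits, target, pos_weight=pw, reduction="none")
    p_t = torch.exp(-bce)                        # probability of the true class
    return ((1.0 - p_t) ** gamma * bce).mean()   # gamma=0 recovers plain BCE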
  ### DIFFUSE HIGHLIGHT PENALTY
  WEIGHT_DIFFUSE_HIGHLIGHT_PENALTY:
    value: 0.1 # Weight for penalty loss on highlights in diffuse decoder output (0.0 = disabled)
  DIFFUSE_HL_THRESHOLD:
    value: 0.8 # Brightness/luminance threshold for detecting highlights in diffuse (0.0-1.0)
  DIFFUSE_HL_USE_CHARB:
    value: True # Use Charbonnier loss instead of L1 for diffuse highlight penalty
  DIFFUSE_HL_PENALTY_MODE:
    value: "brightness" # Penalty mode: "brightness" (penalize brightness/luminance above threshold) or "pixel" (penalize RGB values directly)
  DIFFUSE_HL_TARGET_BRIGHTNESS:
    value: null # Target brightness/luminance for penalized pixels (null = use threshold value)
  DIFFUSE_HL_USE_LUMINANCE:
    value: False # If True, use perceptually-weighted luminance (0.299*R + 0.587*G + 0.114*B); if False, use simple mean brightness

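In "brightness" mode the penalty pushes diffuse-output brightness back down toward the threshold; a minimal sketch assuming Charbonnier applied to the excess above DIFFUSE_HL_THRESHOLD (this exact formulation is an assumption of the sketch):

import torch

def diffuse_highlight_penalty(diffuse, threshold=0.8, eps=1e-6):
    # diffuse: (B, 3, H, W) in [0, 1]; simple mean brightness (DIFFUSE_HL_USE_LUMINANCE: False)
    brightness = diffuse.mean(dim=1, keepdim=True)
    excess = (brightness - threshold).clamp(min=0.0)  # only pixels above the threshold
    return torch.sqrt(excess ** 2 + eps ** 2).mean()  # Charbonnier on the excess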
  ### LOGGING, RESULTS AND WANDB
  LOG_INTERVAL:
    value: 1 # Number of training steps between console log outputs
  WANDB_LOG_INTERVAL:
    value: 1 # Number of training steps between WandB metric logs
  IMAGE_LOG_INTERVAL:
    value: 5 # Number of training steps between image logging to WandB
  NO_WANDB:
    value: False # Disable WandB logging if True (useful for local debugging)
  MODEL_WATCHER_FREQ_WANDB:
    value: 50 # Frequency (in steps) for logging model parameter histograms to WandB
  WANDB_ENTITY:
    value: "unreflect-anything" # WandB organization/entity name
  WANDB_PROJECT:
    value: "UnReflectAnything" # WandB project name
  NOTES:
    value: "Final - TKI Learns, Decoder Learns faster" # Notes/description for this training run