baseline / config.json
{"model": {"_target_": "models.networks.network.ContrastiveHybridUnFrozenBackbone", "mode": "eval", "backbone": {"instance": {"_target_": "models.networks.backbones.CLIP", "path": "laion/CLIP-ViT-L-14-DataComp.XL-s13B-b90K"}, "output_dim": 1024}, "mid": {"activation": {"_target_": "torch.nn.GELU", "_partial_": true}, "norm": {"_target_": "torch.nn.GroupNorm", "_partial_": true}, "instance": {"_target_": "models.networks.mlp.MLPCentroid", "initial_dim": 1024, "hidden_dim": [1024, 512], "final_dim": 34197, "norm": {"_target_": "torch.nn.GroupNorm", "_partial_": true}, "activation": {"_target_": "torch.nn.GELU", "_partial_": true}}}, "head": {"target_key": "label", "final_dim": 34197, "instance": {"_target_": "models.networks.heads.hybrid.HybridHeadCentroid", "final_dim": 11399, "use_tanh": true, "scale_tanh": 1.2, "quadtree_path": "utils/quadtree_10_1000.csv"}}}, "transform": {"_target_": "torchvision.transforms.Compose", "transforms": [{"_target_": "torchvision.transforms.Resize", "size": 224, "interpolation": 3, "antialias": true}, {"_target_": "torchvision.transforms.CenterCrop", "size": 224}, {"_target_": "torchvision.transforms.ToTensor"}, {"_target_": "torchvision.transforms.Normalize", "mean": [0.48145466, 0.4578275, 0.40821073], "std": [0.26862954, 0.26130258, 0.27577711]}]}}