clip-path
app.py
CHANGED
@@ -34,7 +34,7 @@ image_model_config_dict = OrderedDict({
     "ASLDM-256-obj": {
         # "config": "./configs/image_cond_diffuser_asl/image-ASLDM-256.yaml",
         # "ckpt_path": "./checkpoints/image_cond_diffuser_asl/image-ASLDM-256.ckpt",
-        "config": "
+        "config": "./configs/image_cond_diffuser_asl/image-ASLDM-256.yaml",
         "ckpt_path": "checkpoints/image_cond_diffuser_asl/image-ASLDM-256.ckpt",
     },
 })
@@ -115,15 +115,17 @@ def load_model(model_name: str, model_config_dict: dict, inference_model: Infere
 
     config_ckpt_path = model_config_dict[model_name]
 
-    raw_config_file = config_ckpt_path["config"]
-    raw_config = OmegaConf.load(raw_config_file)
-    raw_clip_ckpt_path = raw_config['model']['params']['first_stage_config']['params']['aligned_module_cfg']['params']['clip_model_version']
-
-    clip_ckpt_path = os.path.join(model_path, raw_clip_ckpt_path)
-    raw_config['model']['params']['first_stage_config']['params']['aligned_module_cfg']['params']['clip_model_version'] = clip_ckpt_path
-    raw_config['model']['params']['cond_stage_config']['params']['version'] = clip_ckpt_path
-    OmegaConf.save(raw_config, 'current_config.yaml')
-    model_config = get_config_from_file('current_config.yaml')
+    # raw_config_file = config_ckpt_path["config"]
+    # raw_config = OmegaConf.load(raw_config_file)
+    # raw_clip_ckpt_path = raw_config['model']['params']['first_stage_config']['params']['aligned_module_cfg']['params']['clip_model_version']
+
+    # clip_ckpt_path = os.path.join(model_path, raw_clip_ckpt_path)
+    # raw_config['model']['params']['first_stage_config']['params']['aligned_module_cfg']['params']['clip_model_version'] = clip_ckpt_path
+    # raw_config['model']['params']['cond_stage_config']['params']['version'] = clip_ckpt_path
+    # OmegaConf.save(raw_config, 'current_config.yaml')
+
+    # model_config = get_config_from_file('current_config.yaml')
+    model_config = get_config_from_file(config_ckpt_path["config"])
     if hasattr(model_config, "model"):
         model_config = model_config.model
 
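For reference, the block removed from load_model rewrote the YAML config at run time: it read the relative CLIP path from the config, joined it with a local model directory, patched both the shape-alignment entry and the conditioning entry, and saved the result as current_config.yaml before loading it. The sketch below is a minimal, standalone reconstruction of that pattern, not the exact app.py code: the function name rewrite_clip_paths and its signature are illustrative, and model_path stands in for a variable that app.py defines elsewhere.

# Minimal sketch (not the exact app.py code): patch the two CLIP entries in an
# OmegaConf config so they point at a local checkpoint, then save a working copy.
import os
from omegaconf import OmegaConf

def rewrite_clip_paths(config_file: str, model_path: str, out_file: str = "current_config.yaml") -> str:
    cfg = OmegaConf.load(config_file)
    # Relative CLIP path as written in the YAML, e.g. "checkpoints/clip/clip-vit-large-patch14".
    rel_clip = cfg["model"]["params"]["first_stage_config"]["params"]["aligned_module_cfg"]["params"]["clip_model_version"]
    clip_ckpt = os.path.join(model_path, rel_clip)
    cfg["model"]["params"]["first_stage_config"]["params"]["aligned_module_cfg"]["params"]["clip_model_version"] = clip_ckpt
    cfg["model"]["params"]["cond_stage_config"]["params"]["version"] = clip_ckpt
    OmegaConf.save(cfg, out_file)
    return out_file

With the YAML files now naming openai/clip-vit-large-patch14 directly (see the config changes below), this rewrite step is no longer needed and load_model can pass config_ckpt_path["config"] straight to get_config_from_file.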
configs/image_cond_diffuser_asl/image-ASLDM-256.yaml
CHANGED
@@ -24,7 +24,7 @@ model:
       target: michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
       params:
         # clip_model_version: "./checkpoints/clip/clip-vit-large-patch14"
-        clip_model_version: "
+        clip_model_version: "openai/clip-vit-large-patch14"
 
     loss_cfg:
       target: torch.nn.Identity
@@ -33,7 +33,7 @@ model:
       target: michelangelo.models.conditional_encoders.encoder_factory.FrozenCLIPImageGridEmbedder
       params:
         # version: "./checkpoints/clip/clip-vit-large-patch14"
-        version: "
+        version: "openai/clip-vit-large-patch14"
         zero_embedding_radio: 0.1
 
     first_stage_key: "surface"
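The config edits above replace a local checkpoint directory with the Hugging Face Hub id openai/clip-vit-large-patch14. This works because transformers' from_pretrained accepts either a Hub id or a local directory path, so the same clip_model_version / version string can name either. The snippet below shows the idea in isolation; it assumes the embedder classes ultimately forward the string to from_pretrained, which this diff does not show.

# Sketch only: from_pretrained resolves a Hub id ("openai/clip-vit-large-patch14")
# or a local directory ("./checkpoints/clip/clip-vit-large-patch14") the same way.
from transformers import CLIPModel, CLIPProcessor

clip_version = "openai/clip-vit-large-patch14"  # value now used in the YAML configs
model = CLIPModel.from_pretrained(clip_version)
processor = CLIPProcessor.from_pretrained(clip_version)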
configs/text_cond_diffuser_asl/text-ASLDM-256.yaml
CHANGED
@@ -23,7 +23,7 @@ model:
     aligned_module_cfg:
       target: michelangelo.models.tsal.clip_asl_module.CLIPAlignedShapeAsLatentModule
       params:
-        clip_model_version: "
+        clip_model_version: "openai/clip-vit-large-patch14"
 
     loss_cfg:
       target: torch.nn.Identity
@@ -31,7 +31,7 @@ model:
     cond_stage_config:
       target: michelangelo.models.conditional_encoders.encoder_factory.FrozenAlignedCLIPTextEmbedder
       params:
-        version: "
+        version: "openai/clip-vit-large-patch14"
         zero_embedding_radio: 0.1
         max_length: 77
 
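On the text side, cond_stage_config points the FrozenAlignedCLIPTextEmbedder at the same Hub id, with max_length: 77 left unchanged. A hedged sketch of what that setting implies, assuming the embedder wraps the standard Hugging Face CLIP text tower (not the embedder's actual implementation): prompts are tokenized and padded to 77 tokens, then encoded with a frozen CLIP ViT-L/14 text model.

# Sketch under the assumption above; illustrative prompt text only.
import torch
from transformers import CLIPTokenizer, CLIPTextModel

version = "openai/clip-vit-large-patch14"
tokenizer = CLIPTokenizer.from_pretrained(version)
text_model = CLIPTextModel.from_pretrained(version).eval()  # kept frozen

tokens = tokenizer(
    ["a 3D model of a wooden chair"],
    padding="max_length", truncation=True, max_length=77, return_tensors="pt",
)
with torch.no_grad():
    # Token-level features, shape (1, 77, 768) for ViT-L/14.
    text_features = text_model(**tokens).last_hidden_state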