Spaces:
Running
on
Zero
Running
on
Zero
parokshsaxena
committed on
Commit
•
91c7a78
1
Parent(s):
4209785
Commenting out code for enhanced garment net generated from Claude, as it was failing the flow
Browse files- app.py +5 -4
- src/tryon_pipeline.py +2 -2
app.py
CHANGED
@@ -53,8 +53,9 @@ unet = UNet2DConditionModel.from_pretrained(
|
|
53 |
)
|
54 |
unet.requires_grad_(False)
|
55 |
|
56 |
-
|
57 |
-
enhancedGarmentNet
|
|
|
58 |
|
59 |
tokenizer_one = AutoTokenizer.from_pretrained(
|
60 |
base_path,
|
@@ -128,7 +129,7 @@ pipe = TryonPipeline.from_pretrained(
|
|
128 |
torch_dtype=torch.float16,
|
129 |
)
|
130 |
pipe.unet_encoder = UNet_Encoder
|
131 |
-
pipe.garment_net = enhancedGarmentNet
|
132 |
|
133 |
# Standard size of shein images
|
134 |
#WIDTH = int(4160/5)
|
@@ -159,7 +160,7 @@ def start_tryon(human_img_dict,garm_img,garment_des, background_img, is_checked,
|
|
159 |
openpose_model.preprocessor.body_estimation.model.to(device)
|
160 |
pipe.to(device)
|
161 |
pipe.unet_encoder.to(device)
|
162 |
-
pipe.garment_net.to(device)
|
163 |
|
164 |
human_img_orig = human_img_dict["background"].convert("RGB") # ImageEditor
|
165 |
#human_img_orig = human_img_dict.convert("RGB") # Image
|
|
|
53 |
)
|
54 |
unet.requires_grad_(False)
|
55 |
|
56 |
+
# This is suggestion from Claude for enhanced garment net
|
57 |
+
#enhancedGarmentNet = EnhancedGarmentNetWithTimestep()
|
58 |
+
#enhancedGarmentNet.to(dtype=torch.float16)
|
59 |
|
60 |
tokenizer_one = AutoTokenizer.from_pretrained(
|
61 |
base_path,
|
|
|
129 |
torch_dtype=torch.float16,
|
130 |
)
|
131 |
pipe.unet_encoder = UNet_Encoder
|
132 |
+
# pipe.garment_net = enhancedGarmentNet
|
133 |
|
134 |
# Standard size of shein images
|
135 |
#WIDTH = int(4160/5)
|
|
|
160 |
openpose_model.preprocessor.body_estimation.model.to(device)
|
161 |
pipe.to(device)
|
162 |
pipe.unet_encoder.to(device)
|
163 |
+
# pipe.garment_net.to(device)
|
164 |
|
165 |
human_img_orig = human_img_dict["background"].convert("RGB") # ImageEditor
|
166 |
#human_img_orig = human_img_dict.convert("RGB") # Image
|
src/tryon_pipeline.py
CHANGED
@@ -1789,8 +1789,8 @@ class StableDiffusionXLInpaintPipeline(
|
|
1789 |
added_cond_kwargs["image_embeds"] = image_embeds
|
1790 |
print("Calling unet encoder for garment feature extraction")
|
1791 |
# down,reference_features = self.UNet_Encoder(cloth,t, text_embeds_cloth,added_cond_kwargs= {"text_embeds": pooled_prompt_embeds_c, "time_ids": add_time_ids},return_dict=False)
|
1792 |
-
|
1793 |
-
garment_out, reference_features = self.garment_net(cloth, t, text_embeds_cloth)
|
1794 |
print(type(reference_features))
|
1795 |
print(reference_features)
|
1796 |
reference_features = list(reference_features)
|
|
|
1789 |
added_cond_kwargs["image_embeds"] = image_embeds
|
1790 |
print("Calling unet encoder for garment feature extraction")
|
1791 |
# down,reference_features = self.UNet_Encoder(cloth,t, text_embeds_cloth,added_cond_kwargs= {"text_embeds": pooled_prompt_embeds_c, "time_ids": add_time_ids},return_dict=False)
|
1792 |
+
down,reference_features = self.unet_encoder(cloth,t, text_embeds_cloth,return_dict=False)
|
1793 |
+
# garment_out, reference_features = self.garment_net(cloth, t, text_embeds_cloth)
|
1794 |
print(type(reference_features))
|
1795 |
print(reference_features)
|
1796 |
reference_features = list(reference_features)
|