Spaces:
Paused
Paused
Update run/gradio_ootd.py
Browse files
run/gradio_ootd.py +14 -9
run/gradio_ootd.py
CHANGED
@@ -1,5 +1,4 @@
|
|
1 |
from flask import Flask, request, jsonify
|
2 |
-
import os
|
3 |
import torch
|
4 |
from PIL import Image, ImageOps
|
5 |
|
@@ -11,6 +10,7 @@ from ootd.inference_ootd_dc import OOTDiffusionDC
|
|
11 |
|
12 |
app = Flask(__name__)
|
13 |
|
|
|
14 |
openpose_model_hd = OpenPose(0)
|
15 |
parsing_model_hd = Parsing(0)
|
16 |
ootd_model_hd = OOTDiffusionHD(0)
|
@@ -19,6 +19,9 @@ openpose_model_dc = OpenPose(1)
|
|
19 |
parsing_model_dc = Parsing(1)
|
20 |
ootd_model_dc = OOTDiffusionDC(1)
|
21 |
|
|
|
|
|
|
|
22 |
category_dict = ['upperbody', 'lowerbody', 'dress']
|
23 |
category_dict_utils = ['upper_body', 'lower_body', 'dresses']
|
24 |
|
@@ -35,11 +38,12 @@ def process_hd():
|
|
35 |
model_type = 'hd'
|
36 |
category = 0 # 0:upperbody; 1:lowerbody; 2:dress
|
37 |
|
|
|
38 |
with torch.no_grad():
|
39 |
-
openpose_model_hd.preprocessor.body_estimation.model.to('cuda')
|
40 |
-
ootd_model_hd.pipe.to('cuda')
|
41 |
-
ootd_model_hd.image_encoder.to('cuda')
|
42 |
-
ootd_model_hd.text_encoder.to('cuda')
|
43 |
|
44 |
garm_img = Image.open(garm_img).resize((768, 1024))
|
45 |
vton_img = Image.open(vton_img).resize((768, 1024))
|
@@ -86,11 +90,12 @@ def process_dc():
|
|
86 |
else:
|
87 |
category = 2
|
88 |
|
|
|
89 |
with torch.no_grad():
|
90 |
-
openpose_model_dc.preprocessor.body_estimation.model.to('cuda')
|
91 |
-
ootd_model_dc.pipe.to('cuda')
|
92 |
-
ootd_model_dc.image_encoder.to('cuda')
|
93 |
-
ootd_model_dc.text_encoder.to('cuda')
|
94 |
|
95 |
garm_img = Image.open(garm_img).resize((768, 1024))
|
96 |
vton_img = Image.open(vton_img).resize((768, 1024))
|
|
|
1 |
from flask import Flask, request, jsonify
|
|
|
2 |
import torch
|
3 |
from PIL import Image, ImageOps
|
4 |
|
|
|
10 |
|
11 |
app = Flask(__name__)
|
12 |
|
13 |
+
# Charger les modèles une seule fois au démarrage de l'application
|
14 |
openpose_model_hd = OpenPose(0)
|
15 |
parsing_model_hd = Parsing(0)
|
16 |
ootd_model_hd = OOTDiffusionHD(0)
|
|
|
19 |
parsing_model_dc = Parsing(1)
|
20 |
ootd_model_dc = OOTDiffusionDC(1)
|
21 |
|
22 |
+
# Définir la configuration GPU
|
23 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
24 |
+
|
25 |
category_dict = ['upperbody', 'lowerbody', 'dress']
|
26 |
category_dict_utils = ['upper_body', 'lower_body', 'dresses']
|
27 |
|
|
|
38 |
model_type = 'hd'
|
39 |
category = 0 # 0:upperbody; 1:lowerbody; 2:dress
|
40 |
|
41 |
+
# Charger les modèles en mémoire GPU
|
42 |
with torch.no_grad():
|
43 |
+
openpose_model_hd.preprocessor.body_estimation.model.to(device)
|
44 |
+
ootd_model_hd.pipe.to(device)
|
45 |
+
ootd_model_hd.image_encoder.to(device)
|
46 |
+
ootd_model_hd.text_encoder.to(device)
|
47 |
|
48 |
garm_img = Image.open(garm_img).resize((768, 1024))
|
49 |
vton_img = Image.open(vton_img).resize((768, 1024))
|
|
|
90 |
else:
|
91 |
category = 2
|
92 |
|
93 |
+
# Charger les modèles en mémoire GPU
|
94 |
with torch.no_grad():
|
95 |
+
openpose_model_dc.preprocessor.body_estimation.model.to(device)
|
96 |
+
ootd_model_dc.pipe.to(device)
|
97 |
+
ootd_model_dc.image_encoder.to(device)
|
98 |
+
ootd_model_dc.text_encoder.to(device)
|
99 |
|
100 |
garm_img = Image.open(garm_img).resize((768, 1024))
|
101 |
vton_img = Image.open(vton_img).resize((768, 1024))
|