sidharthism committed
Commit e38903d
1 Parent(s): a05ef67

Updates - app.py

Files changed (1)
  1. app.py +126 -158
app.py CHANGED
@@ -7,23 +7,10 @@ Automatically generated by Colaboratory.
  # !rm -rf sample_data
  # !rm -rf fashion-eye-try-on/
 
- import sys
- from threading import Thread
- import gradio as gr
- import torch
- from collections import OrderedDict
- from PIL import Image
- import torch.nn.functional as F
- import torchvision.transforms as transforms
- from cloth_segmentation.networks import U2NET
- import gdown
- from os.path import exists, join, basename, splitext
- import subprocess
- import os
  BASE_DIR = "/home/user/app/fashion-eye-try-on"
 
- os.system(
-     f"git clone https://huggingface.co/spaces/sidharthism/fashion-eye-try-on {BASE_DIR}")
 
  # !pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
  # !pip install -r /content/fashion-eye-try-on/requirements.txt
@@ -34,31 +21,22 @@ os.system(f"cd {BASE_DIR}")
 
  # Download and save checkpoints for cloth mask generation
  os.system(f"rm -rf {BASE_DIR}/cloth_segmentation/checkpoints/")
- os.system(
-     f"gdown --id 1mhF3yqd7R-Uje092eypktNl-RoZNuiCJ -O {BASE_DIR}/cloth_segmentation/checkpoints/")
-
- os.system(
-     f"git clone https://github.com/shadow2496/VITON-HD {BASE_DIR}/VITON-HD")
-
- # checkpoints
- os.system(
-     f"gdown 1RM4OthSM6V4r7kWCu8SbPIPY14Oz8B2u -O {BASE_DIR}/VITON-HD/checkpoints/alias_final.pth")
- os.system(
-     f"gdown 1MBHBddaAs7sy8W40jzLmNL83AUh035F1 -O {BASE_DIR}/VITON-HD/checkpoints/gmm_final.pth")
- os.system(
-     f"gdown 1MBHBddaAs7sy8W40jzLmNL83AUh035F1 -O {BASE_DIR}/VITON-HD/checkpoints/gmm_final.pth")
- os.system(
-     f"gdown 17U1sooR3mVIbe8a7rZuFIF3kukPchHfZ -O {BASE_DIR}/VITON-HD/checkpoints/seg_final.pth")
- # test data
- os.system(
-     f"gdown 1ncEHn_6liOot8sgt3A2DOFJBffvx8tW8 -O {BASE_DIR}/VITON-HD/datasets/test_pairs.txt")
- os.system(
-     f"gdown 1ZA2C8yMOprwc0TV4hvrt0X-ljZugrClq -O {BASE_DIR}/VITON-HD/datasets/test.zip")
-
- os.system(
-     f"unzip {BASE_DIR}/VITON-HD/datasets/test.zip -d {BASE_DIR}/VITON-HD/datasets/")
-
- # @title To clear all the already existing test data
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/image
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/image-parse
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/cloth
@@ -66,10 +44,13 @@ os.system(
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/openpose-img
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/openpose-json
 
- """Paddle"""
 
- os.system(
-     f"git clone https://huggingface.co/spaces/sidharthism/pipeline_paddle {BASE_DIR}/pipeline_paddle")
 
  # Required for paddle and gradio (Jinja2 dependency)
  os.system("pip install paddlepaddle-gpu pymatting")
@@ -77,11 +58,10 @@ os.system(f"pip install -r {BASE_DIR}/pipeline_paddle/requirements.txt")
 
  os.system(f"rm -rf {BASE_DIR}/pipeline_paddle/models")
  if not os.path.exists(f"{BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams"):
-     if not os.path.exists(f"{BASE_DIR}/pipeline_paddle/models"):
-         os.mkdir(f"{BASE_DIR}/pipeline_paddle/models")
-     os.system(
-         f"wget https://paddleseg.bj.bcebos.com/matting/models/ppmatting-hrnet_w18-human_1024.pdparams -O {BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams")
- # !wget "https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz" -O "/content/fashion-eye-try-on/pipeline_paddle/models/hrnet_w18_ssld.tar.gz"
 
  """Initialization
 
@@ -91,44 +71,44 @@ Pose estimator - open pose
  # Clone openpose model repo
  # os.system(f"git clone https://github.com/CMU-Perceptual-Computing-Lab/openpose.git {BASE_DIR}/openpose")
 
 
- # @ Building and Installation of openpose model
 
 
  project_name = f"{BASE_DIR}/openpose"
  print(project_name)
  if not exists(project_name):
-     # see: https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/949
-     # install new CMake becaue of CUDA10
-     os.system(
-         f"wget -q https://cmake.org/files/v3.13/cmake-3.13.0-Linux-x86_64.tar.gz")
-     os.system(
-         f"tar xfz cmake-3.13.0-Linux-x86_64.tar.gz --strip-components=1 -C /usr/local")
-     # clone openpose
-     os.system(
-         f"cd {BASE_DIR} && git clone -q --depth 1 https://github.com/CMU-Perceptual-Computing-Lab/openpose.git")
-     os.system(
-         "sed -i 's/execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/execute_process(COMMAND git checkout f019d0dfe86f49d1140961f8c7dec22130c83154 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/g' %s/openpose/CMakeLists.txt" % (BASE_DIR, ))
-     # install system dependencies
-     os.system("apt-get -qq install -y libatlas-base-dev libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev libgoogle-glog-dev liblmdb-dev opencl-headers ocl-icd-opencl-dev libviennacl-dev")
-     # build openpose
-     print("Building openpose ... May take nearly 15 mins to build ...")
-     os.system(f"cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`")
-     print("Openpose successfully build and installed.")
-     # subprocess.Popen(f"cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`")
-     # subprocess.call(["cd", f"{BASE_DIR}/openpose"])
-     # subprocess.check_output(["rm", "-rf", f"{BASE_DIR}/openpose/build || true"])
-     # subprocess.check_output(["mkdir", f"{BASE_DIR}/openpose/build"])
-     # subprocess.check_output(["cd", f"{BASE_DIR}/openpose/build"])
-     # subprocess.check_output(["cmake", ".."])
-     # subprocess.check_output(["make","-j`nproc`"])
 
  # !cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`
 
  """Self correction human parsing"""
 
- os.system(
-     f"git clone https://github.com/PeikeLi/Self-Correction-Human-Parsing.git {BASE_DIR}/human_parse")
 
  os.system(f"cd {BASE_DIR}/human_parse")
  os.system(f"mkdir {BASE_DIR}/human_parse/checkpoints")
@@ -137,6 +117,7 @@ os.system(f"mkdir {BASE_DIR}/human_parse/checkpoints")
 
  dataset = 'lip'
 
 
  dataset_url = 'https://drive.google.com/uc?id=1k4dllHpu0bdx38J7H28rVVLpU-kOHmnH'
  output = f'{BASE_DIR}/human_parse/checkpoints/final.pth'
@@ -145,19 +126,19 @@ gdown.download(dataset_url, output, quiet=False)
  # For human parse
  os.system("pip install ninja")
 
- """Preprocessing"""
 
- # png to jpg
 
 
  def convert_to_jpg(path):
      from PIL import Image
      import os
      if os.path.exists(path):
-         cl = Image.open(path)
-         jpg_path = path[:-4] + ".jpg"
-         cl.save(jpg_path)
-
 
  def resize_img(path):
      from PIL import Image
@@ -166,19 +147,13 @@ def resize_img(path):
      im = im.resize((768, 1024), Image.BICUBIC)
      im.save(path)
 
-
  def remove_ipynb_checkpoints():
      import os
-     os.system(
-         f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/image/.ipynb_checkpoints")
-     os.system(
-         f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/cloth/.ipynb_checkpoints")
-     os.system(
-         f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/cloth-mask/.ipynb_checkpoints")
 
  # os.chdir('/content/fashion-eye-try-on')
-
-
  def preprocess():
      remove_ipynb_checkpoints()
      for path in os.listdir(f'{BASE_DIR}/VITON-HD/datasets/test/image/'):
@@ -188,13 +163,12 @@ def preprocess():
      # for path in os.listdir('/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask/'):
      #     resize_img(f'/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask/{path}')
 
- """Paddle - removing background"""
 
  # PPMatting hrnet 1024
  # --fg_estimate True - for higher quality output but slower prediction
-
-
  def upload_remove_background_and_save_person_image(person_img):
      # !export CUDA_VISIBLE_DEVICES=0
      person_img = person_img.resize((768, 1024), Image.BICUBIC)
@@ -210,7 +184,7 @@ def upload_remove_background_and_save_person_image(person_img):
      --background 'w' \
      --save_dir {BASE_DIR}/VITON-HD/datasets/test/image \
      --fg_estimate True")
-     # --save_dir /content/fashion-eye-try-on/pipeline_paddle/output \
      try:
          convert_to_jpg(f"{BASE_DIR}/VITON-HD/datasets/test/image/person.png")
          # os.remove("/content/fashion-eye-try-on/pipeline_paddle/output/person_alpha.png")
@@ -222,14 +196,11 @@ def upload_remove_background_and_save_person_image(person_img):
          print(e)
      os.system(f"cd {BASE_DIR}")
 
-
- # @title If multiple GPU available,uncomment and try this code
  os.system("export CUDA_VISIBLE_DEVICES=0")
 
  # Openpose pose estimation
  # Ubuntu and Mac
-
-
  def estimate_pose():
      os.system(f"cd {BASE_DIR}/openpose && ./build/examples/openpose/openpose.bin --image_dir {BASE_DIR}/VITON-HD/datasets/test/image --write_json {BASE_DIR}/VITON-HD/datasets/test/openpose-json/ --display 0 --face --hand --render_pose 0")
      os.system(f"cd {BASE_DIR}/openpose && ./build/examples/openpose/openpose.bin --image_dir {BASE_DIR}/VITON-HD/datasets/test/image --write_images {BASE_DIR}/VITON-HD/datasets/test/openpose-img/ --display 0 --hand --render_pose 1 --disable_blending true")
@@ -238,8 +209,6 @@ def estimate_pose():
 
  # Run self correction human parser
  # !python3 /content/fashion-eye-try-on/human_parse/simple_extractor.py --dataset 'lip' --model-restore '/content/fashion-eye-try-on/human_parse/checkpoints/final.pth' --input-dir '/content/fashion-eye-try-on/image' --output-dir '/content/fashion-eye-try-on/VITON-HD/datasets/test/image-parse'
-
-
  def generate_human_segmentation_map():
      # remove_ipynb_checkpoints()
      os.system(f"python3 {BASE_DIR}/human_parse/simple_extractor.py --dataset 'lip' --model-restore '{BASE_DIR}/human_parse/checkpoints/final.pth' --input-dir '{BASE_DIR}/VITON-HD/datasets/test/image' --output-dir '{BASE_DIR}/VITON-HD/datasets/test/image-parse'")
@@ -251,25 +220,29 @@ def generate_human_segmentation_map():
  # with open('/content/fashion-eye-try-on/VITON-HD/datasets/test_pairs.txt', 'w') as file:
  #     for model, cloth in pairs:
  #         file.write(f"{model} {cloth}\n")
-
-
  def generate_test_pairs_txt():
      with open(f"{BASE_DIR}/VITON-HD/datasets/test_pairs.txt", 'w') as file:
          file.write(f"person.jpg cloth.jpg\n")
 
  # VITON-HD
  # Transfer the cloth to the model
-
-
  def generate_viton_hd():
      os.system(f"python {BASE_DIR}/VITON-HD/test.py --name output --dataset_list {BASE_DIR}/VITON-HD/datasets/test_pairs.txt --dataset_dir {BASE_DIR}/VITON-HD/datasets/ --checkpoint_dir {BASE_DIR}/VITON-HD/checkpoints --save_dir {BASE_DIR}/")
 
-
  # To resolve ModuleNotFoundError during imports
  if BASE_DIR not in sys.path:
      sys.path.append(BASE_DIR)
      sys.path.append(f"{BASE_DIR}/cloth_segmentation")
 
  device = 'cuda' if torch.cuda.is_available() else "cpu"
 
@@ -405,78 +378,73 @@ def generate_cloth_mask(img_dir, output_dir, chkpt_dir):
      output_img = output_img.convert('L')
      output_img.save(os.path.join(result_dir, image_name[:-4]+'.jpg'))
 
-
  os.system(f"cd {BASE_DIR}")
-
-
  def upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs(cloth_img):
      os.system(f"cd {BASE_DIR}")
      cloth_img = cloth_img.resize((768, 1024), Image.BICUBIC)
      cloth_img.save(f"{BASE_DIR}/cloth/cloth.jpg")
      cloth_img.save(f"{BASE_DIR}/VITON-HD/datasets/test/cloth/cloth.jpg")
      try:
-         generate_cloth_mask(f"{BASE_DIR}/cloth", f"{BASE_DIR}/cloth_mask",
-                             f"{BASE_DIR}/cloth_segmentation/checkpoints/cloth_segm_u2net_latest.pth")
          cloth_mask_img = Image.open(f"{BASE_DIR}/cloth_mask/cloth.jpg")
-         cloth_mask_img.save(
-             f"{BASE_DIR}/VITON-HD/datasets/test/cloth-mask/cloth.jpg")
      except Exception as e:
-         print(e)
-
 
  # Gradio
  os.system("pip install gradio")
 
  # import cv2
- IMAGEPATH = '/content/fashion-eye-try-on/VITON-HD/datasets/test/image'
- CLOTHPATH = '/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth'
- CLOTHMASKPATH = '/content/fashion-eye-try-on/VITON-HD/datasets/test/image'
 
 
  def fashion_eye_tryon(person_img, cloth_img):
-     result_img = person_img
-     # img.save(IMAGEPATH + "person.jpg")
-     # dress.save(CLOTHPATH + "cloth.jpg")
-
-     # txt = open("/content/VITON-HD/datasets/test_pairs.txt", "a")
-     # txt.write("person_img.jpg dress_img.jpg\n")
-     # txt.close()
-     # # result
-     # print(person_img.info, cloth_img.info)
-     # p_t1 = Thread(target=upload_remove_background_and_save_person_image, args=(person_img, ))
-     # c_t2 = Thread(target=upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs, args=(cloth_img, ))
-     # p_t1.start()
-     # c_t2.start()
-     # p_t1.join()
-     # c_t2.join()
-     # Estimate pose
-     try:
-         upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs(
-             cloth_img)
-         upload_remove_background_and_save_person_image(person_img)
-         remove_ipynb_checkpoints()
-         estimate_pose()
-         # Generate human parse
-         remove_ipynb_checkpoints()
-         generate_human_segmentation_map()
-         generate_test_pairs_txt()
-         remove_ipynb_checkpoints()
-         generate_viton_hd()
-         for p in ["/content/fashion-eye-try-on/output/person_cloth.jpg", "/content/fashion-eye-try-on/output/person.jpg_cloth.jpg"]:
-             if os.path.exists(p):
-                 result_img = Image.open(p)
-     except Exception as e:
-         print(e)
-         return
-     return result_img
-
 
  # res = fashion_eye_tryon("", "")
  # res.show()
- gr.Interface(fn=fashion_eye_tryon,
-              inputs=[gr.Image(type="pil", label="Your image"),
-                      gr.Image(type="pil", label="Dress")],
  outputs="image"
-              ).launch(debug=True, inbrowser=True, share=True)
 
- # !pip freeze > /content/requirements_final.txt
  # !rm -rf sample_data
  # !rm -rf fashion-eye-try-on/
 
  BASE_DIR = "/home/user/app/fashion-eye-try-on"
 
+ import os
+ os.system(f"git clone https://huggingface.co/spaces/sidharthism/fashion-eye-try-on {BASE_DIR}")
 
  # !pip install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
  # !pip install -r /content/fashion-eye-try-on/requirements.txt
 
  # Download and save checkpoints for cloth mask generation
  os.system(f"rm -rf {BASE_DIR}/cloth_segmentation/checkpoints/")
+ os.system(f"gdown --id 1mhF3yqd7R-Uje092eypktNl-RoZNuiCJ -O {BASE_DIR}/cloth_segmentation/checkpoints/")
+
+ os.system(f"git clone https://github.com/shadow2496/VITON-HD {BASE_DIR}/VITON-HD")
+
+ #checkpoints
+ os.system(f"gdown 1RM4OthSM6V4r7kWCu8SbPIPY14Oz8B2u -O {BASE_DIR}/VITON-HD/checkpoints/alias_final.pth")
+ os.system(f"gdown 1MBHBddaAs7sy8W40jzLmNL83AUh035F1 -O {BASE_DIR}/VITON-HD/checkpoints/gmm_final.pth")
+ os.system(f"gdown 1MBHBddaAs7sy8W40jzLmNL83AUh035F1 -O {BASE_DIR}/VITON-HD/checkpoints/gmm_final.pth")
+ os.system(f"gdown 17U1sooR3mVIbe8a7rZuFIF3kukPchHfZ -O {BASE_DIR}/VITON-HD/checkpoints/seg_final.pth")
+ #test data
+ os.system(f"gdown 1ncEHn_6liOot8sgt3A2DOFJBffvx8tW8 -O {BASE_DIR}/VITON-HD/datasets/test_pairs.txt")
+ os.system(f"gdown 1ZA2C8yMOprwc0TV4hvrt0X-ljZugrClq -O {BASE_DIR}/VITON-HD/datasets/test.zip")
+
+ os.system(f"unzip {BASE_DIR}/VITON-HD/datasets/test.zip -d {BASE_DIR}/VITON-HD/datasets/")
+
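The gdown calls above re-fetch every checkpoint on each startup (and gmm_final.pth is downloaded twice). A minimal sketch of a guard that skips files already on disk, using the same gdown package the script installs; the helper name fetch_once and its layout are illustrative, not part of this commit:

import os
import gdown

def fetch_once(file_id, dest):
    # Download a Google Drive file only if dest does not exist yet.
    if not os.path.exists(dest):
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        gdown.download(f"https://drive.google.com/uc?id={file_id}", dest, quiet=False)
    return dest

# e.g. fetch_once("1RM4OthSM6V4r7kWCu8SbPIPY14Oz8B2u",
#                 f"{BASE_DIR}/VITON-HD/checkpoints/alias_final.pth")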
+ #@title To clear all the already existing test data
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/image
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/image-parse
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/cloth
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/openpose-img
  # !rm -rf /content/fashion-eye-try-on/VITON-HD/datasets/test/openpose-json
 
+ """Paddle
+
+ """
+
+ os.system(f"git clone https://huggingface.co/spaces/sidharthism/pipeline_paddle {BASE_DIR}/pipeline_paddle")
 
  # Required for paddle and gradio (Jinja2 dependency)
  os.system("pip install paddlepaddle-gpu pymatting")
 
  os.system(f"rm -rf {BASE_DIR}/pipeline_paddle/models")
  if not os.path.exists(f"{BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams"):
+     if not os.path.exists(f"{BASE_DIR}/pipeline_paddle/models"):
+         os.mkdir(f"{BASE_DIR}/pipeline_paddle/models")
+     os.system(f"wget https://paddleseg.bj.bcebos.com/matting/models/ppmatting-hrnet_w18-human_1024.pdparams -O {BASE_DIR}/pipeline_paddle/models/ppmatting-hrnet_w18-human_1024.pdparams")
+ # !wget "https://bj.bcebos.com/paddleseg/dygraph/hrnet_w18_ssld.tar.gz" -O "/content/fashion-eye-try-on/pipeline_paddle/models/hrnet_w18_ssld.tar.gz"
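The nested exists()/mkdir() check above can be collapsed with the standard library; a one-line sketch (same effect, also creates missing parent directories and is safe to re-run):

os.makedirs(f"{BASE_DIR}/pipeline_paddle/models", exist_ok=True)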
 
 
  """Initialization
 
  # Clone openpose model repo
  # os.system(f"git clone https://github.com/CMU-Perceptual-Computing-Lab/openpose.git {BASE_DIR}/openpose")
 
+ os.system("rm -rf /content/fashion-eye-try-on/openpose")
+ os.system("rm /content/cmake-3.13.0-Linux-x86_64.tar.gz")
 
+ #@ Building and Installation of openpose model
+ import os
+ import subprocess
+ from os.path import exists, join, basename, splitext
 
  project_name = f"{BASE_DIR}/openpose"
  print(project_name)
  if not exists(project_name):
+     # see: https://github.com/CMU-Perceptual-Computing-Lab/openpose/issues/949
+     # install new CMake because of CUDA10
+     os.system(f"wget -q https://cmake.org/files/v3.13/cmake-3.13.0-Linux-x86_64.tar.gz")
+     os.system(f"tar xfz cmake-3.13.0-Linux-x86_64.tar.gz --strip-components=1 -C /usr/local")
+     # clone openpose
+     os.system(f"cd {BASE_DIR} && git clone -q --depth 1 https://github.com/CMU-Perceptual-Computing-Lab/openpose.git")
+     os.system("sed -i 's/execute_process(COMMAND git checkout master WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/execute_process(COMMAND git checkout f019d0dfe86f49d1140961f8c7dec22130c83154 WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}\/3rdparty\/caffe)/g' %s/openpose/CMakeLists.txt" % (BASE_DIR, ))
+     # install system dependencies
+     os.system("apt-get -qq install -y libatlas-base-dev libprotobuf-dev libleveldb-dev libsnappy-dev libhdf5-serial-dev protobuf-compiler libgflags-dev libgoogle-glog-dev liblmdb-dev opencl-headers ocl-icd-opencl-dev libviennacl-dev")
+     # build openpose
+     print("Building openpose ... May take nearly 15 mins to build ...")
+     os.system(f"cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`")
+     print("Openpose successfully built and installed.")
+     # subprocess.Popen(f"cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`")
+     # subprocess.call(["cd", f"{BASE_DIR}/openpose"])
+     # subprocess.check_output(["rm", "-rf", f"{BASE_DIR}/openpose/build || true"])
+     # subprocess.check_output(["mkdir", f"{BASE_DIR}/openpose/build"])
+     # subprocess.check_output(["cd", f"{BASE_DIR}/openpose/build"])
+     # subprocess.check_output(["cmake", ".."])
+     # subprocess.check_output(["make","-j`nproc`"])
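The commented-out subprocess attempts above cannot work as written: "cd" is a shell builtin rather than an executable, and -j`nproc` is shell syntax that check_output passes to make literally. A hedged sketch of the configure-and-build step with subprocess.run, using cwd= in place of cd and check=True so a failed step raises; build_openpose is an illustrative name, not something the app defines:

import os
import subprocess

def build_openpose(base_dir):
    build_dir = os.path.join(base_dir, "openpose", "build")
    os.makedirs(build_dir, exist_ok=True)
    # Each step runs inside the build directory and fails loudly on error.
    subprocess.run(["cmake", ".."], cwd=build_dir, check=True)
    subprocess.run(["make", f"-j{os.cpu_count()}"], cwd=build_dir, check=True)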
 
 
 
 
 
  # !cd {BASE_DIR}/openpose && rm -rf {BASE_DIR}/openpose/build || true && mkdir {BASE_DIR}/openpose/build && cd {BASE_DIR}/openpose/build && cmake .. && make -j`nproc`
 
  """Self correction human parsing"""
 
+ os.system(f"git clone https://github.com/PeikeLi/Self-Correction-Human-Parsing.git {BASE_DIR}/human_parse")
 
  os.system(f"cd {BASE_DIR}/human_parse")
  os.system(f"mkdir {BASE_DIR}/human_parse/checkpoints")
 
  dataset = 'lip'
 
+ import gdown
 
  dataset_url = 'https://drive.google.com/uc?id=1k4dllHpu0bdx38J7H28rVVLpU-kOHmnH'
  output = f'{BASE_DIR}/human_parse/checkpoints/final.pth'
 
  # For human parse
  os.system("pip install ninja")
 
+ """Preprocessing
+
+ """
 
+ # png to jpg
  def convert_to_jpg(path):
      from PIL import Image
      import os
      if os.path.exists(path):
+         cl = Image.open(path)
+         jpg_path = path[:-4] + ".jpg"
+         cl.save(jpg_path)
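One caveat with convert_to_jpg: if the matting step leaves an alpha channel in person.png, Pillow raises "cannot write mode RGBA as JPEG" when saving. A defensive variant, as a sketch (assumption: the input may be RGBA; the name convert_to_jpg_rgb is illustrative):

def convert_to_jpg_rgb(path):
    from PIL import Image
    import os
    if os.path.exists(path):
        # Drop any alpha channel before writing a JPEG.
        Image.open(path).convert("RGB").save(path[:-4] + ".jpg")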
 
 
  def resize_img(path):
      from PIL import Image
      im = im.resize((768, 1024), Image.BICUBIC)
      im.save(path)
 
  def remove_ipynb_checkpoints():
      import os
+     os.system(f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/image/.ipynb_checkpoints")
+     os.system(f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/cloth/.ipynb_checkpoints")
+     os.system(f"rm -rf {BASE_DIR}/VITON-HD/datasets/test/cloth-mask/.ipynb_checkpoints")
 
  # os.chdir('/content/fashion-eye-try-on')
  def preprocess():
      remove_ipynb_checkpoints()
      for path in os.listdir(f'{BASE_DIR}/VITON-HD/datasets/test/image/'):
 
  # for path in os.listdir('/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask/'):
  #     resize_img(f'/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth-mask/{path}')
 
+ """Paddle - removing background
+
+ """
 
  # PPMatting hrnet 1024
  # --fg_estimate True - for higher quality output but slower prediction
  def upload_remove_background_and_save_person_image(person_img):
      # !export CUDA_VISIBLE_DEVICES=0
      person_img = person_img.resize((768, 1024), Image.BICUBIC)
 
      --background 'w' \
      --save_dir {BASE_DIR}/VITON-HD/datasets/test/image \
      --fg_estimate True")
+     # --save_dir /content/fashion-eye-try-on/pipeline_paddle/output \
      try:
          convert_to_jpg(f"{BASE_DIR}/VITON-HD/datasets/test/image/person.png")
          # os.remove("/content/fashion-eye-try-on/pipeline_paddle/output/person_alpha.png")
          print(e)
      os.system(f"cd {BASE_DIR}")
 
+ #@title If multiple GPU available, uncomment and try this code
  os.system("export CUDA_VISIBLE_DEVICES=0")
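Note that os.system("export ...") only sets the variable in a throwaway subshell, so the running Python process and the tools it launches never see it. Setting it through os.environ before the GPU libraries are first used has the intended effect; a one-line sketch:

os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # visible to this process and its children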
 
  # Openpose pose estimation
  # Ubuntu and Mac
  def estimate_pose():
      os.system(f"cd {BASE_DIR}/openpose && ./build/examples/openpose/openpose.bin --image_dir {BASE_DIR}/VITON-HD/datasets/test/image --write_json {BASE_DIR}/VITON-HD/datasets/test/openpose-json/ --display 0 --face --hand --render_pose 0")
      os.system(f"cd {BASE_DIR}/openpose && ./build/examples/openpose/openpose.bin --image_dir {BASE_DIR}/VITON-HD/datasets/test/image --write_images {BASE_DIR}/VITON-HD/datasets/test/openpose-img/ --display 0 --hand --render_pose 1 --disable_blending true")
 
  # Run self correction human parser
  # !python3 /content/fashion-eye-try-on/human_parse/simple_extractor.py --dataset 'lip' --model-restore '/content/fashion-eye-try-on/human_parse/checkpoints/final.pth' --input-dir '/content/fashion-eye-try-on/image' --output-dir '/content/fashion-eye-try-on/VITON-HD/datasets/test/image-parse'
  def generate_human_segmentation_map():
      # remove_ipynb_checkpoints()
      os.system(f"python3 {BASE_DIR}/human_parse/simple_extractor.py --dataset 'lip' --model-restore '{BASE_DIR}/human_parse/checkpoints/final.pth' --input-dir '{BASE_DIR}/VITON-HD/datasets/test/image' --output-dir '{BASE_DIR}/VITON-HD/datasets/test/image-parse'")
 
  # with open('/content/fashion-eye-try-on/VITON-HD/datasets/test_pairs.txt', 'w') as file:
  #     for model, cloth in pairs:
  #         file.write(f"{model} {cloth}\n")
  def generate_test_pairs_txt():
      with open(f"{BASE_DIR}/VITON-HD/datasets/test_pairs.txt", 'w') as file:
          file.write(f"person.jpg cloth.jpg\n")
 
  # VITON-HD
  # Transfer the cloth to the model
  def generate_viton_hd():
      os.system(f"python {BASE_DIR}/VITON-HD/test.py --name output --dataset_list {BASE_DIR}/VITON-HD/datasets/test_pairs.txt --dataset_dir {BASE_DIR}/VITON-HD/datasets/ --checkpoint_dir {BASE_DIR}/VITON-HD/checkpoints --save_dir {BASE_DIR}/")
 
+ import sys
  # To resolve ModuleNotFoundError during imports
  if BASE_DIR not in sys.path:
      sys.path.append(BASE_DIR)
      sys.path.append(f"{BASE_DIR}/cloth_segmentation")
 
+ from cloth_segmentation.networks import U2NET
+ import torchvision.transforms as transforms
+ import torch.nn.functional as F
+ import os
+ from PIL import Image
+ from collections import OrderedDict
+
+ import torch
 
  device = 'cuda' if torch.cuda.is_available() else "cpu"
 
      output_img = output_img.convert('L')
      output_img.save(os.path.join(result_dir, image_name[:-4]+'.jpg'))
 
  os.system(f"cd {BASE_DIR}")
+ from PIL import Image
  def upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs(cloth_img):
      os.system(f"cd {BASE_DIR}")
      cloth_img = cloth_img.resize((768, 1024), Image.BICUBIC)
      cloth_img.save(f"{BASE_DIR}/cloth/cloth.jpg")
      cloth_img.save(f"{BASE_DIR}/VITON-HD/datasets/test/cloth/cloth.jpg")
      try:
+         generate_cloth_mask(f"{BASE_DIR}/cloth", f"{BASE_DIR}/cloth_mask", f"{BASE_DIR}/cloth_segmentation/checkpoints/cloth_segm_u2net_latest.pth")
          cloth_mask_img = Image.open(f"{BASE_DIR}/cloth_mask/cloth.jpg")
+         cloth_mask_img.save(f"{BASE_DIR}/VITON-HD/datasets/test/cloth-mask/cloth.jpg")
      except Exception as e:
+         print(e)
 
 
  # Gradio
  os.system("pip install gradio")
 
+ import gradio as gr
  # import cv2
+ from PIL import Image
+ IMAGEPATH='/content/fashion-eye-try-on/VITON-HD/datasets/test/image'
+ CLOTHPATH='/content/fashion-eye-try-on/VITON-HD/datasets/test/cloth'
+ CLOTHMASKPATH='/content/fashion-eye-try-on/VITON-HD/datasets/test/image'
 
+ from threading import Thread
 
  def fashion_eye_tryon(person_img, cloth_img):
+     result_img = person_img
+     # img.save(IMAGEPATH + "person.jpg")
+     # dress.save(CLOTHPATH + "cloth.jpg")
+
+     # txt = open("/content/VITON-HD/datasets/test_pairs.txt", "a")
+     # txt.write("person_img.jpg dress_img.jpg\n")
+     # txt.close()
+     # # result
+     # print(person_img.info, cloth_img.info)
+     # p_t1 = Thread(target=upload_remove_background_and_save_person_image, args=(person_img, ))
+     # c_t2 = Thread(target=upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs, args=(cloth_img, ))
+     # p_t1.start()
+     # c_t2.start()
+     # p_t1.join()
+     # c_t2.join()
+     # Estimate pose
+     try:
+         upload_resize_generate_cloth_mask_and_move_to_viton_hd_test_inputs(cloth_img)
+         upload_remove_background_and_save_person_image(person_img)
+         remove_ipynb_checkpoints()
+         estimate_pose()
+         # Generate human parse
+         remove_ipynb_checkpoints()
+         generate_human_segmentation_map()
+         generate_test_pairs_txt()
+         remove_ipynb_checkpoints()
+         generate_viton_hd()
+         for p in ["/content/fashion-eye-try-on/output/person_cloth.jpg", "/content/fashion-eye-try-on/output/person.jpg_cloth.jpg"]:
+             if os.path.exists(p):
+                 result_img = Image.open(p)
+     except Exception as e:
+         print(e)
+         return
+     return result_img
 
 
 
  # res = fashion_eye_tryon("", "")
  # res.show()
+ gr.Interface(fn=fashion_eye_tryon,
+              inputs=[gr.Image(type = "pil", label="Your image"), gr.Image(type="pil", label="Dress")],
               outputs="image"
+              ).launch(debug=True, inbrowser=True, share = True)
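For reference, a leaner launch sketch: debug=True blocks on the launch call, and inbrowser/share are generally unnecessary when the app is already served by a Space (assumption: this script runs as the Space's entry point):

# gr.Interface(fn=fashion_eye_tryon,
#              inputs=[gr.Image(type="pil", label="Your image"),
#                      gr.Image(type="pil", label="Dress")],
#              outputs="image").launch()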
 
+ # !pip freeze > /content/requirements_final.txt