avans06 committed
Commit 0f09e67
1 Parent(s): d56014c

1. Change model downloads from the `wget` command to the `requests` module.


2. Add the `inbrowser=True` parameter to Gradio's `launch()` call.

3. Upgrade the project's Gradio version to 5.8.0.

4. Replace facexlib with the improved version implemented by sczhou.
   1. Add the `weights_only=True` parameter to `torch.load` to avoid a FutureWarning (see the sketch after this list).

5. Modify GFPGAN to a custom version to avoid the error `ModuleNotFoundError: No module named 'torchvision.transforms.functional_tensor'`.

6. Change **basicsr** to the GitHub **BasicSR** version to avoid the same `ModuleNotFoundError` (a sketch of the underlying import fix follows the requirements.txt diff).

7. Face Restoration and RealESR can now be freely combined in various ways, or one can be set to "None" to use only the other model.
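Regarding item 4.1: the `torch.load` change is not visible in the diffs below (it lives in the swapped-in facexlib/GFPGAN forks), so here is a minimal sketch of what such a call looks like; the checkpoint path and surrounding code are illustrative assumptions, not the forks' actual source:

```python
import torch

# Hypothetical checkpoint path, for illustration only.
ckpt_path = "weights/GFPGANv1.4.pth"

# weights_only=True restricts unpickling to tensors and plain containers,
# which silences the FutureWarning raised by newer PyTorch releases and
# avoids executing arbitrary pickled code from the checkpoint file.
state_dict = torch.load(ckpt_path, map_location="cpu", weights_only=True)
```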

Files changed (5)
  1. .gitignore +143 -0
  2. README.md +1 -1
  3. app.py +185 -78
  4. requirements.txt +14 -6
  5. webui.bat +72 -0
.gitignore ADDED
@@ -0,0 +1,143 @@
+ # ignored folders
+ datasets/*
+ experiments/*
+ results/*
+ tb_logger/*
+ wandb/*
+ tmp/*
+
+ version.py
+
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+ .vs
+ output
+ weights
+ *.jpg
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 📈
  colorFrom: blue
  colorTo: gray
  sdk: gradio
- sdk_version: 3.1.7
+ sdk_version: 5.8.0
  app_file: app.py
  pinned: false
  license: apache-2.0
app.py CHANGED
@@ -1,55 +1,84 @@
  import os
-
+ import gc
  import cv2
+ import requests
+ import numpy as np
  import gradio as gr
  import torch
- from basicsr.archs.srvgg_arch import SRVGGNetCompact
+ import traceback
+ from tqdm import tqdm
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact
  from gfpgan.utils import GFPGANer
  from realesrgan.utils import RealESRGANer
+ from basicsr.archs.rrdbnet_arch import RRDBNet

- os.system("pip freeze")
- # download weights
- if not os.path.exists('realesr-general-x4v3.pth'):
-     os.system("wget https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth -P .")
- if not os.path.exists('GFPGANv1.2.pth'):
-     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth -P .")
- if not os.path.exists('GFPGANv1.3.pth'):
-     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth -P .")
- if not os.path.exists('GFPGANv1.4.pth'):
-     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth -P .")
- if not os.path.exists('RestoreFormer.pth'):
-     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth -P .")
- if not os.path.exists('CodeFormer.pth'):
-     os.system("wget https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth -P .")
-
- torch.hub.download_url_to_file(
-     'https://thumbs.dreamstime.com/b/tower-bridge-traditional-red-bus-black-white-colors-view-to-tower-bridge-london-black-white-colors-108478942.jpg',
-     'a1.jpg')
- torch.hub.download_url_to_file(
-     'https://media.istockphoto.com/id/523514029/photo/london-skyline-b-w.jpg?s=612x612&w=0&k=20&c=kJS1BAtfqYeUDaORupj0sBPc1hpzJhBUUqEFfRnHzZ0=',
-     'a2.jpg')
- torch.hub.download_url_to_file(
-     'https://i.guim.co.uk/img/media/06f614065ed82ca0e917b149a32493c791619854/0_0_3648_2789/master/3648.jpg?width=700&quality=85&auto=format&fit=max&s=05764b507c18a38590090d987c8b6202',
-     'a3.jpg')
- torch.hub.download_url_to_file(
-     'https://i.pinimg.com/736x/46/96/9e/46969eb94aec2437323464804d27706d--victorian-london-victorian-era.jpg',
-     'a4.jpg')
-
- # background enhancer with RealESRGAN
- model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=4, act_type='prelu')
- model_path = 'realesr-general-x4v3.pth'
- half = True if torch.cuda.is_available() else False
- upsampler = RealESRGANer(scale=4, model_path=model_path, model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
+ # Define URLs and their corresponding local storage paths
+ face_model = {
+     "GFPGANv1.2.pth": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.2.pth",
+     "GFPGANv1.3.pth": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.3.pth",
+     "GFPGANv1.4.pth": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth",
+     "RestoreFormer.pth": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth",
+     "CodeFormer.pth": "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/CodeFormer.pth",
+ }
+ realesr_model = {
+     "realesr-general-x4v3.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-general-x4v3.pth",
+     "realesr-animevideov3.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.5.0/realesr-animevideov3.pth",
+     "RealESRGAN_x4plus_anime_6B.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.2.4/RealESRGAN_x4plus_anime_6B.pth",
+     "RealESRGAN_x2plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth",
+     "RealESRNet_x4plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.1/RealESRNet_x4plus.pth",
+     "RealESRGAN_x4plus.pth": "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth",
+     "4x-AnimeSharp.pth": "https://huggingface.co/utnah/esrgan/resolve/main/4x-AnimeSharp.pth?download=true",
+ }
+ files_to_download = [
+     ( "a1.jpg",
+       "https://thumbs.dreamstime.com/b/tower-bridge-traditional-red-bus-black-white-colors-view-to-tower-bridge-london-black-white-colors-108478942.jpg" ),
+     ( "a2.jpg",
+       "https://media.istockphoto.com/id/523514029/photo/london-skyline-b-w.jpg?s=612x612&w=0&k=20&c=kJS1BAtfqYeUDaORupj0sBPc1hpzJhBUUqEFfRnHzZ0=" ),
+     ( "a3.jpg",
+       "https://i.guim.co.uk/img/media/06f614065ed82ca0e917b149a32493c791619854/0_0_3648_2789/master/3648.jpg?width=700&quality=85&auto=format&fit=max&s=05764b507c18a38590090d987c8b6202" ),
+     ( "a4.jpg",
+       "https://i.pinimg.com/736x/46/96/9e/46969eb94aec2437323464804d27706d--victorian-london-victorian-era.jpg" ),
+ ]

+ # Ensure the target directory exists
+ os.makedirs("weights", exist_ok=True)
  os.makedirs('output', exist_ok=True)

+ def download_from_url(output_path, url):
+     try:
+         # Check if the file already exists
+         if os.path.exists(output_path):
+             print(f"File already exists, skipping download: {output_path}")
+             return
+
+         print(f"Downloading: {url}")
+         with requests.get(url, stream=True) as response, open(output_path, "wb") as f:
+             total_size = int(response.headers.get('content-length', 0))
+             with tqdm(total=total_size, unit='B', unit_scale=True) as pbar:
+                 for chunk in response.iter_content(chunk_size=8192):
+                     f.write(chunk)
+                     pbar.update(len(chunk))
+         print(f"Download successful: {output_path}")
+     except requests.RequestException as e:
+         print(f"Download failed: {url}, Error: {e}")
+
+
+ # Iterate through each file
+ for output_path, url in files_to_download:
+     # Check if the file already exists
+     if os.path.exists(output_path):
+         print(f"File already exists, skipping download: {output_path}")
+         continue
+
+     # Start downloading
+     download_from_url(output_path, url)
+

- # def inference(img, version, scale, weight):
- def inference(img, version, scale):
-     # weight /= 100
+ def inference(img, version, realesr, scale: float):
      print(img, version, scale)
      try:
-         extension = os.path.splitext(os.path.basename(str(img)))[1]
+         img_name = os.path.basename(str(img))
+         basename, extension = os.path.splitext(img_name)
          img = cv2.imread(img, cv2.IMREAD_UNCHANGED)
          if len(img.shape) == 3 and img.shape[2] == 4:
              img_mode = 'RGBA'
@@ -62,50 +91,115 @@ def inference(img, version, scale):
          h, w = img.shape[0:2]
          if h < 300:
              img = cv2.resize(img, (w * 2, h * 2), interpolation=cv2.INTER_LANCZOS4)
+
+         if version:
+             download_from_url(os.path.join("weights", version), face_model[version])
+         if realesr:
+             download_from_url(os.path.join("weights", realesr), realesr_model[realesr])

-         if version == 'v1.2':
+         # background enhancer with RealESRGAN
+         if realesr == 'RealESRGAN_x4plus.pth': # x4 RRDBNet model
+             netscale = 4
+             model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=netscale)
+         elif realesr == 'RealESRNet_x4plus.pth': # x4 RRDBNet model
+             netscale = 4
+             model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=netscale)
+         elif realesr == 'RealESRGAN_x4plus_anime_6B.pth': # x4 RRDBNet model with 6 blocks
+             netscale = 4
+             model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=netscale)
+         elif realesr == 'RealESRGAN_x2plus.pth': # x2 RRDBNet model
+             netscale = 2
+             model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=netscale)
+         elif realesr == 'realesr-animevideov3.pth': # x4 VGG-style model (XS size)
+             netscale = 4
+             model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=netscale, act_type='prelu')
+         elif realesr == 'realesr-general-x4v3.pth': # x4 VGG-style model (S size)
+             netscale = 4
+             model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=32, upscale=netscale, act_type='prelu')
+         # elif realesr == '4x-AnimeSharp.pth': # 4x-AnimeSharp
+         #     netscale = 4
+         #     model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=netscale)
+
+         half = True if torch.cuda.is_available() else False
+         upsampler = RealESRGANer(scale=netscale, model_path=os.path.join("weights", realesr), model=model, tile=0, tile_pad=10, pre_pad=0, half=half)
+
+         face_enhancer = None
+         if version == 'GFPGANv1.2.pth':
              face_enhancer = GFPGANer(
-                 model_path='GFPGANv1.2.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
-         elif version == 'v1.3':
+                 model_path='weights/GFPGANv1.2.pth', upscale=scale, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'GFPGANv1.3.pth':
              face_enhancer = GFPGANer(
-                 model_path='GFPGANv1.3.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
-         elif version == 'v1.4':
+                 model_path='weights/GFPGANv1.3.pth', upscale=scale, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'GFPGANv1.4.pth':
              face_enhancer = GFPGANer(
-                 model_path='GFPGANv1.4.pth', upscale=2, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
-         elif version == 'RestoreFormer':
+                 model_path='weights/GFPGANv1.4.pth', upscale=scale, arch='clean', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'RestoreFormer.pth':
              face_enhancer = GFPGANer(
-                 model_path='RestoreFormer.pth', upscale=2, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler)
-         elif version == 'CodeFormer':
-             face_enhancer = GFPGANer(
-                 model_path='CodeFormer.pth', upscale=2, arch='CodeFormer', channel_multiplier=2, bg_upsampler=upsampler)
-         elif version == 'RealESR-General-x4v3':
+                 model_path='weights/RestoreFormer.pth', upscale=scale, arch='RestoreFormer', channel_multiplier=2, bg_upsampler=upsampler)
+         elif version == 'CodeFormer.pth':
              face_enhancer = GFPGANer(
-                 model_path='realesr-general-x4v3.pth', upscale=2, arch='realesr-general', channel_multiplier=2, bg_upsampler=upsampler)
-
+                 model_path='weights/CodeFormer.pth', upscale=scale, arch='CodeFormer', channel_multiplier=2, bg_upsampler=upsampler)
+
+         files = []
+         outputs = []
          try:
-             # _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True, weight=weight)
-             _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
+             if face_enhancer:
+                 cropped_faces, restored_aligned, restored_img = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
+                 # save faces
+                 if cropped_faces and restored_aligned:
+                     for idx, (cropped_face, restored_face) in enumerate(zip(cropped_faces, restored_aligned)):
+                         # save cropped face
+                         save_crop_path = f"output/{basename}{idx:02d}_cropped_faces.png"
+                         cv2.imwrite(save_crop_path, cropped_face)
+                         # save restored face
+                         save_restore_path = f"output/{basename}{idx:02d}_restored_faces.png"
+                         cv2.imwrite(save_restore_path, restored_face)
+                         # save comparison image
+                         save_cmp_path = f"output/{basename}{idx:02d}_cmp.png"
+                         cmp_img = np.concatenate((cropped_face, restored_face), axis=1)
+                         cv2.imwrite(save_cmp_path, cmp_img)
+
+                         files.append(save_crop_path)
+                         files.append(save_restore_path)
+                         files.append(save_cmp_path)
+                         outputs.append(cv2.cvtColor(cropped_face, cv2.COLOR_BGR2RGB))
+                         outputs.append(cv2.cvtColor(restored_face, cv2.COLOR_BGR2RGB))
+                         outputs.append(cv2.cvtColor(cmp_img, cv2.COLOR_BGR2RGB))
+             else:
+                 restored_img, _ = upsampler.enhance(img, outscale=scale)
          except RuntimeError as error:
+             print(traceback.format_exc())
              print('Error', error)
+         finally:
+             if face_enhancer:
+                 face_enhancer._cleanup()
+             else:
+                 # Free GPU memory and clean up resources
+                 torch.cuda.empty_cache()
+                 gc.collect()
+

          try:
              if scale != 2:
                  interpolation = cv2.INTER_AREA if scale < 2 else cv2.INTER_LANCZOS4
                  h, w = img.shape[0:2]
-                 output = cv2.resize(output, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
+                 restored_img = cv2.resize(restored_img, (int(w * scale / 2), int(h * scale / 2)), interpolation=interpolation)
          except Exception as error:
-             print('wrong scale input.', error)
-         if img_mode == 'RGBA': # RGBA images should be saved in png format
-             extension = 'png'
-         else:
-             extension = 'jpg'
-         save_path = f'output/out.{extension}'
-         cv2.imwrite(save_path, output)
+             print(traceback.format_exc())
+             print("wrong scale input.", error)
+
+         if not extension:
+             extension = ".png" if img_mode == "RGBA" else ".jpg" # RGBA images should be saved in png format
+         save_path = f"output/{basename}{extension}"
+         cv2.imwrite(save_path, restored_img)

-         output = cv2.cvtColor(output, cv2.COLOR_BGR2RGB)
-         return output, save_path
+         restored_img = cv2.cvtColor(restored_img, cv2.COLOR_BGR2RGB)
+         files.append(save_path)
+         outputs.append(restored_img)
+         return outputs, files
      except Exception as error:
-         print('global exception', error)
+         print(traceback.format_exc())
+         print("global exception", error)
          return None, None

@@ -122,21 +216,34 @@ article = r"""
  """
  demo = gr.Interface(
      inference, [
-         gr.inputs.Image(type="filepath", label="Input"),
-         # gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer', 'CodeFormer'], type="value", default='v1.4', label='version'),
-         gr.inputs.Radio(['v1.2', 'v1.3', 'v1.4', 'RestoreFormer','CodeFormer','RealESR-General-x4v3'], type="value", default='v1.4', label='version'),
-         gr.inputs.Number(label="Rescaling factor", default=2),
-         # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', default=50)
+         gr.Image(type="filepath", label="Input", format="png"),
+         gr.Dropdown(["GFPGANv1.2.pth",
+                      "GFPGANv1.3.pth",
+                      "GFPGANv1.4.pth",
+                      "RestoreFormer.pth",
+                      # "CodeFormer.pth",
+                      None], type="value", value='GFPGANv1.4.pth', label='Face Restoration version', info="Face Restoration and RealESR can be freely combined in different ways, or one can be set to \"None\" to use only the other model. Face Restoration is primarily used for face restoration in real-life images, while RealESR serves as a background restoration model."),
+         gr.Dropdown(["realesr-general-x4v3.pth",
+                      "realesr-animevideov3.pth",
+                      "RealESRGAN_x4plus_anime_6B.pth",
+                      "RealESRGAN_x2plus.pth",
+                      "RealESRNet_x4plus.pth",
+                      "RealESRGAN_x4plus.pth",
+                      # "4x-AnimeSharp.pth",
+                      None], type="value", value='realesr-general-x4v3.pth', label='RealESR version'),
+         gr.Number(label="Rescaling factor", value=2),
+         # gr.Slider(0, 100, label='Weight, only for CodeFormer. 0 for better quality, 100 for better identity', value=50)
      ], [
-         gr.outputs.Image(type="numpy", label="Output (The whole image)"),
-         gr.outputs.File(label="Download the output image")
+         gr.Gallery(type="numpy", label="Output (The whole image)", format="png"),
+         gr.File(label="Download the output image")
      ],
      title=title,
      description=description,
      article=article,
-     # examples=[['AI-generate.jpg', 'v1.4', 2, 50], ['lincoln.jpg', 'v1.4', 2, 50], ['Blake_Lively.jpg', 'v1.4', 2, 50],
-     #           ['10045.png', 'v1.4', 2, 50]]).launch()
-     examples=[['a1.jpg', 'v1.4', 2], ['a2.jpg', 'v1.4', 2], ['a3.jpg', 'v1.4', 2],['a4.jpg', 'v1.4', 2]])
+     examples=[['a1.jpg', 'GFPGANv1.4.pth', "realesr-general-x4v3.pth", 2],
+               ['a2.jpg', 'GFPGANv1.4.pth', "realesr-general-x4v3.pth", 2],
+               ['a3.jpg', 'GFPGANv1.4.pth', "realesr-general-x4v3.pth", 2],
+               ['a4.jpg', 'GFPGANv1.4.pth', "realesr-general-x4v3.pth", 2]])

- demo.queue(concurrency_count=4)
- demo.launch()
+ demo.queue(default_concurrency_limit=4)
+ demo.launch(inbrowser=True)
requirements.txt CHANGED
@@ -1,11 +1,19 @@
- torch>=1.7
- basicsr>=1.4.2
- facexlib>=0.2.5
- gfpgan>=1.3.7
- realesrgan>=0.2.5
+ --extra-index-url https://download.pytorch.org/whl/cu124
+
+ gradio==5.8.0
+
+ basicsr @ git+https://github.com/XPixelGroup/BasicSR
+ facexlib @ git+https://github.com/avan06/facexlib
+ gfpgan @ git+https://github.com/avan06/GFPGAN
+ realesrgan @ git+https://github.com/avan06/Real-ESRGAN
  numpy
  opencv-python
- torchvision
+
+ torch==2.5.0+cu124; sys_platform != 'darwin'
+ torchvision==0.20.0+cu124; sys_platform != 'darwin'
+ torch==2.5.0; sys_platform == 'darwin'
+ torchvision==0.20.0; sys_platform == 'darwin'
+
  scipy
  tqdm
  lmdb
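The `functional_tensor` error mentioned in items 5 and 6 comes from released basicsr/GFPGAN packages importing a private torchvision module that torchvision 0.17+ removed; pinning the Git versions above is what resolves it. As a rough sketch of the kind of import fix involved (an assumption about the patch, not the forks' verbatim code):

```python
# Older basicsr releases import from the removed private module, roughly:
#   from torchvision.transforms.functional_tensor import rgb_to_grayscale
# The usual fix is to fall back to the public API:
try:
    from torchvision.transforms.functional import rgb_to_grayscale
except ImportError:  # very old torchvision without the public symbol
    from torchvision.transforms.functional_tensor import rgb_to_grayscale
```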
webui.bat ADDED
@@ -0,0 +1,72 @@
+ @echo off
+
+ :: The source of the webui.bat file is stable-diffusion-webui
+
+ if not defined PYTHON (set PYTHON=python)
+ if not defined VENV_DIR (set "VENV_DIR=%~dp0%venv")
+
+ mkdir tmp 2>NUL
+
+ %PYTHON% -c "" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :check_pip
+ echo Couldn't launch python
+ goto :show_stdout_stderr
+
+ :check_pip
+ %PYTHON% -mpip --help >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :start_venv
+ if "%PIP_INSTALLER_LOCATION%" == "" goto :show_stdout_stderr
+ %PYTHON% "%PIP_INSTALLER_LOCATION%" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :start_venv
+ echo Couldn't install pip
+ goto :show_stdout_stderr
+
+ :start_venv
+ if ["%VENV_DIR%"] == ["-"] goto :skip_venv
+ if ["%SKIP_VENV%"] == ["1"] goto :skip_venv
+
+ dir "%VENV_DIR%\Scripts\Python.exe" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :activate_venv
+
+ for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i"
+ echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME%
+ %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt
+ if %ERRORLEVEL% == 0 goto :activate_venv
+ echo Unable to create venv in directory "%VENV_DIR%"
+ goto :show_stdout_stderr
+
+ :activate_venv
+ set PYTHON="%VENV_DIR%\Scripts\Python.exe"
+ echo venv %PYTHON%
+
+ :skip_venv
+ goto :launch
+
+ :launch
+ %PYTHON% app.py %COMMANDLINE_ARGS% %*
+ pause
+ exit /b
+
+ :show_stdout_stderr
+
+ echo.
+ echo exit code: %errorlevel%
+
+ for /f %%i in ("tmp\stdout.txt") do set size=%%~zi
+ if %size% equ 0 goto :show_stderr
+ echo.
+ echo stdout:
+ type tmp\stdout.txt
+
+ :show_stderr
+ for /f %%i in ("tmp\stderr.txt") do set size=%%~zi
+ if %size% equ 0 goto :show_stderr
+ echo.
+ echo stderr:
+ type tmp\stderr.txt
+
+ :endofscript
+
+ echo.
+ echo Launch unsuccessful. Exiting.
+ pause