Maol committed
Commit
dea6e07
1 Parent(s): b117d08

Upload inference_realesrgan_video.py

Files changed (1)
  1. inference_realesrgan_video.py +199 -0
inference_realesrgan_video.py ADDED
@@ -0,0 +1,199 @@
+ import argparse
+ import glob
+ import mimetypes
+ import os
+ import queue
+ import shutil
+ import torch
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+ from basicsr.utils.logger import AvgTimer
+ from tqdm import tqdm
+
+ from realesrgan import IOConsumer, PrefetchReader, RealESRGANer
+ from realesrgan.archs.srvgg_arch import SRVGGNetCompact
+
+
+ def main():
+     """Inference demo for Real-ESRGAN.
+
+     It is mainly used for restoring anime videos.
+     """
+     parser = argparse.ArgumentParser()
+     parser.add_argument('-i', '--input', type=str, default='inputs', help='Input image or folder')
+     parser.add_argument(
+         '-n',
+         '--model_name',
+         type=str,
+         default='RealESRGAN_x4plus',
+         help=('Model names: RealESRGAN_x4plus | RealESRNet_x4plus | RealESRGAN_x4plus_anime_6B | RealESRGAN_x2plus | '
+               'RealESRGANv2-anime-xsx2 | RealESRGANv2-animevideo-xsx2-nousm | RealESRGANv2-animevideo-xsx2 | '
+               'RealESRGANv2-anime-xsx4 | RealESRGANv2-animevideo-xsx4-nousm | RealESRGANv2-animevideo-xsx4'))
+     parser.add_argument('-o', '--output', type=str, default='results', help='Output folder')
+     parser.add_argument('-s', '--outscale', type=float, default=4, help='The final upsampling scale of the image')
+     parser.add_argument('--suffix', type=str, default='out', help='Suffix of the restored video')
+     parser.add_argument('-t', '--tile', type=int, default=0, help='Tile size, 0 for no tile during testing')
+     parser.add_argument('--tile_pad', type=int, default=10, help='Tile padding')
+     parser.add_argument('--pre_pad', type=int, default=0, help='Pre padding size at each border')
+     parser.add_argument('--face_enhance', action='store_true', help='Use GFPGAN to enhance faces')
+     parser.add_argument('--half', action='store_true', help='Use half precision during inference')
+     parser.add_argument('-v', '--video', action='store_true', help='Output a video using ffmpeg')
+     parser.add_argument('-a', '--audio', action='store_true', help='Keep audio')
+     parser.add_argument('--fps', type=float, default=None, help='FPS of the output video')
+     parser.add_argument('--consumer', type=int, default=4, help='Number of IO consumers')
+
+     parser.add_argument(
+         '--alpha_upsampler',
+         type=str,
+         default='realesrgan',
+         help='The upsampler for the alpha channels. Options: realesrgan | bicubic')
+     parser.add_argument(
+         '--ext',
+         type=str,
+         default='auto',
+         help='Image extension. Options: auto | jpg | png, auto means using the same extension as inputs')
+     args = parser.parse_args()
+
+     # ---------------------- determine models according to model names ---------------------- #
+     args.model_name = args.model_name.split('.')[0]
+     if args.model_name in ['RealESRGAN_x4plus', 'RealESRNet_x4plus']:  # x4 RRDBNet model
+         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
+         netscale = 4
+     elif args.model_name in ['RealESRGAN_x4plus_anime_6B']:  # x4 RRDBNet model with 6 blocks
+         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=6, num_grow_ch=32, scale=4)
+         netscale = 4
+     elif args.model_name in ['RealESRGAN_x2plus']:  # x2 RRDBNet model
+         model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=2)
+         netscale = 2
+     elif args.model_name in [
+             'RealESRGANv2-anime-xsx2', 'RealESRGANv2-animevideo-xsx2-nousm', 'RealESRGANv2-animevideo-xsx2'
+     ]:  # x2 VGG-style model (XS size)
+         model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=2, act_type='prelu')
+         netscale = 2
+     elif args.model_name in [
+             'RealESRGANv2-anime-xsx4', 'RealESRGANv2-animevideo-xsx4-nousm', 'RealESRGANv2-animevideo-xsx4'
+     ]:  # x4 VGG-style model (XS size)
+         model = SRVGGNetCompact(num_in_ch=3, num_out_ch=3, num_feat=64, num_conv=16, upscale=4, act_type='prelu')
+         netscale = 4
+
+     # ---------------------- determine model paths ---------------------- #
+     model_path = os.path.join('experiments/pretrained_models', args.model_name + '.pth')
+     if not os.path.isfile(model_path):
+         model_path = os.path.join('realesrgan/weights', args.model_name + '.pth')
+     if not os.path.isfile(model_path):
+         raise ValueError(f'Model {args.model_name} does not exist.')
+
+     # restorer
+     upsampler = RealESRGANer(
+         scale=netscale,
+         model_path=model_path,
+         model=model,
+         tile=args.tile,
+         tile_pad=args.tile_pad,
+         pre_pad=args.pre_pad,
+         half=args.half)
+
+     if args.face_enhance:  # Use GFPGAN for face enhancement
+         from gfpgan import GFPGANer
+         face_enhancer = GFPGANer(
+             model_path='https://github.com/TencentARC/GFPGAN/releases/download/v0.2.0/GFPGANCleanv1-NoCE-C2.pth',
+             upscale=args.outscale,
+             arch='clean',
+             channel_multiplier=2,
+             bg_upsampler=upsampler)
+     os.makedirs(args.output, exist_ok=True)
+     # for saving restored frames
+     save_frame_folder = os.path.join(args.output, 'frames_tmpout')
+     os.makedirs(save_frame_folder, exist_ok=True)
+
+     # guess_type() returns None for extensionless inputs (e.g. a folder), so guard before calling startswith
+     input_type = mimetypes.guess_type(args.input)[0]
+     frame_folder = None  # only set for video inputs; checked again during cleanup
+     if input_type is not None and input_type.startswith('video'):  # is a video file
+         video_name = os.path.splitext(os.path.basename(args.input))[0]
+         frame_folder = os.path.join('tmp_frames', video_name)
+         os.makedirs(frame_folder, exist_ok=True)
+         # use ffmpeg to extract frames
+         os.system(f'ffmpeg -i {args.input} -qscale:v 1 -qmin 1 -qmax 1 -vsync 0 {frame_folder}/frame%08d.png')
+         # get image path list
+         paths = sorted(glob.glob(os.path.join(frame_folder, '*')))
+         if args.video:
+             if args.fps is None:
+                 # get input video fps
+                 import ffmpeg
+                 probe = ffmpeg.probe(args.input)
+                 video_streams = [stream for stream in probe['streams'] if stream['codec_type'] == 'video']
+                 args.fps = eval(video_streams[0]['avg_frame_rate'])
+     elif input_type is not None and input_type.startswith('image'):  # is an image file
+         paths = [args.input]
+         video_name = 'video'
+     else:  # is a folder of images
+         paths = sorted(glob.glob(os.path.join(args.input, '*')))
+         video_name = 'video'
+
+     timer = AvgTimer()
+     timer.start()
+     pbar = tqdm(total=len(paths), unit='frame', desc='inference')
+     # set up prefetch reader
+     reader = PrefetchReader(paths, num_prefetch_queue=4)
+     reader.start()
+
+     que = queue.Queue()
+     consumers = [IOConsumer(args, que, f'IO_{i}') for i in range(args.consumer)]
+     for consumer in consumers:
+         consumer.start()
+
+     for idx, (path, img) in enumerate(zip(paths, reader)):
+         imgname, extension = os.path.splitext(os.path.basename(path))
+         if len(img.shape) == 3 and img.shape[2] == 4:
+             img_mode = 'RGBA'
+         else:
+             img_mode = None
+
+         try:
+             if args.face_enhance:
+                 _, _, output = face_enhancer.enhance(img, has_aligned=False, only_center_face=False, paste_back=True)
+             else:
+                 output, _ = upsampler.enhance(img, outscale=args.outscale)
+         except RuntimeError as error:
+             print('Error', error)
+             print('If you encounter CUDA out of memory, try to set --tile with a smaller number.')
+         else:
+             if args.ext == 'auto':
+                 extension = extension[1:]
+             else:
+                 extension = args.ext
+             if img_mode == 'RGBA':  # RGBA images should be saved in png format
+                 extension = 'png'
+             save_path = os.path.join(save_frame_folder, f'{imgname}_out.{extension}')
+
+             que.put({'output': output, 'save_path': save_path})
+
+         pbar.update(1)
+         if torch.cuda.is_available():  # synchronize only when CUDA is in use, so CPU-only runs do not crash
+             torch.cuda.synchronize()
+         timer.record()
+         avg_fps = 1. / (timer.get_avg_time() + 1e-7)
+         pbar.set_description(f'idx {idx}, fps {avg_fps:.2f}')
+
+     for _ in range(args.consumer):
+         que.put('quit')
+     for consumer in consumers:
+         consumer.join()
+     pbar.close()
+
+     # merge frames to video
+     if args.video:
+         video_save_path = os.path.join(args.output, f'{video_name}_{args.suffix}.mp4')
+         if args.audio:
+             os.system(
+                 f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} -i {args.input}'
+                 f' -map 0:v:0 -map 1:a:0 -c:a copy -c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
+         else:
+             os.system(f'ffmpeg -r {args.fps} -i {save_frame_folder}/frame%08d_out.{extension} '
+                       f'-c:v libx264 -r {args.fps} -pix_fmt yuv420p {video_save_path}')
+
+     # delete tmp folders
+     shutil.rmtree(save_frame_folder)
+     if frame_folder is not None and os.path.isdir(frame_folder):  # frame_folder is None for non-video inputs
+         shutil.rmtree(frame_folder)
+
+
+ if __name__ == '__main__':
+     main()
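
A minimal usage sketch based on the flags defined above, assuming the RealESRGAN_x4plus.pth weights are already present under experiments/pretrained_models/ or realesrgan/weights/, ffmpeg is on the PATH, and the input path inputs/video.mp4 is illustrative:

python inference_realesrgan_video.py -i inputs/video.mp4 -n RealESRGAN_x4plus -s 4 --video --audio --suffix outx4

With these flags the script extracts the frames with ffmpeg, upsamples each frame with the selected model, writes the restored frames through the IO consumer threads, and re-encodes them together with the source audio track into results/video_outx4.mp4.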