swzamir committed on
Commit f5bf8e5
1 Parent(s): 412b0a2

Create demo_gradio.py

Files changed (1)
  1. demo_gradio.py +83 -0
demo_gradio.py ADDED
@@ -0,0 +1,83 @@
+ ## Restormer: Efficient Transformer for High-Resolution Image Restoration
+ ## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, and Ming-Hsuan Yang
+ ## https://arxiv.org/abs/2111.09881
+
+
+ import torch
+ import torch.nn.functional as F
+ import os
+ from runpy import run_path
+ from skimage import img_as_ubyte
+ import cv2
+ from tqdm import tqdm
+ import argparse
+
+ parser = argparse.ArgumentParser(description='Test Restormer on your own images')
+ parser.add_argument('--input_path', default='./demo/degraded/', type=str, help='Directory of input images or path of single image')
+ parser.add_argument('--result_dir', default='./demo/restored/', type=str, help='Directory for restored results')
+ parser.add_argument('--task', required=True, type=str, help='Task to run', choices=['Motion_Deblurring',
+                                                                                     'Single_Image_Defocus_Deblurring',
+                                                                                     'Deraining',
+                                                                                     'Real_Denoising',
+                                                                                     'Gaussian_Gray_Denoising',
+                                                                                     'Gaussian_Color_Denoising'])
+
+ args = parser.parse_args()
+
+
+ def get_weights_and_parameters(task, parameters):
+     if task == 'Motion_Deblurring':
+         weights = os.path.join('Motion_Deblurring', 'pretrained_models', 'motion_deblurring.pth')
+     elif task == 'Single_Image_Defocus_Deblurring':
+         weights = os.path.join('Defocus_Deblurring', 'pretrained_models', 'single_image_defocus_deblurring.pth')
+     elif task == 'Deraining':
+         weights = os.path.join('Deraining', 'pretrained_models', 'deraining.pth')
+     elif task == 'Real_Denoising':
+         weights = os.path.join('Denoising', 'pretrained_models', 'real_denoising.pth')
+         parameters['LayerNorm_type'] = 'BiasFree'
+     else:
+         # The two Gaussian denoising tasks listed in --task are not wired up here;
+         # fail with a clear message instead of an UnboundLocalError on `weights`.
+         raise ValueError(f'No pretrained weights configured for task: {task}')
+     return weights, parameters
+
+ task = args.task
+ out_dir = os.path.join(args.result_dir, task)
+
+ os.makedirs(out_dir, exist_ok=True)
+
+ # Get model weights and parameters
+ parameters = {'inp_channels':3, 'out_channels':3, 'dim':48, 'num_blocks':[4,6,6,8], 'num_refinement_blocks':4, 'heads':[1,2,4,8], 'ffn_expansion_factor':2.66, 'bias':False, 'LayerNorm_type':'WithBias', 'dual_pixel_task':False}
+ weights, parameters = get_weights_and_parameters(task, parameters)
+
+ # Load the Restormer architecture definition directly from its source file
+ load_arch = run_path(os.path.join('basicsr', 'models', 'archs', 'restormer_arch.py'))
+ model = load_arch['Restormer'](**parameters)
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+ # device = torch.device('cpu')
+
+ model = model.to(device)
+ checkpoint = torch.load(weights, map_location=device)
+ model.load_state_dict(checkpoint['params'])
+
+ model.eval()
+
+
+ # The model downsamples spatially inside its encoder, so input height and width should be multiples of 8
+ img_multiple_of = 8
+
+
+ with torch.inference_mode():
+
+     img = cv2.cvtColor(cv2.imread(args.input_path), cv2.COLOR_BGR2RGB)
+
+     input_ = torch.from_numpy(img).float().div(255.).permute(2,0,1).unsqueeze(0).to(device)
+
+     # Reflect-pad the input so that height and width are multiples of 8
+     h,w = input_.shape[2], input_.shape[3]
+     H,W = ((h+img_multiple_of)//img_multiple_of)*img_multiple_of, ((w+img_multiple_of)//img_multiple_of)*img_multiple_of
+     padh = H-h if h%img_multiple_of!=0 else 0
+     padw = W-w if w%img_multiple_of!=0 else 0
+     input_ = F.pad(input_, (0,padw,0,padh), 'reflect')
+
+     restored = torch.clamp(model(input_), 0, 1)
+
+     # Crop back to the original size and convert to uint8
+     restored = img_as_ubyte(restored[:,:,:h,:w].permute(0, 2, 3, 1).cpu().detach().numpy()[0])
+
+     cv2.imwrite(os.path.join(out_dir, os.path.split(args.input_path)[-1]), cv2.cvtColor(restored, cv2.COLOR_RGB2BGR))
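
With the task-specific pretrained weights downloaded into the directories referenced above, the script can be run on a single image, for example (the input file name below is only illustrative):

    python demo_gradio.py --task Real_Denoising --input_path ./demo/degraded/noisy.png --result_dir ./demo/restored/

The restored image is then written to ./demo/restored/Real_Denoising/ under the same file name.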
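
The --input_path help text mentions a directory of input images, but the script above reads a single file with cv2.imread. A minimal sketch of how the same inference could be applied to every image in a directory, reusing the model, device, img_multiple_of and out_dir already defined by the script, might look like the following (the restore_file helper and the file-extension filter are illustrative and not part of the committed file):

    import glob

    def restore_file(path):
        # Read BGR, convert to RGB, and normalize to a 1x3xHxW float tensor in [0, 1]
        img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
        inp = torch.from_numpy(img).float().div(255.).permute(2, 0, 1).unsqueeze(0).to(device)

        # Reflect-pad so height and width are multiples of img_multiple_of
        h, w = inp.shape[2], inp.shape[3]
        padh = (img_multiple_of - h % img_multiple_of) % img_multiple_of
        padw = (img_multiple_of - w % img_multiple_of) % img_multiple_of
        inp = F.pad(inp, (0, padw, 0, padh), 'reflect')

        with torch.inference_mode():
            out = torch.clamp(model(inp), 0, 1)

        # Crop back to the original size, convert to uint8, and save as BGR
        out = img_as_ubyte(out[:, :, :h, :w].permute(0, 2, 3, 1).cpu().numpy()[0])
        cv2.imwrite(os.path.join(out_dir, os.path.basename(path)), cv2.cvtColor(out, cv2.COLOR_RGB2BGR))

    if os.path.isdir(args.input_path):
        files = sorted(glob.glob(os.path.join(args.input_path, '*.png'))
                       + glob.glob(os.path.join(args.input_path, '*.jpg')))
        for f in tqdm(files):  # tqdm is already imported by the script above
            restore_file(f)
    else:
        restore_file(args.input_path)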
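
The file name suggests a Gradio demo, but the committed script itself is purely command-line. As a rough sketch (not part of this commit), the same inference could be exposed through a Gradio interface along the following lines, assuming a Gradio 3.x-style API in which gr.Image with type='numpy' passes and returns RGB uint8 arrays; it reuses model, device, task and img_multiple_of from the script above:

    import numpy as np
    import gradio as gr

    def restore(image: np.ndarray) -> np.ndarray:
        # Gradio supplies an RGB uint8 array; normalize to a 1x3xHxW float tensor
        inp = torch.from_numpy(image).float().div(255.).permute(2, 0, 1).unsqueeze(0).to(device)

        # Reflect-pad so height and width are multiples of img_multiple_of
        h, w = inp.shape[2], inp.shape[3]
        padh = (img_multiple_of - h % img_multiple_of) % img_multiple_of
        padw = (img_multiple_of - w % img_multiple_of) % img_multiple_of
        inp = F.pad(inp, (0, padw, 0, padh), 'reflect')

        with torch.inference_mode():
            out = torch.clamp(model(inp), 0, 1)

        # Crop to the original size and return an RGB uint8 array for display
        return img_as_ubyte(out[:, :, :h, :w].permute(0, 2, 3, 1).cpu().numpy()[0])

    demo = gr.Interface(fn=restore,
                        inputs=gr.Image(type='numpy', label='Degraded image'),
                        outputs=gr.Image(type='numpy', label='Restored image'),
                        title='Restormer: ' + task.replace('_', ' '))
    demo.launch()

Keeping the input and output as numpy arrays keeps the preprocessing identical to the command-line path above.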