52Hz committed on
Commit 715282a
1 Parent(s): e0e3b59

Update main_test_SRMNet.py

Files changed (1)
main_test_SRMNet.py  +0 -68
main_test_SRMNet.py CHANGED
@@ -1,24 +1,10 @@
-import argparse
 import cv2
-import glob
-import numpy as np
 from collections import OrderedDict
-from skimage import img_as_ubyte
-import os
 import torch
-import requests
-from PIL import Image
-import torchvision.transforms.functional as TF
-import torch.nn.functional as F
-
-from model.SRMNet import SRMNet
-from utils import util_calculate_psnr_ssim as util
-

 def save_img(filepath, img):
     cv2.imwrite(filepath, cv2.cvtColor(img, cv2.COLOR_RGB2BGR))

-
 def load_checkpoint(model, weights):
     checkpoint = torch.load(weights)
     try:
@@ -31,62 +17,8 @@ def load_checkpoint(model, weights):
         new_state_dict[name] = v
     model.load_state_dict(new_state_dict)

-
-def main():
-    parser = argparse.ArgumentParser(description='Demo Image Denoising')
-    parser.add_argument('--input_dir', default='test/', type=str, help='Input images')
-    parser.add_argument('--result_dir', default='result/', type=str, help='Directory for results')
-    parser.add_argument('--weights',
-                        default='experiments/pretrained_models/AWGN_denoising_SRMNet.pth', type=str,
-                        help='Path to weights')
-
-    args = parser.parse_args()
-
-    inp_dir = args.input_dir
-    out_dir = args.result_dir
-
-    os.makedirs(out_dir, exist_ok=True)
-
-    files = sorted(glob.glob(os.path.join(inp_dir, '*.PNG')))
-
-    if len(files) == 0:
-        raise Exception(f"No files found at {inp_dir}")
-
-    # Load corresponding models architecture and weights
-    model = SRMNet()
-    model.cuda()
-
-    load_checkpoint(model, args.weights)
-    model.eval()
-
-    mul = 16
-    for file_ in files:
-        img = Image.open(file_).convert('RGB')
-        input_ = TF.to_tensor(img).unsqueeze(0).cuda()
-
-        # Pad the input if not_multiple_of 8
-        h, w = input_.shape[2], input_.shape[3]
-        H, W = ((h + mul) // mul) * mul, ((w + mul) // mul) * mul
-        padh = H - h if h % mul != 0 else 0
-        padw = W - w if w % mul != 0 else 0
-        input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')
-        with torch.no_grad():
-            restored = model(input_)
-
-        restored = torch.clamp(restored, 0, 1)
-        restored = restored[:, :, :h, :w]
-        restored = restored.permute(0, 2, 3, 1).cpu().detach().numpy()
-        restored = img_as_ubyte(restored[0])
-
-        f = os.path.splitext(os.path.split(file_)[-1])[0]
-        save_img((os.path.join(out_dir, f + '.png')), restored)
-
-
 def setup(args):
     save_dir = 'result/'
     folder = 'test/'

     return folder, save_dir
-
-if __name__ == '__main__':
-    main()
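The second hunk's header elides lines 25-30 of load_checkpoint, so the body of the try/except is not visible in this diff. The OrderedDict import and the new_state_dict loop shown in the context lines are the usual signs of stripping the 'module.' prefix that torch.nn.DataParallel adds to parameter names. The following is only a hedged sketch of what such a body typically looks like, not the verified contents of this file; the 'state_dict' checkpoint key and the k[7:] slice are assumptions:

import torch
from collections import OrderedDict

def load_checkpoint(model, weights):
    checkpoint = torch.load(weights)
    try:
        # Checkpoint saved from a bare model: parameter names already match.
        model.load_state_dict(checkpoint["state_dict"])  # 'state_dict' key is an assumption
    except Exception:
        # Checkpoint saved from nn.DataParallel: keys carry a 'module.' prefix,
        # so rebuild the dict with the prefix removed before loading (assumed pattern).
        state_dict = checkpoint["state_dict"]
        new_state_dict = OrderedDict()
        for k, v in state_dict.items():
            name = k[7:]  # drop the leading 'module.'
            new_state_dict[name] = v
        model.load_state_dict(new_state_dict)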
 
 
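After this commit, main_test_SRMNet.py keeps only save_img, load_checkpoint, and setup, and no longer does anything when executed directly, since the argparse-driven main() and the __main__ guard were removed. A minimal external driver reproducing the removed inference loop (pad to a multiple of 16, forward pass, clamp, crop, save) could look roughly like the sketch below; it is not part of the repository and assumes the same model/SRMNet.py module, a CUDA device, and the pretrained weights path used by the deleted code:

import glob
import os

import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image
from skimage import img_as_ubyte

from model.SRMNet import SRMNet                      # assumed, as in the deleted imports
from main_test_SRMNet import load_checkpoint, save_img

model = SRMNet().cuda()
load_checkpoint(model, 'experiments/pretrained_models/AWGN_denoising_SRMNet.pth')
model.eval()

mul = 16  # pad so height and width are multiples of 16, as the removed main() did
for file_ in sorted(glob.glob(os.path.join('test/', '*.png'))):  # deleted code matched '*.PNG'
    img = Image.open(file_).convert('RGB')
    input_ = TF.to_tensor(img).unsqueeze(0).cuda()

    # Reflect-pad the bottom/right edges up to the next multiple of 16.
    h, w = input_.shape[2], input_.shape[3]
    padh = (mul - h % mul) % mul
    padw = (mul - w % mul) % mul
    input_ = F.pad(input_, (0, padw, 0, padh), 'reflect')

    with torch.no_grad():
        restored = torch.clamp(model(input_), 0, 1)[:, :, :h, :w]

    # Back to HWC uint8 and save via the module's save_img helper.
    restored = img_as_ubyte(restored.permute(0, 2, 3, 1).cpu().numpy()[0])
    os.makedirs('result/', exist_ok=True)
    name = os.path.splitext(os.path.basename(file_))[0]
    save_img(os.path.join('result/', name + '.png'), restored)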