import math
import os

import numpy as np
import torch
from torch.nn import functional as F

from rrdbnet_arch import RRDBNet
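
# Real-ESRNet inference wrapper: loads an RRDBNet generator from
# <base_dir>/weights/ and upsamples BGR uint8 images, optionally
# tile-by-tile (tile_size > 0) to bound GPU memory use.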
class RealESRNet(object):

    def __init__(self, base_dir='./', model=None, scale=2, tile_size=0, tile_pad=10, device='cuda'):
        self.base_dir = base_dir
        self.scale = scale
        self.tile_size = tile_size
        self.tile_pad = tile_pad
        self.device = device
        self.load_srmodel(base_dir, model)

    def load_srmodel(self, base_dir, model):
        self.srmodel = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=32, num_block=23, num_grow_ch=32, scale=self.scale)
        if model is None:
            loadnet = torch.load(os.path.join(self.base_dir, 'weights', f'realesrnet_x{self.scale}.pth'), map_location='cpu')
        else:
            loadnet = torch.load(os.path.join(self.base_dir, 'weights', f'{model}_x{self.scale}.pth'), map_location='cpu')
        # load the EMA (exponential moving average) weights, the ones intended
        # for inference; mapping to CPU first lets CUDA-saved checkpoints
        # load on CPU-only machines before the move to self.device below
        self.srmodel.load_state_dict(loadnet['params_ema'], strict=True)
        self.srmodel.eval()
        self.srmodel = self.srmodel.to(self.device)
    def tile_process(self, img):
        """Crop the input image into tiles, process each tile independently,
        and merge the processed tiles back into one image.

        Modified from: https://github.com/ata4/esrgan-launcher
        """
        batch, channel, height, width = img.shape
        output_height = height * self.scale
        output_width = width * self.scale
        output_shape = (batch, channel, output_height, output_width)
        # start with a black image
        output = img.new_zeros(output_shape)
        tiles_x = math.ceil(width / self.tile_size)
        tiles_y = math.ceil(height / self.tile_size)
        # loop over all tiles
        for y in range(tiles_y):
            for x in range(tiles_x):
                # extract tile from input image
                ofs_x = x * self.tile_size
                ofs_y = y * self.tile_size
                # input tile area on total image
                input_start_x = ofs_x
                input_end_x = min(ofs_x + self.tile_size, width)
                input_start_y = ofs_y
                input_end_y = min(ofs_y + self.tile_size, height)
                # input tile area on total image with padding
                input_start_x_pad = max(input_start_x - self.tile_pad, 0)
                input_end_x_pad = min(input_end_x + self.tile_pad, width)
                input_start_y_pad = max(input_start_y - self.tile_pad, 0)
                input_end_y_pad = min(input_end_y + self.tile_pad, height)
                # input tile dimensions (without padding)
                input_tile_width = input_end_x - input_start_x
                input_tile_height = input_end_y - input_start_y
                tile_idx = y * tiles_x + x + 1
                input_tile = img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad]
                # upscale tile
                try:
                    with torch.no_grad():
                        output_tile = self.srmodel(input_tile)
                except RuntimeError as error:
                    print('Error', error)
                    return None
                if tile_idx % 10 == 0:
                    print(f'\tTile {tile_idx}/{tiles_x * tiles_y}')
                # output tile area on total image
                output_start_x = input_start_x * self.scale
                output_end_x = input_end_x * self.scale
                output_start_y = input_start_y * self.scale
                output_end_y = input_end_y * self.scale
                # output tile area without padding (crop the padded border away)
                output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale
                output_end_x_tile = output_start_x_tile + input_tile_width * self.scale
                output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale
                output_end_y_tile = output_start_y_tile + input_tile_height * self.scale
                # put tile into output image
                output[:, :, output_start_y:output_end_y,
                       output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile,
                                                                  output_start_x_tile:output_end_x_tile]
        return output
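
    # A worked example of the tile geometry above (illustrative numbers, not
    # from the original code): with width=500, tile_size=200, tile_pad=10 and
    # scale=2, the tile at x=1 covers input columns 200..400 but reads the
    # padded region 190..410; its upscaled result fills output columns
    # 400..800, taken from columns (200-190)*2 = 20 .. 20 + 200*2 = 420 of
    # the upscaled padded tile, so the padded borders never reach the output.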
    def process(self, img):
        # BGR uint8 HWC -> RGB float CHW in [0, 1], with a leading batch dim
        img = img.astype(np.float32) / 255.
        img = torch.from_numpy(np.transpose(img[:, :, [2, 1, 0]], (2, 0, 1))).float()
        img = img.unsqueeze(0).to(self.device)
        # pad so that height and width are multiples of mod_scale
        if self.scale == 2:
            mod_scale = 2
        elif self.scale == 1:
            mod_scale = 4
        else:
            mod_scale = None
        if mod_scale is not None:
            h_pad, w_pad = 0, 0
            _, _, h, w = img.size()
            if h % mod_scale != 0:
                h_pad = mod_scale - h % mod_scale
            if w % mod_scale != 0:
                w_pad = mod_scale - w % mod_scale
            img = F.pad(img, (0, w_pad, 0, h_pad), 'reflect')
        try:
            with torch.no_grad():
                if self.tile_size > 0:
                    output = self.tile_process(img)
                else:
                    output = self.srmodel(img)
            del img
            # tile_process reports failure by returning None
            if output is None:
                return None
            # remove the extra pad; the pad was applied to the input, so it is
            # self.scale times larger in the output
            if mod_scale is not None:
                _, _, h, w = output.size()
                output = output[:, :, 0:h - h_pad * self.scale, 0:w - w_pad * self.scale]
            # RGB float CHW in [0, 1] -> BGR uint8 HWC
            output = output.detach().squeeze().float().cpu().clamp_(0, 1).numpy()
            output = np.transpose(output[[2, 1, 0], :, :], (1, 2, 0))
            output = (output * 255.0).round().astype(np.uint8)
            return output
        except Exception as e:
            print('sr failed:', e)
            return None
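

# --- Usage sketch (illustrative, not part of the original file) ---
# A minimal example of running the upsampler on one image. The weight file
# <base_dir>/weights/realesrnet_x2.pth and the 'input.png'/'output.png'
# filenames are assumptions; cv2.imread returns the BGR uint8 HWC array
# that process() expects.
if __name__ == '__main__':
    import cv2  # OpenCV, assumed available

    upsampler = RealESRNet(base_dir='./', scale=2, tile_size=400, tile_pad=10,
                           device='cuda' if torch.cuda.is_available() else 'cpu')
    img = cv2.imread('input.png')  # HWC, BGR, uint8
    result = upsampler.process(img)
    if result is not None:
        cv2.imwrite('output.png', result)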