import gc

import numpy as np
import torch


def bgr_to_rgb(image: torch.Tensor) -> torch.Tensor:
    # Reverse the channel dimension of a CHW (or NCHW) tensor: BGR -> RGB.
    out: torch.Tensor = image.flip(-3)
    return out


def rgb_to_bgr(image: torch.Tensor) -> torch.Tensor:
    # The channel flip is its own inverse, so reuse bgr_to_rgb.
    return bgr_to_rgb(image)


def bgra_to_rgba(image: torch.Tensor) -> torch.Tensor:
    # Swap the B and R channels of a CHW tensor, leaving alpha in place.
    out: torch.Tensor = image[[2, 1, 0, 3], :, :]
    return out


def rgba_to_bgra(image: torch.Tensor) -> torch.Tensor:
    # The channel permutation is its own inverse, so reuse bgra_to_rgba.
    return bgra_to_rgba(image)
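

# A quick sanity check for the channel helpers above (an illustrative sketch;
# assumes a CHW tensor, which is the layout these functions operate on):
#
#     bgr = torch.rand(3, 4, 4)
#     rgb = bgr_to_rgb(bgr)
#     assert torch.equal(rgb[0], bgr[2])        # red was BGR channel 2
#     assert torch.equal(rgb_to_bgr(rgb), bgr)  # round trip restores input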


def auto_split_upscale(
    lr_img: np.ndarray,
    upscale_function,
    scale: int = 4,
    overlap: int = 32,
    max_depth: int | None = None,
    current_depth: int = 1,
):
    # Attempt to upscale the whole tile first, either because no safe depth is
    # known yet (max_depth is None) or because this branch has reached the
    # depth that is known to fit in memory.
    if max_depth is None or max_depth == current_depth:
        try:
            print(f"auto_split_upscale, current depth: {current_depth}")
            result, _ = upscale_function(lr_img, scale)
            return result, current_depth
        except RuntimeError as e:
            # A CUDA out-of-memory error means the tile is too large: clear
            # what we can and fall through to the split below.
            if "CUDA" in str(e):
                torch.cuda.empty_cache()
                gc.collect()
            # Anything else is a genuine failure; re-raise it with its
            # original traceback instead of wrapping it in a new exception.
            else:
                raise
        finally:
            torch.cuda.empty_cache()
            gc.collect()

    h, w, c = lr_img.shape

    # Split the tile into four quadrants that overlap by `overlap` pixels, so
    # each sub-result carries valid context across the seams.
    top_left = lr_img[: h // 2 + overlap, : w // 2 + overlap, :]
    top_right = lr_img[: h // 2 + overlap, w // 2 - overlap :, :]
    bottom_left = lr_img[h // 2 - overlap :, : w // 2 + overlap, :]
    bottom_right = lr_img[h // 2 - overlap :, w // 2 - overlap :, :]

    # Recurse on the first quadrant with the current max_depth to discover the
    # depth at which the upscale actually fits in memory, then pass that depth
    # to the remaining quadrants so they skip attempts that would fail.
    top_left_rlt, depth = auto_split_upscale(
        top_left,
        upscale_function,
        scale=scale,
        overlap=overlap,
        max_depth=max_depth,
        current_depth=current_depth + 1,
    )
    top_right_rlt, _ = auto_split_upscale(
        top_right,
        upscale_function,
        scale=scale,
        overlap=overlap,
        max_depth=depth,
        current_depth=current_depth + 1,
    )
    bottom_left_rlt, _ = auto_split_upscale(
        bottom_left,
        upscale_function,
        scale=scale,
        overlap=overlap,
        max_depth=depth,
        current_depth=current_depth + 1,
    )
    bottom_right_rlt, _ = auto_split_upscale(
        bottom_right,
        upscale_function,
        scale=scale,
        overlap=overlap,
        max_depth=depth,
        current_depth=current_depth + 1,
    )

    # Dimensions of the stitched output.
    out_h = h * scale
    out_w = w * scale

    # Allocate the output canvas (assumes the upscale function returns 8-bit
    # images, as the np.uint8 buffer implies).
    output_img = np.zeros((out_h, out_w, c), np.uint8)

    # Fill each quadrant of the output from the corresponding upscaled tile,
    # cropping away the scaled overlap margins.
    output_img[: out_h // 2, : out_w // 2, :] = top_left_rlt[
        : out_h // 2, : out_w // 2, :
    ]
    output_img[: out_h // 2, -out_w // 2 :, :] = top_right_rlt[
        : out_h // 2, -out_w // 2 :, :
    ]
    output_img[-out_h // 2 :, : out_w // 2, :] = bottom_left_rlt[
        -out_h // 2 :, : out_w // 2, :
    ]
    output_img[-out_h // 2 :, -out_w // 2 :, :] = bottom_right_rlt[
        -out_h // 2 :, -out_w // 2 :, :
    ]

    return output_img, depth
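

if __name__ == "__main__":
    # Minimal usage sketch. `fake_upscale` is a hypothetical stand-in that
    # enlarges by nearest-neighbour repetition; a real caller would wrap model
    # inference in the same (image, scale) -> (result, _) interface.
    def fake_upscale(img: np.ndarray, scale: int):
        out = img.repeat(scale, axis=0).repeat(scale, axis=1)
        return out, 1

    lr = np.zeros((64, 64, 3), dtype=np.uint8)
    sr, depth = auto_split_upscale(lr, fake_upscale, scale=4)
    print(sr.shape, depth)  # expected: (256, 256, 3) 1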