# Hugging Face Spaces page header captured by the scrape ("Spaces: ... Running");
# kept here as a comment so the file remains valid Python.
#!/usr/bin/env python
"""Gradio demo: removes antialiasing from an image using a TorchScript model."""
import gradio as gr
import torch
from torch import Tensor
from torchvision import transforms
import numpy as np
import sys

# TorchScript model; populated by load_model() before the Gradio app starts.
model = None
def load_model():
    """Load the TorchScript model from "model.pt" into the module-level ``model``.

    Must be called once before ``undo_antialiasing`` is used.
    """
    global model
    model = torch.jit.load("model.pt")
def denormalize_and_clip_as_tensor(im: Tensor) -> Tensor:
    """Map a tensor normalized to [-1, 1] back to [0, 1], clipping out-of-range values."""
    return torch.clip(im * 0.5 + 0.5, 0.0, 1.0)
def denormalize_and_clip_as_numpy(im: Tensor) -> np.ndarray:
    """Convert a normalized 1xCxHxW tensor to a contiguous HxWxC numpy image in [0, 1].

    Assumes ``im`` has a leading batch dimension of size 1 — TODO confirm against callers.
    """
    im = im.squeeze(0)  # drop the batch dimension
    return np.ascontiguousarray(
        denormalize_and_clip_as_tensor(im).permute(1, 2, 0).detach().cpu().numpy()
    )
def undo_antialiasing(im):
    """Run the model on an HxWxC uint8 RGB image; return the result as a uint8 image.

    Requires ``load_model()`` to have populated the module-level ``model``.
    """
    # uint8 HWC -> float 1xCxHxW in [0, 1], then normalize each channel to [-1, 1].
    im_torch = torch.from_numpy(im).permute(2, 0, 1).unsqueeze(0).float() / 255.0
    im_torch = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(im_torch)
    with torch.no_grad():
        output_torch = model(im_torch)
    # NOTE(review): the TorchScript model's output appears to expose an ``.rgb``
    # tensor attribute — confirm against model.pt's exported output type.
    output = denormalize_and_clip_as_numpy(output_torch.rgb)
    # Output is clipped to [0, 1]; scaling by 255.99 maps 1.0 -> 255 after truncation.
    return (output * 255.99).astype(np.uint8)
# Load the model and wire up the Gradio UI at import time (Spaces runs this file
# directly as the app entry point).
load_model()

# NOTE(review): gr.inputs / gr.outputs are the legacy pre-3.x Gradio API;
# migrate to gr.Image() for inputs/outputs when upgrading Gradio.
iface = gr.Interface(
    fn=undo_antialiasing,
    inputs=gr.inputs.Image(),
    outputs=gr.outputs.Image(),
    examples=[['examples/Bowling.png'], ['examples/opencv.png']],
)
iface.launch()