"""Gradio demo: face frontalization with a pretrained supervised GAN.

Downloads the generator weights and its network definition from the
Hugging Face Hub, then serves a Gradio interface that frontalizes an
uploaded face image.
"""
from huggingface_hub import hf_hub_url, hf_hub_download
import gradio as gr
import numpy as np
import requests
import torch
from torchvision import transforms
from PIL import Image
import warnings

warnings.filterwarnings('ignore')

# Pretrained generator weights from the Hugging Face Hub.
path_to_model = hf_hub_download(repo_id="opetrova/face-frontalization", filename="generator_v0.pt")

# Download network.py into the current directory — torch.load needs the
# generator's class definition importable when unpickling the model.
network_url = hf_hub_url(repo_id="opetrova/face-frontalization", filename="network.py")
r = requests.get(network_url, allow_redirects=True)
# Fail fast on a bad download instead of silently saving an error page.
r.raise_for_status()
with open('network.py', 'wb') as f:
    f.write(r.content)

saved_model = torch.load(path_to_model, map_location=torch.device('cpu'))


def frontalize(image):
    """Frontalize a single face image with the pretrained generator.

    Parameters
    ----------
    image : numpy.ndarray
        H x W x C uint8 image, as supplied by the Gradio Image input.

    Returns
    -------
    numpy.ndarray
        128 x 128 x 3 float image with values in [0, 1].
    """
    # Convert the test image to the [1, 3, 128, 128] float tensor the
    # frontalization model expects.
    preprocess = transforms.Compose((
        transforms.ToPILImage(),
        transforms.Resize(size=(128, 128)),
        transforms.ToTensor(),
    ))
    input_tensor = torch.unsqueeze(preprocess(image), 0).float()

    # torch.autograd.Variable has been a no-op since PyTorch 0.4; run
    # inference under no_grad() so no autograd graph is built.
    with torch.no_grad():
        generated_image = saved_model(input_tensor)

    # The generator outputs values in [-1, 1]; map them to [0, 1] so the
    # image displays correctly.
    generated_image = generated_image.squeeze().permute(1, 2, 0).numpy()
    generated_image = (generated_image + 1.0) / 2.0
    return generated_image


iface = gr.Interface(
    frontalize,
    gr.inputs.Image(type="numpy"),
    "image",
    title='Face Frontalization',
    description='PyTorch implementation of a supervised GAN (see blog post)',
    examples=["amos.png", "clarissa.png"],
)
iface.launch()