import math
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import gradio as gr
from huggingface_hub import from_pretrained_fastai
from fastai.vision.all import *
from torchvision.models import vgg19, vgg16
from utils import *

pascal_source = '.'
EXAMPLES_PATH = Path('./examples')
repo_id = "hugginglearners/fastai-style-transfer"

def _inner(feat_net, hooks, x):
    "Run the feature network on `x` and return the activations captured by the hooks."
    feat_net(x)
    return hooks.stored

def _get_layers(arch: str, pretrained=True):
    "Get the feature network and hook layers for a VGG model (only VGG16 and VGG19 are supported)."
    feat_net = vgg19(pretrained=pretrained) if '19' in arch else vgg16(pretrained=pretrained)
    config = _vgg_config.get(arch)
    features = feat_net.features.eval()
    for p in features.parameters():
        p.requires_grad = False
    return feat_net, [features[i] for i in config]

# indices of the VGG feature layers used for the style/content features
_vgg_config = {
    'vgg16': [1, 11, 18, 25, 20],
    'vgg19': [1, 6, 11, 20, 29, 22]
}

# VGG-19 feature extractor with hooks on the chosen layers, plus the exported learner from the Hub
feat_net, layers = _get_layers('vgg19', True)
hooks = hook_outputs(layers, detach=False)
learner = from_pretrained_fastai(repo_id)

def infer(img):
    pred = learner.predict(img)
    image = pred[0].numpy()             # stylized output as a CHW array
    image = image.transpose((1, 2, 0))  # CHW -> HWC for plotting
    plt.imshow(image)
    return plt.gcf()  # alternatively: pred[0].show()

# the input image
inputs = gr.inputs.Image(shape=(192, 192))

# the app outputs the stylized image as a matplotlib figure
output = gr.Plot()

# it's good practice to pass examples, a description and a title to guide users
title = 'Style transfer'
description = ''
article = "Author: Nhu Hoang."
examples = [f'{EXAMPLES_PATH}/{f.name}' for f in EXAMPLES_PATH.iterdir()]

gr.Interface(infer, inputs, output, examples=examples, allow_flagging='never',
             cache_examples=False, title=title, description=description, article=article,
             live=False).launch(enable_queue=True, debug=False, inbrowser=False)
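
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original app): a UI-free smoke test of the
# same inference path, reusing the `learner`, `EXAMPLES_PATH`, `PILImage` and
# `plt` names defined above. It is only defined here, never called -- launch()
# blocks, so this would typically live in a separate script or a REPL. The
# function name and output filename are illustrative assumptions.
def _smoke_test(out_path='stylized_preview.png'):
    "Run the loaded learner on the first bundled example and save a preview PNG."
    sample = PILImage.create(next(EXAMPLES_PATH.iterdir()))
    pred = learner.predict(sample)                    # fastai returns a tuple; pred[0] is the decoded output
    stylized = pred[0].numpy().transpose((1, 2, 0))   # CHW tensor -> HWC array
    plt.imsave(out_path, stylized.clip(0, 1))         # clip to [0, 1] for imsave
    return out_path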