# Fast Neural Style Transfer — Gradio demo app (Hugging Face Spaces).
import functools
import re
from pydoc import describe  # NOTE(review): appears unused; kept to avoid changing module behavior

import numpy as np
import torch
from PIL import Image
from torchvision import transforms

import gradio as gr

from model import TransformerNet
# Single shared network instance; the selected style's weights are loaded
# into it on each request inside run().
style_model = TransformerNet()
# Inference runs on CPU (e.g. a CPU-only Space).
device=torch.device("cpu")
# Maps the dropdown label shown to the user to its checkpoint file under ./models/.
styles_map = {"Kandinsky, Several circles": "kand_circles.model",
"Haring, Dance": "haring_dance.model",
"Picasso, The weeping woman": "picasso_weeping.model",
"Van Gogh, Wheatfield with crows": "vangogh_crows.model"}
# PIL image -> float tensor scaled to [0, 255] (matches the clamp(0, 255)
# applied to the model output in run()).
content_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Lambda(lambda x: x.mul(255))
])
@functools.lru_cache(maxsize=None)
def _load_style_state(model_file):
    """Load and cache the state dict for one style checkpoint.

    map_location=device makes GPU-saved checkpoints loadable on this
    CPU-only deployment (the original bare torch.load would raise).
    Deprecated InstanceNorm running-stats keys (in*.running_mean/var)
    are stripped so load_state_dict does not reject the checkpoint.
    Caching avoids re-reading the file from disk on every request
    (key space is bounded: one entry per style in styles_map).
    """
    state_dict = torch.load(f"./models/{model_file}", map_location=device)
    for k in list(state_dict.keys()):
        if re.search(r'in\d+\.running_(mean|var)$', k):
            del state_dict[k]
    return state_dict

def run(content_image, style):
    """Apply the selected artwork's style to a content image.

    Args:
        content_image: PIL image uploaded by the user. Resized in place
            so that its longest side is at most 1080 px.
        style: dropdown label; key into ``styles_map`` selecting the
            trained checkpoint to load.

    Returns:
        A PIL image with the style transferred.
    """
    # thumbnail() downsizes in place and preserves aspect ratio.
    content_image.thumbnail((1080, 1080))
    img = content_transform(content_image)
    img = img.unsqueeze(0).to(device)
    # Swap the requested style's weights into the shared network.
    style_model.load_state_dict(_load_style_state(styles_map[style]))
    style_model.to(device)
    with torch.no_grad():
        output = style_model(img)
    # (C, H, W) float tensor in [0, 255] -> uint8 HWC array -> PIL image.
    arr = output[0].clone().clamp(0, 255).numpy()
    arr = arr.transpose(1, 2, 0).astype("uint8")
    return Image.fromarray(arr)
# NOTE(review): gr.inputs.* is the legacy Gradio 2.x component API; newer
# Gradio releases expose gr.Image / gr.Dropdown directly — confirm the
# pinned gradio version before migrating.
content_image_input = gr.inputs.Image(label="Content Image", type="pil")
style_input = gr.inputs.Dropdown(list(styles_map.keys()), type="value", default="Kandinsky, Several circles", label="Style")
# User-facing text rendered by the Gradio UI: short description above the
# inputs, and an HTML/Markdown article (references + style previews) below.
# These strings are runtime content — do not edit them for style.
description="Fast Neural Style Transfer demo (trained from scratch!). Upload a content image. Select an artwork. Enjoy."
article="""
**References**\n\n
You can find <a href='https://francescopochetti.com/fast-neural-style-transfer-deploying-pytorch-models-to-aws-lambda/' target='_blank'>here</a> a post I put together
describing the approach I used to train models and deploy them on <a href='http://visualneurons.com/fast.html' target='_blank'>visualneurons.com</a> using AWS Lambda. \n
<a href='https://github.com/FraPochetti/examples/blob/master/fast_neural_style/neural_style/FastStyleTransferPytorch.ipynb' target='_blank'>Here</a> is instead the Jupyter notebook
with the training logic. \n
<br>
<hr>
**Kandinsky, Several circles**
<img src='https://style-transfer-webapptest.s3.eu-west-1.amazonaws.com/small_images_hf/Several_Circles.jpeg'>
<hr>
**Haring, Dance**
<img src='https://style-transfer-webapptest.s3.eu-west-1.amazonaws.com/small_images_hf/Haring.jpeg'>
<hr>
**Picasso, The weeping woman**
<img src='https://style-transfer-webapptest.s3.eu-west-1.amazonaws.com/small_images_hf/weeping.png'>
<hr>
**Van Gogh, Wheatfield with crows**
<img src='https://style-transfer-webapptest.s3.eu-west-1.amazonaws.com/small_images_hf/Wheatfield_with_crows.jpeg'>
"""
# Pre-filled example shown under the interface: [content image path, style label].
example = ["dog.jpeg", "Kandinsky, Several circles"]

# Wire the components and run() into the Gradio UI, then start the server.
# (The original last line ended in a stray " |" — extraction residue that
# made the file a SyntaxError; removed here.)
app_interface = gr.Interface(fn=run,
                             inputs=[content_image_input, style_input],
                             outputs="image",
                             title="Fast Neural Style Transfer",
                             description=description,
                             examples=[example],
                             article=article)
app_interface.launch()