# faceanime4u / app.py
from PIL import Image
import torch
import gradio as gr
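# Load the AnimeGANv2 generators and the face2paint helper from torch.hub.
# model2 uses the default pretrained weights (the "version 2" / Face Portrait v2 style
# referenced in the demo text); model1 uses the face_paint_512_v1 weights.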
model2 = torch.hub.load(
"AK391/animegan2-pytorch:main",
"generator",
pretrained=True,
device="cpu",
progress=False
)
model1 = torch.hub.load(
    "AK391/animegan2-pytorch:main",
    "generator",
    pretrained="face_paint_512_v1",
    device="cpu"
)
face2paint = torch.hub.load(
'AK391/animegan2-pytorch:main', 'face2paint',
size=512, device="cpu", side_by_side=False
)
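# Pick the generator matching the radio-button choice and run face2paint on the uploaded image.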
def inference(img, ver):
    if ver == 'version 2 (🔺 robustness,🔻 stylization)':
        out = face2paint(model2, img)
    else:
        out = face2paint(model1, img)
    return out
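# Text shown on the demo page (title, usage notes, footer).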
title = "Face Anime For You"
description = ("Online demo for AnimeGANv2 Face Portrait v2. To use it, simply upload your image, "
               "or click one of the examples to load it. For best results, use a cropped portrait "
               "similar to the examples below: the subject should be a face with clear, unobstructed "
               "features and good lighting.")
article = "<p style='text-align: center'>❤ from Bruce</p>"
examples = [['elon.png', 'version 2 (🔺 robustness,🔻 stylization)'],
['IU.png', 'version 2 (🔺 robustness,🔻 stylization)']]
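# Build the Gradio interface: an image upload plus a version selector, returning the stylized PIL image.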
gr.Interface(
    inference,
    inputs=[
        gr.inputs.Image(type="pil"),
        gr.inputs.Radio(
            ['version 1 (🔺 stylization, 🔻 robustness)',
             'version 2 (🔺 robustness,🔻 stylization)'],
            type="value",
            default='version 2 (🔺 robustness,🔻 stylization)',
            label='version'
        )
    ],
    outputs=gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
    allow_flagging=False,
    allow_screenshot=False
).launch()