from PIL import Image
import torch
import gradio as gr

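# Load the AnimeGAN2 generator from torch.hub with the face_paint_512_v1 pretrained weights.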
model = torch.hub.load("bryandlee/animegan2-pytorch:main", "generator", pretrained="face_paint_512_v1")

# face2paint helper from the same repo: it preprocesses the input portrait, runs it
# through the generator, and returns the result as a PIL image.
face2paint = torch.hub.load(
    "bryandlee/animegan2-pytorch:main", "face2paint",
    size=512, device="cpu"
)

def inference(img):
    # Convert the uploaded portrait to the anime-painting style.
    return face2paint(model, img)

title = "AnimeGANv1"
description = "Gradio demo for AnimeGANv1 Face Portrait v1. To use it, simply upload your image or click one of the examples to load it. For best results, use a cropped portrait picture similar to the examples below. Read more at the links below."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
examples = [["bill.png"]]
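# Build the Gradio interface (PIL image in, stylized PIL image out) and launch the demo.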
gr.Interface(
    inference,
    gr.inputs.Image(type="pil"),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
    enable_queue=True,
).launch()
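# Note: torch and gradio must be installed before running this script; on the first run,
# torch.hub.load downloads the bryandlee/animegan2-pytorch repo and its weights and caches
# them locally, so the initial launch takes noticeably longer than subsequent ones.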