# AnimeGANv2 / app.py
# Author: Ahsen Khaliq
# Commit 2061b93: "pytorch to onnx"
import os

import cv2 as cv
import gradio as gr
import numpy as np
import onnxruntime
from PIL import Image

# Report whether onnxruntime will run on CPU or GPU.
print(onnxruntime.get_device())

# Download the exported "face_paint_512_v2" ONNX model from Google Drive.
os.system("pip install gdown")
os.system("gdown https://drive.google.com/uc?id=1riNxV1BWMAXfmWZ3LrQbEkvzV8f7lOCp")

onnx_session = onnxruntime.InferenceSession("face_paint_512_v2_0.onnx")
input_name = onnx_session.get_inputs()[0].name
output_name = onnx_session.get_outputs()[0].name

# The model expects square 512x512 inputs.
side_length = 512
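# Optional sketch (assumption, not part of the original app): if onnxruntime-gpu
# is installed, the session could be created with an explicit provider order,
# falling back to CPU when no GPU is available:
# onnx_session = onnxruntime.InferenceSession(
#     "face_paint_512_v2_0.onnx",
#     providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
# )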
def inference(img):
    # Gradio passes a PIL image (RGB); resize it to the model's input resolution.
    image = np.array(img)
    image = cv.resize(image, dsize=(side_length, side_length))

    # Preprocess: HWC uint8 in [0, 255] -> NCHW float32 in [-1, 1].
    x = np.array(image, dtype=np.float32)
    x = x.transpose(2, 0, 1)
    x = x * 2 / 255 - 1
    x = x.reshape(-1, 3, side_length, side_length)

    # Run the ONNX model.
    onnx_result = onnx_session.run([output_name], {input_name: x})

    # Postprocess: [-1, 1] -> uint8 RGB image in HWC layout.
    onnx_result = np.array(onnx_result).squeeze()
    onnx_result = (onnx_result * 0.5 + 0.5).clip(0, 1)
    onnx_result = onnx_result * 255
    onnx_result = onnx_result.transpose(1, 2, 0).astype('uint8')

    return Image.fromarray(onnx_result)
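# Local usage sketch (assumption: "groot.jpeg" from the examples list sits next
# to this script; any RGB portrait photo would work):
# result = inference(Image.open("groot.jpeg"))
# result.save("groot_anime.png")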
title = "AnimeGANv2"
description = "Gradio demo for AnimeGANv2 Face Portrait v2. To use it, simply upload your image or click one of the examples to load it. For best results, use a cropped portrait picture similar to the examples below. Read more at the links below."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>Samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
examples = [['groot.jpeg'], ['bill.png'], ['tony.png'], ['elon.png'], ['IU.png'], ['billie.png'], ['will.png'], ['beyonce.jpeg'], ['gongyoo.jpeg']]

gr.Interface(
    inference,
    gr.inputs.Image(type="pil"),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    examples=examples,
).launch()
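# Note (not in the original script): when running locally rather than on
# Hugging Face Spaces, launch(share=True) creates a temporary public URL.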