# animeganv2-onnx / app.py
import os

import cv2 as cv
import gradio as gr
import numpy as np
import onnxruntime
from PIL import Image

print(onnxruntime.get_device())

# Fetch the exported AnimeGANv2 face_paint_512_v2 ONNX model.
os.system("pip install gdown")
os.system("gdown https://drive.google.com/uc?id=1riNxV1BWMAXfmWZ3LrQbEkvzV8f7lOCp")

# Create the ONNX Runtime inference session.
opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 16
onnx_session = onnxruntime.InferenceSession(
    "/home/user/app/face_paint_512_v2_0.onnx", sess_options=opts
)
input_name = onnx_session.get_inputs()[0].name
output_name = onnx_session.get_outputs()[0].name
side_length = 512
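# Optional sanity check (added sketch): print the model's declared input
# signature. The preprocessing in inference() below assumes a 1x3x512x512
# float32 tensor scaled to [-1, 1].
print(onnx_session.get_inputs()[0].shape, onnx_session.get_inputs()[0].type)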
def inference(img):
    # Preprocess: resize to 512x512, scale to [-1, 1], NCHW float32.
    image = np.array(img)                     # PIL image -> RGB ndarray
    image = image[:, :, ::-1].copy()          # RGB -> BGR (OpenCV convention)
    image = cv.resize(image, dsize=(side_length, side_length))
    x = cv.cvtColor(image, cv.COLOR_BGR2RGB)  # back to RGB for the model
    x = np.array(x, dtype=np.float32)
    x = x.transpose(2, 0, 1)                  # HWC -> CHW
    x = x / 127.5 - 1                         # [0, 255] -> [-1, 1]
    x = x.reshape(-1, 3, side_length, side_length)

    # Run the model.
    onnx_result = onnx_session.run([output_name], {input_name: x})

    # Postprocess: squeeze the batch dim, map [-1, 1] back to [0, 255],
    # CHW -> HWC, and return a PIL image (already in RGB order).
    onnx_result = np.array(onnx_result).squeeze()
    onnx_result = (onnx_result * 0.5 + 0.5).clip(0, 1)
    onnx_result = onnx_result * 255
    onnx_result = onnx_result.transpose(1, 2, 0).astype('uint8')
    im_pil = Image.fromarray(onnx_result)
    return im_pil
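# Optional local smoke test (sketch; assumes a sample photo saved as
# "test.jpg", a hypothetical filename). Uncomment to stylize one image
# without launching the UI:
# test_out = inference(Image.open("test.jpg"))
# test_out.save("test_anime.jpg")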
title = "AnimeGANv2"
description = "Gradio demo for AnimeGANv2 Face Portrait v2. Capture an image with your webcam and the stylized result is generated live. Read more at the links below."
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"
gr.Interface(
    inference,
    gr.inputs.Image(type="pil", source="webcam"),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    enable_queue=True,
    live=True,
).launch()