File size: 2,365 Bytes
2592f72
a488f5b
431168d
52504c8
431168d
 
2592f72
ba1f1ff
9139725
ba1f1ff
2592f72
 
 
 
 
 
 
 
a78ce6b
2592f72
 
c8b967a
2592f72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bc3aebd
2592f72
 
 
 
 
 
def2079
2592f72
 
d8d6db9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
# --- Runtime / model setup (executed at import time) ------------------------
import onnxruntime
# Log whether onnxruntime is running on CPU or GPU in this Space.
print(onnxruntime.get_device())
import os
import gradio as gr
# NOTE(review): installing a package at runtime with os.system is fragile --
# it should be declared in requirements.txt instead.
os.system("pip install gdown")
# Download the AnimeGANv2 face_paint_512_v2 ONNX weights from Google Drive.
os.system("gdown https://drive.google.com/uc?id=1riNxV1BWMAXfmWZ3LrQbEkvzV8f7lOCp")

opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 16
# NOTE(review): hard-coded absolute path -- assumes the Hugging Face Spaces
# working directory; gdown saves to the CWD, so verify these match.
onnx_session = onnxruntime.InferenceSession("/home/user/app/face_paint_512_v2_0.onnx",sess_options=opts)

# Single-input / single-output model: cache the tensor names once.
input_name = onnx_session.get_inputs()[0].name
output_name = onnx_session.get_outputs()[0].name

# The model operates on fixed 512x512 inputs.
side_length = 512

import cv2 as cv
import numpy as np
from PIL import Image

def inference(img):
    """Stylize a portrait with the AnimeGANv2 face-paint ONNX model.

    Args:
        img: input image as a ``PIL.Image`` (RGB), any size.

    Returns:
        PIL.Image: the 512x512 stylized result (RGB).
    """
    # PIL already yields RGB; the original code flipped to BGR and then
    # converted straight back, a no-op round trip that is dropped here.
    x = np.array(img)
    x = cv.resize(x, dsize=(side_length, side_length))

    # Build an NCHW float32 tensor normalized to [-1, 1] -- the model's
    # convention, mirrored by the `* 0.5 + 0.5` denormalization below.
    # BUG FIX: the original applied `x * 2 - 1` to raw 0-255 pixels,
    # feeding the network values in roughly [-1, 509]; scale to [0, 1]
    # first so the result lands in [-1, 1].
    x = x.astype(np.float32) / 255.0
    x = x.transpose(2, 0, 1)
    x = x * 2 - 1
    x = x.reshape(-1, 3, side_length, side_length)

    onnx_result = onnx_session.run([output_name], {input_name: x})

    # Squeeze the batch dim, denormalize [-1, 1] -> [0, 1], then convert
    # back to HWC uint8. (The original's RGB->BGR->RGB pair at this point
    # was another identity round trip and is removed.)
    out = np.array(onnx_result).squeeze()
    out = (out * 0.5 + 0.5).clip(0, 1)
    out = (out * 255).transpose(1, 2, 0).astype('uint8')
    return Image.fromarray(out)
      
  
# --- Gradio UI wiring --------------------------------------------------------
title = "Animeganv2"
description = "Gradio demo for AnimeGanv2 Face Portrait v2. To use it, simply upload your image, or click one of the examples to load them. Read more at the links below. Please use a cropped portrait picture for best results similar to the examples below"
article = "<p style='text-align: center'><a href='https://github.com/bryandlee/animegan2-pytorch' target='_blank'>Github Repo Pytorch</a> | <a href='https://github.com/Kazuhito00/AnimeGANv2-ONNX-Sample' target='_blank'>Github Repo ONNX</a></p><p style='text-align: center'>samples from repo: <img src='https://user-images.githubusercontent.com/26464535/129888683-98bb6283-7bb8-4d1a-a04a-e795f5858dcf.gif' alt='animation'/> <img src='https://user-images.githubusercontent.com/26464535/137619176-59620b59-4e20-4d98-9559-a424f86b7f24.jpg' alt='animation'/></p>"


# Webcam-sourced PIL input -> stylized PIL output.
# NOTE(review): `gr.inputs`/`gr.outputs` and `enable_queue=` belong to the
# Gradio 2.x/3.x legacy API and are removed in Gradio 4+ -- this pins the
# Space to an old gradio version; confirm against requirements.txt.
gr.Interface(inference, gr.inputs.Image(type="pil", source="webcam"), gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True).launch()