import os

import cv2 as cv
import gradio as gr
import numpy as np
import onnxruntime
from PIL import Image

print(onnxruntime.get_device())

# Fetch the AnimeGANv2 Face Portrait v2 ONNX weights from Google Drive.
os.system("pip install gdown")
os.system("gdown https://drive.google.com/uc?id=1riNxV1BWMAXfmWZ3LrQbEkvzV8f7lOCp")

onnx_session = onnxruntime.InferenceSession("/home/user/app/face_paint_512_v2_0.onnx")
input_name = onnx_session.get_inputs()[0].name
output_name = onnx_session.get_outputs()[0].name
side_length = 512


def inference(img):
    # PIL image (RGB) -> BGR ndarray, resized to the model's 512x512 input.
    image = np.array(img)
    image = image[:, :, ::-1].copy()
    image = cv.resize(image, dsize=(side_length, side_length))

    # Preprocess: RGB, float32, CHW, scaled from [0, 255] to [-1, 1], batch dim added.
    x = cv.cvtColor(image, cv.COLOR_BGR2RGB)
    x = np.array(x, dtype=np.float32)
    x = x.transpose(2, 0, 1)
    x = x / 255.0
    x = x * 2 - 1
    x = x.reshape(-1, 3, side_length, side_length)

    # Run the model, then map the output back from [-1, 1] to an 8-bit HWC RGB image.
    onnx_result = onnx_session.run([output_name], {input_name: x})
    onnx_result = np.array(onnx_result).squeeze()
    onnx_result = (onnx_result * 0.5 + 0.5).clip(0, 1)
    onnx_result = onnx_result * 255
    onnx_result = onnx_result.transpose(1, 2, 0).astype("uint8")

    return Image.fromarray(onnx_result)


title = "AnimeGANv2"
description = (
    "Gradio demo for AnimeGANv2 Face Portrait v2. To use it, simply upload your image, "
    "or click one of the examples to load them. Read more at the links below. "
    "For best results, use a cropped portrait picture similar to the examples below."
)
article = "<p>Github Repo</p><p>samples from repo</p>"

" gr.Interface(inference, gr.inputs.Image(type="pil"), gr.outputs.Image(type="pil"),title=title,description=description,article=article,enable_queue=True).launch()