import os
import onnxruntime as rt
import sys
import PIL
from PIL import Image, ImageOps, ImageFile
import numpy as np
from pathlib import Path
import collections
from typing import Union, List
import scipy.ndimage
import requests
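# Load the FFHQ-to-pumpkin-heads pix2pixHD generator exported to ONNX; inference runs on CPU with 4 threads.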
MODEL_FILE = "ffhq2pumpkinheads_pix2pixHD_e03_itr4500-simp.onnx"
so = rt.SessionOptions()
so.inter_op_num_threads = 4
so.intra_op_num_threads = 4
session = rt.InferenceSession(MODEL_FILE, sess_options=so, providers=['CPUExecutionProvider'])
input_name = session.get_inputs()[0].name
print("input_name = " + str(input_name))
output_name = session.get_outputs()[0].name
print("output_name = " + str(output_name))
import face_detection
def array_to_image(array_in):
    # Map the network output from [-1, 1] back to [0, 255], drop the batch dimension,
    # and convert CHW -> HWC so PIL can build an image from it
    array_in = np.squeeze(255 * (array_in + 1) / 2)
    array_in = np.transpose(array_in, (1, 2, 0))
    im = Image.fromarray(array_in.astype(np.uint8))
    return im
def image_as_array(image_in):
    # Scale pixel values to [-1, 1] and reshape HWC -> NCHW for the ONNX model
    im_array = np.array(image_in, np.float32)
    im_array = (im_array / 255) * 2 - 1
    im_array = np.transpose(im_array, (2, 0, 1))
    im_array = np.expand_dims(im_array, 0)
    return im_array
def find_aligned_face(image_in, size=1024):
    aligned_image, n_faces, quad = face_detection.align(image_in, face_index=0, output_size=size)
    return aligned_image, n_faces, quad
def align_first_face(image_in, size=1024):
    aligned_image, n_faces, quad = find_aligned_face(image_in, size=size)
    if n_faces == 0:
        # No face found: fall back to the full image, honouring EXIF orientation if possible
        try:
            image_in = ImageOps.exif_transpose(image_in)
        except Exception:
            print("exif problem, not rotating")
        image_in = image_in.resize((size, size))
        im_array = image_as_array(image_in)
    else:
        im_array = image_as_array(aligned_image)
    return im_array
def img_concat_h(im1, im2):
    dst = Image.new('RGB', (im1.width + im2.width, im1.height))
    dst.paste(im1, (0, 0))
    dst.paste(im2, (im1.width, 0))
    return dst
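# Gradio UI: the callback aligns the uploaded face, runs it through the ONNX session,
# and returns the aligned input and the pumpkinized output side by side.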
import gradio as gr
def face2vintage(
    img: Image.Image,
    size: int
) -> Image.Image:
    aligned_img = align_first_face(img, size=size)
    if aligned_img is None:
        output = None
    else:
        # Run the ONNX model and place the aligned input next to the generated result
        output = session.run([output_name], {input_name: aligned_img})[0]
        output = array_to_image(output)
        aligned_img = array_to_image(aligned_img).resize((output.width, output.height))
        output = img_concat_h(aligned_img, output)
    return output
def inference(img):
    out = face2vintage(img, 1024)
    return out
title = "Pumpkin head Pix2PixHD"
description = "Pumpkinize your head. Upload an image with a face, or click on one of the examples below. If no face is detected, an image will still be created from the full picture."
article = "<hr><p style='text-align: center'>See the <a href='https://github.com/justinpinkney/pix2pixHD' target='_blank'>Github Repo</a></p><p>The \"Pumpkin Head\" Pix2PixHD model was trained by <a href='https://linktr.ee/Norod78' target='_blank'>Doron Adler</a></p>"
examples=[['Example00001.jpg'],['Example00002.jpg'],['Example00003.jpg'],['Example00004.jpg']]
demo = gr.Interface(
    inference,
    inputs=[gr.Image(type="pil", label="Input")],
    outputs=[gr.Image(type="pil", label="Output")],
    title=title,
    description=description,
    article=article,
    examples=examples,
    allow_flagging="never"
)
demo.queue()
demo.launch()