import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_url, cached_download
import PIL.Image
import onnx
import onnxruntime
# Download the ONNX model from the Hugging Face Hub and verify it is well formed.
config_file_url = hf_hub_url("Jacopo/ToonClip", filename="model.onnx")
model_file = cached_download(config_file_url)
onnx_model = onnx.load(model_file)
onnx.checker.check_model(onnx_model)

# Create an ONNX Runtime inference session and cache the input/output tensor names.
opts = onnxruntime.SessionOptions()
opts.intra_op_num_threads = 16
ort_session = onnxruntime.InferenceSession(model_file, sess_options=opts)
input_name = ort_session.get_inputs()[0].name
output_name = ort_session.get_outputs()[0].name
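
# Added note (not in the original app): the exported model takes a single NCHW
# float32 tensor normalized with ImageNet statistics; the declared input shape
# can be read straight off the session, which is handy when porting the
# preprocessing below to another runtime.
model_input_shape = ort_session.get_inputs()[0].shape  # e.g. [1, 3, 1024, 1024]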

def normalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # Per-channel normalization: x = (x - mean) / std, for CHW or NCHW arrays.
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] - mean[dim]) / std[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] - mean[dim]) / std[dim]
    return x

def denormalize(x, mean=(0., 0., 0.), std=(1.0, 1.0, 1.0)):
    # Per-channel inverse of normalize: x = (x * std) + mean, for CHW or NCHW arrays.
    x = np.asarray(x, dtype=np.float32)
    if len(x.shape) == 4:
        for dim in range(3):
            x[:, dim, :, :] = (x[:, dim, :, :] * std[dim]) + mean[dim]
    if len(x.shape) == 3:
        for dim in range(3):
            x[dim, :, :] = (x[dim, :, :] * std[dim]) + mean[dim]
    return x
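
# Round-trip sketch (illustrative, not in the original code): normalize and
# denormalize are channel-wise inverses, so a tensor survives the round trip
# up to float32 rounding, e.g.:
#   t = np.random.rand(3, 8, 8).astype(np.float32)
#   r = denormalize(normalize(t.copy(), (0.5,) * 3, (0.25,) * 3), (0.5,) * 3, (0.25,) * 3)
#   assert np.allclose(t, r, atol=1e-5)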

def nogan(input_img):
    # Preprocess: HWC uint8 PIL image -> NCHW float32 tensor scaled to [0, 1]
    # and normalized with ImageNet statistics.
    i = np.asarray(input_img, dtype=np.float32)
    i = np.transpose(i, (2, 0, 1))
    i = np.expand_dims(i, 0)
    i = i / 255.0
    i = normalize(i, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))

    # Run the ONNX model, then undo the preprocessing on the stylized output.
    ort_outs = ort_session.run([output_name], {input_name: i})
    output = ort_outs[0][0]
    output = denormalize(output, (0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
    output = output * 255.0
    output = output.astype('uint8')
    output = np.transpose(output, (1, 2, 0))
    output_image = PIL.Image.fromarray(output, 'RGB')
    return output_image
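
# Illustrative helper (a sketch, not part of the original Space): the same
# nogan() pipeline applied to an image on disk. The 1024x1024 resize mirrors
# the Gradio input below; the file paths are hypothetical placeholders.
def stylize_file(input_path, output_path):
    image = PIL.Image.open(input_path).convert("RGB").resize((1024, 1024))
    nogan(image).save(output_path)
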
title = "ToonClip Comics Hero Demo"
description = """
Gradio demo for ToonClip, a UNet++ network with a MobileNet v3 backbone, optimized for mobile frameworks and trained with a VGG perceptual feature loss using PyTorch Lightning.
To use it, simply upload an image with a face or choose an example from the list below.
"""
article = """
<style>
.boxes{
width:50%;
float:left;
}
#mainDiv{
width:50%;
margin:auto;
}
img{
max-width:100%;
}
</style>
<p style='text-align: center'>The \"ToonClip\" model was trained by <a href='https://twitter.com/JacopoMangia' target='_blank'>Jacopo Mangiavacchi</a> and available at <a href='https://github.com/jacopomangiavacchi/ComicsHeroMobileUNet' target='_blank'>Github Repo ComicsHeroMobileUNet</a></p>
<p style='text-align: center'>The \"Comics Hero dataset\" used to train this model was produced by <a href='https://linktr.ee/Norod78' target='_blank'>Doron Adler</a> and available at <a href='https://github.com/Norod/U-2-Net-StyleTransfer' target='_blank'>Github Repo Comics hero U2Net</a></p>
<p style='text-align: center'>The \"ToonClip\" iOS mobile app using a CoreML version of this model is available on Apple App Store at <a href='https://apps.apple.com/us/app/toonclip/id1536285338' target='_blank'>ToonClip</a></p>
<p style='text-align: center'>Blog post on <a href='https://medium.com/@JMangia/optimize-a-face-to-cartoon-style-transfer-model-trained-quickly-on-small-style-dataset-and-50594126e792' target='_blank'>Medium</a></p>
<br>
<p style='text-align: center'>Example images from the FFHQ validation set (not seen during training):</p>
<div id='mainDiv'>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i01.jpeg' alt='Example01'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o01.png' alt='Output01'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i02.jpeg' alt='Example02'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o02.png' alt='Output02'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i03.jpeg' alt='Example03'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o03.png' alt='Output03'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i04.jpeg' alt='Example04'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o04.png' alt='Output04'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i05.jpeg' alt='Example05'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o05.png' alt='Output05'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i06.jpeg' alt='Example06'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o06.png' alt='Output06'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i07.jpeg' alt='Example07'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o07.png' alt='Output07'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i08.jpeg' alt='Example08'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o08.png' alt='Output08'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i09.jpeg' alt='Example09'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o09.png' alt='Output09'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/i10.jpeg' alt='Example10'/>
    </div>
    <div class='boxes'>
        <img src='https://hf.space/gradioiframe/Jacopo/ToonClip/file/o10.png' alt='Output10'/>
    </div>
</div>
"""
examples=[['i01.jpeg'], ['i02.jpeg'], ['i03.jpeg'], ['i04.jpeg'], ['i05.jpeg'], ['i06.jpeg'], ['i07.jpeg'], ['i08.jpeg'], ['i09.jpeg'], ['i10.jpeg']]

iface = gr.Interface(
    nogan,
    gr.inputs.Image(type="pil", shape=(1024, 1024)),
    gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples)

iface.launch()