import logging
import os

import gradio as gr
import numpy as np
from PIL import Image
from huggingface_hub import hf_hub_url, cached_download

from inference.face_detector import StatRetinaFaceDetector
from inference.model_pipeline import VSNetModelPipeline
from inference.onnx_model import ONNXModel

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

MODEL_IMG_SIZE = 256
usage_count = 35  # Based on Hugging Face logs


def load_model():
    """Download both model versions from the Hub and build their inference pipelines."""
    REPO_ID = "Podtekatel/ARCNEGAN"
    FILENAME_OLD = "arcane_exp_203_ep_399.onnx"
    FILENAME_NEW = "arcane_exp_206_ep_138.onnx"

    global model_old
    global model_new
    global pipeline_old
    global pipeline_new

    # Old model (version 1)
    model_path = cached_download(
        hf_hub_url(REPO_ID, FILENAME_OLD),
        use_auth_token=os.getenv('HF_TOKEN')
    )
    model_old = ONNXModel(model_path)
    pipeline_old = VSNetModelPipeline(model_old, StatRetinaFaceDetector(MODEL_IMG_SIZE),
                                      background_resize=1024, no_detected_resize=1024)

    # New model (version 2)
    model_path = cached_download(
        hf_hub_url(REPO_ID, FILENAME_NEW),
        use_auth_token=os.getenv('HF_TOKEN')
    )
    model_new = ONNXModel(model_path)
    pipeline_new = VSNetModelPipeline(model_new, StatRetinaFaceDetector(MODEL_IMG_SIZE),
                                      background_resize=1024, no_detected_resize=1024)
    return model_old, model_new


load_model()


def inference(img, ver):
    img = np.array(img)
    if ver == 'version 2':
        out_img = pipeline_new(img)
    else:
        out_img = pipeline_old(img)
    out_img = Image.fromarray(out_img)

    global usage_count
    usage_count += 1
    logging.info(f'Usage count is {usage_count}')
    return out_img


title = "ARCNStyleTransfer"
description = "Gradio demo for Arcane Season 1 style transfer. To use it, simply upload your image or click one of the examples to load it. Press ❤️ if you like this space!"
article = "This is one of my successful experiments on style transfer. I've built my own pipeline, generator model, and private dataset to train this model.<br>" \
" \ "" \ "" \ "" \ "Model pipeline which used in project is improved CartoonGAN.
" \ "This model was trained on RTX 2080 Ti 1.5 days with batch size 7.
" \ "Model weights 64 MB in ONNX fp32 format, infers 25 ms on GPU and 150 ms on CPU at 256x256 resolution.
" \ "If you want to use this app or integrate this model into yours, please contact me at email 'neuromancer.ai.lover@gmail.com'." imgs_folder = 'demo' examples = [[os.path.join(imgs_folder, img_filename), version] for img_filename in sorted(os.listdir(imgs_folder)) for version in ['version 2']] demo = gr.Interface( fn=inference, inputs=[gr.inputs.Image(type="pil"), gr.inputs.Radio(['version 1', 'version 2'], type="value", default='version 2', label='version')], outputs=gr.outputs.Image(type="pil"), title=title, description=description, article=article, examples=examples) demo.queue(concurrency_count=1) demo.launch()