import torch
import gradio as gr
model = torch.hub.load("PeterL1n/RobustVideoMatting", "mobilenetv3") # or "resnet50"
convert_video = torch.hub.load("PeterL1n/RobustVideoMatting", "converter")
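# Optional: move the model to a GPU when one is available (a minimal sketch;
# convert_video accepts the model on any device, so the demo also runs on CPU as-is).
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device).eval()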
def inference(video):
    convert_video(
        model,                         # The loaded model, can be on any device (cpu or cuda).
        input_source=video,            # A video file or an image sequence directory.
        input_resize=None,             # [Optional] Resize the input (also the output).
        downsample_ratio=0.25,         # [Optional] If None, make downsampled max size be 512px.
        output_type='video',           # Choose "video" or "png_sequence".
        output_composition='com.mp4',  # File path if video; directory path if png sequence.
        output_alpha='pha.mp4',        # [Optional] Output the raw alpha prediction.
        output_foreground='fgr.mp4',   # [Optional] Output the raw foreground prediction.
        output_video_mbps=4,           # Output video mbps. Not needed for png sequence.
        seq_chunk=12,                  # Process n frames at once for better parallelism.
        num_workers=1,                 # Only for image sequence input. Reader threads.
        progress=True                  # Print conversion progress.
    )
    return 'com.mp4'
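# Note: the output paths above are relative to the working directory, so
# concurrent requests would overwrite each other's files; unique temporary
# filenames would be needed for anything beyond a single-user demo.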
title = "Robust Video Matting"
description = "Gradio demo for Robust Video Matting. To use it, simply upload your video. Read more at the links below."
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2108.11515'>Robust High-Resolution Video Matting with Temporal Guidance</a> | <a href='https://github.com/PeterL1n/RobustVideoMatting'>Github Repo</a></p>"
gr.Interface(
inference,
gr.inputs.Video(label="Input"),
gr.outputs.Video(label="Output"),
title=title,
description=description,
article=article,
enable_queue=True
).launch(debug=True)