import os
import sys

import gradio as gr
from PIL import Image

## environment setup
os.system("git clone https://github.com/codeslake/RefVSR.git")
os.chdir("RefVSR")
os.system("./install/install_cudnn113.sh")

# sample LR/Ref frames for the demo
os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O LR.png")
os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O Ref.png")

# pretrained checkpoints
os.mkdir("ckpt")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID_8K.pytorch")
os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/SPyNet.pytorch -O ckpt/SPyNet.pytorch")

sys.path.append("RefVSR")

## RefVSR
# directory layout expected by run.py: the ultra-wide (UW) frame is the LR input;
# the wide (W) and telephoto (T) frames serve as references
LR_path = "test/RealMCVSR/test/HR/UW/0000"
Ref_path = "test/RealMCVSR/test/HR/W/0000"
Ref_path_T = "test/RealMCVSR/test/HR/T/0000"

os.makedirs(LR_path, exist_ok=True)
os.makedirs(Ref_path, exist_ok=True)
os.makedirs(Ref_path_T, exist_ok=True)
os.makedirs("result", exist_ok=True)


def resize(width, img):
    """Resize img to the given width, keeping its aspect ratio."""
    basewidth = width
    wpercent = basewidth / float(img.size[0])
    hsize = int(float(img.size[1]) * wpercent)
    img = img.resize((basewidth, hsize), Image.LANCZOS)
    return img


def inference(LR, Ref):
    # LR = resize(256, LR)
    # Ref = resize(256, Ref)
    LR.save(os.path.join(LR_path, "0000.png"))
    Ref.save(os.path.join(Ref_path, "0000.png"))
    Ref.save(os.path.join(Ref_path_T, "0000.png"))

    os.system(
        "python -B run.py"
        " --mode amp_RefVSR_small_MFID_8K"
        " --config config_RefVSR_small_MFID_8K"
        " --data RealMCVSR"
        " --ckpt_abs_name ckpt/RefVSR_small_MFID_8K.pytorch"
        " --data_offset ./test"
        " --output_offset ./result"
        " --qualitative_only"
        " --cpu"
        " --is_gradio"
    )
    return "result/0000.png"


title = "RefVSR"
description = "Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively."
article = """
- Note 1: This demo only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model might not take advantage of temporal frames.
- Note 2: The model is our small 8K model trained with the proposed two-stage training strategy.
- Note 3: The spatial size of input LR and Ref frames is 1920x1080 (HD), in the PNG format.
"""

examples = [["LR.png", "Ref.png"]]

gr.Interface(
    inference,
    [gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],
    gr.outputs.Image(type="file"),
    title=title,
    description=description,
    article=article,
    theme="peach",
    examples=examples,
).launch(enable_queue=True)