codeslake committed on
Commit
d84731a
1 Parent(s): 1e01e69

Update app.py

Files changed (1)
  1. app.py +4 -5
app.py CHANGED
@@ -28,7 +28,7 @@ os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O LR.png")
 os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O Ref.png")
 
 def resize(img):
-    max_side = 1024
+    max_side = 512
     w = img.size[0]
     h = img.size[1]
     if max(h, w) > max_side:
@@ -64,10 +64,9 @@ description="Demo application for Reference-based Video Super-Resolution (RefVSR
 
 article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is the small-sized model trained with the proposed two-stage training strategy.</p><p style='text-align: center'>The sample frames are in HD resolution (1920x1080) and in the PNG format. </p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
 
-#LR = resize(Image.open('LR.png'))
-#Ref = resize(Image.open('Ref.png'))
-#LR.save('LR.png')
-#Ref.save('Ref.png')
+LR = resize(Image.open('LR.png')).save('LR.png')
+Ref = resize(Image.open('Ref.png')).save('Ref.png')
+
 examples=[['LR.png', 'Ref.png']]
 
 gr.Interface(inference,[gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],gr.outputs.Image(type="file"),title=title,description=description,article=article,theme ="peach",examples=examples).launch(enable_queue=True)
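For context, a minimal sketch of what the re-enabled pre-processing step does at app startup, assuming the rest of resize() (not shown in this hunk) scales the longer side down to the new 512-pixel max_side while preserving the aspect ratio; the exact rounding and resampling choices below are assumptions, not the committed code:

# Minimal sketch, not the exact app.py implementation: the full body of
# resize() is outside this hunk, so the scaling details are assumptions.
from PIL import Image

def resize(img, max_side=512):
    w, h = img.size
    if max(h, w) > max_side:
        scale = max_side / max(h, w)  # shrink so the longer side equals max_side
        img = img.resize((int(w * scale), int(h * scale)))
    return img

# The commit uncomments this step, shrinking the bundled example frames
# in place so the CPU-only Gradio demo works with smaller inputs.
for name in ('LR.png', 'Ref.png'):
    resize(Image.open(name)).save(name)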