codeslake committed
Commit 0e0ccfd
Parent(s): a2bcb35

Update app.py

Files changed (1):
app.py  +23 -21
app.py CHANGED
@@ -7,18 +7,18 @@ from PIL import Image
 os.system("git clone https://github.com/codeslake/RefVSR.git")
 os.chdir("RefVSR")
 os.system("./install/install_cudnn113.sh")
-
-
 os.mkdir("ckpt")
-os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID_8K.pytorch")
+os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/SPyNet.pytorch -O ckpt/SPyNet.pytorch")
+
 os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID_8K.pytorch -O ckpt/RefVSR_MFID_8K.pytorch")
-os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID.pytorch -O ckpt/RefVSR_MFID.pytorch")
+os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID_8K.pytorch")
 
-os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/SPyNet.pytorch -O ckpt/SPyNet.pytorch")
+os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_MFID.pytorch -O ckpt/RefVSR_MFID.pytorch")
+os.system("wget https://huggingface.co/spaces/codeslake/RefVSR/resolve/main/RefVSR_small_MFID_8K.pytorch -O ckpt/RefVSR_small_MFID.pytorch")
 
 sys.path.append("RefVSR")
 
-## RefVSR
+## Input setup (creates folders and places inputs corresponding to the original RefVSR code)
 HR_LR_path = "test/RealMCVSR/test/HR/UW/0000"
 HR_Ref_path = "test/RealMCVSR/test/HR/W/0000"
 HR_Ref_path_T = "test/RealMCVSR/test/HR/T/0000"
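Aside, not part of the commit: the checkpoint downloads above shell out to wget. A minimal sketch of the same downloads using the huggingface_hub client, assuming the four files sit at the root of the codeslake/RefVSR Space as the URLs indicate:

import os
import shutil
from huggingface_hub import hf_hub_download

os.makedirs("ckpt", exist_ok=True)
for name in ["SPyNet.pytorch",
             "RefVSR_MFID_8K.pytorch",
             "RefVSR_small_MFID_8K.pytorch",
             "RefVSR_MFID.pytorch"]:
    # Download into the local HF cache, then copy into ckpt/ where run.py expects it.
    cached = hf_hub_download(repo_id="codeslake/RefVSR", repo_type="space", filename=name)
    shutil.copy(cached, os.path.join("ckpt", name))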
@@ -32,13 +32,10 @@ os.makedirs(HR_LR_path)
 os.makedirs(HR_Ref_path)
 os.makedirs(HR_Ref_path_T)
 os.makedirs('result')
-
-#os.system("wget https://www.dropbox.com/s/xv6inxwy0so4ni0/LR.png -O LR.png")
-#os.system("wget https://www.dropbox.com/s/abydd1oczs1163l/Ref.png -O Ref.png")
 os.system("wget https://www.dropbox.com/s/vqekqdz80d85gi4/UW.png -O LR.png")
 os.system("wget https://www.dropbox.com/s/lsopmquhpm87v83/W.png -O Ref.png")
 
-
+## resize if necessary (not used)
 def resize(img):
     max_side = 512
     w = img.size[0]
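Aside: the resize helper above relies on Image.ANTIALIAS, which Pillow deprecated and removed in version 10, so it fails on current Pillow releases. A minimal sketch of the same downscaling logic against the newer resampling enum, for anyone running the app on recent Pillow:

from PIL import Image

def resize(img, max_side=512):
    # Shrink so the longer side is at most max_side, keeping the aspect ratio.
    w, h = img.size
    if max(w, h) <= max_side:
        return img
    scale_ratio = max_side / max(w, h)
    return img.resize((int(w * scale_ratio), int(h * scale_ratio)), Image.Resampling.LANCZOS)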
@@ -49,40 +46,45 @@ def resize(img):
     hsize=int(h*scale_ratio)
     img = img.resize((wsize,hsize), Image.ANTIALIAS)
     return img
-
+
+## inference
 def inference(LR, Ref):
+    ## resize for user selected input (not used)
     #LR = resize(LR)
     #Ref = resize(Ref)
-
+
+    ## Input setup (creates folders and places inputs corresponding to the original RefVSR code)
     LR.save(os.path.join(LR_path, '0000.png'))
     Ref.save(os.path.join(Ref_path, '0000.png'))
     Ref.save(os.path.join(Ref_path_T, '0000.png'))
     LR.save(os.path.join(HR_LR_path, '0000.png'))
     Ref.save(os.path.join(HR_Ref_path, '0000.png'))
     Ref.save(os.path.join(HR_Ref_path_T, '0000.png'))
-
-
+
+    ## Run RefVSR model
     os.system("python -B run.py \
-        --mode RefVSR_MFID_8K \
-        --config config_RefVSR_MFID \
+        --mode amp_RefVSR_small_MFID_8K \
+        --config config_RefVSR_small_MFID \
         --data RealMCVSR \
-        --ckpt_abs_name ckpt/RefVSR_MFID_8K.pytorch \
+        --ckpt_abs_name ckpt/RefVSR_small_MFID_8K.pytorch \
         --data_offset ./test \
         --output_offset ./result \
        --qualitative_only \
        --cpu \
-        --is_gradio")
-
+        --is_gradio")
     return "result/0000.png"
 
 title="RefVSR (under construction)"
-description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively."
+description="Demo application for Reference-based Video Super-Resolution (RefVSR). Upload a low-resolution frame and a reference frame to 'LR' and 'Ref' input windows, respectively. The demo runs on CPUs and takes about 150s"
 
 article = "<p style='text-align: center'><b>To check the full capability of the module, we recommend to clone Github repository and run RefVSR models on videos using GPUs.</b></p><p style='text-align: center'>This demo runs on CPUs and only supports RefVSR for a single LR and Ref frame due to computational complexity. Hence, the model will not take advantage of temporal LR and Ref frames.</p><p style='text-align: center'>The model is trained proposed two-stage training strategy, and the sample frames are in 430x270 resolution and saved in the PNG format. </p><p style='text-align: center'><a href='https://junyonglee.me/projects/RefVSR' target='_blank'>Project</a> | <a href='https://arxiv.org/abs/2203.14537' target='_blank'>arXiv</a> | <a href='https://github.com/codeslake/RefVSR' target='_blank'>Github</a></p>"
 
+## resize for sample (not used)
 #LR = resize(Image.open('LR.png')).save('LR.png')
 #Ref = resize(Image.open('Ref.png')).save('Ref.png')
 
+## input
 examples=[['LR.png', 'Ref.png']]
 
-gr.Interface(inference,[gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],gr.outputs.Image(type="file"),title=title,description=description,article=article,theme ="peach",examples=examples).launch(enable_queue=True)
+## interface
+gr.Interface(inference,[gr.inputs.Image(type="pil"), gr.inputs.Image(type="pil")],gr.outputs.Image(type="file"),title=title,description=description,article=article,theme ="peach",examples=examples).launch(enable_queue=True)
 
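Aside: in the new inference(), the RefVSR call is still one long shell string passed to os.system. A sketch of the same invocation with subprocess.run and an argument list, flags copied from the hunk above; this avoids shell quoting and surfaces a non-zero exit status:

import subprocess
import sys

subprocess.run(
    [sys.executable, "-B", "run.py",
     "--mode", "amp_RefVSR_small_MFID_8K",
     "--config", "config_RefVSR_small_MFID",
     "--data", "RealMCVSR",
     "--ckpt_abs_name", "ckpt/RefVSR_small_MFID_8K.pytorch",
     "--data_offset", "./test",
     "--output_offset", "./result",
     "--qualitative_only",
     "--cpu",
     "--is_gradio"],
    check=True,  # raise CalledProcessError if run.py fails
)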
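Aside: the interface line targets the old gr.inputs / gr.outputs namespaces and launch(enable_queue=True), which later Gradio releases removed. A sketch of equivalent wiring for Gradio 4.x, reusing the inference, title, description, article, and examples objects defined in app.py:

import gradio as gr

demo = gr.Interface(
    fn=inference,
    inputs=[gr.Image(type="pil", label="LR"), gr.Image(type="pil", label="Ref")],
    outputs=gr.Image(type="filepath"),
    title=title,
    description=description,
    article=article,
    examples=examples,
)
demo.queue().launch()  # queueing is configured via .queue() instead of enable_queue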