Eugene Siow committed on
Commit a26597f • 1 Parent(s): 05b61bd

Initial commit.

Files changed (3)
  1. .gitignore +4 -0
  2. README.md +3 -3
  3. app.py +73 -0
.gitignore ADDED
@@ -0,0 +1,4 @@
+ .idea/
+ venv/
+ tmp/
+ *.bmp
README.md CHANGED
@@ -1,8 +1,8 @@
  ---
  title: Super Image
- emoji: ⚡
- colorFrom: gray
- colorTo: indigo
+ emoji: 🔍
+ colorFrom: green
+ colorTo: purple
  sdk: gradio
  app_file: app.py
  pinned: false
app.py ADDED
@@ -0,0 +1,73 @@
+ import torch
+ import gradio as gr
+ from random import randint
+ from pathlib import Path
+ from super_image import ImageLoader, EdsrModel, MsrnModel, MdsrModel, AwsrnModel, A2nModel, CarnModel, PanModel, \
+     HanModel, DrlnModel, RcanModel
+
+ title = "super-image"
+ description = "State of the Art Image Super-Resolution Models."
+ article = "<p style='text-align: center'><a href='https://github.com/eugenesiow/super-image'>Github Repo</a>" \
+           "| <a href='https://eugenesiow.github.io/super-image/'>Documentation</a> " \
+           "| <a href='https://github.com/eugenesiow/super-image#scale-x2'>Models</a></p>"
+
+
+ def inference(img, scale_str, model_name):
+     _id = randint(1, 1000)
+     output_dir = Path('./tmp/')
+     output_dir.mkdir(parents=True, exist_ok=True)
+     output_file = output_dir / ('output_image' + str(_id) + '.jpg')
+     scale = int(scale_str.replace('x', ''))
+     if model_name == 'EDSR':
+         model = EdsrModel.from_pretrained('eugenesiow/edsr', scale=scale)
+     elif model_name == 'MSRN':
+         model = MsrnModel.from_pretrained('eugenesiow/msrn', scale=scale)
+     elif model_name == 'MDSR':
+         model = MdsrModel.from_pretrained('eugenesiow/mdsr', scale=scale)
+     elif model_name == 'AWSRN-BAM':
+         model = AwsrnModel.from_pretrained('eugenesiow/awsrn-bam', scale=scale)
+     elif model_name == 'A2N':
+         model = A2nModel.from_pretrained('eugenesiow/a2n', scale=scale)
+     elif model_name == 'CARN':
+         model = CarnModel.from_pretrained('eugenesiow/carn', scale=scale)
+     elif model_name == 'PAN':
+         model = PanModel.from_pretrained('eugenesiow/pan', scale=scale)
+     elif model_name == 'HAN':
+         model = HanModel.from_pretrained('eugenesiow/han', scale=scale)
+     elif model_name == 'DRLN':
+         model = DrlnModel.from_pretrained('eugenesiow/drln', scale=scale)
+     elif model_name == 'RCAN':
+         model = RcanModel.from_pretrained('eugenesiow/rcan', scale=scale)
+     else:
+         model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=scale)
+     inputs = ImageLoader.load_image(img)
+     preds = model(inputs)
+     output_file_str = str(output_file.resolve())
+     ImageLoader.save_image(preds, output_file_str)
+     return output_file_str
+
+
+ torch.hub.download_url_to_file('http://people.rennes.inria.fr/Aline.Roumy/results/images_SR_BMVC12/input_groundtruth/baby_mini_d3_gaussian.bmp',
+                                'baby.bmp')
+ torch.hub.download_url_to_file('http://people.rennes.inria.fr/Aline.Roumy/results/images_SR_BMVC12/input_groundtruth/woman_mini_d3_gaussian.bmp',
+                                'woman.bmp')
+
+ gr.Interface(
+     inference,
+     [
+         gr.inputs.Image(type="pil", label="Input"),
+         gr.inputs.Radio(["x2", "x3", "x4"], label='scale'),
+         gr.inputs.Dropdown(choices=['EDSR-base', 'EDSR', 'MSRN', 'MDSR', 'AWSRN-BAM', 'A2N', 'CARN', 'PAN', 'HAN',
+                                     'DRLN', 'RCAN'],
+                            label='Model')
+     ],
+     gr.outputs.Image(type="file", label="Output"),
+     title=title,
+     description=description,
+     article=article,
+     examples=[
+         ['baby.bmp', 'x2', 'EDSR-base'],
+         ['woman.bmp', 'x3', 'MSRN']
+     ],
+     enable_queue=True
+ ).launch(debug=True)
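
For reference, the super_image calls used in app.py can also be exercised outside Gradio. The sketch below is a minimal, hypothetical standalone script, assuming the super-image and Pillow packages are installed and a local low-resolution file such as the baby.bmp example downloaded above exists; it reuses only the calls that appear in this commit (EdsrModel.from_pretrained, ImageLoader.load_image, ImageLoader.save_image), while the output filename scaled_2x.png is an illustrative choice.

    from PIL import Image
    from super_image import EdsrModel, ImageLoader

    # Load a low-resolution input as a PIL image (the same type the Gradio app passes to inference).
    image = Image.open('baby.bmp')

    # Same fallback model the app uses, here at 2x scale.
    model = EdsrModel.from_pretrained('eugenesiow/edsr-base', scale=2)

    inputs = ImageLoader.load_image(image)            # PIL image -> model input tensor
    preds = model(inputs)                             # run super-resolution
    ImageLoader.save_image(preds, './scaled_2x.png')  # write the upscaled image to disk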