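# Gradio demo: satellite image super-resolution (x3) with an efficient sub-pixel
# convolutional neural network (keras-io/super-resolution from the Hugging Face Hub).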
import tensorflow as tf
import numpy as np
from PIL import Image
from glob import glob 
import pandas as pd
from tensorflow.keras.preprocessing.image import img_to_array
from huggingface_hub import from_pretrained_keras
import gradio as gr

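# Load the pretrained Keras super-resolution model from the Hugging Face Hub
# and print its architecture.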
model = from_pretrained_keras("keras-io/super-resolution")
model.summary()

def infer(image):
    """Run x3 super-resolution on the input image supplied by Gradio as a NumPy array."""
    nx = image.shape[0]
    ny = image.shape[1]

    # The network upscales only the luminance channel, so convert the input to
    # YCbCr and feed it the normalized Y channel.
    img = Image.fromarray(image)
    # img = img.resize((100, 100))
    # img = img.crop((0, 100, 0, 100))
    ycbcr = img.convert("YCbCr")
    y, cb, cr = ycbcr.split()
    y = img_to_array(y)
    y = y.astype("float32") / 255.0

    model_input = np.expand_dims(y, axis=0)
    model_output = model.predict(model_input)

    nxo = model_output.squeeze().shape[0]
    nyo = model_output.squeeze().shape[1]

    # Restore the image in RGB color space: rescale the predicted Y channel,
    # bicubically upsample the chroma channels and merge the three back together.
    out_img_y = model_output[0] * 255.0
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = out_img_y.reshape((out_img_y.shape[0], out_img_y.shape[1]))
    out_img_y = Image.fromarray(np.uint8(out_img_y), mode="L")
    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge("YCbCr", (out_img_y, out_img_cb, out_img_cr)).convert("RGB")

    # Summary of the input and output sizes (in pixels).
    sizes = {
        "input image size": (nx, ny),
        "output image size": (nxo, nyo),
    }

    return pd.DataFrame(data=sizes.values(), index=sizes.keys()).transpose(), img, out_img
	
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/1609.05158' target='_blank'>Real-Time Single Image and Video Super-Resolution Using an Efficient Sub-Pixel Convolutional Neural Network</a></p><p style='text-align: center'><a href='https://keras.io/examples/vision/super_resolution_sub_pixel/' target='_blank'>Image Super-Resolution using an Efficient Sub-Pixel CNN</a></p>"

# examples = [['examples/2000-04-28-18-21-24_L5_rgb.jpg'],['examples/2000-08-02-18-23-18_L5_rgb.jpg'],
#             ['examples/2000-08-18-18-23-46_L5_rgb.jpg'],['examples/2000-09-19-18-24-18_L5_rgb.jpg'],['examples/2000-10-21-18-24-43_L5_rgb.jpg']]

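# Collect the example tiles bundled with the Space; each one becomes a clickable example in the UI.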
examples = [[path] for path in glob('examples/tiles/*.jpg')]

# Output components: a size-summary table, the input image and the x3 upscaled image.
out1 = gr.Dataframe(label='Summary', headers=["Input (px)", "Output (px)"], type='pandas')
out2 = gr.Image(label="Input image", type='pil')
out3 = gr.Image(label="Super-resolution x3 image", type='pil')

iface = gr.Interface(
    fn=infer,
    title="Satellite Super-resolution",
    description="This Space is a demo of satellite image super-resolution using a sub-pixel convolutional neural network.",
    article=article,
    inputs=gr.Image(label="Input Image"),
    outputs=[out1, out2, out3],
    examples=examples,
).launch()