import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from tensorflow.keras import layers
import gradio as gr
# Define EDSR custom model
class EDSRModel(tf.keras.Model):
    def train_step(self, data):
        # Unpack the data. Its structure depends on your model and
        # on what you pass to `fit()`.
        x, y = data
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)  # Forward pass
            # Compute the loss value
            # (the loss function is configured in `compile()`)
            loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)
        # Compute gradients
        trainable_vars = self.trainable_variables
        gradients = tape.gradient(loss, trainable_vars)
        # Update weights
        self.optimizer.apply_gradients(zip(gradients, trainable_vars))
        # Update metrics (includes the metric that tracks the loss)
        self.compiled_metrics.update_state(y, y_pred)
        # Return a dict mapping metric names to current value
        return {m.name: m.result() for m in self.metrics}

    def predict_step(self, x):
        # Add a batch dimension with tf.expand_dims and convert to float32 with tf.cast
        x = tf.cast(tf.expand_dims(x, axis=0), tf.float32)
        # Pass the low-resolution image through the model
        super_resolution_img = self(x, training=False)
        # Clip the tensor to the valid pixel range [0, 255]
        super_resolution_img = tf.clip_by_value(super_resolution_img, 0, 255)
        # Round the values of the tensor to the nearest integer
        super_resolution_img = tf.round(super_resolution_img)
        # Remove the batch dimension and convert to uint8
        super_resolution_img = tf.squeeze(
            tf.cast(super_resolution_img, tf.uint8), axis=0
        )
        return super_resolution_img
# Residual Block
def ResBlock(inputs):
    x = layers.Conv2D(64, 3, padding="same", activation="relu")(inputs)
    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.Add()([inputs, x])
    return x

# Upsampling Block
def Upsampling(inputs, factor=2, **kwargs):
    x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(inputs)
    x = tf.nn.depth_to_space(x, block_size=factor)
    x = layers.Conv2D(64 * (factor ** 2), 3, padding="same", **kwargs)(x)
    x = tf.nn.depth_to_space(x, block_size=factor)
    return x
def make_model(num_filters, num_of_residual_blocks):
    # Flexible input shape so the model accepts images of any size
    input_layer = layers.Input(shape=(None, None, 3))
    # Scale pixel values to [0, 1]
    x = layers.Rescaling(scale=1.0 / 255)(input_layer)
    x = x_new = layers.Conv2D(num_filters, 3, padding="same")(x)
    # Residual blocks (16 in the original EDSR configuration)
    for _ in range(num_of_residual_blocks):
        x_new = ResBlock(x_new)
    x_new = layers.Conv2D(num_filters, 3, padding="same")(x_new)
    x = layers.Add()([x, x_new])
    x = Upsampling(x)
    x = layers.Conv2D(3, 3, padding="same")(x)
    output_layer = layers.Rescaling(scale=255)(x)
    return EDSRModel(input_layer, output_layer)
# Define PSNR metric
def PSNR(super_resolution, high_resolution):
    """Compute the peak signal-to-noise ratio, a measure of image quality."""
    # Maximum pixel value is 255
    psnr_value = tf.image.psnr(high_resolution, super_resolution, max_val=255)[0]
    return psnr_value
# Register the custom model class and metric so the saved model can be deserialized
custom_objects = {"EDSRModel": EDSRModel}
with keras.utils.custom_object_scope(custom_objects):
    new_model = keras.models.load_model("./trained.h5", custom_objects={"PSNR": PSNR})
def process_image(img):
    # Convert the input image to a tensor and take a random 150 x 150 crop
    lowres = tf.convert_to_tensor(img, dtype=tf.uint8)
    lowres = tf.image.random_crop(lowres, (150, 150, 3))
    # Run super-resolution and return both the low-res crop and the upscaled result
    preds = new_model.predict_step(lowres)
    preds = preds.numpy()
    lowres = lowres.numpy()
    return (lowres, preds)
# Gradio input component (legacy gr.inputs API); the output is the Gallery defined below
image = gr.inputs.Image()
markdown_part = """
This Space is a demo for the EDSR (Enhanced Deep Residual Networks for Single Image Super-Resolution) model, which surpassed the performance of the SOTA models available at the time.

Paper link - https://arxiv.org/pdf/1707.02921

Keras example link - https://keras.io/examples/vision/edsr/

TODO: a hack to make this work for any image size. Currently the model only sees a 150 x 150 crop of the input.

- Pad the input image with transparent pixels so that it becomes a square whose side is a multiple of 150.
- Chop the padded image into multiple 150 x 150 sub-images.
- Upscale each sub-image and stitch the results together.

The output image might look a bit off, because each sub-image doesn't have data about the other sub-images. This approach assumes that each sub-image carries enough information about its surroundings.
"""
gr.Interface(
    process_image,
    title="EDSR - Enhanced Deep Residual Networks for Single Image Super-Resolution",
    description="Super-resolution demo: upscales a 150 x 150 crop of the input image.",
    inputs=image,
    outputs=gr.Gallery(label="Outputs: the first image is the low-res crop, the second is the super-resolved result", visible=True),
    article=markdown_part,
    interpretation="default",
    allow_flagging="never",
).launch()