Spaces: Build error
simayhosmeyve committed on
Commit 2a20e60 · Parent(s): 318b6a8

Delete pix2pix.py

pix2pix.py +0 -477
pix2pix.py
DELETED
@@ -1,477 +0,0 @@
import tensorflow as tf
import os
import pathlib
import time
import datetime
from matplotlib import pyplot as plt
from IPython import display
from glob import glob
import numpy as np
import cv2
import math
import keras

import zipfile
with zipfile.ZipFile("NewDataSet.zip", 'r') as zip_ref:
    zip_ref.extractall("")
# salt-and-pepper noise
def saltpepperNoise(image):
    row, col, ch = image.shape
    s_vs_p = 0.5
    amount = 0.004
    out = image.copy()  # work on a copy so the input image is not modified in place
    # Salt mode
    num_salt = np.ceil(amount * image.size * s_vs_p)
    coords = tuple(np.random.randint(0, i - 1, int(num_salt))
                   for i in image.shape)
    out[coords] = 1

    # Pepper mode
    num_pepper = np.ceil(amount * image.size * (1. - s_vs_p))
    coords = tuple(np.random.randint(0, i - 1, int(num_pepper))
                   for i in image.shape)
    out[coords] = 0
    return out
def color_imread(path):
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = (img / 127.5) - 1
    img = img.astype(np.float32)
    return img

def gray_imread(path):
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = img.astype(np.float32)
    return img

def load():
    input_paths = sorted(glob('NewDataSet/*'))
    real_paths = sorted(glob('NewDataSet/*'))
    input_images = []
    real_images = []
    for path in input_paths:
        image = gray_imread(path)
        input_images.append(image)
    for path in real_paths:
        image = color_imread(path)
        real_images.append(image)

    return input_images, real_images

def reshape(gray_img):
    gray_img = np.asarray(gray_img)
    gray_img = gray_img.reshape(256, 256, 1)
    return gray_img
input_images, real_images = load()

#test = gray_imread("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png")
#test = cv2.resize(test,(256,256))

for i in range(len(input_images)):
    input_images[i] = reshape(input_images[i])

#test = reshape(test)

#print(np.asarray(test).shape)
def ssim(original, predict):
    ssim = tf.image.ssim(original, predict, max_val=1.0, filter_size=11, filter_sigma=1.5, k1=0.01, k2=0.03)
    return ssim

def psnr(img1, img2):
    psnr = tf.image.psnr(img1, img2, max_val=255)
    return psnr

array_Gen_loss = []
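# Illustrative usage of the two metrics above (an added sketch, not part of the
# original training flow; the dummy tensors are assumptions):
#
#   a = tf.random.uniform((256, 256, 3))        # stand-in "original" in [0, 1]
#   b = tf.clip_by_value(a + 0.05, 0.0, 1.0)    # stand-in "prediction"
#   print(float(ssim(a, b)))                    # near 1.0 for similar images
#   print(float(psnr(a * 255.0, b * 255.0)))    # in dB; higher is better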
def histogram_graphic(img):
    hist, bins = np.histogram(img.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    cdf_normalized = cdf * float(hist.max()) / cdf.max()
    plt.plot(cdf_normalized, color='b')
    plt.hist(img.flatten(), 256, [0, 256], color='r')
    plt.xlim([0, 230])
    plt.legend(('cdf', 'histogram'), loc='upper left')
    plt.show()
def preprocessing(path):
    img = cv2.imread(path)
    img = np.asarray(img).reshape(256, 256, 3)
    #print(img.shape)
    #cv2.imshow(img)
    #cv2.imwrite("/content/drive/MyDrive/ColabNotebooks/enhance/Before_hist_equalizer.png",img)

    # lighting adjustment
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # gamma correction needs the HSV representation
    hue, sat, val = cv2.split(hsv)

    mid = 0.5
    mean = np.mean(val)
    gamma = math.log(mid * 255) / math.log(mean)
    #print("Gamma:", gamma)
    # apply the inverse transform implied by the computed gamma

    # gamma correction on the value channel
    val_gamma = np.power(val, gamma).clip(0, 255).astype(np.uint8)

    # merge the new value channel back with the original hue and sat channels
    hsv_gamma = cv2.merge([hue, sat, val_gamma])
    img_gamma = cv2.cvtColor(hsv_gamma, cv2.COLOR_HSV2BGR)
    cv2.imwrite("/content/drive/MyDrive/ColabNotebooks/img_gamma.png", img_gamma)
    #cv2.imshow(img_gamma)

    # Adaptive Histogram Equalization
    gamma_path = "/content/drive/MyDrive/ColabNotebooks/img_gamma.png"
    img2 = cv2.imread(gamma_path, 0)
    img2 = np.asarray(img2).reshape(256, 256, 1)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    # clipLimit -> contrast limit
    clahe_equ = clahe.apply(img2)
    cv2.imshow("clahe_equ", clahe_equ)  # cv2.imshow needs a window name as its first argument
    cv2.imwrite("/content/drive/MyDrive/ColabNotebooks/enhance/After_clahe_equalizer.png", clahe_equ)
    #return clahe_equ

#preprocessing("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png")
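# Why that gamma works (added note): choosing gamma = log(0.5*255)/log(mean)
# maps the current mean brightness exactly to mid-gray, because
# mean ** (log(127.5)/log(mean)) = 127.5. Since `val` stays in [0, 255] rather
# than being normalized, images darker than mid-gray end up with gamma > 1
# (brightening) and brighter ones with gamma < 1 (darkening).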
def image_colorfulness(image):
    # split the image into its respective RGB components
    (B, G, R) = cv2.split(image.astype("float"))

    # compute rg = R - G
    rg = np.absolute(R - G)

    # compute yb = 0.5 * (R + G) - B
    yb = np.absolute(0.5 * (R + G) - B)

    # compute the mean and standard deviation of both `rg` and `yb`
    (rbMean, rbStd) = (np.mean(rg), np.std(rg))
    (ybMean, ybStd) = (np.mean(yb), np.std(yb))

    # combine the mean and standard deviations
    stdRoot = np.sqrt((rbStd ** 2) + (ybStd ** 2))
    meanRoot = np.sqrt((rbMean ** 2) + (ybMean ** 2))

    # derive the "colorfulness" metric and return it
    return stdRoot + (0.3 * meanRoot)  # threshold is 24
from PIL import Image, ImageEnhance
def add_saturation(path):
    clr = cv2.imread(path)
    value = image_colorfulness(clr)
    print(value)
    img = Image.open(path)
    enhanced_obj = ImageEnhance.Color(img)
    if value < 30:  # use 30 as the cutoff so images whose saturation is already decent still get a slight boost
        enhanced_obj.enhance((30 - value) * 0.1 + 0.75).save("enhance/deneme_sat.jpg")

#add_saturation("/content/drive/MyDrive/ColabNotebooks/enhance/cikti2.jpeg")
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
    """Return a sharpened version of the image, using an unsharp mask."""
    blurred = cv2.GaussianBlur(image, kernel_size, sigma)
    sharpened = float(amount + 1) * image - float(amount) * blurred
    sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
    sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
    sharpened = sharpened.round().astype(np.uint8)
    if threshold > 0:
        low_contrast_mask = np.absolute(image - blurred) < threshold
        np.copyto(sharpened, image, where=low_contrast_mask)
    return sharpened

def example(image, name):
    sharpened_image = unsharp_mask(image)
    cv2.imwrite(name, sharpened_image)

#s_img= cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/deneme.jpg")
#example(s_img,"/content/drive/MyDrive/ColabNotebooks/enhance/deneme_sharp.jpg")

#img2 = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png")
#newimg2 = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/Output/nadam_image9.png")

#psnr(img2,newimg2)
#ssim(img2,newimg2)
import math
import cv2
import numpy as np

#original = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png",0)
#contrast = cv2.imread("/content/drive/MyDrive/ColabNotebooks/enhance/After_clahe_equalizer_with_gamma.png",0)

#print(original.dtype)  # `original` only exists in the commented lines above, so this stays commented too
def psnr(img1, img2, MAX=None):  # NumPy PSNR; note this shadows the TF-based psnr defined earlier
    if MAX is None:
        MAX = np.iinfo(img1.dtype).max
    # cast to float before subtracting: uint8 arithmetic would wrap around
    mse = np.mean((np.asarray(img1, dtype=np.float64) - np.asarray(img2, dtype=np.float64)) ** 2)
    if mse == 0:
        return 100
    return 20 * math.log10(MAX / math.sqrt(mse))


#db = psnr(original, contrast)
#print(db)
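# Worked example for the NumPy psnr above (added sketch; arrays are
# illustrative assumptions):
#
#   clean = np.full((8, 8), 128, dtype=np.uint8)
#   noisy = clean.copy(); noisy[0, 0] = 138     # one pixel off by 10
#   # mse = 100/64 = 1.5625, so psnr = 20*log10(255/1.25) ≈ 46.2 dB
#   print(psnr(clean, noisy))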
OUTPUT_CHANNELS = 3

def downsample(filters, size, apply_batchnorm=True):
    initializer = tf.random_normal_initializer(0., 0.02)

    result = tf.keras.Sequential()
    result.add(tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                                      kernel_initializer=initializer, use_bias=False))
    # stride 2 halves the spatial dims here: 256 -> 128
    if apply_batchnorm:
        result.add(tf.keras.layers.BatchNormalization())

    result.add(tf.keras.layers.LeakyReLU())

    return result

def upsample(filters, size, apply_dropout=False):
    initializer = tf.random_normal_initializer(0., 0.02)

    result = tf.keras.Sequential()
    result.add(
        tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                        padding='same',
                                        kernel_initializer=initializer,
                                        use_bias=False))
    # and here the transposed convolution doubles them again
    result.add(tf.keras.layers.BatchNormalization())

    if apply_dropout:
        result.add(tf.keras.layers.Dropout(0.5))

    result.add(tf.keras.layers.ReLU())

    return result
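# Quick shape check for the two blocks above (added sketch):
#
#   x = tf.zeros([1, 256, 256, 1])
#   y = downsample(64, 4)(x)     # -> (1, 128, 128, 64)
#   z = upsample(64, 4)(y)       # -> (1, 256, 256, 64), back to the input size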
def Generator(tpu=False):
    inputs = tf.keras.layers.Input(shape=[256, 256, 1])

    down_stack = [
        downsample(64, 4, apply_batchnorm=False),  # (batch_size, 128, 128, 64)
        downsample(128, 4),  # (batch_size, 64, 64, 128)
        downsample(256, 4),  # (batch_size, 32, 32, 256)
        downsample(512, 4),  # (batch_size, 16, 16, 512)
        downsample(512, 4),  # (batch_size, 8, 8, 512)
        downsample(512, 4),  # (batch_size, 4, 4, 512)
        downsample(512, 4),  # (batch_size, 2, 2, 512)
        downsample(512, 4),  # (batch_size, 1, 1, 512)
    ]

    up_stack = [
        upsample(512, 4, apply_dropout=True),  # (batch_size, 2, 2, 1024)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 4, 4, 1024)
        upsample(512, 4, apply_dropout=True),  # (batch_size, 8, 8, 1024)
        upsample(512, 4),  # (batch_size, 16, 16, 1024)
        upsample(256, 4),  # (batch_size, 32, 32, 512)
        upsample(128, 4),  # (batch_size, 64, 64, 256)
        upsample(64, 4),  # (batch_size, 128, 128, 128)
    ]
    initializer = tf.random_normal_initializer(0., 0.02)
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                           strides=2,
                                           padding='same',
                                           kernel_initializer=initializer,
                                           activation='tanh')  # (batch_size, 256, 256, 3)
    # Build U-Net
    x = inputs

    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)

    skips = reversed(skips[:-1])  # reverse the skip list, dropping the last (bottleneck) element

    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = tf.keras.layers.Concatenate()([x, skip])

    x = last(x)
    model = tf.keras.Model(inputs=inputs, outputs=x)

    return model
pre_trained = Generator()
pre_trained.compile(optimizer='adam', loss=tf.keras.losses.BinaryCrossentropy(from_logits=True))

from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *

integer = (input_images[0] + 1) * 127.5

LAMBDA = 100
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
def generator_loss(disc_generated_output, gen_output, target, total_loop):
    gan_loss = loss_object(tf.ones_like(disc_generated_output), disc_generated_output)
    # Mean absolute error
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    total_gen_loss = gan_loss + (LAMBDA * l1_loss)
    if total_loop % 2 == 0:
        array_Gen_loss.append(total_gen_loss)
    return total_gen_loss, gan_loss, l1_loss

ssim_results = []
psnr_results = []

def ssim_psnr(pre, target):
    ssim_res = ssim(pre, target)
    psnr_res = psnr(pre, target)
    ssim_results.append(ssim_res)
    psnr_results.append(psnr_res)  # fixed: the original appended ssim_results here
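# Added note: this is the standard pix2pix generator objective, adversarial
# BCE plus LAMBDA-weighted L1. With LAMBDA = 100 the L1 term dominates, which
# pulls outputs toward the ground-truth colors while the GAN term adds detail.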
def Discriminator():
    initializer = tf.random_normal_initializer(0., 0.02)

    inp = tf.keras.layers.Input(shape=[256, 256, 1], name='input_image')  # lr
    tar = tf.keras.layers.Input(shape=[256, 256, 3], name='target_image')  # hr

    x = tf.keras.layers.concatenate([inp, tar])  # (batch_size, 256, 256, channels*2)

    down1 = downsample(64, 4, False)(x)  # (batch_size, 128, 128, 64)
    down2 = downsample(128, 4)(down1)  # (batch_size, 64, 64, 128)
    down3 = downsample(256, 4)(down2)  # (batch_size, 32, 32, 256)

    zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3)  # (batch_size, 34, 34, 256)
    conv = tf.keras.layers.Conv2D(512, 4, strides=1,
                                  kernel_initializer=initializer,
                                  use_bias=False)(zero_pad1)  # (batch_size, 31, 31, 512)

    batchnorm1 = tf.keras.layers.BatchNormalization()(conv)

    leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)

    zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu)  # (batch_size, 33, 33, 512)

    last = tf.keras.layers.Conv2D(1, 4, strides=1,
                                  kernel_initializer=initializer)(zero_pad2)  # (batch_size, 30, 30, 1)

    return tf.keras.Model(inputs=[inp, tar], outputs=last)

discriminator = Discriminator()
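# Added note: this discriminator is a PatchGAN; its (30, 30, 1) output grid
# scores overlapping patches of the input pair instead of emitting one
# whole-image score. Illustrative shape check:
#
#   out = discriminator([tf.zeros([1, 256, 256, 1]), tf.zeros([1, 256, 256, 3])])
#   print(out.shape)   # (1, 30, 30, 1)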
def discriminator_loss(disc_real_output, disc_generated_output):
    real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)

    generated_loss = loss_object(tf.zeros_like(disc_generated_output), disc_generated_output)

    total_disc_loss = real_loss + generated_loss  # could also be scaled by 0.5

    return total_disc_loss

generator_optimizer = tf.keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name="Nadam")
discriminator_optimizer = tf.keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name="Nadam")
def generate_images(model, test_input, step):
    if (step % 1 == 0):  # i.e. every step; raise the modulus to save less often
        prediction = model(test_input, training=True)
        pre = prediction[0]
        pre = (pre + 1) * 127.5
        pre = np.uint8(pre)
        name = 'image{step}.png'.format(step=step)
        plt.imsave(name, pre)

test = input_images[0]  # the original defined `test` only in commented-out gray_imread lines above; reuse a training image so the script runs
test = np.array(test).reshape(1, 256, 256, 1)
input_images = np.array(input_images).reshape(-1, 1, 256, 256, 1)
real_images = np.array(real_images).reshape(-1, 1, 256, 256, 3)
print(real_images[0].shape)
def train_step(input_image, target, step):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = pre_trained(input_image, training=True)

        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output], training=True)

        gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target, 10)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)

    generator_gradients = gen_tape.gradient(gen_total_loss,
                                            pre_trained.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss,
                                                 discriminator.trainable_variables)

    generator_optimizer.apply_gradients(zip(generator_gradients,
                                            pre_trained.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
                                                discriminator.trainable_variables))
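# Added note (an assumption, not in the original file): decorating train_step
# with @tf.function would compile it to a graph and typically speed training
# up substantially, but the Python-side array_Gen_loss.append inside
# generator_loss would then only run during tracing, so the loss log would
# need to move to a tf.TensorArray or be returned from the step instead.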
def fit(input_images, real_images, test, steps):
    example_input = test
    start = time.time()
    step = 0
    i = 0
    while step < steps:
        print("Step = ", step)
        while i < len(input_images):
            train_step(input_images[i], real_images[i], step)
            if (i % 200 == 0):
                print('i= ', i)
            i += 1
        generate_images(pre_trained, example_input, step)
        step += 1
        i = 0
    generate_images(pre_trained, example_input, step)

fit(input_images, real_images, test, 10)
#pre_trained.save("enhance/pix2pix.h5")

a = array_Gen_loss
a = np.asarray(a)
plt.plot(a)
plt.ylabel('Generator loss')  # the logged value is BCE + LAMBDA*L1, not a percentage
plt.xlabel('Logging step')
plt.show()

pre_trained = keras.models.load_model("gradio_pix2pix.h5")

#pre_trained.summary()

#pre_trained.optimizer

#path2 = '/content/drive/MyDrive/ColabNotebooks/enhance/landscape.png'
#image = gray_imread(path2)
#image = saltpepperNoise(image)
#image = np.array(image).reshape(1,256,256,1)

#prediction = pre_trained(image,training=True)
#pre = prediction[0]
#pre = (pre+1)*127.5
#pre = np.uint8(pre)
#name = '/content/drive/MyDrive/ColabNotebooks/enhance/pre_trained.png'
#plt.imsave(name,pre)
#cv2.imshow(pre)
def result(Input):
    size0 = Input.shape[0]
    size1 = Input.shape[1]
    Input = cv2.resize(Input, (256, 256), interpolation=cv2.INTER_AREA)
    Input = cv2.cvtColor(Input, cv2.COLOR_BGR2GRAY)
    Input = np.array(Input).reshape(1, 256, 256, 1)
    prediction = pre_trained(Input, training=True)
    Input = prediction[0]
    Input = (Input + 1) * 127.5
    Input = np.uint8(Input)
    Input = cv2.resize(Input, (size1, size0), interpolation=cv2.INTER_AREA)
    return Input

#lst = cv2.imread('/content/drive/MyDrive/ColabNotebooks/enhance/low-sat.jpg')
#r = result(lst)
#cv2.imshow(r)
#pip install gradio  # shell command, not Python; in a Space this belongs in requirements.txt
import gradio as gr

iface = gr.Interface(fn=result, inputs=gr.inputs.Image(type="numpy", image_mode="RGB"),
                     outputs=gr.outputs.Image(type="auto", label=None), theme="grass",
                     allow_flagging="never", css=""" body {background-color: rgba(127,191,63,0.48)} """,
                     title="Image Colorization")
iface.launch(debug=False)  # debug expects a bool; the string 'False' is truthy
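# Added note: gr.inputs.Image / gr.outputs.Image belong to the Gradio 2.x API;
# newer Gradio releases removed these namespaces (use gr.Image for both), so
# this interface definition assumes an older pinned gradio version.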