NEXAS committed on
Commit d068d1d
1 Parent(s): 9c789d0

Upload 6 files

Files changed (6)
  1. .gitignore.txt +41 -0
  2. low_light.py +350 -0
  3. model.h5 +3 -0
  4. myModel.json +0 -0
  5. myModelw.h5 +3 -0
  6. requirements.txt +6 -0
.gitignore.txt ADDED
@@ -0,0 +1,41 @@
+ # Python related
+ __pycache__/
+ *.py[cod]
+ *.pyo
+ *.egg-info/
+ *.egg
+ *.venv
+ *.virtualenv
+ *.pypirc
+
+ # IDEs and Editors
+ ## Visual Studio Code
+ .vscode/
+ ## PyCharm
+ .idea/
+ ## Jupyter notebooks
+ .ipynb_checkpoints/
+
+ # Operating Systems
+ ## macOS
+ .DS_Store
+ ## Windows
+ Thumbs.db
+ ehthumbs.db
+ Desktop.ini
+ ## Linux
+ *~
+
+ # TensorFlow specific (if you save models or have checkpoints)
+ *.h5
+ *.ckpt
+
+ # Other
+ # Directories or files for logs, databases, and datasets that you don't want to commit
+ logs/
+ datasets/
+ *.log
+ *.sqlite
+
+ # If you use virtual environments and you name them `env` or similar
+ env/
low_light.py ADDED
@@ -0,0 +1,350 @@
+ # -*- coding: utf-8 -*-
+ """low_light.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1vJJW7kOOjTkw9HwKoamjX-KNhLS6aASP
+ """
+
+ import os
+ import cv2
+ import random
+ import numpy as np
+ from glob import glob
+ from PIL import Image, ImageOps
+ import matplotlib.pyplot as plt
+
+ import tensorflow as tf
+ from tensorflow import keras
+ from tensorflow.keras import layers
+
+ # Colab shell commands: download and extract the LOL dataset.
+ !gdown https://drive.google.com/uc?id=1DdGIJ4PZPlF2ikl8mNM9V-PdVxVLbQi6
+ !unzip -q lol_dataset.zip
+
+ random.seed(10)
+
+ IMAGE_SIZE = 128
+ BATCH_SIZE = 4
+ MAX_TRAIN_IMAGES = 300
+
+
+ def read_image(image_path):
+     # Read a PNG, force 3 channels, and scale pixel values to [0, 1].
+     image = tf.io.read_file(image_path)
+     image = tf.image.decode_png(image, channels=3)
+     image.set_shape([None, None, 3])
+     image = tf.cast(image, dtype=tf.float32) / 255.0
+     return image
+
+
+ def random_crop(low_image, enhanced_image):
+     # Crop spatially aligned patches from the low-light image and its ground truth.
+     low_image_shape = tf.shape(low_image)[:2]
+     low_w = tf.random.uniform(
+         shape=(), maxval=low_image_shape[1] - IMAGE_SIZE + 1, dtype=tf.int32
+     )
+     low_h = tf.random.uniform(
+         shape=(), maxval=low_image_shape[0] - IMAGE_SIZE + 1, dtype=tf.int32
+     )
+     enhanced_w = low_w
+     enhanced_h = low_h
+     low_image_cropped = low_image[
+         low_h : low_h + IMAGE_SIZE, low_w : low_w + IMAGE_SIZE
+     ]
+     enhanced_image_cropped = enhanced_image[
+         enhanced_h : enhanced_h + IMAGE_SIZE, enhanced_w : enhanced_w + IMAGE_SIZE
+     ]
+     return low_image_cropped, enhanced_image_cropped
+
+
+ def load_data(low_light_image_path, enhanced_image_path):
+     low_light_image = read_image(low_light_image_path)
+     enhanced_image = read_image(enhanced_image_path)
+     low_light_image, enhanced_image = random_crop(low_light_image, enhanced_image)
+     return low_light_image, enhanced_image
+
+
+ def get_dataset(low_light_images, enhanced_images):
+     dataset = tf.data.Dataset.from_tensor_slices((low_light_images, enhanced_images))
+     dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
+     dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
+     return dataset
+
+
+ # First MAX_TRAIN_IMAGES pairs of our485 for training, the rest for validation.
+ train_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[:MAX_TRAIN_IMAGES]
+ train_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[:MAX_TRAIN_IMAGES]
+
+ val_low_light_images = sorted(glob("./lol_dataset/our485/low/*"))[MAX_TRAIN_IMAGES:]
+ val_enhanced_images = sorted(glob("./lol_dataset/our485/high/*"))[MAX_TRAIN_IMAGES:]
+
+ test_low_light_images = sorted(glob("./lol_dataset/eval15/low/*"))
+ test_enhanced_images = sorted(glob("./lol_dataset/eval15/high/*"))
+
+
+ train_dataset = get_dataset(train_low_light_images, train_enhanced_images)
+ val_dataset = get_dataset(val_low_light_images, val_enhanced_images)
+
+
+ print("Train Dataset:", train_dataset)
+ print("Val Dataset:", val_dataset)
+
+ # Selective Kernel Feature Fusion (SKFF): fuse three multi-scale feature
+ # streams via learned channel-wise attention weights.
+ def selective_kernel_feature_fusion(
+     multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3
+ ):
+     channels = list(multi_scale_feature_1.shape)[-1]
+     combined_feature = layers.Add()(
+         [multi_scale_feature_1, multi_scale_feature_2, multi_scale_feature_3]
+     )
+     gap = layers.GlobalAveragePooling2D()(combined_feature)
+     channel_wise_statistics = tf.reshape(gap, shape=(-1, 1, 1, channels))
+     compact_feature_representation = layers.Conv2D(
+         filters=channels // 8, kernel_size=(1, 1), activation="relu"
+     )(channel_wise_statistics)
+     feature_descriptor_1 = layers.Conv2D(
+         channels, kernel_size=(1, 1), activation="softmax"
+     )(compact_feature_representation)
+     feature_descriptor_2 = layers.Conv2D(
+         channels, kernel_size=(1, 1), activation="softmax"
+     )(compact_feature_representation)
+     feature_descriptor_3 = layers.Conv2D(
+         channels, kernel_size=(1, 1), activation="softmax"
+     )(compact_feature_representation)
+     feature_1 = multi_scale_feature_1 * feature_descriptor_1
+     feature_2 = multi_scale_feature_2 * feature_descriptor_2
+     feature_3 = multi_scale_feature_3 * feature_descriptor_3
+     aggregated_feature = layers.Add()([feature_1, feature_2, feature_3])
+     return aggregated_feature
+
+ def spatial_attention_block(input_tensor):
+     # Channel-wise mean and max maps, fused into a single spatial attention map.
+     average_pooling = tf.reduce_mean(input_tensor, axis=-1)
+     average_pooling = tf.expand_dims(average_pooling, axis=-1)
+     max_pooling = tf.reduce_max(input_tensor, axis=-1)
+     max_pooling = tf.expand_dims(max_pooling, axis=-1)
+     concatenated = layers.Concatenate(axis=-1)([average_pooling, max_pooling])
+     feature_map = layers.Conv2D(1, kernel_size=(1, 1))(concatenated)
+     feature_map = tf.nn.sigmoid(feature_map)
+     return input_tensor * feature_map
+
+
+ def channel_attention_block(input_tensor):
+     # Squeeze-and-excitation style channel attention.
+     channels = list(input_tensor.shape)[-1]
+     average_pooling = layers.GlobalAveragePooling2D()(input_tensor)
+     feature_descriptor = tf.reshape(average_pooling, shape=(-1, 1, 1, channels))
+     feature_activations = layers.Conv2D(
+         filters=channels // 8, kernel_size=(1, 1), activation="relu"
+     )(feature_descriptor)
+     feature_activations = layers.Conv2D(
+         filters=channels, kernel_size=(1, 1), activation="sigmoid"
+     )(feature_activations)
+     return input_tensor * feature_activations
+
+
+ def dual_attention_unit_block(input_tensor):
+     # Dual Attention Unit (DAU): channel and spatial attention in parallel,
+     # merged and added back through a residual connection.
+     channels = list(input_tensor.shape)[-1]
+     feature_map = layers.Conv2D(
+         channels, kernel_size=(3, 3), padding="same", activation="relu"
+     )(input_tensor)
+     feature_map = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(
+         feature_map
+     )
+     channel_attention = channel_attention_block(feature_map)
+     spatial_attention = spatial_attention_block(feature_map)
+     concatenation = layers.Concatenate(axis=-1)([channel_attention, spatial_attention])
+     concatenation = layers.Conv2D(channels, kernel_size=(1, 1))(concatenation)
+     return layers.Add()([input_tensor, concatenation])
+
+
+ # Recursive Residual Modules
+
+
+ def down_sampling_module(input_tensor):
+     # Halve the spatial resolution and double the channel count.
+     channels = list(input_tensor.shape)[-1]
+     main_branch = layers.Conv2D(channels, kernel_size=(1, 1), activation="relu")(
+         input_tensor
+     )
+     main_branch = layers.Conv2D(
+         channels, kernel_size=(3, 3), padding="same", activation="relu"
+     )(main_branch)
+     main_branch = layers.MaxPooling2D()(main_branch)
+     main_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(main_branch)
+     skip_branch = layers.MaxPooling2D()(input_tensor)
+     skip_branch = layers.Conv2D(channels * 2, kernel_size=(1, 1))(skip_branch)
+     return layers.Add()([skip_branch, main_branch])
+
+
+ def up_sampling_module(input_tensor):
+     # Double the spatial resolution and halve the channel count.
+     channels = list(input_tensor.shape)[-1]
+     main_branch = layers.Conv2D(channels, kernel_size=(1, 1), activation="relu")(
+         input_tensor
+     )
+     main_branch = layers.Conv2D(
+         channels, kernel_size=(3, 3), padding="same", activation="relu"
+     )(main_branch)
+     main_branch = layers.UpSampling2D()(main_branch)
+     main_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(main_branch)
+     skip_branch = layers.UpSampling2D()(input_tensor)
+     skip_branch = layers.Conv2D(channels // 2, kernel_size=(1, 1))(skip_branch)
+     return layers.Add()([skip_branch, main_branch])
+
+
+ # MRB Block
+ def multi_scale_residual_block(input_tensor, channels):
+     # features
+     level1 = input_tensor
+     level2 = down_sampling_module(input_tensor)
+     level3 = down_sampling_module(level2)
+     # DAU
+     level1_dau = dual_attention_unit_block(level1)
+     level2_dau = dual_attention_unit_block(level2)
+     level3_dau = dual_attention_unit_block(level3)
+     # SKFF
+     level1_skff = selective_kernel_feature_fusion(
+         level1_dau,
+         up_sampling_module(level2_dau),
+         up_sampling_module(up_sampling_module(level3_dau)),
+     )
+     level2_skff = selective_kernel_feature_fusion(
+         down_sampling_module(level1_dau), level2_dau, up_sampling_module(level3_dau)
+     )
+     level3_skff = selective_kernel_feature_fusion(
+         down_sampling_module(down_sampling_module(level1_dau)),
+         down_sampling_module(level2_dau),
+         level3_dau,
+     )
+     # DAU 2
+     level1_dau_2 = dual_attention_unit_block(level1_skff)
+     level2_dau_2 = up_sampling_module(dual_attention_unit_block(level2_skff))
+     level3_dau_2 = up_sampling_module(
+         up_sampling_module(dual_attention_unit_block(level3_skff))
+     )
+     # SKFF 2
+     skff_ = selective_kernel_feature_fusion(level1_dau_2, level2_dau_2, level3_dau_2)
+     conv = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(skff_)
+     return layers.Add()([input_tensor, conv])
+
+
+ def recursive_residual_group(input_tensor, num_mrb, channels):
+     conv1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(input_tensor)
+     for _ in range(num_mrb):
+         conv1 = multi_scale_residual_block(conv1, channels)
+     conv2 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(conv1)
+     return layers.Add()([conv2, input_tensor])
+
+
+ def mirnet_model(num_rrg, num_mrb, channels):
+     input_tensor = keras.Input(shape=[None, None, 3])
+     x1 = layers.Conv2D(channels, kernel_size=(3, 3), padding="same")(input_tensor)
+     for _ in range(num_rrg):
+         x1 = recursive_residual_group(x1, num_mrb, channels)
+     conv = layers.Conv2D(3, kernel_size=(3, 3), padding="same")(x1)
+     output_tensor = layers.Add()([input_tensor, conv])
+     return keras.Model(input_tensor, output_tensor)
+
+
+ model = mirnet_model(num_rrg=3, num_mrb=2, channels=64)
+
+
+ def charbonnier_loss(y_true, y_pred):
+     # Charbonnier loss: a smooth, differentiable approximation of L1 loss.
+     return tf.reduce_mean(tf.sqrt(tf.square(y_true - y_pred) + tf.square(1e-3)))
+
+
+ def peak_signal_noise_ratio(y_true, y_pred):
+     return tf.image.psnr(y_pred, y_true, max_val=255.0)
+
+
+ optimizer = keras.optimizers.Adam(learning_rate=1e-4)
+ model.compile(
+     optimizer=optimizer, loss=charbonnier_loss, metrics=[peak_signal_noise_ratio]
+ )
+
+ history = model.fit(
+     train_dataset,
+     validation_data=val_dataset,
+     epochs=50,
+     callbacks=[
+         keras.callbacks.ReduceLROnPlateau(
+             monitor="val_peak_signal_noise_ratio",
+             factor=0.5,
+             patience=5,
+             verbose=1,
+             min_delta=1e-7,
+             mode="max",
+         )
+     ],
+ )
+
+ plt.plot(history.history["loss"], label="train_loss")
+ plt.plot(history.history["val_loss"], label="val_loss")
+ plt.xlabel("Epochs")
+ plt.ylabel("Loss")
+ plt.title("Train and Validation Losses Over Epochs", fontsize=14)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+
+ plt.plot(history.history["peak_signal_noise_ratio"], label="train_psnr")
+ plt.plot(history.history["val_peak_signal_noise_ratio"], label="val_psnr")
+ plt.xlabel("Epochs")
+ plt.ylabel("PSNR")
+ plt.title("Train and Validation PSNR Over Epochs", fontsize=14)
+ plt.legend()
+ plt.grid()
+ plt.show()
+
+ # equivalent to: model.save("model.h5")
+ from tensorflow.keras.models import save_model
+ save_model(model, "model.h5")
+
+ from keras.models import model_from_json
+
+ # Save the architecture as JSON and the weights separately.
+ save_model_json = model.to_json()
+ with open("myModel.json", "w") as json_file:
+     json_file.write(save_model_json)
+
+ model.save_weights("myModelw.h5")
+
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+ !ls /content/drive/MyDrive/
+
+ # Zip the saved model for download.
+ !zip -r model.zip model.h5
+
+ def plot_results(images, titles, figure_size=(12, 12)):
+     fig = plt.figure(figsize=figure_size)
+     for i in range(len(images)):
+         fig.add_subplot(1, len(images), i + 1).set_title(titles[i])
+         _ = plt.imshow(images[i])
+         plt.axis("off")
+     plt.show()
+
+
+ def infer(original_image):
+     # Normalize to [0, 1], add a batch dimension, and run the model.
+     image = keras.utils.img_to_array(original_image)
+     image = image.astype("float32") / 255.0
+     image = np.expand_dims(image, axis=0)
+     output = model.predict(image)
+     # Rescale the prediction to 8-bit pixels and convert back to a PIL image.
+     output_image = output[0] * 255.0
+     output_image = output_image.clip(0, 255)
+     output_image = output_image.reshape(
+         (np.shape(output_image)[0], np.shape(output_image)[1], 3)
+     )
+     output_image = Image.fromarray(np.uint8(output_image))
+     return output_image
+
+
+ for low_light_image in random.sample(test_low_light_images, 6):
+     original_image = Image.open(low_light_image)
+     enhanced_image = infer(original_image)
+     plot_results(
+         [original_image, ImageOps.autocontrast(original_image), enhanced_image],
+         ["Original", "PIL Autocontrast", "MIRNet Enhanced"],
+         (20, 12),
+     )
+
+ original_image = Image.open('/content/wallpaperflare.com_wallpaper.jpg')
+ enhanced_image = infer(original_image)
+ plot_results(
+     [original_image, ImageOps.autocontrast(original_image), enhanced_image],
+     ["Original", "PIL Autocontrast", "MIRNet Enhanced"],
+     (20, 12),
+ )
model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24bd00b4e32959cafe4aa4de2c6fc61538bfe8799d9b1015454c4438322f10f6
+ size 535443440
myModel.json ADDED
The diff for this file is too large to render. See raw diff
 
myModelw.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7329a0c663261d7b0c519e763d4ed9192359898603b696bfc13dd07639a06e1f
+ size 179371144
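
The three model artifacts (model.h5, myModel.json, myModelw.h5) are Git LFS pointers; the binaries come down with git lfs pull. A minimal sketch for loading them back, assuming TensorFlow/Keras 2.x and that the charbonnier_loss and peak_signal_noise_ratio functions from low_light.py are in scope:

    from tensorflow import keras
    from tensorflow.keras.models import model_from_json

    # Option 1: load the full model; the custom loss/metric used at compile
    # time must be re-supplied by name.
    model = keras.models.load_model(
        "model.h5",
        custom_objects={
            "charbonnier_loss": charbonnier_loss,
            "peak_signal_noise_ratio": peak_signal_noise_ratio,
        },
    )

    # Option 2: rebuild the architecture from JSON, then attach the weights.
    with open("myModel.json") as json_file:
        model = model_from_json(json_file.read())
    model.load_weights("myModelw.h5")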
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ opencv-python
+ numpy
+ Pillow
+ matplotlib
+ tensorflow
+ gdown
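
The dependencies above install with pip install -r requirements.txt. Note that low_light.py uses Colab shell escapes (!gdown, !unzip, !zip) and google.colab.drive, so it is written to run in a Colab notebook rather than as a plain script, and fetching the LFS-tracked .h5 files after cloning requires git-lfs.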