Upload 13 files
Browse files- .gitattributes +3 -0
- 1.png +0 -0
- 1cleaned.png +0 -0
- 2.bmp +3 -0
- 2cleaned.bmp +3 -0
- 4014.png +0 -0
- 4014cleaned.png +0 -0
- 960.png +0 -0
- 960cleaned.png +0 -0
- binarize.bmp +3 -0
- blur.png +0 -0
- models.py +113 -0
- watermark.png +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
33 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
36 |
+
2.bmp filter=lfs diff=lfs merge=lfs -text
|
37 |
+
2cleaned.bmp filter=lfs diff=lfs merge=lfs -text
|
38 |
+
binarize.bmp filter=lfs diff=lfs merge=lfs -text
|
1.png
ADDED
1cleaned.png
ADDED
2.bmp
ADDED
Git LFS Details
|
2cleaned.bmp
ADDED
Git LFS Details
|
4014.png
ADDED
4014cleaned.png
ADDED
960.png
ADDED
960cleaned.png
ADDED
binarize.bmp
ADDED
Git LFS Details
|
blur.png
ADDED
models.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
from tensorflow import keras
|
3 |
+
import numpy as np
|
4 |
+
import tensorflow as tf
|
5 |
+
import matplotlib.pyplot as plt
|
6 |
+
from tensorflow.keras import regularizers
|
7 |
+
from tensorflow.keras import metrics
|
8 |
+
import scipy.misc
|
9 |
+
import os
|
10 |
+
import numpy as np
|
11 |
+
from tensorflow.keras.models import *
|
12 |
+
from tensorflow.keras.layers import *
|
13 |
+
from tensorflow.keras.optimizers import *
|
14 |
+
|
15 |
+
|
16 |
+
def get_optimizer():
    """Return the Adam optimizer shared by the generator, discriminator and GAN.

    Returns:
        A ``tensorflow.keras.optimizers.Adam`` instance with learning rate 1e-4.
    """
    # `lr` is deprecated in TF2/Keras (removed in Keras 3); the supported
    # keyword is `learning_rate`. Same value, same behavior.
    return Adam(learning_rate=1e-4)
|
18 |
+
|
19 |
+
def generator_model(pretrained_weights = None,input_size = (256,256,1),biggest_layer = 512):
    """Build a U-Net generator for image-to-image translation.

    Args:
        pretrained_weights: Optional path to a Keras weights file. When
            provided, the weights are loaded into the model before it is
            returned. (Fix: the original accepted this argument but
            silently ignored it.)
        input_size: Input image shape as (height, width, channels).
        biggest_layer: Filter count of the bottleneck convolutions; the
            fourth encoder stage uses ``biggest_layer // 2``.
            NOTE(review): the decoder filter counts are hard-coded
            (512/256/128/64) and do not scale with this argument —
            confirm intent before generalizing.

    Returns:
        An uncompiled ``Model`` mapping an ``input_size`` image to a
        single-channel sigmoid output of the same spatial size.
    """
    inputs = Input(input_size)

    # Encoder: two 3x3 convs per stage, then 2x2 max-pooling.
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(inputs)
    conv1 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool1)
    conv2 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool2)
    conv3 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Conv2D(biggest_layer//2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool3)
    conv4 = Conv2D(biggest_layer//2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv4)
    drop4 = Dropout(0.5)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)

    # Bottleneck, with dropout for regularization.
    conv5 = Conv2D(biggest_layer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(pool4)
    conv5 = Conv2D(biggest_layer, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv5)
    drop5 = Dropout(0.5)(conv5)

    # Decoder: upsample, then concatenate with the matching encoder stage
    # (the U-Net skip connections).
    up6 = Conv2D(512, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(drop5))
    merge6 = concatenate([drop4, up6])
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge6)
    conv6 = Conv2D(512, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv6)

    up7 = Conv2D(256, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv6))
    merge7 = concatenate([conv3, up7])
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge7)
    conv7 = Conv2D(256, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv7)

    up8 = Conv2D(128, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv7))
    merge8 = concatenate([conv2, up8])
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge8)
    conv8 = Conv2D(128, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv8)

    up9 = Conv2D(64, 2, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(UpSampling2D(size = (2,2))(conv8))
    merge9 = concatenate([conv1, up9])
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(merge9)
    conv9 = Conv2D(64, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    conv9 = Conv2D(2, 3, activation = 'relu', padding = 'same', kernel_initializer = 'he_normal')(conv9)
    # Final 1x1 conv collapses to one channel; sigmoid keeps outputs in [0, 1].
    conv10 = Conv2D(1, 1, activation = 'sigmoid')(conv9)

    model = Model(inputs = inputs, outputs = conv10)

    # Fix: honour `pretrained_weights` instead of ignoring it.
    if pretrained_weights:
        model.load_weights(pretrained_weights)

    return model
|
69 |
+
|
70 |
+
|
71 |
+
|
72 |
+
def discriminator_model(input_size = (256,256,1)):
    """Build and compile a PatchGAN-style discriminator over image pairs.

    The two input images are concatenated channel-wise and passed through
    four strided convolution blocks; the output is a grid of per-patch
    sigmoid "real/fake" scores. The model is compiled with MSE loss and
    the shared Adam optimizer.

    Args:
        input_size: Shape of each input image, (height, width, channels).

    Returns:
        A compiled ``Model`` taking ``[img_A, img_B]`` and producing the
        validity map.
    """
    def conv_block(x, n_filters, kernel=4, batch_norm=True):
        # One discriminator stage: strided conv -> LeakyReLU -> optional BN.
        out = Conv2D(n_filters, kernel_size=kernel, strides=2, padding='same')(x)
        out = LeakyReLU(alpha=0.2)(out)
        return BatchNormalization(momentum=0.8)(out) if batch_norm else out

    source_img = Input(input_size)
    target_img = Input(input_size)

    base_filters = 64

    # Stack the pair along the channel axis so the discriminator judges
    # them jointly.
    stacked = Concatenate(axis=-1)([source_img, target_img])

    features = conv_block(stacked, base_filters, batch_norm=False)
    features = conv_block(features, base_filters * 2)
    features = conv_block(features, base_filters * 4)
    features = conv_block(features, base_filters * 4)

    validity = Conv2D(1, kernel_size=4, strides=1, padding='same', activation='sigmoid')(features)

    discriminator = Model([source_img, target_img], validity)
    discriminator.compile(loss='mse', optimizer=get_optimizer(), metrics=['accuracy'])
    return discriminator
|
100 |
+
|
101 |
+
|
102 |
+
|
103 |
+
def get_gan_network(discriminator, generator, input_size = (256,256,1)):
    """Wire the generator and a frozen discriminator into a combined GAN.

    Freezing the discriminator means training this combined model updates
    only the generator. The model maps an input image to
    ``[validity map, generated image]`` and is compiled with MSE on the
    validity output and binary cross-entropy on the image, weighted 1:100.

    Args:
        discriminator: Compiled discriminator ``Model`` taking an image pair.
        generator: Generator ``Model`` mapping an image to an image.
        input_size: Shape of the generator input, (height, width, channels).

    Returns:
        The compiled combined ``Model``.
    """
    # Freeze the discriminator for the combined (generator-training) model.
    discriminator.trainable = False

    source = Input(input_size)
    generated = generator(source)
    validity = discriminator([generated, source])

    gan = Model(inputs=[source], outputs=[validity, generated])
    gan.compile(
        loss=['mse', 'binary_crossentropy'],
        loss_weights=[1, 100],
        optimizer=get_optimizer(),
        metrics=['accuracy'],
    )
    return gan
|
113 |
+
|
watermark.png
ADDED