sanzgiri committed
Commit da326a1
Parent: 735b805

adding files

.gitattributes CHANGED
@@ -25,3 +25,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ saved_model/checkpoint filter=lfs diff=lfs merge=lfs -text
+ saved_model/model-33999.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
+ saved_model/model-33999.index filter=lfs diff=lfs merge=lfs -text
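
Note: the three added patterns route the new saved_model/ checkpoint files through Git LFS, so the repository stores small pointer files while the binary weights live in LFS storage. Lines like these are typically generated by running git lfs track on each path before the files are committed.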
app.py ADDED
@@ -0,0 +1,183 @@
+ import os
+ import cv2
+ import numpy as np
+ import tensorflow as tf
+ import tensorflow.contrib.slim as slim
+ import streamlit as st
+ from PIL import Image
+
+
+ def tf_box_filter(x, r):
+     # Depthwise mean filter over a (2r+1) x (2r+1) window, one kernel per channel.
+     k_size = int(2 * r + 1)
+     ch = x.get_shape().as_list()[-1]
+     weight = 1 / (k_size ** 2)
+     box_kernel = weight * np.ones((k_size, k_size, ch, 1), dtype=np.float32)
+     output = tf.nn.depthwise_conv2d(x, box_kernel, [1, 1, 1, 1], 'SAME')
+     return output
+
+
+ def guided_filter(x, y, r, eps=1e-2):
+     # Guided filter: edge-preserving smoothing of y, using x as the guide image.
+     x_shape = tf.shape(x)
+
+     # N counts the in-bounds taps per pixel, correcting the 'SAME' padding
+     # so the box-filter means stay unbiased at the image borders.
+     N = tf_box_filter(tf.ones((1, x_shape[1], x_shape[2], 1), dtype=x.dtype), r)
+
+     mean_x = tf_box_filter(x, r) / N
+     mean_y = tf_box_filter(y, r) / N
+     cov_xy = tf_box_filter(x * y, r) / N - mean_x * mean_y
+     var_x = tf_box_filter(x * x, r) / N - mean_x * mean_x
+
+     # Per-window linear model: output is approximately A * x + b.
+     A = cov_xy / (var_x + eps)
+     b = mean_y - A * mean_x
+
+     mean_A = tf_box_filter(A, r) / N
+     mean_b = tf_box_filter(b, r) / N
+
+     output = mean_A * x + mean_b
+
+     return output
+
+
+ def fast_guided_filter(lr_x, lr_y, hr_x, r=1, eps=1e-8):
+     # Fast variant: fit the linear coefficients at low resolution, then
+     # upsample A and b and apply them to the high-resolution guide hr_x.
+     lr_x_shape = tf.shape(lr_x)
+     hr_x_shape = tf.shape(hr_x)
+
+     N = tf_box_filter(tf.ones((1, lr_x_shape[1], lr_x_shape[2], 1), dtype=lr_x.dtype), r)
+
+     mean_x = tf_box_filter(lr_x, r) / N
+     mean_y = tf_box_filter(lr_y, r) / N
+     cov_xy = tf_box_filter(lr_x * lr_y, r) / N - mean_x * mean_y
+     var_x = tf_box_filter(lr_x * lr_x, r) / N - mean_x * mean_x
+
+     A = cov_xy / (var_x + eps)
+     b = mean_y - A * mean_x
+
+     mean_A = tf.image.resize_images(A, hr_x_shape[1:3])
+     mean_b = tf.image.resize_images(b, hr_x_shape[1:3])
+
+     output = mean_A * hr_x + mean_b
+
+     return output
+
+
+ def resblock(inputs, out_channel=32, name='resblock'):
+     # Two 3x3 convolutions with a residual (skip) connection.
+     with tf.variable_scope(name):
+         x = slim.convolution2d(inputs, out_channel, [3, 3],
+                                activation_fn=None, scope='conv1')
+         x = tf.nn.leaky_relu(x)
+         x = slim.convolution2d(x, out_channel, [3, 3],
+                                activation_fn=None, scope='conv2')
+
+         return x + inputs
+
+
+ def unet_generator(inputs, channel=32, num_blocks=4, name='generator', reuse=False):
+     # U-Net style generator: two stride-2 downsamplings, a stack of residual
+     # blocks, then two bilinear upsamplings with skip connections back to
+     # the matching encoder features.
+     with tf.variable_scope(name, reuse=reuse):
+         x0 = slim.convolution2d(inputs, channel, [7, 7], activation_fn=None)
+         x0 = tf.nn.leaky_relu(x0)
+
+         x1 = slim.convolution2d(x0, channel, [3, 3], stride=2, activation_fn=None)
+         x1 = tf.nn.leaky_relu(x1)
+         x1 = slim.convolution2d(x1, channel * 2, [3, 3], activation_fn=None)
+         x1 = tf.nn.leaky_relu(x1)
+
+         x2 = slim.convolution2d(x1, channel * 2, [3, 3], stride=2, activation_fn=None)
+         x2 = tf.nn.leaky_relu(x2)
+         x2 = slim.convolution2d(x2, channel * 4, [3, 3], activation_fn=None)
+         x2 = tf.nn.leaky_relu(x2)
+
+         for idx in range(num_blocks):
+             x2 = resblock(x2, out_channel=channel * 4, name='block_{}'.format(idx))
+
+         x2 = slim.convolution2d(x2, channel * 2, [3, 3], activation_fn=None)
+         x2 = tf.nn.leaky_relu(x2)
+
+         h1, w1 = tf.shape(x2)[1], tf.shape(x2)[2]
+         x3 = tf.image.resize_bilinear(x2, (h1 * 2, w1 * 2))
+         x3 = slim.convolution2d(x3 + x1, channel * 2, [3, 3], activation_fn=None)
+         x3 = tf.nn.leaky_relu(x3)
+         x3 = slim.convolution2d(x3, channel, [3, 3], activation_fn=None)
+         x3 = tf.nn.leaky_relu(x3)
+
+         h2, w2 = tf.shape(x3)[1], tf.shape(x3)[2]
+         x4 = tf.image.resize_bilinear(x3, (h2 * 2, w2 * 2))
+         x4 = slim.convolution2d(x4 + x0, channel, [3, 3], activation_fn=None)
+         x4 = tf.nn.leaky_relu(x4)
+         x4 = slim.convolution2d(x4, 3, [7, 7], activation_fn=None)
+
+         return x4
+
+
+ def resize_crop(image):
+     # Cap the short side at 720 px, halve the result, then crop to a
+     # multiple of 8 so the generator's down/upsampling stages line up.
+     h, w, c = np.shape(image)
+     if min(h, w) > 720:
+         if h > w:
+             h, w = int(720 * h / w), 720
+         else:
+             h, w = 720, int(720 * w / h)
+     w = int(w / 2)
+     h = int(h / 2)
+     st.image(image, caption='Your image', width=w)
+     image = cv2.resize(np.float32(image), (w, h),
+                        interpolation=cv2.INTER_AREA)
+     h, w = (h // 8) * 8, (w // 8) * 8
+     image = image[:h, :w, :]
+     return image
+
+
+ def cartoonize(infile, outfile, model_path):
+     # Rebuild the graph from scratch on every call: Streamlit reruns the
+     # script on each interaction, and a stale default graph would collide
+     # with the 'generator' variable scope on the second upload.
+     tf.reset_default_graph()
+
+     input_photo = tf.placeholder(tf.float32, [1, None, None, 3])
+     network_out = unet_generator(input_photo)
+     final_out = guided_filter(input_photo, network_out, r=1, eps=5e-3)
+
+     all_vars = tf.trainable_variables()
+     gene_vars = [var for var in all_vars if 'generator' in var.name]
+     saver = tf.train.Saver(var_list=gene_vars)
+
+     config = tf.ConfigProto()
+     sess = tf.Session(config=config)
+
+     sess.run(tf.global_variables_initializer())
+     saver.restore(sess, tf.train.latest_checkpoint(model_path))
+
+     image = resize_crop(infile)
+     batch_image = image.astype(np.float32) / 127.5 - 1
+     batch_image = np.expand_dims(batch_image, axis=0)
+     output = sess.run(final_out, feed_dict={input_photo: batch_image})
+     output = (np.squeeze(output) + 1) * 127.5
+     output = np.clip(output, 0, 255).astype(np.uint8)
+     # The PIL input is RGB; convert before cv2.imwrite, which expects BGR.
+     output = cv2.cvtColor(output, cv2.COLOR_RGB2BGR)
+     cv2.imwrite(outfile, output)
+     sess.close()
+
+
+ def main():
+     model_path = 'saved_model'
+     outfile = "result.jpg"
+     if os.path.exists(outfile):
+         os.remove(outfile)
+
+     st.title('Cartoonify!')
+     infile = st.file_uploader("Choose an image file to cartoonify", type=["jpg", "jpeg"])
+
+     if infile is not None:
+         image = Image.open(infile)
+         cartoonize(image, outfile, model_path)
+
+         out_image = Image.open(outfile)
+         st.image(out_image, caption=f'Cartoonized version: {outfile}')
+
+
+ if __name__ == "__main__":
+     main()
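
A quick check of why guided_filter divides every box-filter response by N: tf_box_filter uses 'SAME' padding, so windows that overhang the image border sum fewer pixels than interior windows. A minimal sketch (assuming TensorFlow 1.15 from requirements.txt and that app.py above is importable):

    import tensorflow as tf
    from app import tf_box_filter

    # On an all-ones image the interior response is exactly 1.0
    # ((2r+1)^2 taps times the 1/(2r+1)^2 kernel weight), while border
    # pixels see fewer taps -- the bias that dividing by N cancels.
    x = tf.ones((1, 8, 8, 1), dtype=tf.float32)
    y = tf_box_filter(x, r=1)
    with tf.Session() as sess:
        out = sess.run(y)
    print(out[0, 4, 4, 0])  # 1.0 in the interior
    print(out[0, 0, 0, 0])  # ~0.444 (only 4 of 9 taps in-bounds at the corner)

The app itself is launched with streamlit run app.py: the uploaded photo is normalized to [-1, 1], pushed through the generator, smoothed by the guided filter, and written back as result.jpg.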
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ tensorflow==1.15.4
+ scikit-image==0.14.5
+ opencv-python-headless
+ streamlit
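
The tensorflow==1.15.4 pin is load-bearing: app.py uses tf.contrib.slim, tf.placeholder, and tf.Session, all of which were removed (or moved under tf.compat.v1) in TensorFlow 2.x, so the app will not start on a 2.x install. Setup is the usual pip install -r requirements.txt, though note that TF 1.15 wheels generally require Python 3.7 or earlier.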
saved_model/checkpoint ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6f13dda4dbb22ce6dd5803d5a00a1ff4c00c89ca658fb54af531be313acfbfef
+ size 121
saved_model/model-33999.data-00000-of-00001 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e2df1a5aa86faa4f979720bfc2436f79333a480876f8d6790b7671cf50fe75b
+ size 5868300
saved_model/model-33999.index ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6df57ba6ac8ca4e45d04ab7f5948c8c5de180173a6098ac7cdcb85565ed075d
+ size 1561
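
The three saved_model/ entries above are Git LFS pointer files, not the weights themselves: each stores only the LFS spec version, the SHA-256 of the real blob, and its size (about 5.9 MB for the data shard). Cloning with git lfs installed, or running git lfs pull afterwards, replaces the pointers with the actual checkpoint that cartoonize() restores from.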