Update app.py
app.py CHANGED
@@ -37,6 +37,97 @@ style_layer_names = [
 # The layer to use for the content loss.
 content_layer_name = "block5_conv2"
 
+
+def preprocess_image(image_path):
+    # Util function to open, resize and format pictures into appropriate tensors
+    img = keras.preprocessing.image.load_img(
+        image_path, target_size=(img_nrows, img_ncols)
+    )
+    img = keras.preprocessing.image.img_to_array(img)
+    img = np.expand_dims(img, axis=0)
+    img = vgg19.preprocess_input(img)
+    return tf.convert_to_tensor(img)
+
+def deprocess_image(x):
+    # Util function to convert a tensor into a valid image
+    x = x.reshape((img_nrows, img_ncols, 3))
+    # Remove zero-center by mean pixel
+    x[:, :, 0] += 103.939
+    x[:, :, 1] += 116.779
+    x[:, :, 2] += 123.68
+    # 'BGR'->'RGB'
+    x = x[:, :, ::-1]
+    x = np.clip(x, 0, 255).astype("uint8")
+    return x
+
+# The gram matrix of an image tensor (feature-wise outer product)
+
+def gram_matrix(x):
+    x = tf.transpose(x, (2, 0, 1))
+    features = tf.reshape(x, (tf.shape(x)[0], -1))
+    gram = tf.matmul(features, tf.transpose(features))
+    return gram
+
+# The "style loss" is designed to maintain
+# the style of the reference image in the generated image.
+# It is based on the gram matrices (which capture style) of
+# feature maps from the style reference image
+# and from the generated image
+
+def style_loss(style, combination):
+    S = gram_matrix(style)
+    C = gram_matrix(combination)
+    channels = 3
+    size = img_nrows * img_ncols
+    return tf.reduce_sum(tf.square(S - C)) / (4.0 * (channels ** 2) * (size ** 2))
+
+# An auxiliary loss function
+# designed to maintain the "content" of the
+# base image in the generated image
+
+def content_loss(base, combination):
+    return tf.reduce_sum(tf.square(combination - base))
+
+# The 3rd loss function, total variation loss,
+# designed to keep the generated image locally coherent
+
+def total_variation_loss(x):
+    a = tf.square(
+        x[:, : img_nrows - 1, : img_ncols - 1, :] - x[:, 1:, : img_ncols - 1, :]
+    )
+    b = tf.square(
+        x[:, : img_nrows - 1, : img_ncols - 1, :] - x[:, : img_nrows - 1, 1:, :]
+    )
+    return tf.reduce_sum(tf.pow(a + b, 1.25))
+
+def compute_loss(combination_image, base_image, style_reference_image):
+    input_tensor = tf.concat(
+        [base_image, style_reference_image, combination_image], axis=0
+    )
+    features = feature_extractor(input_tensor)
+
+    # Initialize the loss
+    loss = tf.zeros(shape=())
+
+    # Add content loss
+    layer_features = features[content_layer_name]
+    base_image_features = layer_features[0, :, :, :]
+    combination_features = layer_features[2, :, :, :]
+    loss = loss + content_weight * content_loss(
+        base_image_features, combination_features
+    )
+    # Add style loss
+    for layer_name in style_layer_names:
+        layer_features = features[layer_name]
+        style_reference_features = layer_features[1, :, :, :]
+        combination_features = layer_features[2, :, :, :]
+        sl = style_loss(style_reference_features, combination_features)
+        loss += (style_weight / len(style_layer_names)) * sl
+
+    # Add total variation loss
+    loss += total_variation_weight * total_variation_loss(combination_image)
+    return loss
+
 @tf.function
 def compute_loss_and_grads(combination_image, base_image, style_reference_image):
     with tf.GradientTape() as tape:
@@ -68,7 +159,6 @@ def get_imgs(base_image_path, style_reference_image_path):
 
 title = "Neural style transfer"
 description = "Gradio Demo for Neural style transfer. To use it, simply upload a base image and a style image"
-
 content = gr.inputs.Image(shape=None, image_mode="RGB", invert_colors=False, source="upload", tool="editor", type="filepath", label=None, optional=False)
 style = gr.inputs.Image(shape=None, image_mode="RGB", invert_colors=False, source="upload", tool="editor", type="filepath", label=None, optional=False)
 gr.Interface(get_imgs, inputs=[content, style], outputs=["image"],