init space

- .gitattributes +1 -0
- .gitignore +3 -0
- app.py +57 -0
- examples/contents/01.jpg +3 -0
- examples/contents/02.jpg +3 -0
- examples/contents/03.jpg +3 -0
- examples/styles/01.jpg +3 -0
- examples/styles/02.jpg +3 -0
- examples/styles/03.jpg +3 -0
- examples/styles/04.jpg +3 -0
- examples/styles/05.jpg +3 -0
- examples/styles/06.jpg +3 -0
- examples/styles/07.jpg +3 -0
- examples/styles/08.jpg +3 -0
- examples/styles/09.jpg +3 -0
- examples/styles/10.jpg +3 -0
- examples/styles/11.jpg +3 -0
- examples/styles/12.jpg +3 -0
- style_transfer.py +63 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+*.jpg filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,3 @@
+.idea/
+__pycache__/
+get.py
app.py
ADDED
@@ -0,0 +1,57 @@
+import gradio as gr
+from style_transfer import StyleTransfer
+
+style = StyleTransfer()
+
+
+def predict(content_image, style_image):
+    return style.transfer(content_image, style_image)
+
+
+footer = r"""
+<center>
+<b>
+Demo for <a href='https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization'>Style Transfer</a>
+</b>
+</center>
+"""
+
+coffe = r"""
+<center>
+<a href="https://www.buymeacoffee.com/leonelhs"> <img
+src="https://img.buymeacoffee.com/button-api/?text=Buy me a
+coffee&emoji=&slug=leonelhs&button_colour=FFDD00&font_colour=000000&font_family=Cookie&outline_colour=000000
+&coffee_colour=ffffff" /></a>
+</center>
+"""
+
+with gr.Blocks(title="Style Transfer") as app:
+    gr.HTML("<center><h1>Style Transfer</h1></center>")
+    gr.HTML("<center><h3>Fast Style Transfer for Arbitrary Styles</h3></center>")
+    with gr.Row(equal_height=False):
+        with gr.Column():
+            content_img = gr.Image(type="filepath", label="Content image")
+            style_img = gr.Image(type="filepath", label="Style image")
+            run_btn = gr.Button(variant="primary")
+        with gr.Column():
+            output_img = gr.Image(type="pil", label="Output image")
+            gr.ClearButton(components=[content_img, style_img, output_img], variant="stop")
+
+    run_btn.click(predict, [content_img, style_img], [output_img])
+
+    with gr.Row():
+        blobs_c = [[f"examples/contents/{x:02d}.jpg"] for x in range(1, 4)]
+        examples_c = gr.Dataset(components=[content_img], samples=blobs_c)
+        examples_c.click(lambda x: x[0], [examples_c], [content_img])
+    with gr.Row():
+        blobs_s = [[f"examples/styles/{x:02d}.jpg"] for x in range(1, 13)]  # styles 01-12 (range end is exclusive)
+        examples_s = gr.Dataset(components=[style_img], samples=blobs_s)
+        examples_s.click(lambda x: x[0], [examples_s], [style_img])
+
+    with gr.Row():
+        gr.HTML(footer)
+    with gr.Row():
+        gr.HTML(coffe)
+
+app.queue()  # enable queuing before launch(); launch() blocks while debug=True
+app.launch(share=False, debug=True, show_error=True)
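Note: a minimal sketch of exercising the same pipeline without the Gradio UI, assuming the example images bundled in this commit and a reachable model snapshot (the output filename is illustrative):

from style_transfer import StyleTransfer

# Both Gradio inputs use type="filepath", so predict() receives plain file paths.
style = StyleTransfer()
result = style.transfer("examples/contents/01.jpg", "examples/styles/01.jpg")
result.save("stylized.jpg")  # transfer() returns a PIL.Image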
examples/contents/01.jpg ADDED (Git LFS Details)
examples/contents/02.jpg ADDED (Git LFS Details)
examples/contents/03.jpg ADDED (Git LFS Details)
examples/styles/01.jpg ADDED (Git LFS Details)
examples/styles/02.jpg ADDED (Git LFS Details)
examples/styles/03.jpg ADDED (Git LFS Details)
examples/styles/04.jpg ADDED (Git LFS Details)
examples/styles/05.jpg ADDED (Git LFS Details)
examples/styles/06.jpg ADDED (Git LFS Details)
examples/styles/07.jpg ADDED (Git LFS Details)
examples/styles/08.jpg ADDED (Git LFS Details)
examples/styles/09.jpg ADDED (Git LFS Details)
examples/styles/10.jpg ADDED (Git LFS Details)
examples/styles/11.jpg ADDED (Git LFS Details)
examples/styles/12.jpg ADDED (Git LFS Details)
style_transfer.py
ADDED
@@ -0,0 +1,63 @@
+#############################################################################
+#
+# Source from:
+# https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization
+# Forked from:
+# Reimplemented by: Leonel Hernández
+#
+##############################################################################
+
+import PIL.Image
+import numpy as np
+import tensorflow as tf
+from huggingface_hub import snapshot_download
+
+print("TF Version: ", tf.__version__)
+print("Eager mode enabled: ", tf.executing_eagerly())
+print("GPU available: ", tf.config.list_physical_devices('GPU'))
+
+STYLE_REPO_ID = "leonelhs/arbitrary-image-stylization-v1"
+
+
+def crop_center(image):
+    """Returns a cropped square image."""
+    shape = image.shape
+    new_shape = min(shape[1], shape[2])
+    offset_y = max(shape[1] - shape[2], 0) // 2
+    offset_x = max(shape[2] - shape[1], 0) // 2
+    image = tf.image.crop_to_bounding_box(
+        image, offset_y, offset_x, new_shape, new_shape)
+    return image
+
+
+def load_image(image_path, image_size=(256, 256)):
+    """Loads and preprocesses images."""
+    img = tf.io.decode_image(
+        tf.io.read_file(image_path),
+        channels=3, dtype=tf.float32)[tf.newaxis, ...]
+    img = crop_center(img)
+    img = tf.image.resize(img, image_size, preserve_aspect_ratio=True)
+    return img
+
+
+def tensor_to_image(tensor):
+    tensor = tensor * 255
+    tensor = np.array(tensor, dtype=np.uint8)
+    if np.ndim(tensor) > 3:
+        assert tensor.shape[0] == 1
+        tensor = tensor[0]
+    return PIL.Image.fromarray(tensor)
+
+
+class StyleTransfer:
+
+    def __init__(self):
+        model_path = snapshot_download(STYLE_REPO_ID)
+        self.model = tf.saved_model.load(model_path)
+
+    def transfer(self, content_image, style_image):
+        content_image = load_image(content_image, (384, 384))
+        style_image = load_image(style_image, (256, 256))
+        style_image = tf.nn.avg_pool(style_image, ksize=[3, 3], strides=[1, 1], padding='SAME')
+        stylized_image = self.model(tf.constant(content_image), tf.constant(style_image))
+        return tensor_to_image(stylized_image[0])