Spaces:
Build error
Build error
Sophie98
committed on
Commit
β’
5e8f5b8
1
Parent(s):
e4e8de7
fix error still...
Browse files- app.py +13 -9
- requirements.txt +2 -1
- styleTransfer.py +17 -4
app.py
CHANGED
@@ -102,21 +102,25 @@ def style_sofa(input_img: np.ndarray, style_img: np.ndarray):
|
|
102 |
image = gr.inputs.Image()
|
103 |
style = gr.inputs.Image()
|
104 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
105 |
demo = gr.Interface(
|
106 |
style_sofa,
|
107 |
-
[image,style],
|
108 |
-
'image',
|
109 |
-
examples=[
|
110 |
-
['sofa_example1.jpg','style_example1.jpg'],
|
111 |
-
['sofa_example1.jpg','style_example2.jpg'],
|
112 |
-
['sofa_example1.jpg','style_example3.jpg'],
|
113 |
-
['sofa_example1.jpg','style_example4.jpg'],
|
114 |
-
['sofa_example1.jpg','style_example5.jpg'],
|
115 |
-
],
|
116 |
title="π Style your sofa π ",
|
117 |
description="Customize your sofa to your wildest dreams!\
|
118 |
\nProvide a picture of your sofa and a desired pattern\
|
119 |
or choose one of the examples below",
|
|
|
|
|
|
|
120 |
)
|
121 |
|
122 |
if __name__ == "__main__":
|
102 |
image = gr.inputs.Image()
|
103 |
style = gr.inputs.Image()
|
104 |
|
105 |
+
# Examples
|
106 |
+
example1 = ['sofa_example1.jpg','style_example1.jpg'],
|
107 |
+
example2 = ['sofa_example1.jpg','style_example2.jpg'],
|
108 |
+
example3 = ['sofa_example1.jpg','style_example3.jpg'],
|
109 |
+
example4 = ['sofa_example1.jpg','style_example4.jpg'],
|
110 |
+
example5 = ['sofa_example1.jpg','style_example5.jpg'],
|
111 |
+
|
112 |
demo = gr.Interface(
|
113 |
style_sofa,
|
114 |
+
inputs = [image,style],
|
115 |
+
outputs = 'image',
|
116 |
+
examples=[example1,example2,example3,example4,example5],
|
|
|
|
|
|
|
|
|
|
|
|
|
117 |
title="π Style your sofa π ",
|
118 |
description="Customize your sofa to your wildest dreams!\
|
119 |
\nProvide a picture of your sofa and a desired pattern\
|
120 |
or choose one of the examples below",
|
121 |
+
# article="**References**\n\n"
|
122 |
+
# "<a href='https://www.tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization' target='_blank'>1. Tutorial to implement Fast Neural Style Transfer using the pretrained model from TensorFlow Hub</a> \n"
|
123 |
+
# "<a href='https://huggingface.co/spaces/luca-martial/neural-style-transfer' target='_blank'>2. The idea to build a neural style transfer application was inspired from this Hugging Face Space </a>"
|
124 |
)
|
125 |
|
126 |
if __name__ == "__main__":
|
requirements.txt
CHANGED
@@ -9,4 +9,5 @@ gradio
|
|
9 |
|
10 |
segmentation_models
|
11 |
opencv-python-headless
|
12 |
-
tensorflow-cpu
|
|
9 |
|
10 |
segmentation_models
|
11 |
opencv-python-headless
|
12 |
+
tensorflow-cpu
|
13 |
+
tensorflow_hub
|
styleTransfer.py
CHANGED
@@ -8,7 +8,13 @@ import transformer as transformer
|
|
8 |
import StyTR as StyTR
|
9 |
import numpy as np
|
10 |
from collections import OrderedDict
|
|
|
|
|
11 |
|
|
|
|
|
|
|
|
|
12 |
def test_transform(size, crop):
|
13 |
transform_list = []
|
14 |
|
@@ -84,11 +90,8 @@ def StyleTransformer(content_img: Image, style_img: Image,
|
|
84 |
network.eval()
|
85 |
network.to(device)
|
86 |
|
87 |
-
|
88 |
-
|
89 |
content_tf = test_transform(content_size, crop)
|
90 |
style_tf = test_transform(style_size, crop)
|
91 |
-
|
92 |
|
93 |
content_tf1 = content_transform()
|
94 |
content = content_tf(content_img.convert("RGB"))
|
@@ -104,9 +107,19 @@ def StyleTransformer(content_img: Image, style_img: Image,
|
|
104 |
with torch.no_grad():
|
105 |
output= network(content,style)
|
106 |
output = output[0].cpu()
|
107 |
-
|
|
|
108 |
return output
|
109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
def create_styledSofa(sofa:Image, style:Image):
|
111 |
styled_sofa = StyleTransformer(sofa,style)
|
112 |
return styled_sofa
|
8 |
import StyTR as StyTR
|
9 |
import numpy as np
|
10 |
from collections import OrderedDict
|
11 |
+
import tensorflow_hub as hub
|
12 |
+
import tensorflow as tf
|
13 |
|
14 |
+
style_transfer_model = hub.load("https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2")
|
15 |
+
|
16 |
+
|
17 |
+
############################################# TRANSFORMER ############################################
|
18 |
def test_transform(size, crop):
|
19 |
transform_list = []
|
20 |
|
90 |
network.eval()
|
91 |
network.to(device)
|
92 |
|
|
|
|
|
93 |
content_tf = test_transform(content_size, crop)
|
94 |
style_tf = test_transform(style_size, crop)
|
|
|
95 |
|
96 |
content_tf1 = content_transform()
|
97 |
content = content_tf(content_img.convert("RGB"))
|
107 |
with torch.no_grad():
|
108 |
output= network(content,style)
|
109 |
output = output[0].cpu()
|
110 |
+
torch2PIL = transforms.ToPILImage()
|
111 |
+
output = torch2PIL(output)
|
112 |
return output
|
113 |
|
114 |
+
############################################## STYLE-GAN #############################################
|
115 |
+
|
116 |
+
def perform_style_transfer(content_image, style_image):
|
117 |
+
content_image = tf.convert_to_tensor(content_image, np.float32)[tf.newaxis, ...] / 255.
|
118 |
+
style_image = tf.convert_to_tensor(style_image, np.float32)[tf.newaxis, ...] / 255.
|
119 |
+
output = style_transfer_model(content_image, style_image)
|
120 |
+
stylized_image = output[0]
|
121 |
+
return Image.fromarray(np.uint8(stylized_image[0] * 255))
|
122 |
+
|
123 |
def create_styledSofa(sofa:Image, style:Image):
|
124 |
styled_sofa = StyleTransformer(sofa,style)
|
125 |
return styled_sofa
|