# Hugging Face Space: Gradio demo that restyles sofas in a photo
# with a user-supplied pattern via segmentation + style transfer.
from random import randint
from typing import Tuple

import gradio as gr
import numpy as np
from PIL import Image

from Segmentation.segmentation import get_mask, replace_sofa
from StyleTransfer.styleTransfer import create_styledSofa
def resize_sofa(img: Image.Image) -> Tuple[Image.Image, tuple]:
    """
    Pad the image with white to a square and scale it to 640x640.

    Also computes the crop box that undoes the padding, so the
    styled result can be restored to the original aspect ratio.

    Parameters:
        img = original image
    Return:
        (squared 640x640 image, crop box reverting to the original ratio)
    """
    width, height = img.size
    target = (640, 640)  # size expected by the segmentation model
    if width <= height:  # same branch np.argmin([w, h]) == 0 would pick
        # Portrait (or square): pad left/right with white.
        square = Image.new(img.mode, (height, height), (255, 255, 255))
        square.paste(img, ((height - width) // 2, 0))
        margin = target[0] * (1 - width / height) // 2
        box = (margin, 0, target[0] - margin, target[1])
    else:
        # Landscape: pad top/bottom with white.
        square = Image.new(img.mode, (width, width), (255, 255, 255))
        square.paste(img, (0, (width - height) // 2))
        margin = target[1] * (1 - height / width) // 2
        box = (0, margin, target[0], target[1] - margin)
    return square.resize(target), box
def resize_style(img: Image.Image) -> Image.Image:
    """
    Build a zoomed-out 640x640 version of the style image.

    The pattern is centre-cropped to a square, shrunk to a small
    tile, and tiled 8x8 with alternating mirror flips so the seams
    between copies line up.

    Parameters:
        img = image containing the style/pattern
    Return:
        a zoomed-out, 640x640 version of the pattern
    """
    width, height = img.size
    # Centre-crop along the longer axis to make the image square.
    if width <= height:  # same branch np.argmin([w, h]) == 0 would pick
        pad = (height - width) // 2
        crop_box = (0, pad, width, height - pad)
    else:
        pad = (width - height) // 2
        crop_box = (pad, 0, width - pad, height)
    square = img.crop(crop_box)

    target = (640, 640)  # size expected by the style-transfer models
    tiles = 8
    tile_size = (target[0] // tiles, target[1] // tiles)
    mosaic = Image.new("RGB", (tile_size[0] * tiles, tile_size[1] * tiles))
    tile = square.resize(tile_size)
    # The flips accumulate on `tile`: one horizontal mirror per
    # x-step, one vertical mirror per y-step, so adjacent copies
    # are mirror images of each other.
    for ix in range(tiles):
        tile = tile.transpose(Image.FLIP_LEFT_RIGHT)
        for iy in range(tiles):
            tile = tile.transpose(Image.FLIP_TOP_BOTTOM)
            mosaic.paste(tile, (tile_size[0] * ix, tile_size[1] * iy))
    return mosaic.resize(target)
def style_sofa(
    Input_image: np.ndarray, Style_image: np.ndarray, Choice_of_algorithm: str
) -> Image.Image:
    """
    Styles (all) the sofas in the image to the given style.

    This function uses a transformer to combine the image with
    the desired style according to a generated mask of the sofas
    in the image.

    Input:
        Input_image = image containing a sofa
        Style_image = image containing a style
        Choice_of_algorithm = style transfer algorithm to use
    Return:
        new_sofa = image containing the styled sofa
    """
    # Random tag to correlate log lines of concurrent jobs.
    # Renamed from `id` so the builtin is not shadowed.
    job_id = randint(0, 10)
    print("Starting job ", job_id)
    # Preprocess input images to fit requirements of the segmentation model.
    input_img, style_img = Image.fromarray(Input_image), Image.fromarray(Style_image)
    resized_img, box = resize_sofa(input_img)
    resized_style = resize_style(style_img)
    # Generate a mask marking the sofa pixels in the image.
    print("generating mask...")
    mask = get_mask(resized_img)
    # Create a fully styled version of the whole image.
    print("Styling sofa...")
    styled_sofa = create_styledSofa(resized_img, resized_style, Choice_of_algorithm)
    # Postprocess: keep the styled pixels only where the mask says
    # "sofa", then crop back to the original aspect ratio.
    print("Replacing sofa...")
    new_sofa = replace_sofa(resized_img, mask, styled_sofa)
    new_sofa = new_sofa.crop(box)
    print("Finishing job", job_id)
    return new_sofa
# Gradio UI: two image inputs (sofa photo, style pattern), one radio
# choosing the transfer algorithm, one image output.
demo = gr.Interface(
    style_sofa,
    inputs=[
        gr.inputs.Image(),
        gr.inputs.Image(),
        gr.inputs.Radio(
            ["Style Transformer", "Style FAST", "Style Projection"],
            default="Style FAST",
        ),
    ],
    outputs="image",
    # Pre-baked example triples: (sofa image, style image, algorithm).
    examples=[
        [
            "figures/sofa_example1.jpg",
            "figures/style_example1.jpg",
            "Style Transformer",
        ],
        [
            "figures/sofa_example3.jpg",
            "figures/style_example10.jpg",
            "Style FAST",
        ],
        [
            "figures/sofa_example2.jpg",
            "figures/style_example6.jpg",
            "Style Projection",
        ],
    ],
    title="π Style your sofa π ",
    description="Customize your sofa to your wildest dreams π!\
                \nProvide a picture of your sofa, a desired pattern\
                and (optionally) choose one of the algorithms.\
                \nOr just pick one of the examples below. β¬",
    theme="huggingface",
    # enable_queue=True,
    # Footer with links to the datasets/models/repos used.
    article="**References**\n\n"
    "<a href='https://tianchi.aliyun.com/specials/promotion/alibaba-3d-future' \
        target='_blank'>\
        1. The data that was used to train the segmentation model. \
        </a> \n"
    "<a href='https://github.com/qubvel/segmentation_models' \
        target='_blank'> \
        2. Github repository used to train a segmentation model with transfer. \
        learning.\
        </a> \n"
    "<a href='https://github.com/diyiiyiii/StyTR-2' \
        target='_blank'> \
        3. The github repository that is used for the style transformer. \
        </a> \n"
    "<a href='https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2' \
        target='_blank'> \
        4. A tensorflow model for fast arbitrary image style transfer. \
        </a> \n"
    "<a href='https://github.com/PaddlePaddle/PaddleHub/tree/release/v2.2/modules/image/Image_gan/style_transfer/stylepro_artistic' \
        target='_blank'> \
        5. A paddleHub model for parameter free style transfer. \
        </a> \n",
)
# Launch the app when run as a script; example outputs are cached
# so they render instantly in the UI.
if __name__ == "__main__":
    demo.launch(cache_examples=True)