import os
import gradio as gr
from pathlib import Path
from diffusers import StableDiffusionPipeline
from PIL import Image
from huggingface_hub import notebook_login

# Read the Hugging Face access token from the environment instead of hard-coding it
# in the source (e.g. set HF_TOKEN_SD as a secret in the Space settings).
token = os.environ.get("HF_TOKEN_SD")
import utils.shared_utils as st

import torch
import logging
from torch import autocast
from contextlib import nullcontext

logging.disable(logging.WARNING)
torch.cuda.empty_cache()
torch.manual_seed(3407)
torch.backends.cudnn.benchmark = True

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"
context = autocast if device == "cuda" else nullcontext
# Half precision only works on GPU; fall back to float32 when running on CPU.
dtype = torch.float16 if device == "cuda" else torch.float32
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=dtype, use_auth_token=token).to(device)
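# Optional memory saver (not part of the original app): diffusers pipelines support
# attention slicing, which lowers VRAM usage at a small speed cost on smaller GPUs.
# pipe.enable_attention_slicing()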
def infer(prompt, samples):
    # Generate `samples` candidate background images for the given text prompt.
    images = []
    with context(device):
        for _ in range(int(samples)):
            image = pipe([prompt], num_inference_steps=50, guidance_scale=7.5, height=400, width=400).images[0]
            images.append(image)
    return images
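# `infer` is bound further down to both the prompt textbox's submit event and the
# "Generate images" button; the returned list of PIL images fills the Gallery.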
# Pick whichever input was provided: the uploaded image if present, otherwise the webcam capture.
def select_input(input_img, webcm_img):
    if input_img is None:
        img = webcm_img
    else:
        img = input_img
    return img
# def infer(prompt,samples):
#     images = []
#     with context(device):
#         for _ in range(samples):
#             back_img = st.stableDiffusionAPICall(prompt)
#             images.append(back_img)
#     return images
# def newstyleimage(choice):
#     print(choice)
#     if choice == "yes":
#         return gr.Image.update(visible=True, interactive=True)
#     return
def styleimpose(final_input_img, ref_img):
    return st.superimpose(final_input_img, ref_img)[0]

def change_bg_option(choice):
    # Event handlers should return component updates rather than new components.
    if choice == "I have an Image":
        return gr.update(visible=True)
    elif choice == "Generate one for me":
        return gr.update(lines=8, visible=True, value="Please enter a text prompt")
    else:
        return gr.update(visible=False)
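# Note: change_bg_option is not wired to any control in the current layout; it is kept for reference.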
# TEXT
title = "Green-Screen Image Composition-Transfer"
DEFAULT_TEXT = "Photorealistic scenery of bookshelf in a room"
description = """ | |
<center><a href="https://docs.google.com/document/d/1fde8XKIMT1nNU72859ytd2c58LFBxepS3od9KFBrJbM/edit?usp=sharing">[PAPER - Documentation]</a> </center> | |
<details> | |
<summary><b>Instructions</b></summary> | |
<p style="margin-top: -3px;">With this app, you can generate a suitable background image to overlay your portrait!<br />You have several ways to set how your final auto-edited image will look like:<br /></p> | |
<ul style="margin-top: -20px;margin-bottom: -15px;"> | |
<li style="margin-bottom: -10px;margin-left: 20px;">Use the "<i>Inputs</i>" tab to either upload an image from your device OR allow the use of your webcam to capture</li> | |
<li style="margin-left: 20px;">Use the "<i>Background Image Inputs</i>" to upload your own background. OR</li> | |
<li style="margin-left: 20px;">Use the "<i>Text prompt</i>" tab to generate a satisfactory background image using Stable Diffusion.</li> | |
</ul> | |
<p>After deciding, just hit "<i>Select</i>" to ensure those images are processed.<br />The final image will be available for download <br /> <b>Enjoy!<b><p> | |
</details> | |
""" | |
running = """ | |
### Instructions for running the 3 S's in sequence | |
* **Superimpose** - This button allows you to isolate the foreground from your image and overlay it on the background. Remove background using alpha matting | |
* **Style-Transfer** - This button transfers the style from your original image to re-map your new background realistically. Uses Nvidia FastPhotoStyle | |
* **Smoothing** - Given that the image resolutions and clarity can be an issue, this smoothing button makes your final image, crisp after the stylization transfer. Fair warning - this last process can take 5-10 mins | |
""" | |
style_message = """ | |
This image above will be the content image. By default, a good choice for the style is input foreground image. | |
If you have a different image in mind, you can remove the default and upload it here. | |
Ideally transfer works better if your input foreground is also superimposed on the style image, so you may want to create it using the same steps | |
.""" | |
demo = gr.Blocks()
with demo:
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>" + title + "</h1>")
    with gr.Box():
        with gr.Row():
            with gr.Column():
                gr.Markdown(description)
            with gr.Column():
                gr.Image("capture.png")
    # First row - Inputs
    with gr.Row(scale=1):
        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Upload"):
                    input_img = gr.Image(shape=(800, 800), interactive=True, label="You")
                with gr.TabItem("Webcam Capture"):
                    webcm_img = gr.Image(source="webcam", streaming=True, shape=(800, 800), interactive=True)
            inp_select_btn = gr.Button("Select")
        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Upload"):
                    bgm_img = gr.Image(shape=(800, 800), type="pil", interactive=True, label="The Background")
                    bgm_select_btn = gr.Button("Select")
                with gr.TabItem("Generate via Text Prompt"):
                    with gr.Box():
                        with gr.Row().style(mobile_collapse=False, equal_height=True):
                            text = gr.Textbox(lines=7, label="Prompt",
                                              placeholder="Enter your prompt to generate a background image... something like - Photorealistic scenery of bookshelf in a room")
                            samples = gr.Slider(label="Number of Images", minimum=1, maximum=5, value=2, step=1)
                            btn = gr.Button("Generate images", variant="primary")
                        gallery = gr.Gallery(label="Generated images", show_label=True).style(grid=(1, 3), height="auto")
                        # image_options = gr.Radio(label="Pick", interactive=True, choices=None, type="value")
                        text.submit(infer, inputs=[text, samples], outputs=gallery)
                        btn.click(infer, inputs=[text, samples], outputs=gallery, show_progress=True)
    # Second Row - Backgrounds
    with gr.Row(scale=1):
        with gr.Column():
            final_input_img = gr.Image(shape=(800, 800), type="pil", label="Foreground")
        with gr.Column():
            final_back_img = gr.Image(shape=(800, 800), type="pil", label="Background", interactive=True)
    bgm_select_btn.click(fn=lambda x: x, inputs=bgm_img, outputs=final_back_img)
    inp_select_btn.click(select_input, [input_img, webcm_img], final_input_img)
    with gr.Row(scale=1):
        with gr.Box():
            gr.Markdown(running)
    with gr.Row(scale=1):
        with gr.Box():
            with gr.Column(scale=1):
                supimp_btn = gr.Button("SuperImpose")
                overlay_img = gr.Image(shape=(800, 800), label="Overlay/Content Image", type="pil")
                gr.Markdown(style_message)
                # img_choice = gr.Radio(choices=["yes"], interactive=True, type='value')
                ref_img = gr.Image(shape=(800, 800), label="Style Reference Image", type="pil", interactive=True)
                # ref_img2 = gr.Image(shape=(800, 800), label="Style Reference", type="pil", interactive=True, visible=False)
                # ref_btn = gr.Button("Use this style", variant="primary")
                # ref_btn.click(fn=styleimpose, inputs=[final_input_img, ref_img], outputs=[ref_img])
            with gr.Column(scale=1):
                style_btn = gr.Button("Composition-Transfer", variant="primary")
                style_img = gr.Image(shape=(800, 800), label="Style-Transfer Image", type="pil")
            with gr.Column(scale=1):
                submit_btn = gr.Button("Smoothen", variant="primary")
                output_img = gr.Image(shape=(800, 800), label="Final Smoothened Image", type="pil")
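    # Wire the three processing steps described in `running` ("Superimpose",
    # "Composition-Transfer", "Smoothen") to their handlers in utils.shared_utils.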
    supimp_btn.click(fn=st.superimpose, inputs=[final_input_img, final_back_img], outputs=[overlay_img, ref_img])
    style_btn.click(fn=st.style_transfer, inputs=[overlay_img, ref_img], outputs=[style_img])
    submit_btn.click(fn=st.smoother, inputs=[style_img, overlay_img], outputs=[output_img])
gr.Examples(examples=[["profile_new.png","back_img.png"]], label="AlphaMatting- Remove BG", | |
inputs=[final_input_img, final_back_img], outputs=[overlay_img]) | |
gr.Examples(examples=[["profile_new.png", | |
"bedroom with a bookshelf in the background and a small stool to sit on the right side, photorealistic", | |
3]],inputs= [final_input_img,text,samples], label="Text2Img - Stable Diffisuon") | |
gr.Examples(examples=[["cont_img.png","ref_img.png"]],inputs=[overlay_img, ref_img], label = "Nvidia - FastPhotoStyle") | |
demo.queue(concurrency_count=40, max_size=20).launch(enable_queue=True,max_threads=150) | |
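# To run locally (assumes a CUDA GPU and a valid Hugging Face token; the filename is illustrative):
#   HF_TOKEN_SD=<your token> python app.py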