import os
import gradio as gr
from pathlib import Path
from diffusers import StableDiffusionPipeline
from PIL import Image
from huggingface_hub import notebook_login

# Never commit API tokens to source control; read the Hugging Face token from
# the environment instead.
token = os.environ.get("HUGGING_FACE_HUB_TOKEN")

import src.utils.shared_utils as st
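# shared_utils (aliased as `st`) supplies the helpers wired to buttons below:
# stableDiffusionAPICall, superimpose, style_transfer, and smoother.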


import logging
from contextlib import nullcontext

import torch
from torch import autocast

logging.disable(logging.WARNING)
torch.cuda.empty_cache()
torch.manual_seed(3407)  # fixed seed for reproducible generations
torch.backends.cudnn.benchmark = True

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"
context = autocast if device == "cuda" else nullcontext
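# `with context(device):` resolves to torch.autocast("cuda") on GPU for
# mixed-precision inference, and to a no-op nullcontext on CPU.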

# Local pipeline kept for reference; the app calls the hosted inference API via
# st.stableDiffusionAPICall instead (see infer below).
#
# pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=token).to(device)
#
# def infer_original(prompt, samples):
#     with context(device):
#         images = pipe(samples * [prompt], guidance_scale=7.5).images
#     return images
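# A minimal sketch for loading the pipeline locally on a memory-constrained GPU
# (assumes half-precision weights work for this checkpoint):
#
# pipe = StableDiffusionPipeline.from_pretrained(
#     model_id, torch_dtype=torch.float16, use_auth_token=token
# ).to(device)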




def select_input(input_img, webcm_img):
    """Prefer the uploaded image; fall back to the webcam capture."""
    return webcm_img if input_img is None else input_img


def infer(prompt, samples):
    """Generate `samples` background images from the prompt via the hosted SD API."""
    images = []
    with context(device):
        for _ in range(samples):
            images.append(st.stableDiffusionAPICall(prompt))
    return images


# def newstyleimage(choice):
#     print(choice)
#     if choice == "yes":
#         return gr.Image.update(visible=True,interactive=True)
#     return

def styleimpose(final_input_img, ref_img):
    # st.superimpose returns (overlay, style_reference); only the overlay is needed here
    return st.superimpose(final_input_img, ref_img)[0]

def change_bg_option(choice):
    # Not wired to any event below; appears intended for a background-source radio.
    if choice == "I have an Image":
        return gr.update(visible=True)
    elif choice == "Generate one for me":
        return gr.update(lines=8, visible=True, value="Please enter a text prompt")
    else:
        return gr.update(visible=False)


# TEXT
title = "FSDL- One-Shot, Green-Screen,   Composition-Transfer"
DEFAULT_TEXT = "Photorealistic scenery of bookshelf in a room"
description = """
<center><a href="https://docs.google.com/document/d/1fde8XKIMT1nNU72859ytd2c58LFBxepS3od9KFBrJbM/edit?usp=sharing">[PAPER - Documentation]</a> </center>
<details>
<summary><b>Instructions</b></summary>
<p style="margin-top: -3px;">With this app, you can generate a suitable background image to overlay your portrait!<br />You have several ways to set how your final auto-edited image will look like:<br /></p>
 <ul style="margin-top: -20px;margin-bottom: -15px;">
  <li style="margin-bottom: -10px;margin-left: 20px;">Use the "<i>Inputs</i>" tab to either upload an image from your device OR allow the use of your webcam to capture</li>
  <li style="margin-left: 20px;">Use the "<i>Background Image Inputs</i>" to upload your own background. OR</li>
  <li style="margin-left: 20px;">Use the "<i>Text prompt</i>" tab to generate a satisfactory background image using Stable Diffusion.</li>
</ul> 
<p>After deciding, just hit "<i>Select</i>" to ensure those images are processed.<br />The final image will be available for download.<br /> <b>Enjoy!</b></p>
</details>
"""

running = """

### Instructions for running the 3 S's in sequence

* **Superimpose** - Isolates the foreground of your image (removing the background with alpha matting) and overlays it on the chosen background
* **Style-Transfer** - Transfers the style from your original image onto the new background so the composite looks realistic. Uses NVIDIA FastPhotoStyle
* **Smoothing** - Since image resolution and clarity can suffer during stylization, this final step makes the composite crisp. Fair warning - this last process can take 5-10 minutes
"""

style_message = """ 
This image above will be the content image. By default, the style will be copied from the input foreground image.

If you have a different image in mind,  would you like to upload a different image?  
Click yes to add a new style reference image"""



demo = gr.Blocks()

with demo:
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>" + title + "</h1>")
    with gr.Box():
        gr.Markdown(description)
    # First row - Inputs
    with gr.Row(scale=1):
        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Upload "):
                    input_img = gr.Image(shape=(800, 800), interactive=True, label="You")
                with gr.TabItem("Webcam Capture"):
                    webcm_img = gr.Image(source="webcam", streaming=True, shape=(800, 800), interactive=True)
            inp_select_btn = gr.Button("Select")

        with gr.Column():
            with gr.Tabs():
                with gr.TabItem("Upload"):
                    bgm_img = gr.Image(shape=(800, 800), type="pil", interactive=True, label="The Background")
                    bgm_select_btn = gr.Button("Select")

                with gr.TabItem("Generate via Text Prompt"):
                    with gr.Box():
                        with gr.Row().style(mobile_collapse=False, equal_height=True):
                            text = gr.Textbox(lines=7,
                                              placeholder="Enter your prompt to generate a background image... something like - Photorealistic scenery of bookshelf in a room")

                            samples = gr.Slider(label="Number of Images", minimum=1, maximum=5, value=2, step=1)
                            btn = gr.Button("Generate images",variant="primary")

                    gallery = gr.Gallery(label="Generated images", show_label=True).style(grid=(1, 3), height="auto")
                    # image_options = gr.Radio(label="Pick", interactive=True, choices=None, type="value")
                    text.submit(infer, inputs=[text, samples], outputs=gallery)
                    btn.click(infer, inputs=[text, samples], outputs=gallery, show_progress=True, status_tracker=None)


    # Second Row - Backgrounds
    with gr.Row(scale=1):
        with gr.Column():
            final_input_img = gr.Image(shape=(800, 800), type="pil", label="Foreground")

        with gr.Column():
            final_back_img = gr.Image(shape=(800, 800), type="pil", label="Background", interactive=True)

        bgm_select_btn.click(fn=lambda x: x, inputs=bgm_img, outputs=final_back_img)

    inp_select_btn.click(select_input, [input_img, webcm_img], final_input_img)

    with gr.Row(scale=1):
        with gr.Box():
            gr.Markdown(running)

    with gr.Row(scale=1):

        with gr.Box():
            with gr.Column(scale=1):
                supimp_btn = gr.Button("SuperImpose")
                overlay_img = gr.Image(shape=(800, 800), label="Overlay", type="pil")
                gr.Markdown(style_message)
                #img_choice = gr.Radio(choices= ["yes"],interactive=True,type='value')
                ref_img = gr.Image(shape=(800, 800),label="Style Reference", type="pil",interactive=True)
                ref_img2 = gr.Image(shape=(800, 800), label="Style Reference", type="pil", interactive=True, visible=False)
                ref_btn = gr.Button("Use this style")

        ref_btn.click(fn=styleimpose, inputs=[final_input_img, ref_img], outputs=[ref_img2])

        with gr.Column(scale=1):
            style_btn = gr.Button("Composition-Transfer",variant="primary")
            style_img = gr.Image(shape=(800, 800),label="Style-Transfer Image",type="pil")

        with gr.Column(scale=1):
            submit_btn = gr.Button("Smoothen",variant="primary")
            output_img = gr.Image(shape=(800, 800),label="FinalSmoothened Image",type="pil")

        supimp_btn.click(fn=st.superimpose, inputs=[final_input_img, final_back_img], outputs=[overlay_img,ref_img])
        style_btn.click(fn=st.style_transfer, inputs=[overlay_img,ref_img2], outputs=[style_img])
        submit_btn.click(fn=st.smoother, inputs=[style_img,overlay_img], outputs=[output_img])


    gr.Examples([["profile_new.png","back_img.png"]],[final_input_img, final_back_img])
    gr.Examples([["profile_new.png","bedroom with a bookshelf in the background and a small stool to sit on the right side, photorealistic",3]], [final_input_img,text,samples])

demo.queue()
demo.launch()